Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

[pull] master from comfyanonymous:master #50

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
May 10, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions comfy/k_diffusion/sampling.py
Original file line number Diff line number Diff line change
Expand Up @@ -1277,6 +1277,7 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None
phi1_fn = lambda t: torch.expm1(t) / t
phi2_fn = lambda t: (phi1_fn(t) - 1.0) / t

old_sigma_down = None
old_denoised = None
uncond_denoised = None
def post_cfg_function(args):
Expand Down Expand Up @@ -1304,9 +1305,9 @@ def post_cfg_function(args):
x = x + d * dt
else:
# Second order multistep method in https://arxiv.org/pdf/2308.02157
t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigma_down), t_fn(sigmas[i - 1])
t, t_old, t_next, t_prev = t_fn(sigmas[i]), t_fn(old_sigma_down), t_fn(sigma_down), t_fn(sigmas[i - 1])
h = t_next - t
c2 = (t_prev - t) / h
c2 = (t_prev - t_old) / h

phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h)
b1 = torch.nan_to_num(phi1_val - phi2_val / c2, nan=0.0)
Expand All @@ -1326,6 +1327,7 @@ def post_cfg_function(args):
old_denoised = uncond_denoised
else:
old_denoised = denoised
old_sigma_down = sigma_down
return x

@torch.no_grad()
Expand Down
13 changes: 3 additions & 10 deletions comfy_api_nodes/nodes_ideogram.py
Original file line number Diff line number Diff line change
Expand Up @@ -234,9 +234,7 @@ def download_and_process_images(image_urls):

class IdeogramV1(ComfyNodeABC):
"""
Generates images synchronously using the Ideogram V1 model.

Images links are available for a limited period of time; if you would like to keep the image, you must download it.
Generates images using the Ideogram V1 model.
"""

def __init__(self):
Expand Down Expand Up @@ -365,9 +363,7 @@ def api_call(

class IdeogramV2(ComfyNodeABC):
"""
Generates images synchronously using the Ideogram V2 model.

Images links are available for a limited period of time; if you would like to keep the image, you must download it.
Generates images using the Ideogram V2 model.
"""

def __init__(self):
Expand Down Expand Up @@ -536,10 +532,7 @@ def api_call(

class IdeogramV3(ComfyNodeABC):
"""
Generates images synchronously using the Ideogram V3 model.

Supports both regular image generation from text prompts and image editing with mask.
Images links are available for a limited period of time; if you would like to keep the image, you must download it.
Generates images using the Ideogram V3 model. Supports both regular image generation from text prompts and image editing with mask.
"""

def __init__(self):
Expand Down
35 changes: 33 additions & 2 deletions comfy_api_nodes/nodes_kling.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,33 @@ def validate_image_result_response(response) -> None:
raise KlingApiError(error_msg)


def validate_input_image(image: torch.Tensor) -> None:
    """
    Validate that an input image meets the Kling API requirements:
    - resolution of at least 300x300 px
    - aspect ratio within the range 1:2.5 to 2.5:1

    See: https://app.klingai.com/global/dev/document-api/apiReference/model/imageToVideo

    Raises ValueError when the tensor rank is unsupported or a constraint fails.
    """
    # Accept batched (B, H, W, C) or unbatched (H, W, C) tensors.
    shape = image.shape
    if len(shape) == 4:
        height, width = shape[1], shape[2]
    elif len(shape) == 3:
        height, width = shape[0], shape[1]
    else:
        raise ValueError("Invalid image tensor shape.")

    # Minimum resolution: 300x300 px.
    if height < 300:
        raise ValueError("Image height must be at least 300px")
    if width < 300:
        raise ValueError("Image width must be at least 300px")

    # Aspect ratio must stay within [1:2.5, 2.5:1].
    ratio = width / height
    if not (1 / 2.5 <= ratio <= 2.5):
        raise ValueError("Image aspect ratio must be between 1:2.5 and 2.5:1")


def get_camera_control_input_config(
tooltip: str, default: float = 0.0
) -> tuple[IO, InputTypeOptions]:
Expand Down Expand Up @@ -530,7 +557,10 @@ def INPUT_TYPES(s):
return {
"required": {
"start_frame": model_field_to_node_input(
IO.IMAGE, KlingImage2VideoRequest, "image"
IO.IMAGE,
KlingImage2VideoRequest,
"image",
tooltip="The reference image used to generate the video.",
),
"prompt": model_field_to_node_input(
IO.STRING, KlingImage2VideoRequest, "prompt", multiline=True
Expand Down Expand Up @@ -607,9 +637,10 @@ def api_call(
auth_token: Optional[str] = None,
) -> tuple[VideoFromFile]:
validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V)
validate_input_image(start_frame)

if camera_control is not None:
# Camera control type for image 2 video is always simple
# Camera control type for image 2 video is always `simple`
camera_control.type = KlingCameraControlType.simple

initial_operation = SynchronousOperation(
Expand Down
Loading