[pull] master from comfyanonymous:master #54

Merged (7 commits, May 12, 2025). Changes shown are from all commits.
comfy/ldm/ace/vae/music_vocoder.py (22 changes: 9 additions & 13 deletions)

@@ -8,11 +8,7 @@
 
 import numpy as np
 import torch.nn.functional as F
-from torch.nn.utils import weight_norm
 from torch.nn.utils.parametrize import remove_parametrizations as remove_weight_norm
-# from diffusers.models.modeling_utils import ModelMixin
-# from diffusers.loaders import FromOriginalModelMixin
-# from diffusers.configuration_utils import ConfigMixin, register_to_config
 
 from .music_log_mel import LogMelSpectrogram
 
@@ -259,7 +255,7 @@ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
 
         self.convs1 = nn.ModuleList(
             [
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -269,7 +265,7 @@ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
                         padding=get_padding(kernel_size, dilation[0]),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -279,7 +275,7 @@ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
                         padding=get_padding(kernel_size, dilation[1]),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -294,7 +290,7 @@ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
 
         self.convs2 = nn.ModuleList(
             [
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -304,7 +300,7 @@ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
                         padding=get_padding(kernel_size, 1),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -314,7 +310,7 @@ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
                         padding=get_padding(kernel_size, 1),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -366,7 +362,7 @@ def __init__(
             prod(upsample_rates) == hop_length
         ), f"hop_length must be {prod(upsample_rates)}"
 
-        self.conv_pre = weight_norm(
+        self.conv_pre = torch.nn.utils.parametrizations.weight_norm(
             ops.Conv1d(
                 num_mels,
                 upsample_initial_channel,
@@ -386,7 +382,7 @@ def __init__(
         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
             c_cur = upsample_initial_channel // (2 ** (i + 1))
             self.ups.append(
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.ConvTranspose1d(
                         upsample_initial_channel // (2**i),
                         upsample_initial_channel // (2 ** (i + 1)),
@@ -421,7 +417,7 @@ def __init__(
                 self.resblocks.append(ResBlock1(ch, k, d))
 
         self.activation_post = post_activation()
-        self.conv_post = weight_norm(
+        self.conv_post = torch.nn.utils.parametrizations.weight_norm(
             ops.Conv1d(
                 ch,
                 1,
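The pattern in every hunk above is the same: calls to the deprecated torch.nn.utils.weight_norm are replaced with the parametrization-based torch.nn.utils.parametrizations.weight_norm, while the remove_parametrizations import is kept under its remove_weight_norm alias. Below is a minimal sketch of the new API, assuming a PyTorch recent enough to provide it, and substituting a plain nn.Conv1d for the repo's ops.Conv1d:

import torch.nn as nn
from torch.nn.utils.parametrizations import weight_norm
from torch.nn.utils.parametrize import remove_parametrizations

# Wrap a conv so its weight is recomputed on every forward pass from
# separate magnitude/direction tensors stored as a parametrization.
conv = weight_norm(nn.Conv1d(16, 16, kernel_size=3, padding=1))
print([name for name, _ in conv.parametrizations.weight.named_parameters()])
# -> ['original0', 'original1']

# Fold the parametrization back into a plain .weight (e.g. for inference);
# this is the counterpart of the remove_weight_norm alias imported above.
remove_parametrizations(conv, "weight")
print(hasattr(conv, "parametrizations"))  # False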
comfy/ldm/audio/autoencoder.py (10 changes: 2 additions & 8 deletions)

@@ -75,16 +75,10 @@ def forward(self, x):
         return x
 
 def WNConv1d(*args, **kwargs):
-    try:
-        return torch.nn.utils.parametrizations.weight_norm(ops.Conv1d(*args, **kwargs))
-    except:
-        return torch.nn.utils.weight_norm(ops.Conv1d(*args, **kwargs)) #support pytorch 2.1 and older
+    return torch.nn.utils.parametrizations.weight_norm(ops.Conv1d(*args, **kwargs))
 
 def WNConvTranspose1d(*args, **kwargs):
-    try:
-        return torch.nn.utils.parametrizations.weight_norm(ops.ConvTranspose1d(*args, **kwargs))
-    except:
-        return torch.nn.utils.weight_norm(ops.ConvTranspose1d(*args, **kwargs)) #support pytorch 2.1 and older
+    return torch.nn.utils.parametrizations.weight_norm(ops.ConvTranspose1d(*args, **kwargs))
 
 def get_activation(activation: Literal["elu", "snake", "none"], antialias=False, channels=None) -> nn.Module:
     if activation == "elu":
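With the try/except fallback gone, both helpers use the parametrization-based API unconditionally, so the codebase now assumes a PyTorch that ships torch.nn.utils.parametrizations.weight_norm (the deleted comment pinned the old fallback to "pytorch 2.1 and older"). A self-contained sketch of the simplified helper, again substituting torch.nn.Conv1d for the repo's ops.Conv1d:

import torch
import torch.nn as nn

def WNConv1d(*args, **kwargs):
    # Single code path: no fallback to the deprecated torch.nn.utils.weight_norm.
    return torch.nn.utils.parametrizations.weight_norm(nn.Conv1d(*args, **kwargs))

layer = WNConv1d(8, 8, kernel_size=3, padding=1)
x = torch.randn(1, 8, 32)
print(layer(x).shape)  # torch.Size([1, 8, 32])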
comfy_api_nodes/nodes_kling.py (1 change: 0 additions & 1 deletion)

@@ -671,7 +671,6 @@ def api_call(
             negative_prompt=negative_prompt if negative_prompt else None,
             cfg_scale=cfg_scale,
             mode=KlingVideoGenMode(mode),
-            aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio),
             duration=KlingVideoGenDuration(duration),
             camera_control=camera_control,
         ),