Compare commits

...

13 Commits

Author SHA1 Message Date
comfyanonymous
696672905f Add mochi support to readme. 2024-11-04 04:55:07 -05:00
comfyanonymous
6c9dbde7de Fix mochi all in one checkpoint t5xxl key names. 2024-11-03 01:40:42 -05:00
comfyanonymous
ee8abf0cff Update folder paths: "clip" -> "text_encoders"
You can still use models/clip but the folder might get removed eventually
on new installs of ComfyUI.
2024-11-02 15:35:38 -04:00
comfyanonymous
fabf449feb Mochi VAE encoder. 2024-11-01 17:33:09 -04:00
Uriel Deveaud
cc9cf6d1bd Rename some nodes in Display Name Mappings (nodes.py) (#5439)
* Update nodes_images.py

The Nodes menu has inconsistent names: some have spaces between words, others do not.

* Update nodes.py

Include the node mapping name line for Image Crop Node

* Update nodes_images.py

* Rename image nodes

Add spaces between words for consistency in the Display Name Mappings
2024-10-31 15:18:05 -04:00
Aarni Koskela
1c8286a44b Avoid SyntaxWarning in UniPC docstring (#5442) 2024-10-31 15:17:26 -04:00
comfyanonymous
1af4a47fd1 Bump up mac version for attention upcast bug workaround. 2024-10-31 15:15:31 -04:00
Uriel Deveaud
f2aaa0a475 Rename ImageCrop to Image Crop (#5424)
* Update nodes_images.py

The Nodes menu has inconsistent names: some have spaces between words, others do not.

* Update nodes.py

Include the node mapping name line for Image Crop Node

* Update nodes_images.py
2024-10-31 00:35:34 -04:00
comfyanonymous
daa1565b93 Fix diffusers flux controlnet regression. 2024-10-30 13:11:34 -04:00
comfyanonymous
09fdb2b269 Support SD3.5 medium diffusers format weights and loras. 2024-10-30 04:24:00 -04:00
Chenlei Hu
65a8659182 Update web content to release v1.3.26 (#5413)
* Update web content to release v1.3.26

* nit
2024-10-29 14:14:06 -04:00
comfyanonymous
770ab200f2 Cleanup SkipLayerGuidanceSD3 node. 2024-10-29 10:11:46 -04:00
Dango233
954683d0db SLG first implementation for SD3.5 (#5404)
* SLG first implementation for SD3.5

* Simplify and align with comfy style
2024-10-29 09:59:21 -04:00
37 changed files with 4743 additions and 3947 deletions

View File

@@ -40,6 +40,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
- Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything.
- Fully supports SD1.x, SD2.x, [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/), [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/), [Stable Cascade](https://comfyanonymous.github.io/ComfyUI_examples/stable_cascade/), [SD3](https://comfyanonymous.github.io/ComfyUI_examples/sd3/) and [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/)
- [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/)
+- [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/)
- Asynchronous Queue system
- Many optimizations: Only re-executes the parts of the workflow that changes between executions.
- Smart memory management: can automatically run models on GPUs with as low as 1GB vram.

View File

@@ -16,7 +16,7 @@ class NoiseScheduleVP:
continuous_beta_0=0.1,
continuous_beta_1=20.,
):
-"""Create a wrapper class for the forward SDE (VP type).
+r"""Create a wrapper class for the forward SDE (VP type).
***
Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.
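Not part of the diff, just context on why the `r` prefix matters: newer CPython releases emit a SyntaxWarning for unrecognized escape sequences (such as the `\s` in this docstring's LaTeX) inside ordinary string literals, while a raw string keeps the backslashes literal. A minimal sketch:

```python
import warnings

# The UniPC docstring contains LaTeX-style backslashes; in a plain string an
# unrecognized escape like "\s" warns at compile time, the raw-string form does not.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    compile('def f():\n "noise schedule \\sigma(t)"\n', "<plain>", "exec")
    compile('def g():\n r"noise schedule \\sigma(t)"\n', "<raw>", "exec")

print([str(w.message) for w in caught])  # only the non-raw docstring produces a warning
```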

View File

@@ -2,12 +2,16 @@
#adapted to ComfyUI
from typing import Callable, List, Optional, Tuple, Union
+from functools import partial
+import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
+from comfy.ldm.modules.attention import optimized_attention
import comfy.ops
ops = comfy.ops.disable_weight_init
@@ -158,8 +162,10 @@ class ResBlock(nn.Module):
*,
affine: bool = True,
attn_block: Optional[nn.Module] = None,
-padding_mode: str = "replicate",
causal: bool = True,
+prune_bottleneck: bool = False,
+padding_mode: str,
+bias: bool = True,
):
super().__init__()
self.channels = channels
@@ -170,23 +176,23 @@ class ResBlock(nn.Module):
nn.SiLU(inplace=True),
PConv3d(
in_channels=channels,
-out_channels=channels,
+out_channels=channels // 2 if prune_bottleneck else channels,
kernel_size=(3, 3, 3),
stride=(1, 1, 1),
padding_mode=padding_mode,
-bias=True,
-# causal=causal,
+bias=bias,
+causal=causal,
),
norm_fn(channels, affine=affine),
nn.SiLU(inplace=True),
PConv3d(
-in_channels=channels,
+in_channels=channels // 2 if prune_bottleneck else channels,
out_channels=channels,
kernel_size=(3, 3, 3),
stride=(1, 1, 1),
padding_mode=padding_mode,
-bias=True,
-# causal=causal,
+bias=bias,
+causal=causal,
),
)
@@ -206,6 +212,81 @@ class ResBlock(nn.Module):
return self.attn_block(x)
class Attention(nn.Module):
def __init__(
self,
dim: int,
head_dim: int = 32,
qkv_bias: bool = False,
out_bias: bool = True,
qk_norm: bool = True,
) -> None:
super().__init__()
self.head_dim = head_dim
self.num_heads = dim // head_dim
self.qk_norm = qk_norm
self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
self.out = nn.Linear(dim, dim, bias=out_bias)
def forward(
self,
x: torch.Tensor,
) -> torch.Tensor:
"""Compute temporal self-attention.
Args:
x: Input tensor. Shape: [B, C, T, H, W].
chunk_size: Chunk size for large tensors.
Returns:
x: Output tensor. Shape: [B, C, T, H, W].
"""
B, _, T, H, W = x.shape
if T == 1:
# No attention for single frame.
x = x.movedim(1, -1) # [B, C, T, H, W] -> [B, T, H, W, C]
qkv = self.qkv(x)
_, _, x = qkv.chunk(3, dim=-1) # Throw away queries and keys.
x = self.out(x)
return x.movedim(-1, 1) # [B, T, H, W, C] -> [B, C, T, H, W]
# 1D temporal attention.
x = rearrange(x, "B C t h w -> (B h w) t C")
qkv = self.qkv(x)
# Input: qkv with shape [B, t, 3 * num_heads * head_dim]
# Output: x with shape [B, num_heads, t, head_dim]
q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, self.head_dim).transpose(1, 3).unbind(2)
if self.qk_norm:
q = F.normalize(q, p=2, dim=-1)
k = F.normalize(k, p=2, dim=-1)
x = optimized_attention(q, k, v, self.num_heads, skip_reshape=True)
assert x.size(0) == q.size(0)
x = self.out(x)
x = rearrange(x, "(B h w) t C -> B C t h w", B=B, h=H, w=W)
return x
class AttentionBlock(nn.Module):
def __init__(
self,
dim: int,
**attn_kwargs,
) -> None:
super().__init__()
self.norm = norm_fn(dim)
self.attn = Attention(dim, **attn_kwargs)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self.attn(self.norm(x))
class CausalUpsampleBlock(nn.Module):
def __init__(
self,
@@ -244,14 +325,9 @@ class CausalUpsampleBlock(nn.Module):
return x
-def block_fn(channels, *, has_attention: bool = False, **block_kwargs):
-assert has_attention is False #NOTE: if this is ever true add back the attention code.
-attn_block = None #AttentionBlock(channels) if has_attention else None
-return ResBlock(
-channels, affine=True, attn_block=attn_block, **block_kwargs
-)
+def block_fn(channels, *, affine: bool = True, has_attention: bool = False, **block_kwargs):
+attn_block = AttentionBlock(channels) if has_attention else None
+return ResBlock(channels, affine=affine, attn_block=attn_block, **block_kwargs)
class DownsampleBlock(nn.Module):
@@ -288,8 +364,9 @@ class DownsampleBlock(nn.Module):
out_channels=out_channels,
kernel_size=(temporal_reduction, spatial_reduction, spatial_reduction),
stride=(temporal_reduction, spatial_reduction, spatial_reduction),
+# First layer in each block always uses replicate padding
padding_mode="replicate",
-bias=True,
+bias=block_kwargs["bias"],
)
)
@@ -382,7 +459,7 @@ class Decoder(nn.Module):
blocks = []
first_block = [
-nn.Conv3d(latent_dim, ch[-1], kernel_size=(1, 1, 1))
+ops.Conv3d(latent_dim, ch[-1], kernel_size=(1, 1, 1))
] # Input layer.
# First set of blocks preserve channel count.
for _ in range(num_res_blocks[-1]):
@@ -452,11 +529,165 @@ class Decoder(nn.Module):
return self.output_proj(x).contiguous()
class LatentDistribution:
def __init__(self, mean: torch.Tensor, logvar: torch.Tensor):
"""Initialize latent distribution.
Args:
mean: Mean of the distribution. Shape: [B, C, T, H, W].
logvar: Logarithm of variance of the distribution. Shape: [B, C, T, H, W].
"""
assert mean.shape == logvar.shape
self.mean = mean
self.logvar = logvar
def sample(self, temperature=1.0, generator: torch.Generator = None, noise=None):
if temperature == 0.0:
return self.mean
if noise is None:
noise = torch.randn(self.mean.shape, device=self.mean.device, dtype=self.mean.dtype, generator=generator)
else:
assert noise.device == self.mean.device
noise = noise.to(self.mean.dtype)
if temperature != 1.0:
raise NotImplementedError(f"Temperature {temperature} is not supported.")
# Just Gaussian sample with no scaling of variance.
return noise * torch.exp(self.logvar * 0.5) + self.mean
def mode(self):
return self.mean
class Encoder(nn.Module):
def __init__(
self,
*,
in_channels: int,
base_channels: int,
channel_multipliers: List[int],
num_res_blocks: List[int],
latent_dim: int,
temporal_reductions: List[int],
spatial_reductions: List[int],
prune_bottlenecks: List[bool],
has_attentions: List[bool],
affine: bool = True,
bias: bool = True,
input_is_conv_1x1: bool = False,
padding_mode: str,
):
super().__init__()
self.temporal_reductions = temporal_reductions
self.spatial_reductions = spatial_reductions
self.base_channels = base_channels
self.channel_multipliers = channel_multipliers
self.num_res_blocks = num_res_blocks
self.latent_dim = latent_dim
self.fourier_features = FourierFeatures()
ch = [mult * base_channels for mult in channel_multipliers]
num_down_blocks = len(ch) - 1
assert len(num_res_blocks) == num_down_blocks + 2
layers = (
[ops.Conv3d(in_channels, ch[0], kernel_size=(1, 1, 1), bias=True)]
if not input_is_conv_1x1
else [Conv1x1(in_channels, ch[0])]
)
assert len(prune_bottlenecks) == num_down_blocks + 2
assert len(has_attentions) == num_down_blocks + 2
block = partial(block_fn, padding_mode=padding_mode, affine=affine, bias=bias)
for _ in range(num_res_blocks[0]):
layers.append(block(ch[0], has_attention=has_attentions[0], prune_bottleneck=prune_bottlenecks[0]))
prune_bottlenecks = prune_bottlenecks[1:]
has_attentions = has_attentions[1:]
assert len(temporal_reductions) == len(spatial_reductions) == len(ch) - 1
for i in range(num_down_blocks):
layer = DownsampleBlock(
ch[i],
ch[i + 1],
num_res_blocks=num_res_blocks[i + 1],
temporal_reduction=temporal_reductions[i],
spatial_reduction=spatial_reductions[i],
prune_bottleneck=prune_bottlenecks[i],
has_attention=has_attentions[i],
affine=affine,
bias=bias,
padding_mode=padding_mode,
)
layers.append(layer)
# Additional blocks.
for _ in range(num_res_blocks[-1]):
layers.append(block(ch[-1], has_attention=has_attentions[-1], prune_bottleneck=prune_bottlenecks[-1]))
self.layers = nn.Sequential(*layers)
# Output layers.
self.output_norm = norm_fn(ch[-1])
self.output_proj = Conv1x1(ch[-1], 2 * latent_dim, bias=False)
@property
def temporal_downsample(self):
return math.prod(self.temporal_reductions)
@property
def spatial_downsample(self):
return math.prod(self.spatial_reductions)
def forward(self, x) -> LatentDistribution:
"""Forward pass.
Args:
x: Input video tensor. Shape: [B, C, T, H, W]. Scaled to [-1, 1]
Returns:
means: Latent tensor. Shape: [B, latent_dim, t, h, w]. Scaled [-1, 1].
h = H // 8, w = W // 8, t - 1 = (T - 1) // 6
logvar: Shape: [B, latent_dim, t, h, w].
"""
assert x.ndim == 5, f"Expected 5D input, got {x.shape}"
x = self.fourier_features(x)
x = self.layers(x)
x = self.output_norm(x)
x = F.silu(x, inplace=True)
x = self.output_proj(x)
means, logvar = torch.chunk(x, 2, dim=1)
assert means.ndim == 5
assert logvar.shape == means.shape
assert means.size(1) == self.latent_dim
return LatentDistribution(means, logvar)
class VideoVAE(nn.Module):
def __init__(self):
super().__init__()
-self.encoder = None #TODO once the model releases
+self.encoder = Encoder(
+in_channels=15,
+base_channels=64,
+channel_multipliers=[1, 2, 4, 6],
+num_res_blocks=[3, 3, 4, 6, 3],
+latent_dim=12,
+temporal_reductions=[1, 2, 3],
+spatial_reductions=[2, 2, 2],
+prune_bottlenecks=[False, False, False, False, False],
+has_attentions=[False, True, True, True, True],
+affine=True,
+bias=True,
+input_is_conv_1x1=True,
+padding_mode="replicate"
+)
self.decoder = Decoder(
out_channels=3,
base_channels=128,
@@ -474,7 +705,7 @@ class VideoVAE(nn.Module):
)
def encode(self, x):
-return self.encoder(x)
+return self.encoder(x).mode()
def decode(self, x):
return self.decoder(x)
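Reading the new encoder path end to end: Encoder.forward returns a LatentDistribution, mode() is just its mean, and sample() is a plain reparameterized draw, so VideoVAE.encode is now deterministic. Not part of the diff, a small standalone sketch of that sampling rule with made-up shapes:

```python
import torch

# Stand-in for LatentDistribution: sample() = mean + exp(0.5 * logvar) * noise,
# mode() = mean (which is what the patched VideoVAE.encode returns).
mean = torch.zeros(1, 12, 4, 8, 8)        # [B, latent_dim, t, h, w]
logvar = torch.full_like(mean, -2.0)

noise = torch.randn_like(mean)
sample = noise * torch.exp(logvar * 0.5) + mean   # stochastic latent
mode = mean                                       # deterministic latent
print(sample.shape, mode.shape)
```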

View File

@@ -393,6 +393,13 @@ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None, skip_resh
return out
+if model_management.is_nvidia(): #pytorch 2.3 and up seem to have this issue.
+SDP_BATCH_LIMIT = 2**15
+else:
+#TODO: other GPUs ?
+SDP_BATCH_LIMIT = 2**31
def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):
if skip_reshape:
b, _, _, dim_head = q.shape
@@ -404,10 +411,15 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha
(q, k, v),
)
-out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
-out = (
-out.transpose(1, 2).reshape(b, -1, heads * dim_head)
-)
+if SDP_BATCH_LIMIT >= q.shape[0]:
+out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
+out = (
+out.transpose(1, 2).reshape(b, -1, heads * dim_head)
+)
+else:
+out = torch.empty((q.shape[0], q.shape[2], heads * dim_head), dtype=q.dtype, layout=q.layout, device=q.device)
+for i in range(0, q.shape[0], SDP_BATCH_LIMIT):
+out[i : i + SDP_BATCH_LIMIT] = torch.nn.functional.scaled_dot_product_attention(q[i : i + SDP_BATCH_LIMIT], k[i : i + SDP_BATCH_LIMIT], v[i : i + SDP_BATCH_LIMIT], attn_mask=mask, dropout_p=0.0, is_causal=False).transpose(1, 2).reshape(-1, q.shape[2], heads * dim_head)
return out
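The new branch only changes how the batch dimension is fed to PyTorch's scaled_dot_product_attention: on NVIDIA with PyTorch 2.3+ very large batches are processed in slices of at most SDP_BATCH_LIMIT, and the concatenated result matches the single call. Not part of the diff, a quick equivalence check with toy sizes (the real limit is 2**15):

```python
import torch
import torch.nn.functional as F

B, heads, T, d = 6, 2, 5, 8
limit = 4  # stand-in for SDP_BATCH_LIMIT

q, k, v = (torch.randn(B, heads, T, d) for _ in range(3))

full = F.scaled_dot_product_attention(q, k, v).transpose(1, 2).reshape(B, T, heads * d)

chunked = torch.empty_like(full)
for i in range(0, B, limit):  # same slicing pattern as the patched attention_pytorch
    chunked[i:i + limit] = (
        F.scaled_dot_product_attention(q[i:i + limit], k[i:i + limit], v[i:i + limit])
        .transpose(1, 2)
        .reshape(-1, T, heads * d)
    )

print(torch.allclose(full, chunked, atol=1e-6))  # True
```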

View File

@@ -540,7 +540,11 @@ def model_config_from_diffusers_unet(state_dict):
def convert_diffusers_mmdit(state_dict, output_prefix=""):
out_sd = {}
-if 'transformer_blocks.0.attn.norm_added_k.weight' in state_dict: #Flux
+if 'joint_transformer_blocks.0.attn.add_k_proj.weight' in state_dict: #AuraFlow
+num_joint = count_blocks(state_dict, 'joint_transformer_blocks.{}.')
+num_single = count_blocks(state_dict, 'single_transformer_blocks.{}.')
+sd_map = comfy.utils.auraflow_to_diffusers({"n_double_layers": num_joint, "n_layers": num_joint + num_single}, output_prefix=output_prefix)
+elif 'x_embedder.weight' in state_dict: #Flux
depth = count_blocks(state_dict, 'transformer_blocks.{}.')
depth_single_blocks = count_blocks(state_dict, 'single_transformer_blocks.{}.')
hidden_size = state_dict["x_embedder.bias"].shape[0]
@@ -549,10 +553,6 @@ def convert_diffusers_mmdit(state_dict, output_prefix=""):
num_blocks = count_blocks(state_dict, 'transformer_blocks.{}.')
depth = state_dict["pos_embed.proj.weight"].shape[0] // 64
sd_map = comfy.utils.mmdit_to_diffusers({"depth": depth, "num_blocks": num_blocks}, output_prefix=output_prefix)
-elif 'joint_transformer_blocks.0.attn.add_k_proj.weight' in state_dict: #AuraFlow
-num_joint = count_blocks(state_dict, 'joint_transformer_blocks.{}.')
-num_single = count_blocks(state_dict, 'single_transformer_blocks.{}.')
-sd_map = comfy.utils.auraflow_to_diffusers({"n_double_layers": num_joint, "n_layers": num_joint + num_single}, output_prefix=output_prefix)
else:
return None

View File

@@ -896,7 +896,7 @@ def force_upcast_attention_dtype():
upcast = args.force_upcast_attention
try:
macos_version = tuple(int(n) for n in platform.mac_ver()[0].split("."))
-if (14, 5) <= macos_version <= (15, 0, 1): # black image bug on recent versions of macOS
+if (14, 5) <= macos_version <= (15, 2): # black image bug on recent versions of macOS
upcast = True
except:
pass
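The workaround relies on lexicographic comparison of the parsed macOS version tuple; raising the upper bound from (15, 0, 1) to (15, 2) pulls the 15.1.x releases into the affected range. Not part of the diff, a quick illustration:

```python
# Version strings compared the same way force_upcast_attention_dtype() does.
for ver in ("14.4", "14.6.1", "15.0.1", "15.1.1", "15.3"):
    t = tuple(int(n) for n in ver.split("."))
    print(ver, (14, 5) <= t <= (15, 2))
# 14.4 False, 14.6.1 True, 15.0.1 True, 15.1.1 True, 15.3 False
```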

View File

@@ -171,6 +171,7 @@ class VAE:
self.downscale_ratio = 8
self.upscale_ratio = 8
self.latent_channels = 4
+self.latent_dim = 2
self.output_channels = 3
self.process_input = lambda image: image * 2.0 - 1.0
self.process_output = lambda image: torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0)
@@ -240,16 +241,22 @@ class VAE:
self.output_channels = 2
self.upscale_ratio = 2048
self.downscale_ratio = 2048
+self.latent_dim = 1
self.process_output = lambda audio: audio
self.process_input = lambda audio: audio
self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32]
-elif "blocks.2.blocks.3.stack.5.weight" in sd or "decoder.blocks.2.blocks.3.stack.5.weight" in sd: #genmo mochi vae
+elif "blocks.2.blocks.3.stack.5.weight" in sd or "decoder.blocks.2.blocks.3.stack.5.weight" in sd or "layers.4.layers.1.attn_block.attn.qkv.weight" in sd or "encoder.layers.4.layers.1.attn_block.attn.qkv.weight": #genmo mochi vae
if "blocks.2.blocks.3.stack.5.weight" in sd:
sd = comfy.utils.state_dict_prefix_replace(sd, {"": "decoder."})
+if "layers.4.layers.1.attn_block.attn.qkv.weight" in sd:
+sd = comfy.utils.state_dict_prefix_replace(sd, {"": "encoder."})
self.first_stage_model = comfy.ldm.genmo.vae.model.VideoVAE()
self.latent_channels = 12
+self.latent_dim = 3
self.memory_used_decode = lambda shape, dtype: (1000 * shape[2] * shape[3] * shape[4] * (6 * 8 * 8)) * model_management.dtype_size(dtype)
+self.memory_used_encode = lambda shape, dtype: (1.5 * max(shape[2], 7) * shape[3] * shape[4] * (6 * 8 * 8)) * model_management.dtype_size(dtype)
self.upscale_ratio = (lambda a: max(0, a * 6 - 5), 8, 8)
+self.working_dtypes = [torch.float16, torch.float32]
else:
logging.warning("WARNING: No VAE weights detected, VAE not initalized.")
self.first_stage_model = None
@@ -361,17 +368,22 @@ class VAE:
def encode(self, pixel_samples):
pixel_samples = self.vae_encode_crop_pixels(pixel_samples)
-pixel_samples = pixel_samples.movedim(-1,1)
+pixel_samples = pixel_samples.movedim(-1, 1)
+if self.latent_dim == 3:
+pixel_samples = pixel_samples.movedim(1, 0).unsqueeze(0)
try:
memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype)
model_management.load_models_gpu([self.patcher], memory_required=memory_used)
free_memory = model_management.get_free_memory(self.device)
batch_number = int(free_memory / max(1, memory_used))
batch_number = max(1, batch_number)
-samples = torch.empty((pixel_samples.shape[0], self.latent_channels) + tuple(map(lambda a: a // self.downscale_ratio, pixel_samples.shape[2:])), device=self.output_device)
+samples = None
for x in range(0, pixel_samples.shape[0], batch_number):
-pixels_in = self.process_input(pixel_samples[x:x+batch_number]).to(self.vae_dtype).to(self.device)
-samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).to(self.output_device).float()
+pixels_in = self.process_input(pixel_samples[x:x + batch_number]).to(self.vae_dtype).to(self.device)
+out = self.first_stage_model.encode(pixels_in).to(self.output_device).float()
+if samples is None:
+samples = torch.empty((pixel_samples.shape[0],) + tuple(out.shape[1:]), device=self.output_device)
+samples[x:x + batch_number] = out
except model_management.OOM_EXCEPTION as e:
logging.warning("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")

View File

@@ -12,7 +12,7 @@ class T5XXLModel(comfy.text_encoders.sd3_clip.T5XXLModel):
class MochiT5XXL(sd1_clip.SD1ClipModel):
def __init__(self, device="cpu", dtype=None, model_options={}):
-super().__init__(device=device, dtype=dtype, clip_name="t5xxl", clip_model=T5XXLModel, model_options=model_options)
+super().__init__(device=device, dtype=dtype, name="t5xxl", clip_model=T5XXLModel, model_options=model_options)
class T5XXLTokenizer(sd1_clip.SDTokenizer):

View File

@@ -316,10 +316,18 @@ MMDIT_MAP_BLOCK = {
("context_block.mlp.fc1.weight", "ff_context.net.0.proj.weight"), ("context_block.mlp.fc1.weight", "ff_context.net.0.proj.weight"),
("context_block.mlp.fc2.bias", "ff_context.net.2.bias"), ("context_block.mlp.fc2.bias", "ff_context.net.2.bias"),
("context_block.mlp.fc2.weight", "ff_context.net.2.weight"), ("context_block.mlp.fc2.weight", "ff_context.net.2.weight"),
("context_block.attn.ln_q.weight", "attn.norm_added_q.weight"),
("context_block.attn.ln_k.weight", "attn.norm_added_k.weight"),
("x_block.adaLN_modulation.1.bias", "norm1.linear.bias"), ("x_block.adaLN_modulation.1.bias", "norm1.linear.bias"),
("x_block.adaLN_modulation.1.weight", "norm1.linear.weight"), ("x_block.adaLN_modulation.1.weight", "norm1.linear.weight"),
("x_block.attn.proj.bias", "attn.to_out.0.bias"), ("x_block.attn.proj.bias", "attn.to_out.0.bias"),
("x_block.attn.proj.weight", "attn.to_out.0.weight"), ("x_block.attn.proj.weight", "attn.to_out.0.weight"),
("x_block.attn.ln_q.weight", "attn.norm_q.weight"),
("x_block.attn.ln_k.weight", "attn.norm_k.weight"),
("x_block.attn2.proj.bias", "attn2.to_out.0.bias"),
("x_block.attn2.proj.weight", "attn2.to_out.0.weight"),
("x_block.attn2.ln_q.weight", "attn2.norm_q.weight"),
("x_block.attn2.ln_k.weight", "attn2.norm_k.weight"),
("x_block.mlp.fc1.bias", "ff.net.0.proj.bias"), ("x_block.mlp.fc1.bias", "ff.net.0.proj.bias"),
("x_block.mlp.fc1.weight", "ff.net.0.proj.weight"), ("x_block.mlp.fc1.weight", "ff.net.0.proj.weight"),
("x_block.mlp.fc2.bias", "ff.net.2.bias"), ("x_block.mlp.fc2.bias", "ff.net.2.bias"),
@@ -349,6 +357,12 @@ def mmdit_to_diffusers(mmdit_config, output_prefix=""):
key_map["{}add_k_proj.{}".format(k, end)] = (qkv, (0, offset, offset)) key_map["{}add_k_proj.{}".format(k, end)] = (qkv, (0, offset, offset))
key_map["{}add_v_proj.{}".format(k, end)] = (qkv, (0, offset * 2, offset)) key_map["{}add_v_proj.{}".format(k, end)] = (qkv, (0, offset * 2, offset))
k = "{}.attn2.".format(block_from)
qkv = "{}.x_block.attn2.qkv.{}".format(block_to, end)
key_map["{}to_q.{}".format(k, end)] = (qkv, (0, 0, offset))
key_map["{}to_k.{}".format(k, end)] = (qkv, (0, offset, offset))
key_map["{}to_v.{}".format(k, end)] = (qkv, (0, offset * 2, offset))
for k in MMDIT_MAP_BLOCK: for k in MMDIT_MAP_BLOCK:
key_map["{}.{}".format(block_from, k[1])] = "{}.{}".format(block_to, k[0]) key_map["{}.{}".format(block_from, k[1])] = "{}.{}".format(block_to, k[0])

View File

@@ -3,11 +3,11 @@ import comfy.sd
import comfy.model_management
import nodes
import torch
+import re
class TripleCLIPLoader:
@classmethod
def INPUT_TYPES(s):
-return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ), "clip_name2": (folder_paths.get_filename_list("clip"), ), "clip_name3": (folder_paths.get_filename_list("clip"), )
+return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ), "clip_name2": (folder_paths.get_filename_list("text_encoders"), ), "clip_name3": (folder_paths.get_filename_list("text_encoders"), )
}}
RETURN_TYPES = ("CLIP",)
FUNCTION = "load_clip"
@@ -15,9 +15,9 @@ class TripleCLIPLoader:
CATEGORY = "advanced/loaders"
def load_clip(self, clip_name1, clip_name2, clip_name3):
-clip_path1 = folder_paths.get_full_path_or_raise("clip", clip_name1)
-clip_path2 = folder_paths.get_full_path_or_raise("clip", clip_name2)
-clip_path3 = folder_paths.get_full_path_or_raise("clip", clip_name3)
+clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
+clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
+clip_path3 = folder_paths.get_full_path_or_raise("text_encoders", clip_name3)
clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2, clip_path3], embedding_directory=folder_paths.get_folder_paths("embeddings"))
return (clip,)
@@ -95,11 +95,70 @@ class ControlNetApplySD3(nodes.ControlNetApplyAdvanced):
CATEGORY = "conditioning/controlnet"
DEPRECATED = True
class SkipLayerGuidanceSD3:
'''
Enhance guidance towards detailed dtructure by having another set of CFG negative with skipped layers.
Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377)
Experimental implementation by Dango233@StabilityAI.
'''
@classmethod
def INPUT_TYPES(s):
return {"required": {"model": ("MODEL", ),
"layers": ("STRING", {"default": "7, 8, 9", "multiline": False}),
"scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}),
"start_percent": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}),
"end_percent": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001})
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "skip_guidance"
CATEGORY = "advanced/guidance"
def skip_guidance(self, model, layers, scale, start_percent, end_percent):
if layers == "" or layers == None:
return (model, )
# check if layer is comma separated integers
def skip(args, extra_args):
return args
model_sampling = model.get_model_object("model_sampling")
sigma_start = model_sampling.percent_to_sigma(start_percent)
sigma_end = model_sampling.percent_to_sigma(end_percent)
def post_cfg_function(args):
model = args["model"]
cond_pred = args["cond_denoised"]
cond = args["cond"]
cfg_result = args["denoised"]
sigma = args["sigma"]
x = args["input"]
model_options = args["model_options"].copy()
for layer in layers:
model_options = comfy.model_patcher.set_model_options_patch_replace(model_options, skip, "dit", "double_block", layer)
model_sampling.percent_to_sigma(start_percent)
sigma_ = sigma[0].item()
if scale > 0 and sigma_ >= sigma_end and sigma_ <= sigma_start:
(slg,) = comfy.samplers.calc_cond_batch(model, [cond], x, sigma, model_options)
cfg_result = cfg_result + (cond_pred - slg) * scale
return cfg_result
layers = re.findall(r'\d+', layers)
layers = [int(i) for i in layers]
m = model.clone()
m.set_model_sampler_post_cfg_function(post_cfg_function)
return (m, )
NODE_CLASS_MAPPINGS = {
"TripleCLIPLoader": TripleCLIPLoader,
"EmptySD3LatentImage": EmptySD3LatentImage,
"CLIPTextEncodeSD3": CLIPTextEncodeSD3,
"ControlNetApplySD3": ControlNetApplySD3,
+"SkipLayerGuidanceSD3": SkipLayerGuidanceSD3,
}
NODE_DISPLAY_NAME_MAPPINGS = {
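What SkipLayerGuidanceSD3 computes, stripped of the ComfyUI plumbing: inside the chosen sigma window it runs one extra conditional pass with the listed double blocks patched to an identity function, then pushes the CFG result away from that "skipped" prediction. Not part of the diff, a minimal numeric sketch of the post-CFG combination (values invented):

```python
import torch

scale = 3.0
cfg_result = torch.tensor([0.20, 0.40])   # ordinary CFG output ("denoised")
cond_pred = torch.tensor([0.25, 0.50])    # conditional prediction
slg_pred = torch.tensor([0.22, 0.44])     # same pass with layers 7, 8, 9 skipped

guided = cfg_result + (cond_pred - slg_pred) * scale
print(guided)   # tensor([0.2900, 0.5800])
```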

View File

@@ -18,7 +18,7 @@ folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".y
folder_names_and_paths["loras"] = ([os.path.join(models_dir, "loras")], supported_pt_extensions) folder_names_and_paths["loras"] = ([os.path.join(models_dir, "loras")], supported_pt_extensions)
folder_names_and_paths["vae"] = ([os.path.join(models_dir, "vae")], supported_pt_extensions) folder_names_and_paths["vae"] = ([os.path.join(models_dir, "vae")], supported_pt_extensions)
folder_names_and_paths["clip"] = ([os.path.join(models_dir, "clip")], supported_pt_extensions) folder_names_and_paths["text_encoders"] = ([os.path.join(models_dir, "text_encoders"), os.path.join(models_dir, "clip")], supported_pt_extensions)
folder_names_and_paths["diffusion_models"] = ([os.path.join(models_dir, "unet"), os.path.join(models_dir, "diffusion_models")], supported_pt_extensions) folder_names_and_paths["diffusion_models"] = ([os.path.join(models_dir, "unet"), os.path.join(models_dir, "diffusion_models")], supported_pt_extensions)
folder_names_and_paths["clip_vision"] = ([os.path.join(models_dir, "clip_vision")], supported_pt_extensions) folder_names_and_paths["clip_vision"] = ([os.path.join(models_dir, "clip_vision")], supported_pt_extensions)
folder_names_and_paths["style_models"] = ([os.path.join(models_dir, "style_models")], supported_pt_extensions) folder_names_and_paths["style_models"] = ([os.path.join(models_dir, "style_models")], supported_pt_extensions)
@@ -81,7 +81,8 @@ extension_mimetypes_cache = {
} }
def map_legacy(folder_name: str) -> str: def map_legacy(folder_name: str) -> str:
legacy = {"unet": "diffusion_models"} legacy = {"unet": "diffusion_models",
"clip": "text_encoders"}
return legacy.get(folder_name, folder_name) return legacy.get(folder_name, folder_name)
if not os.path.exists(input_directory): if not os.path.exists(input_directory):
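Taken together with the new "text_encoders" entry above, the legacy map is what keeps old workflows working: a lookup that still asks for "clip" is first translated to "text_encoders", and that folder name resolves to both models/text_encoders and the old models/clip directory, matching the note in commit ee8abf0cff. Not part of the diff, a condensed sketch of the behaviour (paths shortened):

```python
import os

models_dir = "models"
folder_names_and_paths = {
    "text_encoders": ([os.path.join(models_dir, "text_encoders"),
                       os.path.join(models_dir, "clip")], {".safetensors", ".ckpt"}),
}

def map_legacy(folder_name: str) -> str:
    legacy = {"unet": "diffusion_models",
              "clip": "text_encoders"}
    return legacy.get(folder_name, folder_name)

# Old node code asking for "clip" still lands on the new folder set:
print(folder_names_and_paths[map_legacy("clip")][0])
# ['models/text_encoders', 'models/clip']
```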

View File

@@ -888,7 +888,7 @@ class UNETLoader:
class CLIPLoader:
@classmethod
def INPUT_TYPES(s):
-return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
+return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ),
"type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi"], ),
}}
RETURN_TYPES = ("CLIP",)
@@ -908,15 +908,15 @@ class CLIPLoader:
else:
clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
-clip_path = folder_paths.get_full_path_or_raise("clip", clip_name)
+clip_path = folder_paths.get_full_path_or_raise("text_encoders", clip_name)
clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
return (clip,)
class DualCLIPLoader:
@classmethod
def INPUT_TYPES(s):
-return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ),
-"clip_name2": (folder_paths.get_filename_list("clip"), ),
+return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ),
+"clip_name2": (folder_paths.get_filename_list("text_encoders"), ),
"type": (["sdxl", "sd3", "flux"], ),
}}
RETURN_TYPES = ("CLIP",)
@@ -925,8 +925,8 @@ class DualCLIPLoader:
CATEGORY = "advanced/loaders"
def load_clip(self, clip_name1, clip_name2, type):
-clip_path1 = folder_paths.get_full_path_or_raise("clip", clip_name1)
-clip_path2 = folder_paths.get_full_path_or_raise("clip", clip_name2)
+clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
+clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
if type == "sdxl":
clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
elif type == "sd3":
@@ -1957,6 +1957,12 @@ NODE_DISPLAY_NAME_MAPPINGS = {
"ImageInvert": "Invert Image",
"ImagePadForOutpaint": "Pad Image for Outpainting",
"ImageBatch": "Batch Images",
+"ImageCrop": "Image Crop",
+"ImageBlend": "Image Blend",
+"ImageBlur": "Image Blur",
+"ImageQuantize": "Image Quantize",
+"ImageSharpen": "Image Sharpen",
+"ImageScaleToTotalPixels": "Scale Image to Total Pixels",
# _for_testing
"VAEDecodeTiled": "VAE Decode (Tiled)",
"VAEEncodeTiled": "VAE Encode (Tiled)",

View File

@@ -1,8 +1,8 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
-import { d as defineComponent, bK as useExtensionStore, u as useSettingStore, r as ref, o as onMounted, q as computed, g as openBlock, h as createElementBlock, i as createVNode, y as withCtx, z as unref, bL as script$1, A as createBaseVNode, x as createBlock, M as Fragment, N as renderList, am as toDisplayString, ap as createTextVNode, j as createCommentVNode, D as script$4 } from "./index-CgU1oKZt.js";
-import { s as script, a as script$2, b as script$3 } from "./index-DBWDcZsl.js";
-import "./index-DYEEBf64.js";
+import { d as defineComponent, bQ as useExtensionStore, u as useSettingStore, r as ref, o as onMounted, q as computed, g as openBlock, h as createElementBlock, i as createVNode, y as withCtx, z as unref, bR as script$1, A as createBaseVNode, x as createBlock, N as Fragment, O as renderList, a6 as toDisplayString, aw as createTextVNode, j as createCommentVNode, D as script$4 } from "./index-BHayQCxv.js";
+import { s as script, a as script$2, b as script$3 } from "./index-CwRXxFdA.js";
+import "./index-C_wOqB0f.js";
const _hoisted_1 = { class: "extension-panel" };
const _hoisted_2 = { class: "mt-4" };
const _sfc_main = /* @__PURE__ */ defineComponent({
@@ -100,4 +100,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as default
};
-//# sourceMappingURL=ExtensionPanel-DZLYjWBj.js.map
+//# sourceMappingURL=ExtensionPanel-BmKi_NKS.js.map

View File

@@ -1 +1 @@
{"version":3,"file":"ExtensionPanel-DZLYjWBj.js","sources":["../../src/components/dialog/content/setting/ExtensionPanel.vue"],"sourcesContent":["<template>\n <div class=\"extension-panel\">\n <DataTable :value=\"extensionStore.extensions\" stripedRows size=\"small\">\n <Column field=\"name\" :header=\"$t('extensionName')\" sortable></Column>\n <Column\n :pt=\"{\n bodyCell: 'flex items-center justify-end'\n }\"\n >\n <template #body=\"slotProps\">\n <ToggleSwitch\n v-model=\"editingEnabledExtensions[slotProps.data.name]\"\n @change=\"updateExtensionStatus\"\n />\n </template>\n </Column>\n </DataTable>\n <div class=\"mt-4\">\n <Message v-if=\"hasChanges\" severity=\"info\">\n <ul>\n <li v-for=\"ext in changedExtensions\" :key=\"ext.name\">\n <span>\n {{ extensionStore.isExtensionEnabled(ext.name) ? '[-]' : '[+]' }}\n </span>\n {{ ext.name }}\n </li>\n </ul>\n </Message>\n <Button\n :label=\"$t('reloadToApplyChanges')\"\n icon=\"pi pi-refresh\"\n @click=\"applyChanges\"\n :disabled=\"!hasChanges\"\n text\n fluid\n severity=\"danger\"\n />\n </div>\n </div>\n</template>\n\n<script setup lang=\"ts\">\nimport { ref, computed, onMounted } from 'vue'\nimport { useExtensionStore } from '@/stores/extensionStore'\nimport { useSettingStore } from '@/stores/settingStore'\nimport DataTable from 'primevue/datatable'\nimport Column from 'primevue/column'\nimport ToggleSwitch from 'primevue/toggleswitch'\nimport Button from 'primevue/button'\nimport Message from 'primevue/message'\n\nconst extensionStore = useExtensionStore()\nconst settingStore = useSettingStore()\n\nconst editingEnabledExtensions = ref<Record<string, boolean>>({})\n\nonMounted(() => {\n extensionStore.extensions.forEach((ext) => {\n editingEnabledExtensions.value[ext.name] =\n extensionStore.isExtensionEnabled(ext.name)\n })\n})\n\nconst changedExtensions = computed(() => {\n return extensionStore.extensions.filter(\n (ext) =>\n editingEnabledExtensions.value[ext.name] !==\n extensionStore.isExtensionEnabled(ext.name)\n )\n})\n\nconst hasChanges = computed(() => {\n return changedExtensions.value.length > 0\n})\n\nconst updateExtensionStatus = () => {\n const editingDisabledExtensionNames = Object.entries(\n editingEnabledExtensions.value\n )\n .filter(([_, enabled]) => !enabled)\n .map(([name]) => name)\n\n settingStore.set('Comfy.Extension.Disabled', [\n ...extensionStore.inactiveDisabledExtensionNames,\n ...editingDisabledExtensionNames\n ])\n}\n\nconst applyChanges = () => {\n // Refresh the page to apply changes\n window.location.reload()\n}\n</script>\n"],"names":[],"mappings":";;;;;;;;;;AAmDA,UAAM,iBAAiB;AACvB,UAAM,eAAe;AAEf,UAAA,2BAA2B,IAA6B,CAAA,CAAE;AAEhE,cAAU,MAAM;AACC,qBAAA,WAAW,QAAQ,CAAC,QAAQ;AACzC,iCAAyB,MAAM,IAAI,IAAI,IACrC,eAAe,mBAAmB,IAAI,IAAI;AAAA,MAAA,CAC7C;AAAA,IAAA,CACF;AAEK,UAAA,oBAAoB,SAAS,MAAM;AACvC,aAAO,eAAe,WAAW;AAAA,QAC/B,CAAC,QACC,yBAAyB,MAAM,IAAI,IAAI,MACvC,eAAe,mBAAmB,IAAI,IAAI;AAAA,MAAA;AAAA,IAC9C,CACD;AAEK,UAAA,aAAa,SAAS,MAAM;AACzB,aAAA,kBAAkB,MAAM,SAAS;AAAA,IAAA,CACzC;AAED,UAAM,wBAAwB,6BAAM;AAClC,YAAM,gCAAgC,OAAO;AAAA,QAC3C,yBAAyB;AAAA,MAExB,EAAA,OAAO,CAAC,CAAC,GAAG,OAAO,MAAM,CAAC,OAAO,EACjC,IAAI,CAAC,CAAC,IAAI,MAAM,IAAI;AAEvB,mBAAa,IAAI,4BAA4B;AAAA,QAC3C,GAAG,eAAe;AAAA,QAClB,GAAG;AAAA,MAAA,CACJ;AAAA,IAAA,GAV2B;AAa9B,UAAM,eAAe,6BAAM;AAEzB,aAAO,SAAS;IAAO,GAFJ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"} {"version":3,"file":"ExtensionPanel-BmKi_NKS.js","sources":["../../src/components/dialog/content/setting/ExtensionPanel.vue"],"sourcesContent":["<template>\n <div 
class=\"extension-panel\">\n <DataTable :value=\"extensionStore.extensions\" stripedRows size=\"small\">\n <Column field=\"name\" :header=\"$t('extensionName')\" sortable></Column>\n <Column\n :pt=\"{\n bodyCell: 'flex items-center justify-end'\n }\"\n >\n <template #body=\"slotProps\">\n <ToggleSwitch\n v-model=\"editingEnabledExtensions[slotProps.data.name]\"\n @change=\"updateExtensionStatus\"\n />\n </template>\n </Column>\n </DataTable>\n <div class=\"mt-4\">\n <Message v-if=\"hasChanges\" severity=\"info\">\n <ul>\n <li v-for=\"ext in changedExtensions\" :key=\"ext.name\">\n <span>\n {{ extensionStore.isExtensionEnabled(ext.name) ? '[-]' : '[+]' }}\n </span>\n {{ ext.name }}\n </li>\n </ul>\n </Message>\n <Button\n :label=\"$t('reloadToApplyChanges')\"\n icon=\"pi pi-refresh\"\n @click=\"applyChanges\"\n :disabled=\"!hasChanges\"\n text\n fluid\n severity=\"danger\"\n />\n </div>\n </div>\n</template>\n\n<script setup lang=\"ts\">\nimport { ref, computed, onMounted } from 'vue'\nimport { useExtensionStore } from '@/stores/extensionStore'\nimport { useSettingStore } from '@/stores/settingStore'\nimport DataTable from 'primevue/datatable'\nimport Column from 'primevue/column'\nimport ToggleSwitch from 'primevue/toggleswitch'\nimport Button from 'primevue/button'\nimport Message from 'primevue/message'\n\nconst extensionStore = useExtensionStore()\nconst settingStore = useSettingStore()\n\nconst editingEnabledExtensions = ref<Record<string, boolean>>({})\n\nonMounted(() => {\n extensionStore.extensions.forEach((ext) => {\n editingEnabledExtensions.value[ext.name] =\n extensionStore.isExtensionEnabled(ext.name)\n })\n})\n\nconst changedExtensions = computed(() => {\n return extensionStore.extensions.filter(\n (ext) =>\n editingEnabledExtensions.value[ext.name] !==\n extensionStore.isExtensionEnabled(ext.name)\n )\n})\n\nconst hasChanges = computed(() => {\n return changedExtensions.value.length > 0\n})\n\nconst updateExtensionStatus = () => {\n const editingDisabledExtensionNames = Object.entries(\n editingEnabledExtensions.value\n )\n .filter(([_, enabled]) => !enabled)\n .map(([name]) => name)\n\n settingStore.set('Comfy.Extension.Disabled', [\n ...extensionStore.inactiveDisabledExtensionNames,\n ...editingDisabledExtensionNames\n ])\n}\n\nconst applyChanges = () => {\n // Refresh the page to apply changes\n window.location.reload()\n}\n</script>\n"],"names":[],"mappings":";;;;;;;;;;AAmDA,UAAM,iBAAiB;AACvB,UAAM,eAAe;AAEf,UAAA,2BAA2B,IAA6B,CAAA,CAAE;AAEhE,cAAU,MAAM;AACC,qBAAA,WAAW,QAAQ,CAAC,QAAQ;AACzC,iCAAyB,MAAM,IAAI,IAAI,IACrC,eAAe,mBAAmB,IAAI,IAAI;AAAA,MAAA,CAC7C;AAAA,IAAA,CACF;AAEK,UAAA,oBAAoB,SAAS,MAAM;AACvC,aAAO,eAAe,WAAW;AAAA,QAC/B,CAAC,QACC,yBAAyB,MAAM,IAAI,IAAI,MACvC,eAAe,mBAAmB,IAAI,IAAI;AAAA,MAAA;AAAA,IAC9C,CACD;AAEK,UAAA,aAAa,SAAS,MAAM;AACzB,aAAA,kBAAkB,MAAM,SAAS;AAAA,IAAA,CACzC;AAED,UAAM,wBAAwB,6BAAM;AAClC,YAAM,gCAAgC,OAAO;AAAA,QAC3C,yBAAyB;AAAA,MAExB,EAAA,OAAO,CAAC,CAAC,GAAG,OAAO,MAAM,CAAC,OAAO,EACjC,IAAI,CAAC,CAAC,IAAI,MAAM,IAAI;AAEvB,mBAAa,IAAI,4BAA4B;AAAA,QAC3C,GAAG,eAAe;AAAA,QAClB,GAAG;AAAA,MAAA,CACJ;AAAA,IAAA,GAV2B;AAa9B,UAAM,eAAe,6BAAM;AAEzB,aAAO,SAAS;IAAO,GAFJ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

File diff suppressed because one or more lines are too long

web/assets/GraphView-C4blCugc.js.map (generated, vendored, new file)

File diff suppressed because one or more lines are too long

View File

@@ -1,13 +1,13 @@
-.group-title-editor.node-title-editor[data-v-fc3f26e3] {
+.group-title-editor.node-title-editor[data-v-8a100d5a] {
z-index: 9999;
padding: 0.25rem;
}
-[data-v-fc3f26e3] .editable-text {
+[data-v-8a100d5a] .editable-text {
width: 100%;
height: 100%;
}
-[data-v-fc3f26e3] .editable-text input {
+[data-v-8a100d5a] .editable-text input {
width: 100%;
height: 100%;
/* Override the default font size */
@@ -45,7 +45,7 @@
--sidebar-icon-size: 1rem;
}
-.side-tool-bar-container[data-v-b6bfc188] {
+.side-tool-bar-container[data-v-37fd2fa4] {
display: flex;
flex-direction: column;
align-items: center;
@@ -58,36 +58,39 @@
background-color: var(--comfy-menu-bg);
color: var(--fg-color);
}
-.side-tool-bar-end[data-v-b6bfc188] {
+.side-tool-bar-end[data-v-37fd2fa4] {
align-self: flex-end;
margin-top: auto;
}
-.p-splitter-gutter {
+[data-v-b49f20b1] .p-splitter-gutter {
pointer-events: auto;
}
-.gutter-hidden {
-display: none !important;
-}
-.side-bar-panel[data-v-b9df3042] {
+.side-bar-panel[data-v-b49f20b1] {
background-color: var(--bg-color);
pointer-events: auto;
}
-.splitter-overlay[data-v-b9df3042] {
-width: 100%;
-height: 100%;
-position: absolute;
-top: 0;
-left: 0;
-background-color: transparent;
+.bottom-panel[data-v-b49f20b1] {
+background-color: var(--bg-color);
+pointer-events: auto;
+}
+.splitter-overlay[data-v-b49f20b1] {
pointer-events: none;
+border-style: none;
+background-color: transparent;
+}
+.splitter-overlay-root[data-v-b49f20b1] {
+position: absolute;
+top: 0px;
+left: 0px;
+height: 100%;
+width: 100%;
/* Set it the same as the ComfyUI menu */
/* Note: Lite-graph DOM widgets have the same z-index as the node id, so
999 should be sufficient to make sure splitter overlays on node's DOM
widgets */
z-index: 999;
-border: none;
}
[data-v-37f672ab] .highlight {
@@ -251,7 +254,7 @@
display: none;
}
-.comfyui-menu[data-v-b13fdc92] {
+.comfyui-menu[data-v-ad2c662b] {
width: 100vw;
background: var(--comfy-menu-bg);
color: var(--fg-color);
@@ -263,13 +266,13 @@
grid-column: 1/-1;
max-height: 90vh;
}
-.comfyui-menu.dropzone[data-v-b13fdc92] {
+.comfyui-menu.dropzone[data-v-ad2c662b] {
background: var(--p-highlight-background);
}
-.comfyui-menu.dropzone-active[data-v-b13fdc92] {
+.comfyui-menu.dropzone-active[data-v-ad2c662b] {
background: var(--p-highlight-background-focus);
}
-.comfyui-logo[data-v-b13fdc92] {
+.comfyui-logo[data-v-ad2c662b] {
font-size: 1.2em;
-webkit-user-select: none;
-moz-user-select: none;

File diff suppressed because one or more lines are too long

View File

@@ -1,8 +1,8 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
-import { d as defineComponent, q as computed, g as openBlock, h as createElementBlock, M as Fragment, N as renderList, i as createVNode, y as withCtx, ap as createTextVNode, am as toDisplayString, z as unref, at as script, j as createCommentVNode, r as ref, bH as FilterMatchMode, K as useKeybindingStore, F as useCommandStore, aC as watchEffect, aZ as useToast, t as resolveDirective, bI as SearchBox, A as createBaseVNode, D as script$2, x as createBlock, af as script$4, b2 as withModifiers, aA as script$6, v as withDirectives, P as pushScopeId, Q as popScopeId, by as KeyComboImpl, bJ as KeybindingImpl, _ as _export_sfc } from "./index-CgU1oKZt.js";
-import { s as script$1, a as script$3, b as script$5 } from "./index-DBWDcZsl.js";
-import "./index-DYEEBf64.js";
+import { d as defineComponent, q as computed, g as openBlock, h as createElementBlock, N as Fragment, O as renderList, i as createVNode, y as withCtx, aw as createTextVNode, a6 as toDisplayString, z as unref, aA as script, j as createCommentVNode, r as ref, bN as FilterMatchMode, M as useKeybindingStore, F as useCommandStore, aJ as watchEffect, b9 as useToast, t as resolveDirective, bO as SearchBox, A as createBaseVNode, D as script$2, x as createBlock, ao as script$4, be as withModifiers, aH as script$6, v as withDirectives, P as pushScopeId, Q as popScopeId, bJ as KeyComboImpl, bP as KeybindingImpl, _ as _export_sfc } from "./index-BHayQCxv.js";
+import { s as script$1, a as script$3, b as script$5 } from "./index-CwRXxFdA.js";
+import "./index-C_wOqB0f.js";
const _hoisted_1$1 = {
key: 0,
class: "px-2"
@@ -261,4 +261,4 @@ const KeybindingPanel = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "d
export {
KeybindingPanel as default
};
-//# sourceMappingURL=KeybindingPanel-YkUFoiMw.js.map
+//# sourceMappingURL=KeybindingPanel-Dm_3sBT5.js.map

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/assets/index-BHayQCxv.js.map (generated, vendored, new file)

File diff suppressed because one or more lines are too long

View File

@@ -1,7 +1,7 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { bu as ComfyDialog, bv as $el, bw as ComfyApp, c as app, k as LiteGraph, aP as LGraphCanvas, bx as DraggableList, a_ as useToastStore, ax as useNodeDefStore, bq as api, L as LGraphGroup, by as KeyComboImpl, K as useKeybindingStore, F as useCommandStore, e as LGraphNode, bz as ComfyWidgets, bA as applyTextReplacements, av as NodeSourceType, bB as NodeBadgeMode, u as useSettingStore, q as computed, bC as getColorPalette, w as watch, bD as BadgePosition, aR as LGraphBadge, bE as _, bF as defaultColorPalette } from "./index-CgU1oKZt.js"; import { bF as ComfyDialog, bG as $el, bH as ComfyApp, c as app, k as LiteGraph, b0 as LGraphCanvas, bI as DraggableList, ba as useToastStore, aE as useNodeDefStore, bC as api, L as LGraphGroup, bJ as KeyComboImpl, M as useKeybindingStore, F as useCommandStore, e as LGraphNode, bK as ComfyWidgets, bL as applyTextReplacements } from "./index-BHayQCxv.js";
import { mergeIfValid, getWidgetConfig, setWidgetConfig } from "./widgetInputs-DNVvusS1.js"; import { mergeIfValid, getWidgetConfig, setWidgetConfig } from "./widgetInputs-DdecKYqd.js";
class ClipspaceDialog extends ComfyDialog { class ClipspaceDialog extends ComfyDialog {
static { static {
__name(this, "ClipspaceDialog"); __name(this, "ClipspaceDialog");
@@ -37,7 +37,9 @@ class ClipspaceDialog extends ComfyDialog {
...self.createButtons() ...self.createButtons()
]); ]);
if (self.element) { if (self.element) {
self.element.removeChild(self.element.firstChild); if (self.element.firstChild) {
self.element.removeChild(self.element.firstChild);
}
self.element.appendChild(children); self.element.appendChild(children);
} else { } else {
self.element = $el("div.comfy-modal", { parent: document.body }, [ self.element = $el("div.comfy-modal", { parent: document.body }, [
@@ -76,7 +78,7 @@ class ClipspaceDialog extends ComfyDialog {
return buttons; return buttons;
} }
createImgSettings() { createImgSettings() {
if (ComfyApp.clipspace.imgs) { if (ComfyApp.clipspace?.imgs) {
const combo_items = []; const combo_items = [];
const imgs = ComfyApp.clipspace.imgs; const imgs = ComfyApp.clipspace.imgs;
for (let i = 0; i < imgs.length; i++) { for (let i = 0; i < imgs.length; i++) {
@@ -87,8 +89,10 @@ class ClipspaceDialog extends ComfyDialog {
{ {
id: "clipspace_img_selector", id: "clipspace_img_selector",
onchange: /* @__PURE__ */ __name((event) => { onchange: /* @__PURE__ */ __name((event) => {
ComfyApp.clipspace["selectedIndex"] = event.target.selectedIndex; if (event.target && ComfyApp.clipspace) {
ClipspaceDialog.invalidatePreview(); ComfyApp.clipspace["selectedIndex"] = event.target.selectedIndex;
ClipspaceDialog.invalidatePreview();
}
}, "onchange") }, "onchange")
}, },
combo_items combo_items
@@ -102,7 +106,9 @@ class ClipspaceDialog extends ComfyDialog {
{ {
id: "clipspace_img_paste_mode", id: "clipspace_img_paste_mode",
onchange: /* @__PURE__ */ __name((event) => { onchange: /* @__PURE__ */ __name((event) => {
ComfyApp.clipspace["img_paste_mode"] = event.target.value; if (event.target && ComfyApp.clipspace) {
ComfyApp.clipspace["img_paste_mode"] = event.target.value;
}
}, "onchange") }, "onchange")
}, },
[ [
@@ -127,7 +133,7 @@ class ClipspaceDialog extends ComfyDialog {
} }
} }
createImgPreview() { createImgPreview() {
if (ComfyApp.clipspace.imgs) { if (ComfyApp.clipspace?.imgs) {
return $el("img", { id: "clipspace_preview", ondragstart: /* @__PURE__ */ __name(() => false, "ondragstart") }); return $el("img", { id: "clipspace_preview", ondragstart: /* @__PURE__ */ __name(() => false, "ondragstart") });
} else return []; } else return [];
} }
@@ -173,9 +179,9 @@ const ext$2 = {
requestAnimationFrame(() => { requestAnimationFrame(() => {
const currentNode = LGraphCanvas.active_canvas.current_node; const currentNode = LGraphCanvas.active_canvas.current_node;
const clickedComboValue = currentNode.widgets?.filter( const clickedComboValue = currentNode.widgets?.filter(
(w) => w.type === "combo" && w.options.values.length === values.length (w) => w.type === "combo" && w.options.values?.length === values.length
).find( ).find(
(w) => w.options.values.every((v, i) => v === values[i]) (w) => w.options.values?.every((v, i) => v === values[i])
)?.value; )?.value;
let selectedIndex = clickedComboValue ? values.findIndex((v) => v === clickedComboValue) : 0; let selectedIndex = clickedComboValue ? values.findIndex((v) => v === clickedComboValue) : 0;
if (selectedIndex < 0) { if (selectedIndex < 0) {
@@ -244,7 +250,7 @@ const ext$2 = {
filter.addEventListener("input", () => { filter.addEventListener("input", () => {
const term = filter.value.toLocaleLowerCase(); const term = filter.value.toLocaleLowerCase();
displayedItems = items.filter((item) => { displayedItems = items.filter((item) => {
const isVisible = !term || item.textContent.toLocaleLowerCase().includes(term); const isVisible = !term || item.textContent?.toLocaleLowerCase().includes(term);
item.style.display = isVisible ? "block" : "none"; item.style.display = isVisible ? "block" : "none";
return isVisible; return isVisible;
}); });
@@ -1224,7 +1230,7 @@ class GroupNodeConfig {
checkPrimitiveConnection(link, inputName, inputs) { checkPrimitiveConnection(link, inputName, inputs) {
const sourceNode = this.nodeData.nodes[link[0]]; const sourceNode = this.nodeData.nodes[link[0]];
if (sourceNode.type === "PrimitiveNode") { if (sourceNode.type === "PrimitiveNode") {
const [sourceNodeId, _2, targetNodeId, __] = link; const [sourceNodeId, _, targetNodeId, __] = link;
const primitiveDef = this.primitiveDefs[sourceNodeId]; const primitiveDef = this.primitiveDefs[sourceNodeId];
const targetWidget = inputs[inputName]; const targetWidget = inputs[inputName];
const primitiveConfig = primitiveDef.input.required.value; const primitiveConfig = primitiveDef.input.required.value;
@@ -1619,7 +1625,7 @@ class GroupNodeHandler {
 return newNodes;
 };
 const getExtraMenuOptions = this.node.getExtraMenuOptions;
-this.node.getExtraMenuOptions = function(_2, options) {
+this.node.getExtraMenuOptions = function(_, options) {
 getExtraMenuOptions?.apply(this, arguments);
 let optionIndex = options.findIndex((o) => o.content === "Outputs");
 if (optionIndex === -1) optionIndex = options.length;
@@ -1793,7 +1799,7 @@ class GroupNodeHandler {
 } else if (innerNode.type === "Reroute") {
 const rerouteLinks = this.groupData.linksFrom[old.node.index];
 if (rerouteLinks) {
-for (const [_2, , targetNodeId, targetSlot] of rerouteLinks["0"]) {
+for (const [_, , targetNodeId, targetSlot] of rerouteLinks["0"]) {
 const node = this.innerNodes[targetNodeId];
 const input = node.inputs[targetSlot];
 if (input.widget) {
@@ -2408,10 +2414,10 @@ const ext = {
 defaultValue: 2,
 type: "combo",
 options: [
-{ value: LiteGraph.STRAIGHT_LINK, text: "Straight" },
-{ value: LiteGraph.LINEAR_LINK, text: "Linear" },
-{ value: LiteGraph.SPLINE_LINK, text: "Spline" },
-{ value: LiteGraph.HIDDEN_LINK, text: "Hidden" }
+{ value: LiteGraph.STRAIGHT_LINK.toString(), text: "Straight" },
+{ value: LiteGraph.LINEAR_LINK.toString(), text: "Linear" },
+{ value: LiteGraph.SPLINE_LINK.toString(), text: "Spline" },
+{ value: LiteGraph.HIDDEN_LINK.toString(), text: "Hidden" }
 ],
 onChange(value) {
 app2.canvas.links_render_mode = +value;
@@ -3934,7 +3940,7 @@ app.registerExtension({
 };
 this.isVirtualNode = true;
 }
-getExtraMenuOptions(_2, options) {
+getExtraMenuOptions(_, options) {
 options.unshift(
 {
 content: (this.properties.showOutputText ? "Hide" : "Show") + " Type",
@@ -4157,7 +4163,7 @@ app.registerExtension({
 slot_types_default_in: {},
 async beforeRegisterNodeDef(nodeType, nodeData, app2) {
 var nodeId = nodeData.name;
-const inputs = nodeData["input"]["required"];
+const inputs = nodeData["input"]?.["required"];
 for (const inputKey in inputs) {
 var input = inputs[inputKey];
 if (typeof input[0] !== "string") continue;
@@ -4179,19 +4185,19 @@ app.registerExtension({
 nodeType.comfyClass
 );
 }
-var outputs = nodeData["output"];
-for (const key in outputs) {
-var type = outputs[key];
-if (!(type in this.slot_types_default_in)) {
-this.slot_types_default_in[type] = ["Reroute"];
+var outputs = nodeData["output"] ?? [];
+for (const el of outputs) {
+const type2 = el;
+if (!(type2 in this.slot_types_default_in)) {
+this.slot_types_default_in[type2] = ["Reroute"];
 }
-this.slot_types_default_in[type].push(nodeId);
-if (!(type in LiteGraph.registered_slot_out_types)) {
-LiteGraph.registered_slot_out_types[type] = { nodes: [] };
+this.slot_types_default_in[type2].push(nodeId);
+if (!(type2 in LiteGraph.registered_slot_out_types)) {
+LiteGraph.registered_slot_out_types[type2] = { nodes: [] };
 }
-LiteGraph.registered_slot_out_types[type].nodes.push(nodeType.comfyClass);
-if (!LiteGraph.slot_types_out.includes(type)) {
-LiteGraph.slot_types_out.push(type);
+LiteGraph.registered_slot_out_types[type2].nodes.push(nodeType.comfyClass);
+if (!LiteGraph.slot_types_out.includes(type2)) {
+LiteGraph.slot_types_out.push(type2);
 }
 }
 var maxNum = this.suggestionsNumber.value;
@@ -4276,7 +4282,7 @@ app.registerExtension({
 } else {
 w = node.size[0];
 h = node.size[1];
-let titleMode = node.constructor.title_mode;
+const titleMode = node.constructor.title_mode;
 if (titleMode !== LiteGraph.TRANSPARENT_TITLE && titleMode !== LiteGraph.NO_TITLE) {
 h += LiteGraph.NODE_TITLE_HEIGHT;
 shiftY -= LiteGraph.NODE_TITLE_HEIGHT;
@@ -4627,108 +4633,4 @@ app.registerExtension({
 };
 }
 });
-function getNodeSource(node) {
-const nodeDef = node.constructor.nodeData;
-if (!nodeDef) {
-return null;
-}
-const nodeDefStore = useNodeDefStore();
-return nodeDefStore.nodeDefsByName[nodeDef.name]?.nodeSource ?? null;
-}
-__name(getNodeSource, "getNodeSource");
-function isCoreNode(node) {
-return getNodeSource(node)?.type === NodeSourceType.Core;
-}
-__name(isCoreNode, "isCoreNode");
-function badgeTextVisible(node, badgeMode) {
-return badgeMode === NodeBadgeMode.None || isCoreNode(node) && badgeMode === NodeBadgeMode.HideBuiltIn;
-}
-__name(badgeTextVisible, "badgeTextVisible");
-function getNodeIdBadgeText(node, nodeIdBadgeMode) {
-return badgeTextVisible(node, nodeIdBadgeMode) ? "" : `#${node.id}`;
-}
-__name(getNodeIdBadgeText, "getNodeIdBadgeText");
-function getNodeSourceBadgeText(node, nodeSourceBadgeMode) {
-const nodeSource = getNodeSource(node);
-return badgeTextVisible(node, nodeSourceBadgeMode) ? "" : nodeSource?.badgeText ?? "";
-}
-__name(getNodeSourceBadgeText, "getNodeSourceBadgeText");
-function getNodeLifeCycleBadgeText(node, nodeLifeCycleBadgeMode) {
-let text = "";
-const nodeDef = node.constructor.nodeData;
-if (!nodeDef) {
-return "";
-}
-if (nodeDef.deprecated) {
-text = "[DEPR]";
-}
-if (nodeDef.experimental) {
-text = "[BETA]";
-}
-return badgeTextVisible(node, nodeLifeCycleBadgeMode) ? "" : text;
-}
-__name(getNodeLifeCycleBadgeText, "getNodeLifeCycleBadgeText");
-class NodeBadgeExtension {
-static {
-__name(this, "NodeBadgeExtension");
-}
-constructor(nodeIdBadgeMode = null, nodeSourceBadgeMode = null, nodeLifeCycleBadgeMode = null, colorPalette = null) {
-this.nodeIdBadgeMode = nodeIdBadgeMode;
-this.nodeSourceBadgeMode = nodeSourceBadgeMode;
-this.nodeLifeCycleBadgeMode = nodeLifeCycleBadgeMode;
-this.colorPalette = colorPalette;
-}
-name = "Comfy.NodeBadge";
-init(app2) {
-const settingStore = useSettingStore();
-this.nodeSourceBadgeMode = computed(
-() => settingStore.get("Comfy.NodeBadge.NodeSourceBadgeMode")
-);
-this.nodeIdBadgeMode = computed(
-() => settingStore.get("Comfy.NodeBadge.NodeIdBadgeMode")
-);
-this.nodeLifeCycleBadgeMode = computed(
-() => settingStore.get(
-"Comfy.NodeBadge.NodeLifeCycleBadgeMode"
-)
-);
-this.colorPalette = computed(
-() => getColorPalette(settingStore.get("Comfy.ColorPalette"))
-);
-watch(this.nodeSourceBadgeMode, () => {
-app2.graph.setDirtyCanvas(true, true);
-});
-watch(this.nodeIdBadgeMode, () => {
-app2.graph.setDirtyCanvas(true, true);
-});
-watch(this.nodeLifeCycleBadgeMode, () => {
-app2.graph.setDirtyCanvas(true, true);
-});
-}
-nodeCreated(node, app2) {
-node.badgePosition = BadgePosition.TopRight;
-node.badge_enabled = true;
-const badge = computed(
-() => new LGraphBadge({
-text: _.truncate(
-[
-getNodeIdBadgeText(node, this.nodeIdBadgeMode.value),
-getNodeLifeCycleBadgeText(
-node,
-this.nodeLifeCycleBadgeMode.value
-),
-getNodeSourceBadgeText(node, this.nodeSourceBadgeMode.value)
-].filter((s) => s.length > 0).join(" "),
-{
-length: 31
-}
-),
-fgColor: this.colorPalette.value.colors.litegraph_base?.BADGE_FG_COLOR || defaultColorPalette.colors.litegraph_base.BADGE_FG_COLOR,
-bgColor: this.colorPalette.value.colors.litegraph_base?.BADGE_BG_COLOR || defaultColorPalette.colors.litegraph_base.BADGE_BG_COLOR
-})
-);
-node.badges.push(() => badge.value);
-}
-}
-app.registerExtension(new NodeBadgeExtension());
-//# sourceMappingURL=index-D36_Nnai.js.map
+//# sourceMappingURL=index-BReiUkk9.js.map

web/assets/index-BReiUkk9.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long


@@ -1,51 +1,30 @@
-:root {
---red-600: #dc3545;
-}
-.comfy-missing-nodes[data-v-0a88b934] {
-font-family: monospace;
-color: var(--red-600);
-padding: 1.5rem;
+.no-results-placeholder[data-v-a1e982e0] .p-card {
 background-color: var(--surface-ground);
-border-radius: var(--border-radius);
-box-shadow: var(--card-shadow);
+text-align: center;
+box-shadow: unset;
 }
-.warning-title[data-v-0a88b934] {
-margin-top: 0;
+.no-results-placeholder h3[data-v-a1e982e0] {
+color: var(--text-color);
 margin-bottom: 0.5rem;
 }
+.no-results-placeholder p[data-v-a1e982e0] {
+color: var(--text-color-secondary);
 margin-bottom: 1rem;
 }
-.warning-description[data-v-0a88b934] {
-margin-bottom: 1rem;
-}
-.missing-nodes-list[data-v-0a88b934] {
+.comfy-missing-nodes[data-v-05a7c5eb] {
 max-height: 300px;
 overflow-y: auto;
 }
-.missing-nodes-list.maximized[data-v-0a88b934] {
-max-height: unset;
-}
-.missing-node-item[data-v-0a88b934] {
-display: flex;
-align-items: center;
-padding: 0.5rem;
-}
-.node-type[data-v-0a88b934] {
-font-weight: 600;
-color: var(--text-color);
-}
-.node-hint[data-v-0a88b934] {
+.node-hint[data-v-05a7c5eb] {
 margin-left: 0.5rem;
 font-style: italic;
 color: var(--text-color-secondary);
 }
-[data-v-0a88b934] .p-button {
+[data-v-05a7c5eb] .p-button {
 margin-left: auto;
 }
-.added-nodes-warning[data-v-0a88b934] {
-margin-top: 1rem;
-font-style: italic;
-}
 :root {
 --red-600: #dc3545;
@@ -217,24 +196,6 @@
 border: none !important;
 }
-.no-results-placeholder[data-v-c19e9e10] {
-height: 100%;
-padding: 2rem;
-}
-.no-results-placeholder[data-v-c19e9e10] .p-card {
-background-color: var(--surface-ground);
-text-align: center;
-box-shadow: unset;
-}
-.no-results-placeholder h3[data-v-c19e9e10] {
-color: var(--text-color);
-margin-bottom: 0.5rem;
-}
-.no-results-placeholder p[data-v-c19e9e10] {
-color: var(--text-color-secondary);
-margin-bottom: 1rem;
-}
 .settings-tab-panels {
 padding-top: 0px !important;
 }
@@ -1808,6 +1769,9 @@ cursor: pointer;
 max-width: 3200px;
 }
 }
+.pointer-events-none{
+pointer-events: none;
+}
 .pointer-events-auto{
 pointer-events: auto;
 }
@@ -2049,6 +2013,9 @@ cursor: pointer;
 margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse)));
 margin-bottom: calc(0.5rem * var(--tw-space-y-reverse));
 }
+.justify-self-end{
+justify-self: end;
+}
 .overflow-hidden{
 overflow: hidden;
 }
@@ -2072,6 +2039,9 @@ cursor: pointer;
 .whitespace-pre-line{
 white-space: pre-line;
 }
+.whitespace-pre-wrap{
+white-space: pre-wrap;
+}
 .text-wrap{
 text-wrap: wrap;
 }
@@ -2135,9 +2105,15 @@ cursor: pointer;
 .p-2{
 padding: 0.5rem;
 }
+.p-3{
+padding: 0.75rem;
+}
 .p-4{
 padding: 1rem;
 }
+.p-8{
+padding: 2rem;
+}
 .px-0{
 padding-left: 0px;
 padding-right: 0px;
@@ -2146,6 +2122,10 @@ cursor: pointer;
 padding-left: 0.5rem;
 padding-right: 0.5rem;
 }
+.px-4{
+padding-left: 1rem;
+padding-right: 1rem;
+}
 .py-0{
 padding-top: 0px;
 padding-bottom: 0px;


@@ -1,6 +1,6 @@
 var __defProp = Object.defineProperty;
 var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
-import { bM as script$4, A as createBaseVNode, g as openBlock, h as createElementBlock, m as mergeProps } from "./index-CgU1oKZt.js";
+import { bS as script$4, A as createBaseVNode, g as openBlock, h as createElementBlock, m as mergeProps } from "./index-BHayQCxv.js";
 var script$3 = {
 name: "BarsIcon",
 "extends": script$4
@@ -99,4 +99,4 @@ export {
 script$2 as c,
 script as s
 };
-//# sourceMappingURL=index-DYEEBf64.js.map
+//# sourceMappingURL=index-C_wOqB0f.js.map


web/assets/index-CgU1oKZt.js.map generated vendored

File diff suppressed because one or more lines are too long


@@ -1,7 +1,7 @@
 var __defProp = Object.defineProperty;
 var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
-import { bM as script$s, A as createBaseVNode, g as openBlock, h as createElementBlock, m as mergeProps, B as BaseStyle, R as script$t, am as toDisplayString, al as Ripple, t as resolveDirective, v as withDirectives, x as createBlock, O as resolveDynamicComponent, bN as script$u, l as resolveComponent, C as normalizeClass, ao as createSlots, y as withCtx, bf as script$v, b5 as script$w, M as Fragment, N as renderList, ap as createTextVNode, aX as setAttribute, a1 as UniqueComponentId, aV as normalizeProps, p as renderSlot, j as createCommentVNode, ac as equals, aS as script$x, bO as script$y, bP as getFirstFocusableElement, a5 as OverlayEventBus, Y as getVNodeProp, a3 as resolveFieldData, bQ as invokeElementMethod, bR as getAttribute, bS as getNextElementSibling, W as getOuterWidth, bT as getPreviousElementSibling, D as script$z, ai as script$A, bU as script$B, aU as script$D, a0 as isNotEmpty, b2 as withModifiers, X as getOuterHeight, bV as _default, a2 as ZIndex, a4 as focus, a7 as addStyle, a9 as absolutePosition, aa as ConnectedOverlayScrollHandler, ab as isTouchDevice, bW as FilterOperator, ah as script$E, bX as FocusTrap, i as createVNode, an as Transition, bY as withKeys, bZ as getIndex, s as script$G, b_ as isClickable, b$ as clearSelection, c0 as localeComparator, c1 as sort, c2 as FilterService, bH as FilterMatchMode, ae as findSingle, c3 as findIndexInList, c4 as find, c5 as exportCSV, c6 as getOffset, c7 as getHiddenElementOuterWidth, c8 as getHiddenElementOuterHeight, c9 as reorderArray, ca as getWindowScrollTop, cb as removeClass, cc as addClass, a6 as isEmpty, ag as script$H, aj as script$I, aT as script$L, $ as vShow } from "./index-CgU1oKZt.js";
-import { b as script$C, c as script$F, s as script$J, a as script$K } from "./index-DYEEBf64.js";
+import { bS as script$s, A as createBaseVNode, g as openBlock, h as createElementBlock, m as mergeProps, B as BaseStyle, R as script$t, a6 as toDisplayString, a1 as Ripple, t as resolveDirective, v as withDirectives, x as createBlock, J as resolveDynamicComponent, bT as script$u, l as resolveComponent, C as normalizeClass, av as createSlots, y as withCtx, br as script$v, bh as script$w, N as Fragment, O as renderList, aw as createTextVNode, b7 as setAttribute, ad as UniqueComponentId, b5 as normalizeProps, p as renderSlot, j as createCommentVNode, a4 as equals, b2 as script$x, bU as script$y, bV as getFirstFocusableElement, ag as OverlayEventBus, a8 as getVNodeProp, af as resolveFieldData, bW as invokeElementMethod, a2 as getAttribute, bX as getNextElementSibling, Y as getOuterWidth, bY as getPreviousElementSibling, D as script$z, ar as script$A, a0 as script$B, b4 as script$D, ac as isNotEmpty, be as withModifiers, W as getOuterHeight, bZ as _default, ae as ZIndex, a3 as focus, ai as addStyle, ak as absolutePosition, al as ConnectedOverlayScrollHandler, am as isTouchDevice, b_ as FilterOperator, aq as script$E, b$ as FocusTrap, i as createVNode, au as Transition, c0 as withKeys, c1 as getIndex, s as script$G, c2 as isClickable, c3 as clearSelection, c4 as localeComparator, c5 as sort, c6 as FilterService, bN as FilterMatchMode, V as findSingle, c7 as findIndexInList, c8 as find, c9 as exportCSV, X as getOffset, ca as getHiddenElementOuterWidth, cb as getHiddenElementOuterHeight, cc as reorderArray, cd as getWindowScrollTop, ce as removeClass, cf as addClass, ah as isEmpty, ap as script$H, as as script$I, b3 as script$L, ab as vShow } from "./index-BHayQCxv.js";
+import { b as script$C, c as script$F, s as script$J, a as script$K } from "./index-C_wOqB0f.js";
 var script$r = {
 name: "ArrowDownIcon",
 "extends": script$s
@@ -8994,4 +8994,4 @@ export {
 script as b,
 script$2 as s
 };
-//# sourceMappingURL=index-DBWDcZsl.js.map
+//# sourceMappingURL=index-CwRXxFdA.js.map


web/assets/index-D36_Nnai.js.map generated vendored

File diff suppressed because one or more lines are too long


@@ -1,6 +1,6 @@
 var __defProp = Object.defineProperty;
 var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
-import { bq as api, bv as $el } from "./index-CgU1oKZt.js";
+import { bC as api, bG as $el } from "./index-BHayQCxv.js";
 function createSpinner() {
 const div = document.createElement("div");
 div.innerHTML = `<div class="lds-ring"><div></div><div></div><div></div><div></div></div>`;
@@ -126,4 +126,4 @@ window.comfyAPI.userSelection.UserSelectionScreen = UserSelectionScreen;
 export {
 UserSelectionScreen
 };
-//# sourceMappingURL=userSelection-DVDwxLD5.js.map
+//# sourceMappingURL=userSelection-DITGVoWz.js.map



@@ -1,6 +1,6 @@
 var __defProp = Object.defineProperty;
 var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
-import { e as LGraphNode, c as app, bA as applyTextReplacements, bz as ComfyWidgets, bG as addValueControlWidgets, k as LiteGraph } from "./index-CgU1oKZt.js";
+import { e as LGraphNode, c as app, bL as applyTextReplacements, bK as ComfyWidgets, bM as addValueControlWidgets, k as LiteGraph } from "./index-BHayQCxv.js";
 const CONVERTED_TYPE = "converted-widget";
 const VALID_TYPES = [
 "STRING",
@@ -753,4 +753,4 @@ export {
 mergeIfValid,
 setWidgetConfig
 };
-//# sourceMappingURL=widgetInputs-DNVvusS1.js.map
+//# sourceMappingURL=widgetInputs-DdecKYqd.js.map


web/index.html vendored

@@ -6,8 +6,8 @@
 <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
 <link rel="stylesheet" type="text/css" href="user.css" />
 <link rel="stylesheet" type="text/css" href="materialdesignicons.min.css" />
-<script type="module" crossorigin src="./assets/index-CgU1oKZt.js"></script>
-<link rel="stylesheet" crossorigin href="./assets/index-BDQCPKeJ.css">
+<script type="module" crossorigin src="./assets/index-BHayQCxv.js"></script>
+<link rel="stylesheet" crossorigin href="./assets/index-BitceZ14.css">
 </head>
 <body class="litegraph grid">
 <div id="vue-app"></div>