Compare commits
18 Commits (webfiltere...venv-manag)
| Author | SHA1 | Date |
|---|---|---|
|  | c3f48337ae |  |
|  | ded60c33a0 |  |
|  | 8bb858e4d3 |  |
|  | 57893c843f |  |
|  | 65da29aaa9 |  |
|  | 10024a38ea |  |
|  | 87f9130778 |  |
|  | 7e84bf5373 |  |
|  | 4f3b50ba51 |  |
|  | e930a387d6 |  |
|  | d8e5662822 |  |
|  | 3d44a09812 |  |
|  | 62690eddec |  |
|  | 05eb10b43a |  |
|  | f5e4e976f4 |  |
|  | aee2908d03 |  |
|  | dc46db7aa4 |  |
|  | 7046983d95 |  |
README.md

````diff
@@ -110,7 +110,6 @@ ComfyUI follows a weekly release cycle every Friday, with three interconnected r
 2. **[ComfyUI Desktop](https://github.com/Comfy-Org/desktop)**
    - Builds a new release using the latest stable core version
-   - Version numbers match the core release (e.g., Desktop v1.7.0 uses Core v1.7.0)
 3. **[ComfyUI Frontend](https://github.com/Comfy-Org/ComfyUI_frontend)**
    - Weekly frontend updates are merged into the core repository
@@ -198,11 +197,11 @@ Put your VAE in: models/vae
 
 ### AMD GPUs (Linux only)
 AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version:
 
-```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2.4```
+```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.3```
 
-This is the command to install the nightly with ROCm 6.3 which might have some performance improvements:
+This is the command to install the nightly with ROCm 6.4 which might have some performance improvements:
 
-```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.3```
+```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.4```
 
 ### Intel GPUs (Windows and Linux)
````
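A quick way to confirm which wheel ended up installed (a sketch, not part of the diff; `torch.version.hip` is populated only on ROCm builds):

```python
# Sketch: verify a ROCm PyTorch build after running one of the commands above.
import torch

print(torch.__version__)          # e.g. "2.7.0+rocm6.3"
print(torch.version.hip)          # HIP version string on ROCm builds, None otherwise
print(torch.cuda.is_available())  # ROCm reuses the CUDA device API, so True when working
```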
app/venv_management.py (new file, 125 lines)
@@ -0,0 +1,125 @@

```python
import torch
import torchvision
import torchaudio
from dataclasses import dataclass

import importlib
if importlib.util.find_spec("torch_directml"):
    from pip._vendor import pkg_resources


class VEnvException(Exception):
    pass


@dataclass
class TorchVersionInfo:
    name: str = None
    version: str = None
    extension: str = None
    is_nightly: bool = False
    is_cpu: bool = False
    is_cuda: bool = False
    is_xpu: bool = False
    is_rocm: bool = False
    is_directml: bool = False


def get_bootstrap_requirements_string():
    '''
    Get string to insert into a 'pip install' command to get the same torch dependencies as current venv.
    '''
    torch_info = get_torch_info(torch)
    packages = [torchvision, torchaudio]
    infos = [torch_info] + [get_torch_info(x) for x in packages]
    # directml should be first dependency, if exists
    directml_info = get_torch_directml_info()
    if directml_info is not None:
        infos = [directml_info] + infos
    # create list of strings to combine into install string
    install_str_list = []
    for info in infos:
        info_string = f"{info.name}=={info.version}"
        if not info.is_cpu and not info.is_directml:
            info_string = f"{info_string}+{info.extension}"
        install_str_list.append(info_string)
    # handle extra_index_url, if needed
    extra_index_url = get_index_url(torch_info)
    if extra_index_url:
        install_str_list.append(extra_index_url)
    # format nightly install properly
    if torch_info.is_nightly:
        install_str_list = ["--pre"] + install_str_list

    install_str = " ".join(install_str_list)
    return install_str

def get_index_url(info: TorchVersionInfo=None):
    '''
    Get --extra-index-url (or --index-url) for torch install.
    '''
    if info is None:
        info = get_torch_info()
    # for cpu, don't need any index_url
    if info.is_cpu and not info.is_nightly:
        return None
    # otherwise, format index_url
    base_url = "https://download.pytorch.org/whl/"
    if info.is_nightly:
        base_url = f"--index-url {base_url}nightly/"
    else:
        base_url = f"--extra-index-url {base_url}"
    base_url = f"{base_url}{info.extension}"
    return base_url

def get_torch_info(package=None):
    '''
    Get info about an installed torch-related package.
    '''
    if package is None:
        package = torch
    info = TorchVersionInfo(name=package.__name__)
    info.version = package.__version__
    info.extension = None
    info.is_nightly = False
    # get extension, separate from version
    info.version, info.extension = info.version.split('+', 1)
    if info.extension.startswith('cpu'):
        info.is_cpu = True
    elif info.extension.startswith('cu'):
        info.is_cuda = True
    elif info.extension.startswith('rocm'):
        info.is_rocm = True
    elif info.extension.startswith('xpu'):
        info.is_xpu = True
    # TODO: add checks for some odd pytorch versions, if possible

    # check if nightly install
    if 'dev' in info.version:
        info.is_nightly = True

    return info

def get_torch_directml_info():
    '''
    Get info specifically about torch-directml package.

    Returns None if torch-directml is not installed.
    '''
    # the import string and the pip string are different
    pip_name = "torch-directml"
    # if no torch_directml, do nothing
    if not importlib.util.find_spec("torch_directml"):
        return None
    info = TorchVersionInfo(name=pip_name)
    info.is_directml = True
    for p in pkg_resources.working_set:
        if p.project_name.lower() == pip_name:
            info.version = p.version
    if p.version is None:
        return None
    return info


if __name__ == '__main__':
    print(get_bootstrap_requirements_string())
```
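A plausible use of the new module, sketched under the assumption that the venv's torch build carries a `+<extension>` local version (e.g. 2.7.0+cu126); `get_torch_info`'s `split('+', 1)` expects that suffix and would fail on plain PyPI wheels without one:

```python
# Hypothetical usage: reproduce this venv's torch stack in another environment.
from app.venv_management import get_bootstrap_requirements_string

requirements = get_bootstrap_requirements_string()
print(f"pip install {requirements}")
# e.g.: pip install torch==2.7.0+cu126 torchvision==0.22.0+cu126 torchaudio==2.7.0+cu126 \
#       --extra-index-url https://download.pytorch.org/whl/cu126
```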
comfy/utils.py

```diff
@@ -78,8 +78,6 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False):
         pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args)
     else:
         pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle)
-    if "global_step" in pl_sd:
-        logging.debug(f"Global Step: {pl_sd['global_step']}")
     if "state_dict" in pl_sd:
         sd = pl_sd["state_dict"]
     else:
```
comfy_api/torch_helpers/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@

```python
from .torch_compile import set_torch_compile_wrapper

__all__ = [
    "set_torch_compile_wrapper",
]
```
comfy_api/torch_helpers/torch_compile.py (new file, 69 lines)
@@ -0,0 +1,69 @@

```python
from __future__ import annotations
import torch

import comfy.utils
from comfy.patcher_extension import WrappersMP
from typing import TYPE_CHECKING, Callable, Optional
if TYPE_CHECKING:
    from comfy.model_patcher import ModelPatcher
    from comfy.patcher_extension import WrapperExecutor


COMPILE_KEY = "torch.compile"
TORCH_COMPILE_KWARGS = "torch_compile_kwargs"


def apply_torch_compile_factory(compiled_module_dict: dict[str, Callable]) -> Callable:
    '''
    Create a wrapper that will refer to the compiled_diffusion_model.
    '''
    def apply_torch_compile_wrapper(executor: WrapperExecutor, *args, **kwargs):
        try:
            orig_modules = {}
            for key, value in compiled_module_dict.items():
                orig_modules[key] = comfy.utils.get_attr(executor.class_obj, key)
                comfy.utils.set_attr(executor.class_obj, key, value)
            return executor(*args, **kwargs)
        finally:
            for key, value in orig_modules.items():
                comfy.utils.set_attr(executor.class_obj, key, value)
    return apply_torch_compile_wrapper


def set_torch_compile_wrapper(model: ModelPatcher, backend: str, options: Optional[dict[str,str]]=None,
                              mode: Optional[str]=None, fullgraph=False, dynamic: Optional[bool]=None,
                              keys: list[str]=["diffusion_model"], *args, **kwargs):
    '''
    Perform torch.compile that will be applied at sample time for either the whole model or specific params of the BaseModel instance.

    When keys is None, it will default to using ["diffusion_model"], compiling the whole diffusion_model.
    When a list of keys is provided, it will perform torch.compile on only the selected modules.
    '''
    # clear out any other torch.compile wrappers
    model.remove_wrappers_with_key(WrappersMP.APPLY_MODEL, COMPILE_KEY)
    # if no keys, default to 'diffusion_model'
    if not keys:
        keys = ["diffusion_model"]
    # create kwargs dict that can be referenced later
    compile_kwargs = {
        "backend": backend,
        "options": options,
        "mode": mode,
        "fullgraph": fullgraph,
        "dynamic": dynamic,
    }
    # get a dict of compiled keys
    compiled_modules = {}
    for key in keys:
        compiled_modules[key] = torch.compile(
            model=model.get_model_object(key),
            **compile_kwargs,
        )
    # add torch.compile wrapper
    wrapper_func = apply_torch_compile_factory(
        compiled_module_dict=compiled_modules,
    )
    # store wrapper to run on BaseModel's apply_model function
    model.add_wrapper_with_key(WrappersMP.APPLY_MODEL, COMPILE_KEY, wrapper_func)
    # keep compile kwargs for reference
    model.model_options[TORCH_COMPILE_KWARGS] = compile_kwargs
```
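A sketch of how a custom node might call the new helper, assuming `model` is a `ModelPatcher` (the usual MODEL socket value); the `keys` argument is what the old single `add_object_patch` call could not express:

```python
# Sketch: compile only chosen submodules; the key paths below are hypothetical examples.
from comfy_api.torch_helpers import set_torch_compile_wrapper

m = model.clone()
set_torch_compile_wrapper(
    model=m,
    backend="inductor",
    keys=["diffusion_model"],  # or e.g. ["diffusion_model.input_blocks"] on a UNet-style model
)
# At sample time the wrapper swaps the compiled modules in around apply_model,
# then restores the originals in its finally block.
```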
comfy_api_nodes/nodes_kling.py

```diff
@@ -65,6 +65,12 @@ from comfy_api_nodes.apinode_utils import (
     download_url_to_image_tensor,
 )
 from comfy_api_nodes.mapper_utils import model_field_to_node_input
+from comfy_api_nodes.util.validation_utils import (
+    validate_image_dimensions,
+    validate_image_aspect_ratio,
+    validate_video_dimensions,
+    validate_video_duration,
+)
 from comfy_api.input.basic_types import AudioInput
 from comfy_api.input.video_types import VideoInput
 from comfy_api.input_impl import VideoFromFile
@@ -80,18 +86,16 @@ PATH_CHARACTER_IMAGE = f"/proxy/kling/{KLING_API_VERSION}/images/generations"
 PATH_VIRTUAL_TRY_ON = f"/proxy/kling/{KLING_API_VERSION}/images/kolors-virtual-try-on"
 PATH_IMAGE_GENERATIONS = f"/proxy/kling/{KLING_API_VERSION}/images/generations"
 
 MAX_PROMPT_LENGTH_T2V = 2500
 MAX_PROMPT_LENGTH_I2V = 500
 MAX_PROMPT_LENGTH_IMAGE_GEN = 500
 MAX_NEGATIVE_PROMPT_LENGTH_IMAGE_GEN = 200
 MAX_PROMPT_LENGTH_LIP_SYNC = 120
 
-# TODO: adjust based on tests
-AVERAGE_DURATION_T2V = 319  # 319,
-AVERAGE_DURATION_I2V = 164  # 164,
-AVERAGE_DURATION_LIP_SYNC = 120
-AVERAGE_DURATION_VIRTUAL_TRY_ON = 19  # 19,
+AVERAGE_DURATION_T2V = 319
+AVERAGE_DURATION_I2V = 164
+AVERAGE_DURATION_LIP_SYNC = 455
+AVERAGE_DURATION_VIRTUAL_TRY_ON = 19
 AVERAGE_DURATION_IMAGE_GEN = 32
 AVERAGE_DURATION_VIDEO_EFFECTS = 320
 AVERAGE_DURATION_VIDEO_EXTEND = 320
@@ -211,23 +215,8 @@ def validate_input_image(image: torch.Tensor) -> None:
 
     See: https://app.klingai.com/global/dev/document-api/apiReference/model/imageToVideo
     """
-    if len(image.shape) == 4:
-        height, width = image.shape[1], image.shape[2]
-    elif len(image.shape) == 3:
-        height, width = image.shape[0], image.shape[1]
-    else:
-        raise ValueError("Invalid image tensor shape.")
-
-    # Ensure minimum resolution is met
-    if height < 300:
-        raise ValueError("Image height must be at least 300px")
-    if width < 300:
-        raise ValueError("Image width must be at least 300px")
-
-    # Ensure aspect ratio is within acceptable range
-    aspect_ratio = width / height
-    if aspect_ratio < 1 / 2.5 or aspect_ratio > 2.5:
-        raise ValueError("Image aspect ratio must be between 1:2.5 and 2.5:1")
+    validate_image_dimensions(image, min_width=300, min_height=300)
+    validate_image_aspect_ratio(image, min_aspect_ratio=1 / 2.5, max_aspect_ratio=2.5)
 
 
 def get_camera_control_input_config(
@@ -1243,6 +1232,17 @@ class KlingLipSyncBase(KlingNodeBase):
     RETURN_TYPES = ("VIDEO", "STRING", "STRING")
     RETURN_NAMES = ("VIDEO", "video_id", "duration")
 
+    def validate_lip_sync_video(self, video: VideoInput):
+        """
+        Validates the input video adheres to the expectations of the Kling Lip Sync API:
+        - Video length does not exceed 10s and is not shorter than 2s
+        - Length and width dimensions should both be between 720px and 1920px
+
+        See: https://app.klingai.com/global/dev/document-api/apiReference/model/videoTolip
+        """
+        validate_video_dimensions(video, 720, 1920)
+        validate_video_duration(video, 2, 10)
+
     def validate_text(self, text: str):
         if not text:
             raise ValueError("Text is required")
@@ -1282,6 +1282,7 @@ class KlingLipSyncBase(KlingNodeBase):
     ) -> tuple[VideoFromFile, str, str]:
         if text:
             self.validate_text(text)
+        self.validate_lip_sync_video(video)
 
         # Upload video to Comfy API and get download URL
         video_url = upload_video_to_comfyapi(video, auth_kwargs=kwargs)
@@ -1352,7 +1353,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase):
             },
         }
 
-    DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file."
+    DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length."
 
     def api_call(
         self,
@@ -1464,7 +1465,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase):
             },
         }
 
-    DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt."
+    DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length."
 
     def api_call(
         self,
```
comfy_api_nodes/util/__init__.py (new, empty file)

comfy_api_nodes/util/validation_utils.py (new file, 100 lines)
@@ -0,0 +1,100 @@

```python
import logging
from typing import Optional

import torch
from comfy_api.input.video_types import VideoInput


def get_image_dimensions(image: torch.Tensor) -> tuple[int, int]:
    if len(image.shape) == 4:
        return image.shape[1], image.shape[2]
    elif len(image.shape) == 3:
        return image.shape[0], image.shape[1]
    else:
        raise ValueError("Invalid image tensor shape.")


def validate_image_dimensions(
    image: torch.Tensor,
    min_width: Optional[int] = None,
    max_width: Optional[int] = None,
    min_height: Optional[int] = None,
    max_height: Optional[int] = None,
):
    height, width = get_image_dimensions(image)

    if min_width is not None and width < min_width:
        raise ValueError(f"Image width must be at least {min_width}px, got {width}px")
    if max_width is not None and width > max_width:
        raise ValueError(f"Image width must be at most {max_width}px, got {width}px")
    if min_height is not None and height < min_height:
        raise ValueError(
            f"Image height must be at least {min_height}px, got {height}px"
        )
    if max_height is not None and height > max_height:
        raise ValueError(f"Image height must be at most {max_height}px, got {height}px")


def validate_image_aspect_ratio(
    image: torch.Tensor,
    min_aspect_ratio: Optional[float] = None,
    max_aspect_ratio: Optional[float] = None,
):
    width, height = get_image_dimensions(image)
    aspect_ratio = width / height

    if min_aspect_ratio is not None and aspect_ratio < min_aspect_ratio:
        raise ValueError(
            f"Image aspect ratio must be at least {min_aspect_ratio}, got {aspect_ratio}"
        )
    if max_aspect_ratio is not None and aspect_ratio > max_aspect_ratio:
        raise ValueError(
            f"Image aspect ratio must be at most {max_aspect_ratio}, got {aspect_ratio}"
        )


def validate_video_dimensions(
    video: VideoInput,
    min_width: Optional[int] = None,
    max_width: Optional[int] = None,
    min_height: Optional[int] = None,
    max_height: Optional[int] = None,
):
    try:
        width, height = video.get_dimensions()
    except Exception as e:
        logging.error("Error getting dimensions of video: %s", e)
        return

    if min_width is not None and width < min_width:
        raise ValueError(f"Video width must be at least {min_width}px, got {width}px")
    if max_width is not None and width > max_width:
        raise ValueError(f"Video width must be at most {max_width}px, got {width}px")
    if min_height is not None and height < min_height:
        raise ValueError(
            f"Video height must be at least {min_height}px, got {height}px"
        )
    if max_height is not None and height > max_height:
        raise ValueError(f"Video height must be at most {max_height}px, got {height}px")


def validate_video_duration(
    video: VideoInput,
    min_duration: Optional[float] = None,
    max_duration: Optional[float] = None,
):
    try:
        duration = video.get_duration()
    except Exception as e:
        logging.error("Error getting duration of video: %s", e)
        return

    epsilon = 0.0001
    if min_duration is not None and min_duration - epsilon > duration:
        raise ValueError(
            f"Video duration must be at least {min_duration}s, got {duration}s"
        )
    if max_duration is not None and duration > max_duration + epsilon:
        raise ValueError(
            f"Video duration must be at most {max_duration}s, got {duration}s"
        )
```
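A short sketch of the validators on a dummy tensor (hypothetical shapes, not part of the commit). One subtlety worth noticing: `get_image_dimensions` returns `(height, width)`, while `validate_image_aspect_ratio` unpacks it as `width, height`, so its ratio is effectively height/width:

```python
# Sketch: exercising the new helpers on a 4D IMAGE tensor [B, H, W, C].
import torch
from comfy_api_nodes.util.validation_utils import (
    validate_image_dimensions,
    validate_image_aspect_ratio,
)

image = torch.zeros(1, 300, 600, 3)  # H=300, W=600
validate_image_dimensions(image, min_width=300, min_height=300)  # passes
validate_image_aspect_ratio(image, min_aspect_ratio=1 / 2.5, max_aspect_ratio=2.5)  # passes
try:
    validate_image_dimensions(image, min_width=720)
except ValueError as e:
    print(e)  # Image width must be at least 720px, got 600px
```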
```diff
@@ -31,6 +31,7 @@ class T5TokenizerOptions:
         }
     }
 
+    CATEGORY = "_for_testing/conditioning"
     RETURN_TYPES = ("CLIP",)
     FUNCTION = "set_options"
```
comfy_extras/nodes_images.py

```diff
@@ -13,6 +13,7 @@ import os
 import re
 from io import BytesIO
 from inspect import cleandoc
+import torch
 
 from comfy.comfy_types import FileLocator
@@ -74,6 +75,24 @@ class ImageFromBatch:
         s = s_in[batch_index:batch_index + length].clone()
         return (s,)
 
+class ImageAddNoise:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "image": ("IMAGE",),
+                              "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "control_after_generate": True, "tooltip": "The random seed used for creating the noise."}),
+                              "strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
+                              }}
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "repeat"
+
+    CATEGORY = "image"
+
+    def repeat(self, image, seed, strength):
+        generator = torch.manual_seed(seed)
+        s = torch.clip((image + strength * torch.randn(image.size(), generator=generator, device="cpu").to(image)), min=0.0, max=1.0)
+        return (s,)
+
 class SaveAnimatedWEBP:
     def __init__(self):
         self.output_dir = folder_paths.get_output_directory()
@@ -295,6 +314,7 @@ NODE_CLASS_MAPPINGS = {
     "ImageCrop": ImageCrop,
     "RepeatImageBatch": RepeatImageBatch,
     "ImageFromBatch": ImageFromBatch,
+    "ImageAddNoise": ImageAddNoise,
     "SaveAnimatedWEBP": SaveAnimatedWEBP,
     "SaveAnimatedPNG": SaveAnimatedPNG,
     "SaveSVGNode": SaveSVGNode,
```
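The new node is a single clipped additive-Gaussian step; the same operation outside the node wrapper, with hypothetical shapes (ComfyUI IMAGE tensors are [B, H, W, C] floats in [0, 1]):

```python
# Sketch: ImageAddNoise's core operation on a dummy image batch.
import torch

image = torch.rand(1, 64, 64, 3)
strength = 0.5
generator = torch.manual_seed(0)  # noise is generated on CPU for cross-device reproducibility
noise = torch.randn(image.size(), generator=generator, device="cpu").to(image)
out = torch.clip(image + strength * noise, min=0.0, max=1.0)
print(out.shape)  # torch.Size([1, 64, 64, 3]), still clamped to [0, 1]
```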
comfy_extras/nodes_string.py

```diff
@@ -8,7 +8,8 @@ class StringConcatenate():
         return {
             "required": {
                 "string_a": (IO.STRING, {"multiline": True}),
-                "string_b": (IO.STRING, {"multiline": True})
+                "string_b": (IO.STRING, {"multiline": True}),
+                "delimiter": (IO.STRING, {"multiline": False, "default": ""})
             }
         }
@@ -16,8 +17,8 @@ class StringConcatenate():
     FUNCTION = "execute"
     CATEGORY = "utils/string"
 
-    def execute(self, string_a, string_b, **kwargs):
-        return string_a + string_b,
+    def execute(self, string_a, string_b, delimiter, **kwargs):
+        return delimiter.join((string_a, string_b)),
 
 class StringSubstring():
     @classmethod
```
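The behavioral change in plain Python: the default empty delimiter reproduces the old concatenation, so existing workflows keep their output:

```python
# Sketch: old vs new StringConcatenate behavior.
string_a, string_b = "masterpiece", "best quality"
print(string_a + string_b)              # old node: "masterpiecebest quality"
print("".join((string_a, string_b)))    # new node, default delimiter "": same result
print(", ".join((string_a, string_b)))  # new node with delimiter ", ": "masterpiece, best quality"
```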
comfy_extras/nodes_torch_compile.py

```diff
@@ -1,4 +1,5 @@
 import torch
+from comfy_api.torch_helpers import set_torch_compile_wrapper
 
 
 class TorchCompileModel:
     @classmethod
@@ -14,7 +15,7 @@ class TorchCompileModel:
 
     def patch(self, model, backend):
         m = model.clone()
-        m.add_object_patch("diffusion_model", torch.compile(model=m.get_model_object("diffusion_model"), backend=backend))
+        set_torch_compile_wrapper(model=m, backend=backend)
         return (m, )
 
 NODE_CLASS_MAPPINGS = {
```
comfyui_version.py

```diff
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.34"
+__version__ = "0.3.35"
```
execution.py

```diff
@@ -909,7 +909,6 @@ class PromptQueue:
         self.currently_running = {}
         self.history = {}
         self.flags = {}
-        server.prompt_queue = self
 
     def put(self, item):
         with self.mutex:
@@ -954,6 +953,7 @@ class PromptQueue:
             self.history[prompt[1]].update(history_result)
             self.server.queue_updated()
 
+    # Note: slow
     def get_current_queue(self):
         with self.mutex:
             out = []
@@ -961,6 +961,13 @@ class PromptQueue:
                 out += [x]
             return (out, copy.deepcopy(self.queue))
 
+    # read-safe as long as queue items are immutable
+    def get_current_queue_volatile(self):
+        with self.mutex:
+            running = [x for x in self.currently_running.values()]
+            queued = copy.copy(self.queue)
+            return (running, queued)
+
     def get_tasks_remaining(self):
         with self.mutex:
             return len(self.queue) + len(self.currently_running)
```
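The new volatile getter trades a deep copy for a shallow one; a small illustration of the cost difference the comments allude to (hypothetical queue items, not ComfyUI code):

```python
# Sketch: why get_current_queue_volatile is cheaper than get_current_queue.
# copy.copy clones only the outer list; copy.deepcopy recursively clones every
# queued prompt, which is what made the old behavior slow on large queues.
import copy
import time

queue = [(i, f"id-{i}", {"nodes": list(range(200))}) for i in range(1000)]  # stand-in prompts

t0 = time.perf_counter()
shallow = copy.copy(queue)
t1 = time.perf_counter()
deep = copy.deepcopy(queue)
t2 = time.perf_counter()
print(f"shallow: {t1 - t0:.6f}s, deep: {t2 - t1:.6f}s")
# Shallow items are shared with the live queue, hence "read-safe as long as
# queue items are immutable".
```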
main.py

```diff
@@ -260,7 +260,6 @@ def start_comfyui(asyncio_loop=None):
         asyncio_loop = asyncio.new_event_loop()
         asyncio.set_event_loop(asyncio_loop)
     prompt_server = server.PromptServer(asyncio_loop)
-    q = execution.PromptQueue(prompt_server)
 
     hook_breaker_ac10a0.save_functions()
     nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes, init_api_nodes=not args.disable_api_nodes)
@@ -271,7 +270,7 @@ def start_comfyui(asyncio_loop=None):
     prompt_server.add_routes()
     hijack_progress(prompt_server)
 
-    threading.Thread(target=prompt_worker, daemon=True, args=(q, prompt_server,)).start()
+    threading.Thread(target=prompt_worker, daemon=True, args=(prompt_server.prompt_queue, prompt_server,)).start()
 
     if args.quick_test_for_ci:
         exit(0)
```
nodes.py

```diff
@@ -1940,7 +1940,7 @@ class ImagePadForOutpaint:
 
         mask[top:top + d2, left:left + d3] = t
 
-        return (new_image, mask)
+        return (new_image, mask.unsqueeze(0))
 
 
 NODE_CLASS_MAPPINGS = {
```
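The one-line fix gives ImagePadForOutpaint's mask the batched `[B, H, W]` layout that MASK consumers expect; a toy check with hypothetical sizes:

```python
# Sketch: unsqueeze(0) promotes a single [H, W] mask to a one-element batch.
import torch

mask = torch.zeros(512, 512)
print(mask.shape)               # torch.Size([512, 512])
print(mask.unsqueeze(0).shape)  # torch.Size([1, 512, 512])
```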
pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.34"
+version = "0.3.35"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"
```
requirements.txt

```diff
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.19.9
-comfyui-workflow-templates==0.1.14
+comfyui-workflow-templates==0.1.18
 torch
 torchsde
 torchvision
```
server.py

```diff
@@ -29,6 +29,7 @@ import comfy.model_management
 import node_helpers
 from comfyui_version import __version__
 from app.frontend_management import FrontendManager
 
 from app.user_manager import UserManager
 from app.model_manager import ModelFileManager
 from app.custom_node_manager import CustomNodeManager
@@ -159,7 +160,7 @@ class PromptServer():
         self.custom_node_manager = CustomNodeManager()
         self.internal_routes = InternalRoutes(self)
         self.supports = ["custom_nodes_from_web"]
-        self.prompt_queue = None
+        self.prompt_queue = execution.PromptQueue(self)
         self.loop = loop
         self.messages = asyncio.Queue()
         self.client_session:Optional[aiohttp.ClientSession] = None
@@ -226,7 +227,7 @@ class PromptServer():
             return response
 
         @routes.get("/embeddings")
-        def get_embeddings(self):
+        def get_embeddings(request):
             embeddings = folder_paths.get_filename_list("embeddings")
             return web.json_response(list(map(lambda a: os.path.splitext(a)[0], embeddings)))
@@ -282,7 +283,6 @@ class PromptServer():
                     a.update(f.read())
                     b.update(image.file.read())
                     image.file.seek(0)
-                    f.close()
                     return a.hexdigest() == b.hexdigest()
             return False
@@ -621,7 +621,7 @@ class PromptServer():
         @routes.get("/queue")
         async def get_queue(request):
             queue_info = {}
-            current_queue = self.prompt_queue.get_current_queue()
+            current_queue = self.prompt_queue.get_current_queue_volatile()
             queue_info['queue_running'] = current_queue[0]
             queue_info['queue_pending'] = current_queue[1]
             return web.json_response(queue_info)
```