Compare commits

venv-manag ... v0.3.36 (9 commits)

| SHA1 |
|---|
| ad3bd8aa49 |
| 5a87757ef9 |
| 464aece92b |
| 0b50d4c0db |
| 30b2eb8a93 |
| f85c08df06 |
| 4202e956a0 |
| b838c36720 |
| fc39184ea9 |
```diff
@@ -1,125 +0,0 @@
-import torch
-import torchvision
-import torchaudio
-from dataclasses import dataclass
-
-import importlib
-if importlib.util.find_spec("torch_directml"):
-    from pip._vendor import pkg_resources
-
-
-class VEnvException(Exception):
-    pass
-
-
-@dataclass
-class TorchVersionInfo:
-    name: str = None
-    version: str = None
-    extension: str = None
-    is_nightly: bool = False
-    is_cpu: bool = False
-    is_cuda: bool = False
-    is_xpu: bool = False
-    is_rocm: bool = False
-    is_directml: bool = False
-
-
-def get_bootstrap_requirements_string():
-    '''
-    Get string to insert into a 'pip install' command to get the same torch dependencies as current venv.
-    '''
-    torch_info = get_torch_info(torch)
-    packages = [torchvision, torchaudio]
-    infos = [torch_info] + [get_torch_info(x) for x in packages]
-    # directml should be first dependency, if exists
-    directml_info = get_torch_directml_info()
-    if directml_info is not None:
-        infos = [directml_info] + infos
-    # create list of strings to combine into install string
-    install_str_list = []
-    for info in infos:
-        info_string = f"{info.name}=={info.version}"
-        if not info.is_cpu and not info.is_directml:
-            info_string = f"{info_string}+{info.extension}"
-        install_str_list.append(info_string)
-    # handle extra_index_url, if needed
-    extra_index_url = get_index_url(torch_info)
-    if extra_index_url:
-        install_str_list.append(extra_index_url)
-    # format nightly install properly
-    if torch_info.is_nightly:
-        install_str_list = ["--pre"] + install_str_list
-
-    install_str = " ".join(install_str_list)
-    return install_str
-
-def get_index_url(info: TorchVersionInfo=None):
-    '''
-    Get --extra-index-url (or --index-url) for torch install.
-    '''
-    if info is None:
-        info = get_torch_info()
-    # for cpu, don't need any index_url
-    if info.is_cpu and not info.is_nightly:
-        return None
-    # otherwise, format index_url
-    base_url = "https://download.pytorch.org/whl/"
-    if info.is_nightly:
-        base_url = f"--index-url {base_url}nightly/"
-    else:
-        base_url = f"--extra-index-url {base_url}"
-    base_url = f"{base_url}{info.extension}"
-    return base_url
-
-def get_torch_info(package=None):
-    '''
-    Get info about an installed torch-related package.
-    '''
-    if package is None:
-        package = torch
-    info = TorchVersionInfo(name=package.__name__)
-    info.version = package.__version__
-    info.extension = None
-    info.is_nightly = False
-    # get extension, separate from version
-    info.version, info.extension = info.version.split('+', 1)
-    if info.extension.startswith('cpu'):
-        info.is_cpu = True
-    elif info.extension.startswith('cu'):
-        info.is_cuda = True
-    elif info.extension.startswith('rocm'):
-        info.is_rocm = True
-    elif info.extension.startswith('xpu'):
-        info.is_xpu = True
-    # TODO: add checks for some odd pytorch versions, if possible
-
-    # check if nightly install
-    if 'dev' in info.version:
-        info.is_nightly = True
-
-    return info
-
-def get_torch_directml_info():
-    '''
-    Get info specifically about torch-directml package.
-
-    Returns None if torch-directml is not installed.
-    '''
-    # the import string and the pip string are different
-    pip_name = "torch-directml"
-    # if no torch_directml, do nothing
-    if not importlib.util.find_spec("torch_directml"):
-        return None
-    info = TorchVersionInfo(name=pip_name)
-    info.is_directml = True
-    for p in pkg_resources.working_set:
-        if p.project_name.lower() == pip_name:
-            info.version = p.version
-    if p.version is None:
-        return None
-    return info
-
-
-if __name__ == '__main__':
-    print(get_bootstrap_requirements_string())
```
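This hunk deletes the experimental venv-management helper. Its core job was to rebuild a `pip install` argument string matching the venv's torch stack. A minimal sketch of that logic follows, with illustrative (not real) version strings; note that `get_torch_info` assumed `__version__` always carries a `+<extension>` suffix, so a bare version like `2.3.0` would make the two-way unpacking raise `ValueError`.

```python
# Minimal sketch of the deleted helper's output for a hypothetical nightly
# CUDA install. Version strings are illustrative, and the torchvision /
# torchaudio entries the real helper appended are omitted for brevity.
version = "2.8.0.dev20250101+cu128"   # stand-in for torch.__version__
ver, ext = version.split('+', 1)      # -> ("2.8.0.dev20250101", "cu128")
assert 'dev' in ver                   # nightly -> "--pre" plus the nightly index
print(f"--pre torch=={ver}+{ext} --index-url https://download.pytorch.org/whl/nightly/{ext}")
```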
```diff
@@ -88,6 +88,7 @@ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE"
 parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
 parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
+parser.add_argument("--supports-fp8-compute", action="store_true", help="ComfyUI will act like if the device supports fp8 compute.")

 class LatentPreviewMethod(enum.Enum):
     NoPreviews = "none"
```
```diff
@@ -163,7 +163,7 @@ class Chroma(nn.Module):
         distil_guidance = timestep_embedding(guidance.detach().clone(), 16).to(img.device, img.dtype)

         # get all modulation index
-        modulation_index = timestep_embedding(torch.arange(mod_index_length), 32).to(img.device, img.dtype)
+        modulation_index = timestep_embedding(torch.arange(mod_index_length, device=img.device), 32).to(img.device, img.dtype)
         # we need to broadcast the modulation index here so each batch has all of the index
         modulation_index = modulation_index.unsqueeze(0).repeat(img.shape[0], 1, 1).to(img.device, img.dtype)
         # and we need to broadcast timestep and guidance along too
```
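The one-line change passes `device=img.device` to `torch.arange`, so the index tensor is created directly on the target device instead of being allocated on the CPU and copied across. A small illustration (the length is a stand-in, not taken from the diff):

```python
import torch

mod_index_length = 344  # placeholder value, not from the diff
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

idx_old = torch.arange(mod_index_length).to(device)      # CPU alloc, then copy
idx_new = torch.arange(mod_index_length, device=device)  # allocated in place
assert torch.equal(idx_old, idx_new)
```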
```diff
@@ -20,8 +20,11 @@ if model_management.xformers_enabled():
 if model_management.sage_attention_enabled():
     try:
         from sageattention import sageattn
-    except ModuleNotFoundError:
-        logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention")
+    except ModuleNotFoundError as e:
+        if e.name == "sageattention":
+            logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention")
+        else:
+            raise e
         exit(-1)

 if model_management.flash_attention_enabled():
```
|||||||
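The old handler caught every `ModuleNotFoundError`, so an import failure inside sageattention itself (for example one of its own dependencies missing) produced the misleading "install sageattention" message. `ModuleNotFoundError.name` identifies the module that actually failed to import, which is what the new code checks:

```python
# Sketch of the attribute the fix relies on: ModuleNotFoundError.name holds
# the missing module's name, letting callers tell "sageattention is absent"
# apart from "sageattention imported something that is absent".
try:
    import surely_not_a_real_module  # hypothetical missing dependency
except ModuleNotFoundError as e:
    print(e.name)  # -> "surely_not_a_real_module"
```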
```diff
@@ -635,7 +635,7 @@ class VaceWanModel(WanModel):
         t,
         context,
         vace_context,
-        vace_strength=1.0,
+        vace_strength,
         clip_fea=None,
         freqs=None,
         transformer_options={},
```
```diff
@@ -661,8 +661,11 @@ class VaceWanModel(WanModel):
             context = torch.concat([context_clip, context], dim=1)
             context_img_len = clip_fea.shape[-2]

+        orig_shape = list(vace_context.shape)
+        vace_context = vace_context.movedim(0, 1).reshape([-1] + orig_shape[2:])
         c = self.vace_patch_embedding(vace_context.float()).to(vace_context.dtype)
         c = c.flatten(2).transpose(1, 2)
+        c = list(c.split(orig_shape[0], dim=0))

         # arguments
         x_orig = x
```
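`vace_context` now arrives with a stream dimension, which the added lines fold into the batch dimension before patch embedding, then split back into one tensor per stream. A round-trip sketch with illustrative shapes:

```python
import torch

# (batch, stream, ch, ...) -> (stream*batch, ch, ...) -> per-stream chunks
# of size batch via split; sizes here are illustrative only.
vace_context = torch.randn(2, 3, 96, 4, 8, 8)  # batch=2, streams=3
orig_shape = list(vace_context.shape)
flat = vace_context.movedim(0, 1).reshape([-1] + orig_shape[2:])  # (6, 96, 4, 8, 8)
chunks = list(flat.split(orig_shape[0], dim=0))  # 3 tensors of (2, 96, 4, 8, 8)
print(len(chunks), chunks[0].shape)
```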
```diff
@@ -682,8 +685,9 @@ class VaceWanModel(WanModel):

             ii = self.vace_layers_mapping.get(i, None)
             if ii is not None:
-                c_skip, c = self.vace_blocks[ii](c, x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len)
-                x += c_skip * vace_strength
+                for iii in range(len(c)):
+                    c_skip, c[iii] = self.vace_blocks[ii](c[iii], x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len)
+                    x += c_skip * vace_strength[iii]
                 del c_skip
         # head
         x = self.head(x, e)
```
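`c` is now a list with one embedding per VACE stream and `vace_strength` a matching list, so each stream's skip connection is scaled independently before being summed into the hidden states. A toy version of the accumulation:

```python
import torch

# Toy accumulation matching the new loop: one skip tensor per stream,
# each weighted by its own strength (shapes are illustrative).
x = torch.zeros(1, 4)
c_skips = [torch.ones(1, 4), torch.full((1, 4), 2.0)]  # stand-ins for vace_blocks outputs
vace_strength = [1.0, 0.5]
for c_skip, s in zip(c_skips, vace_strength):
    x += c_skip * s
print(x)  # tensor([[2., 2., 2., 2.]]) -> 1*1.0 + 2*0.5
```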
```diff
@@ -1062,20 +1062,25 @@ class WAN21_Vace(WAN21):
         vace_frames = kwargs.get("vace_frames", None)
         if vace_frames is None:
             noise_shape[1] = 32
-            vace_frames = torch.zeros(noise_shape, device=noise.device, dtype=noise.dtype)
-
-        for i in range(0, vace_frames.shape[1], 16):
-            vace_frames = vace_frames.clone()
-            vace_frames[:, i:i + 16] = self.process_latent_in(vace_frames[:, i:i + 16])
+            vace_frames = [torch.zeros(noise_shape, device=noise.device, dtype=noise.dtype)]

         mask = kwargs.get("vace_mask", None)
         if mask is None:
             noise_shape[1] = 64
-            mask = torch.ones(noise_shape, device=noise.device, dtype=noise.dtype)
+            mask = [torch.ones(noise_shape, device=noise.device, dtype=noise.dtype)] * len(vace_frames)

-        out['vace_context'] = comfy.conds.CONDRegular(torch.cat([vace_frames.to(noise), mask.to(noise)], dim=1))
+        vace_frames_out = []
+        for j in range(len(vace_frames)):
+            vf = vace_frames[j].clone()
+            for i in range(0, vf.shape[1], 16):
+                vf[:, i:i + 16] = self.process_latent_in(vf[:, i:i + 16])
+            vf = torch.cat([vf, mask[j]], dim=1)
+            vace_frames_out.append(vf)

-        vace_strength = kwargs.get("vace_strength", 1.0)
+        vace_frames = torch.stack(vace_frames_out, dim=1)
+        out['vace_context'] = comfy.conds.CONDRegular(vace_frames)
+
+        vace_strength = kwargs.get("vace_strength", [1.0] * len(vace_frames_out))
         out['vace_strength'] = comfy.conds.CONDConstant(vace_strength)
         return out
```
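Each stream's latent frames are normalized 16 channels at a time, concatenated with that stream's mask along the channel dimension, and the streams are then stacked on a new dim 1. A shape sketch with illustrative sizes:

```python
import torch

# Illustrative sizes only: two VACE streams, 32 latent + 64 mask channels each.
frames = [torch.zeros(1, 32, 4, 8, 8) for _ in range(2)]
masks = [torch.ones(1, 64, 4, 8, 8) for _ in range(2)]

streams = [torch.cat([f, m], dim=1) for f, m in zip(frames, masks)]  # each (1, 96, 4, 8, 8)
packed = torch.stack(streams, dim=1)
print(packed.shape)  # torch.Size([1, 2, 96, 4, 8, 8]) -> batch, stream, channels, t, h, w
```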
```diff
@@ -1257,6 +1257,9 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
     return False

 def supports_fp8_compute(device=None):
+    if args.supports_fp8_compute:
+        return True
+
     if not is_nvidia():
         return False
```
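This is the consumer of the `--supports-fp8-compute` flag added in the cli_args hunk above: when the flag is set, fp8 compute is reported as supported before any hardware detection runs.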
```diff
@@ -16,7 +16,7 @@ class Load3D():

         os.makedirs(input_dir, exist_ok=True)

-        files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.obj', '.mtl', '.fbx', '.stl'))]
+        files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.obj', '.fbx', '.stl'))]

         return {"required": {
             "model_file": (sorted(files), {"file_upload": True}),
```
```diff
@@ -268,8 +268,9 @@ class WanVaceToVideo:
             trim_latent = reference_image.shape[2]

         mask = mask.unsqueeze(0)
-        positive = node_helpers.conditioning_set_values(positive, {"vace_frames": control_video_latent, "vace_mask": mask, "vace_strength": strength})
-        negative = node_helpers.conditioning_set_values(negative, {"vace_frames": control_video_latent, "vace_mask": mask, "vace_strength": strength})
+        positive = node_helpers.conditioning_set_values(positive, {"vace_frames": [control_video_latent], "vace_mask": [mask], "vace_strength": [strength]}, append=True)
+        negative = node_helpers.conditioning_set_values(negative, {"vace_frames": [control_video_latent], "vace_mask": [mask], "vace_strength": [strength]}, append=True)

         latent = torch.zeros([batch_size, 16, latent_length, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         out_latent = {}
```
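Each value is now wrapped in a single-element list and passed with `append=True`, so chaining a second WanVaceToVideo node extends the `vace_frames` / `vace_mask` / `vace_strength` lists instead of overwriting them. This is what feeds the multi-stream handling added to `VaceWanModel` and `WAN21_Vace` above.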
```diff
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.35"
+__version__ = "0.3.36"
```
```diff
@@ -5,12 +5,18 @@ from comfy.cli_args import args

 from PIL import ImageFile, UnidentifiedImageError

-def conditioning_set_values(conditioning, values={}):
+def conditioning_set_values(conditioning, values={}, append=False):
     c = []
     for t in conditioning:
         n = [t[0], t[1].copy()]
         for k in values:
-            n[1][k] = values[k]
+            val = values[k]
+            if append:
+                old_val = n[1].get(k, None)
+                if old_val is not None:
+                    val = old_val + val
+
+            n[1][k] = val
         c.append(n)

     return c
```
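With `append=True`, list-valued entries concatenate instead of overwriting. A self-contained sketch of the semantics (the function body is copied from the diff; the conditioning entry is a stand-in):

```python
def conditioning_set_values(conditioning, values={}, append=False):
    # copied from the diff above
    c = []
    for t in conditioning:
        n = [t[0], t[1].copy()]
        for k in values:
            val = values[k]
            if append:
                old_val = n[1].get(k, None)
                if old_val is not None:
                    val = old_val + val
            n[1][k] = val
        c.append(n)
    return c

cond = [[None, {"vace_strength": [1.0]}]]  # stand-in conditioning entry
cond = conditioning_set_values(cond, {"vace_strength": [0.5]}, append=True)
print(cond[0][1]["vace_strength"])  # [1.0, 0.5]
```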
nodes.py
```diff
@@ -1103,16 +1103,7 @@ class unCLIPConditioning:
         if strength == 0:
             return (conditioning, )

-        c = []
-        for t in conditioning:
-            o = t[1].copy()
-            x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
-            if "unclip_conditioning" in o:
-                o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x]
-            else:
-                o["unclip_conditioning"] = [x]
-            n = [t[0], o]
-            c.append(n)
+        c = node_helpers.conditioning_set_values(conditioning, {"unclip_conditioning": [{"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}]}, append=True)
         return (c, )

 class GLIGENLoader:
```
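This refactor is behavior-preserving: with `append=True`, `old_val + val` concatenates the stored list with the new single-element list, matching the old `o["unclip_conditioning"][:] + [x]` path, and when no prior value exists the new list is simply set.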
```diff
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.35"
+version = "0.3.36"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"
```
```diff
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.19.9
+comfyui-frontend-package==1.20.5
 comfyui-workflow-templates==0.1.18
 torch
 torchsde
```