Compare commits
4 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 54eb2654ba |  |
|  | 89a84e32d2 |  |
|  | e5799c4899 |  |
|  | a0651359d7 |  |
```diff
@@ -620,6 +620,9 @@ def convert_config(unet_config):
 
 def unet_config_from_diffusers_unet(state_dict, dtype=None):
     if "conv_in.weight" not in state_dict:
         return None
 
     match = {}
     transformer_depth = []
```
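For context, the function in this hunk keys off the presence of diffusers-style tensor names to decide whether a checkpoint can be matched at all. Below is a minimal sketch of that kind of key-based detection, using a plain dict in place of a real state dict and a hypothetical `looks_like_diffusers_unet` helper that is not part of ComfyUI:

```python
def looks_like_diffusers_unet(state_dict):
    # Hypothetical helper: a diffusers-format UNet checkpoint is expected to
    # carry a "conv_in.weight" tensor; if it is missing, detection bails out
    # early, mirroring the early `return None` in the hunk above.
    if "conv_in.weight" not in state_dict:
        return None
    # A real detector would go on to build a `match` dict and walk the block
    # keys to infer transformer_depth; that part is omitted here.
    return {"format": "diffusers"}

# Usage with stand-in dicts instead of real tensors:
print(looks_like_diffusers_unet({"conv_in.weight": object()}))                               # {'format': 'diffusers'}
print(looks_like_diffusers_unet({"model.diffusion_model.input_blocks.0.0.weight": object()}))  # None
```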
```diff
@@ -301,7 +301,7 @@ try:
         logging.info("AMD arch: {}".format(arch))
         if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
             if torch_version_numeric[0] >= 2 and torch_version_numeric[1] >= 7: # works on 2.6 but doesn't actually seem to improve much
-                if any((a in arch) for a in ["gfx1100", "gfx1101"]): # TODO: more arches
+                if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches
                     ENABLE_PYTORCH_ATTENTION = True
 except:
     pass
```
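The change above adds "gfx1151" to the allowlist of AMD architecture strings that switch on PyTorch attention by default. A minimal sketch of that gating logic follows, with hypothetical stand-ins for the values ComfyUI derives at startup (the reported arch string and the torch version tuple); it mirrors the condition in the diff rather than ComfyUI's full startup path:

```python
def should_enable_pytorch_attention(arch, torch_version=(2, 7)):
    # Only enable for torch >= 2.7 and for one of these gfx architecture
    # substrings; "gfx1151" is the one added in this comparison.
    if torch_version[0] >= 2 and torch_version[1] >= 7:
        return any(a in arch for a in ["gfx1100", "gfx1101", "gfx1151"])
    return False

print(should_enable_pytorch_attention("gfx1151"))                        # True
print(should_enable_pytorch_attention("gfx1030"))                        # False
print(should_enable_pytorch_attention("gfx1151", torch_version=(2, 6)))  # False
```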
```diff
@@ -695,7 +695,7 @@ def unet_inital_load_device(parameters, dtype):
         return torch_dev
 
     cpu_dev = torch.device("cpu")
-    if DISABLE_SMART_MEMORY:
+    if DISABLE_SMART_MEMORY or vram_state == VRAMState.NO_VRAM:
         return cpu_dev
 
     model_size = dtype_size(dtype) * parameters
```
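With this change, the initial UNet load device falls back to the CPU not only when smart memory is disabled but also when ComfyUI is running in NO_VRAM mode. The sketch below isolates just that branch of the decision, using a hypothetical `pick_initial_load_device` function and stand-in flags rather than ComfyUI's module-level state (the real function also weighs model size against free memory):

```python
from enum import Enum
import torch

class VRAMState(Enum):
    # Stand-in for ComfyUI's VRAMState enum; only the two states used here.
    NO_VRAM = 0
    NORMAL_VRAM = 1

def pick_initial_load_device(disable_smart_memory, vram_state, torch_dev):
    cpu_dev = torch.device("cpu")
    # After the change, NO_VRAM mode also forces the initial load onto the
    # CPU, instead of only doing so when smart memory is disabled.
    if disable_smart_memory or vram_state == VRAMState.NO_VRAM:
        return cpu_dev
    return torch_dev

print(pick_initial_load_device(False, VRAMState.NO_VRAM, torch.device("cuda")))      # cpu
print(pick_initial_load_device(False, VRAMState.NORMAL_VRAM, torch.device("cuda")))  # cuda
```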
```diff
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.20.5
+comfyui-frontend-package==1.20.6
 comfyui-workflow-templates==0.1.18
 torch
 torchsde
```
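If you want to confirm that the bumped frontend pin actually took effect after reinstalling the requirements, a small standard-library check like the following works; it is not part of the change itself, and the expected version (1.20.6) reflects the pin at the time of this comparison:

```python
from importlib.metadata import version, PackageNotFoundError

try:
    installed = version("comfyui-frontend-package")
    print("comfyui-frontend-package", installed)  # expect 1.20.6 after this bump
except PackageNotFoundError:
    print("comfyui-frontend-package is not installed; run: pip install -r requirements.txt")
```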