# Compare commits: `model_mana...model-path`

81 Commits
| Author | SHA1 | Date |
|---|---|---|
| | `095d867147` | |
| | `caeb27c3a5` | |
| | `3d06e1c555` | |
| | `43a74c0de1` | |
| | `af93c8d1ee` | |
| | `832e3f5ca3` | |
| | `079eccc92a` | |
| | `b6951768c4` | |
| | `fca304debf` | |
| | `14880e6dba` | |
| | `f1059b0b82` | |
| | `debabccb84` | |
| | `37cd448529` | |
| | `94f21f9301` | |
| | `60653004e5` | |
| | `a57d635c5f` | |
| | `016b219dcc` | |
| | `8ac2dddeed` | |
| | `3e880ac709` | |
| | `e5ea112a90` | |
| | `8d88bfaff9` | |
| | `ed4d92b721` | |
| | `932ae8d9ca` | |
| | `44e19a28d3` | |
| | `0a0df5f136` | |
| | `24d6871e47` | |
| | `9e1d301129` | |
| | `768e035868` | |
| | `669e0497ea` | |
| | `541dc08547` | |
| | `8d8dc9a262` | |
| | `2f98c24360` | |
| | `ef85058e97` | |
| | `f9230bd357` | |
| | `537c27cbf3` | |
| | `6ff2e4d550` | |
| | `222f48c0f2` | |
| | `13fd4d6e45` | |
| | `1210d094c7` | |
| | `255edf2246` | |
| | `4f011b9a00` | |
| | `67feb05299` | |
| | `6d21740346` | |
| | `7fbf4b72fe` | |
| | `14ca5f5a10` | |
| | `ce557cfb88` | |
| | `96e2a45193` | |
| | `dfa2b6d129` | |
| | `f3566f0894` | |
| | `ca69b41cee` | |
| | `a058f52090` | |
| | `d6bbe8c40f` | |
| | `a7fe0a94de` | |
| | `e857dd48b8` | |
| | `d303cb5341` | |
| | `fb2ad645a3` | |
| | `d8a7a32779` | |
| | `a00e1489d2` | |
| | `ebf038d4fa` | |
| | `b4de04a1c1` | |
| | `b1a02131c9` | |
| | `3a3910f91d` | |
| | `507199d9a8` | |
| | `2f3ab40b62` | |
| | `7fc3ccdcc2` | |
| | `55add50220` | |
| | `0aa2368e46` | |
| | `cca96a85ae` | |
| | `619b8cde74` | |
| | `31831e6ef1` | |
| | `88ceb28e20` | |
| | `23289a6a5c` | |
| | `9d8b6c1f46` | |
| | `6320d05696` | |
| | `25683b5b02` | |
| | `4758fb64b9` | |
| | `008761166f` | |
| | `bfd5dfd611` | |
| | `55ade36d01` | |
| | `2e20e399ea` | |
| | `3baf92d120` | |
#### `.github/workflows/stable-release.yml` (2 changes, vendored)

```diff
@@ -12,7 +12,7 @@ on:
       description: 'CUDA version'
       required: true
       type: string
-      default: "124"
+      default: "126"
     python_minor:
       description: 'Python minor version'
       required: true
```
#### `.github/workflows/test-build.yml` (4 changes, vendored)

```diff
@@ -18,7 +18,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
     steps:
     - uses: actions/checkout@v4
     - name: Set up Python ${{ matrix.python-version }}
@@ -28,4 +28,4 @@ jobs:
     - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
-       pip install -r requirements.txt
+       pip install -r requirements.txt
```
#### `.github/workflows/test-unit.yml` (2 changes, vendored)

```diff
@@ -18,7 +18,7 @@ jobs:
     - name: Set up Python
       uses: actions/setup-python@v4
       with:
-        python-version: '3.10'
+        python-version: '3.12'
     - name: Install requirements
       run: |
        python -m pip install --upgrade pip
```
#### Workflow file (name not captured)

```diff
@@ -17,7 +17,7 @@ on:
       description: 'cuda version'
       required: true
       type: string
-      default: "124"
+      default: "126"

     python_minor:
       description: 'python minor version'
```
#### Workflow file (name not captured)

```diff
@@ -7,7 +7,7 @@ on:
       description: 'cuda version'
       required: true
       type: string
-      default: "124"
+      default: "126"

     python_minor:
       description: 'python minor version'
```
#### `CODEOWNERS`

```diff
@@ -15,6 +15,7 @@
 # Python web server
 /api_server/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata
 /app/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata
+/utils/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata

 # Frontend assets
 /web/ @huchenlei @webfiltered @pythongosssss @yoland68 @robinjhuang
```
#### `README.md` (14 changes)

````diff
@@ -47,11 +47,13 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
    - [AuraFlow](https://comfyanonymous.github.io/ComfyUI_examples/aura_flow/)
    - [HunyuanDiT](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_dit/)
    - [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/)
+   - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/)
 - Video Models
    - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/)
    - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/)
    - [LTX-Video](https://comfyanonymous.github.io/ComfyUI_examples/ltxv/)
    - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/)
+   - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/)
 - [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/)
 - Asynchronous Queue system
 - Many optimizations: Only re-executes the parts of the workflow that changes between executions.
@@ -129,6 +131,8 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you

 If you have trouble extracting it, right click the file -> properties -> unblock

+If you have a 50 series Blackwell card like a 5090 or 5080 see [this discussion thread](https://github.com/comfyanonymous/ComfyUI/discussions/6643)
+
 #### How do I share models between another UI and ComfyUI?

 See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor.
@@ -139,7 +143,7 @@ To run it on services like paperspace, kaggle or colab you can use my [Jupyter N

 ## Manual Install (Windows, Linux)

-Note that some dependencies do not yet support python 3.13 so using 3.12 is recommended.
+python 3.13 is supported but using 3.12 is recommended because some custom nodes and their dependencies might not support it yet.

 Git clone this repo.

@@ -151,11 +155,11 @@ Put your VAE in: models/vae

 ### AMD GPUs (Linux only)
 AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version:

-```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2```
+```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2.4```

-This is the command to install the nightly with ROCm 6.2 which might have some performance improvements:
+This is the command to install the nightly with ROCm 6.3 which might have some performance improvements:

-```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.2.4```
+```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.3```

 ### Intel GPUs (Windows and Linux)

@@ -185,7 +189,7 @@ Additional discussion and help can be found [here](https://github.com/comfyanony

 Nvidia users should install stable pytorch using this command:

-```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu124```
+```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu126```

 This is the command to install pytorch nightly instead which might have performance improvements:
````
#### `alembic.ini` (119 lines, removed)

```ini
# A generic, single database configuration.

[alembic]
# path to migration scripts
# Use forward slashes (/) also on windows to provide an os agnostic path
script_location = alembic_db

# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
# for all available tokens
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s

# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .

# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library.
# Any required deps can installed by adding `alembic[tz]` to the pip requirements
# string value is passed to ZoneInfo()
# leave blank for localtime
# timezone =

# max length of characters to apply to the "slug" field
# truncate_slug_length = 40

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false

# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false

# version location specification; This defaults
# to alembic_db/versions.  When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:alembic_db/versions

# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
# version_path_separator = newline
#
# Use os.pathsep. Default configuration used for new projects.
version_path_separator = os

# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false

# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8

sqlalchemy.url = sqlite:///user/comfyui.db


[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts.  See the documentation for further
# detail and examples

# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME

# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
# hooks = ruff
# ruff.type = exec
# ruff.executable = %(here)s/.venv/bin/ruff
# ruff.options = check --fix REVISION_SCRIPT_FILENAME

# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARNING
handlers = console
qualname =

[logger_sqlalchemy]
level = WARNING
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
```
#### Removed file (3 lines; Alembic revision how-to)

```markdown
## Generate new revision
1. Update models in `/app/database/models.py`
2. Run `alembic revision --autogenerate -m "{your message}"`
```
#### Removed file (75 lines; the Alembic `env.py` under `alembic_db/`)

```python
from logging.config import fileConfig

from sqlalchemy import engine_from_config
from sqlalchemy import pool

from alembic import context

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

from app.database.models import Base
target_metadata = Base.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online() -> None:
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata
        )

        with context.begin_transaction():
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
```
#### Removed file (28 lines; the Alembic `script.py.mako` revision template)

```mako
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}


def upgrade() -> None:
    """Upgrade schema."""
    ${upgrades if upgrades else "pass"}


def downgrade() -> None:
    """Downgrade schema."""
    ${downgrades if downgrades else "pass"}
```
#### Removed file (58 lines; initial migration, revision `2fb22c4fff36`)

```python
"""init

Revision ID: 2fb22c4fff36
Revises:
Create Date: 2025-03-27 19:00:47.686079

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = '2fb22c4fff36'
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Upgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('model',
        sa.Column('type', sa.Text(), nullable=False),
        sa.Column('path', sa.Text(), nullable=False),
        sa.Column('title', sa.Text(), nullable=True),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('architecture', sa.Text(), nullable=True),
        sa.Column('hash', sa.Text(), nullable=True),
        sa.Column('source_url', sa.Text(), nullable=True),
        sa.Column('date_added', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),
        sa.PrimaryKeyConstraint('type', 'path')
    )
    op.create_table('tag',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('name', sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('model_tag',
        sa.Column('model_type', sa.Text(), nullable=False),
        sa.Column('model_path', sa.Text(), nullable=False),
        sa.Column('tag_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['model_type', 'model_path'], ['model.type', 'model.path'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('model_type', 'model_path', 'tag_id')
    )
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('model_tag')
    op.drop_table('tag')
    op.drop_table('model')
    # ### end Alembic commands ###
```
#### Modified file: `InternalRoutes` (api_server internal routes)

```diff
@@ -1,7 +1,6 @@
 from aiohttp import web
 from typing import Optional
-from folder_paths import models_dir, user_directory, output_directory, folder_names_and_paths
-from api_server.services.file_service import FileService
+from folder_paths import folder_names_and_paths
 from api_server.services.terminal_service import TerminalService
 import app.logger

@@ -15,26 +14,10 @@ class InternalRoutes:
     def __init__(self, prompt_server):
         self.routes: web.RouteTableDef = web.RouteTableDef()
         self._app: Optional[web.Application] = None
-        self.file_service = FileService({
-            "models": models_dir,
-            "user": user_directory,
-            "output": output_directory
-        })
         self.prompt_server = prompt_server
         self.terminal_service = TerminalService(prompt_server)

     def setup_routes(self):
-        @self.routes.get('/files')
-        async def list_files(request):
-            directory_key = request.query.get('directory', '')
-            try:
-                file_list = self.file_service.list_files(directory_key)
-                return web.json_response({"files": file_list})
-            except ValueError as e:
-                return web.json_response({"error": str(e)}, status=400)
-            except Exception as e:
-                return web.json_response({"error": str(e)}, status=500)
-
         @self.routes.get('/logs')
         async def get_logs(request):
             return web.json_response("".join([(l["t"] + " - " + l["m"]) for l in app.logger.get_logs()]))
```
#### Removed file (13 lines; `api_server/services/file_service.py`, per the import removed above)

```python
from typing import Dict, List, Optional
from api_server.utils.file_operations import FileSystemOperations, FileSystemItem

class FileService:
    def __init__(self, allowed_directories: Dict[str, str], file_system_ops: Optional[FileSystemOperations] = None):
        self.allowed_directories: Dict[str, str] = allowed_directories
        self.file_system_ops: FileSystemOperations = file_system_ops or FileSystemOperations()

    def list_files(self, directory_key: str) -> List[FileSystemItem]:
        if directory_key not in self.allowed_directories:
            raise ValueError("Invalid directory key")
        directory_path: str = self.allowed_directories[directory_key]
        return self.file_system_ops.walk_directory(directory_path)
```
#### Modified file: `CustomNodeManager` (custom node manager)

```diff
@@ -4,12 +4,93 @@ import os
 import folder_paths
 import glob
 from aiohttp import web
+import json
+import logging
+from functools import lru_cache
+
+from utils.json_util import merge_json_recursive
+
+
+# Extra locale files to load into main.json
+EXTRA_LOCALE_FILES = [
+    "nodeDefs.json",
+    "commands.json",
+    "settings.json",
+]
+
+
+def safe_load_json_file(file_path: str) -> dict:
+    if not os.path.exists(file_path):
+        return {}
+
+    try:
+        with open(file_path, "r", encoding="utf-8") as f:
+            return json.load(f)
+    except json.JSONDecodeError:
+        logging.error(f"Error loading {file_path}")
+        return {}
+
+
 class CustomNodeManager:
     """
     Placeholder to refactor the custom node management features from ComfyUI-Manager.
     Currently it only contains the custom workflow templates feature.
     """
+    @lru_cache(maxsize=1)
+    def build_translations(self):
+        """Load all custom nodes translations during initialization. Translations are
+        expected to be loaded from `locales/` folder.
+
+        The folder structure is expected to be the following:
+        - custom_nodes/
+            - custom_node_1/
+                - locales/
+                    - en/
+                        - main.json
+                        - commands.json
+                        - settings.json
+
+        returned translations are expected to be in the following format:
+        {
+            "en": {
+                "nodeDefs": {...},
+                "commands": {...},
+                "settings": {...},
+                ...{other main.json keys}
+            }
+        }
+        """
+
+        translations = {}
+
+        for folder in folder_paths.get_folder_paths("custom_nodes"):
+            # Sort glob results for deterministic ordering
+            for custom_node_dir in sorted(glob.glob(os.path.join(folder, "*/"))):
+                locales_dir = os.path.join(custom_node_dir, "locales")
+                if not os.path.exists(locales_dir):
+                    continue
+
+                for lang_dir in glob.glob(os.path.join(locales_dir, "*/")):
+                    lang_code = os.path.basename(os.path.dirname(lang_dir))
+
+                    if lang_code not in translations:
+                        translations[lang_code] = {}
+
+                    # Load main.json
+                    main_file = os.path.join(lang_dir, "main.json")
+                    node_translations = safe_load_json_file(main_file)
+
+                    # Load extra locale files
+                    for extra_file in EXTRA_LOCALE_FILES:
+                        extra_file_path = os.path.join(lang_dir, extra_file)
+                        key = extra_file.split(".")[0]
+                        json_data = safe_load_json_file(extra_file_path)
+                        if json_data:
+                            node_translations[key] = json_data
+
+                    if node_translations:
+                        translations[lang_code] = merge_json_recursive(
+                            translations[lang_code], node_translations
+                        )
+
+        return translations
+
     def add_routes(self, routes, webapp, loadedModules):

         @routes.get("/workflow_templates")
@@ -18,17 +99,36 @@ class CustomNodeManager:
             files = [
                 file
                 for folder in folder_paths.get_folder_paths("custom_nodes")
-                for file in glob.glob(os.path.join(folder, '*/example_workflows/*.json'))
+                for file in glob.glob(
+                    os.path.join(folder, "*/example_workflows/*.json")
+                )
             ]
-            workflow_templates_dict = {} # custom_nodes folder name -> example workflow names
+            workflow_templates_dict = (
+                {}
+            )  # custom_nodes folder name -> example workflow names
             for file in files:
-                custom_nodes_name = os.path.basename(os.path.dirname(os.path.dirname(file)))
+                custom_nodes_name = os.path.basename(
+                    os.path.dirname(os.path.dirname(file))
+                )
                 workflow_name = os.path.splitext(os.path.basename(file))[0]
-                workflow_templates_dict.setdefault(custom_nodes_name, []).append(workflow_name)
+                workflow_templates_dict.setdefault(custom_nodes_name, []).append(
+                    workflow_name
+                )
             return web.json_response(workflow_templates_dict)

         # Serve workflow templates from custom nodes.
         for module_name, module_dir in loadedModules:
-            workflows_dir = os.path.join(module_dir, 'example_workflows')
+            workflows_dir = os.path.join(module_dir, "example_workflows")
             if os.path.exists(workflows_dir):
-                webapp.add_routes([web.static('/api/workflow_templates/' + module_name, workflows_dir)])
+                webapp.add_routes(
+                    [
+                        web.static(
+                            "/api/workflow_templates/" + module_name, workflows_dir
+                        )
+                    ]
+                )
+
+        @routes.get("/i18n")
+        async def get_i18n(request):
+            """Returns translations from all custom nodes' locales folders."""
+            return web.json_response(self.build_translations())
```
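A note on `merge_json_recursive`: it is what lets several packs contribute translations for the same language without clobbering each other. Its implementation is not shown in this diff, so the sketch below is only an assumed minimal recursive dict merge (the body and its handling of non-dict values are guesses, not ComfyUI's actual code):

```python
def merge_json_recursive(base: dict, update: dict) -> dict:
    # Assumed behavior: nested dicts merge key by key; any other value
    # (strings, lists, numbers) from `update` replaces the one in `base`.
    merged = dict(base)
    for key, value in update.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_json_recursive(merged[key], value)
        else:
            merged[key] = value
    return merged

# Two custom node packs both contributing "en" translations:
pack_a = {"nodeDefs": {"NodeA": {"title": "A"}}}
pack_b = {"nodeDefs": {"NodeB": {"title": "B"}}, "settings": {"foo": 1}}
print(merge_json_recursive(pack_a, pack_b))
# {'nodeDefs': {'NodeA': {'title': 'A'}, 'NodeB': {'title': 'B'}}, 'settings': {'foo': 1}}
```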
#### Removed file (118 lines; `app/database/db.py`, per the import removed below)

```python
import logging
import os
import shutil
import sys
from app.database.models import Tag
from comfy.cli_args import args

try:
    import alembic
    import sqlalchemy
except ImportError as e:
    req_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "../..", "requirements.txt")
    )
    logging.error(
        f"\n\n********** ERROR ***********\n\nRequirements are not installed ({e}). Please install the requirements.txt file by running:\n{sys.executable} -s -m pip install -r {req_path}\n\nIf you are on the portable package you can run: update\\update_comfyui.bat to solve this problem\n********** ERROR **********\n"
    )
    exit(-1)

from alembic import command
from alembic.config import Config
from alembic.runtime.migration import MigrationContext
from alembic.script import ScriptDirectory
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

Session = None


def get_alembic_config():
    root_path = os.path.join(os.path.dirname(__file__), "../..")
    config_path = os.path.abspath(os.path.join(root_path, "alembic.ini"))
    scripts_path = os.path.abspath(os.path.join(root_path, "alembic_db"))

    config = Config(config_path)
    config.set_main_option("script_location", scripts_path)
    config.set_main_option("sqlalchemy.url", args.database_url)

    return config


def get_db_path():
    url = args.database_url
    if url.startswith("sqlite:///"):
        return url.split("///")[1]
    else:
        raise ValueError(f"Unsupported database URL '{url}'.")


def init_db():
    db_url = args.database_url
    logging.debug(f"Database URL: {db_url}")

    config = get_alembic_config()

    # Check if we need to upgrade
    engine = create_engine(db_url)
    conn = engine.connect()

    context = MigrationContext.configure(conn)
    current_rev = context.get_current_revision()

    script = ScriptDirectory.from_config(config)
    target_rev = script.get_current_head()

    if current_rev != target_rev:
        # Backup the database pre upgrade
        db_path = get_db_path()
        backup_path = db_path + ".bkp"
        if os.path.exists(db_path):
            shutil.copy(db_path, backup_path)
        else:
            backup_path = None

        try:
            command.upgrade(config, target_rev)
            logging.info(f"Database upgraded from {current_rev} to {target_rev}")
        except Exception as e:
            if backup_path:
                # Restore the database from backup if upgrade fails
                shutil.copy(backup_path, db_path)
                os.remove(backup_path)
            logging.error(f"Error upgrading database: {e}")
            raise e

    global Session
    Session = sessionmaker(bind=engine)

    if not current_rev:
        # Init db, populate models
        from app.model_processor import model_processor

        session = create_session()
        model_processor.populate_models(session)

        # populate tags
        tags = (
            "character",
            "style",
            "concept",
            "clothing",
            "pose",
            "background",
            "vehicle",
            "object",
            "animal",
            "action",
        )
        for tag in tags:
            session.add(Tag(name=tag))

        session.commit()


def can_create_session():
    return Session is not None


def create_session():
    return Session()
```
#### Removed file (76 lines; `app/database/models.py`)

```python
from sqlalchemy import (
    Column,
    Integer,
    Text,
    DateTime,
    Table,
    ForeignKeyConstraint,
)
from sqlalchemy.orm import relationship, declarative_base
from sqlalchemy.sql import func

Base = declarative_base()


def to_dict(obj):
    fields = obj.__table__.columns.keys()
    return {
        field: (val.to_dict() if hasattr(val, "to_dict") else val)
        for field in fields
        if (val := getattr(obj, field))
    }


ModelTag = Table(
    "model_tag",
    Base.metadata,
    Column(
        "model_type",
        Text,
        primary_key=True,
    ),
    Column(
        "model_path",
        Text,
        primary_key=True,
    ),
    Column("tag_id", Integer, primary_key=True),
    ForeignKeyConstraint(
        ["model_type", "model_path"], ["model.type", "model.path"], ondelete="CASCADE"
    ),
    ForeignKeyConstraint(["tag_id"], ["tag.id"], ondelete="CASCADE"),
)


class Model(Base):
    __tablename__ = "model"

    type = Column(Text, primary_key=True)
    path = Column(Text, primary_key=True)
    title = Column(Text)
    description = Column(Text)
    architecture = Column(Text)
    hash = Column(Text)
    source_url = Column(Text)
    date_added = Column(DateTime, server_default=func.now())

    # Relationship with tags
    tags = relationship("Tag", secondary=ModelTag, back_populates="models")

    def to_dict(self):
        dict = to_dict(self)
        dict["tags"] = [tag.to_dict() for tag in self.tags]
        return dict


class Tag(Base):
    __tablename__ = "tag"

    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(Text, nullable=False, unique=True)

    # Relationship with models
    models = relationship("Model", secondary=ModelTag, back_populates="tags")

    def to_dict(self):
        return to_dict(self)
```
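One subtlety in the removed `to_dict` helper: the `if (val := getattr(obj, field))` filter drops every falsy column value, not only `NULL`s, so an empty-string `title` would vanish from serialized output just like a missing one. A minimal repro of that behavior (the class is a stand-in, not a real model):

```python
class FakeRow:
    title = ""          # falsy but present
    hash = "abc123"
    source_url = None   # genuinely missing

fields = ["title", "hash", "source_url"]
# Same walrus-in-condition pattern as the deleted to_dict():
out = {f: val for f in fields if (val := getattr(FakeRow, f))}
print(out)  # {'hash': 'abc123'} -- the empty title is dropped along with None
```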
#### Modified file: `ModelFileManager` (model manager)

```diff
@@ -1,30 +1,19 @@
 from __future__ import annotations

 import os
+import base64
+import json
 import time
 import logging
-from app.database.db import create_session
 import folder_paths
+import glob
+import comfy.utils
 from aiohttp import web
 from PIL import Image
+from io import BytesIO
-from folder_paths import map_legacy, filter_files_extensions, get_full_path
-from app.database.models import Tag, Model
-from app.model_processor import get_model_previews, model_processor
-from utils.web import dumps
-from sqlalchemy.orm import joinedload
-import sqlalchemy.exc
+from folder_paths import map_legacy, filter_files_extensions, filter_files_content_types


-def bad_request(message: str):
-    return web.json_response({"error": message}, status=400)
-
-def missing_field(field: str):
-    return bad_request(f"{field} is required")
-
-def not_found(message: str):
-    return web.json_response({"error": message + " not found"}, status=404)
-
 class ModelFileManager:
     def __init__(self) -> None:
         self.cache: dict[str, tuple[list[dict], dict[str, float], float]] = {}
@@ -73,7 +62,7 @@ class ModelFileManager:
             folder = folders[0][path_index]
             full_filename = os.path.join(folder, filename)

-            previews = get_model_previews(full_filename)
+            previews = self.get_model_previews(full_filename)
             default_preview = previews[0] if len(previews) > 0 else None
             if default_preview is None or (isinstance(default_preview, str) and not os.path.isfile(default_preview)):
                 return web.Response(status=404)
@@ -87,183 +76,6 @@ class ModelFileManager:
             except:
                 return web.Response(status=404)

-        @routes.get("/v2/models")
-        async def get_models(request):
-            with create_session() as session:
-                model_path = request.query.get("path", None)
-                model_type = request.query.get("type", None)
-                query = session.query(Model).options(joinedload(Model.tags))
-                if model_path:
-                    query = query.filter(Model.path == model_path)
-                if model_type:
-                    query = query.filter(Model.type == model_type)
-                models = query.all()
-                if model_path and model_type:
-                    if len(models) == 0:
-                        return not_found("Model")
-                    return web.json_response(models[0].to_dict(), dumps=dumps)
-
-                return web.json_response([model.to_dict() for model in models], dumps=dumps)
-
-        @routes.post("/v2/models")
-        async def add_model(request):
-            with create_session() as session:
-                data = await request.json()
-                model_type = data.get("type", None)
-                model_path = data.get("path", None)
-
-                if not model_type:
-                    return missing_field("type")
-                if not model_path:
-                    return missing_field("path")
-
-                tags = data.pop("tags", [])
-                fields = Model.metadata.tables["model"].columns.keys()
-
-                # Validate keys are valid model fields
-                for key in data.keys():
-                    if key not in fields:
-                        return bad_request(f"Invalid field: {key}")
-
-                # Validate file exists
-                if not get_full_path(model_type, model_path):
-                    return not_found(f"File '{model_type}/{model_path}'")
-
-                model = Model()
-                for field in fields:
-                    if field in data:
-                        setattr(model, field, data[field])
-
-                model.tags = session.query(Tag).filter(Tag.id.in_(tags)).all()
-                for tag in tags:
-                    if tag not in [t.id for t in model.tags]:
-                        return not_found(f"Tag '{tag}'")
-
-                try:
-                    session.add(model)
-                    session.commit()
-                except sqlalchemy.exc.IntegrityError as e:
-                    session.rollback()
-                    return bad_request(e.orig.args[0])
-
-                model_processor.run()
-
-                return web.json_response(model.to_dict(), dumps=dumps)
-
-        @routes.delete("/v2/models")
-        async def delete_model(request):
-            with create_session() as session:
-                model_path = request.query.get("path", None)
-                model_type = request.query.get("type", None)
-                if not model_path:
-                    return missing_field("path")
-                if not model_type:
-                    return missing_field("type")
-
-                full_path = get_full_path(model_type, model_path)
-                if full_path:
-                    return bad_request("Model file exists, please delete the file before deleting the model record.")
-
-                model = session.query(Model).filter(Model.path == model_path, Model.type == model_type).first()
-                if not model:
-                    return not_found("Model")
-                session.delete(model)
-                session.commit()
-                return web.Response(status=204)
-
-        @routes.get("/v2/tags")
-        async def get_tags(request):
-            with create_session() as session:
-                tags = session.query(Tag).all()
-                return web.json_response(
-                    [{"id": tag.id, "name": tag.name} for tag in tags]
-                )
-
-        @routes.post("/v2/tags")
-        async def create_tag(request):
-            with create_session() as session:
-                data = await request.json()
-                name = data.get("name", None)
-                if not name:
-                    return missing_field("name")
-                tag = Tag(name=name)
-                session.add(tag)
-                session.commit()
-                return web.json_response({"id": tag.id, "name": tag.name})
-
-        @routes.delete("/v2/tags")
-        async def delete_tag(request):
-            with create_session() as session:
-                tag_id = request.query.get("id", None)
-                if not tag_id:
-                    return missing_field("id")
-                tag = session.query(Tag).filter(Tag.id == tag_id).first()
-                if not tag:
-                    return not_found("Tag")
-                session.delete(tag)
-                session.commit()
-                return web.Response(status=204)
-
-        @routes.post("/v2/models/tags")
-        async def add_model_tag(request):
-            with create_session() as session:
-                data = await request.json()
-                tag_id = data.get("tag", None)
-                model_path = data.get("path", None)
-                model_type = data.get("type", None)
-
-                if tag_id is None:
-                    return missing_field("tag")
-                if model_path is None:
-                    return missing_field("path")
-                if model_type is None:
-                    return missing_field("type")
-
-                try:
-                    tag_id = int(tag_id)
-                except ValueError:
-                    return bad_request("Invalid tag id")
-
-                tag = session.query(Tag).filter(Tag.id == tag_id).first()
-                model = session.query(Model).filter(Model.path == model_path, Model.type == model_type).first()
-                if not model:
-                    return not_found("Model")
-                model.tags.append(tag)
-                session.commit()
-                return web.json_response(model.to_dict(), dumps=dumps)
-
-        @routes.delete("/v2/models/tags")
-        async def delete_model_tag(request):
-            with create_session() as session:
-                tag_id = request.query.get("tag", None)
-                model_path = request.query.get("path", None)
-                model_type = request.query.get("type", None)
-
-                if tag_id is None:
-                    return missing_field("tag")
-                if model_path is None:
-                    return missing_field("path")
-                if model_type is None:
-                    return missing_field("type")
-
-                try:
-                    tag_id = int(tag_id)
-                except ValueError:
-                    return bad_request("Invalid tag id")
-
-                model = session.query(Model).filter(Model.path == model_path, Model.type == model_type).first()
-                if not model:
-                    return not_found("Model")
-                model.tags = [tag for tag in model.tags if tag.id != tag_id]
-                session.commit()
-                return web.Response(status=204)
-
-
-        @routes.get("/v2/models/missing")
-        async def get_missing_models(request):
-            return web.json_response(model_processor.missing_models)
-
     def get_model_file_list(self, folder_name: str):
         folder_name = map_legacy(folder_name)
         folders = folder_paths.folder_names_and_paths[folder_name]
@@ -334,5 +146,39 @@ class ModelFileManager:

         return [{"name": f, "pathIndex": pathIndex} for f in result], dirs, time.perf_counter()

+    def get_model_previews(self, filepath: str) -> list[str | BytesIO]:
+        dirname = os.path.dirname(filepath)
+
+        if not os.path.exists(dirname):
+            return []
+
+        basename = os.path.splitext(filepath)[0]
+        match_files = glob.glob(f"{basename}.*", recursive=False)
+        image_files = filter_files_content_types(match_files, "image")
+        safetensors_file = next(filter(lambda x: x.endswith(".safetensors"), match_files), None)
+        safetensors_metadata = {}
+
+        result: list[str | BytesIO] = []
+
+        for filename in image_files:
+            _basename = os.path.splitext(filename)[0]
+            if _basename == basename:
+                result.append(filename)
+            if _basename == f"{basename}.preview":
+                result.append(filename)
+
+        if safetensors_file:
+            safetensors_filepath = os.path.join(dirname, safetensors_file)
+            header = comfy.utils.safetensors_header(safetensors_filepath, max_size=8*1024*1024)
+            if header:
+                safetensors_metadata = json.loads(header)
+            safetensors_images = safetensors_metadata.get("__metadata__", {}).get("ssmd_cover_images", None)
+            if safetensors_images:
+                safetensors_images = json.loads(safetensors_images)
+                for image in safetensors_images:
+                    result.append(BytesIO(base64.b64decode(image)))
+
+        return result
+
     def __exit__(self, exc_type, exc_value, traceback):
         self.clear_cache()
```
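The restored `get_model_previews` matches sidecar images by file stem: a preview counts if its stem equals the model's stem, or the stem plus `.preview`. A tiny standalone mirror of that rule (hypothetical helper, written to match the method above):

```python
import os

def is_preview_for(model_path: str, candidate: str) -> bool:
    """A sidecar image is a preview when its stem equals the model's stem,
    or the model's stem followed by '.preview'."""
    stem = os.path.splitext(model_path)[0]
    cand_stem = os.path.splitext(candidate)[0]
    return cand_stem in (stem, f"{stem}.preview")

assert is_preview_for("m/dreamshaper.safetensors", "m/dreamshaper.png")
assert is_preview_for("m/dreamshaper.safetensors", "m/dreamshaper.preview.jpg")
assert not is_preview_for("m/dreamshaper.safetensors", "m/dreamshaper_v2.png")
```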
#### Removed file (263 lines; `app/model_processor.py`)

```python
import base64
from datetime import datetime
import glob
import hashlib
from io import BytesIO
import json
import logging
import os
import threading
import time
import comfy.utils
from app.database.models import Model
from app.database.db import create_session
from comfy.cli_args import args
from folder_paths import (
    filter_files_content_types,
    get_full_path,
    folder_names_and_paths,
    get_filename_list,
)
from PIL import Image
from urllib import request


def get_model_previews(
    filepath: str, check_metadata: bool = True
) -> list[str | BytesIO]:
    dirname = os.path.dirname(filepath)

    if not os.path.exists(dirname):
        return []

    basename = os.path.splitext(filepath)[0]
    match_files = glob.glob(f"{basename}.*", recursive=False)
    image_files = filter_files_content_types(match_files, "image")

    result: list[str | BytesIO] = []

    for filename in image_files:
        _basename = os.path.splitext(filename)[0]
        if _basename == basename:
            result.append(filename)
        if _basename == f"{basename}.preview":
            result.append(filename)

    if not check_metadata:
        return result

    safetensors_file = next(
        filter(lambda x: x.endswith(".safetensors"), match_files), None
    )
    safetensors_metadata = {}

    if safetensors_file:
        safetensors_filepath = os.path.join(dirname, safetensors_file)
        header = comfy.utils.safetensors_header(
            safetensors_filepath, max_size=8 * 1024 * 1024
        )
        if header:
            safetensors_metadata = json.loads(header)
        safetensors_images = safetensors_metadata.get("__metadata__", {}).get(
            "ssmd_cover_images", None
        )
        if safetensors_images:
            safetensors_images = json.loads(safetensors_images)
            for image in safetensors_images:
                result.append(BytesIO(base64.b64decode(image)))

    return result


class ModelProcessor:
    def __init__(self):
        self._thread = None
        self._lock = threading.Lock()
        self._run = False
        self.missing_models = []

    def run(self):
        if args.disable_model_processing:
            return

        if self._thread is None:
            # Lock to prevent multiple threads from starting
            with self._lock:
                self._run = True
                if self._thread is None:
                    self._thread = threading.Thread(target=self._process_models)
                    self._thread.daemon = True
                    self._thread.start()

    def populate_models(self, session):
        # Ensure database state matches filesystem

        existing_models = session.query(Model).all()

        for folder_name in folder_names_and_paths.keys():
            if folder_name == "custom_nodes" or folder_name == "configs":
                continue
            seen = set()
            files = get_filename_list(folder_name)

            for file in files:
                if file in seen:
                    logging.warning(f"Skipping duplicate named model: {file}")
                    continue
                seen.add(file)

                existing_model = None
                for model in existing_models:
                    if model.path == file and model.type == folder_name:
                        existing_model = model
                        break

                if existing_model:
                    # Model already exists in db, remove from list and skip
                    existing_models.remove(existing_model)
                    continue

                file_path = get_full_path(folder_name, file)

                model = Model(
                    path=file,
                    type=folder_name,
                    date_added=datetime.fromtimestamp(os.path.getctime(file_path)),
                )
                session.add(model)

        for model in existing_models:
            if not get_full_path(model.type, model.path):
                logging.warning(f"Model {model.path} not found")
                self.missing_models.append({"type": model.type, "path": model.path})

        session.commit()

    def _get_models(self, session):
        models = session.query(Model).filter(Model.hash == None).all()
        return models

    def _process_file(self, model_path):
        is_safetensors = model_path.endswith(".safetensors")
        metadata = {}
        h = hashlib.sha256()

        with open(model_path, "rb", buffering=0) as f:
            if is_safetensors:
                # Read header length (8 bytes)
                header_size_bytes = f.read(8)
                header_len = int.from_bytes(header_size_bytes, "little")
                h.update(header_size_bytes)

                # Read header
                header_bytes = f.read(header_len)
                h.update(header_bytes)
                try:
                    metadata = json.loads(header_bytes)
                except json.JSONDecodeError:
                    pass

            # Read rest of file
            b = bytearray(128 * 1024)
            mv = memoryview(b)
            while n := f.readinto(mv):
                h.update(mv[:n])

        return h.hexdigest(), metadata

    def _populate_info(self, model, metadata):
        model.title = metadata.get("modelspec.title", None)
        model.description = metadata.get("modelspec.description", None)
        model.architecture = metadata.get("modelspec.architecture", None)

    def _extract_image(self, model_path, metadata):
        # check if image already exists
        if len(get_model_previews(model_path, check_metadata=False)) > 0:
            return

        image_path = os.path.splitext(model_path)[0] + ".webp"
        if os.path.exists(image_path):
            return

        cover_images = metadata.get("ssmd_cover_images", None)
        image = None
        if cover_images:
            try:
                cover_images = json.loads(cover_images)
                if len(cover_images) > 0:
                    image_data = cover_images[0]
                    image = Image.open(BytesIO(base64.b64decode(image_data)))
            except Exception as e:
                logging.warning(
                    f"Error extracting cover image for model {model_path}: {e}"
                )

        if not image:
            thumbnail = metadata.get("modelspec.thumbnail", None)
            if thumbnail:
                try:
                    response = request.urlopen(thumbnail)
                    image = Image.open(response)
                except Exception as e:
                    logging.warning(
                        f"Error extracting thumbnail for model {model_path}: {e}"
                    )

        if image:
            image.thumbnail((512, 512))
            image.save(image_path)
            image.close()

    def _process_models(self):
        with create_session() as session:
            checked = set()
            self.populate_models(session)

            while self._run:
                self._run = False

                models = self._get_models(session)

                if len(models) == 0:
                    break

                for model in models:
                    # prevent looping on the same model if it crashes
                    if model.path in checked:
                        continue

                    checked.add(model.path)

                    try:
                        time.sleep(0)
                        now = time.time()
                        model_path = get_full_path(model.type, model.path)

                        if not model_path:
                            logging.warning(f"Model {model.path} not found")
                            self.missing_models.append(model.path)
                            continue

                        logging.debug(f"Processing model {model_path}")
                        hash, header = self._process_file(model_path)
                        logging.debug(
                            f"Processed model {model_path} in {time.time() - now} seconds"
                        )
                        model.hash = hash

                        if header:
                            metadata = header.get("__metadata__", None)

                            if metadata:
                                self._populate_info(model, metadata)
                                self._extract_image(model_path, metadata)

                        session.commit()
                    except Exception as e:
                        logging.error(f"Error processing model {model.path}: {e}")

        with self._lock:
            self._thread = None


model_processor = ModelProcessor()
```
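The removed `_process_file` doubles as a compact reference for the safetensors layout it hashes: an 8-byte little-endian header length, the JSON header itself, then raw tensor data. A standalone sketch of just the header read (file name is illustrative):

```python
import json

def read_safetensors_header(path: str) -> dict:
    """Parse only the JSON header of a .safetensors file:
    bytes 0-7 hold the header length (little-endian integer),
    followed by that many bytes of JSON."""
    with open(path, "rb") as f:
        header_len = int.from_bytes(f.read(8), "little")
        return json.loads(f.read(header_len))

# metadata = read_safetensors_header("model.safetensors").get("__metadata__", {})
```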
#### Modified file: command-line argument parser

```diff
@@ -43,10 +43,11 @@ parser.add_argument("--tls-certfile", type=str, help="Path to TLS (SSL) certific
 parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.")
 parser.add_argument("--max-upload-size", type=float, default=100, help="Set the maximum upload size in MB.")

+parser.add_argument("--base-directory", type=str, default=None, help="Set the ComfyUI base directory for models, custom_nodes, input, output, temp, and user directories.")
 parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.")
-parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
-parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory).")
-parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory.")
+parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory. Overrides --base-directory.")
+parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory). Overrides --base-directory.")
+parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory. Overrides --base-directory.")
 parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
 parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
 parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
@@ -176,13 +177,9 @@ parser.add_argument(
     help="The local filesystem path to the directory where the frontend is located. Overrides --front-end-version.",
 )

-parser.add_argument("--user-directory", type=is_valid_directory, default=None, help="Set the ComfyUI user directory with an absolute path.")
+parser.add_argument("--user-directory", type=is_valid_directory, default=None, help="Set the ComfyUI user directory with an absolute path. Overrides --base-directory.")

-database_default_path = os.path.abspath(
-    os.path.join(os.path.dirname(__file__), "..", "user", "comfyui.db")
-)
-parser.add_argument("--database-url", type=str, default=f"sqlite:///{database_default_path}", help="Specify the database URL, e.g. for an in-memory database you can use 'sqlite:///:memory:'.")
-parser.add_argument("--disable-model-processing", action="store_true", help="Disable model file processing, e.g. computing hashes and extracting metadata.")
 parser.add_argument("--enable-compress-response-body", action="store_true", help="Enable compressing response body.")

 if comfy.options.args_parsing:
     args = parser.parse_args()
```
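Taken together, the new `--base-directory` flag and the amended help texts define a simple precedence: the base directory relocates every writable directory at once, and any per-directory flag still wins for its own path. An illustrative invocation (paths are made up, and the usual `python main.py` entry point is assumed):

```bash
# models, custom_nodes, input, output, temp, and user all live under /data/comfy,
# except renders, which go to a separate disk ("Overrides --base-directory"):
python main.py --base-directory /data/comfy --output-directory /mnt/renders
```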
#### Modified file: `CLIPTextModel_` attention masking

```diff
@@ -102,9 +102,9 @@ class CLIPTextModel_(torch.nn.Module):
         mask = None
         if attention_mask is not None:
             mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
-            mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
+            mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)

-        causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
+        causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(-torch.finfo(x.dtype).max).triu_(1)
         if mask is not None:
             mask += causal_mask
         else:
```
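Swapping `float("-inf")` for `-torch.finfo(x.dtype).max` is the usual fix for NaNs in attention: if a softmax row ends up entirely filled with `-inf`, every exponent is 0 and the normalization divides 0 by 0. With the most negative finite value, the row stays well-defined. A quick illustration:

```python
import torch

# A fully -inf row softmaxes to NaN:
row = torch.full((4,), float("-inf"))
print(torch.softmax(row, dim=-1))  # tensor([nan, nan, nan, nan])

# The most negative finite value gives a uniform (harmless) row instead:
row = torch.full((4,), -torch.finfo(torch.float32).max)
print(torch.softmax(row, dim=-1))  # tensor([0.25, 0.25, 0.25, 0.25])
```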
#### Modified file: conditioning classes (`CONDRegular` / `CONDCrossAttn`)

```diff
@@ -3,9 +3,6 @@ import math
 import comfy.utils


-def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9)
-    return abs(a*b) // math.gcd(a, b)
-
 class CONDRegular:
     def __init__(self, cond):
         self.cond = cond
@@ -46,7 +43,7 @@ class CONDCrossAttn(CONDRegular):
         if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen
             return False

-        mult_min = lcm(s1[1], s2[1])
+        mult_min = math.lcm(s1[1], s2[1])
         diff = mult_min // min(s1[1], s2[1])
         if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
             return False
@@ -57,7 +54,7 @@ class CONDCrossAttn(CONDRegular):
         crossattn_max_len = self.cond.shape[1]
         for x in others:
             c = x.cond
-            crossattn_max_len = math.lcm(crossattn_max_len, c.shape[1])
+            crossattn_max_len = math.lcm(crossattn_max_len, c.shape[1])
             conds.append(c)

         out = []
```
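The local `lcm` helper existed only because `math.lcm` requires Python 3.9; with 3.8 dropped from the test matrix earlier in this compare, the stdlib version can take over. The two agree, for example on the 77-token-multiple cross-attention lengths this code deals in:

```python
import math

def old_lcm(a, b):  # the removed helper
    return abs(a * b) // math.gcd(a, b)

assert math.lcm(77, 154) == old_lcm(77, 154) == 154
assert math.lcm(154, 231) == old_lcm(154, 231) == 462
```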
@@ -4,105 +4,6 @@ import logging

# conversion code from https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_stable_diffusion.py

-# =================#
-# UNet Conversion #
-# =================#
-
-unet_conversion_map = [
-    # (stable-diffusion, HF Diffusers)
-    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
-    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
-    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
-    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
-    ("input_blocks.0.0.weight", "conv_in.weight"),
-    ("input_blocks.0.0.bias", "conv_in.bias"),
-    ("out.0.weight", "conv_norm_out.weight"),
-    ("out.0.bias", "conv_norm_out.bias"),
-    ("out.2.weight", "conv_out.weight"),
-    ("out.2.bias", "conv_out.bias"),
-]
-
-unet_conversion_map_resnet = [
-    # (stable-diffusion, HF Diffusers)
-    ("in_layers.0", "norm1"),
-    ("in_layers.2", "conv1"),
-    ("out_layers.0", "norm2"),
-    ("out_layers.3", "conv2"),
-    ("emb_layers.1", "time_emb_proj"),
-    ("skip_connection", "conv_shortcut"),
-]
-
-unet_conversion_map_layer = []
-# hardcoded number of downblocks and resnets/attentions...
-# would need smarter logic for other networks.
-for i in range(4):
-    # loop over downblocks/upblocks
-
-    for j in range(2):
-        # loop over resnets/attentions for downblocks
-        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
-        sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0."
-        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
-
-        if i < 3:
-            # no attention layers in down_blocks.3
-            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
-            sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1."
-            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
-
-    for j in range(3):
-        # loop over resnets/attentions for upblocks
-        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
-        sd_up_res_prefix = f"output_blocks.{3 * i + j}.0."
-        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
-
-        if i > 0:
-            # no attention layers in up_blocks.0
-            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
-            sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1."
-            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
-
-    if i < 3:
-        # no downsample in down_blocks.3
-        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
-        sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op."
-        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
-
-        # no upsample in up_blocks.3
-        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
-        sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{1 if i == 0 else 2}."
-        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
-
-hf_mid_atn_prefix = "mid_block.attentions.0."
-sd_mid_atn_prefix = "middle_block.1."
-unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
-
-for j in range(2):
-    hf_mid_res_prefix = f"mid_block.resnets.{j}."
-    sd_mid_res_prefix = f"middle_block.{2 * j}."
-    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
-
-
-def convert_unet_state_dict(unet_state_dict):
-    # buyer beware: this is a *brittle* function,
-    # and correct output requires that all of these pieces interact in
-    # the exact order in which I have arranged them.
-    mapping = {k: k for k in unet_state_dict.keys()}
-    for sd_name, hf_name in unet_conversion_map:
-        mapping[hf_name] = sd_name
-    for k, v in mapping.items():
-        if "resnets" in k:
-            for sd_part, hf_part in unet_conversion_map_resnet:
-                v = v.replace(hf_part, sd_part)
-            mapping[k] = v
-    for k, v in mapping.items():
-        for sd_part, hf_part in unet_conversion_map_layer:
-            v = v.replace(hf_part, sd_part)
-        mapping[k] = v
-    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
-    return new_state_dict
-
-
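For context on the block being removed: the converter built an identity key mapping and then rewrote HF Diffusers prefixes into stable-diffusion ones via string replacement. A minimal sketch of that idea, using one entry derivable from the tables above (i=0, j=0 gives `input_blocks.{3*i+j+1}.1.` = `input_blocks.1.1.`):

```python
# (sd prefix, hf prefix) pair for the first down-block attention
unet_conversion_map_layer = [("input_blocks.1.1.", "down_blocks.0.attentions.0.")]

hf_key = "down_blocks.0.attentions.0.proj_in.weight"
sd_key = hf_key
for sd_part, hf_part in unet_conversion_map_layer:
    sd_key = sd_key.replace(hf_part, sd_part)  # rewrite prefix in place
print(sd_key)  # input_blocks.1.1.proj_in.weight
```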
# ================#
# VAE Conversion #
# ================#
@@ -213,6 +114,7 @@ textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


# This function exists because at the time of writing torch.cat can't do fp8 with cuda
def cat_tensors(tensors):
    x = 0
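The body of `cat_tensors` is cut off by the hunk. A plausible reconstruction of the workaround the comment describes (hypothetical, not the verbatim implementation) is to preallocate the output and copy slices, which avoids calling `torch.cat` on fp8 CUDA tensors:

```python
import torch

def cat_tensors_sketch(tensors):
    # Sum the target length, preallocate, then copy each piece into place
    # instead of calling torch.cat (which lacked fp8 CUDA support).
    x = 0
    for t in tensors:
        x += t.shape[0]
    out = torch.empty((x,) + tensors[0].shape[1:], dtype=tensors[0].dtype, device=tensors[0].device)
    offset = 0
    for t in tensors:
        out[offset:offset + t.shape[0]] = t
        offset += t.shape[0]
    return out
```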
@@ -229,6 +131,7 @@ def cat_tensors(tensors):

    return out


def convert_text_enc_state_dict_v20(text_enc_dict, prefix=""):
    new_state_dict = {}
    capture_qkv_weight = {}

@@ -284,5 +187,3 @@ def convert_text_enc_state_dict_v20(text_enc_dict, prefix=""):

def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict

@@ -661,7 +661,7 @@ class UniPC:

        if x_t is None:
            if use_predictor:
-                pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
+                pred_res = torch.tensordot(D1s, rhos_p, dims=([1], [0])) # torch.einsum('k,bkchw->bchw', rhos_p, D1s)
            else:
                pred_res = 0
            x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * pred_res

@@ -669,7 +669,7 @@ class UniPC:
        if use_corrector:
            model_t = self.model_fn(x_t, t)
            if D1s is not None:
-                corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
+                corr_res = torch.tensordot(D1s, rhos_c[:-1], dims=([1], [0])) # torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
            else:
                corr_res = 0
            D1_t = (model_t - model_prev_0)
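The `tensordot` form contracts the step dimension `k` of `D1s` directly against the coefficient vector, which the kept einsum comment documents. A quick equivalence check with small random shapes:

```python
import torch

b, k, c, h, w = 2, 3, 4, 8, 8
D1s = torch.randn(b, k, c, h, w)
rhos = torch.randn(k)

a = torch.einsum('k,bkchw->bchw', rhos, D1s)
t = torch.tensordot(D1s, rhos, dims=([1], [0]))  # contract dim 1 (k) of D1s with dim 0 of rhos
print(torch.allclose(a, t, atol=1e-6))  # True
```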
@@ -40,7 +40,7 @@ def get_sigmas_polyexponential(n, sigma_min, sigma_max, rho=1., device='cpu'):
def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device='cpu'):
    """Constructs a continuous VP noise schedule."""
    t = torch.linspace(1, eps_s, n, device=device)
-    sigmas = torch.sqrt(torch.exp(beta_d * t ** 2 / 2 + beta_min * t) - 1)
+    sigmas = torch.sqrt(torch.special.expm1(beta_d * t ** 2 / 2 + beta_min * t))
    return append_zero(sigmas)

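`expm1(x)` computes e^x − 1 without catastrophic cancellation near x ≈ 0, which matters at the small-t end of the VP schedule:

```python
import torch

x = torch.tensor(1e-8, dtype=torch.float32)
print(torch.exp(x) - 1)        # tensor(0.) -- exp(1e-8) rounds to 1.0 in float32
print(torch.special.expm1(x))  # tensor(1.0000e-08) -- accurate for small x
```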
@@ -1267,7 +1267,7 @@ def sample_dpmpp_2m_cfg_pp(model, x, sigmas, extra_args=None, callback=None, dis
    return x

@torch.no_grad()
-def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None, cfg_pp=False):
+def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None, eta=1., cfg_pp=False):
    extra_args = {} if extra_args is None else extra_args
    seed = extra_args.get("seed", None)
    noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler

@@ -1289,50 +1289,80 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None
        extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

    for i in trange(len(sigmas) - 1, disable=disable):
-        if s_churn > 0:
-            gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
-            sigma_hat = sigmas[i] * (gamma + 1)
-        else:
-            gamma = 0
-            sigma_hat = sigmas[i]
-
-        if gamma > 0:
-            eps = torch.randn_like(x) * s_noise
-            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
-        denoised = model(x, sigma_hat * s_in, **extra_args)
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
        if callback is not None:
-            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
-        if sigmas[i + 1] == 0 or old_denoised is None:
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
+        if sigma_down == 0 or old_denoised is None:
            # Euler method
            if cfg_pp:
-                d = to_d(x, sigma_hat, uncond_denoised)
-                x = denoised + d * sigmas[i + 1]
+                d = to_d(x, sigmas[i], uncond_denoised)
+                x = denoised + d * sigma_down
            else:
-                d = to_d(x, sigma_hat, denoised)
-                dt = sigmas[i + 1] - sigma_hat
+                d = to_d(x, sigmas[i], denoised)
+                dt = sigma_down - sigmas[i]
                x = x + d * dt
        else:
            # Second order multistep method in https://arxiv.org/pdf/2308.02157
-            t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigmas[i + 1]), t_fn(sigmas[i - 1])
+            t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigma_down), t_fn(sigmas[i - 1])
            h = t_next - t
            c2 = (t_prev - t) / h

            phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h)
-            b1 = torch.nan_to_num(phi1_val - 1.0 / c2 * phi2_val, nan=0.0)
-            b2 = torch.nan_to_num(1.0 / c2 * phi2_val, nan=0.0)
+            b1 = torch.nan_to_num(phi1_val - phi2_val / c2, nan=0.0)
+            b2 = torch.nan_to_num(phi2_val / c2, nan=0.0)

            if cfg_pp:
                x = x + (denoised - uncond_denoised)
-            x = (sigma_fn(t_next) / sigma_fn(t)) * x + h * (b1 * denoised + b2 * old_denoised)
+                x = sigma_fn(h) * x + h * (b1 * uncond_denoised + b2 * old_denoised)
+            else:
+                x = sigma_fn(h) * x + h * (b1 * denoised + b2 * old_denoised)

+        # Noise addition
+        if sigmas[i + 1] > 0:
+            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up

-        old_denoised = denoised
+        if cfg_pp:
+            old_denoised = uncond_denoised
+        else:
+            old_denoised = denoised
    return x

@torch.no_grad()
-def sample_res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None):
-    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise, noise_sampler=noise_sampler, cfg_pp=False)
+def sample_res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None):
+    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=0., cfg_pp=False)

@torch.no_grad()
-def sample_res_multistep_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None):
-    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise, noise_sampler=noise_sampler, cfg_pp=True)
+def sample_res_multistep_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None):
+    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=0., cfg_pp=True)

+@torch.no_grad()
+def sample_res_multistep_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
+    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=eta, cfg_pp=False)

+@torch.no_grad()
+def sample_res_multistep_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
+    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=eta, cfg_pp=True)
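The rewrite swaps Karras-style churn (`s_churn`/`sigma_hat`) for ancestral stepping: `get_ancestral_step` splits each transition σᵢ → σᵢ₊₁ into a deterministic step down to `sigma_down` plus fresh noise of magnitude `sigma_up`. For reference, the helper as defined in k-diffusion (already present in this file):

```python
def get_ancestral_step(sigma_from, sigma_to, eta=1.):
    """Calculates the noise level (sigma_down) to step down to and the amount
    of noise to add (sigma_up) when doing an ancestral sampling step."""
    if not eta:
        return sigma_to, 0.
    sigma_up = min(sigma_to, eta * (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5)
    sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5
    return sigma_down, sigma_up
```

With `eta=0.` this degenerates to `sigma_down = sigma_to`, `sigma_up = 0`, which is why the non-ancestral wrappers pass `eta=0.` and keep their previous deterministic behavior.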

+@torch.no_grad()
+def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.):
+    """Gradient-estimation sampler. Paper: https://openreview.net/pdf?id=o2ND9v0CeK"""
+    extra_args = {} if extra_args is None else extra_args
+    s_in = x.new_ones([x.shape[0]])
+    old_d = None
+
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        d = to_d(x, sigmas[i], denoised)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+        dt = sigmas[i + 1] - sigmas[i]
+        if i == 0:
+            # Euler method
+            x = x + d * dt
+        else:
+            # Gradient estimation
+            d_bar = ge_gamma * d + (1 - ge_gamma) * old_d
+            x = x + d_bar * dt
+        old_d = d
+    return x
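With the default `ge_gamma = 2.`, the update direction is `d_bar = 2*d - old_d`, a linear extrapolation of the denoising direction across steps (`ge_gamma = 1.` would reduce to plain Euler). Toy numbers:

```python
ge_gamma = 2.0
old_d, d = 1.0, 1.2   # successive per-step "directions", scalars for illustration
d_bar = ge_gamma * d + (1 - ge_gamma) * old_d
print(d_bar)          # 1.4 -- continues the 1.0 -> 1.2 trend one step further
```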

@@ -168,14 +168,18 @@ class Attention(nn.Module):
        k = self.to_k[1](k)
        v = self.to_v[1](v)
        if self.is_selfattn and rope_emb is not None:  # only apply to self-attention!
-            q = apply_rotary_pos_emb(q, rope_emb)
-            k = apply_rotary_pos_emb(k, rope_emb)
+            # apply_rotary_pos_emb inlined
+            q_shape = q.shape
+            q = q.reshape(*q.shape[:-1], 2, -1).movedim(-2, -1).unsqueeze(-2)
+            q = rope_emb[..., 0] * q[..., 0] + rope_emb[..., 1] * q[..., 1]
+            q = q.movedim(-1, -2).reshape(*q_shape).to(x.dtype)
+
+            # apply_rotary_pos_emb inlined
+            k_shape = k.shape
+            k = k.reshape(*k.shape[:-1], 2, -1).movedim(-2, -1).unsqueeze(-2)
+            k = rope_emb[..., 0] * k[..., 0] + rope_emb[..., 1] * k[..., 1]
+            k = k.movedim(-1, -2).reshape(*k_shape).to(x.dtype)
        return q, k, v

-    def cal_attn(self, q, k, v, mask=None):
-        out = optimized_attention(q, k, v, self.heads, skip_reshape=True, mask=mask, skip_output_reshape=True)
-        out = rearrange(out, " b n s c -> s b (n c)")
-        return self.to_out(out)
-
    def forward(
        self,
@@ -191,7 +195,10 @@ class Attention(nn.Module):
            context (Optional[Tensor]): The key tensor of shape [B, Mk, K] or use x as context [self attention] if None
        """
        q, k, v = self.cal_qkv(x, context, mask, rope_emb=rope_emb, **kwargs)
-        return self.cal_attn(q, k, v, mask)
+        out = optimized_attention(q, k, v, self.heads, skip_reshape=True, mask=mask, skip_output_reshape=True)
+        del q, k, v
+        out = rearrange(out, " b n s c -> s b (n c)")
+        return self.to_out(out)


class FeedForward(nn.Module):
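The inlined blocks implement rotary position embedding without the `apply_rotary_pos_emb` helper: the last dimension is viewed as (real, imaginary) pairs and each pair is rotated by a position-dependent angle, with `rope_emb[..., 0]` and `rope_emb[..., 1]` carrying the packed rotation coefficients. A toy version of the underlying 2-D rotation:

```python
import torch

# One feature pair (x0, x1) rotated by angle theta -- what RoPE does per channel pair.
theta = torch.tensor(0.5)
x = torch.tensor([1.0, 0.0])
rot = torch.stack([torch.stack([torch.cos(theta), -torch.sin(theta)]),
                   torch.stack([torch.sin(theta), torch.cos(theta)])])
print(rot @ x)  # equivalent to multiplying the complex number x0 + i*x1 by e^{i*theta}
```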
@@ -788,10 +795,7 @@ class GeneralDITTransformerBlock(nn.Module):
        crossattn_mask: Optional[torch.Tensor] = None,
        rope_emb_L_1_1_D: Optional[torch.Tensor] = None,
        adaln_lora_B_3D: Optional[torch.Tensor] = None,
-        extra_per_block_pos_emb: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
-        if extra_per_block_pos_emb is not None:
-            x = x + extra_per_block_pos_emb
        for block in self.blocks:
            x = block(
                x,
@@ -30,6 +30,8 @@ import torch.nn as nn
import torch.nn.functional as F
import logging

+from comfy.ldm.modules.diffusionmodules.model import vae_attention
+
from .patching import (
    Patcher,
    Patcher3D,
@@ -400,6 +402,8 @@ class CausalAttnBlock(nn.Module):
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )

+        self.optimized_attention = vae_attention()
+
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h_ = x
        h_ = self.norm(h_)
@@ -413,18 +417,7 @@ class CausalAttnBlock(nn.Module):
        v, batch_size = time2batch(v)

        b, c, h, w = q.shape
-        q = q.reshape(b, c, h * w)
-        q = q.permute(0, 2, 1)
-        k = k.reshape(b, c, h * w)
-        w_ = torch.bmm(q, k)
-        w_ = w_ * (int(c) ** (-0.5))
-        w_ = F.softmax(w_, dim=2)
-
-        # attend to values
-        v = v.reshape(b, c, h * w)
-        w_ = w_.permute(0, 2, 1)
-        h_ = torch.bmm(v, w_)
-        h_ = h_.reshape(b, c, h, w)
+        h_ = self.optimized_attention(q, k, v)

        h_ = batch2time(h_, batch_size)
        h_ = self.proj_out(h_)
@@ -871,18 +864,16 @@ class EncoderFactorized(nn.Module):
        x = self.patcher3d(x)

        # downsampling
-        hs = [self.conv_in(x)]
+        h = self.conv_in(x)
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
-                h = self.down[i_level].block[i_block](hs[-1])
+                h = self.down[i_level].block[i_block](h)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
-                hs.append(h)
            if i_level != self.num_resolutions - 1:
-                hs.append(self.down[i_level].downsample(hs[-1]))
+                h = self.down[i_level].downsample(h)

        # middle
-        h = hs[-1]
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)
@@ -281,54 +281,76 @@ class UnPatcher3D(UnPatcher):
        hh = hh.to(dtype=dtype)

        xlll, xllh, xlhl, xlhh, xhll, xhlh, xhhl, xhhh = torch.chunk(x, 8, dim=1)
+        del x

        # Height height transposed convolutions.
        xll = F.conv_transpose3d(
            xlll, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)
        )
+        del xlll

        xll += F.conv_transpose3d(
            xllh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)
        )
+        del xllh

        xlh = F.conv_transpose3d(
            xlhl, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)
        )
+        del xlhl

        xlh += F.conv_transpose3d(
            xlhh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)
        )
+        del xlhh

        xhl = F.conv_transpose3d(
            xhll, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)
        )
+        del xhll

        xhl += F.conv_transpose3d(
            xhlh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)
        )
+        del xhlh

        xhh = F.conv_transpose3d(
            xhhl, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)
        )
+        del xhhl

        xhh += F.conv_transpose3d(
            xhhh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)
        )
+        del xhhh

        # Handles width transposed convolutions.
        xl = F.conv_transpose3d(
            xll, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)
        )
+        del xll

        xl += F.conv_transpose3d(
            xlh, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)
        )
+        del xlh

        xh = F.conv_transpose3d(
            xhl, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)
        )
+        del xhl

        xh += F.conv_transpose3d(
            xhh, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)
        )
+        del xhh

        # Handles time axis transposed convolutions.
        x = F.conv_transpose3d(
            xl, hl.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1)
        )
+        del xl

        x += F.conv_transpose3d(
            xh, hh.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1)
        )

@@ -168,7 +168,7 @@ class GeneralDIT(nn.Module):
            operations=operations,
        )

-        self.build_pos_embed(device=device)
+        self.build_pos_embed(device=device, dtype=dtype)
        self.block_x_format = block_x_format
        self.use_adaln_lora = use_adaln_lora
        self.adaln_lora_dim = adaln_lora_dim
@@ -210,7 +210,7 @@ class GeneralDIT(nn.Module):
            operations=operations,
        )

-    def build_pos_embed(self, device=None):
+    def build_pos_embed(self, device=None, dtype=None):
        if self.pos_emb_cls == "rope3d":
            cls_type = VideoRopePosition3DEmb
        else:
@@ -242,6 +242,7 @@ class GeneralDIT(nn.Module):
            kwargs["w_extrapolation_ratio"] = self.extra_w_extrapolation_ratio
            kwargs["t_extrapolation_ratio"] = self.extra_t_extrapolation_ratio
            kwargs["device"] = device
+            kwargs["dtype"] = dtype
            self.extra_pos_embedder = LearnablePosEmbAxis(
                **kwargs,
            )
@@ -292,7 +293,7 @@ class GeneralDIT(nn.Module):
        x_B_T_H_W_D = self.x_embedder(x_B_C_T_H_W)

        if self.extra_per_block_abs_pos_emb:
-            extra_pos_emb = self.extra_pos_embedder(x_B_T_H_W_D, fps=fps, device=x_B_C_T_H_W.device)
+            extra_pos_emb = self.extra_pos_embedder(x_B_T_H_W_D, fps=fps, device=x_B_C_T_H_W.device, dtype=x_B_C_T_H_W.dtype)
        else:
            extra_pos_emb = None

@@ -476,6 +477,8 @@ class GeneralDIT(nn.Module):
                inputs["original_shape"],
            )
        extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D = inputs["extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D"].to(x.dtype)
        del inputs

+        if extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D is not None:
            assert (
                x.shape == extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D.shape
@@ -486,6 +489,8 @@ class GeneralDIT(nn.Module):
                self.blocks["block0"].x_format == block.x_format
            ), f"First block has x_format {self.blocks[0].x_format}, got {block.x_format}"

+            if extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D is not None:
+                x += extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D
            x = block(
                x,
                affline_emb_B_D,
@@ -493,7 +498,6 @@ class GeneralDIT(nn.Module):
                crossattn_mask,
                rope_emb_L_1_1_D=rope_emb_L_1_1_D,
                adaln_lora_B_3D=adaln_lora_B_3D,
-                extra_per_block_pos_emb=extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D,
            )

        x_B_T_H_W_D = rearrange(x, "T H W B D -> B T H W D")

@@ -41,12 +41,12 @@ def normalize(x: torch.Tensor, dim: Optional[List[int]] = None, eps: float = 0)


class VideoPositionEmb(nn.Module):
-    def forward(self, x_B_T_H_W_C: torch.Tensor, fps=Optional[torch.Tensor], device=None) -> torch.Tensor:
+    def forward(self, x_B_T_H_W_C: torch.Tensor, fps=Optional[torch.Tensor], device=None, dtype=None) -> torch.Tensor:
        """
        It delegates the embedding generation to generate_embeddings function.
        """
        B_T_H_W_C = x_B_T_H_W_C.shape
-        embeddings = self.generate_embeddings(B_T_H_W_C, fps=fps, device=device)
+        embeddings = self.generate_embeddings(B_T_H_W_C, fps=fps, device=device, dtype=dtype)

        return embeddings

@@ -104,6 +104,7 @@ class VideoRopePosition3DEmb(VideoPositionEmb):
        w_ntk_factor: Optional[float] = None,
        t_ntk_factor: Optional[float] = None,
        device=None,
+        dtype=None,
    ):
        """
        Generate embeddings for the given input size.
@@ -173,6 +174,7 @@ class LearnablePosEmbAxis(VideoPositionEmb):
        len_w: int,
        len_t: int,
        device=None,
+        dtype=None,
        **kwargs,
    ):
        """
@@ -184,17 +186,16 @@ class LearnablePosEmbAxis(VideoPositionEmb):
        self.interpolation = interpolation
        assert self.interpolation in ["crop"], f"Unknown interpolation method {self.interpolation}"

-        self.pos_emb_h = nn.Parameter(torch.empty(len_h, model_channels, device=device))
-        self.pos_emb_w = nn.Parameter(torch.empty(len_w, model_channels, device=device))
-        self.pos_emb_t = nn.Parameter(torch.empty(len_t, model_channels, device=device))
+        self.pos_emb_h = nn.Parameter(torch.empty(len_h, model_channels, device=device, dtype=dtype))
+        self.pos_emb_w = nn.Parameter(torch.empty(len_w, model_channels, device=device, dtype=dtype))
+        self.pos_emb_t = nn.Parameter(torch.empty(len_t, model_channels, device=device, dtype=dtype))

-    def generate_embeddings(self, B_T_H_W_C: torch.Size, fps=Optional[torch.Tensor], device=None) -> torch.Tensor:
+    def generate_embeddings(self, B_T_H_W_C: torch.Size, fps=Optional[torch.Tensor], device=None, dtype=None) -> torch.Tensor:
        B, T, H, W, _ = B_T_H_W_C
        if self.interpolation == "crop":
-            emb_h_H = self.pos_emb_h[:H].to(device=device)
-            emb_w_W = self.pos_emb_w[:W].to(device=device)
-            emb_t_T = self.pos_emb_t[:T].to(device=device)
+            emb_h_H = self.pos_emb_h[:H].to(device=device, dtype=dtype)
+            emb_w_W = self.pos_emb_w[:W].to(device=device, dtype=dtype)
+            emb_t_T = self.pos_emb_t[:T].to(device=device, dtype=dtype)
            emb = (
                repeat(emb_t_T, "t d-> b t h w d", b=B, h=H, w=W)
                + repeat(emb_h_H, "h d-> b t h w d", b=B, t=T, w=W)
@@ -18,6 +18,7 @@ import logging
import torch
from torch import nn
from enum import Enum
+import math

from .cosmos_tokenizer.layers3d import (
    EncoderFactorized,
@@ -89,8 +90,8 @@ class CausalContinuousVideoTokenizer(nn.Module):
        self.distribution = IdentityDistribution()  # ContinuousFormulation[formulation_name].value()

        num_parameters = sum(param.numel() for param in self.parameters())
-        logging.info(f"model={self.name}, num_parameters={num_parameters:,}")
-        logging.info(
+        logging.debug(f"model={self.name}, num_parameters={num_parameters:,}")
+        logging.debug(
            f"z_channels={z_channels}, latent_channels={self.latent_channels}."
        )

@@ -105,17 +106,23 @@ class CausalContinuousVideoTokenizer(nn.Module):
        z, posteriors = self.distribution(moments)
        latent_ch = z.shape[1]
        latent_t = z.shape[2]
-        dtype = z.dtype
-        mean = self.latent_mean.view(latent_ch, -1)[:, : latent_t].reshape([1, latent_ch, -1, 1, 1]).to(dtype=dtype, device=z.device)
-        std = self.latent_std.view(latent_ch, -1)[:, : latent_t].reshape([1, latent_ch, -1, 1, 1]).to(dtype=dtype, device=z.device)
+        in_dtype = z.dtype
+        mean = self.latent_mean.view(latent_ch, -1)
+        std = self.latent_std.view(latent_ch, -1)
+
+        mean = mean.repeat(1, math.ceil(latent_t / mean.shape[-1]))[:, : latent_t].reshape([1, latent_ch, -1, 1, 1]).to(dtype=in_dtype, device=z.device)
+        std = std.repeat(1, math.ceil(latent_t / std.shape[-1]))[:, : latent_t].reshape([1, latent_ch, -1, 1, 1]).to(dtype=in_dtype, device=z.device)
        return ((z - mean) / std) * self.sigma_data

    def decode(self, z):
        in_dtype = z.dtype
        latent_ch = z.shape[1]
        latent_t = z.shape[2]
-        mean = self.latent_mean.view(latent_ch, -1)[:, : latent_t].reshape([1, latent_ch, -1, 1, 1]).to(dtype=in_dtype, device=z.device)
-        std = self.latent_std.view(latent_ch, -1)[:, : latent_t].reshape([1, latent_ch, -1, 1, 1]).to(dtype=in_dtype, device=z.device)
+        mean = self.latent_mean.view(latent_ch, -1)
+        std = self.latent_std.view(latent_ch, -1)
+
+        mean = mean.repeat(1, math.ceil(latent_t / mean.shape[-1]))[:, : latent_t].reshape([1, latent_ch, -1, 1, 1]).to(dtype=in_dtype, device=z.device)
+        std = std.repeat(1, math.ceil(latent_t / std.shape[-1]))[:, : latent_t].reshape([1, latent_ch, -1, 1, 1]).to(dtype=in_dtype, device=z.device)

        z = z / self.sigma_data
        z = z * std + mean
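The stored per-channel mean/std only cover a fixed number of temporal latents; `repeat` with `math.ceil` tiles them so inputs with more latent frames than the stored statistics still normalize correctly (previously the bare slice `[:, :latent_t]` would come up short). A toy shape check:

```python
import math
import torch

latent_t = 5
stats = torch.arange(2.0).view(1, -1)        # stored stats cover only 2 temporal entries
tiled = stats.repeat(1, math.ceil(latent_t / stats.shape[-1]))[:, :latent_t]
print(tiled)                                  # tensor([[0., 1., 0., 1., 0.]]) -- length 5
```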
@@ -230,8 +230,7 @@ class SingleStreamBlock(nn.Module):

    def forward(self, x: Tensor, vec: Tensor, pe: Tensor, attn_mask=None) -> Tensor:
        mod, _ = self.modulation(vec)
-        x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
-        qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)
+        qkv, mlp = torch.split(self.linear1((1 + mod.scale) * self.pre_norm(x) + mod.shift), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)

        q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k = self.norm(q, k, v)
@@ -5,8 +5,15 @@ from torch import Tensor
from comfy.ldm.modules.attention import optimized_attention
import comfy.model_management


def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor:
-    q, k = apply_rope(q, k, pe)
+    q_shape = q.shape
+    k_shape = k.shape
+
+    q = q.float().reshape(*q.shape[:-1], -1, 1, 2)
+    k = k.float().reshape(*k.shape[:-1], -1, 1, 2)
+    q = (pe[..., 0] * q[..., 0] + pe[..., 1] * q[..., 1]).reshape(*q_shape).type_as(v)
+    k = (pe[..., 0] * k[..., 0] + pe[..., 1] * k[..., 1]).reshape(*k_shape).type_as(v)

    heads = q.shape[1]
    x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask)
|
||||
img = self.img_in(img)
|
||||
vec = self.time_in(timestep_embedding(timesteps, 256).to(img.dtype))
|
||||
if self.params.guidance_embed:
|
||||
if guidance is None:
|
||||
raise ValueError("Didn't get guidance strength for guidance distilled model.")
|
||||
vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype))
|
||||
if guidance is not None:
|
||||
vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype))
|
||||
|
||||
vec = vec + self.vector_in(y[:,:self.params.vec_in_dim])
|
||||
txt = self.txt_in(txt)
|
||||
@@ -186,7 +185,7 @@ class Flux(nn.Module):
|
||||
img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels)
|
||||
return img
|
||||
|
||||
def forward(self, x, timestep, context, y, guidance, control=None, transformer_options={}, **kwargs):
|
||||
def forward(self, x, timestep, context, y, guidance=None, control=None, transformer_options={}, **kwargs):
|
||||
bs, c, h, w = x.shape
|
||||
patch_size = self.patch_size
|
||||
x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size))
|
||||
|
||||
@@ -240,9 +240,8 @@ class HunyuanVideo(nn.Module):
|
||||
vec = vec + self.vector_in(y[:, :self.params.vec_in_dim])
|
||||
|
||||
if self.params.guidance_embed:
|
||||
if guidance is None:
|
||||
raise ValueError("Didn't get guidance strength for guidance distilled model.")
|
||||
vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype))
|
||||
if guidance is not None:
|
||||
vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype))
|
||||
|
||||
if txt_mask is not None and not torch.is_floating_point(txt_mask):
|
||||
txt_mask = (txt_mask - 1).to(img.dtype) * torch.finfo(img.dtype).max
|
||||
@@ -314,7 +313,7 @@ class HunyuanVideo(nn.Module):
|
||||
img = img.reshape(initial_shape)
|
||||
return img
|
||||
|
||||
def forward(self, x, timestep, context, y, guidance, attention_mask=None, control=None, transformer_options={}, **kwargs):
|
||||
def forward(self, x, timestep, context, y, guidance=None, attention_mask=None, control=None, transformer_options={}, **kwargs):
|
||||
bs, c, t, h, w = x.shape
|
||||
patch_size = self.patch_size
|
||||
t_len = ((t + (patch_size[0] // 2)) // patch_size[0])
|
||||
|
||||
comfy/ldm/lumina/model.py (new file, +619 lines)
@@ -0,0 +1,619 @@
# Code from: https://github.com/Alpha-VLLM/Lumina-Image-2.0/blob/main/models/model.py
from __future__ import annotations

from typing import List, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder, RMSNorm
from comfy.ldm.modules.attention import optimized_attention_masked
from comfy.ldm.flux.layers import EmbedND


def modulate(x, scale):
    return x * (1 + scale.unsqueeze(1))

#############################################################################
#                            Core NextDiT Model                             #
#############################################################################


class JointAttention(nn.Module):
    """Multi-head attention module."""

    def __init__(
        self,
        dim: int,
        n_heads: int,
        n_kv_heads: Optional[int],
        qk_norm: bool,
        operation_settings={},
    ):
        """
        Initialize the Attention module.

        Args:
            dim (int): Number of input dimensions.
            n_heads (int): Number of heads.
            n_kv_heads (Optional[int]): Number of kv heads, if using GQA.

        """
        super().__init__()
        self.n_kv_heads = n_heads if n_kv_heads is None else n_kv_heads
        self.n_local_heads = n_heads
        self.n_local_kv_heads = self.n_kv_heads
        self.n_rep = self.n_local_heads // self.n_local_kv_heads
        self.head_dim = dim // n_heads

        self.qkv = operation_settings.get("operations").Linear(
            dim,
            (n_heads + self.n_kv_heads + self.n_kv_heads) * self.head_dim,
            bias=False,
            device=operation_settings.get("device"),
            dtype=operation_settings.get("dtype"),
        )
        self.out = operation_settings.get("operations").Linear(
            n_heads * self.head_dim,
            dim,
            bias=False,
            device=operation_settings.get("device"),
            dtype=operation_settings.get("dtype"),
        )

        if qk_norm:
            self.q_norm = RMSNorm(self.head_dim, elementwise_affine=True, **operation_settings)
            self.k_norm = RMSNorm(self.head_dim, elementwise_affine=True, **operation_settings)
        else:
            self.q_norm = self.k_norm = nn.Identity()

    @staticmethod
    def apply_rotary_emb(
        x_in: torch.Tensor,
        freqs_cis: torch.Tensor,
    ) -> torch.Tensor:
        """
        Apply rotary embeddings to input tensors using the given frequency
        tensor.

        This function applies rotary embeddings to the given query 'xq' and
        key 'xk' tensors using the provided frequency tensor 'freqs_cis'. The
        input tensors are reshaped as complex numbers, and the frequency tensor
        is reshaped for broadcasting compatibility. The resulting tensors
        contain rotary embeddings and are returned as real tensors.

        Args:
            x_in (torch.Tensor): Query or Key tensor to apply rotary embeddings.
            freqs_cis (torch.Tensor): Precomputed frequency tensor for complex
                exponentials.

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor
                and key tensor with rotary embeddings.
        """

        t_ = x_in.reshape(*x_in.shape[:-1], -1, 1, 2)
        t_out = freqs_cis[..., 0] * t_[..., 0] + freqs_cis[..., 1] * t_[..., 1]
        return t_out.reshape(*x_in.shape)

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        freqs_cis: torch.Tensor,
    ) -> torch.Tensor:
        """

        Args:
            x:
            x_mask:
            freqs_cis:

        Returns:

        """
        bsz, seqlen, _ = x.shape

        xq, xk, xv = torch.split(
            self.qkv(x),
            [
                self.n_local_heads * self.head_dim,
                self.n_local_kv_heads * self.head_dim,
                self.n_local_kv_heads * self.head_dim,
            ],
            dim=-1,
        )
        xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
        xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
        xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)

        xq = self.q_norm(xq)
        xk = self.k_norm(xk)

        xq = JointAttention.apply_rotary_emb(xq, freqs_cis=freqs_cis)
        xk = JointAttention.apply_rotary_emb(xk, freqs_cis=freqs_cis)

        n_rep = self.n_local_heads // self.n_local_kv_heads
        if n_rep >= 1:
            xk = xk.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)
            xv = xv.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)
        output = optimized_attention_masked(xq.movedim(1, 2), xk.movedim(1, 2), xv.movedim(1, 2), self.n_local_heads, x_mask, skip_reshape=True)

        return self.out(output)


class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        hidden_dim: int,
        multiple_of: int,
        ffn_dim_multiplier: Optional[float],
        operation_settings={},
    ):
        """
        Initialize the FeedForward module.

        Args:
            dim (int): Input dimension.
            hidden_dim (int): Hidden dimension of the feedforward layer.
            multiple_of (int): Value to ensure hidden dimension is a multiple
                of this value.
            ffn_dim_multiplier (float, optional): Custom multiplier for hidden
                dimension. Defaults to None.

        """
        super().__init__()
        # custom dim factor multiplier
        if ffn_dim_multiplier is not None:
            hidden_dim = int(ffn_dim_multiplier * hidden_dim)
        hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)

        self.w1 = operation_settings.get("operations").Linear(
            dim,
            hidden_dim,
            bias=False,
            device=operation_settings.get("device"),
            dtype=operation_settings.get("dtype"),
        )
        self.w2 = operation_settings.get("operations").Linear(
            hidden_dim,
            dim,
            bias=False,
            device=operation_settings.get("device"),
            dtype=operation_settings.get("dtype"),
        )
        self.w3 = operation_settings.get("operations").Linear(
            dim,
            hidden_dim,
            bias=False,
            device=operation_settings.get("device"),
            dtype=operation_settings.get("dtype"),
        )

    # @torch.compile
    def _forward_silu_gating(self, x1, x3):
        return F.silu(x1) * x3

    def forward(self, x):
        return self.w2(self._forward_silu_gating(self.w1(x), self.w3(x)))

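`_forward_silu_gating` is the SwiGLU feed-forward used by LLaMA-style blocks: `w2(silu(w1(x)) * w3(x))`, where `w1` is the gate branch and `w3` the value branch. A minimal standalone equivalent, assuming plain `nn.Linear` layers:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

dim, hidden = 8, 32
w1 = nn.Linear(dim, hidden, bias=False)  # gate branch
w3 = nn.Linear(dim, hidden, bias=False)  # value branch
w2 = nn.Linear(hidden, dim, bias=False)  # projection back to model dim

x = torch.randn(2, dim)
out = w2(F.silu(w1(x)) * w3(x))
print(out.shape)  # torch.Size([2, 8])
```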

class JointTransformerBlock(nn.Module):
    def __init__(
        self,
        layer_id: int,
        dim: int,
        n_heads: int,
        n_kv_heads: int,
        multiple_of: int,
        ffn_dim_multiplier: float,
        norm_eps: float,
        qk_norm: bool,
        modulation=True,
        operation_settings={},
    ) -> None:
        """
        Initialize a TransformerBlock.

        Args:
            layer_id (int): Identifier for the layer.
            dim (int): Embedding dimension of the input features.
            n_heads (int): Number of attention heads.
            n_kv_heads (Optional[int]): Number of attention heads in key and
                value features (if using GQA), or set to None for the same as
                query.
            multiple_of (int):
            ffn_dim_multiplier (float):
            norm_eps (float):

        """
        super().__init__()
        self.dim = dim
        self.head_dim = dim // n_heads
        self.attention = JointAttention(dim, n_heads, n_kv_heads, qk_norm, operation_settings=operation_settings)
        self.feed_forward = FeedForward(
            dim=dim,
            hidden_dim=4 * dim,
            multiple_of=multiple_of,
            ffn_dim_multiplier=ffn_dim_multiplier,
            operation_settings=operation_settings,
        )
        self.layer_id = layer_id
        self.attention_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
        self.ffn_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)

        self.attention_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
        self.ffn_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)

        self.modulation = modulation
        if modulation:
            self.adaLN_modulation = nn.Sequential(
                nn.SiLU(),
                operation_settings.get("operations").Linear(
                    min(dim, 1024),
                    4 * dim,
                    bias=True,
                    device=operation_settings.get("device"),
                    dtype=operation_settings.get("dtype"),
                ),
            )

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        freqs_cis: torch.Tensor,
        adaln_input: Optional[torch.Tensor]=None,
    ):
        """
        Perform a forward pass through the TransformerBlock.

        Args:
            x (torch.Tensor): Input tensor.
            freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.

        Returns:
            torch.Tensor: Output tensor after applying attention and
                feedforward layers.

        """
        if self.modulation:
            assert adaln_input is not None
            scale_msa, gate_msa, scale_mlp, gate_mlp = self.adaLN_modulation(adaln_input).chunk(4, dim=1)

            x = x + gate_msa.unsqueeze(1).tanh() * self.attention_norm2(
                self.attention(
                    modulate(self.attention_norm1(x), scale_msa),
                    x_mask,
                    freqs_cis,
                )
            )
            x = x + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(
                self.feed_forward(
                    modulate(self.ffn_norm1(x), scale_mlp),
                )
            )
        else:
            assert adaln_input is None
            x = x + self.attention_norm2(
                self.attention(
                    self.attention_norm1(x),
                    x_mask,
                    freqs_cis,
                )
            )
            x = x + self.ffn_norm2(
                self.feed_forward(
                    self.ffn_norm1(x),
                )
            )
        return x


class FinalLayer(nn.Module):
    """
    The final layer of NextDiT.
    """

    def __init__(self, hidden_size, patch_size, out_channels, operation_settings={}):
        super().__init__()
        self.norm_final = operation_settings.get("operations").LayerNorm(
            hidden_size,
            elementwise_affine=False,
            eps=1e-6,
            device=operation_settings.get("device"),
            dtype=operation_settings.get("dtype"),
        )
        self.linear = operation_settings.get("operations").Linear(
            hidden_size,
            patch_size * patch_size * out_channels,
            bias=True,
            device=operation_settings.get("device"),
            dtype=operation_settings.get("dtype"),
        )

        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            operation_settings.get("operations").Linear(
                min(hidden_size, 1024),
                hidden_size,
                bias=True,
                device=operation_settings.get("device"),
                dtype=operation_settings.get("dtype"),
            ),
        )

    def forward(self, x, c):
        scale = self.adaLN_modulation(c)
        x = modulate(self.norm_final(x), scale)
        x = self.linear(x)
        return x


class NextDiT(nn.Module):
    """
    Diffusion model with a Transformer backbone.
    """

    def __init__(
        self,
        patch_size: int = 2,
        in_channels: int = 4,
        dim: int = 4096,
        n_layers: int = 32,
        n_refiner_layers: int = 2,
        n_heads: int = 32,
        n_kv_heads: Optional[int] = None,
        multiple_of: int = 256,
        ffn_dim_multiplier: Optional[float] = None,
        norm_eps: float = 1e-5,
        qk_norm: bool = False,
        cap_feat_dim: int = 5120,
        axes_dims: List[int] = (16, 56, 56),
        axes_lens: List[int] = (1, 512, 512),
        image_model=None,
        device=None,
        dtype=None,
        operations=None,
    ) -> None:
        super().__init__()
        self.dtype = dtype
        operation_settings = {"operations": operations, "device": device, "dtype": dtype}
        self.in_channels = in_channels
        self.out_channels = in_channels
        self.patch_size = patch_size

        self.x_embedder = operation_settings.get("operations").Linear(
            in_features=patch_size * patch_size * in_channels,
            out_features=dim,
            bias=True,
            device=operation_settings.get("device"),
            dtype=operation_settings.get("dtype"),
        )

        self.noise_refiner = nn.ModuleList(
            [
                JointTransformerBlock(
                    layer_id,
                    dim,
                    n_heads,
                    n_kv_heads,
                    multiple_of,
                    ffn_dim_multiplier,
                    norm_eps,
                    qk_norm,
                    modulation=True,
                    operation_settings=operation_settings,
                )
                for layer_id in range(n_refiner_layers)
            ]
        )
        self.context_refiner = nn.ModuleList(
            [
                JointTransformerBlock(
                    layer_id,
                    dim,
                    n_heads,
                    n_kv_heads,
                    multiple_of,
                    ffn_dim_multiplier,
                    norm_eps,
                    qk_norm,
                    modulation=False,
                    operation_settings=operation_settings,
                )
                for layer_id in range(n_refiner_layers)
            ]
        )

        self.t_embedder = TimestepEmbedder(min(dim, 1024), **operation_settings)
        self.cap_embedder = nn.Sequential(
            RMSNorm(cap_feat_dim, eps=norm_eps, elementwise_affine=True, **operation_settings),
            operation_settings.get("operations").Linear(
                cap_feat_dim,
                dim,
                bias=True,
                device=operation_settings.get("device"),
                dtype=operation_settings.get("dtype"),
            ),
        )

        self.layers = nn.ModuleList(
            [
                JointTransformerBlock(
                    layer_id,
                    dim,
                    n_heads,
                    n_kv_heads,
                    multiple_of,
                    ffn_dim_multiplier,
                    norm_eps,
                    qk_norm,
                    operation_settings=operation_settings,
                )
                for layer_id in range(n_layers)
            ]
        )
        self.norm_final = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
        self.final_layer = FinalLayer(dim, patch_size, self.out_channels, operation_settings=operation_settings)

        assert (dim // n_heads) == sum(axes_dims)
        self.axes_dims = axes_dims
        self.axes_lens = axes_lens
        self.rope_embedder = EmbedND(dim=dim // n_heads, theta=10000.0, axes_dim=axes_dims)
        self.dim = dim
        self.n_heads = n_heads

    def unpatchify(
        self, x: torch.Tensor, img_size: List[Tuple[int, int]], cap_size: List[int], return_tensor=False
    ) -> List[torch.Tensor]:
        """
        x: (N, T, patch_size**2 * C)
        imgs: (N, H, W, C)
        """
        pH = pW = self.patch_size
        imgs = []
        for i in range(x.size(0)):
            H, W = img_size[i]
            begin = cap_size[i]
            end = begin + (H // pH) * (W // pW)
            imgs.append(
                x[i][begin:end]
                .view(H // pH, W // pW, pH, pW, self.out_channels)
                .permute(4, 0, 2, 1, 3)
                .flatten(3, 4)
                .flatten(1, 2)
            )

        if return_tensor:
            imgs = torch.stack(imgs, dim=0)
        return imgs
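`unpatchify` inverts the flattening done in `patchify_and_embed` below: each image token holds a pH×pW patch of C channels, and the permute/flatten sequence reassembles (C, H, W). A toy round trip under the same layout conventions:

```python
import torch

pH = pW = 2
C, H, W = 3, 4, 4
img = torch.arange(C * H * W, dtype=torch.float32).view(C, H, W)

# patchify: (C, H, W) -> (H/pH * W/pW, pH*pW*C), as in patchify_and_embed
tokens = img.view(C, H // pH, pH, W // pW, pW).permute(1, 3, 2, 4, 0).flatten(2).flatten(0, 1)

# unpatchify: invert the layout, as in NextDiT.unpatchify
out = tokens.view(H // pH, W // pW, pH, pW, C).permute(4, 0, 2, 1, 3).flatten(3, 4).flatten(1, 2)
print(torch.equal(out, img))  # True
```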

    def patchify_and_embed(
        self, x: List[torch.Tensor] | torch.Tensor, cap_feats: torch.Tensor, cap_mask: torch.Tensor, t: torch.Tensor, num_tokens
    ) -> Tuple[torch.Tensor, torch.Tensor, List[Tuple[int, int]], List[int], torch.Tensor]:
        bsz = len(x)
        pH = pW = self.patch_size
        device = x[0].device
        dtype = x[0].dtype

        if cap_mask is not None:
            l_effective_cap_len = cap_mask.sum(dim=1).tolist()
        else:
            l_effective_cap_len = [num_tokens] * bsz

        if cap_mask is not None and not torch.is_floating_point(cap_mask):
            cap_mask = (cap_mask - 1).to(dtype) * torch.finfo(dtype).max

        img_sizes = [(img.size(1), img.size(2)) for img in x]
        l_effective_img_len = [(H // pH) * (W // pW) for (H, W) in img_sizes]

        max_seq_len = max(
            (cap_len+img_len for cap_len, img_len in zip(l_effective_cap_len, l_effective_img_len))
        )
        max_cap_len = max(l_effective_cap_len)
        max_img_len = max(l_effective_img_len)

        position_ids = torch.zeros(bsz, max_seq_len, 3, dtype=torch.int32, device=device)

        for i in range(bsz):
            cap_len = l_effective_cap_len[i]
            img_len = l_effective_img_len[i]
            H, W = img_sizes[i]
            H_tokens, W_tokens = H // pH, W // pW
            assert H_tokens * W_tokens == img_len

            position_ids[i, :cap_len, 0] = torch.arange(cap_len, dtype=torch.int32, device=device)
            position_ids[i, cap_len:cap_len+img_len, 0] = cap_len
            row_ids = torch.arange(H_tokens, dtype=torch.int32, device=device).view(-1, 1).repeat(1, W_tokens).flatten()
            col_ids = torch.arange(W_tokens, dtype=torch.int32, device=device).view(1, -1).repeat(H_tokens, 1).flatten()
            position_ids[i, cap_len:cap_len+img_len, 1] = row_ids
            position_ids[i, cap_len:cap_len+img_len, 2] = col_ids

        freqs_cis = self.rope_embedder(position_ids).movedim(1, 2).to(dtype)

        # build freqs_cis for cap and image individually
        cap_freqs_cis_shape = list(freqs_cis.shape)
        # cap_freqs_cis_shape[1] = max_cap_len
        cap_freqs_cis_shape[1] = cap_feats.shape[1]
        cap_freqs_cis = torch.zeros(*cap_freqs_cis_shape, device=device, dtype=freqs_cis.dtype)

        img_freqs_cis_shape = list(freqs_cis.shape)
        img_freqs_cis_shape[1] = max_img_len
        img_freqs_cis = torch.zeros(*img_freqs_cis_shape, device=device, dtype=freqs_cis.dtype)

        for i in range(bsz):
            cap_len = l_effective_cap_len[i]
            img_len = l_effective_img_len[i]
            cap_freqs_cis[i, :cap_len] = freqs_cis[i, :cap_len]
            img_freqs_cis[i, :img_len] = freqs_cis[i, cap_len:cap_len+img_len]

        # refine context
        for layer in self.context_refiner:
            cap_feats = layer(cap_feats, cap_mask, cap_freqs_cis)

        # refine image
        flat_x = []
        for i in range(bsz):
            img = x[i]
            C, H, W = img.size()
            img = img.view(C, H // pH, pH, W // pW, pW).permute(1, 3, 2, 4, 0).flatten(2).flatten(0, 1)
            flat_x.append(img)
        x = flat_x
        padded_img_embed = torch.zeros(bsz, max_img_len, x[0].shape[-1], device=device, dtype=x[0].dtype)
        padded_img_mask = torch.zeros(bsz, max_img_len, dtype=dtype, device=device)
        for i in range(bsz):
            padded_img_embed[i, :l_effective_img_len[i]] = x[i]
            padded_img_mask[i, l_effective_img_len[i]:] = -torch.finfo(dtype).max

        padded_img_embed = self.x_embedder(padded_img_embed)
        padded_img_mask = padded_img_mask.unsqueeze(1)
        for layer in self.noise_refiner:
            padded_img_embed = layer(padded_img_embed, padded_img_mask, img_freqs_cis, t)

        if cap_mask is not None:
            mask = torch.zeros(bsz, max_seq_len, dtype=dtype, device=device)
            mask[:, :max_cap_len] = cap_mask[:, :max_cap_len]
        else:
            mask = None

        padded_full_embed = torch.zeros(bsz, max_seq_len, self.dim, device=device, dtype=x[0].dtype)
        for i in range(bsz):
            cap_len = l_effective_cap_len[i]
            img_len = l_effective_img_len[i]

            padded_full_embed[i, :cap_len] = cap_feats[i, :cap_len]
            padded_full_embed[i, cap_len:cap_len+img_len] = padded_img_embed[i, :img_len]

        return padded_full_embed, mask, img_sizes, l_effective_cap_len, freqs_cis

    # def forward(self, x, t, cap_feats, cap_mask):
    def forward(self, x, timesteps, context, num_tokens, attention_mask=None, **kwargs):
        t = 1.0 - timesteps
        cap_feats = context
        cap_mask = attention_mask
        """
        Forward pass of NextDiT.
        t: (N,) tensor of diffusion timesteps
        y: (N,) tensor of text tokens/features
        """

        t = self.t_embedder(t, dtype=x.dtype)  # (N, D)
        adaln_input = t

        cap_feats = self.cap_embedder(cap_feats)  # (N, L, D)  # todo check if able to batchify w.o. redundant compute

        x_is_tensor = isinstance(x, torch.Tensor)
        x, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens)
        freqs_cis = freqs_cis.to(x.device)

        for layer in self.layers:
            x = layer(x, mask, freqs_cis, adaln_input)

        x = self.final_layer(x, adaln_input)
        x = self.unpatchify(x, img_size, cap_size, return_tensor=x_is_tensor)

        return -x

@@ -1,4 +1,6 @@
import math
+import sys

import torch
import torch.nn.functional as F
from torch import nn, einsum
@@ -16,7 +18,11 @@ if model_management.xformers_enabled():
    import xformers.ops

if model_management.sage_attention_enabled():
-    from sageattention import sageattn
+    try:
+        from sageattention import sageattn
+    except ModuleNotFoundError:
+        logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention")
+        exit(-1)

from comfy.cli_args import args
import comfy.ops
@@ -321,7 +321,7 @@ class SelfAttention(nn.Module):

class RMSNorm(torch.nn.Module):
    def __init__(
-        self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6, device=None, dtype=None
+        self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6, device=None, dtype=None, **kwargs
    ):
        """
        Initialize the RMSNorm normalization layer.
@@ -293,6 +293,17 @@ def pytorch_attention(q, k, v):
    return out


+def vae_attention():
+    if model_management.xformers_enabled_vae():
+        logging.info("Using xformers attention in VAE")
+        return xformers_attention
+    elif model_management.pytorch_attention_enabled():
+        logging.info("Using pytorch attention in VAE")
+        return pytorch_attention
+    else:
+        logging.info("Using split attention in VAE")
+        return normal_attention
+
class AttnBlock(nn.Module):
    def __init__(self, in_channels, conv_op=ops.Conv2d):
        super().__init__()
@@ -320,15 +331,7 @@ class AttnBlock(nn.Module):
                        stride=1,
                        padding=0)

-        if model_management.xformers_enabled_vae():
-            logging.info("Using xformers attention in VAE")
-            self.optimized_attention = xformers_attention
-        elif model_management.pytorch_attention_enabled():
-            logging.info("Using pytorch attention in VAE")
-            self.optimized_attention = pytorch_attention
-        else:
-            logging.info("Using split attention in VAE")
-            self.optimized_attention = normal_attention
+        self.optimized_attention = vae_attention()

    def forward(self, x):
        h_ = x
@@ -699,9 +702,6 @@ class Decoder(nn.Module):
                        padding=1)

    def forward(self, z, **kwargs):
-        #assert z.shape[1:] == self.z_shape[1:]
-        self.last_z_shape = z.shape
-
        # timestep embedding
        temb = None

@@ -307,7 +307,6 @@ def model_lora_keys_unet(model, key_map={}):
            if k.endswith(".weight"):
                key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
                key_map["lora_unet_{}".format(key_lora)] = k
-                key_map["lora_prior_unet_{}".format(key_lora)] = k #cascade lora: TODO put lora key prefix in the model config
                key_map["{}".format(k[:-len(".weight")])] = k #generic lora format without any weird key names
            else:
                key_map["{}".format(k)] = k #generic lora format for not .weight without any weird key names
@@ -327,6 +326,13 @@ def model_lora_keys_unet(model, key_map={}):
            diffusers_lora_key = diffusers_lora_key[:-2]
        key_map[diffusers_lora_key] = unet_key

+    if isinstance(model, comfy.model_base.StableCascade_C):
+        for k in sdk:
+            if k.startswith("diffusion_model."):
+                if k.endswith(".weight"):
+                    key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
+                    key_map["lora_prior_unet_{}".format(key_lora)] = k
+
    if isinstance(model, comfy.model_base.SD3): #Diffusers lora SD3
        diffusers_keys = comfy.utils.mmdit_to_diffusers(model.model_config.unet_config, output_prefix="diffusion_model.")
        for k in diffusers_keys:
@@ -34,6 +34,7 @@ import comfy.ldm.flux.model
import comfy.ldm.lightricks.model
import comfy.ldm.hunyuan_video.model
import comfy.ldm.cosmos.model
+import comfy.ldm.lumina.model

import comfy.model_management
import comfy.patcher_extension
@@ -148,7 +149,9 @@ class BaseModel(torch.nn.Module):

        xc = xc.to(dtype)
        t = self.model_sampling.timestep(t).float()
-        context = context.to(dtype)
+        if context is not None:
+            context = context.to(dtype)

        extra_conds = {}
        for o in kwargs:
            extra = kwargs[o]
@@ -163,9 +166,6 @@ class BaseModel(torch.nn.Module):
    def get_dtype(self):
        return self.diffusion_model.dtype

-    def is_adm(self):
-        return self.adm_channels > 0
-
    def encode_adm(self, **kwargs):
        return None

@@ -549,6 +549,10 @@ class SD_X4Upscaler(BaseModel):

        out['c_concat'] = comfy.conds.CONDNoiseShape(image)
        out['y'] = comfy.conds.CONDRegular(noise_level)
+
+        cross_attn = kwargs.get("cross_attn", None)
+        if cross_attn is not None:
+            out['c_crossattn'] = comfy.conds.CONDCrossAttn(cross_attn)
        return out

class IP2P:
@@ -806,7 +810,10 @@ class Flux(BaseModel):
            (h_tok, w_tok) = (math.ceil(shape[2] / self.diffusion_model.patch_size), math.ceil(shape[3] / self.diffusion_model.patch_size))
            attention_mask = utils.upscale_dit_mask(attention_mask, mask_ref_size, (h_tok, w_tok))
            out['attention_mask'] = comfy.conds.CONDRegular(attention_mask)
-        out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([kwargs.get("guidance", 3.5)]))
+
+        guidance = kwargs.get("guidance", 3.5)
+        if guidance is not None:
+            out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance]))
        return out

class GenmoMochi(BaseModel):
@@ -863,7 +870,10 @@ class HunyuanVideo(BaseModel):
        cross_attn = kwargs.get("cross_attn", None)
        if cross_attn is not None:
            out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
-        out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([kwargs.get("guidance", 6.0)]))
+
+        guidance = kwargs.get("guidance", 6.0)
+        if guidance is not None:
+            out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance]))
        return out

class CosmosVideo(BaseModel):
@@ -892,3 +902,19 @@ class CosmosVideo(BaseModel):
        latent_image = latent_image + noise
        latent_image = self.model_sampling.calculate_input(torch.tensor([sigma_noise_augmentation], device=latent_image.device, dtype=latent_image.dtype), latent_image)
        return latent_image * ((sigma ** 2 + self.model_sampling.sigma_data ** 2) ** 0.5)

+class Lumina2(BaseModel):
+    def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
+        super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.lumina.model.NextDiT)
+
+    def extra_conds(self, **kwargs):
+        out = super().extra_conds(**kwargs)
+        attention_mask = kwargs.get("attention_mask", None)
+        if attention_mask is not None:
+            if torch.numel(attention_mask) != attention_mask.sum():
+                out['attention_mask'] = comfy.conds.CONDRegular(attention_mask)
+            out['num_tokens'] = comfy.conds.CONDConstant(max(1, torch.sum(attention_mask).item()))
+        cross_attn = kwargs.get("cross_attn", None)
+        if cross_attn is not None:
+            out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
+        return out
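The `numel != sum` comparison is a compact "is anything actually masked?" test for a 0/1 attention mask: when every entry is 1, the sum equals the element count and the mask tensor can be dropped entirely. A quick check:

```python
import torch

full = torch.ones(2, 8)
partial = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0]])

print(torch.numel(full) != full.sum())        # tensor(False) -- no padding, mask omitted
print(torch.numel(partial) != partial.sum())  # tensor(True)  -- padding present, mask kept
```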
@@ -239,7 +239,7 @@ def detect_unet_config(state_dict, key_prefix):
        dit_config["micro_condition"] = False
        return dit_config

-    if '{}blocks.block0.blocks.0.block.attn.to_q.0.weight'.format(key_prefix) in state_dict_keys:
+    if '{}blocks.block0.blocks.0.block.attn.to_q.0.weight'.format(key_prefix) in state_dict_keys: # Cosmos
        dit_config = {}
        dit_config["image_model"] = "cosmos"
        dit_config["max_img_h"] = 240
@@ -284,6 +284,21 @@ def detect_unet_config(state_dict, key_prefix):
        dit_config["extra_per_block_abs_pos_emb_type"] = "learnable"
        return dit_config

+    if '{}cap_embedder.1.weight'.format(key_prefix) in state_dict_keys: # Lumina 2
+        dit_config = {}
+        dit_config["image_model"] = "lumina2"
+        dit_config["patch_size"] = 2
+        dit_config["in_channels"] = 16
+        dit_config["dim"] = 2304
+        dit_config["cap_feat_dim"] = 2304
+        dit_config["n_layers"] = 26
+        dit_config["n_heads"] = 24
+        dit_config["n_kv_heads"] = 8
+        dit_config["qk_norm"] = True
+        dit_config["axes_dims"] = [32, 32, 32]
+        dit_config["axes_lens"] = [300, 512, 512]
+        return dit_config
+
    if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys:
        return None

@@ -218,7 +218,7 @@ def is_amd():
|
||||
|
||||
MIN_WEIGHT_MEMORY_RATIO = 0.4
|
||||
if is_nvidia():
|
||||
MIN_WEIGHT_MEMORY_RATIO = 0.2
|
||||
MIN_WEIGHT_MEMORY_RATIO = 0.1
|
||||
|
||||
ENABLE_PYTORCH_ATTENTION = False
|
||||
if args.use_pytorch_cross_attention:
|
||||
@@ -241,6 +241,12 @@ if ENABLE_PYTORCH_ATTENTION:
|
||||
torch.backends.cuda.enable_flash_sdp(True)
|
||||
torch.backends.cuda.enable_mem_efficient_sdp(True)
|
||||
|
||||
try:
|
||||
if is_nvidia() and args.fast:
|
||||
torch.backends.cuda.matmul.allow_fp16_accumulation = True
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
if int(torch_version[0]) == 2 and int(torch_version[2]) >= 5:
|
||||
torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)
|
||||
@@ -535,14 +541,11 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
|
||||
vram_set_state = vram_state
|
||||
lowvram_model_memory = 0
|
||||
if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM) and not force_full_load:
|
||||
model_size = loaded_model.model_memory_required(torch_dev)
|
||||
loaded_memory = loaded_model.model_loaded_memory()
|
||||
current_free_mem = get_free_memory(torch_dev) + loaded_memory
|
||||
|
||||
lowvram_model_memory = max(64 * 1024 * 1024, (current_free_mem - minimum_memory_required), min(current_free_mem * MIN_WEIGHT_MEMORY_RATIO, current_free_mem - minimum_inference_memory()))
|
||||
lowvram_model_memory = max(0.1, lowvram_model_memory - loaded_memory)
|
||||
if model_size <= lowvram_model_memory: #only switch to lowvram if really necessary
|
||||
lowvram_model_memory = 0
|
||||
|
||||
if vram_set_state == VRAMState.NO_VRAM:
|
||||
lowvram_model_memory = 0.1
|
||||
|
||||
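The budget above keeps at least 64 MB of weights on the GPU, leaves the caller's requested headroom free, caps weights at MIN_WEIGHT_MEMORY_RATIO of free memory when the inference reserve would otherwise be squeezed, and (new in this hunk) subtracts weights already resident. A worked example with assumed figures, not taken from the source:

    GB = 1024 ** 3
    MIN_WEIGHT_MEMORY_RATIO = 0.1      # Nvidia value after this change
    current_free_mem = 8 * GB          # free VRAM plus already-loaded weights
    minimum_memory_required = 1 * GB   # headroom requested by the caller
    minimum_inference_memory = 2 * GB  # assumed reserve for activations
    loaded_memory = 1 * GB             # this model's weights already on the GPU

    lowvram_model_memory = max(
        64 * 1024 * 1024,                            # floor: 64 MB of weights
        current_free_mem - minimum_memory_required,  # 7 GB
        min(current_free_mem * MIN_WEIGHT_MEMORY_RATIO,
            current_free_mem - minimum_inference_memory),  # 0.8 GB
    )
    lowvram_model_memory = max(0.1, lowvram_model_memory - loaded_memory)
    print(lowvram_model_memory / GB)  # 6.0 -> room for 6 GB of further weights
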
@@ -31,6 +31,7 @@ class EPS:
        return model_input - model_output * sigma

    def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
        if max_denoise:
            noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
        else:
@@ -61,9 +62,11 @@ class CONST:
        return model_input - model_output * sigma

    def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
        return sigma * noise + (1.0 - sigma) * latent_image

    def inverse_noise_scaling(self, sigma, latent):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (latent.ndim - 1))
        return latent / (1.0 - sigma)

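The new view() calls just broadcast a per-batch sigma over the remaining dimensions. The CONST branch is the flow-matching parameterization: noising is linear interpolation, x_sigma = sigma * noise + (1 - sigma) * x0, and inverse_noise_scaling divides by (1 - sigma) to recover the clean latent at the end of sampling. A quick self-check of that algebra:

    import torch

    def noise_scaling(sigma, noise, latent):
        # interpolate between the clean latent (sigma=0) and pure noise (sigma=1)
        return sigma * noise + (1.0 - sigma) * latent

    def inverse_noise_scaling(sigma, latent):
        return latent / (1.0 - sigma)

    x0, eps, sigma = torch.randn(4), torch.randn(4), torch.tensor(0.3)
    x_sigma = noise_scaling(sigma, eps, x0)
    # subtracting the noise term and rescaling recovers the clean latent
    assert torch.allclose(inverse_noise_scaling(sigma, x_sigma - sigma * eps), x0, atol=1e-6)
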
class ModelSamplingDiscrete(torch.nn.Module):

@@ -58,7 +58,6 @@ def convert_cond(cond):
        temp = c[1].copy()
        model_conds = temp.get("model_conds", {})
        if c[0] is not None:
            model_conds["c_crossattn"] = comfy.conds.CONDCrossAttn(c[0]) #TODO: remove
            temp["cross_attn"] = c[0]
        temp["model_conds"] = model_conds
        temp["uuid"] = uuid.uuid4()

@@ -12,7 +12,6 @@ import collections
from comfy import model_management
import math
import logging
import comfy.samplers
import comfy.sampler_helpers
import comfy.model_patcher
import comfy.patcher_extension
@@ -178,7 +177,7 @@ def finalize_default_conds(model: 'BaseModel', hooked_to_run: dict[comfy.hooks.H
            cond = default_conds[i]
            for x in cond:
                # do get_area_and_mult to get all the expected values
                p = comfy.samplers.get_area_and_mult(x, x_in, timestep)
                p = get_area_and_mult(x, x_in, timestep)
                if p is None:
                    continue
                # replace p's mult with calculated mult
@@ -215,7 +214,7 @@ def _calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Te
                default_c.append(x)
                has_default_conds = True
                continue
            p = comfy.samplers.get_area_and_mult(x, x_in, timestep)
            p = get_area_and_mult(x, x_in, timestep)
            if p is None:
                continue
            if p.hooks is not None:
@@ -687,7 +686,8 @@ class Sampler:

KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
                  "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu",
                  "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm",
                  "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp"]
                  "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp",
                  "gradient_estimation"]

class KSAMPLER(Sampler):
    def __init__(self, sampler_function, extra_options={}, inpaint_options={}):

comfy/sd.py (15 changes)
@@ -36,6 +36,7 @@ import comfy.text_encoders.genmo
import comfy.text_encoders.lt
import comfy.text_encoders.hunyuan_video
import comfy.text_encoders.cosmos
import comfy.text_encoders.lumina2

import comfy.model_patcher
import comfy.lora
@@ -388,8 +389,8 @@ class VAE:
            ddconfig = {'z_channels': 16, 'latent_channels': self.latent_channels, 'z_factor': 1, 'resolution': 1024, 'in_channels': 3, 'out_channels': 3, 'channels': 128, 'channels_mult': [2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [32], 'dropout': 0.0, 'patch_size': 4, 'num_groups': 1, 'temporal_compression': 8, 'spacial_compression': 8}
            self.first_stage_model = comfy.ldm.cosmos.vae.CausalContinuousVideoTokenizer(**ddconfig)
            #TODO: these values are a bit off because this is not a standard VAE
            self.memory_used_decode = lambda shape, dtype: (220 * shape[2] * shape[3] * shape[4] * (8 * 8 * 8)) * model_management.dtype_size(dtype)
            self.memory_used_encode = lambda shape, dtype: (500 * max(shape[2], 2) * shape[3] * shape[4]) * model_management.dtype_size(dtype)
            self.memory_used_decode = lambda shape, dtype: (50 * shape[2] * shape[3] * shape[4] * (8 * 8 * 8)) * model_management.dtype_size(dtype)
            self.memory_used_encode = lambda shape, dtype: (50 * (round((shape[2] + 7) / 8) * 8) * shape[3] * shape[4]) * model_management.dtype_size(dtype)
            self.working_dtypes = [torch.bfloat16, torch.float32]
        else:
            logging.warning("WARNING: No VAE weights detected, VAE not initalized.")
@@ -657,6 +658,7 @@ class CLIPType(Enum):
    HUNYUAN_VIDEO = 9
    PIXART = 10
    COSMOS = 11
    LUMINA2 = 12


def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}):
@@ -675,6 +677,7 @@ class TEModel(Enum):
    T5_BASE = 6
    LLAMA3_8 = 7
    T5_XXL_OLD = 8
    GEMMA_2_2B = 9

def detect_te_model(sd):
    if "text_model.encoder.layers.30.mlp.fc1.weight" in sd:
@@ -693,6 +696,8 @@ def detect_te_model(sd):
        return TEModel.T5_XXL_OLD
    if "encoder.block.0.layer.0.SelfAttention.k.weight" in sd:
        return TEModel.T5_BASE
    if 'model.layers.0.post_feedforward_layernorm.weight' in sd:
        return TEModel.GEMMA_2_2B
    if "model.layers.0.post_attention_layernorm.weight" in sd:
        return TEModel.LLAMA3_8
    return None
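detect_te_model shows the detection idiom used throughout this diff: each text encoder is identified by a state-dict key that only it contains. Ordering matters here, since Gemma 2 checkpoints also contain post_attention_layernorm weights, so the more specific post_feedforward_layernorm check has to run before the Llama one. A minimal sketch of the idea:

    # Fingerprint a checkpoint by keys unique to each architecture.
    def detect(sd):
        if "model.layers.0.post_feedforward_layernorm.weight" in sd:
            return "gemma2_2b"   # Gemma 2 has extra per-layer norms
        if "model.layers.0.post_attention_layernorm.weight" in sd:
            return "llama3"
        return None

    print(detect({"model.layers.0.post_attention_layernorm.weight": 0}))  # llama3
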
@@ -730,6 +735,7 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
        if "text_projection" in clip_data[i]:
            clip_data[i]["text_projection.weight"] = clip_data[i]["text_projection"].transpose(0, 1) #old models saved with the CLIPSave node

    tokenizer_data = {}
    clip_target = EmptyClass()
    clip_target.params = {}
    if len(clip_data) == 1:
@@ -769,6 +775,10 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
        elif te_model == TEModel.T5_BASE:
            clip_target.clip = comfy.text_encoders.sa_t5.SAT5Model
            clip_target.tokenizer = comfy.text_encoders.sa_t5.SAT5Tokenizer
        elif te_model == TEModel.GEMMA_2_2B:
            clip_target.clip = comfy.text_encoders.lumina2.te(**llama_detect(clip_data))
            clip_target.tokenizer = comfy.text_encoders.lumina2.LuminaTokenizer
            tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None)
        else:
            if clip_type == CLIPType.SD3:
                clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=True, clip_g=False, t5=False)
@@ -798,7 +808,6 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
            clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer

    parameters = 0
    tokenizer_data = {}
    for c in clip_data:
        parameters += comfy.utils.calculate_parameters(c)
        tokenizer_data, model_options = comfy.text_encoders.long_clipl.model_options_long_clip(c, tokenizer_data, model_options)

@@ -421,10 +421,10 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
    return embed_out

class SDTokenizer:
    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, tokenizer_data={}):
    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, tokenizer_data={}, tokenizer_args={}):
        if tokenizer_path is None:
            tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
        self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path)
        self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args)
        self.max_length = max_length
        self.min_length = min_length
        self.end_token = None
@@ -585,9 +585,14 @@ class SDTokenizer:
        return {}

class SD1Tokenizer:
    def __init__(self, embedding_directory=None, tokenizer_data={}, clip_name="l", tokenizer=SDTokenizer):
        self.clip_name = clip_name
        self.clip = "clip_{}".format(self.clip_name)
    def __init__(self, embedding_directory=None, tokenizer_data={}, clip_name="l", tokenizer=SDTokenizer, name=None):
        if name is not None:
            self.clip_name = name
            self.clip = "{}".format(self.clip_name)
        else:
            self.clip_name = clip_name
            self.clip = "clip_{}".format(self.clip_name)

        tokenizer = tokenizer_data.get("{}_tokenizer_class".format(self.clip), tokenizer)
        setattr(self, self.clip, tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data))

@@ -600,7 +605,7 @@ class SD1Tokenizer:
        return getattr(self, self.clip).untokenize(token_weight_pair)

    def state_dict(self):
        return {}
        return getattr(self, self.clip).state_dict()

class SD1CheckpointClipModel(SDClipModel):
    def __init__(self, device="cpu", dtype=None, model_options={}):

@@ -15,6 +15,7 @@ import comfy.text_encoders.genmo
import comfy.text_encoders.lt
import comfy.text_encoders.hunyuan_video
import comfy.text_encoders.cosmos
import comfy.text_encoders.lumina2

from . import supported_models_base
from . import latent_formats
@@ -788,7 +789,7 @@ class HunyuanVideo(supported_models_base.BASE):
    unet_extra_config = {}
    latent_format = latent_formats.HunyuanVideo

    memory_usage_factor = 2.0 #TODO
    memory_usage_factor = 1.8 #TODO

    supported_inference_dtypes = [torch.bfloat16, torch.float32]

@@ -839,7 +840,7 @@ class CosmosT2V(supported_models_base.BASE):
    unet_extra_config = {}
    latent_format = latent_formats.Cosmos1CV8x8x8

    memory_usage_factor = 2.4 #TODO
    memory_usage_factor = 1.6 #TODO

    supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] #TODO

@@ -865,6 +866,35 @@ class CosmosI2V(CosmosT2V):
        out = model_base.CosmosVideo(self, image_to_video=True, device=device)
        return out

models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo, CosmosT2V, CosmosI2V]

class Lumina2(supported_models_base.BASE):
    unet_config = {
        "image_model": "lumina2",
    }

    sampling_settings = {
        "multiplier": 1.0,
        "shift": 6.0,
    }

    memory_usage_factor = 1.2

    unet_extra_config = {}
    latent_format = latent_formats.Flux

    supported_inference_dtypes = [torch.bfloat16, torch.float32]

    vae_key_prefix = ["vae."]
    text_encoder_key_prefix = ["text_encoders."]

    def get_model(self, state_dict, prefix="", device=None):
        out = model_base.Lumina2(self, device=device)
        return out

    def clip_target(self, state_dict={}):
        pref = self.text_encoder_key_prefix[0]
        hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}gemma2_2b.transformer.".format(pref))
        return supported_models_base.ClipTarget(comfy.text_encoders.lumina2.LuminaTokenizer, comfy.text_encoders.lumina2.te(**hunyuan_detect))

models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2]

models += [SVD_img2vid]

@@ -118,7 +118,7 @@ class BertModel_(torch.nn.Module):
        mask = None
        if attention_mask is not None:
            mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
            mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
            mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)

        x, i = self.encoder(x, mask, intermediate_output)
        return x, i

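Swapping float("-inf") for -torch.finfo(x.dtype).max matters when a row of the mask is fully masked: softmax over an all -inf row produces NaN, while the largest finite negative value degrades gracefully to a uniform row. A small demonstration:

    import torch

    row_inf = torch.full((4,), float("-inf"))
    row_fin = torch.full((4,), -torch.finfo(torch.float32).max)

    print(torch.softmax(row_inf, dim=-1))  # tensor([nan, nan, nan, nan])
    print(torch.softmax(row_fin, dim=-1))  # tensor([0.25, 0.25, 0.25, 0.25])
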
@@ -1,6 +1,5 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Optional, Any

@@ -21,15 +20,41 @@ class Llama2Config:
    max_position_embeddings: int = 8192
    rms_norm_eps: float = 1e-5
    rope_theta: float = 500000.0
    transformer_type: str = "llama"
    head_dim = 128
    rms_norm_add = False
    mlp_activation = "silu"

@dataclass
class Gemma2_2B_Config:
    vocab_size: int = 256000
    hidden_size: int = 2304
    intermediate_size: int = 9216
    num_hidden_layers: int = 26
    num_attention_heads: int = 8
    num_key_value_heads: int = 4
    max_position_embeddings: int = 8192
    rms_norm_eps: float = 1e-6
    rope_theta: float = 10000.0
    transformer_type: str = "gemma2"
    head_dim = 256
    rms_norm_add = True
    mlp_activation = "gelu_pytorch_tanh"

class RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-5, device=None, dtype=None):
    def __init__(self, dim: int, eps: float = 1e-5, add=False, device=None, dtype=None):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.empty(dim, device=device, dtype=dtype))
        self.add = add

    def forward(self, x: torch.Tensor):
        return comfy.ldm.common_dit.rms_norm(x, self.weight, self.eps)
        w = self.weight
        if self.add:
            w = w + 1.0

        return comfy.ldm.common_dit.rms_norm(x, w, self.eps)

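The add flag encodes Gemma's convention of storing the RMSNorm gain as an offset from one, so the effective scale is (1 + weight); Llama stores the scale directly. A standalone sketch in plain PyTorch (the real code routes through comfy.ldm.common_dit.rms_norm):

    import torch

    def rms_norm(x, weight, eps=1e-6, add=False):
        # normalize by the root mean square over the feature dimension
        norm = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
        w = weight + 1.0 if add else weight  # Gemma: gain stored as offset from 1
        return norm * w

    x = torch.randn(2, 8)
    w = torch.zeros(8)            # a freshly initialized Gemma weight
    y = rms_norm(x, w, add=True)  # behaves as unit gain
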
def rotate_half(x):
@@ -68,13 +93,15 @@ class Attention(nn.Module):
        self.num_heads = config.num_attention_heads
        self.num_kv_heads = config.num_key_value_heads
        self.hidden_size = config.hidden_size
        self.head_dim = self.hidden_size // self.num_heads
        self.head_dim = config.head_dim
        self.inner_size = self.num_heads * self.head_dim

        ops = ops or nn
        self.q_proj = ops.Linear(config.hidden_size, config.hidden_size, bias=False, device=device, dtype=dtype)
        self.q_proj = ops.Linear(config.hidden_size, self.inner_size, bias=False, device=device, dtype=dtype)
        self.k_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype)
        self.v_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype)
        self.o_proj = ops.Linear(config.hidden_size, config.hidden_size, bias=False, device=device, dtype=dtype)
        self.o_proj = ops.Linear(self.inner_size, config.hidden_size, bias=False, device=device, dtype=dtype)

    def forward(
        self,
@@ -84,7 +111,6 @@ class Attention(nn.Module):
        optimized_attention=None,
    ):
        batch_size, seq_length, _ = hidden_states.shape

        xq = self.q_proj(hidden_states)
        xk = self.k_proj(hidden_states)
        xv = self.v_proj(hidden_states)
@@ -108,9 +134,13 @@ class MLP(nn.Module):
        self.gate_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype)
        self.up_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype)
        self.down_proj = ops.Linear(config.intermediate_size, config.hidden_size, bias=False, device=device, dtype=dtype)
        if config.mlp_activation == "silu":
            self.activation = torch.nn.functional.silu
        elif config.mlp_activation == "gelu_pytorch_tanh":
            self.activation = lambda a: torch.nn.functional.gelu(a, approximate="tanh")

    def forward(self, x):
        return self.down_proj(F.silu(self.gate_proj(x)) * self.up_proj(x))
        return self.down_proj(self.activation(self.gate_proj(x)) * self.up_proj(x))

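Making the gate activation configurable lets one MLP serve both families: Llama gates with SiLU, Gemma 2 with the tanh-approximated GELU that PyTorch exposes through the approximate argument (hence the "gelu_pytorch_tanh" name). The two are easy to compare directly:

    import torch
    import torch.nn.functional as F

    x = torch.linspace(-3, 3, 5)
    print(F.silu(x))                      # Llama gate activation
    print(F.gelu(x, approximate="tanh"))  # Gemma 2 "gelu_pytorch_tanh"
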
class TransformerBlock(nn.Module):
    def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
@@ -146,6 +176,45 @@ class TransformerBlock(nn.Module):

        return x

class TransformerBlockGemma2(nn.Module):
    def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
        super().__init__()
        self.self_attn = Attention(config, device=device, dtype=dtype, ops=ops)
        self.mlp = MLP(config, device=device, dtype=dtype, ops=ops)
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
        self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
        self.pre_feedforward_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
        self.post_feedforward_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        freqs_cis: Optional[torch.Tensor] = None,
        optimized_attention=None,
    ):
        # Self Attention
        residual = x
        x = self.input_layernorm(x)
        x = self.self_attn(
            hidden_states=x,
            attention_mask=attention_mask,
            freqs_cis=freqs_cis,
            optimized_attention=optimized_attention,
        )

        x = self.post_attention_layernorm(x)
        x = residual + x

        # MLP
        residual = x
        x = self.pre_feedforward_layernorm(x)
        x = self.mlp(x)
        x = self.post_feedforward_layernorm(x)
        x = residual + x

        return x

class Llama2_(nn.Module):
    def __init__(self, config, device=None, dtype=None, ops=None):
        super().__init__()
@@ -158,17 +227,27 @@ class Llama2_(nn.Module):
            device=device,
            dtype=dtype
        )
        if self.config.transformer_type == "gemma2":
            transformer = TransformerBlockGemma2
            self.normalize_in = True
        else:
            transformer = TransformerBlock
            self.normalize_in = False

        self.layers = nn.ModuleList([
            TransformerBlock(config, device=device, dtype=dtype, ops=ops)
            transformer(config, device=device, dtype=dtype, ops=ops)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, device=device, dtype=dtype)
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
        # self.lm_head = ops.Linear(config.hidden_size, config.vocab_size, bias=False, device=device, dtype=dtype)

    def forward(self, x, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None):
        x = self.embed_tokens(x, out_dtype=dtype)

        freqs_cis = precompute_freqs_cis(self.config.hidden_size // self.config.num_attention_heads,
        if self.normalize_in:
            x *= self.config.hidden_size ** 0.5

        freqs_cis = precompute_freqs_cis(self.config.head_dim,
                                         x.shape[1],
                                         self.config.rope_theta,
                                         device=x.device)
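Two Gemma-specific details land in this hunk: token embeddings are scaled by sqrt(hidden_size) before the first block (the normalize_in path), and the RoPE table is now built from config.head_dim, which Gemma decouples from hidden_size // num_attention_heads (2304 // 8 would give 288, but Gemma 2 uses head_dim = 256). A sketch of the input scaling, assuming plain tensors:

    import torch

    hidden_size = 2304
    x = torch.randn(1, 4, hidden_size)  # embedded tokens
    x = x * hidden_size ** 0.5          # Gemma scales embeddings entering the blocks
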
@@ -206,16 +285,7 @@ class Llama2_(nn.Module):

        return x, intermediate

class Llama2(torch.nn.Module):
    def __init__(self, config_dict, dtype, device, operations):
        super().__init__()
        config = Llama2Config(**config_dict)
        self.num_layers = config.num_hidden_layers

        self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
        self.dtype = dtype

class BaseLlama:
    def get_input_embeddings(self):
        return self.model.embed_tokens

@@ -224,3 +294,23 @@ class Llama2(torch.nn.Module):

    def forward(self, input_ids, *args, **kwargs):
        return self.model(input_ids, *args, **kwargs)


class Llama2(BaseLlama, torch.nn.Module):
    def __init__(self, config_dict, dtype, device, operations):
        super().__init__()
        config = Llama2Config(**config_dict)
        self.num_layers = config.num_hidden_layers

        self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
        self.dtype = dtype


class Gemma2_2B(BaseLlama, torch.nn.Module):
    def __init__(self, config_dict, dtype, device, operations):
        super().__init__()
        config = Gemma2_2B_Config(**config_dict)
        self.num_layers = config.num_hidden_layers

        self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
        self.dtype = dtype

comfy/text_encoders/lumina2.py (new file, 44 lines)
@@ -0,0 +1,44 @@
from comfy import sd1_clip
from .spiece_tokenizer import SPieceTokenizer
import comfy.text_encoders.llama


class Gemma2BTokenizer(sd1_clip.SDTokenizer):
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        tokenizer = tokenizer_data.get("spiece_model", None)
        super().__init__(tokenizer, pad_with_end=False, embedding_size=2304, embedding_key='gemma2_2b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_args={"add_bos": True, "add_eos": False})

    def state_dict(self):
        return {"spiece_model": self.tokenizer.serialize_model()}


class LuminaTokenizer(sd1_clip.SD1Tokenizer):
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="gemma2_2b", tokenizer=Gemma2BTokenizer)


class Gemma2_2BModel(sd1_clip.SDClipModel):
    def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}):
        llama_scaled_fp8 = model_options.get("llama_scaled_fp8", None)
        if llama_scaled_fp8 is not None:
            model_options = model_options.copy()
            model_options["scaled_fp8"] = llama_scaled_fp8

        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Gemma2_2B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)


class LuminaModel(sd1_clip.SD1ClipModel):
    def __init__(self, device="cpu", dtype=None, model_options={}):
        super().__init__(device=device, dtype=dtype, name="gemma2_2b", clip_model=Gemma2_2BModel, model_options=model_options)


def te(dtype_llama=None, llama_scaled_fp8=None):
    class LuminaTEModel_(LuminaModel):
        def __init__(self, device="cpu", dtype=None, model_options={}):
            if llama_scaled_fp8 is not None and "llama_scaled_fp8" not in model_options:
                model_options = model_options.copy()
                model_options["llama_scaled_fp8"] = llama_scaled_fp8
            if dtype_llama is not None:
                dtype = dtype_llama
            super().__init__(device=device, dtype=dtype, model_options=model_options)
    return LuminaTEModel_

@@ -1,21 +1,21 @@
import torch

class SPieceTokenizer:
    add_eos = True

    @staticmethod
    def from_pretrained(path):
        return SPieceTokenizer(path)
    def from_pretrained(path, **kwargs):
        return SPieceTokenizer(path, **kwargs)

    def __init__(self, tokenizer_path):
    def __init__(self, tokenizer_path, add_bos=False, add_eos=True):
        self.add_bos = add_bos
        self.add_eos = add_eos
        import sentencepiece
        if torch.is_tensor(tokenizer_path):
            tokenizer_path = tokenizer_path.numpy().tobytes()

        if isinstance(tokenizer_path, bytes):
            self.tokenizer = sentencepiece.SentencePieceProcessor(model_proto=tokenizer_path, add_eos=self.add_eos)
            self.tokenizer = sentencepiece.SentencePieceProcessor(model_proto=tokenizer_path, add_bos=self.add_bos, add_eos=self.add_eos)
        else:
            self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=tokenizer_path, add_eos=self.add_eos)
            self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=tokenizer_path, add_bos=self.add_bos, add_eos=self.add_eos)

    def get_vocab(self):
        out = {}

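The tokenizer changes forward add_bos/add_eos down to SentencePiece; the Gemma tokenizer above depends on this (add_bos=True, add_eos=False, BOS id 2). Assuming a local SentencePiece model file (the path here is a placeholder, not from the source), usage looks like:

    import sentencepiece

    sp = sentencepiece.SentencePieceProcessor(
        model_file="tokenizer.model", add_bos=True, add_eos=False)
    ids = sp.encode("a photo of a cat")  # ids[0] should be the BOS id
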
@@ -203,7 +203,7 @@ class T5Stack(torch.nn.Module):
        mask = None
        if attention_mask is not None:
            mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
            mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
            mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)

        intermediate = None
        optimized_attention = optimized_attention_for_device(x.device, mask=attention_mask is not None, small_input=True)

@@ -43,13 +43,23 @@ if hasattr(torch.serialization, "add_safe_globals"): # TODO: this was added in
    torch.serialization.add_safe_globals([ModelCheckpoint, scalar, dtype, Float64DType, encode])
    ALWAYS_SAFE_LOAD = True
    logging.info("Checkpoint files will always be loaded safely.")
else:
    logging.info("Warning, you are using an old pytorch version and some ckpt/pt files might be loaded unsafely. Upgrading to 2.4 or above is recommended.")

def load_torch_file(ckpt, safe_load=False, device=None):
    if device is None:
        device = torch.device("cpu")
    if ckpt.lower().endswith(".safetensors") or ckpt.lower().endswith(".sft"):
        sd = safetensors.torch.load_file(ckpt, device=device.type)
        try:
            sd = safetensors.torch.load_file(ckpt, device=device.type)
        except Exception as e:
            if len(e.args) > 0:
                message = e.args[0]
                if "HeaderTooLarge" in message:
                    raise ValueError("{}\n\nFile path: {}\n\nThe safetensors file is corrupt or invalid. Make sure this is actually a safetensors file and not a ckpt or pt or other filetype.".format(message, ckpt))
                if "MetadataIncompleteBuffer" in message:
                    raise ValueError("{}\n\nFile path: {}\n\nThe safetensors file is corrupt/incomplete. Check the file size and make sure you have copied/downloaded it correctly.".format(message, ckpt))
            raise e
    else:
        if safe_load or ALWAYS_SAFE_LOAD:
            pl_sd = torch.load(ckpt, map_location=device, weights_only=True)

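With this change a truncated download or a pickle checkpoint renamed to .safetensors surfaces as a ValueError that names the file, instead of a bare safetensors error. A hedged sketch of what a caller sees, using the safetensors API directly:

    import safetensors.torch

    try:
        sd = safetensors.torch.load_file("model.safetensors", device="cpu")
    except Exception as e:
        # load_torch_file above wraps these into a ValueError carrying the
        # file path; "HeaderTooLarge" usually means the file is not actually
        # a safetensors file, "MetadataIncompleteBuffer" a truncated copy.
        print(f"Bad checkpoint: {e}")
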
@@ -71,8 +71,8 @@ class CosmosImageToVideoLatent:
        mask[:, :, -latent_temp.shape[-3]:] *= 0.0

        out_latent = {}
        out_latent["samples"] = latent
        out_latent["noise_mask"] = mask
        out_latent["samples"] = latent.repeat((batch_size, ) + (1,) * (latent.ndim - 1))
        out_latent["noise_mask"] = mask.repeat((batch_size, ) + (1,) * (mask.ndim - 1))
        return (out_latent,)

@@ -38,7 +38,26 @@ class FluxGuidance:
        return (c, )


class FluxDisableGuidance:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "conditioning": ("CONDITIONING", ),
                    }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "advanced/conditioning/flux"
    DESCRIPTION = "This node completely disables the guidance embed on Flux and Flux like models"

    def append(self, conditioning):
        c = node_helpers.conditioning_set_values(conditioning, {"guidance": None})
        return (c, )


NODE_CLASS_MAPPINGS = {
    "CLIPTextEncodeFlux": CLIPTextEncodeFlux,
    "FluxGuidance": FluxGuidance,
    "FluxDisableGuidance": FluxDisableGuidance,
}

@@ -2,10 +2,14 @@ import comfy.utils
import comfy_extras.nodes_post_processing
import torch

def reshape_latent_to(target_shape, latent):

def reshape_latent_to(target_shape, latent, repeat_batch=True):
    if latent.shape[1:] != target_shape[1:]:
        latent = comfy.utils.common_upscale(latent, target_shape[3], target_shape[2], "bilinear", "center")
    return comfy.utils.repeat_to_batch_size(latent, target_shape[0])
        latent = comfy.utils.common_upscale(latent, target_shape[-1], target_shape[-2], "bilinear", "center")
    if repeat_batch:
        return comfy.utils.repeat_to_batch_size(latent, target_shape[0])
    else:
        return latent

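repeat_batch=False exists for LatentBatch just below: when concatenating two latent batches along dim 0, only spatial resizing is wanted, while LatentAdd-style callers still need batch repetition; indexing shape[-1]/shape[-2] instead of shape[3]/shape[2] also keeps the helper working for 5-D video latents. A rough equivalent of the LatentBatch path using plain torch ops:

    import torch
    import torch.nn.functional as F

    s1 = torch.randn(2, 4, 64, 64)
    s2 = torch.randn(1, 4, 32, 32)
    # resize s2 to s1's spatial size, keep its own batch size, then concat
    s2 = F.interpolate(s2, size=s1.shape[-2:], mode="bilinear")
    out = torch.cat((s1, s2), dim=0)
    print(out.shape)  # torch.Size([3, 4, 64, 64])
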
class LatentAdd:
@@ -116,8 +120,7 @@ class LatentBatch:
        s1 = samples1["samples"]
        s2 = samples2["samples"]

        if s1.shape[1:] != s2.shape[1:]:
            s2 = comfy.utils.common_upscale(s2, s1.shape[3], s1.shape[2], "bilinear", "center")
        s2 = reshape_latent_to(s1.shape, s2, repeat_batch=False)
        s = torch.cat((s1, s2), dim=0)
        samples_out["samples"] = s
        samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])])

@@ -19,11 +19,7 @@ class Load3D():
            "image": ("LOAD_3D", {}),
            "width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
            "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
            "show_grid": ([True, False],),
            "camera_type": (["perspective", "orthographic"],),
            "view": (["front", "right", "top", "isometric"],),
            "material": (["original", "normal", "wireframe", "depth"],),
            "bg_color": ("STRING", {"default": "#000000", "multiline": False}),
            "light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
            "up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
            "fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
@@ -69,14 +65,9 @@ class Load3DAnimation():
            "image": ("LOAD_3D_ANIMATION", {}),
            "width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
            "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
            "show_grid": ([True, False],),
            "camera_type": (["perspective", "orthographic"],),
            "view": (["front", "right", "top", "isometric"],),
            "material": (["original", "normal", "wireframe", "depth"],),
            "bg_color": ("STRING", {"default": "#000000", "multiline": False}),
            "light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
            "up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
            "animation_speed": (["0.1", "0.5", "1", "1.5", "2"], {"default": "1"}),
            "fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
        }}

@@ -109,11 +100,29 @@ class Preview3D():
    def INPUT_TYPES(s):
        return {"required": {
            "model_file": ("STRING", {"default": "", "multiline": False}),
            "show_grid": ([True, False],),
            "camera_type": (["perspective", "orthographic"],),
            "view": (["front", "right", "top", "isometric"],),
            "material": (["original", "normal", "wireframe", "depth"],),
            "bg_color": ("STRING", {"default": "#000000", "multiline": False}),
            "light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
            "up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
            "fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
        }}

    OUTPUT_NODE = True
    RETURN_TYPES = ()

    CATEGORY = "3d"

    FUNCTION = "process"
    EXPERIMENTAL = True

    def process(self, model_file, **kwargs):
        return {"ui": {"model_file": [model_file]}, "result": ()}

class Preview3DAnimation():
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model_file": ("STRING", {"default": "", "multiline": False}),
            "material": (["original", "normal", "wireframe", "depth"],),
            "light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
            "up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
            "fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
@@ -133,11 +142,13 @@ class Preview3D():
NODE_CLASS_MAPPINGS = {
    "Load3D": Load3D,
    "Load3DAnimation": Load3DAnimation,
    "Preview3D": Preview3D
    "Preview3D": Preview3D,
    "Preview3DAnimation": Preview3DAnimation
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "Load3D": "Load 3D",
    "Load3DAnimation": "Load 3D - Animation",
    "Preview3D": "Preview 3D"
    "Preview3D": "Preview 3D",
    "Preview3DAnimation": "Preview 3D - Animation"
}

@@ -196,6 +196,54 @@ class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):

        return {"required": arg_dict}

class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["pos_embedder."] = argument
        arg_dict["extra_pos_embedder."] = argument
        arg_dict["x_embedder."] = argument
        arg_dict["t_embedder."] = argument
        arg_dict["affline_norm."] = argument

        for i in range(28):
            arg_dict["blocks.block{}.".format(i)] = argument

        arg_dict["final_layer."] = argument

        return {"required": arg_dict}

class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["pos_embedder."] = argument
        arg_dict["extra_pos_embedder."] = argument
        arg_dict["x_embedder."] = argument
        arg_dict["t_embedder."] = argument
        arg_dict["affline_norm."] = argument

        for i in range(36):
            arg_dict["blocks.block{}.".format(i)] = argument

        arg_dict["final_layer."] = argument

        return {"required": arg_dict}

NODE_CLASS_MAPPINGS = {
    "ModelMergeSD1": ModelMergeSD1,
    "ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks
@@ -206,4 +254,6 @@ NODE_CLASS_MAPPINGS = {
    "ModelMergeSD35_Large": ModelMergeSD35_Large,
    "ModelMergeMochiPreview": ModelMergeMochiPreview,
    "ModelMergeLTXV": ModelMergeLTXV,
    "ModelMergeCosmos7B": ModelMergeCosmos7B,
    "ModelMergeCosmos14B": ModelMergeCosmos14B,
}

@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.3.10"
__version__ = "0.3.14"

@@ -7,11 +7,18 @@ import logging
from typing import Literal
from collections.abc import Collection

supported_pt_extensions: set[str] = {'.ckpt', '.pt', '.bin', '.pth', '.safetensors', '.pkl', '.sft'}
from comfy.cli_args import args

supported_pt_extensions: set[str] = {'.ckpt', '.pt', '.pt2', '.bin', '.pth', '.safetensors', '.pkl', '.sft'}

folder_names_and_paths: dict[str, tuple[list[str], set[str]]] = {}

base_path = os.path.dirname(os.path.realpath(__file__))
# --base-directory - Resets all default paths configured in folder_paths with a new base path
if args.base_directory:
    base_path = os.path.abspath(args.base_directory)
else:
    base_path = os.path.dirname(os.path.realpath(__file__))

models_dir = os.path.join(base_path, "models")
folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_pt_extensions)
folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".yaml"])
@@ -39,10 +46,10 @@ folder_names_and_paths["photomaker"] = ([os.path.join(models_dir, "photomaker")]

folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""})

output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
user_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "user")
output_directory = os.path.join(base_path, "output")
temp_directory = os.path.join(base_path, "temp")
input_directory = os.path.join(base_path, "input")
user_directory = os.path.join(base_path, "user")

filename_list_cache: dict[str, tuple[list[str], dict[str, float], float]] = {}

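The new --base-directory flag relocates every default folder (models, output, temp, input, user) in one move, while the fallback keeps paths relative to the ComfyUI source tree. The resolution order is easy to check in isolation:

    import os

    def resolve_base(base_directory_arg, file_dir="/opt/ComfyUI"):
        # mirrors the logic above: an explicit --base-directory wins
        if base_directory_arg:
            return os.path.abspath(base_directory_arg)
        return file_dir

    base = resolve_base("/data/comfy")
    print(os.path.join(base, "models"))  # /data/comfy/models
    print(os.path.join(base, "output"))  # /data/comfy/output
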
@@ -12,7 +12,10 @@ MAX_PREVIEW_RESOLUTION = args.preview_size
def preview_to_image(latent_image):
    latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1) # change scale from -1..1 to 0..1
                     .mul(0xFF) # to 0..255
                     ).to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device))
                     )
    if comfy.model_management.directml_enabled:
        latents_ubyte = latents_ubyte.to(dtype=torch.uint8)
    latents_ubyte = latents_ubyte.to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device))

    return Image.fromarray(latents_ubyte.numpy())

main.py (14 changes)
@@ -138,8 +138,8 @@ import server
from server import BinaryEventTypes
import nodes
import comfy.model_management
from app.database.db import can_create_session, init_db
from app.model_processor import model_processor
import comfyui_version


def cuda_malloc_warning():
    device = comfy.model_management.get_torch_device()
@@ -264,11 +264,6 @@ def start_comfyui(asyncio_loop=None):

    cuda_malloc_warning()

    try:
        init_db()
    except Exception as e:
        logging.error(f"Failed to initialize database. Please report this error as in future the database will be required: {e}")

    prompt_server.add_routes()
    hijack_progress(prompt_server)

@@ -276,10 +271,6 @@ def start_comfyui(asyncio_loop=None):

    if args.quick_test_for_ci:
        exit(0)

    # Scan for changed model files and update db
    if can_create_session():
        model_processor.run()

    os.makedirs(folder_paths.get_temp_directory(), exist_ok=True)
    call_on_start = None
@@ -303,6 +294,7 @@ def start_comfyui(asyncio_loop=None):

if __name__ == "__main__":
    # Running directly, just start ComfyUI.
    logging.info("ComfyUI version: {}".format(comfyui_version.__version__))
    event_loop, _, start_all_func = start_comfyui()
    try:
        event_loop.run_until_complete(start_all_func())

nodes.py (15 changes)
@@ -63,6 +63,8 @@ class CLIPTextEncode(ComfyNodeABC):
    DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."

    def encode(self, clip, text):
        if clip is None:
            raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.")
        tokens = clip.tokenize(text)
        return (clip.encode_from_tokens_scheduled(tokens), )

@@ -912,7 +914,7 @@ class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ),
                              "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos"], ),
                              "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2"], ),
                              },
                "optional": {
                              "device": (["default", "cpu"], {"advanced": True}),
@@ -922,7 +924,7 @@ class CLIPLoader:

    CATEGORY = "advanced/loaders"

    DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 / clip-g / clip-l\nstable_audio: t5\nmochi: t5\ncosmos: old t5 xxl"
    DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 / clip-g / clip-l\nstable_audio: t5\nmochi: t5\ncosmos: old t5 xxl\nlumina2: gemma 2 2B"

    def load_clip(self, clip_name, type="stable_diffusion", device="default"):
        if type == "stable_cascade":
@@ -937,6 +939,10 @@ class CLIPLoader:
            clip_type = comfy.sd.CLIPType.LTXV
        elif type == "pixart":
            clip_type = comfy.sd.CLIPType.PIXART
        elif type == "cosmos":
            clip_type = comfy.sd.CLIPType.COSMOS
        elif type == "lumina2":
            clip_type = comfy.sd.CLIPType.LUMINA2
        else:
            clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION

@@ -1058,10 +1064,11 @@ class StyleModelApply:
        for t in conditioning:
            (txt, keys) = t
            keys = keys.copy()
            if strength_type == "attn_bias" and strength != 1.0:
            # even if the strength is 1.0 (i.e, no change), if there's already a mask, we have to add to it
            if "attention_mask" in keys or (strength_type == "attn_bias" and strength != 1.0):
                # math.log raises an error if the argument is zero
                # torch.log returns -inf, which is what we want
                attn_bias = torch.log(torch.Tensor([strength]))
                attn_bias = torch.log(torch.Tensor([strength if strength_type == "attn_bias" else 1.0]))
                # get the size of the mask image
                mask_ref_size = keys.get("attention_mask_img_shape", (1, 1))
                n_ref = mask_ref_size[0] * mask_ref_size[1]

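The reworked condition also covers the case where an earlier StyleModelApply left an attention mask in the conditioning: the bias must then be merged even at strength 1.0, where torch.log yields a neutral additive bias of 0 (and log(0) gives -inf rather than raising, which is the desired "mask out" value). The log is what turns a multiplicative strength into an additive attention bias:

    import torch

    for strength in (0.0, 0.5, 1.0, 2.0):
        bias = torch.log(torch.Tensor([strength]))
        # softmax(scores + log(s)) scales this token's attention weight by s
        print(strength, bias.item())
    # 0.0 -> -inf, 0.5 -> -0.693..., 1.0 -> 0.0, 2.0 -> 0.693...
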
@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.3.10"
version = "0.3.14"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.9"

@@ -2,6 +2,7 @@ torch
torchsde
torchvision
torchaudio
numpy>=1.25.0
einops
transformers>=4.28.1
tokenizers>=0.13.3
@@ -13,8 +14,6 @@ Pillow
scipy
tqdm
psutil
alembic
SQLAlchemy

#non essential dependencies:
kornia>=0.7.1

server.py (23 changes)
@@ -52,6 +52,20 @@ async def cache_control(request: web.Request, handler):
    response.headers.setdefault('Cache-Control', 'no-cache')
    return response


@web.middleware
async def compress_body(request: web.Request, handler):
    accept_encoding = request.headers.get("Accept-Encoding", "")
    response: web.Response = await handler(request)
    if not isinstance(response, web.Response):
        return response
    if response.content_type not in ["application/json", "text/plain"]:
        return response
    if response.body and "gzip" in accept_encoding:
        response.enable_compression()
    return response

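The middleware is opt-in via the enable_compress_response_body flag wired up further down, and only compresses JSON and plain-text bodies when the client advertises gzip. A self-contained sketch of wiring and exercising it outside ComfyUI:

    from aiohttp import web

    @web.middleware
    async def compress_body(request: web.Request, handler):
        accept_encoding = request.headers.get("Accept-Encoding", "")
        response = await handler(request)
        if not isinstance(response, web.Response):
            return response
        if response.content_type not in ["application/json", "text/plain"]:
            return response
        if response.body and "gzip" in accept_encoding:
            response.enable_compression()  # aiohttp adds Content-Encoding: gzip
        return response

    async def hello(request):
        return web.json_response({"ok": True})

    app = web.Application(middlewares=[compress_body])
    app.add_routes([web.get("/", hello)])
    # web.run_app(app)  # then: curl -i -H 'Accept-Encoding: gzip' localhost:8080
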
def create_cors_middleware(allowed_origin: str):
|
||||
@web.middleware
|
||||
async def cors_middleware(request: web.Request, handler):
|
||||
@@ -150,6 +164,9 @@ class PromptServer():
|
||||
self.number = 0
|
||||
|
||||
middlewares = [cache_control]
|
||||
if args.enable_compress_response_body:
|
||||
middlewares.append(compress_body)
|
||||
|
||||
if args.enable_cors_header:
|
||||
middlewares.append(create_cors_middleware(args.enable_cors_header))
|
||||
else:
|
||||
@@ -329,6 +346,9 @@ class PromptServer():
|
||||
original_ref = json.loads(post.get("original_ref"))
|
||||
filename, output_dir = folder_paths.annotated_filepath(original_ref['filename'])
|
||||
|
||||
if not filename:
|
||||
return web.Response(status=400)
|
||||
|
||||
# validation for security: prevent accessing arbitrary path
|
||||
if filename[0] == '/' or '..' in filename:
|
||||
return web.Response(status=400)
|
||||
@@ -370,6 +390,9 @@ class PromptServer():
|
||||
filename = request.rel_url.query["filename"]
|
||||
filename,output_dir = folder_paths.annotated_filepath(filename)
|
||||
|
||||
if not filename:
|
||||
return web.Response(status=400)
|
||||
|
||||
# validation for security: prevent accessing arbitrary path
|
||||
if filename[0] == '/' or '..' in filename:
|
||||
return web.Response(status=400)
|
||||
|
||||
@@ -2,39 +2,146 @@ import pytest
|
||||
from aiohttp import web
|
||||
from unittest.mock import patch
|
||||
from app.custom_node_manager import CustomNodeManager
|
||||
import json
|
||||
|
||||
pytestmark = (
|
||||
pytest.mark.asyncio
|
||||
) # This applies the asyncio mark to all test functions in the module
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def custom_node_manager():
|
||||
return CustomNodeManager()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def app(custom_node_manager):
|
||||
app = web.Application()
|
||||
routes = web.RouteTableDef()
|
||||
custom_node_manager.add_routes(routes, app, [("ComfyUI-TestExtension1", "ComfyUI-TestExtension1")])
|
||||
custom_node_manager.add_routes(
|
||||
routes, app, [("ComfyUI-TestExtension1", "ComfyUI-TestExtension1")]
|
||||
)
|
||||
app.add_routes(routes)
|
||||
return app
|
||||
|
||||
|
||||
async def test_get_workflow_templates(aiohttp_client, app, tmp_path):
|
||||
client = await aiohttp_client(app)
|
||||
# Setup temporary custom nodes file structure with 1 workflow file
|
||||
custom_nodes_dir = tmp_path / "custom_nodes"
|
||||
example_workflows_dir = custom_nodes_dir / "ComfyUI-TestExtension1" / "example_workflows"
|
||||
example_workflows_dir = (
|
||||
custom_nodes_dir / "ComfyUI-TestExtension1" / "example_workflows"
|
||||
)
|
||||
example_workflows_dir.mkdir(parents=True)
|
||||
template_file = example_workflows_dir / "workflow1.json"
|
||||
template_file.write_text('')
|
||||
template_file.write_text("")
|
||||
|
||||
with patch('folder_paths.folder_names_and_paths', {
|
||||
'custom_nodes': ([str(custom_nodes_dir)], None)
|
||||
}):
|
||||
response = await client.get('/workflow_templates')
|
||||
with patch(
|
||||
"folder_paths.folder_names_and_paths",
|
||||
{"custom_nodes": ([str(custom_nodes_dir)], None)},
|
||||
):
|
||||
response = await client.get("/workflow_templates")
|
||||
assert response.status == 200
|
||||
workflows_dict = await response.json()
|
||||
assert isinstance(workflows_dict, dict)
|
||||
assert "ComfyUI-TestExtension1" in workflows_dict
|
||||
assert isinstance(workflows_dict["ComfyUI-TestExtension1"], list)
|
||||
assert workflows_dict["ComfyUI-TestExtension1"][0] == "workflow1"
|
||||
|
||||
|
||||
async def test_build_translations_empty_when_no_locales(custom_node_manager, tmp_path):
|
||||
custom_nodes_dir = tmp_path / "custom_nodes"
|
||||
custom_nodes_dir.mkdir(parents=True)
|
||||
|
||||
with patch("folder_paths.get_folder_paths", return_value=[str(custom_nodes_dir)]):
|
||||
translations = custom_node_manager.build_translations()
|
||||
assert translations == {}
|
||||
|
||||
|
||||
async def test_build_translations_loads_all_files(custom_node_manager, tmp_path):
|
||||
# Setup test directory structure
|
||||
custom_nodes_dir = tmp_path / "custom_nodes" / "test-extension"
|
||||
locales_dir = custom_nodes_dir / "locales" / "en"
|
||||
locales_dir.mkdir(parents=True)
|
||||
|
||||
# Create test translation files
|
||||
main_content = {"title": "Test Extension"}
|
||||
(locales_dir / "main.json").write_text(json.dumps(main_content))
|
||||
|
||||
node_defs = {"node1": "Node 1"}
|
||||
(locales_dir / "nodeDefs.json").write_text(json.dumps(node_defs))
|
||||
|
||||
commands = {"cmd1": "Command 1"}
|
||||
(locales_dir / "commands.json").write_text(json.dumps(commands))
|
||||
|
||||
settings = {"setting1": "Setting 1"}
|
||||
(locales_dir / "settings.json").write_text(json.dumps(settings))
|
||||
|
||||
with patch(
|
||||
"folder_paths.get_folder_paths", return_value=[tmp_path / "custom_nodes"]
|
||||
):
|
||||
translations = custom_node_manager.build_translations()
|
||||
|
||||
assert translations == {
|
||||
"en": {
|
||||
"title": "Test Extension",
|
||||
"nodeDefs": {"node1": "Node 1"},
|
||||
"commands": {"cmd1": "Command 1"},
|
||||
"settings": {"setting1": "Setting 1"},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
async def test_build_translations_handles_invalid_json(custom_node_manager, tmp_path):
|
||||
# Setup test directory structure
|
||||
custom_nodes_dir = tmp_path / "custom_nodes" / "test-extension"
|
||||
locales_dir = custom_nodes_dir / "locales" / "en"
|
||||
locales_dir.mkdir(parents=True)
|
||||
|
||||
# Create valid main.json
|
||||
main_content = {"title": "Test Extension"}
|
||||
(locales_dir / "main.json").write_text(json.dumps(main_content))
|
||||
|
||||
# Create invalid JSON file
|
||||
(locales_dir / "nodeDefs.json").write_text("invalid json{")
|
||||
|
||||
with patch(
|
||||
"folder_paths.get_folder_paths", return_value=[tmp_path / "custom_nodes"]
|
||||
):
|
||||
translations = custom_node_manager.build_translations()
|
||||
|
||||
assert translations == {
|
||||
"en": {
|
||||
"title": "Test Extension",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
async def test_build_translations_merges_multiple_extensions(
|
||||
custom_node_manager, tmp_path
|
||||
):
|
||||
# Setup test directory structure for two extensions
|
||||
custom_nodes_dir = tmp_path / "custom_nodes"
|
||||
ext1_dir = custom_nodes_dir / "extension1" / "locales" / "en"
|
||||
ext2_dir = custom_nodes_dir / "extension2" / "locales" / "en"
|
||||
ext1_dir.mkdir(parents=True)
|
||||
ext2_dir.mkdir(parents=True)
|
||||
|
||||
# Create translation files for extension 1
|
||||
ext1_main = {"title": "Extension 1", "shared": "Original"}
|
||||
(ext1_dir / "main.json").write_text(json.dumps(ext1_main))
|
||||
|
||||
# Create translation files for extension 2
|
||||
ext2_main = {"description": "Extension 2", "shared": "Override"}
|
||||
(ext2_dir / "main.json").write_text(json.dumps(ext2_main))
|
||||
|
||||
with patch("folder_paths.get_folder_paths", return_value=[str(custom_nodes_dir)]):
|
||||
translations = custom_node_manager.build_translations()
|
||||
|
||||
assert translations == {
|
||||
"en": {
|
||||
"title": "Extension 1",
|
||||
"description": "Extension 2",
|
||||
"shared": "Override", # Second extension should override first
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,33 +7,11 @@ from PIL import Image
|
||||
from aiohttp import web
|
||||
from unittest.mock import patch
|
||||
from app.model_manager import ModelFileManager
|
||||
from app.database.models import Base, Model, Tag
|
||||
from comfy.cli_args import args
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
|
||||
pytestmark = (
|
||||
pytest.mark.asyncio
|
||||
) # This applies the asyncio mark to all test functions in the module
|
||||
|
||||
@pytest.fixture
|
||||
def session():
|
||||
# Configure in-memory database
|
||||
args.database_url = "sqlite:///:memory:"
|
||||
|
||||
# Create engine and session factory
|
||||
engine = create_engine(args.database_url)
|
||||
Session = sessionmaker(bind=engine)
|
||||
|
||||
# Create all tables
|
||||
Base.metadata.create_all(engine)
|
||||
|
||||
# Patch Session factory
|
||||
with patch('app.database.db.Session', Session):
|
||||
yield Session()
|
||||
|
||||
Base.metadata.drop_all(engine)
|
||||
|
||||
@pytest.fixture
|
||||
def model_manager():
|
||||
return ModelFileManager()
|
||||
@@ -82,287 +60,3 @@ async def test_get_model_preview_safetensors(aiohttp_client, app, tmp_path):

    # Clean up
    img.close()

async def test_get_models(aiohttp_client, app, session):
    tag = Tag(name='test_tag')
    model = Model(
        type='checkpoints',
        path='model1.safetensors',
        title='Test Model'
    )
    model.tags.append(tag)
    session.add(tag)
    session.add(model)
    session.commit()

    client = await aiohttp_client(app)
    resp = await client.get('/v2/models')
    assert resp.status == 200
    data = await resp.json()
    assert len(data) == 1
    assert data[0]['path'] == 'model1.safetensors'
    assert len(data[0]['tags']) == 1
    assert data[0]['tags'][0]['name'] == 'test_tag'

async def test_add_model(aiohttp_client, app, session):
    tag = Tag(name='test_tag')
    session.add(tag)
    session.commit()
    tag_id = tag.id

    with patch('app.model_manager.model_processor') as mock_processor:
        with patch('app.model_manager.get_full_path', return_value='/checkpoints/model1.safetensors'):
            client = await aiohttp_client(app)
            resp = await client.post('/v2/models', json={
                'type': 'checkpoints',
                'path': 'model1.safetensors',
                'title': 'Test Model',
                'tags': [tag_id]
            })

    assert resp.status == 200
    data = await resp.json()
    assert data['path'] == 'model1.safetensors'
    assert len(data['tags']) == 1
    assert data['tags'][0]['name'] == 'test_tag'

    # Ensure that models are re-processed after adding
    mock_processor.run.assert_called_once()

async def test_delete_model(aiohttp_client, app, session):
    model = Model(
        type='checkpoints',
        path='model1.safetensors',
        title='Test Model'
    )
    session.add(model)
    session.commit()

    with patch('app.model_manager.get_full_path', return_value=None):
        client = await aiohttp_client(app)
        resp = await client.delete('/v2/models?type=checkpoints&path=model1.safetensors')
        assert resp.status == 204

    # Verify model was deleted
    model = session.query(Model).first()
    assert model is None

async def test_delete_model_file_exists(aiohttp_client, app, session):
    model = Model(
        type='checkpoints',
        path='model1.safetensors',
        title='Test Model'
    )
    session.add(model)
    session.commit()

    with patch('app.model_manager.get_full_path', return_value='/checkpoints/model1.safetensors'):
        client = await aiohttp_client(app)
        resp = await client.delete('/v2/models?type=checkpoints&path=model1.safetensors')
        assert resp.status == 400

        data = await resp.json()
        assert "file exists" in data["error"].lower()

    # Verify model was not deleted
    model = session.query(Model).first()
    assert model is not None
    assert model.path == 'model1.safetensors'

async def test_get_tags(aiohttp_client, app, session):
    tags = [Tag(name='tag1'), Tag(name='tag2')]
    for tag in tags:
        session.add(tag)
    session.commit()

    client = await aiohttp_client(app)
    resp = await client.get('/v2/tags')
    assert resp.status == 200
    data = await resp.json()
    assert len(data) == 2
    assert {t['name'] for t in data} == {'tag1', 'tag2'}

async def test_create_tag(aiohttp_client, app, session):
    client = await aiohttp_client(app)
    resp = await client.post('/v2/tags', json={'name': 'new_tag'})
    assert resp.status == 200
    data = await resp.json()
    assert data['name'] == 'new_tag'

    # Verify tag was created
    tag = session.query(Tag).first()
    assert tag.name == 'new_tag'

async def test_delete_tag(aiohttp_client, app, session):
    tag = Tag(name='test_tag')
    session.add(tag)
    session.commit()
    tag_id = tag.id

    client = await aiohttp_client(app)
    resp = await client.delete(f'/v2/tags?id={tag_id}')
    assert resp.status == 204

    # Verify tag was deleted
    tag = session.query(Tag).first()
    assert tag is None

async def test_add_model_tag(aiohttp_client, app, session):
    tag = Tag(name='test_tag')
    model = Model(
        type='checkpoints',
        path='model1.safetensors',
        title='Test Model'
    )
    session.add(tag)
    session.add(model)
    session.commit()
    tag_id = tag.id

    client = await aiohttp_client(app)
    resp = await client.post('/v2/models/tags', json={
        'tag': tag_id,
        'type': 'checkpoints',
        'path': 'model1.safetensors'
    })
    assert resp.status == 200
    data = await resp.json()
    assert len(data['tags']) == 1
    assert data['tags'][0]['name'] == 'test_tag'

async def test_delete_model_tag(aiohttp_client, app, session):
    tag = Tag(name='test_tag')
    model = Model(
        type='checkpoints',
        path='model1.safetensors',
        title='Test Model'
    )
    model.tags.append(tag)
    session.add(tag)
    session.add(model)
    session.commit()
    tag_id = tag.id

    client = await aiohttp_client(app)
    resp = await client.delete(f'/v2/models/tags?tag={tag_id}&type=checkpoints&path=model1.safetensors')
    assert resp.status == 204

    # Verify tag was removed
    model = session.query(Model).first()
    assert len(model.tags) == 0

async def test_add_model_duplicate(aiohttp_client, app, session):
    model = Model(
        type='checkpoints',
        path='model1.safetensors',
        title='Test Model'
    )
    session.add(model)
    session.commit()

    with patch('app.model_manager.get_full_path', return_value='/checkpoints/model1.safetensors'):
        client = await aiohttp_client(app)
        resp = await client.post('/v2/models', json={
            'type': 'checkpoints',
            'path': 'model1.safetensors',
            'title': 'Duplicate Model'
        })
        assert resp.status == 400

async def test_add_model_missing_fields(aiohttp_client, app, session):
    client = await aiohttp_client(app)
    resp = await client.post('/v2/models', json={})
    assert resp.status == 400

async def test_add_tag_missing_name(aiohttp_client, app, session):
    client = await aiohttp_client(app)
    resp = await client.post('/v2/tags', json={})
    assert resp.status == 400

async def test_delete_model_not_found(aiohttp_client, app, session):
    client = await aiohttp_client(app)
    resp = await client.delete('/v2/models?type=checkpoints&path=nonexistent.safetensors')
    assert resp.status == 404

async def test_delete_tag_not_found(aiohttp_client, app, session):
    client = await aiohttp_client(app)
    resp = await client.delete('/v2/tags?id=999')
    assert resp.status == 404

async def test_add_model_missing_path(aiohttp_client, app, session):
    client = await aiohttp_client(app)
    resp = await client.post('/v2/models', json={
        'type': 'checkpoints',
        'title': 'Test Model'
    })
    assert resp.status == 400
    data = await resp.json()
    assert "path" in data["error"].lower()

async def test_add_model_invalid_field(aiohttp_client, app, session):
    client = await aiohttp_client(app)
    resp = await client.post('/v2/models', json={
        'type': 'checkpoints',
        'path': 'model1.safetensors',
        'invalid_field': 'some value'
    })
    assert resp.status == 400
    data = await resp.json()
    assert "invalid field" in data["error"].lower()

async def test_add_model_nonexistent_file(aiohttp_client, app, session):
    with patch('app.model_manager.get_full_path', return_value=None):
        client = await aiohttp_client(app)
        resp = await client.post('/v2/models', json={
            'type': 'checkpoints',
            'path': 'nonexistent.safetensors'
        })
        assert resp.status == 404
        data = await resp.json()
        assert "file" in data["error"].lower()

async def test_add_model_invalid_tag(aiohttp_client, app, session):
    with patch('app.model_manager.get_full_path', return_value='/checkpoints/model1.safetensors'):
        client = await aiohttp_client(app)
        resp = await client.post('/v2/models', json={
            'type': 'checkpoints',
            'path': 'model1.safetensors',
            'tags': [999] # Non-existent tag ID
        })
        assert resp.status == 404
        data = await resp.json()
        assert "tag" in data["error"].lower()

async def test_add_tag_to_nonexistent_model(aiohttp_client, app, session):
    # Create a tag but no model
    tag = Tag(name='test_tag')
    session.add(tag)
    session.commit()
    tag_id = tag.id

    client = await aiohttp_client(app)
    resp = await client.post('/v2/models/tags', json={
        'tag': tag_id,
        'type': 'checkpoints',
        'path': 'nonexistent.safetensors'
    })
    assert resp.status == 404
    data = await resp.json()
    assert "model" in data["error"].lower()

async def test_delete_model_tag_invalid_tag_id(aiohttp_client, app, session):
    # Create a model first
    model = Model(
        type='checkpoints',
        path='model1.safetensors',
        title='Test Model'
    )
    session.add(model)
    session.commit()

    client = await aiohttp_client(app)
    resp = await client.delete('/v2/models/tags?tag=not_a_number&type=checkpoint&path=model1.safetensors')
    assert resp.status == 400
    data = await resp.json()
    assert "invalid tag id" in data["error"].lower()

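Read together, the removed tests document the /v2 REST surface: tags and models are created with POST, listed with GET, and removed with DELETE plus query parameters. A hedged client walkthrough of that sequence; the server address and the shape of the tag response are assumptions, not taken from the diff:

import asyncio
import aiohttp

async def demo():
    # Assumed local address; the tests above use an in-process test client instead
    async with aiohttp.ClientSession(base_url="http://127.0.0.1:8188") as http:
        tag = await (await http.post("/v2/tags", json={"name": "demo"})).json()
        await http.post("/v2/models", json={
            "type": "checkpoints",
            "path": "model1.safetensors",
            "title": "Demo Model",
            "tags": [tag["id"]],
        })
        models = await (await http.get("/v2/models")).json()
        # Detach the tag, then delete the record (per the tests, this returns
        # 400 if the underlying file still exists on disk)
        await http.delete(f"/v2/models/tags?tag={tag['id']}&type=checkpoints&path=model1.safetensors")
        await http.delete("/v2/models?type=checkpoints&path=model1.safetensors")
        return models

# asyncio.run(demo())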
@@ -1,19 +1,23 @@
### 🗻 This file is created through the spirit of Mount Fuji at its peak
# TODO(yoland): clean up this after I get back down
import sys
import pytest
import os
import tempfile
from unittest.mock import patch
from importlib import reload

import folder_paths
import comfy.cli_args
from comfy.options import enable_args_parsing
enable_args_parsing()


@pytest.fixture()
def clear_folder_paths():
    # Clear the global dictionary before each test to ensure isolation
    original = folder_paths.folder_names_and_paths.copy()
    folder_paths.folder_names_and_paths.clear()
    # Reload the module after each test to ensure isolation
    yield
    folder_paths.folder_names_and_paths = original
    reload(folder_paths)

@pytest.fixture
def temp_dir():
@@ -21,7 +25,21 @@ def temp_dir():
        yield tmpdirname


def test_get_directory_by_type():
@pytest.fixture
def set_base_dir():
    def _set_base_dir(base_dir):
        # Mock CLI args
        with patch.object(sys, 'argv', ["main.py", "--base-directory", base_dir]):
            reload(comfy.cli_args)
            reload(folder_paths)
    yield _set_base_dir
    # Reload the modules after each test to ensure isolation
    with patch.object(sys, 'argv', ["main.py"]):
        reload(comfy.cli_args)
        reload(folder_paths)


def test_get_directory_by_type(clear_folder_paths):
    test_dir = "/test/dir"
    folder_paths.set_output_directory(test_dir)
    assert folder_paths.get_directory_by_type("output") == test_dir
@@ -96,3 +114,49 @@ def test_get_save_image_path(temp_dir):
        assert counter == 1
        assert subfolder == ""
        assert filename_prefix == "test"


def test_base_path_changes(set_base_dir):
    test_dir = os.path.abspath("/test/dir")
    set_base_dir(test_dir)

    assert folder_paths.base_path == test_dir
    assert folder_paths.models_dir == os.path.join(test_dir, "models")
    assert folder_paths.input_directory == os.path.join(test_dir, "input")
    assert folder_paths.output_directory == os.path.join(test_dir, "output")
    assert folder_paths.temp_directory == os.path.join(test_dir, "temp")
    assert folder_paths.user_directory == os.path.join(test_dir, "user")

    assert os.path.join(test_dir, "custom_nodes") in folder_paths.get_folder_paths("custom_nodes")

    for name in ["checkpoints", "loras", "vae", "configs", "embeddings", "controlnet", "classifiers"]:
        assert folder_paths.get_folder_paths(name)[0] == os.path.join(test_dir, "models", name)


def test_base_path_change_clears_old(set_base_dir):
    test_dir = os.path.abspath("/test/dir")
    set_base_dir(test_dir)

    assert len(folder_paths.get_folder_paths("custom_nodes")) == 1

    single_model_paths = [
        "checkpoints",
        "loras",
        "vae",
        "configs",
        "clip_vision",
        "style_models",
        "diffusers",
        "vae_approx",
        "gligen",
        "upscale_models",
        "embeddings",
        "hypernetworks",
        "photomaker",
        "classifiers",
    ]
    for name in single_model_paths:
        assert len(folder_paths.get_folder_paths(name)) == 1

    for name in ["controlnet", "diffusion_models", "text_encoders"]:
        assert len(folder_paths.get_folder_paths(name)) == 2

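The `set_base_dir` fixture works because `comfy.cli_args` parses `sys.argv` when the module is (re)loaded, so reloading `folder_paths` afterwards re-derives every directory from the new base. The same patch-and-reload idiom in isolation; `mymodule` is a hypothetical stand-in, not from this repo:

import sys
from importlib import reload
from unittest.mock import patch

import mymodule  # hypothetical module that reads sys.argv at import time

with patch.object(sys, "argv", ["main.py", "--base-directory", "/test/dir"]):
    reload(mymodule)  # module-level argv parsing runs again with the fake args

# Restore: reload once more under the default argv so later tests see clean state
with patch.object(sys, "argv", ["main.py"]):
    reload(mymodule)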
@@ -1,115 +0,0 @@
import pytest
from aiohttp import web
from unittest.mock import MagicMock, patch
from api_server.routes.internal.internal_routes import InternalRoutes
from api_server.services.file_service import FileService
from folder_paths import models_dir, user_directory, output_directory


@pytest.fixture
def internal_routes():
    return InternalRoutes(None)

@pytest.fixture
def aiohttp_client_factory(aiohttp_client, internal_routes):
    async def _get_client():
        app = internal_routes.get_app()
        return await aiohttp_client(app)
    return _get_client

@pytest.mark.asyncio
async def test_list_files_valid_directory(aiohttp_client_factory, internal_routes):
    mock_file_list = [
        {"name": "file1.txt", "path": "file1.txt", "type": "file", "size": 100},
        {"name": "dir1", "path": "dir1", "type": "directory"}
    ]
    internal_routes.file_service.list_files = MagicMock(return_value=mock_file_list)
    client = await aiohttp_client_factory()
    resp = await client.get('/files?directory=models')
    assert resp.status == 200
    data = await resp.json()
    assert 'files' in data
    assert len(data['files']) == 2
    assert data['files'] == mock_file_list

    # Check other valid directories
    resp = await client.get('/files?directory=user')
    assert resp.status == 200
    resp = await client.get('/files?directory=output')
    assert resp.status == 200

@pytest.mark.asyncio
async def test_list_files_invalid_directory(aiohttp_client_factory, internal_routes):
    internal_routes.file_service.list_files = MagicMock(side_effect=ValueError("Invalid directory key"))
    client = await aiohttp_client_factory()
    resp = await client.get('/files?directory=invalid')
    assert resp.status == 400
    data = await resp.json()
    assert 'error' in data
    assert data['error'] == "Invalid directory key"

@pytest.mark.asyncio
async def test_list_files_exception(aiohttp_client_factory, internal_routes):
    internal_routes.file_service.list_files = MagicMock(side_effect=Exception("Unexpected error"))
    client = await aiohttp_client_factory()
    resp = await client.get('/files?directory=models')
    assert resp.status == 500
    data = await resp.json()
    assert 'error' in data
    assert data['error'] == "Unexpected error"

@pytest.mark.asyncio
async def test_list_files_no_directory_param(aiohttp_client_factory, internal_routes):
    mock_file_list = []
    internal_routes.file_service.list_files = MagicMock(return_value=mock_file_list)
    client = await aiohttp_client_factory()
    resp = await client.get('/files')
    assert resp.status == 200
    data = await resp.json()
    assert 'files' in data
    assert len(data['files']) == 0

def test_setup_routes(internal_routes):
    internal_routes.setup_routes()
    routes = internal_routes.routes
    assert any(route.method == 'GET' and str(route.path) == '/files' for route in routes)

def test_get_app(internal_routes):
    app = internal_routes.get_app()
    assert isinstance(app, web.Application)
    assert internal_routes._app is not None

def test_get_app_reuse(internal_routes):
    app1 = internal_routes.get_app()
    app2 = internal_routes.get_app()
    assert app1 is app2

@pytest.mark.asyncio
async def test_routes_added_to_app(aiohttp_client_factory, internal_routes):
    client = await aiohttp_client_factory()
    try:
        resp = await client.get('/files')
        print(f"Response received: status {resp.status}") # noqa: T201
    except Exception as e:
        print(f"Exception occurred during GET request: {e}") # noqa: T201
        raise

    assert resp.status != 404, "Route /files does not exist"

@pytest.mark.asyncio
async def test_file_service_initialization():
    with patch('api_server.routes.internal.internal_routes.FileService') as MockFileService:
        # Create a mock instance
        mock_file_service_instance = MagicMock(spec=FileService)
        MockFileService.return_value = mock_file_service_instance
        internal_routes = InternalRoutes(None)

        # Check if FileService was initialized with the correct parameters
        MockFileService.assert_called_once_with({
            "models": models_dir,
            "user": user_directory,
            "output": output_directory
        })

        # Verify that the file_service attribute of InternalRoutes is set
        assert internal_routes.file_service == mock_file_service_instance
@@ -1,54 +0,0 @@
import pytest
from unittest.mock import MagicMock
from api_server.services.file_service import FileService

@pytest.fixture
def mock_file_system_ops():
    return MagicMock()

@pytest.fixture
def file_service(mock_file_system_ops):
    allowed_directories = {
        "models": "/path/to/models",
        "user": "/path/to/user",
        "output": "/path/to/output"
    }
    return FileService(allowed_directories, file_system_ops=mock_file_system_ops)

def test_list_files_valid_directory(file_service, mock_file_system_ops):
    mock_file_system_ops.walk_directory.return_value = [
        {"name": "file1.txt", "path": "file1.txt", "type": "file", "size": 100},
        {"name": "dir1", "path": "dir1", "type": "directory"}
    ]

    result = file_service.list_files("models")

    assert len(result) == 2
    assert result[0]["name"] == "file1.txt"
    assert result[1]["name"] == "dir1"
    mock_file_system_ops.walk_directory.assert_called_once_with("/path/to/models")

def test_list_files_invalid_directory(file_service):
    # Does not support walking directories outside of the allowed directories
    with pytest.raises(ValueError, match="Invalid directory key"):
        file_service.list_files("invalid_key")

def test_list_files_empty_directory(file_service, mock_file_system_ops):
    mock_file_system_ops.walk_directory.return_value = []

    result = file_service.list_files("models")

    assert len(result) == 0
    mock_file_system_ops.walk_directory.assert_called_once_with("/path/to/models")

@pytest.mark.parametrize("directory_key", ["models", "user", "output"])
def test_list_files_all_allowed_directories(file_service, mock_file_system_ops, directory_key):
    mock_file_system_ops.walk_directory.return_value = [
        {"name": f"file_{directory_key}.txt", "path": f"file_{directory_key}.txt", "type": "file", "size": 100}
    ]

    result = file_service.list_files(directory_key)

    assert len(result) == 1
    assert result[0]["name"] == f"file_{directory_key}.txt"
    mock_file_system_ops.walk_directory.assert_called_once_with(f"/path/to/{directory_key}")
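These removed tests hinge on constructor injection: FileService receives a file_system_ops collaborator, so a MagicMock can stand in for the real filesystem. A hypothetical minimal implementation consistent with what the tests assert; it mirrors the test expectations, not the actual api_server module:

import os

class FileSystemOps:
    # Simple real walker so the sketch runs on its own
    def walk_directory(self, directory):
        entries = []
        for root, dirs, files in os.walk(directory):
            for name in files:
                rel = os.path.relpath(os.path.join(root, name), directory)
                entries.append({"name": name, "path": rel, "type": "file"})
            for name in dirs:
                rel = os.path.relpath(os.path.join(root, name), directory)
                entries.append({"name": name, "path": rel, "type": "directory"})
        return entries

class FileService:
    def __init__(self, allowed_directories, file_system_ops=None):
        self.allowed_directories = allowed_directories
        # The injection seam: tests pass a MagicMock here
        self.file_system_ops = file_system_ops or FileSystemOps()

    def list_files(self, directory_key):
        if directory_key not in self.allowed_directories:
            raise ValueError("Invalid directory key")
        directory_path = self.allowed_directories[directory_key]
        return self.file_system_ops.walk_directory(directory_path)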
71
tests-unit/utils/json_util_test.py
Normal file
@@ -0,0 +1,71 @@
from utils.json_util import merge_json_recursive


def test_merge_simple_dicts():
    base = {"a": 1, "b": 2}
    update = {"b": 3, "c": 4}
    expected = {"a": 1, "b": 3, "c": 4}
    assert merge_json_recursive(base, update) == expected


def test_merge_nested_dicts():
    base = {"a": {"x": 1, "y": 2}, "b": 3}
    update = {"a": {"y": 4, "z": 5}}
    expected = {"a": {"x": 1, "y": 4, "z": 5}, "b": 3}
    assert merge_json_recursive(base, update) == expected


def test_merge_lists():
    base = {"a": [1, 2], "b": 3}
    update = {"a": [3, 4]}
    expected = {"a": [1, 2, 3, 4], "b": 3}
    assert merge_json_recursive(base, update) == expected


def test_merge_nested_lists():
    base = {"a": {"x": [1, 2]}}
    update = {"a": {"x": [3, 4]}}
    expected = {"a": {"x": [1, 2, 3, 4]}}
    assert merge_json_recursive(base, update) == expected


def test_merge_mixed_types():
    base = {"a": [1, 2], "b": {"x": 1}}
    update = {"a": [3], "b": {"y": 2}}
    expected = {"a": [1, 2, 3], "b": {"x": 1, "y": 2}}
    assert merge_json_recursive(base, update) == expected


def test_merge_overwrite_non_dict():
    base = {"a": 1}
    update = {"a": {"x": 2}}
    expected = {"a": {"x": 2}}
    assert merge_json_recursive(base, update) == expected


def test_merge_empty_dicts():
    base = {}
    update = {"a": 1}
    expected = {"a": 1}
    assert merge_json_recursive(base, update) == expected


def test_merge_none_values():
    base = {"a": None}
    update = {"a": {"x": 1}}
    expected = {"a": {"x": 1}}
    assert merge_json_recursive(base, update) == expected


def test_merge_different_types():
    base = {"a": [1, 2]}
    update = {"a": "string"}
    expected = {"a": "string"}
    assert merge_json_recursive(base, update) == expected


def test_merge_complex_nested():
    base = {"a": [1, 2], "b": {"x": [3, 4], "y": {"p": 1}}}
    update = {"a": [5], "b": {"x": [6], "y": {"q": 2}}}
    expected = {"a": [1, 2, 5], "b": {"x": [3, 4, 6], "y": {"p": 1, "q": 2}}}
    assert merge_json_recursive(base, update) == expected
26
utils/json_util.py
Normal file
@@ -0,0 +1,26 @@
def merge_json_recursive(base, update):
    """Recursively merge two JSON-like objects.
    - Dictionaries are merged recursively
    - Lists are concatenated
    - Other types are overwritten by the update value

    Args:
        base: Base JSON-like object
        update: Update JSON-like object to merge into base

    Returns:
        Merged JSON-like object
    """
    if not isinstance(base, dict) or not isinstance(update, dict):
        if isinstance(base, list) and isinstance(update, list):
            return base + update
        return update

    merged = base.copy()
    for key, value in update.items():
        if key in merged:
            merged[key] = merge_json_recursive(merged[key], value)
        else:
            merged[key] = value

    return merged
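A quick illustration of the three merge rules named in the docstring above; the dictionaries here are invented for the example:

from utils.json_util import merge_json_recursive

base = {"en": {"title": "Extension 1", "shared": "Original"}, "langs": ["en"]}
update = {"en": {"shared": "Override"}, "langs": ["ja"]}

merged = merge_json_recursive(base, update)
# Dicts merge key-by-key, lists concatenate, scalars take the update value
assert merged == {
    "en": {"title": "Extension 1", "shared": "Override"},
    "langs": ["en", "ja"],
}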
12
utils/web.py
@@ -1,12 +0,0 @@
import json
from datetime import datetime


class DateTimeEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        return super().default(obj)


dumps = DateTimeEncoder().encode
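For reference, the encoder removed above let callers serialize datetime values that plain json.dumps rejects; a minimal demonstration using only the code shown in the diff:

import json
from datetime import datetime

class DateTimeEncoder(json.JSONEncoder):  # as deleted above
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        return super().default(obj)

payload = {"created": datetime(2024, 1, 2, 3, 4, 5)}
# json.dumps(payload) would raise TypeError: datetime is not JSON serializable
print(DateTimeEncoder().encode(payload))  # {"created": "2024-01-02T03:04:05"}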
23
web/assets/BaseViewTemplate-BNGF4K22.js
generated
vendored
@@ -1,23 +0,0 @@
import { d as defineComponent, o as openBlock, f as createElementBlock, J as renderSlot, T as normalizeClass } from "./index-DjNHn37O.js";
const _sfc_main = /* @__PURE__ */ defineComponent({
  __name: "BaseViewTemplate",
  props: {
    dark: { type: Boolean, default: false }
  },
  setup(__props) {
    const props = __props;
    return (_ctx, _cache) => {
      return openBlock(), createElementBlock("div", {
        class: normalizeClass(["font-sans w-screen h-screen flex items-center justify-center pointer-events-auto overflow-auto", [
          props.dark ? "text-neutral-300 bg-neutral-900 dark-theme" : "text-neutral-900 bg-neutral-300"
        ]])
      }, [
        renderSlot(_ctx.$slots, "default")
      ], 2);
    };
  }
});
export {
  _sfc_main as _
};
//# sourceMappingURL=BaseViewTemplate-BNGF4K22.js.map
51
web/assets/BaseViewTemplate-Cz111_1A.js
generated
vendored
Normal file
@@ -0,0 +1,51 @@
import { d as defineComponent, U as ref, p as onMounted, b4 as isElectron, W as nextTick, b5 as electronAPI, o as openBlock, f as createElementBlock, i as withDirectives, v as vShow, j as unref, b6 as isNativeWindow, m as createBaseVNode, A as renderSlot, ai as normalizeClass } from "./index-DqqhYDnY.js";
const _hoisted_1 = { class: "flex-grow w-full flex items-center justify-center overflow-auto" };
const _sfc_main = /* @__PURE__ */ defineComponent({
  __name: "BaseViewTemplate",
  props: {
    dark: { type: Boolean, default: false }
  },
  setup(__props) {
    const props = __props;
    const darkTheme = {
      color: "rgba(0, 0, 0, 0)",
      symbolColor: "#d4d4d4"
    };
    const lightTheme = {
      color: "rgba(0, 0, 0, 0)",
      symbolColor: "#171717"
    };
    const topMenuRef = ref(null);
    onMounted(async () => {
      if (isElectron()) {
        await nextTick();
        electronAPI().changeTheme({
          ...props.dark ? darkTheme : lightTheme,
          height: topMenuRef.value.getBoundingClientRect().height
        });
      }
    });
    return (_ctx, _cache) => {
      return openBlock(), createElementBlock("div", {
        class: normalizeClass(["font-sans w-screen h-screen flex flex-col pointer-events-auto", [
          props.dark ? "text-neutral-300 bg-neutral-900 dark-theme" : "text-neutral-900 bg-neutral-300"
        ]])
      }, [
        withDirectives(createBaseVNode("div", {
          ref_key: "topMenuRef",
          ref: topMenuRef,
          class: "app-drag w-full h-[var(--comfy-topbar-height)]"
        }, null, 512), [
          [vShow, unref(isNativeWindow)()]
        ]),
        createBaseVNode("div", _hoisted_1, [
          renderSlot(_ctx.$slots, "default")
        ])
      ], 2);
    };
  }
});
export {
  _sfc_main as _
};
//# sourceMappingURL=BaseViewTemplate-Cz111_1A.js.map
22
web/assets/DesktopStartView-FKlxS2Lt.js
generated
vendored
Normal file
@@ -0,0 +1,22 @@
import { d as defineComponent, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, k as createVNode, j as unref, bz as script } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = { class: "max-w-screen-sm w-screen p-8" };
const _sfc_main = /* @__PURE__ */ defineComponent({
  __name: "DesktopStartView",
  setup(__props) {
    return (_ctx, _cache) => {
      return openBlock(), createBlock(_sfc_main$1, { dark: "" }, {
        default: withCtx(() => [
          createBaseVNode("div", _hoisted_1, [
            createVNode(unref(script), { mode: "indeterminate" })
          ])
        ]),
        _: 1
      });
    };
  }
});
export {
  _sfc_main as default
};
//# sourceMappingURL=DesktopStartView-FKlxS2Lt.js.map
6
web/assets/DownloadGitView-DeC7MBzG.js → web/assets/DownloadGitView-DVXUne-M.js
generated
vendored
@@ -1,7 +1,7 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, j as unref, l as script, bW as useRouter } from "./index-DjNHn37O.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js";
import { d as defineComponent, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, be as useRouter } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = { class: "max-w-screen-sm flex flex-col gap-8 p-8 bg-[url('/assets/images/Git-Logo-White.svg')] bg-no-repeat bg-right-top bg-origin-padding" };
const _hoisted_2 = { class: "mt-24 text-4xl font-bold text-red-500" };
const _hoisted_3 = { class: "space-y-4" };
@@ -55,4 +55,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
  _sfc_main as default
};
//# sourceMappingURL=DownloadGitView-DeC7MBzG.js.map
//# sourceMappingURL=DownloadGitView-DVXUne-M.js.map
9
web/assets/ExtensionPanel-D4Phn0Zr.js → web/assets/ExtensionPanel-iPOrhDVM.js
generated
vendored
@@ -1,9 +1,8 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, ab as ref, cn as FilterMatchMode, cs as useExtensionStore, a as useSettingStore, m as onMounted, c as computed, o as openBlock, k as createBlock, M as withCtx, N as createVNode, co as SearchBox, j as unref, bZ as script, H as createBaseVNode, f as createElementBlock, E as renderList, X as toDisplayString, aE as createTextVNode, F as Fragment, l as script$1, I as createCommentVNode, aI as script$3, bO as script$4, c4 as script$5, cp as _sfc_main$1 } from "./index-DjNHn37O.js";
import { s as script$2, a as script$6 } from "./index-B5F0uxTQ.js";
import "./index-B-aVupP5.js";
import "./index-5HFeZax4.js";
import { d as defineComponent, U as ref, dl as FilterMatchMode, dr as useExtensionStore, a as useSettingStore, p as onMounted, c as computed, o as openBlock, y as createBlock, z as withCtx, k as createVNode, dm as SearchBox, j as unref, bj as script, m as createBaseVNode, f as createElementBlock, D as renderList, E as toDisplayString, a7 as createTextVNode, F as Fragment, l as script$1, B as createCommentVNode, a4 as script$3, ax as script$4, bn as script$5, dn as _sfc_main$1 } from "./index-DqqhYDnY.js";
import { g as script$2, h as script$6 } from "./index-BapOFhAR.js";
import "./index-DXE47DZl.js";
const _hoisted_1 = { class: "flex justify-end" };
const _sfc_main = /* @__PURE__ */ defineComponent({
  __name: "ExtensionPanel",
@@ -180,4 +179,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
  _sfc_main as default
};
//# sourceMappingURL=ExtensionPanel-D4Phn0Zr.js.map
//# sourceMappingURL=ExtensionPanel-iPOrhDVM.js.map
157
web/assets/GraphView-CIRWBKTm.css → web/assets/GraphView-CVCdiww1.css
generated
vendored
@@ -1,8 +1,10 @@

.comfy-menu-hamburger[data-v-5661bed0] {
  pointer-events: auto;
  position: fixed;
  z-index: 9999;
.comfy-menu-hamburger[data-v-7ed57d1a] {
  pointer-events: auto;
  position: fixed;
  z-index: 9999;
  display: flex;
  flex-direction: row
}

[data-v-e50caa15] .p-splitter-gutter {
@@ -39,14 +41,14 @@
  z-index: 999;
}

.p-buttongroup-vertical[data-v-cf40dd39] {
.p-buttongroup-vertical[data-v-cb8f9a1a] {
  display: flex;
  flex-direction: column;
  border-radius: var(--p-button-border-radius);
  overflow: hidden;
  border: 1px solid var(--p-panel-border-color);
}
.p-buttongroup-vertical .p-button[data-v-cf40dd39] {
.p-buttongroup-vertical .p-button[data-v-cb8f9a1a] {
  margin: 0;
  border-radius: 0;
}
@@ -82,7 +84,7 @@
  font-size: inherit;
}

[data-v-5741c9ae] .highlight {
[data-v-fd0a74bd] .highlight {
  background-color: var(--p-primary-color);
  color: var(--p-primary-contrast-color);
  font-weight: bold;
@@ -131,16 +133,7 @@
  border-right: 4px solid var(--p-button-text-primary-color);
}

:root {
  --sidebar-width: 64px;
  --sidebar-icon-size: 1.5rem;
}
:root .small-sidebar {
  --sidebar-width: 40px;
  --sidebar-icon-size: 1rem;
}

.side-tool-bar-container[data-v-37d8d7b4] {
.side-tool-bar-container[data-v-33cac83a] {
  display: flex;
  flex-direction: column;
  align-items: center;
@@ -153,18 +146,91 @@
  background-color: var(--comfy-menu-secondary-bg);
  color: var(--fg-color);
  box-shadow: var(--bar-shadow);

  --sidebar-width: 4rem;
  --sidebar-icon-size: 1.5rem;
}
.side-tool-bar-end[data-v-37d8d7b4] {
.side-tool-bar-container.small-sidebar[data-v-33cac83a] {
  --sidebar-width: 2.5rem;
  --sidebar-icon-size: 1rem;
}
.side-tool-bar-end[data-v-33cac83a] {
  align-self: flex-end;
  margin-top: auto;
}

[data-v-b9328350] .p-inputtext {
.status-indicator[data-v-8d011a31] {
  position: absolute;
  font-weight: 700;
  font-size: 1.5rem;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%)
}

[data-v-54fadc45] .p-togglebutton {
  position: relative;
  flex-shrink: 0;
  border-radius: 0px;
  border-width: 0px;
  border-right-width: 1px;
  border-style: solid;
  background-color: transparent;
  padding: 0px;
  border-right-color: var(--border-color)
}
[data-v-54fadc45] .p-togglebutton::before {
  display: none
}
[data-v-54fadc45] .p-togglebutton:first-child {
  border-left-width: 1px;
  border-style: solid;
  border-left-color: var(--border-color)
}
[data-v-54fadc45] .p-togglebutton:not(:first-child) {
  border-left-width: 0px
}
[data-v-54fadc45] .p-togglebutton.p-togglebutton-checked {
  height: 100%;
  border-bottom-width: 1px;
  border-style: solid;
  border-bottom-color: var(--p-button-text-primary-color)
}
[data-v-54fadc45] .p-togglebutton:not(.p-togglebutton-checked) {
  opacity: 0.75
}
[data-v-54fadc45] .p-togglebutton-checked .close-button,[data-v-54fadc45] .p-togglebutton:hover .close-button {
  visibility: visible
}
[data-v-54fadc45] .p-togglebutton:hover .status-indicator {
  display: none
}
[data-v-54fadc45] .p-togglebutton .close-button {
  visibility: hidden
}
[data-v-54fadc45] .p-scrollpanel-content {
  height: 100%
}

/* Scrollbar half opacity to avoid blocking the active tab bottom border */
[data-v-54fadc45] .p-scrollpanel:hover .p-scrollpanel-bar,[data-v-54fadc45] .p-scrollpanel:active .p-scrollpanel-bar {
  opacity: 0.5
}
[data-v-54fadc45] .p-selectbutton {
  height: 100%;
  border-radius: 0px
}

[data-v-38831d8e] .workflow-tabs {
  background-color: var(--comfy-menu-bg);
}

[data-v-26957f1f] .p-inputtext {
  border-top-left-radius: 0;
  border-bottom-left-radius: 0;
}

.comfyui-queue-button[data-v-7f4f551b] .p-splitbutton-dropdown {
.comfyui-queue-button[data-v-91a628af] .p-splitbutton-dropdown {
  border-top-right-radius: 0;
  border-bottom-right-radius: 0;
}
@@ -195,55 +261,23 @@
  display: none;
}

.top-menubar[data-v-6fecd137] .p-menubar-item-link svg {
.top-menubar[data-v-56df69d2] .p-menubar-item-link svg {
  display: none;
}
[data-v-6fecd137] .p-menubar-submenu.dropdown-direction-up {
[data-v-56df69d2] .p-menubar-submenu.dropdown-direction-up {
  top: auto;
  bottom: 100%;
  flex-direction: column-reverse;
}
.keybinding-tag[data-v-6fecd137] {
.keybinding-tag[data-v-56df69d2] {
  background: var(--p-content-hover-background);
  border-color: var(--p-content-border-color);
  border-style: solid;
}

.status-indicator[data-v-8d011a31] {
  position: absolute;
  font-weight: 700;
  font-size: 1.5rem;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%)
}

[data-v-d485c044] .p-togglebutton::before {
  display: none
}
[data-v-d485c044] .p-togglebutton {
  position: relative;
  flex-shrink: 0;
  border-radius: 0px;
  background-color: transparent;
  padding: 0px
}
[data-v-d485c044] .p-togglebutton.p-togglebutton-checked {
  border-bottom-width: 2px;
  border-bottom-color: var(--p-button-text-primary-color)
}
[data-v-d485c044] .p-togglebutton-checked .close-button,[data-v-d485c044] .p-togglebutton:hover .close-button {
  visibility: visible
}
[data-v-d485c044] .p-togglebutton:hover .status-indicator {
  display: none
}
[data-v-d485c044] .p-togglebutton .close-button {
  visibility: hidden
}

.comfyui-menu[data-v-878b63b8] {
.comfyui-menu[data-v-929e7543] {
  width: 100vw;
  height: var(--comfy-topbar-height);
  background: var(--comfy-menu-bg);
  color: var(--fg-color);
  box-shadow: var(--bar-shadow);
@@ -253,18 +287,17 @@
  z-index: 1000;
  order: 0;
  grid-column: 1/-1;
  max-height: 90vh;
}
.comfyui-menu.dropzone[data-v-878b63b8] {
.comfyui-menu.dropzone[data-v-929e7543] {
  background: var(--p-highlight-background);
}
.comfyui-menu.dropzone-active[data-v-878b63b8] {
.comfyui-menu.dropzone-active[data-v-929e7543] {
  background: var(--p-highlight-background-focus);
}
[data-v-878b63b8] .p-menubar-item-label {
[data-v-929e7543] .p-menubar-item-label {
  line-height: revert;
}
.comfyui-logo[data-v-878b63b8] {
.comfyui-logo[data-v-929e7543] {
  font-size: 1.2em;
  -webkit-user-select: none;
  -moz-user-select: none;
4682
web/assets/GraphView-D9ZzDQZV.js
generated
vendored
Normal file
File diff suppressed because it is too large

1288
web/assets/InstallView-CAcYt0HL.js
generated
vendored
File diff suppressed because one or more lines are too long

945
web/assets/InstallView-CVZcZZXJ.js
generated
vendored
Normal file
@@ -0,0 +1,945 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, U as ref, bm as useModel, o as openBlock, f as createElementBlock, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, bn as script, bh as script$1, ar as withModifiers, z as withCtx, ab as script$2, K as useI18n, c as computed, ai as normalizeClass, B as createCommentVNode, a4 as script$3, a7 as createTextVNode, b5 as electronAPI, _ as _export_sfc, p as onMounted, r as resolveDirective, bg as script$4, i as withDirectives, bo as script$5, bp as script$6, l as script$7, y as createBlock, bj as script$8, bq as MigrationItems, w as watchEffect, F as Fragment, D as renderList, br as script$9, bs as mergeModels, bt as ValidationState, Y as normalizeI18nKey, O as watch, bu as checkMirrorReachable, bv as _sfc_main$7, bw as mergeValidationStates, bc as t, a$ as script$a, bx as CUDA_TORCH_URL, by as NIGHTLY_CPU_TORCH_URL, be as useRouter, ag as toRaw } from "./index-DqqhYDnY.js";
import { s as script$b, a as script$c, b as script$d, c as script$e, d as script$f } from "./index-BNlqgrYT.js";
import { P as PYTHON_MIRROR, a as PYPI_MIRROR } from "./uvMirrors-B-HKMf6X.js";
import { _ as _sfc_main$8 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1$5 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$5 = { class: "flex flex-col gap-4" };
const _hoisted_3$5 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$5 = { class: "text-neutral-400 my-0" };
const _hoisted_5$3 = { class: "flex flex-col bg-neutral-800 p-4 rounded-lg" };
const _hoisted_6$3 = { class: "flex items-center gap-4" };
const _hoisted_7$3 = { class: "flex-1" };
const _hoisted_8$3 = { class: "text-lg font-medium text-neutral-100" };
const _hoisted_9$3 = { class: "text-sm text-neutral-400 mt-1" };
const _hoisted_10$3 = { class: "flex items-center gap-4" };
const _hoisted_11$3 = { class: "flex-1" };
const _hoisted_12$3 = { class: "text-lg font-medium text-neutral-100" };
const _hoisted_13$1 = { class: "text-sm text-neutral-400 mt-1" };
const _hoisted_14$1 = { class: "text-neutral-300" };
const _hoisted_15 = { class: "font-medium mb-2" };
const _hoisted_16 = { class: "list-disc pl-6 space-y-1" };
const _hoisted_17 = { class: "font-medium mt-4 mb-2" };
const _hoisted_18 = { class: "list-disc pl-6 space-y-1" };
const _hoisted_19 = { class: "mt-4" };
const _hoisted_20 = {
  href: "https://comfy.org/privacy",
  target: "_blank",
  class: "text-blue-400 hover:text-blue-300 underline"
};
const _sfc_main$6 = /* @__PURE__ */ defineComponent({
  __name: "DesktopSettingsConfiguration",
  props: {
    "autoUpdate": { type: Boolean, ...{ required: true } },
    "autoUpdateModifiers": {},
    "allowMetrics": { type: Boolean, ...{ required: true } },
    "allowMetricsModifiers": {}
  },
  emits: ["update:autoUpdate", "update:allowMetrics"],
  setup(__props) {
    const showDialog = ref(false);
    const autoUpdate = useModel(__props, "autoUpdate");
    const allowMetrics = useModel(__props, "allowMetrics");
    const showMetricsInfo = /* @__PURE__ */ __name(() => {
      showDialog.value = true;
    }, "showMetricsInfo");
    return (_ctx, _cache) => {
      return openBlock(), createElementBlock("div", _hoisted_1$5, [
        createBaseVNode("div", _hoisted_2$5, [
          createBaseVNode("h2", _hoisted_3$5, toDisplayString(_ctx.$t("install.desktopAppSettings")), 1),
          createBaseVNode("p", _hoisted_4$5, toDisplayString(_ctx.$t("install.desktopAppSettingsDescription")), 1)
        ]),
        createBaseVNode("div", _hoisted_5$3, [
          createBaseVNode("div", _hoisted_6$3, [
            createBaseVNode("div", _hoisted_7$3, [
              createBaseVNode("h3", _hoisted_8$3, toDisplayString(_ctx.$t("install.settings.autoUpdate")), 1),
              createBaseVNode("p", _hoisted_9$3, toDisplayString(_ctx.$t("install.settings.autoUpdateDescription")), 1)
            ]),
            createVNode(unref(script), {
              modelValue: autoUpdate.value,
              "onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => autoUpdate.value = $event)
            }, null, 8, ["modelValue"])
          ]),
          createVNode(unref(script$1)),
          createBaseVNode("div", _hoisted_10$3, [
            createBaseVNode("div", _hoisted_11$3, [
              createBaseVNode("h3", _hoisted_12$3, toDisplayString(_ctx.$t("install.settings.allowMetrics")), 1),
              createBaseVNode("p", _hoisted_13$1, toDisplayString(_ctx.$t("install.settings.allowMetricsDescription")), 1),
              createBaseVNode("a", {
                href: "#",
                class: "text-sm text-blue-400 hover:text-blue-300 mt-1 inline-block",
                onClick: withModifiers(showMetricsInfo, ["prevent"])
              }, toDisplayString(_ctx.$t("install.settings.learnMoreAboutData")), 1)
            ]),
            createVNode(unref(script), {
              modelValue: allowMetrics.value,
              "onUpdate:modelValue": _cache[1] || (_cache[1] = ($event) => allowMetrics.value = $event)
            }, null, 8, ["modelValue"])
          ])
        ]),
        createVNode(unref(script$2), {
          visible: showDialog.value,
          "onUpdate:visible": _cache[2] || (_cache[2] = ($event) => showDialog.value = $event),
          modal: "",
          header: _ctx.$t("install.settings.dataCollectionDialog.title")
        }, {
          default: withCtx(() => [
            createBaseVNode("div", _hoisted_14$1, [
              createBaseVNode("h4", _hoisted_15, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.whatWeCollect")), 1),
              createBaseVNode("ul", _hoisted_16, [
                createBaseVNode("li", null, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.collect.errorReports")), 1),
                createBaseVNode("li", null, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.collect.systemInfo")), 1),
                createBaseVNode("li", null, toDisplayString(_ctx.$t(
                  "install.settings.dataCollectionDialog.collect.userJourneyEvents"
                )), 1)
              ]),
              createBaseVNode("h4", _hoisted_17, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.whatWeDoNotCollect")), 1),
              createBaseVNode("ul", _hoisted_18, [
                createBaseVNode("li", null, toDisplayString(_ctx.$t(
                  "install.settings.dataCollectionDialog.doNotCollect.personalInformation"
                )), 1),
                createBaseVNode("li", null, toDisplayString(_ctx.$t(
                  "install.settings.dataCollectionDialog.doNotCollect.workflowContents"
                )), 1),
                createBaseVNode("li", null, toDisplayString(_ctx.$t(
                  "install.settings.dataCollectionDialog.doNotCollect.fileSystemInformation"
                )), 1),
                createBaseVNode("li", null, toDisplayString(_ctx.$t(
                  "install.settings.dataCollectionDialog.doNotCollect.customNodeConfigurations"
                )), 1)
              ]),
              createBaseVNode("div", _hoisted_19, [
                createBaseVNode("a", _hoisted_20, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.viewFullPolicy")), 1)
              ])
            ])
          ]),
          _: 1
        }, 8, ["visible", "header"])
      ]);
    };
  }
});
const _imports_0 = "" + new URL("images/nvidia-logo.svg", import.meta.url).href;
const _imports_1 = "" + new URL("images/apple-mps-logo.png", import.meta.url).href;
const _imports_2 = "" + new URL("images/manual-configuration.svg", import.meta.url).href;
const _hoisted_1$4 = { class: "flex flex-col gap-6 w-[600px] h-[30rem] select-none" };
const _hoisted_2$4 = { class: "grow flex flex-col gap-4 text-neutral-300" };
const _hoisted_3$4 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$4 = { class: "m-1 text-neutral-400" };
const _hoisted_5$2 = {
  key: 0,
  class: "m-1"
};
const _hoisted_6$2 = {
  key: 1,
  class: "m-1"
};
const _hoisted_7$2 = {
  key: 2,
  class: "text-neutral-300"
};
const _hoisted_8$2 = { class: "m-1" };
const _hoisted_9$2 = { key: 3 };
const _hoisted_10$2 = { class: "m-1" };
const _hoisted_11$2 = { class: "m-1" };
const _hoisted_12$2 = {
  for: "cpu-mode",
  class: "select-none"
};
const _sfc_main$5 = /* @__PURE__ */ defineComponent({
  __name: "GpuPicker",
  props: {
    "device": {
      required: true
    },
    "deviceModifiers": {}
  },
  emits: ["update:device"],
  setup(__props) {
    const { t: t2 } = useI18n();
    const cpuMode = computed({
      get: /* @__PURE__ */ __name(() => selected.value === "cpu", "get"),
      set: /* @__PURE__ */ __name((value) => {
        selected.value = value ? "cpu" : null;
      }, "set")
    });
    const selected = useModel(__props, "device");
    const electron = electronAPI();
    const platform = electron.getPlatform();
    const pickGpu = /* @__PURE__ */ __name((value) => {
      const newValue = selected.value === value ? null : value;
      selected.value = newValue;
    }, "pickGpu");
    return (_ctx, _cache) => {
      return openBlock(), createElementBlock("div", _hoisted_1$4, [
        createBaseVNode("div", _hoisted_2$4, [
          createBaseVNode("h2", _hoisted_3$4, toDisplayString(_ctx.$t("install.gpuSelection.selectGpu")), 1),
          createBaseVNode("p", _hoisted_4$4, toDisplayString(_ctx.$t("install.gpuSelection.selectGpuDescription")) + ": ", 1),
          createBaseVNode("div", {
            class: normalizeClass(["flex gap-2 text-center transition-opacity", { selected: selected.value }])
          }, [
            unref(platform) !== "darwin" ? (openBlock(), createElementBlock("div", {
              key: 0,
              class: normalizeClass(["gpu-button", { selected: selected.value === "nvidia" }]),
              role: "button",
              onClick: _cache[0] || (_cache[0] = ($event) => pickGpu("nvidia"))
            }, _cache[4] || (_cache[4] = [
              createBaseVNode("img", {
                class: "m-12",
                alt: "NVIDIA logo",
                width: "196",
                height: "32",
                src: _imports_0
              }, null, -1)
            ]), 2)) : createCommentVNode("", true),
            unref(platform) === "darwin" ? (openBlock(), createElementBlock("div", {
              key: 1,
              class: normalizeClass(["gpu-button", { selected: selected.value === "mps" }]),
              role: "button",
              onClick: _cache[1] || (_cache[1] = ($event) => pickGpu("mps"))
            }, _cache[5] || (_cache[5] = [
              createBaseVNode("img", {
                class: "rounded-lg hover-brighten",
                alt: "Apple Metal Performance Shaders Logo",
                width: "292",
                ratio: "",
                src: _imports_1
              }, null, -1)
            ]), 2)) : createCommentVNode("", true),
            createBaseVNode("div", {
              class: normalizeClass(["gpu-button", { selected: selected.value === "unsupported" }]),
              role: "button",
              onClick: _cache[2] || (_cache[2] = ($event) => pickGpu("unsupported"))
            }, _cache[6] || (_cache[6] = [
              createBaseVNode("img", {
                class: "m-12",
                alt: "Manual configuration",
                width: "196",
                src: _imports_2
              }, null, -1)
            ]), 2)
          ], 2),
          selected.value === "nvidia" ? (openBlock(), createElementBlock("p", _hoisted_5$2, [
            createVNode(unref(script$3), {
              icon: "pi pi-check",
              severity: "success",
              value: "CUDA"
            }),
            createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.nvidiaDescription")), 1)
          ])) : createCommentVNode("", true),
          selected.value === "mps" ? (openBlock(), createElementBlock("p", _hoisted_6$2, [
            createVNode(unref(script$3), {
              icon: "pi pi-check",
              severity: "success",
              value: "MPS"
            }),
            createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.mpsDescription")), 1)
          ])) : createCommentVNode("", true),
          selected.value === "unsupported" ? (openBlock(), createElementBlock("div", _hoisted_7$2, [
            createBaseVNode("p", _hoisted_8$2, [
              createVNode(unref(script$3), {
                icon: "pi pi-exclamation-triangle",
                severity: "warn",
                value: unref(t2)("icon.exclamation-triangle")
              }, null, 8, ["value"]),
              createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.customSkipsPython")), 1)
            ]),
            createBaseVNode("ul", null, [
              createBaseVNode("li", null, [
                createBaseVNode("strong", null, toDisplayString(_ctx.$t("install.gpuSelection.customComfyNeedsPython")), 1)
              ]),
              createBaseVNode("li", null, toDisplayString(_ctx.$t("install.gpuSelection.customManualVenv")), 1),
              createBaseVNode("li", null, toDisplayString(_ctx.$t("install.gpuSelection.customInstallRequirements")), 1),
              createBaseVNode("li", null, toDisplayString(_ctx.$t("install.gpuSelection.customMayNotWork")), 1)
            ])
          ])) : createCommentVNode("", true),
          selected.value === "cpu" ? (openBlock(), createElementBlock("div", _hoisted_9$2, [
            createBaseVNode("p", _hoisted_10$2, [
              createVNode(unref(script$3), {
                icon: "pi pi-exclamation-triangle",
                severity: "warn",
                value: unref(t2)("icon.exclamation-triangle")
              }, null, 8, ["value"]),
              createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.cpuModeDescription")), 1)
            ]),
            createBaseVNode("p", _hoisted_11$2, toDisplayString(_ctx.$t("install.gpuSelection.cpuModeDescription2")), 1)
          ])) : createCommentVNode("", true)
        ]),
        createBaseVNode("div", {
          class: normalizeClass(["transition-opacity flex gap-3 h-0", {
            "opacity-40": selected.value && selected.value !== "cpu"
          }])
        }, [
          createVNode(unref(script), {
            modelValue: cpuMode.value,
            "onUpdate:modelValue": _cache[3] || (_cache[3] = ($event) => cpuMode.value = $event),
            inputId: "cpu-mode",
            class: "-translate-y-40"
          }, null, 8, ["modelValue"]),
          createBaseVNode("label", _hoisted_12$2, toDisplayString(_ctx.$t("install.gpuSelection.enableCpuMode")), 1)
        ], 2)
      ]);
    };
  }
});
const GpuPicker = /* @__PURE__ */ _export_sfc(_sfc_main$5, [["__scopeId", "data-v-79125ff6"]]);
const _hoisted_1$3 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$3 = { class: "flex flex-col gap-4" };
const _hoisted_3$3 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$3 = { class: "text-neutral-400 my-0" };
const _hoisted_5$1 = { class: "flex gap-2" };
const _hoisted_6$1 = { class: "bg-neutral-800 p-4 rounded-lg" };
const _hoisted_7$1 = { class: "text-lg font-medium mt-0 mb-3 text-neutral-100" };
const _hoisted_8$1 = { class: "flex flex-col gap-2" };
const _hoisted_9$1 = { class: "flex items-center gap-2" };
const _hoisted_10$1 = { class: "text-neutral-200" };
const _hoisted_11$1 = { class: "pi pi-info-circle" };
const _hoisted_12$1 = { class: "flex items-center gap-2" };
const _hoisted_13 = { class: "text-neutral-200" };
const _hoisted_14 = { class: "pi pi-info-circle" };
const _sfc_main$4 = /* @__PURE__ */ defineComponent({
  __name: "InstallLocationPicker",
  props: {
    "installPath": { required: true },
    "installPathModifiers": {},
    "pathError": { required: true },
    "pathErrorModifiers": {}
  },
  emits: ["update:installPath", "update:pathError"],
  setup(__props) {
    const { t: t2 } = useI18n();
    const installPath = useModel(__props, "installPath");
    const pathError = useModel(__props, "pathError");
    const pathExists = ref(false);
    const appData = ref("");
    const appPath = ref("");
    const electron = electronAPI();
    onMounted(async () => {
      const paths = await electron.getSystemPaths();
      appData.value = paths.appData;
      appPath.value = paths.appPath;
      installPath.value = paths.defaultInstallPath;
      await validatePath(paths.defaultInstallPath);
    });
    const validatePath = /* @__PURE__ */ __name(async (path) => {
      try {
        pathError.value = "";
        pathExists.value = false;
        const validation = await electron.validateInstallPath(path);
        if (!validation.isValid) {
          const errors = [];
          if (validation.cannotWrite) errors.push(t2("install.cannotWrite"));
          if (validation.freeSpace < validation.requiredSpace) {
            const requiredGB = validation.requiredSpace / 1024 / 1024 / 1024;
            errors.push(`${t2("install.insufficientFreeSpace")}: ${requiredGB} GB`);
          }
          if (validation.parentMissing) errors.push(t2("install.parentMissing"));
          if (validation.error)
            errors.push(`${t2("install.unhandledError")}: ${validation.error}`);
          pathError.value = errors.join("\n");
        }
        if (validation.exists) pathExists.value = true;
      } catch (error) {
        pathError.value = t2("install.pathValidationFailed");
      }
    }, "validatePath");
    const browsePath = /* @__PURE__ */ __name(async () => {
      try {
        const result = await electron.showDirectoryPicker();
        if (result) {
          installPath.value = result;
          await validatePath(result);
        }
      } catch (error) {
        pathError.value = t2("install.failedToSelectDirectory");
      }
    }, "browsePath");
    return (_ctx, _cache) => {
      const _directive_tooltip = resolveDirective("tooltip");
      return openBlock(), createElementBlock("div", _hoisted_1$3, [
        createBaseVNode("div", _hoisted_2$3, [
          createBaseVNode("h2", _hoisted_3$3, toDisplayString(_ctx.$t("install.chooseInstallationLocation")), 1),
          createBaseVNode("p", _hoisted_4$3, toDisplayString(_ctx.$t("install.installLocationDescription")), 1),
          createBaseVNode("div", _hoisted_5$1, [
            createVNode(unref(script$6), { class: "flex-1" }, {
              default: withCtx(() => [
                createVNode(unref(script$4), {
                  modelValue: installPath.value,
                  "onUpdate:modelValue": [
                    _cache[0] || (_cache[0] = ($event) => installPath.value = $event),
                    validatePath
                  ],
                  class: normalizeClass(["w-full", { "p-invalid": pathError.value }])
                }, null, 8, ["modelValue", "class"]),
                withDirectives(createVNode(unref(script$5), { class: "pi pi-info-circle" }, null, 512), [
                  [_directive_tooltip, _ctx.$t("install.installLocationTooltip")]
                ])
              ]),
              _: 1
            }),
            createVNode(unref(script$7), {
              icon: "pi pi-folder",
              onClick: browsePath,
              class: "w-12"
            })
          ]),
          pathError.value ? (openBlock(), createBlock(unref(script$8), {
            key: 0,
            severity: "error",
            class: "whitespace-pre-line"
          }, {
            default: withCtx(() => [
              createTextVNode(toDisplayString(pathError.value), 1)
            ]),
            _: 1
          })) : createCommentVNode("", true),
          pathExists.value ? (openBlock(), createBlock(unref(script$8), {
            key: 1,
            severity: "warn"
          }, {
            default: withCtx(() => [
              createTextVNode(toDisplayString(_ctx.$t("install.pathExists")), 1)
            ]),
            _: 1
          })) : createCommentVNode("", true)
        ]),
        createBaseVNode("div", _hoisted_6$1, [
          createBaseVNode("h3", _hoisted_7$1, toDisplayString(_ctx.$t("install.systemLocations")), 1),
          createBaseVNode("div", _hoisted_8$1, [
            createBaseVNode("div", _hoisted_9$1, [
              _cache[1] || (_cache[1] = createBaseVNode("i", { class: "pi pi-folder text-neutral-400" }, null, -1)),
              _cache[2] || (_cache[2] = createBaseVNode("span", { class: "text-neutral-400" }, "App Data:", -1)),
              createBaseVNode("span", _hoisted_10$1, toDisplayString(appData.value), 1),
              withDirectives(createBaseVNode("span", _hoisted_11$1, null, 512), [
                [_directive_tooltip, _ctx.$t("install.appDataLocationTooltip")]
              ])
            ]),
            createBaseVNode("div", _hoisted_12$1, [
              _cache[3] || (_cache[3] = createBaseVNode("i", { class: "pi pi-desktop text-neutral-400" }, null, -1)),
              _cache[4] || (_cache[4] = createBaseVNode("span", { class: "text-neutral-400" }, "App Path:", -1)),
              createBaseVNode("span", _hoisted_13, toDisplayString(appPath.value), 1),
              withDirectives(createBaseVNode("span", _hoisted_14, null, 512), [
                [_directive_tooltip, _ctx.$t("install.appPathLocationTooltip")]
              ])
            ])
          ])
        ])
      ]);
    };
  }
});
const _hoisted_1$2 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$2 = { class: "flex flex-col gap-4" };
const _hoisted_3$2 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$2 = { class: "text-neutral-400 my-0" };
const _hoisted_5 = { class: "flex gap-2" };
const _hoisted_6 = {
  key: 0,
  class: "flex flex-col gap-4 bg-neutral-800 p-4 rounded-lg"
};
const _hoisted_7 = { class: "text-lg mt-0 font-medium text-neutral-100" };
const _hoisted_8 = { class: "flex flex-col gap-3" };
|
||||
const _hoisted_9 = ["onClick"];
|
||||
const _hoisted_10 = ["for"];
|
||||
const _hoisted_11 = { class: "text-sm text-neutral-400 my-1" };
|
||||
const _hoisted_12 = {
|
||||
key: 1,
|
||||
class: "text-neutral-400 italic"
|
||||
};
|
||||
const _sfc_main$3 = /* @__PURE__ */ defineComponent({
|
||||
__name: "MigrationPicker",
|
||||
props: {
|
||||
"sourcePath": { required: false },
|
||||
"sourcePathModifiers": {},
|
||||
"migrationItemIds": {
|
||||
required: false
|
||||
},
|
||||
"migrationItemIdsModifiers": {}
|
||||
},
|
||||
emits: ["update:sourcePath", "update:migrationItemIds"],
|
||||
setup(__props) {
|
||||
const { t: t2 } = useI18n();
|
||||
const electron = electronAPI();
|
||||
const sourcePath = useModel(__props, "sourcePath");
|
||||
const migrationItemIds = useModel(__props, "migrationItemIds");
|
||||
const migrationItems = ref(
|
||||
MigrationItems.map((item) => ({
|
||||
...item,
|
||||
selected: true
|
||||
}))
|
||||
);
|
||||
const pathError = ref("");
|
||||
const isValidSource = computed(
|
||||
() => sourcePath.value !== "" && pathError.value === ""
|
||||
);
|
||||
const validateSource = /* @__PURE__ */ __name(async (sourcePath2) => {
|
||||
if (!sourcePath2) {
|
||||
pathError.value = "";
|
||||
return;
|
||||
}
|
||||
try {
|
||||
pathError.value = "";
|
||||
const validation = await electron.validateComfyUISource(sourcePath2);
|
||||
if (!validation.isValid) pathError.value = validation.error;
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
pathError.value = t2("install.pathValidationFailed");
|
||||
}
|
||||
}, "validateSource");
|
||||
const browsePath = /* @__PURE__ */ __name(async () => {
|
||||
try {
|
||||
const result = await electron.showDirectoryPicker();
|
||||
if (result) {
|
||||
sourcePath.value = result;
|
||||
await validateSource(result);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
pathError.value = t2("install.failedToSelectDirectory");
|
||||
}
|
||||
}, "browsePath");
|
||||
watchEffect(() => {
|
||||
migrationItemIds.value = migrationItems.value.filter((item) => item.selected).map((item) => item.id);
|
||||
});
|
||||
return (_ctx, _cache) => {
|
||||
return openBlock(), createElementBlock("div", _hoisted_1$2, [
|
||||
createBaseVNode("div", _hoisted_2$2, [
|
||||
createBaseVNode("h2", _hoisted_3$2, toDisplayString(_ctx.$t("install.migrateFromExistingInstallation")), 1),
|
||||
createBaseVNode("p", _hoisted_4$2, toDisplayString(_ctx.$t("install.migrationSourcePathDescription")), 1),
|
||||
createBaseVNode("div", _hoisted_5, [
|
||||
createVNode(unref(script$4), {
|
||||
modelValue: sourcePath.value,
|
||||
"onUpdate:modelValue": [
|
||||
_cache[0] || (_cache[0] = ($event) => sourcePath.value = $event),
|
||||
validateSource
|
||||
],
|
||||
placeholder: "Select existing ComfyUI installation (optional)",
|
||||
class: normalizeClass(["flex-1", { "p-invalid": pathError.value }])
|
||||
}, null, 8, ["modelValue", "class"]),
|
||||
createVNode(unref(script$7), {
|
||||
icon: "pi pi-folder",
|
||||
onClick: browsePath,
|
||||
class: "w-12"
|
||||
})
|
||||
]),
|
||||
pathError.value ? (openBlock(), createBlock(unref(script$8), {
|
||||
key: 0,
|
||||
severity: "error"
|
||||
}, {
|
||||
default: withCtx(() => [
|
||||
createTextVNode(toDisplayString(pathError.value), 1)
|
||||
]),
|
||||
_: 1
|
||||
})) : createCommentVNode("", true)
|
||||
]),
|
||||
isValidSource.value ? (openBlock(), createElementBlock("div", _hoisted_6, [
|
||||
createBaseVNode("h3", _hoisted_7, toDisplayString(_ctx.$t("install.selectItemsToMigrate")), 1),
|
||||
createBaseVNode("div", _hoisted_8, [
|
||||
(openBlock(true), createElementBlock(Fragment, null, renderList(migrationItems.value, (item) => {
|
||||
return openBlock(), createElementBlock("div", {
|
||||
key: item.id,
|
||||
class: "flex items-center gap-3 p-2 hover:bg-neutral-700 rounded",
|
||||
onClick: /* @__PURE__ */ __name(($event) => item.selected = !item.selected, "onClick")
|
||||
}, [
|
||||
createVNode(unref(script$9), {
|
||||
modelValue: item.selected,
|
||||
"onUpdate:modelValue": /* @__PURE__ */ __name(($event) => item.selected = $event, "onUpdate:modelValue"),
|
||||
inputId: item.id,
|
||||
binary: true,
|
||||
onClick: _cache[1] || (_cache[1] = withModifiers(() => {
|
||||
}, ["stop"]))
|
||||
}, null, 8, ["modelValue", "onUpdate:modelValue", "inputId"]),
|
||||
createBaseVNode("div", null, [
|
||||
createBaseVNode("label", {
|
||||
for: item.id,
|
||||
class: "text-neutral-200 font-medium"
|
||||
}, toDisplayString(item.label), 9, _hoisted_10),
|
||||
createBaseVNode("p", _hoisted_11, toDisplayString(item.description), 1)
|
||||
])
|
||||
], 8, _hoisted_9);
|
||||
}), 128))
|
||||
])
|
||||
])) : (openBlock(), createElementBlock("div", _hoisted_12, toDisplayString(_ctx.$t("install.migrationOptional")), 1))
|
||||
]);
|
||||
};
|
||||
}
|
||||
});
|
||||
const _hoisted_1$1 = { class: "flex flex-col items-center gap-4" };
|
||||
const _hoisted_2$1 = { class: "w-full" };
|
||||
const _hoisted_3$1 = { class: "text-lg font-medium text-neutral-100" };
|
||||
const _hoisted_4$1 = { class: "text-sm text-neutral-400 mt-1" };
|
||||
const _sfc_main$2 = /* @__PURE__ */ defineComponent({
|
||||
__name: "MirrorItem",
|
||||
props: /* @__PURE__ */ mergeModels({
|
||||
item: {}
|
||||
}, {
|
||||
"modelValue": { required: true },
|
||||
"modelModifiers": {}
|
||||
}),
|
||||
emits: /* @__PURE__ */ mergeModels(["state-change"], ["update:modelValue"]),
|
||||
setup(__props, { emit: __emit }) {
|
||||
const emit = __emit;
|
||||
const modelValue = useModel(__props, "modelValue");
|
||||
const validationState = ref(ValidationState.IDLE);
|
||||
const normalizedSettingId = computed(() => {
|
||||
return normalizeI18nKey(__props.item.settingId);
|
||||
});
|
||||
onMounted(() => {
|
||||
modelValue.value = __props.item.mirror;
|
||||
});
|
||||
watch(validationState, (newState) => {
|
||||
emit("state-change", newState);
|
||||
if (newState === ValidationState.INVALID && modelValue.value === __props.item.mirror) {
|
||||
modelValue.value = __props.item.fallbackMirror;
|
||||
}
|
||||
});
|
||||
return (_ctx, _cache) => {
|
||||
const _component_UrlInput = _sfc_main$7;
|
||||
return openBlock(), createElementBlock("div", _hoisted_1$1, [
|
||||
createBaseVNode("div", _hoisted_2$1, [
|
||||
createBaseVNode("h3", _hoisted_3$1, toDisplayString(_ctx.$t(`settings.${normalizedSettingId.value}.name`)), 1),
|
||||
createBaseVNode("p", _hoisted_4$1, toDisplayString(_ctx.$t(`settings.${normalizedSettingId.value}.tooltip`)), 1)
|
||||
]),
|
||||
createVNode(_component_UrlInput, {
|
||||
modelValue: modelValue.value,
|
||||
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => modelValue.value = $event),
|
||||
"validate-url-fn": /* @__PURE__ */ __name((mirror) => unref(checkMirrorReachable)(mirror + (_ctx.item.validationPathSuffix ?? "")), "validate-url-fn"),
|
||||
onStateChange: _cache[1] || (_cache[1] = ($event) => validationState.value = $event)
|
||||
}, null, 8, ["modelValue", "validate-url-fn"])
|
||||
]);
|
||||
};
|
||||
}
|
||||
});
|
||||
const _sfc_main$1 = /* @__PURE__ */ defineComponent({
|
||||
__name: "MirrorsConfiguration",
|
||||
props: /* @__PURE__ */ mergeModels({
|
||||
device: {}
|
||||
}, {
|
||||
"pythonMirror": { required: true },
|
||||
"pythonMirrorModifiers": {},
|
||||
"pypiMirror": { required: true },
|
||||
"pypiMirrorModifiers": {},
|
||||
"torchMirror": { required: true },
|
||||
"torchMirrorModifiers": {}
|
||||
}),
|
||||
emits: ["update:pythonMirror", "update:pypiMirror", "update:torchMirror"],
|
||||
setup(__props) {
|
||||
const showMirrorInputs = ref(false);
|
||||
const pythonMirror = useModel(__props, "pythonMirror");
|
||||
const pypiMirror = useModel(__props, "pypiMirror");
|
||||
const torchMirror = useModel(__props, "torchMirror");
|
||||
const getTorchMirrorItem = /* @__PURE__ */ __name((device) => {
|
||||
const settingId = "Comfy-Desktop.UV.TorchInstallMirror";
|
||||
switch (device) {
|
||||
case "mps":
|
||||
return {
|
||||
settingId,
|
||||
mirror: NIGHTLY_CPU_TORCH_URL,
|
||||
fallbackMirror: NIGHTLY_CPU_TORCH_URL
|
||||
};
|
||||
case "nvidia":
|
||||
return {
|
||||
settingId,
|
||||
mirror: CUDA_TORCH_URL,
|
||||
fallbackMirror: CUDA_TORCH_URL
|
||||
};
|
||||
case "cpu":
|
||||
default:
|
||||
return {
|
||||
settingId,
|
||||
mirror: PYPI_MIRROR.mirror,
|
||||
fallbackMirror: PYPI_MIRROR.fallbackMirror
|
||||
};
|
||||
}
|
||||
}, "getTorchMirrorItem");
|
||||
const mirrors = computed(() => [
|
||||
[PYTHON_MIRROR, pythonMirror],
|
||||
[PYPI_MIRROR, pypiMirror],
|
||||
[getTorchMirrorItem(__props.device), torchMirror]
|
||||
]);
|
||||
const validationStates = ref(
|
||||
mirrors.value.map(() => ValidationState.IDLE)
|
||||
);
|
||||
const validationState = computed(() => {
|
||||
return mergeValidationStates(validationStates.value);
|
||||
});
|
||||
const validationStateTooltip = computed(() => {
|
||||
switch (validationState.value) {
|
||||
case ValidationState.INVALID:
|
||||
return t("install.settings.mirrorsUnreachable");
|
||||
case ValidationState.VALID:
|
||||
return t("install.settings.mirrorsReachable");
|
||||
default:
|
||||
return t("install.settings.checkingMirrors");
|
||||
}
|
||||
});
|
||||
return (_ctx, _cache) => {
|
||||
const _directive_tooltip = resolveDirective("tooltip");
|
||||
return openBlock(), createBlock(unref(script$a), {
|
||||
header: _ctx.$t("install.settings.mirrorSettings"),
|
||||
toggleable: "",
|
||||
collapsed: !showMirrorInputs.value,
|
||||
"pt:root": "bg-neutral-800 border-none w-[600px]"
|
||||
}, {
|
||||
icons: withCtx(() => [
|
||||
withDirectives(createBaseVNode("i", {
|
||||
class: normalizeClass({
|
||||
"pi pi-spin pi-spinner text-neutral-400": validationState.value === unref(ValidationState).LOADING,
|
||||
"pi pi-check text-green-500": validationState.value === unref(ValidationState).VALID,
|
||||
"pi pi-times text-red-500": validationState.value === unref(ValidationState).INVALID
|
||||
})
|
||||
}, null, 2), [
|
||||
[_directive_tooltip, validationStateTooltip.value]
|
||||
])
|
||||
]),
|
||||
default: withCtx(() => [
|
||||
(openBlock(true), createElementBlock(Fragment, null, renderList(mirrors.value, ([item, modelValue], index) => {
|
||||
return openBlock(), createElementBlock(Fragment, {
|
||||
key: item.settingId + item.mirror
|
||||
}, [
|
||||
index > 0 ? (openBlock(), createBlock(unref(script$1), { key: 0 })) : createCommentVNode("", true),
|
||||
createVNode(_sfc_main$2, {
|
||||
item,
|
||||
modelValue: modelValue.value,
|
||||
"onUpdate:modelValue": /* @__PURE__ */ __name(($event) => modelValue.value = $event, "onUpdate:modelValue"),
|
||||
onStateChange: /* @__PURE__ */ __name(($event) => validationStates.value[index] = $event, "onStateChange")
|
||||
}, null, 8, ["item", "modelValue", "onUpdate:modelValue", "onStateChange"])
|
||||
], 64);
|
||||
}), 128))
|
||||
]),
|
||||
_: 1
|
||||
}, 8, ["header", "collapsed"]);
|
||||
};
|
||||
}
|
||||
});
|
||||
const _hoisted_1 = { class: "flex pt-6 justify-end" };
|
||||
const _hoisted_2 = { class: "flex pt-6 justify-between" };
|
||||
const _hoisted_3 = { class: "flex pt-6 justify-between" };
|
||||
const _hoisted_4 = { class: "flex mt-6 justify-between" };
|
||||
const _sfc_main = /* @__PURE__ */ defineComponent({
|
||||
__name: "InstallView",
|
||||
setup(__props) {
|
||||
const device = ref(null);
|
||||
const installPath = ref("");
|
||||
const pathError = ref("");
|
||||
const migrationSourcePath = ref("");
|
||||
const migrationItemIds = ref([]);
|
||||
const autoUpdate = ref(true);
|
||||
const allowMetrics = ref(true);
|
||||
const pythonMirror = ref("");
|
||||
const pypiMirror = ref("");
|
||||
const torchMirror = ref("");
|
||||
const highestStep = ref(0);
|
||||
const handleStepChange = /* @__PURE__ */ __name((value) => {
|
||||
setHighestStep(value);
|
||||
electronAPI().Events.trackEvent("install_stepper_change", {
|
||||
step: value
|
||||
});
|
||||
}, "handleStepChange");
|
||||
const setHighestStep = /* @__PURE__ */ __name((value) => {
|
||||
const int = typeof value === "number" ? value : parseInt(value, 10);
|
||||
if (!isNaN(int) && int > highestStep.value) highestStep.value = int;
|
||||
}, "setHighestStep");
|
||||
const hasError = computed(() => pathError.value !== "");
|
||||
const noGpu = computed(() => typeof device.value !== "string");
|
||||
const electron = electronAPI();
|
||||
const router = useRouter();
|
||||
const install = /* @__PURE__ */ __name(() => {
|
||||
const options = {
|
||||
installPath: installPath.value,
|
||||
autoUpdate: autoUpdate.value,
|
||||
allowMetrics: allowMetrics.value,
|
||||
migrationSourcePath: migrationSourcePath.value,
|
||||
migrationItemIds: toRaw(migrationItemIds.value),
|
||||
pythonMirror: pythonMirror.value,
|
||||
pypiMirror: pypiMirror.value,
|
||||
torchMirror: torchMirror.value,
|
||||
device: device.value
|
||||
};
|
||||
electron.installComfyUI(options);
|
||||
const nextPage = options.device === "unsupported" ? "/manual-configuration" : "/server-start";
|
||||
router.push(nextPage);
|
||||
}, "install");
|
||||
onMounted(async () => {
|
||||
if (!electron) return;
|
||||
const detectedGpu = await electron.Config.getDetectedGpu();
|
||||
if (detectedGpu === "mps" || detectedGpu === "nvidia") {
|
||||
device.value = detectedGpu;
|
||||
}
|
||||
electronAPI().Events.trackEvent("install_stepper_change", {
|
||||
step: "0",
|
||||
gpu: detectedGpu
|
||||
});
|
||||
});
|
||||
return (_ctx, _cache) => {
|
||||
return openBlock(), createBlock(_sfc_main$8, { dark: "" }, {
|
||||
default: withCtx(() => [
|
||||
createVNode(unref(script$f), {
|
||||
class: "h-full p-8 2xl:p-16",
|
||||
value: "0",
|
||||
"onUpdate:value": handleStepChange
|
||||
}, {
|
||||
default: withCtx(() => [
|
||||
createVNode(unref(script$b), { class: "select-none" }, {
|
||||
default: withCtx(() => [
|
||||
createVNode(unref(script$c), { value: "0" }, {
|
||||
default: withCtx(() => [
|
||||
createTextVNode(toDisplayString(_ctx.$t("install.gpu")), 1)
|
||||
]),
|
||||
_: 1
|
||||
}),
|
||||
createVNode(unref(script$c), {
|
||||
value: "1",
|
||||
disabled: noGpu.value
|
||||
}, {
|
||||
default: withCtx(() => [
|
||||
createTextVNode(toDisplayString(_ctx.$t("install.installLocation")), 1)
|
||||
]),
|
||||
_: 1
|
||||
}, 8, ["disabled"]),
|
||||
createVNode(unref(script$c), {
|
||||
value: "2",
|
||||
disabled: noGpu.value || hasError.value || highestStep.value < 1
|
||||
}, {
|
||||
default: withCtx(() => [
|
||||
createTextVNode(toDisplayString(_ctx.$t("install.migration")), 1)
|
||||
]),
|
||||
_: 1
|
||||
}, 8, ["disabled"]),
|
||||
createVNode(unref(script$c), {
|
||||
value: "3",
|
||||
disabled: noGpu.value || hasError.value || highestStep.value < 2
|
||||
}, {
|
||||
default: withCtx(() => [
|
||||
createTextVNode(toDisplayString(_ctx.$t("install.desktopSettings")), 1)
|
||||
]),
|
||||
_: 1
|
||||
}, 8, ["disabled"])
|
||||
]),
|
||||
_: 1
|
||||
}),
|
||||
createVNode(unref(script$d), null, {
|
||||
default: withCtx(() => [
|
||||
createVNode(unref(script$e), { value: "0" }, {
|
||||
default: withCtx(({ activateCallback }) => [
|
||||
createVNode(GpuPicker, {
|
||||
device: device.value,
|
||||
"onUpdate:device": _cache[0] || (_cache[0] = ($event) => device.value = $event)
|
||||
}, null, 8, ["device"]),
|
||||
createBaseVNode("div", _hoisted_1, [
|
||||
createVNode(unref(script$7), {
|
||||
label: _ctx.$t("g.next"),
|
||||
icon: "pi pi-arrow-right",
|
||||
iconPos: "right",
|
||||
onClick: /* @__PURE__ */ __name(($event) => activateCallback("1"), "onClick"),
|
||||
disabled: typeof device.value !== "string"
|
||||
}, null, 8, ["label", "onClick", "disabled"])
|
||||
])
|
||||
]),
|
||||
_: 1
|
||||
}),
|
||||
createVNode(unref(script$e), { value: "1" }, {
|
||||
default: withCtx(({ activateCallback }) => [
|
||||
createVNode(_sfc_main$4, {
|
||||
installPath: installPath.value,
|
||||
"onUpdate:installPath": _cache[1] || (_cache[1] = ($event) => installPath.value = $event),
|
||||
pathError: pathError.value,
|
||||
"onUpdate:pathError": _cache[2] || (_cache[2] = ($event) => pathError.value = $event)
|
||||
}, null, 8, ["installPath", "pathError"]),
|
||||
createBaseVNode("div", _hoisted_2, [
|
||||
createVNode(unref(script$7), {
|
||||
label: _ctx.$t("g.back"),
|
||||
severity: "secondary",
|
||||
icon: "pi pi-arrow-left",
|
||||
onClick: /* @__PURE__ */ __name(($event) => activateCallback("0"), "onClick")
|
||||
}, null, 8, ["label", "onClick"]),
|
||||
createVNode(unref(script$7), {
|
||||
label: _ctx.$t("g.next"),
|
||||
icon: "pi pi-arrow-right",
|
||||
iconPos: "right",
|
||||
onClick: /* @__PURE__ */ __name(($event) => activateCallback("2"), "onClick"),
|
||||
disabled: pathError.value !== ""
|
||||
}, null, 8, ["label", "onClick", "disabled"])
|
||||
])
|
||||
]),
|
||||
_: 1
|
||||
}),
|
||||
createVNode(unref(script$e), { value: "2" }, {
|
||||
default: withCtx(({ activateCallback }) => [
|
||||
createVNode(_sfc_main$3, {
|
||||
sourcePath: migrationSourcePath.value,
|
||||
"onUpdate:sourcePath": _cache[3] || (_cache[3] = ($event) => migrationSourcePath.value = $event),
|
||||
migrationItemIds: migrationItemIds.value,
|
||||
"onUpdate:migrationItemIds": _cache[4] || (_cache[4] = ($event) => migrationItemIds.value = $event)
|
||||
}, null, 8, ["sourcePath", "migrationItemIds"]),
|
||||
createBaseVNode("div", _hoisted_3, [
|
||||
createVNode(unref(script$7), {
|
||||
label: _ctx.$t("g.back"),
|
||||
severity: "secondary",
|
||||
icon: "pi pi-arrow-left",
|
||||
onClick: /* @__PURE__ */ __name(($event) => activateCallback("1"), "onClick")
|
||||
}, null, 8, ["label", "onClick"]),
|
||||
createVNode(unref(script$7), {
|
||||
label: _ctx.$t("g.next"),
|
||||
icon: "pi pi-arrow-right",
|
||||
iconPos: "right",
|
||||
onClick: /* @__PURE__ */ __name(($event) => activateCallback("3"), "onClick")
|
||||
}, null, 8, ["label", "onClick"])
|
||||
])
|
||||
]),
|
||||
_: 1
|
||||
}),
|
||||
createVNode(unref(script$e), { value: "3" }, {
|
||||
default: withCtx(({ activateCallback }) => [
|
||||
createVNode(_sfc_main$6, {
|
||||
autoUpdate: autoUpdate.value,
|
||||
"onUpdate:autoUpdate": _cache[5] || (_cache[5] = ($event) => autoUpdate.value = $event),
|
||||
allowMetrics: allowMetrics.value,
|
||||
"onUpdate:allowMetrics": _cache[6] || (_cache[6] = ($event) => allowMetrics.value = $event)
|
||||
}, null, 8, ["autoUpdate", "allowMetrics"]),
|
||||
createVNode(_sfc_main$1, {
|
||||
device: device.value,
|
||||
pythonMirror: pythonMirror.value,
|
||||
"onUpdate:pythonMirror": _cache[7] || (_cache[7] = ($event) => pythonMirror.value = $event),
|
||||
pypiMirror: pypiMirror.value,
|
||||
"onUpdate:pypiMirror": _cache[8] || (_cache[8] = ($event) => pypiMirror.value = $event),
|
||||
torchMirror: torchMirror.value,
|
||||
"onUpdate:torchMirror": _cache[9] || (_cache[9] = ($event) => torchMirror.value = $event),
|
||||
class: "mt-6"
|
||||
}, null, 8, ["device", "pythonMirror", "pypiMirror", "torchMirror"]),
|
||||
createBaseVNode("div", _hoisted_4, [
|
||||
createVNode(unref(script$7), {
|
||||
label: _ctx.$t("g.back"),
|
||||
severity: "secondary",
|
||||
icon: "pi pi-arrow-left",
|
||||
onClick: /* @__PURE__ */ __name(($event) => activateCallback("2"), "onClick")
|
||||
}, null, 8, ["label", "onClick"]),
|
||||
createVNode(unref(script$7), {
|
||||
label: _ctx.$t("g.install"),
|
||||
icon: "pi pi-check",
|
||||
iconPos: "right",
|
||||
disabled: hasError.value,
|
||||
onClick: _cache[10] || (_cache[10] = ($event) => install())
|
||||
}, null, 8, ["label", "disabled"])
|
||||
])
|
||||
]),
|
||||
_: 1
|
||||
})
|
||||
]),
|
||||
_: 1
|
||||
})
|
||||
]),
|
||||
_: 1
|
||||
})
|
||||
]),
|
||||
_: 1
|
||||
});
|
||||
};
|
||||
}
|
||||
});
|
||||
const InstallView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-cd6731d2"]]);
|
||||
export {
|
||||
InstallView as default
|
||||
};
|
||||
//# sourceMappingURL=InstallView-CVZcZZXJ.js.map
|
||||
32
web/assets/InstallView-CwQdoH-C.css → web/assets/InstallView-DbJ2cGfL.css
generated
vendored
@@ -1,18 +1,20 @@

:root {
.p-tag[data-v-79125ff6] {
--p-tag-gap: 0.5rem;
}
.hover-brighten {
&[data-v-79125ff6] {
transition-property: color, background-color, border-color, text-decoration-color, fill, stroke;
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms;
transition-property: filter, box-shadow;
&:hover {
}
&[data-v-79125ff6]:hover {
filter: brightness(107%) contrast(105%);
box-shadow: 0 0 0.25rem #ffffff79;
}
}
.p-accordioncontent-content {
.p-accordioncontent-content[data-v-79125ff6] {
border-radius: 0.5rem;
--tw-bg-opacity: 1;
background-color: rgb(23 23 23 / var(--tw-bg-opacity));
@@ -21,14 +23,14 @@
transition-duration: 150ms;
}
div.selected {
.gpu-button:not(.selected) {
.gpu-button[data-v-79125ff6]:not(.selected) {
opacity: 0.5;
}
.gpu-button:not(.selected):hover {
.gpu-button[data-v-79125ff6]:not(.selected):hover {
opacity: 1;
}
}
.gpu-button {
.gpu-button[data-v-79125ff6] {
margin: 0px;
display: flex;
width: 50%;
@@ -43,37 +45,37 @@ div.selected {
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms;
}
.gpu-button:hover {
.gpu-button[data-v-79125ff6]:hover {
--tw-bg-opacity: 0.75;
}
.gpu-button {
&.selected {
&.selected[data-v-79125ff6] {
--tw-bg-opacity: 1;
background-color: rgb(64 64 64 / var(--tw-bg-opacity));
}
&.selected {
&.selected[data-v-79125ff6] {
--tw-bg-opacity: 0.5;
}
&.selected {
&.selected[data-v-79125ff6] {
opacity: 1;
}
&.selected:hover {
&.selected[data-v-79125ff6]:hover {
--tw-bg-opacity: 0.6;
}
}
.disabled {
.disabled[data-v-79125ff6] {
pointer-events: none;
opacity: 0.4;
}
.p-card-header {
.p-card-header[data-v-79125ff6] {
flex-grow: 1;
text-align: center;
}
.p-card-body {
.p-card-body[data-v-79125ff6] {
padding-top: 0px;
text-align: center;
}

[data-v-de33872d] .p-steppanel {
[data-v-cd6731d2] .p-steppanel {
background-color: transparent
}
14
web/assets/KeybindingPanel-Dc3C4lG1.js → web/assets/KeybindingPanel-CeHhC2F4.js
generated
vendored
@@ -1,10 +1,9 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, c as computed, o as openBlock, f as createElementBlock, F as Fragment, E as renderList, N as createVNode, M as withCtx, aE as createTextVNode, X as toDisplayString, j as unref, aI as script, I as createCommentVNode, ab as ref, cn as FilterMatchMode, a$ as useKeybindingStore, a2 as useCommandStore, a1 as useI18n, af as normalizeI18nKey, w as watchEffect, bs as useToast, r as resolveDirective, k as createBlock, co as SearchBox, H as createBaseVNode, l as script$2, av as script$4, bM as withModifiers, bZ as script$5, aP as script$6, i as withDirectives, cp as _sfc_main$2, aL as pushScopeId, aM as popScopeId, cq as KeyComboImpl, cr as KeybindingImpl, _ as _export_sfc } from "./index-DjNHn37O.js";
import { s as script$1, a as script$3 } from "./index-B5F0uxTQ.js";
import { u as useKeybindingService } from "./keybindingService-Bx7YdkXn.js";
import "./index-B-aVupP5.js";
import "./index-5HFeZax4.js";
import { d as defineComponent, c as computed, o as openBlock, f as createElementBlock, F as Fragment, D as renderList, k as createVNode, z as withCtx, a7 as createTextVNode, E as toDisplayString, j as unref, a4 as script, B as createCommentVNode, U as ref, dl as FilterMatchMode, an as useKeybindingStore, L as useCommandStore, K as useI18n, Y as normalizeI18nKey, w as watchEffect, aR as useToast, r as resolveDirective, y as createBlock, dm as SearchBox, m as createBaseVNode, l as script$2, bg as script$4, ar as withModifiers, bj as script$5, ab as script$6, i as withDirectives, dn as _sfc_main$2, dp as KeyComboImpl, dq as KeybindingImpl, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { g as script$1, h as script$3 } from "./index-BapOFhAR.js";
import { u as useKeybindingService } from "./keybindingService-DEgCutrm.js";
import "./index-DXE47DZl.js";
const _hoisted_1$1 = {
key: 0,
class: "px-2"
@@ -37,7 +36,6 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
};
}
});
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-2554ab36"), n = n(), popScopeId(), n), "_withScopeId");
const _hoisted_1 = { class: "actions invisible flex flex-row" };
const _hoisted_2 = ["title"];
const _hoisted_3 = { key: 1 };
@@ -248,7 +246,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
severity: "error"
}, {
default: withCtx(() => [
createTextVNode(" Keybinding already exists on "),
_cache[3] || (_cache[3] = createTextVNode(" Keybinding already exists on ")),
createVNode(unref(script), {
severity: "secondary",
value: existingKeybindingOnCombo.value.commandId
@@ -281,4 +279,4 @@ const KeybindingPanel = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "d
export {
KeybindingPanel as default
};
//# sourceMappingURL=KeybindingPanel-Dc3C4lG1.js.map
//# sourceMappingURL=KeybindingPanel-CeHhC2F4.js.map
87
web/assets/MaintenanceView-Bj5_Vr6o.css
generated
vendored
Normal file
@@ -0,0 +1,87 @@

.task-card-ok[data-v-c3bd7658] {
position: absolute;
right: -1rem;
bottom: -1rem;
grid-column: 1 / -1;
grid-row: 1 / -1;
--tw-text-opacity: 1;
color: rgb(150 206 76 / var(--tw-text-opacity));
opacity: 1;
transition-property: opacity;
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms;
font-size: 4rem;
text-shadow: 0.25rem 0 0.5rem black;
z-index: 10;
}
.p-card {
&[data-v-c3bd7658] {
transition-property: opacity;
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms;
--p-card-background: var(--p-button-secondary-background);
opacity: 0.9;
}
&.opacity-65[data-v-c3bd7658] {
opacity: 0.4;
}
&[data-v-c3bd7658]:hover {
opacity: 1;
}
}
[data-v-c3bd7658] .p-card-header {
z-index: 0;
}
[data-v-c3bd7658] .p-card-body {
z-index: 1;
flex-grow: 1;
justify-content: space-between;
}
.task-div {
> i[data-v-c3bd7658] {
pointer-events: none;
}
&:hover > i[data-v-c3bd7658] {
opacity: 0.2;
}
}

[data-v-74b78f7d] .p-tag {
--p-tag-gap: 0.375rem;
}
.backspan[data-v-74b78f7d]::before {
position: absolute;
margin: 0px;
color: var(--p-text-muted-color);
font-family: 'primeicons';
top: -2rem;
right: -2rem;
speak: none;
font-style: normal;
font-weight: normal;
font-variant: normal;
text-transform: none;
line-height: 1;
display: inline-block;
-webkit-font-smoothing: antialiased;
opacity: 0.02;
font-size: min(14rem, 90vw);
z-index: 0;
}
26033
web/assets/MaintenanceView-Df7CHNWW.js
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
@@ -1,7 +1,7 @@

:root {
.p-tag[data-v-dc169863] {
--p-tag-gap: 0.5rem;
}
.comfy-installer {
.comfy-installer[data-v-dc169863] {
margin-top: max(1rem, max(0px, calc((100vh - 42rem) * 0.5)));
}

@@ -1,9 +1,7 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, a1 as useI18n, ab as ref, m as onMounted, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, j as unref, aI as script, l as script$2, c0 as electronAPI } from "./index-DjNHn37O.js";
import { s as script$1 } from "./index-jXPKy3pP.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js";
import "./index-5HFeZax4.js";
import { d as defineComponent, K as useI18n, U as ref, p as onMounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, a4 as script, a$ as script$1, l as script$2, b5 as electronAPI, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = { class: "comfy-installer grow flex flex-col gap-4 text-neutral-300 max-w-110" };
const _hoisted_2 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_3 = { class: "m-1 text-neutral-300" };
@@ -69,7 +67,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
};
}
});
const ManualConfigurationView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-dc169863"]]);
export {
_sfc_main as default
ManualConfigurationView as default
};
//# sourceMappingURL=ManualConfigurationView-Bi_qHE-n.js.map
//# sourceMappingURL=ManualConfigurationView-Cz0_f_T-.js.map
86
web/assets/MetricsConsentView-B5NlgqrS.js
generated
vendored
Normal file
@@ -0,0 +1,86 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
import { d as defineComponent, aR as useToast, K as useI18n, U as ref, be as useRouter, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, a7 as createTextVNode, k as createVNode, j as unref, bn as script, l as script$1, b5 as electronAPI } from "./index-DqqhYDnY.js";
const _hoisted_1 = { class: "h-full p-8 2xl:p-16 flex flex-col items-center justify-center" };
const _hoisted_2 = { class: "bg-neutral-800 rounded-lg shadow-lg p-6 w-full max-w-[600px] flex flex-col gap-6" };
const _hoisted_3 = { class: "text-3xl font-semibold text-neutral-100" };
const _hoisted_4 = { class: "text-neutral-400" };
const _hoisted_5 = { class: "text-neutral-400" };
const _hoisted_6 = {
href: "https://comfy.org/privacy",
target: "_blank",
class: "text-blue-400 hover:text-blue-300 underline"
};
const _hoisted_7 = { class: "flex items-center gap-4" };
const _hoisted_8 = {
id: "metricsDescription",
class: "text-neutral-100"
};
const _hoisted_9 = { class: "flex pt-6 justify-end" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "MetricsConsentView",
setup(__props) {
const toast = useToast();
const { t } = useI18n();
const allowMetrics = ref(true);
const router = useRouter();
const isUpdating = ref(false);
const updateConsent = /* @__PURE__ */ __name(async () => {
isUpdating.value = true;
try {
await electronAPI().setMetricsConsent(allowMetrics.value);
} catch (error) {
toast.add({
severity: "error",
summary: t("install.errorUpdatingConsent"),
detail: t("install.errorUpdatingConsentDetail"),
life: 3e3
});
} finally {
isUpdating.value = false;
}
router.push("/");
}, "updateConsent");
return (_ctx, _cache) => {
const _component_BaseViewTemplate = _sfc_main$1;
return openBlock(), createBlock(_component_BaseViewTemplate, { dark: "" }, {
default: withCtx(() => [
createBaseVNode("div", _hoisted_1, [
createBaseVNode("div", _hoisted_2, [
createBaseVNode("h2", _hoisted_3, toDisplayString(_ctx.$t("install.helpImprove")), 1),
createBaseVNode("p", _hoisted_4, toDisplayString(_ctx.$t("install.updateConsent")), 1),
createBaseVNode("p", _hoisted_5, [
createTextVNode(toDisplayString(_ctx.$t("install.moreInfo")) + " ", 1),
createBaseVNode("a", _hoisted_6, toDisplayString(_ctx.$t("install.privacyPolicy")), 1),
_cache[1] || (_cache[1] = createTextVNode(". "))
]),
createBaseVNode("div", _hoisted_7, [
createVNode(unref(script), {
modelValue: allowMetrics.value,
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => allowMetrics.value = $event),
"aria-describedby": "metricsDescription"
}, null, 8, ["modelValue"]),
createBaseVNode("span", _hoisted_8, toDisplayString(allowMetrics.value ? _ctx.$t("install.metricsEnabled") : _ctx.$t("install.metricsDisabled")), 1)
]),
createBaseVNode("div", _hoisted_9, [
createVNode(unref(script$1), {
label: _ctx.$t("g.ok"),
icon: "pi pi-check",
loading: isUpdating.value,
iconPos: "right",
onClick: updateConsent
}, null, 8, ["label", "loading"])
])
])
])
]),
_: 1
});
};
}
});
export {
_sfc_main as default
};
//# sourceMappingURL=MetricsConsentView-B5NlgqrS.js.map
48
web/assets/NotSupportedView-Drz3x2d-.js → web/assets/NotSupportedView-BUpntA4x.js
generated
vendored
@@ -1,21 +1,16 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, bW as useRouter, r as resolveDirective, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, j as unref, l as script, i as withDirectives } from "./index-DjNHn37O.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js";
import { d as defineComponent, be as useRouter, r as resolveDirective, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, i as withDirectives, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _imports_0 = "" + new URL("images/sad_girl.png", import.meta.url).href;
const _hoisted_1 = { class: "sad-container" };
const _hoisted_2 = /* @__PURE__ */ createBaseVNode("img", {
class: "sad-girl",
src: _imports_0,
alt: "Sad girl illustration"
}, null, -1);
const _hoisted_3 = { class: "no-drag sad-text flex items-center" };
const _hoisted_4 = { class: "flex flex-col gap-8 p-8 min-w-110" };
const _hoisted_5 = { class: "text-4xl font-bold text-red-500" };
const _hoisted_6 = { class: "space-y-4" };
const _hoisted_7 = { class: "text-xl" };
const _hoisted_8 = { class: "list-disc list-inside space-y-1 text-neutral-800" };
const _hoisted_9 = { class: "flex gap-4" };
const _hoisted_2 = { class: "no-drag sad-text flex items-center" };
const _hoisted_3 = { class: "flex flex-col gap-8 p-8 min-w-110" };
const _hoisted_4 = { class: "text-4xl font-bold text-red-500" };
const _hoisted_5 = { class: "space-y-4" };
const _hoisted_6 = { class: "text-xl" };
const _hoisted_7 = { class: "list-disc list-inside space-y-1 text-neutral-800" };
const _hoisted_8 = { class: "flex gap-4" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "NotSupportedView",
setup(__props) {
@@ -37,18 +32,22 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
return openBlock(), createBlock(_sfc_main$1, null, {
default: withCtx(() => [
createBaseVNode("div", _hoisted_1, [
_hoisted_2,
createBaseVNode("div", _hoisted_3, [
createBaseVNode("div", _hoisted_4, [
createBaseVNode("h1", _hoisted_5, toDisplayString(_ctx.$t("notSupported.title")), 1),
createBaseVNode("div", _hoisted_6, [
createBaseVNode("p", _hoisted_7, toDisplayString(_ctx.$t("notSupported.message")), 1),
createBaseVNode("ul", _hoisted_8, [
_cache[0] || (_cache[0] = createBaseVNode("img", {
class: "sad-girl",
src: _imports_0,
alt: "Sad girl illustration"
}, null, -1)),
createBaseVNode("div", _hoisted_2, [
createBaseVNode("div", _hoisted_3, [
createBaseVNode("h1", _hoisted_4, toDisplayString(_ctx.$t("notSupported.title")), 1),
createBaseVNode("div", _hoisted_5, [
createBaseVNode("p", _hoisted_6, toDisplayString(_ctx.$t("notSupported.message")), 1),
createBaseVNode("ul", _hoisted_7, [
createBaseVNode("li", null, toDisplayString(_ctx.$t("notSupported.supportedDevices.macos")), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t("notSupported.supportedDevices.windows")), 1)
])
]),
createBaseVNode("div", _hoisted_9, [
createBaseVNode("div", _hoisted_8, [
createVNode(unref(script), {
label: _ctx.$t("notSupported.learnMore"),
icon: "pi pi-github",
@@ -80,7 +79,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
};
}
});
const NotSupportedView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-ebb20958"]]);
export {
_sfc_main as default
NotSupportedView as default
};
//# sourceMappingURL=NotSupportedView-Drz3x2d-.js.map
//# sourceMappingURL=NotSupportedView-BUpntA4x.js.map
8
web/assets/NotSupportedView-bFzHmqNj.css → web/assets/NotSupportedView-RFx6eCkN.css
generated
vendored
@@ -1,17 +1,19 @@

.sad-container {
&[data-v-ebb20958] {
display: grid;
align-items: center;
justify-content: space-evenly;
grid-template-columns: 25rem 1fr;
& > * {
}
&[data-v-ebb20958] > * {
grid-row: 1;
}
}
.sad-text {
.sad-text[data-v-ebb20958] {
grid-column: 1/3;
}
.sad-girl {
.sad-girl[data-v-ebb20958] {
grid-column: 2/3;
width: min(75vw, 100vh);
}
28
web/assets/ServerConfigPanel-Be4StJmv.js → web/assets/ServerConfigPanel-B1lI5M9c.js
generated
vendored
@@ -1,25 +1,23 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { H as createBaseVNode, o as openBlock, f as createElementBlock, Z as markRaw, d as defineComponent, a as useSettingStore, aS as storeToRefs, a5 as watch, cO as useCopyToClipboard, a1 as useI18n, k as createBlock, M as withCtx, j as unref, bZ as script, X as toDisplayString, E as renderList, F as Fragment, N as createVNode, l as script$1, I as createCommentVNode, bQ as script$2, cP as FormItem, cp as _sfc_main$1, c0 as electronAPI } from "./index-DjNHn37O.js";
import { u as useServerConfigStore } from "./serverConfigStore-CvyKFVuP.js";
import { o as openBlock, f as createElementBlock, m as createBaseVNode, H as markRaw, d as defineComponent, a as useSettingStore, ae as storeToRefs, O as watch, dy as useCopyToClipboard, K as useI18n, y as createBlock, z as withCtx, j as unref, bj as script, E as toDisplayString, D as renderList, F as Fragment, k as createVNode, l as script$1, B as createCommentVNode, bh as script$2, dz as FormItem, dn as _sfc_main$1, b5 as electronAPI } from "./index-DqqhYDnY.js";
import { u as useServerConfigStore } from "./serverConfigStore-Kb5DJVFt.js";
const _hoisted_1$1 = {
viewBox: "0 0 24 24",
width: "1.2em",
height: "1.2em"
};
const _hoisted_2$1 = /* @__PURE__ */ createBaseVNode("path", {
fill: "none",
stroke: "currentColor",
"stroke-linecap": "round",
"stroke-linejoin": "round",
"stroke-width": "2",
d: "m4 17l6-6l-6-6m8 14h8"
}, null, -1);
const _hoisted_3$1 = [
_hoisted_2$1
];
function render(_ctx, _cache) {
return openBlock(), createElementBlock("svg", _hoisted_1$1, [..._hoisted_3$1]);
return openBlock(), createElementBlock("svg", _hoisted_1$1, _cache[0] || (_cache[0] = [
createBaseVNode("path", {
fill: "none",
stroke: "currentColor",
"stroke-linecap": "round",
"stroke-linejoin": "round",
"stroke-width": "2",
d: "m4 17l6-6l-6-6m8 14h8"
}, null, -1)
]));
}
__name(render, "render");
const __unplugin_components_0 = markRaw({ name: "lucide-terminal", render });
@@ -155,4 +153,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as default
};
//# sourceMappingURL=ServerConfigPanel-Be4StJmv.js.map
//# sourceMappingURL=ServerConfigPanel-B1lI5M9c.js.map
100
web/assets/ServerStartView-BpH4TXPO.js
generated
vendored
Normal file
@@ -0,0 +1,100 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, K as useI18n, U as ref, bk as ProgressStatus, p as onMounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, a7 as createTextVNode, E as toDisplayString, j as unref, f as createElementBlock, B as createCommentVNode, k as createVNode, l as script, i as withDirectives, v as vShow, bl as BaseTerminal, b5 as electronAPI, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = { class: "flex flex-col w-full h-full items-center" };
const _hoisted_2 = { class: "text-2xl font-bold" };
const _hoisted_3 = { key: 0 };
const _hoisted_4 = {
key: 0,
class: "flex flex-col items-center gap-4"
};
const _hoisted_5 = { class: "flex items-center my-4 gap-2" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "ServerStartView",
setup(__props) {
const electron = electronAPI();
const { t } = useI18n();
const status = ref(ProgressStatus.INITIAL_STATE);
const electronVersion = ref("");
let xterm;
const terminalVisible = ref(true);
const updateProgress = /* @__PURE__ */ __name(({ status: newStatus }) => {
status.value = newStatus;
if (newStatus === ProgressStatus.ERROR) terminalVisible.value = false;
else xterm?.clear();
}, "updateProgress");
const terminalCreated = /* @__PURE__ */ __name(({ terminal, useAutoSize }, root) => {
xterm = terminal;
useAutoSize({ root, autoRows: true, autoCols: true });
electron.onLogMessage((message) => {
terminal.write(message);
});
terminal.options.cursorBlink = false;
terminal.options.disableStdin = true;
terminal.options.cursorInactiveStyle = "block";
}, "terminalCreated");
const reinstall = /* @__PURE__ */ __name(() => electron.reinstall(), "reinstall");
const reportIssue = /* @__PURE__ */ __name(() => {
window.open("https://forum.comfy.org/c/v1-feedback/", "_blank");
}, "reportIssue");
const openLogs = /* @__PURE__ */ __name(() => electron.openLogsFolder(), "openLogs");
onMounted(async () => {
electron.sendReady();
electron.onProgressUpdate(updateProgress);
electronVersion.value = await electron.getElectronVersion();
});
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$1, {
dark: "",
class: "flex-col"
}, {
default: withCtx(() => [
createBaseVNode("div", _hoisted_1, [
createBaseVNode("h2", _hoisted_2, [
createTextVNode(toDisplayString(unref(t)(`serverStart.process.${status.value}`)) + " ", 1),
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("span", _hoisted_3, " v" + toDisplayString(electronVersion.value), 1)) : createCommentVNode("", true)
]),
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("div", _hoisted_4, [
createBaseVNode("div", _hoisted_5, [
createVNode(unref(script), {
icon: "pi pi-flag",
severity: "secondary",
label: unref(t)("serverStart.reportIssue"),
onClick: reportIssue
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-file",
severity: "secondary",
label: unref(t)("serverStart.openLogs"),
onClick: openLogs
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-refresh",
label: unref(t)("serverStart.reinstall"),
onClick: reinstall
}, null, 8, ["label"])
]),
!terminalVisible.value ? (openBlock(), createBlock(unref(script), {
key: 0,
icon: "pi pi-search",
severity: "secondary",
label: unref(t)("serverStart.showTerminal"),
onClick: _cache[0] || (_cache[0] = ($event) => terminalVisible.value = true)
}, null, 8, ["label"])) : createCommentVNode("", true)
])) : createCommentVNode("", true),
withDirectives(createVNode(BaseTerminal, { onCreated: terminalCreated }, null, 512), [
[vShow, terminalVisible.value]
])
])
]),
_: 1
});
};
}
});
const ServerStartView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-4140d62b"]]);
export {
ServerStartView as default
};
//# sourceMappingURL=ServerStartView-BpH4TXPO.js.map
98
web/assets/ServerStartView-CIDTUh4x.js
generated
vendored
@@ -1,98 +0,0 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, a1 as useI18n, ab as ref, b_ as ProgressStatus, m as onMounted, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, aE as createTextVNode, X as toDisplayString, j as unref, f as createElementBlock, I as createCommentVNode, N as createVNode, l as script, i as withDirectives, v as vShow, b$ as BaseTerminal, aL as pushScopeId, aM as popScopeId, c0 as electronAPI, _ as _export_sfc } from "./index-DjNHn37O.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js";
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-42c1131d"), n = n(), popScopeId(), n), "_withScopeId");
const _hoisted_1 = { class: "text-2xl font-bold" };
const _hoisted_2 = { key: 0 };
const _hoisted_3 = {
key: 0,
class: "flex flex-col items-center gap-4"
};
const _hoisted_4 = { class: "flex items-center my-4 gap-2" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "ServerStartView",
setup(__props) {
const electron = electronAPI();
const { t } = useI18n();
const status = ref(ProgressStatus.INITIAL_STATE);
const electronVersion = ref("");
let xterm;
const terminalVisible = ref(true);
const updateProgress = /* @__PURE__ */ __name(({ status: newStatus }) => {
status.value = newStatus;
if (newStatus === ProgressStatus.ERROR) terminalVisible.value = false;
else xterm?.clear();
}, "updateProgress");
const terminalCreated = /* @__PURE__ */ __name(({ terminal, useAutoSize }, root) => {
xterm = terminal;
useAutoSize(root, true, true);
electron.onLogMessage((message) => {
terminal.write(message);
});
terminal.options.cursorBlink = false;
terminal.options.disableStdin = true;
terminal.options.cursorInactiveStyle = "block";
}, "terminalCreated");
const reinstall = /* @__PURE__ */ __name(() => electron.reinstall(), "reinstall");
const reportIssue = /* @__PURE__ */ __name(() => {
window.open("https://forum.comfy.org/c/v1-feedback/", "_blank");
}, "reportIssue");
const openLogs = /* @__PURE__ */ __name(() => electron.openLogsFolder(), "openLogs");
onMounted(async () => {
electron.sendReady();
electron.onProgressUpdate(updateProgress);
electronVersion.value = await electron.getElectronVersion();
});
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$1, {
dark: "",
class: "flex-col"
}, {
default: withCtx(() => [
createBaseVNode("h2", _hoisted_1, [
createTextVNode(toDisplayString(unref(t)(`serverStart.process.${status.value}`)) + " ", 1),
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("span", _hoisted_2, " v" + toDisplayString(electronVersion.value), 1)) : createCommentVNode("", true)
]),
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("div", _hoisted_3, [
createBaseVNode("div", _hoisted_4, [
createVNode(unref(script), {
icon: "pi pi-flag",
severity: "secondary",
label: unref(t)("serverStart.reportIssue"),
onClick: reportIssue
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-file",
severity: "secondary",
label: unref(t)("serverStart.openLogs"),
onClick: openLogs
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-refresh",
label: unref(t)("serverStart.reinstall"),
onClick: reinstall
}, null, 8, ["label"])
]),
!terminalVisible.value ? (openBlock(), createBlock(unref(script), {
key: 0,
icon: "pi pi-search",
severity: "secondary",
label: unref(t)("serverStart.showTerminal"),
onClick: _cache[0] || (_cache[0] = ($event) => terminalVisible.value = true)
}, null, 8, ["label"])) : createCommentVNode("", true)
])) : createCommentVNode("", true),
withDirectives(createVNode(BaseTerminal, { onCreated: terminalCreated }, null, 512), [
[vShow, terminalVisible.value]
])
]),
_: 1
});
};
}
});
const ServerStartView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-42c1131d"]]);
export {
ServerStartView as default
};
//# sourceMappingURL=ServerStartView-CIDTUh4x.js.map
2
web/assets/ServerStartView-CnyN4Ib6.css → web/assets/ServerStartView-CJiwVDQY.css
generated
vendored
@@ -1,5 +1,5 @@

[data-v-42c1131d] .xterm-helper-textarea {
[data-v-4140d62b] .xterm-helper-textarea {
/* Hide this as it moves all over when uv is running */
display: none;
}
33
web/assets/UserSelectView-B3jYchWu.js → web/assets/UserSelectView-wxa07xPk.js
generated
vendored
@@ -1,18 +1,17 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, aX as useUserStore, bW as useRouter, ab as ref, c as computed, m as onMounted, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, bX as withKeys, j as unref, av as script, bQ as script$1, bY as script$2, bZ as script$3, aE as createTextVNode, I as createCommentVNode, l as script$4 } from "./index-DjNHn37O.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js";
import { d as defineComponent, aj as useUserStore, be as useRouter, U as ref, c as computed, p as onMounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, bf as withKeys, j as unref, bg as script, bh as script$1, bi as script$2, bj as script$3, a7 as createTextVNode, B as createCommentVNode, l as script$4 } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = {
  id: "comfy-user-selection",
  class: "min-w-84 relative rounded-lg bg-[var(--comfy-menu-bg)] p-5 px-10 shadow-lg"
};
const _hoisted_2 = /* @__PURE__ */ createBaseVNode("h1", { class: "my-2.5 mb-7 font-normal" }, "ComfyUI", -1);
const _hoisted_3 = { class: "flex w-full flex-col items-center" };
const _hoisted_4 = { class: "flex w-full flex-col gap-2" };
const _hoisted_5 = { for: "new-user-input" };
const _hoisted_6 = { class: "flex w-full flex-col gap-2" };
const _hoisted_7 = { for: "existing-user-select" };
const _hoisted_8 = { class: "mt-5" };
const _hoisted_2 = { class: "flex w-full flex-col items-center" };
const _hoisted_3 = { class: "flex w-full flex-col gap-2" };
const _hoisted_4 = { for: "new-user-input" };
const _hoisted_5 = { class: "flex w-full flex-col gap-2" };
const _hoisted_6 = { for: "existing-user-select" };
const _hoisted_7 = { class: "mt-5" };
const _sfc_main = /* @__PURE__ */ defineComponent({
  __name: "UserSelectView",
  setup(__props) {
@@ -47,10 +46,10 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
    return openBlock(), createBlock(_sfc_main$1, { dark: "" }, {
      default: withCtx(() => [
        createBaseVNode("main", _hoisted_1, [
          _hoisted_2,
          createBaseVNode("div", _hoisted_3, [
            createBaseVNode("div", _hoisted_4, [
              createBaseVNode("label", _hoisted_5, toDisplayString(_ctx.$t("userSelect.newUser")) + ":", 1),
          _cache[2] || (_cache[2] = createBaseVNode("h1", { class: "my-2.5 mb-7 font-normal" }, "ComfyUI", -1)),
          createBaseVNode("div", _hoisted_2, [
            createBaseVNode("div", _hoisted_3, [
              createBaseVNode("label", _hoisted_4, toDisplayString(_ctx.$t("userSelect.newUser")) + ":", 1),
              createVNode(unref(script), {
                id: "new-user-input",
                modelValue: newUsername.value,
@@ -60,8 +59,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
              }, null, 8, ["modelValue", "placeholder"])
            ]),
            createVNode(unref(script$1)),
            createBaseVNode("div", _hoisted_6, [
              createBaseVNode("label", _hoisted_7, toDisplayString(_ctx.$t("userSelect.existingUser")) + ":", 1),
            createBaseVNode("div", _hoisted_5, [
              createBaseVNode("label", _hoisted_6, toDisplayString(_ctx.$t("userSelect.existingUser")) + ":", 1),
              createVNode(unref(script$2), {
                modelValue: selectedUser.value,
                "onUpdate:modelValue": _cache[1] || (_cache[1] = ($event) => selectedUser.value = $event),
@@ -82,7 +81,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
            _: 1
          })) : createCommentVNode("", true)
        ]),
        createBaseVNode("footer", _hoisted_8, [
        createBaseVNode("footer", _hoisted_7, [
          createVNode(unref(script$4), {
            label: _ctx.$t("userSelect.next"),
            onClick: login
@@ -99,4 +98,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
  _sfc_main as default
};
//# sourceMappingURL=UserSelectView-B3jYchWu.js.map
//# sourceMappingURL=UserSelectView-wxa07xPk.js.map
7
web/assets/WelcomeView-N0ZXLjdi.js → web/assets/WelcomeView-BrXELNIm.js
generated
vendored
@@ -1,8 +1,7 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, bW as useRouter, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, j as unref, l as script, aL as pushScopeId, aM as popScopeId, _ as _export_sfc } from "./index-DjNHn37O.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js";
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-7dfaf74c"), n = n(), popScopeId(), n), "_withScopeId");
import { d as defineComponent, be as useRouter, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = { class: "flex flex-col items-center justify-center gap-8 p-8" };
const _hoisted_2 = { class: "animated-gradient-text text-glow select-none" };
const _sfc_main = /* @__PURE__ */ defineComponent({
@@ -37,4 +36,4 @@ const WelcomeView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-
export {
  WelcomeView as default
};
//# sourceMappingURL=WelcomeView-N0ZXLjdi.js.map
//# sourceMappingURL=WelcomeView-BrXELNIm.js.map
Some files were not shown because too many files have changed in this diff.