Joey Ballentine
8 months ago
140 changed files with 10092 additions and 2544 deletions
@@ -1,3 +0,0 @@
-..\python_embeded\python.exe .\update.py ..\ComfyUI\
-..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -r ../ComfyUI/requirements.txt pygit2
-pause
@@ -1,2 +0,0 @@
-.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --use-pytorch-cross-attention
-pause
@@ -1,2 +1,8 @@
+@echo off
 ..\python_embeded\python.exe .\update.py ..\ComfyUI\
-pause
+if exist update_new.py (
+  move /y update_new.py update.py
+  echo Running updater again since it got updated.
+  ..\python_embeded\python.exe .\update.py ..\ComfyUI\ --skip_self_update
+)
+if "%~1"=="" pause
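The hunk above adds a self-update handshake to the updater batch file: update.py can stage a newer copy of itself as update_new.py, and the script swaps it in and re-runs exactly once with --skip_self_update, so the fresh updater is the one that actually runs. A minimal Python sketch of the staging side this .bat assumes (hypothetical helper, not the actual update.py):

import shutil
import sys

def stage_self_update(repo_path, skip_self_update):
    # After pulling, copy the repo's updated updater next to this script as
    # update_new.py; the calling .bat swaps the files and re-invokes once.
    if skip_self_update:
        return
    pulled_updater = repo_path + "/.ci/update_windows/update.py"  # hypothetical location
    shutil.copy(pulled_updater, "update_new.py")

if __name__ == "__main__":
    stage_self_update(sys.argv[1], "--skip_self_update" in sys.argv)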
@@ -1,3 +0,0 @@
-..\python_embeded\python.exe .\update.py ..\ComfyUI\
-..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2
-pause
@@ -1,11 +0,0 @@
-@echo off
-..\python_embeded\python.exe .\update.py ..\ComfyUI\
-echo
-echo This will try to update pytorch and all python dependencies, if you get an error wait for pytorch/xformers to fix their stuff
-echo You should not be running this anyways unless you really have to
-echo
-echo If you just want to update normally, close this and run update_comfyui.bat instead.
-echo
-pause
-..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2
-pause
@@ -1,71 +0,0 @@
-name: "Windows Release cu118 dependencies"
-
-on:
-  workflow_dispatch:
-#  push:
-#    branches:
-#      - master
-
-jobs:
-  build_dependencies:
-    env:
-      # you need at least cuda 5.0 for some of the stuff compiled here.
-      TORCH_CUDA_ARCH_LIST: "5.0+PTX 6.0 6.1 7.0 7.5 8.0 8.6 8.9"
-      FORCE_CUDA: 1
-      MAX_JOBS: 1 # will crash otherwise
-      DISTUTILS_USE_SDK: 1 # otherwise distutils will complain on windows about multiple versions of msvc
-      XFORMERS_BUILD_TYPE: "Release"
-    runs-on: windows-latest
-    steps:
-    - name: Cache Built Dependencies
-      uses: actions/cache@v3
-      id: cache-cu118_python_stuff
-      with:
-        path: cu118_python_deps.tar
-        key: ${{ runner.os }}-build-cu118
-
-    - if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
-      uses: actions/checkout@v3
-
-    - if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
-      uses: actions/setup-python@v4
-      with:
-        python-version: '3.10.9'
-
-    - if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
-      uses: comfyanonymous/cuda-toolkit@test
-      id: cuda-toolkit
-      with:
-        cuda: '11.8.0'
-    # copied from xformers github
-    - name: Setup MSVC
-      uses: ilammy/msvc-dev-cmd@v1
-    - name: Configure Pagefile
-      # windows runners will OOM with many CUDA architectures
-      # we cheat here with a page file
-      uses: al-cheb/configure-pagefile-action@v1.3
-      with:
-        minimum-size: 2GB
-    # really unfortunate: https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash
-    - name: Remove link.exe
-      shell: bash
-      run: rm /usr/bin/link
-
-    - if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
-      shell: bash
-      run: |
-        python -m pip wheel --no-cache-dir torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
-        python -m pip install --no-cache-dir ./temp_wheel_dir/*
-        echo installed basic
-        git clone --recurse-submodules https://github.com/facebookresearch/xformers.git
-        cd xformers
-        python -m pip install --no-cache-dir wheel setuptools twine
-        echo building xformers
-        python setup.py bdist_wheel -d ../temp_wheel_dir/
-        cd ..
-        rm -rf xformers
-        ls -lah temp_wheel_dir
-        mv temp_wheel_dir cu118_python_deps
-        tar cf cu118_python_deps.tar cu118_python_deps
-
-
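This deleted workflow compiled xformers against CUDA 11.8 once, wheeled every dependency into temp_wheel_dir, and cached the resulting tar under the key ${{ runner.os }}-build-cu118 so the packaging workflow could restore it without rebuilding. A small sketch of the same wheel-then-install-offline pattern in plain Python (hypothetical helper; uses only standard pip flags):

import subprocess
import sys

def build_and_install(packages, wheel_dir="temp_wheel_dir"):
    # Build wheels once into wheel_dir, then install strictly from that
    # directory, mirroring the workflow's pip wheel / pip install steps.
    subprocess.check_call([sys.executable, "-m", "pip", "wheel", *packages, "-w", wheel_dir])
    subprocess.check_call([sys.executable, "-m", "pip", "install",
                           "--no-index", "--find-links", wheel_dir, *packages])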
@@ -1,37 +0,0 @@
-name: "Windows Release cu118 dependencies 2"
-
-on:
-  workflow_dispatch:
-    inputs:
-      xformers:
-        description: 'xformers version'
-        required: true
-        type: string
-        default: "xformers"
-
-#  push:
-#    branches:
-#      - master
-
-jobs:
-  build_dependencies:
-    runs-on: windows-latest
-    steps:
-    - uses: actions/checkout@v3
-    - uses: actions/setup-python@v4
-      with:
-        python-version: '3.10.9'
-
-    - shell: bash
-      run: |
-        python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
-        python -m pip install --no-cache-dir ./temp_wheel_dir/*
-        echo installed basic
-        ls -lah temp_wheel_dir
-        mv temp_wheel_dir cu118_python_deps
-        tar cf cu118_python_deps.tar cu118_python_deps
-
-    - uses: actions/cache/save@v3
-      with:
-        path: cu118_python_deps.tar
-        key: ${{ runner.os }}-build-cu118
@@ -1,79 +0,0 @@
-name: "Windows Release cu118 packaging"
-
-on:
-  workflow_dispatch:
-#  push:
-#    branches:
-#      - master
-
-jobs:
-  package_comfyui:
-    permissions:
-      contents: "write"
-      packages: "write"
-      pull-requests: "read"
-    runs-on: windows-latest
-    steps:
-    - uses: actions/cache/restore@v3
-      id: cache
-      with:
-        path: cu118_python_deps.tar
-        key: ${{ runner.os }}-build-cu118
-    - shell: bash
-      run: |
-        mv cu118_python_deps.tar ../
-        cd ..
-        tar xf cu118_python_deps.tar
-        pwd
-        ls
-
-    - uses: actions/checkout@v3
-      with:
-        fetch-depth: 0
-        persist-credentials: false
-    - shell: bash
-      run: |
-        cd ..
-        cp -r ComfyUI ComfyUI_copy
-        curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip
-        unzip python_embeded.zip -d python_embeded
-        cd python_embeded
-        echo 'import site' >> ./python310._pth
-        curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
-        ./python.exe get-pip.py
-        ./python.exe -s -m pip install ../cu118_python_deps/*
-        sed -i '1i../ComfyUI' ./python310._pth
-        cd ..
-
-        git clone https://github.com/comfyanonymous/taesd
-        cp taesd/*.pth ./ComfyUI_copy/models/vae_approx/
-
-        mkdir ComfyUI_windows_portable
-        mv python_embeded ComfyUI_windows_portable
-        mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI
-
-        cd ComfyUI_windows_portable
-
-        mkdir update
-        cp -r ComfyUI/.ci/update_windows/* ./update/
-        cp -r ComfyUI/.ci/update_windows_cu118/* ./update/
-        cp -r ComfyUI/.ci/windows_base_files/* ./
-
-        cd ..
-
-        "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable
-        mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
-
-        cd ComfyUI_windows_portable
-        python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
-
-        ls
-
-    - name: Upload binaries to release
-      uses: svenstaro/upload-release-action@v2
-      with:
-        repo_token: ${{ secrets.GITHUB_TOKEN }}
-        file: new_ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
-        tag: "latest"
-        overwrite: true
-
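Two lines in the packaging script above do the heavy lifting for the portable build: appending "import site" to python310._pth re-enables site-packages in the embeddable Python (which ships with site processing disabled), and the sed call prepends ../ComfyUI so the repo is importable from the embedded interpreter. A sketch of the same edit in Python (assumes the stock ._pth layout of the embeddable distribution):

from pathlib import Path

def patch_pth(pth_file, repo_path="../ComfyUI"):
    # Prepend the repo to the embedded interpreter's search path and re-enable
    # site so pip-installed packages resolve, mirroring the sed/echo steps above.
    lines = Path(pth_file).read_text().splitlines()
    Path(pth_file).write_text("\n".join([repo_path] + lines + ["import site"]) + "\n")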
@@ -1,9 +0,0 @@
-{
-    "path-intellisense.mappings": {
-        "../": "${workspaceFolder}/web/extensions/core"
-    },
-    "[python]": {
-        "editor.defaultFormatter": "ms-python.autopep8"
-    },
-    "python.formatting.provider": "none"
-}
@@ -0,0 +1,54 @@
+import os
+import json
+from aiohttp import web
+
+
+class AppSettings():
+    def __init__(self, user_manager):
+        self.user_manager = user_manager
+
+    def get_settings(self, request):
+        file = self.user_manager.get_request_user_filepath(
+            request, "comfy.settings.json")
+        if os.path.isfile(file):
+            with open(file) as f:
+                return json.load(f)
+        else:
+            return {}
+
+    def save_settings(self, request, settings):
+        file = self.user_manager.get_request_user_filepath(
+            request, "comfy.settings.json")
+        with open(file, "w") as f:
+            f.write(json.dumps(settings, indent=4))
+
+    def add_routes(self, routes):
+        @routes.get("/settings")
+        async def get_settings(request):
+            return web.json_response(self.get_settings(request))
+
+        @routes.get("/settings/{id}")
+        async def get_setting(request):
+            value = None
+            settings = self.get_settings(request)
+            setting_id = request.match_info.get("id", None)
+            if setting_id and setting_id in settings:
+                value = settings[setting_id]
+            return web.json_response(value)
+
+        @routes.post("/settings")
+        async def post_settings(request):
+            settings = self.get_settings(request)
+            new_settings = await request.json()
+            self.save_settings(request, {**settings, **new_settings})
+            return web.Response(status=200)
+
+        @routes.post("/settings/{id}")
+        async def post_setting(request):
+            setting_id = request.match_info.get("id", None)
+            if not setting_id:
+                return web.Response(status=400)
+            settings = self.get_settings(request)
+            settings[setting_id] = await request.json()
+            self.save_settings(request, settings)
+            return web.Response(status=200)
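The settings routes above keep one comfy.settings.json per user and merge partial updates server-side ({**settings, **new_settings}), so a client only has to POST the keys it changes. A minimal client sketch (assumes a local server on port 8188, the usual ComfyUI default; standard library only):

import json
import urllib.request

def post_setting(setting_id, value, host="http://127.0.0.1:8188"):
    # Stores a single key via POST /settings/{id}; the body is the raw JSON value.
    req = urllib.request.Request(f"{host}/settings/{setting_id}",
                                 data=json.dumps(value).encode(),
                                 headers={"Content-Type": "application/json"},
                                 method="POST")
    urllib.request.urlopen(req)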
@@ -0,0 +1,140 @@
+import json
+import os
+import re
+import uuid
+from aiohttp import web
+from comfy.cli_args import args
+from folder_paths import user_directory
+from .app_settings import AppSettings
+
+default_user = "default"
+users_file = os.path.join(user_directory, "users.json")
+
+
+class UserManager():
+    def __init__(self):
+        global user_directory
+
+        self.settings = AppSettings(self)
+        if not os.path.exists(user_directory):
+            os.mkdir(user_directory)
+            if not args.multi_user:
+                print("****** User settings have been changed to be stored on the server instead of browser storage. ******")
+                print("****** For multi-user setups add the --multi-user CLI argument to enable multiple user profiles. ******")
+
+        if args.multi_user:
+            if os.path.isfile(users_file):
+                with open(users_file) as f:
+                    self.users = json.load(f)
+            else:
+                self.users = {}
+        else:
+            self.users = {"default": "default"}
+
+    def get_request_user_id(self, request):
+        user = "default"
+        if args.multi_user and "comfy-user" in request.headers:
+            user = request.headers["comfy-user"]
+
+        if user not in self.users:
+            raise KeyError("Unknown user: " + user)
+
+        return user
+
+    def get_request_user_filepath(self, request, file, type="userdata", create_dir=True):
+        global user_directory
+
+        if type == "userdata":
+            root_dir = user_directory
+        else:
+            raise KeyError("Unknown filepath type:" + type)
+
+        user = self.get_request_user_id(request)
+        path = user_root = os.path.abspath(os.path.join(root_dir, user))
+
+        # prevent leaving /{type}
+        if os.path.commonpath((root_dir, user_root)) != root_dir:
+            return None
+
+        parent = user_root
+
+        if file is not None:
+            # prevent leaving /{type}/{user}
+            path = os.path.abspath(os.path.join(user_root, file))
+            if os.path.commonpath((user_root, path)) != user_root:
+                return None
+
+        if create_dir and not os.path.exists(parent):
+            os.mkdir(parent)
+
+        return path
+
+    def add_user(self, name):
+        name = name.strip()
+        if not name:
+            raise ValueError("username not provided")
+        user_id = re.sub("[^a-zA-Z0-9-_]+", '-', name)
+        user_id = user_id + "_" + str(uuid.uuid4())
+
+        self.users[user_id] = name
+
+        global users_file
+        with open(users_file, "w") as f:
+            json.dump(self.users, f)
+
+        return user_id
+
+    def add_routes(self, routes):
+        self.settings.add_routes(routes)
+
+        @routes.get("/users")
+        async def get_users(request):
+            if args.multi_user:
+                return web.json_response({"storage": "server", "users": self.users})
+            else:
+                user_dir = self.get_request_user_filepath(request, None, create_dir=False)
+                return web.json_response({
+                    "storage": "server",
+                    "migrated": os.path.exists(user_dir)
+                })
+
+        @routes.post("/users")
+        async def post_users(request):
+            body = await request.json()
+            username = body["username"]
+            if username in self.users.values():
+                return web.json_response({"error": "Duplicate username."}, status=400)
+
+            user_id = self.add_user(username)
+            return web.json_response(user_id)
+
+        @routes.get("/userdata/{file}")
+        async def getuserdata(request):
+            file = request.match_info.get("file", None)
+            if not file:
+                return web.Response(status=400)
+
+            path = self.get_request_user_filepath(request, file)
+            if not path:
+                return web.Response(status=403)
+
+            if not os.path.exists(path):
+                return web.Response(status=404)
+
+            return web.FileResponse(path)
+
+        @routes.post("/userdata/{file}")
+        async def post_userdata(request):
+            file = request.match_info.get("file", None)
+            if not file:
+                return web.Response(status=400)
+
+            path = self.get_request_user_filepath(request, file)
+            if not path:
+                return web.Response(status=403)
+
+            body = await request.read()
+            with open(path, "wb") as f:
+                f.write(body)
+
+            return web.Response(status=200)
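get_request_user_filepath above guards against path traversal twice: once so the per-user root stays inside the user directory, and once so the requested file stays inside that user's root, using os.path.commonpath on absolute paths. The check in isolation:

import os

def is_inside(root, candidate):
    # Same test as the guard above: after resolving to absolute paths,
    # the root must still be the common prefix of both.
    root = os.path.abspath(root)
    candidate = os.path.abspath(os.path.join(root, candidate))
    return os.path.commonpath((root, candidate)) == root

assert is_inside("user", "default/comfy.settings.json")
assert not is_inside("user", "../secret.txt")  # traversal attempt is rejected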
@@ -0,0 +1,194 @@
+import torch
+from comfy.ldm.modules.attention import optimized_attention_for_device
+
+class CLIPAttention(torch.nn.Module):
+    def __init__(self, embed_dim, heads, dtype, device, operations):
+        super().__init__()
+
+        self.heads = heads
+        self.q_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
+        self.k_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
+        self.v_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
+
+        self.out_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
+
+    def forward(self, x, mask=None, optimized_attention=None):
+        q = self.q_proj(x)
+        k = self.k_proj(x)
+        v = self.v_proj(x)
+
+        out = optimized_attention(q, k, v, self.heads, mask)
+        return self.out_proj(out)
+
+ACTIVATIONS = {"quick_gelu": lambda a: a * torch.sigmoid(1.702 * a),
+               "gelu": torch.nn.functional.gelu,
+               }
+
+class CLIPMLP(torch.nn.Module):
+    def __init__(self, embed_dim, intermediate_size, activation, dtype, device, operations):
+        super().__init__()
+        self.fc1 = operations.Linear(embed_dim, intermediate_size, bias=True, dtype=dtype, device=device)
+        self.activation = ACTIVATIONS[activation]
+        self.fc2 = operations.Linear(intermediate_size, embed_dim, bias=True, dtype=dtype, device=device)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.activation(x)
+        x = self.fc2(x)
+        return x
+
+class CLIPLayer(torch.nn.Module):
+    def __init__(self, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations):
+        super().__init__()
+        self.layer_norm1 = operations.LayerNorm(embed_dim, dtype=dtype, device=device)
+        self.self_attn = CLIPAttention(embed_dim, heads, dtype, device, operations)
+        self.layer_norm2 = operations.LayerNorm(embed_dim, dtype=dtype, device=device)
+        self.mlp = CLIPMLP(embed_dim, intermediate_size, intermediate_activation, dtype, device, operations)
+
+    def forward(self, x, mask=None, optimized_attention=None):
+        x += self.self_attn(self.layer_norm1(x), mask, optimized_attention)
+        x += self.mlp(self.layer_norm2(x))
+        return x
+
+
+class CLIPEncoder(torch.nn.Module):
+    def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations):
+        super().__init__()
+        self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)])
+
+    def forward(self, x, mask=None, intermediate_output=None):
+        optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True)
+
+        if intermediate_output is not None:
+            if intermediate_output < 0:
+                intermediate_output = len(self.layers) + intermediate_output
+
+        intermediate = None
+        for i, l in enumerate(self.layers):
+            x = l(x, mask, optimized_attention)
+            if i == intermediate_output:
+                intermediate = x.clone()
+        return x, intermediate
+
+class CLIPEmbeddings(torch.nn.Module):
+    def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtype=None, device=None):
+        super().__init__()
+        self.token_embedding = torch.nn.Embedding(vocab_size, embed_dim, dtype=dtype, device=device)
+        self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device)
+
+    def forward(self, input_tokens):
+        return self.token_embedding(input_tokens) + self.position_embedding.weight
+
+
+class CLIPTextModel_(torch.nn.Module):
+    def __init__(self, config_dict, dtype, device, operations):
+        num_layers = config_dict["num_hidden_layers"]
+        embed_dim = config_dict["hidden_size"]
+        heads = config_dict["num_attention_heads"]
+        intermediate_size = config_dict["intermediate_size"]
+        intermediate_activation = config_dict["hidden_act"]
+
+        super().__init__()
+        self.embeddings = CLIPEmbeddings(embed_dim, dtype=torch.float32, device=device)
+        self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations)
+        self.final_layer_norm = operations.LayerNorm(embed_dim, dtype=dtype, device=device)
+
+    def forward(self, input_tokens, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True):
+        x = self.embeddings(input_tokens)
+        mask = None
+        if attention_mask is not None:
+            mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
+            mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
+
+        causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
+        if mask is not None:
+            mask += causal_mask
+        else:
+            mask = causal_mask
+
+        x, i = self.encoder(x, mask=mask, intermediate_output=intermediate_output)
+        x = self.final_layer_norm(x)
+        if i is not None and final_layer_norm_intermediate:
+            i = self.final_layer_norm(i)
+
+        pooled_output = x[torch.arange(x.shape[0], device=x.device), input_tokens.to(dtype=torch.int, device=x.device).argmax(dim=-1),]
+        return x, i, pooled_output
+
+class CLIPTextModel(torch.nn.Module):
+    def __init__(self, config_dict, dtype, device, operations):
+        super().__init__()
+        self.num_layers = config_dict["num_hidden_layers"]
+        self.text_model = CLIPTextModel_(config_dict, dtype, device, operations)
+        embed_dim = config_dict["hidden_size"]
+        self.text_projection = operations.Linear(embed_dim, embed_dim, bias=False, dtype=dtype, device=device)
+        self.text_projection.weight.copy_(torch.eye(embed_dim))
+        self.dtype = dtype
+
+    def get_input_embeddings(self):
+        return self.text_model.embeddings.token_embedding
+
+    def set_input_embeddings(self, embeddings):
+        self.text_model.embeddings.token_embedding = embeddings
+
+    def forward(self, *args, **kwargs):
+        x = self.text_model(*args, **kwargs)
+        out = self.text_projection(x[2])
+        return (x[0], x[1], out, x[2])
+
+
+class CLIPVisionEmbeddings(torch.nn.Module):
+    def __init__(self, embed_dim, num_channels=3, patch_size=14, image_size=224, dtype=None, device=None, operations=None):
+        super().__init__()
+        self.class_embedding = torch.nn.Parameter(torch.empty(embed_dim, dtype=dtype, device=device))
+
+        self.patch_embedding = operations.Conv2d(
+            in_channels=num_channels,
+            out_channels=embed_dim,
+            kernel_size=patch_size,
+            stride=patch_size,
+            bias=False,
+            dtype=dtype,
+            device=device
+        )
+
+        num_patches = (image_size // patch_size) ** 2
+        num_positions = num_patches + 1
+        self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device)
+
+    def forward(self, pixel_values):
+        embeds = self.patch_embedding(pixel_values).flatten(2).transpose(1, 2)
+        return torch.cat([self.class_embedding.to(embeds.device).expand(pixel_values.shape[0], 1, -1), embeds], dim=1) + self.position_embedding.weight.to(embeds.device)
+
+
+class CLIPVision(torch.nn.Module):
+    def __init__(self, config_dict, dtype, device, operations):
+        super().__init__()
+        num_layers = config_dict["num_hidden_layers"]
+        embed_dim = config_dict["hidden_size"]
+        heads = config_dict["num_attention_heads"]
+        intermediate_size = config_dict["intermediate_size"]
+        intermediate_activation = config_dict["hidden_act"]
+
+        self.embeddings = CLIPVisionEmbeddings(embed_dim, config_dict["num_channels"], config_dict["patch_size"], config_dict["image_size"], dtype=torch.float32, device=device, operations=operations)
+        self.pre_layrnorm = operations.LayerNorm(embed_dim)
+        self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations)
+        self.post_layernorm = operations.LayerNorm(embed_dim)
+
+    def forward(self, pixel_values, attention_mask=None, intermediate_output=None):
+        x = self.embeddings(pixel_values)
+        x = self.pre_layrnorm(x)
+        #TODO: attention_mask?
+        x, i = self.encoder(x, mask=None, intermediate_output=intermediate_output)
+        pooled_output = self.post_layernorm(x[:, 0, :])
+        return x, i, pooled_output
+
+class CLIPVisionModelProjection(torch.nn.Module):
+    def __init__(self, config_dict, dtype, device, operations):
+        super().__init__()
+        self.vision_model = CLIPVision(config_dict, dtype, device, operations)
+        self.visual_projection = operations.Linear(config_dict["hidden_size"], config_dict["projection_dim"], bias=False)
+
+    def forward(self, *args, **kwargs):
+        x = self.vision_model(*args, **kwargs)
+        out = self.visual_projection(x[2])
+        return (x[0], x[1], out)
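CLIPTextModel_.forward builds its attention mask in float space: padding positions become -inf, then a causal upper-triangular -inf matrix is added so each token attends only to itself and earlier positions; the pooled output is afterwards read at the position of the highest token id (the end-of-text token in CLIP vocabularies). The causal part in isolation:

import torch

# Upper-triangular -inf mask, as built in CLIPTextModel_.forward: row i may
# attend to columns 0..i only; everything after position i is masked out.
n = 4
causal_mask = torch.empty(n, n).fill_(float("-inf")).triu_(1)
print(causal_mask)  # zeros on and below the diagonal, -inf above it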
@@ -0,0 +1,161 @@
+"""
+    This file is part of ComfyUI.
+    Copyright (C) 2024 Stability AI
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import torch
+import torch.nn as nn
+from comfy.ldm.modules.attention import optimized_attention
+
+class Linear(torch.nn.Linear):
+    def reset_parameters(self):
+        return None
+
+class Conv2d(torch.nn.Conv2d):
+    def reset_parameters(self):
+        return None
+
+class OptimizedAttention(nn.Module):
+    def __init__(self, c, nhead, dropout=0.0, dtype=None, device=None, operations=None):
+        super().__init__()
+        self.heads = nhead
+
+        self.to_q = operations.Linear(c, c, bias=True, dtype=dtype, device=device)
+        self.to_k = operations.Linear(c, c, bias=True, dtype=dtype, device=device)
+        self.to_v = operations.Linear(c, c, bias=True, dtype=dtype, device=device)
+
+        self.out_proj = operations.Linear(c, c, bias=True, dtype=dtype, device=device)
+
+    def forward(self, q, k, v):
+        q = self.to_q(q)
+        k = self.to_k(k)
+        v = self.to_v(v)
+
+        out = optimized_attention(q, k, v, self.heads)
+
+        return self.out_proj(out)
+
+class Attention2D(nn.Module):
+    def __init__(self, c, nhead, dropout=0.0, dtype=None, device=None, operations=None):
+        super().__init__()
+        self.attn = OptimizedAttention(c, nhead, dtype=dtype, device=device, operations=operations)
+        # self.attn = nn.MultiheadAttention(c, nhead, dropout=dropout, bias=True, batch_first=True, dtype=dtype, device=device)
+
+    def forward(self, x, kv, self_attn=False):
+        orig_shape = x.shape
+        x = x.view(x.size(0), x.size(1), -1).permute(0, 2, 1)  # Bx4xHxW -> Bx(HxW)x4
+        if self_attn:
+            kv = torch.cat([x, kv], dim=1)
+        # x = self.attn(x, kv, kv, need_weights=False)[0]
+        x = self.attn(x, kv, kv)
+        x = x.permute(0, 2, 1).view(*orig_shape)
+        return x
+
+
+def LayerNorm2d_op(operations):
+    class LayerNorm2d(operations.LayerNorm):
+        def __init__(self, *args, **kwargs):
+            super().__init__(*args, **kwargs)
+
+        def forward(self, x):
+            return super().forward(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+    return LayerNorm2d
+
+class GlobalResponseNorm(nn.Module):
+    "from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105"
+    def __init__(self, dim, dtype=None, device=None):
+        super().__init__()
+        self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim, dtype=dtype, device=device))
+        self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim, dtype=dtype, device=device))
+
+    def forward(self, x):
+        Gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True)
+        Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
+        return self.gamma.to(device=x.device, dtype=x.dtype) * (x * Nx) + self.beta.to(device=x.device, dtype=x.dtype) + x
+
+
+class ResBlock(nn.Module):
+    def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0, dtype=None, device=None, operations=None):  # , num_heads=4, expansion=2):
+        super().__init__()
+        self.depthwise = operations.Conv2d(c, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c, dtype=dtype, device=device)
+        # self.depthwise = SAMBlock(c, num_heads, expansion)
+        self.norm = LayerNorm2d_op(operations)(c, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device)
+        self.channelwise = nn.Sequential(
+            operations.Linear(c + c_skip, c * 4, dtype=dtype, device=device),
+            nn.GELU(),
+            GlobalResponseNorm(c * 4, dtype=dtype, device=device),
+            nn.Dropout(dropout),
+            operations.Linear(c * 4, c, dtype=dtype, device=device)
+        )
+
+    def forward(self, x, x_skip=None):
+        x_res = x
+        x = self.norm(self.depthwise(x))
+        if x_skip is not None:
+            x = torch.cat([x, x_skip], dim=1)
+        x = self.channelwise(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+        return x + x_res
+
+
+class AttnBlock(nn.Module):
+    def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0, dtype=None, device=None, operations=None):
+        super().__init__()
+        self.self_attn = self_attn
+        self.norm = LayerNorm2d_op(operations)(c, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device)
+        self.attention = Attention2D(c, nhead, dropout, dtype=dtype, device=device, operations=operations)
+        self.kv_mapper = nn.Sequential(
+            nn.SiLU(),
+            operations.Linear(c_cond, c, dtype=dtype, device=device)
+        )
+
+    def forward(self, x, kv):
+        kv = self.kv_mapper(kv)
+        x = x + self.attention(self.norm(x), kv, self_attn=self.self_attn)
+        return x
+
+
+class FeedForwardBlock(nn.Module):
+    def __init__(self, c, dropout=0.0, dtype=None, device=None, operations=None):
+        super().__init__()
+        self.norm = LayerNorm2d_op(operations)(c, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device)
+        self.channelwise = nn.Sequential(
+            operations.Linear(c, c * 4, dtype=dtype, device=device),
+            nn.GELU(),
+            GlobalResponseNorm(c * 4, dtype=dtype, device=device),
+            nn.Dropout(dropout),
+            operations.Linear(c * 4, c, dtype=dtype, device=device)
+        )
+
+    def forward(self, x):
+        x = x + self.channelwise(self.norm(x).permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+        return x
+
+
+class TimestepBlock(nn.Module):
+    def __init__(self, c, c_timestep, conds=['sca'], dtype=None, device=None, operations=None):
+        super().__init__()
+        self.mapper = operations.Linear(c_timestep, c * 2, dtype=dtype, device=device)
+        self.conds = conds
+        for cname in conds:
+            setattr(self, f"mapper_{cname}", operations.Linear(c_timestep, c * 2, dtype=dtype, device=device))
+
+    def forward(self, x, t):
+        t = t.chunk(len(self.conds) + 1, dim=1)
+        a, b = self.mapper(t[0])[:, :, None, None].chunk(2, dim=1)
+        for i, c in enumerate(self.conds):
+            ac, bc = getattr(self, f"mapper_{c}")(t[i + 1])[:, :, None, None].chunk(2, dim=1)
+            a, b = a + ac, b + bc
+        return x * (1 + a) + b
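TimestepBlock applies FiLM-style conditioning: the timestep embedding t is split into one chunk per condition, each chunk is mapped to a per-channel scale a and shift b, and the block returns x * (1 + a) + b, so zero-initialized mappers leave the features untouched. A toy check of that identity:

import torch

x = torch.randn(2, 8, 4, 4)
a = torch.zeros(2, 8, 1, 1)  # stands in for the mapped scale
b = torch.zeros(2, 8, 1, 1)  # stands in for the mapped shift
assert torch.equal(x * (1 + a) + b, x)  # zero conditioning is a no-op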
@@ -0,0 +1,93 @@
+"""
+    This file is part of ComfyUI.
+    Copyright (C) 2024 Stability AI
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import torch
+import torchvision
+from torch import nn
+from .common import LayerNorm2d_op
+
+
+class CNetResBlock(nn.Module):
+    def __init__(self, c, dtype=None, device=None, operations=None):
+        super().__init__()
+        self.blocks = nn.Sequential(
+            LayerNorm2d_op(operations)(c, dtype=dtype, device=device),
+            nn.GELU(),
+            operations.Conv2d(c, c, kernel_size=3, padding=1),
+            LayerNorm2d_op(operations)(c, dtype=dtype, device=device),
+            nn.GELU(),
+            operations.Conv2d(c, c, kernel_size=3, padding=1),
+        )
+
+    def forward(self, x):
+        return x + self.blocks(x)
+
+
+class ControlNet(nn.Module):
+    def __init__(self, c_in=3, c_proj=2048, proj_blocks=None, bottleneck_mode=None, dtype=None, device=None, operations=nn):
+        super().__init__()
+        if bottleneck_mode is None:
+            bottleneck_mode = 'effnet'
+        self.proj_blocks = proj_blocks
+        if bottleneck_mode == 'effnet':
+            embd_channels = 1280
+            self.backbone = torchvision.models.efficientnet_v2_s().features.eval()
+            if c_in != 3:
+                in_weights = self.backbone[0][0].weight.data
+                self.backbone[0][0] = operations.Conv2d(c_in, 24, kernel_size=3, stride=2, bias=False, dtype=dtype, device=device)
+                if c_in > 3:
+                    # nn.init.constant_(self.backbone[0][0].weight, 0)
+                    self.backbone[0][0].weight.data[:, :3] = in_weights[:, :3].clone()
+                else:
+                    self.backbone[0][0].weight.data = in_weights[:, :c_in].clone()
+        elif bottleneck_mode == 'simple':
+            embd_channels = c_in
+            self.backbone = nn.Sequential(
+                operations.Conv2d(embd_channels, embd_channels * 4, kernel_size=3, padding=1, dtype=dtype, device=device),
+                nn.LeakyReLU(0.2, inplace=True),
+                operations.Conv2d(embd_channels * 4, embd_channels, kernel_size=3, padding=1, dtype=dtype, device=device),
+            )
+        elif bottleneck_mode == 'large':
+            self.backbone = nn.Sequential(
+                operations.Conv2d(c_in, 4096 * 4, kernel_size=1, dtype=dtype, device=device),
+                nn.LeakyReLU(0.2, inplace=True),
+                operations.Conv2d(4096 * 4, 1024, kernel_size=1, dtype=dtype, device=device),
+                *[CNetResBlock(1024, dtype=dtype, device=device, operations=operations) for _ in range(8)],
+                operations.Conv2d(1024, 1280, kernel_size=1, dtype=dtype, device=device),
+            )
+            embd_channels = 1280
+        else:
+            raise ValueError(f'Unknown bottleneck mode: {bottleneck_mode}')
+        self.projections = nn.ModuleList()
+        for _ in range(len(proj_blocks)):
+            self.projections.append(nn.Sequential(
+                operations.Conv2d(embd_channels, embd_channels, kernel_size=1, bias=False, dtype=dtype, device=device),
+                nn.LeakyReLU(0.2, inplace=True),
+                operations.Conv2d(embd_channels, c_proj, kernel_size=1, bias=False, dtype=dtype, device=device),
+            ))
+            # nn.init.constant_(self.projections[-1][-1].weight, 0)  # zero output projection
+        self.xl = False
+        self.input_channels = c_in
+        self.unshuffle_amount = 8
+
+    def forward(self, x):
+        x = self.backbone(x)
+        proj_outputs = [None for _ in range(max(self.proj_blocks) + 1)]
+        for i, idx in enumerate(self.proj_blocks):
+            proj_outputs[idx] = self.projections[i](x)
+        return proj_outputs
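ControlNet.forward emits its projections sparsely: proj_outputs has one slot per block index up to max(proj_blocks), and only the configured indices are filled, so downstream blocks can index by position and skip the None entries. The routing in isolation (hypothetical indices):

proj_blocks = [0, 4, 8]      # hypothetical block indices
feats = ["p0", "p1", "p2"]   # stand-ins for self.projections[i](x)
proj_outputs = [None] * (max(proj_blocks) + 1)
for i, idx in enumerate(proj_blocks):
    proj_outputs[idx] = feats[i]
assert proj_outputs == ["p0", None, None, None, "p1", None, None, None, "p2"]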
@@ -0,0 +1,255 @@
+"""
+    This file is part of ComfyUI.
+    Copyright (C) 2024 Stability AI
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import torch
+from torch import nn
+from torch.autograd import Function
+
+class vector_quantize(Function):
+    @staticmethod
+    def forward(ctx, x, codebook):
+        with torch.no_grad():
+            codebook_sqr = torch.sum(codebook ** 2, dim=1)
+            x_sqr = torch.sum(x ** 2, dim=1, keepdim=True)
+
+            dist = torch.addmm(codebook_sqr + x_sqr, x, codebook.t(), alpha=-2.0, beta=1.0)
+            _, indices = dist.min(dim=1)
+
+            ctx.save_for_backward(indices, codebook)
+            ctx.mark_non_differentiable(indices)
+
+            nn = torch.index_select(codebook, 0, indices)
+            return nn, indices
+
+    @staticmethod
+    def backward(ctx, grad_output, grad_indices):
+        grad_inputs, grad_codebook = None, None
+
+        if ctx.needs_input_grad[0]:
+            grad_inputs = grad_output.clone()
+        if ctx.needs_input_grad[1]:
+            # Gradient wrt. the codebook
+            indices, codebook = ctx.saved_tensors
+
+            grad_codebook = torch.zeros_like(codebook)
+            grad_codebook.index_add_(0, indices, grad_output)
+
+        return (grad_inputs, grad_codebook)
+
+
+class VectorQuantize(nn.Module):
+    def __init__(self, embedding_size, k, ema_decay=0.99, ema_loss=False):
+        """
+        Takes an input of variable size (as long as the last dimension matches the embedding size).
+        Returns one tensor containing the nearest neigbour embeddings to each of the inputs,
+        with the same size as the input, vq and commitment components for the loss as a touple
+        in the second output and the indices of the quantized vectors in the third:
+        quantized, (vq_loss, commit_loss), indices
+        """
+        super(VectorQuantize, self).__init__()
+
+        self.codebook = nn.Embedding(k, embedding_size)
+        self.codebook.weight.data.uniform_(-1./k, 1./k)
+        self.vq = vector_quantize.apply
+
+        self.ema_decay = ema_decay
+        self.ema_loss = ema_loss
+        if ema_loss:
+            self.register_buffer('ema_element_count', torch.ones(k))
+            self.register_buffer('ema_weight_sum', torch.zeros_like(self.codebook.weight))
+
+    def _laplace_smoothing(self, x, epsilon):
+        n = torch.sum(x)
+        return ((x + epsilon) / (n + x.size(0) * epsilon) * n)
+
+    def _updateEMA(self, z_e_x, indices):
+        mask = nn.functional.one_hot(indices, self.ema_element_count.size(0)).float()
+        elem_count = mask.sum(dim=0)
+        weight_sum = torch.mm(mask.t(), z_e_x)
+
+        self.ema_element_count = (self.ema_decay * self.ema_element_count) + ((1-self.ema_decay) * elem_count)
+        self.ema_element_count = self._laplace_smoothing(self.ema_element_count, 1e-5)
+        self.ema_weight_sum = (self.ema_decay * self.ema_weight_sum) + ((1-self.ema_decay) * weight_sum)
+
+        self.codebook.weight.data = self.ema_weight_sum / self.ema_element_count.unsqueeze(-1)
+
+    def idx2vq(self, idx, dim=-1):
+        q_idx = self.codebook(idx)
+        if dim != -1:
+            q_idx = q_idx.movedim(-1, dim)
+        return q_idx
+
+    def forward(self, x, get_losses=True, dim=-1):
+        if dim != -1:
+            x = x.movedim(dim, -1)
+        z_e_x = x.contiguous().view(-1, x.size(-1)) if len(x.shape) > 2 else x
+        z_q_x, indices = self.vq(z_e_x, self.codebook.weight.detach())
+        vq_loss, commit_loss = None, None
+        if self.ema_loss and self.training:
+            self._updateEMA(z_e_x.detach(), indices.detach())
+        # pick the graded embeddings after updating the codebook in order to have a more accurate commitment loss
+        z_q_x_grd = torch.index_select(self.codebook.weight, dim=0, index=indices)
+        if get_losses:
+            vq_loss = (z_q_x_grd - z_e_x.detach()).pow(2).mean()
+            commit_loss = (z_e_x - z_q_x_grd.detach()).pow(2).mean()
+
+        z_q_x = z_q_x.view(x.shape)
+        if dim != -1:
+            z_q_x = z_q_x.movedim(-1, dim)
+        return z_q_x, (vq_loss, commit_loss), indices.view(x.shape[:-1])
+
+
+class ResBlock(nn.Module):
+    def __init__(self, c, c_hidden):
+        super().__init__()
+        # depthwise/attention
+        self.norm1 = nn.LayerNorm(c, elementwise_affine=False, eps=1e-6)
+        self.depthwise = nn.Sequential(
+            nn.ReplicationPad2d(1),
+            nn.Conv2d(c, c, kernel_size=3, groups=c)
+        )
+
+        # channelwise
+        self.norm2 = nn.LayerNorm(c, elementwise_affine=False, eps=1e-6)
+        self.channelwise = nn.Sequential(
+            nn.Linear(c, c_hidden),
+            nn.GELU(),
+            nn.Linear(c_hidden, c),
+        )
+
+        self.gammas = nn.Parameter(torch.zeros(6), requires_grad=True)
+
+        # Init weights
+        def _basic_init(module):
+            if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
+                torch.nn.init.xavier_uniform_(module.weight)
+                if module.bias is not None:
+                    nn.init.constant_(module.bias, 0)
+
+        self.apply(_basic_init)
+
+    def _norm(self, x, norm):
+        return norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+
+    def forward(self, x):
+        mods = self.gammas
+
+        x_temp = self._norm(x, self.norm1) * (1 + mods[0]) + mods[1]
+        try:
+            x = x + self.depthwise(x_temp) * mods[2]
+        except: #operation not implemented for bf16
+            x_temp = self.depthwise[0](x_temp.float()).to(x.dtype)
+            x = x + self.depthwise[1](x_temp) * mods[2]
+
+        x_temp = self._norm(x, self.norm2) * (1 + mods[3]) + mods[4]
+        x = x + self.channelwise(x_temp.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * mods[5]
+
+        return x
+
+
+class StageA(nn.Module):
+    def __init__(self, levels=2, bottleneck_blocks=12, c_hidden=384, c_latent=4, codebook_size=8192):
+        super().__init__()
+        self.c_latent = c_latent
+        c_levels = [c_hidden // (2 ** i) for i in reversed(range(levels))]
+
+        # Encoder blocks
+        self.in_block = nn.Sequential(
+            nn.PixelUnshuffle(2),
+            nn.Conv2d(3 * 4, c_levels[0], kernel_size=1)
+        )
+        down_blocks = []
+        for i in range(levels):
+            if i > 0:
+                down_blocks.append(nn.Conv2d(c_levels[i - 1], c_levels[i], kernel_size=4, stride=2, padding=1))
+            block = ResBlock(c_levels[i], c_levels[i] * 4)
+            down_blocks.append(block)
+        down_blocks.append(nn.Sequential(
+            nn.Conv2d(c_levels[-1], c_latent, kernel_size=1, bias=False),
+            nn.BatchNorm2d(c_latent),  # then normalize them to have mean 0 and std 1
+        ))
+        self.down_blocks = nn.Sequential(*down_blocks)
+        self.down_blocks[0]
+
+        self.codebook_size = codebook_size
+        self.vquantizer = VectorQuantize(c_latent, k=codebook_size)
+
+        # Decoder blocks
+        up_blocks = [nn.Sequential(
+            nn.Conv2d(c_latent, c_levels[-1], kernel_size=1)
+        )]
+        for i in range(levels):
+            for j in range(bottleneck_blocks if i == 0 else 1):
+                block = ResBlock(c_levels[levels - 1 - i], c_levels[levels - 1 - i] * 4)
+                up_blocks.append(block)
+            if i < levels - 1:
+                up_blocks.append(
+                    nn.ConvTranspose2d(c_levels[levels - 1 - i], c_levels[levels - 2 - i], kernel_size=4, stride=2,
+                                       padding=1))
+        self.up_blocks = nn.Sequential(*up_blocks)
+        self.out_block = nn.Sequential(
+            nn.Conv2d(c_levels[0], 3 * 4, kernel_size=1),
+            nn.PixelShuffle(2),
+        )
+
+    def encode(self, x, quantize=False):
+        x = self.in_block(x)
+        x = self.down_blocks(x)
+        if quantize:
+            qe, (vq_loss, commit_loss), indices = self.vquantizer.forward(x, dim=1)
+            return qe, x, indices, vq_loss + commit_loss * 0.25
+        else:
+            return x
+
+    def decode(self, x):
+        x = self.up_blocks(x)
+        x = self.out_block(x)
+        return x
+
+    def forward(self, x, quantize=False):
+        qe, x, _, vq_loss = self.encode(x, quantize)
+        x = self.decode(qe)
+        return x, vq_loss
+
+
+class Discriminator(nn.Module):
+    def __init__(self, c_in=3, c_cond=0, c_hidden=512, depth=6):
+        super().__init__()
+        d = max(depth - 3, 3)
+        layers = [
+            nn.utils.spectral_norm(nn.Conv2d(c_in, c_hidden // (2 ** d), kernel_size=3, stride=2, padding=1)),
+            nn.LeakyReLU(0.2),
+        ]
+        for i in range(depth - 1):
+            c_in = c_hidden // (2 ** max((d - i), 0))
+            c_out = c_hidden // (2 ** max((d - 1 - i), 0))
+            layers.append(nn.utils.spectral_norm(nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1)))
+            layers.append(nn.InstanceNorm2d(c_out))
+            layers.append(nn.LeakyReLU(0.2))
+        self.encoder = nn.Sequential(*layers)
+        self.shuffle = nn.Conv2d((c_hidden + c_cond) if c_cond > 0 else c_hidden, 1, kernel_size=1)
+        self.logits = nn.Sigmoid()
+
+    def forward(self, x, cond=None):
+        x = self.encoder(x)
+        if cond is not None:
+            cond = cond.view(cond.size(0), cond.size(1), 1, 1, ).expand(-1, -1, x.size(-2), x.size(-1))
+            x = torch.cat([x, cond], dim=1)
+        x = self.shuffle(x)
+        x = self.logits(x)
+        return x
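vector_quantize.forward finds each input's nearest codebook entry without materializing pairwise differences, expanding ||x - c||^2 = ||x||^2 + ||c||^2 - 2 x.c in a single addmm call. The same computation spelled out:

import torch

x = torch.randn(5, 4)
codebook = torch.randn(16, 4)
# ||x||^2 + ||c||^2 - 2 x.c, broadcast to a (5, 16) squared-distance matrix
dist = (x ** 2).sum(1, keepdim=True) + (codebook ** 2).sum(1) - 2 * x @ codebook.t()
indices = dist.argmin(dim=1)
quantized = codebook[indices]
# Sanity check against torch.cdist (clamp guards tiny negative float error)
assert torch.allclose(dist.clamp(min=0).sqrt(), torch.cdist(x, codebook), atol=1e-4)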
@ -0,0 +1,257 @@ |
|||||||
|
""" |
||||||
|
This file is part of ComfyUI. |
||||||
|
Copyright (C) 2024 Stability AI |
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify |
||||||
|
it under the terms of the GNU General Public License as published by |
||||||
|
the Free Software Foundation, either version 3 of the License, or |
||||||
|
(at your option) any later version. |
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful, |
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of |
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
||||||
|
GNU General Public License for more details. |
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License |
||||||
|
along with this program. If not, see <https://www.gnu.org/licenses/>. |
||||||
|
""" |
||||||
|
|
||||||
|
import math |
||||||
|
import numpy as np |
||||||
|
import torch |
||||||
|
from torch import nn |
||||||
|
from .common import AttnBlock, LayerNorm2d_op, ResBlock, FeedForwardBlock, TimestepBlock |
||||||
|
|
||||||
|
class StageB(nn.Module): |
||||||
|
def __init__(self, c_in=4, c_out=4, c_r=64, patch_size=2, c_cond=1280, c_hidden=[320, 640, 1280, 1280], |
||||||
|
nhead=[-1, -1, 20, 20], blocks=[[2, 6, 28, 6], [6, 28, 6, 2]], |
||||||
|
block_repeat=[[1, 1, 1, 1], [3, 3, 2, 2]], level_config=['CT', 'CT', 'CTA', 'CTA'], c_clip=1280, |
||||||
|
c_clip_seq=4, c_effnet=16, c_pixels=3, kernel_size=3, dropout=[0, 0, 0.0, 0.0], self_attn=True, |
||||||
|
t_conds=['sca'], stable_cascade_stage=None, dtype=None, device=None, operations=None): |
||||||
|
super().__init__() |
||||||
|
self.dtype = dtype |
||||||
|
self.c_r = c_r |
||||||
|
self.t_conds = t_conds |
||||||
|
self.c_clip_seq = c_clip_seq |
||||||
|
if not isinstance(dropout, list): |
||||||
|
dropout = [dropout] * len(c_hidden) |
||||||
|
if not isinstance(self_attn, list): |
||||||
|
self_attn = [self_attn] * len(c_hidden) |
||||||
|
|
||||||
|
# CONDITIONING |
||||||
|
self.effnet_mapper = nn.Sequential( |
||||||
|
operations.Conv2d(c_effnet, c_hidden[0] * 4, kernel_size=1, dtype=dtype, device=device), |
||||||
|
nn.GELU(), |
||||||
|
operations.Conv2d(c_hidden[0] * 4, c_hidden[0], kernel_size=1, dtype=dtype, device=device), |
||||||
|
LayerNorm2d_op(operations)(c_hidden[0], elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) |
||||||
|
) |
||||||
|
self.pixels_mapper = nn.Sequential( |
||||||
|
operations.Conv2d(c_pixels, c_hidden[0] * 4, kernel_size=1, dtype=dtype, device=device), |
||||||
|
nn.GELU(), |
||||||
|
operations.Conv2d(c_hidden[0] * 4, c_hidden[0], kernel_size=1, dtype=dtype, device=device), |
||||||
|
LayerNorm2d_op(operations)(c_hidden[0], elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) |
||||||
|
) |
||||||
|
self.clip_mapper = operations.Linear(c_clip, c_cond * c_clip_seq, dtype=dtype, device=device) |
||||||
|
self.clip_norm = operations.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) |
||||||
|
|
||||||
|
self.embedding = nn.Sequential( |
||||||
|
nn.PixelUnshuffle(patch_size), |
||||||
|
operations.Conv2d(c_in * (patch_size ** 2), c_hidden[0], kernel_size=1, dtype=dtype, device=device), |
||||||
|
LayerNorm2d_op(operations)(c_hidden[0], elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) |
||||||
|
) |
||||||
|
|
||||||
|
def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0, self_attn=True): |
||||||
|
if block_type == 'C': |
||||||
|
return ResBlock(c_hidden, c_skip, kernel_size=kernel_size, dropout=dropout, dtype=dtype, device=device, operations=operations) |
||||||
|
elif block_type == 'A': |
                return AttnBlock(c_hidden, c_cond, nhead, self_attn=self_attn, dropout=dropout, dtype=dtype, device=device, operations=operations)
            elif block_type == 'F':
                return FeedForwardBlock(c_hidden, dropout=dropout, dtype=dtype, device=device, operations=operations)
            elif block_type == 'T':
                return TimestepBlock(c_hidden, c_r, conds=t_conds, dtype=dtype, device=device, operations=operations)
            else:
                raise Exception(f'Block type {block_type} not supported')

        # BLOCKS
        # -- down blocks
        self.down_blocks = nn.ModuleList()
        self.down_downscalers = nn.ModuleList()
        self.down_repeat_mappers = nn.ModuleList()
        for i in range(len(c_hidden)):
            if i > 0:
                self.down_downscalers.append(nn.Sequential(
                    LayerNorm2d_op(operations)(c_hidden[i - 1], elementwise_affine=False, eps=1e-6, dtype=dtype, device=device),
                    operations.Conv2d(c_hidden[i - 1], c_hidden[i], kernel_size=2, stride=2, dtype=dtype, device=device),
                ))
            else:
                self.down_downscalers.append(nn.Identity())
            down_block = nn.ModuleList()
            for _ in range(blocks[0][i]):
                for block_type in level_config[i]:
                    block = get_block(block_type, c_hidden[i], nhead[i], dropout=dropout[i], self_attn=self_attn[i])
                    down_block.append(block)
            self.down_blocks.append(down_block)
            if block_repeat is not None:
                block_repeat_mappers = nn.ModuleList()
                for _ in range(block_repeat[0][i] - 1):
                    block_repeat_mappers.append(operations.Conv2d(c_hidden[i], c_hidden[i], kernel_size=1, dtype=dtype, device=device))
                self.down_repeat_mappers.append(block_repeat_mappers)

        # -- up blocks
        self.up_blocks = nn.ModuleList()
        self.up_upscalers = nn.ModuleList()
        self.up_repeat_mappers = nn.ModuleList()
        for i in reversed(range(len(c_hidden))):
            if i > 0:
                self.up_upscalers.append(nn.Sequential(
                    LayerNorm2d_op(operations)(c_hidden[i], elementwise_affine=False, eps=1e-6, dtype=dtype, device=device),
                    operations.ConvTranspose2d(c_hidden[i], c_hidden[i - 1], kernel_size=2, stride=2, dtype=dtype, device=device),
                ))
            else:
                self.up_upscalers.append(nn.Identity())
            up_block = nn.ModuleList()
            for j in range(blocks[1][::-1][i]):
                for k, block_type in enumerate(level_config[i]):
                    c_skip = c_hidden[i] if i < len(c_hidden) - 1 and j == k == 0 else 0
                    block = get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i],
                                      self_attn=self_attn[i])
                    up_block.append(block)
            self.up_blocks.append(up_block)
            if block_repeat is not None:
                block_repeat_mappers = nn.ModuleList()
                for _ in range(block_repeat[1][::-1][i] - 1):
                    block_repeat_mappers.append(operations.Conv2d(c_hidden[i], c_hidden[i], kernel_size=1, dtype=dtype, device=device))
                self.up_repeat_mappers.append(block_repeat_mappers)

        # OUTPUT
        self.clf = nn.Sequential(
            LayerNorm2d_op(operations)(c_hidden[0], elementwise_affine=False, eps=1e-6, dtype=dtype, device=device),
            operations.Conv2d(c_hidden[0], c_out * (patch_size ** 2), kernel_size=1, dtype=dtype, device=device),
            nn.PixelShuffle(patch_size),
        )

        # --- WEIGHT INIT ---
        # self.apply(self._init_weights)  # General init
        # nn.init.normal_(self.clip_mapper.weight, std=0.02)  # conditionings
        # nn.init.normal_(self.effnet_mapper[0].weight, std=0.02)  # conditionings
        # nn.init.normal_(self.effnet_mapper[2].weight, std=0.02)  # conditionings
        # nn.init.normal_(self.pixels_mapper[0].weight, std=0.02)  # conditionings
        # nn.init.normal_(self.pixels_mapper[2].weight, std=0.02)  # conditionings
        # torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02)  # inputs
        # nn.init.constant_(self.clf[1].weight, 0)  # outputs
        #
        # # blocks
        # for level_block in self.down_blocks + self.up_blocks:
        #     for block in level_block:
        #         if isinstance(block, ResBlock) or isinstance(block, FeedForwardBlock):
        #             block.channelwise[-1].weight.data *= np.sqrt(1 / sum(blocks[0]))
        #         elif isinstance(block, TimestepBlock):
        #             for layer in block.modules():
        #                 if isinstance(layer, nn.Linear):
        #                     nn.init.constant_(layer.weight, 0)

    # def _init_weights(self, m):
    #     if isinstance(m, (nn.Conv2d, nn.Linear)):
    #         torch.nn.init.xavier_uniform_(m.weight)
    #         if m.bias is not None:
    #             nn.init.constant_(m.bias, 0)

    def gen_r_embedding(self, r, max_positions=10000):
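        # Standard sinusoidal timestep embedding: r in [0, 1] is rescaled to
        # [0, max_positions], multiplied by a geometric series of c_r // 2
        # frequencies, and the sin/cos halves are concatenated into a
        # [batch, c_r] tensor (zero-padded by one column when c_r is odd).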
        r = r * max_positions
        half_dim = self.c_r // 2
        emb = math.log(max_positions) / (half_dim - 1)
        emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp()
        emb = r[:, None] * emb[None, :]
        emb = torch.cat([emb.sin(), emb.cos()], dim=1)
        if self.c_r % 2 == 1:  # zero pad
            emb = nn.functional.pad(emb, (0, 1), mode='constant')
        return emb

    def gen_c_embeddings(self, clip):
        if len(clip.shape) == 2:
            clip = clip.unsqueeze(1)
        clip = self.clip_mapper(clip).view(clip.size(0), clip.size(1) * self.c_clip_seq, -1)
        clip = self.clip_norm(clip)
        return clip

    def _down_encode(self, x, r_embed, clip):
        level_outputs = []
        block_group = zip(self.down_blocks, self.down_downscalers, self.down_repeat_mappers)
        for down_block, downscaler, repmap in block_group:
            x = downscaler(x)
            for i in range(len(repmap) + 1):
                for block in down_block:
                    if isinstance(block, ResBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  ResBlock)):
                        x = block(x)
                    elif isinstance(block, AttnBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  AttnBlock)):
                        x = block(x, clip)
                    elif isinstance(block, TimestepBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  TimestepBlock)):
                        x = block(x, r_embed)
                    else:
                        x = block(x)
                if i < len(repmap):
                    x = repmap[i](x)
            level_outputs.insert(0, x)
        return level_outputs

    def _up_decode(self, level_outputs, r_embed, clip):
        x = level_outputs[0]
        block_group = zip(self.up_blocks, self.up_upscalers, self.up_repeat_mappers)
        for i, (up_block, upscaler, repmap) in enumerate(block_group):
            for j in range(len(repmap) + 1):
                for k, block in enumerate(up_block):
                    if isinstance(block, ResBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  ResBlock)):
                        skip = level_outputs[i] if k == 0 and i > 0 else None
                        if skip is not None and (x.size(-1) != skip.size(-1) or x.size(-2) != skip.size(-2)):
                            x = torch.nn.functional.interpolate(x, skip.shape[-2:], mode='bilinear',
                                                                align_corners=True)
                        x = block(x, skip)
                    elif isinstance(block, AttnBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  AttnBlock)):
                        x = block(x, clip)
                    elif isinstance(block, TimestepBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  TimestepBlock)):
                        x = block(x, r_embed)
                    else:
                        x = block(x)
                if j < len(repmap):
                    x = repmap[j](x)
            x = upscaler(x)
        return x

    def forward(self, x, r, effnet, clip, pixels=None, **kwargs):
        if pixels is None:
            pixels = x.new_zeros(x.size(0), 3, 8, 8)

        # Process the conditioning embeddings
        r_embed = self.gen_r_embedding(r).to(dtype=x.dtype)
        for c in self.t_conds:
            t_cond = kwargs.get(c, torch.zeros_like(r))
            r_embed = torch.cat([r_embed, self.gen_r_embedding(t_cond).to(dtype=x.dtype)], dim=1)
        clip = self.gen_c_embeddings(clip)

        # Model Blocks
        x = self.embedding(x)
        x = x + self.effnet_mapper(
            nn.functional.interpolate(effnet, size=x.shape[-2:], mode='bilinear', align_corners=True))
        x = x + nn.functional.interpolate(self.pixels_mapper(pixels), size=x.shape[-2:], mode='bilinear',
                                          align_corners=True)
        level_outputs = self._down_encode(x, r_embed, clip)
        x = self._up_decode(level_outputs, r_embed, clip)
        return self.clf(x)

    def update_weights_ema(self, src_model, beta=0.999):
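        # Exponential moving average of another model's weights:
        # param <- beta * param + (1 - beta) * src_param; buffers are updated the same way.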
        for self_params, src_params in zip(self.parameters(), src_model.parameters()):
            self_params.data = self_params.data * beta + src_params.data.clone().to(self_params.device) * (1 - beta)
        for self_buffers, src_buffers in zip(self.buffers(), src_model.buffers()):
            self_buffers.data = self_buffers.data * beta + src_buffers.data.clone().to(self_buffers.device) * (1 - beta)
@ -0,0 +1,274 @@
"""
    This file is part of ComfyUI.
    Copyright (C) 2024 Stability AI

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.
"""

import torch
from torch import nn
import numpy as np
import math
from .common import AttnBlock, LayerNorm2d_op, ResBlock, FeedForwardBlock, TimestepBlock
# from .controlnet import ControlNetDeliverer

class UpDownBlock2d(nn.Module):
    def __init__(self, c_in, c_out, mode, enabled=True, dtype=None, device=None, operations=None):
        super().__init__()
        assert mode in ['up', 'down']
        interpolation = nn.Upsample(scale_factor=2 if mode == 'up' else 0.5, mode='bilinear',
                                    align_corners=True) if enabled else nn.Identity()
        mapping = operations.Conv2d(c_in, c_out, kernel_size=1, dtype=dtype, device=device)
        self.blocks = nn.ModuleList([interpolation, mapping] if mode == 'up' else [mapping, interpolation])

    def forward(self, x):
        for block in self.blocks:
            x = block(x)
        return x


class StageC(nn.Module):
    def __init__(self, c_in=16, c_out=16, c_r=64, patch_size=1, c_cond=2048, c_hidden=[2048, 2048], nhead=[32, 32],
                 blocks=[[8, 24], [24, 8]], block_repeat=[[1, 1], [1, 1]], level_config=['CTA', 'CTA'],
                 c_clip_text=1280, c_clip_text_pooled=1280, c_clip_img=768, c_clip_seq=4, kernel_size=3,
                 dropout=[0.0, 0.0], self_attn=True, t_conds=['sca', 'crp'], switch_level=[False], stable_cascade_stage=None,
                 dtype=None, device=None, operations=None):
        super().__init__()
        self.dtype = dtype
        self.c_r = c_r
        self.t_conds = t_conds
        self.c_clip_seq = c_clip_seq
        if not isinstance(dropout, list):
            dropout = [dropout] * len(c_hidden)
        if not isinstance(self_attn, list):
            self_attn = [self_attn] * len(c_hidden)

        # CONDITIONING
        self.clip_txt_mapper = operations.Linear(c_clip_text, c_cond, dtype=dtype, device=device)
        self.clip_txt_pooled_mapper = operations.Linear(c_clip_text_pooled, c_cond * c_clip_seq, dtype=dtype, device=device)
        self.clip_img_mapper = operations.Linear(c_clip_img, c_cond * c_clip_seq, dtype=dtype, device=device)
        self.clip_norm = operations.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device)

        self.embedding = nn.Sequential(
            nn.PixelUnshuffle(patch_size),
            operations.Conv2d(c_in * (patch_size ** 2), c_hidden[0], kernel_size=1, dtype=dtype, device=device),
            LayerNorm2d_op(operations)(c_hidden[0], elementwise_affine=False, eps=1e-6)
        )

        def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0, self_attn=True):
            if block_type == 'C':
                return ResBlock(c_hidden, c_skip, kernel_size=kernel_size, dropout=dropout, dtype=dtype, device=device, operations=operations)
            elif block_type == 'A':
                return AttnBlock(c_hidden, c_cond, nhead, self_attn=self_attn, dropout=dropout, dtype=dtype, device=device, operations=operations)
            elif block_type == 'F':
                return FeedForwardBlock(c_hidden, dropout=dropout, dtype=dtype, device=device, operations=operations)
            elif block_type == 'T':
                return TimestepBlock(c_hidden, c_r, conds=t_conds, dtype=dtype, device=device, operations=operations)
            else:
                raise Exception(f'Block type {block_type} not supported')

        # BLOCKS
        # -- down blocks
        self.down_blocks = nn.ModuleList()
        self.down_downscalers = nn.ModuleList()
        self.down_repeat_mappers = nn.ModuleList()
        for i in range(len(c_hidden)):
            if i > 0:
                self.down_downscalers.append(nn.Sequential(
                    LayerNorm2d_op(operations)(c_hidden[i - 1], elementwise_affine=False, eps=1e-6),
                    UpDownBlock2d(c_hidden[i - 1], c_hidden[i], mode='down', enabled=switch_level[i - 1], dtype=dtype, device=device, operations=operations)
                ))
            else:
                self.down_downscalers.append(nn.Identity())
            down_block = nn.ModuleList()
            for _ in range(blocks[0][i]):
                for block_type in level_config[i]:
                    block = get_block(block_type, c_hidden[i], nhead[i], dropout=dropout[i], self_attn=self_attn[i])
                    down_block.append(block)
            self.down_blocks.append(down_block)
            if block_repeat is not None:
                block_repeat_mappers = nn.ModuleList()
                for _ in range(block_repeat[0][i] - 1):
                    block_repeat_mappers.append(operations.Conv2d(c_hidden[i], c_hidden[i], kernel_size=1, dtype=dtype, device=device))
                self.down_repeat_mappers.append(block_repeat_mappers)

        # -- up blocks
        self.up_blocks = nn.ModuleList()
        self.up_upscalers = nn.ModuleList()
        self.up_repeat_mappers = nn.ModuleList()
        for i in reversed(range(len(c_hidden))):
            if i > 0:
                self.up_upscalers.append(nn.Sequential(
                    LayerNorm2d_op(operations)(c_hidden[i], elementwise_affine=False, eps=1e-6),
                    UpDownBlock2d(c_hidden[i], c_hidden[i - 1], mode='up', enabled=switch_level[i - 1], dtype=dtype, device=device, operations=operations)
                ))
            else:
                self.up_upscalers.append(nn.Identity())
            up_block = nn.ModuleList()
            for j in range(blocks[1][::-1][i]):
                for k, block_type in enumerate(level_config[i]):
                    c_skip = c_hidden[i] if i < len(c_hidden) - 1 and j == k == 0 else 0
                    block = get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i],
                                      self_attn=self_attn[i])
                    up_block.append(block)
            self.up_blocks.append(up_block)
            if block_repeat is not None:
                block_repeat_mappers = nn.ModuleList()
                for _ in range(block_repeat[1][::-1][i] - 1):
                    block_repeat_mappers.append(operations.Conv2d(c_hidden[i], c_hidden[i], kernel_size=1, dtype=dtype, device=device))
                self.up_repeat_mappers.append(block_repeat_mappers)

        # OUTPUT
        self.clf = nn.Sequential(
            LayerNorm2d_op(operations)(c_hidden[0], elementwise_affine=False, eps=1e-6, dtype=dtype, device=device),
            operations.Conv2d(c_hidden[0], c_out * (patch_size ** 2), kernel_size=1, dtype=dtype, device=device),
            nn.PixelShuffle(patch_size),
        )

        # --- WEIGHT INIT ---
        # self.apply(self._init_weights)  # General init
        # nn.init.normal_(self.clip_txt_mapper.weight, std=0.02)  # conditionings
        # nn.init.normal_(self.clip_txt_pooled_mapper.weight, std=0.02)  # conditionings
        # nn.init.normal_(self.clip_img_mapper.weight, std=0.02)  # conditionings
        # torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02)  # inputs
        # nn.init.constant_(self.clf[1].weight, 0)  # outputs
        #
        # # blocks
        # for level_block in self.down_blocks + self.up_blocks:
        #     for block in level_block:
        #         if isinstance(block, ResBlock) or isinstance(block, FeedForwardBlock):
        #             block.channelwise[-1].weight.data *= np.sqrt(1 / sum(blocks[0]))
        #         elif isinstance(block, TimestepBlock):
        #             for layer in block.modules():
        #                 if isinstance(layer, nn.Linear):
        #                     nn.init.constant_(layer.weight, 0)

    # def _init_weights(self, m):
    #     if isinstance(m, (nn.Conv2d, nn.Linear)):
    #         torch.nn.init.xavier_uniform_(m.weight)
    #         if m.bias is not None:
    #             nn.init.constant_(m.bias, 0)

    def gen_r_embedding(self, r, max_positions=10000):
        r = r * max_positions
        half_dim = self.c_r // 2
        emb = math.log(max_positions) / (half_dim - 1)
        emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp()
        emb = r[:, None] * emb[None, :]
        emb = torch.cat([emb.sin(), emb.cos()], dim=1)
        if self.c_r % 2 == 1:  # zero pad
            emb = nn.functional.pad(emb, (0, 1), mode='constant')
        return emb

    def gen_c_embeddings(self, clip_txt, clip_txt_pooled, clip_img):
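        # Builds a single cross-attention conditioning sequence: the mapped text
        # token embeddings, then c_clip_seq tokens derived from the pooled text
        # embedding, then c_clip_seq tokens derived from the image embedding,
        # all projected to c_cond channels and normalized together.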
        clip_txt = self.clip_txt_mapper(clip_txt)
        if len(clip_txt_pooled.shape) == 2:
            clip_txt_pooled = clip_txt_pooled.unsqueeze(1)
        if len(clip_img.shape) == 2:
            clip_img = clip_img.unsqueeze(1)
        clip_txt_pool = self.clip_txt_pooled_mapper(clip_txt_pooled).view(clip_txt_pooled.size(0), clip_txt_pooled.size(1) * self.c_clip_seq, -1)
        clip_img = self.clip_img_mapper(clip_img).view(clip_img.size(0), clip_img.size(1) * self.c_clip_seq, -1)
        clip = torch.cat([clip_txt, clip_txt_pool, clip_img], dim=1)
        clip = self.clip_norm(clip)
        return clip

    def _down_encode(self, x, r_embed, clip, cnet=None):
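        # `cnet` is a stack of ControlNet feature maps; one entry is popped and
        # added to the activations (resized to match) before every ResBlock.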
        level_outputs = []
        block_group = zip(self.down_blocks, self.down_downscalers, self.down_repeat_mappers)
        for down_block, downscaler, repmap in block_group:
            x = downscaler(x)
            for i in range(len(repmap) + 1):
                for block in down_block:
                    if isinstance(block, ResBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  ResBlock)):
                        if cnet is not None:
                            next_cnet = cnet.pop()
                            if next_cnet is not None:
                                x = x + nn.functional.interpolate(next_cnet, size=x.shape[-2:], mode='bilinear',
                                                                  align_corners=True).to(x.dtype)
                        x = block(x)
                    elif isinstance(block, AttnBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  AttnBlock)):
                        x = block(x, clip)
                    elif isinstance(block, TimestepBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  TimestepBlock)):
                        x = block(x, r_embed)
                    else:
                        x = block(x)
                if i < len(repmap):
                    x = repmap[i](x)
            level_outputs.insert(0, x)
        return level_outputs

    def _up_decode(self, level_outputs, r_embed, clip, cnet=None):
        x = level_outputs[0]
        block_group = zip(self.up_blocks, self.up_upscalers, self.up_repeat_mappers)
        for i, (up_block, upscaler, repmap) in enumerate(block_group):
            for j in range(len(repmap) + 1):
                for k, block in enumerate(up_block):
                    if isinstance(block, ResBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  ResBlock)):
                        skip = level_outputs[i] if k == 0 and i > 0 else None
                        if skip is not None and (x.size(-1) != skip.size(-1) or x.size(-2) != skip.size(-2)):
                            x = torch.nn.functional.interpolate(x, skip.shape[-2:], mode='bilinear',
                                                                align_corners=True)
                        if cnet is not None:
                            next_cnet = cnet.pop()
                            if next_cnet is not None:
                                x = x + nn.functional.interpolate(next_cnet, size=x.shape[-2:], mode='bilinear',
                                                                  align_corners=True).to(x.dtype)
                        x = block(x, skip)
                    elif isinstance(block, AttnBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  AttnBlock)):
                        x = block(x, clip)
                    elif isinstance(block, TimestepBlock) or (
                            hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module,
                                                                                  TimestepBlock)):
                        x = block(x, r_embed)
                    else:
                        x = block(x)
                if j < len(repmap):
                    x = repmap[j](x)
            x = upscaler(x)
        return x

    def forward(self, x, r, clip_text, clip_text_pooled, clip_img, control=None, **kwargs):
        # Process the conditioning embeddings
        r_embed = self.gen_r_embedding(r).to(dtype=x.dtype)
        for c in self.t_conds:
            t_cond = kwargs.get(c, torch.zeros_like(r))
            r_embed = torch.cat([r_embed, self.gen_r_embedding(t_cond).to(dtype=x.dtype)], dim=1)
        clip = self.gen_c_embeddings(clip_text, clip_text_pooled, clip_img)

        if control is not None:
            cnet = control.get("input")
        else:
            cnet = None

        # Model Blocks
        x = self.embedding(x)
        level_outputs = self._down_encode(x, r_embed, clip, cnet)
        x = self._up_decode(level_outputs, r_embed, clip, cnet)
        return self.clf(x)

    def update_weights_ema(self, src_model, beta=0.999):
        for self_params, src_params in zip(self.parameters(), src_model.parameters()):
            self_params.data = self_params.data * beta + src_params.data.clone().to(self_params.device) * (1 - beta)
        for self_buffers, src_buffers in zip(self.buffers(), src_model.buffers()):
            self_buffers.data = self_buffers.data * beta + src_buffers.data.clone().to(self_buffers.device) * (1 - beta)
@ -0,0 +1,95 @@
"""
    This file is part of ComfyUI.
    Copyright (C) 2024 Stability AI

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import torch
import torchvision
from torch import nn


# EfficientNet
class EfficientNetEncoder(nn.Module):
    def __init__(self, c_latent=16):
        super().__init__()
        self.backbone = torchvision.models.efficientnet_v2_s().features.eval()
        self.mapper = nn.Sequential(
            nn.Conv2d(1280, c_latent, kernel_size=1, bias=False),
            nn.BatchNorm2d(c_latent, affine=False),  # then normalize them to have mean 0 and std 1
        )
        self.mean = nn.Parameter(torch.tensor([0.485, 0.456, 0.406]))
        self.std = nn.Parameter(torch.tensor([0.229, 0.224, 0.225]))

    def forward(self, x):
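        # Inputs arrive in [-1, 1]; remap to [0, 1], then apply the ImageNet
        # mean/std normalization the EfficientNet backbone was trained with.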
        x = x * 0.5 + 0.5
        x = (x - self.mean.view([3, 1, 1])) / self.std.view([3, 1, 1])
        o = self.mapper(self.backbone(x))
        return o


# Fast Decoder for Stage C latents. E.g. 16 x 24 x 24 -> 3 x 192 x 192
class Previewer(nn.Module):
    def __init__(self, c_in=16, c_hidden=512, c_out=3):
        super().__init__()
        self.blocks = nn.Sequential(
            nn.Conv2d(c_in, c_hidden, kernel_size=1),  # 16 channels to 512 channels
            nn.GELU(),
            nn.BatchNorm2d(c_hidden),

            nn.Conv2d(c_hidden, c_hidden, kernel_size=3, padding=1),
            nn.GELU(),
            nn.BatchNorm2d(c_hidden),

            nn.ConvTranspose2d(c_hidden, c_hidden // 2, kernel_size=2, stride=2),  # 16 -> 32
            nn.GELU(),
            nn.BatchNorm2d(c_hidden // 2),

            nn.Conv2d(c_hidden // 2, c_hidden // 2, kernel_size=3, padding=1),
            nn.GELU(),
            nn.BatchNorm2d(c_hidden // 2),

            nn.ConvTranspose2d(c_hidden // 2, c_hidden // 4, kernel_size=2, stride=2),  # 32 -> 64
            nn.GELU(),
            nn.BatchNorm2d(c_hidden // 4),

            nn.Conv2d(c_hidden // 4, c_hidden // 4, kernel_size=3, padding=1),
            nn.GELU(),
            nn.BatchNorm2d(c_hidden // 4),

            nn.ConvTranspose2d(c_hidden // 4, c_hidden // 4, kernel_size=2, stride=2),  # 64 -> 128
            nn.GELU(),
            nn.BatchNorm2d(c_hidden // 4),

            nn.Conv2d(c_hidden // 4, c_hidden // 4, kernel_size=3, padding=1),
            nn.GELU(),
            nn.BatchNorm2d(c_hidden // 4),

            nn.Conv2d(c_hidden // 4, c_out, kernel_size=1),
        )

    def forward(self, x):
        return (self.blocks(x) - 0.5) * 2.0


class StageC_coder(nn.Module):
    def __init__(self):
        super().__init__()
        self.previewer = Previewer()
        self.encoder = EfficientNetEncoder()

    def encode(self, x):
        return self.encoder(x)

    def decode(self, x):
        return self.previewer(x)
@ -1,40 +1,163 @@
+"""
+    This file is part of ComfyUI.
+    Copyright (C) 2024 Stability AI
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
 import torch
-from contextlib import contextmanager
+import comfy.model_management
 
 
-class Linear(torch.nn.Linear):
-    def reset_parameters(self):
-        return None
-
-
-class Conv2d(torch.nn.Conv2d):
-    def reset_parameters(self):
-        return None
-
-
-class Conv3d(torch.nn.Conv3d):
-    def reset_parameters(self):
-        return None
-
-
-def conv_nd(dims, *args, **kwargs):
-    if dims == 2:
-        return Conv2d(*args, **kwargs)
-    elif dims == 3:
-        return Conv3d(*args, **kwargs)
-    else:
-        raise ValueError(f"unsupported dimensions: {dims}")
-
-
-@contextmanager
-def use_comfy_ops(device=None, dtype=None):  # Kind of an ugly hack but I can't think of a better way
-    old_torch_nn_linear = torch.nn.Linear
-    force_device = device
-    force_dtype = dtype
-    def linear_with_dtype(in_features: int, out_features: int, bias: bool = True, device=None, dtype=None):
-        if force_device is not None:
-            device = force_device
-        if force_dtype is not None:
-            dtype = force_dtype
-        return Linear(in_features, out_features, bias=bias, device=device, dtype=dtype)
-
-    torch.nn.Linear = linear_with_dtype
-    try:
-        yield
-    finally:
-        torch.nn.Linear = old_torch_nn_linear
+def cast_bias_weight(s, input):
+    bias = None
+    non_blocking = comfy.model_management.device_supports_non_blocking(input.device)
+    if s.bias is not None:
+        bias = s.bias.to(device=input.device, dtype=input.dtype, non_blocking=non_blocking)
+        if s.bias_function is not None:
+            bias = s.bias_function(bias)
+    weight = s.weight.to(device=input.device, dtype=input.dtype, non_blocking=non_blocking)
+    if s.weight_function is not None:
+        weight = s.weight_function(weight)
+    return weight, bias
+
+
+class CastWeightBiasOp:
+    comfy_cast_weights = False
+    weight_function = None
+    bias_function = None
+
+class disable_weight_init:
+    class Linear(torch.nn.Linear, CastWeightBiasOp):
+        def reset_parameters(self):
+            return None
+
+        def forward_comfy_cast_weights(self, input):
+            weight, bias = cast_bias_weight(self, input)
+            return torch.nn.functional.linear(input, weight, bias)
+
+        def forward(self, *args, **kwargs):
+            if self.comfy_cast_weights:
+                return self.forward_comfy_cast_weights(*args, **kwargs)
+            else:
+                return super().forward(*args, **kwargs)
+
+    class Conv2d(torch.nn.Conv2d, CastWeightBiasOp):
+        def reset_parameters(self):
+            return None
+
+        def forward_comfy_cast_weights(self, input):
+            weight, bias = cast_bias_weight(self, input)
+            return self._conv_forward(input, weight, bias)
+
+        def forward(self, *args, **kwargs):
+            if self.comfy_cast_weights:
+                return self.forward_comfy_cast_weights(*args, **kwargs)
+            else:
+                return super().forward(*args, **kwargs)
+
+    class Conv3d(torch.nn.Conv3d, CastWeightBiasOp):
+        def reset_parameters(self):
+            return None
+
+        def forward_comfy_cast_weights(self, input):
+            weight, bias = cast_bias_weight(self, input)
+            return self._conv_forward(input, weight, bias)
+
+        def forward(self, *args, **kwargs):
+            if self.comfy_cast_weights:
+                return self.forward_comfy_cast_weights(*args, **kwargs)
+            else:
+                return super().forward(*args, **kwargs)
+
+    class GroupNorm(torch.nn.GroupNorm, CastWeightBiasOp):
+        def reset_parameters(self):
+            return None
+
+        def forward_comfy_cast_weights(self, input):
+            weight, bias = cast_bias_weight(self, input)
+            return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps)
+
+        def forward(self, *args, **kwargs):
+            if self.comfy_cast_weights:
+                return self.forward_comfy_cast_weights(*args, **kwargs)
+            else:
+                return super().forward(*args, **kwargs)
+
+
+    class LayerNorm(torch.nn.LayerNorm, CastWeightBiasOp):
+        def reset_parameters(self):
+            return None
+
+        def forward_comfy_cast_weights(self, input):
+            if self.weight is not None:
+                weight, bias = cast_bias_weight(self, input)
+            else:
+                weight = None
+                bias = None
+            return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps)
+
+        def forward(self, *args, **kwargs):
+            if self.comfy_cast_weights:
+                return self.forward_comfy_cast_weights(*args, **kwargs)
+            else:
+                return super().forward(*args, **kwargs)
+
+    class ConvTranspose2d(torch.nn.ConvTranspose2d, CastWeightBiasOp):
+        def reset_parameters(self):
+            return None
+
+        def forward_comfy_cast_weights(self, input, output_size=None):
+            num_spatial_dims = 2
+            output_padding = self._output_padding(
+                input, output_size, self.stride, self.padding, self.kernel_size,
+                num_spatial_dims, self.dilation)
+
+            weight, bias = cast_bias_weight(self, input)
+            return torch.nn.functional.conv_transpose2d(
+                input, weight, bias, self.stride, self.padding,
+                output_padding, self.groups, self.dilation)
+
+        def forward(self, *args, **kwargs):
+            if self.comfy_cast_weights:
+                return self.forward_comfy_cast_weights(*args, **kwargs)
+            else:
+                return super().forward(*args, **kwargs)
+
+    @classmethod
+    def conv_nd(s, dims, *args, **kwargs):
+        if dims == 2:
+            return s.Conv2d(*args, **kwargs)
+        elif dims == 3:
+            return s.Conv3d(*args, **kwargs)
+        else:
+            raise ValueError(f"unsupported dimensions: {dims}")
+
+
+class manual_cast(disable_weight_init):
+    class Linear(disable_weight_init.Linear):
+        comfy_cast_weights = True
+
+    class Conv2d(disable_weight_init.Conv2d):
+        comfy_cast_weights = True
+
+    class Conv3d(disable_weight_init.Conv3d):
+        comfy_cast_weights = True
+
+    class GroupNorm(disable_weight_init.GroupNorm):
+        comfy_cast_weights = True
+
+    class LayerNorm(disable_weight_init.LayerNorm):
+        comfy_cast_weights = True
+
+    class ConvTranspose2d(disable_weight_init.ConvTranspose2d):
+        comfy_cast_weights = True
@ -0,0 +1,25 @@


class CLIPTextEncodeControlnet:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"clip": ("CLIP", ), "conditioning": ("CONDITIONING", ), "text": ("STRING", {"multiline": True})}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing/conditioning"

    def encode(self, clip, conditioning, text):
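        # Encode `text` and attach it under ControlNet-specific keys, so a
        # ControlNet can be driven by a different prompt than the one already
        # stored in `conditioning`.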
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['cross_attn_controlnet'] = cond
            n[1]['pooled_output_controlnet'] = pooled
            c.append(n)
        return (c, )

NODE_CLASS_MAPPINGS = {
    "CLIPTextEncodeControlnet": CLIPTextEncodeControlnet
}
@ -0,0 +1,42 @@
# code adapted from https://github.com/exx8/differential-diffusion

import torch

class DifferentialDiffusion():
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL", ),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "apply"
    CATEGORY = "_for_testing"
    INIT = False

    def apply(self, model):
        model = model.clone()
        model.set_model_denoise_mask_function(self.forward)
        return (model,)

    def forward(self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict):
        model = extra_options["model"]
        step_sigmas = extra_options["sigmas"]
        sigma_to = model.inner_model.model_sampling.sigma_min
        if step_sigmas[-1] > sigma_to:
            sigma_to = step_sigmas[-1]
        sigma_from = step_sigmas[0]

        ts_from = model.inner_model.model_sampling.timestep(sigma_from)
        ts_to = model.inner_model.model_sampling.timestep(sigma_to)
        current_ts = model.inner_model.model_sampling.timestep(sigma[0])
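
        # `threshold` falls linearly (in timestep) from 1 at the start of the
        # schedule to 0 at the end, so a pixel whose mask value is m is only
        # handed to the model once threshold <= m; the mask acts as a
        # per-pixel denoise strength.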
        threshold = (current_ts - ts_to) / (ts_from - ts_to)

        return (denoise_mask >= threshold).to(denoise_mask.dtype)


NODE_CLASS_MAPPINGS = {
    "DifferentialDiffusion": DifferentialDiffusion,
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "DifferentialDiffusion": "Differential Diffusion",
}
@ -0,0 +1,49 @@
import torch
import comfy.model_management

from kornia.morphology import dilation, erosion, opening, closing, gradient, top_hat, bottom_hat


class Morphology:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",),
                             "operation": (["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"],),
                             "kernel_size": ("INT", {"default": 3, "min": 3, "max": 999, "step": 1}),
                             }}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "process"

    CATEGORY = "image/postprocessing"

    def process(self, image, operation, kernel_size):
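        # ComfyUI images are [B, H, W, C]; kornia's morphology ops expect
        # [B, C, H, W], hence the movedim round trip below.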
        device = comfy.model_management.get_torch_device()
        kernel = torch.ones(kernel_size, kernel_size, device=device)
        image_k = image.to(device).movedim(-1, 1)
        if operation == "erode":
            output = erosion(image_k, kernel)
        elif operation == "dilate":
            output = dilation(image_k, kernel)
        elif operation == "open":
            output = opening(image_k, kernel)
        elif operation == "close":
            output = closing(image_k, kernel)
        elif operation == "gradient":
            output = gradient(image_k, kernel)
        elif operation == "top_hat":
            output = top_hat(image_k, kernel)
        elif operation == "bottom_hat":
            output = bottom_hat(image_k, kernel)
        else:
            raise ValueError(f"Invalid operation {operation} for morphology. Must be one of 'erode', 'dilate', 'open', 'close', 'gradient', 'top_hat', 'bottom_hat'")
        img_out = output.to(comfy.model_management.intermediate_device()).movedim(1, -1)
        return (img_out,)

NODE_CLASS_MAPPINGS = {
    "Morphology": Morphology,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "Morphology": "ImageMorphology",
}
@ -0,0 +1,55 @@
import torch
import comfy.model_management
import comfy.sample
import comfy.samplers
import comfy.utils


class PerpNeg:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL", ),
                             "empty_conditioning": ("CONDITIONING", ),
                             "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, empty_conditioning, neg_scale):
        m = model.clone()
        nocond = comfy.sample.convert_cond(empty_conditioning)

        def cfg_function(args):
            model = args["model"]
            noise_pred_pos = args["cond_denoised"]
            noise_pred_neg = args["uncond_denoised"]
            cond_scale = args["cond_scale"]
            x = args["input"]
            sigma = args["sigma"]
            model_options = args["model_options"]
            nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, "negative")

            (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)

            pos = noise_pred_pos - noise_pred_nocond
            neg = noise_pred_neg - noise_pred_nocond
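            # Remove from the negative direction its projection onto the
            # positive direction, keeping only the component perpendicular to
            # `pos`, so the negative prompt cannot cancel out what the positive
            # prompt asks for.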
            perp = neg - ((torch.mul(neg, pos).sum()) / (torch.norm(pos) ** 2)) * pos
            perp_neg = perp * neg_scale
            cfg_result = noise_pred_nocond + cond_scale * (pos - perp_neg)
            cfg_result = x - cfg_result
            return cfg_result

        m.set_model_sampler_cfg_function(cfg_function)

        return (m, )


NODE_CLASS_MAPPINGS = {
    "PerpNeg": PerpNeg,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "PerpNeg": "Perp-Neg",
}
@ -0,0 +1,187 @@
import torch
import torch.nn as nn
import folder_paths
import comfy.clip_model
import comfy.clip_vision
import comfy.model_management
import comfy.ops
import comfy.utils

# code for model from: https://github.com/TencentARC/PhotoMaker/blob/main/photomaker/model.py under Apache License Version 2.0
VISION_CONFIG_DICT = {
    "hidden_size": 1024,
    "image_size": 224,
    "intermediate_size": 4096,
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768,
    "hidden_act": "quick_gelu",
}

class MLP(nn.Module):
    def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True, operations=comfy.ops):
        super().__init__()
        if use_residual:
            assert in_dim == out_dim
        self.layernorm = operations.LayerNorm(in_dim)
        self.fc1 = operations.Linear(in_dim, hidden_dim)
        self.fc2 = operations.Linear(hidden_dim, out_dim)
        self.use_residual = use_residual
        self.act_fn = nn.GELU()

    def forward(self, x):
        residual = x
        x = self.layernorm(x)
        x = self.fc1(x)
        x = self.act_fn(x)
        x = self.fc2(x)
        if self.use_residual:
            x = x + residual
        return x


class FuseModule(nn.Module):
    def __init__(self, embed_dim, operations):
        super().__init__()
        self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False, operations=operations)
        self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True, operations=operations)
        self.layer_norm = operations.LayerNorm(embed_dim)

    def fuse_fn(self, prompt_embeds, id_embeds):
        stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
        stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
        stacked_id_embeds = self.mlp2(stacked_id_embeds)
        stacked_id_embeds = self.layer_norm(stacked_id_embeds)
        return stacked_id_embeds

    def forward(
        self,
        prompt_embeds,
        id_embeds,
        class_tokens_mask,
    ) -> torch.Tensor:
        # id_embeds shape: [b, max_num_inputs, 1, 2048]
        id_embeds = id_embeds.to(prompt_embeds.dtype)
        num_inputs = class_tokens_mask.sum().unsqueeze(0)  # TODO: check for training case
        batch_size, max_num_inputs = id_embeds.shape[:2]
        # seq_length: 77
        seq_length = prompt_embeds.shape[1]
        # flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
        flat_id_embeds = id_embeds.view(
            -1, id_embeds.shape[-2], id_embeds.shape[-1]
        )
        # valid_id_mask [b*max_num_inputs]
        valid_id_mask = (
            torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
            < num_inputs[:, None]
        )
        valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]

        prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
        class_tokens_mask = class_tokens_mask.view(-1)
        valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
        # slice out the image token embeddings
        image_token_embeds = prompt_embeds[class_tokens_mask]
        stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
        assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
        prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
        updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1)
        return updated_prompt_embeds


class PhotoMakerIDEncoder(comfy.clip_model.CLIPVisionModelProjection):
    def __init__(self):
        self.load_device = comfy.model_management.text_encoder_device()
        offload_device = comfy.model_management.text_encoder_offload_device()
        dtype = comfy.model_management.text_encoder_dtype(self.load_device)

        super().__init__(VISION_CONFIG_DICT, dtype, offload_device, comfy.ops.manual_cast)
        self.visual_projection_2 = comfy.ops.manual_cast.Linear(1024, 1280, bias=False)
        self.fuse_module = FuseModule(2048, comfy.ops.manual_cast)

    def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask):
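        # The ID images are encoded once by the shared vision tower, then
        # projected twice (1024 -> 768 and 1024 -> 1280) and concatenated into
        # 2048-dim id embeddings, matching the fuse module's embed_dim.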
        b, num_inputs, c, h, w = id_pixel_values.shape
        id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w)

        shared_id_embeds = self.vision_model(id_pixel_values)[2]
        id_embeds = self.visual_projection(shared_id_embeds)
        id_embeds_2 = self.visual_projection_2(shared_id_embeds)

        id_embeds = id_embeds.view(b, num_inputs, 1, -1)
        id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1)

        id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1)
        updated_prompt_embeds = self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask)

        return updated_prompt_embeds


class PhotoMakerLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"photomaker_model_name": (folder_paths.get_filename_list("photomaker"), )}}

    RETURN_TYPES = ("PHOTOMAKER",)
    FUNCTION = "load_photomaker_model"

    CATEGORY = "_for_testing/photomaker"

    def load_photomaker_model(self, photomaker_model_name):
        photomaker_model_path = folder_paths.get_full_path("photomaker", photomaker_model_name)
        photomaker_model = PhotoMakerIDEncoder()
        data = comfy.utils.load_torch_file(photomaker_model_path, safe_load=True)
        if "id_encoder" in data:
            data = data["id_encoder"]
        photomaker_model.load_state_dict(data)
        return (photomaker_model,)


class PhotoMakerEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"photomaker": ("PHOTOMAKER",),
                             "image": ("IMAGE",),
                             "clip": ("CLIP", ),
                             "text": ("STRING", {"multiline": True, "default": "photograph of photomaker"}),
                             }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_photomaker"

    CATEGORY = "_for_testing/photomaker"

    def apply_photomaker(self, photomaker, image, clip, text):
        special_token = "photomaker"
        pixel_values = comfy.clip_vision.clip_preprocess(image.to(photomaker.load_device)).float()
        try:
            index = text.split(" ").index(special_token) + 1
        except ValueError:
            index = -1
        tokens = clip.tokenize(text, return_word_ids=True)
        out_tokens = {}
        for k in tokens:
            out_tokens[k] = []
            for t in tokens[k]:
                f = list(filter(lambda x: x[2] != index, t))
                while len(f) < len(t):
                    f.append(t[-1])
                out_tokens[k].append(f)

        cond, pooled = clip.encode_from_tokens(out_tokens, return_pooled=True)

        if index > 0:
            token_index = index - 1
            num_id_images = 1
            class_tokens_mask = [True if token_index <= i < token_index + num_id_images else False for i in range(77)]
            out = photomaker(id_pixel_values=pixel_values.unsqueeze(0), prompt_embeds=cond.to(photomaker.load_device),
                             class_tokens_mask=torch.tensor(class_tokens_mask, dtype=torch.bool, device=photomaker.load_device).unsqueeze(0))
        else:
            out = cond

        return ([[out, {"pooled_output": pooled}]], )


NODE_CLASS_MAPPINGS = {
    "PhotoMakerLoader": PhotoMakerLoader,
    "PhotoMakerEncode": PhotoMakerEncode,
}

@ -0,0 +1,170 @@
import torch
from torch import einsum
import torch.nn.functional as F
import math

from einops import rearrange, repeat
import os
from comfy.ldm.modules.attention import optimized_attention, _ATTN_PRECISION
import comfy.samplers

# from comfy/ldm/modules/attention.py
# but modified to return attention scores as well as output
def attention_basic_with_sim(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    # force cast to fp32 to avoid overflowing
    if _ATTN_PRECISION == "fp32":
        sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
    else:
        sim = einsum('b i d, b j d -> b i j', q, k) * scale

    del q, k

    if mask is not None:
        mask = rearrange(mask, 'b ... -> b (...)')
        max_neg_value = -torch.finfo(sim.dtype).max
        mask = repeat(mask, 'b j -> (b h) () j', h=h)
        sim.masked_fill_(~mask, max_neg_value)

    # attention, what we cannot get enough of
    sim = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return (out, sim)

def create_blur_map(x0, attn, sigma=3.0, threshold=1.0):
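    # Average the saved uncond self-attention over heads, threshold the summed
    # scores to get a mask of the regions the model attends to most, upsample
    # that mask to the latent resolution, and blur only the masked regions of x0.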
    # reshape and GAP the attention map
    _, hw1, hw2 = attn.shape
    b, _, lh, lw = x0.shape
    attn = attn.reshape(b, -1, hw1, hw2)
    # Global Average Pool
    mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold
    ratio = 2 ** (math.ceil(math.sqrt(lh * lw / hw1)) - 1).bit_length()
    mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)]

    # Reshape
    mask = (
        mask.reshape(b, *mid_shape)
        .unsqueeze(1)
        .type(attn.dtype)
    )
    # Upsample
    mask = F.interpolate(mask, (lh, lw))

    blurred = gaussian_blur_2d(x0, kernel_size=9, sigma=sigma)
    blurred = blurred * mask + x0 * (1 - mask)
    return blurred

def gaussian_blur_2d(img, kernel_size, sigma):
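    # Separable Gaussian blur: build a 1D kernel from the normal pdf, take the
    # outer product to get the 2D kernel, and apply it per channel with a
    # depthwise (groups == channels) convolution over a reflect-padded input.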
    ksize_half = (kernel_size - 1) * 0.5

    x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)

    pdf = torch.exp(-0.5 * (x / sigma).pow(2))

    x_kernel = pdf / pdf.sum()
    x_kernel = x_kernel.to(device=img.device, dtype=img.dtype)

    kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :])
    kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1])

    padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2]

    img = F.pad(img, padding, mode="reflect")
    img = F.conv2d(img, kernel2d, groups=img.shape[-3])
    return img

class SelfAttentionGuidance:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL",),
                             "scale": ("FLOAT", {"default": 0.5, "min": -2.0, "max": 5.0, "step": 0.1}),
                             "blur_sigma": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.1}),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, scale, blur_sigma):
        m = model.clone()

        attn_scores = None

        # TODO: make this work properly with chunked batches
        # currently, we can only save the attn from one UNet call
        def attn_and_record(q, k, v, extra_options):
            nonlocal attn_scores
            # if uncond, save the attention scores
            heads = extra_options["n_heads"]
            cond_or_uncond = extra_options["cond_or_uncond"]
            b = q.shape[0] // len(cond_or_uncond)
            if 1 in cond_or_uncond:
                uncond_index = cond_or_uncond.index(1)
                # do the entire attention operation, but save the attention scores to attn_scores
                (out, sim) = attention_basic_with_sim(q, k, v, heads=heads)
                # when using a higher batch size, I BELIEVE the result batch dimension is [uc1, ... ucn, c1, ... cn]
                n_slices = heads * b
                attn_scores = sim[n_slices * uncond_index:n_slices * (uncond_index + 1)]
                return out
            else:
                return optimized_attention(q, k, v, heads=heads)

        def post_cfg_function(args):
            nonlocal attn_scores
            uncond_attn = attn_scores

            sag_scale = scale
            sag_sigma = blur_sigma
            sag_threshold = 1.0
            model = args["model"]
            uncond_pred = args["uncond_denoised"]
            uncond = args["uncond"]
            cfg_result = args["denoised"]
            sigma = args["sigma"]
            model_options = args["model_options"]
            x = args["input"]
            if min(cfg_result.shape[2:]) <= 4:  # skip when too small to add padding
                return cfg_result

            # create the adversarially blurred image
            degraded = create_blur_map(uncond_pred, uncond_attn, sag_sigma, sag_threshold)
            degraded_noised = degraded + x - uncond_pred
            # call into the UNet
            (sag, _) = comfy.samplers.calc_cond_uncond_batch(model, uncond, None, degraded_noised, sigma, model_options)
            return cfg_result + (degraded - sag) * sag_scale

        m.set_model_sampler_post_cfg_function(post_cfg_function, disable_cfg1_optimization=True)

        # from diffusers:
        # unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch
        m.set_model_attn1_replace(attn_and_record, "middle", 0, 0)

        return (m, )

NODE_CLASS_MAPPINGS = {
    "SelfAttentionGuidance": SelfAttentionGuidance,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "SelfAttentionGuidance": "Self-Attention Guidance",
}
@ -0,0 +1,47 @@
import torch
import nodes
import comfy.utils

class SD_4XUpscale_Conditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"images": ("IMAGE",),
                             "positive": ("CONDITIONING",),
                             "negative": ("CONDITIONING",),
                             "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             }}
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")

    FUNCTION = "encode"

    CATEGORY = "conditioning/upscale_diffusion"

    def encode(self, images, positive, negative, scale_ratio, noise_augmentation):
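        # The 4x upscale model works on a latent at 1/4 of the target pixel
        # size: the source image is rescaled to (width // 4, height // 4) in
        # [-1, 1] and attached as `concat_image` conditioning, and an empty
        # latent of the same spatial size is returned for sampling.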
        width = max(1, round(images.shape[-2] * scale_ratio))
        height = max(1, round(images.shape[-3] * scale_ratio))

        pixels = comfy.utils.common_upscale((images.movedim(-1, 1) * 2.0) - 1.0, width // 4, height // 4, "bilinear", "center")

        out_cp = []
        out_cn = []

        for t in positive:
            n = [t[0], t[1].copy()]
            n[1]['concat_image'] = pixels
            n[1]['noise_augmentation'] = noise_augmentation
            out_cp.append(n)

        for t in negative:
            n = [t[0], t[1].copy()]
            n[1]['concat_image'] = pixels
            n[1]['noise_augmentation'] = noise_augmentation
            out_cn.append(n)

        latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
        return (out_cp, out_cn, {"samples": latent})

NODE_CLASS_MAPPINGS = {
    "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning,
}
@ -0,0 +1,143 @@
import torch
import nodes
import comfy.utils

def camera_embeddings(elevation, azimuth):
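    # Zero123-style camera conditioning: a [1, 1, 4] tensor holding the polar
    # offset deg2rad((90 - elevation) - 90), sin and cos of the azimuth, and a
    # constant deg2rad(90) term for zero-elevation radius.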
    elevation = torch.as_tensor([elevation])
    azimuth = torch.as_tensor([azimuth])
    embeddings = torch.stack(
        [
            torch.deg2rad(
                (90 - elevation) - (90)
            ),  # Zero123 polar is 90-elevation
            torch.sin(torch.deg2rad(azimuth)),
            torch.cos(torch.deg2rad(azimuth)),
            torch.deg2rad(
                90 - torch.full_like(elevation, 0)
            ),
        ], dim=-1).unsqueeze(1)

    return embeddings


class StableZero123_Conditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"clip_vision": ("CLIP_VISION",),
                             "init_image": ("IMAGE",),
                             "vae": ("VAE",),
                             "width": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                             "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}),
                             "azimuth": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}),
                             }}
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")

    FUNCTION = "encode"

    CATEGORY = "conditioning/3d_models"

    def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth):
        output = clip_vision.encode_image(init_image)
        pooled = output.image_embeds.unsqueeze(0)
        pixels = comfy.utils.common_upscale(init_image.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
        encode_pixels = pixels[:, :, :, :3]
        t = vae.encode(encode_pixels)
        cam_embeds = camera_embeddings(elevation, azimuth)
        cond = torch.cat([pooled, cam_embeds.to(pooled.device).repeat((pooled.shape[0], 1, 1))], dim=-1)

        positive = [[cond, {"concat_latent_image": t}]]
        negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]]
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return (positive, negative, {"samples": latent})


class StableZero123_Conditioning_Batched:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"clip_vision": ("CLIP_VISION",),
                             "init_image": ("IMAGE",),
                             "vae": ("VAE",),
                             "width": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                             "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}),
                             "azimuth": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}),
                             "elevation_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}),
                             "azimuth_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}),
                             }}
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")

    FUNCTION = "encode"

    CATEGORY = "conditioning/3d_models"

    def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth, elevation_batch_increment, azimuth_batch_increment):
        output = clip_vision.encode_image(init_image)
        pooled = output.image_embeds.unsqueeze(0)
        pixels = comfy.utils.common_upscale(init_image.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
        encode_pixels = pixels[:, :, :, :3]
        t = vae.encode(encode_pixels)

        cam_embeds = []
        for i in range(batch_size):
            cam_embeds.append(camera_embeddings(elevation, azimuth))
            elevation += elevation_batch_increment
            azimuth += azimuth_batch_increment

        cam_embeds = torch.cat(cam_embeds, dim=0)
        cond = torch.cat([comfy.utils.repeat_to_batch_size(pooled, batch_size), cam_embeds], dim=-1)

        positive = [[cond, {"concat_latent_image": t}]]
||||||
|
negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]] |
||||||
|
latent = torch.zeros([batch_size, 4, height // 8, width // 8]) |
||||||
|
return (positive, negative, {"samples":latent, "batch_index": [0] * batch_size}) |
||||||
|
|
||||||
|
class SV3D_Conditioning: |
||||||
|
@classmethod |
||||||
|
def INPUT_TYPES(s): |
||||||
|
return {"required": { "clip_vision": ("CLIP_VISION",), |
||||||
|
"init_image": ("IMAGE",), |
||||||
|
"vae": ("VAE",), |
||||||
|
"width": ("INT", {"default": 576, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), |
||||||
|
"height": ("INT", {"default": 576, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), |
||||||
|
"video_frames": ("INT", {"default": 21, "min": 1, "max": 4096}), |
||||||
|
"elevation": ("FLOAT", {"default": 0.0, "min": -90.0, "max": 90.0, "step": 0.1, "round": False}), |
||||||
|
}} |
||||||
|
RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") |
||||||
|
RETURN_NAMES = ("positive", "negative", "latent") |
||||||
|
|
||||||
|
FUNCTION = "encode" |
||||||
|
|
||||||
|
CATEGORY = "conditioning/3d_models" |
||||||
|
|
||||||
|
def encode(self, clip_vision, init_image, vae, width, height, video_frames, elevation): |
||||||
|
output = clip_vision.encode_image(init_image) |
||||||
|
pooled = output.image_embeds.unsqueeze(0) |
||||||
|
pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) |
||||||
|
encode_pixels = pixels[:,:,:,:3] |
||||||
|
t = vae.encode(encode_pixels) |
||||||
|
|
||||||
|
azimuth = 0 |
||||||
|
azimuth_increment = 360 / (max(video_frames, 2) - 1) |
||||||
|
|
||||||
|
elevations = [] |
||||||
|
azimuths = [] |
||||||
|
for i in range(video_frames): |
||||||
|
elevations.append(elevation) |
||||||
|
azimuths.append(azimuth) |
||||||
|
azimuth += azimuth_increment |
||||||
|
|
||||||
|
positive = [[pooled, {"concat_latent_image": t, "elevation": elevations, "azimuth": azimuths}]] |
||||||
|
negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t), "elevation": elevations, "azimuth": azimuths}]] |
||||||
|
latent = torch.zeros([video_frames, 4, height // 8, width // 8]) |
||||||
|
return (positive, negative, {"samples":latent}) |
||||||
|
|
||||||
|
|
||||||
|
NODE_CLASS_MAPPINGS = { |
||||||
|
"StableZero123_Conditioning": StableZero123_Conditioning, |
||||||
|
"StableZero123_Conditioning_Batched": StableZero123_Conditioning_Batched, |
||||||
|
"SV3D_Conditioning": SV3D_Conditioning, |
||||||
|
} |
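
camera_embeddings packs a single view into a 1x1x4 tensor: minus the elevation in radians (the (90 - elevation) - 90 term, since Zero123 measures polar angle as 90 - elevation), the sine and cosine of the azimuth, and a constant 90 degrees in radians. A quick sketch (illustrative only, not part of this diff) of the values it should produce:

# Illustrative: expected camera embedding for elevation=30, azimuth=90.
import torch

elevation, azimuth = 30.0, 90.0
emb = torch.stack([
    torch.deg2rad(torch.as_tensor([(90 - elevation) - 90])),  # -elevation in radians
    torch.sin(torch.deg2rad(torch.as_tensor([azimuth]))),
    torch.cos(torch.deg2rad(torch.as_tensor([azimuth]))),
    torch.deg2rad(torch.as_tensor([90.0])),                   # constant polar term
], dim=-1).unsqueeze(1)
print(emb.shape)      # torch.Size([1, 1, 4])
print(emb.flatten())  # approx. [-0.5236, 1.0000, 0.0000, 1.5708]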
@ -0,0 +1,140 @@
"""
    This file is part of ComfyUI.
    Copyright (C) 2024 Stability AI

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.
"""

import torch
import nodes
import comfy.utils


class StableCascade_EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "width": ("INT", {"default": 1024, "min": 256, "max": nodes.MAX_RESOLUTION, "step": 8}),
            "height": ("INT", {"default": 1024, "min": 256, "max": nodes.MAX_RESOLUTION, "step": 8}),
            "compression": ("INT", {"default": 42, "min": 4, "max": 128, "step": 1}),
            "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})
        }}
    RETURN_TYPES = ("LATENT", "LATENT")
    RETURN_NAMES = ("stage_c", "stage_b")
    FUNCTION = "generate"

    CATEGORY = "latent/stable_cascade"

    def generate(self, width, height, compression, batch_size=1):
        c_latent = torch.zeros([batch_size, 16, height // compression, width // compression])
        b_latent = torch.zeros([batch_size, 4, height // 4, width // 4])
        return ({
            "samples": c_latent,
        }, {
            "samples": b_latent,
        })

class StableCascade_StageC_VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "image": ("IMAGE",),
            "vae": ("VAE", ),
            "compression": ("INT", {"default": 42, "min": 4, "max": 128, "step": 1}),
        }}
    RETURN_TYPES = ("LATENT", "LATENT")
    RETURN_NAMES = ("stage_c", "stage_b")
    FUNCTION = "generate"

    CATEGORY = "latent/stable_cascade"

    def generate(self, image, vae, compression):
        width = image.shape[-2]
        height = image.shape[-3]
        out_width = (width // compression) * vae.downscale_ratio
        out_height = (height // compression) * vae.downscale_ratio

        s = comfy.utils.common_upscale(image.movedim(-1,1), out_width, out_height, "bicubic", "center").movedim(1,-1)

        c_latent = vae.encode(s[:,:,:,:3])
        b_latent = torch.zeros([c_latent.shape[0], 4, (height // 8) * 2, (width // 8) * 2])
        return ({
            "samples": c_latent,
        }, {
            "samples": b_latent,
        })

class StableCascade_StageB_Conditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "conditioning": ("CONDITIONING",),
                              "stage_c": ("LATENT",),
                             }}
    RETURN_TYPES = ("CONDITIONING",)

    FUNCTION = "set_prior"

    CATEGORY = "conditioning/stable_cascade"

    def set_prior(self, conditioning, stage_c):
        c = []
        for t in conditioning:
            d = t[1].copy()
            d['stable_cascade_prior'] = stage_c['samples']
            n = [t[0], d]
            c.append(n)
        return (c, )

class StableCascade_SuperResolutionControlnet:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "image": ("IMAGE",),
            "vae": ("VAE", ),
        }}
    RETURN_TYPES = ("IMAGE", "LATENT", "LATENT")
    RETURN_NAMES = ("controlnet_input", "stage_c", "stage_b")
    FUNCTION = "generate"

    CATEGORY = "_for_testing/stable_cascade"

    def generate(self, image, vae):
        width = image.shape[-2]
        height = image.shape[-3]
        batch_size = image.shape[0]
        controlnet_input = vae.encode(image[:,:,:,:3]).movedim(1, -1)

        c_latent = torch.zeros([batch_size, 16, height // 16, width // 16])
        b_latent = torch.zeros([batch_size, 4, height // 2, width // 2])
        return (controlnet_input, {
            "samples": c_latent,
        }, {
            "samples": b_latent,
        })

NODE_CLASS_MAPPINGS = {
    "StableCascade_EmptyLatentImage": StableCascade_EmptyLatentImage,
    "StableCascade_StageB_Conditioning": StableCascade_StageB_Conditioning,
    "StableCascade_StageC_VAEEncode": StableCascade_StageC_VAEEncode,
    "StableCascade_SuperResolutionControlnet": StableCascade_SuperResolutionControlnet,
}
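
The compression parameter drives both latent sizes in StableCascade_EmptyLatentImage: the stage C latent is the pixel resolution integer-divided by compression, while the stage B latent is fixed at a quarter of the pixel resolution. A quick arithmetic sketch (illustrative only, not part of this diff) at the node's defaults:

# Illustrative size check for StableCascade_EmptyLatentImage at its defaults.
import torch

width, height, compression, batch_size = 1024, 1024, 42, 1
c_latent = torch.zeros([batch_size, 16, height // compression, width // compression])
b_latent = torch.zeros([batch_size, 4, height // 4, width // 4])
print(c_latent.shape)  # torch.Size([1, 16, 24, 24])   since 1024 // 42 == 24
print(b_latent.shape)  # torch.Size([1, 4, 256, 256])  since 1024 // 4 == 256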
@ -0,0 +1,45 @@
from PIL import Image, ImageOps
from io import BytesIO
import numpy as np
import struct
import comfy.utils
import time

#You can use this node to save full size images through the websocket, the
#images will be sent in exactly the same format as the image previews: as
#binary images on the websocket with an 8 byte header indicating the type
#of binary message (first 4 bytes) and the image format (next 4 bytes).

#Note that no metadata will be put in the images saved with this node.

class SaveImageWebsocket:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),}
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "api/image"

    def save_images(self, images):
        pbar = comfy.utils.ProgressBar(images.shape[0])
        step = 0
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            pbar.update_absolute(step, images.shape[0], ("PNG", img, None))
            step += 1

        return {}

    def IS_CHANGED(s, images):
        return time.time()

NODE_CLASS_MAPPINGS = {
    "SaveImageWebsocket": SaveImageWebsocket,
}
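
The comment block in this file pins down the wire format: an 8-byte header of two 32-bit integers (message type, then image format) followed by the raw encoded image, which is why the API example later in this diff slices off out[8:]. A hypothetical client-side parser (illustrative only, not part of this diff; the big-endian field layout is an assumption based on the preview protocol):

# Illustrative parser for the binary frames sent by SaveImageWebsocket.
# Assumes both 32-bit header fields are big-endian.
import io
import struct
from PIL import Image

def parse_binary_frame(frame: bytes):
    event_type, image_format = struct.unpack(">II", frame[:8])
    image = Image.open(io.BytesIO(frame[8:]))  # remaining bytes are the PNG
    return event_type, image_format, image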
@ -0,0 +1,35 @@
import os
import shutil

base_path = os.path.dirname(os.path.realpath(__file__))


def update_windows_updater():
    top_path = os.path.dirname(base_path)
    updater_path = os.path.join(base_path, ".ci/update_windows/update.py")
    bat_path = os.path.join(base_path, ".ci/update_windows/update_comfyui.bat")

    dest_updater_path = os.path.join(top_path, "update/update.py")
    dest_bat_path = os.path.join(top_path, "update/update_comfyui.bat")
    dest_bat_deps_path = os.path.join(top_path, "update/update_comfyui_and_python_dependencies.bat")

    try:
        with open(dest_bat_path, 'rb') as f:
            contents = f.read()
    except:
        return

    if not contents.startswith(b"..\\python_embeded\\python.exe .\\update.py"):
        return

    shutil.copy(updater_path, dest_updater_path)
    try:
        with open(dest_bat_deps_path, 'rb') as f:
            contents = f.read()
        contents = contents.replace(b'..\\python_embeded\\python.exe .\\update.py ..\\ComfyUI\\', b'call update_comfyui.bat nopause')
        with open(dest_bat_deps_path, 'wb') as f:
            f.write(contents)
    except:
        pass
    shutil.copy(bat_path, dest_bat_path)
    print("Updated the windows standalone package updater.")
@ -1,13 +1,14 @@
torch |torch
torchsde |torchsde
 |torchvision
einops |einops
transformers>=4.25.1 |transformers>=4.25.1
safetensors>=0.3.0 |safetensors>=0.3.0
aiohttp |aiohttp
accelerate |
pyyaml |pyyaml
Pillow |Pillow
scipy |scipy
tqdm |tqdm
psutil |psutil
 |kornia>=0.7.1
spandrel==0.3.1 |spandrel==0.3.1
@ -0,0 +1,159 @@
#This is an example that uses the websockets api and the SaveImageWebsocket node to get images directly without
#them being saved to disk

import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import json
import urllib.request
import urllib.parse

server_address = "127.0.0.1:8188"
client_id = str(uuid.uuid4())

def queue_prompt(prompt):
    p = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(p).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    return json.loads(urllib.request.urlopen(req).read())

def get_image(filename, subfolder, folder_type):
    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    url_values = urllib.parse.urlencode(data)
    with urllib.request.urlopen("http://{}/view?{}".format(server_address, url_values)) as response:
        return response.read()

def get_history(prompt_id):
    with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
        return json.loads(response.read())

def get_images(ws, prompt):
    prompt_id = queue_prompt(prompt)['prompt_id']
    output_images = {}
    current_node = ""
    while True:
        out = ws.recv()
        if isinstance(out, str):
            message = json.loads(out)
            if message['type'] == 'executing':
                data = message['data']
                if data['prompt_id'] == prompt_id:
                    if data['node'] is None:
                        break #Execution is done
                    else:
                        current_node = data['node']
        else:
            if current_node == 'save_image_websocket_node':
                images_output = output_images.get(current_node, [])
                images_output.append(out[8:])
                output_images[current_node] = images_output

    return output_images

prompt_text = """
{
    "3": {
        "class_type": "KSampler",
        "inputs": {
            "cfg": 8,
            "denoise": 1,
            "latent_image": [
                "5",
                0
            ],
            "model": [
                "4",
                0
            ],
            "negative": [
                "7",
                0
            ],
            "positive": [
                "6",
                0
            ],
            "sampler_name": "euler",
            "scheduler": "normal",
            "seed": 8566257,
            "steps": 20
        }
    },
    "4": {
        "class_type": "CheckpointLoaderSimple",
        "inputs": {
            "ckpt_name": "v1-5-pruned-emaonly.ckpt"
        }
    },
    "5": {
        "class_type": "EmptyLatentImage",
        "inputs": {
            "batch_size": 1,
            "height": 512,
            "width": 512
        }
    },
    "6": {
        "class_type": "CLIPTextEncode",
        "inputs": {
            "clip": [
                "4",
                1
            ],
            "text": "masterpiece best quality girl"
        }
    },
    "7": {
        "class_type": "CLIPTextEncode",
        "inputs": {
            "clip": [
                "4",
                1
            ],
            "text": "bad hands"
        }
    },
    "8": {
        "class_type": "VAEDecode",
        "inputs": {
            "samples": [
                "3",
                0
            ],
            "vae": [
                "4",
                2
            ]
        }
    },
    "save_image_websocket_node": {
        "class_type": "SaveImageWebsocket",
        "inputs": {
            "images": [
                "8",
                0
            ]
        }
    }
}
"""

prompt = json.loads(prompt_text)
#set the text prompt for our positive CLIPTextEncode
prompt["6"]["inputs"]["text"] = "masterpiece best quality man"

#set the seed for our KSampler node
prompt["3"]["inputs"]["seed"] = 5

ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
images = get_images(ws, prompt)

#Commented out code to display the output images:

# for node_id in images:
#     for image_data in images[node_id]:
#         from PIL import Image
#         import io
#         image = Image.open(io.BytesIO(image_data))
#         image.show()
@ -0,0 +1,9 @@
const { start } = require("./utils");
const lg = require("./utils/litegraph");

// Load things once per test file beforehand to ensure it's all warmed up for the tests
beforeAll(async () => {
  lg.setup(global);
  await start({ resetEnv: true });
  lg.teardown(global);
});
@ -1,3 +1,4 @@
{ |{
"presets": ["@babel/preset-env"] |"presets": ["@babel/preset-env"],
 |"plugins": ["babel-plugin-transform-import-meta"]
} |}