
Merge branch 'master' into save-images

pull/60/head
m957ymj75urz committed 2 years ago (via GitHub)
commit 3d2f60b315
28 changed files (lines changed · path):

  65  .ci/nightly/update_windows/update.py
   2  .ci/nightly/update_windows/update_comfyui.bat
   3  .ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat
  27  .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt
   2  .ci/nightly/windows_base_files/run_cpu.bat
   2  .ci/nightly/windows_base_files/run_nvidia_gpu.bat
  65  .ci/update_windows/update.py
   2  .ci/update_windows/update_comfyui.bat
   3  .ci/update_windows/update_comfyui_and_python_dependencies.bat
  27  .ci/windows_base_files/README_VERY_IMPORTANT.txt
   2  .ci/windows_base_files/run_cpu.bat
   2  .ci/windows_base_files/run_nvidia_gpu.bat
  64  .github/workflows/windows_release.yml
  63  .github/workflows/windows_release_cu118.yml
  66  .github/workflows/windows_release_nightly_pytorch.yml
  27  README.md
   6  comfy/cldm/cldm.py
   5  comfy/ldm/models/diffusion/ddpm.py
  24  comfy/ldm/modules/attention.py
  70  comfy/ldm/modules/diffusionmodules/model.py
   6  comfy/ldm/modules/diffusionmodules/openaimodel.py
  31  comfy/model_management.py
  20  comfy/sd.py
  35  main.py
  20  nodes.py
   2  requirements.txt
   7  server.py
  25  web/scripts/app.js

.ci/nightly/update_windows/update.py (new file, 65 lines)

@@ -0,0 +1,65 @@
import pygit2
from datetime import datetime
import sys

def pull(repo, remote_name='origin', branch='master'):
    for remote in repo.remotes:
        if remote.name == remote_name:
            remote.fetch()
            remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target
            merge_result, _ = repo.merge_analysis(remote_master_id)
            # Up to date, do nothing
            if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
                return
            # We can just fastforward
            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
                repo.checkout_tree(repo.get(remote_master_id))
                try:
                    master_ref = repo.lookup_reference('refs/heads/%s' % (branch))
                    master_ref.set_target(remote_master_id)
                except KeyError:
                    repo.create_branch(branch, repo.get(remote_master_id))
                repo.head.set_target(remote_master_id)
            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
                repo.merge(remote_master_id)

                if repo.index.conflicts is not None:
                    for conflict in repo.index.conflicts:
                        print('Conflicts found in:', conflict[0].path)
                    raise AssertionError('Conflicts, ahhhhh!!')

                user = repo.default_signature
                tree = repo.index.write_tree()
                commit = repo.create_commit('HEAD',
                                            user,
                                            user,
                                            'Merge!',
                                            tree,
                                            [repo.head.target, remote_master_id])
                # We need to do this or git CLI will think we are still merging.
                repo.state_cleanup()
            else:
                raise AssertionError('Unknown merge analysis result')

repo = pygit2.Repository(str(sys.argv[1]))
ident = pygit2.Signature('comfyui', 'comfy@ui')
try:
    print("stashing current changes")
    repo.stash(ident)
except KeyError:
    print("nothing to stash")
backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S'))
print("creating backup branch: {}".format(backup_branch_name))
repo.branches.local.create(backup_branch_name, repo.head.peel())

print("checking out master branch")
branch = repo.lookup_branch('master')
ref = repo.lookup_reference(branch.name)
repo.checkout(ref)

print("pulling latest changes")
pull(repo)

print("Done!")

.ci/nightly/update_windows/update_comfyui.bat (new file, 2 lines)

@@ -0,0 +1,2 @@
..\python_embeded\python.exe .\update.py ..\ComfyUI\
pause

.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat (new file, 3 lines)

@@ -0,0 +1,3 @@
..\python_embeded\python.exe .\update.py ..\ComfyUI\
..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2
pause

.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt (new file, 27 lines)

@@ -0,0 +1,27 @@
HOW TO RUN:

if you have a NVIDIA gpu:
run_nvidia_gpu.bat

To run it in slow CPU mode:
run_cpu.bat

IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints

You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt

RECOMMENDED WAY TO UPDATE:
To update the ComfyUI code: update\update_comfyui.bat

To update ComfyUI with the python dependencies:
update\update_comfyui_and_python_dependencies.bat

.ci/nightly/windows_base_files/run_cpu.bat (new file, 2 lines)

@@ -0,0 +1,2 @@
.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build
pause

.ci/nightly/windows_base_files/run_nvidia_gpu.bat (new file, 2 lines)

@@ -0,0 +1,2 @@
.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --use-pytorch-cross-attention
pause

.ci/update_windows/update.py (new file, 65 lines)

@@ -0,0 +1,65 @@
import pygit2
from datetime import datetime
import sys

def pull(repo, remote_name='origin', branch='master'):
    for remote in repo.remotes:
        if remote.name == remote_name:
            remote.fetch()
            remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target
            merge_result, _ = repo.merge_analysis(remote_master_id)
            # Up to date, do nothing
            if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
                return
            # We can just fastforward
            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
                repo.checkout_tree(repo.get(remote_master_id))
                try:
                    master_ref = repo.lookup_reference('refs/heads/%s' % (branch))
                    master_ref.set_target(remote_master_id)
                except KeyError:
                    repo.create_branch(branch, repo.get(remote_master_id))
                repo.head.set_target(remote_master_id)
            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
                repo.merge(remote_master_id)

                if repo.index.conflicts is not None:
                    for conflict in repo.index.conflicts:
                        print('Conflicts found in:', conflict[0].path)
                    raise AssertionError('Conflicts, ahhhhh!!')

                user = repo.default_signature
                tree = repo.index.write_tree()
                commit = repo.create_commit('HEAD',
                                            user,
                                            user,
                                            'Merge!',
                                            tree,
                                            [repo.head.target, remote_master_id])
                # We need to do this or git CLI will think we are still merging.
                repo.state_cleanup()
            else:
                raise AssertionError('Unknown merge analysis result')

repo = pygit2.Repository(str(sys.argv[1]))
ident = pygit2.Signature('comfyui', 'comfy@ui')
try:
    print("stashing current changes")
    repo.stash(ident)
except KeyError:
    print("nothing to stash")
backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S'))
print("creating backup branch: {}".format(backup_branch_name))
repo.branches.local.create(backup_branch_name, repo.head.peel())

print("checking out master branch")
branch = repo.lookup_branch('master')
ref = repo.lookup_reference(branch.name)
repo.checkout(ref)

print("pulling latest changes")
pull(repo)

print("Done!")

.ci/update_windows/update_comfyui.bat (new file, 2 lines)

@@ -0,0 +1,2 @@
..\python_embeded\python.exe .\update.py ..\ComfyUI\
pause

.ci/update_windows/update_comfyui_and_python_dependencies.bat (new file, 3 lines)

@@ -0,0 +1,3 @@
..\python_embeded\python.exe .\update.py ..\ComfyUI\
..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2
pause

.ci/windows_base_files/README_VERY_IMPORTANT.txt (new file, 27 lines)

@@ -0,0 +1,27 @@
HOW TO RUN:

if you have a NVIDIA gpu:
run_nvidia_gpu.bat

To run it in slow CPU mode:
run_cpu.bat

IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints

You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt

RECOMMENDED WAY TO UPDATE:
To update the ComfyUI code: update\update_comfyui.bat

To update ComfyUI with the python dependencies:
update\update_comfyui_and_python_dependencies.bat

.ci/windows_base_files/run_cpu.bat (new file, 2 lines)

@@ -0,0 +1,2 @@
.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build
pause

.ci/windows_base_files/run_nvidia_gpu.bat (new file, 2 lines)

@@ -0,0 +1,2 @@
.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build
pause

.github/workflows/windows_release.yml (new file, 64 lines)

@@ -0,0 +1,64 @@
name: "Windows Release"
on:
workflow_dispatch:
# push:
# branches:
# - master
jobs:
build:
permissions:
contents: "write"
packages: "write"
pull-requests: "read"
runs-on: windows-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- shell: bash
run: |
cd ..
cp -r ComfyUI ComfyUI_copy
curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip
unzip python_embeded.zip -d python_embeded
cd python_embeded
echo 'import site' >> ./python310._pth
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
./python.exe get-pip.py
./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2
sed -i '1i../ComfyUI' ./python310._pth
cd ..
mkdir ComfyUI_windows_portable
mv python_embeded ComfyUI_windows_portable
mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI
cd ComfyUI_windows_portable
mkdir update
cp -r ComfyUI/.ci/update_windows/* ./update/
cp -r ComfyUI/.ci/windows_base_files/* ./
cd ..
"C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable
mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu.7z
cd ComfyUI_windows_portable
python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
ls
- name: Upload binaries to release
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: ComfyUI_windows_portable_nvidia_or_cpu.7z
tag: "latest"
overwrite: true

.github/workflows/windows_release_cu118.yml (new file, 63 lines)

@@ -0,0 +1,63 @@
name: "Windows Release cu118"
on:
workflow_dispatch:
# push:
# branches:
# - master
jobs:
build:
permissions:
contents: "write"
packages: "write"
pull-requests: "read"
runs-on: windows-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- shell: bash
run: |
cd ..
cp -r ComfyUI ComfyUI_copy
curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip
unzip python_embeded.zip -d python_embeded
cd python_embeded
echo 'import site' >> ./python310._pth
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
./python.exe get-pip.py
./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2
sed -i '1i../ComfyUI' ./python310._pth
cd ..
mkdir ComfyUI_windows_portable
mv python_embeded ComfyUI_windows_portable
mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI
cd ComfyUI_windows_portable
mkdir update
cp -r ComfyUI/.ci/update_windows/* ./update/
cp -r ComfyUI/.ci/windows_base_files/* ./
cd ..
"C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable
mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
cd ComfyUI_windows_portable
python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
ls
- name: Upload binaries to release
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
tag: "latest"
overwrite: true

.github/workflows/windows_release_nightly_pytorch.yml (new file, 66 lines)

@@ -0,0 +1,66 @@
name: "Windows Release Nightly pytorch"
on:
workflow_dispatch:
# push:
# branches:
# - master
jobs:
build:
permissions:
contents: "write"
packages: "write"
pull-requests: "read"
runs-on: windows-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: actions/setup-python@v4
with:
python-version: '3.10.9'
- shell: bash
run: |
cd ..
cp -r ComfyUI ComfyUI_copy
curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip
unzip python_embeded.zip -d python_embeded
cd python_embeded
echo 'import site' >> ./python310._pth
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
./python.exe get-pip.py
python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir
ls ../temp_wheel_dir
./python.exe -s -m pip install --pre ../temp_wheel_dir/*
sed -i '1i../ComfyUI' ./python310._pth
cd ..
mkdir ComfyUI_windows_portable_nightly_pytorch
mv python_embeded ComfyUI_windows_portable_nightly_pytorch
mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI
cd ComfyUI_windows_portable_nightly_pytorch
mkdir update
cp -r ComfyUI/.ci/update_windows/* ./update/
cp -r ComfyUI/.ci/windows_base_files/* ./
cd ..
"C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch
mv ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z
cd ComfyUI_windows_portable_nightly_pytorch
python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
ls
- name: Upload binaries to release
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z
tag: "latest"
overwrite: true

README.md (27 changed lines)

@@ -31,6 +31,20 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git
 # Installing

+## Windows
+
+There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases).
+
+### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z)
+
+Just download, extract and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints
+
+## Colab Notebook
+
+To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb)
+
+## Manual Install (Windows, Linux)
+
 Git clone this repo.

 Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints
@@ -39,16 +53,17 @@ Put your VAE in: models/vae

 At the time of writing this pytorch has issues with python versions higher than 3.10 so make sure your python/pip versions are 3.10.

-### AMD
-AMD users can install rocm and pytorch with pip if you don't have it already installed:
+### AMD (Linux only)
+AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version:

-```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2```
+```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2```

 ### NVIDIA

 Nvidia users should install torch using this command:

-```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117```
+```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118```

 Nvidia users should also install Xformers for a speed boost but can still run the software without it.
@@ -112,10 +127,6 @@ To use a textual inversion concepts/embeddings in a text prompt put them in the

 ```embedding:embedding_filename.pt```

-### Colab Notebook
-
-To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb)
-
 ### Fedora

 To get python 3.10 on fedora:

comfy/cldm/cldm.py (6 changed lines)

@@ -59,9 +59,9 @@ class ControlNet(nn.Module):
         if context_dim is not None:
             assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
-            from omegaconf.listconfig import ListConfig
-            if type(context_dim) == ListConfig:
-                context_dim = list(context_dim)
+            # from omegaconf.listconfig import ListConfig
+            # if type(context_dim) == ListConfig:
+            #     context_dim = list(context_dim)

         if num_heads_upsample == -1:
             num_heads_upsample = num_heads

comfy/ldm/models/diffusion/ddpm.py (5 changed lines)

@@ -18,7 +18,6 @@ import itertools
 from tqdm import tqdm
 from torchvision.utils import make_grid
 # from pytorch_lightning.utilities.distributed import rank_zero_only
-from omegaconf import ListConfig

 from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
 from ldm.modules.ema import LitEma

@@ -1124,8 +1123,8 @@

     def get_unconditional_conditioning(self, batch_size, null_label=None):
         if null_label is not None:
             xc = null_label
-            if isinstance(xc, ListConfig):
-                xc = list(xc)
+            # if isinstance(xc, ListConfig):
+            #     xc = list(xc)
             if isinstance(xc, dict) or isinstance(xc, list):
                 c = self.get_learned_conditioning(xc)
             else:

comfy/ldm/modules/attention.py (24 changed lines)

@@ -11,12 +11,10 @@ from .sub_quadratic_attention import efficient_dot_product_attention
 import model_management

-try:
+if model_management.xformers_enabled():
     import xformers
     import xformers.ops
-    XFORMERS_IS_AVAILBLE = True
-except:
-    XFORMERS_IS_AVAILBLE = False

 # CrossAttn precision handling
 import os
@@ -481,23 +479,19 @@
         return self.to_out(out)

 import sys
-if XFORMERS_IS_AVAILBLE == False or "--disable-xformers" in sys.argv:
+if model_management.xformers_enabled():
+    print("Using xformers cross attention")
+    CrossAttention = MemoryEfficientCrossAttention
+elif model_management.pytorch_attention_enabled():
+    print("Using pytorch cross attention")
+    CrossAttention = CrossAttentionPytorch
+else:
     if "--use-split-cross-attention" in sys.argv:
         print("Using split optimization for cross attention")
         CrossAttention = CrossAttentionDoggettx
     else:
-        if "--use-pytorch-cross-attention" in sys.argv:
-            print("Using pytorch cross attention")
-            torch.backends.cuda.enable_math_sdp(False)
-            torch.backends.cuda.enable_flash_sdp(True)
-            torch.backends.cuda.enable_mem_efficient_sdp(True)
-            CrossAttention = CrossAttentionPytorch
-        else:
-            print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
-            CrossAttention = CrossAttentionBirchSan
-else:
-    print("Using xformers cross attention")
-    CrossAttention = MemoryEfficientCrossAttention
+        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
+        CrossAttention = CrossAttentionBirchSan

 class BasicTransformerBlock(nn.Module):
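
For reference, a minimal standalone sketch of the PyTorch 2.0 primitive the new "pytorch cross attention" branch selects. This is illustrative only, not the CrossAttentionPytorch class from this diff; shapes and values are made up, and torch >= 2.0 is assumed:

```python
# Cross attention via torch.nn.functional.scaled_dot_product_attention,
# the kernel behind the "Using pytorch cross attention" path above.
# Shapes are (batch, heads, tokens, dim_head); all numbers are examples.
import torch
import torch.nn.functional as F

q = torch.randn(1, 8, 64, 40)   # latent-image queries
k = torch.randn(1, 8, 77, 40)   # text-conditioning keys
v = torch.randn(1, 8, 77, 40)   # text-conditioning values

# Dispatches to the flash / memory-efficient kernels toggled via
# torch.backends.cuda.enable_*_sdp in model_management.py when available.
out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
print(out.shape)  # torch.Size([1, 8, 64, 40])
```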

comfy/ldm/modules/diffusionmodules/model.py (70 changed lines)

@@ -9,13 +9,9 @@ from typing import Optional, Any
 from ldm.modules.attention import MemoryEfficientCrossAttention
 import model_management

-try:
+if model_management.xformers_enabled():
     import xformers
     import xformers.ops
-    XFORMERS_IS_AVAILBLE = True
-except:
-    XFORMERS_IS_AVAILBLE = False
-    print("No module 'xformers'. Proceeding without it.")

 try:
     OOM_EXCEPTION = torch.cuda.OutOfMemoryError
@@ -303,6 +299,64 @@
         out = self.proj_out(out)
         return x+out

+class MemoryEfficientAttnBlockPytorch(nn.Module):
+    def __init__(self, in_channels):
+        super().__init__()
+        self.in_channels = in_channels
+
+        self.norm = Normalize(in_channels)
+        self.q = torch.nn.Conv2d(in_channels,
+                                 in_channels,
+                                 kernel_size=1,
+                                 stride=1,
+                                 padding=0)
+        self.k = torch.nn.Conv2d(in_channels,
+                                 in_channels,
+                                 kernel_size=1,
+                                 stride=1,
+                                 padding=0)
+        self.v = torch.nn.Conv2d(in_channels,
+                                 in_channels,
+                                 kernel_size=1,
+                                 stride=1,
+                                 padding=0)
+        self.proj_out = torch.nn.Conv2d(in_channels,
+                                        in_channels,
+                                        kernel_size=1,
+                                        stride=1,
+                                        padding=0)
+        self.attention_op: Optional[Any] = None
+
+    def forward(self, x):
+        h_ = x
+        h_ = self.norm(h_)
+        q = self.q(h_)
+        k = self.k(h_)
+        v = self.v(h_)
+
+        # compute attention
+        B, C, H, W = q.shape
+        q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))
+
+        q, k, v = map(
+            lambda t: t.unsqueeze(3)
+            .reshape(B, t.shape[1], 1, C)
+            .permute(0, 2, 1, 3)
+            .reshape(B * 1, t.shape[1], C)
+            .contiguous(),
+            (q, k, v),
+        )
+        out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
+
+        out = (
+            out.unsqueeze(0)
+            .reshape(B, 1, out.shape[1], C)
+            .permute(0, 2, 1, 3)
+            .reshape(B, out.shape[1], C)
+        )
+        out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
+        out = self.proj_out(out)
+        return x+out
 class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
     def forward(self, x, context=None, mask=None):

@@ -315,8 +369,10 @@ class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):

 def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
     assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
-    if XFORMERS_IS_AVAILBLE and attn_type == "vanilla":
+    if model_management.xformers_enabled() and attn_type == "vanilla":
         attn_type = "vanilla-xformers"
+    if model_management.pytorch_attention_enabled() and attn_type == "vanilla":
+        attn_type = "vanilla-pytorch"
     print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
     if attn_type == "vanilla":
         assert attn_kwargs is None

@@ -324,6 +380,8 @@ def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
     elif attn_type == "vanilla-xformers":
         print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...")
         return MemoryEfficientAttnBlock(in_channels)
+    elif attn_type == "vanilla-pytorch":
+        return MemoryEfficientAttnBlockPytorch(in_channels)
     elif type == "memory-efficient-cross-attn":
         attn_kwargs["query_dim"] = in_channels
         return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)
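
An aside on the reshape chain in MemoryEfficientAttnBlockPytorch above: it appears to be carried over from the xformers variant, and for a (B, HW, C) tensor it is effectively an identity view, so scaled_dot_product_attention runs single-headed over pixel tokens. A small self-check sketch with made-up shapes, assuming torch >= 2.0 and einops are installed:

```python
# Verifies that the unsqueeze/reshape/permute chain used above leaves a
# (B, HW, C) tensor unchanged, i.e. it is a single-head no-op.
import torch
from einops import rearrange

B, C, H, W = 2, 16, 8, 8
t = rearrange(torch.randn(B, C, H, W), 'b c h w -> b (h w) c')
u = (t.unsqueeze(3)
      .reshape(B, t.shape[1], 1, C)
      .permute(0, 2, 1, 3)
      .reshape(B * 1, t.shape[1], C)
      .contiguous())
assert torch.equal(t, u)  # same elements, same order
```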

comfy/ldm/modules/diffusionmodules/openaimodel.py (6 changed lines)

@@ -477,9 +477,9 @@ class UNetModel(nn.Module):
         if context_dim is not None:
             assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
-            from omegaconf.listconfig import ListConfig
-            if type(context_dim) == ListConfig:
-                context_dim = list(context_dim)
+            # from omegaconf.listconfig import ListConfig
+            # if type(context_dim) == ListConfig:
+            #     context_dim = list(context_dim)

         if num_heads_upsample == -1:
             num_heads_upsample = num_heads

comfy/model_management.py (31 changed lines)

@@ -31,8 +31,25 @@ try:
 except:
     pass

-if "--cpu" in sys.argv:
-    vram_state = CPU
+if "--disable-xformers" in sys.argv:
+    XFORMERS_IS_AVAILBLE = False
+else:
+    try:
+        import xformers
+        import xformers.ops
+        XFORMERS_IS_AVAILBLE = True
+    except:
+        XFORMERS_IS_AVAILBLE = False
+
+ENABLE_PYTORCH_ATTENTION = False
+if "--use-pytorch-cross-attention" in sys.argv:
+    torch.backends.cuda.enable_math_sdp(True)
+    torch.backends.cuda.enable_flash_sdp(True)
+    torch.backends.cuda.enable_mem_efficient_sdp(True)
+    ENABLE_PYTORCH_ATTENTION = True
+    XFORMERS_IS_AVAILBLE = False

 if "--lowvram" in sys.argv:
     set_vram_to = LOW_VRAM
 if "--novram" in sys.argv:

@@ -54,6 +71,8 @@ if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM:
     total_vram_available_mb = (total_vram - 1024) // 2
     total_vram_available_mb = int(max(256, total_vram_available_mb))

+if "--cpu" in sys.argv:
+    vram_state = CPU
+
 print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM"][vram_state])

@@ -159,6 +178,14 @@ def get_autocast_device(dev):
         return dev.type
     return "cuda"

+def xformers_enabled():
+    if vram_state == CPU:
+        return False
+    return XFORMERS_IS_AVAILBLE
+
+def pytorch_attention_enabled():
+    return ENABLE_PYTORCH_ATTENTION
+
 def get_free_memory(dev=None, torch_free_too=False):
     if dev is None:
         dev = get_torch_device()
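
Taken together, the two new helpers give callers a single precedence order. A hedged sketch of how the rest of the codebase uses them (it mirrors the selection logic in attention.py above; the import is assumed to resolve the same way it does there):

```python
# Backend resolution as seen from a caller of the new helpers.
import model_management

if model_management.xformers_enabled():             # False on --cpu or --disable-xformers
    backend = "xformers"
elif model_management.pytorch_attention_enabled():  # True with --use-pytorch-cross-attention
    backend = "pytorch scaled_dot_product_attention"
else:
    backend = "sub-quadratic (or split via --use-split-cross-attention)"
print("cross attention backend:", backend)
```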

comfy/sd.py (20 changed lines)

@@ -6,7 +6,7 @@ import sd2_clip
 import model_management
 from .ldm.util import instantiate_from_config
 from .ldm.models.autoencoder import AutoencoderKL
-from omegaconf import OmegaConf
+import yaml

 from .cldm import cldm
 from .t2i_adapter import adapter

@@ -726,12 +726,19 @@
     return clip

 def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
-    config = OmegaConf.load(config_path)
+    with open(config_path, 'r') as stream:
+        config = yaml.safe_load(stream)
+
     model_config_params = config['model']['params']
     clip_config = model_config_params['cond_stage_config']
     scale_factor = model_config_params['scale_factor']
     vae_config = model_config_params['first_stage_config']

+    fp16 = False
+    if "unet_config" in model_config_params:
+        if "params" in model_config_params["unet_config"]:
+            if "use_fp16" in model_config_params["unet_config"]["params"]:
+                fp16 = model_config_params["unet_config"]["params"]["use_fp16"]
+
     clip = None
     vae = None

@@ -750,9 +757,13 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e
         w.cond_stage_model = clip.cond_stage_model
         load_state_dict_to = [w]

-    model = instantiate_from_config(config.model)
+    model = instantiate_from_config(config["model"])
    sd = load_torch_file(ckpt_path)
    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

+    if fp16:
+        model = model.half()
+
     return (ModelPatcher(model), clip, vae)

@@ -853,4 +864,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, e
     model = instantiate_from_config(model_config)
     model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

+    if fp16:
+        model = model.half()
+
     return (ModelPatcher(model), clip, vae)
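
Since OmegaConf is swapped for plain PyYAML here, configs come back as ordinary dicts, so attribute access (config.model) becomes indexing (config["model"]). A minimal sketch of the new loading pattern; the config filename is an assumed example:

```python
# yaml.safe_load returns plain dicts/lists; the nested "use_fp16" lookup
# below is the same check as the chained ifs in load_checkpoint above.
import yaml

with open("v1-inference.yaml", "r") as stream:  # hypothetical example path
    config = yaml.safe_load(stream)

params = config["model"]["params"]
fp16 = (params.get("unet_config", {})
              .get("params", {})
              .get("use_fp16", False))
print("fp16:", fp16)
```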

main.py (35 changed lines)

@@ -1,5 +1,6 @@
 import os
 import sys
+import shutil
 import threading
 import asyncio

@@ -8,9 +9,6 @@ if os.name == "nt":
     import logging
     logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

-import execution
-import server
-
 if __name__ == "__main__":
     if '--help' in sys.argv:
         print("Valid Command line Arguments:")

@@ -18,6 +16,8 @@ if __name__ == "__main__":
         print("\t--port 8188\t\t\tSet the listen port.")
         print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n")
         print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.")
+        print("\t--use-pytorch-cross-attention\tUse the new pytorch 2.0 cross attention function.")
+        print("\t--disable-xformers\t\tdisables xformers")
         print()
         print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used.\n\t\t\t\t\tThis option keeps them in GPU memory.\n")
         print("\t--normalvram\t\t\tUsed to force normal vram use if lowvram gets automatically enabled.")

@@ -31,6 +31,9 @@ if __name__ == "__main__":
         print("disabling upcasting of attention")
         os.environ['ATTN_PRECISION'] = "fp16"

+import execution
+import server
+
 def prompt_worker(q, server):
     e = execution.PromptExecutor(server)
     while True:

@@ -38,8 +41,8 @@ def prompt_worker(q, server):
         e.execute(item[-2], item[-1])
         q.task_done(item_id, e.outputs)

-async def run(server, address='', port=8188, verbose=True):
-    await asyncio.gather(server.start(address, port, verbose), server.publish_loop())
+async def run(server, address='', port=8188, verbose=True, call_on_start=None):
+    await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop())

 def hijack_progress(server):
     from tqdm.auto import tqdm

@@ -51,7 +54,14 @@ def hijack_progress(server):
             return v
     setattr(tqdm, "update", wrapped_func)

+def cleanup_temp():
+    temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
+    if os.path.exists(temp_dir):
+        shutil.rmtree(temp_dir, ignore_errors=True)
+
 if __name__ == "__main__":
+    cleanup_temp()
     loop = asyncio.new_event_loop()
     asyncio.set_event_loop(loop)
     server = server.PromptServer(loop)

@@ -76,11 +86,22 @@
         except:
             pass

+    if '--quick-test-for-ci' in sys.argv:
+        exit(0)
+
+    call_on_start = None
+    if "--windows-standalone-build" in sys.argv:
+        def startup_server(address, port):
+            import webbrowser
+            webbrowser.open("http://{}:{}".format(address, port))
+        call_on_start = startup_server
+
     if os.name == "nt":
         try:
-            loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print))
+            loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start))
         except KeyboardInterrupt:
             pass
     else:
-        loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print))
+        loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start))
+
+    cleanup_temp()

nodes.py (20 changed lines)

@@ -775,6 +775,7 @@ class KSamplerAdvanced:
 class SaveImage:
     def __init__(self):
         self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
+        self.url_suffix = ""

     @classmethod
     def INPUT_TYPES(s):

@@ -818,6 +819,9 @@
             os.makedirs(full_output_folder, exist_ok=True)
             counter = 1

+        if not os.path.exists(self.output_dir):
+            os.makedirs(self.output_dir)
+
         paths = list()
         for image in images:
             i = 255. * image.cpu().numpy()

@@ -828,12 +832,25 @@
             if extra_pnginfo is not None:
                 for x in extra_pnginfo:
                     metadata.add_text(x, json.dumps(extra_pnginfo[x]))

             file = f"{filename}_{counter:05}_.png"
             img.save(os.path.join(full_output_folder, file), pnginfo=metadata, optimize=True)
-            paths.append(os.path.join(subfolder, file))
+            paths.append(os.path.join(subfolder, file + self.url_suffix))
             counter += 1
         return { "ui": { "images": paths } }

+class PreviewImage(SaveImage):
+    def __init__(self):
+        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
+        self.url_suffix = "?type=temp"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {"images": ("IMAGE", ), },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
 class LoadImage:
     input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
     @classmethod

@@ -954,6 +971,7 @@ NODE_CLASS_MAPPINGS = {
     "EmptyLatentImage": EmptyLatentImage,
     "LatentUpscale": LatentUpscale,
     "SaveImage": SaveImage,
+    "PreviewImage": PreviewImage,
     "LoadImage": LoadImage,
     "LoadImageMask": LoadImageMask,
     "ImageScale": ImageScale,

requirements.txt (2 changed lines)

@@ -1,7 +1,6 @@
 torch
 torchdiffeq
 torchsde
-omegaconf
 einops
 open-clip-torch
 transformers

@@ -9,3 +8,4 @@ safetensors
 pytorch_lightning
 aiohttp
 accelerate
+pyyaml

server.py (7 changed lines)

@@ -113,7 +113,7 @@ class PromptServer():
         async def view_image(request):
             if "file" in request.rel_url.query:
                 type = request.rel_url.query.get("type", "output")
-                if type != "output" and type != "input":
+                if type not in ["output", "input", "temp"]:
                     return web.Response(status=400)

                 output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), type)

@@ -267,7 +267,7 @@ class PromptServer():
             msg = await self.messages.get()
             await self.send(*msg)

-    async def start(self, address, port, verbose=True):
+    async def start(self, address, port, verbose=True, call_on_start=None):
         runner = web.AppRunner(self.app)
         await runner.setup()
         site = web.TCPSite(runner, address, port)

@@ -278,3 +278,6 @@
         if verbose:
             print("Starting server\n")
             print("To see the GUI go to: http://{}:{}".format(address, port))
+
+        if call_on_start is not None:
+            call_on_start(address, port)

web/scripts/app.js (25 changed lines)

@@ -144,7 +144,14 @@ class ComfyApp {
                 if (numImages === 1 && !imageIndex) {
                     this.imageIndex = imageIndex = 0;
                 }
-                let shiftY = this.type === "SaveImage" ? 55 : this.imageOffset || 0;
+
+                let shiftY;
+                if (this.imageOffset != null) {
+                    shiftY = this.imageOffset;
+                } else {
+                    shiftY = this.computeSize()[1];
+                }
+
                 let dw = this.size[0];
                 let dh = this.size[1];
                 dh -= shiftY;

@@ -400,6 +407,15 @@ class ComfyApp {
         api.init();
     }

+    #addKeyboardHandler() {
+        window.addEventListener("keydown", (e) => {
+            // Queue prompt using ctrl or command + enter
+            if ((e.ctrlKey || e.metaKey) && (e.key === "Enter" || e.keyCode === 13 || e.keyCode === 10)) {
+                this.queuePrompt(e.shiftKey ? -1 : 0);
+            }
+        });
+    }
+
     /**
      * Loads all extensions from the API into the window
      */

@@ -466,6 +482,7 @@ class ComfyApp {
         this.#addApiUpdateHandlers();
         this.#addDropHandler();
         this.#addPasteHandler();
+        this.#addKeyboardHandler();

         await this.#invokeExtensionsAsync("setup");
     }

@@ -499,7 +516,11 @@
                 if (Array.isArray(type)) {
                     // Enums e.g. latent rotation
-                    this.addWidget("combo", inputName, type[0], () => {}, { values: type });
+                    let defaultValue = type[0];
+                    if (inputData[1] && inputData[1].default) {
+                        defaultValue = inputData[1].default;
+                    }
+                    this.addWidget("combo", inputName, defaultValue, () => {}, { values: type });
                 } else if (`${type}:${inputName}` in widgets) {
                     // Support custom widgets by Type:Name
                     Object.assign(config, widgets[`${type}:${inputName}`](this, inputName, inputData, app) || {});
