Compare commits

...

4 Commits

Author   SHA1        Message                             Date
tin2tin  9c50c8c26a  Make Torch import for device safe.  12 months ago
tin2tin  3f992ce170  On Mac use MPS device               12 months ago
tin2tin  8e5ab36ed6  Attempt to fix cuda being used      12 months ago
tin2tin  5cd2b0648c  Use CPU instead of CUDA             12 months ago
1 changed file: __init__.py (209 lines changed)

@@ -26,10 +26,21 @@ import string
from os.path import dirname, realpath, isdir, join, basename
import shutil
from datetime import date
try:
    exec("import torch")
    if torch.cuda.is_available():
        gfx_device = "cuda"
    elif torch.backends.mps.is_available():
        gfx_device = "mps"
    else:
        gfx_device = "cpu"
except ModuleNotFoundError:
    print("Pallaidium dependencies needs to be installed and then Blender needs to be restarted.")
os_platform = platform.system() # 'Linux', 'Darwin', 'Java', 'Windows'
def show_system_console(show):
if os_platform == "Windows":
# https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-showwindow
@@ -1024,24 +1035,26 @@ class GeneratorAddonPreferences(AddonPreferences):
name="Video Model",
items=[
(
"guoyww/animatediff-motion-adapter-v1-5-2",
"AnimateDiff",
"AnimateDiff",
"stabilityai/stable-video-diffusion-img2vid-xt",
"Stable Video Diffusion XT (576x1024x24) ",
"stabilityai/stable-video-diffusion-img2vid-xt",
),
(
"stabilityai/stable-video-diffusion-img2vid",
"Stable Video Diffusion (576x1024x14)",
"stabilityai/stable-video-diffusion-img2vid",
),
# ("hotshotco/Hotshot-XL", "Hotshot-XL (512x512)", "Hotshot-XL (512x512)"),
("strangeman3107/animov-0.1.1", "Animov (448x384)", "Animov (448x384)"),
("strangeman3107/animov-512x", "Animov (512x512)", "Animov (512x512)"),
# (
# "stabilityai/stable-diffusion-xl-base-1.0",
# "Img2img SD XL 1.0 Refine (1024x1024)",
# "Stable Diffusion XL 1.0",
# ),
("camenduru/potat1", "Potat v1 (1024x576)", "Potat (1024x576)"),
#("camenduru/potat1", "Potat v1 (1024x576)", "Potat (1024x576)"),
# ("VideoCrafter/Image2Video-512", "VideoCrafter v1 (512x512)", "VideoCrafter/Image2Video-512"),
(
"cerspense/zeroscope_v2_dark_30x448x256",
"Zeroscope (448x256x30)",
"Zeroscope (448x256x30)",
"cerspense/zeroscope_v2_XL",
"Zeroscope XL (1024x576x24)",
"Zeroscope XL (1024x576x24)",
),
(
"cerspense/zeroscope_v2_576w",
@@ -1049,20 +1062,18 @@ class GeneratorAddonPreferences(AddonPreferences):
"Zeroscope (576x320x24)",
),
(
"cerspense/zeroscope_v2_XL",
"Zeroscope XL (1024x576x24)",
"Zeroscope XL (1024x576x24)",
),
(
"stabilityai/stable-video-diffusion-img2vid-xt",
"Stable Video Diffusion XT (576x1024x24) ",
"stabilityai/stable-video-diffusion-img2vid-xt",
"cerspense/zeroscope_v2_dark_30x448x256",
"Zeroscope (448x256x30)",
"Zeroscope (448x256x30)",
),
(
"stabilityai/stable-video-diffusion-img2vid",
"Stable Video Diffusion (576x1024x14)",
"stabilityai/stable-video-diffusion-img2vid",
"guoyww/animatediff-motion-adapter-v1-5-2",
"AnimateDiff",
"AnimateDiff",
),
# ("hotshotco/Hotshot-XL", "Hotshot-XL (512x512)", "Hotshot-XL (512x512)"),
("strangeman3107/animov-512x", "Animov (512x512)", "Animov (512x512)"),
("strangeman3107/animov-0.1.1", "Animov (448x384)", "Animov (448x384)"),
],
default="cerspense/zeroscope_v2_576w",
update=input_strips_updated,
@@ -1071,6 +1082,38 @@ class GeneratorAddonPreferences(AddonPreferences):
image_model_card: bpy.props.EnumProperty(
name="Image Model",
items=[
(
"stabilityai/stable-diffusion-xl-base-1.0",
"Stable Diffusion XL 1.0 (1024x1024)",
"stabilityai/stable-diffusion-xl-base-1.0",
),
(
"stabilityai/sdxl-turbo",
"Stable Diffusion Turbo (512x512)",
"stabilityai/sdxl-turbo",
),
(
"runwayml/stable-diffusion-v1-5",
"Stable Diffusion 1.5 (512x512)",
"runwayml/stable-diffusion-v1-5",
),
(
"stabilityai/stable-diffusion-2",
"Stable Diffusion 2 (768x768)",
"stabilityai/stable-diffusion-2",
),
(
"segmind/SSD-1B",
"Segmind SSD-1B (1024x1024)",
"segmind/SSD-1B",
),
(
"Lykon/dreamshaper-7",
"Dreamshaper LCM v7 (1024 x 1024)",
"Lykon/dreamshaper-7",
),
# ("ptx0/terminus-xl-gamma-v1", "Terminus XL Gamma v1", "ptx0/terminus-xl-gamma-v1"),
("warp-ai/wuerstchen", "Würstchen (1024x1024)", "warp-ai/wuerstchen"),
(
"Salesforce/blipdiffusion",
"Blip Subject Driven (512x512)",
@@ -1081,12 +1124,7 @@ class GeneratorAddonPreferences(AddonPreferences):
"ControlNet (512x512)",
"lllyasviel/sd-controlnet-canny",
),
("DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0"),
(
"Lykon/dreamshaper-7",
"Dreamshaper LCM v7 (1024 x 1024)",
"Lykon/dreamshaper-7",
),
#("DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0"),
(
"monster-labs/control_v1p_sd15_qrcode_monster",
"Illusion (512x512)",
@@ -1102,33 +1140,6 @@ class GeneratorAddonPreferences(AddonPreferences):
"Scribble (512x512)",
"lllyasviel/control_v11p_sd15_scribble",
),
(
"segmind/SSD-1B",
"Segmind SSD-1B (1024x1024)",
"segmind/SSD-1B",
),
(
"runwayml/stable-diffusion-v1-5",
"Stable Diffusion 1.5 (512x512)",
"runwayml/stable-diffusion-v1-5",
),
(
"stabilityai/stable-diffusion-2",
"Stable Diffusion 2 (768x768)",
"stabilityai/stable-diffusion-2",
),
(
"stabilityai/sdxl-turbo",
"Stable Diffusion Turbo (512x512)",
"stabilityai/sdxl-turbo",
),
(
"stabilityai/stable-diffusion-xl-base-1.0",
"Stable Diffusion XL 1.0 (1024x1024)",
"stabilityai/stable-diffusion-xl-base-1.0",
),
# ("ptx0/terminus-xl-gamma-v1", "Terminus XL Gamma v1", "ptx0/terminus-xl-gamma-v1"),
("warp-ai/wuerstchen", "Würstchen (1024x1024)", "warp-ai/wuerstchen"),
],
default="stabilityai/stable-diffusion-xl-base-1.0",
update=input_strips_updated,
@@ -1142,6 +1153,7 @@ class GeneratorAddonPreferences(AddonPreferences):
"Music: MusicGen Stereo",
"facebook/musicgen-stereo-small",
),
("bark", "Speech: Bark", "Bark"),
(
"cvssp/audioldm2-music",
"Music: AudioLDM 2",
@@ -1152,10 +1164,9 @@ class GeneratorAddonPreferences(AddonPreferences):
"Sound: AudioLDM 2",
"Sound: AudioLDM 2",
),
("bark", "Speech: Bark", "Bark"),
# ("declare-lab/mustango", "Mustango", "declare-lab/mustango"),
],
default="bark",
default="facebook/musicgen-stereo-small",
update=input_strips_updated,
)
@@ -2000,7 +2011,7 @@ class SEQUENCER_OT_generate_movie(Operator):
# # pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) # Heavy
# # pipe.enable_vae_slicing()
# else:
# pipe.to("cuda")
# pipe.to(gfx_device)
# from diffusers import StableDiffusionXLImg2ImgPipeline
# refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
@@ -2016,7 +2027,7 @@ class SEQUENCER_OT_generate_movie(Operator):
# # refiner.enable_vae_tiling()
# # refiner.enable_vae_slicing()
# else:
# refiner.to("cuda")
# refiner.to(gfx_device)
if (
movie_model_card == "stabilityai/stable-video-diffusion-img2vid"
or movie_model_card == "stabilityai/stable-video-diffusion-img2vid-xt"
@@ -2031,7 +2042,7 @@ class SEQUENCER_OT_generate_movie(Operator):
if low_vram():
refiner.enable_model_cpu_offload()
else:
refiner.to("cuda")
refiner.to(gfx_device)
else: # vid2vid / img2vid
if (
movie_model_card == "cerspense/zeroscope_v2_dark_30x448x256"
@@ -2063,7 +2074,7 @@ class SEQUENCER_OT_generate_movie(Operator):
# upscale.enable_vae_slicing()
#upscale.unet.enable_forward_chunking(chunk_size=1, dim=1) # heavy:
else:
upscale.to("cuda")
upscale.to(gfx_device)
# Models for movie generation
else:
if movie_model_card == "guoyww/animatediff-motion-adapter-v1-5-2":
@@ -2095,7 +2106,7 @@ class SEQUENCER_OT_generate_movie(Operator):
pipe.enable_model_cpu_offload()
# pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) # heavy:
else:
upscale.to("cuda")
upscale.to(gfx_device)
elif movie_model_card == "VideoCrafter/Image2Video-512":
from diffusers import StableDiffusionPipeline
@@ -2114,7 +2125,7 @@ class SEQUENCER_OT_generate_movie(Operator):
pipe.enable_model_cpu_offload()
# pipe.enable_vae_slicing()
else:
pipe.to("cuda")
pipe.to(gfx_device)
elif (
movie_model_card == "stabilityai/stable-video-diffusion-img2vid"
or movie_model_card == "stabilityai/stable-video-diffusion-img2vid-xt"
@@ -2140,7 +2151,7 @@ class SEQUENCER_OT_generate_movie(Operator):
pipe.enable_model_cpu_offload()
# pipe.enable_vae_slicing()
else:
pipe.to("cuda")
pipe.to(gfx_device)
# Model for upscale generated movie
if scene.video_to_video:
if torch.cuda.is_available():
@@ -2162,7 +2173,7 @@ class SEQUENCER_OT_generate_movie(Operator):
#upscale.unet.enable_forward_chunking(chunk_size=1, dim=1) # Heavy
# upscale.enable_vae_slicing()
else:
upscale.to("cuda")
upscale.to(gfx_device)
if scene.use_freeU and pipe: # Free Lunch
# -------- freeu block registration
print("Process: FreeU")
@@ -2511,7 +2522,7 @@ class SEQUENCER_OT_generate_audio(Operator):
"Dependencies needs to be installed in the add-on preferences.",
)
return {"CANCELLED"}
show_system_console(True)
set_system_console_topmost(True)
@@ -2536,8 +2547,8 @@ class SEQUENCER_OT_generate_audio(Operator):
pipe.enable_model_cpu_offload()
# pipe.enable_vae_slicing()
else:
pipe.to("cuda")
pipe.to(gfx_device)
# Musicgen
elif addon_prefs.audio_model_card == "facebook/musicgen-stereo-small":
from transformers import pipeline
@@ -2551,7 +2562,7 @@ class SEQUENCER_OT_generate_audio(Operator):
)
if int(audio_length_in_s * 50) > 1503:
self.report({"INFO"}, "Maximum output duration is 30 sec.")
# Bark
elif addon_prefs.audio_model_card == "bark":
preload_models(
@@ -2560,7 +2571,7 @@ class SEQUENCER_OT_generate_audio(Operator):
fine_use_gpu=True,
fine_use_small=True,
)
# Mustango
elif addon_prefs.audio_model_card == "declare-lab/mustango":
import IPython
@@ -2573,13 +2584,13 @@ class SEQUENCER_OT_generate_audio(Operator):
model = DiffusionPipeline.from_pretrained(
"declare-lab/mustango"
) # , device="cuda:0", torch_dtype=torch.float16)
# Deadend
else:
print("Audio model not found.")
self.report({"INFO"}, "Audio model not found.")
return {"CANCELLED"}
# Main loop
for i in range(scene.movie_num_batch):
if i > 0:
@@ -2630,7 +2641,7 @@ class SEQUENCER_OT_generate_audio(Operator):
# Write the combined audio to a file
write_wav(filename, rate, audio.transpose())
# Musicgen
elif addon_prefs.audio_model_card == "facebook/musicgen-stereo-small":
print("Generate: MusicGen Stereo")
@@ -2857,7 +2868,7 @@ class SEQUENCER_OT_generate_image(Operator):
"None of the selected strips are movie, image, text or scene types.",
)
return {"CANCELLED"}
# LOADING MODELS
# models for inpaint
@@ -2874,7 +2885,7 @@ class SEQUENCER_OT_generate_image(Operator):
"diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
torch_dtype=torch.float16,
variant="fp16",
).to("cuda")
).to(gfx_device)
# Set scheduler
if scene.use_lcm:
@@ -2890,8 +2901,8 @@ class SEQUENCER_OT_generate_image(Operator):
# torch.cuda.set_per_process_memory_fraction(0.99)
pipe.enable_model_cpu_offload()
else:
pipe.to("cuda")
pipe.to(gfx_device)
# Conversion img2img/vid2img.
elif (
do_convert
@@ -2927,8 +2938,8 @@ class SEQUENCER_OT_generate_image(Operator):
# refiner.enable_vae_tiling()
# converter.enable_vae_slicing()
else:
converter.to("cuda")
converter.to(gfx_device)
# ControlNet & Illusion
elif (
image_model_card == "lllyasviel/sd-controlnet-canny"
@@ -2968,8 +2979,8 @@ class SEQUENCER_OT_generate_image(Operator):
pipe.enable_model_cpu_offload()
# pipe.enable_vae_slicing()
else:
pipe.to("cuda")
pipe.to(gfx_device)
# Blip
elif image_model_card == "Salesforce/blipdiffusion":
print("Load: Blip Model")
@@ -2982,15 +2993,15 @@ class SEQUENCER_OT_generate_image(Operator):
pipe = BlipDiffusionPipeline.from_pretrained(
"Salesforce/blipdiffusion", torch_dtype=torch.float16
).to("cuda")
).to(gfx_device)
else:
from controlnet_aux import CannyDetector
from diffusers.pipelines import BlipDiffusionControlNetPipeline
pipe = BlipDiffusionControlNetPipeline.from_pretrained(
"Salesforce/blipdiffusion-controlnet", torch_dtype=torch.float16
).to("cuda")
).to(gfx_device)
# OpenPose
elif image_model_card == "lllyasviel/sd-controlnet-openpose":
print("Load: OpenPose Model")
@@ -3034,8 +3045,8 @@ class SEQUENCER_OT_generate_image(Operator):
pipe.enable_model_cpu_offload()
# pipe.enable_vae_slicing()
else:
pipe.to("cuda")
pipe.to(gfx_device)
# Scribble
elif image_model_card == "lllyasviel/control_v11p_sd15_scribble":
print("Load: Scribble Model")
@@ -3076,8 +3087,8 @@ class SEQUENCER_OT_generate_image(Operator):
# pipe.enable_vae_slicing()
# pipe.enable_forward_chunking(chunk_size=1, dim=1)
else:
pipe.to("cuda")
pipe.to(gfx_device)
# Dreamshaper
elif image_model_card == "Lykon/dreamshaper-7":
if do_convert:
@@ -3092,7 +3103,7 @@ class SEQUENCER_OT_generate_image(Operator):
) # , custom_pipeline="latent_consistency_txt2img"
pipe.to(torch_device="cuda", torch_dtype=torch.float16)
# Wuerstchen
elif image_model_card == "warp-ai/wuerstchen":
print("Load: Würstchen Model")
@@ -3114,8 +3125,8 @@ class SEQUENCER_OT_generate_image(Operator):
if low_vram():
pipe.enable_model_cpu_offload()
else:
pipe.to("cuda")
pipe.to(gfx_device)
# DeepFloyd
elif image_model_card == "DeepFloyd/IF-I-M-v1.0":
print("Load: DeepFloyd Model")
@@ -3135,8 +3146,8 @@ class SEQUENCER_OT_generate_image(Operator):
if low_vram():
stage_1.enable_model_cpu_offload()
else:
stage_1.to("cuda")
stage_1.to(gfx_device)
# stage 2
stage_2 = DiffusionPipeline.from_pretrained(
"DeepFloyd/IF-II-M-v1.0",
@@ -3147,8 +3158,8 @@ class SEQUENCER_OT_generate_image(Operator):
if low_vram():
stage_2.enable_model_cpu_offload()
else:
stage_2.to("cuda")
stage_2.to(gfx_device)
# stage 3
safety_modules = {
"feature_extractor": stage_1.feature_extractor,
@@ -3163,7 +3174,7 @@ class SEQUENCER_OT_generate_image(Operator):
if low_vram():
stage_3.enable_model_cpu_offload()
else:
stage_3.to("cuda")
stage_3.to(gfx_device)
# Stable diffusion etc.
else:
@@ -3226,7 +3237,7 @@ class SEQUENCER_OT_generate_image(Operator):
pipe.enable_model_cpu_offload()
pipe.enable_vae_slicing()
else:
pipe.to("cuda")
pipe.to(gfx_device)
if scene.use_freeU and pipe: # Free Lunch
# -------- freeu block registration
print("Process: FreeU")
@@ -3285,7 +3296,7 @@ class SEQUENCER_OT_generate_image(Operator):
# refiner.enable_vae_tiling()
# refiner.enable_vae_slicing()
else:
refiner.to("cuda")
refiner.to(gfx_device)
# # Allow longer prompts.
# if image_model_card == "runwayml/stable-diffusion-v1-5":
# if pipe:
@@ -3841,7 +3852,7 @@ class SEQUENCER_OT_generate_text(Operator):
)
model = BlipForConditionalGeneration.from_pretrained(
"Salesforce/blip-image-captioning-large", torch_dtype=torch.float16
).to("cuda")
).to(gfx_device)
init_image = (
load_first_frame(scene.movie_path)
@@ -3852,7 +3863,7 @@ class SEQUENCER_OT_generate_text(Operator):
text = ""
inputs = processor(init_image, text, return_tensors="pt").to(
"cuda", torch.float16
gfx_device, torch.float16
)
out = model.generate(**inputs, max_new_tokens=256)
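
For reference, below is a minimal standalone sketch of the device-selection logic this compare introduces and of how the resulting gfx_device value is consumed; it is not part of the diff. It assumes a PyTorch build recent enough to expose torch.backends.mps, and the diffusers lines at the end are purely illustrative ("stabilityai/sdxl-turbo" is just one of the image model cards listed above).

# Standalone sketch of the gfx_device selection (the add-on additionally wraps
# the import in try/except ModuleNotFoundError and only prints a warning if
# torch is missing).
import torch

if torch.cuda.is_available():
    gfx_device = "cuda"      # NVIDIA GPU
elif torch.backends.mps.is_available():
    gfx_device = "mps"       # Apple Silicon (Metal)
else:
    gfx_device = "cpu"

# Illustrative use, mirroring the pipe.to(gfx_device) calls throughout the diff.
# Requires the diffusers package; the model id is an assumption for the example.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo")
pipe.to(gfx_device)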
