|
|
@@ -26,10 +26,21 @@ import string
 from os.path import dirname, realpath, isdir, join, basename
 import shutil
 from datetime import date
 
+try:
+    exec("import torch")
+    if torch.cuda.is_available():
+        gfx_device = "cuda"
+    elif torch.backends.mps.is_available():
+        gfx_device = "mps"
+    else:
+        gfx_device = "cpu"
+except ModuleNotFoundError:
+    print("Pallaidium dependencies needs to be installed and then Blender needs to be restarted.")
+
 os_platform = platform.system()  # 'Linux', 'Darwin', 'Java', 'Windows'
 
 
 def show_system_console(show):
     if os_platform == "Windows":
         # https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-showwindow
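
[Review note, not part of the patch] The new module-level block resolves the
compute device once at import time: CUDA first, then Apple's MPS, otherwise
CPU. Every hunk below then replaces a hard-coded .to("cuda") with
.to(gfx_device). A minimal standalone sketch of the same probe (the function
name here is mine, not the add-on's):

    import torch

    def pick_gfx_device() -> str:
        # Prefer CUDA, then Apple MPS (torch >= 1.12), else fall back to CPU.
        if torch.cuda.is_available():
            return "cuda"
        if torch.backends.mps.is_available():
            return "mps"
        return "cpu"

    print(pick_gfx_device())

The exec("import torch") wrapper presumably keeps the import out of static
scanning; the surrounding try/except is what actually catches the missing
dependency and prints the restart hint.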
|
|
@@ -1024,24 +1035,26 @@ class GeneratorAddonPreferences(AddonPreferences):
         name="Video Model",
         items=[
             (
-                "guoyww/animatediff-motion-adapter-v1-5-2",
-                "AnimateDiff",
-                "AnimateDiff",
+                "stabilityai/stable-video-diffusion-img2vid-xt",
+                "Stable Video Diffusion XT (576x1024x24) ",
+                "stabilityai/stable-video-diffusion-img2vid-xt",
+            ),
+            (
+                "stabilityai/stable-video-diffusion-img2vid",
+                "Stable Video Diffusion (576x1024x14)",
+                "stabilityai/stable-video-diffusion-img2vid",
             ),
-            # ("hotshotco/Hotshot-XL", "Hotshot-XL (512x512)", "Hotshot-XL (512x512)"),
-            ("strangeman3107/animov-0.1.1", "Animov (448x384)", "Animov (448x384)"),
-            ("strangeman3107/animov-512x", "Animov (512x512)", "Animov (512x512)"),
             # (
             #    "stabilityai/stable-diffusion-xl-base-1.0",
             #    "Img2img SD XL 1.0 Refine (1024x1024)",
             #    "Stable Diffusion XL 1.0",
             # ),
-            ("camenduru/potat1", "Potat v1 (1024x576)", "Potat (1024x576)"),
+            #("camenduru/potat1", "Potat v1 (1024x576)", "Potat (1024x576)"),
             # ("VideoCrafter/Image2Video-512", "VideoCrafter v1 (512x512)", "VideoCrafter/Image2Video-512"),
             (
-                "cerspense/zeroscope_v2_dark_30x448x256",
-                "Zeroscope (448x256x30)",
-                "Zeroscope (448x256x30)",
+                "cerspense/zeroscope_v2_XL",
+                "Zeroscope XL (1024x576x24)",
+                "Zeroscope XL (1024x576x24)",
             ),
             (
                 "cerspense/zeroscope_v2_576w",
|
|
@@ -1049,20 +1062,18 @@ class GeneratorAddonPreferences(AddonPreferences):
                 "Zeroscope (576x320x24)",
             ),
             (
-                "cerspense/zeroscope_v2_XL",
-                "Zeroscope XL (1024x576x24)",
-                "Zeroscope XL (1024x576x24)",
-            ),
-            (
-                "stabilityai/stable-video-diffusion-img2vid-xt",
-                "Stable Video Diffusion XT (576x1024x24) ",
-                "stabilityai/stable-video-diffusion-img2vid-xt",
+                "cerspense/zeroscope_v2_dark_30x448x256",
+                "Zeroscope (448x256x30)",
+                "Zeroscope (448x256x30)",
             ),
             (
-                "stabilityai/stable-video-diffusion-img2vid",
-                "Stable Video Diffusion (576x1024x14)",
-                "stabilityai/stable-video-diffusion-img2vid",
+                "guoyww/animatediff-motion-adapter-v1-5-2",
+                "AnimateDiff",
+                "AnimateDiff",
             ),
+            # ("hotshotco/Hotshot-XL", "Hotshot-XL (512x512)", "Hotshot-XL (512x512)"),
+            ("strangeman3107/animov-512x", "Animov (512x512)", "Animov (512x512)"),
+            ("strangeman3107/animov-0.1.1", "Animov (448x384)", "Animov (448x384)"),
         ],
         default="cerspense/zeroscope_v2_576w",
         update=input_strips_updated,
|
|
@@ -1071,6 +1082,38 @@ class GeneratorAddonPreferences(AddonPreferences):
     image_model_card: bpy.props.EnumProperty(
         name="Image Model",
         items=[
+            (
+                "stabilityai/stable-diffusion-xl-base-1.0",
+                "Stable Diffusion XL 1.0 (1024x1024)",
+                "stabilityai/stable-diffusion-xl-base-1.0",
+            ),
+            (
+                "stabilityai/sdxl-turbo",
+                "Stable Diffusion Turbo (512x512)",
+                "stabilityai/sdxl-turbo",
+            ),
+            (
+                "runwayml/stable-diffusion-v1-5",
+                "Stable Diffusion 1.5 (512x512)",
+                "runwayml/stable-diffusion-v1-5",
+            ),
+            (
+                "stabilityai/stable-diffusion-2",
+                "Stable Diffusion 2 (768x768)",
+                "stabilityai/stable-diffusion-2",
+            ),
+            (
+                "segmind/SSD-1B",
+                "Segmind SSD-1B (1024x1024)",
+                "segmind/SSD-1B",
+            ),
+            (
+                "Lykon/dreamshaper-7",
+                "Dreamshaper LCM v7 (1024 x 1024)",
+                "Lykon/dreamshaper-7",
+            ),
+            # ("ptx0/terminus-xl-gamma-v1", "Terminus XL Gamma v1", "ptx0/terminus-xl-gamma-v1"),
+            ("warp-ai/wuerstchen", "Würstchen (1024x1024)", "warp-ai/wuerstchen"),
             (
                 "Salesforce/blipdiffusion",
                 "Blip Subject Driven (512x512)",
|
|
@@ -1081,12 +1124,7 @@ class GeneratorAddonPreferences(AddonPreferences):
                 "ControlNet (512x512)",
                 "lllyasviel/sd-controlnet-canny",
             ),
-            ("DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0"),
-            (
-                "Lykon/dreamshaper-7",
-                "Dreamshaper LCM v7 (1024 x 1024)",
-                "Lykon/dreamshaper-7",
-            ),
+            #("DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0"),
             (
                 "monster-labs/control_v1p_sd15_qrcode_monster",
                 "Illusion (512x512)",
|
|
@@ -1102,33 +1140,6 @@ class GeneratorAddonPreferences(AddonPreferences):
                 "Scribble (512x512)",
                 "lllyasviel/control_v11p_sd15_scribble",
             ),
-            (
-                "segmind/SSD-1B",
-                "Segmind SSD-1B (1024x1024)",
-                "segmind/SSD-1B",
-            ),
-            (
-                "runwayml/stable-diffusion-v1-5",
-                "Stable Diffusion 1.5 (512x512)",
-                "runwayml/stable-diffusion-v1-5",
-            ),
-            (
-                "stabilityai/stable-diffusion-2",
-                "Stable Diffusion 2 (768x768)",
-                "stabilityai/stable-diffusion-2",
-            ),
-            (
-                "stabilityai/sdxl-turbo",
-                "Stable Diffusion Turbo (512x512)",
-                "stabilityai/sdxl-turbo",
-            ),
-            (
-                "stabilityai/stable-diffusion-xl-base-1.0",
-                "Stable Diffusion XL 1.0 (1024x1024)",
-                "stabilityai/stable-diffusion-xl-base-1.0",
-            ),
-            # ("ptx0/terminus-xl-gamma-v1", "Terminus XL Gamma v1", "ptx0/terminus-xl-gamma-v1"),
-            ("warp-ai/wuerstchen", "Würstchen (1024x1024)", "warp-ai/wuerstchen"),
         ],
         default="stabilityai/stable-diffusion-xl-base-1.0",
         update=input_strips_updated,
|
|
@@ -1142,6 +1153,7 @@ class GeneratorAddonPreferences(AddonPreferences):
                 "Music: MusicGen Stereo",
                 "facebook/musicgen-stereo-small",
             ),
+            ("bark", "Speech: Bark", "Bark"),
             (
                 "cvssp/audioldm2-music",
                 "Music: AudioLDM 2",
|
|
@@ -1152,10 +1164,9 @@ class GeneratorAddonPreferences(AddonPreferences):
                 "Sound: AudioLDM 2",
                 "Sound: AudioLDM 2",
             ),
-            ("bark", "Speech: Bark", "Bark"),
             # ("declare-lab/mustango", "Mustango", "declare-lab/mustango"),
         ],
-        default="bark",
+        default="facebook/musicgen-stereo-small",
         update=input_strips_updated,
     )
 
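
[Review note, not part of the patch] The preference hunks above only reorder
the EnumProperty item lists (Stable Video Diffusion and SD XL move to the top
of their menus), comment out Potat1 and DeepFloyd, and change the audio
default from "bark" to "facebook/musicgen-stereo-small". Each item tuple
follows Blender's (identifier, name, description) convention; a sketch of
that convention, runnable inside Blender's Python (the property name
demo_audio_model is hypothetical, not from the patch):

    import bpy

    # First element is the stored identifier, second the UI label,
    # third the tooltip shown on hover.
    items = [("bark", "Speech: Bark", "Bark")]
    bpy.types.Scene.demo_audio_model = bpy.props.EnumProperty(
        name="Audio Model", items=items, default="bark"
    )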
|
|
|
|
|
|
|
|
|
|
@@ -2000,7 +2011,7 @@ class SEQUENCER_OT_generate_movie(Operator):
             # # pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) # Heavy
             # # pipe.enable_vae_slicing()
             # else:
-            # pipe.to("cuda")
+            # pipe.to(gfx_device)
             # from diffusers import StableDiffusionXLImg2ImgPipeline
 
             # refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
|
|
@@ -2016,7 +2027,7 @@ class SEQUENCER_OT_generate_movie(Operator):
             # # refiner.enable_vae_tiling()
             # # refiner.enable_vae_slicing()
             # else:
-            # refiner.to("cuda")
+            # refiner.to(gfx_device)
         if (
             movie_model_card == "stabilityai/stable-video-diffusion-img2vid"
             or movie_model_card == "stabilityai/stable-video-diffusion-img2vid-xt"
|
|
@@ -2031,7 +2042,7 @@ class SEQUENCER_OT_generate_movie(Operator):
                 if low_vram():
                     refiner.enable_model_cpu_offload()
                 else:
-                    refiner.to("cuda")
+                    refiner.to(gfx_device)
         else:  # vid2vid / img2vid
             if (
                 movie_model_card == "cerspense/zeroscope_v2_dark_30x448x256"
|
|
@@ -2063,7 +2074,7 @@ class SEQUENCER_OT_generate_movie(Operator):
                     # upscale.enable_vae_slicing()
                     #upscale.unet.enable_forward_chunking(chunk_size=1, dim=1) # heavy:
                 else:
-                    upscale.to("cuda")
+                    upscale.to(gfx_device)
         # Models for movie generation
         else:
             if movie_model_card == "guoyww/animatediff-motion-adapter-v1-5-2":
|
|
@@ -2095,7 +2106,7 @@ class SEQUENCER_OT_generate_movie(Operator):
                     pipe.enable_model_cpu_offload()
                     # pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) # heavy:
                 else:
-                    upscale.to("cuda")
+                    upscale.to(gfx_device)
             elif movie_model_card == "VideoCrafter/Image2Video-512":
                 from diffusers import StableDiffusionPipeline
 
|
|
|
|
|
|
|
|
|
|
@@ -2114,7 +2125,7 @@ class SEQUENCER_OT_generate_movie(Operator):
                     pipe.enable_model_cpu_offload()
                     # pipe.enable_vae_slicing()
                 else:
-                    pipe.to("cuda")
+                    pipe.to(gfx_device)
             elif (
                 movie_model_card == "stabilityai/stable-video-diffusion-img2vid"
                 or movie_model_card == "stabilityai/stable-video-diffusion-img2vid-xt"
|
|
@@ -2140,7 +2151,7 @@ class SEQUENCER_OT_generate_movie(Operator):
                     pipe.enable_model_cpu_offload()
                     # pipe.enable_vae_slicing()
                 else:
-                    pipe.to("cuda")
+                    pipe.to(gfx_device)
             # Model for upscale generated movie
             if scene.video_to_video:
                 if torch.cuda.is_available():
|
|
@@ -2162,7 +2173,7 @@ class SEQUENCER_OT_generate_movie(Operator):
                     #upscale.unet.enable_forward_chunking(chunk_size=1, dim=1) # Heavy
                     # upscale.enable_vae_slicing()
                 else:
-                    upscale.to("cuda")
+                    upscale.to(gfx_device)
         if scene.use_freeU and pipe:  # Free Lunch
             # -------- freeu block registration
             print("Process: FreeU")
|
|
@@ -2511,7 +2522,7 @@ class SEQUENCER_OT_generate_audio(Operator):
                 "Dependencies needs to be installed in the add-on preferences.",
             )
             return {"CANCELLED"}
 
         show_system_console(True)
         set_system_console_topmost(True)
 
|
|
|
|
|
|
|
|
|
|
@@ -2536,8 +2547,8 @@ class SEQUENCER_OT_generate_audio(Operator):
                 pipe.enable_model_cpu_offload()
                 # pipe.enable_vae_slicing()
             else:
-                pipe.to("cuda")
+                pipe.to(gfx_device)
 
         # Musicgen
         elif addon_prefs.audio_model_card == "facebook/musicgen-stereo-small":
             from transformers import pipeline
|
|
@@ -2551,7 +2562,7 @@ class SEQUENCER_OT_generate_audio(Operator):
             )
             if int(audio_length_in_s * 50) > 1503:
                 self.report({"INFO"}, "Maximum output duration is 30 sec.")
 
         # Bark
         elif addon_prefs.audio_model_card == "bark":
             preload_models(
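
[Review note, not part of the patch] The MusicGen guard above encodes the
model's output cap: the audio_length_in_s * 50 expression implies roughly 50
audio tokens per second, so 1503 tokens is just over the advertised
30-second maximum:

    tokens_per_second = 50   # implied by int(audio_length_in_s * 50)
    max_tokens = 1503
    print(max_tokens / tokens_per_second)  # 30.06 seconds, matching the report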
|
|
@@ -2560,7 +2571,7 @@ class SEQUENCER_OT_generate_audio(Operator):
                 fine_use_gpu=True,
                 fine_use_small=True,
             )
 
         # Mustango
         elif addon_prefs.audio_model_card == "declare-lab/mustango":
             import IPython
|
|
@@ -2573,13 +2584,13 @@ class SEQUENCER_OT_generate_audio(Operator):
             model = DiffusionPipeline.from_pretrained(
                 "declare-lab/mustango"
             )  # , device="cuda:0", torch_dtype=torch.float16)
 
         # Deadend
         else:
             print("Audio model not found.")
             self.report({"INFO"}, "Audio model not found.")
             return {"CANCELLED"}
 
         # Main loop
         for i in range(scene.movie_num_batch):
             if i > 0:
|
|
@@ -2630,7 +2641,7 @@ class SEQUENCER_OT_generate_audio(Operator):
 
                 # Write the combined audio to a file
                 write_wav(filename, rate, audio.transpose())
 
             # Musicgen
             elif addon_prefs.audio_model_card == "facebook/musicgen-stereo-small":
                 print("Generate: MusicGen Stereo")
|
|
@@ -2857,7 +2868,7 @@ class SEQUENCER_OT_generate_image(Operator):
                 "None of the selected strips are movie, image, text or scene types.",
             )
             return {"CANCELLED"}
 
         # LOADING MODELS
 
         # models for inpaint
|
|
@@ -2874,7 +2885,7 @@ class SEQUENCER_OT_generate_image(Operator):
                 "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
                 torch_dtype=torch.float16,
                 variant="fp16",
-            ).to("cuda")
+            ).to(gfx_device)
 
             # Set scheduler
             if scene.use_lcm:
|
|
@@ -2890,8 +2901,8 @@ class SEQUENCER_OT_generate_image(Operator):
                 # torch.cuda.set_per_process_memory_fraction(0.99)
                 pipe.enable_model_cpu_offload()
             else:
-                pipe.to("cuda")
+                pipe.to(gfx_device)
 
         # Conversion img2img/vid2img.
         elif (
             do_convert
|
|
@@ -2927,8 +2938,8 @@ class SEQUENCER_OT_generate_image(Operator):
                 # refiner.enable_vae_tiling()
                 # converter.enable_vae_slicing()
             else:
-                converter.to("cuda")
+                converter.to(gfx_device)
 
         # ControlNet & Illusion
         elif (
             image_model_card == "lllyasviel/sd-controlnet-canny"
|
|
@@ -2968,8 +2979,8 @@ class SEQUENCER_OT_generate_image(Operator):
                 pipe.enable_model_cpu_offload()
                 # pipe.enable_vae_slicing()
             else:
-                pipe.to("cuda")
+                pipe.to(gfx_device)
 
         # Blip
         elif image_model_card == "Salesforce/blipdiffusion":
             print("Load: Blip Model")
|
|
@@ -2982,15 +2993,15 @@ class SEQUENCER_OT_generate_image(Operator):
 
                 pipe = BlipDiffusionPipeline.from_pretrained(
                     "Salesforce/blipdiffusion", torch_dtype=torch.float16
-                ).to("cuda")
+                ).to(gfx_device)
             else:
                 from controlnet_aux import CannyDetector
                 from diffusers.pipelines import BlipDiffusionControlNetPipeline
 
                 pipe = BlipDiffusionControlNetPipeline.from_pretrained(
                     "Salesforce/blipdiffusion-controlnet", torch_dtype=torch.float16
-                ).to("cuda")
+                ).to(gfx_device)
 
         # OpenPose
         elif image_model_card == "lllyasviel/sd-controlnet-openpose":
             print("Load: OpenPose Model")
|
|
@@ -3034,8 +3045,8 @@ class SEQUENCER_OT_generate_image(Operator):
                 pipe.enable_model_cpu_offload()
                 # pipe.enable_vae_slicing()
             else:
-                pipe.to("cuda")
+                pipe.to(gfx_device)
 
         # Scribble
         elif image_model_card == "lllyasviel/control_v11p_sd15_scribble":
             print("Load: Scribble Model")
|
|
@@ -3076,8 +3087,8 @@ class SEQUENCER_OT_generate_image(Operator):
                 # pipe.enable_vae_slicing()
                 # pipe.enable_forward_chunking(chunk_size=1, dim=1)
             else:
-                pipe.to("cuda")
+                pipe.to(gfx_device)
 
         # Dreamshaper
         elif image_model_card == "Lykon/dreamshaper-7":
             if do_convert:
|
|
@@ -3092,7 +3103,7 @@ class SEQUENCER_OT_generate_image(Operator):
             )  # , custom_pipeline="latent_consistency_txt2img"
 
             pipe.to(torch_device="cuda", torch_dtype=torch.float16)
 
         # Wuerstchen
         elif image_model_card == "warp-ai/wuerstchen":
             print("Load: Würstchen Model")
|
|
@@ -3114,8 +3125,8 @@ class SEQUENCER_OT_generate_image(Operator):
             if low_vram():
                 pipe.enable_model_cpu_offload()
             else:
-                pipe.to("cuda")
+                pipe.to(gfx_device)
 
         # DeepFloyd
         elif image_model_card == "DeepFloyd/IF-I-M-v1.0":
             print("Load: DeepFloyd Model")
|
|
@@ -3135,8 +3146,8 @@ class SEQUENCER_OT_generate_image(Operator):
             if low_vram():
                 stage_1.enable_model_cpu_offload()
             else:
-                stage_1.to("cuda")
+                stage_1.to(gfx_device)
 
             # stage 2
             stage_2 = DiffusionPipeline.from_pretrained(
                 "DeepFloyd/IF-II-M-v1.0",
|
|
@@ -3147,8 +3158,8 @@ class SEQUENCER_OT_generate_image(Operator):
             if low_vram():
                 stage_2.enable_model_cpu_offload()
             else:
-                stage_2.to("cuda")
+                stage_2.to(gfx_device)
 
             # stage 3
             safety_modules = {
                 "feature_extractor": stage_1.feature_extractor,
|
|
@@ -3163,7 +3174,7 @@ class SEQUENCER_OT_generate_image(Operator):
             if low_vram():
                 stage_3.enable_model_cpu_offload()
             else:
-                stage_3.to("cuda")
+                stage_3.to(gfx_device)
 
         # Stable diffusion etc.
         else:
|
|
@@ -3226,7 +3237,7 @@ class SEQUENCER_OT_generate_image(Operator):
                 pipe.enable_model_cpu_offload()
                 pipe.enable_vae_slicing()
             else:
-                pipe.to("cuda")
+                pipe.to(gfx_device)
         if scene.use_freeU and pipe:  # Free Lunch
             # -------- freeu block registration
             print("Process: FreeU")
|
|
@@ -3285,7 +3296,7 @@ class SEQUENCER_OT_generate_image(Operator):
                 # refiner.enable_vae_tiling()
                 # refiner.enable_vae_slicing()
             else:
-                refiner.to("cuda")
+                refiner.to(gfx_device)
         # # Allow longer prompts.
         # if image_model_card == "runwayml/stable-diffusion-v1-5":
         #     if pipe:
|
|
@@ -3841,7 +3852,7 @@ class SEQUENCER_OT_generate_text(Operator):
         )
         model = BlipForConditionalGeneration.from_pretrained(
             "Salesforce/blip-image-captioning-large", torch_dtype=torch.float16
-        ).to("cuda")
+        ).to(gfx_device)
 
         init_image = (
             load_first_frame(scene.movie_path)
|
@@ -3852,7 +3863,7 @@ class SEQUENCER_OT_generate_text(Operator):
 
         text = ""
         inputs = processor(init_image, text, return_tensors="pt").to(
-            "cuda", torch.float16
+            gfx_device, torch.float16
         )
 
         out = model.generate(**inputs, max_new_tokens=256)
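
[Review note, not part of the patch] After this patch a few literal CUDA
references still remain in the touched regions: the Dreamshaper branch's
pipe.to(torch_device="cuda", torch_dtype=torch.float16) is left as context in
@@ -3092, the Mustango comment keeps device="cuda:0", and the vid2vid upscale
path still branches on torch.cuda.is_available(). On MPS or CPU those spots
would still need follow-up.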
|
|
|