|
|
|
@@ -685,6 +685,7 @@ def install_modules(self):
        # resemble-enhance:
        subprocess.call([pybin, "-m", "pip", "install", "git+https://github.com/daswer123/resemble-enhance-windows.git", "--no-dependencies", "--upgrade"])

        deep_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "deepspeed/deepspeed-0.12.4+unknown-py3-none-any.whl")
        print(deep_path)
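        # DeepSpeed is imported from the wheel bundled with the add-on; a matching
        # prebuilt wheel may not be available from PyPI on this platform.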
        import_module(self, "deepspeed", deep_path)
        import_module(self, "librosa", "librosa")
        import_module(self, "celluloid", "celluloid")
|
|
|
@@ -1142,6 +1143,8 @@ class GeneratorAddonPreferences(AddonPreferences):
            "Stable Diffusion XL 1.0 (1024x1024)",
            "stabilityai/stable-diffusion-xl-base-1.0",
        ),
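+        # SDXL-Lightning is a step-distilled SDXL; the 2-step LoRA variant is
+        # loaded in the generate operator below.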
+        ("ByteDance/SDXL-Lightning", "SDXL-Lightning 2 Step (1024 x 1024)", "ByteDance/SDXL-Lightning"),
+        # ("stabilityai/stable-cascade", "Stable Cascade (1024 x 1024)", "stabilityai/stable-cascade"),
        # ("thibaud/sdxl_dpo_turbo", "SDXL DPO TURBO (1024x1024)", "thibaud/sdxl_dpo_turbo"),
        # (
        #     "stabilityai/sdxl-turbo",
|
|
|
@@ -1173,11 +1176,11 @@ class GeneratorAddonPreferences(AddonPreferences):
        #     "Miniaturus_PotentiaV1.2 (1024x1024)",
        #     "dataautogpt3/Miniaturus_PotentiaV1.2",
        # ),
-        # (
-        #     "dataautogpt3/ProteusV0.2",
-        #     "Proteus (1024x1024)",
-        #     "dataautogpt3/ProteusV0.2",
-        # ),
+        (
+            "dataautogpt3/ProteusV0.3",
+            "Proteus (1024x1024)",
+            "dataautogpt3/ProteusV0.3",
+        ),
        ("dataautogpt3/OpenDalleV1.1", "OpenDalle (1024 x 1024)", "dataautogpt3/OpenDalleV1.1"),
        # ("h94/IP-Adapter", "IP-Adapter (512 x 512)", "h94/IP-Adapter"),
        # ("PixArt-alpha/PixArt-XL-2-1024-MS", "PixArt (1024 x 1024)", "PixArt-alpha/PixArt-XL-2-1024-MS"),
|
|
|
@@ -1692,7 +1695,7 @@ class SEQUENCER_PT_pallaidium_panel(Panel):  # UI
                "svd_decode_chunk_size",
                text="Decode Frames",
            )
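+        # Strip input is not offered for SDXL-Lightning, which is wired up as
+        # text-to-image only in this add-on.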
-        if bpy.context.scene.sequence_editor is not None and image_model_card != "diffusers/controlnet-canny-sdxl-1.0-small":
+        if bpy.context.scene.sequence_editor is not None and image_model_card != "diffusers/controlnet-canny-sdxl-1.0-small" and image_model_card != "ByteDance/SDXL-Lightning":
            if len(bpy.context.scene.sequence_editor.sequences) > 0:
                if input == "input_strips" and type == "image":
                    col.prop_search(
|
|
|
@@ -1818,9 +1821,12 @@ class SEQUENCER_PT_pallaidium_panel(Panel):  # UI
            )

-        else:
-            col.prop(
-                context.scene, "movie_num_inference_steps", text="Quality Steps"
-            )
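+        # SDXL-Lightning runs a fixed 2-step schedule, so the Quality Steps
+        # control is hidden for it.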
+        if type == "image" and image_model_card == "ByteDance/SDXL-Lightning":
+            pass
+        else:
+            col.prop(
+                context.scene, "movie_num_inference_steps", text="Quality Steps"
+            )

        if (
            type == "movie"
|
|
|
@@ -1833,6 +1839,9 @@ class SEQUENCER_PT_pallaidium_panel(Panel):  # UI
            scene.use_lcm and not (
                type == "image"
                and image_model_card == "Lykon/dreamshaper-8"
-            )
+            ) and not (
+                type == "image"
+                and image_model_card == "ByteDance/SDXL-Lightning"
+            )
        ):
            pass
|
|
|
@@ -3133,6 +3142,7 @@ class SEQUENCER_OT_generate_image(Operator):
            # )

            # from compel import Compel

        except ModuleNotFoundError:
            print("Dependencies need to be installed in the add-on preferences.")
            self.report(
|
|
|
@@ -3173,6 +3183,7 @@ class SEQUENCER_OT_generate_image(Operator):
            and not image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
            and not image_model_card == "Salesforce/blipdiffusion"
            and not image_model_card == "Lykon/dreamshaper-8"
+            and not image_model_card == "ByteDance/SDXL-Lightning"
        )
        do_convert = (
            (scene.image_path or scene.movie_path)
|
|
|
@@ -3182,6 +3193,7 @@ class SEQUENCER_OT_generate_image(Operator):
            and not image_model_card == "h94/IP-Adapter"
            and not image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
            and not image_model_card == "Salesforce/blipdiffusion"
+            and not image_model_card == "ByteDance/SDXL-Lightning"
            and not do_inpaint
        )
        do_refine = scene.refine_sd and not do_convert
|
|
|
@@ -3746,14 +3758,45 @@ class SEQUENCER_OT_generate_image(Operator):
                    torch_dtype=torch.float16,
                    local_files_only=local_files_only,
                )
-            elif image_model_card == "dataautogpt3/ProteusV0.2":
+            elif image_model_card == "ByteDance/SDXL-Lightning":
                import torch
-                from diffusers import (
-                    AutoPipelineForText2Image,
-                    StableDiffusionXLPipeline,
-                    KDPM2AncestralDiscreteScheduler,
-                    AutoencoderKL,
-                )
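+                # The 2-step Lightning LoRA distills SDXL down to two sampling
+                # steps; it is downloaded from the Hub and fused into the base UNet.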
+                from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler
+                from huggingface_hub import hf_hub_download
+
+                base = "stabilityai/stable-diffusion-xl-base-1.0"
+                repo = "ByteDance/SDXL-Lightning"
+                ckpt = "sdxl_lightning_2step_lora.pth"  # Use the correct ckpt for your step setting!
+
+                # Load model.
+                pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16").to("cuda")
+                pipe.load_lora_weights(hf_hub_download(repo, ckpt))
+                pipe.fuse_lora()
+
+                # Ensure sampler uses "trailing" timesteps.
+                pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
+
+            elif image_model_card == "dataautogpt3/ProteusV0.3":
+                from diffusers import StableDiffusionXLPipeline
+                # from diffusers import AutoencoderKL
+
+                # vae = AutoencoderKL.from_pretrained(
+                #     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
+                # )
+                pipe = StableDiffusionXLPipeline.from_single_file(
+                    "dataautogpt3/ProteusV0.3",
+                    # vae=vae,
+                    torch_dtype=torch.float16,
+                    # variant="fp16",
+                )
+                # from diffusers import DPMSolverMultistepScheduler
+                # pipe.scheduler = DPMSolverMultistepScheduler.from_config(
+                #     pipe.scheduler.config
+                # )

                if low_vram():
                    pipe.enable_model_cpu_offload()
                else:
                    pipe.to(gfx_device)

                # # Load VAE component
                # vae = AutoencoderKL.from_pretrained(
|
|
|
@@ -3771,6 +3814,13 @@ class SEQUENCER_OT_generate_image(Operator):
                # )
                # pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)

+            elif image_model_card == "stabilityai/stable-cascade":
+                import torch
+                from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline
+                # prior = StableCascadePriorPipeline.from_pretrained("stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16).to(gfx_device)
+                # decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", torch_dtype=torch.float16).to(gfx_device)
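+                # The prior/decoder pipelines are instead created at generation
+                # time, so both stages never sit in memory at once.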
+
            elif image_model_card == "dataautogpt3/Miniaturus_PotentiaV1.2":
                from diffusers import AutoPipelineForText2Image
                pipe = AutoPipelineForText2Image.from_pretrained(
|
|
|
@@ -3831,28 +3881,27 @@ class SEQUENCER_OT_generate_image(Operator):
                scene.movie_num_guidance = 0
                pipe.load_lora_weights("segmind/Segmind-VegaRT")
                pipe.fuse_lora()
-        elif image_model_card != "PixArt-alpha/PixArt-XL-2-1024-MS" and image_model_card != "Lykon/dreamshaper-8":
+        elif image_model_card != "PixArt-alpha/PixArt-XL-2-1024-MS" and image_model_card != "Lykon/dreamshaper-8" and image_model_card != "stabilityai/stable-cascade":
            print("Use LCM: False")
            pipe.scheduler = DPMSolverMultistepScheduler.from_config(
                pipe.scheduler.config
            )
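+        # Stable Cascade builds its pipelines at generation time, so there is no
+        # `pipe` to configure here and the watermark/VRAM handling below is skipped.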
-        pipe.watermark = NoWatermark()
-
-        if low_vram():
-            # torch.cuda.set_per_process_memory_fraction(0.95)  # 6 GB VRAM
-            pipe.enable_model_cpu_offload()
-            # pipe.enable_vae_slicing()
-        else:
-            pipe.to(gfx_device)
+        if image_model_card != "stabilityai/stable-cascade":
+            pipe.watermark = NoWatermark()
+
+            if low_vram():
+                # torch.cuda.set_per_process_memory_fraction(0.95)  # 6 GB VRAM
+                pipe.enable_model_cpu_offload()
+                # pipe.enable_vae_slicing()
+            else:
+                pipe.to(gfx_device)

        # # FreeU
        # if scene.use_freeU and pipe:  # Free Lunch
        #     # -------- freeu block registration
        #     print("Process: FreeU")
        #     register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
        #     register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
        #     # -------- freeu block registration

        # LoRA
        if (
|
|
|
@@ -4247,6 +4296,43 @@ class SEQUENCER_OT_generate_image(Operator):
                    generator=generator,
                ).images[0]
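+
+            # SDXL-Lightning: distilled checkpoints run without classifier-free
+            # guidance (guidance_scale=0.0), and the step count must match the
+            # 2-step checkpoint loaded earlier.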
+            elif image_model_card == "ByteDance/SDXL-Lightning":
+                image = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    height=y,
+                    width=x,
+                    guidance_scale=0.0,
+                    output_type="pil",
+                    num_inference_steps=2,
+                ).images[0]
+            elif image_model_card == "stabilityai/stable-cascade":
+                # import torch
+                prior = StableCascadePriorPipeline.from_pretrained("stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16)
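+                # Stage 1: the prior turns the prompt into image embeddings
+                # (bfloat16); CPU offload keeps peak VRAM down.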
+                prior.enable_model_cpu_offload()
+                prior_output = prior(
+                    prompt=prompt,
+                    height=y,
+                    width=x,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=image_num_guidance,
+                    # num_images_per_prompt=num_images_per_prompt,
+                    num_inference_steps=image_num_inference_steps,
+                )
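+                # Stage 2: free the prior, then let the decoder turn the image
+                # embeddings into pixels with half the step budget.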
+                prior = None
+                decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", torch_dtype=torch.float16)
+                decoder.enable_model_cpu_offload()
+                image = decoder(
+                    image_embeddings=prior_output.image_embeddings.half(),
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=0.0,
+                    output_type="pil",
+                    num_inference_steps=int(image_num_inference_steps / 2),
+                ).images[0]
+                decoder = None

            # Inpaint
            elif do_inpaint:
                print("Process: Inpaint")
|
|
|
|