@@ -14,7 +14,7 @@
bl_info = {
    "name": "Pallaidium - Generative AI",
    "author": "tintwotin",
    "version": (2, 1),
    "version": (2, 0),
    "blender": (3, 4, 0),
    "location": "Video Sequence Editor > Sidebar > Generative AI",
    "description": "AI Generate media in the VSE",
@@ -44,6 +44,7 @@ import gc
temp = pathlib.PosixPath
pathlib.PosixPath = pathlib.WindowsPath
import time
from bpy_extras.io_utils import ImportHelper

import sys
print("Python: "+sys.version)
@@ -114,7 +115,7 @@ def timer():
def print_elapsed_time(start_time):
    elapsed_time = time.time() - start_time
    formatted_time = format_time(elapsed_time * 1000)  # Convert to milliseconds
    print(f"Total time: {formatted_time}")
    print(f"Total time: {formatted_time}\n\n")


def split_and_recombine_text(text, desired_length=200, max_length=300):
@@ -697,8 +698,9 @@ def install_modules(self):
    import_module(self, "huggingface_hub", "huggingface_hub")
    import_module(self, "transformers", "git+https://github.com/huggingface/transformers.git")

    subprocess.call([pybin, "-m", "pip", "install", "git+https://github.com/suno-ai/bark.git", "--upgrade"])
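    # WhisperSpeech (added in this change) and the pydub audio library are installed next to Bark.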
    import_module(self, "WhisperSpeech", "WhisperSpeech")
    import_module(self, "pydub", "pydub")
    if os_platform == "Windows":
        subprocess.call([pybin, "-m", "pip", "install", "git+https://github.com/daswer123/resemble-enhance-windows.git", "--no-dependencies", "--upgrade"])
        #subprocess.call([pybin, "-m", "pip", "install", "resemble-enhance", "--no-dependencies", "--upgrade"])
@@ -984,6 +986,17 @@ class GENERATOR_OT_uninstall(Operator):
        uninstall_module_with_dependencies("tabulate")
        uninstall_module_with_dependencies("gradio")

        # WhisperSpeech
        uninstall_module_with_dependencies("ruamel.yaml.clib")
        uninstall_module_with_dependencies("fastprogress")
        uninstall_module_with_dependencies("fastcore")
        uninstall_module_with_dependencies("ruamel.yaml")
        uninstall_module_with_dependencies("hyperpyyaml")
        uninstall_module_with_dependencies("speechbrain")
        uninstall_module_with_dependencies("vocos")
        uninstall_module_with_dependencies("WhisperSpeech")
        uninstall_module_with_dependencies("pydub")

        self.report(
            {"INFO"},
            "\nRemove AI Models manually: \nLinux and macOS: ~/.cache/huggingface/hub\nWindows: %userprofile%.cache\\huggingface\\hub",
@@ -1019,10 +1032,10 @@ def input_strips_updated(self, context):
        scene.inpaint_selected_strip = ""

    if type == "image" and scene.input_strips != "input_strips" and (
        image_model_card == "lllyasviel/sd-controlnet-canny"
        image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
        or image_model_card == "lllyasviel/sd-controlnet-openpose"
        or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
        or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
        or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
        or image_model_card == "Salesforce/blipdiffusion"
        or image_model_card == "h94/IP-Adapter"
    ):
@@ -1082,10 +1095,10 @@ def output_strips_updated(self, context):
        bpy.ops.lora.refresh_files()

    if (
        image_model_card == "lllyasviel/sd-controlnet-canny"
        image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
        or image_model_card == "lllyasviel/sd-controlnet-openpose"
        or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
        or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
        or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
        or image_model_card == "Salesforce/blipdiffusion"
        or image_model_card == "h94/IP-Adapter"
    ) and type == "image":
@@ -1266,17 +1279,17 @@ class GeneratorAddonPreferences(AddonPreferences):
            "Salesforce/blipdiffusion",
        ),
        (
            "lllyasviel/sd-controlnet-canny",
            "diffusers/controlnet-canny-sdxl-1.0-small",
            "Canny (512x512)",
            "lllyasviel/sd-controlnet-canny",
            "diffusers/controlnet-canny-sdxl-1.0-small",
        ),
        # Disabled - has log-in code.
        # ("DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0"),
        # (
        #     "monster-labs/control_v1p_sd15_qrcode_monster",
        #     "Illusion (512x512)",
        #     "monster-labs/control_v1p_sd15_qrcode_monster",
        # ),
        (
            "monster-labs/control_v1p_sdxl_qrcode_monster",
            "Illusion (512x512)",
            "monster-labs/control_v1p_sdxl_qrcode_monster",
        ),
        (
            "lllyasviel/sd-controlnet-openpose",
            "OpenPose (512x512)",
@@ -1305,6 +1318,7 @@ class GeneratorAddonPreferences(AddonPreferences):
            "vtrungnhan9/audioldm2-music-zac2023",
        ),
        ("bark", "Speech: Bark", "Bark"),
        ("WhisperSpeech", "Speech: WhisperSpeech", "WhisperSpeech"),
        # (
        #     #"vtrungnhan9/audioldm2-music-zac2023",
        #     "cvssp/audioldm2-music",
@@ -1708,12 +1722,11 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
            and movie_model_card != "guoyww/animatediff-motion-adapter-v1-5-2"
        ) or (
            type == "image"
            #and image_model_card != "lllyasviel/sd-controlnet-canny"
            #and image_model_card != "diffusers/controlnet-canny-sdxl-1.0-small"
            and image_model_card != "lllyasviel/sd-controlnet-openpose"
            #and image_model_card != "h94/IP-Adapter"
            and image_model_card != "lllyasviel/control_v11p_sd15_scribble"
            and image_model_card
            != "monster-labs/control_v1p_sd15_qrcode_monster"
            #and image_model_card != "monster-labs/control_v1p_sdxl_qrcode_monster"
            and image_model_card != "Salesforce/blipdiffusion"
        ):
            if input == "input_strips" and not scene.inpaint_selected_strip:
@@ -1737,7 +1750,7 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
                "svd_decode_chunk_size",
                text="Decode Frames",
            )
        if bpy.context.scene.sequence_editor is not None and image_model_card != "lllyasviel/sd-controlnet-canny":
        if bpy.context.scene.sequence_editor is not None and image_model_card != "diffusers/controlnet-canny-sdxl-1.0-small":
            if len(bpy.context.scene.sequence_editor.sequences) > 0:
                if input == "input_strips" and type == "image":
                    col.prop_search(
@@ -1768,7 +1781,7 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
            or image_model_card == "runwayml/stable-diffusion-v1-5"
            or image_model_card == "stabilityai/sdxl-turbo"
            or image_model_card == "lllyasviel/sd-controlnet-openpose"
            or image_model_card == "lllyasviel/sd-controlnet-canny"
            or image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
            or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
        )
        and type == "image"
@@ -1817,6 +1830,7 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
        if (type == "audio" and audio_model_card == "bark") or (
            type == "audio"
            and audio_model_card == "facebook/musicgen-stereo-medium"
            and audio_model_card == "WhisperSpeech"
        ):
            pass
        else:
@@ -1847,6 +1861,12 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
            col = layout.column(align=True)
            col.prop(context.scene, "speakers", text="Speaker")
            col.prop(context.scene, "languages", text="Language")

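        # WhisperSpeech UI: an optional reference-voice file (audio_path) and a speaking-speed slider (audio_speed).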
        elif type == "audio" and audio_model_card == "WhisperSpeech":
            row = col.row(align=True)
            row.prop(context.scene, "audio_path", text="Speaker")
            row.operator("sequencer.open_audio_filebrowser", text="", icon="FILEBROWSER")
            col.prop(context.scene, "audio_speed", text="Speed")

        elif (
            type == "audio"
            and addon_prefs.audio_model_card == "facebook/musicgen-stereo-medium"
@@ -1854,6 +1874,7 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
            col.prop(
                context.scene, "movie_num_inference_steps", text="Quality Steps"
            )

        else:
            col.prop(
                context.scene, "movie_num_inference_steps", text="Quality Steps"
@@ -1899,11 +1920,11 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
        # row = col.row()
##        if type == "movie" or (
##            type == "image"
##            and image_model_card != "lllyasviel/sd-controlnet-canny"
##            and image_model_card != "diffusers/controlnet-canny-sdxl-1.0-small"
##            and image_model_card != "lllyasviel/sd-controlnet-openpose"
##            and image_model_card != "lllyasviel/control_v11p_sd15_scribble"
##            and image_model_card
##            != "monster-labs/control_v1p_sd15_qrcode_monster"
##            != "monster-labs/control_v1p_sdxl_qrcode_monster"
##            and image_model_card != "Salesforce/blipdiffusion"
##        ):
##            row.prop(context.scene, "use_freeU", text="FreeU")
@@ -1917,8 +1938,8 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
            or (type == "image" and image_model_card == "segmind/SSD-1B")
            or (type == "image" and image_model_card == "lllyasviel/sd-controlnet-openpose")
            or (type == "image" and image_model_card == "lllyasviel/control_v11p_sd15_scribble")
            or (type == "image" and image_model_card == "lllyasviel/sd-controlnet-canny")
            or (type == "image" and image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster")
            or (type == "image" and image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small")
            or (type == "image" and image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster")
            or (
                type == "image"
                and image_model_card == "segmind/Segmind-Vega"
@@ -2612,6 +2633,34 @@ class SEQUENCER_OT_generate_movie(Operator):
        return {"FINISHED"}

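# File browser operator for picking a WhisperSpeech reference voice; the selected .mp3/.wav path is stored in scene.audio_path.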
class SequencerOpenAudioFile(Operator, ImportHelper):
    bl_idname = "sequencer.open_audio_filebrowser"
    bl_label = "Open Audio File Browser"
    filter_glob: StringProperty(
        default='*.mp3;*.wav;*.ogg',
        options={'HIDDEN'},
    )

    def execute(self, context):
        scene = context.scene
        # Check if the file exists
        if self.filepath and os.path.exists(self.filepath):
            valid_extensions = {".mp3", ".wav"}
            filename, extension = os.path.splitext(self.filepath)
            if extension.lower() in valid_extensions:
                print('Selected audio file:', self.filepath)
                scene.audio_path = bpy.path.abspath(self.filepath)
        else:
            self.report({'ERROR'}, "Selected file does not exist.")
            return {'CANCELLED'}

        return {'FINISHED'}

    def invoke(self, context, event):
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}


class SEQUENCER_OT_generate_audio(Operator):
    """Generate Audio"""
@@ -2668,6 +2717,9 @@ class SEQUENCER_OT_generate_audio(Operator):
            from resemble_enhance.enhancer.inference import denoise, enhance

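            # WhisperSpeech is optional; importing it inside the try block lets a missing install be reported in the add-on preferences message below.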
            if addon_prefs.audio_model_card == "WhisperSpeech":
                from whisperspeech.pipeline import Pipeline

        except ModuleNotFoundError:
            print("Dependencies needs to be installed in the add-on preferences.")
            self.report(
@@ -2675,6 +2727,7 @@ class SEQUENCER_OT_generate_audio(Operator):
                "Dependencies needs to be installed in the add-on preferences.",
            )
            return {"CANCELLED"}

        show_system_console(True)
        set_system_console_topmost(True)
        # clear the VRAM
@@ -2743,6 +2796,12 @@ class SEQUENCER_OT_generate_audio(Operator):
                fine_use_gpu=True,
                fine_use_small=True,
            )

        # WhisperSpeech
        elif addon_prefs.audio_model_card == "WhisperSpeech":
            from whisperspeech.pipeline import Pipeline

            pipe = Pipeline(s2a_ref='collabora/whisperspeech:s2a-q4-small-en+pl.model')

        # Mustango
        elif addon_prefs.audio_model_card == "declare-lab/mustango":
@@ -2837,6 +2896,19 @@ class SEQUENCER_OT_generate_audio(Operator):
            # Write the combined audio to a file
            write_wav(filename, new_sr, wav2)

        # WhisperSpeech
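        # Uses scene.audio_path (if set) as the reference speaker and audio_speed as the cps (characters-per-second) rate.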
        elif addon_prefs.audio_model_card == "WhisperSpeech":
            prompt = context.scene.generate_movie_prompt
            prompt = prompt.replace("\n", " ").strip()
            filename = solve_path(clean_filename(prompt) + ".wav")

            if scene.audio_path:
                speaker = scene.audio_path
            else:
                speaker = None

            audio_tensor = pipe.generate_to_file(filename, prompt, speaker=speaker, lang='en', cps=int(scene.audio_speed))

        # Musicgen.
        elif addon_prefs.audio_model_card == "facebook/musicgen-stereo-medium":
            print("Generate: MusicGen Stereo")
@@ -3063,9 +3135,9 @@ class SEQUENCER_OT_generate_image(Operator):
        if (
            scene.generate_movie_prompt == ""
            and not image_model_card == "lllyasviel/sd-controlnet-canny"
            and not image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
            and not image_model_card == "Salesforce/blipdiffusion"
            and not image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
            and not image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
        ):
            self.report({"INFO"}, "Text prompt in the Generative AI tab is empty!")
            return {"CANCELLED"}
@@ -3124,21 +3196,21 @@ class SEQUENCER_OT_generate_image(Operator):
            input == "input_strips"
            and find_strip_by_name(scene, scene.inpaint_selected_strip)
            and type == "image"
            and not image_model_card == "lllyasviel/sd-controlnet-canny"
            and not image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
            and not image_model_card == "lllyasviel/sd-controlnet-openpose"
            and not image_model_card == "lllyasviel/control_v11p_sd15_scribble"
            and not image_model_card == "h94/IP-Adapter"
            and not image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
            and not image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
            and not image_model_card == "Salesforce/blipdiffusion"
            and not image_model_card == "Lykon/dreamshaper-8"
        )
        do_convert = (
            (scene.image_path or scene.movie_path)
            and not image_model_card == "lllyasviel/sd-controlnet-canny"
            and not image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
            and not image_model_card == "lllyasviel/sd-controlnet-openpose"
            and not image_model_card == "lllyasviel/control_v11p_sd15_scribble"
            and not image_model_card == "h94/IP-Adapter"
            and not image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
            and not image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
            and not image_model_card == "Salesforce/blipdiffusion"
            and not do_inpaint
        )
@@ -3146,11 +3218,11 @@ class SEQUENCER_OT_generate_image(Operator):
        if (
            do_inpaint
            or do_convert
            or image_model_card == "lllyasviel/sd-controlnet-canny"
            or image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
            or image_model_card == "lllyasviel/sd-controlnet-openpose"
            or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
            or image_model_card == "h94/IP-Adapter"
            or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
            or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
            or image_model_card == "Salesforce/blipdiffusion"
        ):
            if not strips:
@@ -3178,11 +3250,14 @@ class SEQUENCER_OT_generate_image(Operator):
            # clear the VRAM
            clear_cuda_cache()

            vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

            #pipe = AutoPipelineForInpainting.from_pretrained(
            pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
                "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
                torch_dtype=torch.float16,
                variant="fp16",
                vae=vae,
                local_files_only=local_files_only,
            ).to(gfx_device)
            # Set scheduler
@@ -3298,60 +3373,41 @@ class SEQUENCER_OT_generate_image(Operator):
        # Canny & Illusion
        elif (
            image_model_card == "lllyasviel/sd-controlnet-canny"
            or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
            image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
            or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
        ):
            if image_model_card == "lllyasviel/sd-controlnet-canny":
            if image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small":
                print("Load: Canny")
            else:
                print("Load: Illusion")

            # from diffusers import (
            #     #StableDiffusionControlNetPipeline,
            #     ControlNetModel,
            #     UniPCMultistepScheduler,
            # )
            from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL

            if image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster":
                controlnet = ControlNetModel.from_pretrained(
                    "diffusers/controlnet-canny-sdxl-1.0",
                    torch_dtype=torch.float16
                )
            if image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster":

                #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
                pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
                    "monster-labs/control_v1p_sdxl_qrcode_monster",
                    controlnet=controlnet,
                    #vae=vae,
                    torch_dtype=torch.float16,
                    local_files_only=local_files_only,
                )
            else:
                controlnet = ControlNetModel.from_pretrained(
                    "diffusers/controlnet-canny-sdxl-1.0-small",
                    torch_dtype=torch.float16,
                    variant="fp16",
                    local_files_only=local_files_only,
                )

                vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
                pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
                    "stabilityai/stable-diffusion-xl-base-1.0",
                    controlnet=controlnet,
                    vae=vae,
                    torch_dtype=torch.float16,
                    variant="fp16",
                )

            # controlnet = ControlNetModel.from_pretrained(
            #     image_model_card,
            #     torch_dtype=torch.float16,
            #     local_files_only=local_files_only,
            # )
            # pipe = StableDiffusionControlNetPipeline.from_pretrained(
            #     "runwayml/stable-diffusion-v1-5",
            #     controlnet=controlnet,
            #     torch_dtype=torch.float16,
            #     local_files_only=local_files_only,
            # )  # safety_checker=None,

            pipe.watermark = NoWatermark()
            #

            if scene.use_lcm:
                from diffusers import LCMScheduler
@@ -3368,7 +3424,6 @@ class SEQUENCER_OT_generate_image(Operator):
                pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

            if low_vram():
                #pipe.enable_xformers_memory_efficient_attention()
                pipe.enable_model_cpu_offload()
            else:
                pipe.to(gfx_device)
@@ -3660,12 +3715,12 @@ class SEQUENCER_OT_generate_image(Operator):
            from diffusers import AutoencoderKL

            # vae = AutoencoderKL.from_pretrained(
            #     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
            # )
            vae = AutoencoderKL.from_pretrained(
                "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
            )
            pipe = StableDiffusionXLPipeline.from_single_file(
                "https://huggingface.co/thibaud/sdxl_dpo_turbo/blob/main/sdxl_dpo_turbo.safetensors",
                # vae=vae,
                vae=vae,
                torch_dtype=torch.float16,
                variant="fp16",
            )
@@ -3827,7 +3882,7 @@ class SEQUENCER_OT_generate_image(Operator):
            or image_model_card == "runwayml/stable-diffusion-v1-5"
            or image_model_card == "stabilityai/sdxl-turbo"
            or image_model_card == "lllyasviel/sd-controlnet-openpose"
            or image_model_card == "lllyasviel/sd-controlnet-canny"
            or image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
            or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
        ):
            scene = context.scene
@@ -3976,10 +4031,9 @@ class SEQUENCER_OT_generate_image(Operator):
        # Canny & Illusion
        elif (
            image_model_card == "lllyasviel/sd-controlnet-canny"
            or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
            image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
            or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
        ):
            print("Process: Canny")

            init_image = None
            if scene.image_path:
@@ -3992,7 +4046,8 @@ class SEQUENCER_OT_generate_image(Operator):
            image = scale_image_within_dimensions(np.array(init_image), x, None)

            if image_model_card == "lllyasviel/sd-controlnet-canny":
            if image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small":
                print("Process: Canny")
                image = np.array(init_image)
                low_threshold = 100
                high_threshold = 200
@@ -4001,9 +4056,6 @@ class SEQUENCER_OT_generate_image(Operator):
                canny_image = np.concatenate([image, image, image], axis=2)
                canny_image = Image.fromarray(canny_image)
                # canny_image = np.array(canny_image)
            else:
                canny_image = init_image

                image = pipe(
                    prompt=prompt,
                    #negative_prompt=negative_prompt,
@@ -4018,6 +4070,27 @@ class SEQUENCER_OT_generate_image(Operator):
                    # width=x,
                    # generator=generator,
                ).images[0]
            else:
                print("Process: Illusion")
                illusion_image = init_image

                image = pipe(
                    prompt=prompt,
                    negative_prompt=negative_prompt,
                    num_inference_steps=image_num_inference_steps,  # Should be around 50
                    control_image=illusion_image,
                    controlnet_conditioning_scale=1.00 - scene.image_power,
                    generator=generator,
                    control_guidance_start=0,
                    control_guidance_end=1,
                    #output_type="latent"
                    # guidance_scale=clamp_value(
                    #     image_num_guidance, 3, 5
                    # ),  # Should be between 3 and 5.
                    # # guess_mode=True, #NOTE: Maybe the individual methods should be selectable instead?
                    # height=y,
                    # width=x,
                ).images[0]
@@ -4910,6 +4983,7 @@ classes = (
    LORABROWSER_UL_files,
    GENERATOR_OT_install,
    GENERATOR_OT_uninstall,
    SequencerOpenAudioFile,
)
@@ -5153,7 +5227,19 @@ def register():
        default="",
        update=update_folder_callback,
    )

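    # Scene properties backing the WhisperSpeech UI: reference-voice path and speaking rate.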
    bpy.types.Scene.audio_path = bpy.props.StringProperty(
        name="audio_path",
        default="",
        description="Path to speaker voice",
    )
    # WhisperSpeech speaking rate, passed to the pipeline as cps (characters per second).
    bpy.types.Scene.audio_speed = bpy.props.IntProperty(
        name="audio_speed",
        default=13,
        min=1,
        max=20,
        description="Speech speed.",
    )


def unregister():
    for cls in classes: