Update __init__.py

Branch: pull/101/head
Author: tin2tin, 10 months ago (committed by GitHub)
Commit: c5207e24f2
No known key found for this signature in database. GPG Key ID: B5690EEEBB952194

Changed file: __init__.py (328 lines changed)
@@ -14,7 +14,7 @@
 bl_info = {
     "name": "Pallaidium - Generative AI",
     "author": "tintwotin",
-    "version": (2, 1),
+    "version": (2, 0),
     "blender": (3, 4, 0),
     "location": "Video Sequence Editor > Sidebar > Generative AI",
     "description": "AI Generate media in the VSE",
@@ -44,6 +44,7 @@ import gc
 temp = pathlib.PosixPath
 pathlib.PosixPath = pathlib.WindowsPath
 import time
+from bpy_extras.io_utils import ImportHelper
 import sys
 print("Python: "+sys.version)
@@ -114,7 +115,7 @@ def timer():
 def print_elapsed_time(start_time):
     elapsed_time = time.time() - start_time
     formatted_time = format_time(elapsed_time * 1000)  # Convert to milliseconds
-    print(f"Total time: {formatted_time}")
+    print(f"Total time: {formatted_time}\n\n")

 def split_and_recombine_text(text, desired_length=200, max_length=300):
@@ -697,8 +698,9 @@ def install_modules(self):
     import_module(self, "huggingface_hub", "huggingface_hub")
     import_module(self, "transformers", "git+https://github.com/huggingface/transformers.git")
     subprocess.call([pybin, "-m", "pip", "install", "git+https://github.com/suno-ai/bark.git", "--upgrade"])
+    import_module(self, "WhisperSpeech", "WhisperSpeech")
+    import_module(self, "pydub", "pydub")
     if os_platform == "Windows":
         subprocess.call([pybin, "-m", "pip", "install", "git+https://github.com/daswer123/resemble-enhance-windows.git", "--no-dependencies", "--upgrade"])
         #subprocess.call([pybin, "-m", "pip", "install", "resemble-enhance", "--no-dependencies", "--upgrade"])
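Note: the import_module(self, module, package) helper called throughout install_modules() is not part of this diff. A minimal sketch of the try-import-then-pip-install pattern it presumably follows, run against Blender's bundled Python; all names and behavior below are assumptions, not the add-on's actual implementation:

    # Hedged sketch: import a module, installing its pip package on first failure.
    import importlib
    import subprocess
    import sys

    pybin = sys.executable  # assumption: Blender's bundled Python interpreter

    def import_module_sketch(module: str, package: str) -> None:
        """Import `module`; if missing, pip-install `package` and retry."""
        try:
            importlib.import_module(module)
        except ModuleNotFoundError:
            subprocess.check_call([pybin, "-m", "pip", "install", package, "--no-warn-script-location"])
            importlib.import_module(module)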
@@ -718,7 +720,7 @@ def install_modules(self):
     else:
         import_module(self, "resemble_enhance", "resemble-enhance")
     #import_module(self, "peft", "git+https://github.com/huggingface/peft.git")
     #import_module(self, "bark", "git+https://github.com/suno-ai/bark.git")
     #import_module(self, "diffusers", "diffusers")
     import_module(self, "diffusers", "git+https://github.com/huggingface/diffusers.git")
@@ -735,7 +737,7 @@ def install_modules(self):
     import_module(self, "protobuf", "protobuf")
     python_version_info = sys.version_info
     python_version_str = parse_python_version(python_version_info)
     import_module(self, "imageio", "imageio")
     import_module(self, "imwatermark", "invisible-watermark>=0.2.0")
@@ -762,26 +764,26 @@ def install_modules(self):
     import_module(self, "triton", "triton")
     #subprocess.check_call([pybin, "-m", "pip", "install", "numpy", "--upgrade"])
     # import_module(self, "mustango", "mustango")
     # import_module(self, "mustango", "git+https://github.com/AMAAI-Lab/mustango.git")
     if os_platform == "Windows":
         if python_version_str == "3.10":
             subprocess.check_call([pybin, "-m", "pip", "install", "https://files.pythonhosted.org/packages/e2/a9/98e0197b24165113ac551aae5646005205f88347fb13ac59a75a9864e1d3/mediapipe-0.10.9-cp310-cp310-win_amd64.whl", "--no-warn-script-location"])
         else:
             subprocess.check_call([pybin, "-m", "pip", "install", "https://files.pythonhosted.org/packages/e9/7b/cd671c5067a56e1b4a9b70d0e42ac8cdb9f63acdc186589827cf213802a5/mediapipe-0.10.9-cp311-cp311-win_amd64.whl", "--no-warn-script-location"])
     else:
         import_module(self, "mediapipe", "mediapipe")
     if os_platform == "Windows":
         if python_version_str == "3.10":
             subprocess.check_call([pybin, "-m", "pip", "install", "https://github.com/Gourieff/Assets/raw/main/Insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl", "--no-warn-script-location"])
         else:
             subprocess.check_call([pybin, "-m", "pip", "install", "https://github.com/Gourieff/Assets/raw/main/Insightface/insightface-0.7.3-cp311-cp311-win_amd64.whl", "--no-warn-script-location"])
     else:
         import_module(self, "insightface", "insightface")
     #import_module(self, "audiocraft", "git+https://github.com/facebookresearch/audiocraft.git")
     # import_module(self, "compel", "compel")
@@ -872,7 +874,7 @@ def install_modules(self):
     import_module(self, "torchvision", "torchvision")
     import_module(self, "torchaudio", "torchaudio")
     import_module(self, "xformers", "xformers")

 def get_module_dependencies(module_name):
@@ -984,6 +986,17 @@ class GENERATOR_OT_uninstall(Operator):
         uninstall_module_with_dependencies("tabulate")
         uninstall_module_with_dependencies("gradio")
+        # WhisperSpeech
+        uninstall_module_with_dependencies("ruamel.yaml.clib")
+        uninstall_module_with_dependencies("fastprogress")
+        uninstall_module_with_dependencies("fastcore")
+        uninstall_module_with_dependencies("ruamel.yaml")
+        uninstall_module_with_dependencies("hyperpyyaml")
+        uninstall_module_with_dependencies("speechbrain")
+        uninstall_module_with_dependencies("vocos")
+        uninstall_module_with_dependencies("WhisperSpeech")
+        uninstall_module_with_dependencies("pydub")
         self.report(
             {"INFO"},
             "\nRemove AI Models manually: \nLinux and macOS: ~/.cache/huggingface/hub\nWindows: %userprofile%.cache\\huggingface\\hub",
@@ -1019,10 +1032,10 @@ def input_strips_updated(self, context):
         scene.inpaint_selected_strip = ""
     if type == "image" and scene.input_strips != "input_strips" and (
-        image_model_card == "lllyasviel/sd-controlnet-canny"
+        image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
         or image_model_card == "lllyasviel/sd-controlnet-openpose"
         or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
-        or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
+        or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
         or image_model_card == "Salesforce/blipdiffusion"
         or image_model_card == "h94/IP-Adapter"
     ):
@@ -1082,10 +1095,10 @@ def output_strips_updated(self, context):
         bpy.ops.lora.refresh_files()
     if (
-        image_model_card == "lllyasviel/sd-controlnet-canny"
+        image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
         or image_model_card == "lllyasviel/sd-controlnet-openpose"
         or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
-        or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
+        or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
         or image_model_card == "Salesforce/blipdiffusion"
         or image_model_card == "h94/IP-Adapter"
     ) and type == "image":
@@ -1243,7 +1256,7 @@ class GeneratorAddonPreferences(AddonPreferences):
         # "dataautogpt3/Miniaturus_PotentiaV1.2",
         # "Miniaturus_PotentiaV1.2 (1024x1024)",
         # "dataautogpt3/Miniaturus_PotentiaV1.2",
         # ),#
         # (
         # "dataautogpt3/ProteusV0.2",
         # "Proteus (1024x1024)",
@@ -1266,17 +1279,17 @@ class GeneratorAddonPreferences(AddonPreferences):
             "Salesforce/blipdiffusion",
         ),
         (
-            "lllyasviel/sd-controlnet-canny",
+            "diffusers/controlnet-canny-sdxl-1.0-small",
             "Canny (512x512)",
-            "lllyasviel/sd-controlnet-canny",
+            "diffusers/controlnet-canny-sdxl-1.0-small",
         ),
         # Disabled - has log-in code.
         # ("DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0", "DeepFloyd/IF-I-M-v1.0"),
-        # (
-        # "monster-labs/control_v1p_sd15_qrcode_monster",
-        # "Illusion (512x512)",
-        # "monster-labs/control_v1p_sd15_qrcode_monster",
-        # ),
+        (
+            "monster-labs/control_v1p_sdxl_qrcode_monster",
+            "Illusion (512x512)",
+            "monster-labs/control_v1p_sdxl_qrcode_monster",
+        ),
         (
             "lllyasviel/sd-controlnet-openpose",
             "OpenPose (512x512)",
@@ -1305,6 +1318,7 @@ class GeneratorAddonPreferences(AddonPreferences):
             "vtrungnhan9/audioldm2-music-zac2023",
         ),
         ("bark", "Speech: Bark", "Bark"),
+        ("WhisperSpeech", "Speech: WhisperSpeech", "WhisperSpeech"),
         # (
         #     #"vtrungnhan9/audioldm2-music-zac2023",
         #     "cvssp/audioldm2-music",
@@ -1708,12 +1722,11 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
             and movie_model_card != "guoyww/animatediff-motion-adapter-v1-5-2"
         ) or (
             type == "image"
-            #and image_model_card != "lllyasviel/sd-controlnet-canny"
+            #and image_model_card != "diffusers/controlnet-canny-sdxl-1.0-small"
             and image_model_card != "lllyasviel/sd-controlnet-openpose"
             #and image_model_card != "h94/IP-Adapter"
             and image_model_card != "lllyasviel/control_v11p_sd15_scribble"
-            and image_model_card
-            != "monster-labs/control_v1p_sd15_qrcode_monster"
+            #and image_model_card!= "monster-labs/control_v1p_sdxl_qrcode_monster"
             and image_model_card != "Salesforce/blipdiffusion"
         ):
             if input == "input_strips" and not scene.inpaint_selected_strip:
@@ -1737,7 +1750,7 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
                 "svd_decode_chunk_size",
                 text="Decode Frames",
             )
-        if bpy.context.scene.sequence_editor is not None and image_model_card != "lllyasviel/sd-controlnet-canny":
+        if bpy.context.scene.sequence_editor is not None and image_model_card != "diffusers/controlnet-canny-sdxl-1.0-small":
             if len(bpy.context.scene.sequence_editor.sequences) > 0:
                 if input == "input_strips" and type == "image":
                     col.prop_search(
@@ -1768,7 +1781,7 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
                 or image_model_card == "runwayml/stable-diffusion-v1-5"
                 or image_model_card == "stabilityai/sdxl-turbo"
                 or image_model_card == "lllyasviel/sd-controlnet-openpose"
-                or image_model_card == "lllyasviel/sd-controlnet-canny"
+                or image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
                 or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
             )
             and type == "image"
@@ -1817,6 +1830,7 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
         if (type == "audio" and audio_model_card == "bark") or (
             type == "audio"
             and audio_model_card == "facebook/musicgen-stereo-medium"
+            and audio_model_card == "WhisperSpeech"
         ):
             pass
         else:
@@ -1847,6 +1861,12 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
             col = layout.column(align=True)
             col.prop(context.scene, "speakers", text="Speaker")
             col.prop(context.scene, "languages", text="Language")
+        elif type == "audio" and audio_model_card == "WhisperSpeech":
+            row = col.row(align=True)
+            row.prop(context.scene, "audio_path", text="Speaker")
+            row.operator("sequencer.open_audio_filebrowser", text="", icon="FILEBROWSER")
+            col.prop(context.scene, "audio_speed", text="Speed")
         elif (
             type == "audio"
             and addon_prefs.audio_model_card == "facebook/musicgen-stereo-medium"
@@ -1854,6 +1874,7 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
             col.prop(
                 context.scene, "movie_num_inference_steps", text="Quality Steps"
             )
         else:
             col.prop(
                 context.scene, "movie_num_inference_steps", text="Quality Steps"
@@ -1899,11 +1920,11 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
         # row = col.row()
         ## if type == "movie" or (
         ##     type == "image"
-        ##     and image_model_card != "lllyasviel/sd-controlnet-canny"
+        ##     and image_model_card != "diffusers/controlnet-canny-sdxl-1.0-small"
         ##     and image_model_card != "lllyasviel/sd-controlnet-openpose"
         ##     and image_model_card != "lllyasviel/control_v11p_sd15_scribble"
         ##     and image_model_card
-        ##     != "monster-labs/control_v1p_sd15_qrcode_monster"
+        ##     != "monster-labs/control_v1p_sdxl_qrcode_monster"
         ##     and image_model_card != "Salesforce/blipdiffusion"
         ## ):
         ##     row.prop(context.scene, "use_freeU", text="FreeU")
@@ -1917,8 +1938,8 @@ class SEQUENCER_PT_pallaidium_panel(Panel): # UI
             or (type == "image" and image_model_card == "segmind/SSD-1B")
             or (type == "image" and image_model_card == "lllyasviel/sd-controlnet-openpose")
             or (type == "image" and image_model_card == "lllyasviel/control_v11p_sd15_scribble")
-            or (type == "image" and image_model_card == "lllyasviel/sd-controlnet-canny")
-            or (type == "image" and image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster")
+            or (type == "image" and image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small")
+            or (type == "image" and image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster")
             or (
                 type == "image"
                 and image_model_card == "segmind/Segmind-Vega"
@@ -2612,6 +2633,34 @@ class SEQUENCER_OT_generate_movie(Operator):
         return {"FINISHED"}

+class SequencerOpenAudioFile(Operator, ImportHelper):
+    bl_idname = "sequencer.open_audio_filebrowser"
+    bl_label = "Open Audio File Browser"
+    filter_glob: StringProperty(
+        default='*.mp3;*.wav;*.ogg',
+        options={'HIDDEN'},
+    )
+
+    def execute(self, context):
+        scene = context.scene
+        # Check if the file exists
+        if self.filepath and os.path.exists(self.filepath):
+            valid_extensions = {".mp3", ".wav"}
+            filename, extension = os.path.splitext(self.filepath)
+            if extension.lower() in valid_extensions:
+                print('Selected audio file:', self.filepath)
+                scene.audio_path=bpy.path.abspath(self.filepath)
+        else:
+            self.report({'ERROR'}, "Selected file does not exist.")
+            return {'CANCELLED'}
+        return {'FINISHED'}
+
+    def invoke(self, context, event):
+        context.window_manager.fileselect_add(self)
+        return {'RUNNING_MODAL'}
+

 class SEQUENCER_OT_generate_audio(Operator):
     """Generate Audio"""
@@ -2668,6 +2717,9 @@ class SEQUENCER_OT_generate_audio(Operator):
                 from resemble_enhance.enhancer.inference import denoise, enhance
+            if addon_prefs.audio_model_card == "WhisperSpeech":
+                from whisperspeech.pipeline import Pipeline
         except ModuleNotFoundError:
             print("Dependencies needs to be installed in the add-on preferences.")
             self.report(
@@ -2675,6 +2727,7 @@ class SEQUENCER_OT_generate_audio(Operator):
                 "Dependencies needs to be installed in the add-on preferences.",
             )
             return {"CANCELLED"}

         show_system_console(True)
         set_system_console_topmost(True)
         # clear the VRAM
@@ -2743,6 +2796,12 @@ class SEQUENCER_OT_generate_audio(Operator):
                 fine_use_gpu=True,
                 fine_use_small=True,
             )
+        #WhisperSpeech
+        elif addon_prefs.audio_model_card == "WhisperSpeech":
+            from whisperspeech.pipeline import Pipeline
+            pipe = Pipeline(s2a_ref='collabora/whisperspeech:s2a-q4-small-en+pl.model')
+
         # Mustango
         elif addon_prefs.audio_model_card == "declare-lab/mustango":
@@ -2823,7 +2882,7 @@ class SEQUENCER_OT_generate_audio(Operator):
             #dwav = transform(dwav)
             # dwav = audio
             #sr = rate

             if torch.cuda.is_available():
                 device = "cuda"
             else:
@@ -2837,6 +2896,19 @@ class SEQUENCER_OT_generate_audio(Operator):
             # Write the combined audio to a file
             write_wav(filename, new_sr, wav2)
+        #WhisperSpeech
+        elif addon_prefs.audio_model_card == "WhisperSpeech":
+            prompt = context.scene.generate_movie_prompt
+            prompt = prompt.replace("\n", " ").strip()
+            filename = solve_path(clean_filename(prompt) + ".wav")
+            if scene.audio_path:
+                speaker = scene.audio_path
+            else:
+                speaker = None
+            audio_tensor = pipe.generate_to_file(filename, prompt, speaker=speaker, lang='en', cps=int(scene.audio_speed))
+
         # Musicgen.
         elif addon_prefs.audio_model_card == "facebook/musicgen-stereo-medium":
             print("Generate: MusicGen Stereo")
@@ -3063,9 +3135,9 @@ class SEQUENCER_OT_generate_image(Operator):
         if (
             scene.generate_movie_prompt == ""
-            and not image_model_card == "lllyasviel/sd-controlnet-canny"
+            and not image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
             and not image_model_card == "Salesforce/blipdiffusion"
-            and not image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
+            and not image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
         ):
             self.report({"INFO"}, "Text prompt in the Generative AI tab is empty!")
             return {"CANCELLED"}
@@ -3124,21 +3196,21 @@ class SEQUENCER_OT_generate_image(Operator):
             input == "input_strips"
             and find_strip_by_name(scene, scene.inpaint_selected_strip)
             and type == "image"
-            and not image_model_card == "lllyasviel/sd-controlnet-canny"
+            and not image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
             and not image_model_card == "lllyasviel/sd-controlnet-openpose"
             and not image_model_card == "lllyasviel/control_v11p_sd15_scribble"
             and not image_model_card == "h94/IP-Adapter"
-            and not image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
+            and not image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
             and not image_model_card == "Salesforce/blipdiffusion"
             and not image_model_card == "Lykon/dreamshaper-8"
         )
         do_convert = (
             (scene.image_path or scene.movie_path)
-            and not image_model_card == "lllyasviel/sd-controlnet-canny"
+            and not image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
             and not image_model_card == "lllyasviel/sd-controlnet-openpose"
             and not image_model_card == "lllyasviel/control_v11p_sd15_scribble"
             and not image_model_card == "h94/IP-Adapter"
-            and not image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
+            and not image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
             and not image_model_card == "Salesforce/blipdiffusion"
             and not do_inpaint
         )
@@ -3146,11 +3218,11 @@ class SEQUENCER_OT_generate_image(Operator):
         if (
             do_inpaint
             or do_convert
-            or image_model_card == "lllyasviel/sd-controlnet-canny"
+            or image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
             or image_model_card == "lllyasviel/sd-controlnet-openpose"
             or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
             or image_model_card == "h94/IP-Adapter"
-            or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
+            or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
             or image_model_card == "Salesforce/blipdiffusion"
         ):
             if not strips:
@@ -3178,11 +3250,14 @@ class SEQUENCER_OT_generate_image(Operator):
             # clear the VRAM
             clear_cuda_cache()

+            vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
             #pipe = AutoPipelineForInpainting.from_pretrained(
             pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
                 "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
                 torch_dtype=torch.float16,
                 variant="fp16",
+                vae=vae,
                 local_files_only=local_files_only,
             ).to(gfx_device)

             # Set scheduler
@@ -3248,7 +3323,7 @@ class SEQUENCER_OT_generate_image(Operator):
                     print("LoRAs will be ignored for image or movie input.")
                     enabled_items = False

             if enabled_items:
                 if scene.use_lcm:
                     from diffusers import LCMScheduler
@@ -3262,8 +3337,8 @@ class SEQUENCER_OT_generate_image(Operator):
                         adapter_name=("lcm-lora-sdxl"),
                     )
                 else:
                     converter.load_lora_weights("latent-consistency/lcm-lora-sdxl")

             converter.watermark = NoWatermark()
             if low_vram():
                 converter.enable_model_cpu_offload()
@@ -3271,7 +3346,7 @@ class SEQUENCER_OT_generate_image(Operator):
                 # converter.enable_vae_slicing()
             else:
                 converter.to(gfx_device)

         # elif: # depth
         #     from transformers import DPTFeatureExtractor, DPTForDepthEstimation
         #     from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL
@@ -3294,64 +3369,45 @@ class SEQUENCER_OT_generate_image(Operator):
         #     use_safetensors=True,
         #     torch_dtype=torch.float16,
         # ).to(gfx_device)
         # pipe.enable_model_cpu_offload()

         # Canny & Illusion
         elif (
-            image_model_card == "lllyasviel/sd-controlnet-canny"
-            or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
+            image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
+            or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
         ):
-            if image_model_card == "lllyasviel/sd-controlnet-canny":
+            if image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small":
                 print("Load: Canny")
             else:
                 print("Load: Illusion")
-            # from diffusers import (
-            #     #StableDiffusionControlNetPipeline,
-            #     ControlNetModel,
-            #     UniPCMultistepScheduler,
-            # )
             from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
-            controlnet = ControlNetModel.from_pretrained(
-                "diffusers/controlnet-canny-sdxl-1.0",
-                torch_dtype=torch.float16
-            )
-            if image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster":
-                #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-                pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+            if image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster":
+                controlnet = ControlNetModel.from_pretrained(
                     "monster-labs/control_v1p_sdxl_qrcode_monster",
-                    controlnet=controlnet,
-                    #vae=vae,
                     torch_dtype=torch.float16,
+                    local_files_only=local_files_only,
                 )
             else:
-                vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-                pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
-                    "stabilityai/stable-diffusion-xl-base-1.0",
-                    controlnet=controlnet,
-                    vae=vae,
+                controlnet = ControlNetModel.from_pretrained(
+                    "diffusers/controlnet-canny-sdxl-1.0-small",
                     torch_dtype=torch.float16,
-                    variant="fp16",
-                    local_files_only=local_files_only,
                 )
-            # controlnet = ControlNetModel.from_pretrained(
-            #     image_model_card,
-            #     torch_dtype=torch.float16,
-            #     local_files_only=local_files_only,
-            # )
-            # pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            #     "runwayml/stable-diffusion-v1-5",
-            #     controlnet=controlnet,
-            #     torch_dtype=torch.float16,
-            #     local_files_only=local_files_only,
-            # )
+            vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+            pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+                "stabilityai/stable-diffusion-xl-base-1.0",
+                controlnet=controlnet,
+                vae=vae,
+                torch_dtype=torch.float16,
+                variant="fp16",
+            )
+            # safety_checker=None,
             pipe.watermark = NoWatermark()
-            #
             if scene.use_lcm:
                 from diffusers import LCMScheduler
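The rewritten branch above picks a ControlNet (the QR-monster SDXL weights for Illusion, or the small SDXL Canny ControlNet) and mounts it on the SDXL base pipeline with the fp16-fix VAE. A condensed, runnable sketch of that diffusers setup; the model ids all appear in the diff, while the prompt and conditioning image are placeholders:

    # Condensed sketch of the ControlNet-on-SDXL setup this hunk converges on.
    import torch
    from diffusers import AutoencoderKL, ControlNetModel, StableDiffusionXLControlNetPipeline
    from diffusers.utils import load_image

    controlnet = ControlNetModel.from_pretrained(
        "diffusers/controlnet-canny-sdxl-1.0-small",  # or "monster-labs/control_v1p_sdxl_qrcode_monster"
        torch_dtype=torch.float16,
    )
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        controlnet=controlnet,
        vae=vae,
        torch_dtype=torch.float16,
        variant="fp16",
    ).to("cuda")

    control = load_image("canny_edges.png")  # placeholder conditioning image
    image = pipe("a futuristic city at dusk", image=control,
                 controlnet_conditioning_scale=0.6).images[0]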
@@ -3368,7 +3424,6 @@ class SEQUENCER_OT_generate_image(Operator):
                     pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

             if low_vram():
-                #pipe.enable_xformers_memory_efficient_attention()
                 pipe.enable_model_cpu_offload()
             else:
                 pipe.to(gfx_device)
@@ -3660,12 +3715,12 @@ class SEQUENCER_OT_generate_image(Operator):
             from diffusers import AutoencoderKL

-            # vae = AutoencoderKL.from_pretrained(
-            #     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
-            # )
+            vae = AutoencoderKL.from_pretrained(
+                "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
+            )
             pipe = StableDiffusionXLPipeline.from_single_file(
                 "https://huggingface.co/thibaud/sdxl_dpo_turbo/blob/main/sdxl_dpo_turbo.safetensors",
-                # vae=vae,
+                vae=vae,
                 torch_dtype=torch.float16,
                 variant="fp16",
             )
@@ -3717,21 +3772,21 @@ class SEQUENCER_OT_generate_image(Operator):
             import torch
             from diffusers import (
                 AutoPipelineForText2Image,
                 StableDiffusionXLPipeline,
                 KDPM2AncestralDiscreteScheduler,
                 AutoencoderKL
             )

             # # Load VAE component
             # vae = AutoencoderKL.from_pretrained(
             #     "madebyollin/sdxl-vae-fp16-fix",
             #     torch_dtype=torch.float16
             # )

             # Configure the pipeline
             #pipe = StableDiffusionXLPipeline.from_pretrained(
             # pipe = AutoPipelineForText2Image.from_pretrained(
             #     "dataautogpt3/ProteusV0.2",
             #     #vae=vae,
             #     torch_dtype=torch.float16,
             #     local_files_only=local_files_only,
@@ -3744,7 +3799,7 @@ class SEQUENCER_OT_generate_image(Operator):
                 "dataautogpt3/Miniaturus_PotentiaV1.2",
                 torch_dtype=torch.float16, # vae=vae,
                 local_files_only=local_files_only,
             )
         else:
             from diffusers import AutoPipelineForText2Image
             pipe = AutoPipelineForText2Image.from_pretrained(
@@ -3827,7 +3882,7 @@ class SEQUENCER_OT_generate_image(Operator):
             or image_model_card == "runwayml/stable-diffusion-v1-5"
             or image_model_card == "stabilityai/sdxl-turbo"
             or image_model_card == "lllyasviel/sd-controlnet-openpose"
-            or image_model_card == "lllyasviel/sd-controlnet-canny"
+            or image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
             or image_model_card == "lllyasviel/control_v11p_sd15_scribble"
         ):
             scene = context.scene
@@ -3844,7 +3899,7 @@ class SEQUENCER_OT_generate_image(Operator):
                 )
                 pipe.set_adapters(enabled_names, adapter_weights=enabled_weights)
                 print("Load LoRAs: " + " ".join(enabled_names))

         # Refiner model - load if chosen.
         if do_refine:
@@ -3976,11 +4031,10 @@ class SEQUENCER_OT_generate_image(Operator):
         # Canny & Illusion
         elif (
-            image_model_card == "lllyasviel/sd-controlnet-canny"
-            or image_model_card == "monster-labs/control_v1p_sd15_qrcode_monster"
+            image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small"
+            or image_model_card == "monster-labs/control_v1p_sdxl_qrcode_monster"
         ):
-            print("Process: Canny")
             init_image = None
             if scene.image_path:
                 init_image = load_first_frame(scene.image_path)
@@ -3992,7 +4046,8 @@ class SEQUENCER_OT_generate_image(Operator):
             image = scale_image_within_dimensions(np.array(init_image),x,None)

-            if image_model_card == "lllyasviel/sd-controlnet-canny":
+            if image_model_card == "diffusers/controlnet-canny-sdxl-1.0-small":
+                print("Process: Canny")
                 image = np.array(init_image)
                 low_threshold = 100
                 high_threshold = 200
@@ -4001,25 +4056,43 @@ class SEQUENCER_OT_generate_image(Operator):
                 canny_image = np.concatenate([image, image, image], axis=2)
                 canny_image = Image.fromarray(canny_image)
                 # canny_image = np.array(canny_image)

+                image = pipe(
+                    prompt=prompt,
+                    #negative_prompt=negative_prompt,
+                    num_inference_steps=image_num_inference_steps, # Should be around 50
+                    controlnet_conditioning_scale=1.00 - scene.image_power,
+                    image=canny_image,
+                    # guidance_scale=clamp_value(
+                    #     image_num_guidance, 3, 5
+                    # ), # Should be between 3 and 5.
+                    # # guess_mode=True, #NOTE: Maybe the individual methods should be selectable instead?
+                    # height=y,
+                    # width=x,
+                    # generator=generator,
+                ).images[0]
             else:
-                canny_image = init_image
-
-            image = pipe(
-                prompt=prompt,
-                #negative_prompt=negative_prompt,
-                num_inference_steps=image_num_inference_steps, # Should be around 50
-                controlnet_conditioning_scale=1.00 - scene.image_power,
-                image=canny_image,
-                # guidance_scale=clamp_value(
-                #     image_num_guidance, 3, 5
-                # ), # Should be between 3 and 5.
-                # # guess_mode=True, #NOTE: Maybe the individual methods should be selectable instead?
-                # height=y,
-                # width=x,
-                # generator=generator,
-            ).images[0]
+                print("Process: Illusion")
+                illusion_image = init_image
+
+                image = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    num_inference_steps=image_num_inference_steps, # Should be around 50
+                    control_image=illusion_image,
+                    controlnet_conditioning_scale=1.00 - scene.image_power,
+                    generator=generator,
+                    control_guidance_start=0,
+                    control_guidance_end=1,
+                    #output_type="latent"
+                    # guidance_scale=clamp_value(
+                    #     image_num_guidance, 3, 5
+                    # ), # Should be between 3 and 5.
+                    # # guess_mode=True, #NOTE: Maybe the individual methods should be selectable instead?
+                    # height=y,
+                    # width=x,
+                ).images[0]

         # DreamShaper
         elif image_model_card == "Lykon/dreamshaper-8" and do_convert == False:
@@ -4256,7 +4329,7 @@ class SEQUENCER_OT_generate_image(Operator):
         # Img2img
         elif do_convert:
             if enabled_items:
                 self.report(
                     {"INFO"},
@@ -4910,6 +4983,7 @@ classes = (
     LORABROWSER_UL_files,
     GENERATOR_OT_install,
     GENERATOR_OT_uninstall,
+    SequencerOpenAudioFile,
 )
@@ -5153,7 +5227,19 @@ def register():
         default="",
         update=update_folder_callback,
     )
+    bpy.types.Scene.audio_path = bpy.props.StringProperty(
+        name="audio_path",
+        default="",
+        description="Path to speaker voice",
+    )
+    # The frame audio duration.
+    bpy.types.Scene.audio_speed = bpy.props.IntProperty(
+        name="audio_speed",
+        default=13,
+        min=1,
+        max=20,
+        description="Speech speed.",
+    )

 def unregister():
     for cls in classes:
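The two new Scene properties registered above are what the WhisperSpeech UI and generator read back. A minimal standalone sketch of the register/read/unregister lifecycle for such properties (illustrative only; the add-on's unregister() for these properties is outside this diff):

    # Minimal lifecycle sketch for Scene properties like the two added above.
    import bpy

    def register():
        bpy.types.Scene.audio_path = bpy.props.StringProperty(name="audio_path", default="")
        bpy.types.Scene.audio_speed = bpy.props.IntProperty(name="audio_speed", default=13, min=1, max=20)

    def unregister():
        # Properties attached to bpy.types should be deleted on unregister.
        del bpy.types.Scene.audio_path
        del bpy.types.Scene.audio_speed

    register()
    scene = bpy.context.scene
    scene.audio_speed = 15                      # write
    print(scene.audio_path, scene.audio_speed)  # read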
