
Image inpaint - now working.

When an image or movie strip is used as a mask, the white areas are replaced with content generated from the prompt (a minimal usage sketch follows below).
Branch: pull/42/head
tin2tin committed 1 year ago via GitHub
Commit f8c3c0eac9
1 changed file: __init__.py (135 lines changed)

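For context, here is a minimal, self-contained sketch of the mask-driven inpaint call this commit wires up, using the same diffusers pipeline the diff switches to (runwayml/stable-diffusion-inpainting). The file paths, prompt, resolution, and sampler settings are placeholder values for illustration, not taken from the add-on.

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

# Load the fp16 inpaint pipeline (same model card the diff selects below).
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.to("cuda")  # assumes a CUDA GPU; the add-on can instead enable CPU offload for low VRAM

# Init image and mask must share the same resolution; white mask areas are repainted.
init_image = load_image("init.png").convert("RGB").resize((512, 512))   # placeholder path
mask_image = load_image("mask.png").convert("RGB").resize((512, 512))   # placeholder path

image = pipe(
    prompt="a red sports car",        # placeholder prompt
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=25,
    guidance_scale=7.5,
).images[0]
image.save("inpainted.png")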
@@ -280,6 +280,37 @@ def load_video_as_np_array(video_path):
return np.array(frames)
def load_first_frame(file_path):
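# Return the file as a PIL image: still images are read directly, video files yield their first frame, and unreadable input returns None.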
import cv2, PIL, os
from diffusers.utils import load_image
print(file_path)
extension = os.path.splitext(file_path)[-1].lower() # Convert to lowercase for case-insensitive comparison
valid_extensions = {'.sgi', '.rgb', '.bw', '.cin', '.dpx', '.png', '.jpg', '.jpeg', '.jp2', '.j2c', '.tga', '.exr', '.hdr', '.tif', '.tiff', '.webp'}
if extension in valid_extensions:
image = cv2.imread(file_path)
#if image is not None:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return PIL.Image.fromarray(image)
else:
# Try to open the file as a video
cap = cv2.VideoCapture(file_path)
# Check if the file was successfully opened as a video
if cap.isOpened():
# Read the first frame from the video
ret, frame = cap.read()
cap.release() # Release the video capture object
if ret:
# If the first frame was successfully read, it's a video
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return PIL.Image.fromarray(frame)
# If neither video nor image worked, return None
return None
def process_frames(frame_folder_path, target_width):
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
@@ -383,7 +414,7 @@ def process_image(image_path, frames_nr):
#Make the loop for Zooming-in
i = 1
while i < frames_nr:
zLvl = 1.0 + ((i / (1/(max_zoom-1)) / frames_nr) * 0.01)
zLvl = 1.0 + ((i / (1/(max_zoom-1)) / frames_nr) * 0.005)
angle = 0 #i * max_rot / frames_nr
zoomedImg = zoomPan(img, zLvl, angle, coord=None)
output_path = os.path.join(temp_image_folder, f"frame_{i:04d}.png")
@@ -650,6 +681,21 @@ def input_strips_updated(self, context):
if movie_model_card == "stabilityai/stable-diffusion-xl-base-1.0":
scene.input_strips = "input_strips"
if scene.generatorai_typeselect == "video" or scene.generatorai_typeselect == "audio":
scene.inpaint_selected_strip = ""
def output_strips_updated(self, context):
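# Clear the selected inpaint mask strip when the output type switches to movie or audio.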
preferences = context.preferences
addon_prefs = preferences.addons[__name__].preferences
movie_model_card = addon_prefs.movie_model_card
scene = context.scene
type = scene.generatorai_typeselect
input = scene.input_strips
print(type)
if type == "movie" or type == "audio":
scene.inpaint_selected_strip = ""
class GeneratorAddonPreferences(AddonPreferences):
@@ -939,17 +985,18 @@ class SEQEUNCER_PT_generate_ai(Panel): # UI
col = layout.column(align=False)
col.use_property_split = True
col.use_property_decorate = False
col = col.box()
col = col.column(align=True)
if type != "audio":
col = col.box()
col = col.column()
col.prop(context.scene, "input_strips", text="Input")
if input == "input_strips":
col.prop(context.scene, "image_power", text="Strip Power")
if input == "input_strips" and not scene.inpaint_selected_strip:
col.prop(context.scene, "image_power", text="Strip Power")
if input == "input_strips" and type == "image":
col.prop_search(scene, "inpaint_selected_strip", scene.sequence_editor, "sequences", text="Inpaint Mask", icon='SEQ_STRIP_DUPLICATE')
if input == "input_strips" and type == "image":
col.prop_search(scene, "inpaint_selected_strip", scene.sequence_editor, "sequences", text="Inpaint Mask", icon='SEQ_STRIP_DUPLICATE')
col = layout.column(align=True)
col = col.box()
@@ -1170,16 +1217,16 @@ class SEQUENCER_OT_generate_movie(Operator):
# from modelscope.pipelines import pipeline
# from modelscope.outputs import OutputKeys
# #pipe = pipeline(task='image-to-video', model='damo-vilab/MS-Image2Video', model_revision='v1.1.0')
# pipe = pipeline(task='image-to-video', model='damo/Image-to-Video', model_revision='v1.1.0')
# #pipe = pipeline(task='image-to-video', model='https://dagshub.com/model/damo-video-to-video/src/main/data', model_revision='v1.1.0')
#pipe = pipeline(task='image-to-video', model='damo-vilab/MS-Image2Video', model_revision='v1.1.0')
#pipe = pipeline(task='image-to-video', model='damo/Image-to-Video', model_revision='v1.1.0')
#pipe = pipeline(task='image-to-video', model='https://dagshub.com/model/damo-video-to-video/src/main/data', model_revision='v1.1.0')
## if low_vram:
## pipe.enable_model_cpu_offload()
## pipe.enable_vae_tiling()
## pipe.enable_vae_slicing()
## else:
# refiner.to("cuda")
# if low_vram:
# pipe.enable_model_cpu_offload()
# pipe.enable_vae_tiling()
# pipe.enable_vae_slicing()
# else:
# pipe.to("cuda")
else: # vid2vid / img2vid
if movie_model_card == "cerspense/zeroscope_v2_dark_30x448x256" or movie_model_card == "cerspense/zeroscope_v2_576w" or scene.image_path:
@@ -1334,10 +1381,10 @@ class SEQUENCER_OT_generate_movie(Operator):
else:
if scene.movie_path:
video = load_video_as_np_array(video_path)
print("\nProcess: Video to video")
print("Process: Video to video")
elif scene.image_path:
print("\nProcess: Image to video")
print("Process: Image to video")
video = process_image(scene.image_path, int(scene.generate_movie_frames))
# Upscale video
@@ -1358,7 +1405,7 @@ class SEQUENCER_OT_generate_movie(Operator):
).frames
# elif scene.image_path: #img2vid
# print("\nProcess: Image to video")
# print("Process: Image to video")
#
# # IMG_PATH: your image path (url or local file)
# video_frames = pipe(scene.image_path, output_video='./output.mp4').frames
@@ -1739,6 +1786,8 @@ class SEQUENCER_OT_generate_image(Operator):
import torch
import requests
from diffusers.utils import load_image
import numpy as np
import PIL
except ModuleNotFoundError:
print("Dependencies needs to be installed in the add-on preferences.")
self.report(
@@ -1785,7 +1834,7 @@ class SEQUENCER_OT_generate_image(Operator):
torch.cuda.empty_cache()
#vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) vae=vae,
pipe = StableDiffusionInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16, variant="fp16") #use_safetensors=True
pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16") #use_safetensors=True
#pipe = StableDiffusionXLInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16, variant="fp16") #use_safetensors=True
pipe.watermark = NoWatermark()
@@ -1793,7 +1842,7 @@ class SEQUENCER_OT_generate_image(Operator):
if low_vram:
#torch.cuda.set_per_process_memory_fraction(0.99)
pipe.enable_model_cpu_offload()
pipe.enable_vae_slicing()
#pipe.enable_vae_slicing()
else:
pipe.to("cuda")
@@ -2000,10 +2049,19 @@ class SEQUENCER_OT_generate_image(Operator):
return
mask_path = get_strip_path(mask_strip)
mask_image = load_image(mask_path).convert("RGB")
mask_image = load_first_frame(mask_path)
if not mask_image:
print("Loading mask failed!")
return
mask_image = mask_image.resize((x, y))
if scene.image_path:
init_image = load_first_frame(scene.image_path)
if scene.movie_path:
init_image = load_first_frame(scene.movie_path)
if not init_image:
print("Loading strip failed!")
return
init_image = load_image(scene.image_path).convert("RGB")
init_image = init_image.resize((x, y))
image = pipe(
@@ -2013,11 +2071,39 @@ class SEQUENCER_OT_generate_image(Operator):
mask_image=mask_image,
num_inference_steps=image_num_inference_steps,
guidance_scale=image_num_guidance,
strength=1.00 - scene.image_power,
#strength=1.00 - scene.image_power,
height=y,
width=x,
generator=generator,
).images[0]
# https://github.com/huggingface/diffusers/commit/5f740d0f55adec63ee2453f83f1c0d7d984e01e4
#init_image = load_image(img_url).resize((512, 512))
#mask_image = load_image(mask_url).resize((512, 512))
#repainted_image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
#repainted_image.save("repainted_image.png")
# # Convert mask to grayscale NumPy array
# mask_image_arr = np.array(mask_image.convert("L"))
# init_image_arr = np.array(init_image.convert("L"))
# repainted_image_arr = np.array(repainted_image.convert("L"))
# # Add a channel dimension to the end of the grayscale mask
# mask_image_arr = mask_image_arr[:, :, None]
# init_image_arr = init_image_arr[:, :, None]
# repainted_image_arr = repainted_image_arr[:, :, None]
# # Binarize the mask: 1s correspond to the pixels which are repainted
# mask_image_arr = mask_image_arr.astype(np.float32) / 255.0
# mask_image_arr[mask_image_arr < 0.5] = 0
# mask_image_arr[mask_image_arr >= 0.5] = 1
# # Take the masked pixels from the repainted image and the unmasked pixels from the initial image
# unmasked_unchanged_image_arr = (1 - mask_image_arr) * init_image_arr + mask_image_arr * repainted_image_arr
# image = PIL.Image.fromarray(unmasked_unchanged_image_arr.astype("uint8"))
# #unmasked_unchanged_image.save("force_unmasked_unchanged.png")
# Img2img
elif scene.image_path:
print("Process: Image to image")
@@ -2370,6 +2456,7 @@ def register():
("audio", "Audio", "Generate Audio"),
],
default="image",
update=output_strips_updated,
)
bpy.types.Scene.speakers = bpy.props.EnumProperty(
