
Fix: Proteus

pull/101/head
tin2tin authored 9 months ago, committed by GitHub
commit 2c9cbd8c19
1 changed file: __init__.py (51 changed lines)
@@ -3776,44 +3776,21 @@ class SEQUENCER_OT_generate_image(Operator):
             pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
         elif image_model_card == "dataautogpt3/ProteusV0.3":
-            from diffusers import StableDiffusionXLPipeline
-            # from diffusers import AutoencoderKL
-            # vae = AutoencoderKL.from_pretrained(
-            #     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
-            # )
-            pipe = StableDiffusionXLPipeline.from_single_file(
+            from diffusers import StableDiffusionXLPipeline, KDPM2AncestralDiscreteScheduler
+            from diffusers import AutoencoderKL
+            vae = AutoencoderKL.from_pretrained(
+                "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
+            )
+            pipe = StableDiffusionXLPipeline.from_pretrained(
                 "dataautogpt3/ProteusV0.3",
-                #vae=vae,
+                vae=vae,
                 torch_dtype=torch.float16,
                 #variant="fp16",
             )
-            # from diffusers import DPMSolverMultistepScheduler
-            # pipe.scheduler = DPMSolverMultistepScheduler.from_config(
-            #     pipe.scheduler.config
-            # )
+            pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
             if low_vram():
                 pipe.enable_model_cpu_offload()
             else:
                 pipe.to(gfx_device)
-            # # Load VAE component
-            # vae = AutoencoderKL.from_pretrained(
-            #     "madebyollin/sdxl-vae-fp16-fix",
-            #     torch_dtype=torch.float16
-            # )
-            # Configure the pipeline
-            #pipe = StableDiffusionXLPipeline.from_pretrained(
-            # pipe = AutoPipelineForText2Image.from_pretrained(
-            #     "dataautogpt3/ProteusV0.2",
-            #     #vae=vae,
-            #     torch_dtype=torch.float16,
-            #     local_files_only=local_files_only,
-            # )
-            #pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
         elif image_model_card == "stabilityai/stable-cascade":
             import torch
             from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline
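For reference outside Blender, here is a minimal standalone sketch of the load path this hunk introduces. It mirrors the committed code, but it hard-codes "cuda" in place of the add-on's low_vram()/gfx_device handling and omits local_files_only, so treat it as an approximation rather than the add-on's exact code path.

import torch
from diffusers import (
    AutoencoderKL,
    KDPM2AncestralDiscreteScheduler,
    StableDiffusionXLPipeline,
)

# fp16-safe SDXL VAE that the commit wires into the pipeline
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)

# Load ProteusV0.3 from its Hugging Face repo (from_pretrained) instead of from_single_file
pipe = StableDiffusionXLPipeline.from_pretrained(
    "dataautogpt3/ProteusV0.3",
    vae=vae,
    torch_dtype=torch.float16,
)

# Switch to the KDPM2 ancestral scheduler, as in the diff above
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Stand-in for the add-on's low_vram() / gfx_device branching
pipe.to("cuda")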
@@ -4333,6 +4310,18 @@ class SEQUENCER_OT_generate_image(Operator):
             ).images[0]
             decoder = None
+        elif image_model_card == "dataautogpt3/ProteusV0.3":
+            image = pipe(
+                # prompt_embeds=prompt,  # for compel - long prompts
+                prompt,
+                negative_prompt=negative_prompt,
+                num_inference_steps=image_num_inference_steps,
+                guidance_scale=image_num_guidance,
+                height=y,
+                width=x,
+                generator=generator,
+            ).images[0]
         # Inpaint
         elif do_inpaint:
             print("Process: Inpaint")
