@@ -2572,7 +2572,6 @@ class SEQUENCER_OT_generate_image(Operator):
         if low_vram():
             # torch.cuda.set_per_process_memory_fraction(0.95) # 6 GB VRAM
             pipe.enable_model_cpu_offload()
-            # pipe.enable_vae_slicing()
             # pipe.enable_forward_chunking(chunk_size=1, dim=1)
         else:
             pipe.to("cuda")
@@ -2692,7 +2691,7 @@ class SEQUENCER_OT_generate_image(Operator):
         if low_vram():
             # torch.cuda.set_per_process_memory_fraction(0.95) # 6 GB VRAM
             pipe.enable_model_cpu_offload()
-            #pipe.enable_vae_slicing()
+            pipe.enable_vae_slicing()
             # pipe.enable_forward_chunking(chunk_size=1, dim=1)
         else:
             pipe.to("cuda")
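
For reference, both hunks toggle the same diffusers memory controls: enable_model_cpu_offload() keeps the weights on the CPU and streams sub-modules to the GPU only while they run (it requires accelerate and trades some speed for a much lower peak footprint), enable_vae_slicing() decodes latents in slices, and .to("cuda") keeps the whole pipeline resident when memory allows. A minimal sketch of that branch follows, assuming a stock diffusers pipeline; the low_vram() body and the model id are illustrative stand-ins, not the add-on's actual code:

import torch
from diffusers import DiffusionPipeline

def low_vram() -> bool:
    # Stand-in for the add-on's helper: treat anything under 8 GB of VRAM as "low".
    if not torch.cuda.is_available():
        return True
    return torch.cuda.get_device_properties(0).total_memory < 8 * 1024**3

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # example model id
    torch_dtype=torch.float16,
)

if low_vram():
    # Offload sub-modules to the CPU and move each to the GPU only while it runs.
    pipe.enable_model_cpu_offload()
    # Decode the latents slice by slice to lower peak VRAM during the VAE pass.
    pipe.enable_vae_slicing()
else:
    # Enough VRAM: keep the whole pipeline on the GPU.
    pipe.to("cuda")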