Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -17,11 +17,12 @@ DEFAULT_INFERENCE_STEPS = 1
|
|
17 |
# Device and model setup
|
18 |
dtype = torch.float16
|
19 |
pipe = FluxWithCFGPipeline.from_pretrained(
|
20 |
-
"
|
21 |
)
|
22 |
-
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
|
23 |
pipe.load_lora_weights("ostris/OpenFLUX.1", weight_name="openflux1-v0.1.0-fast-lora.safetensors", adapter_name="fast")
|
24 |
pipe.set_adapters("fast")
|
|
|
25 |
pipe.to("cuda")
|
26 |
# pipe.transformer.to(memory_format=torch.channels_last)
|
27 |
# pipe.transformer = torch.compile(
|
|
|
17 |
# Device and model setup
|
18 |
dtype = torch.float16
|
19 |
pipe = FluxWithCFGPipeline.from_pretrained(
|
20 |
+
"black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
|
21 |
)
|
22 |
+
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
|
23 |
pipe.load_lora_weights("ostris/OpenFLUX.1", weight_name="openflux1-v0.1.0-fast-lora.safetensors", adapter_name="fast")
|
24 |
pipe.set_adapters("fast")
|
25 |
+
pipe.fuse_lora(adapter_names=["fast"], lora_scale=1.0)
|
26 |
pipe.to("cuda")
|
27 |
# pipe.transformer.to(memory_format=torch.channels_last)
|
28 |
# pipe.transformer = torch.compile(
|