Switch to use 2-step unet checkpoint #1
by PeterL1n - opened

app.py CHANGED
@@ -18,7 +18,7 @@ from sfast.compilers.diffusion_pipeline_compiler import compile, CompilationConf
 BASE = "stabilityai/stable-diffusion-xl-base-1.0"
 REPO = "ByteDance/SDXL-Lightning"
 # 1-step
-CHECKPOINT = "
+CHECKPOINT = "sdxl_lightning_2step_unet.safetensors"
 
 # {
 #     "1-Step": ["sdxl_lightning_1step_unet_x0.safetensors", 1],
@@ -38,12 +38,11 @@ torch_dtype = torch.float16
 print(f"TORCH_COMPILE: {TORCH_COMPILE}")
 print(f"device: {device}")
 
-
+unet = UNet2DConditionModel.from_config(BASE, subfolder="unet").to("cuda", torch.float16)
+unet.load_state_dict(load_file(hf_hub_download(REPO, CHECKPOINT), device="cuda"))
 pipe = StableDiffusionXLPipeline.from_pretrained(
-    BASE, torch_dtype=torch.float16, variant="fp16"
+    BASE, unet=unet, torch_dtype=torch.float16, variant="fp16"
 ).to("cuda")
-pipe.load_lora_weights(hf_hub_download(REPO, CHECKPOINT))
-pipe.fuse_lora()
 
 # Ensure sampler uses "trailing" timesteps.
 pipe.scheduler = EulerDiscreteScheduler.from_config(
@@ -133,23 +132,24 @@ with gr.Blocks(css=css) as demo:
 """## Running SDXL-Lightning with `diffusers`
 ```py
 import torch
-from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler
+from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
 from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file
 
 base = "stabilityai/stable-diffusion-xl-base-1.0"
 repo = "ByteDance/SDXL-Lightning"
-ckpt = "
+ckpt = "sdxl_lightning_2step_unet.safetensors" # Use the correct ckpt for your step setting!
 
 # Load model.
-
-
-pipe.
+unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
+unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
+pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
 
 # Ensure sampler uses "trailing" timesteps.
 pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
 
 # Ensure using the same inference steps as the loaded model and CFG set to 0.
-pipe("A girl smiling", num_inference_steps=
+pipe("A girl smiling", num_inference_steps=2, guidance_scale=0).images[0].save("output.png")
 ```
 """
 )
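For reference, here is a minimal sketch (not part of this PR) of how the new full-UNet loading path can be wrapped so the checkpoint filename and `num_inference_steps` stay in sync, in the spirit of the commented-out table in app.py. The `CHECKPOINTS` mapping and `load_pipeline` helper are illustrative names introduced here, not code from the Space; only the 2-step filename shown in the diff is used.

```py
# Illustrative sketch only: a hypothetical CHECKPOINTS table and load_pipeline helper
# built around the loading code introduced in this PR.
import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

BASE = "stabilityai/stable-diffusion-xl-base-1.0"
REPO = "ByteDance/SDXL-Lightning"

# Checkpoint file and its required step count, kept together so they cannot drift apart.
# The 1-step x0 checkpoint listed in app.py needs a different scheduler configuration,
# so only the 2-step entry from the diff is shown here.
CHECKPOINTS = {
    "2-Step": ("sdxl_lightning_2step_unet.safetensors", 2),
}

def load_pipeline(choice="2-Step"):
    ckpt, steps = CHECKPOINTS[choice]
    # Load the distilled UNet weights into a fresh SDXL UNet, then hand it to the pipeline.
    unet = UNet2DConditionModel.from_config(BASE, subfolder="unet").to("cuda", torch.float16)
    unet.load_state_dict(load_file(hf_hub_download(REPO, ckpt), device="cuda"))
    pipe = StableDiffusionXLPipeline.from_pretrained(
        BASE, unet=unet, torch_dtype=torch.float16, variant="fp16"
    ).to("cuda")
    # SDXL-Lightning expects "trailing" timestep spacing.
    pipe.scheduler = EulerDiscreteScheduler.from_config(
        pipe.scheduler.config, timestep_spacing="trailing"
    )
    return pipe, steps

pipe, steps = load_pipeline("2-Step")
pipe("A girl smiling", num_inference_steps=steps, guidance_scale=0).images[0].save("output.png")
```

This mirrors the change in the PR: the distilled 2-step weights now replace the base UNet outright instead of being applied via `load_lora_weights` and `fuse_lora`.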