Commit 69f6fc2
Parent(s): db33e16

add local run script

Files changed:
- run_k_diffusion.py +38 -0
- run_local.py +18 -0
run_k_diffusion.py
ADDED
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+#from diffusers import DiffusionPipeline, StableDiffusionPipeline, KDPM2DiscreteScheduler, KDPM2AncestralDiscreteScheduler, HeunDiscreteScheduler
+from diffusers import DiffusionPipeline, StableDiffusionPipeline, HeunDiscreteScheduler
+import torch
+import os
+
+seed = 33
+inference_steps = 25
+
+old_pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="sd_text2img_k_diffusion")
+old_pipe = old_pipe.to("cuda")
+
+for prompt in ["astronaut riding horse", "whale falling from sky", "magical forest", "highly photorealistic picture of johnny depp"]:
+    # for sampler in ["sample_dpm_2_ancestral", "sample_dpm_2", "sample_heun"]:
+    # for sampler in ["heun", "sample_dpm_2_ancestral", "sample_dpm_2"]:
+    for sampler in ["sample_heun"]:
+        old_pipe.set_sampler(sampler)
+        torch.manual_seed(0)
+        image = old_pipe(prompt, num_inference_steps=inference_steps).images[0]
+        folder = f"a_{'_'.join(prompt.split())}"
+        os.makedirs(f"/home/patrick_huggingface_co/images/{folder}", exist_ok=True)
+        image.save(f"/home/patrick_huggingface_co/images/{folder}/{sampler}.png")
+
+        pipe = StableDiffusionPipeline(**old_pipe.components)
+        pipe = pipe.to("cuda")
+
+        # if sampler == "sample_dpm_2":
+        #     pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)
+        # elif sampler == "sample_dpm_2_ancestral":
+        #     pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+        if sampler == "sample_heun":
+            pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
+
+        torch.manual_seed(0)
+        image = pipe(prompt, num_inference_steps=inference_steps).images[0]
+
+        image.save(f"/home/patrick_huggingface_co/images/{folder}/hf_{sampler}.png")
+    break
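
Note: run_k_diffusion.py writes one image per prompt from the k-diffusion community pipeline ({sampler}.png) and one from the ported HeunDiscreteScheduler (hf_{sampler}.png) into the same folder. A minimal sketch of how those two outputs could be compared numerically is given below; the folder path, the chosen prompt, and the use of numpy/PIL are assumptions, not part of the commit.

#!/usr/bin/env python3
# Hypothetical comparison helper (not part of this commit): load the two PNGs
# written by run_k_diffusion.py for one prompt and report the largest pixel difference.
import numpy as np
from PIL import Image

folder = "/home/patrick_huggingface_co/images/a_astronaut_riding_horse"  # assumed output folder
k_diffusion_image = np.asarray(Image.open(f"{folder}/sample_heun.png"), dtype=np.float32)
diffusers_image = np.asarray(Image.open(f"{folder}/hf_sample_heun.png"), dtype=np.float32)

# With the same seed and an equivalent scheduler, the two images should be (near-)identical.
print("max abs pixel diff:", np.abs(k_diffusion_image - diffusers_image).max())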
run_local.py
ADDED
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+from diffusers import DiffusionPipeline
+import torch
+import os
+
+import sys
+
+local_path = sys.argv[1]
+local_path = os.path.abspath(local_path)
+
+prompts = ["astronaut on mars", "Barack Obama smiling with a big grin"]
+
+pipe = DiffusionPipeline.from_pretrained(local_path, torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
+
+for prompt in prompts:
+    image = pipe(prompt).images[0]
+    image.save(f"/home/patrick_huggingface_co/images/a_{'_'.join(prompt.split())}.png")
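
Note: run_local.py samples without a fixed seed, so repeated runs against the same local checkpoint produce different images. A small sketch of a seeded variant is shown below, assuming the checkpoint path is still passed as the first command-line argument; the seed value and output filename are assumptions, not part of the commit.

#!/usr/bin/env python3
# Hypothetical seeded variant (not part of this commit): pass an explicit
# torch.Generator so the same checkpoint and prompt always produce the same image.
import sys
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(sys.argv[1], torch_dtype=torch.float16).to("cuda")
generator = torch.Generator(device="cuda").manual_seed(33)  # assumed seed
image = pipe("astronaut on mars", generator=generator).images[0]
image.save("astronaut_on_mars.png")  # assumed output path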