Update app.py
app.py CHANGED
@@ -4,6 +4,9 @@ from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
 from huggingface_hub import hf_hub_download
 import spaces
 from PIL import Image
+import os
+
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
 
 # Constants
 base = "stabilityai/stable-diffusion-xl-base-1.0"
@@ -62,7 +65,7 @@ examples = [
 
 # Gradio Interface
 
-with gr.Blocks(css=CSS) as demo:
+with gr.Blocks(css=CSS, theme="soft") as demo:
     gr.HTML("<h1><center>Adobe DMD2🦖</center></h1>")
     gr.HTML("<p><center><a href='https://huggingface.co/tianweiy/DMD2'>DMD2</a> text-to-image generation</center></p>")
     with gr.Group():
@@ -76,7 +79,7 @@ with gr.Blocks(css=CSS) as demo:
         inputs=prompt,
         outputs=img,
         fn=generate_image,
-        cache_examples=
+        cache_examples=CACHE_EXAMPLES,
     )
 
     prompt.submit(fn=generate_image,
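For reference, below is a minimal, self-contained sketch of how the new CACHE_EXAMPLES flag typically gates example caching in a Gradio Examples block. The generate_image stub, the examples list, and the component layout are assumptions standing in for the Space's real code, of which the diff only shows fragments; only the CACHE_EXAMPLES expression and the cache_examples= wiring come from the change itself.

import os

import gradio as gr
import torch
from PIL import Image


# Same gating logic as the diff: cache example outputs only when a GPU is
# available and caching has not been disabled via the CACHE_EXAMPLES env var.
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"


def generate_image(prompt: str) -> Image.Image:
    # Stand-in for the Space's real diffusion pipeline call (not part of the diff).
    return Image.new("RGB", (512, 512))


examples = ["a T-Rex wearing a lab coat"]  # hypothetical example prompt

with gr.Blocks(theme="soft") as demo:
    prompt = gr.Textbox(label="Prompt")
    img = gr.Image(label="Generated image")
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=img,
        fn=generate_image,
        cache_examples=CACHE_EXAMPLES,  # pre-compute example outputs only when enabled
    )
    prompt.submit(fn=generate_image, inputs=prompt, outputs=img)

if __name__ == "__main__":
    demo.launch()

Gating on torch.cuda.is_available() keeps the Space from running the (slow) pipeline at startup on CPU-only hardware, while the environment variable lets caching be switched off without editing the code.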