Hugging Face Spaces — Space status: Runtime error.
Commit 548031b (parent: 3e255d1): "Update app.py"
File changed: app.py
|
@@ -2,7 +2,7 @@ import torch
|
|
| 2 |
from diffusers import StableDiffusionPipeline
|
| 3 |
import gradio as gr
|
| 4 |
|
| 5 |
-
model_base = "
|
| 6 |
lora_model_path = "Krebzonide/3a0s-w68r-4qw1-0"
|
| 7 |
|
| 8 |
pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16, use_safetensors=True)
|
|
@@ -42,8 +42,15 @@ css = """
|
|
| 42 |
.dark .btn-red:hover {background: linear-gradient(to bottom right, #be123c, #be123c) !important;}
|
| 43 |
"""
|
| 44 |
|
| 45 |
-
def generate(prompt, neg_prompt):
|
| 46 |
-
images = pipe(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
return [(img, f"Image {i+1}") for i, img in enumerate(images)]
|
| 48 |
|
| 49 |
|
|
@@ -53,8 +60,12 @@ with gr.Blocks(css=css) as demo:
|
|
| 53 |
negative_prompt = gr.Textbox(label="Negative Prompt", value="lowres, bad anatomy, bad hands, cropped, worst quality, disfigured, deformed, extra limbs, asian, filter, render")
|
| 54 |
submit_btn = gr.Button("Generate", variant="primary", min_width="96px")
|
| 55 |
gallery = gr.Gallery(label="Generated images")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
|
| 57 |
-
submit_btn.click(generate, [prompt, negative_prompt], [gallery], queue=True)
|
| 58 |
|
| 59 |
demo.queue(1)
|
| 60 |
demo.launch(debug=True)
|
|
|
|
| 2 |
from diffusers import StableDiffusionPipeline
|
| 3 |
import gradio as gr
|
| 4 |
|
| 5 |
+
# --- model configuration -----------------------------------------------------
# Base checkpoint and the LoRA adapter layered on top of it.
model_base = "SG161222/Realistic_Vision_V5.1_noVAE"
lora_model_path = "Krebzonide/3a0s-w68r-4qw1-0"

# Load the Stable Diffusion pipeline in half precision from safetensors weights.
pipe = StableDiffusionPipeline.from_pretrained(
    model_base,
    torch_dtype=torch.float16,
    use_safetensors=True,
)
|
|
|
|
| 42 |
.dark .btn-red:hover {background: linear-gradient(to bottom right, #be123c, #be123c) !important;}
|
| 43 |
"""
|
| 44 |
|
| 45 |
+
def generate(prompt, neg_prompt, samp_steps, guide_scale, lora_scale, num_images=4):
    """Run the Stable Diffusion pipeline and return a gallery-ready image list.

    Args:
        prompt: Positive text prompt.
        neg_prompt: Negative text prompt.
        samp_steps: Number of denoising steps. Gradio sliders may deliver a
            float, so the value is coerced to ``int`` before use.
        guide_scale: Classifier-free guidance scale.
        lora_scale: LoRA weight passed via ``cross_attention_kwargs``.
        num_images: Images to generate per call. Defaults to 4, matching the
            previously hard-coded value, so existing callers are unaffected.

    Returns:
        A list of ``(PIL.Image, caption)`` tuples suitable for ``gr.Gallery``.
    """
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        # Schedulers require an integer step count; sliders can emit floats.
        num_inference_steps=int(samp_steps),
        guidance_scale=guide_scale,
        cross_attention_kwargs={"scale": lora_scale},
        num_images_per_prompt=num_images,
    ).images
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]
|
| 55 |
|
| 56 |
|
|
|
|
| 60 |
negative_prompt = gr.Textbox(label="Negative Prompt", value="lowres, bad anatomy, bad hands, cropped, worst quality, disfigured, deformed, extra limbs, asian, filter, render")
|
| 61 |
submit_btn = gr.Button("Generate", variant="primary", min_width="96px")
|
| 62 |
gallery = gr.Gallery(label="Generated images")
|
| 63 |
+
with gr.Row():
|
| 64 |
+
samp_steps = gr.Slider(1, 100, value=30, step=1, label="Sampling steps")
|
| 65 |
+
guide_scale = gr.Slider(1, 10, value=6, step=0.5, label="Guidance scale")
|
| 66 |
+
lora_scale = gr.Slider(0, 1, value=0.5, step=0.01, label="LoRA power")
|
| 67 |
|
| 68 |
+
submit_btn.click(generate, [prompt, negative_prompt, samp_steps, guide_scale, lora_scale], [gallery], queue=True)
|
| 69 |
|
| 70 |
# Serialize requests through a single-worker queue, then start the app
# with debug logging enabled.
demo.queue(1)
demo.launch(debug=True)
|