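# Gradio demo for a Hugging Face Space (the Space page reports it running on a T4 GPU):
# generates images with a Stable Diffusion 1.5-family base checkpoint plus a personal LoRA.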
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

# Base checkpoints (uncomment one):
#model_base = "SG161222/Realistic_Vision_V5.1_noVAE"  # realistic people
model_base = "Justin-Choo/epiCRealism-Natural_Sin_RC1_VAE"  # realistic people
#model_base = "Lykon/DreamShaper"  # unrealistic people
#model_base = "runwayml/stable-diffusion-v1-5"  # base
#model_base = "Krebzonide/LazyMixPlus"  # nsfw people

# LoRA weights (uncomment one):
#lora_model_path = "Krebzonide/LoRA-CH-0"  # mecjh - Corey H, trained on epiCRealism
lora_model_path = "Krebzonide/LoRA-CH-1"  # mecjh - Corey H, trained on epiCRealism
#lora_model_path = "Krebzonide/LoRA-EM1"  # gfemti - Emily M, trained on sd-V1-5
#lora_model_path = "Krebzonide/LoRA-YX1"  # uwspyx - Professor Xing, trained on Realistic_Vision

# Load the base pipeline in half precision, attach the LoRA attention weights, and move to the GPU.
pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16, use_safetensors=True)
pipe.unet.load_attn_procs(lora_model_path)
pipe.to("cuda")
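# Note (assumption, not part of the original Space): on newer diffusers releases,
# pipe.unet.load_attn_procs is deprecated in favor of the pipeline-level LoRA loader,
# so the equivalent call there would be:
#   pipe.load_lora_weights(lora_model_path)
# The cross_attention_kwargs={"scale": ...} argument used in generate() below should
# still scale the LoRA contribution with either loading path.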
css = """
.btn-green {
  background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
  border-color: #22c55e !important;
  color: #166534 !important;
}
.btn-green:hover {
  background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""
# Run the pipeline and return (image, caption) pairs for the gallery.
def generate(prompt, neg_prompt, samp_steps, guide_scale, lora_scale, progress=gr.Progress(track_tqdm=True)):
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=samp_steps,
        guidance_scale=guide_scale,
        cross_attention_kwargs={"scale": lora_scale},  # scales the LoRA contribution
        num_images_per_prompt=4,
    ).images
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]
# Gradio UI: prompt inputs, a gallery of four results, and sliders for the sampler settings.
with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt", value="lowres, bad anatomy, bad hands, cropped, worst quality, disfigured, deformed, extra limbs, asian, filter, render")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        gallery = gr.Gallery(label="Generated images", height=700)
        with gr.Row():
            samp_steps = gr.Slider(1, 100, value=25, step=1, label="Sampling steps")
            guide_scale = gr.Slider(1, 10, value=6, step=0.5, label="Guidance scale")
            lora_scale = gr.Slider(0, 1, value=0.5, step=0.01, label="LoRA power")
    submit_btn.click(generate, [prompt, negative_prompt, samp_steps, guide_scale, lora_scale], [gallery], queue=True)

demo.queue(1)  # process one request at a time
demo.launch(debug=True)
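# Assumed dependencies for running this file outside of Spaces (inferred from the libraries the
# code uses, not taken from the original Space's requirements.txt; transformers and accelerate
# are pulled in by the StableDiffusionPipeline rather than imported directly):
#   torch
#   diffusers
#   transformers
#   accelerate
#   safetensors
#   gradio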