Spaces: Runtime error
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

model_base = "SG161222/Realistic_Vision_V5.1_noVAE"
lora_model_path = "Krebzonide/3a0s-w68r-4qw1-0"

pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16, use_safetensors=True)
pipe.unet.load_attn_procs(lora_model_path)  # working; comment out to test the base model on its own
#pipe.unet.load_attn_procs(lora_model_path, use_auth_token=True)  # test accessing a private model
pipe.to("cuda")
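# Sketch (assumption, not part of the original app): if the LoRA repo is private, one way to
# authenticate is to log in with a token stored in the Space secrets. This would have to run
# before the from_pretrained / load_attn_procs calls above.
# import os
# from huggingface_hub import login
# if os.environ.get("HF_TOKEN"):
#     login(token=os.environ["HF_TOKEN"])
#
# Optional (assumption): if the Space runs out of GPU memory generating 4 images per prompt,
# attention slicing trades some speed for lower VRAM use.
# pipe.enable_attention_slicing()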
css = """
.btn-green {
    background-image: linear-gradient(to bottom right, #86efac, #22c55e) !important;
    border-color: #22c55e !important;
    color: #166534 !important;
}
.btn-green:hover {
    background-image: linear-gradient(to bottom right, #86efac, #86efac) !important;
}
.btn-red {
    background: linear-gradient(to bottom right, #fda4af, #fb7185) !important;
    border-color: #fb7185 !important;
    color: #9f1239 !important;
}
.btn-red:hover {
    background: linear-gradient(to bottom right, #fda4af, #fda4af) !important;
}
/* dark mode */
.dark .btn-green {
    background-image: linear-gradient(to bottom right, #047857, #065f46) !important;
    border-color: #047857 !important;
    color: #ffffff !important;
}
.dark .btn-green:hover {
    background-image: linear-gradient(to bottom right, #047857, #047857) !important;
}
.dark .btn-red {
    background: linear-gradient(to bottom right, #be123c, #9f1239) !important;
    border-color: #be123c !important;
    color: #ffffff !important;
}
.dark .btn-red:hover {
    background: linear-gradient(to bottom right, #be123c, #be123c) !important;
}
"""
def generate(prompt, neg_prompt, samp_steps, guide_scale, lora_scale):
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=int(samp_steps),  # sliders can return floats; the scheduler expects an int
        guidance_scale=guide_scale,
        cross_attention_kwargs={"scale": lora_scale},  # scales the LoRA contribution
        num_images_per_prompt=4,
    ).images
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]
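# Quick sanity check outside the UI (hypothetical prompt and settings), handy when the Space
# page only reports "Runtime error":
# test_images = generate("portrait photo, natural light", "", 30, 6.0, 0.5)
# test_images[0][0].save("sample_0.png")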
with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt", value="lowres, bad anatomy, bad hands, cropped, worst quality, disfigured, deformed, extra limbs, asian, filter, render")
        submit_btn = gr.Button("Generate", variant="primary", min_width=96)  # min_width takes an int (pixels), not "96px"
        gallery = gr.Gallery(label="Generated images")
        with gr.Row():
            samp_steps = gr.Slider(1, 100, value=30, step=1, label="Sampling steps")
            guide_scale = gr.Slider(1, 10, value=6, step=0.5, label="Guidance scale")
            lora_scale = gr.Slider(0, 1, value=0.5, step=0.01, label="LoRA power")
    submit_btn.click(generate, [prompt, negative_prompt, samp_steps, guide_scale, lora_scale], [gallery], queue=True)

demo.queue(1)
demo.launch(debug=True)
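# Note (assumption about the Gradio version in use): on Gradio 3.x, demo.queue(1) sets
# concurrency_count=1, i.e. one generation at a time; later releases renamed this parameter,
# so spelling it out (and pinning the gradio version in requirements.txt) keeps the intent clear:
# demo.queue(concurrency_count=1)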