import gradio as gr
import torch
from diffusers import LCMScheduler, AutoPipelineForText2Image


def run(checkpoint, prompt):
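    """Generate a 512x512 image from `prompt` using `checkpoint` plus the LCM LoRA.

    The pipeline is rebuilt on every call, so the Checkpoint box can point at any
    Stable Diffusion 1.5 compatible model; on CPU the reload adds noticeable latency.
    """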
    model_id = checkpoint
    adapter_id = "latent-consistency/lcm-lora-sdv1-5"

    # Load the selected checkpoint on CPU; float32 is used since half precision is poorly supported on CPU
    pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float32, safety_checker=None).to("cpu")
    # The LCM LoRA requires the LCM scheduler
    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

    # Load and fuse the LCM LoRA into the base weights
    pipe.load_lora_weights(adapter_id)
    pipe.fuse_lora()

    # With LCM, classifier-free guidance is disabled by passing guidance_scale=0,
    # and a handful of inference steps (here 4) is sufficient
    image = pipe(prompt=prompt, num_inference_steps=4, guidance_scale=0, width=512, height=512).images[0]
    return image

with gr.Blocks() as demo:
    input_checkpoint = gr.Text(value="Lykon/dreamshaper-8", label="Checkpoint")
    input_prompt = gr.Text(value="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k", label="Prompt")
    out = gr.Image(type="pil")
    btn = gr.Button("Run")
    btn.click(fn=run, inputs=[input_checkpoint, input_prompt], outputs=out)

demo.launch()