import gradio as gr
from optimum.intel import OVStableDiffusionPipeline
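# Load the W8A8-quantized DreamShaper 8 LCM pipeline exported to OpenVINO, targeting the CPU device.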
model_id = "yujiepan/dreamshaper-8-lcm-openvino-w8a8"
pipeline = OVStableDiffusionPipeline.from_pretrained(model_id, device='CPU')
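# Note: sampling behavior is controlled by the pipeline's scheduler; this `sampler` attribute is
# likely not read by OVStableDiffusionPipeline.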
pipeline.sampler = "dpm++2s_a"
num_inference_steps = 28
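# Generate a single 512x512 image for the prompt; guidance_scale=1.0 effectively disables
# classifier-free guidance, which suits LCM-distilled models.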
def infer(prompt):
    image = pipeline(
        prompt=prompt,
        guidance_scale=1.0,
        num_inference_steps=num_inference_steps,
        width=512,
        height=512,
        num_images_per_prompt=1,
    ).images[0]
    return image
examples = [
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
"An astronaut riding a green horse",
"A delicious ceviche cheesecake slice",
]
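# Center the demo column and cap its width.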
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Demo : yujiepan/dreamshaper-8-lcm-openvino-w8a8 ⚡")
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)
        # with gr.Accordion("Advanced Settings", open=False):
        #     num_inference_steps = gr.Slider(
        #         label="Number of inference steps",
        #         minimum=1,
        #         maximum=50,
        #         step=1,
        #         value=28,
        #     )
        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[result],
        )
    run_button.click(
        fn=infer,
        # inputs=[prompt, num_inference_steps],
        inputs=[prompt],
        outputs=[result],
    )
demo.queue().launch(share=True)