# Hugging Face Space (runs on a T4 GPU): generate a quick SDXL-Turbo draft,
# then refine it with a DreamShaper-8 img2img pass behind a Gradio UI.
from diffusers import AutoPipelineForText2Image, StableDiffusionImg2ImgPipeline
from PIL import Image
import gradio as gr
import random
import torch
css = """
.btn-green {
background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
border-color: #22c55e !important;
color: #166534 !important;
}
.btn-green:hover {
background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""

def generate(prompt, turbo_steps, samp_steps, seed, negative_prompt="", progress=gr.Progress(track_tqdm=True)):
    print("prompt =", prompt)
    print("negative prompt =", negative_prompt)
    # A seed of -1 means "random": pick one here so it can be reported back to the UI.
    if seed < 0:
        seed = random.randint(1, 999999)
    # Stage 1: fast SDXL-Turbo draft (guidance_scale must stay at 0.0 for the turbo model).
    image = txt2img(
        prompt,
        num_inference_steps=turbo_steps,
        guidance_scale=0.0,
        generator=torch.manual_seed(seed),
    ).images[0]
    # Stage 2: Lanczos-upscale the draft to 1024x1024, then re-render it with the
    # DreamShaper-8 img2img pipeline (strength=1 gives the refiner maximum freedom
    # to repaint the upscaled draft).
    upscaled_image = image.resize((1024, 1024), Image.LANCZOS)
    final_image = img2img(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=upscaled_image,
        num_inference_steps=samp_steps,
        guidance_scale=5,
        strength=1,
        generator=torch.manual_seed(seed),
    ).images[0]
    return [final_image], seed
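
# Note on reproducibility: both stages draw from the same torch.manual_seed(seed)
# generator and the chosen seed is returned to the UI, so copying the "Last Seed"
# value back into the "Seed" box reproduces the previous result (given the same
# prompts and step counts).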

def set_base_models():
    # SDXL-Turbo: a distilled model that produces a usable 512x512 draft in 1-4 steps.
    txt2img = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sdxl-turbo",
        torch_dtype=torch.float16,
        variant="fp16",
    )
    txt2img.to("cuda")
    # DreamShaper-8 (SD 1.5-based), used as an img2img refiner for the upscaled draft.
    img2img = StableDiffusionImg2ImgPipeline.from_pretrained(
        "Lykon/dreamshaper-8",
        torch_dtype=torch.float16,
        variant="fp16",
        safety_checker=None,
    )
    img2img.to("cuda")
    return txt2img, img2img
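
# Optional (not part of the original Space): if both pipelines do not fit in GPU
# memory at once, diffusers' enable_model_cpu_offload() (requires the `accelerate`
# package) can replace the .to("cuda") calls above, at some speed cost. A minimal sketch:
#
#     txt2img.enable_model_cpu_offload()
#     img2img.enable_model_cpu_offload()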

with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        with gr.Row():
            turbo_steps = gr.Slider(1, 4, value=1, step=1, label="Turbo steps")
            sampling_steps = gr.Slider(1, 6, value=3, step=1, label="Refiner steps")
            seed = gr.Number(label="Seed", value=-1, minimum=-1, precision=0)
            lastSeed = gr.Number(label="Last Seed", value=-1, interactive=False)
        gallery = gr.Gallery(show_label=False, preview=True, container=False, height=1100)
        # Gradio injects the progress tracker itself, so only the five UI inputs are wired here.
        submit_btn.click(generate, [prompt, turbo_steps, sampling_steps, seed, negative_prompt], [gallery, lastSeed], queue=True)

# Load both pipelines once at startup; generate() uses them as module-level globals.
txt2img, img2img = set_base_models()
demo.launch(debug=True)
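
# The two-stage pipeline can also be exercised without the UI (hypothetical usage:
# comment out demo.launch above, or call this from a notebook after loading the models):
#
#     images, used_seed = generate("a lighthouse at sunset, oil painting", 1, 3, -1)
#     images[0].save("preview.png")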