import gradio as gr
import numpy as np
import random
import json
import spaces
from diffusers import DiffusionPipeline
import torch
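# Load the Qwen-Image text-to-image pipeline once at startup; bfloat16 halves
# memory use relative to float32, and inference runs on the GPU.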
model_name = "Qwen/Qwen-Image"
pipe = DiffusionPipeline.from_pretrained(model_name, torch_dtype=torch.bfloat16)
pipe.to('cuda')
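# Largest 32-bit signed integer; upper bound for the seed slider.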
MAX_SEED = np.iinfo(np.int32).max
#MAX_IMAGE_SIZE = 1440
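# Example prompts surfaced in the gr.Examples gallery below.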
with open("examples.json") as f:
    examples = json.load(f)
# (1664, 928), (1472, 1140), (1328, 1328)
def get_image_size(aspect_ratio):
    """Map an aspect-ratio label to (width, height), capped at 1920 px."""
    if aspect_ratio == "1:1":
        return 1920, 1920
    elif aspect_ratio == "16:9":
        return 1920, 1080
    elif aspect_ratio == "9:16":
        return 1080, 1920
    elif aspect_ratio == "4:3":
        return 1920, 1440
    elif aspect_ratio == "3:4":
        return 1440, 1920
    else:
        return 640, 640
# Request a ZeroGPU slot for up to 60 s per call (Hugging Face Spaces).
@spaces.GPU(duration=60)
def infer_diffusers(
    prompt,
    negative_prompt=" ",
    seed=42,
    randomize_seed=False,
    aspect_ratio="16:9",
    guidance_scale=4,
    num_inference_steps=50,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    width, height = get_image_size(aspect_ratio)
    print("Generating for prompt:", prompt)
    # Pass the UI settings through instead of hard-coded values so the
    # steps, guidance-scale, and seed controls actually take effect.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        true_cfg_scale=guidance_scale,
        generator=torch.Generator(device="cuda").manual_seed(seed),
    ).images[0]
    # image.save("example.png")
    return image, seed
css = """
#col-container {
margin: 0 auto;
max-width: 1024px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        # gr.Markdown('<div style="text-align: center;"><a href="https://huggingface.co/Qwen/Qwen-Image"><img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_logo.png" width="400"/></a></div>')
        gr.Markdown('<img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_logo.png" alt="Qwen-Image logo" width="400" style="display: block; margin: 0 auto;">')
        gr.Markdown("[Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series. Try it on [Qwen Chat](https://chat.qwen.ai/), or [download the model](https://huggingface.co/Qwen/Qwen-Image) to run locally with ComfyUI or diffusers.")
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0, variant="primary")
        result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=True,
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                aspect_ratio = gr.Radio(
                    label="Image size (ratio, max dim 1920)",
                    choices=["1:1", "16:9", "9:16", "4:3", "3:4"],
                    value="16:9",
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=7.5,
                    step=0.1,
                    value=4.0,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=35,
                )
        gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer_diffusers, cache_examples=False, cache_mode="lazy")
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer_diffusers,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            aspect_ratio,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )
if __name__ == "__main__":
    demo.launch()