import random

import gradio as gr
import numpy as np
import spaces
import torch
from PIL import Image
from diffusers import QwenImageEditPipeline
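# `optimize_pipeline_` comes from a local `optimization` module bundled with
# this Space; its source is not part of this file. The fallback below is a
# minimal, assumed sketch (not the module's real implementation): ahead-of-time
# optimization here plausibly means compiling the denoiser and running one
# throwaway forward pass so the first user request does not pay the
# compilation cost.
try:
    from optimization import optimize_pipeline_
except ImportError:
    def optimize_pipeline_(pipeline, image, prompt):
        # Hypothetical fallback: compile the transformer, then warm it up with
        # a single-step call on dummy inputs.
        pipeline.transformer = torch.compile(pipeline.transformer)
        pipeline(image, prompt=prompt, num_inference_steps=1)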

# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=dtype).to(device)

# --- Ahead-of-time compilation ---
# Compile/warm up once with dummy inputs matching the expected resolution.
optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt='prompt')

# --- UI Constants and Helpers ---
MAX_SEED = np.iinfo(np.int32).max


# --- Main Inference Function (with hardcoded negative prompt) ---
@spaces.GPU(duration=120)
def infer(
    image,
    prompt,
    seed=42,
    randomize_seed=False,
    true_guidance_scale=4.0,
    num_inference_steps=8,
    progress=gr.Progress(track_tqdm=True),
):
    """
    Generates an edited image using the local Qwen-Image-Edit diffusers pipeline.
    """
    # Hardcode the negative prompt as requested
    negative_prompt = " "

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Set up the generator for reproducibility
    generator = torch.Generator(device=device).manual_seed(seed)

    print(f"Calling pipeline with prompt: '{prompt}'")
    print(f"Negative Prompt: '{negative_prompt}'")
    print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}")

    # Generate the image
    image = pipe(
        image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
    ).images[0]

    return image, seed


# --- Examples and UI Layout ---
examples = []

css = """
#col-container {
    margin: 0 auto;
    max-width: 1024px;
}
#edit_text{margin-top: -62px !important}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML('Qwen-Image Logo')
        gr.HTML('Edit', elem_id="edit_text")
        gr.Markdown(
            "[Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series. "
            "Try on [Qwen Chat](https://chat.qwen.ai/), or "
            "[download model](https://huggingface.co/Qwen/Qwen-Image-Edit) to run locally "
            "with ComfyUI or diffusers."
        )

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input Image", show_label=False, type="pil")
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    placeholder="describe the edit instruction",
                    container=False,
                )
                run_button = gr.Button("Edit!", variant="primary")
            result = gr.Image(label="Result", show_label=False, type="pil")

        with gr.Accordion("Advanced Settings", open=False):
            # Negative prompt UI element is removed here
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                true_guidance_scale = gr.Slider(
                    label="True guidance scale",
                    minimum=1.0,
                    maximum=10.0,
                    step=0.1,
                    value=1.0,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=8,
                )

        # gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            input_image,
            prompt,
            # negative_prompt is no longer an input from the UI
            seed,
            randomize_seed,
            true_guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    demo.launch()
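
# To exercise the handler without launching the UI (an assumed usage sketch;
# "input.png" is a placeholder path, and `spaces.GPU` is a no-op outside a
# Hugging Face Space):
#
#     edited, used_seed = infer(Image.open("input.png"), "Change the sign text to 'OPEN'")
#     edited.save("output.png")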