import spaces
import torch
from inspect import signature
from diffusers import (
    FluxPipeline,
    StableDiffusion3Pipeline,
    PixArtSigmaPipeline,
    SanaPipeline,
    AuraFlowPipeline,
    Kandinsky3Pipeline,
    HunyuanDiTPipeline,
    LuminaText2ImgPipeline,
    AutoPipelineForText2Image,
)
import gradio as gr

# Local directory for cached model weights (path is environment-specific).
cache_dir = '/workspace/hf_cache'

MODEL_CONFIGS = {
    "FLUX": {
        "repo_id": "black-forest-labs/FLUX.1-dev",
        "pipeline_class": FluxPipeline,
        "cache_dir": cache_dir,
    },
    "AuraFlow": {
        "repo_id": "fal/AuraFlow",
        "pipeline_class": AuraFlowPipeline,
        "cache_dir": cache_dir,
    },
    "Lumina": {
        "repo_id": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
        "pipeline_class": LuminaText2ImgPipeline,
        "cache_dir": cache_dir,
    },
}
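
# Sketch: the other pipelines imported above could be registered the same way.
# The PixArt-Sigma repo id below is an assumption, not verified here; uncomment
# and adjust to enable the extra tab.
# MODEL_CONFIGS["PixArtSigma"] = {
#     "repo_id": "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
#     "pipeline_class": PixArtSigmaPipeline,
#     "cache_dir": cache_dir,
# }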

def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, seed=None, progress=gr.Progress(track_tqdm=True)):
    """Run a text-to-image pipeline and report per-step progress to Gradio."""
    generator = None
    if seed is not None:
        generator = torch.Generator("cuda").manual_seed(seed)

    def callback(pipe, step_index, timestep, callback_kwargs):
        # Signature expected by diffusers' callback_on_step_end hook.
        print(f"callback => {step_index}, {timestep}")
        if step_index is None:
            step_index = 0
        # step_index runs from 0 to num_steps - 1, so offset by 1 to reach 100%.
        cur_prg = (step_index + 1) / num_steps
        progress(cur_prg, desc=f"Step {step_index + 1}/{num_steps}")
        return callback_kwargs
| print(f"START GENR ") | |
| if hasattr(pipe, "guidance_scale") and hasattr(pipe, "callback_on_step_end"): | |
| print("has callback_on_step_end and has guidance_scale") | |
| image = pipe( | |
| prompt, | |
| num_inference_steps=num_steps, | |
| generator=generator, | |
| guidance_scale=guidance_scale, | |
| callback_on_step_end=callback, | |
| ).images[0] | |
| elif not hasattr(pipe, "callback_on_step_end") and hasattr(pipe, "guidance_scale"): | |
| print("NO callback_on_step_end and has guidance_scale") | |
| image = pipe( | |
| prompt, | |
| num_inference_steps=num_steps, | |
| guidance_scale=guidance_scale, | |
| generator=generator, | |
| ).images[0] | |
| elif hasattr(pipe, "callback_on_step_end") and not hasattr(pipe, "guidance_scale"): | |
| print(" has callback_on_step_end and NO guidance_scale") | |
| image = pipe( | |
| prompt, | |
| num_inference_steps=num_steps, | |
| generator=generator, | |
| callback_on_step_end=callback | |
| ).images[0] | |
| elif not hasattr(pipe, "callback_on_step_end") and not hasattr(pipe, "guidance_scale"): | |
| print("NO callback_on_step_end and NO guidance_scale") | |
| image = pipe( | |
| prompt, | |
| num_inference_steps=num_steps, | |
| generator=generator, | |
| ).images[0] | |
| return image | |
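
# Example (sketch): calling the helper directly with an already-loaded
# pipeline, assuming a CUDA GPU is available. The repo id matches the FLUX
# entry above; the prompt and parameter values are illustrative only.
# pipe = FluxPipeline.from_pretrained(
#     "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
# ).to("cuda")
# image = generate_image_with_progress(
#     pipe, "a red fox at dawn", num_steps=20, guidance_scale=3.5, seed=0
# )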

# The spaces.GPU decorator requests a GPU for the duration of this call; it is
# needed when the app runs on a ZeroGPU Space (an assumption here, but it is
# the usual reason the `spaces` package is imported).
@spaces.GPU
def create_pipeline_logic(prompt_text, model_name):
    print(f"Starting {model_name}")
    progress = gr.Progress()
    num_steps = 30
    guidance_scale = 7.5  # example guidance scale; adjust per model
    seed = 42

    if model_name == "Kandinsky":
        # Kandinsky 3 is loaded via AutoPipelineForText2Image rather than a
        # dedicated pipeline class. This branch is only reachable once a
        # "Kandinsky" entry is added to MODEL_CONFIGS above.
        print("Kandinsky special case")
        pipe = AutoPipelineForText2Image.from_pretrained(
            "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16
        ).to("cuda")
    else:
        # Look up the config only for non-special models, so a special-cased
        # name no longer raises a KeyError before its branch is reached.
        config = MODEL_CONFIGS[model_name]
        pipe = config["pipeline_class"].from_pretrained(
            config["repo_id"],
            # cache_dir=config["cache_dir"],
            torch_dtype=torch.bfloat16,
        ).to("cuda")

    print(f"Pipeline call signature: {signature(pipe)}")
    image = generate_image_with_progress(
        pipe, prompt_text, num_steps=num_steps, guidance_scale=guidance_scale, seed=seed, progress=progress
    )
    return f"Seed: {seed}", image

def main():
    with gr.Blocks() as app:
        gr.Markdown("# Dynamic Multiple Model Image Generation")
        prompt_text = gr.Textbox(label="Enter prompt")
        for model_name in MODEL_CONFIGS:
            with gr.Tab(model_name):
                button = gr.Button(f"Run {model_name}")
                output = gr.Textbox(label="Status")
                img = gr.Image(label=model_name, height=300)
                # A hidden Text component carries this tab's model name to the handler.
                button.click(
                    fn=create_pipeline_logic,
                    inputs=[prompt_text, gr.Text(value=model_name, visible=False)],
                    outputs=[output, img],
                )
    app.launch()


if __name__ == "__main__":
    main()