Hugging Face Spaces — Running on Zero (ZeroGPU)
RageshAntony committed: "added check_app.py to check progress"
Browse files — check_app.py (+87 lines, new file)
check_app.py
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
from diffusers import FluxPipeline, StableDiffusion3Pipeline
|
3 |
+
from PIL import Image
|
4 |
+
from io import BytesIO
|
5 |
+
import gradio as gr
|
6 |
+
|
7 |
+
# Initialize pipelines
|
8 |
+
stable_diffusion_pipe = StableDiffusion3Pipeline.from_pretrained(
|
9 |
+
"stabilityai/stable-diffusion-3.5-large", torch_dtype=torch.bfloat16
|
10 |
+
).to("cuda")
|
11 |
+
|
12 |
+
flux_pipe = FluxPipeline.from_pretrained(
|
13 |
+
"black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
|
14 |
+
).to("cuda")
|
15 |
+
|
# Function to generate images with progress
def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, seed=None, progress=gr.Progress()):
    """Generate one image with `pipe` and report per-step progress to Gradio.

    Args:
        pipe: A loaded ``StableDiffusion3Pipeline`` or ``FluxPipeline``.
        prompt: Text prompt to render.
        num_steps: Number of denoising inference steps.
        guidance_scale: CFG scale, forwarded to the SD3 pipeline only.
        seed: Optional int; when given, a CPU ``torch.Generator`` is seeded
            for reproducible output.
        progress: Gradio progress tracker (injected by Gradio when this is
            reached from an event handler).

    Returns:
        PIL.Image.Image: the generated image.

    Raises:
        TypeError: if ``pipe`` is not one of the supported pipeline classes
            (the original fell through and crashed with UnboundLocalError).
    """
    generator = None
    if seed is not None:
        generator = torch.Generator("cpu").manual_seed(seed)
    print("Start generating")

    # SD3 and Flux pipelines removed the legacy `callback` kwarg; per-step
    # hooks must use `callback_on_step_end`, which receives
    # (pipeline, step, timestep, callback_kwargs) and MUST return the
    # callback_kwargs dict.
    def callback_on_step_end(pipeline, step, timestep, callback_kwargs):
        cur_prg = step / num_steps
        print(f"Progressing {cur_prg} ")
        progress(cur_prg, desc=f"Step {step}/{num_steps}")
        return callback_kwargs

    if isinstance(pipe, StableDiffusion3Pipeline):
        image = pipe(
            prompt,
            num_inference_steps=num_steps,
            guidance_scale=guidance_scale,
            generator=generator,  # honor `seed` for SD3 too (was silently ignored)
            callback_on_step_end=callback_on_step_end,
        ).images[0]
    elif isinstance(pipe, FluxPipeline):
        image = pipe(
            prompt,
            num_inference_steps=num_steps,
            generator=generator,
            output_type="pil",
            callback_on_step_end=callback_on_step_end,
        ).images[0]
    else:
        # Fail loudly instead of referencing an unbound `image` below.
        raise TypeError(f"Unsupported pipeline type: {type(pipe).__name__}")
    return image
44 |
+
|
# Gradio application
def main():
    """Build and launch the two-tab Gradio demo (FLUX / StableDiffusion3)."""

    def tab1_logic(prompt_text, progress=gr.Progress()):
        # Gradio injects a live Progress tracker only via a default parameter
        # on the event function; instantiating gr.Progress() manually inside
        # the handler (as the original did) leaves it disconnected from the UI.
        num_steps = 30
        seed = 42
        print(f"Start tab {prompt_text}")
        image = generate_image_with_progress(
            flux_pipe, prompt_text, num_steps=num_steps, seed=seed, progress=progress
        )
        return f"Seed: {seed}", image

    def tab2_logic(prompt_text, progress=gr.Progress()):
        num_steps = 28
        guidance_scale = 3.5
        print(f"Start tab {prompt_text}")
        image = generate_image_with_progress(
            stable_diffusion_pipe, prompt_text, num_steps=num_steps, guidance_scale=guidance_scale, progress=progress
        )
        return "Seed: None", image

    with gr.Blocks() as app:
        gr.Markdown("# Multiple Model Image Generation with Progress Bar")

        prompt_text = gr.Textbox(label="Enter prompt")

        with gr.Tab("FLUX"):
            button_1 = gr.Button("Run FLUX")
            output_1 = gr.Textbox(label="Status")
            img_1 = gr.Image(label="FLUX", height=300)
            button_1.click(fn=tab1_logic, inputs=[prompt_text], outputs=[output_1, img_1])

        with gr.Tab("StableDiffusion3"):
            button_2 = gr.Button("Run StableDiffusion3")
            output_2 = gr.Textbox(label="Status")
            img_2 = gr.Image(label="StableDiffusion3", height=300)
            button_2.click(fn=tab2_logic, inputs=[prompt_text], outputs=[output_2, img_2])

    app.launch()
85 |
+
|
86 |
+
if __name__ == "__main__":
|
87 |
+
main()
|