Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -184,125 +184,104 @@ examples = [
 ]

 # --- Gradio UI ---
-with gr.Blocks(
-    gr.
-    …
-    gr.
-    …
-            placeholder="Describe the image you want to generate...",
-            lines=3,
-            show_label=False,
-            container=False,
-        )
-
-        with gr.Row():
-            generateBtn = gr.Button("🖼️ Generate Image", variant="primary", interactive=pipe is not None)
-            enhanceBtn = gr.Button(f"🚀 Enhance (Steps: {ENHANCE_STEPS})", interactive=pipe is not None) # Use fixed steps for enhance
-
-        realtime = gr.Checkbox(label="⚡ Realtime Generation", info="Generates image automatically as you type or adjust sliders (requires more GPU).", value=False, interactive=pipe is not None)
-
-        with gr.Accordion("Advanced Options", open=False):
-            with gr.Row():
-                seed = gr.Number(label="Seed", value=42, precision=0, interactive=pipe is not None) # Use precision=0 for integers
-                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True, interactive=pipe is not None)
-            with gr.Row():
-                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=DEFAULT_WIDTH, interactive=pipe is not None) # Increase step for faster adjustment
-                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=DEFAULT_HEIGHT, interactive=pipe is not None)
-            num_inference_steps = gr.Slider(
-                label="Inference Steps",
-                minimum=MIN_INFERENCE_STEPS,
-                maximum=MAX_INFERENCE_STEPS,
-                step=1,
-                value=DEFAULT_INFERENCE_STEPS,
-                info=f"Controls quality vs speed. Default: {DEFAULT_INFERENCE_STEPS}. Enhance uses {ENHANCE_STEPS}.",
-                interactive=pipe is not None
+with gr.Blocks() as demo:
+    with gr.Column(elem_id="app-container"):
+        gr.Markdown("# 🎨 Realtime FLUX Image Generator")
+        gr.Markdown("Generate stunning images in real-time with Modified Flux.Schnell pipeline.")
+        gr.Markdown("<span style='color: red;'>Note: Sometimes it stucks or stops generating images (I don't know why). In that situation just refresh the site.</span>")
+
+        with gr.Row():
+            with gr.Column(scale=2.5):
+                result = gr.Image(label="Generated Image", show_label=False, interactive=False)
+            with gr.Column(scale=1):
+                prompt = gr.Text(
+                    label="Prompt",
+                    placeholder="Describe the image you want to generate...",
+                    lines=3,
+                    show_label=False,
+                    container=False,
                 )
+                generateBtn = gr.Button("🖼️ Generate Image")
+                enhanceBtn = gr.Button("🚀 Enhance Image")
+
+        with gr.Column("Advanced Options"):
+            with gr.Row():
+                realtime = gr.Checkbox(label="Realtime Toggler", info="If TRUE then uses more GPU but create image in realtime.", value=False)
+                latency = gr.Text(label="Latency")
+            with gr.Row():
+                seed = gr.Number(label="Seed", value=42)
+                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+            with gr.Row():
+                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
+                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
+            num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)
+
+        with gr.Row():
+            gr.Markdown("### 🌟 Inspiration Gallery")
+        with gr.Row():
+            gr.Examples(
+                examples=examples,
+                fn=generate_image,
+                inputs=[prompt],
+                outputs=[result, seed, latency],
+                cache_examples="lazy"
+            )

-    …
-            cache_examples="lazy", # Use caching
-            run_on_click=True, # Ensure examples run when clicked
-            label="Example Prompts"
+    enhanceBtn.click(
+        fn=generate_image,
+        inputs=[prompt, seed, width, height],
+        outputs=[result, seed, latency],
+        show_progress="full",
+        queue=False,
+        concurrency_limit=None
     )

-    # --- Event Listeners ---
-
-    # Combine inputs needed for generate_image
-    gen_inputs = [prompt, seed, width, height, randomize_seed, num_inference_steps]
-    outputs = [result, seed, latency]
-
-    # Generate Button Click
     generateBtn.click(
         fn=generate_image,
-        inputs=
-        outputs=
-        show_progress="full",
-        api_name="
-        queue=
+        inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
+        outputs=[result, seed, latency],
+        show_progress="full",
+        api_name="RealtimeFlux",
+        queue=False
     )

-    …
+    def update_ui(realtime_enabled):
+        return {
+            prompt: gr.update(interactive=True),
+            generateBtn: gr.update(visible=not realtime_enabled)
+        }
+
+    realtime.change(
+        fn=update_ui,
+        inputs=[realtime],
+        outputs=[prompt, generateBtn],
+        queue=False,
+        concurrency_limit=None
     )
-    …
+
+    def realtime_generation(*args):
+        if args[0]: # If realtime is enabled
+            return next(generate_image(*args[1:]))
+
     prompt.submit(
         fn=generate_image,
-        inputs=
-        outputs=
+        inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
+        outputs=[result, seed, latency],
         show_progress="full",
-        queue=
+        queue=False,
+        concurrency_limit=None
     )

-    …
-        # Using .input for text allows updates while typing
-        # Using .change for sliders updates when released (default) or continuously if specified
-        event_type = "input" if isinstance(component, (gr.Textbox, gr.Number)) else "change"
-
-        getattr(component, event_type)(
-            fn=handle_realtime_update,
-            inputs=realtime_inputs,
-            outputs=outputs,
-            show_progress="hidden", # Hide progress for realtime updates
-            # queue=False essential for responsiveness & cancelling previous requests
-            # trigger_mode='throttle' with a small delay (e.g., 0.5s) can prevent excessive calls
-            # 'always_last' ensures only the latest input value triggers execution
-            queue=False,
-            trigger_mode="throttle",
-            throttle_delay=0.5 # Throttle updates slightly (e.g., every 500ms)
-            # trigger_mode="always_last", # Alternative: trigger only after user stops changing for a bit
+    for component in [prompt, width, height, num_inference_steps]:
+        component.input(
+            fn=realtime_generation,
+            inputs=[realtime, prompt, seed, width, height, randomize_seed, num_inference_steps],
+            outputs=[result, seed, latency],
+            show_progress="hidden",
+            trigger_mode="always_last",
+            queue=False,
+            concurrency_limit=None
         )

-#
-
-demo.queue().launch(debug=True) # Enable queue for better handling of multiple users/requests, add debug=True for more logs
+# Launch the app
+demo.launch()
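The heart of the new version is the realtime path: `.input` events on the prompt and the sliders call `realtime_generation`, which does nothing unless the checkbox is ticked and otherwise pulls one result out of `generate_image` with `next(...)`; that only works because `generate_image` is written as a generator yielding `(image, seed, latency)`. The sketch below reproduces that wiring as a self-contained app so the event plumbing can be tried without the Space. Everything not shown in the diff is an assumption made for illustration: `fake_generate_image` is a stand-in for the FLUX pipeline that just yields a solid-colour image, `run_once` is a hypothetical wrapper that keeps the click handler a plain function, and the layout is stripped down to the components the events touch.

import time

import gradio as gr
import numpy as np

MAX_IMAGE_SIZE = 1024
DEFAULT_WIDTH = 512
DEFAULT_HEIGHT = 512
DEFAULT_INFERENCE_STEPS = 2


def fake_generate_image(prompt, seed, width, height, randomize_seed=False, num_inference_steps=DEFAULT_INFERENCE_STEPS):
    # Stand-in for the Space's generate_image: a generator yielding
    # (image, seed, latency) to match outputs=[result, seed, latency].
    if randomize_seed:
        seed = int(np.random.randint(0, 2**31 - 1))
    start = time.time()
    rng = np.random.default_rng(int(seed))
    color = rng.integers(0, 256, size=3, dtype=np.uint8)
    image = np.full((int(height), int(width), 3), color, dtype=np.uint8)
    yield image, seed, f"{(time.time() - start) * 1000:.1f} ms"


def run_once(prompt, seed, width, height, randomize_seed, num_inference_steps):
    # Wrap the generator so the click handler itself is a plain function.
    return next(fake_generate_image(prompt, seed, width, height, randomize_seed, num_inference_steps))


def realtime_generation(realtime_enabled, *gen_args):
    # Only regenerate while the realtime checkbox is ticked; otherwise leave
    # all three outputs untouched.
    if realtime_enabled:
        return next(fake_generate_image(*gen_args))
    return gr.update(), gr.update(), gr.update()


def update_ui(realtime_enabled):
    # Hide the manual button while realtime mode is on.
    return gr.update(visible=not realtime_enabled)


with gr.Blocks() as demo:
    prompt = gr.Text(placeholder="Describe the image you want to generate...", lines=3, show_label=False)
    result = gr.Image(show_label=False, interactive=False)
    generateBtn = gr.Button("Generate Image")
    realtime = gr.Checkbox(label="Realtime Toggler", value=False)
    latency = gr.Text(label="Latency")
    seed = gr.Number(label="Seed", value=42)
    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
    width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
    height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
    num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)

    realtime.change(fn=update_ui, inputs=[realtime], outputs=[generateBtn], queue=False)

    generateBtn.click(
        fn=run_once,
        inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
        outputs=[result, seed, latency],
        show_progress="full",
    )

    # "always_last" drops intermediate events while a run is in flight, so only
    # the most recent keystroke or slider value triggers a regeneration.
    for component in [prompt, width, height, num_inference_steps]:
        component.input(
            fn=realtime_generation,
            inputs=[realtime, prompt, seed, width, height, randomize_seed, num_inference_steps],
            outputs=[result, seed, latency],
            show_progress="hidden",
            trigger_mode="always_last",
        )

if __name__ == "__main__":
    demo.launch()

Returning `gr.update()` no-ops when realtime is off avoids sending missing output values back to the UI, and wrapping the generator in `run_once` keeps the sketch independent of Gradio's streaming behaviour; both are choices made for this sketch and mirror the intent of the diff rather than its exact code.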