Gemini899 committed
Commit 06363d9 · verified · 1 Parent(s): 0395deb

Update app.py

Files changed (1):
  app.py +120 -59
app.py CHANGED
@@ -2,8 +2,8 @@ import logging
 import random
 import warnings
 import os
-import io
-import base64
+import io # Ensure io is imported
+import base64 # Ensure base64 is imported
 import gradio as gr
 import numpy as np
 import spaces
@@ -21,10 +21,10 @@ warnings.filterwarnings("ignore")
 css = """
 #col-container {
     margin: 0 auto;
-    max-width: 512px; /* Increased max-width slightly for better layout */
+    max-width: 512px;
 }
 .gradio-container {
-    max-width: 900px !important; /* Control overall container width */
+    max-width: 900px !important;
     margin: auto !important;
 }
 """
@@ -75,20 +75,67 @@ try:
     pipe.to(device)
     logging.info("Pipeline loaded and moved to device.")

+    # --- OPTIMIZATION: Attempt torch.compile (PyTorch 2.0+) ---
+    if device == "cuda" and hasattr(torch, "compile"):
+        logging.info("Attempting to compile the pipeline transformer with torch.compile...")
+        try:
+            # Modes: 'default', 'reduce-overhead', 'max-autotune'
+            # 'max-autotune' might give best runtime performance but takes longer to compile initially
+            pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
+            # You could potentially compile other components too, but start with the transformer
+            # pipe.controlnet = torch.compile(pipe.controlnet, mode="max-autotune", fullgraph=True)
+            logging.info("Pipeline transformer compiled successfully.")
+            # Optional: Add a dummy inference run here to trigger compilation during startup
+            # This avoids the compilation delay on the *first* user request.
+            # try:
+            #     logging.info("Running dummy inference to finalize compilation...")
+            #     _ = pipe(prompt="", control_image=Image.new('RGB', (64, 64)), height=64*4, width=64*4, num_inference_steps=1, guidance_scale=0.0, output_type="latent")
+            #     logging.info("Dummy inference completed.")
+            # except Exception as compile_run_e:
+            #     logging.warning(f"Dummy inference after compile failed (might be ok): {compile_run_e}")
+        except Exception as e:
+            logging.warning(f"torch.compile failed: {e}. Running unoptimized.")
+    else:
+        logging.info("torch.compile not available or not on CUDA, skipping compilation.")
+
+    # --- OPTIMIZATION: Consider xformers ---
+    # If torch.compile doesn't provide enough speedup or isn't available,
+    # you can try installing and enabling xformers.
+    # 1. Add `xformers` to your requirements.txt or install it (`pip install xformers`).
+    # 2. Uncomment and add this code block *before* the torch.compile block:
+    # try:
+    #     import xformers
+    #     pipe.enable_xformers_memory_efficient_attention()
+    #     logging.info("Enabled xformers memory efficient attention.")
+    # except ImportError:
+    #     logging.info("xformers not installed. Skipping.")
+    # except Exception as e:
+    #     logging.warning(f"Could not enable xformers: {e}.")
+
+    logging.info("Pipeline ready for inference.")
+
+
 except Exception as e:
-    logging.error(f"FATAL: Error during model loading: {e}", exc_info=True)
-    # --- Simplified Error Handling for Brevity ---
-    print(f"FATAL ERROR DURING MODEL LOAD: {e}")
-    raise SystemExit(f"Model loading failed: {e}")
+    logging.error(f"FATAL: Error during model loading or setup: {e}", exc_info=True)
+    # Simple error display if Gradio Blocks object isn't ready
+    print(f"FATAL ERROR DURING MODEL LOAD/SETUP: {e}")
+    # You might want to use the Gradio error block structure here if `gr` is available
+    # with gr.Blocks() as demo_error: ... etc.
+    raise SystemExit(f"Model loading/setup failed: {e}")


 # --- Constants ---
 MAX_SEED = 2**32 - 1
-MAX_PIXEL_BUDGET = 1280 * 1280
-# --- NEW: Define the internal factor for quality ---
+MAX_PIXEL_BUDGET = 1280 * 1280 # Max pixels for the *intermediate* high-res image
+
+# --- SPEED VS QUALITY ---
+# INTERNAL_PROCESSING_FACTOR: Determines the scale the diffusion model *targets*
+# Higher values (like 4) aim for more detail generation but are slower.
+# Lower values (like 3 or 2) will be faster but might produce less detail enhancement.
+# You were aiming for 4x quality, so we keep it at 4. Reducing this is a direct speedup trade-off.
 INTERNAL_PROCESSING_FACTOR = 4

-# --- Image Processing Function (Modified) ---
+# --- Image Processing Function (Uses INTERNAL_PROCESSING_FACTOR for budgeting) ---
 def process_input(input_image):
     """Processes the input image for the pipeline.
     The pixel budget check uses the fixed INTERNAL_PROCESSING_FACTOR."""
@@ -126,9 +173,8 @@ def process_input(input_image):
         input_scale_factor = (max_input_pixels / current_input_pixels) ** 0.5
         input_w_resized = int(w * input_scale_factor)
         input_h_resized = int(h * input_scale_factor)
-        # Ensure minimum size of 8x8
-        input_w_resized = max(8, input_w_resized)
-        input_h_resized = max(8, input_h_resized)
+        input_w_resized = max(8, input_w_resized) # Ensure min size
+        input_h_resized = max(8, input_h_resized) # Ensure min size
         intermediate_w = input_w_resized * INTERNAL_PROCESSING_FACTOR
         intermediate_h = input_h_resized * INTERNAL_PROCESSING_FACTOR

@@ -154,14 +200,14 @@ def process_input(input_image):

     return input_image_to_process, w_original, h_original, was_resized

-# --- Inference Function (Modified) ---
-@spaces.GPU(duration=90)
+# --- Inference Function (Runs pipeline at Internal Factor, resizes to Final Factor) ---
+@spaces.GPU(duration=70) # Keep GPU decorator
 def infer(
     seed,
     randomize_seed,
     input_image,
-    num_inference_steps,
-    final_upscale_factor, # Renamed for clarity internally
+    num_inference_steps, # Reducing this is a direct way to speed up (quality trade-off)
+    final_upscale_factor, # User's desired final output scale
     controlnet_conditioning_scale,
     progress=gr.Progress(track_tqdm=True),
 ):
@@ -170,7 +216,7 @@ def infer(
         gr.Error("Pipeline not loaded. Cannot perform inference.")
         return [[None, None], 0, None]

-    original_input_pil = input_image # Keep ref for slider
+    original_input_pil = input_image

     if input_image is None:
         gr.Warning("Please provide an input image.")
@@ -180,12 +226,15 @@ def infer(
         seed = random.randint(0, MAX_SEED)
     seed = int(seed)

-    # Ensure final_upscale_factor is an integer
+    # Ensure factors are integers
     final_upscale_factor = int(final_upscale_factor)
+    num_inference_steps = int(num_inference_steps) # Ensure steps are int
+
+    # Sanity check: final factor shouldn't exceed internal processing factor in this workflow
     if final_upscale_factor > INTERNAL_PROCESSING_FACTOR:
-        gr.Warning(f"Selected upscale factor ({final_upscale_factor}x) is larger than internal processing factor ({INTERNAL_PROCESSING_FACTOR}x). "
-                   f"Results might not be optimal. Clamping final factor to {INTERNAL_PROCESSING_FACTOR}x for this run.")
-        final_upscale_factor = INTERNAL_PROCESSING_FACTOR # Prevent upscaling *beyond* internal processing
+        gr.Warning(f"Selected final upscale factor ({final_upscale_factor}x) is larger than internal processing factor ({INTERNAL_PROCESSING_FACTOR}x). "
+                   f"Clamping final factor to {INTERNAL_PROCESSING_FACTOR}x.")
+        final_upscale_factor = INTERNAL_PROCESSING_FACTOR

     logging.info(
         f"Starting inference with seed: {seed}, "
@@ -195,7 +244,6 @@ def infer(
     )

     try:
-        # process_input now implicitly uses INTERNAL_PROCESSING_FACTOR for budget checks
         processed_input_image, w_original, h_original, was_input_resized = process_input(
             input_image
         )
@@ -210,20 +258,18 @@ def infer(
     control_image_w = w_proc * INTERNAL_PROCESSING_FACTOR
     control_image_h = h_proc * INTERNAL_PROCESSING_FACTOR

-    # Clamp control image size if it *still* exceeds budget (e.g., due to rounding or small inputs)
-    # This check should technically be redundant if process_input worked correctly, but good failsafe.
-    if control_image_w * control_image_h > MAX_PIXEL_BUDGET * 1.05: # Add a small margin
+    # Failsafe clamp if budget is still exceeded (should be rare if process_input works)
+    if control_image_w * control_image_h > MAX_PIXEL_BUDGET * 1.05: # Small margin
         scale_factor = (MAX_PIXEL_BUDGET / (control_image_w * control_image_h)) ** 0.5
         control_image_w = max(8, int(control_image_w * scale_factor))
         control_image_h = max(8, int(control_image_h * scale_factor))
         control_image_w = max(8, control_image_w - control_image_w % 8)
         control_image_h = max(8, control_image_h - control_image_h % 8)
-        logging.warning(f"Control image dimensions clamped to {control_image_w}x{control_image_h} post-processing to fit budget.")
+        logging.warning(f"Control image dimensions clamped post-processing to {control_image_w}x{control_image_h} to fit budget.")
         gr.Warning(f"Control image dimensions further clamped to {control_image_w}x{control_image_h}.")

     logging.info(f"Resizing processed input {w_proc}x{h_proc} to control image {control_image_w}x{control_image_h} (using {INTERNAL_PROCESSING_FACTOR}x factor)")
     try:
-        # Use the processed input image for control, resized to the intermediate size
         control_image = processed_input_image.resize((control_image_w, control_image_h), Image.Resampling.LANCZOS)
     except ValueError as resize_err:
         logging.error(f"Error resizing processed input to control image: {resize_err}")
@@ -233,26 +279,28 @@ def infer(
     generator = torch.Generator(device=device).manual_seed(seed)

     # --- Run the Pipeline at INTERNAL_PROCESSING_FACTOR scale ---
-    gr.Info(f"Generating intermediate image at {INTERNAL_PROCESSING_FACTOR}x quality ({control_image_w}x{control_image_h})...")
-    logging.info(f"Running pipeline with size: {control_image_w}x{control_image_h}")
+    gr.Info(f"Generating intermediate image at {INTERNAL_PROCESSING_FACTOR}x quality ({control_image_w}x{control_image_h}) with {num_inference_steps} steps...")
+    logging.info(f"Running pipeline with size: {control_image_w}x{control_image_h}, steps: {num_inference_steps}")
     intermediate_result_image = None
     try:
         with torch.inference_mode():
             intermediate_result_image = pipe(
                 prompt="",
-                control_image=control_image, # Control image IS the intermediate size
+                control_image=control_image,
                 controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-                num_inference_steps=int(num_inference_steps),
+                num_inference_steps=num_inference_steps, # Use the integer value
                 guidance_scale=0.0,
-                height=control_image_h, # Target height for the model
-                width=control_image_w, # Target width for the model
+                height=control_image_h,
+                width=control_image_w,
                 generator=generator,
+                # Add progress callback if desired, requires tqdm
+                # callback_on_step_end = ...
             ).images[0]
         logging.info(f"Pipeline execution finished. Intermediate image size: {intermediate_result_image.size if intermediate_result_image else 'None'}")

     except torch.cuda.OutOfMemoryError as oom_error:
         logging.error(f"CUDA Out of Memory during pipeline execution: {oom_error}", exc_info=True)
-        gr.Error(f"Ran out of GPU memory trying to generate intermediate {control_image_w}x{control_image_h}.")
+        gr.Error(f"Ran out of GPU memory trying to generate intermediate {control_image_w}x{control_image_h}. Try reducing the Final Upscale Factor or using a smaller input image.")
         if device == 'cuda': torch.cuda.empty_cache()
         return [[original_input_pil, None], seed, None]
     except Exception as e:
@@ -266,61 +314,52 @@ def infer(
         return [[original_input_pil, None], seed, None]

     # --- Final Resizing to User's Desired Scale ---
-    # Calculate final target dimensions based on ORIGINAL input size and FINAL upscale factor
-    # If input was resized, we scale the *processed* input size instead, as original is unknown
     if was_input_resized:
-        # Base final size on the downscaled input that was processed
         final_target_w = w_proc * final_upscale_factor
         final_target_h = h_proc * final_upscale_factor
         logging.warning(f"Input was downscaled. Final size based on processed input: {w_proc}x{h_proc} * {final_upscale_factor}x -> {final_target_w}x{final_target_h}")
         gr.Info(f"Input was downscaled. Final size target approx {final_target_w}x{final_target_h}.")
     else:
-        # Base final size on the original input size
         final_target_w = w_original * final_upscale_factor
         final_target_h = h_original * final_upscale_factor

     final_result_image = intermediate_result_image
     current_w, current_h = intermediate_result_image.size

-    # Only resize if the intermediate size doesn't match the final desired size
     if (current_w, current_h) != (final_target_w, final_target_h):
         logging.info(f"Resizing intermediate image from {current_w}x{current_h} to final target size {final_target_w}x{final_target_h} (using {final_upscale_factor}x factor)")
         gr.Info(f"Resizing from intermediate {current_w}x{current_h} to final {final_target_w}x{final_target_h}...")
-
         try:
             if final_target_w > 0 and final_target_h > 0:
-                # Use LANCZOS for downsampling, it's high quality
                 final_result_image = intermediate_result_image.resize((final_target_w, final_target_h), Image.Resampling.LANCZOS)
             else:
                 gr.Warning(f"Invalid final target dimensions ({final_target_w}x{final_target_h}). Skipping final resize.")
-                final_result_image = intermediate_result_image # Keep intermediate
+                final_result_image = intermediate_result_image
         except Exception as resize_e:
             logging.error(f"Could not resize intermediate image to final size: {resize_e}")
             gr.Warning(f"Failed to resize to final {final_upscale_factor}x. Returning intermediate {INTERNAL_PROCESSING_FACTOR}x result ({current_w}x{current_h}).")
-            final_result_image = intermediate_result_image # Fallback to intermediate
+            final_result_image = intermediate_result_image
     else:
         logging.info(f"Intermediate size {current_w}x{current_h} matches final target size. No final resize needed.")

-
     logging.info(f"Inference successful. Final output size: {final_result_image.size}")

-    # --- Base64 Encoding (No change needed here) ---
+    # --- Base64 Encoding ---
     base64_string = None
     if final_result_image:
         try:
             buffered = io.BytesIO()
-            final_result_image.save(buffered, format="WEBP", quality=90)
+            final_result_image.save(buffered, format="WEBP", quality=90) # WEBP is usually smaller
             img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
             base64_string = f"data:image/webp;base64,{img_str}"
             logging.info(f"Encoded result image to Base64 string (length: {len(base64_string)} chars).")
         except Exception as enc_err:
             logging.error(f"Failed to encode result image to Base64: {enc_err}", exc_info=True)

-    # Return original input and the FINAL processed image
     return [[original_input_pil, final_result_image], seed, base64_string]


-# --- Gradio Interface (Minor Text Updates) ---
+# --- Gradio Interface ---
 with gr.Blocks(css=css, theme=gr.themes.Soft(), title="Flux Upscaler Demo") as demo:
     gr.Markdown(
         f"""
@@ -328,11 +367,16 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(), title="Flux Upscaler Demo") as d
     Upscale images using the [Flux.1-dev Upscaler ControlNet](https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler) model based on [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev).
     Currently running on **{power_device}**. Hardware provided by Hugging Face 🤗.

-    **How it works:** This demo uses an internal processing scale of **{INTERNAL_PROCESSING_FACTOR}x** for potentially higher detail generation,
+    **How it works:** This demo uses an internal processing scale of **{INTERNAL_PROCESSING_FACTOR}x** for potentially higher detail generation (slower),
     then resizes the result to your selected **Final Upscale Factor**. This aims for {INTERNAL_PROCESSING_FACTOR}x quality at your desired output resolution.

+    **To Speed Up:**
+    1. **Reduce `Inference Steps`:** Fewer steps = faster generation (potential quality decrease). Try 10-15 instead of 25.
+    2. **(Code Change Needed):** Reduce `INTERNAL_PROCESSING_FACTOR` in the script (e.g., to 3). This directly reduces computation but may lower detail enhancement.
+    3. `torch.compile` has been enabled (if using PyTorch 2.0+ on GPU) which should provide some speedup after the first run.
+
     *Note*: Intermediate processing resolution is limited to approximately **{MAX_PIXEL_BUDGET/1_000_000:.1f} megapixels** ({int(MAX_PIXEL_BUDGET**0.5)}x{int(MAX_PIXEL_BUDGET**0.5)}) due to resource constraints.
-    The *diffusion process time* is mainly determined by this intermediate size, not the final output size.
+    The *diffusion process time* is mainly determined by this intermediate size and the number of steps.
     """
     )

@@ -346,9 +390,25 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(), title="Flux Upscaler Demo") as d
         )
         with gr.Column(scale=1):
             # Renamed slider label for clarity
-            upscale_factor_slider = gr.Slider(label="Final Upscale Factor", info=f"Output size relative to input. Internal processing uses {INTERNAL_PROCESSING_FACTOR}x quality.", minimum=1, maximum=INTERNAL_PROCESSING_FACTOR, step=1, value=2) # Default to 2x, max is now INTERNAL_PROCESSING_FACTOR
-            num_inference_steps = gr.Slider(label="Inference Steps", minimum=4, maximum=50, step=1, value=15)
-            controlnet_conditioning_scale = gr.Slider(label="ControlNet Conditioning Scale", info="Strength of ControlNet guidance", minimum=0.0, maximum=1.5, step=0.05, value=0.6)
+            upscale_factor_slider = gr.Slider(
+                label="Final Upscale Factor",
+                info=f"Output size relative to input. Internal processing uses {INTERNAL_PROCESSING_FACTOR}x quality.",
+                minimum=1,
+                maximum=INTERNAL_PROCESSING_FACTOR, # Max limited by internal factor
+                step=1,
+                value=min(2, INTERNAL_PROCESSING_FACTOR) # Default to 2x or internal factor if smaller
+            )
+            # --- SPEED OPTIMIZATION: Reduce default steps ---
+            num_inference_steps = gr.Slider(
+                label="Inference Steps",
+                info="Fewer steps = faster, more steps = potentially higher quality. Try 10-15 for speed.",
+                minimum=4, maximum=50, step=1, value=15 # Defaulting to 15 instead of 25
+            )
+            controlnet_conditioning_scale = gr.Slider(
+                label="ControlNet Conditioning Scale",
+                info="Strength of ControlNet guidance.",
+                minimum=0.0, maximum=1.5, step=0.05, value=0.6
+            )
         with gr.Row():
             seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
             randomize_seed = gr.Checkbox(label="Random", value=True, scale=0, min_width=80)
@@ -373,14 +433,14 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(), title="Flux Upscaler Demo") as d

     if example_paths:
         gr.Examples(
-            # Examples now use the new default of 2x for the final factor
-            examples=[ [path, 2, 15, 0.6, random.randint(0,MAX_SEED), True] for path in example_paths ],
+            # Examples use the new defaults: final factor 2x, steps 15
+            examples=[ [path, min(2, INTERNAL_PROCESSING_FACTOR), 15, 0.6, random.randint(0,MAX_SEED), True] for path in example_paths ],
             # Ensure inputs match the order expected by `infer` now
             inputs=[ input_im, upscale_factor_slider, num_inference_steps, controlnet_conditioning_scale, seed, randomize_seed, ],
             outputs=[result_slider, output_seed], # Base64 output ignored by Examples
             fn=infer,
             cache_examples="lazy",
-            label="Example Images (Click to Run with 2x Output)",
+            label=f"Example Images (Click to Run with {min(2, INTERNAL_PROCESSING_FACTOR)}x Output, 15 Steps)",
             run_on_click=True
         )
     else:
@@ -401,8 +461,9 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(), title="Flux Upscaler Demo") as d
             controlnet_conditioning_scale,
         ],
         outputs=[result_slider, output_seed, api_base64_output],
-        api_name="upscale"
+        api_name="upscale" # Keep API name
     )

 # Launch the Gradio app
+# Consider increasing queue timeout if compilation adds significant startup time
 demo.queue(max_size=10).launch(share=False, show_api=True)
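
For readers tracing the pixel-budget arithmetic this commit applies in two places (inside process_input and as the failsafe before building the control image), the sketch below mirrors that clamp in isolation, assuming the same MAX_PIXEL_BUDGET. The helper name clamp_to_budget is illustrative only and does not exist in app.py.

    # Minimal standalone sketch of the budget clamp used in the diff above.
    MAX_PIXEL_BUDGET = 1280 * 1280  # ~1.64 megapixels for the intermediate image

    def clamp_to_budget(w: int, h: int) -> tuple[int, int]:
        """Uniformly scale (w, h) down until w*h fits the budget, snapping to multiples of 8."""
        if w * h <= MAX_PIXEL_BUDGET:
            return w, h
        scale = (MAX_PIXEL_BUDGET / (w * h)) ** 0.5  # uniform scale preserves aspect ratio
        w = max(8, int(w * scale))
        h = max(8, int(h * scale))
        # Snap down to multiples of 8, as the clamp in the diff does
        w = max(8, w - w % 8)
        h = max(8, h - h % 8)
        return w, h

    # Example: a 512x512 input at the 4x internal factor targets 2048x2048 (~4.2 MP),
    # which exceeds the ~1.64 MP budget and is clamped to 1280x1280.
    print(clamp_to_budget(2048, 2048))  # -> (1280, 1280)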
 
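Because the click handler above keeps api_name="upscale" and the app launches with show_api=True, the endpoint can also be called programmatically. Below is a hedged sketch using gradio_client: the Space id "user/flux-upscaler-demo" is a placeholder, the positional argument order is inferred from the infer signature in the diff (seed, randomize_seed, input_image, num_inference_steps, final_upscale_factor, controlnet_conditioning_scale), and older gradio_client releases accept a plain file path where handle_file is used here.

    from gradio_client import Client, handle_file

    client = Client("user/flux-upscaler-demo")  # placeholder Space id
    result = client.predict(
        42,                        # seed
        False,                     # randomize_seed (False so the seed above is honored)
        handle_file("input.png"),  # input_image
        15,                        # num_inference_steps
        2,                         # final_upscale_factor
        0.6,                       # controlnet_conditioning_scale
        api_name="/upscale",
    )
    # The outputs mirror outputs=[result_slider, output_seed, api_base64_output]
    slider_images, used_seed, b64_data_url = result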
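
The third API output, api_base64_output, is a data URL ("data:image/webp;base64,...") matching the WEBP encoding added in infer. Here is a minimal sketch for decoding it back into a PIL image on the client side; b64_data_url is assumed to come from a call like the one sketched above.

    import base64
    import io

    from PIL import Image

    def decode_data_url(data_url: str) -> Image.Image:
        # Split off the "data:image/webp;base64," header; the remainder is plain base64
        _header, payload = data_url.split(",", 1)
        return Image.open(io.BytesIO(base64.b64decode(payload)))

    upscaled = decode_data_url(b64_data_url)
    upscaled.save("upscaled.webp")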