RageshAntony committed on
Commit 07aba0d · verified · 1 Parent(s): e113c61

new changes

Files changed (1):
  1. app.py +237 -158
app.py CHANGED
@@ -2,12 +2,10 @@ import gradio as gr
  import numpy as np
  import random
  import torch
- import torch.multiprocessing as mp
- from torch.cuda.amp import autocast
  from diffusers import (
      DiffusionPipeline, StableDiffusion3Pipeline, FluxPipeline, PixArtSigmaPipeline,
      AuraFlowPipeline, Kandinsky3Pipeline, HunyuanDiTPipeline,
-     LuminaText2ImgPipeline
+     LuminaText2ImgPipeline, SanaPipeline
  )
  import spaces
  import gc
@@ -20,108 +18,169 @@ import time
  import glob
  from datetime import datetime
  from PIL import Image
- from queue import Queue
- from concurrent.futures import ThreadPoolExecutor, as_completed
-
- from dataclasses import dataclass
- from typing import Optional, List
-
- @dataclass
- class MultiGPUConfig:
-     count: int = 2  # Number of GPUs to request
-     memory: int = 16  # Memory per GPU in GB
-     duration: int = 3600  # Duration in seconds
-
- class SpacesMultiGPU:
-     def __init__(self, config: Optional[MultiGPUConfig] = None):
-         self.config = config or MultiGPUConfig()
-
-     def __call__(self, func):
-         # Apply multiple GPU decorators
-         decorated_func = func
-         for gpu_idx in range(self.config.count):
-             decorated_func = spaces.GPU(
-                 device=gpu_idx,  # Specify which GPU to request
-                 memory=self.config.memory,
-                 duration=self.config.duration
-             )(decorated_func)
-         return decorated_func
-
- # Example usage in your generation code
- gpu_config = MultiGPUConfig(
-     count=2,  # Request 2 GPUs
-     duration=400  # 1 hour duration
- )
+
+ #import os
+ #cache_dir = '/workspace/hf_cache'
+

  # Constants
  MAX_SEED = np.iinfo(np.int32).max
  MAX_IMAGE_SIZE = 1024
- TORCH_DTYPE = torch.bfloat16
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ TORCH_DTYPE = torch.bfloat16 if torch.cuda.is_available() else torch.float32
  OUTPUT_DIR = "generated_images"
  os.makedirs(OUTPUT_DIR, exist_ok=True)

- # Get available GPU devices
- AVAILABLE_GPUS = list(range(torch.cuda.device_count()))
- print(f"Available GPUs: {AVAILABLE_GPUS}")
-
  # Model configurations
  MODEL_CONFIGS = {
      "FLUX": {
          "repo_id": "black-forest-labs/FLUX.1-dev",
-         "pipeline_class": FluxPipeline
+         "pipeline_class": FluxPipeline,
+         #"cache_dir" : cache_dir
      },
      "Stable Diffusion 3.5": {
          "repo_id": "stabilityai/stable-diffusion-3.5-large",
-         "pipeline_class": StableDiffusion3Pipeline
+         "pipeline_class": StableDiffusion3Pipeline,
+         #"cache_dir" : cache_dir
+     },
+     "PixArt": {
+         "repo_id": "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
+         "pipeline_class": PixArtSigmaPipeline,
+         #"cache_dir" : cache_dir
+     },
+     "SANA": {
+         "repo_id": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
+         "pipeline_class": SanaPipeline,
+         #"cache_dir" : cache_dir
+     },
+     "AuraFlow": {
+         "repo_id": "fal/AuraFlow",
+         "pipeline_class": AuraFlowPipeline,
+         #"cache_dir" : cache_dir
+     },
+     "Kandinsky": {
+         "repo_id": "kandinsky-community/kandinsky-3",
+         "pipeline_class": Kandinsky3Pipeline,
+         #"cache_dir" : cache_dir
+     },
+     "Hunyuan": {
+         "repo_id": "Tencent-Hunyuan/HunyuanDiT-Diffusers",
+         "pipeline_class": HunyuanDiTPipeline,
+         #"cache_dir" : cache_dir
+     },
+     "Lumina": {
+         "repo_id": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
+         "pipeline_class": LuminaText2ImgPipeline,
+         #"cache_dir" : cache_dir
      }
- }
-
- # GPU allocation queue and model cache
- gpu_queue = Queue()
- for gpu_id in AVAILABLE_GPUS:
-     gpu_queue.put(gpu_id)
-
- model_cache = {}
+
+
+ }
+
+ # Dictionary to store model pipelines
+ pipes = {}
  model_locks = {model_name: threading.Lock() for model_name in MODEL_CONFIGS.keys()}

- def get_next_available_gpu():
-     """Get the next available GPU from the queue"""
-     gpu_id = gpu_queue.get()
-     return gpu_id

- def release_gpu(gpu_id):
-     """Release GPU back to the queue"""
-     gpu_queue.put(gpu_id)
+ def get_process_memory():
+     """Get memory usage of current process in GB"""
+     process = psutil.Process(os.getpid())
+     return process.memory_info().rss / 1024 / 1024 / 1024
+
+
+ def clear_torch_cache():
+     """Clear PyTorch's CUDA cache"""
+     if torch.cuda.is_available():
+         torch.cuda.empty_cache()
+         torch.cuda.ipc_collect()
+
+
+ def remove_cache_dir(model_name):
+     """Remove the model's cache directory"""
+     cache_dir = Path.home() / '.cache' / 'huggingface' / 'diffusers' / MODEL_CONFIGS[model_name]['repo_id'].replace('/', '--')
+     if cache_dir.exists():
+         shutil.rmtree(cache_dir, ignore_errors=True)
+
+
+ def deep_cleanup(model_name, pipe):
+     """Perform deep cleanup of model resources"""
+     try:
+         # 1. Move model to CPU first (helps prevent CUDA memory fragmentation)
+         if hasattr(pipe, 'to'):
+             pipe.to('cpu')
+
+         # 2. Delete all model components explicitly
+         for attr_name in list(pipe.__dict__.keys()):
+             if hasattr(pipe, attr_name):
+                 delattr(pipe, attr_name)
+
+         # 3. Remove from pipes dictionary
+         if model_name in pipes:
+             del pipes[model_name]
+
+         # 4. Clear CUDA cache
+         clear_torch_cache()
+
+         # 5. Run garbage collection multiple times
+         for _ in range(3):
+             gc.collect()
+
+         # 6. Remove cached files
+         remove_cache_dir(model_name)
+
+         # 7. Additional CUDA cleanup if available
+         if torch.cuda.is_available():
+             torch.cuda.synchronize()
+
+         # 8. Wait a small amount of time to ensure cleanup
+         time.sleep(1)
+
+     except Exception as e:
+         print(f"Error during cleanup of {model_name}: {str(e)}")
+
+     finally:
+         # Final garbage collection
+         gc.collect()
+         clear_torch_cache()
+
+
- def load_pipeline_on_gpu(model_name, gpu_id):
-     """Load model pipeline on specific GPU with memory tracking"""
+ def load_pipeline(model_name):
+     """Load model pipeline with memory tracking"""
+     initial_memory = get_process_memory()
      config = MODEL_CONFIGS[model_name]
-
-     with torch.cuda.device(gpu_id):
-         pipe = config["pipeline_class"].from_pretrained(
-             config["repo_id"],
-             torch_dtype=TORCH_DTYPE
-         )
-         pipe = pipe.to(f"cuda:{gpu_id}")
-
-         if hasattr(pipe, 'enable_model_cpu_offload'):
-             pipe.enable_model_cpu_offload()
-
+
+     pipe = config["pipeline_class"].from_pretrained(
+         config["repo_id"],
+         torch_dtype=TORCH_DTYPE,
+         cache_dir=cache_dir
+     )
+     pipe = pipe.to(DEVICE)
+
+     if hasattr(pipe, 'enable_model_cpu_offload'):
+         pipe.enable_model_cpu_offload()
+
+     final_memory = get_process_memory()
+     print(f"Memory used by {model_name}: {final_memory - initial_memory:.2f} GB")
+
      return pipe

+
  def save_generated_image(image, model_name, prompt):
      """Save generated image with timestamp and model name"""
      timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+     # Create sanitized filename from prompt (first 30 chars)
      prompt_part = "".join(c for c in prompt[:30] if c.isalnum() or c in (' ', '-', '_')).strip()
      filename = f"{timestamp}_{model_name}_{prompt_part}.png"
      filepath = os.path.join(OUTPUT_DIR, filename)
      image.save(filepath)
      return filepath

+
  def get_generated_images():
      """Get list of generated images with their details"""
      files = glob.glob(os.path.join(OUTPUT_DIR, "*.png"))
-     files.sort(key=os.path.getctime, reverse=True)
+     files.sort(key=os.path.getctime, reverse=True)  # Sort by creation time
      return [
          {
              "path": f,
@@ -132,25 +191,35 @@ def get_generated_images():
          for f in files
      ]

- def generate_image_on_gpu(args):
-     """Generate image on specific GPU"""
-     model_name, prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps = args
-
-     try:
-         gpu_id = get_next_available_gpu()
-         print(f"Generating {model_name} on GPU {gpu_id}")
-
-         # Load or get cached pipeline
-         cache_key = f"{model_name}_{gpu_id}"
-         if cache_key not in model_cache:
-             with model_locks[model_name]:
-                 model_cache[cache_key] = load_pipeline_on_gpu(model_name, gpu_id)
-
-         pipe = model_cache[cache_key]
-
-         # Generate image
-         with torch.cuda.device(gpu_id), autocast():
-             generator = torch.Generator(f"cuda:{gpu_id}").manual_seed(seed)
+
+ def generate_image(
+         model_name,
+         prompt,
+         negative_prompt="",
+         seed=42,
+         randomize_seed=False,
+         width=1024,
+         height=1024,
+         guidance_scale=4.5,
+         num_inference_steps=40,
+         progress=gr.Progress(track_tqdm=True)
+ ):
+     with model_locks[model_name]:
+         try:
+             # progress(0, desc=f"Loading {model_name} model...")
+
+             if model_name not in pipes:
+                 pipes[model_name] = load_pipeline(model_name)
+
+             pipe = pipes[model_name]
+
+             if randomize_seed:
+                 seed = random.randint(0, MAX_SEED)
+
+             generator = torch.Generator(DEVICE).manual_seed(seed)
+             print(f"Generating image with {model_name}...")
+             # progress(0.3, desc=f"Generating image with {model_name}...")
+
              image = pipe(
                  prompt=prompt,
                  negative_prompt=negative_prompt,
@@ -160,53 +229,22 @@ def generate_image_on_gpu(args):
                  height=height,
                  generator=generator,
              ).images[0]
-
-         filepath = save_generated_image(image, model_name, prompt)
-         print(f"Saved image from {model_name} to: {filepath}")
-
-         release_gpu(gpu_id)
-         return image, seed
-
-     except Exception as e:
-         print(f"Error with {model_name} on GPU {gpu_id}: {str(e)}")
-         release_gpu(gpu_id)
-         raise e
-
- @SpacesMultiGPU(gpu_config)
- def generate_all(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress()):
-     outputs = [None] * (len(MODEL_CONFIGS) * 2)
-
-     # Prepare generation tasks
-     tasks = []
-     for model_name in MODEL_CONFIGS.keys():
-         current_seed = random.randint(0, MAX_SEED) if randomize_seed else seed
-         tasks.append((
-             model_name, prompt, negative_prompt, current_seed,
-             width, height, guidance_scale, num_inference_steps
-         ))
-
-     # Run generation in parallel using thread pool
-     with ThreadPoolExecutor(max_workers=len(AVAILABLE_GPUS)) as executor:
-         future_to_model = {
-             executor.submit(generate_image_on_gpu, task): idx
-             for idx, task in enumerate(tasks)
-         }
-
-         for future in as_completed(future_to_model):
-             idx = future_to_model[future]
-             try:
-                 image, used_seed = future.result()
-                 outputs[idx * 2] = image
-                 outputs[idx * 2 + 1] = used_seed
-                 yield outputs + [None]
-             except Exception as e:
-                 print(f"Generation failed for model {idx}: {str(e)}")
-                 outputs[idx * 2] = None
-                 outputs[idx * 2 + 1] = None
-
-     # Update gallery after all generations complete
-     gallery_images = update_gallery()
-     return outputs
+
+             filepath = save_generated_image(image, model_name, prompt)
+             print(f"Saved image to: {filepath}")
+
+             # progress(0.9, desc=f"Cleaning up {model_name} resources...")
+             # deep_cleanup(model_name, pipe)
+
+             # progress(1.0, desc=f"Generation complete with {model_name}")
+             return image, seed
+
+         except Exception as e:
+             print(f"Error with {model_name}: {str(e)}")
+             if model_name in pipes:
+                 deep_cleanup(model_name, pipes[model_name])
+             raise e
+

  # Gradio Interface
  css = """
@@ -218,8 +256,8 @@ css = """

  with gr.Blocks(css=css) as demo:
      with gr.Column(elem_id="col-container"):
-         gr.Markdown(f"# Multi-GPU Image Generation ({len(AVAILABLE_GPUS)} GPUs Available)")
-
+         gr.Markdown("# Multi-Model Image Generation")
+
          with gr.Row():
              prompt = gr.Text(
                  label="Prompt",
@@ -229,14 +267,14 @@ with gr.Blocks(css=css) as demo:
                  container=False,
              )
              run_button = gr.Button("Generate", scale=0, variant="primary")
-
+
          with gr.Accordion("Advanced Settings", open=False):
              negative_prompt = gr.Text(
                  label="Negative prompt",
                  max_lines=1,
                  placeholder="Enter a negative prompt",
              )
-
+
              seed = gr.Slider(
                  label="Seed",
                  minimum=0,
@@ -244,9 +282,9 @@ with gr.Blocks(css=css) as demo:
                  step=1,
                  value=0,
              )
-
+
              randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
+
              with gr.Row():
                  width = gr.Slider(
                      label="Width",
@@ -262,7 +300,7 @@ with gr.Blocks(css=css) as demo:
                      step=32,
                      value=1024,
                  )
-
+
              with gr.Row():
                  guidance_scale = gr.Slider(
                      label="Guidance scale",
@@ -278,7 +316,9 @@ with gr.Blocks(css=css) as demo:
                      step=1,
                      value=40,
                  )
-
+
+         memory_indicator = gr.Markdown("Current memory usage: 0 GB")
+
          with gr.Row():
              with gr.Column(scale=2):
                  with gr.Tabs() as tabs:
@@ -287,18 +327,21 @@ with gr.Blocks(css=css) as demo:
                      for model_name in MODEL_CONFIGS.keys():
                          with gr.Tab(model_name):
                              results[model_name] = gr.Image(label=f"{model_name} Result")
-                             seeds[model_name] = gr.Number(label="Seed used", visible=False)
-
-             with gr.Column(scale=1):
-                 gr.Markdown("### Generated Images")
-                 file_gallery = gr.Gallery(
-                     label="Generated Images",
-                     show_label=False,
-                     elem_id="file_gallery",
-                     columns=2,
-                     height=400
-                 )
-                 refresh_button = gr.Button("Refresh Gallery")
+                             seeds[model_name] = gr.Number(label="Seed used", visible=True)
+             with gr.Column(scale=1):
+                 gr.Markdown("### Generated Images")
+                 file_gallery = gr.Gallery(
+                     label="Generated Images",
+                     show_label=False,
+                     elem_id="file_gallery",
+                     columns=3,
+                     height=800,
+                     visible=True
+                 )
+                 refresh_button = gr.Button("Refresh Gallery")
+
+
+

      def update_gallery():
          """Update the file gallery"""
@@ -308,6 +351,44 @@ with gr.Blocks(css=css) as demo:
              for f in files
          ]

+
+     @spaces.GPU(duration=600)
+     def generate_all(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
+                      progress=gr.Progress()):
+         outputs = [None] * (len(MODEL_CONFIGS) * 2)
+         for idx, model_name in enumerate(MODEL_CONFIGS.keys()):
+             try:
+                 # Display progress for the specific model
+                 # progress(0, desc=f"Starting generation for {model_name}...")
+                 print(f"IMAGE GENERATING {model_name} ")
+                 image, used_seed = generate_image(
+                     model_name, prompt, negative_prompt, seed,
+                     randomize_seed, width, height, guidance_scale,
+                     num_inference_steps, progress
+                 )
+                 print(f"IMAGE GENERATED {model_name} ")
+                 # Update the respective model's tab with the generated image
+                 # results[model_name].update(image)
+                 # seeds[model_name].update(used_seed)
+                 outputs[idx * 2] = image  # Image slot
+                 outputs[idx * 2 + 1] = seed  # Seed slot
+                 # outputs.extend([image, used_seed])
+                 print("YIELD")
+                 yield outputs + [None]
+
+
+             except Exception as e:
+                 print(f"Error generating with {model_name}: {str(e)}")
+                 outputs[idx * 2] = None
+                 outputs[idx * 2 + 1] = None
+
+         # Update the gallery after generation
+         gallery_images = update_gallery()
+         # file_gallery.update(value=gallery_images)
+         return outputs
+
+
      output_components = []
      for model_name in MODEL_CONFIGS.keys():
          output_components.extend([results[model_name], seeds[model_name]])
@@ -340,6 +421,4 @@ with gr.Blocks(css=css) as demo:
      )

  if __name__ == "__main__":
-     # Initialize multiprocessing for PyTorch
-     mp.set_start_method('spawn', force=True)
-     demo.launch()
+     demo.launch(server_name='0.0.0.0')
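
The core pattern this commit introduces is a lazy per-model pipeline cache: pipelines go into the pipes dict on first use, and each model has its own threading.Lock so concurrent requests never load or run the same pipeline twice. Below is a minimal sketch of that pattern in isolation; make_pipeline is a hypothetical stand-in for config["pipeline_class"].from_pretrained(...), not code from this repository.

import threading
import time

MODEL_CONFIGS = {"FLUX": {}, "SANA": {}}   # stand-in for the real configs
pipes = {}                                 # model name -> loaded pipeline
model_locks = {name: threading.Lock() for name in MODEL_CONFIGS}

def make_pipeline(model_name):
    """Hypothetical loader; app.py calls from_pretrained() at this point."""
    time.sleep(0.1)                        # stands in for the slow download/load
    return "<pipeline for %s>" % model_name

def get_pipeline(model_name):
    with model_locks[model_name]:          # serialize loading and use per model
        if model_name not in pipes:        # load only on first use, then reuse
            pipes[model_name] = make_pipeline(model_name)
        return pipes[model_name]

if __name__ == "__main__":
    threads = [threading.Thread(target=get_pipeline, args=("FLUX",)) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(pipes)                           # FLUX was loaded exactly once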
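The run_button.click wiring itself sits outside the hunks shown above, so the following is only an assumption about how a streaming handler like generate_all plugs into Gradio: a generator function can serve as the click callback, and each yield must supply one value per output component, which is why the diff keeps an outputs list and yields it after every model finishes. Textboxes stand in here for the Image and Number components of app.py.

import gradio as gr

MODELS = ["FLUX", "SANA"]   # stand-in for MODEL_CONFIGS.keys()

def generate_all(prompt):
    outputs = [None] * len(MODELS)
    for idx, name in enumerate(MODELS):
        outputs[idx] = "%s result for: %s" % (name, prompt)   # stands in for pipe(...)
        yield outputs   # partial update: finished slots filled, the rest still None

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    run = gr.Button("Generate")
    slots = [gr.Textbox(label=name) for name in MODELS]
    run.click(generate_all, inputs=prompt, outputs=slots)

if __name__ == "__main__":
    demo.launch()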