Aduc-sdr committed
Commit 71cd5d1 · verified · 1 Parent(s): beebea3

Update app.py

Files changed (1):
  app.py +84 -156
app.py CHANGED
@@ -2,7 +2,7 @@
  # //
  # // Licensed under the Apache License, Version 2.0 (the "License");
  # // you may not use this file except in compliance with the License.
- # // You may obtain a copy of the License at
  # //
  # // http://www.apache.org/licenses/LICENSE-2.0
  # //
@@ -16,99 +16,73 @@ import subprocess
  import os
  import sys

- # --- STEP 1: Environment preparation ---
- # Clone the repository so that all of the code folders (data, common, etc.) exist.
- repo_dir_name = "SeedVR2-3B"
- if not os.path.exists(repo_dir_name):
-     print(f"Cloning the {repo_dir_name} repository to fetch the full source tree...")
-     subprocess.run(f"git clone --depth 1 https://huggingface.co/spaces/ByteDance-Seed/{repo_dir_name}", shell=True, check=True)
-
- # --- STEP 2: Path configuration ---
- # Change into the repository directory and add it to the Python path.
- os.chdir(repo_dir_name)
  print(f"Working directory changed to: {os.getcwd()}")
- sys.path.insert(0, os.path.abspath('.'))
- print("Current directory added to sys.path for imports.")

- # --- STEP 3: Dependency installation (IN THE RIGHT ORDER, WITH THE RIGHT FLAG) ---
  python_executable = sys.executable
-
- # **STEP 3.1: Install the requirements FIRST so that PyTorch is available**
- print("Installing dependencies from requirements.txt (this includes PyTorch)...")
  subprocess.run([python_executable, "-m", "pip", "install", "-r", "requirements.txt"], check=True)
- print("✅ Base dependencies (including PyTorch) installed.")
-
- # **STEP 3.2: Build the GPU-optimized dependencies**
- print("Installing flash-attn, compiling it from scratch...")
- subprocess.run([python_executable, "-m", "pip", "install", "--force-reinstall", "--no-cache-dir", "flash-attn"], check=True)
-
- print("Cloning and building Apex from scratch (this can take a while)...")
- if not os.path.exists("apex"):
-     subprocess.run("git clone https://github.com/NVIDIA/apex", shell=True, check=True)
-
- # **FINAL FIX: add the --no-build-isolation flag**
- # This forces the build to use the current environment (where torch is already installed)
- # instead of creating an isolated, empty build environment.
- print("Building and installing Apex...")
- subprocess.run(
-     [
-         python_executable, "-m", "pip", "install",
-         "--no-build-isolation",  # THE CRITICAL FLAG THAT FIXES THE PROBLEM
-         "-v",
-         "--disable-pip-version-check",
-         "--no-cache-dir",
-         "--global-option=--cpp_ext",
-         "--global-option=--cuda_ext",
-         "./apex"
-     ],
-     check=True
- )
- print("✅ Apex setup complete.")

- # **STEP 3.3: Download the models and the sample data**
- import torch
  from pathlib import Path
  from urllib.parse import urlparse
  from torch.hub import download_url_to_file, get_dir

- def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
-     if model_dir is None:
-         hub_dir = get_dir(); model_dir = os.path.join(hub_dir, 'checkpoints')
      os.makedirs(model_dir, exist_ok=True)
-     parts = urlparse(url); filename = os.path.basename(parts.path)
-     if file_name is not None: filename = file_name
-     cached_file = os.path.abspath(os.path.join(model_dir, filename))
      if not os.path.exists(cached_file):
          print(f'Downloading: "{url}" to {cached_file}\n')
          download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
      return cached_file

  pretrain_model_url = {
-     'vae': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/ema_vae.pth',
-     'dit': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/seedvr2_ema_3b.pth',
      'pos_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt',
      'neg_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/neg_emb.pt',
  }

- ckpt_dir = Path('./ckpts'); ckpt_dir.mkdir(exist_ok=True)
  for key, url in pretrain_model_url.items():
-     filename = os.path.basename(url)
      model_dir = './ckpts' if key in ['vae', 'dit'] else '.'
-     if not os.path.exists(os.path.join(model_dir, filename)):
-         load_file_from_url(url=url, model_dir=model_dir, progress=True, file_name=filename)

  torch.hub.download_url_to_file('https://huggingface.co/datasets/Iceclear/SeedVR_VideoDemos/resolve/main/seedvr_videos_crf23/aigc1k/23_1_lq.mp4', '01.mp4')
  torch.hub.download_url_to_file('https://huggingface.co/datasets/Iceclear/SeedVR_VideoDemos/resolve/main/seedvr_videos_crf23/aigc1k/28_1_lq.mp4', '02.mp4')
  torch.hub.download_url_to_file('https://huggingface.co/datasets/Iceclear/SeedVR_VideoDemos/resolve/main/seedvr_videos_crf23/aigc1k/2_1_lq.mp4', '03.mp4')
- print("✅ Models and sample data downloaded.")

-
- # --- STEP 4: Run the main application code ---
  import mediapy
  from einops import rearrange
  from omegaconf import OmegaConf
@@ -138,26 +112,18 @@ os.environ["MASTER_ADDR"] = "127.0.0.1"
  os.environ["MASTER_PORT"] = "12355"
  os.environ["RANK"] = str(0)
  os.environ["WORLD_SIZE"] = str(1)
- os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

  if os.path.exists("projects/video_diffusion_sr/color_fix.py"):
      from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
      use_colorfix = True
  else:
      use_colorfix = False
-     print('Warning!!!!!! Color correction is not available!')

- def configure_sequence_parallel(sp_size):
-     if sp_size > 1:
-         init_sequence_parallel(sp_size)
-
- def configure_runner(sp_size):
-     config_path = 'configs_3b/main.yaml'
-     config = load_config(config_path)
      runner = VideoDiffusionInfer(config)
      OmegaConf.set_readonly(runner.config, False)
      init_torch(cudnn_benchmark=False, timeout=datetime.timedelta(seconds=3600))
-     configure_sequence_parallel(sp_size)
      runner.configure_dit_model(device="cuda", checkpoint='ckpts/seedvr2_ema_3b.pth')
      runner.configure_vae_model()
      if hasattr(runner.vae, "set_memory_limit"):
@@ -165,103 +131,65 @@ def configure_runner(sp_size):
      return runner

  def generation_step(runner, text_embeds_dict, cond_latents):
-     def _move_to_cuda(x):
-         return [i.to(torch.device("cuda")) for i in x]
      noises = [torch.randn_like(latent) for latent in cond_latents]
      aug_noises = [torch.randn_like(latent) for latent in cond_latents]
      noises, aug_noises, cond_latents = sync_data((noises, aug_noises, cond_latents), 0)
      noises, aug_noises, cond_latents = list(map(_move_to_cuda, (noises, aug_noises, cond_latents)))
      def _add_noise(x, aug_noise):
-         t = torch.tensor([1000.0], device=torch.device("cuda")) * 0.1
-         shape = torch.tensor(x.shape[1:], device=torch.device("cuda"))[None]
          t = runner.timestep_transform(t, shape)
          return runner.schedule.forward(x, aug_noise, t)
-     conditions = [runner.get_condition(noise, task="sr", latent_blur=_add_noise(latent_blur, aug_noise)) for noise, aug_noise, latent_blur in zip(noises, aug_noises, cond_latents)]
      with torch.no_grad(), torch.autocast("cuda", torch.bfloat16, enabled=True):
-         video_tensors = runner.inference(noises=noises, conditions=conditions, dit_offload=False, **text_embeds_dict)
-     return [rearrange(video, "c t h w -> t c h w") for video in video_tensors]

  @spaces.GPU
- def generation_loop(video_path, seed=666, fps_out=24, batch_size=1, cfg_scale=1.0, cfg_rescale=0.0, sample_steps=1, res_h=1280, res_w=720, sp_size=1):
      if video_path is None: return None, None, None
-     runner = configure_runner(1)
-     def _extract_text_embeds():
-         positive_prompts_embeds = []
-         for _ in original_videos_local:
-             positive_prompts_embeds.append({"texts_pos": [torch.load('pos_emb.pt')], "texts_neg": [torch.load('neg_emb.pt')]})
-         gc.collect(); torch.cuda.empty_cache()
-         return positive_prompts_embeds
-     runner.config.diffusion.cfg.scale, runner.config.diffusion.cfg.rescale, runner.config.diffusion.timesteps.sampling.steps = cfg_scale, cfg_rescale, sample_steps
      runner.configure_diffusion()
-     set_seed(int(seed) % (2**32), same_across_ranks=True)
      os.makedirs("output", exist_ok=True)
-     original_videos = [os.path.basename(video_path)]
-     original_videos_local = partition_by_size(original_videos, batch_size)
-     positive_prompts_embeds = _extract_text_embeds()
-     video_transform = Compose([NaResize(resolution=(res_h * res_w) ** 0.5, mode="area", downsample_only=False), Lambda(lambda x: torch.clamp(x, 0.0, 1.0)), DivisibleCrop((16, 16)), Normalize(0.5, 0.5), Rearrange("t c h w -> c t h w")])
-     for videos, text_embeds in tqdm(zip(original_videos_local, positive_prompts_embeds)):
-         media_type, _ = mimetypes.guess_type(video_path)
-         is_video = media_type and media_type.startswith("video")
-         if is_video:
-             video, _, _ = read_video(video_path, output_format="TCHW"); video = video[:121] / 255.0; output_dir = os.path.join("output", f"{uuid.uuid4()}.mp4")
-         else:
-             video = T.ToTensor()(Image.open(video_path).convert("RGB")).unsqueeze(0); output_dir = os.path.join("output", f"{uuid.uuid4()}.png")
-         cond_latents = [video_transform(video.to("cuda"))]
-         ori_lengths = [v.size(1) for v in cond_latents]
-         cond_latents = runner.vae_encode(cond_latents)
-         for key in ["texts_pos", "texts_neg"]:
-             for i, emb in enumerate(text_embeds[key]): text_embeds[key][i] = emb.to("cuda")
-         samples = generation_step(runner, text_embeds, cond_latents=cond_latents)
-         del cond_latents
-         for sample, ori_length in zip(samples, ori_lengths):
-             sample = sample[:ori_length].to("cpu")
-             sample = rearrange(sample, "t c h w -> t h w c").clip(-1, 1).mul_(0.5).add_(0.5).mul_(255).round().to(torch.uint8).numpy()
-             if is_video: mediapy.write_video(output_dir, sample, fps=fps_out)
-             else: mediapy.write_image(output_dir, sample[0])
-         gc.collect(); torch.cuda.empty_cache()
-     return (None, output_dir, output_dir) if is_video else (output_dir, None, output_dir)
-
- with gr.Blocks(title="SeedVR2: One-Step Video Restoration") as demo:
-     gr.HTML(f"""
-         <div style='text-align:center; margin-bottom: 10px;'>
-             <img src='file/{os.path.abspath("assets/seedvr_logo.png")}' style='height:40px;' alt='SeedVR logo'/>
-         </div>
-         <p><b>Official Gradio demo</b> for
-         <a href='https://github.com/ByteDance-Seed/SeedVR' target='_blank'>
-         <b>SeedVR2: One-Step Video Restoration via Diffusion Adversarial Post-Training</b></a>.<br>
-         🔥 <b>SeedVR2</b> is a one-step image and video restoration algorithm for real-world and AIGC content.
-         </p>
-     """)
      with gr.Row():
-         input_file = gr.File(label="Upload image or video")
          with gr.Column():
-             seed = gr.Number(label="Seed", value=666)
-             fps = gr.Number(label="Output FPS (for video)", value=24)
              run_button = gr.Button("Run")
-     with gr.Row():
-         output_image = gr.Image(label="Output Image")
-         output_video = gr.Video(label="Output Video")
-         download_link = gr.File(label="Download the result")
      run_button.click(fn=generation_loop, inputs=[input_file, seed, fps], outputs=[output_image, output_video, download_link])
-     gr.Examples(
-         examples=[
-             ["01.mp4", 4, 24],
-             ["02.mp4", 4, 24],
-             ["03.mp4", 4, 24],
-         ],
-         inputs=[input_file, seed, fps]
-     )
-     gr.HTML("""
-         <hr>
-         <p>If you find SeedVR useful, please ⭐ the
-         <a href='https://github.com/ByteDance-Seed/SeedVR' target='_blank'>GitHub repository</a>:</p>
-         <a href="https://github.com/ByteDance-Seed/SeedVR" target="_blank">
-         <img src="https://img.shields.io/github/stars/ByteDance-Seed/SeedVR?style=social" alt="GitHub Stars">
-         </a>
-         <h4>Notice</h4>
-         <p>This demo supports up to <b>720p and 121 frames for videos, or 2k images</b>.
-         For other use cases, please check the <a href='https://github.com/ByteDance-Seed/SeedVR' target='_blank'>GitHub repository</a>.</p>
-         <h4>Limitations</h4>
-         <p>It may fail on heavy degradations or on AIGC clips with little motion, producing over-sharpening or inadequate restoration.</p>
-     """)
  demo.queue().launch(share=True)
 
  # //
  # // Licensed under the Apache License, Version 2.0 (the "License");
  # // you may not use this file except in compliance with the License.
+ # // You may obtain a copy of the License at
  # //
  # // http://www.apache.org/licenses/LICENSE-2.0
  # //
 
  import os
  import sys

+ # --- STEP 1: Clone the official GitHub repository ---
+ repo_name = "SeedVR"
+ if not os.path.exists(repo_name):
+     print(f"Cloning the {repo_name} repository from GitHub...")
+     subprocess.run(f"git clone https://github.com/ByteDance-Seed/{repo_name}.git", shell=True, check=True)

+ # --- STEP 2: Change into the directory and set up the environment ---
+ os.chdir(repo_name)
  print(f"Working directory changed to: {os.getcwd()}")

+ # Add the directory to the Python path so that the imports work
+ sys.path.insert(0, os.path.abspath('.'))
+ print("Current directory added to sys.path.")

+ # --- STEP 3: Install the dependencies following the instructions ---
  python_executable = sys.executable
+ print("Installing dependencies from requirements.txt...")
  subprocess.run([python_executable, "-m", "pip", "install", "-r", "requirements.txt"], check=True)

+ print("Installing flash-attn...")
+ subprocess.run([python_executable, "-m", "pip", "install", "flash-attn==2.5.9.post1", "--no-build-isolation"], check=True)
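`flash-attn` likewise imports torch in its `setup.py`, hence the same `--no-build-isolation` flag; the `2.5.9.post1` pin keeps the build reproducible across Space restarts. A post-install smoke test one could append (a sketch; `flash_attn.__version__` is the package's standard version attribute):

```python
import subprocess
import sys

# Fail fast if the pinned flash-attn did not build and import cleanly.
subprocess.run(
    [sys.executable, "-c", "import flash_attn; print(flash_attn.__version__)"],
    check=True,
)
```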

  from pathlib import Path
  from urllib.parse import urlparse
  from torch.hub import download_url_to_file, get_dir

+ # Helper function for downloads
+ def load_file_from_url(url, model_dir='.', progress=True, file_name=None):
      os.makedirs(model_dir, exist_ok=True)
+     if not file_name:
+         parts = urlparse(url)
+         file_name = os.path.basename(parts.path)
+     cached_file = os.path.join(model_dir, file_name)
      if not os.path.exists(cached_file):
          print(f'Downloading: "{url}" to {cached_file}\n')
          download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
      return cached_file
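The helper is idempotent: the destination is derived from the URL (or an explicit `file_name`), and an existing file short-circuits the download. A small usage sketch, assuming the function as defined above and one of the embedding URLs from the model table below:

```python
import os

url = "https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt"

first = load_file_from_url(url)   # downloads to ./pos_emb.pt
second = load_file_from_url(url)  # cache hit: no second download
assert first == second == os.path.join(".", "pos_emb.pt")
```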

+ # Download and install a pre-built Apex wheel (crucial for the Spaces environment)
+ apex_url = 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/apex-0.1-cp39-cp39-linux_x86_64.whl'
+ apex_wheel_path = load_file_from_url(url=apex_url)
+ print("Installing Apex from the downloaded wheel...")
+ subprocess.run([python_executable, "-m", "pip", "install", "--force-reinstall", "--no-cache-dir", apex_wheel_path], check=True)
+ print("✅ Apex setup complete.")
+
+ # --- STEP 4: Download the pretrained models ---
+ print("Downloading pretrained models...")
  pretrain_model_url = {
+     'vae': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/ema_vae.pth',
+     'dit': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/seedvr2_ema_3b.pth',
      'pos_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt',
      'neg_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/neg_emb.pt',
  }

+ Path('./ckpts').mkdir(exist_ok=True)
  for key, url in pretrain_model_url.items():
      model_dir = './ckpts' if key in ['vae', 'dit'] else '.'
+     load_file_from_url(url=url, model_dir=model_dir)
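The loop routes the two large checkpoints into `./ckpts` and the two text embeddings into the repository root, which is where `configure_runner` and `generation_loop` later look for them. A sanity check one could append (a sketch derived from the URLs above):

```python
from pathlib import Path

expected = [
    Path("ckpts/ema_vae.pth"),         # 'vae'
    Path("ckpts/seedvr2_ema_3b.pth"),  # 'dit', loaded by configure_runner
    Path("pos_emb.pt"),                # 'pos_emb', loaded by generation_loop
    Path("neg_emb.pt"),                # 'neg_emb', loaded by generation_loop
]
missing = [p for p in expected if not p.exists()]
assert not missing, f"Missing downloads: {missing}"
```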
 

+ # Download the sample videos (use the helper imported above; torch itself is only imported in STEP 5)
  download_url_to_file('https://huggingface.co/datasets/Iceclear/SeedVR_VideoDemos/resolve/main/seedvr_videos_crf23/aigc1k/23_1_lq.mp4', '01.mp4')
  download_url_to_file('https://huggingface.co/datasets/Iceclear/SeedVR_VideoDemos/resolve/main/seedvr_videos_crf23/aigc1k/28_1_lq.mp4', '02.mp4')
  download_url_to_file('https://huggingface.co/datasets/Iceclear/SeedVR_VideoDemos/resolve/main/seedvr_videos_crf23/aigc1k/2_1_lq.mp4', '03.mp4')
+ print("✅ Setup complete. Starting the application...")

+ # --- STEP 5: Run the main application ---
+ import torch
  import mediapy
  from einops import rearrange
  from omegaconf import OmegaConf
 
  os.environ["MASTER_PORT"] = "12355"
  os.environ["RANK"] = str(0)
  os.environ["WORLD_SIZE"] = str(1)

  if os.path.exists("projects/video_diffusion_sr/color_fix.py"):
      from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
      use_colorfix = True
  else:
      use_colorfix = False
 

+ def configure_runner():
+     config = load_config('configs_3b/main.yaml')
      runner = VideoDiffusionInfer(config)
      OmegaConf.set_readonly(runner.config, False)
      init_torch(cudnn_benchmark=False, timeout=datetime.timedelta(seconds=3600))
      runner.configure_dit_model(device="cuda", checkpoint='ckpts/seedvr2_ema_3b.pth')
      runner.configure_vae_model()
      if hasattr(runner.vae, "set_memory_limit"):

      return runner

  def generation_step(runner, text_embeds_dict, cond_latents):
+     def _move_to_cuda(x): return [i.to("cuda") for i in x]
      noises = [torch.randn_like(latent) for latent in cond_latents]
      aug_noises = [torch.randn_like(latent) for latent in cond_latents]
      noises, aug_noises, cond_latents = sync_data((noises, aug_noises, cond_latents), 0)
      noises, aug_noises, cond_latents = list(map(_move_to_cuda, (noises, aug_noises, cond_latents)))
      def _add_noise(x, aug_noise):
+         t = torch.tensor([100.0], device="cuda")
+         shape = torch.tensor(x.shape[1:], device="cuda")[None]
          t = runner.timestep_transform(t, shape)
          return runner.schedule.forward(x, aug_noise, t)
+     conditions = [runner.get_condition(n, task="sr", latent_blur=_add_noise(l, an)) for n, an, l in zip(noises, aug_noises, cond_latents)]
      with torch.no_grad(), torch.autocast("cuda", torch.bfloat16, enabled=True):
+         video_tensors = runner.inference(noises=noises, conditions=conditions, **text_embeds_dict)
+     return [rearrange(v, "c t h w -> t c h w") for v in video_tensors]
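The rewritten `_add_noise` is numerically identical to the old one: `torch.tensor([1000.0]) * 0.1` and `torch.tensor([100.0])` are the same augmentation timestep (10% of the 1000-step scale), so only the dropped `dit_offload=False` argument changes behavior. A quick check:

```python
import torch

old_t = torch.tensor([1000.0]) * 0.1
new_t = torch.tensor([100.0])
assert torch.equal(old_t, new_t)  # identical noise-augmentation timestep
```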

  @spaces.GPU
+ def generation_loop(video_path, seed=666, fps_out=24):
      if video_path is None: return None, None, None
+     runner = configure_runner()
+     text_embeds = {"texts_pos": [torch.load('pos_emb.pt').to("cuda")], "texts_neg": [torch.load('neg_emb.pt').to("cuda")]}
      runner.configure_diffusion()
+     set_seed(int(seed))
      os.makedirs("output", exist_ok=True)
+     video_transform = Compose([NaResize(1024), DivisibleCrop(16), Normalize(0.5, 0.5), Rearrange("t c h w -> c t h w")])
+     media_type, _ = mimetypes.guess_type(video_path)
+     is_video = media_type and media_type.startswith("video")
+     if is_video:
+         video, _, _ = read_video(video_path, output_format="TCHW")
+         video = video[:121] / 255.0
+         output_path = os.path.join("output", f"{uuid.uuid4()}.mp4")
+     else:
+         video = T.ToTensor()(Image.open(video_path).convert("RGB")).unsqueeze(0)
+         output_path = os.path.join("output", f"{uuid.uuid4()}.png")
+     cond_latents = [video_transform(video.to("cuda"))]
+     ori_length = cond_latents[0].size(1)  # frame count: dim 1 is t after "t c h w -> c t h w"
+     cond_latents = runner.vae_encode(cond_latents)
+     samples = generation_step(runner, text_embeds, cond_latents)
+     sample = samples[0][:ori_length].cpu()
+     sample = rearrange(sample, "t c h w -> t h w c").clip(-1, 1).add(1).mul(127.5).byte().numpy()
+     if is_video:
+         mediapy.write_video(output_path, sample, fps=fps_out)
+         return None, output_path, output_path
+     else:
+         mediapy.write_image(output_path, sample[0])
+         return output_path, None, output_path
+
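The output conversion maps the model's [-1, 1] range onto uint8 [0, 255] via `(x + 1) * 127.5`; note that `.byte()` truncates where the previous revision's `.round()` rounded, a difference of at most one intensity level. A worked check:

```python
import torch

x = torch.tensor([-1.0, 0.0, 0.5, 1.0])
y = x.clip(-1, 1).add(1).mul(127.5).byte()
print(y)  # tensor([  0, 127, 191, 255], dtype=torch.uint8)
```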
+ with gr.Blocks(title="SeedVR") as demo:
+     gr.HTML(f"""<div style='text-align:center; margin-bottom: 10px;'><img src='file/{os.path.abspath("assets/seedvr_logo.png")}' style='height:40px;'/></div>...""")
      with gr.Row():
+         input_file = gr.File(label="Upload Image or Video")
          with gr.Column():
+             seed = gr.Number(label="Seed", value=42)
+             fps = gr.Number(label="Output FPS", value=24)
              run_button = gr.Button("Run")
+     output_image = gr.Image(label="Output Image")
+     output_video = gr.Video(label="Output Video")
+     download_link = gr.File(label="Download Result")
      run_button.click(fn=generation_loop, inputs=[input_file, seed, fps], outputs=[output_image, output_video, download_link])
+     gr.Examples(examples=[["01.mp4", 42, 24], ["02.mp4", 42, 24], ["03.mp4", 42, 24]], inputs=[input_file, seed, fps])
+     gr.HTML("""<hr>...""")

  demo.queue().launch(share=True)