multimodalart (HF Staff) committed · Commit 3d79b08 · verified · 1 Parent(s): cbdec18

Update app.py

Files changed (1):
  1. app.py (+78, -75)
app.py CHANGED
@@ -11,8 +11,8 @@ import tempfile
 from PIL import Image
 from huggingface_hub import hf_hub_download
 import shutil
+import math # For math.round, though built-in round works for floats
 
-# --- Import necessary classes from the provided files ---
 from inference import (
     create_ltx_video_pipeline,
     create_latent_upsampler,
@@ -25,38 +25,9 @@ from inference import (
 from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline, LTXVideoPipeline
 from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
 
-# --- Global constants from user's request and YAML ---
-YAML_CONFIG_STRING = """
-pipeline_type: multi-scale
-checkpoint_path: "ltxv-13b-0.9.7-distilled.safetensors" # This will be replaced by the rc3 version
-downscale_factor: 0.6666666
-spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.7.safetensors"
-stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
-decode_timestep: 0.05
-decode_noise_scale: 0.025
-text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
-precision: "bfloat16"
-sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
-prompt_enhancement_words_threshold: 120
-prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
-prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
-stochastic_sampling: false
-
-first_pass:
-  timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
-  guidance_scale: 1
-  stg_scale: 0
-  rescaling_scale: 1
-  skip_block_list: [42]
-
-second_pass:
-  timesteps: [0.9094, 0.7250, 0.4219]
-  guidance_scale: 1
-  stg_scale: 0
-  rescaling_scale: 1
-  skip_block_list: [42]
-"""
-PIPELINE_CONFIG_YAML = yaml.safe_load(YAML_CONFIG_STRING)
+config_file_path = "configs/ltxv-13b-0.9.7-distilled.yaml"
+with open(config_file_path, "r") as file:
+    PIPELINE_CONFIG_YAML = yaml.safe_load(file)
 
 # Model specific paths (to be downloaded)
 DISTILLED_MODEL_REPO = "LTX-Colab/LTX-Video-Preview"
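
With the inline YAML string removed, the pipeline settings now depend on configs/ltxv-13b-0.9.7-distilled.yaml being present in the repository, and a missing or mis-pathed file only surfaces when a key is first dereferenced. Below is a minimal, hypothetical sketch of a stricter loader; the helper name and the required-key list (drawn from keys this app reads later, such as decode_timestep, first_pass, and second_pass) are illustrative and not part of the commit:

```python
import yaml

# Illustrative list of keys app.py dereferences later; adjust as needed.
REQUIRED_KEYS = ("decode_timestep", "first_pass", "second_pass", "spatial_upscaler_model_path")

def load_pipeline_config(path="configs/ltxv-13b-0.9.7-distilled.yaml"):
    """Load the pipeline YAML and fail fast if expected keys are missing."""
    with open(path, "r") as f:
        config = yaml.safe_load(f)
    missing = [key for key in REQUIRED_KEYS if key not in config]
    if missing:
        raise KeyError(f"{path} is missing expected keys: {missing}")
    return config
```
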
@@ -66,6 +37,7 @@ UPSCALER_REPO = "Lightricks/LTX-Video"
 
 MAX_IMAGE_SIZE = PIPELINE_CONFIG_YAML.get("max_resolution", 1280)
 MAX_NUM_FRAMES = 257
+FPS = 30.0 # Frames per second for duration calculation
 
 # --- Global variables for loaded models ---
 pipeline_instance = None
@@ -117,12 +89,13 @@ if PIPELINE_CONFIG_YAML.get("spatial_upscaler_model_path"):
 target_inference_device = "cuda"
 print(f"Target inference device: {target_inference_device}")
 pipeline_instance.to(target_inference_device)
-latent_upsampler_instance.to(target_inference_device)
+if latent_upsampler_instance: # Check if it was created before moving
+    latent_upsampler_instance.to(target_inference_device)
 
 @spaces.GPU
 def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath,
             height_ui, width_ui, mode,
-            ui_steps, num_frames_ui,
+            ui_steps, duration_ui, # << CHANGED from num_frames_ui
             ui_frames_to_use,
             seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
             progress=gr.Progress(track_tqdm=True)):
@@ -131,13 +104,33 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
         seed_ui = random.randint(0, 2**32 - 1)
     seed_everething(int(seed_ui))
 
+    # Convert duration_ui (seconds) to actual_num_frames (N*8+1 format)
+    target_frames_ideal = duration_ui * FPS
+    target_frames_rounded = round(target_frames_ideal)
+    if target_frames_rounded < 1: # ensure positive for calculation
+        target_frames_rounded = 1
+
+    # Calculate N for N*8+1, ensuring it's rounded to the nearest integer
+    # (target_frames_rounded - 1) could be float if target_frames_rounded is float
+    n_val = round((float(target_frames_rounded) - 1.0) / 8.0)
+    actual_num_frames = int(n_val * 8 + 1)
+
+    # Clamp to the allowed min (9) and max (MAX_NUM_FRAMES) N*8+1 values
+    actual_num_frames = max(9, actual_num_frames)
+    actual_num_frames = min(MAX_NUM_FRAMES, actual_num_frames)
+
     actual_height = int(height_ui)
     actual_width = int(width_ui)
-    actual_num_frames = int(num_frames_ui)
+    # actual_num_frames is now calculated above
 
     height_padded = ((actual_height - 1) // 32 + 1) * 32
     width_padded = ((actual_width - 1) // 32 + 1) * 32
-    num_frames_padded = ((actual_num_frames - 2) // 8 + 1) * 8 + 1
+    # This padding ensures the model gets a frame count that is N*8+1
+    # Since actual_num_frames is already N*8+1, this should preserve it.
+    num_frames_padded = ((actual_num_frames - 2) // 8 + 1) * 8 + 1
+    if num_frames_padded != actual_num_frames:
+        print(f"Warning: actual_num_frames ({actual_num_frames}) and num_frames_padded ({num_frames_padded}) differ. Using num_frames_padded for pipeline.")
+        # This case should ideally not happen if actual_num_frames is correctly N*8+1 and >= 9.
 
     padding_values = calculate_padding(actual_height, actual_width, height_padded, width_padded)
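
The block added above is the core of this commit: the UI now collects a duration in seconds and the handler converts it to the nearest frame count of the form N*8+1, clamped to the range [9, MAX_NUM_FRAMES]. A standalone sketch of that mapping for sanity-checking the rounding; the helper name is illustrative, while FPS and MAX_NUM_FRAMES mirror the constants defined earlier in app.py:

```python
FPS = 30.0            # mirrors the module-level constant added in this commit
MAX_NUM_FRAMES = 257  # mirrors the existing constant in app.py

def duration_to_num_frames(duration_s: float) -> int:
    """Map a duration in seconds to the nearest N*8+1 frame count, clamped to [9, 257]."""
    target_frames = max(1, round(duration_s * FPS))
    n_val = round((target_frames - 1) / 8.0)
    return max(9, min(MAX_NUM_FRAMES, int(n_val * 8 + 1)))

# Slider endpoints and default: 0.3 s -> 9 frames, 2.0 s -> 57 frames, 8.5 s -> 257 frames.
```
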
 
@@ -146,10 +139,10 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
         "negative_prompt": negative_prompt,
         "height": height_padded,
         "width": width_padded,
-        "num_frames": num_frames_padded,
-        "frame_rate": 30,
+        "num_frames": num_frames_padded, # Use the padded value for the model
+        "frame_rate": int(FPS),
         "generator": torch.Generator(device=target_inference_device).manual_seed(int(seed_ui)),
-        "output_type": "pt", # Crucial: pipeline will output [0,1] range tensors
+        "output_type": "pt",
         "conditioning_items": None,
         "media_items": None,
         "decode_timestep": PIPELINE_CONFIG_YAML["decode_timestep"],
@@ -191,14 +184,18 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
                 media_path=input_video_filepath,
                 height=actual_height,
                 width=actual_width,
-                max_frames=int(ui_frames_to_use),
+                max_frames=int(ui_frames_to_use), # This is from a separate slider for V2V
                 padding=padding_values
             ).to(target_inference_device)
         except Exception as e:
             print(f"Error loading video {input_video_filepath}: {e}")
             raise gr.Error(f"Could not load video: {e}")
 
-    print(f"Moving models to {target_inference_device} for inference...")
+    print(f"Moving models to {target_inference_device} for inference (if not already there)...")
+    # Models are moved globally once, no need to move per call unless strategy changes.
+    # pipeline_instance.to(target_inference_device)
+    # if latent_upsampler_instance:
+    #     latent_upsampler_instance.to(target_inference_device)
 
     active_latent_upsampler = None
     if improve_texture_flag and latent_upsampler_instance:
@@ -227,17 +224,18 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
             "second_pass": second_pass_args,
         })
 
-        print(f"Calling multi-scale pipeline (eff. HxW: {actual_height}x{actual_width}) on {target_inference_device}")
+        print(f"Calling multi-scale pipeline (eff. HxW: {actual_height}x{actual_width}, Frames: {actual_num_frames} -> Padded: {num_frames_padded}) on {target_inference_device}")
         result_images_tensor = multi_scale_pipeline_obj(**multi_scale_call_kwargs).images
     else:
         single_pass_call_kwargs = call_kwargs.copy()
         single_pass_call_kwargs["guidance_scale"] = float(ui_guidance_scale)
         single_pass_call_kwargs["num_inference_steps"] = int(ui_steps)
-        single_pass_call_kwargs.pop("first_pass", None)
+        # These keys might not exist if improve_texture_flag is false from the start of call_kwargs
+        single_pass_call_kwargs.pop("first_pass", None)
         single_pass_call_kwargs.pop("second_pass", None)
         single_pass_call_kwargs.pop("downscale_factor", None)
 
-        print(f"Calling base pipeline (padded HxW: {height_padded}x{width_padded}) on {target_inference_device}")
+        print(f"Calling base pipeline (padded HxW: {height_padded}x{width_padded}, Frames: {actual_num_frames} -> Padded: {num_frames_padded}) on {target_inference_device}")
         result_images_tensor = pipeline_instance(**single_pass_call_kwargs).images
 
     if result_images_tensor is None:
@@ -246,14 +244,14 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
     pad_left, pad_right, pad_top, pad_bottom = padding_values
     slice_h_end = -pad_bottom if pad_bottom > 0 else None
     slice_w_end = -pad_right if pad_right > 0 else None
+
+    # Crop to actual_num_frames, which is the desired output length
     result_images_tensor = result_images_tensor[
         :, :, :actual_num_frames, pad_top:slice_h_end, pad_left:slice_w_end
     ]
 
-    # The pipeline with output_type="pt" should return tensors in the [0, 1] range.
     video_np = result_images_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy()
 
-    # Clip to ensure values are indeed in [0, 1] before scaling to uint8
     video_np = np.clip(video_np, 0, 1)
     video_np = (video_np * 255).astype(np.uint8)
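
For context on the slicing above: the pipeline, called with output_type="pt", returns a tensor shaped (batch, channels, frames, height, width) in the [0, 1] range, and the permute moves it to frames-height-width-channels before the uint8 conversion. A tiny self-contained sketch with placeholder dimensions (the shapes here are illustrative, not the app's real resolution):

```python
import numpy as np
import torch

# Stand-in for result_images_tensor: (batch, channels, frames, height, width) in [0, 1].
result_images_tensor = torch.rand(1, 3, 9, 32, 32)

# Same conversion as above: drop the batch dim, move channels last, scale to uint8.
video_np = result_images_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy()
video_np = (np.clip(video_np, 0, 1) * 255).astype(np.uint8)
print(video_np.shape)  # (9, 32, 32, 3): frames, height, width, RGB channels
```
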
 
@@ -278,13 +276,13 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
         raise gr.Error(f"Failed to save video: {e2}")
 
     if isinstance(input_image_filepath, tempfile._TemporaryFileWrapper):
-        if os.path.exists(input_image_filepath.name): # Check if it's already closed by Gradio
+        if os.path.exists(input_image_filepath.name):
             try:
                 input_image_filepath.close()
                 os.remove(input_image_filepath.name)
-            except: pass # May already be closed/removed
+            except: pass
     elif input_image_filepath and os.path.exists(input_image_filepath) and input_image_filepath.startswith(tempfile.gettempdir()):
-        try: os.remove(input_image_filepath) # If Gradio passed a path to a temp file
+        try: os.remove(input_image_filepath)
         except: pass
 
     if isinstance(input_video_filepath, tempfile._TemporaryFileWrapper):
@@ -312,29 +310,35 @@ with gr.Blocks(css=css) as demo:
     gr.Markdown("Generates a short video based on text prompt, image, or existing video. Models are moved to GPU during generation and back to CPU afterwards to save VRAM.")
     with gr.Row():
         with gr.Column():
-            with gr.Group():
-                with gr.Tab("image-to-video") as image_tab:
-                    video_i_hidden = gr.Textbox(label="video_i", visible=False, value=None)
-                    image_i2v = gr.Image(label="Input Image", type="filepath", sources=["upload", "webcam"])
-                    i2v_prompt = gr.Textbox(label="Prompt", value="The creature from the image starts to move", lines=3)
-                    i2v_button = gr.Button("Generate Image-to-Video", variant="primary")
-                with gr.Tab("text-to-video") as text_tab:
-                    image_n_hidden = gr.Textbox(label="image_n", visible=False, value=None)
-                    video_n_hidden = gr.Textbox(label="video_n", visible=False, value=None)
-                    t2v_prompt = gr.Textbox(label="Prompt", value="A majestic dragon flying over a medieval castle", lines=3)
-                    t2v_button = gr.Button("Generate Text-to-Video", variant="primary")
-                with gr.Tab("video-to-video") as video_tab:
-                    image_v_hidden = gr.Textbox(label="image_v", visible=False, value=None)
-                    video_v2v = gr.Video(label="Input Video", sources=["upload", "webcam"])
-                    frames_to_use = gr.Slider(label="Frames to use from input video", minimum=9, maximum=MAX_NUM_FRAMES, value=9, step=8, info="Number of initial frames to use for conditioning/transformation. Must be N*8+1.")
-                    v2v_prompt = gr.Textbox(label="Prompt", value="Change the style to cinematic anime", lines=3)
-                    v2v_button = gr.Button("Generate Video-to-Video", variant="primary")
-
+            with gr.Tab("image-to-video") as image_tab:
+                video_i_hidden = gr.Textbox(label="video_i", visible=False, value=None)
+                image_i2v = gr.Image(label="Input Image", type="filepath", sources=["upload", "webcam"])
+                i2v_prompt = gr.Textbox(label="Prompt", value="The creature from the image starts to move", lines=3)
+                i2v_button = gr.Button("Generate Image-to-Video", variant="primary")
+            with gr.Tab("text-to-video") as text_tab:
+                image_n_hidden = gr.Textbox(label="image_n", visible=False, value=None)
+                video_n_hidden = gr.Textbox(label="video_n", visible=False, value=None)
+                t2v_prompt = gr.Textbox(label="Prompt", value="A majestic dragon flying over a medieval castle", lines=3)
+                t2v_button = gr.Button("Generate Text-to-Video", variant="primary")
+            with gr.Tab("video-to-video") as video_tab:
+                image_v_hidden = gr.Textbox(label="image_v", visible=False, value=None)
+                video_v2v = gr.Video(label="Input Video", sources=["upload", "webcam"])
+                frames_to_use = gr.Slider(label="Frames to use from input video", minimum=9, maximum=MAX_NUM_FRAMES, value=9, step=8, info="Number of initial frames to use for conditioning/transformation. Must be N*8+1.")
+                v2v_prompt = gr.Textbox(label="Prompt", value="Change the style to cinematic anime", lines=3)
+                v2v_button = gr.Button("Generate Video-to-Video", variant="primary")
+
+            duration_input = gr.Slider(
+                label="Duration (seconds)",
+                minimum=0.3,
+                maximum=8.5,
+                value=2,
+                step=0.1,
+                info=f"Target video duration (0.3s to 8.5s)"
+            )
             improve_texture = gr.Checkbox(label="Improve Texture (multi-scale)", value=True, info="Uses a two-pass generation for better quality, but is slower. Recommended for final output.")
 
         with gr.Column():
             output_video = gr.Video(label="Generated Video", interactive=False)
-            gr.Markdown("Note: Generation can take a few minutes depending on settings and hardware.")
 
     with gr.Accordion("Advanced settings", open=False):
         negative_prompt_input = gr.Textbox(label="Negative Prompt", value="worst quality, inconsistent motion, blurry, jittery, distorted", lines=2)
@@ -343,27 +347,26 @@ with gr.Blocks(css=css) as demo:
         randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=False)
         with gr.Row():
             guidance_scale_input = gr.Slider(label="Guidance Scale (CFG)", minimum=1.0, maximum=10.0, value=PIPELINE_CONFIG_YAML.get("first_pass", {}).get("guidance_scale", 1.0), step=0.1, info="Controls how much the prompt influences the output. Higher values = stronger influence.")
-            default_steps = len(PIPELINE_CONFIG_YAML.get("first_pass", {}).get("timesteps", [1]*7))
+            default_steps = len(PIPELINE_CONFIG_YAML.get("first_pass", {}).get("timesteps", [1]*7)) # Default to 7 if not found
             steps_input = gr.Slider(label="Inference Steps (for first pass if multi-scale)", minimum=1, maximum=30, value=default_steps, step=1, info="Number of denoising steps. More steps can improve quality but increase time. If YAML defines 'timesteps' for a pass, this UI value is ignored for that pass.")
-        with gr.Row():
-            num_frames_input = gr.Slider(label="Number of Frames to Generate", minimum=9, maximum=MAX_NUM_FRAMES, value=25, step=8, info="Total frames in the output video. Should be N*8+1 (e.g., 9, 17, 25...).")
         with gr.Row():
             height_input = gr.Slider(label="Height", value=512, step=32, minimum=256, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
             width_input = gr.Slider(label="Width", value=704, step=32, minimum=256, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
 
+    # --- UPDATED INPUT LISTS ---
     t2v_inputs = [t2v_prompt, negative_prompt_input, image_n_hidden, video_n_hidden,
                   height_input, width_input, gr.State("text-to-video"),
-                  steps_input, num_frames_input, gr.State(0),
+                  steps_input, duration_input, gr.State(0), # Replaced num_frames_input with duration_input
                   seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
 
     i2v_inputs = [i2v_prompt, negative_prompt_input, image_i2v, video_i_hidden,
                   height_input, width_input, gr.State("image-to-video"),
-                  steps_input, num_frames_input, gr.State(0),
+                  steps_input, duration_input, gr.State(0), # Replaced num_frames_input with duration_input
                   seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
 
     v2v_inputs = [v2v_prompt, negative_prompt_input, image_v_hidden, video_v2v,
                   height_input, width_input, gr.State("video-to-video"),
-                  steps_input, num_frames_input, frames_to_use,
+                  steps_input, duration_input, frames_to_use, # Replaced num_frames_input with duration_input
                   seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
 
     t2v_button.click(fn=generate, inputs=t2v_inputs, outputs=[output_video], api_name="text_to_video")
 