revise ui from Lightricks/ltx-video-distilled
app.py CHANGED

@@ -161,25 +161,28 @@ function refresh() {
 with gr.Blocks(css=css, theme=gr.themes.Ocean()) as demo:

     gr.Markdown("# LTX Video 0.9.7 Distilled")
+    gr.Markdown("Fast high quality video generation. [Model](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltxv-13b-0.9.7-distilled.safetensors) [GitHub](https://github.com/Lightricks/LTX-Video) [Diffusers](#)")
+
     with gr.Row():
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        with gr.Column():
+            with gr.Tab("image-to-video") as image_tab:
+                video_i_hidden = gr.Textbox(label="video_i", visible=False, value=None)
+                image_i2v = gr.Image(label="Input Image", type="filepath", sources=["upload", "webcam", "clipboard"])
+                i2v_prompt = gr.Textbox(label="Prompt", value="The creature from the image starts to move", lines=3)
+                i2v_button = gr.Button("Generate Image-to-Video", variant="primary")
+            with gr.Tab("text-to-video") as text_tab:
+                image_n_hidden = gr.Textbox(label="image_n", visible=False, value=None)
+                video_n_hidden = gr.Textbox(label="video_n", visible=False, value=None)
+                t2v_prompt = gr.Textbox(label="Prompt", value="A majestic dragon flying over a medieval castle", lines=3)
+                t2v_button = gr.Button("Generate Text-to-Video", variant="primary")
+            with gr.Tab("video-to-video", visible=False) as video_tab:
+                image_v_hidden = gr.Textbox(label="image_v", visible=False, value=None)
+                video_v2v = gr.Video(label="Input Video", sources=["upload", "webcam"])  # type defaults to filepath
+                frames_to_use = gr.Slider(label="Frames to use from input video", minimum=9, maximum=MAX_NUM_FRAMES, value=9, step=8, info="Number of initial frames to use for conditioning/transformation. Must be N*8+1.")
+                v2v_prompt = gr.Textbox(label="Prompt", value="Change the style to cinematic anime", lines=3)
+                v2v_button = gr.Button("Generate Video-to-Video", variant="primary")
+
+            duration_input = gr.Slider(
                 label="Video Duration (seconds)",
                 minimum=0.3,
                 maximum=8.5,
@@ -187,9 +190,11 @@ with gr.Blocks(css=css, theme=gr.themes.Ocean()) as demo:
                 step=0.1,
                 info=f"Target video duration (0.3s to 8.5s)"
             )
+            improve_texture = gr.Checkbox(label="Improve Texture (multi-scale)", value=True, info="Uses a two-pass generation for better quality, but is slower. Recommended for final output.")

-
-
+        with gr.Column():
+            output_video = gr.Video(label="Generated Video", interactive=False)
+            gr.DeepLinkButton()


     with gr.Accordion("Advanced settings", open=False):
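A note on the new `frames_to_use` slider in the video-to-video tab: its info text requires frame counts of the form N*8+1, which the slider itself enforces by starting at 9 and stepping by 8 (9, 17, 25, …). If a frame count ever comes from somewhere other than the slider, the arithmetic looks like the sketch below; the helper name is illustrative and not part of the Space's code.

```python
# Illustrative helper (not in app.py): snap an arbitrary frame count
# down to the N*8+1 grid expected by the video-to-video conditioning.
def snap_to_valid_frames(n: int, minimum: int = 9) -> int:
    snapped = ((max(int(n), minimum) - 1) // 8) * 8 + 1
    return max(snapped, minimum)

assert snap_to_valid_frames(9) == 9    # already valid
assert snap_to_valid_frames(30) == 25  # rounded down to the nearest valid count
assert snap_to_valid_frames(33) == 33  # 33 = 4*8 + 1
```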
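This hunk only changes the layout; the click handlers that connect `i2v_button`, `t2v_button`, and `v2v_button` to the generation pipeline live elsewhere in app.py and are not shown. For orientation, here is a minimal self-contained sketch of the usual Gradio pattern, with a placeholder `generate` function standing in for the real pipeline call (the handler name, its signature, and the omitted slider default are assumptions).

```python
import gradio as gr

def generate(prompt, image_path, duration):
    # Placeholder handler: the real Space would run the LTX-Video distilled
    # pipeline here and return the path of the rendered video file.
    print(f"prompt={prompt!r}, image={image_path}, duration={duration}s")
    return None

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            with gr.Tab("image-to-video"):
                image_i2v = gr.Image(label="Input Image", type="filepath")
                i2v_prompt = gr.Textbox(label="Prompt", lines=3)
                i2v_button = gr.Button("Generate Image-to-Video", variant="primary")
            duration_input = gr.Slider(label="Video Duration (seconds)",
                                       minimum=0.3, maximum=8.5, step=0.1)
        with gr.Column():
            output_video = gr.Video(label="Generated Video", interactive=False)

    # Wiring: the button's click event calls the handler and routes its
    # return value into the output video component.
    i2v_button.click(fn=generate,
                     inputs=[i2v_prompt, image_i2v, duration_input],
                     outputs=[output_video])

if __name__ == "__main__":
    demo.launch()
```

In the actual Space, the hidden textboxes added in this commit (`video_i_hidden`, `image_n_hidden`, `video_n_hidden`, `image_v_hidden`) presumably let a shared handler tell the three tabs apart, but that wiring is outside this diff.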