Upload 5 files

- README.md +11 -18
- app.py +484 -834
- app_endframe.py +892 -892
- app_v2v.py +746 -746
- requirements.txt +21 -46
README.md
CHANGED
@@ -1,21 +1,14 @@
 ---
-title:
+title: FramePack F1 + V2V + EF
+emoji: 👽
+colorFrom: pink
+colorTo: gray
 sdk: gradio
-sdk_version: 4.38.1
+sdk_version: 5.29.0
 app_file: app.py
-  - Image-to-Image
-  - Image-2-Image
-  - Img-to-Img
-  - Img-2-Img
-  - language models
-  - LLMs
-short_description: Restore blurred or small images with prompt
-suggested_hardware: zero-a10g
+pinned: true
+license: apache-2.0
+short_description: fast video generation from images & text
 ---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
CHANGED
@@ -1,864 +1,514 @@
 import os
 import gradio as gr
-import argparse
-import numpy as np
 import torch
 import einops
-import
 import math
-import time
-import random
 import spaces
-import re
-import uuid
 
-from gradio_imageslider import ImageSlider
 from PIL import Image
-from
-from
-from
 try:
-    if
-        rotation,
-        denoise_image,
-        prompt,
-        a_prompt,
-        n_prompt,
-        num_samples,
-        min_size,
-        downscale,
-        upscale,
-        edm_steps,
-        s_stage1,
-        s_stage2,
-        s_cfg,
-        randomize_seed,
-        seed,
-        s_churn,
-        s_noise,
-        color_fix_type,
-        diff_dtype,
-        ae_dtype,
-        gamma_correction,
-        linear_CFG,
-        linear_s_stage2,
-        spt_linear_CFG,
-        spt_linear_s_stage2,
-        model_select,
-        output_format,
-        allocation
-):
-    print("noisy_image:\n" + str(noisy_image))
-    print("denoise_image:\n" + str(denoise_image))
-    print("rotation: " + str(rotation))
-    print("prompt: " + str(prompt))
-    print("a_prompt: " + str(a_prompt))
-    print("n_prompt: " + str(n_prompt))
-    print("num_samples: " + str(num_samples))
-    print("min_size: " + str(min_size))
-    print("downscale: " + str(downscale))
-    print("upscale: " + str(upscale))
-    print("edm_steps: " + str(edm_steps))
-    print("s_stage1: " + str(s_stage1))
-    print("s_stage2: " + str(s_stage2))
-    print("s_cfg: " + str(s_cfg))
-    print("randomize_seed: " + str(randomize_seed))
-    print("seed: " + str(seed))
-    print("s_churn: " + str(s_churn))
-    print("s_noise: " + str(s_noise))
-    print("color_fix_type: " + str(color_fix_type))
-    print("diff_dtype: " + str(diff_dtype))
-    print("ae_dtype: " + str(ae_dtype))
-    print("gamma_correction: " + str(gamma_correction))
-    print("linear_CFG: " + str(linear_CFG))
-    print("linear_s_stage2: " + str(linear_s_stage2))
-    print("spt_linear_CFG: " + str(spt_linear_CFG))
-    print("spt_linear_s_stage2: " + str(spt_linear_s_stage2))
-    print("model_select: " + str(model_select))
-    print("GPU time allocation: " + str(allocation) + " min")
-    print("output_format: " + str(output_format))
-
-    input_format = re.sub(r"^.*\.([^\.]+)$", r"\1", noisy_image)
-
-    if input_format not in ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp', 'heic']:
-        gr.Warning('Invalid image format. Please first convert into *.png, *.webp, *.jpg, *.jpeg, *.gif, *.bmp or *.heic.')
-        return None, None, None, None
-
-    if output_format == "input":
-        if noisy_image is None:
-            output_format = "png"
         else:
-    print("final output_format: " + str(output_format))
 
-        a_prompt = ""
 
-        n_prompt = ""
 
-def
 
 @spaces.GPU(duration=get_duration)
-def
-    seed,
-    s_churn,
-    s_noise,
-    color_fix_type,
-    diff_dtype,
-    ae_dtype,
-    gamma_correction,
-    linear_CFG,
-    linear_s_stage2,
-    spt_linear_CFG,
-    spt_linear_s_stage2,
-    model_select,
-    output_format,
-    allocation
-):
-    start = time.time()
-    print('restore ==>>')
-
-    torch.cuda.set_device(SUPIR_device)
-
-    with torch.no_grad():
-        input_image = upscale_image(input_image, upscale, unit_resolution=32, min_size=min_size)
-        LQ = np.array(input_image) / 255.0
-        LQ = np.power(LQ, gamma_correction)
-        LQ *= 255.0
-        LQ = LQ.round().clip(0, 255).astype(np.uint8)
-        LQ = LQ / 255 * 2 - 1
-        LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
-        captions = ['']
-
-        samples = model.batchify_sample(LQ, captions, num_steps=edm_steps, restoration_scale=s_stage1, s_churn=s_churn,
-                                        s_noise=s_noise, cfg_scale=s_cfg, control_scale=s_stage2, seed=seed,
-                                        num_samples=num_samples, p_p=a_prompt, n_p=n_prompt, color_fix_type=color_fix_type,
-                                        use_linear_CFG=linear_CFG, use_linear_control_scale=linear_s_stage2,
-                                        cfg_scale_start=spt_linear_CFG, control_scale_start=spt_linear_s_stage2)
-
-        x_samples = (einops.rearrange(samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().round().clip(
-            0, 255).astype(np.uint8)
-        results = [x_samples[i] for i in range(num_samples)]
-        torch.cuda.empty_cache()
-
-        # All the results have the same size
-        input_height, input_width, input_channel = np.array(input_image).shape
-        result_height, result_width, result_channel = np.array(results[0]).shape
-
-    print('<<== restore')
-    end = time.time()
-    secondes = int(end - start)
-    minutes = math.floor(secondes / 60)
-    secondes = secondes - (minutes * 60)
-    hours = math.floor(minutes / 60)
-    minutes = minutes - (hours * 60)
-    information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
-        "If you don't get the image you wanted, add more details in the « Image description ». " + \
-        "Wait " + str(allocation) + " min before a new run to avoid quota penalty or use another computer. " + \
-        "The image" + (" has" if len(results) == 1 else "s have") + " been generated in " + \
-        ((str(hours) + " h, ") if hours != 0 else "") + \
-        ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
-        str(secondes) + " sec. " + \
-        "The new image resolution is " + str(result_width) + \
-        " pixels large and " + str(result_height) + \
-        " pixels high, so a resolution of " + f'{result_width * result_height:,}' + " pixels."
-    print(information)
-    try:
-        print("Initial resolution: " + f'{input_width * input_height:,}')
-        print("Final resolution: " + f'{result_width * result_height:,}')
-        print("edm_steps: " + str(edm_steps))
-        print("num_samples: " + str(num_samples))
-        print("downscale: " + str(downscale))
-        print("Estimated minutes: " + f'{(((result_width * result_height**(1/1.75)) * input_width * input_height * (edm_steps**(1/2)) * (num_samples**(1/2.5)))**(1/2.5)) / 25000:,}')
-    except Exception as e:
-        print('Exception of Estimation')
-
-    # Only one image can be shown in the slider
-    return [noisy_image] + [results[0]], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = results), gr.update(value = information, visible = True), gr.update(visible=True)
-
-def load_and_reset(param_setting):
-    print('load_and_reset ==>>')
-    if torch.cuda.device_count() == 0:
-        gr.Warning('Set this space to GPU config to make it work.')
-        return None, None, None, None, None, None, None, None, None, None, None, None, None, None
-    edm_steps = default_setting.edm_steps
-    s_stage2 = 1.0
-    s_stage1 = -1.0
-    s_churn = 5
-    s_noise = 1.003
-    a_prompt = 'Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - ' \
-        'realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore ' \
-        'detailing, hyper sharpness, perfect without deformations.'
-    n_prompt = 'painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, ' \
-        '3D render, unreal engine, blurring, dirty, messy, worst quality, low quality, frames, watermark, ' \
-        'signature, jpeg artifacts, deformed, lowres, over-smooth'
-    color_fix_type = 'Wavelet'
-    spt_linear_s_stage2 = 0.0
-    linear_s_stage2 = False
-    linear_CFG = True
-    if param_setting == "Quality":
-        s_cfg = default_setting.s_cfg_Quality
-        spt_linear_CFG = default_setting.spt_linear_CFG_Quality
-        model_select = "v0-Q"
-    elif param_setting == "Fidelity":
-        s_cfg = default_setting.s_cfg_Fidelity
-        spt_linear_CFG = default_setting.spt_linear_CFG_Fidelity
-        model_select = "v0-F"
-    else:
-        raise NotImplementedError
-    gr.Info('The parameters are reset.')
-    print('<<== load_and_reset')
-    return edm_steps, s_cfg, s_stage2, s_stage1, s_churn, s_noise, a_prompt, n_prompt, color_fix_type, linear_CFG, \
-        linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select
-
-def log_information(result_gallery):
-    print('log_information')
-    if result_gallery is not None:
-        for i, result in enumerate(result_gallery):
-            print(result[0])
-
-def on_select_result(result_slider, result_gallery, evt: gr.SelectData):
-    print('on_select_result')
-    if result_gallery is not None:
-        for i, result in enumerate(result_gallery):
-            print(result[0])
-    return [result_slider[0], result_gallery[evt.index][0]]
-
-title_html = """
-<h1><center>SUPIR</center></h1>
-<big><center>Upscale your images up to x10 freely, without account, without watermark and download it</center></big>
-<center><big><big>🤸<big><big><big><big><big><big>🤸</big></big></big></big></big></big></big></big></center>
 
-# Gradio interface
-with gr.Blocks() as interface:
-    if torch.cuda.device_count() == 0:
-        with gr.Row():
-            gr.HTML("""
-            <p style="background-color: red;"><big><big><big><b>⚠️To use SUPIR, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>
             """)
-    gr.
-                with gr.Column():
-                    linear_CFG = gr.Checkbox(label="Linear CFG", value=True)
-                    spt_linear_CFG = gr.Slider(label="CFG Start", minimum=1.0,
-                                               maximum=9.0, value=default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.5)
-                with gr.Column():
-                    linear_s_stage2 = gr.Checkbox(label="Linear Restoring Guidance", value=False)
-                    spt_linear_s_stage2 = gr.Slider(label="Guidance Start", minimum=0.,
-                                                    maximum=1., value=0., step=0.05)
-                with gr.Column():
-                    diff_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["fp16 (medium)", "fp16"], ["bf16 (speed)", "bf16"]], label="Diffusion Data Type", value="fp32",
-                                          interactive=True)
-                with gr.Column():
-                    ae_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["bf16 (speed)", "bf16"]], label="Auto-Encoder Data Type", value="fp32",
-                                        interactive=True)
-            randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
-            seed = gr.Slider(label="Seed", minimum=0, maximum=max_64_bit_int, step=1, randomize=True)
-            with gr.Group():
-                param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="Presetting", value = "Quality")
-                restart_button = gr.Button(value="Apply presetting")
-
-        with gr.Column():
-            diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id = "process_button")
-            reset_btn = gr.Button(value="🧹 Reinit page", variant="stop", elem_id="reset_button", visible = False)
-
-    warning = gr.HTML(value = "<center><big>Your computer must <u>not</u> enter into standby mode.</big><br/>On Chrome, you can force to keep a tab alive in <code>chrome://discards/</code></center>", visible = False)
-    restore_information = gr.HTML(value = "Restart the process to get another result.", visible = False)
-    result_slider = ImageSlider(label = 'Comparator', show_label = False, interactive = False, elem_id = "slider1", show_download_button = False)
-    result_gallery = gr.Gallery(label = 'Downloadable results', show_label = True, interactive = False, elem_id = "gallery1")
 
     gr.Examples(
         examples = [
             [
-                "./
-                "
-                1,
-                False,
-                42,
-                5,
-                1.003,
-                "AdaIn",
-                "fp16",
-                "bf16",
-                1.0,
-                True,
-                4,
-                False,
-                0.,
-                "v0-Q",
-                "input",
-                179
-            ],
-            [
-                "./Examples/Example2.jpeg",
-                0,
-                None,
-                "La cabeza de un gato atigrado, en una casa, fotorrealista, 8k, extremadamente detallada",
-                "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
-                "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
-                1,
-                1024,
-                1,
-                1,
-                200,
-                -1,
-                1,
-                7.5,
-                False,
-                42,
-                5,
-                1.003,
-                "Wavelet",
-                "fp16",
-                "bf16",
-                1.0,
-                True,
-                4,
-                False,
-                0.,
-                "v0-Q",
-                "input",
-                179
-            ],
-            [
-                "./Examples/Example3.webp",
-                0,
-                None,
-                "A red apple",
-                "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
-                "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
-                1,
-                1024,
-                1,
-                1,
-                200,
-                -1,
-                1,
-                7.5,
-                False,
-                42,
-                5,
-                1.003,
-                "Wavelet",
-                "fp16",
-                "bf16",
-                1.0,
-                True,
-                4,
-                False,
-                0.,
-                "v0-Q",
-                "input",
-                179
-            ],
-            [
-                "./Examples/Example3.webp",
-                0,
-                None,
-                "A red marble",
-                "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
-                "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
-                1,
-                1024,
-                1,
-                1,
-                200,
-                -1,
-                1,
-                7.5,
-                False,
-                42,
-                5,
-                1.003,
-                "Wavelet",
-                "fp16",
-                "bf16",
-                1.0,
-                True,
-                4,
-                False,
-                0.,
-                "v0-Q",
-                "input",
-                179
            ],
        ],
        run_on_click = True,
-        fn =
-        inputs =
-            denoise_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            min_size,
-            downscale,
-            upscale,
-            edm_steps,
-            s_stage1,
-            s_stage2,
-            s_cfg,
-            randomize_seed,
-            seed,
-            s_churn,
-            s_noise,
-            color_fix_type,
-            diff_dtype,
-            ae_dtype,
-            gamma_correction,
-            linear_CFG,
-            linear_s_stage2,
-            spt_linear_CFG,
-            spt_linear_s_stage2,
-            model_select,
-            output_format,
-            allocation
-        ],
-        outputs = [
-            result_slider,
-            result_gallery,
-            restore_information,
-            reset_btn
-        ],
-        cache_examples = False,
    )
 
-    input_image.upload(fn = check_upload, inputs = [
-        input_image
-    ], outputs = [
-        rotation
-    ], queue = False, show_progress = False)
-
-    denoise_button.click(fn = check_and_update, inputs = [
-        input_image
-    ], outputs = [warning], queue = False, show_progress = False).success(fn = stage1_process, inputs = [
-        input_image,
-        gamma_correction,
-        diff_dtype,
-        ae_dtype
-    ], outputs=[
-        denoise_image,
-        denoise_information
-    ])
-
-    diffusion_button.click(fn = update_seed, inputs = [
-        randomize_seed,
-        seed
-    ], outputs = [
-        seed
-    ], queue = False, show_progress = False).then(fn = check_and_update, inputs = [
-        input_image
-    ], outputs = [warning], queue = False, show_progress = False).success(fn=stage2_process, inputs = [
-        input_image,
-        rotation,
-        denoise_image,
-        prompt,
-        a_prompt,
-        n_prompt,
-        num_samples,
-        min_size,
-        downscale,
-        upscale,
-        edm_steps,
-        s_stage1,
-        s_stage2,
-        s_cfg,
-        randomize_seed,
-        seed,
-        s_churn,
-        s_noise,
-        color_fix_type,
-        diff_dtype,
-        ae_dtype,
-        gamma_correction,
-        linear_CFG,
-        linear_s_stage2,
-        spt_linear_CFG,
-        spt_linear_s_stage2,
-        model_select,
-        output_format,
-        allocation
-    ], outputs = [
-        result_slider,
-        result_gallery,
-        restore_information,
-        reset_btn
-    ]).success(fn = log_information, inputs = [
-        result_gallery
-    ], outputs = [], queue = False, show_progress = False)
-
-    result_gallery.change(on_select_result, [result_slider, result_gallery], result_slider)
-    result_gallery.select(on_select_result, [result_slider, result_gallery], result_slider)
-
-    restart_button.click(fn = load_and_reset, inputs = [
-        param_setting
-    ], outputs = [
-        edm_steps,
-        s_cfg,
-        s_stage2,
-        s_stage1,
-        s_churn,
-        s_noise,
-        a_prompt,
-        n_prompt,
-        color_fix_type,
-        linear_CFG,
-        linear_s_stage2,
-        spt_linear_CFG,
-        spt_linear_s_stage2,
-        model_select
-    ])
-
-    reset_btn.click(fn = reset, inputs = [], outputs = [
-        input_image,
-        rotation,
-        denoise_image,
-        prompt,
-        a_prompt,
-        n_prompt,
-        num_samples,
-        min_size,
-        downscale,
-        upscale,
-        edm_steps,
-        s_stage1,
-        s_stage2,
-        s_cfg,
-        randomize_seed,
-        seed,
-        s_churn,
-        s_noise,
-        color_fix_type,
-        diff_dtype,
-        ae_dtype,
-        gamma_correction,
-        linear_CFG,
-        linear_s_stage2,
-        spt_linear_CFG,
-        spt_linear_s_stage2,
-        model_select,
-        output_format,
-        allocation
-    ], queue = False, show_progress = False)
-
-interface.queue(10).launch()
+from diffusers_helper.hf_login import login
+
 import os
+
+os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
+
 import gradio as gr
 import torch
+import traceback
 import einops
+import safetensors.torch as sf
+import numpy as np
 import math
 import spaces
 
 from PIL import Image
+from diffusers import AutoencoderKLHunyuanVideo
+from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
+from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
+from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
+from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
+from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
+from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
+from diffusers_helper.thread_utils import AsyncStream, async_run
+from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
+from transformers import SiglipImageProcessor, SiglipVisionModel
+from diffusers_helper.clip_vision import hf_clip_vision_encode
+from diffusers_helper.bucket_tools import find_nearest_bucket
+
+
+free_mem_gb = get_cuda_free_memory_gb(gpu)
+high_vram = free_mem_gb > 80
+
+print(f'Free VRAM {free_mem_gb} GB')
+print(f'High-VRAM Mode: {high_vram}')
+
+text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
+text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
+tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
+tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
+vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()
+
+feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
+image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
+
+# quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
+# transformer = HunyuanVideoTransformer3DModelPacked.from_single_file("https://huggingface.co/sirolim/FramePack_F1_I2V_FP8/resolve/main/FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", torch_dtype=torch.bfloat16)
+# transformer = HunyuanVideoTransformer3DModelPacked.from_single_file('sirolim/FramePack_F1_I2V_FP8', "FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", use_safetensors=True, torch_dtype=torch.bfloat16).cpu()
+transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()
+
+vae.eval()
+text_encoder.eval()
+text_encoder_2.eval()
+image_encoder.eval()
+transformer.eval()
+
+if not high_vram:
+    vae.enable_slicing()
+    vae.enable_tiling()
+
+transformer.high_quality_fp32_output_for_inference = True
+print('transformer.high_quality_fp32_output_for_inference = True')
+
+transformer.to(dtype=torch.bfloat16)
+vae.to(dtype=torch.float16)
+image_encoder.to(dtype=torch.float16)
+text_encoder.to(dtype=torch.float16)
+text_encoder_2.to(dtype=torch.float16)
+
+vae.requires_grad_(False)
+text_encoder.requires_grad_(False)
+text_encoder_2.requires_grad_(False)
+image_encoder.requires_grad_(False)
+transformer.requires_grad_(False)
+
+if not high_vram:
+    # DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
+    DynamicSwapInstaller.install_model(transformer, device=gpu)
+    DynamicSwapInstaller.install_model(text_encoder, device=gpu)
+else:
+    text_encoder.to(gpu)
+    text_encoder_2.to(gpu)
+    image_encoder.to(gpu)
+    vae.to(gpu)
+    transformer.to(gpu)
+
+stream = AsyncStream()
+
+outputs_folder = './outputs/'
+os.makedirs(outputs_folder, exist_ok=True)
+
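The added code decides between full GPU residency and per-module offloading from the free VRAM measured at startup. For reference, a minimal sketch of that same check using only plain PyTorch (the Space itself uses the `get_cuda_free_memory_gb` helper shown above, and the 80 GB threshold mirrors its value):

```python
import torch

def free_cuda_memory_gb(device: int = 0) -> float:
    # torch.cuda.mem_get_info returns (free_bytes, total_bytes) for the device.
    free_bytes, _total_bytes = torch.cuda.mem_get_info(device)
    return free_bytes / (1024 ** 3)

if torch.cuda.is_available():
    high_vram = free_cuda_memory_gb() > 80  # same threshold as the code above
    print(f'High-VRAM Mode: {high_vram}')
```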
+examples = [
+    ["img_examples/1.png", "The girl dances gracefully, with clear movements, full of charm.",],
+    ["img_examples/2.jpg", "The man dances flamboyantly, swinging his hips and striking bold poses with dramatic flair."],
+    ["img_examples/3.png", "The woman dances elegantly among the blossoms, spinning slowly with flowing sleeves and graceful hand movements."],
+]
+
+def generate_examples(input_image, prompt):
+
+    t2v=False
+    n_prompt=""
+    seed=31337
+    total_second_length=5
+    latent_window_size=9
+    steps=25
+    cfg=1.0
+    gs=10.0
+    rs=0.0
+    gpu_memory_preservation=6
+    use_teacache=True
+    mp4_crf=16
+
+    global stream
+
+    # assert input_image is not None, 'No input image!'
+    if t2v:
+        default_height, default_width = 640, 640
+        input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
+        print("No input image provided. Using a blank white image.")
+
+    yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
+
+    stream = AsyncStream()
+
+    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
+
+    output_filename = None
+
+    while True:
+        flag, data = stream.output_queue.next()
+
+        if flag == 'file':
+            output_filename = data
+            yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
+
+        if flag == 'progress':
+            preview, desc, html = data
+            yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
+
+        if flag == 'end':
+            yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
+            break
+
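Both `generate_examples` above and `process` further below consume the background job the same way: `async_run` starts `worker` on another thread, the worker pushes `('progress' | 'file' | 'end', payload)` tuples into `stream.output_queue`, and the Gradio generator polls `output_queue.next()` and re-yields UI updates. A standalone sketch of that producer/consumer idea using only the standard library (the names here are illustrative and not part of `diffusers_helper`):

```python
import queue
import threading

def produce(out_q: queue.Queue) -> None:
    # Background job: report progress, then a result file, then completion.
    for step in range(3):
        out_q.put(('progress', f'step {step + 1}/3'))
    out_q.put(('file', 'outputs/result.mp4'))
    out_q.put(('end', None))

def consume():
    # Foreground generator: mirrors the flag-dispatch loop used in this app.
    out_q: queue.Queue = queue.Queue()
    threading.Thread(target=produce, args=(out_q,), daemon=True).start()
    while True:
        flag, data = out_q.get()  # like stream.output_queue.next()
        if flag == 'progress':
            yield f'progress: {data}'
        if flag == 'file':
            yield f'file ready: {data}'
        if flag == 'end':
            yield 'done'
            break

for update in consume():
    print(update)
```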
+@torch.no_grad()
+def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
+    total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
+    total_latent_sections = int(max(round(total_latent_sections), 1))
+
+    job_id = generate_timestamp()
+
+    stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
+
     try:
+        # Clean GPU
+        if not high_vram:
+            unload_complete_models(
+                text_encoder, text_encoder_2, image_encoder, vae, transformer
+            )
+
+        # Text encoding
+
+        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
+
+        if not high_vram:
+            fake_diffusers_current_device(text_encoder, gpu)  # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
+            load_model_as_complete(text_encoder_2, target_device=gpu)
+
+        llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
+
+        if cfg == 1:
+            llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
         else:
+            llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
 
+        llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
+        llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
 
+        # Processing input image
 
+        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))
 
+        H, W, C = input_image.shape
+        height, width = find_nearest_bucket(H, W, resolution=640)
+        input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)
+
+        Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
+
+        input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
+        input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]
+
+        # VAE encoding
+
+        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))
+
+        if not high_vram:
+            load_model_as_complete(vae, target_device=gpu)
+
+        start_latent = vae_encode(input_image_pt, vae)
+
+        # CLIP Vision
+
+        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
+
+        if not high_vram:
+            load_model_as_complete(image_encoder, target_device=gpu)
+
+        image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
+        image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
+
+        # Dtype
+
+        llama_vec = llama_vec.to(transformer.dtype)
+        llama_vec_n = llama_vec_n.to(transformer.dtype)
+        clip_l_pooler = clip_l_pooler.to(transformer.dtype)
+        clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
+        image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
+
+        # Sampling
+
+        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
+
+        rnd = torch.Generator("cpu").manual_seed(seed)
+
+        history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
+        history_pixels = None
+
+        history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
+        total_generated_latent_frames = 1
+
+        for section_index in range(total_latent_sections):
+            if stream.input_queue.top() == 'end':
+                stream.output_queue.push(('end', None))
+                return
+
+            print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
+
+            if not high_vram:
+                unload_complete_models()
+                move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
+
+            if use_teacache:
+                transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
+            else:
+                transformer.initialize_teacache(enable_teacache=False)
 
+            def callback(d):
+                preview = d['denoised']
+                preview = vae_decode_fake(preview)
+
+                preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
+                preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
+
+                if stream.input_queue.top() == 'end':
+                    stream.output_queue.push(('end', None))
+                    raise KeyboardInterrupt('User ends the task.')
+
+                current_step = d['i'] + 1
+                percentage = int(100.0 * current_step / steps)
+                hint = f'Sampling {current_step}/{steps}'
+                desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'
+                stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
+                return
+
+            indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
+            clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
+            clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
+
+            clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
+            clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
+
+            generated_latents = sample_hunyuan(
+                transformer=transformer,
+                sampler='unipc',
+                width=width,
+                height=height,
+                frames=latent_window_size * 4 - 3,
+                real_guidance_scale=cfg,
+                distilled_guidance_scale=gs,
+                guidance_rescale=rs,
+                # shift=3.0,
+                num_inference_steps=steps,
+                generator=rnd,
+                prompt_embeds=llama_vec,
+                prompt_embeds_mask=llama_attention_mask,
+                prompt_poolers=clip_l_pooler,
+                negative_prompt_embeds=llama_vec_n,
+                negative_prompt_embeds_mask=llama_attention_mask_n,
+                negative_prompt_poolers=clip_l_pooler_n,
+                device=gpu,
+                dtype=torch.bfloat16,
+                image_embeddings=image_encoder_last_hidden_state,
+                latent_indices=latent_indices,
+                clean_latents=clean_latents,
+                clean_latent_indices=clean_latent_indices,
+                clean_latents_2x=clean_latents_2x,
+                clean_latent_2x_indices=clean_latent_2x_indices,
+                clean_latents_4x=clean_latents_4x,
+                clean_latent_4x_indices=clean_latent_4x_indices,
+                callback=callback,
+            )
+
+            total_generated_latent_frames += int(generated_latents.shape[2])
+            history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
+
+            if not high_vram:
+                offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
+                load_model_as_complete(vae, target_device=gpu)
+
+            real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
+
+            if history_pixels is None:
+                history_pixels = vae_decode(real_history_latents, vae).cpu()
+            else:
+                section_latent_frames = latent_window_size * 2
+                overlapped_frames = latent_window_size * 4 - 3
+
+                current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
+                history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
+
+            if not high_vram:
+                unload_complete_models()
+
+            output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
+
+            save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
+
+            print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
+
+            stream.output_queue.push(('file', output_filename))
+    except:
+        traceback.print_exc()
+
+        if not high_vram:
+            unload_complete_models(
+                text_encoder, text_encoder_2, image_encoder, vae, transformer
+            )
+
+    stream.output_queue.push(('end', None))
+    return
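The loop in `worker` grows the clip section by section: each section adds `latent_window_size * 4 - 3` pixel frames at 30 fps, and the number of sections is rounded from `(total_second_length * 30) / (latent_window_size * 4)`. A quick check with the defaults used in this app (the exact final length also depends on the one-frame bookkeeping around the start latent, so this is only an approximation):

```python
total_second_length = 5   # seconds requested (default above)
latent_window_size = 9    # default above
fps = 30

total_latent_sections = max(round((total_second_length * fps) / (latent_window_size * 4)), 1)
frames_per_section = latent_window_size * 4 - 3

print(total_latent_sections)   # 150 / 36 = 4.17 -> 4 sections
print(frames_per_section)      # 33 new pixel frames per section
print(total_latent_sections * frames_per_section / fps)  # roughly 4.4 s of video
```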
+
+def get_duration(input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
+    return total_second_length * 60
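`get_duration` is passed as the `duration` argument of `@spaces.GPU` below, so the ZeroGPU allocation is computed per call from the same arguments the decorated function receives: here simply 60 GPU-seconds per requested second of video. A minimal sketch of the same wiring (function names are illustrative; the pattern mirrors this file and assumes ZeroGPU accepts a callable `duration` exactly as used here):

```python
import spaces

def my_duration(prompt, total_second_length):
    # Called with the same arguments as the decorated function.
    return total_second_length * 60

@spaces.GPU(duration=my_duration)
def my_process(prompt, total_second_length):
    # heavy GPU work goes here
    return f'{total_second_length}s clip for: {prompt}'
```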
 
 @spaces.GPU(duration=get_duration)
+def process(input_image, prompt,
+            t2v=False,
+            n_prompt="",
+            seed=31337,
+            total_second_length=5,
+            latent_window_size=9,
+            steps=25,
+            cfg=1.0,
+            gs=10.0,
+            rs=0.0,
+            gpu_memory_preservation=6,
+            use_teacache=True,
+            mp4_crf=16
+            ):
+    global stream
 
+    # assert input_image is not None, 'No input image!'
+    if t2v:
+        default_height, default_width = 640, 640
+        input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
+        print("No input image provided. Using a blank white image.")
+    else:
+        composite_rgba_uint8 = input_image["composite"]
+
+        # rgb_uint8 will be (H, W, 3), dtype uint8
+        rgb_uint8 = composite_rgba_uint8[:, :, :3]
+        # mask_uint8 will be (H, W), dtype uint8
+        mask_uint8 = composite_rgba_uint8[:, :, 3]
 
+        # Create background
+        h, w = rgb_uint8.shape[:2]
+        # White background, (H, W, 3), dtype uint8
+        background_uint8 = np.full((h, w, 3), 255, dtype=np.uint8)
+
+        # Normalize mask to range [0.0, 1.0].
+        alpha_normalized_float32 = mask_uint8.astype(np.float32) / 255.0
+
+        # Expand alpha to 3 channels to match RGB images for broadcasting.
+        # alpha_mask_float32 will have shape (H, W, 3)
+        alpha_mask_float32 = np.stack([alpha_normalized_float32] * 3, axis=2)
+
+        # alpha blending
+        blended_image_float32 = rgb_uint8.astype(np.float32) * alpha_mask_float32 + \
+                                background_uint8.astype(np.float32) * (1.0 - alpha_mask_float32)
+
+        input_image = np.clip(blended_image_float32, 0, 255).astype(np.uint8)
 
+    yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
+
+    stream = AsyncStream()
+
+    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
+
+    output_filename = None
+
+    while True:
+        flag, data = stream.output_queue.next()
+
+        if flag == 'file':
+            output_filename = data
+            yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
+
+        if flag == 'progress':
+            preview, desc, html = data
+            yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
+
+        if flag == 'end':
+            yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
+            break
+
+
+def end_process():
+    stream.input_queue.push('end')
+
+
+quick_prompts = [
+    'The girl dances gracefully, with clear movements, full of charm.',
+    'A character doing some simple body movements.',
+]
+quick_prompts = [[x] for x in quick_prompts]
+
+
+css = make_progress_bar_css()
+block = gr.Blocks(css=css).queue()
+with block:
+    gr.Markdown('# FramePack Essentials | Experimentation in Progress')
+    gr.Markdown(f"""### Space is constantly being tinkered with, expect downtime and errors.
     """)
+    with gr.Row():
+        with gr.Column():
+            input_image = gr.ImageEditor(type="numpy", label="Image", height=320, brush=gr.Brush(colors=["#ffffff"]))
+            prompt = gr.Textbox(label="Prompt", value='')
+            t2v = gr.Checkbox(label="do text-to-video", value=False)
+            example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
+            example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)
+
+            with gr.Row():
+                start_button = gr.Button(value="Start Generation")
+                end_button = gr.Button(value="End Generation", interactive=False)
+
+            total_second_length = gr.Slider(label="Total Video Length (Seconds)", minimum=1, maximum=5, value=2, step=0.1)
+            with gr.Group():
+                with gr.Accordion("Advanced settings", open=False):
+                    use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')
+
+                    n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=False)  # Not used
+                    seed = gr.Number(label="Seed", value=31337, precision=0)
+
+                    latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=False)  # Should not change
+                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Changing this value is not recommended.')
+
+                    cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=False)  # Should not change
+                    gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01, info='Changing this value is not recommended.')
+                    rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False)  # Should not change
+
+                    gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
+
+                    mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
+
+        with gr.Column():
+            preview_image = gr.Image(label="Next Latents", height=200, visible=False)
+            result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
+            progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
+            progress_bar = gr.HTML('', elem_classes='no-generating-animation')
+
+    gr.HTML('<div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>')
+
+    ips = [input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
+    start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
+    end_button.click(fn=end_process)
+
+    # gr.Examples(
+    #     examples,
+    #     inputs=[input_image, prompt],
+    #     outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
+    #     fn=generate_examples,
+    #     cache_examples=True
+    # )
+
     gr.Examples(
         examples = [
            [
+                "./img_examples/Example1.png",  # input_image
+                "View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
+                False,  # t2v
+                "",  # n_prompt
+                42,  # seed
+                1,  # total_second_length
+                9,  # latent_window_size
+                10,  # steps
+                1.0,  # cfg
+                3.0,  # gs
+                0.0,  # rs
+                6,  # gpu_memory_preservation
+                False,  # use_teacache
+                16  # mp4_crf
            ],
        ],
        run_on_click = True,
+        fn = process,
+        inputs = ips,
+        outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
+        cache_examples = True,
    )
 
+
+block.launch(ssr_mode=False)
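One detail worth noting from `worker` above: after the first section, each newly decoded chunk is spliced onto `history_pixels` with `soft_append_bcthw` over `latent_window_size * 4 - 3` overlapping frames. The real helper lives in `diffusers_helper.utils`; the sketch below shows one generic way such an overlap-append can work on B×C×T×H×W tensors (a linear cross-fade over the shared frames) and may differ in detail from the actual implementation:

```python
import torch

def soft_append_bcthw_sketch(history: torch.Tensor, current: torch.Tensor, overlap: int) -> torch.Tensor:
    # history, current: (B, C, T, H, W); the last `overlap` frames of `history`
    # correspond to the first `overlap` frames of `current`.
    if overlap <= 0:
        return torch.cat([history, current], dim=2)
    weight = torch.linspace(1.0, 0.0, overlap, device=history.device).view(1, 1, overlap, 1, 1)
    blended = history[:, :, -overlap:] * weight + current[:, :, :overlap] * (1.0 - weight)
    return torch.cat([history[:, :, :-overlap], blended, current[:, :, overlap:]], dim=2)

# Example: a 33-frame history overlapped entirely by the head of a 66-frame chunk.
a = torch.rand(1, 3, 33, 64, 64)
b = torch.rand(1, 3, 66, 64, 64)
print(soft_append_bcthw_sketch(a, b, overlap=33).shape)  # torch.Size([1, 3, 66, 64, 64])
```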
app_endframe.py
CHANGED
@@ -1,893 +1,893 @@
-from diffusers_helper.hf_login import login
-
-import os
-
-os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
-
-import gradio as gr
-import torch
-import traceback
-import einops
-import safetensors.torch as sf
-import numpy as np
-import argparse
-import math
-# 20250506 pftq: Added for video input loading
-import decord
-# 20250506 pftq: Added for progress bars in video_encode
-from tqdm import tqdm
-# 20250506 pftq: Normalize file paths for Windows compatibility
-import pathlib
-# 20250506 pftq: for easier to read timestamp
-from datetime import datetime
-# 20250508 pftq: for saving prompt to mp4 comments metadata
-import imageio_ffmpeg
-import tempfile
-import shutil
-import subprocess
-import spaces
-from PIL import Image
-from diffusers import AutoencoderKLHunyuanVideo
-from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
-from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
-from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
-from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
-from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
-from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
-from diffusers_helper.thread_utils import AsyncStream, async_run
-from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
-from transformers import SiglipImageProcessor, SiglipVisionModel
-from diffusers_helper.clip_vision import hf_clip_vision_encode
-from diffusers_helper.bucket_tools import find_nearest_bucket
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--share', action='store_true')
-parser.add_argument("--server", type=str, default='0.0.0.0')
-parser.add_argument("--port", type=int, required=False)
-parser.add_argument("--inbrowser", action='store_true')
-args = parser.parse_args()
-
-print(args)
-
-free_mem_gb = get_cuda_free_memory_gb(gpu)
-high_vram = free_mem_gb > 60
-
-print(f'Free VRAM {free_mem_gb} GB')
-print(f'High-VRAM Mode: {high_vram}')
-
-text_encoder = LlamaModel.from_pretrained("
-text_encoder_2 = CLIPTextModel.from_pretrained("
-tokenizer = LlamaTokenizerFast.from_pretrained("
-tokenizer_2 = CLIPTokenizer.from_pretrained("
-vae = AutoencoderKLHunyuanVideo.from_pretrained("
-
-feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
-image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
-
-transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePackI2V_HY', torch_dtype=torch.bfloat16).cpu()
-
-vae.eval()
-text_encoder.eval()
-text_encoder_2.eval()
-image_encoder.eval()
-transformer.eval()
-
-if not high_vram:
-    vae.enable_slicing()
-    vae.enable_tiling()
-
-transformer.high_quality_fp32_output_for_inference = True
-print('transformer.high_quality_fp32_output_for_inference = True')
-
-transformer.to(dtype=torch.bfloat16)
-vae.to(dtype=torch.float16)
-image_encoder.to(dtype=torch.float16)
-text_encoder.to(dtype=torch.float16)
-text_encoder_2.to(dtype=torch.float16)
-
-vae.requires_grad_(False)
-text_encoder.requires_grad_(False)
-text_encoder_2.requires_grad_(False)
-image_encoder.requires_grad_(False)
-transformer.requires_grad_(False)
-
-if not high_vram:
-    # DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
-    DynamicSwapInstaller.install_model(transformer, device=gpu)
-    DynamicSwapInstaller.install_model(text_encoder, device=gpu)
-else:
-    text_encoder.to(gpu)
-    text_encoder_2.to(gpu)
-    image_encoder.to(gpu)
-    vae.to(gpu)
-    transformer.to(gpu)
-
-stream = AsyncStream()
-
-outputs_folder = './outputs/'
-os.makedirs(outputs_folder, exist_ok=True)
-
-input_video_debug_value = None
-prompt_debug_value = None
-total_second_length_debug_value = None
-
-# 20250506 pftq: Added function to encode input video frames into latents
-@torch.no_grad()
-def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, device="cuda", width=None, height=None):
-    """
-    Encode a video into latent representations using the VAE.
-
-    Args:
-        video_path: Path to the input video file.
-        vae: AutoencoderKLHunyuanVideo model.
-        height, width: Target resolution for resizing frames.
-        vae_batch_size: Number of frames to process per batch.
-        device: Device for computation (e.g., "cuda").
-
-    Returns:
-        start_latent: Latent of the first frame (for compatibility with original code).
-        input_image_np: First frame as numpy array (for CLIP vision encoding).
-        history_latents: Latents of all frames (shape: [1, channels, frames, height//8, width//8]).
-        fps: Frames per second of the input video.
-    """
-    # 20250506 pftq: Normalize video path for Windows compatibility
-    video_path = str(pathlib.Path(video_path).resolve())
-    print(f"Processing video: {video_path}")
-
-    # 20250506 pftq: Check CUDA availability and fallback to CPU if needed
-    if device == "cuda" and not torch.cuda.is_available():
-        print("CUDA is not available, falling back to CPU")
-        device = "cpu"
-
-    try:
-        # 20250506 pftq: Load video and get FPS
-        print("Initializing VideoReader...")
-        vr = decord.VideoReader(video_path)
-        fps = vr.get_avg_fps()  # Get input video FPS
-        num_real_frames = len(vr)
-        print(f"Video loaded: {num_real_frames} frames, FPS: {fps}")
-
-        # Truncate to nearest latent size (multiple of 4)
-        latent_size_factor = 4
-        num_frames = (num_real_frames // latent_size_factor) * latent_size_factor
-        if num_frames != num_real_frames:
-            print(f"Truncating video from {num_real_frames} to {num_frames} frames for latent size compatibility")
-        num_real_frames = num_frames
-
-        # 20250506 pftq: Read frames
-        print("Reading video frames...")
-        frames = vr.get_batch(range(num_real_frames)).asnumpy()  # Shape: (num_real_frames, height, width, channels)
-        print(f"Frames read: {frames.shape}")
-
-        # 20250506 pftq: Get native video resolution
-        native_height, native_width = frames.shape[1], frames.shape[2]
-        print(f"Native video resolution: {native_width}x{native_height}")
-
-        # 20250506 pftq: Use native resolution if height/width not specified, otherwise use provided values
-        target_height = native_height if height is None else height
-        target_width = native_width if width is None else width
|
169 |
-
|
170 |
-
# 20250506 pftq: Adjust to nearest bucket for model compatibility
|
171 |
-
if not no_resize:
|
172 |
-
target_height, target_width = find_nearest_bucket(target_height, target_width, resolution=resolution)
|
173 |
-
print(f"Adjusted resolution: {target_width}x{target_height}")
|
174 |
-
else:
|
175 |
-
print(f"Using native resolution without resizing: {target_width}x{target_height}")
|
176 |
-
|
177 |
-
# 20250506 pftq: Preprocess frames to match original image processing
|
178 |
-
processed_frames = []
|
179 |
-
for i, frame in enumerate(frames):
|
180 |
-
#print(f"Preprocessing frame {i+1}/{num_frames}")
|
181 |
-
frame_np = resize_and_center_crop(frame, target_width=target_width, target_height=target_height)
|
182 |
-
processed_frames.append(frame_np)
|
183 |
-
processed_frames = np.stack(processed_frames) # Shape: (num_real_frames, height, width, channels)
|
184 |
-
print(f"Frames preprocessed: {processed_frames.shape}")
|
185 |
-
|
186 |
-
# 20250506 pftq: Save first frame for CLIP vision encoding
|
187 |
-
input_image_np = processed_frames[0]
|
188 |
-
end_of_input_video_image_np = processed_frames[-1]
|
189 |
-
|
190 |
-
# 20250506 pftq: Convert to tensor and normalize to [-1, 1]
|
191 |
-
print("Converting frames to tensor...")
|
192 |
-
frames_pt = torch.from_numpy(processed_frames).float() / 127.5 - 1
|
193 |
-
frames_pt = frames_pt.permute(0, 3, 1, 2) # Shape: (num_real_frames, channels, height, width)
|
194 |
-
frames_pt = frames_pt.unsqueeze(0) # Shape: (1, num_real_frames, channels, height, width)
|
195 |
-
frames_pt = frames_pt.permute(0, 2, 1, 3, 4) # Shape: (1, channels, num_real_frames, height, width)
|
196 |
-
print(f"Tensor shape: {frames_pt.shape}")
|
197 |
-
|
198 |
-
# 20250507 pftq: Save pixel frames for use in worker
|
199 |
-
input_video_pixels = frames_pt.cpu()
|
200 |
-
|
201 |
-
# 20250506 pftq: Move to device
|
202 |
-
print(f"Moving tensor to device: {device}")
|
203 |
-
frames_pt = frames_pt.to(device)
|
204 |
-
print("Tensor moved to device")
|
205 |
-
|
206 |
-
# 20250506 pftq: Move VAE to device
|
207 |
-
print(f"Moving VAE to device: {device}")
|
208 |
-
vae.to(device)
|
209 |
-
print("VAE moved to device")
|
210 |
-
|
211 |
-
# 20250506 pftq: Encode frames in batches
|
212 |
-
print(f"Encoding input video frames in VAE batch size {vae_batch_size} (reduce if memory issues here or if forcing video resolution)")
|
213 |
-
latents = []
|
214 |
-
vae.eval()
|
215 |
-
with torch.no_grad():
|
216 |
-
for i in tqdm(range(0, frames_pt.shape[2], vae_batch_size), desc="Encoding video frames", mininterval=0.1):
|
217 |
-
#print(f"Encoding batch {i//vae_batch_size + 1}: frames {i} to {min(i + vae_batch_size, frames_pt.shape[2])}")
|
218 |
-
batch = frames_pt[:, :, i:i + vae_batch_size] # Shape: (1, channels, batch_size, height, width)
|
219 |
-
try:
|
220 |
-
# 20250506 pftq: Log GPU memory before encoding
|
221 |
-
if device == "cuda":
|
222 |
-
free_mem = torch.cuda.memory_allocated() / 1024**3
|
223 |
-
#print(f"GPU memory before encoding: {free_mem:.2f} GB")
|
224 |
-
batch_latent = vae_encode(batch, vae)
|
225 |
-
# 20250506 pftq: Synchronize CUDA to catch issues
|
226 |
-
if device == "cuda":
|
227 |
-
torch.cuda.synchronize()
|
228 |
-
#print(f"GPU memory after encoding: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
|
229 |
-
latents.append(batch_latent)
|
230 |
-
#print(f"Batch encoded, latent shape: {batch_latent.shape}")
|
231 |
-
except RuntimeError as e:
|
232 |
-
print(f"Error during VAE encoding: {str(e)}")
|
233 |
-
if device == "cuda" and "out of memory" in str(e).lower():
|
234 |
-
print("CUDA out of memory, try reducing vae_batch_size or using CPU")
|
235 |
-
raise
|
236 |
-
|
237 |
-
# 20250506 pftq: Concatenate latents
|
238 |
-
print("Concatenating latents...")
|
239 |
-
history_latents = torch.cat(latents, dim=2) # Shape: (1, channels, frames, height//8, width//8)
|
240 |
-
print(f"History latents shape: {history_latents.shape}")
|
241 |
-
|
242 |
-
# 20250506 pftq: Get first frame's latent
|
243 |
-
start_latent = history_latents[:, :, :1] # Shape: (1, channels, 1, height//8, width//8)
|
244 |
-
end_of_input_video_latent = history_latents[:, :, -1:] # Shape: (1, channels, 1, height//8, width//8)
|
245 |
-
print(f"Start latent shape: {start_latent.shape}")
|
246 |
-
|
247 |
-
# 20250506 pftq: Move VAE back to CPU to free GPU memory
|
248 |
-
if device == "cuda":
|
249 |
-
vae.to(cpu)
|
250 |
-
torch.cuda.empty_cache()
|
251 |
-
print("VAE moved back to CPU, CUDA cache cleared")
|
252 |
-
|
253 |
-
return start_latent, input_image_np, history_latents, fps, target_height, target_width, input_video_pixels, end_of_input_video_latent, end_of_input_video_image_np
|
254 |
-
|
255 |
-
except Exception as e:
|
256 |
-
print(f"Error in video_encode: {str(e)}")
|
257 |
-
raise
|
258 |
-
|
259 |
-
|
260 |
-
# 20250507 pftq: New function to encode a single image (end frame)
|
261 |
-
@torch.no_grad()
|
262 |
-
def image_encode(image_np, target_width, target_height, vae, image_encoder, feature_extractor, device="cuda"):
|
263 |
-
"""
|
264 |
-
Encode a single image into a latent and compute its CLIP vision embedding.
|
265 |
-
|
266 |
-
Args:
|
267 |
-
image_np: Input image as numpy array.
|
268 |
-
target_width, target_height: Exact resolution to resize the image to (matches start frame).
|
269 |
-
vae: AutoencoderKLHunyuanVideo model.
|
270 |
-
image_encoder: SiglipVisionModel for CLIP vision encoding.
|
271 |
-
feature_extractor: SiglipImageProcessor for preprocessing.
|
272 |
-
device: Device for computation (e.g., "cuda").
|
273 |
-
|
274 |
-
Returns:
|
275 |
-
latent: Latent representation of the image (shape: [1, channels, 1, height//8, width//8]).
|
276 |
-
clip_embedding: CLIP vision embedding of the image.
|
277 |
-
processed_image_np: Processed image as numpy array (after resizing).
|
278 |
-
"""
|
279 |
-
# 20250507 pftq: Process end frame with exact start frame dimensions
|
280 |
-
print("Processing end frame...")
|
281 |
-
try:
|
282 |
-
print(f"Using exact start frame resolution for end frame: {target_width}x{target_height}")
|
283 |
-
|
284 |
-
# Resize and preprocess image to match start frame
|
285 |
-
processed_image_np = resize_and_center_crop(image_np, target_width=target_width, target_height=target_height)
|
286 |
-
|
287 |
-
# Convert to tensor and normalize
|
288 |
-
image_pt = torch.from_numpy(processed_image_np).float() / 127.5 - 1
|
289 |
-
image_pt = image_pt.permute(2, 0, 1).unsqueeze(0).unsqueeze(2) # Shape: [1, channels, 1, height, width]
|
290 |
-
image_pt = image_pt.to(device)
|
291 |
-
|
292 |
-
# Move VAE to device
|
293 |
-
vae.to(device)
|
294 |
-
|
295 |
-
# Encode to latent
|
296 |
-
latent = vae_encode(image_pt, vae)
|
297 |
-
print(f"image_encode vae output shape: {latent.shape}")
|
298 |
-
|
299 |
-
# Move image encoder to device
|
300 |
-
image_encoder.to(device)
|
301 |
-
|
302 |
-
# Compute CLIP vision embedding
|
303 |
-
clip_embedding = hf_clip_vision_encode(processed_image_np, feature_extractor, image_encoder).last_hidden_state
|
304 |
-
|
305 |
-
# Move models back to CPU and clear cache
|
306 |
-
if device == "cuda":
|
307 |
-
vae.to(cpu)
|
308 |
-
image_encoder.to(cpu)
|
309 |
-
torch.cuda.empty_cache()
|
310 |
-
print("VAE and image encoder moved back to CPU, CUDA cache cleared")
|
311 |
-
|
312 |
-
print(f"End latent shape: {latent.shape}")
|
313 |
-
return latent, clip_embedding, processed_image_np
|
314 |
-
|
315 |
-
except Exception as e:
|
316 |
-
print(f"Error in image_encode: {str(e)}")
|
317 |
-
raise
|
318 |
-
|
319 |
-
# 20250508 pftq: for saving prompt to mp4 metadata comments
|
320 |
-
def set_mp4_comments_imageio_ffmpeg(input_file, comments):
|
321 |
-
try:
|
322 |
-
# Get the path to the bundled FFmpeg binary from imageio-ffmpeg
|
323 |
-
ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
|
324 |
-
|
325 |
-
# Check if input file exists
|
326 |
-
if not os.path.exists(input_file):
|
327 |
-
print(f"Error: Input file {input_file} does not exist")
|
328 |
-
return False
|
329 |
-
|
330 |
-
# Create a temporary file path
|
331 |
-
temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
|
332 |
-
|
333 |
-
# FFmpeg command using the bundled binary
|
334 |
-
command = [
|
335 |
-
ffmpeg_path, # Use imageio-ffmpeg's FFmpeg
|
336 |
-
'-i', input_file, # input file
|
337 |
-
'-metadata', f'comment={comments}', # set comment metadata
|
338 |
-
'-c:v', 'copy', # copy video stream without re-encoding
|
339 |
-
'-c:a', 'copy', # copy audio stream without re-encoding
|
340 |
-
'-y', # overwrite output file if it exists
|
341 |
-
temp_file # temporary output file
|
342 |
-
]
|
343 |
-
|
344 |
-
# Run the FFmpeg command
|
345 |
-
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
346 |
-
|
347 |
-
if result.returncode == 0:
|
348 |
-
# Replace the original file with the modified one
|
349 |
-
shutil.move(temp_file, input_file)
|
350 |
-
print(f"Successfully added comments to {input_file}")
|
351 |
-
return True
|
352 |
-
else:
|
353 |
-
# Clean up temp file if FFmpeg fails
|
354 |
-
if os.path.exists(temp_file):
|
355 |
-
os.remove(temp_file)
|
356 |
-
print(f"Error: FFmpeg failed with message:\n{result.stderr}")
|
357 |
-
return False
|
358 |
-
|
359 |
-
except Exception as e:
|
360 |
-
# Clean up temp file in case of other errors
|
361 |
-
if 'temp_file' in locals() and os.path.exists(temp_file):
|
362 |
-
os.remove(temp_file)
|
363 |
-
print(f"Error saving prompt to video metadata, ffmpeg may be required: "+str(e))
|
364 |
-
return False
|
365 |
-
|
366 |
-
# 20250506 pftq: Modified worker to accept video input, and clean frame count
|
367 |
-
@torch.no_grad()
|
368 |
-
def worker(input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
|
369 |
-
|
370 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
|
371 |
-
|
372 |
-
try:
|
373 |
-
# Clean GPU
|
374 |
-
if not high_vram:
|
375 |
-
unload_complete_models(
|
376 |
-
text_encoder, text_encoder_2, image_encoder, vae, transformer
|
377 |
-
)
|
378 |
-
|
379 |
-
# Text encoding
|
380 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
|
381 |
-
|
382 |
-
if not high_vram:
|
383 |
-
fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
|
384 |
-
load_model_as_complete(text_encoder_2, target_device=gpu)
|
385 |
-
|
386 |
-
llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
|
387 |
-
|
388 |
-
if cfg == 1:
|
389 |
-
llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
|
390 |
-
else:
|
391 |
-
llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
|
392 |
-
|
393 |
-
llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
|
394 |
-
llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
|
395 |
-
|
396 |
-
# 20250506 pftq: Processing input video instead of image
|
397 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Video processing ...'))))
|
398 |
-
|
399 |
-
# 20250506 pftq: Encode video
|
400 |
-
start_latent, input_image_np, video_latents, fps, height, width, input_video_pixels, end_of_input_video_latent, end_of_input_video_image_np = video_encode(input_video, resolution, no_resize, vae, vae_batch_size=vae_batch, device=gpu)
|
401 |
-
|
402 |
-
#Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
|
403 |
-
|
404 |
-
# CLIP Vision
|
405 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
|
406 |
-
|
407 |
-
if not high_vram:
|
408 |
-
load_model_as_complete(image_encoder, target_device=gpu)
|
409 |
-
|
410 |
-
image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
|
411 |
-
image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
|
412 |
-
start_embedding = image_encoder_last_hidden_state
|
413 |
-
|
414 |
-
end_of_input_video_output = hf_clip_vision_encode(end_of_input_video_image_np, feature_extractor, image_encoder)
|
415 |
-
end_of_input_video_last_hidden_state = end_of_input_video_output.last_hidden_state
|
416 |
-
end_of_input_video_embedding = end_of_input_video_last_hidden_state
|
417 |
-
|
418 |
-
# 20250507 pftq: Process end frame if provided
|
419 |
-
end_latent = None
|
420 |
-
end_clip_embedding = None
|
421 |
-
if end_frame is not None:
|
422 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'End frame encoding ...'))))
|
423 |
-
end_latent, end_clip_embedding, _ = image_encode(
|
424 |
-
end_frame, target_width=width, target_height=height, vae=vae,
|
425 |
-
image_encoder=image_encoder, feature_extractor=feature_extractor, device=gpu
|
426 |
-
)
|
427 |
-
|
428 |
-
# Dtype
|
429 |
-
llama_vec = llama_vec.to(transformer.dtype)
|
430 |
-
llama_vec_n = llama_vec_n.to(transformer.dtype)
|
431 |
-
clip_l_pooler = clip_l_pooler.to(transformer.dtype)
|
432 |
-
clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
|
433 |
-
image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
|
434 |
-
end_of_input_video_embedding = end_of_input_video_embedding.to(transformer.dtype)
|
435 |
-
|
436 |
-
# 20250509 pftq: Restored original placement of total_latent_sections after video_encode
|
437 |
-
total_latent_sections = (total_second_length * fps) / (latent_window_size * 4)
|
438 |
-
total_latent_sections = int(max(round(total_latent_sections), 1))
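# Each section yields latent_window_size latent frames and each latent frame decodes to
# roughly 4 pixel frames, hence seconds * fps / (latent_window_size * 4) above.
# Worked example (hypothetical input): 5 s of extra video from a 30 fps clip with the
# default window of 9 -> 150 / 36 = 4.17 -> rounded to 4 sections.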
|
439 |
-
|
440 |
-
for idx in range(batch):
|
441 |
-
if idx > 0:
|
442 |
-
seed = seed + 1
|
443 |
-
|
444 |
-
if batch > 1:
|
445 |
-
print(f"Beginning video {idx+1} of {batch} with seed {seed} ")
|
446 |
-
|
447 |
-
job_id = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+f"_framepack-videoinput-endframe_{width}-{total_second_length}sec_seed-{seed}_steps-{steps}_distilled-{gs}_cfg-{cfg}"
|
448 |
-
|
449 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
|
450 |
-
|
451 |
-
rnd = torch.Generator("cpu").manual_seed(seed)
|
452 |
-
|
453 |
-
history_latents = video_latents.cpu()
|
454 |
-
history_pixels = None
|
455 |
-
total_generated_latent_frames = 0
|
456 |
-
previous_video = None
|
457 |
-
|
458 |
-
|
459 |
-
# 20250509 Generate backwards with end frame for better end frame anchoring
|
460 |
-
latent_paddings = list(reversed(range(total_latent_sections)))
|
461 |
-
if total_latent_sections > 4:
|
462 |
-
latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]
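# Sections are generated in reverse order: padding 0 is the section that touches the end
# of the input video, larger paddings sit closer to the requested end of the clip. For
# longer clips the schedule is capped at 3/2/.../2/1/0 rather than growing with the
# section count, which seems to keep the per-section conditioning window a constant size.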
|
463 |
-
|
464 |
-
for section_index, latent_padding in enumerate(latent_paddings):
|
465 |
-
is_start_of_video = latent_padding == 0
|
466 |
-
is_end_of_video = latent_padding == latent_paddings[0]
|
467 |
-
latent_padding_size = latent_padding * latent_window_size
|
468 |
-
|
469 |
-
if stream.input_queue.top() == 'end':
|
470 |
-
stream.output_queue.push(('end', None))
|
471 |
-
return
|
472 |
-
|
473 |
-
if not high_vram:
|
474 |
-
unload_complete_models()
|
475 |
-
move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
|
476 |
-
|
477 |
-
if use_teacache:
|
478 |
-
transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
|
479 |
-
else:
|
480 |
-
transformer.initialize_teacache(enable_teacache=False)
|
481 |
-
|
482 |
-
def callback(d):
|
483 |
-
try:
|
484 |
-
preview = d['denoised']
|
485 |
-
preview = vae_decode_fake(preview)
|
486 |
-
preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
|
487 |
-
preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
|
488 |
-
if stream.input_queue.top() == 'end':
|
489 |
-
stream.output_queue.push(('end', None))
|
490 |
-
raise KeyboardInterrupt('User ends the task.')
|
491 |
-
current_step = d['i'] + 1
|
492 |
-
percentage = int(100.0 * current_step / steps)
|
493 |
-
hint = f'Sampling {current_step}/{steps}'
|
494 |
-
desc = f'Total frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / fps) :.2f} seconds (FPS-{fps}), Seed: {seed}, Video {idx+1} of {batch}. Generating part {total_latent_sections - section_index} of {total_latent_sections} backward...'
|
495 |
-
stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
|
496 |
-
except ConnectionResetError as e:
|
497 |
-
print(f"Suppressed ConnectionResetError in callback: {e}")
|
498 |
-
return
|
499 |
-
|
500 |
-
# 20250509 pftq: Dynamic frame allocation like original num_clean_frames, fix split error
|
501 |
-
available_frames = video_latents.shape[2] if is_start_of_video else history_latents.shape[2]
|
502 |
-
effective_clean_frames = max(0, num_clean_frames - 1) if num_clean_frames > 1 else 1
|
503 |
-
if is_start_of_video:
|
504 |
-
effective_clean_frames = 1 # avoid jumpcuts from input video
|
505 |
-
clean_latent_pre_frames = effective_clean_frames
|
506 |
-
num_2x_frames = min(2, max(1, available_frames - clean_latent_pre_frames - 1)) if available_frames > clean_latent_pre_frames + 1 else 1
|
507 |
-
num_4x_frames = min(16, max(1, available_frames - clean_latent_pre_frames - num_2x_frames)) if available_frames > clean_latent_pre_frames + num_2x_frames else 1
|
508 |
-
total_context_frames = num_2x_frames + num_4x_frames
|
509 |
-
total_context_frames = min(total_context_frames, available_frames - clean_latent_pre_frames)
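# Context is supplied at three rates: full-resolution "clean" latents nearest the window
# plus smaller 2x and 4x groups of older history, which the packed transformer appears to
# treat as progressively cheaper long-range context. More clean frames means closer
# adherence to the input video at higher memory cost (see the "Number of Context Frames"
# slider in the UI below).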
|
510 |
-
|
511 |
-
# 20250511 pftq: Dynamically adjust post_frames based on clean_latents_post
|
512 |
-
post_frames = 1 if is_end_of_video and end_latent is not None else effective_clean_frames # 20250511 pftq: Single frame for end_latent, otherwise padding causes still image
|
513 |
-
indices = torch.arange(0, clean_latent_pre_frames + latent_padding_size + latent_window_size + post_frames + num_2x_frames + num_4x_frames).unsqueeze(0)
|
514 |
-
clean_latent_indices_pre, blank_indices, latent_indices, clean_latent_indices_post, clean_latent_2x_indices, clean_latent_4x_indices = indices.split(
|
515 |
-
[clean_latent_pre_frames, latent_padding_size, latent_window_size, post_frames, num_2x_frames, num_4x_frames], dim=1
|
516 |
-
)
|
517 |
-
clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)
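# Index layout handed to the sampler, in order: clean frames before the window, blank
# padding, the latent window being denoised, clean frames after the window, then the
# 2x and 4x context groups (matching the split sizes above).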
|
518 |
-
|
519 |
-
# 20250509 pftq: Split context frames dynamically for 2x and 4x only
|
520 |
-
context_frames = history_latents[:, :, -(total_context_frames + clean_latent_pre_frames):-clean_latent_pre_frames, :, :] if total_context_frames > 0 else history_latents[:, :, :1, :, :]
|
521 |
-
split_sizes = [num_4x_frames, num_2x_frames]
|
522 |
-
split_sizes = [s for s in split_sizes if s > 0]
|
523 |
-
if split_sizes and context_frames.shape[2] >= sum(split_sizes):
|
524 |
-
splits = context_frames.split(split_sizes, dim=2)
|
525 |
-
split_idx = 0
|
526 |
-
clean_latents_4x = splits[split_idx] if num_4x_frames > 0 else history_latents[:, :, :1, :, :]
|
527 |
-
split_idx += 1 if num_4x_frames > 0 else 0
|
528 |
-
clean_latents_2x = splits[split_idx] if num_2x_frames > 0 and split_idx < len(splits) else history_latents[:, :, :1, :, :]
|
529 |
-
else:
|
530 |
-
clean_latents_4x = clean_latents_2x = history_latents[:, :, :1, :, :]
|
531 |
-
|
532 |
-
clean_latents_pre = video_latents[:, :, -min(effective_clean_frames, video_latents.shape[2]):].to(history_latents) # smoother motion but jumpcuts if end frame is too different, must change clean_latent_pre_frames to effective_clean_frames also
|
533 |
-
clean_latents_post = history_latents[:, :, :min(effective_clean_frames, history_latents.shape[2]), :, :] # smoother motion, must change post_frames to effective_clean_frames also
|
534 |
-
|
535 |
-
if is_end_of_video:
|
536 |
-
clean_latents_post = torch.zeros_like(end_of_input_video_latent).to(history_latents)
|
537 |
-
|
538 |
-
# 20250509 pftq: handle end frame if available
|
539 |
-
if end_latent is not None:
|
540 |
-
#current_end_frame_weight = end_frame_weight * (latent_padding / latent_paddings[0])
|
541 |
-
#current_end_frame_weight = current_end_frame_weight * 0.5 + 0.5
|
542 |
-
current_end_frame_weight = end_frame_weight # changing this over time introduces discontinuity
|
543 |
-
# 20250511 pftq: Removed end frame weight adjustment as it has no effect
|
544 |
-
image_encoder_last_hidden_state = (1 - current_end_frame_weight) * end_of_input_video_embedding + end_clip_embedding * current_end_frame_weight
|
545 |
-
image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
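# The CLIP-vision conditioning is a linear blend between the embedding of the input
# video's last frame and the uploaded end frame, weighted by end_frame_weight and kept
# fixed per run, since varying it across sections introduced discontinuities (see note above).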
|
546 |
-
|
547 |
-
# 20250511 pftq: Use end_latent only
|
548 |
-
if is_end_of_video:
|
549 |
-
clean_latents_post = end_latent.to(history_latents)[:, :, :1, :, :] # Ensure single frame
|
550 |
-
|
551 |
-
# 20250511 pftq: Pad clean_latents_pre to match clean_latent_pre_frames if needed
|
552 |
-
if clean_latents_pre.shape[2] < clean_latent_pre_frames:
|
553 |
-
clean_latents_pre = clean_latents_pre.repeat(1, 1, clean_latent_pre_frames // clean_latents_pre.shape[2], 1, 1)
|
554 |
-
# 20250511 pftq: Pad clean_latents_post to match post_frames if needed
|
555 |
-
if clean_latents_post.shape[2] < post_frames:
|
556 |
-
clean_latents_post = clean_latents_post.repeat(1, 1, post_frames // clean_latents_post.shape[2], 1, 1)
|
557 |
-
|
558 |
-
clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2)
|
559 |
-
|
560 |
-
max_frames = min(latent_window_size * 4 - 3, history_latents.shape[2] * 4)
|
561 |
-
print(f"Generating video {idx+1} of {batch} with seed {seed}, part {total_latent_sections - section_index} of {total_latent_sections} backward")
|
562 |
-
generated_latents = sample_hunyuan(
|
563 |
-
transformer=transformer,
|
564 |
-
sampler='unipc',
|
565 |
-
width=width,
|
566 |
-
height=height,
|
567 |
-
frames=max_frames,
|
568 |
-
real_guidance_scale=cfg,
|
569 |
-
distilled_guidance_scale=gs,
|
570 |
-
guidance_rescale=rs,
|
571 |
-
num_inference_steps=steps,
|
572 |
-
generator=rnd,
|
573 |
-
prompt_embeds=llama_vec,
|
574 |
-
prompt_embeds_mask=llama_attention_mask,
|
575 |
-
prompt_poolers=clip_l_pooler,
|
576 |
-
negative_prompt_embeds=llama_vec_n,
|
577 |
-
negative_prompt_embeds_mask=llama_attention_mask_n,
|
578 |
-
negative_prompt_poolers=clip_l_pooler_n,
|
579 |
-
device=gpu,
|
580 |
-
dtype=torch.bfloat16,
|
581 |
-
image_embeddings=image_encoder_last_hidden_state,
|
582 |
-
latent_indices=latent_indices,
|
583 |
-
clean_latents=clean_latents,
|
584 |
-
clean_latent_indices=clean_latent_indices,
|
585 |
-
clean_latents_2x=clean_latents_2x,
|
586 |
-
clean_latent_2x_indices=clean_latent_2x_indices,
|
587 |
-
clean_latents_4x=clean_latents_4x,
|
588 |
-
clean_latent_4x_indices=clean_latent_4x_indices,
|
589 |
-
callback=callback,
|
590 |
-
)
|
591 |
-
|
592 |
-
if is_start_of_video:
|
593 |
-
generated_latents = torch.cat([video_latents[:, :, -1:].to(generated_latents), generated_latents], dim=2)
|
594 |
-
|
595 |
-
total_generated_latent_frames += int(generated_latents.shape[2])
|
596 |
-
history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2)
|
597 |
-
|
598 |
-
if not high_vram:
|
599 |
-
offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
|
600 |
-
load_model_as_complete(vae, target_device=gpu)
|
601 |
-
|
602 |
-
real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :]
|
603 |
-
if history_pixels is None:
|
604 |
-
history_pixels = vae_decode(real_history_latents, vae).cpu()
|
605 |
-
else:
|
606 |
-
section_latent_frames = (latent_window_size * 2 + 1) if is_start_of_video else (latent_window_size * 2)
|
607 |
-
overlapped_frames = latent_window_size * 4 - 3
|
608 |
-
current_pixels = vae_decode(real_history_latents[:, :, :section_latent_frames], vae).cpu()
|
609 |
-
history_pixels = soft_append_bcthw(current_pixels, history_pixels, overlapped_frames)
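# The freshly decoded section and the previously decoded history share
# overlapped_frames (latent_window_size * 4 - 3) pixel frames; soft_append_bcthw blends
# that overlap so consecutive sections join without a hard cut.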
|
610 |
-
|
611 |
-
if not high_vram:
|
612 |
-
unload_complete_models()
|
613 |
-
|
614 |
-
output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
|
615 |
-
save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
|
616 |
-
print(f"Latest video saved: {output_filename}")
|
617 |
-
set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompt} | Negative Prompt: {n_prompt}")
|
618 |
-
print(f"Prompt saved to mp4 metadata comments: {output_filename}")
|
619 |
-
|
620 |
-
if previous_video is not None and os.path.exists(previous_video):
|
621 |
-
try:
|
622 |
-
os.remove(previous_video)
|
623 |
-
print(f"Previous partial video deleted: {previous_video}")
|
624 |
-
except Exception as e:
|
625 |
-
print(f"Error deleting previous partial video {previous_video}: {e}")
|
626 |
-
previous_video = output_filename
|
627 |
-
|
628 |
-
print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
|
629 |
-
stream.output_queue.push(('file', output_filename))
|
630 |
-
|
631 |
-
if is_start_of_video:
|
632 |
-
break
|
633 |
-
|
634 |
-
history_pixels = torch.cat([input_video_pixels, history_pixels], dim=2)
|
635 |
-
#overlapped_frames = latent_window_size * 4 - 3
|
636 |
-
#history_pixels = soft_append_bcthw(input_video_pixels, history_pixels, overlapped_frames)
|
637 |
-
|
638 |
-
output_filename = os.path.join(outputs_folder, f'{job_id}_final.mp4')
|
639 |
-
save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
|
640 |
-
print(f"Final video with input blend saved: {output_filename}")
|
641 |
-
set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompt} | Negative Prompt: {n_prompt}")
|
642 |
-
print(f"Prompt saved to mp4 metadata comments: {output_filename}")
|
643 |
-
stream.output_queue.push(('file', output_filename))
|
644 |
-
|
645 |
-
if previous_video is not None and os.path.exists(previous_video):
|
646 |
-
try:
|
647 |
-
os.remove(previous_video)
|
648 |
-
print(f"Previous partial video deleted: {previous_video}")
|
649 |
-
except Exception as e:
|
650 |
-
print(f"Error deleting previous partial video {previous_video}: {e}")
|
651 |
-
previous_video = output_filename
|
652 |
-
|
653 |
-
print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
|
654 |
-
|
655 |
-
stream.output_queue.push(('file', output_filename))
|
656 |
-
|
657 |
-
except:
|
658 |
-
traceback.print_exc()
|
659 |
-
|
660 |
-
if not high_vram:
|
661 |
-
unload_complete_models(
|
662 |
-
text_encoder, text_encoder_2, image_encoder, vae, transformer
|
663 |
-
)
|
664 |
-
|
665 |
-
stream.output_queue.push(('end', None))
|
666 |
-
return
|
667 |
-
|
668 |
-
# 20250506 pftq: Modified process to pass clean frame count, etc
|
669 |
-
def get_duration(input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
|
670 |
-
global total_second_length_debug_value
|
671 |
-
if total_second_length_debug_value is not None:
|
672 |
-
return total_second_length_debug_value * 60
|
673 |
-
return total_second_length * 60
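# get_duration feeds the @spaces.GPU decorator below: the ZeroGPU allocation is requested
# in seconds, budgeting roughly 60 seconds of GPU time per second of generated video.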
|
674 |
-
|
675 |
-
@spaces.GPU(duration=get_duration)
|
676 |
-
def process(input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
|
677 |
-
global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
|
678 |
-
|
679 |
-
if input_video_debug_value is not None:
|
680 |
-
input_video = input_video_debug_value
|
681 |
-
input_video_debug_value = None
|
682 |
-
|
683 |
-
if prompt_debug_value is not None:
|
684 |
-
prompt = prompt_debug_value
|
685 |
-
prompt_debug_value = None
|
686 |
-
|
687 |
-
if total_second_length_debug_value is not None:
|
688 |
-
total_second_length = total_second_length_debug_value
|
689 |
-
total_second_length_debug_value = None
|
690 |
-
|
691 |
-
# 20250506 pftq: Updated assertion for video input
|
692 |
-
assert input_video is not None, 'No input video!'
|
693 |
-
|
694 |
-
yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
|
695 |
-
|
696 |
-
# 20250507 pftq: Even the H100 needs offloading if the video dimensions are 720p or higher
|
697 |
-
if high_vram and (no_resize or resolution>640):
|
698 |
-
print("Disabling high vram mode due to no resize and/or potentially higher resolution...")
|
699 |
-
high_vram = False
|
700 |
-
vae.enable_slicing()
|
701 |
-
vae.enable_tiling()
|
702 |
-
DynamicSwapInstaller.install_model(transformer, device=gpu)
|
703 |
-
DynamicSwapInstaller.install_model(text_encoder, device=gpu)
|
704 |
-
|
705 |
-
# 20250508 pftq: automatically set distilled cfg to 1 if cfg is used
|
706 |
-
if cfg > 1:
|
707 |
-
gs = 1
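# Real CFG (cfg > 1) adds a separate negative-prompt pass, so the distilled guidance scale
# is dropped to 1 here to avoid stacking both kinds of guidance; as the UI notes, this
# roughly doubles render time.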
|
708 |
-
|
709 |
-
stream = AsyncStream()
|
710 |
-
|
711 |
-
# 20250506 pftq: Pass num_clean_frames, vae_batch, etc
|
712 |
-
async_run(worker, input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch)
|
713 |
-
|
714 |
-
output_filename = None
|
715 |
-
|
716 |
-
while True:
|
717 |
-
flag, data = stream.output_queue.next()
|
718 |
-
|
719 |
-
if flag == 'file':
|
720 |
-
output_filename = data
|
721 |
-
yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
|
722 |
-
|
723 |
-
if flag == 'progress':
|
724 |
-
preview, desc, html = data
|
725 |
-
#yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
|
726 |
-
yield output_filename, gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True) # 20250506 pftq: Keep refreshing the video in case it got hidden when the tab was in the background
|
727 |
-
|
728 |
-
if flag == 'end':
|
729 |
-
yield output_filename, gr.update(visible=False), desc+' Video complete.', '', gr.update(interactive=True), gr.update(interactive=False)
|
730 |
-
break
|
731 |
-
|
732 |
-
def end_process():
|
733 |
-
stream.input_queue.push('end')
|
734 |
-
|
735 |
-
quick_prompts = [
|
736 |
-
'The girl dances gracefully, with clear movements, full of charm.',
|
737 |
-
'A character doing some simple body movements.',
|
738 |
-
]
|
739 |
-
quick_prompts = [[x] for x in quick_prompts]
|
740 |
-
|
741 |
-
css = make_progress_bar_css()
|
742 |
-
block = gr.Blocks(css=css).queue(
|
743 |
-
max_size=10 # 20250507 pftq: Limit queue size
|
744 |
-
)
|
745 |
-
with block:
|
746 |
-
# 20250506 pftq: Updated title to reflect video input functionality
|
747 |
-
gr.Markdown('# Framepack with Video Input (Video Extension) + End Frame')
|
748 |
-
with gr.Row():
|
749 |
-
with gr.Column():
|
750 |
-
|
751 |
-
# 20250506 pftq: Changed to Video input from Image
|
752 |
-
with gr.Row():
|
753 |
-
input_video = gr.Video(sources='upload', label="Input Video", height=320)
|
754 |
-
with gr.Column():
|
755 |
-
# 20250507 pftq: Added end_frame + weight
|
756 |
-
end_frame = gr.Image(sources='upload', type="numpy", label="End Frame (Optional) - Reduce context frames if very different from input video or if it is jumpcutting/slowing to still image.", height=320)
|
757 |
-
end_frame_weight = gr.Slider(label="End Frame Weight", minimum=0.0, maximum=1.0, value=1.0, step=0.01, info='Reduce to treat more as a reference image.', visible=False) # no effect
|
758 |
-
|
759 |
-
prompt = gr.Textbox(label="Prompt", value='')
|
760 |
-
#example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
|
761 |
-
#example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)
|
762 |
-
|
763 |
-
with gr.Row():
|
764 |
-
start_button = gr.Button(value="Start Generation", variant="primary")
|
765 |
-
end_button = gr.Button(value="End Generation", variant="stop", interactive=False)
|
766 |
-
|
767 |
-
with gr.Group():
|
768 |
-
with gr.Row():
|
769 |
-
use_teacache = gr.Checkbox(label='Use TeaCache', value=False, info='Faster speed, but often makes hands and fingers slightly worse.')
|
770 |
-
no_resize = gr.Checkbox(label='Force Original Video Resolution (No Resizing)', value=False, info='Might run out of VRAM (720p requires > 24GB VRAM).')
|
771 |
-
|
772 |
-
seed = gr.Number(label="Seed", value=31337, precision=0)
|
773 |
-
|
774 |
-
batch = gr.Slider(label="Batch Size (Number of Videos)", minimum=1, maximum=1000, value=1, step=1, info='Generate multiple videos each with a different seed.')
|
775 |
-
|
776 |
-
resolution = gr.Number(label="Resolution (max width or height)", value=640, precision=0, visible=False)
|
777 |
-
|
778 |
-
total_second_length = gr.Slider(label="Additional Video Length to Generate (Seconds)", minimum=1, maximum=120, value=5, step=0.1)
|
779 |
-
|
780 |
-
# 20250506 pftq: Reduced default distilled guidance scale to improve adherence to input video
|
781 |
-
gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=3.0, step=0.01, info='Prompt adherence at the cost of less details from the input video, but to a lesser extent than Context Frames.')
|
782 |
-
cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=True, info='Use instead of Distilled for more detail/control + Negative Prompt (make sure Distilled=1). Doubles render time.') # Should not change
|
783 |
-
rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False) # Should not change
|
784 |
-
|
785 |
-
n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=True, info='Requires using normal CFG (undistilled) instead of Distilled (set Distilled=1 and CFG > 1).')
|
786 |
-
|
787 |
-
steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Expensive. Increase for more quality, especially if using high non-distilled CFG.')
|
788 |
-
|
789 |
-
# 20250506 pftq: Renamed slider to Number of Context Frames and updated description
|
790 |
-
num_clean_frames = gr.Slider(label="Number of Context Frames (Adherence to Video)", minimum=2, maximum=10, value=5, step=1, info="Expensive. Retain more video details. Reduce if memory issues or motion too restricted (jumpcut, ignoring prompt, still).")
|
791 |
-
|
792 |
-
default_vae = 32
|
793 |
-
if high_vram:
|
794 |
-
default_vae = 128
|
795 |
-
elif free_mem_gb>=20:
|
796 |
-
default_vae = 64
|
797 |
-
|
798 |
-
vae_batch = gr.Slider(label="VAE Batch Size for Input Video", minimum=4, maximum=256, value=default_vae, step=4, info="Expensive. Increase for better quality frames during fast motion. Reduce if running out of memory")
|
799 |
-
|
800 |
-
latent_window_size = gr.Slider(label="Latent Window Size", minimum=9, maximum=49, value=9, step=1, visible=True, info='Expensive. Generate more frames at a time (larger chunks). Less degradation but higher VRAM cost.')
|
801 |
-
|
802 |
-
gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
|
803 |
-
|
804 |
-
mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
|
805 |
-
|
806 |
-
with gr.Row():
|
807 |
-
input_video_debug = gr.Video(sources='upload', label="Input Video Debug", height=320)
|
808 |
-
prompt_debug = gr.Textbox(label="Prompt Debug", value='')
|
809 |
-
total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (Seconds) Debug", minimum=1, maximum=120, value=5, step=0.1)
|
810 |
-
|
811 |
-
with gr.Column():
|
812 |
-
preview_image = gr.Image(label="Next Latents", height=200, visible=False)
|
813 |
-
result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
|
814 |
-
progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
|
815 |
-
progress_bar = gr.HTML('', elem_classes='no-generating-animation')
|
816 |
-
|
817 |
-
with gr.Row(visible=False):
|
818 |
-
gr.Examples(
|
819 |
-
examples = [
|
820 |
-
[
|
821 |
-
"./img_examples/Example1.mp4", # input_video
|
822 |
-
None, # end_frame
|
823 |
-
0.0, # end_frame_weight
|
824 |
-
"View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
|
825 |
-
"", # n_prompt
|
826 |
-
42, # seed
|
827 |
-
1, # batch
|
828 |
-
640, # resolution
|
829 |
-
1, # total_second_length
|
830 |
-
9, # latent_window_size
|
831 |
-
10, # steps
|
832 |
-
1.0, # cfg
|
833 |
-
3.0, # gs
|
834 |
-
0.0, # rs
|
835 |
-
6, # gpu_memory_preservation
|
836 |
-
False, # use_teacache
|
837 |
-
False, # no_resize
|
838 |
-
16, # mp4_crf
|
839 |
-
5, # num_clean_frames
|
840 |
-
default_vae
|
841 |
-
],
|
842 |
-
],
|
843 |
-
run_on_click = True,
|
844 |
-
fn = process,
|
845 |
-
inputs = [input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch],
|
846 |
-
outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
|
847 |
-
cache_examples = True,
|
848 |
-
)
|
849 |
-
|
850 |
-
gr.HTML("""
|
851 |
-
<div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>
|
852 |
-
""")
|
853 |
-
|
854 |
-
# 20250506 pftq: Updated inputs to include num_clean_frames
|
855 |
-
ips = [input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
|
856 |
-
start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
|
857 |
-
end_button.click(fn=end_process)
|
858 |
-
|
859 |
-
|
860 |
-
def handle_input_video_debug_upload(input):
|
861 |
-
global input_video_debug_value
|
862 |
-
input_video_debug_value = input
|
863 |
-
return []
|
864 |
-
|
865 |
-
def handle_prompt_debug_change(input):
|
866 |
-
global prompt_debug_value
|
867 |
-
prompt_debug_value = input
|
868 |
-
return []
|
869 |
-
|
870 |
-
def handle_total_second_length_debug_change(input):
|
871 |
-
global total_second_length_debug_value
|
872 |
-
total_second_length_debug_value = input
|
873 |
-
return []
|
874 |
-
|
875 |
-
input_video_debug.upload(
|
876 |
-
fn=handle_input_video_debug_upload,
|
877 |
-
inputs=[input_video_debug],
|
878 |
-
outputs=[]
|
879 |
-
)
|
880 |
-
|
881 |
-
prompt_debug.change(
|
882 |
-
fn=handle_prompt_debug_change,
|
883 |
-
inputs=[prompt_debug],
|
884 |
-
outputs=[]
|
885 |
-
)
|
886 |
-
|
887 |
-
total_second_length_debug.change(
|
888 |
-
fn=handle_total_second_length_debug_change,
|
889 |
-
inputs=[total_second_length_debug],
|
890 |
-
outputs=[]
|
891 |
-
)
|
892 |
-
|
893 |
block.launch(share=True)
|
|
|
1 |
+
from diffusers_helper.hf_login import login
|
2 |
+
|
3 |
+
import os
|
4 |
+
|
5 |
+
os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
|
6 |
+
|
7 |
+
import gradio as gr
|
8 |
+
import torch
|
9 |
+
import traceback
|
10 |
+
import einops
|
11 |
+
import safetensors.torch as sf
|
12 |
+
import numpy as np
|
13 |
+
import argparse
|
14 |
+
import math
|
15 |
+
# 20250506 pftq: Added for video input loading
|
16 |
+
import decord
|
17 |
+
# 20250506 pftq: Added for progress bars in video_encode
|
18 |
+
from tqdm import tqdm
|
19 |
+
# 20250506 pftq: Normalize file paths for Windows compatibility
|
20 |
+
import pathlib
|
21 |
+
# 20250506 pftq: for easier to read timestamp
|
22 |
+
from datetime import datetime
|
23 |
+
# 20250508 pftq: for saving prompt to mp4 comments metadata
|
24 |
+
import imageio_ffmpeg
|
25 |
+
import tempfile
|
26 |
+
import shutil
|
27 |
+
import subprocess
|
28 |
+
import spaces
|
29 |
+
from PIL import Image
|
30 |
+
from diffusers import AutoencoderKLHunyuanVideo
|
31 |
+
from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
|
32 |
+
from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
|
33 |
+
from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
|
34 |
+
from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
|
35 |
+
from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
|
36 |
+
from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
|
37 |
+
from diffusers_helper.thread_utils import AsyncStream, async_run
|
38 |
+
from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
|
39 |
+
from transformers import SiglipImageProcessor, SiglipVisionModel
|
40 |
+
from diffusers_helper.clip_vision import hf_clip_vision_encode
|
41 |
+
from diffusers_helper.bucket_tools import find_nearest_bucket
|
42 |
+
|
43 |
+
parser = argparse.ArgumentParser()
|
44 |
+
parser.add_argument('--share', action='store_true')
|
45 |
+
parser.add_argument("--server", type=str, default='0.0.0.0')
|
46 |
+
parser.add_argument("--port", type=int, required=False)
|
47 |
+
parser.add_argument("--inbrowser", action='store_true')
|
48 |
+
args = parser.parse_args()
|
49 |
+
|
50 |
+
print(args)
|
51 |
+
|
52 |
+
free_mem_gb = get_cuda_free_memory_gb(gpu)
|
53 |
+
high_vram = free_mem_gb > 60
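# With more than ~60 GB of free VRAM every model below stays resident on the GPU;
# otherwise the code falls back to VAE slicing/tiling and DynamicSwapInstaller offloading.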
|
54 |
+
|
55 |
+
print(f'Free VRAM {free_mem_gb} GB')
|
56 |
+
print(f'High-VRAM Mode: {high_vram}')
|
57 |
+
|
58 |
+
text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
|
59 |
+
text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
|
60 |
+
tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
|
61 |
+
tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
|
62 |
+
vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()
|
63 |
+
|
64 |
+
feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
|
65 |
+
image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
|
66 |
+
|
67 |
+
transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePackI2V_HY', torch_dtype=torch.bfloat16).cpu()
|
68 |
+
|
69 |
+
vae.eval()
|
70 |
+
text_encoder.eval()
|
71 |
+
text_encoder_2.eval()
|
72 |
+
image_encoder.eval()
|
73 |
+
transformer.eval()
|
74 |
+
|
75 |
+
if not high_vram:
|
76 |
+
vae.enable_slicing()
|
77 |
+
vae.enable_tiling()
|
78 |
+
|
79 |
+
transformer.high_quality_fp32_output_for_inference = True
|
80 |
+
print('transformer.high_quality_fp32_output_for_inference = True')
|
81 |
+
|
82 |
+
transformer.to(dtype=torch.bfloat16)
|
83 |
+
vae.to(dtype=torch.float16)
|
84 |
+
image_encoder.to(dtype=torch.float16)
|
85 |
+
text_encoder.to(dtype=torch.float16)
|
86 |
+
text_encoder_2.to(dtype=torch.float16)
|
87 |
+
|
88 |
+
vae.requires_grad_(False)
|
89 |
+
text_encoder.requires_grad_(False)
|
90 |
+
text_encoder_2.requires_grad_(False)
|
91 |
+
image_encoder.requires_grad_(False)
|
92 |
+
transformer.requires_grad_(False)
|
93 |
+
|
94 |
+
if not high_vram:
|
95 |
+
# DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
|
96 |
+
DynamicSwapInstaller.install_model(transformer, device=gpu)
|
97 |
+
DynamicSwapInstaller.install_model(text_encoder, device=gpu)
|
98 |
+
else:
|
99 |
+
text_encoder.to(gpu)
|
100 |
+
text_encoder_2.to(gpu)
|
101 |
+
image_encoder.to(gpu)
|
102 |
+
vae.to(gpu)
|
103 |
+
transformer.to(gpu)
|
104 |
+
|
105 |
+
stream = AsyncStream()
|
106 |
+
|
107 |
+
outputs_folder = './outputs/'
|
108 |
+
os.makedirs(outputs_folder, exist_ok=True)
|
109 |
+
|
110 |
+
input_video_debug_value = None
|
111 |
+
prompt_debug_value = None
|
112 |
+
total_second_length_debug_value = None
|
113 |
+
|
114 |
+
# 20250506 pftq: Added function to encode input video frames into latents
|
115 |
+
@torch.no_grad()
|
116 |
+
def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, device="cuda", width=None, height=None):
|
117 |
+
"""
|
118 |
+
Encode a video into latent representations using the VAE.
|
119 |
+
|
120 |
+
Args:
|
121 |
+
video_path: Path to the input video file.
|
122 |
+
vae: AutoencoderKLHunyuanVideo model.
|
123 |
+
height, width: Target resolution for resizing frames.
|
124 |
+
vae_batch_size: Number of frames to process per batch.
|
125 |
+
device: Device for computation (e.g., "cuda").
|
126 |
+
|
127 |
+
Returns:
|
128 |
+
start_latent: Latent of the first frame (for compatibility with original code).
|
129 |
+
input_image_np: First frame as numpy array (for CLIP vision encoding).
|
130 |
+
history_latents: Latents of all frames (shape: [1, channels, frames, height//8, width//8]).
|
131 |
+
fps: Frames per second of the input video.
target_height, target_width: Final frame resolution actually used (after bucket adjustment unless no_resize).
input_video_pixels: All preprocessed pixel frames as a BCTHW tensor on CPU.
end_of_input_video_latent: Latent of the last frame.
end_of_input_video_image_np: Last frame as numpy array.
|
132 |
+
"""
|
133 |
+
# 20250506 pftq: Normalize video path for Windows compatibility
|
134 |
+
video_path = str(pathlib.Path(video_path).resolve())
|
135 |
+
print(f"Processing video: {video_path}")
|
136 |
+
|
137 |
+
# 20250506 pftq: Check CUDA availability and fallback to CPU if needed
|
138 |
+
if device == "cuda" and not torch.cuda.is_available():
|
139 |
+
print("CUDA is not available, falling back to CPU")
|
140 |
+
device = "cpu"
|
141 |
+
|
142 |
+
try:
|
143 |
+
# 20250506 pftq: Load video and get FPS
|
144 |
+
print("Initializing VideoReader...")
|
145 |
+
vr = decord.VideoReader(video_path)
|
146 |
+
fps = vr.get_avg_fps() # Get input video FPS
|
147 |
+
num_real_frames = len(vr)
|
148 |
+
print(f"Video loaded: {num_real_frames} frames, FPS: {fps}")
|
149 |
+
|
150 |
+
# Truncate to nearest latent size (multiple of 4)
|
151 |
+
latent_size_factor = 4
|
152 |
+
num_frames = (num_real_frames // latent_size_factor) * latent_size_factor
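# Rounding down to a multiple of 4 appears to match the temporal compression of the
# HunyuanVideo VAE, so every pixel frame maps cleanly onto a latent frame and no partial
# remainder is left over when encoding in batches below.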
|
153 |
+
if num_frames != num_real_frames:
|
154 |
+
print(f"Truncating video from {num_real_frames} to {num_frames} frames for latent size compatibility")
|
155 |
+
num_real_frames = num_frames
|
156 |
+
|
157 |
+
# 20250506 pftq: Read frames
|
158 |
+
print("Reading video frames...")
|
159 |
+
frames = vr.get_batch(range(num_real_frames)).asnumpy() # Shape: (num_real_frames, height, width, channels)
|
160 |
+
print(f"Frames read: {frames.shape}")
|
161 |
+
|
162 |
+
# 20250506 pftq: Get native video resolution
|
163 |
+
native_height, native_width = frames.shape[1], frames.shape[2]
|
164 |
+
print(f"Native video resolution: {native_width}x{native_height}")
|
165 |
+
|
166 |
+
# 20250506 pftq: Use native resolution if height/width not specified, otherwise use provided values
|
167 |
+
target_height = native_height if height is None else height
|
168 |
+
target_width = native_width if width is None else width
|
169 |
+
|
170 |
+
# 20250506 pftq: Adjust to nearest bucket for model compatibility
|
171 |
+
if not no_resize:
|
172 |
+
target_height, target_width = find_nearest_bucket(target_height, target_width, resolution=resolution)
|
173 |
+
print(f"Adjusted resolution: {target_width}x{target_height}")
|
174 |
+
else:
|
175 |
+
print(f"Using native resolution without resizing: {target_width}x{target_height}")
|
176 |
+
|
177 |
+
# 20250506 pftq: Preprocess frames to match original image processing
|
178 |
+
processed_frames = []
|
179 |
+
for i, frame in enumerate(frames):
|
180 |
+
#print(f"Preprocessing frame {i+1}/{num_frames}")
|
181 |
+
frame_np = resize_and_center_crop(frame, target_width=target_width, target_height=target_height)
|
182 |
+
processed_frames.append(frame_np)
|
183 |
+
processed_frames = np.stack(processed_frames) # Shape: (num_real_frames, height, width, channels)
|
184 |
+
print(f"Frames preprocessed: {processed_frames.shape}")
|
185 |
+
|
186 |
+
# 20250506 pftq: Save first frame for CLIP vision encoding
|
187 |
+
input_image_np = processed_frames[0]
|
188 |
+
end_of_input_video_image_np = processed_frames[-1]
|
189 |
+
|
190 |
+
# 20250506 pftq: Convert to tensor and normalize to [-1, 1]
|
191 |
+
print("Converting frames to tensor...")
|
192 |
+
frames_pt = torch.from_numpy(processed_frames).float() / 127.5 - 1
|
193 |
+
frames_pt = frames_pt.permute(0, 3, 1, 2) # Shape: (num_real_frames, channels, height, width)
|
194 |
+
frames_pt = frames_pt.unsqueeze(0) # Shape: (1, num_real_frames, channels, height, width)
|
195 |
+
frames_pt = frames_pt.permute(0, 2, 1, 3, 4) # Shape: (1, channels, num_real_frames, height, width)
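# The frames are now normalized from uint8 [0, 255] to [-1, 1] and laid out as BCTHW
# (batch, channels, time, height, width), the layout vae_encode and save_bcthw_as_mp4 expect.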
|
196 |
+
print(f"Tensor shape: {frames_pt.shape}")
|
197 |
+
|
198 |
+
# 20250507 pftq: Save pixel frames for use in worker
|
199 |
+
input_video_pixels = frames_pt.cpu()
|
200 |
+
|
201 |
+
# 20250506 pftq: Move to device
|
202 |
+
print(f"Moving tensor to device: {device}")
|
203 |
+
frames_pt = frames_pt.to(device)
|
204 |
+
print("Tensor moved to device")
|
205 |
+
|
206 |
+
# 20250506 pftq: Move VAE to device
|
207 |
+
print(f"Moving VAE to device: {device}")
|
208 |
+
vae.to(device)
|
209 |
+
print("VAE moved to device")
|
210 |
+
|
211 |
+
# 20250506 pftq: Encode frames in batches
|
212 |
+
print(f"Encoding input video frames in VAE batch size {vae_batch_size} (reduce if memory issues here or if forcing video resolution)")
|
213 |
+
latents = []
|
214 |
+
vae.eval()
|
215 |
+
with torch.no_grad():
|
216 |
+
for i in tqdm(range(0, frames_pt.shape[2], vae_batch_size), desc="Encoding video frames", mininterval=0.1):
|
217 |
+
#print(f"Encoding batch {i//vae_batch_size + 1}: frames {i} to {min(i + vae_batch_size, frames_pt.shape[2])}")
|
218 |
+
batch = frames_pt[:, :, i:i + vae_batch_size] # Shape: (1, channels, batch_size, height, width)
|
219 |
+
try:
|
220 |
+
# 20250506 pftq: Log GPU memory before encoding
|
221 |
+
if device == "cuda":
|
222 |
+
free_mem = torch.cuda.memory_allocated() / 1024**3
|
223 |
+
#print(f"GPU memory before encoding: {free_mem:.2f} GB")
|
224 |
+
batch_latent = vae_encode(batch, vae)
|
225 |
+
# 20250506 pftq: Synchronize CUDA to catch issues
|
226 |
+
if device == "cuda":
|
227 |
+
torch.cuda.synchronize()
|
228 |
+
#print(f"GPU memory after encoding: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
|
229 |
+
latents.append(batch_latent)
|
230 |
+
#print(f"Batch encoded, latent shape: {batch_latent.shape}")
|
231 |
+
except RuntimeError as e:
|
232 |
+
print(f"Error during VAE encoding: {str(e)}")
|
233 |
+
if device == "cuda" and "out of memory" in str(e).lower():
|
234 |
+
print("CUDA out of memory, try reducing vae_batch_size or using CPU")
|
235 |
+
raise
|
236 |
+
|
237 |
+
# 20250506 pftq: Concatenate latents
|
238 |
+
print("Concatenating latents...")
|
239 |
+
history_latents = torch.cat(latents, dim=2) # Shape: (1, channels, frames, height//8, width//8)
|
240 |
+
print(f"History latents shape: {history_latents.shape}")
|
241 |
+
|
242 |
+
# 20250506 pftq: Get first frame's latent
|
243 |
+
start_latent = history_latents[:, :, :1] # Shape: (1, channels, 1, height//8, width//8)
|
244 |
+
end_of_input_video_latent = history_latents[:, :, -1:] # Shape: (1, channels, 1, height//8, width//8)
|
245 |
+
print(f"Start latent shape: {start_latent.shape}")
|
246 |
+
|
247 |
+
# 20250506 pftq: Move VAE back to CPU to free GPU memory
|
248 |
+
if device == "cuda":
|
249 |
+
vae.to(cpu)
|
250 |
+
torch.cuda.empty_cache()
|
251 |
+
print("VAE moved back to CPU, CUDA cache cleared")
|
252 |
+
|
253 |
+
return start_latent, input_image_np, history_latents, fps, target_height, target_width, input_video_pixels, end_of_input_video_latent, end_of_input_video_image_np
|
254 |
+
|
255 |
+
except Exception as e:
|
256 |
+
print(f"Error in video_encode: {str(e)}")
|
257 |
+
raise
|
258 |
+
|
259 |
+
|
260 |
+
# 20250507 pftq: New function to encode a single image (end frame)
|
261 |
+
@torch.no_grad()
|
262 |
+
def image_encode(image_np, target_width, target_height, vae, image_encoder, feature_extractor, device="cuda"):
|
263 |
+
"""
|
264 |
+
Encode a single image into a latent and compute its CLIP vision embedding.
|
265 |
+
|
266 |
+
Args:
|
267 |
+
image_np: Input image as numpy array.
|
268 |
+
target_width, target_height: Exact resolution to resize the image to (matches start frame).
|
269 |
+
vae: AutoencoderKLHunyuanVideo model.
|
270 |
+
image_encoder: SiglipVisionModel for CLIP vision encoding.
|
271 |
+
feature_extractor: SiglipImageProcessor for preprocessing.
|
272 |
+
device: Device for computation (e.g., "cuda").
|
273 |
+
|
274 |
+
Returns:
|
275 |
+
latent: Latent representation of the image (shape: [1, channels, 1, height//8, width//8]).
|
276 |
+
clip_embedding: CLIP vision embedding of the image.
|
277 |
+
processed_image_np: Processed image as numpy array (after resizing).
|
278 |
+
"""
|
279 |
+
# 20250507 pftq: Process end frame with exact start frame dimensions
|
280 |
+
print("Processing end frame...")
|
281 |
+
try:
|
282 |
+
print(f"Using exact start frame resolution for end frame: {target_width}x{target_height}")
|
283 |
+
|
284 |
+
# Resize and preprocess image to match start frame
|
285 |
+
processed_image_np = resize_and_center_crop(image_np, target_width=target_width, target_height=target_height)
|
286 |
+
|
287 |
+
# Convert to tensor and normalize
|
288 |
+
image_pt = torch.from_numpy(processed_image_np).float() / 127.5 - 1
|
289 |
+
image_pt = image_pt.permute(2, 0, 1).unsqueeze(0).unsqueeze(2) # Shape: [1, channels, 1, height, width]
|
290 |
+
image_pt = image_pt.to(device)
|
291 |
+
|
292 |
+
# Move VAE to device
|
293 |
+
vae.to(device)
|
294 |
+
|
295 |
+
# Encode to latent
|
296 |
+
latent = vae_encode(image_pt, vae)
|
297 |
+
print(f"image_encode vae output shape: {latent.shape}")
|
298 |
+
|
299 |
+
# Move image encoder to device
|
300 |
+
image_encoder.to(device)
|
301 |
+
|
302 |
+
# Compute CLIP vision embedding
|
303 |
+
clip_embedding = hf_clip_vision_encode(processed_image_np, feature_extractor, image_encoder).last_hidden_state
|
304 |
+
|
305 |
+
# Move models back to CPU and clear cache
|
306 |
+
if device == "cuda":
|
307 |
+
vae.to(cpu)
|
308 |
+
image_encoder.to(cpu)
|
309 |
+
torch.cuda.empty_cache()
|
310 |
+
print("VAE and image encoder moved back to CPU, CUDA cache cleared")
|
311 |
+
|
312 |
+
print(f"End latent shape: {latent.shape}")
|
313 |
+
return latent, clip_embedding, processed_image_np
|
314 |
+
|
315 |
+
except Exception as e:
|
316 |
+
print(f"Error in image_encode: {str(e)}")
|
317 |
+
raise
|
318 |
+
|
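Reviewer note: a minimal usage sketch of image_encode, assuming an end_frame_np array plus the module-level vae, image_encoder, feature_extractor and gpu objects, and the width/height picked for the input video:

    # Hypothetical call matching how the worker uses it: the end frame is forced to
    # the exact resolution chosen for the input video so latents can be concatenated later.
    end_latent, end_clip_embedding, end_frame_resized = image_encode(
        end_frame_np,                      # (H, W, 3) uint8 numpy image
        target_width=width, target_height=height,
        vae=vae, image_encoder=image_encoder,
        feature_extractor=feature_extractor, device=gpu,
    )
    print(end_latent.shape)          # (1, channels, 1, height // 8, width // 8)
    print(end_clip_embedding.shape)  # (1, seq_len, hidden_dim) from the SigLIP vision tower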
319 |
+
# 20250508 pftq: for saving prompt to mp4 metadata comments
|
320 |
+
def set_mp4_comments_imageio_ffmpeg(input_file, comments):
|
321 |
+
try:
|
322 |
+
# Get the path to the bundled FFmpeg binary from imageio-ffmpeg
|
323 |
+
ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
|
324 |
+
|
325 |
+
# Check if input file exists
|
326 |
+
if not os.path.exists(input_file):
|
327 |
+
print(f"Error: Input file {input_file} does not exist")
|
328 |
+
return False
|
329 |
+
|
330 |
+
# Create a temporary file path
|
331 |
+
temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
|
332 |
+
|
333 |
+
# FFmpeg command using the bundled binary
|
334 |
+
command = [
|
335 |
+
ffmpeg_path, # Use imageio-ffmpeg's FFmpeg
|
336 |
+
'-i', input_file, # input file
|
337 |
+
'-metadata', f'comment={comments}', # set comment metadata
|
338 |
+
'-c:v', 'copy', # copy video stream without re-encoding
|
339 |
+
'-c:a', 'copy', # copy audio stream without re-encoding
|
340 |
+
'-y', # overwrite output file if it exists
|
341 |
+
temp_file # temporary output file
|
342 |
+
]
|
343 |
+
|
344 |
+
# Run the FFmpeg command
|
345 |
+
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
346 |
+
|
347 |
+
if result.returncode == 0:
|
348 |
+
# Replace the original file with the modified one
|
349 |
+
shutil.move(temp_file, input_file)
|
350 |
+
print(f"Successfully added comments to {input_file}")
|
351 |
+
return True
|
352 |
+
else:
|
353 |
+
# Clean up temp file if FFmpeg fails
|
354 |
+
if os.path.exists(temp_file):
|
355 |
+
os.remove(temp_file)
|
356 |
+
print(f"Error: FFmpeg failed with message:\n{result.stderr}")
|
357 |
+
return False
|
358 |
+
|
359 |
+
except Exception as e:
|
360 |
+
# Clean up temp file in case of other errors
|
361 |
+
if 'temp_file' in locals() and os.path.exists(temp_file):
|
362 |
+
os.remove(temp_file)
|
363 |
+
print(f"Error saving prompt to video metadata, ffmpeg may be required: "+str(e))
|
364 |
+
return False
|
365 |
+
|
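Reviewer note: to confirm the comment actually landed in the container, the same bundled binary can dump the metadata back out; a small sketch (not part of the app) using ffmpeg's ffmetadata output format:

    import subprocess
    import imageio_ffmpeg

    def read_mp4_metadata(path):
        # Dump container metadata as ini-style text to stdout; the comment tag
        # written above should appear as a "comment=..." line.
        ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
        result = subprocess.run(
            [ffmpeg_path, '-i', path, '-f', 'ffmetadata', '-'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
        )
        return result.stdout

    # print(read_mp4_metadata('outputs/some_video.mp4'))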
366 |
+
# 20250506 pftq: Modified worker to accept video input, and clean frame count
|
367 |
+
@torch.no_grad()
|
368 |
+
def worker(input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
|
369 |
+
|
370 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
|
371 |
+
|
372 |
+
try:
|
373 |
+
# Clean GPU
|
374 |
+
if not high_vram:
|
375 |
+
unload_complete_models(
|
376 |
+
text_encoder, text_encoder_2, image_encoder, vae, transformer
|
377 |
+
)
|
378 |
+
|
379 |
+
# Text encoding
|
380 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
|
381 |
+
|
382 |
+
if not high_vram:
|
383 |
+
fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
|
384 |
+
load_model_as_complete(text_encoder_2, target_device=gpu)
|
385 |
+
|
386 |
+
llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
|
387 |
+
|
388 |
+
if cfg == 1:
|
389 |
+
llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
|
390 |
+
else:
|
391 |
+
llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
|
392 |
+
|
393 |
+
llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
|
394 |
+
llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
|
395 |
+
|
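Reviewer note: the zero-filled negative embeddings are only placeholders; with cfg (real_guidance_scale) equal to 1 the classifier-free guidance mix collapses to the conditional prediction, which is why n_prompt is not encoded in that case. For reference, the standard mix (generic sketch, not the sampler's exact code):

    # Classifier-free guidance: cfg_scale == 1.0 returns the conditional prediction,
    # so the negative-prompt branch never influences the result.
    def cfg_mix(pred_cond, pred_uncond, cfg_scale):
        return pred_uncond + cfg_scale * (pred_cond - pred_uncond)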
396 |
+
# 20250506 pftq: Processing input video instead of image
|
397 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Video processing ...'))))
|
398 |
+
|
399 |
+
# 20250506 pftq: Encode video
|
400 |
+
start_latent, input_image_np, video_latents, fps, height, width, input_video_pixels, end_of_input_video_latent, end_of_input_video_image_np = video_encode(input_video, resolution, no_resize, vae, vae_batch_size=vae_batch, device=gpu)
|
401 |
+
|
402 |
+
#Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
|
403 |
+
|
404 |
+
# CLIP Vision
|
405 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
|
406 |
+
|
407 |
+
if not high_vram:
|
408 |
+
load_model_as_complete(image_encoder, target_device=gpu)
|
409 |
+
|
410 |
+
image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
|
411 |
+
image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
|
412 |
+
start_embedding = image_encoder_last_hidden_state
|
413 |
+
|
414 |
+
end_of_input_video_output = hf_clip_vision_encode(end_of_input_video_image_np, feature_extractor, image_encoder)
|
415 |
+
end_of_input_video_last_hidden_state = end_of_input_video_output.last_hidden_state
|
416 |
+
end_of_input_video_embedding = end_of_input_video_last_hidden_state
|
417 |
+
|
418 |
+
# 20250507 pftq: Process end frame if provided
|
419 |
+
end_latent = None
|
420 |
+
end_clip_embedding = None
|
421 |
+
if end_frame is not None:
|
422 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'End frame encoding ...'))))
|
423 |
+
end_latent, end_clip_embedding, _ = image_encode(
|
424 |
+
end_frame, target_width=width, target_height=height, vae=vae,
|
425 |
+
image_encoder=image_encoder, feature_extractor=feature_extractor, device=gpu
|
426 |
+
)
|
427 |
+
|
428 |
+
# Dtype
|
429 |
+
llama_vec = llama_vec.to(transformer.dtype)
|
430 |
+
llama_vec_n = llama_vec_n.to(transformer.dtype)
|
431 |
+
clip_l_pooler = clip_l_pooler.to(transformer.dtype)
|
432 |
+
clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
|
433 |
+
image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
|
434 |
+
end_of_input_video_embedding = end_of_input_video_embedding.to(transformer.dtype)
|
435 |
+
|
436 |
+
# 20250509 pftq: Restored original placement of total_latent_sections after video_encode
|
437 |
+
total_latent_sections = (total_second_length * fps) / (latent_window_size * 4)
|
438 |
+
total_latent_sections = int(max(round(total_latent_sections), 1))
|
439 |
+
|
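Reviewer note: a worked example of the section arithmetic with illustrative values (30 fps input, 5 extra seconds requested, default latent_window_size of 9):

    fps = 30
    total_second_length = 5
    latent_window_size = 9

    total_latent_sections = (total_second_length * fps) / (latent_window_size * 4)  # 150 / 36 ~= 4.17
    total_latent_sections = int(max(round(total_latent_sections), 1))               # -> 4 sections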
440 |
+
for idx in range(batch):
|
441 |
+
if idx > 0:
|
442 |
+
seed = seed + 1
|
443 |
+
|
444 |
+
if batch > 1:
|
445 |
+
print(f"Beginning video {idx+1} of {batch} with seed {seed} ")
|
446 |
+
|
447 |
+
job_id = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+f"_framepack-videoinput-endframe_{width}-{total_second_length}sec_seed-{seed}_steps-{steps}_distilled-{gs}_cfg-{cfg}"
|
448 |
+
|
449 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
|
450 |
+
|
451 |
+
rnd = torch.Generator("cpu").manual_seed(seed)
|
452 |
+
|
453 |
+
history_latents = video_latents.cpu()
|
454 |
+
history_pixels = None
|
455 |
+
total_generated_latent_frames = 0
|
456 |
+
previous_video = None
|
457 |
+
|
458 |
+
|
459 |
+
# 20250509 Generate backwards with end frame for better end frame anchoring
|
460 |
+
latent_paddings = list(reversed(range(total_latent_sections)))
|
461 |
+
if total_latent_sections > 4:
|
462 |
+
latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]
|
463 |
+
|
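Reviewer note: the capped padding schedule always ends in 1, 0, so generation runs backward from the end frame toward the input video; an illustrative expansion:

    total_latent_sections = 6
    latent_paddings = list(reversed(range(total_latent_sections)))          # [5, 4, 3, 2, 1, 0]
    if total_latent_sections > 4:
        latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]  # [3, 2, 2, 2, 1, 0]
    # padding 0 is the section that touches the input video (is_start_of_video);
    # the first entry is the section anchored to the end frame (is_end_of_video).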
464 |
+
for section_index, latent_padding in enumerate(latent_paddings):
|
465 |
+
is_start_of_video = latent_padding == 0
|
466 |
+
is_end_of_video = latent_padding == latent_paddings[0]
|
467 |
+
latent_padding_size = latent_padding * latent_window_size
|
468 |
+
|
469 |
+
if stream.input_queue.top() == 'end':
|
470 |
+
stream.output_queue.push(('end', None))
|
471 |
+
return
|
472 |
+
|
473 |
+
if not high_vram:
|
474 |
+
unload_complete_models()
|
475 |
+
move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
|
476 |
+
|
477 |
+
if use_teacache:
|
478 |
+
transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
|
479 |
+
else:
|
480 |
+
transformer.initialize_teacache(enable_teacache=False)
|
481 |
+
|
482 |
+
def callback(d):
|
483 |
+
try:
|
484 |
+
preview = d['denoised']
|
485 |
+
preview = vae_decode_fake(preview)
|
486 |
+
preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
|
487 |
+
preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
|
488 |
+
if stream.input_queue.top() == 'end':
|
489 |
+
stream.output_queue.push(('end', None))
|
490 |
+
raise KeyboardInterrupt('User ends the task.')
|
491 |
+
current_step = d['i'] + 1
|
492 |
+
percentage = int(100.0 * current_step / steps)
|
493 |
+
hint = f'Sampling {current_step}/{steps}'
|
494 |
+
desc = f'Total frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / fps) :.2f} seconds (FPS-{fps}), Seed: {seed}, Video {idx+1} of {batch}. Generating part {total_latent_sections - section_index} of {total_latent_sections} backward...'
|
495 |
+
stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
|
496 |
+
except ConnectionResetError as e:
|
497 |
+
print(f"Suppressed ConnectionResetError in callback: {e}")
|
498 |
+
return
|
499 |
+
|
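Reviewer note: the einops pattern in the callback tiles all preview timesteps side by side into one image; a toy shape check (hypothetical sizes):

    import einops, torch

    toy = torch.zeros(1, 3, 9, 64, 96)                      # (b, c, t, h, w)
    tiled = einops.rearrange(toy, 'b c t h w -> (b h) (t w) c')
    print(tiled.shape)                                      # torch.Size([64, 864, 3])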
500 |
+
# 20250509 pftq: Dynamic frame allocation like original num_clean_frames, fix split error
|
501 |
+
available_frames = video_latents.shape[2] if is_start_of_video else history_latents.shape[2]
|
502 |
+
effective_clean_frames = max(0, num_clean_frames - 1) if num_clean_frames > 1 else 1
|
503 |
+
if is_start_of_video:
|
504 |
+
effective_clean_frames = 1 # avoid jumpcuts from input video
|
505 |
+
clean_latent_pre_frames = effective_clean_frames
|
506 |
+
num_2x_frames = min(2, max(1, available_frames - clean_latent_pre_frames - 1)) if available_frames > clean_latent_pre_frames + 1 else 1
|
507 |
+
num_4x_frames = min(16, max(1, available_frames - clean_latent_pre_frames - num_2x_frames)) if available_frames > clean_latent_pre_frames + num_2x_frames else 1
|
508 |
+
total_context_frames = num_2x_frames + num_4x_frames
|
509 |
+
total_context_frames = min(total_context_frames, available_frames - clean_latent_pre_frames)
|
510 |
+
|
511 |
+
# 20250511 pftq: Dynamically adjust post_frames based on clean_latents_post
|
512 |
+
post_frames = 1 if is_end_of_video and end_latent is not None else effective_clean_frames # 20250511 pftq: Single frame for end_latent, otherwise padding causes still image
|
513 |
+
indices = torch.arange(0, clean_latent_pre_frames + latent_padding_size + latent_window_size + post_frames + num_2x_frames + num_4x_frames).unsqueeze(0)
|
514 |
+
clean_latent_indices_pre, blank_indices, latent_indices, clean_latent_indices_post, clean_latent_2x_indices, clean_latent_4x_indices = indices.split(
|
515 |
+
[clean_latent_pre_frames, latent_padding_size, latent_window_size, post_frames, num_2x_frames, num_4x_frames], dim=1
|
516 |
+
)
|
517 |
+
clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)
|
518 |
+
|
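Reviewer note: the index tensor is a contiguous ruler sliced into named ranges; a worked example with plausible values (assumed: clean_latent_pre_frames=4, latent_padding_size=18, latent_window_size=9, post_frames=1, num_2x_frames=2, num_4x_frames=16):

    import torch

    sizes = [4, 18, 9, 1, 2, 16]          # pre, blank, window, post, 2x, 4x
    indices = torch.arange(0, sum(sizes)).unsqueeze(0)
    pre, blank, window, post, two_x, four_x = indices.split(sizes, dim=1)
    print(pre)      # tensor([[0, 1, 2, 3]])
    print(window)   # tensor([[22, 23, 24, 25, 26, 27, 28, 29, 30]])
    print(post)     # tensor([[31]])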
519 |
+
# 20250509 pftq: Split context frames dynamically for 2x and 4x only
|
520 |
+
context_frames = history_latents[:, :, -(total_context_frames + clean_latent_pre_frames):-clean_latent_pre_frames, :, :] if total_context_frames > 0 else history_latents[:, :, :1, :, :]
|
521 |
+
split_sizes = [num_4x_frames, num_2x_frames]
|
522 |
+
split_sizes = [s for s in split_sizes if s > 0]
|
523 |
+
if split_sizes and context_frames.shape[2] >= sum(split_sizes):
|
524 |
+
splits = context_frames.split(split_sizes, dim=2)
|
525 |
+
split_idx = 0
|
526 |
+
clean_latents_4x = splits[split_idx] if num_4x_frames > 0 else history_latents[:, :, :1, :, :]
|
527 |
+
split_idx += 1 if num_4x_frames > 0 else 0
|
528 |
+
clean_latents_2x = splits[split_idx] if num_2x_frames > 0 and split_idx < len(splits) else history_latents[:, :, :1, :, :]
|
529 |
+
else:
|
530 |
+
clean_latents_4x = clean_latents_2x = history_latents[:, :, :1, :, :]
|
531 |
+
|
532 |
+
clean_latents_pre = video_latents[:, :, -min(effective_clean_frames, video_latents.shape[2]):].to(history_latents) # smoother motion but jumpcuts if end frame is too different, must change clean_latent_pre_frames to effective_clean_frames also
|
533 |
+
clean_latents_post = history_latents[:, :, :min(effective_clean_frames, history_latents.shape[2]), :, :] # smoother motion, must change post_frames to effective_clean_frames also
|
534 |
+
|
535 |
+
if is_end_of_video:
|
536 |
+
clean_latents_post = torch.zeros_like(end_of_input_video_latent).to(history_latents)
|
537 |
+
|
538 |
+
# 20250509 pftq: handle end frame if available
|
539 |
+
if end_latent is not None:
|
540 |
+
#current_end_frame_weight = end_frame_weight * (latent_padding / latent_paddings[0])
|
541 |
+
#current_end_frame_weight = current_end_frame_weight * 0.5 + 0.5
|
542 |
+
current_end_frame_weight = end_frame_weight # changing this over time introduces discontinuity
|
543 |
+
# 20250511 pftq: Removed end frame weight adjustment as it has no effect
|
544 |
+
image_encoder_last_hidden_state = (1 - current_end_frame_weight) * end_of_input_video_embedding + end_clip_embedding * current_end_frame_weight
|
545 |
+
image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
|
546 |
+
|
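Reviewer note: the weighted sum above is a plain linear interpolation between the last-input-frame embedding and the end-frame embedding; an equivalent one-liner with torch.lerp (sketch only):

    import torch

    def blend_embeddings(start_emb: torch.Tensor, end_emb: torch.Tensor, weight: float) -> torch.Tensor:
        # weight = 0.0 keeps the input-video embedding, 1.0 uses the end frame only.
        # Equivalent to (1 - weight) * start_emb + weight * end_emb.
        return torch.lerp(start_emb, end_emb, weight)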
547 |
+
# 20250511 pftq: Use end_latent only
|
548 |
+
if is_end_of_video:
|
549 |
+
clean_latents_post = end_latent.to(history_latents)[:, :, :1, :, :] # Ensure single frame
|
550 |
+
|
551 |
+
# 20250511 pftq: Pad clean_latents_pre to match clean_latent_pre_frames if needed
|
552 |
+
if clean_latents_pre.shape[2] < clean_latent_pre_frames:
|
553 |
+
clean_latents_pre = clean_latents_pre.repeat(1, 1, clean_latent_pre_frames // clean_latents_pre.shape[2], 1, 1)
|
554 |
+
# 20250511 pftq: Pad clean_latents_post to match post_frames if needed
|
555 |
+
if clean_latents_post.shape[2] < post_frames:
|
556 |
+
clean_latents_post = clean_latents_post.repeat(1, 1, post_frames // clean_latents_post.shape[2], 1, 1)
|
557 |
+
|
558 |
+
clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2)
|
559 |
+
|
560 |
+
max_frames = min(latent_window_size * 4 - 3, history_latents.shape[2] * 4)
|
561 |
+
print(f"Generating video {idx+1} of {batch} with seed {seed}, part {total_latent_sections - section_index} of {total_latent_sections} backward")
|
562 |
+
generated_latents = sample_hunyuan(
|
563 |
+
transformer=transformer,
|
564 |
+
sampler='unipc',
|
565 |
+
width=width,
|
566 |
+
height=height,
|
567 |
+
frames=max_frames,
|
568 |
+
real_guidance_scale=cfg,
|
569 |
+
distilled_guidance_scale=gs,
|
570 |
+
guidance_rescale=rs,
|
571 |
+
num_inference_steps=steps,
|
572 |
+
generator=rnd,
|
573 |
+
prompt_embeds=llama_vec,
|
574 |
+
prompt_embeds_mask=llama_attention_mask,
|
575 |
+
prompt_poolers=clip_l_pooler,
|
576 |
+
negative_prompt_embeds=llama_vec_n,
|
577 |
+
negative_prompt_embeds_mask=llama_attention_mask_n,
|
578 |
+
negative_prompt_poolers=clip_l_pooler_n,
|
579 |
+
device=gpu,
|
580 |
+
dtype=torch.bfloat16,
|
581 |
+
image_embeddings=image_encoder_last_hidden_state,
|
582 |
+
latent_indices=latent_indices,
|
583 |
+
clean_latents=clean_latents,
|
584 |
+
clean_latent_indices=clean_latent_indices,
|
585 |
+
clean_latents_2x=clean_latents_2x,
|
586 |
+
clean_latent_2x_indices=clean_latent_2x_indices,
|
587 |
+
clean_latents_4x=clean_latents_4x,
|
588 |
+
clean_latent_4x_indices=clean_latent_4x_indices,
|
589 |
+
callback=callback,
|
590 |
+
)
|
591 |
+
|
592 |
+
if is_start_of_video:
|
593 |
+
generated_latents = torch.cat([video_latents[:, :, -1:].to(generated_latents), generated_latents], dim=2)
|
594 |
+
|
595 |
+
total_generated_latent_frames += int(generated_latents.shape[2])
|
596 |
+
history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2)
|
597 |
+
|
598 |
+
if not high_vram:
|
599 |
+
offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
|
600 |
+
load_model_as_complete(vae, target_device=gpu)
|
601 |
+
|
602 |
+
real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :]
|
603 |
+
if history_pixels is None:
|
604 |
+
history_pixels = vae_decode(real_history_latents, vae).cpu()
|
605 |
+
else:
|
606 |
+
section_latent_frames = (latent_window_size * 2 + 1) if is_start_of_video else (latent_window_size * 2)
|
607 |
+
overlapped_frames = latent_window_size * 4 - 3
|
608 |
+
current_pixels = vae_decode(real_history_latents[:, :, :section_latent_frames], vae).cpu()
|
609 |
+
history_pixels = soft_append_bcthw(current_pixels, history_pixels, overlapped_frames)
|
610 |
+
|
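Reviewer note: soft_append_bcthw comes from diffusers_helper.utils and blends the re-decoded overlap region instead of hard-cutting between sections. Purely as an illustration of what such a blend can look like (a guess at the idea, not the helper's actual implementation):

    import torch

    def crossfade_append_bcthw(new_clip: torch.Tensor, history: torch.Tensor, overlap: int) -> torch.Tensor:
        # new_clip precedes history in time (generation runs backward here);
        # linearly fade the last `overlap` frames of new_clip into the first
        # `overlap` frames of history along the time axis (dim=2).
        weight = torch.linspace(1.0, 0.0, overlap, device=new_clip.device).view(1, 1, overlap, 1, 1)
        blended = weight * new_clip[:, :, -overlap:] + (1.0 - weight) * history[:, :, :overlap]
        return torch.cat([new_clip[:, :, :-overlap], blended, history[:, :, overlap:]], dim=2)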
611 |
+
if not high_vram:
|
612 |
+
unload_complete_models()
|
613 |
+
|
614 |
+
output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
|
615 |
+
save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
|
616 |
+
print(f"Latest video saved: {output_filename}")
|
617 |
+
set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompt} | Negative Prompt: {n_prompt}")
|
618 |
+
print(f"Prompt saved to mp4 metadata comments: {output_filename}")
|
619 |
+
|
620 |
+
if previous_video is not None and os.path.exists(previous_video):
|
621 |
+
try:
|
622 |
+
os.remove(previous_video)
|
623 |
+
print(f"Previous partial video deleted: {previous_video}")
|
624 |
+
except Exception as e:
|
625 |
+
print(f"Error deleting previous partial video {previous_video}: {e}")
|
626 |
+
previous_video = output_filename
|
627 |
+
|
628 |
+
print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
|
629 |
+
stream.output_queue.push(('file', output_filename))
|
630 |
+
|
631 |
+
if is_start_of_video:
|
632 |
+
break
|
633 |
+
|
634 |
+
history_pixels = torch.cat([input_video_pixels, history_pixels], dim=2)
|
635 |
+
#overlapped_frames = latent_window_size * 4 - 3
|
636 |
+
#history_pixels = soft_append_bcthw(input_video_pixels, history_pixels, overlapped_frames)
|
637 |
+
|
638 |
+
output_filename = os.path.join(outputs_folder, f'{job_id}_final.mp4')
|
639 |
+
save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
|
640 |
+
print(f"Final video with input blend saved: {output_filename}")
|
641 |
+
set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompt} | Negative Prompt: {n_prompt}")
|
642 |
+
print(f"Prompt saved to mp4 metadata comments: {output_filename}")
|
643 |
+
stream.output_queue.push(('file', output_filename))
|
644 |
+
|
645 |
+
if previous_video is not None and os.path.exists(previous_video):
|
646 |
+
try:
|
647 |
+
os.remove(previous_video)
|
648 |
+
print(f"Previous partial video deleted: {previous_video}")
|
649 |
+
except Exception as e:
|
650 |
+
print(f"Error deleting previous partial video {previous_video}: {e}")
|
651 |
+
previous_video = output_filename
|
652 |
+
|
653 |
+
print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
|
654 |
+
|
655 |
+
stream.output_queue.push(('file', output_filename))
|
656 |
+
|
657 |
+
except:
|
658 |
+
traceback.print_exc()
|
659 |
+
|
660 |
+
if not high_vram:
|
661 |
+
unload_complete_models(
|
662 |
+
text_encoder, text_encoder_2, image_encoder, vae, transformer
|
663 |
+
)
|
664 |
+
|
665 |
+
stream.output_queue.push(('end', None))
|
666 |
+
return
|
667 |
+
|
668 |
+
# 20250506 pftq: Modified process to pass clean frame count, etc
|
669 |
+
def get_duration(input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
|
670 |
+
global total_second_length_debug_value
|
671 |
+
if total_second_length_debug_value is not None:
|
672 |
+
return total_second_length_debug_value * 60
|
673 |
+
return total_second_length * 60
|
674 |
+
|
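Reviewer note: get_duration above is handed to the @spaces.GPU decorator below so the ZeroGPU allocation is sized from the actual request; the callback receives the same arguments as the decorated function. A reduced sketch of the same pattern, with a hypothetical shorter argument list:

    # `spaces` is the module already imported at the top of app.py.
    def my_duration(video, seconds_to_generate):
        # Return the number of ZeroGPU seconds to request for this call.
        return seconds_to_generate * 60

    @spaces.GPU(duration=my_duration)
    def my_process(video, seconds_to_generate):
        ...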
675 |
+
@spaces.GPU(duration=get_duration)
|
676 |
+
def process(input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
|
677 |
+
global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
|
678 |
+
|
679 |
+
if input_video_debug_value is not None:
|
680 |
+
input_video = input_video_debug_value
|
681 |
+
input_video_debug_value = None
|
682 |
+
|
683 |
+
if prompt_debug_value is not None:
|
684 |
+
prompt = prompt_debug_value
|
685 |
+
prompt_debug_value = None
|
686 |
+
|
687 |
+
if total_second_length_debug_value is not None:
|
688 |
+
total_second_length = total_second_length_debug_value
|
689 |
+
total_second_length_debug_value = None
|
690 |
+
|
691 |
+
# 20250506 pftq: Updated assertion for video input
|
692 |
+
assert input_video is not None, 'No input video!'
|
693 |
+
|
694 |
+
yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
|
695 |
+
|
696 |
+
# 20250507 pftq: Even the H100 needs offloading if the video dimensions are 720p or higher
|
697 |
+
if high_vram and (no_resize or resolution>640):
|
698 |
+
print("Disabling high vram mode due to no resize and/or potentially higher resolution...")
|
699 |
+
high_vram = False
|
700 |
+
vae.enable_slicing()
|
701 |
+
vae.enable_tiling()
|
702 |
+
DynamicSwapInstaller.install_model(transformer, device=gpu)
|
703 |
+
DynamicSwapInstaller.install_model(text_encoder, device=gpu)
|
704 |
+
|
705 |
+
# 20250508 pftq: automatically set distilled cfg to 1 if cfg is used
|
706 |
+
if cfg > 1:
|
707 |
+
gs = 1
|
708 |
+
|
709 |
+
stream = AsyncStream()
|
710 |
+
|
711 |
+
# 20250506 pftq: Pass num_clean_frames, vae_batch, etc
|
712 |
+
async_run(worker, input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch)
|
713 |
+
|
714 |
+
output_filename = None
|
715 |
+
|
716 |
+
while True:
|
717 |
+
flag, data = stream.output_queue.next()
|
718 |
+
|
719 |
+
if flag == 'file':
|
720 |
+
output_filename = data
|
721 |
+
yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
|
722 |
+
|
723 |
+
if flag == 'progress':
|
724 |
+
preview, desc, html = data
|
725 |
+
#yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
|
726 |
+
yield output_filename, gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True) # 20250506 pftq: Keep refreshing the video in case it got hidden when the tab was in the background
|
727 |
+
|
728 |
+
if flag == 'end':
|
729 |
+
yield output_filename, gr.update(visible=False), desc+' Video complete.', '', gr.update(interactive=True), gr.update(interactive=False)
|
730 |
+
break
|
731 |
+
|
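Reviewer note: process is a plain consumer of the worker's AsyncStream: the worker pushes ('progress' | 'file' | 'end') tuples and the loop above turns them into Gradio updates; desc is safe to use in the 'end' branch only because the worker always pushes a 'Starting ...' progress message first. A stripped-down sketch of the same producer/consumer shape, using a standard queue as a stand-in for AsyncStream:

    import queue, threading

    def tiny_worker(out_q):
        out_q.put(('progress', 'Starting ...'))
        out_q.put(('file', 'outputs/partial.mp4'))
        out_q.put(('end', None))

    def tiny_process():
        out_q = queue.Queue()
        threading.Thread(target=tiny_worker, args=(out_q,), daemon=True).start()
        while True:
            flag, data = out_q.get()
            yield flag, data      # in the app this becomes a tuple of gr.update() values
            if flag == 'end':
                break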
732 |
+
def end_process():
|
733 |
+
stream.input_queue.push('end')
|
734 |
+
|
735 |
+
quick_prompts = [
|
736 |
+
'The girl dances gracefully, with clear movements, full of charm.',
|
737 |
+
'A character doing some simple body movements.',
|
738 |
+
]
|
739 |
+
quick_prompts = [[x] for x in quick_prompts]
|
740 |
+
|
741 |
+
css = make_progress_bar_css()
|
742 |
+
block = gr.Blocks(css=css).queue(
|
743 |
+
max_size=10 # 20250507 pftq: Limit queue size
|
744 |
+
)
|
745 |
+
with block:
|
746 |
+
# 20250506 pftq: Updated title to reflect video input functionality
|
747 |
+
gr.Markdown('# Framepack with Video Input (Video Extension) + End Frame')
|
748 |
+
with gr.Row():
|
749 |
+
with gr.Column():
|
750 |
+
|
751 |
+
# 20250506 pftq: Changed to Video input from Image
|
752 |
+
with gr.Row():
|
753 |
+
input_video = gr.Video(sources='upload', label="Input Video", height=320)
|
754 |
+
with gr.Column():
|
755 |
+
# 20250507 pftq: Added end_frame + weight
|
756 |
+
end_frame = gr.Image(sources='upload', type="numpy", label="End Frame (Optional) - Reduce the number of context frames if it is very different from the input video or if the output jumpcuts/slows to a still image.", height=320)
|
757 |
+
end_frame_weight = gr.Slider(label="End Frame Weight", minimum=0.0, maximum=1.0, value=1.0, step=0.01, info='Reduce to treat more as a reference image.', visible=False) # no effect
|
758 |
+
|
759 |
+
prompt = gr.Textbox(label="Prompt", value='')
|
760 |
+
#example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
|
761 |
+
#example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)
|
762 |
+
|
763 |
+
with gr.Row():
|
764 |
+
start_button = gr.Button(value="Start Generation", variant="primary")
|
765 |
+
end_button = gr.Button(value="End Generation", variant="stop", interactive=False)
|
766 |
+
|
767 |
+
with gr.Group():
|
768 |
+
with gr.Row():
|
769 |
+
use_teacache = gr.Checkbox(label='Use TeaCache', value=False, info='Faster speed, but often makes hands and fingers slightly worse.')
|
770 |
+
no_resize = gr.Checkbox(label='Force Original Video Resolution (No Resizing)', value=False, info='Might run out of VRAM (720p requires > 24GB VRAM).')
|
771 |
+
|
772 |
+
seed = gr.Number(label="Seed", value=31337, precision=0)
|
773 |
+
|
774 |
+
batch = gr.Slider(label="Batch Size (Number of Videos)", minimum=1, maximum=1000, value=1, step=1, info='Generate multiple videos each with a different seed.')
|
775 |
+
|
776 |
+
resolution = gr.Number(label="Resolution (max width or height)", value=640, precision=0, visible=False)
|
777 |
+
|
778 |
+
total_second_length = gr.Slider(label="Additional Video Length to Generate (Seconds)", minimum=1, maximum=120, value=5, step=0.1)
|
779 |
+
|
780 |
+
# 20250506 pftq: Reduced default distilled guidance scale to improve adherence to input video
|
781 |
+
gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=3.0, step=0.01, info='Prompt adherence at the cost of less details from the input video, but to a lesser extent than Context Frames.')
|
782 |
+
cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=True, info='Use instead of Distilled for more detail/control + Negative Prompt (make sure Distilled=1). Doubles render time.') # Should not change
|
783 |
+
rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False) # Should not change
|
784 |
+
|
785 |
+
n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=True, info='Requires using normal CFG (undistilled) instead of Distilled (set Distilled=1 and CFG > 1).')
|
786 |
+
|
787 |
+
steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Expensive. Increase for more quality, especially if using high non-distilled CFG.')
|
788 |
+
|
789 |
+
# 20250506 pftq: Renamed slider to Number of Context Frames and updated description
|
790 |
+
num_clean_frames = gr.Slider(label="Number of Context Frames (Adherence to Video)", minimum=2, maximum=10, value=5, step=1, info="Expensive. Retains more detail from the input video. Reduce if you run into memory issues or the motion becomes too restricted (jumpcuts, ignoring the prompt, or freezing to a still image).")
|
791 |
+
|
792 |
+
default_vae = 32
|
793 |
+
if high_vram:
|
794 |
+
default_vae = 128
|
795 |
+
elif free_mem_gb>=20:
|
796 |
+
default_vae = 64
|
797 |
+
|
798 |
+
vae_batch = gr.Slider(label="VAE Batch Size for Input Video", minimum=4, maximum=256, value=default_vae, step=4, info="Expensive. Increase for better quality frames during fast motion. Reduce if running out of memory")
|
799 |
+
|
800 |
+
latent_window_size = gr.Slider(label="Latent Window Size", minimum=9, maximum=49, value=9, step=1, visible=True, info='Expensive. Generate more frames at a time (larger chunks). Less degradation but higher VRAM cost.')
|
801 |
+
|
802 |
+
gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
|
803 |
+
|
804 |
+
mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
|
805 |
+
|
806 |
+
with gr.Row():
|
807 |
+
input_video_debug = gr.Video(sources='upload', label="Input Video Debug", height=320)
|
808 |
+
prompt_debug = gr.Textbox(label="Prompt Debug", value='')
|
809 |
+
total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (Seconds) Debug", minimum=1, maximum=120, value=5, step=0.1)
|
810 |
+
|
811 |
+
with gr.Column():
|
812 |
+
preview_image = gr.Image(label="Next Latents", height=200, visible=False)
|
813 |
+
result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
|
814 |
+
progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
|
815 |
+
progress_bar = gr.HTML('', elem_classes='no-generating-animation')
|
816 |
+
|
817 |
+
with gr.Row(visible=False):
|
818 |
+
gr.Examples(
|
819 |
+
examples = [
|
820 |
+
[
|
821 |
+
"./img_examples/Example1.mp4", # input_video
|
822 |
+
None, # end_frame
|
823 |
+
0.0, # end_frame_weight
|
824 |
+
"View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
|
825 |
+
"", # n_prompt
|
826 |
+
42, # seed
|
827 |
+
1, # batch
|
828 |
+
640, # resolution
|
829 |
+
1, # total_second_length
|
830 |
+
9, # latent_window_size
|
831 |
+
10, # steps
|
832 |
+
1.0, # cfg
|
833 |
+
3.0, # gs
|
834 |
+
0.0, # rs
|
835 |
+
6, # gpu_memory_preservation
|
836 |
+
False, # use_teacache
|
837 |
+
False, # no_resize
|
838 |
+
16, # mp4_crf
|
839 |
+
5, # num_clean_frames
|
840 |
+
default_vae
|
841 |
+
],
|
842 |
+
],
|
843 |
+
run_on_click = True,
|
844 |
+
fn = process,
|
845 |
+
inputs = [input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch],
|
846 |
+
outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
|
847 |
+
cache_examples = True,
|
848 |
+
)
|
849 |
+
|
850 |
+
gr.HTML("""
|
851 |
+
<div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>
|
852 |
+
""")
|
853 |
+
|
854 |
+
# 20250506 pftq: Updated inputs to include num_clean_frames
|
855 |
+
ips = [input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
|
856 |
+
start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
|
857 |
+
end_button.click(fn=end_process)
|
858 |
+
|
859 |
+
|
860 |
+
def handle_input_video_debug_upload(input):
|
861 |
+
global input_video_debug_value
|
862 |
+
input_video_debug_value = input
|
863 |
+
return []
|
864 |
+
|
865 |
+
def handle_prompt_debug_change(input):
|
866 |
+
global prompt_debug_value
|
867 |
+
prompt_debug_value = input
|
868 |
+
return []
|
869 |
+
|
870 |
+
def handle_total_second_length_debug_change(input):
|
871 |
+
global total_second_length_debug_value
|
872 |
+
total_second_length_debug_value = input
|
873 |
+
return []
|
874 |
+
|
875 |
+
input_video_debug.upload(
|
876 |
+
fn=handle_input_video_debug_upload,
|
877 |
+
inputs=[input_video_debug],
|
878 |
+
outputs=[]
|
879 |
+
)
|
880 |
+
|
881 |
+
prompt_debug.change(
|
882 |
+
fn=handle_prompt_debug_change,
|
883 |
+
inputs=[prompt_debug],
|
884 |
+
outputs=[]
|
885 |
+
)
|
886 |
+
|
887 |
+
total_second_length_debug.change(
|
888 |
+
fn=handle_total_second_length_debug_change,
|
889 |
+
inputs=[total_second_length_debug],
|
890 |
+
outputs=[]
|
891 |
+
)
|
892 |
+
|
893 |
block.launch(share=True)
|
app_v2v.py
CHANGED
@@ -1,746 +1,746 @@
|
|
1 |
-
from diffusers_helper.hf_login import login
|
2 |
-
|
3 |
-
import os
|
4 |
-
|
5 |
-
os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
|
6 |
-
import spaces
|
7 |
-
import gradio as gr
|
8 |
-
import torch
|
9 |
-
import traceback
|
10 |
-
import einops
|
11 |
-
import safetensors.torch as sf
|
12 |
-
import numpy as np
|
13 |
-
import argparse
|
14 |
-
import math
|
15 |
-
import decord
|
16 |
-
from tqdm import tqdm
|
17 |
-
import pathlib
|
18 |
-
from datetime import datetime
|
19 |
-
import imageio_ffmpeg
|
20 |
-
import tempfile
|
21 |
-
import shutil
|
22 |
-
import subprocess
|
23 |
-
|
24 |
-
from PIL import Image
|
25 |
-
from diffusers import AutoencoderKLHunyuanVideo
|
26 |
-
from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
|
27 |
-
from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
|
28 |
-
from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
|
29 |
-
from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
|
30 |
-
from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
|
31 |
-
from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
|
32 |
-
from diffusers_helper.thread_utils import AsyncStream, async_run
|
33 |
-
from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
|
34 |
-
from transformers import SiglipImageProcessor, SiglipVisionModel
|
35 |
-
from diffusers_helper.clip_vision import hf_clip_vision_encode
|
36 |
-
from diffusers_helper.bucket_tools import find_nearest_bucket
|
37 |
-
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, HunyuanVideoTransformer3DModel, HunyuanVideoPipeline
|
38 |
-
|
39 |
-
parser = argparse.ArgumentParser()
|
40 |
-
parser.add_argument('--share', action='store_true')
|
41 |
-
parser.add_argument("--server", type=str, default='0.0.0.0')
|
42 |
-
parser.add_argument("--port", type=int, required=False)
|
43 |
-
parser.add_argument("--inbrowser", action='store_true')
|
44 |
-
args = parser.parse_args()
|
45 |
-
|
46 |
-
print(args)
|
47 |
-
|
48 |
-
free_mem_gb = get_cuda_free_memory_gb(gpu)
|
49 |
-
high_vram = free_mem_gb > 80
|
50 |
-
|
51 |
-
print(f'Free VRAM {free_mem_gb} GB')
|
52 |
-
print(f'High-VRAM Mode: {high_vram}')
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
text_encoder = LlamaModel.from_pretrained("
|
57 |
-
text_encoder_2 = CLIPTextModel.from_pretrained("
|
58 |
-
tokenizer = LlamaTokenizerFast.from_pretrained("
|
59 |
-
tokenizer_2 = CLIPTokenizer.from_pretrained("
|
60 |
-
vae = AutoencoderKLHunyuanVideo.from_pretrained("
|
61 |
-
|
62 |
-
feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
|
63 |
-
image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
|
64 |
-
|
65 |
-
quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
|
66 |
-
transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained(
|
67 |
-
"lllyasviel/FramePack_F1_I2V_HY_20250503",
|
68 |
-
quantization_config=quant_config,
|
69 |
-
torch_dtype=torch.bfloat16,
|
70 |
-
).cpu()
|
71 |
-
|
72 |
-
# transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()
|
73 |
-
|
74 |
-
vae.eval()
|
75 |
-
text_encoder.eval()
|
76 |
-
text_encoder_2.eval()
|
77 |
-
image_encoder.eval()
|
78 |
-
transformer.eval()
|
79 |
-
|
80 |
-
if not high_vram:
|
81 |
-
vae.enable_slicing()
|
82 |
-
vae.enable_tiling()
|
83 |
-
|
84 |
-
transformer.high_quality_fp32_output_for_inference = True
|
85 |
-
print('transformer.high_quality_fp32_output_for_inference = True')
|
86 |
-
|
87 |
-
# transformer.to(dtype=torch.bfloat16)
|
88 |
-
vae.to(dtype=torch.float16)
|
89 |
-
image_encoder.to(dtype=torch.float16)
|
90 |
-
text_encoder.to(dtype=torch.float16)
|
91 |
-
text_encoder_2.to(dtype=torch.float16)
|
92 |
-
|
93 |
-
vae.requires_grad_(False)
|
94 |
-
text_encoder.requires_grad_(False)
|
95 |
-
text_encoder_2.requires_grad_(False)
|
96 |
-
image_encoder.requires_grad_(False)
|
97 |
-
transformer.requires_grad_(False)
|
98 |
-
|
99 |
-
if not high_vram:
|
100 |
-
# DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
|
101 |
-
DynamicSwapInstaller.install_model(transformer, device=gpu)
|
102 |
-
DynamicSwapInstaller.install_model(text_encoder, device=gpu)
|
103 |
-
else:
|
104 |
-
text_encoder.to(gpu)
|
105 |
-
text_encoder_2.to(gpu)
|
106 |
-
image_encoder.to(gpu)
|
107 |
-
vae.to(gpu)
|
108 |
-
# transformer.to(gpu)
|
109 |
-
|
110 |
-
stream = AsyncStream()
|
111 |
-
|
112 |
-
outputs_folder = './outputs/'
|
113 |
-
os.makedirs(outputs_folder, exist_ok=True)
|
114 |
-
|
115 |
-
input_video_debug_value = None
|
116 |
-
prompt_debug_value = None
|
117 |
-
total_second_length_debug_value = None
|
118 |
-
|
119 |
-
@spaces.GPU()
|
120 |
-
@torch.no_grad()
|
121 |
-
def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, device="cuda", width=None, height=None):
|
122 |
-
"""
|
123 |
-
Encode a video into latent representations using the VAE.
|
124 |
-
|
125 |
-
Args:
|
126 |
-
video_path: Path to the input video file.
|
127 |
-
vae: AutoencoderKLHunyuanVideo model.
|
128 |
-
height, width: Target resolution for resizing frames.
|
129 |
-
vae_batch_size: Number of frames to process per batch.
|
130 |
-
device: Device for computation (e.g., "cuda").
|
131 |
-
|
132 |
-
Returns:
|
133 |
-
start_latent: Latent of the first frame (for compatibility with original code).
|
134 |
-
input_image_np: First frame as numpy array (for CLIP vision encoding).
|
135 |
-
history_latents: Latents of all frames (shape: [1, channels, frames, height//8, width//8]).
|
136 |
-
fps: Frames per second of the input video.
|
137 |
-
"""
|
138 |
-
video_path = str(pathlib.Path(video_path).resolve())
|
139 |
-
print(f"Processing video: {video_path}")
|
140 |
-
|
141 |
-
if device == "cuda" and not torch.cuda.is_available():
|
142 |
-
print("CUDA is not available, falling back to CPU")
|
143 |
-
device = "cpu"
|
144 |
-
|
145 |
-
try:
|
146 |
-
print("Initializing VideoReader...")
|
147 |
-
vr = decord.VideoReader(video_path)
|
148 |
-
fps = vr.get_avg_fps() # Get input video FPS
|
149 |
-
num_real_frames = len(vr)
|
150 |
-
print(f"Video loaded: {num_real_frames} frames, FPS: {fps}")
|
151 |
-
|
152 |
-
# Truncate to nearest latent size (multiple of 4)
|
153 |
-
latent_size_factor = 4
|
154 |
-
num_frames = (num_real_frames // latent_size_factor) * latent_size_factor
|
155 |
-
if num_frames != num_real_frames:
|
156 |
-
print(f"Truncating video from {num_real_frames} to {num_frames} frames for latent size compatibility")
|
157 |
-
num_real_frames = num_frames
|
158 |
-
|
159 |
-
print("Reading video frames...")
|
160 |
-
frames = vr.get_batch(range(num_real_frames)).asnumpy() # Shape: (num_real_frames, height, width, channels)
|
161 |
-
print(f"Frames read: {frames.shape}")
|
162 |
-
|
163 |
-
native_height, native_width = frames.shape[1], frames.shape[2]
|
164 |
-
print(f"Native video resolution: {native_width}x{native_height}")
|
165 |
-
|
166 |
-
target_height = native_height if height is None else height
|
167 |
-
target_width = native_width if width is None else width
|
168 |
-
|
169 |
-
if not no_resize:
|
170 |
-
target_height, target_width = find_nearest_bucket(target_height, target_width, resolution=resolution)
|
171 |
-
print(f"Adjusted resolution: {target_width}x{target_height}")
|
172 |
-
else:
|
173 |
-
print(f"Using native resolution without resizing: {target_width}x{target_height}")
|
174 |
-
|
175 |
-
processed_frames = []
|
176 |
-
for i, frame in enumerate(frames):
|
177 |
-
#print(f"Preprocessing frame {i+1}/{num_frames}")
|
178 |
-
frame_np = resize_and_center_crop(frame, target_width=target_width, target_height=target_height)
|
179 |
-
processed_frames.append(frame_np)
|
180 |
-
processed_frames = np.stack(processed_frames) # Shape: (num_real_frames, height, width, channels)
|
181 |
-
print(f"Frames preprocessed: {processed_frames.shape}")
|
182 |
-
|
183 |
-
input_image_np = processed_frames[0]
|
184 |
-
|
185 |
-
print("Converting frames to tensor...")
|
186 |
-
frames_pt = torch.from_numpy(processed_frames).float() / 127.5 - 1
|
187 |
-
frames_pt = frames_pt.permute(0, 3, 1, 2) # Shape: (num_real_frames, channels, height, width)
|
188 |
-
frames_pt = frames_pt.unsqueeze(0) # Shape: (1, num_real_frames, channels, height, width)
|
189 |
-
frames_pt = frames_pt.permute(0, 2, 1, 3, 4) # Shape: (1, channels, num_real_frames, height, width)
|
190 |
-
print(f"Tensor shape: {frames_pt.shape}")
|
191 |
-
|
192 |
-
input_video_pixels = frames_pt.cpu()
|
193 |
-
|
194 |
-
print(f"Moving tensor to device: {device}")
|
195 |
-
frames_pt = frames_pt.to(device)
|
196 |
-
print("Tensor moved to device")
|
197 |
-
|
198 |
-
print(f"Moving VAE to device: {device}")
|
199 |
-
vae.to(device)
|
200 |
-
print("VAE moved to device")
|
201 |
-
|
202 |
-
print(f"Encoding input video frames in VAE batch size {vae_batch_size} (reduce if memory issues here or if forcing video resolution)")
|
203 |
-
latents = []
|
204 |
-
vae.eval()
|
205 |
-
with torch.no_grad():
|
206 |
-
for i in tqdm(range(0, frames_pt.shape[2], vae_batch_size), desc="Encoding video frames", mininterval=0.1):
|
207 |
-
#print(f"Encoding batch {i//vae_batch_size + 1}: frames {i} to {min(i + vae_batch_size, frames_pt.shape[2])}")
|
208 |
-
batch = frames_pt[:, :, i:i + vae_batch_size] # Shape: (1, channels, batch_size, height, width)
|
209 |
-
try:
|
210 |
-
if device == "cuda":
|
211 |
-
free_mem = torch.cuda.memory_allocated() / 1024**3
|
212 |
-
print(f"GPU memory before encoding: {free_mem:.2f} GB")
|
213 |
-
batch_latent = vae_encode(batch, vae)
|
214 |
-
if device == "cuda":
|
215 |
-
torch.cuda.synchronize()
|
216 |
-
print(f"GPU memory after encoding: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
|
217 |
-
latents.append(batch_latent)
|
218 |
-
#print(f"Batch encoded, latent shape: {batch_latent.shape}")
|
219 |
-
except RuntimeError as e:
|
220 |
-
print(f"Error during VAE encoding: {str(e)}")
|
221 |
-
if device == "cuda" and "out of memory" in str(e).lower():
|
222 |
-
print("CUDA out of memory, try reducing vae_batch_size or using CPU")
|
223 |
-
raise
|
224 |
-
|
225 |
-
print("Concatenating latents...")
|
226 |
-
history_latents = torch.cat(latents, dim=2) # Shape: (1, channels, frames, height//8, width//8)
|
227 |
-
print(f"History latents shape: {history_latents.shape}")
|
228 |
-
|
229 |
-
start_latent = history_latents[:, :, :1] # Shape: (1, channels, 1, height//8, width//8)
|
230 |
-
print(f"Start latent shape: {start_latent.shape}")
|
231 |
-
|
232 |
-
if device == "cuda":
|
233 |
-
vae.to(cpu)
|
234 |
-
torch.cuda.empty_cache()
|
235 |
-
print("VAE moved back to CPU, CUDA cache cleared")
|
236 |
-
|
237 |
-
return start_latent, input_image_np, history_latents, fps, target_height, target_width, input_video_pixels
|
238 |
-
|
239 |
-
except Exception as e:
|
240 |
-
print(f"Error in video_encode: {str(e)}")
|
241 |
-
raise
|
242 |
-
|
243 |
-
def set_mp4_comments_imageio_ffmpeg(input_file, comments):
|
244 |
-
try:
|
245 |
-
ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
|
246 |
-
|
247 |
-
if not os.path.exists(input_file):
|
248 |
-
print(f"Error: Input file {input_file} does not exist")
|
249 |
-
return False
|
250 |
-
|
251 |
-
# Create a temporary file path
|
252 |
-
temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
|
253 |
-
|
254 |
-
# FFmpeg command using the bundled binary
|
255 |
-
command = [
|
256 |
-
ffmpeg_path, # Use imageio-ffmpeg's FFmpeg
|
257 |
-
'-i', input_file, # input file
|
258 |
-
'-metadata', f'comment={comments}', # set comment metadata
|
259 |
-
'-c:v', 'copy', # copy video stream without re-encoding
|
260 |
-
'-c:a', 'copy', # copy audio stream without re-encoding
|
261 |
-
'-y', # overwrite output file if it exists
|
262 |
-
temp_file # temporary output file
|
263 |
-
]
|
264 |
-
|
265 |
-
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
266 |
-
|
267 |
-
if result.returncode == 0:
|
268 |
-
# Replace the original file with the modified one
|
269 |
-
shutil.move(temp_file, input_file)
|
270 |
-
print(f"Successfully added comments to {input_file}")
|
271 |
-
return True
|
272 |
-
else:
|
273 |
-
# Clean up temp file if FFmpeg fails
|
274 |
-
if os.path.exists(temp_file):
|
275 |
-
os.remove(temp_file)
|
276 |
-
print(f"Error: FFmpeg failed with message:\n{result.stderr}")
|
277 |
-
return False
|
278 |
-
|
279 |
-
except Exception as e:
|
280 |
-
# Clean up temp file in case of other errors
|
281 |
-
if 'temp_file' in locals() and os.path.exists(temp_file):
|
282 |
-
os.remove(temp_file)
|
283 |
-
print(f"Error saving prompt to video metadata, ffmpeg may be required: "+str(e))
|
284 |
-
return False
|
285 |
-
|
286 |
-
@spaces.GPU()
|
287 |
-
@torch.no_grad()
|
288 |
-
def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
|
289 |
-
|
290 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
|
291 |
-
|
292 |
-
try:
|
293 |
-
if not high_vram:
|
294 |
-
unload_complete_models(
|
295 |
-
text_encoder, text_encoder_2, image_encoder, vae
|
296 |
-
)
|
297 |
-
|
298 |
-
# Text encoding
|
299 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
|
300 |
-
|
301 |
-
if not high_vram:
|
302 |
-
fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
|
303 |
-
load_model_as_complete(text_encoder_2, target_device=gpu)
|
304 |
-
|
305 |
-
llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
|
306 |
-
|
307 |
-
if cfg == 1:
|
308 |
-
llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
|
309 |
-
else:
|
310 |
-
llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
|
311 |
-
|
312 |
-
llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
|
313 |
-
llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
|
314 |
-
|
315 |
-
# 20250506 pftq: Processing input video instead of image
|
316 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Video processing ...'))))
|
317 |
-
|
318 |
-
# 20250506 pftq: Encode video
|
319 |
-
#H, W = 640, 640 # Default resolution, will be adjusted
|
320 |
-
#height, width = find_nearest_bucket(H, W, resolution=640)
|
321 |
-
#start_latent, input_image_np, history_latents, fps = video_encode(input_video, vae, height, width, vae_batch_size=16, device=gpu)
|
322 |
-
start_latent, input_image_np, video_latents, fps, height, width, input_video_pixels = video_encode(input_video, resolution, no_resize, vae, vae_batch_size=vae_batch, device=gpu)
|
323 |
-
|
324 |
-
#Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
|
325 |
-
|
326 |
-
# CLIP Vision
|
327 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
|
328 |
-
|
329 |
-
if not high_vram:
|
330 |
-
load_model_as_complete(image_encoder, target_device=gpu)
|
331 |
-
|
332 |
-
image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
|
333 |
-
image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
|
334 |
-
|
335 |
-
# Dtype
|
336 |
-
llama_vec = llama_vec.to(transformer.dtype)
|
337 |
-
llama_vec_n = llama_vec_n.to(transformer.dtype)
|
338 |
-
clip_l_pooler = clip_l_pooler.to(transformer.dtype)
|
339 |
-
clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
|
340 |
-
image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
|
341 |
-
|
342 |
-
total_latent_sections = (total_second_length * fps) / (latent_window_size * 4)
|
343 |
-
total_latent_sections = int(max(round(total_latent_sections), 1))
|
344 |
-
|
345 |
-
for idx in range(batch):
|
346 |
-
if idx>0:
|
347 |
-
seed = seed + 1
|
348 |
-
|
349 |
-
if batch > 1:
|
350 |
-
print(f"Beginning video {idx+1} of {batch} with seed {seed} ")
|
351 |
-
|
352 |
-
#job_id = generate_timestamp()
|
353 |
-
job_id = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+f"_framepackf1-videoinput_{width}-{total_second_length}sec_seed-{seed}_steps-{steps}_distilled-{gs}_cfg-{cfg}" # 20250506 pftq: easier to read timestamp and filename
|
354 |
-
|
355 |
-
# Sampling
|
356 |
-
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
|
357 |
-
|
358 |
-
rnd = torch.Generator("cpu").manual_seed(seed)
|
359 |
-
|
360 |
-
history_latents = video_latents.cpu()
|
361 |
-
total_generated_latent_frames = history_latents.shape[2]
|
362 |
-
history_pixels = None
|
363 |
-
previous_video = None
|
364 |
-
|
365 |
-
# 20250507 pftq: hot fix for initial video being corrupted by vae encoding, issue with ghosting because of slight differences
|
366 |
-
#history_pixels = input_video_pixels
|
367 |
-
1 |
+
from diffusers_helper.hf_login import login
|
2 |
+
|
3 |
+
import os
|
4 |
+
|
5 |
+
os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
|
6 |
+
import spaces
|
7 |
+
import gradio as gr
|
8 |
+
import torch
|
9 |
+
import traceback
|
10 |
+
import einops
|
11 |
+
import safetensors.torch as sf
|
12 |
+
import numpy as np
|
13 |
+
import argparse
|
14 |
+
import math
|
15 |
+
import decord
|
16 |
+
from tqdm import tqdm
|
17 |
+
import pathlib
|
18 |
+
from datetime import datetime
|
19 |
+
import imageio_ffmpeg
|
20 |
+
import tempfile
|
21 |
+
import shutil
|
22 |
+
import subprocess
|
23 |
+
|
24 |
+
from PIL import Image
|
25 |
+
from diffusers import AutoencoderKLHunyuanVideo
|
26 |
+
from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
|
27 |
+
from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
|
28 |
+
from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
|
29 |
+
from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
|
30 |
+
from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
|
31 |
+
from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
|
32 |
+
from diffusers_helper.thread_utils import AsyncStream, async_run
|
33 |
+
from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
|
34 |
+
from transformers import SiglipImageProcessor, SiglipVisionModel
|
35 |
+
from diffusers_helper.clip_vision import hf_clip_vision_encode
|
36 |
+
from diffusers_helper.bucket_tools import find_nearest_bucket
|
37 |
+
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, HunyuanVideoTransformer3DModel, HunyuanVideoPipeline
|
38 |
+
|
39 |
+
parser = argparse.ArgumentParser()
|
40 |
+
parser.add_argument('--share', action='store_true')
|
41 |
+
parser.add_argument("--server", type=str, default='0.0.0.0')
|
42 |
+
parser.add_argument("--port", type=int, required=False)
|
43 |
+
parser.add_argument("--inbrowser", action='store_true')
|
44 |
+
args = parser.parse_args()
|
45 |
+
|
46 |
+
print(args)
|
47 |
+
|
48 |
+
free_mem_gb = get_cuda_free_memory_gb(gpu)
|
49 |
+
high_vram = free_mem_gb > 80
|
50 |
+
|
51 |
+
print(f'Free VRAM {free_mem_gb} GB')
|
52 |
+
print(f'High-VRAM Mode: {high_vram}')
|
53 |
+
|
54 |
+
|
55 |
+
|
56 |
+
text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
|
57 |
+
text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
|
58 |
+
tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
|
59 |
+
tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
|
60 |
+
vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()
|
61 |
+
|
62 |
+
feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
|
63 |
+
image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
|
64 |
+
|
65 |
+
quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
|
66 |
+
transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained(
|
67 |
+
"lllyasviel/FramePack_F1_I2V_HY_20250503",
|
68 |
+
quantization_config=quant_config,
|
69 |
+
torch_dtype=torch.bfloat16,
|
70 |
+
).cpu()
|
71 |
+
|
72 |
+
# transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()
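# The transformer above is loaded through bitsandbytes 8-bit quantization (quant_config) to roughly
# halve its VRAM footprint versus plain bf16, at a small speed/quality cost. As a hedged sketch only
# (not part of this app, parameter names follow the diffusers quantization docs and should be
# verified), a 4-bit NF4 variant could be requested instead when memory is even tighter:
#
#   quant_config_4bit = DiffusersBitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype=torch.bfloat16,
#   )
#   transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained(
#       "lllyasviel/FramePack_F1_I2V_HY_20250503",
#       quantization_config=quant_config_4bit,
#       torch_dtype=torch.bfloat16,
#   ).cpu()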
|
73 |
+
|
74 |
+
vae.eval()
|
75 |
+
text_encoder.eval()
|
76 |
+
text_encoder_2.eval()
|
77 |
+
image_encoder.eval()
|
78 |
+
transformer.eval()
|
79 |
+
|
80 |
+
if not high_vram:
|
81 |
+
vae.enable_slicing()
|
82 |
+
vae.enable_tiling()
|
83 |
+
|
84 |
+
transformer.high_quality_fp32_output_for_inference = True
|
85 |
+
print('transformer.high_quality_fp32_output_for_inference = True')
|
86 |
+
|
87 |
+
# transformer.to(dtype=torch.bfloat16)
|
88 |
+
vae.to(dtype=torch.float16)
|
89 |
+
image_encoder.to(dtype=torch.float16)
|
90 |
+
text_encoder.to(dtype=torch.float16)
|
91 |
+
text_encoder_2.to(dtype=torch.float16)
|
92 |
+
|
93 |
+
vae.requires_grad_(False)
|
94 |
+
text_encoder.requires_grad_(False)
|
95 |
+
text_encoder_2.requires_grad_(False)
|
96 |
+
image_encoder.requires_grad_(False)
|
97 |
+
transformer.requires_grad_(False)
|
98 |
+
|
99 |
+
if not high_vram:
|
100 |
+
# DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
|
101 |
+
DynamicSwapInstaller.install_model(transformer, device=gpu)
|
102 |
+
DynamicSwapInstaller.install_model(text_encoder, device=gpu)
|
103 |
+
else:
|
104 |
+
text_encoder.to(gpu)
|
105 |
+
text_encoder_2.to(gpu)
|
106 |
+
image_encoder.to(gpu)
|
107 |
+
vae.to(gpu)
|
108 |
+
# transformer.to(gpu)
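# Memory strategy recap (descriptive comment, no behavior change): in low-VRAM mode the transformer
# and text_encoder stay on CPU and DynamicSwapInstaller streams their weights to the GPU on demand,
# while the smaller models (text_encoder_2, image_encoder, vae) are loaded and unloaded as complete
# modules around each stage via load_model_as_complete() / unload_complete_models(). In high-VRAM
# mode everything except the transformer is simply kept resident on the GPU.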
|
109 |
+
|
110 |
+
stream = AsyncStream()
|
111 |
+
|
112 |
+
outputs_folder = './outputs/'
|
113 |
+
os.makedirs(outputs_folder, exist_ok=True)
|
114 |
+
|
115 |
+
input_video_debug_value = None
|
116 |
+
prompt_debug_value = None
|
117 |
+
total_second_length_debug_value = None
|
118 |
+
|
119 |
+
@spaces.GPU()
|
120 |
+
@torch.no_grad()
|
121 |
+
def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, device="cuda", width=None, height=None):
|
122 |
+
"""
|
123 |
+
Encode a video into latent representations using the VAE.
|
124 |
+
|
125 |
+
Args:
|
126 |
+
video_path: Path to the input video file.
|
127 |
+
vae: AutoencoderKLHunyuanVideo model.
|
128 |
+
height, width: Target resolution for resizing frames.
|
129 |
+
vae_batch_size: Number of frames to process per batch.
|
130 |
+
device: Device for computation (e.g., "cuda").
|
131 |
+
|
132 |
+
Returns:
|
133 |
+
start_latent: Latent of the first frame (for compatibility with original code).
|
134 |
+
input_image_np: First frame as numpy array (for CLIP vision encoding).
|
135 |
+
history_latents: Latents of all frames (shape: [1, channels, frames, height//8, width//8]).
|
136 |
+
fps: Frames per second of the input video.
|
137 |
+
"""
|
138 |
+
video_path = str(pathlib.Path(video_path).resolve())
|
139 |
+
print(f"Processing video: {video_path}")
|
140 |
+
|
141 |
+
if device == "cuda" and not torch.cuda.is_available():
|
142 |
+
print("CUDA is not available, falling back to CPU")
|
143 |
+
device = "cpu"
|
144 |
+
|
145 |
+
try:
|
146 |
+
print("Initializing VideoReader...")
|
147 |
+
vr = decord.VideoReader(video_path)
|
148 |
+
fps = vr.get_avg_fps() # Get input video FPS
|
149 |
+
num_real_frames = len(vr)
|
150 |
+
print(f"Video loaded: {num_real_frames} frames, FPS: {fps}")
|
151 |
+
|
152 |
+
# Truncate to nearest latent size (multiple of 4)
|
153 |
+
latent_size_factor = 4
|
154 |
+
num_frames = (num_real_frames // latent_size_factor) * latent_size_factor
|
155 |
+
if num_frames != num_real_frames:
|
156 |
+
print(f"Truncating video from {num_real_frames} to {num_frames} frames for latent size compatibility")
|
157 |
+
num_real_frames = num_frames
|
158 |
+
|
159 |
+
print("Reading video frames...")
|
160 |
+
frames = vr.get_batch(range(num_real_frames)).asnumpy() # Shape: (num_real_frames, height, width, channels)
|
161 |
+
print(f"Frames read: {frames.shape}")
|
162 |
+
|
163 |
+
native_height, native_width = frames.shape[1], frames.shape[2]
|
164 |
+
print(f"Native video resolution: {native_width}x{native_height}")
|
165 |
+
|
166 |
+
target_height = native_height if height is None else height
|
167 |
+
target_width = native_width if width is None else width
|
168 |
+
|
169 |
+
if not no_resize:
|
170 |
+
target_height, target_width = find_nearest_bucket(target_height, target_width, resolution=resolution)
|
171 |
+
print(f"Adjusted resolution: {target_width}x{target_height}")
|
172 |
+
else:
|
173 |
+
print(f"Using native resolution without resizing: {target_width}x{target_height}")
|
174 |
+
|
175 |
+
processed_frames = []
|
176 |
+
for i, frame in enumerate(frames):
|
177 |
+
#print(f"Preprocessing frame {i+1}/{num_frames}")
|
178 |
+
frame_np = resize_and_center_crop(frame, target_width=target_width, target_height=target_height)
|
179 |
+
processed_frames.append(frame_np)
|
180 |
+
processed_frames = np.stack(processed_frames) # Shape: (num_real_frames, height, width, channels)
|
181 |
+
print(f"Frames preprocessed: {processed_frames.shape}")
|
182 |
+
|
183 |
+
input_image_np = processed_frames[0]
|
184 |
+
|
185 |
+
print("Converting frames to tensor...")
|
186 |
+
frames_pt = torch.from_numpy(processed_frames).float() / 127.5 - 1
|
187 |
+
frames_pt = frames_pt.permute(0, 3, 1, 2) # Shape: (num_real_frames, channels, height, width)
|
188 |
+
frames_pt = frames_pt.unsqueeze(0) # Shape: (1, num_real_frames, channels, height, width)
|
189 |
+
frames_pt = frames_pt.permute(0, 2, 1, 3, 4) # Shape: (1, channels, num_real_frames, height, width)
|
190 |
+
print(f"Tensor shape: {frames_pt.shape}")
|
191 |
+
|
192 |
+
input_video_pixels = frames_pt.cpu()
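# At this point frames_pt holds the whole clip as float32 in [-1, 1] (uint8 / 127.5 - 1) with layout
# (batch=1, channels, time, height, width), the b c t h w convention expected by the VAE and by
# save_bcthw_as_mp4; input_video_pixels keeps a CPU copy of that same tensor.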
|
193 |
+
|
194 |
+
print(f"Moving tensor to device: {device}")
|
195 |
+
frames_pt = frames_pt.to(device)
|
196 |
+
print("Tensor moved to device")
|
197 |
+
|
198 |
+
print(f"Moving VAE to device: {device}")
|
199 |
+
vae.to(device)
|
200 |
+
print("VAE moved to device")
|
201 |
+
|
202 |
+
print(f"Encoding input video frames in VAE batch size {vae_batch_size} (reduce if memory issues here or if forcing video resolution)")
|
203 |
+
latents = []
|
204 |
+
vae.eval()
|
205 |
+
with torch.no_grad():
|
206 |
+
for i in tqdm(range(0, frames_pt.shape[2], vae_batch_size), desc="Encoding video frames", mininterval=0.1):
|
207 |
+
#print(f"Encoding batch {i//vae_batch_size + 1}: frames {i} to {min(i + vae_batch_size, frames_pt.shape[2])}")
|
208 |
+
batch = frames_pt[:, :, i:i + vae_batch_size] # Shape: (1, channels, batch_size, height, width)
|
209 |
+
try:
|
210 |
+
if device == "cuda":
|
211 |
+
free_mem = torch.cuda.memory_allocated() / 1024**3
|
212 |
+
print(f"GPU memory before encoding: {free_mem:.2f} GB")
|
213 |
+
batch_latent = vae_encode(batch, vae)
|
214 |
+
if device == "cuda":
|
215 |
+
torch.cuda.synchronize()
|
216 |
+
print(f"GPU memory after encoding: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
|
217 |
+
latents.append(batch_latent)
|
218 |
+
#print(f"Batch encoded, latent shape: {batch_latent.shape}")
|
219 |
+
except RuntimeError as e:
|
220 |
+
print(f"Error during VAE encoding: {str(e)}")
|
221 |
+
if device == "cuda" and "out of memory" in str(e).lower():
|
222 |
+
print("CUDA out of memory, try reducing vae_batch_size or using CPU")
|
223 |
+
raise
|
224 |
+
|
225 |
+
print("Concatenating latents...")
|
226 |
+
history_latents = torch.cat(latents, dim=2) # Shape: (1, channels, frames, height//8, width//8)
|
227 |
+
print(f"History latents shape: {history_latents.shape}")
|
228 |
+
|
229 |
+
start_latent = history_latents[:, :, :1] # Shape: (1, channels, 1, height//8, width//8)
|
230 |
+
print(f"Start latent shape: {start_latent.shape}")
|
231 |
+
|
232 |
+
if device == "cuda":
|
233 |
+
vae.to(cpu)
|
234 |
+
torch.cuda.empty_cache()
|
235 |
+
print("VAE moved back to CPU, CUDA cache cleared")
|
236 |
+
|
237 |
+
return start_latent, input_image_np, history_latents, fps, target_height, target_width, input_video_pixels
|
238 |
+
|
239 |
+
except Exception as e:
|
240 |
+
print(f"Error in video_encode: {str(e)}")
|
241 |
+
raise
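# Minimal usage sketch for video_encode (illustrative only; the file name and settings below are made up):
#
#   start_latent, first_frame_np, video_latents, fps, h, w, video_pixels = video_encode(
#       "example_input.mp4",   # hypothetical path
#       resolution=640,
#       no_resize=False,
#       vae=vae,
#       vae_batch_size=32,
#       device=gpu,
#   )
#   # video_latents has shape (1, C, T_latent, h // 8, w // 8) and seeds history_latents in worker().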
|
242 |
+
|
243 |
+
def set_mp4_comments_imageio_ffmpeg(input_file, comments):
|
244 |
+
try:
|
245 |
+
ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
|
246 |
+
|
247 |
+
if not os.path.exists(input_file):
|
248 |
+
print(f"Error: Input file {input_file} does not exist")
|
249 |
+
return False
|
250 |
+
|
251 |
+
# Create a temporary file path
|
252 |
+
temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
|
253 |
+
|
254 |
+
# FFmpeg command using the bundled binary
|
255 |
+
command = [
|
256 |
+
ffmpeg_path, # Use imageio-ffmpeg's FFmpeg
|
257 |
+
'-i', input_file, # input file
|
258 |
+
'-metadata', f'comment={comments}', # set comment metadata
|
259 |
+
'-c:v', 'copy', # copy video stream without re-encoding
|
260 |
+
'-c:a', 'copy', # copy audio stream without re-encoding
|
261 |
+
'-y', # overwrite output file if it exists
|
262 |
+
temp_file # temporary output file
|
263 |
+
]
|
264 |
+
|
265 |
+
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
266 |
+
|
267 |
+
if result.returncode == 0:
|
268 |
+
# Replace the original file with the modified one
|
269 |
+
shutil.move(temp_file, input_file)
|
270 |
+
print(f"Successfully added comments to {input_file}")
|
271 |
+
return True
|
272 |
+
else:
|
273 |
+
# Clean up temp file if FFmpeg fails
|
274 |
+
if os.path.exists(temp_file):
|
275 |
+
os.remove(temp_file)
|
276 |
+
print(f"Error: FFmpeg failed with message:\n{result.stderr}")
|
277 |
+
return False
|
278 |
+
|
279 |
+
except Exception as e:
|
280 |
+
# Clean up temp file in case of other errors
|
281 |
+
if 'temp_file' in locals() and os.path.exists(temp_file):
|
282 |
+
os.remove(temp_file)
|
283 |
+
print(f"Error saving prompt to video metadata, ffmpeg may be required: "+str(e))
|
284 |
+
return False
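# Usage sketch (illustrative, hypothetical file name): after a clip is saved, the prompt can be
# stashed in its metadata and read back with the same bundled ffmpeg binary:
#
#   set_mp4_comments_imageio_ffmpeg("some_output.mp4", "Prompt: a dancing robot | Negative Prompt: blurry")
#   # Assuming standard ffmpeg CLI behavior, the comment can be inspected later with:
#   #   ffmpeg -i some_output.mp4 -f ffmetadata -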
|
285 |
+
|
286 |
+
@spaces.GPU()
|
287 |
+
@torch.no_grad()
|
288 |
+
def worker(input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
|
289 |
+
|
290 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
|
291 |
+
|
292 |
+
try:
|
293 |
+
if not high_vram:
|
294 |
+
unload_complete_models(
|
295 |
+
text_encoder, text_encoder_2, image_encoder, vae
|
296 |
+
)
|
297 |
+
|
298 |
+
# Text encoding
|
299 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
|
300 |
+
|
301 |
+
if not high_vram:
|
302 |
+
fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
|
303 |
+
load_model_as_complete(text_encoder_2, target_device=gpu)
|
304 |
+
|
305 |
+
llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
|
306 |
+
|
307 |
+
if cfg == 1:
|
308 |
+
llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
|
309 |
+
else:
|
310 |
+
llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
|
311 |
+
|
312 |
+
llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
|
313 |
+
llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
|
314 |
+
|
315 |
+
# 20250506 pftq: Processing input video instead of image
|
316 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Video processing ...'))))
|
317 |
+
|
318 |
+
# 20250506 pftq: Encode video
|
319 |
+
#H, W = 640, 640 # Default resolution, will be adjusted
|
320 |
+
#height, width = find_nearest_bucket(H, W, resolution=640)
|
321 |
+
#start_latent, input_image_np, history_latents, fps = video_encode(input_video, vae, height, width, vae_batch_size=16, device=gpu)
|
322 |
+
start_latent, input_image_np, video_latents, fps, height, width, input_video_pixels = video_encode(input_video, resolution, no_resize, vae, vae_batch_size=vae_batch, device=gpu)
|
323 |
+
|
324 |
+
#Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
|
325 |
+
|
326 |
+
# CLIP Vision
|
327 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
|
328 |
+
|
329 |
+
if not high_vram:
|
330 |
+
load_model_as_complete(image_encoder, target_device=gpu)
|
331 |
+
|
332 |
+
image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
|
333 |
+
image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
|
334 |
+
|
335 |
+
# Dtype
|
336 |
+
llama_vec = llama_vec.to(transformer.dtype)
|
337 |
+
llama_vec_n = llama_vec_n.to(transformer.dtype)
|
338 |
+
clip_l_pooler = clip_l_pooler.to(transformer.dtype)
|
339 |
+
clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
|
340 |
+
image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
|
341 |
+
|
342 |
+
total_latent_sections = (total_second_length * fps) / (latent_window_size * 4)
|
343 |
+
total_latent_sections = int(max(round(total_latent_sections), 1))
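# Worked example of the section count above (assuming a 30 fps input): asking for 2 extra seconds
# with latent_window_size=9 gives (2 * 30) / (9 * 4) = 1.67, rounded to 2 sections; each section
# samples up to latent_window_size * 4 - 3 = 33 new pixel frames, so the request is covered in two passes.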
|
344 |
+
|
345 |
+
for idx in range(batch):
|
346 |
+
if idx>0:
|
347 |
+
seed = seed + 1
|
348 |
+
|
349 |
+
if batch > 1:
|
350 |
+
print(f"Beginning video {idx+1} of {batch} with seed {seed} ")
|
351 |
+
|
352 |
+
#job_id = generate_timestamp()
|
353 |
+
job_id = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+f"_framepackf1-videoinput_{width}-{total_second_length}sec_seed-{seed}_steps-{steps}_distilled-{gs}_cfg-{cfg}" # 20250506 pftq: easier to read timestamp and filename
|
354 |
+
|
355 |
+
# Sampling
|
356 |
+
stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
|
357 |
+
|
358 |
+
rnd = torch.Generator("cpu").manual_seed(seed)
|
359 |
+
|
360 |
+
history_latents = video_latents.cpu()
|
361 |
+
total_generated_latent_frames = history_latents.shape[2]
|
362 |
+
history_pixels = None
|
363 |
+
previous_video = None
|
364 |
+
|
365 |
+
# 20250507 pftq: hot fix for initial video being corrupted by vae encoding, issue with ghosting because of slight differences
|
366 |
+
#history_pixels = input_video_pixels
|
367 |
+
#save_bcthw_as_mp4(vae_decode(video_latents, vae).cpu(), os.path.join(outputs_folder, f'{job_id}_input_video.mp4'), fps=fps, crf=mp4_crf) # 20250507 pftq: test fast movement corrupted by vae encoding if vae batch size too low
|
368 |
+
|
369 |
+
for section_index in range(total_latent_sections):
|
370 |
+
if stream.input_queue.top() == 'end':
|
371 |
+
stream.output_queue.push(('end', None))
|
372 |
+
return
|
373 |
+
|
374 |
+
print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
|
375 |
+
|
376 |
+
if not high_vram:
|
377 |
+
unload_complete_models()
|
378 |
+
# move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
|
379 |
+
|
380 |
+
if use_teacache:
|
381 |
+
transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
|
382 |
+
else:
|
383 |
+
transformer.initialize_teacache(enable_teacache=False)
|
384 |
+
|
385 |
+
def callback(d):
|
386 |
+
preview = d['denoised']
|
387 |
+
preview = vae_decode_fake(preview)
|
388 |
+
|
389 |
+
preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
|
390 |
+
preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
|
391 |
+
|
392 |
+
if stream.input_queue.top() == 'end':
|
393 |
+
stream.output_queue.push(('end', None))
|
394 |
+
raise KeyboardInterrupt('User ends the task.')
|
395 |
+
|
396 |
+
current_step = d['i'] + 1
|
397 |
+
percentage = int(100.0 * current_step / steps)
|
398 |
+
hint = f'Sampling {current_step}/{steps}'
|
399 |
+
desc = f'Total frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / fps) :.2f} seconds (FPS-{fps}), Seed: {seed}, Video {idx+1} of {batch}. The video is generating part {section_index+1} of {total_latent_sections}...'
|
400 |
+
stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
|
401 |
+
return
|
402 |
+
|
403 |
+
# 20250506 pftq: Use user-specified number of context frames, matching original allocation for num_clean_frames=2
|
404 |
+
available_frames = history_latents.shape[2] # Number of latent frames
|
405 |
+
max_pixel_frames = min(latent_window_size * 4 - 3, available_frames * 4) # Cap at available pixel frames
|
406 |
+
adjusted_latent_frames = max(1, (max_pixel_frames + 3) // 4) # Convert back to latent frames
|
407 |
+
# Adjust num_clean_frames to match original behavior: num_clean_frames=2 means 1 frame for clean_latents_1x
|
408 |
+
effective_clean_frames = max(0, num_clean_frames - 1) if num_clean_frames > 1 else 0
|
409 |
+
effective_clean_frames = min(effective_clean_frames, available_frames - 2) if available_frames > 2 else 0 # 20250507 pftq: changed 1 to 2 for edge case for <=1 sec videos
|
410 |
+
num_2x_frames = min(2, max(1, available_frames - effective_clean_frames - 1)) if available_frames > effective_clean_frames + 1 else 0 # 20250507 pftq: subtracted 1 for edge case for <=1 sec videos
|
411 |
+
num_4x_frames = min(16, max(1, available_frames - effective_clean_frames - num_2x_frames)) if available_frames > effective_clean_frames + num_2x_frames else 0 # 20250507 pftq: Edge case for <=1 sec
|
412 |
+
|
413 |
+
total_context_frames = num_4x_frames + num_2x_frames + effective_clean_frames
|
414 |
+
total_context_frames = min(total_context_frames, available_frames) # 20250507 pftq: Edge case for <=1 sec videos
|
415 |
+
|
416 |
+
indices = torch.arange(0, sum([1, num_4x_frames, num_2x_frames, effective_clean_frames, adjusted_latent_frames])).unsqueeze(0) # 20250507 pftq: latent_window_size to adjusted_latent_frames for edge case for <=1 sec videos
|
417 |
+
clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split(
|
418 |
+
[1, num_4x_frames, num_2x_frames, effective_clean_frames, adjusted_latent_frames], dim=1 # 20250507 pftq: latent_window_size to adjusted_latent_frames for edge case for <=1 sec videos
|
419 |
+
)
|
420 |
+
clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
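# Worked example of the context allocation above (long input, num_clean_frames=5): effective_clean_frames=4,
# num_2x_frames=2 and num_4x_frames=16, so up to 22 latent frames of history are fed back as context.
# The index row built by indices.split(...) is laid out as
# [start_latent | 4x context | 2x context | 1x context | newly sampled latents].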
|
421 |
+
|
422 |
+
# 20250506 pftq: Split history_latents dynamically based on available frames
|
423 |
+
fallback_frame_count = 2 # 20250507 pftq: Changed 0 to 2 Edge case for <=1 sec videos
|
424 |
+
context_frames = history_latents[:, :, -total_context_frames:, :, :] if total_context_frames > 0 else history_latents[:, :, :fallback_frame_count, :, :]
|
425 |
+
if total_context_frames > 0:
|
426 |
+
split_sizes = [num_4x_frames, num_2x_frames, effective_clean_frames]
|
427 |
+
split_sizes = [s for s in split_sizes if s > 0] # Remove zero sizes
|
428 |
+
if split_sizes:
|
429 |
+
splits = context_frames.split(split_sizes, dim=2)
|
430 |
+
split_idx = 0
|
431 |
+
clean_latents_4x = splits[split_idx] if num_4x_frames > 0 else history_latents[:, :, :fallback_frame_count, :, :]
|
432 |
+
if clean_latents_4x.shape[2] < 2: # 20250507 pftq: edge case for <=1 sec videos
|
433 |
+
clean_latents_4x = torch.cat([clean_latents_4x, clean_latents_4x[:, :, -1:, :, :]], dim=2)[:, :, :2, :, :]
|
434 |
+
split_idx += 1 if num_4x_frames > 0 else 0
|
435 |
+
clean_latents_2x = splits[split_idx] if num_2x_frames > 0 and split_idx < len(splits) else history_latents[:, :, :fallback_frame_count, :, :]
|
436 |
+
if clean_latents_2x.shape[2] < 2: # 20250507 pftq: edge case for <=1 sec videos
|
437 |
+
clean_latents_2x = torch.cat([clean_latents_2x, clean_latents_2x[:, :, -1:, :, :]], dim=2)[:, :, :2, :, :]
|
438 |
+
split_idx += 1 if num_2x_frames > 0 else 0
|
439 |
+
clean_latents_1x = splits[split_idx] if effective_clean_frames > 0 and split_idx < len(splits) else history_latents[:, :, :fallback_frame_count, :, :]
|
440 |
+
else:
|
441 |
+
clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]
|
442 |
+
else:
|
443 |
+
clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]
|
444 |
+
|
445 |
+
clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
|
446 |
+
|
447 |
+
# 20250507 pftq: Fix for <=1 sec videos.
|
448 |
+
max_frames = min(latent_window_size * 4 - 3, history_latents.shape[2] * 4)
|
449 |
+
|
450 |
+
generated_latents = sample_hunyuan(
|
451 |
+
transformer=transformer,
|
452 |
+
sampler='unipc',
|
453 |
+
width=width,
|
454 |
+
height=height,
|
455 |
+
frames=max_frames,
|
456 |
+
real_guidance_scale=cfg,
|
457 |
+
distilled_guidance_scale=gs,
|
458 |
+
guidance_rescale=rs,
|
459 |
+
num_inference_steps=steps,
|
460 |
+
generator=rnd,
|
461 |
+
prompt_embeds=llama_vec,
|
462 |
+
prompt_embeds_mask=llama_attention_mask,
|
463 |
+
prompt_poolers=clip_l_pooler,
|
464 |
+
negative_prompt_embeds=llama_vec_n,
|
465 |
+
negative_prompt_embeds_mask=llama_attention_mask_n,
|
466 |
+
negative_prompt_poolers=clip_l_pooler_n,
|
467 |
+
device=gpu,
|
468 |
+
dtype=torch.bfloat16,
|
469 |
+
image_embeddings=image_encoder_last_hidden_state,
|
470 |
+
latent_indices=latent_indices,
|
471 |
+
clean_latents=clean_latents,
|
472 |
+
clean_latent_indices=clean_latent_indices,
|
473 |
+
clean_latents_2x=clean_latents_2x,
|
474 |
+
clean_latent_2x_indices=clean_latent_2x_indices,
|
475 |
+
clean_latents_4x=clean_latents_4x,
|
476 |
+
clean_latent_4x_indices=clean_latent_4x_indices,
|
477 |
+
callback=callback,
|
478 |
+
)
|
479 |
+
|
480 |
+
total_generated_latent_frames += int(generated_latents.shape[2])
|
481 |
+
history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
|
482 |
+
|
483 |
+
if not high_vram:
|
484 |
+
offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
|
485 |
+
load_model_as_complete(vae, target_device=gpu)
|
486 |
+
|
487 |
+
real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
|
488 |
+
|
489 |
+
if history_pixels is None:
|
490 |
+
history_pixels = vae_decode(real_history_latents, vae).cpu()
|
491 |
+
else:
|
492 |
+
section_latent_frames = latent_window_size * 2
|
493 |
+
overlapped_frames = min(latent_window_size * 4 - 3, history_pixels.shape[2])
|
494 |
+
|
495 |
+
#if section_index == 0:
|
496 |
+
#extra_latents = 1 # Add up to 2 extra latent frames for smoother overlap to initial video
|
497 |
+
#extra_pixel_frames = extra_latents * 4 # Approx. 4 pixel frames per latent
|
498 |
+
#overlapped_frames = min(overlapped_frames + extra_pixel_frames, history_pixels.shape[2], section_latent_frames * 4)
|
499 |
+
|
500 |
+
current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
|
501 |
+
history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
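# Descriptive note on the append above: only the last section_latent_frames latents are re-decoded,
# and soft_append_bcthw merges them with the previously decoded pixels across `overlapped_frames`
# frames (a blended overlap rather than a hard cut, per the helper's name) so section boundaries do
# not pop; the exact blend weights are up to soft_append_bcthw in diffusers_helper.utils.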
|
502 |
+
|
503 |
+
if not high_vram:
|
504 |
+
unload_complete_models()
|
505 |
+
|
506 |
+
output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
|
507 |
+
|
508 |
+
# 20250506 pftq: Use input video FPS for output
|
509 |
+
save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
|
510 |
+
print(f"Latest video saved: {output_filename}")
|
511 |
+
# 20250508 pftq: Save prompt to mp4 metadata comments
|
512 |
+
set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompt} | Negative Prompt: {n_prompt}")
|
513 |
+
print(f"Prompt saved to mp4 metadata comments: {output_filename}")
|
514 |
+
|
515 |
+
# 20250506 pftq: Clean up previous partial files
|
516 |
+
if previous_video is not None and os.path.exists(previous_video):
|
517 |
+
try:
|
518 |
+
os.remove(previous_video)
|
519 |
+
print(f"Previous partial video deleted: {previous_video}")
|
520 |
+
except Exception as e:
|
521 |
+
print(f"Error deleting previous partial video {previous_video}: {e}")
|
522 |
+
previous_video = output_filename
|
523 |
+
|
524 |
+
print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
|
525 |
+
|
526 |
+
stream.output_queue.push(('file', output_filename))
|
527 |
+
except:
|
528 |
+
traceback.print_exc()
|
529 |
+
|
530 |
+
if not high_vram:
|
531 |
+
unload_complete_models(
|
532 |
+
text_encoder, text_encoder_2, image_encoder, vae
|
533 |
+
)
|
534 |
+
|
535 |
+
stream.output_queue.push(('end', None))
|
536 |
+
return
|
537 |
+
|
538 |
+
def get_duration(input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
    # ZeroGPU time budget for the decorated process() call below; currently a flat 5 minutes
    # regardless of the requested length or the debug override.
    return 5 * 60
|
543 |
+
|
544 |
+
@spaces.GPU(duration=get_duration)
|
545 |
+
def process(input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
|
546 |
+
global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
|
547 |
+
|
548 |
+
if input_video_debug_value is not None:
|
549 |
+
input_video = input_video_debug_value
|
550 |
+
input_video_debug_value = None
|
551 |
+
|
552 |
+
if prompt_debug_value is not None:
|
553 |
+
prompt = prompt_debug_value
|
554 |
+
prompt_debug_value = None
|
555 |
+
|
556 |
+
if total_second_length_debug_value is not None:
|
557 |
+
total_second_length = total_second_length_debug_value
|
558 |
+
total_second_length_debug_value = None
|
559 |
+
|
560 |
+
# 20250506 pftq: Updated assertion for video input
|
561 |
+
assert input_video is not None, 'No input video!'
|
562 |
+
|
563 |
+
yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
|
564 |
+
|
565 |
+
# 20250507 pftq: Even the H100 needs offloading if the video dimensions are 720p or higher
|
566 |
+
if high_vram and (no_resize or resolution>640):
|
567 |
+
print("Disabling high vram mode due to no resize and/or potentially higher resolution...")
|
568 |
+
high_vram = False
|
569 |
+
vae.enable_slicing()
|
570 |
+
vae.enable_tiling()
|
571 |
+
DynamicSwapInstaller.install_model(transformer, device=gpu)
|
572 |
+
DynamicSwapInstaller.install_model(text_encoder, device=gpu)
|
573 |
+
|
574 |
+
# 20250508 pftq: automatically set distilled cfg to 1 if cfg is used
|
575 |
+
if cfg > 1:
|
576 |
+
gs = 1
|
577 |
+
|
578 |
+
stream = AsyncStream()
|
579 |
+
|
580 |
+
# 20250506 pftq: Pass num_clean_frames, vae_batch, etc
|
581 |
+
async_run(worker, input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch)
|
582 |
+
|
583 |
+
output_filename = None
desc = ''  # initialized up front so the final 'Video complete.' update works even if no progress message arrived
|
584 |
+
|
585 |
+
while True:
|
586 |
+
flag, data = stream.output_queue.next()
|
587 |
+
|
588 |
+
if flag == 'file':
|
589 |
+
output_filename = data
|
590 |
+
yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
|
591 |
+
|
592 |
+
if flag == 'progress':
|
593 |
+
preview, desc, html = data
|
594 |
+
#yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
|
595 |
+
yield output_filename, gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True) # 20250506 pftq: Keep refreshing the video in case it got hidden when the tab was in the background
|
596 |
+
|
597 |
+
if flag == 'end':
|
598 |
+
yield output_filename, gr.update(visible=False), desc+' Video complete.', '', gr.update(interactive=True), gr.update(interactive=False)
|
599 |
+
break
|
600 |
+
|
601 |
+
def end_process():
|
602 |
+
stream.input_queue.push('end')
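# Message protocol between worker() and process() (descriptive comment): worker pushes
# ('progress', (preview, desc, bar_html)) while sampling, ('file', path) whenever a partial mp4 is
# saved, and ('end', None) when finished or cancelled; process() consumes these from
# stream.output_queue and yields Gradio updates, while end_process() pushes 'end' onto
# stream.input_queue so both the section loop and the per-step callback can stop early.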
|
603 |
+
|
604 |
+
quick_prompts = [
|
605 |
+
'The girl dances gracefully, with clear movements, full of charm.',
|
606 |
+
'A character doing some simple body movements.',
|
607 |
+
]
|
608 |
+
quick_prompts = [[x] for x in quick_prompts]
|
609 |
+
|
610 |
+
css = make_progress_bar_css()
|
611 |
+
block = gr.Blocks(css=css).queue()
|
612 |
+
with block:
|
613 |
+
gr.Markdown('# Framepack F1 (Video Extender)')
|
614 |
+
with gr.Row():
|
615 |
+
with gr.Column():
|
616 |
+
# 20250506 pftq: Changed to Video input from Image
|
617 |
+
input_video = gr.Video(sources='upload', label="Input Video", height=320)
|
618 |
+
prompt = gr.Textbox(label="Prompt", value='')
|
619 |
+
#example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
|
620 |
+
#example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)
|
621 |
+
|
622 |
+
with gr.Row():
|
623 |
+
start_button = gr.Button(value="Start Generation", variant="primary")
|
624 |
+
end_button = gr.Button(value="End Generation", variant="stop", interactive=False)
|
625 |
+
|
626 |
+
with gr.Group():
|
627 |
+
with gr.Row():
|
628 |
+
use_teacache = gr.Checkbox(label='Use TeaCache', value=False, info='Faster speed, but often makes hands and fingers slightly worse.')
|
629 |
+
no_resize = gr.Checkbox(label='Force Original Video Resolution (No Resizing)', value=False, info='Might run out of VRAM (720p requires > 24GB VRAM).')
|
630 |
+
|
631 |
+
seed = gr.Number(label="Seed", value=31337, precision=0)
|
632 |
+
|
633 |
+
batch = gr.Slider(label="Batch Size (Number of Videos)", minimum=1, maximum=1000, value=1, step=1, info='Generate multiple videos each with a different seed.')
|
634 |
+
|
635 |
+
resolution = gr.Number(label="Resolution (max width or height)", value=640, precision=0, visible=False)
|
636 |
+
|
637 |
+
total_second_length = gr.Slider(label="Additional Video Length to Generate (Seconds)", minimum=1, maximum=120, value=1, step=0.1)
|
638 |
+
|
639 |
+
gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=3.0, step=0.01, info='Prompt adherence at the cost of less details from the input video, but to a lesser extent than Context Frames.')
|
640 |
+
cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=True, info='Use this instead of Distilled for more detail/control + Negative Prompt (make sure Distilled set to 1). Doubles render time.') # Should not change
|
641 |
+
rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False) # Should not change
|
642 |
+
|
643 |
+
n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=True, info='Requires using normal CFG (undistilled) instead of Distilled (set Distilled=1 and CFG > 1).')
|
644 |
+
steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Increase for more quality, especially if using high non-distilled CFG.')
|
645 |
+
|
646 |
+
num_clean_frames = gr.Slider(label="Number of Context Frames", minimum=2, maximum=10, value=5, step=1, info="Retains more detail from the input video but increases memory use. Reduce to 2 if you run into memory issues.")
|
647 |
+
|
648 |
+
default_vae = 32
|
649 |
+
if high_vram:
|
650 |
+
default_vae = 128
|
651 |
+
elif free_mem_gb>=20:
|
652 |
+
default_vae = 64
|
653 |
+
|
654 |
+
vae_batch = gr.Slider(label="VAE Batch Size for Input Video", minimum=4, maximum=256, value=default_vae, step=4, info="Reduce if running out of memory. Increase for better quality frames during fast motion.")
|
655 |
+
|
656 |
+
latent_window_size = gr.Slider(label="Latent Window Size", minimum=9, maximum=33, value=9, step=1, visible=True, info='Generate more frames at a time (larger chunks). Less degradation and better blending but higher VRAM cost.')
|
657 |
+
|
658 |
+
gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
|
659 |
+
|
660 |
+
mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Use 16 if you get black outputs.")
|
661 |
+
|
662 |
+
with gr.Row():
|
663 |
+
input_video_debug = gr.Video(sources='upload', label="Input Video Debug", height=320)
|
664 |
+
prompt_debug = gr.Textbox(label="Prompt Debug", value='')
|
665 |
+
total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (Seconds) Debug", minimum=1, maximum=120, value=1, step=0.1)
|
666 |
+
|
667 |
+
with gr.Column():
|
668 |
+
preview_image = gr.Image(label="Next Latents", height=200, visible=False)
|
669 |
+
result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
|
670 |
+
progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
|
671 |
+
progress_bar = gr.HTML('', elem_classes='no-generating-animation')
|
672 |
+
|
673 |
+
with gr.Row(visible=False):
|
674 |
+
gr.Examples(
|
675 |
+
examples = [
|
676 |
+
[
|
677 |
+
"./img_examples/Example1.mp4", # input_video
|
678 |
+
"View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
|
679 |
+
"", # n_prompt
|
680 |
+
42, # seed
|
681 |
+
1, # batch
|
682 |
+
640, # resolution
|
683 |
+
1, # total_second_length
|
684 |
+
9, # latent_window_size
|
685 |
+
25, # steps
|
686 |
+
1.0, # cfg
|
687 |
+
3.0, # gs
|
688 |
+
0.0, # rs
|
689 |
+
6, # gpu_memory_preservation
|
690 |
+
False, # use_teacache
|
691 |
+
False, # no_resize
|
692 |
+
16, # mp4_crf
|
693 |
+
5, # num_clean_frames
|
694 |
+
default_vae
|
695 |
+
],
|
696 |
+
],
|
697 |
+
run_on_click = True,
|
698 |
+
fn = process,
|
699 |
+
inputs = [input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch],
|
700 |
+
outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
|
701 |
+
cache_examples = True,
|
702 |
+
)
|
703 |
+
|
704 |
+
gr.HTML("""
|
705 |
+
<div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>
|
706 |
+
""")
|
707 |
+
|
708 |
+
ips = [input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
|
709 |
+
start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
|
710 |
+
end_button.click(fn=end_process)
|
711 |
+
|
712 |
+
|
713 |
+
def handle_input_video_debug_upload(input):
|
714 |
+
global input_video_debug_value
|
715 |
+
input_video_debug_value = input
|
716 |
+
return []
|
717 |
+
|
718 |
+
def handle_prompt_debug_change(input):
|
719 |
+
global prompt_debug_value
|
720 |
+
prompt_debug_value = input
|
721 |
+
return []
|
722 |
+
|
723 |
+
def handle_total_second_length_debug_change(input):
|
724 |
+
global total_second_length_debug_value
|
725 |
+
total_second_length_debug_value = input
|
726 |
+
return []
|
727 |
+
|
728 |
+
input_video_debug.upload(
|
729 |
+
fn=handle_input_video_debug_upload,
|
730 |
+
inputs=[input_video_debug],
|
731 |
+
outputs=[]
|
732 |
+
)
|
733 |
+
|
734 |
+
prompt_debug.change(
|
735 |
+
fn=handle_prompt_debug_change,
|
736 |
+
inputs=[prompt_debug],
|
737 |
+
outputs=[]
|
738 |
+
)
|
739 |
+
|
740 |
+
total_second_length_debug.change(
|
741 |
+
fn=handle_total_second_length_debug_change,
|
742 |
+
inputs=[total_second_length_debug],
|
743 |
+
outputs=[]
|
744 |
+
)
|
745 |
+
|
746 |
+
block.launch(ssr_mode=False)
|
requirements.txt
CHANGED
@@ -1,48 +1,23 @@
-
-
-
-gradio_client==1.7.0
-numpy==1.26.4
-requests==2.32.3
+accelerate==1.6.0
+diffusers==0.33.1
+transformers==4.46.2
 sentencepiece==0.2.0
-
-
-
-
-
-transformers==4.42.4
-accelerate==0.32.1
-scikit-learn==1.5.1
-einops==0.8.0
-einops-exts==0.0.4
-timm==1.0.7
-openai-clip==1.0.1
-fsspec==2024.6.1
-kornia==0.7.3
-matplotlib==3.9.1
-ninja==1.11.1.1
-omegaconf==2.3.0
-opencv-python==4.10.0.84
-pandas==2.2.2
-pillow==10.4.0
-pytorch-lightning==2.3.3
-PyYAML==6.0.1
-scipy==1.14.0
-tqdm==4.66.4
-triton==2.3.1
-urllib3==2.2.2
-webdataset==0.2.86
-xformers==0.0.27
-facexlib==0.3.0
-k-diffusion==0.1.1.post1
-diffusers==0.30.0
-pillow-heif==0.18.0
-
-open-clip-torch==2.24.0
-
-torchaudio
-easydict==1.13
-fairscale==0.4.13
+pillow==11.1.0
+av==12.1.0
+numpy==1.26.2
+scipy==1.12.0
+requests==2.31.0
 torchsde==0.2.6
-
-
+torch>=2.0.0
+torchvision
+torchaudio
+einops
+opencv-contrib-python
+safetensors
+huggingface_hub
+spaces
+decord
+imageio_ffmpeg
+sageattention
+xformers
+bitsandbytes