Fabrice-TIERCELIN committed on
Commit b0d63fe · verified · 1 Parent(s): c1f8bed

Merge code

Files changed (1):
  1. app.py +53 -246
app.py CHANGED
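The diff below collapses the duplicated `worker` / `worker_last_frame` pair into a single `worker` gated by the new `image_position` argument (100 means the uploaded image anchors the last frame instead of the first). A minimal sketch of the refactor pattern, with hypothetical `render_*` stand-ins for the two divergent code paths:

def render_forward(frames):
    return list(frames)             # image anchors the start; sections are appended

def render_backward(frames):
    return list(reversed(frames))   # image anchors the end; sections are prepended

def worker_sketch(frames, image_position):
    is_last_frame = (image_position == 100)
    return render_backward(frames) if is_last_frame else render_forward(frames)

print(worker_sketch([1, 2, 3], 0))    # [1, 2, 3]
print(worker_sketch([1, 2, 3], 100))  # [3, 2, 1]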
@@ -304,7 +304,8 @@ def set_mp4_comments_imageio_ffmpeg(input_file, comments):
         return False
 
 @torch.no_grad()
-def worker(input_image, prompts, n_prompt, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf):
+def worker(input_image, image_position, prompts, n_prompt, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf):
+    is_last_frame = (image_position == 100)
     def encode_prompt(prompt, n_prompt):
         llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
 
@@ -397,9 +398,10 @@ def worker(input_image, prompts, n_prompt, seed, resolution, total_second_length
         rnd = torch.Generator("cpu").manual_seed(seed)
 
         history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
+        start_latent = start_latent.to(history_latents)
         history_pixels = None
 
-        history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
+        history_latents = torch.cat([start_latent, history_latents] if is_last_frame else [history_latents, start_latent], dim=2)
         total_generated_latent_frames = 1
 
         if enable_preview:
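The flag's first effect is the concatenation order above: the encoded start image lands after the zero history for a start anchor and before it for an end anchor, along the time axis (dim=2 of a batch-channel-time-height-width tensor). A toy shape check, assuming only torch:

import torch

history = torch.zeros(1, 16, 19, 4, 4)          # 19 = 16 + 2 + 1 context frames
start = torch.ones(1, 16, 1, 4, 4)              # stand-in for the encoded image latent

forward = torch.cat([history, start], dim=2)    # image_position == 0
backward = torch.cat([start, history], dim=2)   # image_position == 100
print(forward.shape)                            # torch.Size([1, 16, 20, 4, 4])
print(forward[0, 0, -1, 0, 0].item(), backward[0, 0, 0, 0, 0].item())  # 1.0 1.0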
@@ -425,252 +427,35 @@ def worker(input_image, prompts, n_prompt, seed, resolution, total_second_length
                 return
 
         indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
-        clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
-        clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
-
-        def post_process(generated_latents, total_generated_latent_frames, history_latents, high_vram, transformer, gpu, vae, history_pixels, latent_window_size, enable_preview, section_index, total_latent_sections, outputs_folder, mp4_crf, stream):
-            total_generated_latent_frames += int(generated_latents.shape[2])
-            history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
-
-            if not high_vram:
-                offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
-                load_model_as_complete(vae, target_device=gpu)
-
-            if history_pixels is None:
-                real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
-                history_pixels = vae_decode(real_history_latents, vae).cpu()
-            else:
-                section_latent_frames = latent_window_size * 2
-                overlapped_frames = latent_window_size * 4 - 3
-
-                real_history_latents = history_latents[:, :, -min(section_latent_frames, total_generated_latent_frames):, :, :]
-                history_pixels = soft_append_bcthw(history_pixels, vae_decode(real_history_latents, vae).cpu(), overlapped_frames)
-
-            if not high_vram:
-                unload_complete_models()
-
-            if enable_preview or section_index == total_latent_sections - 1:
-                output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
-
-                save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
-
-                print(f'Decoded. Current latent shape pixel shape {history_pixels.shape}')
-
-                stream.output_queue.push(('file', output_filename))
-            return [total_generated_latent_frames, history_latents, history_pixels]
-
-        for section_index in range(total_latent_sections):
-            if stream.input_queue.top() == 'end':
-                stream.output_queue.push(('end', None))
-                return
-
-            print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
-
-            if len(prompt_parameters) > 0:
-                [llama_vec, clip_l_pooler, llama_vec_n, clip_l_pooler_n, llama_attention_mask, llama_attention_mask_n] = prompt_parameters.pop(0)
-
-            if not high_vram:
-                unload_complete_models()
-                move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
-
-            if use_teacache:
-                transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
-            else:
-                transformer.initialize_teacache(enable_teacache=False)
-
-            clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
-            clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
-
-            generated_latents = sample_hunyuan(
-                transformer=transformer,
-                sampler='unipc',
-                width=width,
-                height=height,
-                frames=latent_window_size * 4 - 3,
-                real_guidance_scale=cfg,
-                distilled_guidance_scale=gs,
-                guidance_rescale=rs,
-                # shift=3.0,
-                num_inference_steps=steps,
-                generator=rnd,
-                prompt_embeds=llama_vec,
-                prompt_embeds_mask=llama_attention_mask,
-                prompt_poolers=clip_l_pooler,
-                negative_prompt_embeds=llama_vec_n,
-                negative_prompt_embeds_mask=llama_attention_mask_n,
-                negative_prompt_poolers=clip_l_pooler_n,
-                device=gpu,
-                dtype=torch.bfloat16,
-                image_embeddings=image_encoder_last_hidden_state,
-                latent_indices=latent_indices,
-                clean_latents=clean_latents,
-                clean_latent_indices=clean_latent_indices,
-                clean_latents_2x=clean_latents_2x,
-                clean_latent_2x_indices=clean_latent_2x_indices,
-                clean_latents_4x=clean_latents_4x,
-                clean_latent_4x_indices=clean_latent_4x_indices,
-                callback=callback,
-            )
-
-            [total_generated_latent_frames, history_latents, history_pixels] = post_process(generated_latents, total_generated_latent_frames, history_latents, high_vram, transformer, gpu, vae, history_pixels, latent_window_size, enable_preview, section_index, total_latent_sections, outputs_folder, mp4_crf, stream)
-    except:
-        traceback.print_exc()
-
-        if not high_vram:
-            unload_complete_models(
-                text_encoder, text_encoder_2, image_encoder, vae, transformer
-            )
-
-        stream.output_queue.push(('end', None))
-        return
-
-@torch.no_grad()
-def worker_last_frame(input_image, prompts, n_prompt, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf):
-    def encode_prompt(prompt, n_prompt):
-        llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
-
-        if cfg == 1:
-            llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
-        else:
-            llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
-
-        llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
-        llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
-
-        llama_vec = llama_vec.to(transformer.dtype)
-        llama_vec_n = llama_vec_n.to(transformer.dtype)
-        clip_l_pooler = clip_l_pooler.to(transformer.dtype)
-        clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
-        return [llama_vec, clip_l_pooler, llama_vec_n, clip_l_pooler_n, llama_attention_mask, llama_attention_mask_n]
-
-    total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
-    total_latent_sections = int(max(round(total_latent_sections), 1))
-
-    job_id = generate_timestamp()
-
-    stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
-
-    try:
-        # Clean GPU
-        if not high_vram:
-            unload_complete_models(
-                text_encoder, text_encoder_2, image_encoder, vae, transformer
-            )
-
-        # Text encoding
-
-        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
-
-        if not high_vram:
-            fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
-            load_model_as_complete(text_encoder_2, target_device=gpu)
-
-        prompt_parameters = []
-
-        for prompt_part in prompts:
-            prompt_parameters.append(encode_prompt(prompt_part, n_prompt))
-
-        # Processing input image
-
-        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))
-
-        H, W, C = input_image.shape
-        height, width = find_nearest_bucket(H, W, resolution=resolution)
-
-        def get_start_latent(input_image, height, width, vae, gpu, image_encoder, high_vram):
-            input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)
-
-            #Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
-
-            input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
-            input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]
-
-            # VAE encoding
-
-            stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))
-
-            if not high_vram:
-                load_model_as_complete(vae, target_device=gpu)
-
-            start_latent = vae_encode(input_image_pt, vae)
-
-            # CLIP Vision
-
-            stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
-
-            if not high_vram:
-                load_model_as_complete(image_encoder, target_device=gpu)
-
-            image_encoder_last_hidden_state = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder).last_hidden_state
-
-            return [start_latent, image_encoder_last_hidden_state]
-
-        [start_latent, image_encoder_last_hidden_state] = get_start_latent(input_image, height, width, vae, gpu, image_encoder, high_vram)
-
-        # Dtype
-
-        image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
-
-        # Sampling
-
-        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
-
-        rnd = torch.Generator("cpu").manual_seed(seed)
-
-        history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
-        history_pixels = None
-
-        history_latents = torch.cat([start_latent.to(history_latents), history_latents], dim=2)
-        total_generated_latent_frames = 1
-
-        if enable_preview:
-            def callback(d):
-                preview = d['denoised']
-                preview = vae_decode_fake(preview)
-
-                preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
-                preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
-
-                if stream.input_queue.top() == 'end':
-                    stream.output_queue.push(('end', None))
-                    raise KeyboardInterrupt('User ends the task.')
-
-                current_step = d['i'] + 1
-                percentage = int(100.0 * current_step / steps)
-                hint = f'Sampling {current_step}/{steps}'
-                desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30), Resolution: {height}px * {width}px. The video is being extended now ...'
-                stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
-                return
+        if is_last_frame:
+            latent_indices, clean_latent_1x_indices, clean_latent_2x_indices, clean_latent_4x_indices, clean_latent_indices_start = indices.split([latent_window_size, 1, 2, 16, 1], dim=1)
+            clean_latent_indices = torch.cat([clean_latent_1x_indices, clean_latent_indices_start], dim=1)
         else:
-            def callback(d):
-                return
-
-            indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
-            latent_indices, clean_latent_1x_indices, clean_latent_2x_indices, clean_latent_4x_indices, clean_latent_indices_start = indices.split([latent_window_size, 1, 2, 16, 1], dim=1)
-            clean_latent_indices = torch.cat([clean_latent_1x_indices, clean_latent_indices_start], dim=1)
+            clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
+            clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
 
         def post_process(generated_latents, total_generated_latent_frames, history_latents, high_vram, transformer, gpu, vae, history_pixels, latent_window_size, enable_preview, section_index, total_latent_sections, outputs_folder, mp4_crf, stream):
             total_generated_latent_frames += int(generated_latents.shape[2])
-            history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2)
+            history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2) if is_last_frame else torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
 
             if not high_vram:
                 offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
                 load_model_as_complete(vae, target_device=gpu)
 
             if history_pixels is None:
-                real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :]
+                real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :] if is_last_frame else history_latents[:, :, -total_generated_latent_frames:, :, :]
                 history_pixels = vae_decode(real_history_latents, vae).cpu()
             else:
                 section_latent_frames = latent_window_size * 2
                 overlapped_frames = latent_window_size * 4 - 3
 
-                real_history_latents = history_latents[:, :, :min(section_latent_frames, total_generated_latent_frames), :, :]
-                history_pixels = soft_append_bcthw(vae_decode(real_history_latents, vae).cpu(), history_pixels, overlapped_frames)
+                real_history_latents = history_latents[:, :, :min(section_latent_frames, total_generated_latent_frames), :, :] if is_last_frame else history_latents[:, :, -min(section_latent_frames, total_generated_latent_frames):, :, :]
+                history_pixels = soft_append_bcthw(vae_decode(real_history_latents, vae).cpu(), history_pixels, overlapped_frames) if is_last_frame else soft_append_bcthw(history_pixels, vae_decode(real_history_latents, vae).cpu(), overlapped_frames)
 
             if not high_vram:
                 unload_complete_models()
 
-            if enable_preview or section_index == 0:
+            if enable_preview or section_index == (0 if is_last_frame else (total_latent_sections - 1)):
                 output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
 
                 save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
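The same mirroring drives the index bookkeeping: one `torch.arange` over 1 + 16 + 2 + 1 + window positions is split as [start | 4x | 2x | 1x | window] when generating forward and in the reverse order when generating backward. A quick check with a toy window size (torch assumed):

import torch

latent_window_size = 3
indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)

start_f, four_f, two_f, one_f, window_f = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
window_b, one_b, two_b, four_b, start_b = indices.split([latent_window_size, 1, 2, 16, 1], dim=1)

print(window_f.tolist())  # [[20, 21, 22]] -> window sits after the clean anchors
print(window_b.tolist())  # [[0, 1, 2]]    -> window sits before the clean anchors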
@@ -680,7 +465,7 @@ def worker_last_frame(input_image, prompts, n_prompt, seed, resolution, total_se
                 stream.output_queue.push(('file', output_filename))
             return [total_generated_latent_frames, history_latents, history_pixels]
 
-        for section_index in range(total_latent_sections - 1, -1, -1):
+        for section_index in range(total_latent_sections - 1, -1, -1) if is_last_frame else range(total_latent_sections):
             if stream.input_queue.top() == 'end':
                 stream.output_queue.push(('end', None))
                 return
@@ -688,7 +473,7 @@ def worker_last_frame(input_image, prompts, n_prompt, seed, resolution, total_se
             print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
 
             if len(prompt_parameters) > 0:
-                [llama_vec, clip_l_pooler, llama_vec_n, clip_l_pooler_n, llama_attention_mask, llama_attention_mask_n] = prompt_parameters.pop(len(prompt_parameters) - 1)
+                [llama_vec, clip_l_pooler, llama_vec_n, clip_l_pooler_n, llama_attention_mask, llama_attention_mask_n] = prompt_parameters.pop((len(prompt_parameters) - 1) if is_last_frame else 0)
 
             if not high_vram:
                 unload_complete_models()
@@ -699,8 +484,12 @@ def worker_last_frame(input_image, prompts, n_prompt, seed, resolution, total_se
             else:
                 transformer.initialize_teacache(enable_teacache=False)
 
-            clean_latents_1x, clean_latents_2x, clean_latents_4x = history_latents[:, :, :sum([1, 2, 16]), :, :].split([1, 2, 16], dim=2)
-            clean_latents = torch.cat([clean_latents_1x, start_latent.to(history_latents)], dim=2)
+            if is_last_frame:
+                clean_latents_1x, clean_latents_2x, clean_latents_4x = history_latents[:, :, :sum([1, 2, 16]), :, :].split([1, 2, 16], dim=2)
+                clean_latents = torch.cat([clean_latents_1x, start_latent], dim=2)
+            else:
+                clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
+                clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2)
 
             generated_latents = sample_hunyuan(
                 transformer=transformer,
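The clean-latent context completes the symmetry: the end-anchored branch slices the head of `history_latents` and appends the start latent, while the start-anchored branch slices the tail and prepends it. A toy slice check (torch assumed):

import torch

history_latents = torch.arange(25.0).reshape(1, 1, 25, 1, 1)

head = history_latents[:, :, :sum([1, 2, 16]), :, :]    # is_last_frame: first 1+2+16 frames
tail = history_latents[:, :, -sum([16, 2, 1]):, :, :]   # forward: last 16+2+1 frames
print(head.shape[2], head[0, 0, 0, 0, 0].item())        # 19 0.0
print(tail.shape[2], tail[0, 0, -1, 0, 0].item())       # 19 24.0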
@@ -791,7 +580,9 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
         stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Video processing ...'))))
 
         # 20250506 pftq: Encode video
-        start_latent, input_image_np, video_latents, fps, height, width, input_video_pixels = video_encode(input_video, resolution, no_resize, vae, vae_batch_size=vae_batch, device=gpu)
+        start_latent, input_image_np, video_latents, fps, height, width = video_encode(input_video, resolution, no_resize, vae, vae_batch_size=vae_batch, device=gpu)[:6]
+        start_latent = start_latent.to(dtype=torch.float32).cpu()
+        video_latents = video_latents.cpu()
 
         # CLIP Vision
         stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
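The `[:6]` slice keeps only the first six of `video_encode`'s return values; the seventh, `input_video_pixels`, is no longer used. The pattern in isolation, with a hypothetical stub standing in for the real encoder:

def video_encode_stub():
    # returns 7 values, like the original video_encode signature in this file
    return 'latent', 'np_image', 'video_latents', 30, 512, 768, 'pixels'

start_latent, input_image_np, video_latents, fps, height, width = video_encode_stub()[:6]
print(fps, height, width)  # 30 512 768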
@@ -881,7 +672,7 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
             if effective_clean_frames > 0 and split_idx < len(splits):
                 clean_latents_1x = splits[split_idx]
 
-            clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
+            clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2)
 
             # 20250507 pftq: Fix for <=1 sec videos.
             max_frames = min(latent_window_size * 4 - 3, history_latents.shape[2] * 4)
@@ -900,7 +691,7 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
         rnd = torch.Generator("cpu").manual_seed(seed)
 
         # 20250506 pftq: Initialize history_latents with video latents
-        history_latents = video_latents.cpu()
+        history_latents = video_latents
         total_generated_latent_frames = history_latents.shape[2]
         # 20250506 pftq: Initialize history_pixels to fix UnboundLocalError
         history_pixels = None
@@ -1013,7 +804,7 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
         stream.output_queue.push(('end', None))
         return
 
-def get_duration(input_image, image_position, prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf, progress = None):
+def get_duration(input_image, image_position, prompt, generation_mode, n_prompt, randomize_seed, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf):
     return total_second_length * 60 * (0.9 if use_teacache else 1.5) * (1 + ((steps - 25) / 100))
 
 @spaces.GPU(duration=get_duration)
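For reference, the duration that `@spaces.GPU` requests from ZeroGPU scales linearly with clip length and step count; worked numbers for a 2-second clip:

def estimate(total_second_length, steps, use_teacache):
    return total_second_length * 60 * (0.9 if use_teacache else 1.5) * (1 + ((steps - 25) / 100))

print(estimate(2, 25, True))   # 108.0 seconds with TeaCache
print(estimate(2, 25, False))  # 180.0 seconds without
print(estimate(2, 50, False))  # 225.0 -> each step past 25 adds 1% of the base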
@@ -1034,8 +825,7 @@ def process(input_image,
             gpu_memory_preservation=6,
             enable_preview=True,
             use_teacache=False,
-            mp4_crf=16,
-            progress = gr.Progress()
+            mp4_crf=16
             ):
     start = time.time()
     global stream
@@ -1060,7 +850,7 @@ def process(input_image,
 
     stream = AsyncStream()
 
-    async_run(worker_last_frame if image_position == 100 else worker, input_image, prompts, n_prompt, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf)
+    async_run(worker, input_image, image_position, prompts, n_prompt, seed, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, mp4_crf)
 
     output_filename = None
 
@@ -1073,7 +863,6 @@ def process(input_image,
 
         if flag == 'progress':
             preview, desc, html = data
-            progress(None, desc = desc)
             yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
 
         if flag == 'end':
@@ -1090,13 +879,12 @@ def process(input_image,
                 "You can upscale the result with RIFE. To make all your generated scenes consistent, you can then apply a face swap on the main character.", gr.update(interactive=True), gr.update(interactive=False)
             break
 
-def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch, progress = None):
+def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
     return total_second_length * 60 * (0.9 if use_teacache else 2.3) * (1 + ((steps - 25) / 100))
 
 # 20250506 pftq: Modified process to pass clean frame count, etc from video_encode
 @spaces.GPU(duration=get_duration_video)
-def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch,
-                  progress = gr.Progress()):
+def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, enable_preview, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
     start = time.time()
     global stream, high_vram
 
@@ -1144,7 +932,6 @@ def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, re
 
         if flag == 'progress':
             preview, desc, html = data
-            progress(None, desc = desc)
             #yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
             yield output_filename, gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True) # 20250506 pftq: Keep refreshing the video in case it got hidden when the tab was in the background
 
@@ -1234,7 +1021,7 @@ with block:
         generation_mode = gr.Radio([["Text-to-Video", "text"], ["Image-to-Video", "image"], ["Video Extension", "video"]], elem_id="generation-mode", label="Generation mode", value = "image")
         text_to_video_hint = gr.HTML("I discourage to use the Text-to-Video feature. You should rather generate an image with Flux and use Image-to-Video. You will save time.")
         input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
-        image_position = gr.Slider(label="Image position", minimum=0, maximum=100, value=0, step=100, info='0=Video start; 100=Video end')
+        image_position = gr.Slider(label="Image position", minimum=0, maximum=100, value=0, step=100, info='0=Video start; 100=Video end (lower quality)')
         input_video = gr.Video(sources='upload', label="Input Video", height=320)
         timeless_prompt = gr.Textbox(label="Timeless prompt", info='Used on the whole duration of the generation', value='', placeholder="The creature starts to move, fast motion, fixed camera, focus motion, consistent arm, consistent position, mute colors, insanely detailed")
         prompt_number = gr.Slider(label="Timed prompt number", minimum=0, maximum=1000, value=0, step=1, info='Prompts will automatically appear')
@@ -1394,6 +1181,26 @@ with block:
                 False, # enable_preview
                 True, # use_teacache
                 16 # mp4_crf
+            ],
+            [
+                "./img_examples/Example4.webp", # input_image
+                100, # image_position
+                "A building starting to explode, photorealistic, realisitc, 8k, insanely detailed",
+                "image", # generation_mode
+                "Missing arm, unrealistic position, impossible contortion, visible bone, muscle contraction, blurred, blurry", # n_prompt
+                True, # randomize_seed
+                42, # seed
+                672, # resolution
+                1, # total_second_length
+                9, # latent_window_size
+                25, # steps
+                1.0, # cfg
+                10.0, # gs
+                0.0, # rs
+                6, # gpu_memory_preservation
+                False, # enable_preview
+                False, # use_teacache
+                16 # mp4_crf
             ]
         ],
         run_on_click = True,
 