Fabrice-TIERCELIN committed
Commit bd6b924 · verified · 1 Parent(s): 5e2a644

Timed prompts

Files changed (1):
  1. app.py (+51 -2)
app.py CHANGED
@@ -778,12 +778,24 @@ def worker_video(input_video, prompt, n_prompt, seed, batch, resolution, total_s
 
         total_generated_latent_frames += int(generated_latents.shape[2])
         history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
+        end = time.time()
+        secondes = int(end - start)
+        print("1 ££££££££££££££££££££££££££££££££££££££££ " + str(secondes))
+        start = time.time()
 
         if not high_vram:
             offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
             load_model_as_complete(vae, target_device=gpu)
+        end = time.time()
+        secondes = int(end - start)
+        print("2 ££££££££££££££££££££££££££££££££££££££££ " + str(secondes))
+        start = time.time()
 
         real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
+        end = time.time()
+        secondes = int(end - start)
+        print("3 ££££££££££££££££££££££££££££££££££££££££ " + str(secondes))
+        start = time.time()
 
         if history_pixels is None:
             history_pixels = vae_decode(real_history_latents, vae).cpu()
@@ -798,9 +810,17 @@ def worker_video(input_video, prompt, n_prompt, seed, batch, resolution, total_s
 
             current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
             history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
+        end = time.time()
+        secondes = int(end - start)
+        print("4 ££££££££££££££££££££££££££££££££££££££££ " + str(secondes))
+        start = time.time()
 
         if not high_vram:
             unload_complete_models()
+        end = time.time()
+        secondes = int(end - start)
+        print("5 ££££££££££££££££££££££££££££££££££££££££ " + str(secondes))
+        start = time.time()
 
         output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
 
@@ -810,6 +830,10 @@ def worker_video(input_video, prompt, n_prompt, seed, batch, resolution, total_s
         # 20250508 pftq: Save prompt to mp4 metadata comments
         set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompt} | Negative Prompt: {n_prompt}");
         print(f"Prompt saved to mp4 metadata comments: {output_filename}")
+        end = time.time()
+        secondes = int(end - start)
+        print("6 ££££££££££££££££££££££££££££££££££££££££ " + str(secondes))
+        start = time.time()
 
         # 20250506 pftq: Clean up previous partial files
         if previous_video is not None and os.path.exists(previous_video):
@@ -819,13 +843,17 @@ def worker_video(input_video, prompt, n_prompt, seed, batch, resolution, total_s
             except Exception as e:
                 print(f"Error deleting previous partial video {previous_video}: {e}")
         previous_video = output_filename
+        end = time.time()
+        secondes = int(end - start)
+        print("7 ££££££££££££££££££££££££££££££££££££££££ " + str(secondes))
+        start = time.time()
 
         print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
 
         stream.output_queue.push(('file', output_filename))
         end = time.time()
         secondes = int(end - start)
-        print("££££££££££££££££££££££££££££££££££££££££ " + str(secondes))
+        print("8 ££££££££££££££££££££££££££££££££££££££££ " + str(secondes))
 
         seed = (seed + 1) % np.iinfo(np.int32).max
 
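Note: the four hunks above replace worker_video's single elapsed-time print with numbered per-stage prints throughout the decode/save loop, resetting start after each one so every print reports the seconds spent since the previous stage. As an aside that is not part of the commit, the repeated end/secondes/print pattern is equivalent to a small stopwatch helper; the name stage_timer below is hypothetical:

import time
from contextlib import contextmanager

@contextmanager
def stage_timer(label):
    # Print the whole seconds spent inside the block, mirroring the
    # end = time.time(); secondes = int(end - start); print(...) lines above.
    start = time.time()
    yield
    print(label + " " + str(int(time.time() - start)))

# Hypothetical usage around one stage:
# with stage_timer("3 ££££££££££££££££££££££££££££££££££££££££"):
#     real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]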
 
 
@@ -908,6 +936,15 @@ def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, re
 def end_process():
     stream.input_queue.push('end')
 
+timed_prompts = {}
+
+def handle_prompt_number_change():
+    timed_prompts = {}
+    return []
+
+def handle_generation_mode_change(timed_prompt_id, timed_prompt):
+    timed_prompts[timed_prompt_id] = timed_prompt
+    return ";".join(list(timed_prompts.values()))
 
 css = make_progress_bar_css()
 block = gr.Blocks(css=css).queue()
@@ -933,7 +970,19 @@ adapted from the official code repo [FramePack](https://github.com/lllyasviel/Fr
             text_to_video_hint = gr.HTML("I discourage to use the Text-to-Video feature. You should rather generate an image with Flux and use Image-to-Video. You will save time.", visible=False)
             input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
             input_video = gr.Video(sources='upload', label="Input Video", height=320, visible=False)
-            prompt = gr.Textbox(label="Prompt", value='')
+            prompt = gr.Textbox(label="Prompt", value='', info='Use ; to separate in time', placeholder="The creature starts to move, fast motion, fixed camera")
+            prompt_number = gr.Slider(label="Timed prompt number", minimum=0, maximum=1000, value=0, step=1, info='Not for video extension')
+            prompt_number.change(fn=handle_prompt_number_change, inputs=[], outputs=[])
+
+            @gr.render(inputs=prompt_number)
+            def show_split(prompt_number):
+                timed_prompts = {}
+
+                for digit in range(prompt_number):
+                    timed_prompt_id = gr.Textbox(value="timed_prompt_" + str(digit), visible=False)
+                    timed_prompt = gr.Textbox(label="Timed prompt #" + str(digit + 1), elem_id="timed_prompt_" + str(digit), value="")
+                    timed_prompt.change(fn=handle_generation_mode_change, inputs=[timed_prompt_id, timed_prompt], outputs=[prompt])
+
             total_second_length = gr.Slider(label="Video Length to Generate (seconds)", minimum=1, maximum=120, value=2, step=0.1)
 
             with gr.Row():
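The last two hunks only build the prompt string on the UI side: each "Timed prompt" textbox stores its value in timed_prompts, and the values are joined with ";" back into the main Prompt field, matching the "Use ; to separate in time" hint. How the ";"-separated string is consumed during generation is not shown in this diff. A minimal sketch, assuming the prompts are spread evenly across generation sections; the helper name split_timed_prompts and the even-split rule are assumptions, not code from app.py:

def split_timed_prompts(prompt, num_sections):
    # Split the ';'-separated prompt built by the UI and assign one part per
    # generation section, repeating parts so they cover all sections evenly.
    parts = [p.strip() for p in prompt.split(";") if p.strip()]
    if not parts:
        return [""] * num_sections
    return [parts[min(i * len(parts) // num_sections, len(parts) - 1)]
            for i in range(num_sections)]

# Example:
# split_timed_prompts("walks forward; turns around; waves", 6)
# -> ['walks forward', 'walks forward', 'turns around', 'turns around', 'waves', 'waves']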