import os
import subprocess
import tempfile

import gradio as gr
from transformers import pipeline

# === TEXT SCRIPT GENERATOR ===
generator = pipeline("text-generation", model="gpt2")

def generate_script(prompt):
    result = generator(prompt, max_length=100)[0]["generated_text"]
    print(result)
    return result

# === VOICE GENERATOR using Bark ===
def generate_voice_with_bark(script_text):
    # Write the script to a temp file and pass it to the Bark inference script.
    with tempfile.NamedTemporaryFile(mode="w+", suffix=".txt", delete=False) as tf:
        tf.write(script_text)
        tf_path = tf.name
    output_audio = tempfile.NamedTemporaryFile(suffix=".wav", delete=False).name
    subprocess.run(
        ["python3", "bark_infer_cpu.py", "--text_file", tf_path, "--output", output_audio],
        check=True,
    )
    return output_audio

# === BACKGROUND MUSIC MIXER ===
def mix_audio(narration_path, music_path="bg_music.mp3"):
    # Duck the background music to 30% and mix it under the narration.
    mixed_audio = tempfile.NamedTemporaryFile(suffix=".aac", delete=False).name
    subprocess.run([
        "ffmpeg", "-y",
        "-i", music_path,
        "-i", narration_path,
        "-filter_complex",
        "[0:a]volume=0.3[a0];[1:a]volume=1.0[a1];"
        "[a0][a1]amix=inputs=2:duration=first:dropout_transition=2",
        "-c:a", "aac", "-b:a", "192k",
        mixed_audio,
    ], check=True)
    return mixed_audio

# === VIDEO GENERATOR ===
def generate_video(prompt):
    script = generate_script(prompt)
    with open("script.txt", "w") as f:
        f.write(script)

    narration_path = generate_voice_with_bark(script)
    mixed_audio_path = mix_audio(narration_path)

    # Render the video clip from the prompt, then mux in the mixed audio track.
    subprocess.run(
        ["python3", "generate.py", "--prompt", prompt, "--output", "sample_video.mp4"],
        check=True,
    )

    final_output = "final_output.mp4"
    subprocess.run([
        "ffmpeg", "-y",
        "-i", "sample_video.mp4",
        "-i", mixed_audio_path,
        "-c:v", "copy",
        "-c:a", "aac",
        final_output,
    ], check=True)
    return final_output, script

# === UI ===
iface = gr.Interface(
    fn=generate_video,
    inputs=gr.Textbox(label="Enter your scene prompt"),
    outputs=[gr.Video(label="Generated Video"), gr.Textbox(label="Generated Script")],
    title="Free AI Reels Generator",
)

iface.launch()