# Source: Hugging Face Space "app.py" by gnosticdev (commit de0afc9).
# (Web-page header from the original extraction removed.)
import gradio as gr
from tts_module import get_voices, text_to_speech
from pexels_api import search_pexels
from moviepy.editor import (
AudioFileClip, VideoFileClip, CompositeAudioClip,
concatenate_audioclips, concatenate_videoclips, vfx, CompositeVideoClip
)
import asyncio
import os
import time
import requests
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
import tempfile
import re
import random
# Temporary output folder for rendered videos; created eagerly so later
# write_videofile calls can assume it exists.
output_folder = "outputs"
os.makedirs(output_folder, exist_ok=True)
def clean_text_for_search(text):
    """Strip punctuation/special characters so the string is a safe Pexels query.

    Keeps only word characters and whitespace, then trims the ends.
    """
    return re.sub(r'[^\w\s]', '', text).strip()
def resize_and_blur_video(clip, target_aspect_ratio=16/9):
    """
    Fit a clip into the target aspect ratio (default 16:9).

    Vertical clips are centered over an enlarged, blurred copy of
    themselves; clips wider than the target are center-cropped; clips
    already close to the target ratio are returned unchanged. On any
    processing error the original clip is returned untouched.

    :param clip: a moviepy VideoClip.
    :param target_aspect_ratio: desired width/height ratio.
    :return: a clip matching the target ratio (best effort).
    """
    try:
        w, h = clip.size
        current_aspect_ratio = w / h
        print(f"Procesando video: {w}x{h}, ratio: {current_aspect_ratio}")

        # Already close enough to the target ratio: nothing to do.
        if abs(current_aspect_ratio - target_aspect_ratio) < 0.1:
            return clip

        if current_aspect_ratio < target_aspect_ratio:  # Vertical video
            target_w = int(h * target_aspect_ratio)
            target_h = h

            # Background: the clip stretched to fill the frame width.
            background = clip.resize(width=target_w)
            try:
                # moviepy 1.x has no vfx.blur, so the original
                # `background.fx(vfx.blur, sigma=50)` always raised and
                # left the background sharp. Approximate a blur by
                # downscaling and upscaling, which only uses resize.
                bg_h = background.h
                background = background.resize(0.1).resize((target_w, bg_h))
            except Exception as e:
                print(f"Error al aplicar blur: {e}")

            # Foreground keeps its native ratio, centered horizontally.
            foreground = clip.resize(height=target_h)
            x_center = (target_w - foreground.w) / 2
            return CompositeVideoClip(
                # Center the background as well: it is taller than
                # target_h, so anchoring at (0, 0) would trim only the
                # bottom edge instead of both edges evenly.
                [background.set_position("center"),
                 foreground.set_position((x_center, 0))],
                size=(target_w, target_h)
            )
        else:  # Horizontal video wider than the target ratio
            # Center-crop the excess width. The original resized width
            # and height independently, squashing the image.
            return clip.crop(x_center=w / 2, width=int(h * target_aspect_ratio))
    except Exception as e:
        print(f"Error en resize_and_blur_video: {e}")
        return clip
def concatenate_pexels_videos(keywords, num_videos_per_keyword=1):
    """
    Download and concatenate Pexels videos for user-provided keywords.

    :param keywords: comma-separated keywords
        (e.g. "universo, galaxia, bosque, gato").
    :param num_videos_per_keyword: how many clips to use per keyword.
    :return: a single shuffled, concatenated moviepy clip.
    :raises Exception: if no valid keywords or no usable videos are found.
    """
    keyword_list = [keyword.strip() for keyword in keywords.split(",") if keyword.strip()]
    if not keyword_list:
        raise Exception("No se proporcionaron palabras clave válidas.")
    video_clips = []
    for keyword in keyword_list:
        try:
            print(f"Buscando videos para la palabra clave '{keyword}'...")
            links = search_pexels(keyword, num_results=num_videos_per_keyword)
            if not links:
                print(f"No se encontraron videos para la palabra clave '{keyword}'.")
                continue
            # Use every requested link; the original ignored
            # num_videos_per_keyword and always took only links[0].
            for link in links[:num_videos_per_keyword]:
                # Timeout so one dead link cannot hang the whole job.
                video_response = requests.get(link, timeout=60)
                if video_response.status_code != 200:
                    print(f"Error al descargar video desde {link}: Código de estado {video_response.status_code}")
                    continue
                # delete=False: moviepy reads the file lazily, so it must
                # outlive this `with` block (temp files are cleaned up by
                # the OS / container teardown).
                with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_video:
                    tmp_video.write(video_response.content)
                    clip = VideoFileClip(tmp_video.name)
                    processed_clip = resize_and_blur_video(clip)
                    video_clips.append(processed_clip)
        except Exception as e:
            print(f"Error procesando palabra clave '{keyword}': {e}")
            continue
    if not video_clips:
        raise Exception("No se pudieron obtener videos válidos.")
    # Randomize clip order so repeated runs vary.
    random.shuffle(video_clips)
    return concatenate_videoclips(video_clips, method="compose")
def adjust_background_music(video_duration, music_file):
    """
    Load background music and fit it to the video duration.

    The track is looped until it covers the video, trimmed to the exact
    duration, and attenuated to 20% volume. Returns None (after printing
    the error) if the file cannot be processed.
    """
    try:
        music = AudioFileClip(music_file)
        # Loop the track enough times to cover the whole video.
        if music.duration < video_duration:
            loops_needed = int(video_duration / music.duration) + 1
            music = concatenate_audioclips([music] * loops_needed)
        # Trim any excess so the music ends with the video.
        if music.duration > video_duration:
            music = music.subclip(0, video_duration)
        # Keep the music quiet under the narration.
        return music.volumex(0.2)
    except Exception as e:
        print(f"Error ajustando música: {e}")
        return None
def combine_audio_video(audio_file, video_clip, music_clip=None):
    """
    Mux narration (and optional background music) with the video.

    The video is extended to the narration length plus a 5-second tail,
    faded out, rendered as H.264/AAC at 24 fps into the output folder.
    Returns the output path, or None (after printing) on failure.
    """
    try:
        narration = AudioFileClip(audio_file)
        total_duration = narration.duration + 5  # 5 s tail for the fadeout

        # Loop the footage if it is shorter than the narration.
        if video_clip.duration < total_duration:
            video_clip = video_clip.loop(duration=total_duration)
        video_clip = video_clip.set_duration(total_duration).fadeout(5)

        final_clip = video_clip.set_audio(narration)
        if music_clip:
            # Fit the music to the final duration (loop, then trim).
            if music_clip.duration < total_duration:
                reps = int(total_duration / music_clip.duration) + 1
                music_clip = concatenate_audioclips([music_clip] * reps)
            if music_clip.duration > total_duration:
                music_clip = music_clip.subclip(0, total_duration)
            music_clip = music_clip.audio_fadeout(5)
            # Mix narration and music into a single soundtrack.
            final_clip = final_clip.set_audio(CompositeAudioClip([narration, music_clip]))

        output_path = os.path.join(output_folder, f"final_video_{int(time.time())}.mp4")
        final_clip.write_videofile(output_path, codec="libx264", audio_codec="aac", fps=24)
        return output_path
    except Exception as e:
        print(f"Error combinando audio y video: {e}")
        return None
def process_input(text, txt_file, mp3_file, selected_voice, rate, pitch, keywords):
    """
    End-to-end pipeline: text -> TTS narration -> Pexels footage -> mp4.

    Returns the generated video path on success, or an error message
    string (displayed by Gradio) on failure.
    """
    try:
        # Guard against None as well as empty text: calling .strip() on
        # None (a cleared Gradio component) crashed the original.
        if text and text.strip():
            final_text = text
        elif txt_file is not None:
            final_text = txt_file.decode("utf-8")
        else:
            return "No input provided"

        voices = asyncio.run(get_voices())
        if selected_voice not in voices:
            return f"La voz '{selected_voice}' no es válida. Por favor, seleccione una de las siguientes voces: {', '.join(voices.keys())}"

        try:
            audio_file = asyncio.run(text_to_speech(final_text, selected_voice, rate, pitch))
        except Exception as e:
            return f"Error generando audio: {e}"

        try:
            video_clip = concatenate_pexels_videos(keywords, num_videos_per_keyword=1)
        except Exception as e:
            return f"Error concatenando videos: {e}"

        if mp3_file is not None:
            music_clip = adjust_background_music(video_clip.duration, mp3_file.name)
        else:
            music_clip = None

        final_video_path = combine_audio_video(audio_file, video_clip, music_clip)
        # combine_audio_video returns None on failure; the original still
        # tried to upload that None and handed it to Gradio.
        if final_video_path is None:
            return "Error combinando audio y video"
        upload_to_google_drive(final_video_path)
        return final_video_path
    except Exception as e:
        return f"Error durante el procesamiento: {e}"
def upload_to_google_drive(file_path):
    """
    Upload *file_path* to Google Drive and return the new file id.

    Reads the API key from the GOOGLE_API_KEY environment variable.
    Returns None (after printing the error) if the key is missing or
    the upload fails; callers treat the upload as best-effort.

    NOTE(review): Drive `files().create` uploads normally require OAuth
    user credentials; a bare API key (`developerKey`) is typically
    rejected for writes — confirm this call actually succeeds.
    """
    try:
        api_key = os.getenv("GOOGLE_API_KEY")
        if not api_key:
            print("Error: GOOGLE_API_KEY no está definida en las variables de entorno.")
            return None
        # Build a Drive v3 client authenticated only with the API key.
        service = build("drive", "v3", developerKey=api_key)
        file_metadata = {"name": os.path.basename(file_path)}
        # resumable=True lets the client retry/continue large uploads.
        media = MediaFileUpload(file_path, resumable=True)
        file = service.files().create(body=file_metadata, media_body=media, fields="id").execute()
        print(f"Archivo subido exitosamente con ID: {file.get('id')}")
        return file.get("id")
    except Exception as e:
        print(f"Error subiendo a Google Drive: {e}")
        return None
# Gradio interface: inputs in the left column, generated video on the right.
with gr.Blocks() as demo:
    gr.Markdown("# Text-to-Video Generator")
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(label="Write your text here", lines=5)
            txt_file_input = gr.File(label="Or upload a .txt file", file_types=[".txt"])
            mp3_file_input = gr.File(label="Upload background music (.mp3)", file_types=[".mp3"])
            keyword_input = gr.Textbox(label="Enter keywords separated by commas (e.g., universe, galaxy, forest, cat)")
            # Voice list is fetched once at startup to populate the dropdown.
            voices = asyncio.run(get_voices())
            voice_dropdown = gr.Dropdown(choices=list(voices.keys()), label="Select Voice")
            rate_slider = gr.Slider(minimum=-50, maximum=50, value=0, label="Speech Rate Adjustment (%)", step=1)
            pitch_slider = gr.Slider(minimum=-20, maximum=20, value=0, label="Pitch Adjustment (Hz)", step=1)
        with gr.Column():
            output_video = gr.File(label="Download Generated Video")
    btn = gr.Button("Generate Video")
    # Wire the button to the full pipeline; the returned path (or error
    # string) lands in the output file component.
    btn.click(
        process_input,
        inputs=[text_input, txt_file_input, mp3_file_input, voice_dropdown, rate_slider, pitch_slider, keyword_input],
        outputs=output_video
    )

# Read the port assigned by the hosting environment (Hugging Face).
port = int(os.getenv("PORT", 7860))

# Launch the app, listening on all interfaces.
demo.launch(server_name="0.0.0.0", server_port=port, share=True)