import os
import random
import gradio as gr
from moviepy.editor import VideoFileClip, CompositeVideoClip, ImageClip
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import subprocess
# Ensure ImageMagick is installed
def install_imagemagick():
    if not os.path.exists('/usr/bin/convert'):
        subprocess.run(['apt-get', 'update'])
        subprocess.run(['apt-get', 'install', '-y', 'imagemagick'])

install_imagemagick()
def create_text_clip(text, fontsize, color, size):
    img = Image.new('RGB', size, color='black')
    draw = ImageDraw.Draw(img)
    font_path = "arial.ttf"  # Make sure this file is in the root directory of your space
    try:
        font = ImageFont.truetype(font_path, fontsize)
    except OSError:
        font = ImageFont.load_default()  # fall back if arial.ttf is missing
    # textbbox returns (left, top, right, bottom); the last two give the text's width and height
    w, h = draw.textbbox((0, 0), text, font=font)[2:]
    draw.text(((size[0] - w) / 2, (size[1] - h) / 2), text, font=font, fill=color)
    return np.array(img)
def process_video(text):
    video_folder = "videos"
    video_files = [os.path.join(video_folder, f) for f in os.listdir(video_folder) if f.endswith(('mp4', 'mov', 'avi', 'mkv'))]
    if not video_files:
        raise FileNotFoundError("No video files found in the specified directory.")

    # Pick a random source video and take a random clip of at most 60 seconds
    selected_video = random.choice(video_files)
    video = VideoFileClip(selected_video)
    start_time = random.uniform(0, max(0, video.duration - 60))
    video = video.subclip(start_time, min(start_time + 60, video.duration))

    def resize_image(image, new_size):  # currently unused helper
        pil_image = Image.fromarray(image)
        resized_pil = pil_image.resize(new_size[::-1], Image.LANCZOS)
        return np.array(resized_pil)

    # Wrap the caption to at most 8 words per line
    text_lines = text.split()
    text = "\n".join([" ".join(text_lines[i:i+8]) for i in range(0, len(text_lines), 8)])

    # Render the caption and overlay it on the clip at 50% opacity
    text_img = create_text_clip(text, fontsize=70, color='white', size=video.size)
    text_clip = ImageClip(text_img).set_duration(video.duration).set_position(('center', 'center')).set_opacity(0.5)
    final_clip = CompositeVideoClip([video, text_clip])

    output_path = "output.mp4"
    final_clip.write_videofile(output_path, codec="libx264")
    return output_path
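
# NOTE: generate_response is referenced by chat_interface() below but is not defined in this
# file. The stub here is only a hedged placeholder sketch, assuming the official `groq` Python
# client and a GROQ_API_KEY environment variable (both assumptions, not part of the original
# code); replace it with the actual backend call used by the Space.
def generate_response(prompt, history, model, temperature, max_tokens, top_p, seed):
    from groq import Groq  # assumed dependency; would need `groq` in requirements.txt

    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
    completion = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],  # history is ignored in this placeholder
        temperature=temperature,
        max_tokens=int(max_tokens),
        top_p=top_p,
        seed=int(seed) if seed else None,  # seed pass-through assumed to be supported
    )
    return completion.choices[0].message.content
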
def chat_interface():
    return gr.Interface(
        fn=generate_response,
        inputs=[
            gr.Textbox(label="Prompt"),
            gr.Textbox(label="History", type="text"),
            gr.Dropdown(choices=["llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "gemma-7b-it"], label="Model"),
            gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Temperature"),
            gr.Slider(minimum=1, maximum=32192, step=1, label="Max Tokens"),
            gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Top P"),
            gr.Number(precision=0, label="Seed")
        ],
        outputs=gr.Textbox(label="Response"),
        title="YTSHorts Maker - Chat Interface",
        description="Powered by GROQ.",
        live=True
    )
def process_video_interface():
    return gr.Interface(
        fn=process_video,
        inputs=gr.Textbox(label="Text (8 words max per line)"),
        outputs=gr.Video(label="Processed Video"),
        title="YTSHorts Maker - Video Processing",
        description="Select a video file from 'videos' folder, add text, and process.",
    )
# Main app definition
with gr.Blocks(theme=gr.themes.Soft(primary_hue="red", secondary_hue="pink")) as demo:
    with gr.Tabs():
        # Chat tab: chat_interface() is defined above but not wired into this layout
        with gr.TabItem("Video Processing"):
            text_input = gr.Textbox(lines=5, label="Text (8 words max per line)")
            process_button = gr.Button("Process Video")
            video_output = gr.Video(label="Processed Video")

            process_button.click(
                fn=process_video,
                inputs=text_input,
                outputs=video_output,
            )
# Launch the Gradio interface
if __name__ == "__main__":
    demo.launch()