# text2video/app.py
import gradio as gr
import os
import api_get
# from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML  # unused; left commented out so the script runs without the external "modules" package
# a = os.path.join(os.path.dirname(__file__), "files/world.mp4") # Video
# b = os.path.join(os.path.dirname(__file__), "files/a.mp4") # Video
# c = os.path.join(os.path.dirname(__file__), "files/b.mp4") # Video
clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️
def generate_video(text='<break time="5000"/>,示例文案,<break time="5000"/>'):  # default: SSML pause, placeholder text ("sample copy"), SSML pause
    # Forward the SSML-marked text to the backend and return the generated video.
    return api_get.generate_video(text=text)

def generate_audio(text='<break time="5000"/>,示例文案,<break time="5000"/>'):
    # Forward the SSML-marked text to the backend and return the generated audio.
    return api_get.generate_audio(text=text)
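# The api_get module (project-local, not shown here) is assumed to call the remote
# TTS / video-generation service and return values the Gradio output components can
# display: a video file path or URL for gr.Video, and a file path or
# (sample_rate, numpy_array) tuple for gr.Audio. A minimal hypothetical stand-in
# for local testing could look like this (a sketch, not the real backend client;
# the file names are placeholders taken from the commented-out paths above):
#
#   # api_get.py (sketch)
#   def generate_video(text):
#       return "files/world.mp4"
#   def generate_audio(text):
#       return "files/a.mp4"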
with gr.Blocks() as demo:
with gr.Tab("文生视频"):
id_part1 = "text_to_video"
with gr.Row():
with gr.Column():
text2video_input = gr.Textbox(placeholder = "输入文案", label = "文字")
with gr.Row():
text2video_button2 = gr.Button("插入停顿", variant='secondary')
text2video_button3 = gr.Button("插入数量", variant='secondary')
cancel_button1 = gr.ClearButton(variant='stop')
text2video_button = gr.Button("生成", variant='primary')
video_output = gr.Video( label = "视频")
text2video_button.click(generate_video, inputs=text2video_input, outputs=video_output)
cancel_button1.click(lambda x: "", inputs = text2video_input, outputs=text2video_input)
text2video_button2.click(lambda x: x+"<break time=\"5000\"/>,", inputs = text2video_input, outputs=text2video_input)
text2video_button3.click(lambda x: x+"<say-as interpret-as=\"cardinal\">123456</say-as>,", inputs = text2video_input, outputs=text2video_input)
with gr.Tab("文生音频"):
id_part2 = "text_to_audio"
with gr.Row():
with gr.Column():
text2audio_input = gr.Textbox(placeholder = "输入文案", label = "文字")
with gr.Row():
text2audio_button2 = gr.Button("插入停顿", variant='secondary')
text2audio_button3 = gr.Button("插入数量", variant='secondary')
cancel_button2 = gr.ClearButton(variant='stop')
text2audio_button = gr.Button("生成", variant='primary')
audio_output = gr.Audio(label = "音频")
text2audio_button.click(generate_audio, inputs=text2audio_input, outputs=audio_output)
cancel_button2.click(lambda x: "", inputs = text2audio_input, outputs=text2audio_input)
text2audio_button2.click(lambda x: x+"<break time=\"5000\"/>,", inputs = text2audio_input, outputs=text2audio_input)
text2audio_button3.click(lambda x: x+"<say-as interpret-as=\"cardinal\">123456</say-as>,", inputs = text2audio_input, outputs=text2audio_input)
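# The backend calls above can be slow for long texts. If requests start timing out,
# chaining Gradio's request queue before launch is a common pattern (assumes a
# Gradio version that supports demo.queue()):
#
#   if __name__ == "__main__":
#       demo.queue().launch()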
if __name__ == "__main__":
    demo.launch()