# Hugging Face Spaces app: high-fidelity text-to-speech demo (Gradio UI).
import logging
import os
import tarfile
import time
import urllib.request
import uuid

import gradio as gr
import soundfile as sf

from model import get_pretrained_model, language_to_models
# Function to update model dropdown based on language selection
def update_model_dropdown(language):
    """Return a dropdown update listing the models available for *language*.

    Raises:
        ValueError: If *language* has no registered models.
    """
    try:
        models = language_to_models[language]
    except KeyError:
        raise ValueError(f"Unsupported language: {language}") from None
    return gr.Dropdown.update(choices=models)
# Function to process text to speech conversion
def process(language, repo_id, text, sid, speed):
    """Synthesize *text* with the selected model and return the result.

    Args:
        language: Selected language label (the model itself is chosen via repo_id).
        repo_id: Model repository id passed to get_pretrained_model().
        text: Text to convert to speech.
        sid: Speaker id as a string; valid only for multi-speaker models.
        speed: Speech speed factor (larger -> faster).

    Returns:
        Tuple of (path to the generated wav file, HTML info string).
    """
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logging.info("Input text: %s, SID: %s, Speed: %s", text, sid, speed)
    sid = int(sid)
    tts = get_pretrained_model(repo_id, speed)

    start = time.time()
    audio = tts.generate(text, sid=sid)
    elapsed_seconds = time.time() - start

    duration = len(audio.samples) / audio.sample_rate
    # Guard against empty synthesis output (e.g. empty input text), which
    # would otherwise raise ZeroDivisionError when computing the RTF.
    rtf = elapsed_seconds / duration if duration > 0 else 0.0

    info = f"""
    Wave duration: {duration:.3f} s<br/>
    Processing time: {elapsed_seconds:.3f} s<br/>
    RTF: {rtf:.3f}<br/>
    """
    logging.info(info)

    # Unique name so concurrent requests never clobber each other's files.
    filename = f"{uuid.uuid4()}.wav"
    sf.write(filename, audio.samples, samplerate=audio.sample_rate, subtype="PCM_16")
    return filename, info
# Interface layout
# Build the Gradio Blocks UI. `demo` is launched from the main guard below.
demo = gr.Blocks()
with demo:
    gr.Markdown("# Text to Voice")
    gr.Markdown("High Fidelity TTS. Visit <a href='https://ruslanmv.com/' target='_blank'>ruslanmv.com</a> for more information.")

    available_languages = list(language_to_models.keys())
    lang_selector = gr.Radio(label="Language", choices=available_languages)
    # Start the dropdown on the first language's models; it is refreshed
    # whenever the language radio changes.
    model_selector = gr.Dropdown(
        label="Select a model",
        choices=language_to_models[available_languages[0]],
    )
    lang_selector.change(update_model_dropdown, inputs=lang_selector, outputs=model_selector)

    text_input = gr.Textbox(lines=10, label="Enter text to convert to speech")
    speaker_input = gr.Textbox(label="Speaker ID", value="0", placeholder="Valid only for multi-speaker model")
    speed_slider = gr.Slider(minimum=0.1, maximum=10, value=1, step=0.1, label="Speed (larger->faster; smaller->slower)")

    audio_output = gr.Audio(label="Generated audio")
    info_output = gr.HTML(label="Info")

    submit_button = gr.Button("Submit")
    submit_button.click(
        process,
        inputs=[lang_selector, model_selector, text_input, speaker_input, speed_slider],
        outputs=[audio_output, info_output],
    )

    gr.Examples(
        examples=[
            ["Chinese (Mandarin, 普通话)", "csukuangfj/vits-zh-hf-fanchen-wnj|1", "在一个阳光明媚的夏天...", 0, 1.0],
            ["Thai", "csukuangfj/vits-mms-tha", "ฉันรักคุณ", 0, 1.0],
        ],
        inputs=[lang_selector, model_selector, text_input, speaker_input, speed_slider],
        outputs=[audio_output, info_output],
    )
# Download necessary data
def download_espeak_ng_data():
    """Download and unpack espeak-ng data into /tmp (best effort).

    The original implementation shelled out via ``os.system`` and silently
    ignored wget/tar failures; this version uses the standard library and
    logs any error instead, while remaining best-effort: the app can still
    start for models that do not need espeak-ng data.
    """
    url = (
        "https://github.com/k2-fsa/sherpa-onnx/releases/download/"
        "tts-models/espeak-ng-data.tar.bz2"
    )
    archive = os.path.join("/tmp", "espeak-ng-data.tar.bz2")
    try:
        urllib.request.urlretrieve(url, archive)
        with tarfile.open(archive, "r:bz2") as tar:
            tar.extractall("/tmp")
    except Exception:
        # Log instead of crashing; startup continues without the data.
        logging.exception("Failed to download espeak-ng data from %s", url)
if __name__ == "__main__":
    # Configure logging before any work so messages emitted during the
    # data download (e.g. logging.exception on failure) are not lost.
    logging.basicConfig(level=logging.INFO)
    download_espeak_ng_data()
    demo.launch()