|
import gradio as gr |
|
import torch |
|
import soundfile as sf |
|
import spaces |
|
import os |
|
import numpy as np |
|
import re |
|
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan |
|
from speechbrain.pretrained import EncoderClassifier |
|
from datasets import load_dataset |
|
|
|
# Run inference on GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
def load_models_and_data():
    """Load the TTS stack, the speaker encoder, and one reference example.

    Returns:
        Tuple of (model, processor, vocoder, speaker_model, example), where
        ``example`` is a single row of the Turkish voice dataset whose audio
        is later used to build the default speaker embedding.
    """
    tts_checkpoint = "microsoft/speecht5_tts"
    processor = SpeechT5Processor.from_pretrained(tts_checkpoint)
    model = SpeechT5ForTextToSpeech.from_pretrained(
        "emirhanbilgic/speecht5_finetuned_emirhan_tr"
    ).to(device)
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

    # x-vector speaker encoder used to derive per-speaker embeddings.
    encoder_source = "speechbrain/spkrec-xvect-voxceleb"
    speaker_model = EncoderClassifier.from_hparams(
        source=encoder_source,
        run_opts={"device": device},
        savedir=os.path.join("/tmp", encoder_source),
    )

    dataset = load_dataset("erenfazlioglu/turkishvoicedataset", split="train")
    # Arbitrary reference utterance (index 304) chosen as the default voice.
    example = dataset[304]

    return model, processor, vocoder, speaker_model, example
|
|
|
# Load all models once at import time so every Gradio request reuses them.
model, processor, vocoder, speaker_model, default_example = load_models_and_data()
|
|
|
def create_speaker_embedding(waveform):
    """Compute an L2-normalized x-vector speaker embedding for a waveform.

    Args:
        waveform: 1-D array-like of audio samples.

    Returns:
        A squeezed torch tensor containing the speaker embedding.
    """
    batch = torch.tensor(waveform).unsqueeze(0).to(device)
    with torch.no_grad():
        embedding = speaker_model.encode_batch(batch)
        embedding = torch.nn.functional.normalize(embedding, dim=2)
    return embedding.squeeze()
|
|
|
def prepare_default_embedding(example):
    """Derive the default speaker embedding from a dataset row's audio array."""
    return create_speaker_embedding(example["audio"]["array"])
|
|
|
# Precompute the fallback speaker embedding from the reference dataset example.
default_embedding = prepare_default_embedding(default_example)
|
|
|
|
|
# Somali spellings for the base numbers used by number_to_words below.
number_words = {
    0: "eber", 1: "koow", 2: "labo", 3: "seddex", 4: "afar", 5: "shan",
    6: "lix", 7: "todobo", 8: "sideed", 9: "sagaal", 10: "toban",
    11: "toban iyo koow", 12: "toban iyo labo", 13: "toban iyo seddex",
    14: "toban iyo afar", 15: "toban iyo shan", 16: "toban iyo lix",
    17: "toban iyo todobo", 18: "toban iyo sideed", 19: "toban iyo sagaal",
    20: "labaatan", 30: "sodon", 40: "afartan", 50: "konton",
    60: "lixdan", 70: "todobaatan", 80: "sideetan", 90: "sagaashan",
    100: "boqol", 1000: "kun",
}


def number_to_words(number):
    """Spell out a non-negative integer in Somali words.

    Args:
        number: Non-negative integer.

    Returns:
        The Somali spelling; numbers >= 10**12 are returned unchanged as
        decimal strings.
    """
    if number < 20:
        return number_words[number]
    if number < 100:
        tens, unit = divmod(number, 10)
        return number_words[tens * 10] + (" " + number_words[unit] if unit else "")
    if number < 1000:
        hundreds, remainder = divmod(number, 100)
        # Bug fix: the originals returned uppercase "BOQOL"/"KUN" for exactly
        # one hundred/thousand, inconsistent with every other (lowercase) word
        # this function emits — and normalize_text() lowercases text *before*
        # number replacement, so the uppercase leaked into the final output.
        prefix = number_words[hundreds] + " boqol" if hundreds > 1 else "boqol"
        return prefix + (" " + number_to_words(remainder) if remainder else "")
    if number < 1000000:
        thousands, remainder = divmod(number, 1000)
        prefix = number_to_words(thousands) + " kun" if thousands > 1 else "kun"
        return prefix + (" " + number_to_words(remainder) if remainder else "")
    if number < 1000000000:
        millions, remainder = divmod(number, 1000000)
        return number_to_words(millions) + " malyan" + (" " + number_to_words(remainder) if remainder else "")
    if number < 1000000000000:
        billions, remainder = divmod(number, 1000000000)
        return number_to_words(billions) + " milyaar" + (" " + number_to_words(remainder) if remainder else "")
    return str(number)


def replace_numbers_with_words(text):
    """Replace every standalone run of digits in *text* with Somali words."""

    def replace(match):
        return number_to_words(int(match.group()))

    return re.sub(r'\b\d+\b', replace, text)
|
|
|
def normalize_text(text):
    """Normalize text for TTS: lowercase, spell out digits, strip punctuation.

    Args:
        text: Raw input text.

    Returns:
        The normalized string.
    """
    text = text.lower()

    # Spell out digit runs in Somali *after* lowercasing.
    text = replace_numbers_with_words(text)

    # Bug fix: the original looped over a module-level `replacements` table
    # that is never defined anywhere in this file, raising NameError on every
    # call. The character-substitution step is removed until such a table is
    # actually provided.

    # Drop everything that is neither a word character nor whitespace.
    text = re.sub(r'[^\w\s]', '', text)

    return text
|
|
|
@spaces.GPU(duration=60)
def text_to_speech(text, audio_file=None):
    """Synthesize speech for *text*, optionally cloning a reference voice.

    Args:
        text: Input text (normalized before synthesis).
        audio_file: Optional path to a reference audio file. Bug fix: the
            original accepted this parameter but silently ignored it and
            always used the default embedding.

    Returns:
        Tuple of (sample_rate, waveform ndarray) for gr.Audio.
    """
    normalized_text = normalize_text(text)

    inputs = processor(text=normalized_text, return_tensors="pt").to(device)

    if audio_file is not None:
        waveform, _sample_rate = sf.read(audio_file)
        # Downmix multi-channel audio to mono for the speaker encoder.
        if waveform.ndim > 1:
            waveform = waveform.mean(axis=1)
        # NOTE(review): the x-vector encoder presumably expects 16 kHz input;
        # resampling is not performed here — confirm and add if needed.
        speaker_embeddings = create_speaker_embedding(waveform)
    else:
        speaker_embeddings = default_embedding

    with torch.no_grad():
        speech = model.generate_speech(inputs["input_ids"], speaker_embeddings.unsqueeze(0), vocoder=vocoder)

    # SpeechT5's HiFi-GAN vocoder produces 16 kHz audio.
    return (16000, speech.cpu().numpy())
|
|
|
# Gradio UI: a single text box in, synthesized audio out.
iface = gr.Interface(
    fn=text_to_speech,
    inputs=[
        gr.Textbox(label="soo gali somali language"),
    ],
    outputs=[
        gr.Audio(label="Generated Speech", type="numpy"),
    ],
    title="soomaali",
    # Bug fix: corrected the misspelled user-facing description
    # ("soomaal lnaguage.").
    description="Somali language text-to-speech.",
)

iface.launch(share=True)
|
|