Commit d347764 · Parent(s): 8ff3567

Create app.py
app.py ADDED

@@ -0,0 +1,48 @@
+import gradio as gr
+import numpy as np
+import torch
+from datasets import load_dataset
+
+from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
+
+
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+# load speech translation checkpoint
+asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
+
+# load text-to-speech checkpoint and speaker embeddings
+processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+
+model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
+vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
+
+embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+
+
+def translate(audio):
+    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
+    return outputs["text"]
+
+
+def synthesise(text):
+    inputs = processor(text=text, return_tensors="pt")
+    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
+    return speech.cpu()
+
+
+def speech_to_speech_translation(audio):
+    translated_text = translate(audio)
+    synthesised_speech = synthesise(translated_text)
+    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
+    return 16000, synthesised_speech
+
+
+demo = gr.Interface(
+    fn=speech_to_speech_translation,
+    inputs=gr.Audio(type="filepath"),
+    outputs=gr.Audio(label="Generated Speech", type="numpy"),
+    examples=[["./example.wav"]],
+)
+demo.launch()
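Note on the output format (not part of the commit itself): speech_to_speech_translation returns a (sample_rate, waveform) tuple, which is the numpy audio format that gr.Audio(type="numpy") consumes, and the * 32767 scaling converts the model's float output in [-1.0, 1.0] to 16-bit PCM. A minimal standalone sketch of that contract, assuming scipy is available; the 440 Hz tone and the tone.wav filename are illustrative only:

import numpy as np
from scipy.io import wavfile

# Build one second of a 440 Hz tone at 16 kHz, then apply the same
# float-to-int16 scaling that app.py uses before handing audio to Gradio.
sample_rate = 16000
t = np.linspace(0.0, 1.0, sample_rate, endpoint=False)
waveform = (np.sin(2.0 * np.pi * 440.0 * t) * 32767).astype(np.int16)

# (sample_rate, int16 array) is the tuple shape gr.Audio(type="numpy") expects;
# writing it to disk confirms the payload is plain 16-bit PCM.
wavfile.write("tone.wav", sample_rate, waveform)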