import spaces
import torch
import numpy as np
import gradio as gr
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read
import tempfile
import os

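# Whisper checkpoint and inference settings; device 0 selects the first CUDA GPU when available.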
MODEL_NAME = "ylacombe/whisper-large-v3-turbo"
BATCH_SIZE = 8
device = 0 if torch.cuda.is_available() else "cpu"

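# Build the transformers ASR pipeline; chunk_length_s controls how incoming audio
# is split into chunks before transcription.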
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=1,
    device=device,
)

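# On Hugging Face ZeroGPU Spaces, @spaces.GPU allocates a GPU for the duration of each call.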
@spaces.GPU
def transcribe(inputs, previous_transcription):
    # Gradio streams audio as a (sample_rate, numpy array) tuple; convert it to
    # the float dict format the transformers ASR pipeline expects.
    sample_rate, audio = inputs
    audio = audio.astype(np.float32)
    audio /= max(np.abs(audio).max(), 1e-8)  # normalize, guarding against silence
    text = pipe({"sampling_rate": sample_rate, "raw": audio}, batch_size=BATCH_SIZE,
                generate_kwargs={"task": "transcribe"}, return_timestamps=True)["text"]
    if previous_transcription:
        text = previous_transcription + text
    return text

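# UI: a streaming microphone input wired to a textbox that accumulates the transcription.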
with gr.Blocks() as demo:
    with gr.Column():
        input_audio_microphone = gr.Audio(sources=["microphone"], streaming=True)
        output = gr.Textbox(label="Transcription")

    input_audio_microphone.stream(
        transcribe, [input_audio_microphone, output], [output],
        time_limit=15, stream_every=1, concurrency_limit=None,
    )

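# Enable queuing (used to manage concurrent streaming sessions) and launch the app.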
demo.queue().launch()