KingNish committed
Commit 4731eae · verified · 1 Parent(s): a1d4f24

Update app.py

Files changed (1):
  app.py  +4 −6
app.py CHANGED
@@ -7,7 +7,7 @@ import tempfile
 import os
 
 MODEL_NAME = "ylacombe/whisper-large-v3-turbo"
-BATCH_SIZE = 8
+BATCH_SIZE = 32
 device = 0 if torch.cuda.is_available() else "cpu"
 
 pipe = pipeline(
@@ -19,15 +19,13 @@ pipe = pipeline(
 
 @spaces.GPU
 def transcribe(inputs, previous_transcription):
-    text = pipe(inputs[1], batch_size=BATCH_SIZE, generate_kwargs={"task": "transcribe"}, return_timestamps=True)["text"]
-    if previous_transcription:
-        text = previous_transcription + text
-    return text
+    previous_transcription += pipe(inputs[1], batch_size=BATCH_SIZE, generate_kwargs={"task": "transcribe"}, return_timestamps=True)["text"]
+    return previous_transcription
 
 with gr.Blocks() as demo:
     with gr.Column():
         input_audio_microphone = gr.Audio(streaming=True)
-        output = gr.Textbox(label="Transcription")
+        output = gr.Textbox(label="Transcription", value="")
 
     input_audio_microphone.stream(transcribe, [input_audio_microphone, output], [output], time_limit=15, stream_every=1, concurrency_limit=None)
 
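For context, the hunks above omit the pipeline(...) arguments and anything after the .stream() wiring. Below is a minimal sketch of what app.py looks like after this commit: the transcribe() body, BATCH_SIZE, and the Gradio wiring are taken directly from the diff, while the pipeline keyword arguments, the imports, and the demo.launch() entry point are assumptions, not part of this commit.

# Minimal sketch of app.py after this commit.
# Assumed: imports, pipeline(...) arguments, and demo.launch() (elided from the diff).
import torch
import gradio as gr
import spaces  # Hugging Face Spaces helper; provides the @spaces.GPU decorator
from transformers import pipeline

MODEL_NAME = "ylacombe/whisper-large-v3-turbo"
BATCH_SIZE = 32
device = 0 if torch.cuda.is_available() else "cpu"

# Assumed pipeline configuration (the commit does not show these arguments).
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

@spaces.GPU
def transcribe(inputs, previous_transcription):
    # inputs is the (sample_rate, numpy_array) tuple produced by gr.Audio(streaming=True);
    # inputs[1] is the raw audio chunk. Each chunk's text is appended to the running
    # transcription that the output Textbox feeds back in as previous_transcription.
    previous_transcription += pipe(
        inputs[1],
        batch_size=BATCH_SIZE,
        generate_kwargs={"task": "transcribe"},
        return_timestamps=True,
    )["text"]
    return previous_transcription

with gr.Blocks() as demo:
    with gr.Column():
        input_audio_microphone = gr.Audio(streaming=True)
        # value="" ensures the += above always concatenates onto a string,
        # even on the very first chunk.
        output = gr.Textbox(label="Transcription", value="")

    # Re-run transcribe on roughly every second of new audio,
    # feeding the previous transcription back in as state.
    input_audio_microphone.stream(
        transcribe,
        [input_audio_microphone, output],
        [output],
        time_limit=15,
        stream_every=1,
        concurrency_limit=None,
    )

if __name__ == "__main__":
    demo.launch()  # assumed entry point; not shown in the diff

Because the Textbox now starts as an empty string and is passed back into transcribe() on every stream event, the accumulated text lives in the component itself, which is why the old "if previous_transcription" guard could be dropped in favor of a plain +=.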