Update app.py
Browse files
app.py
CHANGED
|
@@ -1,6 +1,9 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import whisper
|
| 3 |
from transformers import pipeline
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
# Load Whisper model
|
| 6 |
whisper_model = whisper.load_model("base")
|
|
@@ -18,9 +21,19 @@ def get_summarizer(model_name):
|
|
| 18 |
|
| 19 |
# Function to transcribe audio file using Whisper
|
| 20 |
def transcribe_audio(model_size, audio):
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
model = whisper.load_model(model_size)
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
transcription = result['text']
|
|
|
|
| 24 |
return transcription
|
| 25 |
|
| 26 |
# Function to summarize the transcribed text
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import whisper
|
| 3 |
from transformers import pipeline
|
| 4 |
+
import torch
|
| 5 |
+
import numpy as np
|
| 6 |
+
import librosa
|
| 7 |
|
| 8 |
# Load Whisper model
|
| 9 |
whisper_model = whisper.load_model("base")
|
|
|
|
| 21 |
|
| 22 |
# Function to transcribe audio file using Whisper
|
| 23 |
def transcribe_audio(model_size, audio):
    """Transcribe an audio file with OpenAI Whisper.

    Parameters
    ----------
    model_size : str
        Whisper checkpoint name (e.g. "tiny", "base", "small").
    audio : str | None
        Path to the audio file as provided by the Gradio audio input;
        None when the user submitted without recording/uploading.

    Returns
    -------
    str
        The transcribed text, or a human-readable message when no
        audio was supplied.
    """
    if audio is None:
        return "No audio file provided."

    # Cache loaded models on the function object so repeated calls with
    # the same size don't reload the checkpoint from disk every time.
    cache = transcribe_audio.__dict__.setdefault("_models", {})
    if model_size not in cache:
        cache[model_size] = whisper.load_model(model_size)
    model = cache[model_size]

    # Whisper expects 16 kHz mono float audio; librosa resamples for us.
    # The returned sample rate is fixed by sr=16000, so it is discarded.
    audio_data, _ = librosa.load(audio, sr=16000)

    # Transcribe and return just the text field of the result dict.
    result = model.transcribe(audio_data)
    return result['text']
|
| 38 |
|
| 39 |
# Function to summarize the transcribed text
|