# Streamlit Space: records microphone audio in the browser and transcribes it
# to text with a Wav2Vec2 CTC model.
import io
import wave

import numpy as np
import streamlit as st
import torch
from audio_recorder_streamlit import audio_recorder
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
#from transformers import Speech2Text2Processor, SpeechEncoderDecoderModel
@st.cache_resource
def _load_asr():
    """Load and cache the Wav2Vec2 processor and model.

    st.cache_resource ensures the (large) pretrained weights are downloaded
    and constructed once per server process instead of on every recording.
    """
    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
    return processor, model


# Function to transcribe audio to text
def transcribe_audio(audio_bytes):
    """Transcribe recorded WAV audio bytes to text with Wav2Vec2 CTC.

    Args:
        audio_bytes: Raw WAV file bytes as produced by ``audio_recorder``
            (assumed 16 kHz, 16-bit PCM mono — TODO confirm recorder output).

    Returns:
        The decoded transcription string (may be empty for silent audio).
    """
    processor, model = _load_asr()
    # Parse the WAV container properly: calling np.frombuffer on the whole
    # buffer would also reinterpret the RIFF/WAVE header bytes as samples.
    with wave.open(io.BytesIO(audio_bytes), "rb") as wav_file:
        frames = wav_file.readframes(wav_file.getnframes())
    audio_array = np.frombuffer(frames, dtype=np.int16)
    # Normalize int16 PCM into [-1.0, 1.0) as float32 — the model weights
    # are float32, so feeding float64 inputs raises a dtype mismatch.
    audio_float = audio_array.astype(np.float32) / 32768.0
    input_values = processor(
        audio_float, return_tensors="pt", sampling_rate=16000
    ).input_values
    # Inference only: skip autograd bookkeeping.
    with torch.no_grad():
        logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.decode(predicted_ids[0])
    return transcription
# ---------------- Streamlit UI ----------------
st.title("Audio to Text Transcription..")

# Record from the browser microphone; recording ends after ~3 s of silence.
recorded_bytes = audio_recorder(pause_threshold=3.0, sample_rate=16_000)

if not recorded_bytes:
    # Guard clause: nothing was captured, so there is nothing to transcribe.
    st.write("No audio recorded.")
else:
    # Let the user replay what was captured before showing the result.
    st.audio(recorded_bytes, format="audio/wav")
    text = transcribe_audio(recorded_bytes)
    if text:
        st.write("Transcription:")
        st.write(text)
    else:
        st.write("Error: Failed to transcribe audio.")