# Hugging Face Space: voice-driven Geriatric Depression Scale (GDS) quiz.
# NOTE: the hosted Space was crashing at runtime ("Runtime error" on the Spaces page).
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline

# GPT-2 model and tokenizer, used by modified_summarize() below.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Shared Whisper ASR pipeline (speech -> text), loaded once at import time
# so each request does not re-download/re-initialise the model.
translation_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2")
# Geriatric Depression Scale (GDS, short form) quiz questions.
# One yes/no question per list entry; the interface builds one audio input per question.
questions = [
    "Are you basically satisfied with your life?",
    "Have you dropped many of your activities and interests?",
    "Do you feel that your life is empty?",
    "Do you often get bored?",
    "Are you in good spirits most of the time?",
    "Are you afraid that something bad is going to happen to you?",
    "Do you feel happy most of the time?",
    "Do you often feel helpless?",
    "Do you prefer to stay at home, rather than going out and doing things?",
    "Do you feel that you have more problems with memory than most?",
    "Do you think it is wonderful to be alive now?",
    "Do you feel worthless the way you are now?",
    "Do you feel full of energy?",
    "Do you feel that your situation is hopeless?",
    "Do you think that most people are better off than you are?",
]
def ask_questions(answers):
    """Score the quiz: one point for every "yes" answer.

    Args:
        answers: iterable of "yes"/"no" strings (case-insensitive; surrounding
            whitespace is ignored, since these come from ASR transcripts).

    Returns:
        int: number of "yes" answers.

    Raises:
        ValueError: if any answer is neither "yes" nor "no".

    NOTE(review): the clinical GDS reverse-scores several items (a "no" on
    some questions indicates depression); this simple yes-count mirrors the
    original app's behavior — confirm the intended scoring with the authors.
    """
    score = 0
    for answer in answers:
        normalized = answer.strip().lower()
        if normalized == 'yes':
            score += 1
        elif normalized != 'no':
            raise ValueError(f"Invalid answer: {answer}")
    return score
def understand_answers(audio_answers):
    """Convert audio answers to text using the shared Whisper ASR pipeline.

    Args:
        audio_answers: iterable of audio inputs accepted by the ASR pipeline
            (e.g. file paths or raw waveforms from the gradio Audio component).

    Returns:
        list[str]: one transcript per audio answer, in input order.
    """
    text_answers = []
    for audio in audio_answers:
        # Reuse the module-level pipeline instead of re-loading
        # whisper-large-v2 on every call (the original did the latter).
        transcript = translation_pipeline(audio)
        # ASR pipelines return a dict {"text": "..."} for a single input,
        # not a list of {"generated_text": ...} — the original indexing
        # raised at runtime.
        text_answers.append(transcript["text"])
    return text_answers
# Removed the old `understand` function: its functionality is covered by understand_answers.
# Kept the `whisper` function for text-to-speech conversion.
def whisper(text):
    """Convert text to speech and return audio playable by a gradio Audio output.

    Args:
        text: the text to synthesize.

    Returns:
        tuple[int, numpy.ndarray]: (sampling_rate, waveform) — the pair format
        the gradio Audio component accepts.

    NOTE(review): the original used facebook/wav2vec2-base-960h, which is a
    speech-RECOGNITION checkpoint and cannot run in a "text-to-speech"
    pipeline; it also read speech[0]['generated_text'], which is not the TTS
    output shape. Swapped in a genuine TTS checkpoint — confirm model choice.
    """
    tts_pipeline = pipeline("text-to-speech", model="suno/bark-small")
    speech = tts_pipeline(text)
    # TTS pipelines return {"audio": ndarray, "sampling_rate": int}.
    return (speech["sampling_rate"], speech["audio"])
def modified_summarize(answers):
    """Summarize the quiz answers with the module-level GPT-2 model.

    Args:
        answers: list of transcript strings.

    Returns:
        str: the generated continuation (the summary), without the prompt.

    NOTE(review): GPT-2 is not a summarization model and the "summarize: "
    prefix is a T5-style prompt — output quality will be poor; consider a
    dedicated summarization checkpoint.
    """
    answers_str = " ".join(answers)
    inputs = tokenizer.encode("summarize: " + answers_str, return_tensors='pt')
    summary_ids = model.generate(
        inputs,
        # max_new_tokens (not max_length) so long prompts can't exceed the
        # generation budget and error/truncate the output.
        max_new_tokens=150,
        num_beams=5,
        early_stopping=True,
        # GPT-2 has no pad token; use EOS to silence the generate() warning.
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens so the prompt isn't echoed back.
    return tokenizer.decode(summary_ids[0][inputs.shape[-1]:], skip_special_tokens=True)
def assistant(*audio_answers):
    """Run the full quiz pipeline: transcribe, score, summarize, synthesize.

    Args:
        *audio_answers: one audio recording per quiz question.

    Returns:
        tuple: (score text, summary text, speech audio, first transcript) —
        a 4-tuple matching the four output components of the interface.
        (The original returned a dict keyed by plain strings, which
        gr.Interface does not accept: dict returns require component
        objects as keys.)
    """
    # Convert audio answers to text.
    answers = understand_answers(audio_answers)
    # Calculate score and summarize.
    score = ask_questions(answers)
    summary = modified_summarize(answers)
    # Convert the summary to speech.
    speech = whisper(summary)
    # First answer is already text after transcription.
    return f"Score: {score}", f"Summary: {summary}", speech, answers[0]
# Build the interface: one microphone input per question, four outputs
# (score text, summary text, synthesized speech, first transcript).
# NOTE: gr.inputs.Audio / gr.outputs.Audio, source= and type="auto" were
# removed in Gradio 3/4 and crash the Space; use the current gr.Audio API.
iface_score = gr.Interface(
    fn=assistant,
    inputs=[gr.Audio(sources=["microphone"], type="filepath") for _ in questions],
    outputs=["text", "text", gr.Audio(), "text"],
)
iface_score.launch()