from speechbrain.pretrained.interfaces import foreign_class
import gradio as gr
import os
import warnings

warnings.filterwarnings("ignore")
# Loading the SpeechBrain emotion detection model
learner = foreign_class(
    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
    pymodule_file="custom_interface.py",
    classname="CustomEncoderWav2vec2Classifier"
)
# Emotion dictionary mapping the model's short labels to readable names
emotion_dict = {
    'sad': 'Sad',
    'hap': 'Happy',
    'ang': 'Anger',
    'fea': 'Fear',
    'sur': 'Surprised',
    'neu': 'Neutral'
}
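# Note (assumption, not from the original listing): the IEMOCAP checkpoint used above is
# generally documented as a four-class model (neutral, anger, happiness, sadness), so the
# 'fea' and 'sur' entries would likely never be returned by classify_file.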
# Function for classification of uploaded files
def predict_emotion_upload(audio):
    # With type="filepath" on the Audio component below, `audio` is already a path string
    out_prob, score, index, text_lab = learner.classify_file(audio)
    # Return the label for the textbox and the path so the player can replay the upload
    return emotion_dict[text_lab[0]], audio
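# For reference: classify_file returns (class probabilities, best score, best index, text labels);
# text_lab is a list such as ['neu'], which is why only its first element is looked up above.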
# Function for classification of files selected from the dropdown
def predict_emotion_select(filename):
    file_path = os.path.join('rec', filename)
    out_prob, score, index, text_lab = learner.classify_file(file_path)
    return emotion_dict[text_lab[0]]

# Function returning the selected file's path for the audio player component
def create_audio_player(filename):
    file_path = os.path.join('rec', filename)
    return file_path
# Retrieve a list of audio file names from the 'rec' directory
# (the dropdown itself is created inside the Blocks layout below so it actually renders)
audio_files = os.listdir('rec')
# Define Gradio interface components for both tabs
with gr.Blocks() as demo:
    gr.Markdown("## ML Speech Emotion Detection")
    gr.Markdown("SpeechBrain-powered wav2vec 2.0 model pretrained on the IEMOCAP dataset.")
    with gr.Tabs():
        with gr.TabItem("Upload Audio"):
            with gr.Group():
                # type="filepath" (rather than the deprecated "file") hands the handler a path string
                audio_upload = gr.Audio(label="Upload Audio", type="filepath")
                submit_btn_1 = gr.Button("Classify Uploaded Audio")
                audio_player_1 = gr.Audio(label="Uploaded Audio Player", interactive=True)
                output_text_1 = gr.Textbox(label="Prediction")
                submit_btn_1.click(predict_emotion_upload, inputs=audio_upload,
                                   outputs=[output_text_1, audio_player_1])
| with gr.TabItem("Select from List"): | |
| with gr.Group(): | |
| submit_btn_2 = gr.Button("Classify Selected Audio") | |
| audio_player_2 = gr.Audio(label="Selected Audio Player", interactive=True) | |
| output_text_2 = gr.Textbox(label="Prediction") | |
| audio_files_dropdown.change(create_audio_player, inputs=audio_files_dropdown, outputs=audio_player_2) | |
| submit_btn_2.click(predict_emotion_select, inputs=audio_files_dropdown, outputs=output_text_2) | |
demo.launch()
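# Assumed setup, not shown in the original listing: a 'rec/' directory of audio files sitting
# next to app.py, plus a requirements.txt along the lines of
#   gradio
#   speechbrain
#   torch
#   torchaudio
# (torchaudio is the audio backend SpeechBrain typically relies on for classify_file).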