Spaces:
Runtime error
Runtime error
seikin_alexey
committed on
Commit
·
f446a3a
1
Parent(s):
8ac27bb
app4.py
CHANGED
|
@@ -30,34 +30,27 @@ emotion_dict = {
|
|
| 30 |
}
|
| 31 |
|
| 32 |
def predict_emotion(selected_audio):
    """Classify the emotion of a pre-recorded audio clip.

    Parameters
    ----------
    selected_audio : str | None
        File name chosen in the dropdown, relative to the ``rec/`` folder.

    Returns
    -------
    tuple[str, str | None]
        (emotion label, full file path) on success, or a prompt message and
        ``None`` when no file was selected.
    """
    # Guard against both None and "" — a dropdown with no selection may yield
    # either, and os.path.join("rec", "") would otherwise produce a bogus path.
    if not selected_audio:
        return "Please select an audio file.", None
    file_path = os.path.join("rec", selected_audio)
    # learner is the SpeechBrain classifier loaded elsewhere in this file.
    out_prob, score, index, text_lab = learner.classify_file(file_path)
    # text_lab is a list; map its first label through emotion_dict to a
    # human-readable emotion name.
    emotion = emotion_dict[text_lab[0]]
    return emotion, file_path  # Return both emotion and file path
|
| 39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
# Get the list of audio files for the dropdown
audio_files_list = get_audio_files_list()

# Loading Gradio interface
dropdown = gr.Dropdown(label="Select Audio", choices=audio_files_list)
outputs = [gr.outputs.Textbox(label="Predicted Emotion"), gr.outputs.Audio(label="Play Audio")]

# NOTE(review): the original title literal was truncated in this view
# ('"ML Speech Emotion') — closing quote restored; confirm intended wording.
title = "ML Speech Emotion"
description = "Speechbrain powered wav2vec 2.0 pretrained model on IEMOCAP dataset using Gradio."

# An Interface renders its own submit button, so no gr.Button is created or
# passed here — a Button is not an input component and passing one would make
# Gradio hand the wrapped function an extra, meaningless argument.
interface = gr.Interface(
    fn=predict_emotion,  # classify the selected file on submit
    inputs=dropdown,
    outputs=outputs,
    title=title,
    description=description,
)
interface.launch()
|
|
|
|
| 30 |
}
|
| 31 |
|
| 32 |
def predict_emotion(selected_audio):
    """Classify the emotion of a pre-recorded audio clip.

    Parameters
    ----------
    selected_audio : str | None
        File name chosen in the dropdown, relative to the ``rec/`` folder.

    Returns
    -------
    tuple[str, str | None]
        (emotion label, full file path) on success, or a prompt message and
        ``None`` when no file was selected.
    """
    # This guard was dropped by the commit; without it, an empty selection
    # makes os.path.join raise TypeError (None) or build a bogus path ("").
    if not selected_audio:
        return "Please select an audio file.", None
    file_path = os.path.join("rec", selected_audio)
    # learner is the SpeechBrain classifier loaded elsewhere in this file.
    out_prob, score, index, text_lab = learner.classify_file(file_path)
    emotion = emotion_dict[text_lab[0]]
    return emotion, file_path  # Return both emotion and file path
|
| 37 |
|
| 38 |
+
def button_click(selected_audio):
    """Button handler: classify the chosen clip and return playable output.

    Returns the predicted emotion label and the audio file path, which the
    Audio output component accepts directly for playback.
    """
    emotion, file_path = predict_emotion(selected_audio)
    # predict_emotion already builds the full "rec/<name>" path.  Return it
    # as-is: the original `gradio.Interface.Play(...)` call raised
    # AttributeError — no such attribute exists, and the module is imported
    # as `gr` elsewhere in this file, not `gradio`.
    return emotion, file_path
|
| 41 |
+
|
| 42 |
# Get the list of audio files for the dropdown
audio_files_list = get_audio_files_list()

# Loading Gradio interface
inputs = gr.Dropdown(label="Select Audio", choices=audio_files_list)
outputs = [gr.outputs.Textbox(label="Predicted Emotion"), gr.outputs.Audio(label="Play Audio")]

title = "ML Speech Emotion Detection3"
description = "Speechbrain powered wav2vec 2.0 pretrained model on IEMOCAP dataset using Gradio."

# gr.Interface.Button does not exist in the Gradio API — constructing it
# raised AttributeError at startup (the likely cause of this Space's
# "Runtime error").  An Interface renders its own submit button, so no
# explicit button component is needed; passing one as an input would also
# feed a spurious second argument to the one-parameter fn.
interface = gr.Interface(
    fn=predict_emotion,
    inputs=inputs,  # only the dropdown; a Button is not an input component
    outputs=outputs,
    title=title,
    description=description,
)
interface.launch()
|