Update app.py
app.py CHANGED
Previous version (removed lines are prefixed with -):

@@ -1,36 +1,33 @@
- # app.py
- import os
- # ─── Work around DeepFace being unable to write to its default path ─────
- # Point the DeepFace cache directory at a writable location under /tmp
- os.environ["DEEPFACE_HOME"] = "/tmp/.deepface"
-
  import gradio as gr
- import numpy as np
- import
- import librosa
  from deepface import DeepFace

- #
-
- MODEL_PATH = os.path.join(os.path.dirname(__file__), "voice_model.joblib")
- audio_model = joblib.load(MODEL_PATH)

- #
- def analyze_face(frame
- #
-
-

  def analyze_audio(wav_file):
-
-
-     y, sr = librosa.load(io.BytesIO(
      mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
-
-

  def analyze_text(txt):
-
      mapping = {
          "😊 happy": ["開心","快樂","愉快","喜悅","歡喜","興奮","高興","歡"],
          "😠 angry": ["生氣","憤怒","不爽","發火","火大","氣憤"],
@@ -38,40 +35,33 @@ def analyze_text(txt):
          "😲 surprise":["驚訝","意外","嚇","好奇","驚詫","詫異","訝異"],
          "😨 fear": ["怕","恐懼","緊張","懼","膽怯","畏"],
      }
-     for emo,
-         if any(w in txt for w in
              return emo
-     return "

- #
- with gr.Blocks(
-     gr.Markdown("
      with gr.Tabs():
-         with gr.TabItem("📷
-
-
-
-
-
-
-
-             outputs=[out_img, out_lbl],
-             live=True
-         )
-
-         with gr.TabItem("🎤 上傳語音檔"):
-             wav = gr.File(label="選擇 .wav 檔案", file_types=[".wav"])
-             wav_btn = gr.Button("開始分析")
-             wav_out = gr.Textbox(label="語音偵測到的情緒")
-             wav_btn.click(fn=analyze_audio, inputs=wav, outputs=wav_out)
-
          with gr.TabItem("⌨️ 輸入文字"):
-
-             txt_btn = gr.Button("
-             txt_out = gr.Textbox(label="
-             txt_btn.click(

- # Launch
  if __name__ == "__main__":
-
-     demo.launch()
New version (added lines are prefixed with +):

  import gradio as gr
+ import numpy as np, cv2, io, base64
+ import librosa, joblib
  from deepface import DeepFace

+ # Preload the voice model
+ audio_model = joblib.load("voice_model.joblib")

+ # Facial emotion analysis
+ def analyze_face(frame):
+     # frame: H×W×3 numpy array, RGB
+     try:
+         res = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
+         return res['dominant_emotion']
+     except Exception as e:
+         return f"Error: {e}"

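Depending on the installed deepface version, `DeepFace.analyze` returns either a single result dict or a list with one dict per detected face, so indexing `res['dominant_emotion']` directly can fail on newer releases. The diff does not pin a deepface version, so this is an assumption; a minimal sketch of `analyze_face` that tolerates both return shapes:

```python
from deepface import DeepFace

def analyze_face(frame):
    # frame: H x W x 3 numpy array, RGB
    try:
        res = DeepFace.analyze(frame, actions=["emotion"], enforce_detection=False)
        # Newer deepface releases return a list of per-face dicts,
        # older ones return a single dict; normalise to one dict.
        if isinstance(res, list):
            res = res[0]
        return res["dominant_emotion"]
    except Exception as e:
        return f"Error: {e}"
```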
+ # Speech emotion analysis
  def analyze_audio(wav_file):
+     if wav_file is None: return "no audio"
+     data = wav_file.read()
+     y, sr = librosa.load(io.BytesIO(data), sr=None)
      mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
+     feat = np.mean(mfccs.T, axis=0)
+     pred = audio_model.predict([feat])[0]
+     return pred

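`analyze_audio` assumes `voice_model.joblib` is a classifier trained on the same 13-value mean-MFCC feature it computes here. The diff does not show how that model was produced; a hypothetical training sketch that would be shape-compatible, assuming scikit-learn and a folder layout of `data/<label>/*.wav` (the paths and the SVC choice are placeholders, not the author's actual pipeline):

```python
import glob, os
import numpy as np
import librosa
import joblib
from sklearn.svm import SVC

def mfcc_features(path):
    # Same feature as app.py: mean of 13 MFCCs over time.
    y, sr = librosa.load(path, sr=None)
    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    return np.mean(mfccs.T, axis=0)

X, labels = [], []
for path in glob.glob("data/*/*.wav"):   # hypothetical layout: data/<emotion>/<file>.wav
    X.append(mfcc_features(path))
    labels.append(os.path.basename(os.path.dirname(path)))

clf = SVC(kernel="rbf")
clf.fit(np.array(X), labels)
joblib.dump(clf, "voice_model.joblib")   # matches the filename loaded by app.py
```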
+ # Text emotion analysis
  def analyze_text(txt):
+     if not txt: return "no text"
      mapping = {
          "😊 happy": ["開心","快樂","愉快","喜悅","歡喜","興奮","高興","歡"],
          "😠 angry": ["生氣","憤怒","不爽","發火","火大","氣憤"],
          "😲 surprise":["驚訝","意外","嚇","好奇","驚詫","詫異","訝異"],
          "😨 fear": ["怕","恐懼","緊張","懼","膽怯","畏"],
      }
+     for emo, kw in mapping.items():
+         if any(w in txt for w in kw):
              return emo
+     return "neutral"

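The keyword matcher is easy to sanity-check on its own: any input containing one of the listed keywords maps to that emoji label, empty input short-circuits, and everything else falls through to "neutral". The example calls below are illustrative only, and the keyword line hidden from this diff hunk could also match other inputs:

```python
print(analyze_text("今天真的很開心"))  # "😊 happy" because "開心" is in the happy keyword list
print(analyze_text(""))                # "no text" from the empty-input guard
print(analyze_text("abc"))             # "neutral" when no keyword matches
```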
+ # Gradio UI
+ with gr.Blocks() as demo:
+     gr.Markdown("# 多模態即時情緒分析")
      with gr.Tabs():
+         with gr.TabItem("📷 即時人臉"):
+             camera = gr.Camera(label="請對準鏡頭")
+             face_out = gr.Textbox(label="偵測到的情緒")
+             camera.change(analyze_face, inputs=camera, outputs=face_out)
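As far as I can tell, `gr.Camera` is not a documented Gradio component; in Gradio 3.x a webcam feed is exposed through `gr.Image` with `source="webcam"`. A hedged sketch of the same tab using that component, with an explicit button instead of a `change` event (the button and its label are additions of this sketch, it assumes a Gradio 3.x API, and it would slot into the existing `with gr.Tabs():` block):

```python
        with gr.TabItem("📷 即時人臉"):
            # Webcam input: in Gradio 3.x the webcam is an Image source, not a separate component.
            camera = gr.Image(source="webcam", type="numpy", label="請對準鏡頭")
            face_btn = gr.Button("分析")   # hypothetical button, not in the original diff
            face_out = gr.Textbox(label="偵測到的情緒")
            face_btn.click(analyze_face, inputs=camera, outputs=face_out)
```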
+         with gr.TabItem("🎤 上傳語音"):
+             audio = gr.Audio(source="upload", type="file", label="上傳 WAV")
+             audio_out = gr.Textbox(label="語音情緒")
+             audio.change(analyze_audio, inputs=audio, outputs=audio_out)
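`type="file"` hands the handler a file-like object, which is what the `wav_file.read()` call in `analyze_audio` expects, but that value was deprecated in Gradio 3.x in favour of `type="filepath"`. If the Space runs a Gradio build that only accepts `"filepath"` or `"numpy"`, the component and the handler have to agree on the path-based form; a sketch under that assumption (not the version actually pinned by this Space):

```python
# Component: request a path instead of a file object.
audio = gr.Audio(source="upload", type="filepath", label="上傳 WAV")

# Handler adjusted to receive a path string.
def analyze_audio(wav_path):
    if wav_path is None:
        return "no audio"
    y, sr = librosa.load(wav_path, sr=None)
    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    feat = np.mean(mfccs.T, axis=0)
    return audio_model.predict([feat])[0]
```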
          with gr.TabItem("⌨️ 輸入文字"):
+             txt_input = gr.Textbox(label="輸入文字")
+             txt_btn = gr.Button("分析文字")
+             txt_out = gr.Textbox(label="文字情緒")
+             txt_btn.click(analyze_text, inputs=txt_input, outputs=txt_out)
+
+     gr.Markdown(
+         "⚠️ Hugging Face Spaces 無法直接呼叫本機攝影機;"
+         "請在手機/電腦瀏覽器使用,或拉到最下方打開 Camera 頁籤測試。"
+     )

  if __name__ == "__main__":
+     demo.launch()
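One behavioural difference worth flagging: the removed header pointed `DEEPFACE_HOME` at `/tmp/.deepface` because, per its own comment, DeepFace cannot write to its default path on this host, and the new version drops that guard entirely. If the write-permission problem on Spaces still applies, the first `DeepFace.analyze` call is likely to fail when it tries to download model weights, and the guard would need to come back at the top of the file, for example:

```python
import os

# DeepFace stores downloaded model weights under $DEEPFACE_HOME/.deepface/weights;
# the default home directory is not writable on Hugging Face Spaces,
# so point it at /tmp before deepface is imported.
os.environ["DEEPFACE_HOME"] = "/tmp/.deepface"

from deepface import DeepFace
```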