# app.py
import os
# ─── Work around DeepFace failing to write to its default path ──────────────
# Point DeepFace's cache directory at a writable location under /tmp
os.environ["DEEPFACE_HOME"] = "/tmp/.deepface"

import gradio as gr
import numpy as np
import joblib
import librosa
from deepface import DeepFace

# ─── 1. Load the model ──────────────────────────────────────────────────────
# The model file voice_model.joblib lives in the same directory as app.py
MODEL_PATH = os.path.join(os.path.dirname(__file__), "voice_model.joblib")
audio_model = joblib.load(MODEL_PATH)
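
# NOTE (assumption): the training recipe is not in this file. voice_model.joblib
# is assumed to be a scikit-learn-style classifier exposing .predict() over
# 13-dimensional mean-MFCC vectors, e.g. roughly (hypothetical names):
#
#   from sklearn.svm import SVC
#   clf = SVC()
#   clf.fit(X_train, y_train)              # X_train: (n_samples, 13) MFCC means
#   joblib.dump(clf, "voice_model.joblib")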

# ─── 2. Analysis functions ──────────────────────────────────────────────────
def analyze_face(frame: np.ndarray):
    # DeepFace reports emotion scores plus a 'dominant_emotion' per face;
    # recent versions return a list of result dicts (one per detected face)
    res = DeepFace.analyze(frame, actions=["emotion"], enforce_detection=False)
    if isinstance(res, list):
        res = res[0]
    return frame, res["dominant_emotion"]

def analyze_audio(wav_file):
    # gr.File passes a filepath string (Gradio 4) or a tempfile-like object
    # with a .name attribute (older Gradio), so normalize to a path first
    wav_path = wav_file if isinstance(wav_file, str) else wav_file.name
    # Read the audio at its native sample rate with librosa
    y, sr = librosa.load(wav_path, sr=None)
    # 13 mean MFCCs; this must match the features the model was trained on
    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    mf = np.mean(mfccs.T, axis=0)
    return audio_model.predict([mf])[0]
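
# Example (labels depend on what voice_model.joblib was trained with):
#   analyze_audio("clip.wav")  ->  e.g. "happy"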

def analyze_text(txt):
    # Simple keyword mapping; the keywords are Chinese and are matched
    # as substrings of the input text
    mapping = {
        "😊 happy":   ["開心","快樂","愉快","喜悅","歡喜","興奮","高興","歡"],
        "😠 angry":   ["生氣","憤怒","不爽","發火","火大","氣憤"],
        "😢 sad":     ["傷心","難過","哭","憂","悲","心酸","哀","痛苦","慘","愁"],
        "😲 surprise":["驚訝","意外","嚇","好奇","驚詫","詫異","訝異"],
        "😨 fear":    ["怕","恐懼","緊張","懼","膽怯","畏"],
    }
    for emo, kws in mapping.items():
        if any(w in txt for w in kws):
            return emo
    return "😐 neutral"

# ─── 3. Build the Gradio interface ──────────────────────────────────────────
with gr.Blocks(title="Multimodal Real-Time Emotion Analysis") as demo:
    gr.Markdown("## 🤖 Multimodal Real-Time Emotion Analysis")
    with gr.Tabs():
        with gr.TabItem("📷 Live Face"):
            # Gradio has no Camera component; a webcam-sourced streaming
            # Image plus its .stream() event (Gradio 4 API) covers live capture
            camera = gr.Image(sources=["webcam"], streaming=True,
                              label="Face the camera (live)")
            out_img = gr.Image(label="Captured frame")
            out_lbl = gr.Label(label="Detected emotion")
            camera.stream(
                fn=analyze_face,
                inputs=camera,
                outputs=[out_img, out_lbl],
            )

        with gr.TabItem("🎤 上傳語音檔"):
            wav = gr.File(label="選擇 .wav 檔案", file_types=[".wav"])
            wav_btn = gr.Button("開始分析")
            wav_out = gr.Textbox(label="語音偵測到的情緒")
            wav_btn.click(fn=analyze_audio, inputs=wav, outputs=wav_out)

        with gr.TabItem("⌨️ 輸入文字"):
            txt = gr.Textbox(label="在此輸入文字", lines=3)
            txt_btn = gr.Button("開始分析")
            txt_out = gr.Textbox(label="文字偵測到的情緒")
            txt_btn.click(fn=analyze_text, inputs=txt, outputs=txt_out)

# Launch
if __name__ == "__main__":
    # On Hugging Face Spaces there is no need to pass host/port;
    # a bare .launch() is enough
    demo.launch()