Update app.py
Browse files
app.py
CHANGED
@@ -9,6 +9,8 @@ from deepface import DeepFace
|
|
9 |
from transformers import pipeline
|
10 |
# 如果不手动用 AutoTokenizer/AutoModel,就不必 import AutoTokenizer, AutoModelForSequenceClassification
|
11 |
|
|
|
|
|
12 |
# --- 1. 加载 SVM 语音模型 ---
|
13 |
print("Downloading SVM model from Hugging Face Hub...")
|
14 |
model_path = hf_hub_download(repo_id="GCLing/emotion-svm-model", filename="svm_emotion_model.joblib")
|
@@ -158,36 +160,39 @@ def predict_face(img: np.ndarray):
|
|
158 |
return {}
|
159 |
|
160 |
# --- 5. Gradio 界面 ---
|
161 |
-
|
162 |
-
gr.
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
gr.
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
with gr.
|
175 |
-
|
|
|
|
|
|
|
|
|
176 |
|
177 |
-
|
178 |
-
|
179 |
-
# 文字情緒 Tab
|
180 |
-
with gr.TabItem("文字情緒"):
|
181 |
gr.Markdown("### 文字情緒 分析 (規則+zero-shot)")
|
182 |
with gr.Row():
|
183 |
text = gr.Textbox(lines=3, placeholder="請輸入中文文字…")
|
184 |
text_out = gr.Label(label="文字情緒結果")
|
185 |
-
|
186 |
-
|
187 |
-
btn
|
188 |
-
|
189 |
-
|
190 |
|
|
|
191 |
if __name__ == "__main__":
|
192 |
demo = build_interface()
|
193 |
demo.launch()
|
|
|
9 |
from transformers import pipeline
|
10 |
# 如果不手动用 AutoTokenizer/AutoModel,就不必 import AutoTokenizer, AutoModelForSequenceClassification
|
11 |
|
12 |
+
|
13 |
+
|
14 |
# --- 1. 加载 SVM 语音模型 ---
|
15 |
print("Downloading SVM model from Hugging Face Hub...")
|
16 |
model_path = hf_hub_download(repo_id="GCLing/emotion-svm-model", filename="svm_emotion_model.joblib")
|
|
|
160 |
return {}
|
161 |
|
162 |
# --- 5. Gradio UI ---
def build_interface():
    """Build the Gradio Blocks demo with three tabs: face, voice, and text emotion.

    Returns:
        gr.Blocks: the assembled (not yet launched) demo interface.
    """
    with gr.Blocks() as demo:
        gr.Markdown("## 多模態情緒分析示例")
        with gr.Tabs():
            # Face tab: live webcam streaming analysis.
            with gr.TabItem("臉部情緒"):
                gr.Markdown("### 臉部情緒 (即時 Webcam Streaming 分析)")
                with gr.Row():
                    # BUG FIX: `tool="webcam"` is not valid — in Gradio 3.x the
                    # capture device is selected via `source="webcam"` (`tool` picks
                    # the image edit tool), matching the `source="microphone"` usage
                    # on gr.Audio below.
                    webcam = gr.Image(source="webcam", streaming=True, type="numpy", label="攝像頭畫面")
                    face_out = gr.Label(label="情緒分布")
                # Re-run the face-emotion prediction on every streamed frame.
                webcam.stream(fn=predict_face, inputs=webcam, outputs=face_out)

            # Voice tab: record a clip, analyze when the audio value changes.
            with gr.TabItem("語音情緒"):
                gr.Markdown("### 語音情緒 分析")
                with gr.Row():
                    audio = gr.Audio(source="microphone", streaming=False, type="filepath", label="錄音")
                    voice_out = gr.Label(label="語音情緒結果")
                audio.change(fn=predict_voice, inputs=audio, outputs=voice_out)

            # Text tab: rule-based + zero-shot classification, triggered on submit.
            with gr.TabItem("文字情緒"):
                gr.Markdown("### 文字情緒 分析 (規則+zero-shot)")
                with gr.Row():
                    text = gr.Textbox(lines=3, placeholder="請輸入中文文字…")
                    text_out = gr.Label(label="文字情緒結果")
                text.submit(fn=predict_text_mixed, inputs=text, outputs=text_out)
                # Alternatively, trigger via a button:
                # btn = gr.Button("分析")
                # btn.click(fn=predict_text_mixed, inputs=text, outputs=text_out)
    return demo
|
194 |
|
195 |
+
# --- 6. Entry point ---
# FIX: section was mis-numbered "4" although it follows section 5 above.
if __name__ == "__main__":
    # Build the interface and start the Gradio server (blocks until shut down).
    demo = build_interface()
    demo.launch()