GCLing committed · Commit c0131be · verified · 1 Parent(s): 034eede

Update app.py

Files changed (1): app.py (+24 -31)
app.py CHANGED
@@ -134,58 +134,51 @@ def predict_voice(audio_path: str):
     return {}
 
 # --- 4. Face emotion prediction ---
+import gradio as gr
+
 def predict_face(img: np.ndarray):
-    if not has_deepface or img is None:
-        return {}
-    try:
-        res = DeepFace.analyze(img, actions=["emotion"], detector_backend="opencv")
-        if isinstance(res, list):
-            first = res[0] if res else {}
-            emo = first.get("emotion", {}) if isinstance(first, dict) else {}
-        else:
-            emo = res.get("emotion", {}) if isinstance(res, dict) else {}
-        return {k: float(v) for k,v in emo.items()}
-    except Exception as e:
-        print("DeepFace.analyze error:", e)
+    # Your DeepFace analysis logic
+    if img is None:
         return {}
+    # ...
+    return {"happy": 0.5, "sad": 0.5}  # example
 
-# --- 5. Gradio UI: using gr.components.Camera ---
 def build_interface():
     with gr.Blocks() as demo:
         gr.Markdown("## 多模態情緒分析示例")
         with gr.Tabs():
-            # Face tab, shown only when has_deepface=True
-            if has_deepface:
-                with gr.TabItem("臉部情緒"):
-                    gr.Markdown("### 臉部情緒 (即時 Webcam Streaming 分析)")
-                    with gr.Row():
-                        # Capture the webcam with Video
-                        webcam = gr.Video(source="webcam", streaming=True, label="攝像頭畫面")
-                        face_out = gr.Label(label="情緒分布")
-                    webcam.stream(fn=predict_face, inputs=webcam, outputs=face_out)
-            else:
-                # If deepface is missing locally, show the user a hint
-                with gr.TabItem("臉部情緒 (跳過)"):
-                    gr.Markdown
-
-
-            # Voice tab
+            # Face emotion tab
+            with gr.TabItem("臉部情緒"):
+                gr.Markdown("### 臉部情緒 (即時 Webcam Streaming 分析)")
+                with gr.Row():
+                    # Use gr.Image(sources="webcam", streaming=True, type="numpy") here
+                    webcam = gr.Image(sources="webcam", streaming=True, type="numpy", label="攝像頭畫面")
+                    face_out = gr.Label(label="情緒分佈")
+                # Each frame is sent to predict_face
+                webcam.stream(fn=predict_face, inputs=webcam, outputs=face_out)
+
+            # Voice emotion tab
             with gr.TabItem("語音情緒"):
                 gr.Markdown("### 語音情緒 分析")
                 with gr.Row():
+                    # Browser recording uses source="microphone"
                     audio = gr.Audio(source="microphone", streaming=False, type="filepath", label="錄音")
                     voice_out = gr.Label(label="語音情緒結果")
                 audio.change(fn=predict_voice, inputs=audio, outputs=voice_out)
 
-            # Text tab
+            # Text emotion tab
             with gr.TabItem("文字情緒"):
-                gr.Markdown("### 文字情緒 分析 (规则+Inference API)")
+                gr.Markdown("### 文字情緒 分析 (規則+Inference API)")
                 with gr.Row():
                     text = gr.Textbox(lines=3, placeholder="請輸入中文文字…")
                     text_out = gr.Label(label="文字情緒結果")
+                # Triggered on submit
                 text.submit(fn=predict_text_mixed, inputs=text, outputs=text_out)
+
     return demo
 
+
+
 if __name__ == "__main__":
     demo = build_interface()
     # share=True creates a temporary public link when testing locally
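
The committed `predict_face` is only a stub: it returns fixed scores and drops the DeepFace call that this commit removes. If the real analysis is wanted back, the deleted logic can be folded into the stub roughly as below. This is a minimal sketch, assuming `deepface` is installed and that frames arrive as RGB numpy arrays from the streaming `gr.Image(type="numpy")` input; the `enforce_detection=False` flag is an addition not present in the original code, used so streaming frames without a clearly detectable face do not raise.

```python
# Sketch only: restores the DeepFace-based analysis that this commit removes,
# guarded so the app still runs when deepface is not installed.
import numpy as np

try:
    from deepface import DeepFace
    has_deepface = True
except ImportError:
    has_deepface = False


def predict_face(img: np.ndarray):
    if not has_deepface or img is None:
        return {}
    try:
        # enforce_detection=False (not in the original code) keeps frames
        # without a clear face from raising an exception.
        res = DeepFace.analyze(
            img,
            actions=["emotion"],
            detector_backend="opencv",
            enforce_detection=False,
        )
        first = res[0] if isinstance(res, list) and res else res
        emo = first.get("emotion", {}) if isinstance(first, dict) else {}
        return {k: float(v) for k, v in emo.items()}
    except Exception as e:
        print("DeepFace.analyze error:", e)
        return {}
```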
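
The face tab now streams frames through `gr.Image` instead of `gr.Video`, while `gr.Audio` keeps the older `source="microphone"` spelling. Component parameters were renamed between Gradio major versions (3.x takes `source="webcam"`, 4.x a `sources=["webcam"]` list), so whichever spelling is used has to match the Gradio version pinned for the Space. Below is a self-contained sketch of the new webcam wiring and the launch step, assuming Gradio 4.x, with a placeholder standing in for the real `predict_face`:

```python
# Minimal streaming sketch, assuming Gradio 4.x (Image takes a `sources` list there).
import gradio as gr
import numpy as np


def predict_face(img: np.ndarray):
    # Placeholder for the predict_face defined in app.py
    return {} if img is None else {"happy": 0.5, "sad": 0.5}


with gr.Blocks() as demo:
    gr.Markdown("### 臉部情緒 (即時 Webcam Streaming 分析)")
    webcam = gr.Image(sources=["webcam"], streaming=True, type="numpy", label="攝像頭畫面")
    face_out = gr.Label(label="情緒分佈")
    # Every captured frame is passed to predict_face and the Label is refreshed
    webcam.stream(fn=predict_face, inputs=webcam, outputs=face_out)

if __name__ == "__main__":
    # share=True creates a temporary public link when testing locally
    demo.launch(share=True)
```

On Gradio 3.x the equivalent would be `gr.Image(source="webcam", ...)`; the `gr.Audio(source="microphone", ...)` call kept in the diff follows that older naming.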