GCLing committed
Commit dfd5bb6 · verified · 1 Parent(s): 32d5601

Update app.py

Files changed (1)
  1. app.py +13 -23
app.py CHANGED
@@ -54,31 +54,24 @@ def extract_feature(signal: np.ndarray, sr: int) -> np.ndarray:
 # --- 4. Three prediction functions ---
 
 def predict_face(img):
-    global _last_time, _last_result
+    print("predict_face called, img is None?", img is None)
     if img is None:
         return {}
-    now = time.time()
-    # Throttle: analyze at most once every 0.5 seconds
-    if now - _last_time < 0.5 and _last_result:
-        return _last_result
     try:
         res = DeepFace.analyze(img, actions=["emotion"], detector_backend="opencv")
-        # Handle the return type
+        # Simplified list/dict handling...
+        # take the first detected face directly
         if isinstance(res, list):
-            first = res[0] if len(res) > 0 else {}
+            first = res[0] if res else {}
             emo = first.get("emotion", {}) if isinstance(first, dict) else {}
-        elif isinstance(res, dict):
-            emo = res.get("emotion", {})
         else:
-            emo = {}
-        _last_result = emo
-        _last_time = now
+            emo = res.get("emotion", {}) if isinstance(res, dict) else {}
         print("predict_face result:", emo)
         return emo
     except Exception as e:
         print("DeepFace.analyze error:", e)
-        # On error, return the last valid result, or empty
-        return _last_result if _last_result else {}
+        return {}
+
 
 
 
@@ -144,15 +137,12 @@ def predict_text_mixed(text: str):
 
 # --- 5. Build the Gradio interface ---
 with gr.Blocks() as demo:
-    gr.Markdown("## 多模態即時情緒分析")
-    with gr.Tabs():
-        # Facial-emotion tab
-        with gr.TabItem("臉部情緒"):
-            gr.Markdown("### 臉部情緒 (即時 Webcam Streaming 分析)")
-            with gr.Row():
-                webcam = gr.Image(sources="webcam", streaming=True, type="numpy", label="攝像頭畫面")
-                emotion_output = gr.Label(label="情緒分布")
-            webcam.stream(fn=predict_face, inputs=webcam, outputs=emotion_output)
+    with gr.TabItem("臉部情緒"):
+        with gr.Row():
+            webcam = gr.Image(source="webcam", streaming=True, type="numpy", label="攝像頭畫面")
+            face_out = gr.Label(label="情緒分布")
+        webcam.stream(fn=predict_face, inputs=webcam, outputs=face_out)
+
 
 
     # The remaining tabs can keep their original implementation, or use the Blocks style
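
Below is a minimal, self-contained sketch of how the simplified predict_face and the streaming webcam tab fit together after this commit. It is an illustration rather than the full app.py: the English UI labels are placeholders, it assumes the deepface and gradio packages are installed, and source="webcam" matches the Gradio 3.x API (Gradio 4.x renamed the parameter to sources=["webcam"]).

# Minimal sketch (not the exact app.py): the simplified predict_face wired to a
# streaming webcam tab. Assumes Gradio 3.x (`source=`) and the deepface package.
import gradio as gr
from deepface import DeepFace


def predict_face(img):
    """Return DeepFace's emotion scores for the first detected face, or {}."""
    if img is None:
        return {}
    try:
        res = DeepFace.analyze(img, actions=["emotion"], detector_backend="opencv")
        # DeepFace may return a list (one entry per detected face) or a single dict.
        if isinstance(res, list):
            first = res[0] if res else {}
            return first.get("emotion", {}) if isinstance(first, dict) else {}
        return res.get("emotion", {}) if isinstance(res, dict) else {}
    except Exception as e:  # e.g. frames where no face can be detected
        print("DeepFace.analyze error:", e)
        return {}


with gr.Blocks() as demo:
    with gr.TabItem("Facial emotion"):
        with gr.Row():
            webcam = gr.Image(source="webcam", streaming=True, type="numpy",
                              label="Camera feed")
            face_out = gr.Label(label="Emotion distribution")
        # Re-run predict_face on every streamed webcam frame.
        webcam.stream(fn=predict_face, inputs=webcam, outputs=face_out)

if __name__ == "__main__":
    demo.launch()

Note that the throttling state (_last_time / _last_result) removed by this commit is not reinstated in the sketch, so every streamed frame triggers a full DeepFace analysis; gr.Label then renders the returned emotion-score dict as a distribution.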