Update app.py
app.py CHANGED
@@ -54,31 +54,24 @@ def extract_feature(signal: np.ndarray, sr: int) -> np.ndarray:
 # --- 4. Three prediction functions ---
 
 def predict_face(img):
-
+    print("predict_face called, img is None?", img is None)
     if img is None:
         return {}
-    now = time.time()
-    # Rate limit: analyze at most once every 0.5 seconds
-    if now - _last_time < 0.5 and _last_result:
-        return _last_result
     try:
         res = DeepFace.analyze(img, actions=["emotion"], detector_backend="opencv")
-        #
+        # simplified list/dict handling ...
+        # just take the first detected face
         if isinstance(res, list):
-            first = res[0] if
+            first = res[0] if res else {}
             emo = first.get("emotion", {}) if isinstance(first, dict) else {}
-        elif isinstance(res, dict):
-            emo = res.get("emotion", {})
         else:
-            emo = {}
-        _last_result = emo
-        _last_time = now
+            emo = res.get("emotion", {}) if isinstance(res, dict) else {}
         print("predict_face result:", emo)
         return emo
     except Exception as e:
         print("DeepFace.analyze error:", e)
-
-
+        return {}
+
 
 
 
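One note on the removed throttle: it both read and reassigned _last_time and _last_result inside predict_face, so unless one of the removed lines also declared them global, Python would treat them as locals and the first read would raise UnboundLocalError. If the 0.5-second rate limit is wanted back, a minimal corrected sketch could look like the following (the module-level names and the wrapper function are illustrative, not part of this commit):

import time

_last_time = 0.0         # assumed module-level throttle state
_last_result: dict = {}

def predict_face_throttled(img):
    # Reuse the previous result if the last analysis ran less than 0.5 s ago.
    global _last_time, _last_result  # required because both names are reassigned below
    if img is None:
        return {}
    now = time.time()
    if now - _last_time < 0.5 and _last_result:
        return _last_result
    emo = predict_face(img)          # the simplified function from the hunk above
    _last_time = now
    _last_result = emo
    return emo

Passing predict_face_throttled instead of predict_face to webcam.stream() would then keep DeepFace from running on every streamed frame.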
@@ -144,15 +137,12 @@ def predict_text_mixed(text: str):
 
 # --- 5. Build the Gradio interface ---
 with gr.Blocks() as demo:
-    gr.
-
-
-
-
-
-    webcam = gr.Image(sources="webcam", streaming=True, type="numpy", label="攝像頭畫面")
-    emotion_output = gr.Label(label="情緒分布")
-    webcam.stream(fn=predict_face, inputs=webcam, outputs=emotion_output)
+    with gr.TabItem("臉部情緒"):
+        with gr.Row():
+            webcam = gr.Image(source="webcam", streaming=True, type="numpy", label="攝像頭畫面")
+            face_out = gr.Label(label="情緒分布")
+            webcam.stream(fn=predict_face, inputs=webcam, outputs=face_out)
+
 
 
 # The remaining tabs can follow the original approach, or use the Blocks style
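The interface hunk also changes sources="webcam" to source="webcam". Which keyword gr.Image accepts depends on the installed Gradio major version (source= in Gradio 3.x, sources=["webcam"] in 4.x), so this is worth verifying against the environment the app actually runs in. Below is a minimal, self-contained sketch of the same wiring, assuming Gradio 4.x and a stub classifier standing in for predict_face:

import gradio as gr
import numpy as np

def fake_emotions(frame: np.ndarray) -> dict:
    # Stand-in for predict_face; gr.Label expects a {label: confidence} dict.
    if frame is None:
        return {}
    return {"happy": 0.6, "neutral": 0.3, "sad": 0.1}

with gr.Blocks() as demo:
    with gr.Tab("Face emotion"):
        with gr.Row():
            webcam = gr.Image(sources=["webcam"], streaming=True, type="numpy", label="Webcam feed")
            out = gr.Label(label="Emotion distribution")
        # Run the callback on every frame streamed from the webcam.
        webcam.stream(fn=fake_emotions, inputs=webcam, outputs=out)

if __name__ == "__main__":
    demo.launch()

Because the stream callback fires for every frame, it should stay cheap once the stub is replaced with the real DeepFace call, which is presumably what the removed throttle in the first hunk was for.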