GCLing committed on
Commit
dd2bb14
·
verified ·
1 Parent(s): 92eb8b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -15
app.py CHANGED
@@ -41,16 +41,18 @@ def extract_feature(signal: np.ndarray, sr: int) -> np.ndarray:
41
  # --- 4. 三種預測函式 ---
42
 
43
  def predict_face(img: np.ndarray):
44
- if img is None:
45
- return {} # 没有帧时返回空
46
  try:
47
  result = DeepFace.analyze(img, actions=["emotion"], detector_backend="opencv")
48
- return result.get("emotion", {})
 
 
49
  except Exception as e:
50
- # 遇到错误时,可返回空或日志
51
- print("DeepFace 分析错误:", e)
52
  return {}
53
 
 
54
  def predict_voice(audio):
55
  """
56
  語音情緒分析:audio 由 Gradio 傳入,形式為暫存檔路徑字串 (str)。
@@ -65,15 +67,20 @@ def predict_voice(audio):
65
  return {labels[i]: float(probs[i]) for i in range(len(labels))}
66
 
67
  def predict_text(text: str):
68
- """
69
- 文字情緒分析:使用 transformers pipeline,
70
- 輸入中文字串,回傳 dict,例如 {"POSITIVE":0.95} 或模型輸出標籤與信心分數。
71
- """
72
- if not text or text.strip() == "":
73
  return {}
74
- pred = text_emotion(text)[0]
75
- # pred 形如 {"label": "...", "score": ...}
76
- return {pred["label"]: float(pred["score"])}
 
 
 
 
 
 
77
 
78
  # --- 5. 建立 Gradio 介面 ---
79
  with gr.Blocks() as demo:
@@ -85,9 +92,9 @@ with gr.Blocks() as demo:
85
  with gr.Row():
86
  webcam = gr.Image(sources="webcam", streaming=True, type="numpy", label="攝像頭畫面")
87
  emotion_output = gr.Label(label="情緒分布")
88
- # 关键:用 stream 让每帧到达时调用 predict_face 并更新 emotion_output
89
  webcam.stream(fn=predict_face, inputs=webcam, outputs=emotion_output)
90
 
 
91
  # 其餘 Tab 可按原先寫法,或用 Blocks 方式
92
  with gr.TabItem("語音情緒"):
93
  audio = gr.Audio(sources="microphone", streaming=False, type="filepath", label="錄音")
@@ -98,7 +105,9 @@ with gr.Blocks() as demo:
98
  with gr.TabItem("文字情緒"):
99
  text = gr.Textbox(lines=3, placeholder="請輸入中文文字…")
100
  text_output = gr.Label(label="文字情緒結果")
101
- text.submit(fn=predict_text, inputs=text, outputs=text_output)
 
 
102
 
103
 
104
 
 
41
  # --- 4. 三種預測函式 ---
42
 
43
def predict_face(img: np.ndarray):
    """Facial-emotion analysis for one webcam frame.

    Parameters
    ----------
    img : np.ndarray | None
        RGB frame from the Gradio webcam stream; may be None before the
        first frame arrives.

    Returns
    -------
    dict
        Emotion-name -> score mapping suitable for gr.Label, or {} when
        there is no frame or analysis fails.
    """
    print("predict_face called, img is None?", img is None)
    if img is None:
        # No frame yet — skip the expensive DeepFace call entirely
        # (this guard existed before this commit and was dropped).
        return {}
    try:
        result = DeepFace.analyze(img, actions=["emotion"], detector_backend="opencv")
        # Recent DeepFace versions return a list of per-face dicts;
        # older ones return a single dict. Handle both shapes.
        if isinstance(result, list):
            result = result[0] if result else {}
        emo = result.get("emotion", {})
        print("DeepFace result:", emo)
        return emo
    except Exception as e:
        # Best-effort: log and return an empty distribution so the
        # streaming UI keeps running on detector errors.
        print("DeepFace.analyze error:", e)
        return {}
54
 
55
+
56
  def predict_voice(audio):
57
  """
58
  語音情緒分析:audio 由 Gradio 傳入,形式為暫存檔路徑字串 (str)。
 
67
  return {labels[i]: float(probs[i]) for i in range(len(labels))}
68
 
69
def predict_text(text: str):
    """Text-emotion analysis via the transformers pipeline.

    Parameters
    ----------
    text : str
        User-entered text; empty/whitespace-only input short-circuits.

    Returns
    -------
    dict
        {label: score} from the first pipeline prediction, or {} on
        empty input or pipeline failure.
    """
    # NOTE(review): the diff left TWO consecutive `def predict_text`
    # headers (a bodiless def followed by a second def) — a SyntaxError.
    # Collapsed here into a single definition with the intended body.
    print("predict_text called, text:", text)
    if not text or text.strip() == "":
        return {}
    try:
        # pred is shaped like {"label": "...", "score": ...}
        pred = text_emotion(text)[0]
        result = {pred["label"]: float(pred["score"])}
        print("Text sentiment result:", result)
        return result
    except Exception as e:
        # Log and degrade gracefully so the UI shows an empty label.
        print("predict_text error:", e)
        return {}
83
+
84
 
85
  # --- 5. 建立 Gradio 介面 ---
86
  with gr.Blocks() as demo:
 
92
  with gr.Row():
93
  webcam = gr.Image(sources="webcam", streaming=True, type="numpy", label="攝像頭畫面")
94
  emotion_output = gr.Label(label="情緒分布")
 
95
  webcam.stream(fn=predict_face, inputs=webcam, outputs=emotion_output)
96
 
97
+
98
  # 其餘 Tab 可按原先寫法,或用 Blocks 方式
99
  with gr.TabItem("語音情緒"):
100
  audio = gr.Audio(sources="microphone", streaming=False, type="filepath", label="錄音")
 
105
  with gr.TabItem("文字情緒"):
106
  text = gr.Textbox(lines=3, placeholder="請輸入中文文字…")
107
  text_output = gr.Label(label="文字情緒結果")
108
+ btn = gr.Button("分析文字")
109
+ btn.click(fn=predict_text, inputs=text, outputs=text_output)
110
+
111
 
112
 
113