vikhyatk committed
Commit 9f8fda1 · verified · 1 parent: 1ab5922

Update demo.py

Files changed (1): demo.py (+42, −40)
demo.py CHANGED
@@ -8,10 +8,8 @@ import matplotlib
 from PIL import Image
 from transformers import AutoModelForCausalLM
 
-
 matplotlib.use("Agg")  # Use Agg backend for non-interactive plotting
 
-
 os.environ["HF_TOKEN"] = os.environ.get("TOKEN_FROM_SECRET") or True
 model = AutoModelForCausalLM.from_pretrained(
     "vikhyatk/moondream-next",
@@ -21,9 +19,8 @@ model = AutoModelForCausalLM.from_pretrained(
     revision="56a3adeae60809e4269c544cde376feb20637ee0"
 )
 
-
-def visualize_gaze_multi(face_boxes, gaze_points, image=None, show_plot=True):
-    """Visualization function with reduced whitespace"""
+def visualize_faces_and_gaze(face_boxes, gaze_points=None, image=None, show_plot=True):
+    """Visualization function that can handle faces without gaze data"""
     # Calculate figure size based on image aspect ratio
     if image is not None:
         height, width = image.shape[:2]
@@ -46,39 +43,42 @@ def visualize_gaze_multi(face_boxes, gaze_points, image=None, show_plot=True):
 
     colors = plt.cm.rainbow(np.linspace(0, 1, len(face_boxes)))
 
-    for face_box, gaze_point, color in zip(face_boxes, gaze_points, colors):
+    for i, (face_box, color) in enumerate(zip(face_boxes, colors)):
         hex_color = "#{:02x}{:02x}{:02x}".format(
             int(color[0] * 255), int(color[1] * 255), int(color[2] * 255)
         )
 
         x, y, width_box, height_box = face_box
-        gaze_x, gaze_y = gaze_point
-
         face_center_x = x + width_box / 2
         face_center_y = y + height_box / 2
 
+        # Draw face bounding box
         face_rect = plt.Rectangle(
             (x, y), width_box, height_box, fill=False, color=hex_color, linewidth=2
         )
         ax.add_patch(face_rect)
 
-        points = 50
-        alphas = np.linspace(0.8, 0, points)
+        # Draw gaze line if gaze data is available
+        if gaze_points is not None and i < len(gaze_points) and gaze_points[i] is not None:
+            gaze_x, gaze_y = gaze_points[i]
+
+            points = 50
+            alphas = np.linspace(0.8, 0, points)
 
-        x_points = np.linspace(face_center_x, gaze_x, points)
-        y_points = np.linspace(face_center_y, gaze_y, points)
+            x_points = np.linspace(face_center_x, gaze_x, points)
+            y_points = np.linspace(face_center_y, gaze_y, points)
 
-        for i in range(points - 1):
-            ax.plot(
-                [x_points[i], x_points[i + 1]],
-                [y_points[i], y_points[i + 1]],
-                color=hex_color,
-                alpha=alphas[i],
-                linewidth=4,
-            )
+            for j in range(points - 1):
+                ax.plot(
+                    [x_points[j], x_points[j + 1]],
+                    [y_points[j], y_points[j + 1]],
+                    color=hex_color,
+                    alpha=alphas[j],
+                    linewidth=4,
+                )
 
-        ax.scatter(gaze_x, gaze_y, color=hex_color, s=100, zorder=5)
-        ax.scatter(gaze_x, gaze_y, color="white", s=50, zorder=6)
+            ax.scatter(gaze_x, gaze_y, color=hex_color, s=100, zorder=5)
+            ax.scatter(gaze_x, gaze_y, color="white", s=50, zorder=6)
 
     # Set plot limits and remove axes
     ax.set_xlim(0, width)
@@ -120,41 +120,43 @@ def process_image(input_image):
         gaze_points = []
 
         for face in faces:
-            gaze = model.detect_gaze(enc_image, face=face, unstable_settings={
-                "prioritize_accuracy": True,
-                "flip_enc_img": flip_enc_image
-            })["gaze"]
-
-            if gaze is None:
-                continue
-
+            # Add face bounding box regardless of gaze detection
             face_box = (
                 face["x_min"] * pil_image.width,
                 face["y_min"] * pil_image.height,
                 (face["x_max"] - face["x_min"]) * pil_image.width,
                 (face["y_max"] - face["y_min"]) * pil_image.height,
            )
+            face_boxes.append(face_box)
 
-            gaze_point = (
-                gaze["x"] * pil_image.width,
-                gaze["y"] * pil_image.height,
-            )
+            # Try to detect gaze
+            gaze = model.detect_gaze(enc_image, face=face, unstable_settings={
+                "prioritize_accuracy": True,
+                "flip_enc_img": flip_enc_image
+            })["gaze"]
 
-            face_boxes.append(face_box)
-            gaze_points.append(gaze_point)
+            if gaze is not None:
+                gaze_point = (
+                    gaze["x"] * pil_image.width,
+                    gaze["y"] * pil_image.height,
+                )
+                gaze_points.append(gaze_point)
+            else:
+                gaze_points.append(None)
 
         # Create visualization
         image_array = np.array(pil_image)
-        fig = visualize_gaze_multi(
+        fig = visualize_faces_and_gaze(
            face_boxes, gaze_points, image=image_array, show_plot=False
        )
 
-        return fig, f"Detected {len(faces)} faces."
+        faces_with_gaze = sum(1 for gp in gaze_points if gp is not None)
+        status = f"Detected {len(faces)} faces. {len(faces) - faces_with_gaze} faces identified as looking out of frame."
+        return fig, status
 
    except Exception as e:
        return None, f"Error processing image: {str(e)}"
 
-
 with gr.Blocks(title="Moondream Gaze Detection") as app:
    gr.Markdown("# 🌔 Moondream Gaze Detection")
    gr.Markdown("Upload an image to detect faces and visualize their gaze directions.")
@@ -177,4 +179,4 @@ with gr.Blocks(title="Moondream Gaze Detection") as app:
    )
 
if __name__ == "__main__":
-    app.launch()
+    app.launch()
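
To make the behavioural change easy to try in isolation, here is a minimal sketch (not part of the commit) that mirrors what the reworked visualize_faces_and_gaze now does: every face gets a bounding box, and the fading gaze line plus the gaze marker are drawn only for faces whose gaze point is not None. The face boxes, gaze points, and canvas size below are made-up illustrative values; the model, hex-colour conversion, and figure sizing from demo.py are omitted.

import matplotlib
matplotlib.use("Agg")  # non-interactive backend, as in demo.py
import matplotlib.pyplot as plt
import numpy as np

face_boxes = [(50, 40, 80, 100), (300, 60, 90, 110)]  # (x, y, width, height) in pixels
gaze_points = [(260, 220), None]                       # second face: gaze not detected

fig, ax = plt.subplots(figsize=(6, 4))
colors = plt.cm.rainbow(np.linspace(0, 1, len(face_boxes)))

for i, (face_box, color) in enumerate(zip(face_boxes, colors)):
    x, y, w, h = face_box
    # Face bounding box is drawn unconditionally
    ax.add_patch(plt.Rectangle((x, y), w, h, fill=False, color=color, linewidth=2))

    # Gaze line only when a gaze point exists for this face
    if gaze_points[i] is not None:
        gx, gy = gaze_points[i]
        cx, cy = x + w / 2, y + h / 2
        points = 50
        alphas = np.linspace(0.8, 0, points)  # fade out towards the gaze target
        xs = np.linspace(cx, gx, points)
        ys = np.linspace(cy, gy, points)
        for j in range(points - 1):
            ax.plot(xs[j:j + 2], ys[j:j + 2], color=color, alpha=alphas[j], linewidth=4)
        ax.scatter(gx, gy, color=color, s=100, zorder=5)
        ax.scatter(gx, gy, color="white", s=50, zorder=6)

ax.set_xlim(0, 480)
ax.set_ylim(320, 0)  # invert y so the origin is at the top-left, like image coordinates
ax.axis("off")
fig.savefig("gaze_sketch.png", bbox_inches="tight")

Running the sketch writes gaze_sketch.png with two face boxes but only one gaze line, which is the same output the updated demo now produces for faces the model judges to be looking out of frame.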