Commit 098cfe5 ("wip")
Parent(s): a3d28fd

Files changed:
- app.py (+5, -4)
- detection/face_detection.py (+15, -6)
app.py (CHANGED)

@@ -44,8 +44,9 @@ with gr.Blocks() as demo:
         # Process Button
         face_process_btn = gr.Button("Process Image")
 
-        # Output
-        face_image_output = gr.Image(label="Detected Faces")
+        # Output Components
+        face_image_output = gr.Image(label="Detected Faces Image")
+        face_bbox_output = gr.JSON(label="Raw Bounding Box Data")
 
         # Link radio button change to visibility update function
         face_input_type.change(
@@ -57,7 +58,7 @@ with gr.Blocks() as demo:
         )
 
         # Link process button to the face detection function
-        # The face_detection function will
+        # The face_detection function will now return a tuple
         face_process_btn.click(
             fn=face_detection,
             inputs=[
@@ -67,7 +68,7 @@ with gr.Blocks() as demo:
                 face_base64_input,
                 face_detection_method,
             ],
-            outputs=face_image_output,
+            outputs=[face_image_output, face_bbox_output],
         )
     # Create a tab for age estimation
     with gr.Tab("Age Estimation"):
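The app.py change above relies on how Gradio maps a handler's return value to its outputs list: when outputs contains more than one component, the tuple returned by fn is unpacked positionally, so the annotated image lands in the gr.Image and the bounding-box list in the gr.JSON viewer. The snippet below is a minimal, self-contained sketch of that wiring, not the Space's actual app; the stub handler, the gr.Image input, and the launch call are illustrative placeholders.

import gradio as gr

# Stand-in for detection.face_detection.face_detection: returns (annotated image, bbox list).
def face_detection_stub(image):
    boxes = [{"x": 10, "y": 20, "w": 50, "h": 50}]  # placeholder data
    return image, boxes

with gr.Blocks() as demo:
    face_image_input = gr.Image(label="Upload Image")
    face_process_btn = gr.Button("Process Image")

    # Output components mirroring the diff: one image, one JSON viewer.
    face_image_output = gr.Image(label="Detected Faces Image")
    face_bbox_output = gr.JSON(label="Raw Bounding Box Data")

    # A tuple returned by fn is unpacked positionally across the outputs list.
    face_process_btn.click(
        fn=face_detection_stub,
        inputs=[face_image_input],
        outputs=[face_image_output, face_bbox_output],
    )

demo.launch()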
detection/face_detection.py (CHANGED)

@@ -25,14 +25,21 @@ def face_detection(input_type, uploaded_image, image_url, base64_string, face_de
         face_detection_method (str): The selected face detection method ("OpenCV" or "dlib").
 
     Returns:
-
+        tuple: A tuple containing:
+            - numpy.ndarray: The image with detected faces, or None if an error occurred.
+            - list: A list of dictionaries, where each dictionary represents a bounding box
+              with keys 'x', 'y', 'w', 'h', or an empty list if no faces were detected
+              or an error occurred.
     """
     # Use the centralized function to get the image
     image = get_image_from_input(input_type, uploaded_image, image_url, base64_string)
 
     if image is None:
         print("Image is None after loading/selection.")
-        return None #
+        return None, []  # Return None for image and empty list for bboxes
+
+    processed_image = None
+    bounding_boxes = []
 
     try:
         # Preprocess the image (convert PIL to numpy, ensure RGB)
@@ -53,13 +60,14 @@ def face_detection(input_type, uploaded_image, image_url, base64_string, face_de
             if not os.path.exists(cascade_path):
                 error_message = f"Error: Haar cascade file not found at {cascade_path}. Please ensure OpenCV is installed correctly and the file exists."
                 print(error_message)
-                return None
+                return None, []  # Return None for image and empty list for bboxes
 
             face_cascade = cv2.CascadeClassifier(cascade_path)
 
             faces = face_cascade.detectMultiScale(gray, 1.1, 4)
             for x, y, w, h in faces:
                 cv2.rectangle(processed_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
+                bounding_boxes.append({'x': int(x), 'y': int(y), 'w': int(w), 'h': int(h)})
 
         elif face_detection_method == "dlib":
             print("Using dlib for face detection.")
@@ -71,10 +79,11 @@ def face_detection(input_type, uploaded_image, image_url, base64_string, face_de
             for face in faces:
                 x, y, w, h = face.left(), face.top(), face.width(), face.height()
                 cv2.rectangle(processed_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
+                bounding_boxes.append({'x': int(x), 'y': int(y), 'w': int(w), 'h': int(h)})
 
-            return processed_image
+            return processed_image, bounding_boxes
         else:
-            return None
+            return None, []  # Return None for image and empty list for bboxes
     except Exception as e:
         print(f"Error in face detection processing: {e}")
-        return None
+        return None, []  # Return None for image and empty list for bboxes
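For reference, the sketch below isolates the new return contract in the OpenCV branch: draw each detection on a copy of the image, collect the box as a dict of plain ints (so the gr.JSON component can serialize it), and return the (image, boxes) pair, falling back to (None, []) on failure. The function name detect_faces_opencv and the use of cv2.data.haarcascades for the cascade path are assumptions for this standalone example; the real module also handles input loading and the dlib branch.

import os
import cv2
import numpy as np

def detect_faces_opencv(image_rgb: np.ndarray):
    """Hypothetical standalone version of the OpenCV branch in face_detection()."""
    processed_image = image_rgb.copy()
    bounding_boxes = []

    gray = cv2.cvtColor(processed_image, cv2.COLOR_RGB2GRAY)
    cascade_path = os.path.join(cv2.data.haarcascades, "haarcascade_frontalface_default.xml")
    if not os.path.exists(cascade_path):
        print(f"Error: Haar cascade file not found at {cascade_path}.")
        return None, []  # same failure contract as the diff

    face_cascade = cv2.CascadeClassifier(cascade_path)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for x, y, w, h in faces:
        cv2.rectangle(processed_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # int() casts keep numpy integers JSON-serializable for gr.JSON.
        bounding_boxes.append({"x": int(x), "y": int(y), "w": int(w), "h": int(h)})

    return processed_image, bounding_boxes

A caller can then pass both returned values straight through to the two Gradio outputs wired up in app.py.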