import face_recognition
import cv2
import numpy as np
import imageSegmentation
from mediapipe.tasks.python import vision
import Visualization_utilities as vis

# Get a reference to webcam #0 (the default one)
# video_capture = cv2.VideoCapture(0)

# Load a sample picture and learn how to recognize it.
def get_face_encoding(path):
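    """Crop the HKID photo at `path`, save the crop to saved/HKID.jpg, and
    return the face encoding of the first face found in it.

    Note: face_recognition.face_encodings() returns a list; the [0] index below
    raises IndexError if no face is detected in the cropped image.
    """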
    print(f'path: {path}')
    HKID_cropped = imageSegmentation.auto_cropping(path)
    cv2.imwrite('saved/HKID.jpg', HKID_cropped)
    HKID_image = face_recognition.load_image_file("saved/HKID.jpg")
    HKID_face_encoding = face_recognition.face_encodings(HKID_image)[0]
    return HKID_face_encoding

# HKID_image = face_recognition.load_image_file("saved/HKID.jpg")
# HKID_face_encoding = face_recognition.face_encodings(HKID_image)[0]

# Create arrays of known face encodings and their names
# known_face_encodings = [
#     HKID_face_encoding
# ]
# known_face_names = [
#     "Marco"
# ]

# Initialize some variables
# face_locations = []
# face_encodings = []
# face_names = []
# process_this_frame = True
# score = []
# faces = 0  # number of faces

# while True:
#     # Grab a single frame of video
#     ret, frame = video_capture.read()
#
#     # # Draw a label with a name below the face
#     # cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
#     # font = cv2.FONT_HERSHEY_DUPLEX
#     # cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
#
#     # Display the resulting image
#     cv2.imshow('Video', frame)
#
#     # Hit 'q' on the keyboard to quit!
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break


def process_frame(frame, process_this_frame, face_locations, faces, face_names, score):
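    """Run face recognition against the HKID encoding and annotate the frame.

    Compares the faces found in `frame` with the encoding returned by
    get_face_encoding("image"), draws a box and label for each face, warns when
    more than one person is in view, and returns the updated state:
    (frame, process_this_frame, face_locations, faces, face_names, score).
    """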
    hkid_face_encoding = get_face_encoding("image")
    print(f'encoding: {hkid_face_encoding}')
    known_face_encodings = [
        hkid_face_encoding
    ]
    known_face_names = [
        "recognized"
    ]
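    # The only known identity is the face cropped from the HKID image; it is
    # re-encoded on every call to this function.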

    # Only process every other frame of video to save time
    if process_this_frame:
        face_names = []

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)

        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        faces = len(face_encodings)  # number of faces

        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]

            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            print(face_distances)
            if matches[best_match_index] and face_distances[best_match_index] < 0.45:
                score.append(face_distances[best_match_index])
                name = known_face_names[best_match_index]
            else:
                score = []

            face_names.append(name)

    # if len(score) > 20:
    #     avg_score = sum(score) / len(score)

    # Display the results
    if faces > 1:
        # Define the text and font properties
        text = "More than 1 person detected!"
        font = cv2.FONT_HERSHEY_DUPLEX
        font_scale = 1
        font_thickness = 2

        # Calculate the text size
        window_height = frame.shape[0]
        window_width = frame.shape[1]
        text_size, _ = cv2.getTextSize(text, font, font_scale, font_thickness)

        # Calculate the text position
        text_x = int((window_width - text_size[0]) / 2)
        text_y = window_height - int(text_size[1] / 2)
        cv2.putText(frame, text, (text_x, text_y), font, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (65, 181, 41), 4)

        # Define the name box properties
        name_box_color = (44, 254, 0)
        name_box_alpha = 0.7

        # Define the text properties
        font = cv2.FONT_HERSHEY_TRIPLEX
        font_scale = 1
        font_thickness = 2
        text_color = (255, 255, 255)

        # Calculate the text size
        text_width, text_height = cv2.getTextSize(name, font, font_scale, font_thickness)[0]

        # Draw a filled name box below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                      name_box_color, cv2.FILLED)

        # Draw the name text
        cv2.putText(frame, name, (left + 70, bottom - 6), font, font_scale, text_color, font_thickness)

    # Toggle so that only every other frame is fully processed
    process_this_frame = not process_this_frame

    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

    return frame, process_this_frame, face_locations, faces, face_names, score


def convert_distance_to_percentage(distance, threshold):
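    """Map a face_distance value to a match-confidence percentage.

    Distances below `threshold` fall in the 80-100% band; distances at or above
    it map to (1 - distance) * 100. The in-band mapping assumes that a smaller
    distance should score higher.

    Examples (threshold = 0.45):
        convert_distance_to_percentage(0.30, 0.45) -> ~86.7
        convert_distance_to_percentage(0.60, 0.45) -> 40.0
    """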
    if distance < threshold:
        # Below the threshold: map linearly into the 80-100% band
        # (distance == threshold -> 80, distance == 0 -> 100)
        score = 80 + (1 - distance / threshold) * 20
    else:
        score = (1 - distance) * 100
    return score

# percent = convert_distance_to_percentage(avg_score, 0.45)
# print(f'avg_score = {percent:.2f}% : Approved!')

# # Release handle to the webcam
# video_capture.release()
# cv2.destroyAllWindows()
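

# --- Usage sketch (an assumption, not wiring that exists in this file) ---
# A minimal example of driving process_frame() from a webcam loop, mirroring the
# commented-out capture code above. It assumes an HKID photo is available at the
# "image" path hard-coded inside process_frame(), and that the RGB/BGR conversion
# at the end of process_frame() suits your capture source.
if __name__ == "__main__":
    video_capture = cv2.VideoCapture(0)
    process_this_frame = True
    face_locations, face_names, score = [], [], []
    faces = 0

    while True:
        ret, frame = video_capture.read()
        if not ret:
            break

        frame, process_this_frame, face_locations, faces, face_names, score = process_frame(
            frame, process_this_frame, face_locations, faces, face_names, score)

        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()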