Tohidichi commited on
Commit
5c9a8e7
·
verified ·
1 Parent(s): eeb32c9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -0
app.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
import dlib
import numpy as np
from math import hypot
import time
from scipy.spatial.distance import euclidean

# Haar-cascade alternative, kept for reference (unused):
# face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# dlib's HOG-based frontal face detector.
detector = dlib.get_frontal_face_detector()

# 68-point facial landmark model; fetch with:
# wget -nd https://github.com/JeffTrain/selfie/raw/master/shape_predictor_68_face_landmarks.dat
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# Liveness-check state: blinks observed so far and the time budget in seconds.
blink_count = 0
time_limit = 7
def mid_point(p1, p2):
    """Return the integer (x, y) midpoint of two dlib landmark points."""
    mx = (p1.x + p2.x) / 2
    my = (p1.y + p2.y) / 2
    return int(mx), int(my)
+
19
+ def get_ratio(eye_points, facial_landmarks):
20
+ # x-axis
21
+ left_point = (facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)
22
+ right_point = (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)
23
+ # y-axis
24
+ center_top = mid_point(facial_landmarks.part(eye_points[1]), facial_landmarks.part(eye_points[2]))
25
+ center_bottom = mid_point(facial_landmarks.part(eye_points[5]), facial_landmarks.part(eye_points[4]))
26
+
27
+ hor_line = cv2.line(frame, left_point, right_point, (0, 255, 0), 1)
28
+ ver_line = cv2.line(frame, center_top, center_bottom, (0, 255, 0), 1)
29
+
30
+ # hor_line_length = hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))
31
+ # ver_line_length = hypot((center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))
32
+
33
+ hor_line_length = euclidean(left_point, right_point)
34
+ ver_line_length = euclidean(center_top, center_bottom)
35
+
36
+ if ver_line_length == 0:
37
+ return 0
38
+
39
+ ratio = hor_line_length / ver_line_length
40
+ return ratio
41
+
42
+
43
+ cap = cv2.VideoCapture(0)
44
+
45
+ start = time.time()
46
+ while True:
47
+ _ , frame = cap.read()
48
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
49
+ faces = detector(gray)
50
+ # print(faces)
51
+
52
+ for face in faces:
53
+ x,y = face.left(), face.top()
54
+ x1, y1 = face.right() , face.bottom()
55
+ cv2.rectangle(frame, (x,y), (x1,y1), (0,0,255), 2)
56
+
57
+ landmarks = predictor(gray, face)
58
+
59
+ left_eye_ratio = get_ratio([36,37,38,39,40,41], landmarks)
60
+ right_eye_ratio = get_ratio([42,43,44,45,46,47],landmarks)
61
+ blink_ratio = left_eye_ratio + right_eye_ratio / 2
62
+ # print(blink_ratio)
63
+
64
+
65
+ if blink_ratio > 8.2 and blink_ratio < 9.3 :
66
+ cv2.putText(frame, "BLINKING..", (130,180), cv2.FONT_HERSHEY_PLAIN, 6, (255, 0, 0))
67
+ blink_count = blink_count + 1
68
+
69
+ blink_count
70
+ cv2.putText(frame, str(blink_count), (40,130), cv2.FONT_HERSHEY_PLAIN, 6, (255, 0, 0), 2)
71
+
72
+ if blink_count >= 2:
73
+ cv2.putText(frame, "HUMANNNN!!!!", (70,200), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
74
+ print("\n\n\n\n HUMAN !! \n\n\n\n\n")
75
+ break
76
+ if time.time() - start > time_limit:
77
+ cv2.putText(frame, "BOT!!!!", (70,200), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
78
+ print("\n\n\n\n BOT !!!!\n\n\n\n\n")
79
+ break
80
+
81
+ cv2.putText(frame, "BLINK 3-4 times", (40,150), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
82
+ cv2.imshow("frame", frame)
83
+ time_left = time.time() - start
84
+ print(f'TIME LEFT : {time_left:.2f} sec / 7 sec')
85
+
86
+ if cv2.waitKey(1) & 0xFF == ord('q'):
87
+ break
88
+
89
+ cap.release()
90
+ cv2.destroyAllWindows()