-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
190 lines (147 loc) · 6.54 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
from parameters import *
from scipy.spatial import distance
from imutils import face_utils as face
from pygame import mixer
import imutils
import time
import dlib
import cv2
# Some supporting functions for facial processing
def get_max_area_rect(rects):
    """Return the detection rectangle with the largest area, or None.

    rects: sequence of dlib rectangles (anything exposing an ``.area()``
        method), as returned by a dlib face detector.

    Returns None when no faces were detected so the caller can branch on
    "no face in this frame".
    """
    if not rects:
        return None
    # max() with a key selects the largest face directly, instead of
    # building a parallel list of areas and re-indexing into rects.
    # Ties resolve to the first maximum, matching the old index(max(...)).
    return max(rects, key=lambda rect: rect.area())
def get_eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) from six eye landmarks.

    eye: sequence of six (x, y) landmark coordinates in the dlib 68-point
        ordering — indices 0 and 3 are the horizontal eye corners, pairs
        (1, 5) and (2, 4) are vertically opposed lid points.

    A small EAR means the lids are close together (eye nearly closed).
    """
    # Two vertical lid openings, normalized by twice the eye width.
    lid_gap_a = distance.euclidean(eye[1], eye[5])
    lid_gap_b = distance.euclidean(eye[2], eye[4])
    eye_width = distance.euclidean(eye[0], eye[3])
    return (lid_gap_a + lid_gap_b) / (eye_width * 2)
def get_mouth_aspect_ratio(mouth):
    """Compute the mouth aspect ratio (MAR) from eight inner-lip landmarks.

    mouth: sequence of eight (x, y) inner-lip coordinates; indices 0 and 4
        are the mouth corners, pairs (1, 7), (2, 6) and (3, 5) are
        vertically opposed lip points.

    A large MAR means a wide-open mouth (yawning).
    """
    mouth_width = distance.euclidean(mouth[0], mouth[4])
    # Sum the three vertical openings between opposed inner-lip points.
    opening = sum(
        distance.euclidean(mouth[i], mouth[8 - i]) for i in (1, 2, 3)
    )
    return opening / (mouth_width * 3)
# Facial processing
def facial_processing(path_to_video=r"C:\Users\urjit\Downloads\urjit_speed.mp4"):
    """Run drowsiness/distraction detection over a video file.

    Tracks the largest detected face per frame, computes the eye aspect
    ratio (EAR) and mouth aspect ratio (MAR), and plays an audible alarm
    when the driver looks sleepy (EAR below threshold), yawns (MAR above
    threshold), or looks away (no face detected) for longer than the
    intervals configured in parameters.py.

    path_to_video: source video path. Previously hard-coded inside the
        function body; kept as the default so existing callers behave
        identically.

    Returns:
        None. Side effects: shows an OpenCV preview window, plays alarms
        via pygame.mixer, and prints the per-frame class labels
        (0 = alert, 1 = drowsy/distracted) when the video ends or the
        user presses 'q'.
    """
    output_labels = []  # exactly one 0/1 class label per processed frame
    mixer.init()

    # Per-condition state: each *_initialized flag marks that the condition
    # is currently being timed; the matching *_start_time holds its onset.
    distraction_initialized = False
    eye_initialized = False
    mouth_initialized = False

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(shape_predictor_path)

    # Landmark index ranges for both eyes in the 68-point model.
    ls, le = face.FACIAL_LANDMARKS_IDXS["left_eye"]
    rs, re = face.FACIAL_LANDMARKS_IDXS["right_eye"]

    cap = cv2.VideoCapture(path_to_video)

    fps_counter = 0
    fps_to_display = 'initializing...'
    fps_timer = time.time()

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.resize(frame, (640, 480))
        frame_class = 0  # assume "alert" until a condition fires this frame
        fps_counter += 1
        frame = cv2.flip(frame, flipCode=1)

        # Refresh the on-screen FPS figure once per second.
        if time.time() - fps_timer >= 1.0:
            fps_to_display = fps_counter
            fps_timer = time.time()
            fps_counter = 0
        cv2.putText(frame, "FPS :" + str(fps_to_display),
                    (frame.shape[1] - 100, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 0)
        rect = get_max_area_rect(rects)

        if rect is not None:
            # A face is visible, so the driver is not distracted.
            distraction_initialized = False

            shape = predictor(gray, rect)
            shape = face.shape_to_np(shape)

            leftEye = shape[ls:le]
            rightEye = shape[rs:re]
            leftEAR = get_eye_aspect_ratio(leftEye)
            rightEAR = get_eye_aspect_ratio(rightEye)
            inner_lips = shape[60:68]  # inner-lip landmarks in the 68-point model
            mar = get_mouth_aspect_ratio(inner_lips)
            eye_aspect_ratio = (leftEAR + rightEAR) / 2.0

            # Visualize the tracked eye and lip contours.
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (255, 255, 255), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (255, 255, 255), 1)
            lipHull = cv2.convexHull(inner_lips)
            cv2.drawContours(frame, [lipHull], -1, (255, 255, 255), 1)
            cv2.putText(frame,
                        "EAR: {:.2f} MAR{:.2f}".format(eye_aspect_ratio, mar),
                        (10, frame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            if ((eye_aspect_ratio < EYE_DROWSINESS_THRESHOLD)
                    or (mar > MOUTH_DROWSINESS_THRESHOLD)):
                frame_class = 1

            # --- Sleepy eyes: alarm after a sustained low EAR ---
            if eye_aspect_ratio < EYE_DROWSINESS_THRESHOLD:
                if not eye_initialized:
                    eye_start_time = time.time()
                    eye_initialized = True
                if time.time() - eye_start_time >= EYE_DROWSINESS_INTERVAL:
                    alarm_type = 0
                    cv2.putText(frame, "YOU ARE SLEEPY...\nPLEASE TAKE A BREAK!",
                                (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (255, 255, 255), 2)
                    if (not distraction_initialized and not mouth_initialized
                            and not mixer.music.get_busy()):
                        mixer.music.load(alarm_paths[alarm_type])
                        mixer.music.play()
            else:
                eye_initialized = False
                # Stop the alarm only if no other condition still owns it.
                if (not distraction_initialized and not mouth_initialized
                        and mixer.music.get_busy()):
                    mixer.music.stop()

            # --- Yawning: alarm after a sustained high MAR ---
            if mar > MOUTH_DROWSINESS_THRESHOLD:
                if not mouth_initialized:
                    mouth_start_time = time.time()
                    mouth_initialized = True
                if time.time() - mouth_start_time >= MOUTH_DROWSINESS_INTERVAL:
                    alarm_type = 0
                    # BUG FIX: the old extra output_labels.append(1) here
                    # produced more labels than frames; frame_class is
                    # already 1 and is appended exactly once per frame below.
                    cv2.putText(frame, "YOU ARE YAWNING...\nDO YOU NEED A BREAK?",
                                (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (255, 255, 255), 2)
                    if not mixer.music.get_busy():
                        mixer.music.load(alarm_paths[alarm_type])
                        mixer.music.play()
            else:
                mouth_initialized = False
                if (not distraction_initialized and not eye_initialized
                        and mixer.music.get_busy()):
                    mixer.music.stop()
        else:
            # --- No face detected: driver looking away from the road ---
            frame_class = 1
            alarm_type = 1
            if not distraction_initialized:
                distraction_start_time = time.time()
                distraction_initialized = True
            if time.time() - distraction_start_time > DISTRACTION_INTERVAL:
                # BUG FIX: removed the duplicate output_labels.append(1)
                # (frame_class is appended once per frame below).
                cv2.putText(frame, "EYES ON ROAD", (10, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                if (not eye_initialized and not mouth_initialized
                        and not mixer.music.get_busy()):
                    mixer.music.load(alarm_paths[alarm_type])
                    mixer.music.play()

        cv2.imshow("Frame", frame)
        output_labels.append(frame_class)  # single label per frame

        key = cv2.waitKey(5) & 0xFF
        if key == ord("q"):
            break

    print(output_labels)
    print(len(output_labels))
    cv2.destroyAllWindows()
    cap.release()
if __name__ == '__main__':
    # Entry point: run the full detection pipeline on the default video.
    # facial_processing() prints its own label summary and returns None.
    facial_processing()