# ped_detect_2.py
from collections import deque
import numpy as np
import imutils
import cv2
import datetime
from imutils.object_detection import non_max_suppression
import gehl_detect as gd
import json
# Same as process_video.py, but also uses alternate Haar-cascade classifiers.
# Should be extended to run different classifiers for different features and
# find their overlap with non-maxima suppression (NMS); see the
# merge_cascade_detections sketch below.
# Macintosh:opencv-haar-classifier-training marioag$ opencv_traincascade -data classifier -vec samples.vec -bg negatives.txt -numStages 20 -minHitRate 0.999 -maxFalseAlarmRate 0.5 -numPos 1000 -numNeg 600 -w 80 -h 40 -mode ALL -precalcValBufSize 1024 -precalcIdxBufSize 1024
# images = "/Users/Mario/Desktop/originalPics/2002/07/28/big/"
# imgs = gd.get_jpgs(images)
# print imgs
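# The extension described above could pool boxes from several classifiers and
# let NMS collapse the overlaps. A minimal sketch (hypothetical helper, not
# part of the original pipeline):
def merge_cascade_detections(gray, cascades, overlap_thresh=0.65):
    # run every cascade on the same grayscale frame and pool the boxes
    boxes = []
    for cascade in cascades:
        for (x, y, w, h) in cascade.detectMultiScale(gray, 1.3, 5):
            boxes.append([x, y, x + w, y + h])
    if not boxes:
        return np.empty((0, 4), dtype=int)
    # suppress heavily overlapping boxes across all classifiers at once
    return non_max_suppression(np.array(boxes), probs=None, overlapThresh=overlap_thresh)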
Lear_cascade = cv2.CascadeClassifier('/Users/marioag/Documents/GitHub/gehl-detect/ocv2/opencv/data/haarcascades/haarcascade_fullbody.xml')
face_cascade = cv2.CascadeClassifier('/Users/marioag/Documents/GitHub/gehl-detect/ocv2/opencv/data/haarcascades/haarcascade_profileface.xml')
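# NOTE: Lear_cascade loads the full-body model but is not used below;
# face_cascade (the profile-face model) is the one actually applied.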
# bod_cascade = cv2.CascadeClassifier("/Users/Mario/Documents/mit-github-projects/gehl/Gehl/opencv/data/lbpcascades/lbpcascade_frontalface.xml")
# read a video from file
vidPath = "examples/ref_video.MP4"
video = cv2.VideoCapture(vidPath)
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
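# the default people detector is OpenCV's pretrained HOG + linear SVM model
# (Dalal-Triggs, 64x128 detection window)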
# dictionary that will hold timestamp: list of [lx, ly, ux, uy] boxes
coords = {}
# dictionary to hold the number of nms boxes per timestamp
count = {}
j = 0  # index for the output frame filenames
start = datetime.datetime.now()  # reference time, taken once so timestamps accumulate
while True:
    # elapsed seconds since processing began, used as this frame's timestamp
    tstamp = str((datetime.datetime.now() - start).total_seconds())
    # grab the current frame; stop when the video is exhausted
    (grabbed, frame) = video.read()
    if not grabbed:
        break
    # resize the frame to speed up detection
    image = imutils.resize(frame, width=min(800, frame.shape[1]))
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # run the cascade (despite the variable name, this is the profile-face classifier)
    legs = face_cascade.detectMultiScale(gray, 1.3, 5)
    print(len(legs))  # debug: number of cascade detections this frame
    for (x, y, w, h) in legs:
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = image[y:y + h, x:x + w]
    # detect people in the image with the HOG + linear SVM detector
    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4), padding=(2, 2), scale=1.5)
    for (x, y, w, h) in rects:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # apply non-maxima suppression to the bounding boxes using a
    # fairly large overlap threshold to try to maintain overlapping
    # boxes that are still people
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
    for (xA, yA, xB, yB) in pick:
        # plain ints so the coordinates stay JSON-serializable
        nRect = [int(xA), int(yA), int(xB), int(yB)]
        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
        # cnts = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        # record the bounding box under its timestamp; a list per timestamp
        # keeps every detection in the frame instead of overwriting
        coords.setdefault(tstamp, []).append(nRect)
        # print "lower center point X is: ", xA, "lower center point Y is: ", (yB + yA) / 2
    # record the number of nms boxes for this timestamp
    count[tstamp] = len(pick)
    print(count)
    # write the annotated frame to disk
    cv2.imwrite('/Users/marioag/Documents/GitHub/gehl-detect/out/find_features/vid_from_file/image{}.png'.format(j), image)
    j += 1
    # cv2.imshow('img', image)
    # cv2.waitKey(1)
# cv2.destroyAllWindows()
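video.release()

# json is imported above but the collected dictionaries are never written out.
# A minimal sketch of persisting them; the output filenames are assumptions
# for illustration, not paths from the original project.
with open('out/coords.json', 'w') as f:  # assumed output path
    json.dump(coords, f, indent=2)
with open('out/count.json', 'w') as f:  # assumed output path
    json.dump(count, f, indent=2)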