Skip to content

Commit e08d04a

Browse files
YacobBYarpit1997
authored andcommitted
OpenCV3.4 Python update (#40)
Updated python code for OpenCV3.4, and readme file for clearer instructions.
1 parent fa03db4 commit e08d04a

File tree

3 files changed

+168
-155
lines changed

3 files changed

+168
-155
lines changed

README.md

+12-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ _In this project we have worked on the problem of human detection,face detection
1515
`sudo apt-get install python3`
1616
* **python libraries:**
1717
Here is a list of all the python dependencies
18-
* Python Image Library (PIL)
18+
* Python Imaging Library (Pillow)
1919
* Imutils
2020
* numpy
2121

@@ -38,6 +38,15 @@ _In this project we have worked on the problem of human detection,face detection
3838
* `video/` : This directory contains some of the videos that we used while testing.
3939

4040
## Installation
41+
42+
## Python
43+
Don't forget to install the necessary libraries described in the install paragraph above.
44+
45+
First you need to run the create_face_model.py file, which uses the images in /data to create a .yaml model file.
46+
* In the project folder run
47+
```sh
48+
python create_face_model.py
49+
```
4150
* To run the python version of the code you have to put all the input videos in one folder and then provide the path of that folder as command line argument:
4251
```sh
4352
python3 main.py /path/to/input/videos/
@@ -46,6 +55,8 @@ Example- for our directory structure it is:
4655
```sh
4756
python3 main.py /video
4857
```
58+
59+
## C++
4960
* To compile the C++ version of the code with openCV the command is:
5061
```sh
5162
g++ -ggdb `pkg-config --cflags opencv` -o `basename name_of_file.cpp .cpp` name_of_file.cpp `pkg-config --libs opencv`

create_face_model.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010
cascadePath = "face_cascades/haarcascade_profileface.xml"
1111
faceCascade = cv2.CascadeClassifier(cascadePath)
12-
recognizer = cv2.face.createLBPHFaceRecognizer()
12+
recognizer = cv2.face.LBPHFaceRecognizer_create()
1313

1414

1515
def get_images_and_labels(path):
@@ -59,4 +59,4 @@ def get_images_and_labels(path):
5959
"""
6060
save the trained data to cont.yaml file
6161
"""
62-
recognizer.save("cont.yaml")
62+
recognizer.save("model.yaml")

main.py

+154-152
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,10 @@
1-
import argparse
import glob
import os
import time

import cv2
import imutils
import numpy as np
from imutils.object_detection import non_max_suppression
89

910
subject_label = 1
@@ -13,171 +14,172 @@
1314
face_cascade = cv2.CascadeClassifier(cascade_path)
1415
hog = cv2.HOGDescriptor()
1516
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
16-
recognizer = cv2.face.createLBPHFaceRecognizer()
17-
count=0
17+
recognizer = cv2.face.LBPHFaceRecognizer_create()
18+
count = 0
1819

19-
def detect_people(frame):
20-
"""
21-
detect humans using HOG descriptor
22-
Args:
23-
frame:
24-
Returns:
25-
processed frame
26-
"""
27-
(rects, weights) = hog.detectMultiScale(frame, winStride=(8, 8), padding=(16, 16), scale=1.06)
28-
rects = non_max_suppression(rects, probs=None, overlapThresh=0.65)
29-
for (x, y, w, h) in rects:
30-
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
31-
return frame
3220

21+
def detect_people(frame):
    """
    Detect humans in a frame using OpenCV's HOG + linear-SVM people detector.

    Args:
        frame: BGR image (numpy array) to scan.

    Returns:
        The same frame with a red rectangle drawn around each detected person.
    """
    (rects, weights) = hog.detectMultiScale(frame, winStride=(8, 8), padding=(16, 16), scale=1.06)
    # non_max_suppression expects corner boxes (x1, y1, x2, y2), but the HOG
    # detector returns (x, y, w, h); convert before suppressing overlaps,
    # otherwise the overlap areas are computed on the wrong coordinates.
    boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    picks = non_max_suppression(boxes, probs=None, overlapThresh=0.65)
    for (x_start, y_start, x_end, y_end) in picks:
        cv2.rectangle(frame, (x_start, y_start), (x_end, y_end), (0, 0, 255), 2)
    return frame
3334

3435

3536
def detect_face(frame):
    """
    Locate human faces in a frame with the Haar-cascade classifier.

    Args:
        frame: image to search (the caller passes a grayscale frame).

    Returns:
        Sequence of (x, y, w, h) bounding boxes, one per detected face.
    """
    detections = face_cascade.detectMultiScale(
        frame, scaleFactor=1.1, minNeighbors=2, flags=0, minSize=(20, 20)
    )
    return detections
4546

4647

4748
def recognize_face(frame_orginal, faces):
    """
    Recognize the given faces using the trained LBPH face recognizer.

    Args:
        frame_orginal: BGR frame the faces were detected in.
        faces: iterable of (x, y, w, h) face bounding boxes.

    Returns:
        List of predicted label ids, one per face.
    """
    predict_label = []
    for x, y, w, h in faces:
        # LBPH operates on grayscale images, so crop and convert the face region.
        face_gray = cv2.cvtColor(frame_orginal[y: y + h, x: x + w], cv2.COLOR_BGR2GRAY)
        cv2.imshow("cropped", face_gray)
        label, confidence = recognizer.predict(face_gray)
        predict_label.append(label)
        # Fixed typo in the log message ("Predition" -> "Prediction").
        print("Prediction label, confidence: " + str((label, confidence)))
    return predict_label
6768

6869

6970
def draw_faces(frame, faces):
    """
    Outline every detected face on the frame with a green rectangle.

    Args:
        frame: image to draw on (modified in place).
        faces: iterable of (x, y, w, h) bounding boxes.

    Returns:
        The frame with the rectangles drawn.
    """
    for (x, y, w, h) in faces:
        top_left = (x, y)
        bottom_right = (x + w, y + h)
        cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
    return frame
8586

8687

8788
def put_label_on_face(frame, faces, labels):
    """
    Write each face's predicted label at the top-left corner of its box.

    Args:
        frame: image to annotate (modified in place).
        faces: iterable of (x, y, w, h) face boxes.
        labels: predicted label ids, aligned index-for-index with faces.

    Returns:
        The annotated frame.
    """
    # `font` is a module-level setting defined elsewhere in this file.
    for idx, (x, y, w, h) in enumerate(faces):
        cv2.putText(frame, str(labels[idx]), (x, y), font, 1, (255, 255, 255), 2)
    return frame
102104

103105
def background_subtraction(previous_frame, frame_resized_grayscale, min_area):
    """
    Decide whether the current frame changed enough from the previous one to
    be worth running the expensive human/face detection and recognition on.

    Only frames with a motion contour larger than min_area are processed
    further; all other frames are skipped by the caller.

    Args:
        previous_frame: previous grayscale frame.
        frame_resized_grayscale: current grayscale frame.
        min_area: minimum contour area that counts as significant change.

    Returns:
        1 if any motion contour exceeds min_area, else 0.
    """
    frameDelta = cv2.absdiff(previous_frame, frame_resized_grayscale)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    # cv2.findContours returns 3 values on OpenCV 3.x but 2 on 4.x; unpack in
    # a version-agnostic way so the script survives an OpenCV upgrade instead
    # of crashing on the hard-coded 3-tuple unpack.
    contours_result = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = contours_result[0] if len(contours_result) == 2 else contours_result[1]
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) > min_area:
            return 1
    return 0
123126

124127
if __name__ == '__main__':
    """
    main function
    """
    # Parse the directory of input videos from the command line.
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--videos", required=True, help="path to videos directory")
    args = vars(ap.parse_args())
    path = args["videos"]
    for f in os.listdir(path):
        # Glob on each directory entry; list_of_videos holds matching paths.
        list_of_videos = glob.glob(os.path.join(os.path.abspath(path), f))
        print(os.path.join(os.path.abspath(path), f) + "*.mp4")
        print(list_of_videos)
        if os.path.exists("model.yaml"):
            # Load the LBPH model trained by create_face_model.py.
            recognizer.read("model.yaml")
            for video in list_of_videos:
                camera = cv2.VideoCapture(os.path.join(path, video))
                # Prime the loop with the first frame so background
                # subtraction always has a "previous" frame to diff against.
                grabbed, frame = camera.read()
                print(frame.shape)
                frame_resized = imutils.resize(frame, width=min(800, frame.shape[1]))
                frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
                print(frame_resized.shape)

                # defining min cuoff area
                min_area = (3000 / 800) * frame_resized.shape[1]

                while True:
                    starttime = time.time()
                    previous_frame = frame_resized_grayscale
                    grabbed, frame = camera.read()
                    if not grabbed:
                        # End of video stream.
                        break
                    frame_resized = imutils.resize(frame, width=min(800, frame.shape[1]))
                    frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
                    # Only run the expensive detectors on frames with motion.
                    temp = background_subtraction(previous_frame, frame_resized_grayscale, min_area)
                    if temp == 1:
                        frame_processed = detect_people(frame_resized)
                        faces = detect_face(frame_resized_grayscale)
                        if len(faces) > 0:
                            frame_processed = draw_faces(frame_processed, faces)
                            label = recognize_face(frame_resized, faces)
                            frame_processed = put_label_on_face(frame_processed, faces, label)

                        cv2.imshow("Detected Human and face", frame_processed)
                        key = cv2.waitKey(1) & 0xFF
                        if key == ord("q"):
                            break
                        endtime = time.time()
                        # NOTE(review): starttime - endtime is negative; the
                        # elapsed time is endtime - starttime — confirm intent.
                        print("Time to process a frame: " + str(starttime - endtime))
                    else:
                        # Skipped frame counter is the module-level `count`.
                        count = count + 1
                        print("Number of frame skipped in the video= " + str(count))

                # NOTE(review): destroyAllWindows runs after every video, not
                # once at the end — presumably intentional per-video teardown.
                camera.release()
                cv2.destroyAllWindows()

        else:
            print("model file not found")
            list_of_videos = []

0 commit comments

Comments
 (0)