Commit a7fef19

updating files
2 parents 51d7cd2 + c746492 commit a7fef19

File tree

5 files changed: +138 -44 lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+*.mp4
+*.avi

README.md

Lines changed: 28 additions & 6 deletions
@@ -1,14 +1,28 @@
 # Yolov4-opencv-python

-yolov4 object detection using opencv python, its simplest way to run inference on yolo
+Yolov4 object detection using OpenCV-Python; it's the simplest way to run inference on YOLO V4.

-implementation detail available on [_**Darknet**_](https://github.com/pjreddie/darknet)
+## YoloV4 Object Detector Demo Video
+
+https://user-images.githubusercontent.com/66181793/122593546-41e6c980-d07f-11eb-8e18-bcc63d550183.mp4

 ---

-## Installation you need opencv-contrib-python
+Implementation details are available on [_**Darknet**_](https://github.com/pjreddie/darknet)
+
+All the frozen-inference graphs (pre-trained networks) are available on Darknet.
+
+---
+
+## TODO
+
+- [x] [**YoloV4 object detector with opencv-python implementation**](https://youtu.be/1aL6tewfxFY)
+
+- [ ] I will create distance estimation using YOLO object detection; it's coming up soon, In sha Allah 😊

-[opencv contrib](https://pypi.org/project/opencv-contrib-python/)
+## Installation
+
+You need [**Opencv Contrib**](https://pypi.org/project/opencv-contrib-python/)

 --> **windows**

@@ -18,9 +32,17 @@ implementation detail available on [_**Darknet**_](https://github.com/pjreddie/d

 `pip3 install opencv-contrib-python`

-then just clone this repository and you are good to go.
+### Clone this repo
+
+`git clone https://github.com/Asadullah-Dal17/yolov4-opencv-python`
+
+Guess what, you are done; that was not hard, was it? 😉
+
+I have used tiny weights; check out the [_Darknet_](https://github.com/pjreddie/darknet) GitHub for more.
+
+Check out my complete YouTube [**Video Tutorial**](https://youtu.be/1aL6tewfxFY) on Yolov4 object detection.
+

-I have use tiny weights, check out more on darknet github for more

 - [x] I will create distance estimation using YOLO object detection; it's coming up soon, In sha Allah 😊
 Project Repository [Yolov4 Distance Estimation](https://github.com/Asadullah-Dal17/Yolov4-Detector-and-Distance-Estimator)
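
Note: the install and clone steps above can be sanity-checked before running the scripts. A minimal sketch (not part of this commit; the file names are the ones the scripts below expect):

```python
# check_setup.py -- hypothetical helper, not included in this repository
import os
import cv2 as cv

print("OpenCV version:", cv.__version__)

# Both scripts in this commit read these files from the working directory.
for required in ("yolov4-tiny.weights", "yolov4-tiny.cfg", "classes.txt"):
    print(required, "found" if os.path.exists(required) else "MISSING")

# The scripts request the CUDA backend; on CPU-only opencv-contrib-python
# builds this count is 0 and OpenCV normally falls back to the CPU path.
try:
    print("CUDA devices:", cv.cuda.getCudaEnabledDeviceCount())
except AttributeError:
    print("cv.cuda module not available in this OpenCV build")
```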

output.avi

16.7 MB
Binary file not shown.

yolov4.py

Lines changed: 39 additions & 38 deletions
@@ -1,44 +1,45 @@
-import cv2
+import cv2 as cv
 import time
-
-CONFIDENCE_THRESHOLD = 0.2
-NMS_THRESHOLD = 0.4
-COLORS = [(0, 255, 255), (255, 255, 0), (0, 255, 0), (255, 0, 0)]
-
-class_names = []
-with open("classes.txt", "r") as f:
-    class_names = [cname.strip() for cname in f.readlines()]
-
-vc = cv2.VideoCapture(0)
-
-net = cv2.dnn.readNet("yolov4-tiny.weights", "yolov4-tiny.cfg")
-net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
-net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA_FP16)
-
-model = cv2.dnn_DetectionModel(net)
+Conf_threshold = 0.4
+NMS_threshold = 0.4
+COLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0),
+          (255, 255, 0), (255, 0, 255), (0, 255, 255)]
+
+class_name = []
+with open('classes.txt', 'r') as f:
+    class_name = [cname.strip() for cname in f.readlines()]
+# print(class_name)
+net = cv.dnn.readNet('yolov4-tiny.weights', 'yolov4-tiny.cfg')
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
+net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA_FP16)
+
+model = cv.dnn_DetectionModel(net)
 model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)

-while cv2.waitKey(1) < 1:
-    (grabbed, frame) = vc.read()
-    if not grabbed:
-        exit()
-
-    start = time.time()
-    classes, scores, boxes = model.detect(
-        frame, CONFIDENCE_THRESHOLD, NMS_THRESHOLD)
-    end = time.time()

-    start_drawing = time.time()
+cap = cv.VideoCapture('output.avi')
+starting_time = time.time()
+frame_counter = 0
+while True:
+    ret, frame = cap.read()
+    frame_counter += 1
+    if ret == False:
+        break
+    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
     for (classid, score, box) in zip(classes, scores, boxes):
         color = COLORS[int(classid) % len(COLORS)]
-        label = "%s : %f" % (class_names[classid[0]], score)
-        cv2.rectangle(frame, box, color, 2)
-        cv2.putText(frame, label, (box[0], box[1] - 10),
-                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
-    end_drawing = time.time()
-
-    fps_label = "FPS: %.2f (excluding drawing time of %.2fms)" % (
-        1 / (end - start), (end_drawing - start_drawing) * 1000)
-    cv2.putText(frame, fps_label, (0, 25),
-                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
-    cv2.imshow("detections", frame)
+        label = "%s : %f" % (class_name[classid[0]], score)
+        cv.rectangle(frame, box, color, 1)
+        cv.putText(frame, label, (box[0], box[1]-10),
+                   cv.FONT_HERSHEY_COMPLEX, 0.3, color, 1)
+    endingTime = time.time() - starting_time
+    fps = frame_counter/endingTime
+    # print(fps)
+    cv.putText(frame, f'FPS: {fps}', (20, 50),
+               cv.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2)
+    cv.imshow('frame', frame)
+    key = cv.waitKey(1)
+    if key == ord('q'):
+        break
+cap.release()
+cv.destroyAllWindows()
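
Note: the rewritten yolov4.py pins the DNN backend and target to CUDA. On a machine without a CUDA-enabled OpenCV build, roughly the same setup with the plain CPU backend looks like this (a sketch, not part of the commit; thresholds and file names mirror the script above):

```python
import cv2 as cv

# Same yolov4-tiny model setup as yolov4.py, but on the default CPU backend.
net = cv.dnn.readNet('yolov4-tiny.weights', 'yolov4-tiny.cfg')
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

model = cv.dnn_DetectionModel(net)
# 416x416 input, pixel values scaled to [0, 1], BGR swapped to RGB.
model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)

# Per frame, model.detect returns class ids, confidences and boxes:
# classes, scores, boxes = model.detect(frame, 0.4, 0.4)
```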

yolov4_Recording.py

Lines changed: 69 additions & 0 deletions
@@ -0,0 +1,69 @@
+import cv2 as cv
+import time
+Conf_threshold = 0.6
+NMS_threshold = 0.4
+COLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0),
+          (255, 255, 0), (255, 0, 255), (0, 255, 255)]
+
+class_name = []
+with open('classes.txt', 'r') as f:
+    class_name = [cname.strip() for cname in f.readlines()]
+# print(class_name)
+net = cv.dnn.readNet('yolov4-tiny.weights', 'yolov4-tiny.cfg')
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
+net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA_FP16)
+
+model = cv.dnn_DetectionModel(net)
+model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)
+
+
+cap = cv.VideoCapture('pexels-alex-pelsh-6896028.mp4')
+frame_width = cap.get(cv.CAP_PROP_FRAME_WIDTH)
+frame_height = cap.get(cv.CAP_PROP_FRAME_HEIGHT)
+
+fourcc = cv.VideoWriter_fourcc('M', 'J', 'P', 'G')
+# cap.set(cv.CAP_PROP_FPS, 7)
+dim = (int(frame_width/4), int(frame_height/4))
+print(dim)
+out = cv.VideoWriter('OutputVideo3.avi', fourcc, 30.0, dim)
+starting_time = time.time()
+frame_counter = 0
+while True:
+    ret, frame = cap.read()
+
+    frame_counter += 1
+    if ret == False:
+        break
+
+    # if frame_counter == 100:
+    #     break
+
+    frame = cv.resize(frame, dim, interpolation=cv.INTER_AREA)
+    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
+    for (classid, score, box) in zip(classes, scores, boxes):
+        color = COLORS[int(classid) % len(COLORS)]
+        label = "%s : %f" % (class_name[classid[0]], score)
+        cv.rectangle(frame, box, color, 1)
+        # cv.line(frame, (box[0]-3, box[1]-15),
+        #         (box[0]+110, box[1]-15), (0, 0, 0), 15)
+        cv.rectangle(frame, (box[0]-2, box[1]-20),
+                     (box[0]+120, box[1]-4), (100, 130, 100), -1)
+        cv.putText(frame, label, (box[0], box[1]-10),
+                   cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1)
+    endingTime = time.time() - starting_time
+    fps = frame_counter/endingTime
+    # print(fps)
+    cv.line(frame, (18, 43), (140, 43), (0, 0, 0), 27)
+    cv.putText(frame, f'FPS: {round(fps,2)}', (20, 50),
+               cv.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 255), 2)
+    cv.imshow('frame', frame)
+
+    out.write(frame)
+    key = cv.waitKey(1)
+    if key == ord('q'):
+        break
+out.release()
+
+cap.release()
+cv.destroyAllWindows()
+print('done')
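
Note: yolov4_Recording.py hard-codes the writer FPS to 30.0 and quarter-scales each frame; cv.VideoWriter silently produces an unplayable file if the frames passed to write() do not match the (width, height) given at construction. A small sketch (same input clip as above; reading the FPS from the source is an assumption, not what the committed script does):

```python
import cv2 as cv

cap = cv.VideoCapture('pexels-alex-pelsh-6896028.mp4')  # same clip as the script

# Derive the writer parameters from the source instead of hard-coding them.
src_fps = cap.get(cv.CAP_PROP_FPS) or 30.0      # some containers report 0; fall back
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)) // 4
height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) // 4

fourcc = cv.VideoWriter_fourcc(*'MJPG')
out = cv.VideoWriter('OutputVideo3.avi', fourcc, src_fps, (width, height))

# Every frame handed to out.write() must already be resized to (width, height),
# which is what the cv.resize(frame, dim, ...) call in the script guarantees.
```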
