Spaces:
Runtime error
AkshatJain1402 committed
Commit • b4fa442
1 Parent(s): 073de03
first push
detect.py
ADDED
@@ -0,0 +1,9 @@
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)

while True:
    success, img = cap.read()
    cv.imshow("Video", img)
    cv.waitKey(1)
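
As committed, detect.py loops forever and never releases the webcam. A minimal sketch of a graceful-exit variant (the 'q'-to-quit handling and the cleanup calls are assumptions, not part of this commit):

import cv2 as cv

cap = cv.VideoCapture(0)

while True:
    success, img = cap.read()
    if not success:  # camera unavailable or stream ended
        break
    cv.imshow("Video", img)
    if cv.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
        break

cap.release()
cv.destroyAllWindows()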
dummy.py
ADDED
@@ -0,0 +1,165 @@
import numpy as np
from ultralytics import YOLO
import cv2
import cvzone
import math
from sort import *


# MongoDB connection URI with a default database (replace with your actual values)

# def connectMongo() -> pymongo.database.Database:
#     try:
#         try:
#             client = pymongo.MongoClient('mongodb+srv://INFINIX:INFINIX@cluster0.rubyoda.mongodb.net/?retryWrites=true&w=majority')
#             db = client["INFINIX"]
#             print(db)
#             if db is not None:
#                 print("connected to db")
#                 return db
#         except Exception as e:
#             print(e)
#     except Exception as e:
#         return "Error in Connecting to MongoDB" + str(e)

# db = connectMongo()
# collection = db['BUS_DETS']

# Create a collection to store entry count
# entry_count_collection = db.entry_count

cap = cv2.VideoCapture('TrialFootage.mp4')

model = YOLO("../Yolo-Weights/yolov8n.pt")
occupancy = 0
coming = 0
goin = 0
ListPeople = []
crossings = {}  # track id -> list of crossing flags (False = yellow line, True = red line)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(f"Video Resolution: {width}x{height}")
classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
              "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
              "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
              "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
              "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
              "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
              "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed",
              "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
              "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
              "teddy bear", "hair drier", "toothbrush"
              ]

# Tracking
tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3)

# Two vertical counting lines, each given as [x1, y1, x2, y2]
yellowLine = [270, 0, 270, 600]
RedLine = [173, 0, 173, 600]

totalCountUp = []
mask = cv2.imread('mask.jpg')

while True:
    success, img = cap.read()
    if not success:  # stop when the clip ends
        break
    # imgRegion = cv2.bitwise_and(img, mask)

    results = model(img, stream=True)

    detections = np.empty((0, 5))

    for r in results:
        boxes = r.boxes
        for box in boxes:
            # Bounding Box
            x1, y1, x2, y2 = box.xyxy[0]
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            # cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
            w, h = x2 - x1, y2 - y1

            # Confidence
            conf = math.ceil((box.conf[0] * 100)) / 100
            # Class Name
            cls = int(box.cls[0])
            currentClass = classNames[cls]

            if currentClass == "person" and conf > 0.3:
                # cvzone.putTextRect(img, f'{currentClass} {conf}', (max(0, x1), max(35, y1)),
                #                    scale=0.6, thickness=1, offset=3)
                # cvzone.cornerRect(img, (x1, y1, w, h), l=9, rt=5)
                currentArray = np.array([x1, y1, x2, y2, conf])
                detections = np.vstack((detections, currentArray))

    resultsTracker = tracker.update(detections)

    for result in resultsTracker:
        x1, y1, x2, y2, id = result
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        print(result)
        w, h = x2 - x1, y2 - y1
        cvzone.cornerRect(img, (x1, y1, w, h), l=9, rt=2, colorR=(255, 0, 255))
        cvzone.putTextRect(img, f' {int(id)}', (max(0, x1), max(35, y1)),
                           scale=2, thickness=3, offset=10)

        cx, cy = x1 + w // 2, y1 + h // 2
        cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)

        # Record at most one yellow-line crossing (flag False) per track id
        if yellowLine[0] - 20 < cx < yellowLine[2] + 20:
            if totalCountUp.count(id) == 0:
                totalCountUp.append(id)
                crossings[id] = [False]
                cv2.line(img, (yellowLine[0], yellowLine[1]), (yellowLine[2], yellowLine[3]), (0, 0, 255), 5)
            elif totalCountUp.count(id) == 1:
                if crossings[id].count(False) < 1:
                    crossings[id].append(False)
                    cv2.line(img, (yellowLine[0], yellowLine[1]), (yellowLine[2], yellowLine[3]), (0, 0, 255), 5)
        # Record at most one red-line crossing (flag True) per track id
        if RedLine[0] - 20 < cx < RedLine[2] + 30:
            if totalCountUp.count(id) == 0:
                totalCountUp.append(id)
                crossings[id] = [True]
                cv2.line(img, (RedLine[0], RedLine[1]), (RedLine[2], RedLine[3]), (0, 255, 200), 5)
            elif totalCountUp.count(id) == 1:
                if crossings[id].count(True) < 1:
                    crossings[id].append(True)
                    cv2.line(img, (RedLine[0], RedLine[1]), (RedLine[2], RedLine[3]), (0, 255, 200), 5)

    print(totalCountUp)
    entry_count = 0

    # A track whose flags read [False, True] crossed yellow then red (an entry);
    # [True, False] is the reverse direction (an exit).
    for flags in crossings.values():
        if len(flags) == 2:
            if flags[0] == True and flags[1] == False:
                if entry_count > 0:
                    entry_count -= 1
            if flags[0] == False and flags[1] == True:
                entry_count += 1

    print('count is ', entry_count)
    print(crossings)
    # # Update the MongoDB collection with the current count
    # entry_count_collection.update_one({}, {"$set": {"count": entry_count}}, upsert=True)
    # collection.update_one({"id": "your_document_id"}, {"$set": {"entry_count": entry_count}})

    cv2.putText(img, str(entry_count), (110, 245), cv2.FONT_HERSHEY_PLAIN, 5, (50, 50, 230), 7)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
    # collection.update_one({"id": "826587"},
    #                       {"$set": {
    #                           "entry_count": entry_count,
    #                       }})
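
The commented-out blocks in dummy.py sketch persisting entry_count to MongoDB. A minimal sketch of that idea with pymongo, assuming placeholder credentials and the collection names hinted at in the comments:

import pymongo

def connect_mongo(uri: str):
    """Return the INFINIX database; the URI is a placeholder to replace."""
    client = pymongo.MongoClient(uri)
    return client["INFINIX"]

# Hypothetical usage, following the commented-out code above:
# db = connect_mongo('mongodb+srv://<user>:<password>@cluster0.example.mongodb.net/?retryWrites=true&w=majority')
# entry_count_collection = db["entry_count"]
# Upsert a single document that always holds the latest count:
# entry_count_collection.update_one({}, {"$set": {"count": entry_count}}, upsert=True)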
requirements.txt
ADDED
Binary file (9.35 kB)
sort.py
ADDED
@@ -0,0 +1,342 @@
"""
SORT: A Simple, Online and Realtime Tracker
Copyright (C) 2016-2020 Alex Bewley alex@bewley.ai

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function

import os
import numpy as np
import matplotlib

matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io

import glob
import time
import argparse
from filterpy.kalman import KalmanFilter

np.random.seed(0)


def linear_assignment(cost_matrix):
    try:
        import lap
        _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
        return np.array([[y[i], i] for i in x if i >= 0])
    except ImportError:
        from scipy.optimize import linear_sum_assignment
        x, y = linear_sum_assignment(cost_matrix)
        return np.array(list(zip(x, y)))


def iou_batch(bb_test, bb_gt):
    """
    From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2]
    """
    bb_gt = np.expand_dims(bb_gt, 0)
    bb_test = np.expand_dims(bb_test, 1)

    xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0])
    yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])
    xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])
    yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])
    w = np.maximum(0., xx2 - xx1)
    h = np.maximum(0., yy2 - yy1)
    wh = w * h
    o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])
              + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)
    return o


def convert_bbox_to_z(bbox):
    """
    Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
    [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
    the aspect ratio
    """
    w = bbox[2] - bbox[0]
    h = bbox[3] - bbox[1]
    x = bbox[0] + w / 2.
    y = bbox[1] + h / 2.
    s = w * h  # scale is just area
    r = w / float(h)
    return np.array([x, y, s, r]).reshape((4, 1))


def convert_x_to_bbox(x, score=None):
    """
    Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
    [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
    """
    w = np.sqrt(x[2] * x[3])
    h = x[2] / w
    if score is None:
        return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2.]).reshape((1, 4))
    else:
        return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score]).reshape((1, 5))


class KalmanBoxTracker(object):
    """
    This class represents the internal state of individual tracked objects observed as bbox.
    """
    count = 0

    def __init__(self, bbox):
        """
        Initialises a tracker using initial bounding box.
        """
        # define constant velocity model
        self.kf = KalmanFilter(dim_x=7, dim_z=4)
        self.kf.F = np.array(
            [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0],
             [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]])
        self.kf.H = np.array(
            [[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]])

        self.kf.R[2:, 2:] *= 10.
        self.kf.P[4:, 4:] *= 1000.  # give high uncertainty to the unobservable initial velocities
        self.kf.P *= 10.
        self.kf.Q[-1, -1] *= 0.01
        self.kf.Q[4:, 4:] *= 0.01

        self.kf.x[:4] = convert_bbox_to_z(bbox)
        self.time_since_update = 0
        self.id = KalmanBoxTracker.count
        KalmanBoxTracker.count += 1
        self.history = []
        self.hits = 0
        self.hit_streak = 0
        self.age = 0

    def update(self, bbox):
        """
        Updates the state vector with observed bbox.
        """
        self.time_since_update = 0
        self.history = []
        self.hits += 1
        self.hit_streak += 1
        self.kf.update(convert_bbox_to_z(bbox))

    def predict(self):
        """
        Advances the state vector and returns the predicted bounding box estimate.
        """
        if (self.kf.x[6] + self.kf.x[2]) <= 0:
            self.kf.x[6] *= 0.0
        self.kf.predict()
        self.age += 1
        if self.time_since_update > 0:
            self.hit_streak = 0
        self.time_since_update += 1
        self.history.append(convert_x_to_bbox(self.kf.x))
        return self.history[-1]

    def get_state(self):
        """
        Returns the current bounding box estimate.
        """
        return convert_x_to_bbox(self.kf.x)


def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3):
    """
    Assigns detections to tracked objects (both represented as bounding boxes)

    Returns 3 lists of matches, unmatched_detections and unmatched_trackers
    """
    if len(trackers) == 0:
        return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)

    iou_matrix = iou_batch(detections, trackers)

    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            matched_indices = linear_assignment(-iou_matrix)
    else:
        matched_indices = np.empty(shape=(0, 2))

    unmatched_detections = []
    for d, det in enumerate(detections):
        if d not in matched_indices[:, 0]:
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if t not in matched_indices[:, 1]:
            unmatched_trackers.append(t)

    # filter out matched with low IOU
    matches = []
    for m in matched_indices:
        if iou_matrix[m[0], m[1]] < iou_threshold:
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1, 2))
    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)


class Sort(object):
    def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
        """
        Sets key parameters for SORT
        """
        self.max_age = max_age
        self.min_hits = min_hits
        self.iou_threshold = iou_threshold
        self.trackers = []
        self.frame_count = 0

    def update(self, dets=np.empty((0, 5))):
        """
        Params:
          dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
        Requires: this method must be called once for each frame even with empty detections
        (use np.empty((0, 5)) for frames without detections).
        Returns a similar array, where the last column is the object ID.

        NOTE: The number of objects returned may differ from the number of detections provided.
        """
        self.frame_count += 1
        # get predicted locations from existing trackers.
        trks = np.zeros((len(self.trackers), 5))
        to_del = []
        ret = []
        for t, trk in enumerate(trks):
            pos = self.trackers[t].predict()[0]
            trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
            if np.any(np.isnan(pos)):
                to_del.append(t)
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        for t in reversed(to_del):
            self.trackers.pop(t)
        matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, self.iou_threshold)

        # update matched trackers with assigned detections
        for m in matched:
            self.trackers[m[1]].update(dets[m[0], :])

        # create and initialise new trackers for unmatched detections
        for i in unmatched_dets:
            trk = KalmanBoxTracker(dets[i, :])
            self.trackers.append(trk)
        i = len(self.trackers)
        for trk in reversed(self.trackers):
            d = trk.get_state()[0]
            if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))  # +1 as MOT benchmark requires positive
            i -= 1
            # remove dead tracklet
            if trk.time_since_update > self.max_age:
                self.trackers.pop(i)
        if len(ret) > 0:
            return np.concatenate(ret)
        return np.empty((0, 5))


def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='SORT demo')
    parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',
                        action='store_true')
    parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data')
    parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train')
    parser.add_argument("--max_age",
                        help="Maximum number of frames to keep alive a track without associated detections.",
                        type=int, default=1)
    parser.add_argument("--min_hits",
                        help="Minimum number of associated detections before track is initialised.",
                        type=int, default=3)
    parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    # all train
    args = parse_args()
    display = args.display
    phase = args.phase
    total_time = 0.0
    total_frames = 0
    colours = np.random.rand(32, 3)  # used only for display
    if display:
        if not os.path.exists('mot_benchmark'):
            print(
                '\n\tERROR: mot_benchmark link not found!\n\n    Create a symbolic link to the MOT benchmark\n    (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n    $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
            exit()
        plt.ion()
        fig = plt.figure()
        ax1 = fig.add_subplot(111, aspect='equal')

    if not os.path.exists('output'):
        os.makedirs('output')
    pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt')
    for seq_dets_fn in glob.glob(pattern):
        mot_tracker = Sort(max_age=args.max_age,
                           min_hits=args.min_hits,
                           iou_threshold=args.iou_threshold)  # create instance of the SORT tracker
        seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
        seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0]

        with open(os.path.join('output', '%s.txt' % (seq)), 'w') as out_file:
            print("Processing %s." % (seq))
            for frame in range(int(seq_dets[:, 0].max())):
                frame += 1  # detection and frame numbers begin at 1
                dets = seq_dets[seq_dets[:, 0] == frame, 2:7]
                dets[:, 2:4] += dets[:, 0:2]  # convert [x1,y1,w,h] to [x1,y1,x2,y2]
                total_frames += 1

                if display:
                    fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg' % (frame))
                    im = io.imread(fn)
                    ax1.imshow(im)
                    plt.title(seq + ' Tracked Targets')

                start_time = time.time()
                trackers = mot_tracker.update(dets)
                cycle_time = time.time() - start_time
                total_time += cycle_time

                for d in trackers:
                    print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1' % (frame, d[4], d[0], d[1], d[2] - d[0], d[3] - d[1]),
                          file=out_file)
                    if display:
                        d = d.astype(np.int32)
                        ax1.add_patch(patches.Rectangle((d[0], d[1]), d[2] - d[0], d[3] - d[1], fill=False, lw=3,
                                                        ec=colours[d[4] % 32, :]))

                if display:
                    fig.canvas.flush_events()
                    plt.draw()
                    ax1.cla()

    print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (
        total_time, total_frames, total_frames / total_time))

    if display:
        print("Note: to get real runtime results run without the option: --display")
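
For reference, a minimal per-frame usage sketch of the Sort class above, as dummy.py uses it; the detections here are made-up [x1, y1, x2, y2, score] boxes, not output from a real detector:

import numpy as np
from sort import Sort

tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3)

# Two consecutive frames of illustrative detections.
frames = [
    np.array([[100., 80., 180., 260., 0.90]]),
    np.array([[104., 82., 184., 262., 0.88]]),
]
for dets in frames:
    # update() must be called once per frame, even when there are no
    # detections (pass np.empty((0, 5)) in that case).
    tracks = tracker.update(dets)
    for x1, y1, x2, y2, track_id in tracks:
        print(f"id={int(track_id)} box=({x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f})")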