AkshatJain1402 committed on
Commit
4e39fb3
1 Parent(s): f516a0f
Files changed (6)
  1. Yolo-Weights/yolov8n.pt +3 -0
  2. app.py +226 -0
  3. busHumanDetect.py +120 -0
  4. cam.py +9 -0
  5. requirements.txt +0 -0
  6. sort.py +342 -0
Yolo-Weights/yolov8n.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
+ size 6534387
app.py ADDED
@@ -0,0 +1,226 @@
+ import numpy as np
+ from ultralytics import YOLO
+ import cv2
+ import cvzone
+ import math
+ from sort import Sort
+ import datetime
+ import geocoder
+
+ # Get this device's coordinates (lat/long).
+ # def get_coordinates():
+ #     g = geocoder.ip('me')
+ #     if g.latlng:
+ #         return {'lat': str(g.latlng[0]), 'long': str(g.latlng[1])}
+ #     else:
+ #         return None
+
+ # currentCoordinates = get_coordinates()
+ # if currentCoordinates:
+ #     print(currentCoordinates)
+ # else:
+ #     print("Unable to get device coordinates.")
+
+ # Great-circle (haversine) distance between two coordinates, in km.
+ def distance_calculations(stationFromCoordinates, stationToCoordinates):
+     lat1 = float(stationFromCoordinates['lat'])
+     lon1 = float(stationFromCoordinates['long'])
+     lat2 = float(stationToCoordinates['lat'])
+     lon2 = float(stationToCoordinates['long'])
+     R = 6371  # Earth radius in km
+     dlat = math.radians(lat2 - lat1)
+     dlon = math.radians(lon2 - lon1)
+     a = math.sin(dlat / 2) ** 2 + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlon / 2) ** 2
+     c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
+     distance = R * c
+     return distance
+
+ print(distance_calculations({'lat': '28.98', 'long': '77.7'}, {'lat': '28.66', 'long': '77.22'}))
+
+ totalFare = 0
+ pricePerKm = 1.5
+
+
+ # Placeholder: always reports 3 km until real coordinates are wired in.
+ def distanceCalculations(inBetweenDeparturePoints, inBetweenOnBoardingPoints):
+     print(inBetweenDeparturePoints, inBetweenOnBoardingPoints, 'in-between points')
+     return 3
+
+
+ def fareCalculations():
+     global totalFare
+     print(passengerLog)
+     index = 0
+     for i in passengerLog.values():
+         print(len(i))
+         inBetweenDeparturePoints = []
+         inBetweenOnBoardingPoints = []
+         if len(i) > 3:
+             print(index, "index")
+             print(i[0], i[1], i[2], "first values of the log entry")
+
+         if len(i) >= 2:
+             if i[0] == True and i[1] == False:
+                 inBetweenDeparturePoints.append(i[2])
+             if i[0] == False and i[1] == True:
+                 inBetweenOnBoardingPoints.append(i[2])
+         fare = distanceCalculations(inBetweenDeparturePoints, inBetweenOnBoardingPoints) * pricePerKm
+         print(fare, 'fare')
+         totalFare += fare
+         index += 1
+
+     print(totalFare)
+
+
+ cap = cv2.VideoCapture('TrialFootage.mp4')
+
+ model = YOLO("./Yolo-Weights/yolov8n.pt")
+ stationFrom = 'Meerut'
+ stationFromCoordinates = {"lat": "12.12.54.4", "long": "44.36.09"}  # Meerut (placeholder values, not float-parseable)
+ stationToCoordinates = {'lat': '54.45.56', "long": '45.45.45'}  # placeholder values
+ currentCoordinates = {'lat': '28.98', 'long': '77.7064'}
+ stationTo = 'Delhi'
+ reachedDestination = False
+
+ ListPeople = []
+ # Per-track crossing log: track id -> [first line crossed (True = red, False = yellow),
+ # plus the current coordinates and a timestamp for red-line crossings].
+ passengerLog = {}
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ print(f"Video Resolution: {width}x{height}")
+ classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
+               "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
+               "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
+               "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
+               "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
+               "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
+               "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed",
+               "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
+               "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
+               "teddy bear", "hair drier", "toothbrush"
+               ]
+
+ # Tracking
+ tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3)
+
+ # Two vertical count lines: the order in which a track crosses them gives its direction.
+ yellowLine = [270, 0, 270, 600]
+
+ redLine = [173, 0, 173, 600]
+
+ totalCountUp = []
+ # mask = cv2.imread('mask.jpg')
+ entry_count = 0
+ while True:
+     success, img = cap.read()
+     if not success:
+         break
+     # imgRegion = cv2.bitwise_and(img, mask)
+
+     results = model(img, stream=True)
+
+     detections = np.empty((0, 5))
+
+     for r in results:
+         boxes = r.boxes
+         for box in boxes:
+             # Bounding box
+             x1, y1, x2, y2 = box.xyxy[0]
+             x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+             # cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
+             w, h = x2 - x1, y2 - y1
+
+             # Confidence
+             conf = math.ceil((box.conf[0] * 100)) / 100
+             # Class name
+             cls = int(box.cls[0])
+             currentClass = classNames[cls]
+
+             if currentClass == "person" and conf > 0.3:
+                 # cvzone.putTextRect(img, f'{currentClass} {conf}', (max(0, x1), max(35, y1)),
+                 #                    scale=0.6, thickness=1, offset=3)
+                 # cvzone.cornerRect(img, (x1, y1, w, h), l=9, rt=5)
+                 currentArray = np.array([x1, y1, x2, y2, conf])
+                 detections = np.vstack((detections, currentArray))
+
+     resultsTracker = tracker.update(detections)
+
+     cv2.line(img, (yellowLine[0], yellowLine[1]), (yellowLine[2], yellowLine[3]), (0, 0, 255), 5)
+     cv2.line(img, (redLine[0], redLine[1]), (redLine[2], redLine[3]), (0, 255, 200), 5)
+     for result in resultsTracker:
+         x1, y1, x2, y2, track_id = result
+         x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+         print(result)
+         w, h = x2 - x1, y2 - y1
+         cvzone.cornerRect(img, (x1, y1, w, h), l=9, rt=2, colorR=(255, 0, 255))
+         cvzone.putTextRect(img, f' {int(track_id)}', (max(0, x1), max(35, y1)),
+                            scale=2, thickness=3, offset=10)
+
+         cx, cy = x1 + w // 2, y1 + h // 2
+         cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
+
+         # Yellow line crossed: record False for this track (at most once).
+         if yellowLine[0] - 20 < cx < yellowLine[2] + 20:
+             if totalCountUp.count(track_id) == 0:
+                 totalCountUp.append(track_id)
+                 passengerLog[track_id] = [False]
+                 cv2.line(img, (yellowLine[0], yellowLine[1]), (yellowLine[2], yellowLine[3]), (0, 0, 255), 5)
+             elif totalCountUp.count(track_id) == 1:
+                 if passengerLog[track_id].count(False) < 1:
+                     passengerLog[track_id].append(False)
+                 cv2.line(img, (yellowLine[0], yellowLine[1]), (yellowLine[2], yellowLine[3]), (0, 0, 255), 5)
+         # Red line crossed: record True plus the current coordinates and a timestamp.
+         if redLine[0] - 20 < cx < redLine[2] + 30:
+             if totalCountUp.count(track_id) == 0:
+                 totalCountUp.append(track_id)
+                 passengerLog[track_id] = [True]
+                 passengerLog[track_id].append(currentCoordinates)
+                 current_time = datetime.datetime.now()
+                 passengerLog[track_id].append(current_time)
+                 cv2.line(img, (redLine[0], redLine[1]), (redLine[2], redLine[3]), (0, 255, 200), 5)
+             elif totalCountUp.count(track_id) == 1:
+                 if passengerLog[track_id].count(True) < 1:
+                     passengerLog[track_id].append(True)
+                     passengerLog[track_id].append(currentCoordinates)
+                     current_time = datetime.datetime.now()
+                     passengerLog[track_id].append(current_time)
+
+                 cv2.line(img, (redLine[0], redLine[1]), (redLine[2], redLine[3]), (0, 255, 200), 5)
+
+     print(totalCountUp)
+     entry_count = 0
+
+     # Net occupancy: yellow-then-red crossings count as boarding,
+     # red-then-yellow crossings count as leaving.
+     for i in passengerLog.values():
+         if len(i) >= 2:
+             if i[0] == True and i[1] == False:
+                 print('in true/False')
+                 if entry_count > 0:
+                     entry_count -= 1
+
+             if i[0] == False and i[1] == True:
+                 print('in /False/True')
+                 entry_count += 1
+
+     cv2.putText(img, str(entry_count), (110, 245), cv2.FONT_HERSHEY_PLAIN, 5, (50, 50, 230), 7)
+     print('count is ', entry_count)
+     print(passengerLog)
+     cv2.imshow("Image", img)
+     cv2.waitKey(1)
+     print(entry_count)
+
+     # if currentCoordinates == stationToCoordinates:
+     #     reachedDestination = True
+     #     fareCalculations()
+     #     passengerLog = {}
+     #     break
+     # Demo trigger: settle fares once more than two passengers are on board.
+     if entry_count > 2:
+         print('reached destination')
+         fareCalculations()
+         break
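Note on the fare logic above: distanceCalculations is a stub that always returns 3 km, so every logged passenger is charged the same fare. A minimal sketch of how the recorded boarding/alighting coordinates could instead feed the haversine formula already present in distance_calculations (the helpers haversine_km and fare_for_trip are hypothetical, not part of this commit; it assumes each point is a {'lat', 'long'} dict holding parseable decimal degrees):

import math

def haversine_km(p1, p2):
    # Great-circle distance between two {'lat','long'} points, in km.
    lat1, lon1 = float(p1['lat']), float(p1['long'])
    lat2, lon2 = float(p2['lat']), float(p2['long'])
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = math.sin(dlat / 2) ** 2 + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlon / 2) ** 2
    return 6371 * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

def fare_for_trip(boarded_at, alighted_at, price_per_km=1.5):
    # Fare is the distance travelled on board times the per-km rate.
    return haversine_km(boarded_at, alighted_at) * price_per_km

# e.g. a passenger boarding near Meerut and alighting near Delhi:
print(fare_for_trip({'lat': '28.98', 'long': '77.7064'},
                    {'lat': '28.66', 'long': '77.22'}))

With the Meerut/Delhi test coordinates used in app.py this comes to roughly 59 km, i.e. a fare of about 89 at 1.5 per km.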
busHumanDetect.py ADDED
@@ -0,0 +1,120 @@
+ import numpy as np
+ from ultralytics import YOLO
+ import cv2
+ import cvzone
+ import math
+
+
+ totalFare = 0
+ pricePerKm = 1.5
+
+
+ # Placeholder: always reports 3 km.
+ def distanceCalculations(i):
+     return 3
+
+
+ def fareCalculations():
+     global totalFare
+     print(inBetweenDeparturePoints, inBetweenOnBoardingPoints)
+     for i in inBetweenDeparturePoints:
+         fare = distanceCalculations(i) * pricePerKm
+         print(fare)
+         totalFare += fare
+     for i in inBetweenOnBoardingPoints:
+         fare = distanceCalculations(i) * pricePerKm / 2
+         print(fare)
+         totalFare += fare
+     print(totalFare)
+
+
+ cap = cv2.VideoCapture('trialFootage.mp4')
+
+ model = YOLO("./Yolo-Weights/yolov8n.pt")
+ stationFrom = 'Meerut'
+ stationFromCoordinates = {"lat": "12.12.54.4", "long": "44.36.09"}  # Meerut (placeholder values)
+ stationToCoordinates = {'lat': '54.45.56', "long": '45.45.45'}  # placeholder values
+ currentCoordinates = {'lat': '', 'long': ''}
+ stationTo = 'Delhi'
+ reachedDestination = False
+
+ inBetweenDeparturePoints = [{}]
+ inBetweenOnBoardingPoints = [{}]
+
+ countPeople = 0
+
+ ListPeople = []
+ passengerLog = {}
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ print(f"Video Resolution: {width}x{height}")
+ classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
+               "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
+               "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
+               "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
+               "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
+               "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
+               "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed",
+               "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
+               "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
+               "teddy bear", "hair drier", "toothbrush"
+               ]
+
+ # Tracking (no tracker is instantiated in this script; see app.py)
+
+ yellowLine = [270, 0, 270, 600]
+
+ redLine = [173, 0, 173, 600]
+
+ totalCountUp = []
+
+
+ while True:
+     print("starting loop", inBetweenOnBoardingPoints)
+
+     success, img = cap.read()
+     if not success:
+         break
+     # imgRegion = cv2.bitwise_and(img, mask)
+
+     results = model(img, stream=True)
+
+     detections = np.empty((0, 5))
+
+     for r in results:
+         countPeople = 0
+         boxes = r.boxes
+         for box in boxes:
+             # Bounding box
+             x1, y1, x2, y2 = box.xyxy[0]
+             x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+             # cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
+             w, h = x2 - x1, y2 - y1
+
+             # Confidence
+             conf = math.ceil((box.conf[0] * 100)) / 100
+             # Class name
+             cls = int(box.cls[0])
+             currentClass = classNames[cls]
+
+             if currentClass == "person" and conf > 0.3:
+                 # cvzone.putTextRect(img, f'{currentClass} {conf}', (max(0, x1), max(35, y1)),
+                 #                    scale=0.6, thickness=1, offset=3)
+                 # cvzone.cornerRect(img, (x1, y1, w, h), l=9, rt=5)
+                 currentArray = np.array([x1, y1, x2, y2, conf])
+                 detections = np.vstack((detections, currentArray))
+                 countPeople += 1
+     cv2.putText(img, str(countPeople), (110, 245), cv2.FONT_HERSHEY_PLAIN, 5, (50, 50, 230), 7)
+
+     print('count is ', countPeople)
+     print(passengerLog, "passengerLog")
+     cv2.imshow("Image", img)
+     cv2.waitKey(1)
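busHumanDetect.py draws the raw per-frame YOLO person count, which flickers whenever a detection drops out for a frame or two. One way to stabilise the on-screen number is to report a running median over the last few frames; a minimal sketch (the CountSmoother class is illustrative, not part of this commit):

from collections import deque

class CountSmoother:
    # Keep the last `window` raw counts and report their median,
    # so a one-frame missed detection does not change the display.
    def __init__(self, window=15):
        self.counts = deque(maxlen=window)

    def update(self, raw_count):
        self.counts.append(raw_count)
        return sorted(self.counts)[len(self.counts) // 2]

smoother = CountSmoother()
# inside the frame loop: stable = smoother.update(countPeople)

Inside the frame loop, `stable = smoother.update(countPeople)` would then be drawn in place of the raw count.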
cam.py ADDED
@@ -0,0 +1,9 @@
+ import cv2 as cv
+
+ cap = cv.VideoCapture(0)
+
+ while True:
+     success, img = cap.read()
+     if not success:
+         break
+     cv.imshow("Video", img)
+     if cv.waitKey(1) & 0xFF == ord('q'):
+         break
requirements.txt ADDED
Binary file (152 Bytes).
sort.py ADDED
@@ -0,0 +1,342 @@
+ """
+     SORT: A Simple, Online and Realtime Tracker
+     Copyright (C) 2016-2020 Alex Bewley alex@bewley.ai
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ """
+ from __future__ import print_function
+
+ import os
+ import numpy as np
+ import matplotlib
+
+ matplotlib.use('TkAgg')
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as patches
+ from skimage import io
+
+ import glob
+ import time
+ import argparse
+ from filterpy.kalman import KalmanFilter
+
+ np.random.seed(0)
+
+
+ def linear_assignment(cost_matrix):
+     try:
+         import lap
+         _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
+         return np.array([[y[i], i] for i in x if i >= 0])
+     except ImportError:
+         from scipy.optimize import linear_sum_assignment
+         x, y = linear_sum_assignment(cost_matrix)
+         return np.array(list(zip(x, y)))
+
+
+ def iou_batch(bb_test, bb_gt):
+     """
+     From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2]
+     """
+     bb_gt = np.expand_dims(bb_gt, 0)
+     bb_test = np.expand_dims(bb_test, 1)
+
+     xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0])
+     yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])
+     xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])
+     yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])
+     w = np.maximum(0., xx2 - xx1)
+     h = np.maximum(0., yy2 - yy1)
+     wh = w * h
+     o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])
+               + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)
+     return o
+
+
+ def convert_bbox_to_z(bbox):
+     """
+     Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
+     [x,y,s,r] where x,y is the centre of the box, s is the scale/area and r is
+     the aspect ratio
+     """
+     w = bbox[2] - bbox[0]
+     h = bbox[3] - bbox[1]
+     x = bbox[0] + w / 2.
+     y = bbox[1] + h / 2.
+     s = w * h  # scale is just area
+     r = w / float(h)
+     return np.array([x, y, s, r]).reshape((4, 1))
+
+
+ def convert_x_to_bbox(x, score=None):
+     """
+     Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
+     [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
+     """
+     w = np.sqrt(x[2] * x[3])
+     h = x[2] / w
+     if score is None:
+         return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2.]).reshape((1, 4))
+     else:
+         return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score]).reshape((1, 5))
+
+
+ class KalmanBoxTracker(object):
+     """
+     This class represents the internal state of individual tracked objects observed as bbox.
+     """
+     count = 0
+
+     def __init__(self, bbox):
+         """
+         Initialises a tracker using initial bounding box.
+         """
+         # define constant velocity model
+         self.kf = KalmanFilter(dim_x=7, dim_z=4)
+         self.kf.F = np.array(
+             [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0],
+              [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]])
+         self.kf.H = np.array(
+             [[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]])
+
+         self.kf.R[2:, 2:] *= 10.
+         self.kf.P[4:, 4:] *= 1000.  # give high uncertainty to the unobservable initial velocities
+         self.kf.P *= 10.
+         self.kf.Q[-1, -1] *= 0.01
+         self.kf.Q[4:, 4:] *= 0.01
+
+         self.kf.x[:4] = convert_bbox_to_z(bbox)
+         self.time_since_update = 0
+         self.id = KalmanBoxTracker.count
+         KalmanBoxTracker.count += 1
+         self.history = []
+         self.hits = 0
+         self.hit_streak = 0
+         self.age = 0
+
+     def update(self, bbox):
+         """
+         Updates the state vector with observed bbox.
+         """
+         self.time_since_update = 0
+         self.history = []
+         self.hits += 1
+         self.hit_streak += 1
+         self.kf.update(convert_bbox_to_z(bbox))
+
+     def predict(self):
+         """
+         Advances the state vector and returns the predicted bounding box estimate.
+         """
+         if (self.kf.x[6] + self.kf.x[2]) <= 0:
+             self.kf.x[6] *= 0.0
+         self.kf.predict()
+         self.age += 1
+         if self.time_since_update > 0:
+             self.hit_streak = 0
+         self.time_since_update += 1
+         self.history.append(convert_x_to_bbox(self.kf.x))
+         return self.history[-1]
+
+     def get_state(self):
+         """
+         Returns the current bounding box estimate.
+         """
+         return convert_x_to_bbox(self.kf.x)
+
+
+ def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3):
+     """
+     Assigns detections to tracked objects (both represented as bounding boxes)
+
+     Returns 3 lists of matches, unmatched_detections and unmatched_trackers
+     """
+     if len(trackers) == 0:
+         return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)
+
+     iou_matrix = iou_batch(detections, trackers)
+
+     if min(iou_matrix.shape) > 0:
+         a = (iou_matrix > iou_threshold).astype(np.int32)
+         if a.sum(1).max() == 1 and a.sum(0).max() == 1:
+             matched_indices = np.stack(np.where(a), axis=1)
+         else:
+             matched_indices = linear_assignment(-iou_matrix)
+     else:
+         matched_indices = np.empty(shape=(0, 2))
+
+     unmatched_detections = []
+     for d, det in enumerate(detections):
+         if d not in matched_indices[:, 0]:
+             unmatched_detections.append(d)
+     unmatched_trackers = []
+     for t, trk in enumerate(trackers):
+         if t not in matched_indices[:, 1]:
+             unmatched_trackers.append(t)
+
+     # filter out matched with low IOU
+     matches = []
+     for m in matched_indices:
+         if iou_matrix[m[0], m[1]] < iou_threshold:
+             unmatched_detections.append(m[0])
+             unmatched_trackers.append(m[1])
+         else:
+             matches.append(m.reshape(1, 2))
+     if len(matches) == 0:
+         matches = np.empty((0, 2), dtype=int)
+     else:
+         matches = np.concatenate(matches, axis=0)
+
+     return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
+
+
+ class Sort(object):
+     def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
+         """
+         Sets key parameters for SORT
+         """
+         self.max_age = max_age
+         self.min_hits = min_hits
+         self.iou_threshold = iou_threshold
+         self.trackers = []
+         self.frame_count = 0
+
+     def update(self, dets=np.empty((0, 5))):
+         """
+         Params:
+           dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
+         Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
+         Returns a similar array, where the last column is the object ID.
+
+         NOTE: The number of objects returned may differ from the number of detections provided.
+         """
+         self.frame_count += 1
+         # get predicted locations from existing trackers.
+         trks = np.zeros((len(self.trackers), 5))
+         to_del = []
+         ret = []
+         for t, trk in enumerate(trks):
+             pos = self.trackers[t].predict()[0]
+             trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
+             if np.any(np.isnan(pos)):
+                 to_del.append(t)
+         trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
+         for t in reversed(to_del):
+             self.trackers.pop(t)
+         matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, self.iou_threshold)
+
+         # update matched trackers with assigned detections
+         for m in matched:
+             self.trackers[m[1]].update(dets[m[0], :])
+
+         # create and initialise new trackers for unmatched detections
+         for i in unmatched_dets:
+             trk = KalmanBoxTracker(dets[i, :])
+             self.trackers.append(trk)
+         i = len(self.trackers)
+         for trk in reversed(self.trackers):
+             d = trk.get_state()[0]
+             if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
+                 ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))  # +1 as MOT benchmark requires positive IDs
+             i -= 1
+             # remove dead tracklet
+             if trk.time_since_update > self.max_age:
+                 self.trackers.pop(i)
+         if len(ret) > 0:
+             return np.concatenate(ret)
+         return np.empty((0, 5))
+
+
+ def parse_args():
+     """Parse input arguments."""
+     parser = argparse.ArgumentParser(description='SORT demo')
+     parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',
+                         action='store_true')
+     parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data')
+     parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train')
+     parser.add_argument("--max_age",
+                         help="Maximum number of frames to keep alive a track without associated detections.",
+                         type=int, default=1)
+     parser.add_argument("--min_hits",
+                         help="Minimum number of associated detections before track is initialised.",
+                         type=int, default=3)
+     parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3)
+     args = parser.parse_args()
+     return args
+
+
+ if __name__ == '__main__':
+     # all train
+     args = parse_args()
+     display = args.display
+     phase = args.phase
+     total_time = 0.0
+     total_frames = 0
+     colours = np.random.rand(32, 3)  # used only for display
+     if display:
+         if not os.path.exists('mot_benchmark'):
+             print(
+                 '\n\tERROR: mot_benchmark link not found!\n\n    Create a symbolic link to the MOT benchmark\n    (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n    $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
+             exit()
+         plt.ion()
+         fig = plt.figure()
+         ax1 = fig.add_subplot(111, aspect='equal')
+
+     if not os.path.exists('output'):
+         os.makedirs('output')
+     pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt')
+     for seq_dets_fn in glob.glob(pattern):
+         mot_tracker = Sort(max_age=args.max_age,
+                            min_hits=args.min_hits,
+                            iou_threshold=args.iou_threshold)  # create instance of the SORT tracker
+         seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
+         seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0]
+
+         with open(os.path.join('output', '%s.txt' % (seq)), 'w') as out_file:
+             print("Processing %s." % (seq))
+             for frame in range(int(seq_dets[:, 0].max())):
+                 frame += 1  # detection and frame numbers begin at 1
+                 dets = seq_dets[seq_dets[:, 0] == frame, 2:7]
+                 dets[:, 2:4] += dets[:, 0:2]  # convert [x1,y1,w,h] to [x1,y1,x2,y2]
+                 total_frames += 1
+
+                 if display:
+                     fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg' % (frame))
+                     im = io.imread(fn)
+                     ax1.imshow(im)
+                     plt.title(seq + ' Tracked Targets')
+
+                 start_time = time.time()
+                 trackers = mot_tracker.update(dets)
+                 cycle_time = time.time() - start_time
+                 total_time += cycle_time
+
+                 for d in trackers:
+                     print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1' % (frame, d[4], d[0], d[1], d[2] - d[0], d[3] - d[1]),
+                           file=out_file)
+                     if display:
+                         d = d.astype(np.int32)
+                         ax1.add_patch(patches.Rectangle((d[0], d[1]), d[2] - d[0], d[3] - d[1], fill=False, lw=3,
+                                                         ec=colours[d[4] % 32, :]))
+
+                 if display:
+                     fig.canvas.flush_events()
+                     plt.draw()
+                     ax1.cla()
+
+     print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (
+         total_time, total_frames, total_frames / total_time))
+
+     if display:
+         print("Note: to get real runtime results run without the option: --display")