sino72 committed
Commit f718ab7
1 Parent(s): f609cc3

Upload 6 files

Files changed (6)
  1. app.py +177 -0
  2. detectMotion.py +60 -0
  3. openh264-1.8.0-win64.dll +0 -0
  4. requirements.txt +4 -0
  5. sort.py +330 -0
  6. yolov8n.pt +3 -0
app.py ADDED
@@ -0,0 +1,177 @@
+ from ultralytics import YOLO  # pedestrian detection with the YOLOv8 model
+ import math  # used to round confidences
+ import cv2  # OpenCV image processing
+ import cvzone  # drawing helpers for images
+ import numpy as np  # array operations (was accidentally left commented out; np also leaks in via `from sort import *`)
+ import gradio as gr  # web GUI
+ from sort import *  # tracking with the SORT algorithm
+ #from deep_sort_realtime.deepsort_tracker import DeepSort  # unused alternative tracker
+ import tempfile  # temporary directory for the outputs
+ import os
+ from detectMotion import *  # standalone motion detection
+
+ # Load the YOLOv8 model; on first use the weights are downloaded into the current directory
+ model = YOLO("yolov8n.pt")
+
+ # YOLOv8 class labels; this project only uses 'person'
+ classNames = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
+               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+               'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+               'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+               'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
+               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+               'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
+               'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
+               'scissors', 'teddy bear', 'hair drier', 'toothbrush']
+
+ # Tracker parameters
+ tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3)
+
+ # Adaptive histogram equalization (CLAHE) for color images
+ def hisEqulColor(img):
+     # Convert the BGR image to the YCrCb color space
+     ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
+     # Split the YCrCb channels
+     channels = cv2.split(ycrcb)
+     # See the OpenCV tutorial for a detailed walkthrough:
+     # https://docs.opencv.org/4.1.0/d5/daf/tutorial_py_histogram_equalization.html
+     clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+     clahe.apply(channels[0], channels[0])  # equalize the luma channel in place
+     cv2.merge(channels, ycrcb)
+     cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
+     return img
+
+ # Add Gaussian noise, then denoise with a median filter
+ def AddGaussNoise(img, sigma):
+     gauss = np.random.normal(0, sigma, img.shape)
+     img = np.uint8(np.clip(img + gauss, 0, 255))  # overlay the noise on the original image; clip to avoid uint8 wrap-around
+     img = cv2.medianBlur(img, 5)
+     return img
+
+ # Image processing for the "Image enhancement" tab
+ def processImg(img, sigma):
+     img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+     res1 = AddGaussNoise(img, sigma)
+     res1 = hisEqulColor(res1)
+     res1 = cv2.cvtColor(res1, cv2.COLOR_BGR2RGB)
+     return res1
+
+ # Video processing
+ def processVideo(inputPath, codec):
+     number_of_people = 0
+     cap = cv2.VideoCapture(inputPath)  # open the input video
+     fps = cap.get(cv2.CAP_PROP_FPS)  # input frame rate
+     size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
+             int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))  # input frame size
+     output_viedo = cv2.VideoWriter()  # video writer
+     outputPath = tempfile.mkdtemp()  # temporary directory for the output video
+
+     # Output format
+     if codec == "mp4":
+         fourcc = cv2.VideoWriter_fourcc('a', 'v', 'c', '1')  # H.264; only H.264 mp4 files play directly in the browser
+         video_save_path = os.path.join(outputPath, "output.mp4")
+     elif codec == "avi":
+         fourcc = cv2.VideoWriter_fourcc('h', '2', '6', '4')  # H.264; avi output does not play directly in the browser
+         video_save_path = os.path.join(outputPath, "output.avi")
+     elif codec == "mkv":
+         fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')  # XVID; this codec does not need openh264-1.8.0-win64.dll
+         video_save_path = os.path.join(outputPath, "output.mkv")
+
+     output_viedo.open(video_save_path, fourcc, fps, size, True)
+     # Read and process the video frame by frame
+     while True:
+         ret, img = cap.read()  # read the next frame
+         if not ret:  # ret turns False once the whole video has been read; stop the loop
+             break
+         img = hisEqulColor(img)  # video enhancement
+         results = model(img, stream=True)  # YOLOv8 inference; stream=True returns a lazy generator for faster streaming
+         detections = np.empty((0, 5))  # detections passed to the tracker
+         # Read the inference results
+         for r in results:
+             boxes = r.boxes
+             for box in boxes:
+                 # Read and draw the bounding box of each detection
+                 x1, y1, x2, y2 = box.xyxy[0]
+                 x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)  # tensor -> int
+                 conf = math.ceil(box.conf[0] * 100) / 100  # round the confidence up to two decimals
+                 cls = int(box.cls[0])  # class label
+                 # Mark the detection when it is a person with confidence above 0.3
+                 if cls == 0 and conf > 0.3:
+                     cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
+                     currentArray = np.array([x1, y1, x2, y2, conf])
+                     detections = np.vstack((detections, currentArray))  # stack detections row-wise
+         # Tracking
+         resultsTracker = tracker.update(detections)
+         for result in resultsTracker:
+             x1, y1, x2, y2, Id = result
+             number_of_people = max(int(Id), number_of_people)  # IDs are assigned sequentially, so the largest ID is the people count
+             x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)  # float -> int
+             cvzone.putTextRect(img, f'{int(Id)}', (max(0, x1), max(30, y1)), scale=0.7, thickness=1)
+         output_viedo.write(img)  # write the processed frame to the output video
+     output_viedo.release()
+     cap.release()
+     return video_save_path, video_save_path
+
+
+ # Web UI (Interface version, kept for reference)
+ #demo = gr.Interface(
+ #    fn=processVideo,
+ #    inputs=["text", "text"],
+ #    outputs=["text"],
+ #    examples=[["D:\\[WPF]JJDown\\Download\\walker.mp4", "C:\\Users\\sino\\Downloads\\output.mkv"], ["C:\\Users\\sino\\Videos\\test.mp4", "C:\\Users\\sino\\Downloads\\output.mkv"]],
+ #    title="Motion detection and pedestrian tracking",
+ #    description="Please enter absolute paths"
+ #)
+
+ # Web UI (Blocks version)
+ with gr.Blocks() as demo:
+     gr.Markdown("""
+     # Motion Detection and Pedestrian Tracking
+     Based on OpenCV + YOLOv8 + SORT. Please enter absolute paths.
+     """)
+     with gr.Tab("Video recognition"):
+         with gr.Row():
+             with gr.Column():
+                 text_inputPath = gr.Video()
+                 codec = gr.Radio(["mp4", "avi", "mkv"], label="Output video format")
+                 videoProcess_button = gr.Button("Process")
+             with gr.Column():
+                 text_output = gr.Video()
+                 text_output_path = gr.Text(label="Output path")
+     with gr.Tab("Image enhancement"):
+         with gr.Row():
+             with gr.Column():
+                 image_input = gr.Image()
+                 image_sigma = gr.Slider(0, 40, label="Gaussian noise sigma")
+             image_output = gr.Image()
+         image_button = gr.Button("Process")
+     with gr.Tab("Motion detection"):
+         with gr.Column():
+             with gr.Row():
+                 with gr.Column():
+                     motion_inputPath = gr.Video()
+                     motionProcess_button = gr.Button("Process")
+                 motion_output_frame = gr.Video()
+                 motion_output_fmask = gr.Video()
+             frame_output_path = gr.Text(label="frame output path")
+             fmask_output_path = gr.Text(label="mask output path")
+         with gr.Accordion("Algorithm:"):
+             gr.Markdown("Gaussian mixture model (GMM)")
+
+     videoProcess_button.click(processVideo, inputs=[text_inputPath, codec], outputs=[text_output, text_output_path])
+     image_button.click(processImg, inputs=[image_input, image_sigma], outputs=image_output)
+     motionProcess_button.click(motionDetection, inputs=[motion_inputPath],
+                                outputs=[motion_output_frame, motion_output_fmask,
+                                         frame_output_path, fmask_output_path])
+
+ demo.queue()  # queue requests when several arrive at once
+ demo.launch()  # serves a local link; pass share=True to demo.launch() for a public link
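
For reference, a minimal sketch of the detect-then-track loop that app.py runs per frame, reduced to its essentials; the input path is hypothetical, and it assumes the same yolov8n.pt weights and sort.py module as above:

import cv2
import math
import numpy as np
from ultralytics import YOLO
from sort import Sort

model = YOLO("yolov8n.pt")
tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3)

cap = cv2.VideoCapture("walker.mp4")  # hypothetical input video
while True:
    ret, img = cap.read()
    if not ret:
        break
    detections = np.empty((0, 5))  # one [x1, y1, x2, y2, conf] row per kept box
    for r in model(img, stream=True):
        for box in r.boxes:
            x1, y1, x2, y2 = (int(v) for v in box.xyxy[0])
            conf = math.ceil(float(box.conf[0]) * 100) / 100
            if int(box.cls[0]) == 0 and conf > 0.3:  # class 0 = 'person'
                detections = np.vstack((detections, [x1, y1, x2, y2, conf]))
    for x1, y1, x2, y2, track_id in tracker.update(detections):
        print(f"track {int(track_id)}: ({int(x1)},{int(y1)})-({int(x2)},{int(y2)})")
cap.release()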
detectMotion.py ADDED
@@ -0,0 +1,60 @@
+ # Adapted from https://blog.csdn.net/qq_29367075/article/details/122933407
+ import cv2
+ import numpy as np
+ import tempfile
+ import os
+
+ kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
+ mog = cv2.createBackgroundSubtractorMOG2()  # Gaussian mixture model for background modeling
+
+ def motionDetection(inputPath):
+     cap = cv2.VideoCapture(inputPath)  # open the input video
+     fps = cap.get(cv2.CAP_PROP_FPS)  # input frame rate
+     size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
+             int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))  # input frame size
+     output_viedo_frame = cv2.VideoWriter()  # writer for the annotated frames
+     output_viedo_fmask = cv2.VideoWriter()  # writer for the foreground masks
+     outputPath = tempfile.mkdtemp()  # temporary directory for the output videos
+     fourcc = cv2.VideoWriter_fourcc('a', 'v', 'c', '1')  # H.264; only H.264 mp4 files play directly in the browser
+     video_save_path_frame = os.path.join(outputPath, "frame.mp4")
+     video_save_path_fmask = os.path.join(outputPath, "fmask.mp4")
+     output_viedo_frame.open(video_save_path_frame, fourcc, fps, size, True)
+     output_viedo_fmask.open(video_save_path_fmask, fourcc, fps, size, True)
+     # Read and process the video frame by frame
+     while True:
+         ret, frame = cap.read()  # read the next frame
+         if frame is None:
+             print("camera is over...")
+             break
+
+         fmask = mog.apply(frame)  # classify each pixel as foreground or background
+         fmask = cv2.morphologyEx(fmask, cv2.MORPH_OPEN, kernel1)  # opening removes small noise and burrs
+         contours, _ = cv2.findContours(fmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # external contours only
+
+         for cont in contours:
+             # Measure the perimeter of each contour
+             perimeter = cv2.arcLength(cont, True)
+             if perimeter > 300:  # drop small noise contours
+                 # Draw the bounding rectangle of the contour
+                 x, y, w, h = cv2.boundingRect(cont)
+                 cv2.rectangle(frame, (x, y), (x + w, y + h), color=(0, 255, 0), thickness=3)
+         fmask = cv2.cvtColor(fmask, cv2.COLOR_GRAY2BGR)  # the mask is single-channel; expand it so the 3-channel writer accepts it
+         output_viedo_frame.write(frame)  # write the annotated frame
+         output_viedo_fmask.write(fmask)  # write the foreground mask
+     output_viedo_frame.release()
+     output_viedo_fmask.release()
+     cap.release()
+     return video_save_path_frame, video_save_path_fmask, video_save_path_frame, video_save_path_fmask
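
For reference, a minimal standalone sketch of the GMM background-subtraction step used above, applied to a single frame; the input path is hypothetical:

import cv2

mog = cv2.createBackgroundSubtractorMOG2()  # GMM background model (detectShadows=True by default)
cap = cv2.VideoCapture("input.mp4")         # hypothetical input path
ret, frame = cap.read()
if ret:
    fmask = mog.apply(frame)                # uint8 mask: 0 background, 255 foreground, 127 shadow
    print(fmask.shape, fmask.dtype)         # single-channel, hence the GRAY2BGR conversion before writing
cap.release()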
openh264-1.8.0-win64.dll ADDED
Binary file (825 kB)
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ ultralytics
+ opencv-python
+ cvzone
+ torch
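
Note: the code also imports gradio (app.py) and filterpy, scikit-image and matplotlib (sort.py), with lap as an optional speed-up; these are not listed above, so they presumably need to be installed separately.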
sort.py ADDED
@@ -0,0 +1,330 @@
+ """
+ SORT: A Simple, Online and Realtime Tracker
+ Copyright (C) 2016-2020 Alex Bewley alex@bewley.ai
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ """
+ from __future__ import print_function
+
+ import os
+ import numpy as np
+ import matplotlib
+ matplotlib.use('TkAgg')
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as patches
+ from skimage import io
+
+ import glob
+ import time
+ import argparse
+ from filterpy.kalman import KalmanFilter
+
+ np.random.seed(0)
+
+
+ def linear_assignment(cost_matrix):
+   try:
+     import lap
+     _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
+     return np.array([[y[i], i] for i in x if i >= 0])
+   except ImportError:
+     from scipy.optimize import linear_sum_assignment
+     x, y = linear_sum_assignment(cost_matrix)
+     return np.array(list(zip(x, y)))
+
+
+ def iou_batch(bb_test, bb_gt):
+   """
+   From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2]
+   """
+   bb_gt = np.expand_dims(bb_gt, 0)
+   bb_test = np.expand_dims(bb_test, 1)
+
+   xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0])
+   yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])
+   xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])
+   yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])
+   w = np.maximum(0., xx2 - xx1)
+   h = np.maximum(0., yy2 - yy1)
+   wh = w * h
+   o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])
+     + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)
+   return(o)
+
+
+ def convert_bbox_to_z(bbox):
+   """
+   Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
+   [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
+   the aspect ratio
+   """
+   w = bbox[2] - bbox[0]
+   h = bbox[3] - bbox[1]
+   x = bbox[0] + w/2.
+   y = bbox[1] + h/2.
+   s = w * h    #scale is just area
+   r = w / float(h)
+   return np.array([x, y, s, r]).reshape((4, 1))
+
+
+ def convert_x_to_bbox(x, score=None):
+   """
+   Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
+   [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
+   """
+   w = np.sqrt(x[2] * x[3])
+   h = x[2] / w
+   if score is None:
+     return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2.]).reshape((1, 4))
+   else:
+     return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2., score]).reshape((1, 5))
+
+
+ class KalmanBoxTracker(object):
+   """
+   This class represents the internal state of individual tracked objects observed as bbox.
+   """
+   count = 0
+   def __init__(self, bbox):
+     """
+     Initialises a tracker using initial bounding box.
+     """
+     #define constant velocity model
+     self.kf = KalmanFilter(dim_x=7, dim_z=4)
+     self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
+     self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
+
+     self.kf.R[2:, 2:] *= 10.
+     self.kf.P[4:, 4:] *= 1000.  #give high uncertainty to the unobservable initial velocities
+     self.kf.P *= 10.
+     self.kf.Q[-1, -1] *= 0.01
+     self.kf.Q[4:, 4:] *= 0.01
+
+     self.kf.x[:4] = convert_bbox_to_z(bbox)
+     self.time_since_update = 0
+     self.id = KalmanBoxTracker.count
+     KalmanBoxTracker.count += 1
+     self.history = []
+     self.hits = 0
+     self.hit_streak = 0
+     self.age = 0
+
+   def update(self, bbox):
+     """
+     Updates the state vector with observed bbox.
+     """
+     self.time_since_update = 0
+     self.history = []
+     self.hits += 1
+     self.hit_streak += 1
+     self.kf.update(convert_bbox_to_z(bbox))
+
+   def predict(self):
+     """
+     Advances the state vector and returns the predicted bounding box estimate.
+     """
+     if (self.kf.x[6] + self.kf.x[2]) <= 0:
+       self.kf.x[6] *= 0.0
+     self.kf.predict()
+     self.age += 1
+     if self.time_since_update > 0:
+       self.hit_streak = 0
+     self.time_since_update += 1
+     self.history.append(convert_x_to_bbox(self.kf.x))
+     return self.history[-1]
+
+   def get_state(self):
+     """
+     Returns the current bounding box estimate.
+     """
+     return convert_x_to_bbox(self.kf.x)
+
+
+ def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3):
+   """
+   Assigns detections to tracked object (both represented as bounding boxes)
+
+   Returns 3 lists of matches, unmatched_detections and unmatched_trackers
+   """
+   if len(trackers) == 0:
+     return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)
+
+   iou_matrix = iou_batch(detections, trackers)
+
+   if min(iou_matrix.shape) > 0:
+     a = (iou_matrix > iou_threshold).astype(np.int32)
+     if a.sum(1).max() == 1 and a.sum(0).max() == 1:
+       matched_indices = np.stack(np.where(a), axis=1)
+     else:
+       matched_indices = linear_assignment(-iou_matrix)
+   else:
+     matched_indices = np.empty(shape=(0, 2))
+
+   unmatched_detections = []
+   for d, det in enumerate(detections):
+     if d not in matched_indices[:, 0]:
+       unmatched_detections.append(d)
+   unmatched_trackers = []
+   for t, trk in enumerate(trackers):
+     if t not in matched_indices[:, 1]:
+       unmatched_trackers.append(t)
+
+   #filter out matched with low IOU
+   matches = []
+   for m in matched_indices:
+     if iou_matrix[m[0], m[1]] < iou_threshold:
+       unmatched_detections.append(m[0])
+       unmatched_trackers.append(m[1])
+     else:
+       matches.append(m.reshape(1, 2))
+   if len(matches) == 0:
+     matches = np.empty((0, 2), dtype=int)
+   else:
+     matches = np.concatenate(matches, axis=0)
+
+   return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
+
+
+ class Sort(object):
+   def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
+     """
+     Sets key parameters for SORT
+     """
+     self.max_age = max_age
+     self.min_hits = min_hits
+     self.iou_threshold = iou_threshold
+     self.trackers = []
+     self.frame_count = 0
+
+   def update(self, dets=np.empty((0, 5))):
+     """
+     Params:
+       dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
+     Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
+     Returns a similar array, where the last column is the object ID.
+
+     NOTE: The number of objects returned may differ from the number of detections provided.
+     """
+     self.frame_count += 1
+     # get predicted locations from existing trackers.
+     trks = np.zeros((len(self.trackers), 5))
+     to_del = []
+     ret = []
+     for t, trk in enumerate(trks):
+       pos = self.trackers[t].predict()[0]
+       trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
+       if np.any(np.isnan(pos)):
+         to_del.append(t)
+     trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
+     for t in reversed(to_del):
+       self.trackers.pop(t)
+     matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, self.iou_threshold)
+
+     # update matched trackers with assigned detections
+     for m in matched:
+       self.trackers[m[1]].update(dets[m[0], :])
+
+     # create and initialise new trackers for unmatched detections
+     for i in unmatched_dets:
+       trk = KalmanBoxTracker(dets[i, :])
+       self.trackers.append(trk)
+     i = len(self.trackers)
+     for trk in reversed(self.trackers):
+       d = trk.get_state()[0]
+       if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
+         ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))  # +1 as MOT benchmark requires positive
+       i -= 1
+       # remove dead tracklet
+       if trk.time_since_update > self.max_age:
+         self.trackers.pop(i)
+     if len(ret) > 0:
+       return np.concatenate(ret)
+     return np.empty((0, 5))
+
+
+ def parse_args():
+   """Parse input arguments."""
+   parser = argparse.ArgumentParser(description='SORT demo')
+   parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]', action='store_true')
+   parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data')
+   parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train')
+   parser.add_argument("--max_age",
+                       help="Maximum number of frames to keep alive a track without associated detections.",
+                       type=int, default=1)
+   parser.add_argument("--min_hits",
+                       help="Minimum number of associated detections before track is initialised.",
+                       type=int, default=3)
+   parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3)
+   args = parser.parse_args()
+   return args
+
+
+ if __name__ == '__main__':
+   # all train
+   args = parse_args()
+   display = args.display
+   phase = args.phase
+   total_time = 0.0
+   total_frames = 0
+   colours = np.random.rand(32, 3)  #used only for display
+   if display:
+     if not os.path.exists('mot_benchmark'):
+       print('\n\tERROR: mot_benchmark link not found!\n\n    Create a symbolic link to the MOT benchmark\n    (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n    $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
+       exit()
+     plt.ion()
+     fig = plt.figure()
+     ax1 = fig.add_subplot(111, aspect='equal')
+
+   if not os.path.exists('output'):
+     os.makedirs('output')
+   pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt')
+   for seq_dets_fn in glob.glob(pattern):
+     mot_tracker = Sort(max_age=args.max_age,
+                        min_hits=args.min_hits,
+                        iou_threshold=args.iou_threshold)  #create instance of the SORT tracker
+     seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
+     seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0]
+
+     with open(os.path.join('output', '%s.txt' % (seq)), 'w') as out_file:
+       print("Processing %s." % (seq))
+       for frame in range(int(seq_dets[:, 0].max())):
+         frame += 1  #detection and frame numbers begin at 1
+         dets = seq_dets[seq_dets[:, 0] == frame, 2:7]
+         dets[:, 2:4] += dets[:, 0:2]  #convert [x1,y1,w,h] to [x1,y1,x2,y2]
+         total_frames += 1
+
+         if display:
+           fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg' % (frame))
+           im = io.imread(fn)
+           ax1.imshow(im)
+           plt.title(seq + ' Tracked Targets')
+
+         start_time = time.time()
+         trackers = mot_tracker.update(dets)
+         cycle_time = time.time() - start_time
+         total_time += cycle_time
+
+         for d in trackers:
+           print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1' % (frame, d[4], d[0], d[1], d[2]-d[0], d[3]-d[1]), file=out_file)
+           if display:
+             d = d.astype(np.int32)
+             ax1.add_patch(patches.Rectangle((d[0], d[1]), d[2]-d[0], d[3]-d[1], fill=False, lw=3, ec=colours[d[4] % 32, :]))
+
+         if display:
+           fig.canvas.flush_events()
+           plt.draw()
+           ax1.cla()
+
+   print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time))
+
+   if display:
+     print("Note: to get real runtime results run without the option: --display")
yolov8n.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
+ size 6534387