VascoDVRodrigues committed
Commit
40c6d5b
1 Parent(s): 58ff7c0

1st commit

Files changed (4)
  1. app.py +6 -0
  2. mot-metrics.py +187 -0
  3. requirements.txt +3 -0
  4. tests.py +37 -0
app.py ADDED
@@ -0,0 +1,6 @@
+ import evaluate
+ from evaluate.utils import launch_gradio_widget
+
+
+ module = evaluate.load("SEA-AI/mot-metrics")
+ launch_gradio_widget(module)
mot-metrics.py ADDED
@@ -0,0 +1,187 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import evaluate
+ import datasets
+ import motmetrics as mm
+ import numpy as np
+
+ _CITATION = """\
+ @InProceedings{huggingface:module,
+   title = {A great new module},
+   authors={huggingface, Inc.},
+   year={2020}
+ }\
+ @article{milan2016mot16,
+   title={MOT16: A benchmark for multi-object tracking},
+   author={Milan, Anton and Leal-Taix{\'e}, Laura and Reid, Ian and Roth, Stefan and Schindler, Konrad},
+   journal={arXiv preprint arXiv:1603.00831},
+   year={2016}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The MOT Metrics module evaluates multi-object tracking (MOT) algorithms by
+ computing standard tracking metrics from predicted and ground-truth bounding
+ boxes, helping to assess and iteratively improve tracking systems."""
+
+
+ _KWARGS_DESCRIPTION = """
+ Calculates multi-object tracking scores for a set of predictions against ground-truth references.
+ Args:
+     predictions: list of detections to score. Each detection should be a list or array
+         in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height, confidence].
+     references: list of ground-truth annotations, one per object per frame. Each annotation
+         should be a list or array in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height].
+     max_iou (`float`, *optional*):
+         Maximum IoU distance (1 - IoU) allowed when matching predictions to ground truth,
+         passed to motmetrics.distances.iou_matrix. With the default of 0.5 this corresponds
+         to requiring an IoU of at least 0.5 for a detection to count as a true positive.
+ Returns:
+     summary: dict with the following keys:
+         - idf1 (IDF1 Score): The F1 score for the identity assignment, computed as 2 * (IDP * IDR) / (IDP + IDR).
+         - idp (ID Precision): Identity Precision, the ratio of correctly assigned identities to the total number of predicted identities.
+         - idr (ID Recall): Identity Recall, the ratio of correctly assigned identities to the total number of ground-truth identities.
+         - recall: Recall, the ratio of correctly tracked objects to the total number of ground-truth objects.
+         - precision: Precision, the ratio of correctly tracked objects to the total number of predicted objects.
+         - num_unique_objects: Total number of unique object ids in the ground truth.
+         - mostly_tracked: Number of objects that are mostly tracked throughout the sequence.
+         - partially_tracked: Number of objects that are partially tracked but not mostly tracked.
+         - mostly_lost: Number of objects that are mostly lost throughout the sequence.
+         - num_false_positives: Number of false positive detections (predicted objects not present in the ground truth).
+         - num_misses: Number of missed detections (ground-truth objects not detected in the predictions).
+         - num_switches: Number of identity switches.
+         - num_fragmentations: Number of fragmented objects (objects whose tracks are broken into multiple pieces).
+         - mota (MOTA, Multiple Object Tracking Accuracy): Overall tracking accuracy, computed as 1 - ((num_false_positives + num_misses + num_switches) / total number of ground-truth detections).
+         - motp (MOTP, Multiple Object Tracking Precision): Average localization error of correctly matched detections.
+         - num_transfer: Number of track transfers.
+         - num_ascend: Number of ascended track ids.
+         - num_migrate: Number of track id migrations.
+
+ Examples:
+     >>> import evaluate
+     >>> import numpy as np
+     >>> module = evaluate.load("bascobasculino/mot-metrics")
+
+     >>> predicted = [
+         [1, 1, 10, 20, 30, 40, 0.85],
+         [1, 2, 50, 60, 70, 80, 0.92],
+         [1, 3, 80, 90, 100, 110, 0.75],
+         [2, 1, 15, 25, 35, 45, 0.78],
+         [2, 2, 55, 65, 75, 85, 0.95],
+         [3, 1, 20, 30, 40, 50, 0.88],
+         [3, 2, 60, 70, 80, 90, 0.82],
+         [4, 1, 25, 35, 45, 55, 0.91],
+         [4, 2, 65, 75, 85, 95, 0.89]
+     ]
+
+     >>> ground_truth = [
+         [1, 1, 10, 20, 30, 40],
+         [1, 2, 50, 60, 70, 80],
+         [1, 3, 85, 95, 105, 115],
+         [2, 1, 15, 25, 35, 45],
+         [2, 2, 55, 65, 75, 85],
+         [3, 1, 20, 30, 40, 50],
+         [3, 2, 60, 70, 80, 90],
+         [4, 1, 25, 35, 45, 55],
+         [5, 1, 30, 40, 50, 60],
+         [5, 2, 70, 80, 90, 100]
+     ]
+     >>> predicted = [np.array(a) for a in predicted]
+     >>> ground_truth = [np.array(a) for a in ground_truth]
+
+     >>> results = module._compute(predictions=predicted, references=ground_truth, max_iou=0.5)
+     >>> print(results)
+     {'idf1': 0.8421052631578947, 'idp': 0.8888888888888888, 'idr': 0.8, 'recall': 0.8, 'precision': 0.8888888888888888,
+     'num_unique_objects': 3, 'mostly_tracked': 2, 'partially_tracked': 1, 'mostly_lost': 0, 'num_false_positives': 1,
+     'num_misses': 2, 'num_switches': 0, 'num_fragmentations': 0, 'mota': 0.7, 'motp': 0.02981870229007634,
+     'num_transfer': 0, 'num_ascend': 0, 'num_migrate': 0}
+ """
+
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class MotMetrics(evaluate.Metric):
+     """Multi-object tracking metrics computed with the motmetrics package."""
+
+     def _info(self):
+         # Specifies the evaluate.EvaluationModuleInfo object
+         return evaluate.MetricInfo(
+             # This is the description that will appear on the modules page.
+             module_type="metric",
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             # This defines the format of each prediction and reference
+             features=datasets.Features({
+                 "predictions": datasets.Sequence(
+                     datasets.Sequence(datasets.Value("float"))
+                 ),
+                 "references": datasets.Sequence(
+                     datasets.Sequence(datasets.Value("float"))
+                 )
+             }),
+             # Additional links to the codebase or references
+             codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
+             reference_urls=["http://path.to.reference.url/new_module"]
+         )
+
+     def _download_and_prepare(self, dl_manager):
+         """Optional: download external resources useful to compute the scores"""
+         # TODO: Download external resources if needed
+         pass
+
+     def _compute(self, predictions, references, max_iou: float = 0.5):
+         """Returns the scores"""
+         return calculate(predictions, references, max_iou)
+
+
+ def calculate(predictions, references, max_iou: float = 0.5):
+     """Accumulates per-frame matches with motmetrics and returns the summary dict."""
+     try:
+         np_predictions = np.array(predictions)
+     except Exception:
+         raise ValueError("The predictions should be a list of np.arrays in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height, confidence]")
+
+     try:
+         np_references = np.array(references)
+     except Exception:
+         raise ValueError("The references should be a list of np.arrays in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height]")
+
+     if np_predictions.shape[1] != 7:
+         raise ValueError("The predictions should be a list of np.arrays in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height, confidence]")
+     if np_references.shape[1] != 6:
+         raise ValueError("The references should be a list of np.arrays in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height]")
+
+     if np_predictions[:, 0].min() <= 0:
+         raise ValueError("The frame number in the predictions should be a positive integer")
+     if np_references[:, 0].min() <= 0:
+         raise ValueError("The frame number in the references should be a positive integer")
+
+     # Frames are numbered from 1; cast to int so range() accepts the value.
+     num_frames = int(max(np_references[:, 0].max(), np_predictions[:, 0].max()))
+
+     acc = mm.MOTAccumulator(auto_id=True)
+     for i in range(1, num_frames + 1):
+         # Columns 1:6 are [object id, bb_left, bb_top, bb_width, bb_height] for frame i.
+         preds = np_predictions[np_predictions[:, 0] == i, 1:6]
+         refs = np_references[np_references[:, 0] == i, 1:6]
+         # IoU distance matrix between ground-truth and predicted boxes.
+         C = mm.distances.iou_matrix(refs[:, 1:], preds[:, 1:], max_iou=max_iou)
+         acc.update(refs[:, 0].astype('int').tolist(), preds[:, 0].astype('int').tolist(), C)
+
+     mh = mm.metrics.create()
+     summary = mh.compute(acc).to_dict()
+     # compute() returns a one-row DataFrame; flatten it to plain values.
+     for key in summary:
+         summary[key] = summary[key][0]
+
+     return summary
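As a quick sanity check on the docstring example, the reported MOTA value can be reproduced by hand from the error counts shown in the example output (1 false positive, 2 misses, 0 identity switches, and 10 ground-truth boxes across the five frames). A minimal sketch, not part of the module:

    # Recompute MOTA from the counts in the example output above.
    # MOTA = 1 - (FP + FN + IDSW) / total ground-truth detections
    num_false_positives = 1
    num_misses = 2
    num_switches = 0
    num_gt_detections = 10  # number of rows in the example's ground_truth list

    mota = 1 - (num_false_positives + num_misses + num_switches) / num_gt_detections
    print(mota)  # 0.7, matching the 'mota' value in the example output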
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ git+https://github.com/huggingface/evaluate@main
+ numpy
+ motmetrics
tests.py ADDED
@@ -0,0 +1,37 @@
+ import numpy as np
+ test_cases = [
+     {
+         "predictions": [np.array(a) for a in [
+             [1, 1, 10, 20, 30, 40, 0.85],
+             [1, 2, 50, 60, 70, 80, 0.92],
+             [1, 3, 80, 90, 100, 110, 0.75],
+             [2, 1, 15, 25, 35, 45, 0.78],
+             [2, 2, 55, 65, 75, 85, 0.95],
+             [3, 1, 20, 30, 40, 50, 0.88],
+             [3, 2, 60, 70, 80, 90, 0.82],
+             [4, 1, 25, 35, 45, 55, 0.91],
+             [4, 2, 65, 75, 85, 95, 0.89]
+         ]],
+         "references": [np.array(a) for a in [
+             [1, 1, 10, 20, 30, 40],
+             [1, 2, 50, 60, 70, 80],
+             [1, 3, 85, 95, 105, 115],
+             [2, 1, 15, 25, 35, 45],
+             [2, 2, 55, 65, 75, 85],
+             [3, 1, 20, 30, 40, 50],
+             [3, 2, 60, 70, 80, 90],
+             [4, 1, 25, 35, 45, 55],
+             [5, 1, 30, 40, 50, 60],
+             [5, 2, 70, 80, 90, 100]
+         ]],
+         "result": {'idf1': 0.8421052631578947, 'idp': 0.8888888888888888,
+                    'idr': 0.8, 'recall': 0.8, 'precision': 0.8888888888888888,
+                    'num_unique_objects': 3, 'mostly_tracked': 2,
+                    'partially_tracked': 1, 'mostly_lost': 0,
+                    'num_false_positives': 1, 'num_misses': 2,
+                    'num_switches': 0, 'num_fragmentations': 0,
+                    'mota': 0.7, 'motp': 0.02981870229007634,
+                    'num_transfer': 0, 'num_ascend': 0,
+                    'num_migrate': 0}
+     },
+ ]
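tests.py only declares the fixture; nothing in this commit actually runs it. A minimal sketch of how the case could be exercised against the module, assuming pytest as the test runner and loading the metric from the Space id used in app.py (both assumptions, not part of this commit):

    import evaluate
    import pytest  # assumed test runner; not listed in requirements.txt

    from tests import test_cases

    # Repository id taken from app.py; the docstring example uses "bascobasculino/mot-metrics".
    module = evaluate.load("SEA-AI/mot-metrics")

    def test_mot_metrics():
        for case in test_cases:
            result = module._compute(
                predictions=case["predictions"],
                references=case["references"],
                max_iou=0.5,
            )
            # Compare every expected key against the computed summary.
            for key, expected in case["result"].items():
                assert result[key] == pytest.approx(expected)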