"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.
Christoph Heindl, 2017
https://github.com/cheind/py-motmetrics
Modified by Rufeng Zhang
"""
import argparse
import glob
import os
import logging
import motmetrics as mm
import pandas as pd
from collections import OrderedDict
from pathlib import Path
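
# Example invocation (script name, paths, and option values are illustrative):
#   python eval_motchallenge.py \
#       --groundtruths <GT_ROOT> --tests <TEST_ROOT> \
#       --gt_type val_half --score_threshold 0.4 --eval_official
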
def parse_args():
    parser = argparse.ArgumentParser(description="""
Compute metrics for trackers using MOTChallenge ground-truth data.

Files
-----
All file content, ground truth and test files, have to comply with the
format described in

Milan, Anton, et al.
"MOT16: A benchmark for multi-object tracking."
arXiv preprint arXiv:1603.00831 (2016).
https://motchallenge.net/

Structure
---------

Layout for ground truth data
    <GT_ROOT>/<SEQUENCE_1>/gt/gt.txt
    <GT_ROOT>/<SEQUENCE_2>/gt/gt.txt
    ...

Layout for test data
    <TEST_ROOT>/<SEQUENCE_1>.txt
    <TEST_ROOT>/<SEQUENCE_2>.txt
    ...

Sequences of ground truth and test will be matched according to the `<SEQUENCE_X>`
string.""", formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--groundtruths', type=str, help='Directory containing ground-truth files.')
    parser.add_argument('--tests', type=str, help='Directory containing tracker result files.')
    parser.add_argument('--score_threshold', type=float, default=0.5,
                        help='Minimum confidence for tracker detections to be evaluated.')
    parser.add_argument('--gt_type', type=str, default='',
                        help='Suffix selecting ground-truth files named gt_<gt_type>.txt.')
    parser.add_argument('--eval_official', action='store_true',
                        help='Additionally report the official MOTChallenge metric set.')
    parser.add_argument('--loglevel', type=str, default='info', help='Log level.')
    parser.add_argument('--fmt', type=str, default='mot15-2D', help='Data format.')
    parser.add_argument('--solver', type=str, help='LAP solver to use.')
    return parser.parse_args()

def compare_dataframes(gts, ts):
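    """Build one accumulator per tracker sequence that has matching ground truth."""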
    accs = []
    names = []
    for k, tsacc in ts.items():
        if k in gts:
            logging.info('Comparing {}...'.format(k))
            accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5))
            names.append(k)
        else:
            logging.warning('No ground truth for {}, skipping.'.format(k))
    return accs, names

if __name__ == '__main__':
    args = parse_args()

    loglevel = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(loglevel, int):
        raise ValueError('Invalid log level: {}'.format(args.loglevel))
    logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')
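
    # Optionally override the default LAP solver used for the assignment step;
    # valid names are listed in mm.lap.available_solvers (logged below).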
    if args.solver:
        mm.lap.default_solver = args.solver

    gt_type = args.gt_type
    print('gt_type', gt_type)
    gtfiles = glob.glob(
        os.path.join(args.groundtruths, '*/gt/gt_{}.txt'.format(gt_type)))
    print('gt_files', gtfiles)
    tsfiles = [f for f in glob.glob(os.path.join(args.tests, '*.txt'))
               if not os.path.basename(f).startswith('eval')]

    logging.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
    logging.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
    logging.info('Default LAP solver \'{}\''.format(mm.lap.default_solver))
    logging.info('Loading files.')
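
    # Ground truth is keyed by sequence directory name and tracker results by file
    # stem, so the two dictionaries line up on <SEQUENCE_X> as described in the help
    # text above. Ground-truth rows are kept only when their confidence flag is >= 1;
    # tracker rows are filtered by --score_threshold.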
    gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt=args.fmt, min_confidence=1))
                      for f in gtfiles])
    ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0],
                       mm.io.loadtxt(f, fmt=args.fmt, min_confidence=args.score_threshold))
                      for f in tsfiles])
    # ts = gt

    mh = mm.metrics.create()
    accs, names = compare_dataframes(gt, ts)

    logging.info('Running metrics')
    metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked',
               'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses',
               'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects']
    summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
    # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)
    # print(mm.io.render_summary(
    #     summary, formatters=mh.formatters,
    #     namemap=mm.io.motchallenge_metric_names))
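
    # Convert the raw counts into rates for display: error counts (false positives,
    # misses, ID switches, fragmentations) are divided by the total number of
    # ground-truth objects, and track-coverage counts (MT/PT/ML) by the number of
    # unique ground-truth identities; the affected columns then reuse MOTA's
    # percentage formatter.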
    div_dict = {
        'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'],
        'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']}
    for divisor in div_dict:
        for divided in div_dict[divisor]:
            summary[divided] = (summary[divided] / summary[divisor])
    fmt = mh.formatters
    change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations',
                       'mostly_tracked', 'partially_tracked', 'mostly_lost']
    for k in change_fmt_list:
        fmt[k] = fmt['mota']
    print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names))
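
    # With --eval_official, recompute the summary using the standard MOTChallenge
    # metric set (plus num_objects) and print it with the default formatters.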
    if args.eval_official:
        metrics = mm.metrics.motchallenge_metrics + ['num_objects']
        summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
        print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))

    logging.info('Completed')