"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.

Christoph Heindl, 2017
https://github.com/cheind/py-motmetrics
"""

import argparse
import glob
import os
import logging
import motmetrics as mm
import pandas as pd
from collections import OrderedDict
from pathlib import Path
import pdb
import xlrd
import xlwt

def parse_args():
    """Build and parse the command-line options for this evaluation script.

    Returns:
        argparse.Namespace with the attributes ``groundtruths``, ``tests``,
        ``loglevel``, ``fmt``, ``solver`` and ``output``.
    """
    description = """
Compute metrics for trackers using MOTChallenge ground-truth data.

Files
-----
All file content, ground truth and test files, have to comply with the
format described in 

Milan, Anton, et al. 
"Mot16: A benchmark for multi-object tracking." 
arXiv preprint arXiv:1603.00831 (2016).
https://motchallenge.net/

Structure
---------

Layout for ground truth data
    <GT_ROOT>/<SEQUENCE_1>/gt/gt.txt
    <GT_ROOT>/<SEQUENCE_2>/gt/gt.txt
    ...

Layout for test data
    <TEST_ROOT>/<SEQUENCE_1>.txt
    <TEST_ROOT>/<SEQUENCE_2>.txt
    ...

Sequences of ground truth and test will be matched according to the `<SEQUENCE_X>`
string."""
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.RawTextHelpFormatter)

    # All paths default to the author's local Windows layout; every option
    # can still be overridden on the command line.
    parser.add_argument('--groundtruths', type=str,
                        help='Directory containing ground truth files.',
                        default='F:\\Project\\MOT\\py_motmetrics\\data\\')
    parser.add_argument('--tests', type=str,
                        help='Directory containing tracker result files',
                        default='F:\\Project\\MOT\\py_motmetrics\\data\\osnet_x0_25_deepsort\\')
    parser.add_argument('--loglevel', type=str, help='Log level', default='info')
    parser.add_argument('--fmt', type=str, help='Data format', default='mot15-2D')
    parser.add_argument('--solver', type=str, help='LAP solver to use')
    parser.add_argument('--output', type=str, help='output the results to xls',
                        default='F:\\Project\\MOT\\py_motmetrics\\results')
    return parser.parse_args()

def compare_dataframes(gts, ts):
    """Accumulate MOT events for each tracker result that has ground truth.

    Args:
        gts: mapping of sequence name -> ground-truth dataframe.
        ts: mapping of sequence name -> tracker-result dataframe.

    Returns:
        Tuple ``(accumulators, names)`` where ``accumulators`` holds one
        motmetrics accumulator per matched sequence and ``names`` the
        corresponding sequence names, in the iteration order of ``ts``.
        Sequences without ground truth are logged and skipped.
    """
    accumulators = []
    matched_names = []
    for name, result in ts.items():
        if name not in gts:
            logging.warning('No ground truth for {}, skipping.'.format(name))
            continue
        logging.info('Comparing {}...'.format(name))
        accumulators.append(
            mm.utils.compare_to_groundtruth(gts[name], result, 'iou', distth=0.5))
        matched_names.append(name)

    return accumulators, matched_names

def write_to_xls(summary_list, output_path, sheet_no):
    """Write a header row plus one row of summary metrics to an .xls file.

    Args:
        summary_list: sequence whose first element is a row of metric values
            ordered like the ``title`` columns below.
        output_path: destination path of the workbook.
        sheet_no: identifier used as the sheet name (converted with str()).

    NOTE(review): the original implementation was left unfinished — it built
    the workbook and header list but never wrote a cell or saved the file.
    This version completes it with the same signature.
    """
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)
    title = ['IDF1', 'IDP', 'IDR', 'Rcll', 'Prcn', 'GT', 'MT', 'PT', 'ML', 'FP', 'FN', 'IDs', 'FM', 'MOTA', 'MOTP', 'IDt', 'IDa', 'IDm']
    sheet = book.add_sheet(str(sheet_no), cell_overwrite_ok=True)
    # Row 0: metric names; row 1: the corresponding values.
    for col, label in enumerate(title):
        sheet.write(0, col, label)
    data = summary_list[0]
    for col, value in enumerate(data):
        sheet.write(1, col, value)
    book.save(output_path)

def main():
    """Evaluate every tracker-result sub-folder of a hard-coded data root and
    collect one row of MOTChallenge metrics per folder into an .xls workbook.

    NOTE(review): the hard-coded Windows paths below overwrite the
    --groundtruths/--tests values parsed by parse_args(), so those CLI
    options are effectively ignored here.
    """
    args = parse_args()
    # Hard-coded dataset root and tracker (experiment) name.
    data_path = 'F:\\Project\\MOT\\py_motmetrics\\data\\'
    father_name = 'osnet_x0_25_deepsort'
    # Spreadsheet header: video name followed by the MOTChallenge metric names.
    title = ['video_name', 'IDF1', 'IDP', 'IDR', 'Rcll', 'Prcn', 'GT', 'MT', 'PT', 'ML', 'FP', 'FN', 'IDs', 'FM', 'MOTA', 'MOTP',
             'IDt', 'IDa', 'IDm']
    father_path = data_path+father_name
    args.groundtruths = father_path+'\\'
    # Each child directory of father_path is treated as one video/sequence.
    child_list = os.listdir(father_path)
    child_list.sort()
    # Recreate <output>\<father_name>\<father_name>.xls from scratch.
    output_path = args.output + '\\' + father_name + '\\' + father_name+'.xls'
    if not os.path.exists(args.output + '\\' + father_name):
        os.makedirs(args.output + '\\' + father_name)
    if os.path.exists(output_path):
        os.remove(output_path)
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)
    sheet1 = book.add_sheet("test01", cell_overwrite_ok=True)
    for i in range(0, len(title)):
        sheet1.write(0, i, title[i])
    cnt = 1  # next spreadsheet row to fill (row 0 is the header)
    for cur_child in child_list:
        child_path = os.path.join(father_path, cur_child)
        args.tests = child_path+'\\'

        # pdb.set_trace()
        loglevel = getattr(logging, args.loglevel.upper(), None)
        if not isinstance(loglevel, int):
            raise ValueError('Invalid log level: {} '.format(args.loglevel))
        # NOTE(review): basicConfig() is a no-op after the first iteration;
        # this setup could be hoisted out of the loop.
        logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')

        if args.solver:
            mm.lap.default_solver = args.solver

        # Ground truth: <seq>/gt/gt.txt for every sequence. NOTE(review): this
        # globs under father_path, i.e. across ALL child folders, on every
        # iteration of the loop — confirm that is intended.
        gtfiles = glob.glob(os.path.join(args.groundtruths, '*/gt/gt.txt'))
        # Tracker results: every *.txt in the current child folder, skipping
        # any file whose name starts with 'eval'.
        tsfiles = [f for f in glob.glob(os.path.join(args.tests, '*.txt')) if
                   not os.path.basename(f).startswith('eval')]
        # pdb.set_trace()

        logging.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
        logging.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
        logging.info('Default LAP solver \'{}\''.format(mm.lap.default_solver))
        logging.info('Loading files.')

        # Key ground truth by sequence directory name and results by file
        # stem, so compare_dataframes can match them up.
        gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt=args.fmt, min_confidence=1)) for f in gtfiles])
        ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt=args.fmt)) for f in tsfiles])

        mh = mm.metrics.create()
        accs, names = compare_dataframes(gt, ts)

        logging.info('Running metrics')

        summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)
        # Keep only the first row of the summary. NOTE(review): with
        # generate_overall=True the last row is 'OVERALL'; index [0] picks the
        # first sequence's row instead — confirm this is intended.
        summary_list = summary.values.tolist()[0]
        summary_list[-4] = 1-summary_list[-4]  # MOTP conversion: report 1 - motp
        # Round to 4 decimals, then drop the decimal part for values that are
        # exactly 0 or >= 1 (integer-valued counts such as GT/FP/FN/IDs).
        summary_list = [float('{:.4f}'.format(i)) for i in summary_list]
        for i in range(len(summary_list)):
            if summary_list[i]>=1 or summary_list[i]==0:
                summary_list[i] = int(summary_list[i])
        summary_list = list(map(str, summary_list))
        # pdb.set_trace()
        summary_list.insert(0, cur_child)  # prepend the video-name column
        # pdb.set_trace()
        for i in range(0, len(summary_list)):
            sheet1.write(cnt, i, summary_list[i])
        cnt +=1
        # pdb.set_trace()
        print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))
        logging.info('Completed')
    book.save(output_path)

if __name__ == '__main__':
    # main() parses its own arguments; the previous extra parse_args() call
    # here was discarded, and the superseded single-run evaluation code that
    # used to sit below as comments has been removed.
    main()