import argparse
import glob
import os
import logging
import motmetrics as mm
import pandas as pd
from collections import OrderedDict
from pathlib import Path


def test():
    """Evaluate one MOT16 sequence against its ground truth and print the
    MOT-challenge metrics table (MOTA, MOTP, IDF1, ...)."""
    data_id = '02'
    # Ground truth for the selected sequence.
    df = mm.io.loadtxt('mot16Label/train_v1/MOT16-{}/gt/gt.txt'.format(data_id), fmt=mm.io.Format.MOT16)

    # Tracker result file — uncomment exactly one. (The original code assigned
    # all of these in sequence, leaving the first three as dead stores.)
    # expected_file = 'deep-sort/results/MOT16-{}-pose.txt'
    # expected_file = 'deep-sort/results/MOT16-{}-onlyiou.txt'
    # expected_file = 'deep-sort/results/MOT16-{}-pose-onlyiou.txt'
    # expected_file = '/Volumes/more/source/cv/mot/sort/output_16/MOT16-{}.txt'
    # expected_file = '/Volumes/more/source/cv/mot/sort/output_16/pose_ret_MOT16-{}.txt'
    expected_file = './output/my_mot_joints_result_MOT16-{}.txt'
    expected = mm.io.loadtxt(expected_file.format(data_id), fmt=mm.io.Format.MOT16)

    mh = mm.metrics.create()
    # compare_to_groundtruth takes (gt, test); IoU distance, 0.5 threshold.
    accs = [mm.utils.compare_to_groundtruth(df, expected, 'iou', distth=0.5)]
    names = ['mot16-{}'.format(data_id)]
    summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics)
    print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))

def mot15_eval():
    """Evaluate one MOT15 training sequence against its ground truth and
    print the MOT-challenge metrics table."""
    # Sequence to evaluate — uncomment exactly one. (The original assigned
    # 'ETH-Sunnyday' then immediately overwrote it: a dead store.)
    # data_name = 'ETH-Sunnyday'
    # data_name = 'ETH-Pedcross2'
    # data_name = 'TUD-Stadtmitte'
    data_name = 'PETS09-S2L1'
    # Tracker result file for the selected sequence.
    expected_file = './output/tuning/{}'.format(data_name)
    expected = mm.io.loadtxt(expected_file, fmt=mm.io.Format.MOT15_2D)
    # Ground truth. BUGFIX: the original path contained a garbled character
    # ('gt.t【xt'), which made loadtxt fail — corrected to 'gt.txt'.
    df = mm.io.loadtxt('MOT15/train/{}/gt/gt.txt'.format(data_name), fmt=mm.io.Format.MOT15_2D)
    mh = mm.metrics.create()
    # compare_to_groundtruth takes (gt, test); IoU distance, 0.5 threshold.
    accs = [mm.utils.compare_to_groundtruth(df, expected, 'iou', distth=0.5)]
    names = [data_name]
    summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics)
    print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))

def compare_dataframes(gts, ts):
    """Pair each tracker result in *ts* with its ground truth in *gts*.

    Returns a tuple (accumulators, sequence_names). Sequences that have no
    ground-truth entry are skipped with a warning.
    """
    accumulators = []
    sequence_names = []
    for name, result in ts.items():
        if name not in gts:
            logging.warning('No ground truth for {}, skipping.'.format(name))
            continue
        logging.info('Comparing {}...'.format(name))
        # IoU distance with a 0.5 threshold (the value used throughout this file).
        accumulators.append(mm.utils.compare_to_groundtruth(gts[name], result, 'iou', distth=0.5))
        sequence_names.append(name)
    return accumulators, sequence_names


def my_batch_eval():
    """Evaluate every MOT15 training sequence found on disk and write the
    metrics summary to ./output/result.txt (also printed to stdout)."""
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')

    gtfiles = glob.glob('./MOT15/train/*/gt/gt.txt')
    # Tracker result files — swap the glob to evaluate a different run.
    # e.g. './output/sort_joint/train/*.txt', './output/mot15_pose_result/*.txt'
    tsfiles = glob.glob('./output/train_v1_mode_1_result/*.txt')

    logging.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
    logging.info('Loading files.')

    # Key ground truths by the sequence directory name (parts[-3]) and
    # results by their file stem, so the two mappings share keys.
    # (Removed the original's no-op [:] slice on the stem.)
    gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt=mm.io.Format.MOT15_2D, min_confidence=1)) for f in gtfiles])
    ts = OrderedDict(
        [(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt=mm.io.Format.MOT15_2D)) for f in tsfiles])

    mh = mm.metrics.create()
    accs, names = compare_dataframes(gt, ts)
    summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics)
    output_str = mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names)
    print(output_str)
    # BUGFIX: the original opened the file inline and never closed it
    # (resource leak); a context manager guarantees flush + close.
    with open('./output/result.txt', 'w') as out:
        print(output_str, file=out)


def batch_eval():
    """Evaluate all MOT16 sequences found in the deep-sort results directory
    and print the MOT-challenge metrics table."""
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')

    gtfiles = glob.glob('mot16Label/train_v1/MOT16-*/gt/gt.txt')
    tsfiles = glob.glob('./deep-sort/results/MOT16*tsort.txt')

    logging.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
    logging.info('Loading files.')

    # Ground truths keyed by the sequence directory name; results keyed by the
    # first 8 characters of the file stem (e.g. 'MOT16-02') so keys line up.
    gt = OrderedDict()
    for path in gtfiles:
        gt[Path(path).parts[-3]] = mm.io.loadtxt(path, fmt=mm.io.Format.MOT16, min_confidence=1)
    ts = OrderedDict()
    for path in tsfiles:
        stem = os.path.splitext(Path(path).parts[-1])[0]
        ts[stem[:8]] = mm.io.loadtxt(path, fmt=mm.io.Format.MOT16)

    mh = mm.metrics.create()
    accs, names = compare_dataframes(gt, ts)
    summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics)
    print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))


if __name__ == '__main__':
    # Entry point — uncomment exactly one evaluation:
    # test()           # single MOT16 sequence
    # batch_eval()     # all MOT16 sequences
    # my_batch_eval()  # all MOT15 sequences (writes ./output/result.txt)
    mot15_eval()       # single MOT15 sequence