# -*- coding: utf-8 -*-
import numpy as np
import json
import vsum_tools
from pathlib import Path
from tabulate import tabulate
import h5py
import re
import matplotlib.pyplot as plt
import os
import argparse
import sys
plt.switch_backend('agg')

'''
python my_eval.py --root /home/zgp/data/results/project --fold-list 1 2 --debug
'''
parser = argparse.ArgumentParser("evaluation")

# parser.add_argument('--eval-type', type=str, default='original', choices=['original', 'sample'])
parser.add_argument('--root', type=str, required=True, help="project root dir")
# evaluation protocols to run; each produces its own log/plot pair in the main loop
parser.add_argument('--eval-tpl', nargs='+', type=str, default=['original', 'sample'], help="eval type list")
# which cross-validation folds to evaluate (fold{i}/score/epoch_*.json under --root)
parser.add_argument('--fold-list', nargs='+', type=int, help='eval fold list', default=[1, 2, 3, 4, 5])
# with --debug, output stays on stdout instead of being redirected into a log file
parser.add_argument('--debug', action='store_true')
parser.add_argument('--name', type=str, default='evaluation', help="log/pdf/fig save name")

args = parser.parse_args()
print(args)


# video ids collected across evaluate() calls; reset by the main loop before each epoch
collect = []
def evaluate(dtype, res_f, eval_t='original'):
    '''Score one epoch's predicted summaries against the ground-truth summaries.

    :dtype: dataset type, 'summe' / 'tvsum' / 'ovp' — selects the h5 file and metric
    :res_f: path to a json file mapping 'fea_<id>' -> [pred_scores, gt_score]
    :eval_t: 'original' or 'sample' — which vsum_tools scoring routine to use
    :returns: mean F-score over all videos listed in res_f
    :raises ValueError: if eval_t is not one of the two supported modes

    Side effect: appends every evaluated video id to the module-level `collect`
    list (the main loop uses it as a completeness check), and prints a per-video
    F-score table to stdout.
    '''
    with open(res_f, encoding='utf-8') as f:
        result = json.load(f)

    # TODO wait for zhang pan — OVP ships in a separately sampled h5 file
    if dtype == 'ovp':
        h5_path = '/home/zgp/pytorch-vsumm-reinforce/datasets/dataset_OVP_sampled_pool5.h5'
    else:
        h5_path = f'/home/zgp/pytorch-vsumm-reinforce/datasets/eccv16_dataset_{dtype}_google_pool5.h5'

    # tvsum is scored by averaging over user summaries, the others by max
    eval_metric = 'avg' if dtype == 'tvsum' else 'max'

    fms = []
    table = [["ID"], ["F-score"]]

    # context manager guarantees the h5 handle is closed even if scoring raises
    # (the previous version leaked one handle per call — two for ovp)
    with h5py.File(h5_path, 'r') as dataset:
        for k, v in result.items():
            vdo = int(k[4:])    # assumes keys look like 'fea_<id>' — TODO confirm
            probs, _ = np.array(v)    # unpack [pred_scores, gt_score]; gt unused here
            key = 'video_' + str(vdo)

            cps = dataset[key]['change_points'][...]
            num_frames = dataset[key]['n_frames'][()]
            nfps = dataset[key]['n_frame_per_seg'][...].tolist()
            positions = dataset[key]['picks'][...]
            user_summary = dataset[key]['user_summary'][...]

            if dtype == 'ovp':
                # OVP overrides: frame count derives from the last change point,
                # every frame is a pick, and gtsummary is the single reference
                num_frames = cps[-1][1] + 1
                positions = np.array(range(num_frames))
                user_summary = dataset[key]['gtsummary'][...].reshape(1, -1)

            machine_summary = vsum_tools.generate_summary(probs, cps, num_frames, nfps, positions)
            if eval_t == 'original':
                fm, _, _ = vsum_tools.evaluate_summary(machine_summary, user_summary, eval_metric)
            elif eval_t == 'sample':
                fm, _, _ = vsum_tools.evaluate_sample_summary(machine_summary, user_summary, positions, eval_metric)
            else:
                # fail loudly instead of hitting NameError on the unbound `fm` below
                raise ValueError(f"unknown eval_t: {eval_t!r}")
            fms.append(fm)
            collect.append(vdo)

            table[0].append(vdo)
            table[1].append(f'{fm:.1%}')

    mean_fm = np.mean(fms)
    print(tabulate(table))

    return mean_fm

def datatype(dir):
    """Infer dataset type, expected video-id checklist and shift mode from a path.

    :dir: directory path whose name encodes the dataset ('summe'/'tvsum'/'ovp')
          and the shift mode ('transfer'/'canonical'/'aug')
    :returns: (dataset_type, id_checklist, shift); dataset_type is None and the
              checklist empty (with an error printed) when no dataset matches
    """
    # NOTE: parameter name shadows the builtin dir(); kept for interface compatibility
    d_t = None
    ck_lst = []
    shift = None

    # first matching dataset tag wins; value is the exclusive upper id bound
    id_bounds = {'summe': 26, 'tvsum': 51, 'ovp': 51}
    for tag, upper in id_bounds.items():
        if re.search(tag, dir):
            d_t = tag
            ck_lst = list(range(1, upper))
            break
    else:
        print('dataset type error')

    # first matching shift keyword wins
    for mode in ('transfer', 'canonical', 'aug'):
        if re.search(mode, dir):
            shift = mode
            break

    return d_t, ck_lst, shift

def average(seq):
    """Return the arithmetic mean of *seq* as a float.

    :seq: sequence of numbers
    :returns: mean value, or 0.0 for an empty sequence (the caller may collect
              no decay points at all, which previously raised ZeroDivisionError)
    """
    if not seq:
        return 0.0
    return sum(seq) / len(seq)

def find_decay(lst):
    """Locate the earliest strictly decreasing run in *lst*, longest runs first.

    :lst: sequence of comparable values (F-score curve)
    :returns: (times, start) — `times` is the run length found (3, 2 or 1
              consecutive drops, searched in that order) and `start` is the
              index where the run begins; (0, -1) when the list never decreases
    """
    for streak in (3, 2, 1):
        for start in range(len(lst) - streak):
            # strictly decreasing over `streak` consecutive steps
            if all(lst[start + j] > lst[start + j + 1] for j in range(streak)):
                return streak, start
    return 0, -1    # reasonable: monotonically non-decreasing


if __name__ == '__main__':
    root = args.root
    # plot title: last component of the project root path
    name = root.split('/')[-1]
    # NOTE(review): opened 'w' and never explicitly closed — relies on interpreter exit
    h5f = h5py.File(f'{root}/info.h5','w')
    data_t, ck_lst, shift = datatype(root)
    for eval_t in args.eval_tpl:
        fname = f'{args.name}_{eval_t}'
        if args.debug is False:
            # unless debugging, redirect all prints below into a per-eval-type log file
            fd = open(f'{root}/{fname}.log', 'w')
            sys.stdout = fd

        # NOTE(review): one figure per eval type, never released with plt.close()
        plt.figure(figsize=(16, 8))
        if shift == 'canonical' or shift == 'aug':
            # canonical/aug: evaluate every fold at each of a fixed number of epochs
            max_epk = 30
            # per-split F-score curves in percent, plus the across-fold average
            fs_epks = {'ave': []}
            for epk in range(max_epk):
                # reset the ids collected by evaluate() for this epoch's completeness check
                collect = []
                table = [["Split"], ["Ave F-score"]]
                ave = 0
                for i in args.fold_list:
                    res_f = f'{root}/fold{i}/score/epoch_{epk}.json'
                    mean_f = evaluate(data_t, res_f, eval_t=eval_t)
                    print(f'epk{epk}-split{i} {mean_f:.1%}\n')
                    key = f'split{i}'
                    if key in fs_epks:
                        fs_epks[key].append(mean_f*100)
                    else:
                        fs_epks[key] = [mean_f*100]
                    table[0].append(i)
                    table[1].append(f"{mean_f:.1%}")
                    # table.append([i, f"{mean_f:.1%}"])
                    ave += mean_f
                ave /= len(args.fold_list)
                # table.append([f'ave{epk}', f"{ave:.1%}"])
                fs_epks['ave'].append(ave*100)
                print(tabulate(table))
                print(f'epk{epk} ave {ave:.1%}\n')

                if len(args.fold_list) == 5:    # weak check; assumes the input is valid
                    assert sorted(collect) == ck_lst

            # final F-score of each curve, decay points, and annotation text for the plot
            finalF1_5 = {}
            split_decay = []
            decay_info = ""
            x = range(max_epk)
            colo = {'split1': 'grey', 'split2': 'cadetblue', 'split3': 'tan', 'split4': 'darkorange', 'split5': 'lightpink', 'ave': 'lightgreen'}
            for key, fs_epk in fs_epks.items():
                # record the last-epoch F-score of each curve
                finalF1_5[key] = (max_epk-1, fs_epk[-1])
                # TODO support lists of different lengths, x = range(len(fs_epks[i]))
                plt.plot(x,fs_epk,label=key,linewidth=3,color=colo[key],marker='o',markerfacecolor='blue',markersize=4)
                for a, b in zip(np.arange(max_epk), fs_epk):
                    plt.text(a, b, f"({a}, {b:.1f})", ha='center', va='bottom', fontsize=2)

                # mark where the curve starts its longest consecutive decline
                decay_tm, k = find_decay(fs_epk)
                if decay_tm > 0:
                    print(f'{key}在epk{k}之后F-score连续{decay_tm}次下降')
                    decay_info += f"{key:>7}: ({k}, {decay_tm}, {fs_epk[k]:.1f})\n"
                    plt.plot(k, fs_epk[k], marker='o', markerfacecolor='red')
                    split_decay.append(fs_epk[k])
                else:
                    decay_info += f"{key:>7}: not found\n"
            # NOTE(review): average() raises ZeroDivisionError if no curve ever decays
            split_ave = average(split_decay)
            decay_info += f"spave: {split_ave:.1f}\n"
            max_ave = max(fs_epks['ave'])
            max_ave_info = f"max: ({fs_epks['ave'].index(max_ave)}, {max_ave:.1f})\n"
            final_ave_info = f"final: ({max_epk-1}, {fs_epks['ave'][-1]:.1f})\n"
        elif shift == 'transfer':
            # transfer: a single fold; epoch count inferred from the score files on disk
            max_epk = len(os.listdir(f'{root}/fold1/score'))
            ave_fs = []
            for epk in range(max_epk):
                print(f'epk{epk}')
                collect = []
                res_f = f'{root}/fold1/score/epoch_{epk}.json'
                mean_f = evaluate(data_t, res_f, eval_t=eval_t)
                print(f'epk{epk} ave {mean_f:.1%}\n')
                ave_fs.append(mean_f*100)

                assert sorted(collect) == ck_lst

            decay_tm, i = find_decay(ave_fs)
            max_ave = max(ave_fs)
            # TODO align formatting (with the canonical/aug branch)
            max_ave_info = f"max ({ave_fs.index(max_ave)}, {max_ave:.1f})\n"
            final_ave_info = f"final ({max_epk-1}, {ave_fs[-1]:.1f})\n"
            finalF1_5 = None

            x = range(max_epk)
            plt.plot(x,ave_fs,label='ave',linewidth=3,color='cadetblue',marker='o',markerfacecolor='blue',markersize=4)

            if decay_tm > 0:
                print(f'epk{i}之后F-score连续{decay_tm}次下降')
                decay_info = f'decay ({i}, {decay_tm}, {ave_fs[i]:.1f})\n'
                plt.plot(i, ave_fs[i], marker='o', markerfacecolor='red')
            else:
                decay_info = f"not found\n"

            for a, b in zip(np.arange(max_epk), ave_fs):
                # f'{round(b, 3)}'
                plt.text(a, b, f"({a}, {b:.1f})", ha='center', va='bottom', fontsize=2)

        # h5f.create_group(eval_t)
        # NOTE(review): if shift is None, none of decay_info/max_ave_info/... are bound
        # and the writes below raise NameError; also finalF1_5 is a dict (or None) and
        # h5py may reject it as a dataset value — confirm these paths are exercised
        h5f[f'/{eval_t}/decay'] = decay_info
        h5f[f'/{eval_t}/max_ave'] = max_ave_info
        h5f[f'/{eval_t}/final_ave'] = final_ave_info
        h5f[f'/{eval_t}/finalF1_5'] = finalF1_5
        # annotate the summary text just outside the top-right corner of the axes
        xmin, xmax, ymin, ymax = plt.gcf().axes[0].axis()
        plt.text(xmax, ymax, decay_info + max_ave_info + final_ave_info, size=10, rotation=0.,
                ha="left", va="top",
                bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), pad=0.2, ))    # fc: facecolor, ec: edgecolor
        plt.legend(bbox_to_anchor=(1.005, 0), loc=3, borderaxespad=0.)  # anchored outside the axes; loc code
        plt.title(name)
        plt.xlabel('epoch')
        plt.xticks(range(0, max_epk, 1))
        plt.ylabel('F-score')
        plt.savefig(f"{root}/{fname}.pdf", format='pdf')
        plt.savefig(f"{root}/{fname}.jpeg")
