#!/usr/bin/env python
# coding: utf-8

import os
import json
import time
import torch, random
import argparse
from collections import defaultdict
import pandas as pd
from _code.Utils import recall

"""
python Recall.py --result-dir /home/zgp/Project/DREML/results/car_12ensemble_18metaclass_32epochs --learners "range(1, 12)"
"""

# --- CLI -------------------------------------------------------------------
# Either scan every sub-directory of --all, or evaluate a single --result-dir.
# NOTE: parsing happens at import time, so importing this module requires a
# valid command line.
parser = argparse.ArgumentParser(description="Calculate Recall")
group = parser.add_mutually_exclusive_group()
group.add_argument('--all', type=str)  # parent directory: evaluate each result sub-dir
group.add_argument('--result-dir', type=str)  # a single result directory
parser.add_argument('--learners', type=str)  # e.g. "range(1, 12)" — eval'd in main()
args = parser.parse_args()
print(args)


def load_config(root: str) -> dict:
    """Load ``config.json`` from *root* and attach the recall cut-offs.

    Args:
        root: result directory containing a ``config.json`` file.

    Returns:
        The parsed config dict, augmented with ``cfg['ks']``, the list of
        Recall@k ranks conventionally reported for the configured dataset.

    Raises:
        ValueError: if the dataset has no known recall ranks. (The original
            code silently returned a cfg without 'ks', which caused an
            opaque KeyError later at ``cfg.pop('ks')``.)
    """
    # Recall@k cut-offs conventionally reported per benchmark dataset.
    ks_by_dataset = {
        'CAR': [1, 2, 4, 8, 16],
        'CUB': [1, 2, 4, 8, 16],
        'SHOP': [1, 10, 20, 30, 50],
        'SOP': [1, 10, 100, 1000],
    }
    path = os.path.join(root, 'config.json')
    with open(path, 'r') as f:
        cfg = json.load(f)
    dataset = cfg['dataset']
    try:
        cfg['ks'] = ks_by_dataset[dataset]
    except KeyError:
        raise ValueError(f'Unknown dataset {dataset!r}: no recall ranks defined')
    return cfg


def acc(src: str, learners: list = None):
    """Compute Recall@k for the learner ensemble stored in directory *src*.

    Args:
        src: result directory containing ``config.json``, ``testdsets.pth``
            and per-learner ``{i}testFvecs.pth`` feature-vector files.
        learners: indices of the learners to ensemble; defaults to all
            ``ensemble_size`` learners declared in the config.

    Returns:
        A dict with the config values plus 'R@k' percentages (formatted as
        strings), the concatenated embedding size and file age in days
        ('S'), or ``None`` when the directory is missing required files.
    """
    try:
        cfg = load_config(src)
        if not learners:
            learners = range(int(cfg['ensemble_size']))
        learners = list(learners)
        # dataset info — provides idx_to_class for the recall computation
        dsets = torch.load(os.path.join(src, 'testdsets.pth'))
        # concatenate every learner's feature vectors along the embedding dim
        fvecs = [torch.load(os.path.join(src, f'{d}testFvecs.pth')) for d in learners]
        R = torch.cat(fvecs, 1)
        # age, in whole days, of the most recently listed learner's file
        newest = os.path.join(src, f'{learners[-1]}testFvecs.pth')
        last_modified = (time.time() - os.path.getmtime(newest)) // (60 * 60 * 24)
    except Exception as e:
        # Narrower than the original bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) and reports what actually failed.
        print(f'Incomplete: {src} ({e!r})')
        return None

    print(f'{src}: {R.size()}')

    ks = cfg.pop('ks')
    acc_list = recall(R, dsets.idx_to_class, rank=ks)
    acc_dict = {k: v.item() for k, v in zip(ks, acc_list)}

    rv = dict(cfg)
    rv['embedding_size'] = R.size(1)
    rv['ensemble_size'] = len(learners)
    rv['S'] = str(int(last_modified))
    for k, v in acc_dict.items():
        rv[f'R@{k}'] = f'{v*100:.2f}'
    return rv


def main():
    """Collect recall results for one or many result directories, print a
    summary table, and dump it to ``result.txt``.

    With --all, every sub-directory is evaluated and results are cached in
    ``<all>/results.json``; otherwise only --result-dir is evaluated.
    """
    # Resolve which result directories to evaluate.
    dirs = []
    if args.all:
        for entry in os.listdir(args.all):
            full = os.path.join(args.all, entry)
            if os.path.isdir(full):
                dirs.append(full)
    else:
        dirs.append(args.result_dir)

    results = defaultdict(dict)
    if args.all:  # load cached results, dropping entries for vanished dirs
        cache_path = os.path.join(args.all, 'results.json')
        try:
            with open(cache_path, 'r') as f:
                cache = json.load(f)
            results.update({k: v for k, v in cache.items() if k in dirs})
        except Exception as e:
            print(repr(e))

    # SECURITY: eval() of a CLI string executes arbitrary code; kept only for
    # backward compatibility with inputs like --learners "range(1, 12)".
    learners = eval(args.learners) if args.learners else None
    for d in dirs:
        if d not in results:
            result = acc(d, learners)
            if result:
                results[d] = result

    if args.all:  # save cache
        with open(cache_path, 'w') as f:
            json.dump(results, f, indent=4)

    if not results:
        # The original crashed with KeyError on popitem() in this case.
        print('No complete results found.')
        return

    # Build the table in one shot — DataFrame.append was removed in pandas 2.0.
    df = pd.DataFrame.from_dict(results, orient='index')

    df.sort_values(by=['dataset', 'R@1'], inplace=True)
    # columns to display, restricted to those actually present
    columns = ['name', 'dataset', 'backbone', 'head_tail', 'attention',
               'ensemble_size', 'meta_class_size', 'embedding_size',
               'batch_size', 'nb_epochs', 'sampling', 'nmi',
               'R@1', 'R@2', 'R@4', 'R@8', 'R@10', 'R@20', 'R@30',
               'R@50', 'R@100', 'R@1000', 'S']
    df = df[[c for c in columns if c in df.columns]]
    df = df.rename(columns={"ensemble_size": "ensemble",
                            "meta_class_size": "meta-class",
                            "embedding_size": "embedding",
                            "batch_size": "bs",
                            "nb_epochs": "epochs"})

    pd.set_option('display.max_rows', 10000)
    pd.set_option('display.max_columns', 10000)
    pd.set_option('display.max_colwidth', 100)
    pd.set_option('display.width', 1000000)
    # bare 'precision' alias was removed in pandas 2.x
    pd.set_option('display.precision', 4)
    df.reset_index(drop=True, inplace=True)
    df.fillna('-', inplace=True)
    print(df)
    to_txt(df)

def to_txt(df: pd.DataFrame):
    """Write *df* as an aligned, '│'-separated table to ``result.txt``.

    Floats are truncated (not rounded) to 4 decimal places; every column is
    left-justified to the width of its widest cell, header included. The
    first column ('id') is the DataFrame index.
    """
    import math

    def trunc(x):
        # Truncate floats to 4 decimals; pass everything else through.
        # isinstance (not type ==) also covers float subclasses.
        if isinstance(x, float):
            return math.trunc(10000 * x) / 10000
        return x

    df = df.applymap(trunc)
    head = ['id'] + list(df.columns)
    # itertuples yields (index, col1, col2, ...), matching the 'id' header
    rows = [head] + [[str(col) for col in t] for t in df.itertuples()]
    # per-column width = widest cell in that column
    widths = [max(len(row[i]) for row in rows) for i in range(len(head))]
    with open('result.txt', 'w') as f:
        for row in rows:
            for i, col in enumerate(row):
                f.write(col.ljust(widths[i]) + '│')
            f.write('\n')


# Script entry point (argument parsing already ran at module import above).
if __name__ == "__main__":
    main()
