import shelve
# from collections import defaultdict
import sys
import os
import numpy as np
import pandas as pd
import time
import glob
import argparse
from tensorboardX import SummaryWriter
'''
python browse_results.py --all log
python browse_results.py --exp-dir log/car1
'''
# TODO: Results in percents.
# CLI: exactly one of --all (root dir containing one sub-dir per experiment)
# or --exp-dir (a single experiment dir) must be supplied.  Marking the group
# required makes argparse fail fast with a usage message instead of letting a
# None path crash os.path.join() later.
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--all', type=str)
group.add_argument('--exp-dir', type=str)
parser.add_argument('-cw', '--col-width', type=int, default=100)
args = parser.parse_args()
print(args)

# Collect experiment shelve paths (with the '.dat' suffix stripped, since
# shelve.open() wants the base name) and remember the root directory so the
# TensorBoard writer is placed correctly for BOTH modes.  Previously
# `args.all` was used unconditionally, which raised TypeError under --exp-dir.
if args.all:
    log_root = args.all
    files: list = []
    for p in os.listdir(args.all):
        files += sorted(list(map(lambda x: x[:-4], glob.glob(os.path.join(args.all, p, '*.dat')))))
else:
    log_root = args.exp_dir
    files = sorted(list(map(lambda x: x[:-4], glob.glob(os.path.join(args.exp_dir, '*.dat')))))

resultdict = {}
# Minutes since the current experiment's log file was last written.
last_modified = None

writer = SummaryWriter(os.path.join(log_root, 'TensorBoard'))
for p in files:
    # Open the shelve DB and require the companion .log file; skip broken runs.
    try:
        db = shelve.open(p)
        log_path = p + '.log'
        assert os.path.exists(log_path), log_path
        # Minutes elapsed since the log file was last written.
        last_modified = (time.time() - os.path.getmtime(log_path)) / 60
    except Exception as e:
        print(repr(e))
        print('Failed to open', p)
        continue

    # Ensure the shelve handle is always closed (it was leaked before).
    try:
        dtype = db['config']['dataset_selected']
        # Recall@k checkpoints follow each dataset's standard evaluation protocol.
        if dtype == 'inshop':
            ks = [1, 10, 20, 30, 50]
        elif dtype == 'sop':
            ks = [1, 10, 100, 1000]
        else:
            ks = [1, 2, 4, 8]

        # Older runs may lack an NMI score; fall back to recall-only columns.
        try:
            result_arr = np.array([(epoch, d['score']['nmi'], *d['score']['recall'])
                                   for (epoch, d) in db['metrics'].items()])
            columns = [
                'epoch',
                'nmi',
                *['R@{}'.format(i) for i in ks],
            ]
        except Exception as e:
            print(repr(e))
            result_arr = np.array([(epoch, *d['score']['recall'])
                                   for (epoch, d) in db['metrics'].items()])
            columns = [
                'epoch',
                *['R@{}'.format(i) for i in ks],
            ]

        idx = columns.index('R@1')
        # Log the actual R@1 column.  Previously row[1] was logged, which is
        # the NMI value whenever the 'nmi' column is present.
        for i, row in enumerate(result_arr):
            writer.add_scalar(f'{p}-R@1', row[idx], i)

        # Pick the epoch with the best R@1 and render 'epoch/max_epoch'.
        idx_max_recall = result_arr[:, idx].argmax()
        max_epoch = result_arr[:, 0].max()
        best_epoch_results = result_arr[idx_max_recall].tolist()
        best_epoch_results[0] = '{:02}/{:02}'.format(int(best_epoch_results[0]), int(max_epoch))
        assert len(best_epoch_results) == len(columns)

        results = dict()
        # Experiment name = parent directory of the shelve file.
        results['name'] = p.split(os.path.sep)[-2]
        results['dataset'] = dtype
        cfg = db['config']
        results['nb_clusters'] = cfg['nb_clusters']
        results['finetune_epoch'] = cfg['finetune_epoch']
        results['mod_epoch'] = cfg['recluster']['mod_epoch']
        if 'sampling' in cfg:
            results['sampling'] = cfg['sampling']
        for i, col_name in enumerate(columns):
            results[col_name] = best_epoch_results[i]

        # Status column: '[R]' when the log was touched < 10 minutes ago
        # (run presumed still active), otherwise the age in whole days.
        # NOTE: the old code computed the same branches and then
        # unconditionally overwrote them with the day count (dead code).
        if last_modified <= 10:
            results['S'] = '[R]'
        else:
            results['S'] = str(int(last_modified // 1440)) + ' days ago'

        resultdict[p] = results
    finally:
        db.close()

# Build one DataFrame row per experiment, indexed by shelve path.  The old
# popitem()-then-df.append() dance is replaced: DataFrame.append was removed
# in pandas 2.0, and popitem() raised KeyError on an empty result dict.
df = pd.DataFrame.from_dict(resultdict, orient='index')

df.sort_values(by=['dataset', 'R@1'], inplace=True)
# Columns to display, restricted below to those actually present.
columns = ['name','dataset','epoch','nb_clusters','mod_epoch','finetune_epoch','sampling','nmi','R@1','R@2','R@4','R@8','R@10','R@20','R@30','R@50','R@100','R@1000','S']
columns = list(filter(lambda x: x in df.columns.tolist(), columns))
df = df[columns]

# Wide display settings so the full table prints on one line per row.
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_columns', 10000)
pd.set_option('display.max_colwidth', args.col_width)
pd.set_option('display.width', 1000000)
# 'precision' must be prefixed with 'display.' on pandas >= 1.4.
pd.set_option('display.precision', 4)
df.reset_index(drop=True, inplace=True)
df.fillna('-', inplace=True)
print(df)

def to_txt(df: pd.DataFrame):
    """Write *df* to 'result.txt' as a column-aligned, '│'-separated table.

    Floats are truncated (not rounded) to 4 decimal places.  The first
    column is the DataFrame index, headed 'id'.  A vim modeline is appended
    so the file displays without wrapping.
    """
    import math

    def trunc(x):
        # Truncate floats to 4 decimals; everything else passes through.
        # isinstance() also covers numpy float subclasses, unlike the old
        # `type(x) == type(0.)` check.
        if isinstance(x, float):
            return math.trunc(10000 * x) / 10000
        return x

    df = df.applymap(trunc)
    head = ['id'] + df.columns.values.tolist()
    data = [head]
    # Per-column maximum string width, used for left-justified padding.
    max_lens = list(map(len, head))
    for t in df.itertuples():
        line = [str(col) for col in t]
        max_lens = [max(w, len(s)) for w, s in zip(max_lens, line)]
        data.append(line)
    with open('result.txt', 'w') as f:
        for line in data:
            for i, col in enumerate(line):
                f.write(col.ljust(max_lens[i]) + '│')
            f.write('\n')
        f.write('<!-- vim: set nowrap: -->')


if __name__ == "__main__":
    to_txt(df)
