import argparse
import math
import matplotlib
import os
import sys
from lib import config as g_cfg
import train


class Config(dict):
    """A read-only dict: populated once at construction, after which item
    assignment is rejected.

    NOTE(review): only ``__setitem__`` is blocked; ``update``, ``pop``,
    ``setdefault`` and ``del`` would still mutate the mapping if called.
    """

    def __init__(self, kv: dict):
        """Copy every key/value pair of *kv* into this mapping."""
        super().__init__()
        for k, v in kv.items():
            # Go through dict's own __setitem__ so the frozen override
            # below does not block the initial population.
            super().__setitem__(k, v)

    def __setitem__(self, k, v):
        # TypeError is the stdlib convention for unsupported mutation
        # (and is still an Exception, so existing broad handlers work).
        raise TypeError(f'const: can not set {k}->{v}')


if __name__ == '__main__':
    # ---- CLI --------------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--nb-clusters', required=True, type=int)
    parser.add_argument('--dataset',
                        dest='dataset_selected',
                        choices=['sop', 'inshop', 'vid', 'car', 'cub'],
                        required=True)
    parser.add_argument('--nb-epochs', type=int, default=200)
    parser.add_argument('--finetune-epoch', type=int, default=190)
    parser.add_argument('--mod-epoch', type=int, default=2)
    parser.add_argument('--num-workers', default=4, type=int)
    parser.add_argument('--sz-batch', type=int, default=80)
    parser.add_argument('--sz-embedding', default=128, type=int)
    parser.add_argument('--state-path', type=str)
    parser.add_argument('--sampling', default='simple', type=str,
                        help='For triplet-based losses.',
                        choices=('simple', 'random', 'semihard', 'distance'))
    parser.add_argument('--exp',
                        default='0',
                        type=str,
                        help='experiment identifier')
    parser.add_argument('--dir', default='default', type=str)
    parser.add_argument('--backend',
                        default='faiss',
                        choices=('torch+sklearn', 'faiss', 'faiss-gpu', 'random', 'const'))
    parser.add_argument('--random-seed', default=0, type=int)
    parser.add_argument('--backbone', default='resnet50', type=str, choices=('resnet50', 'bninception', 'googlenet'))
    parser.add_argument('--backbone-wd', default=1e-4, type=float)
    parser.add_argument('--backbone-lr', default=1e-5, type=float)
    parser.add_argument('--embedding-lr', default=1e-5, type=float)
    parser.add_argument('--embedding-wd', default=1e-4, type=float)
    parser.add_argument('--margin', default=0.2, type=float)
    parser.add_argument('--num-samples-per-class', default=16, type=int)
    parser.add_argument('--verbose', action='store_true')
    args = vars(parser.parse_args())

    # ---- Config -----------------------------------------------------------
    # Base configuration comes from the project config file; the CLI values
    # below override individual (possibly nested) entries. Each override is
    # pop()-ed out of `args` so the generic copy loop further down does not
    # re-apply it at the top level.
    config = train.load_config(config_name='config.json')

    config['backbone'] = args.pop('backbone')
    config['dataloader']['batch_size'] = args.pop('sz_batch')
    config['dataloader']['num_workers'] = args.pop('num_workers')
    config['sampling'] = args.pop('sampling')
    config['state_path'] = args.pop('state_path')
    config['recluster']['mod_epoch'] = args.pop('mod_epoch')
    config['opt']['backbone']['lr'] = args.pop('backbone_lr')
    config['opt']['backbone']['weight_decay'] = args.pop('backbone_wd')
    config['opt']['embedding']['lr'] = args.pop('embedding_lr')
    config['opt']['embedding']['weight_decay'] = args.pop('embedding_wd')
    config['margin'] = args.pop('margin')
    config['num_samples_per_class'] = args.pop('num_samples_per_class')

    # Any remaining CLI flag whose name matches an existing top-level config
    # key overrides it (e.g. nb_clusters, nb_epochs, backend, verbose, ...).
    for k in args:
        if k in config:
            config[k] = args[k]

    # Keep only the sub-config of the dataset selected on the command line.
    # list() snapshots the keys so popping during iteration is safe.
    for ds in list(config['dataset'].keys()):
        if ds != args['dataset_selected']:
            config['dataset'].pop(ds)

    # With a single cluster, reclustering is a no-op — disable it.
    if config['nb_clusters'] == 1:
        config['recluster']['enabled'] = False

    # Absolute --dir paths are used as-is; relative ones land under ./log.
    log_path = args['dir'] if os.path.isabs(args['dir']) else os.path.join('log', args['dir'])

    config['log'] = {
        'name':
        # Use the CLI value directly: config['dataset_selected'] exists only
        # when config.json already declared that key, so reading it here
        # could raise KeyError (args['dataset_selected'] is always set and
        # equals the config entry whenever the latter exists).
        '{}-K-{}-M-{}-exp-{}'.format(args['dataset_selected'],
                                     config['nb_clusters'],
                                     config['recluster']['mod_epoch'],
                                     args['exp']),
        'path': log_path
    }

    # Publish the assembled config globally, then launch training.
    g_cfg._init(config)
    train.start(config)
