import collections
import os
import matplotlib
import numpy as np
import logging
import torch
import time
import json
import random
import shelve
from tqdm import tqdm
import lib
from lib.clustering import make_clustered_dataloaders
import warnings

# Silence noisy PendingDeprecationWarning spam from dependencies.
warnings.simplefilter("ignore", category=PendingDeprecationWarning)
# Use os.environ assignment instead of os.putenv: putenv bypasses the
# os.environ mapping, so the value would be invisible to this process's
# own os.environ lookups. Assignment updates both.
os.environ["OMP_NUM_THREADS"] = "8"


def load_config(config_name):
    """Load a JSON config file and evaluate embedded Python expressions.

    String values whose first five characters are 'range' or 'float'
    (at any nesting depth) are replaced by the result of evaluating
    them, e.g. "range(0, 10)" becomes range(0, 10).

    Args:
        config_name: path to the JSON configuration file.

    Returns:
        dict: parsed configuration with expression strings evaluated.
    """
    with open(config_name, 'r') as f:
        config = json.load(f)

    def eval_json(cfg):
        # Recursively walk nested dicts, evaluating expression strings.
        for key, value in cfg.items():
            if isinstance(value, dict):
                eval_json(value)
            elif isinstance(value, str) and value[:5] in ('range', 'float'):
                # NOTE: eval is acceptable only because configs are trusted,
                # locally-authored files; never load untrusted configs here.
                cfg[key] = eval(value)

    eval_json(config)
    return config


def json_dumps(**kwargs):
    """Serialize via json.dumps, then re-indent embedded newlines.

    Object reprs may contain real newline characters, which json.dumps
    encodes as the two-character backslash escape. Replace that escape
    with a newline plus four spaces so multi-line reprs stay readable
    in the logged config dump.
    """
    serialized = json.dumps(**kwargs)
    return serialized.replace('\\n', '\n    ')


class JSONEncoder(json.JSONEncoder):
    """JSON encoder tolerant of non-serializable config values.

    Ranges are rendered as 'range(start, stop)' strings; any other type
    json cannot handle natively falls back to its repr(). Extend
    `default` if further types need custom encoding.
    """

    def default(self, obj):
        if isinstance(obj, range):
            return f'range({obj.start}, {obj.stop})'
        if isinstance(obj, (int, str, list, float, bool)):
            # Defer to the base class (which raises its usual TypeError).
            return json.JSONEncoder.default(self, obj)
        return repr(obj)


def __eval(model, dataloaders, logging, backend='faiss', config=None):
    """Evaluate `model` and return its score dict (recall etc.).

    Picks the recall@K values appropriate for the selected dataset,
    then dispatches to the in-shop protocol (separate query/gallery
    loaders) or the standard evaluation. `config` is required despite
    the keyword default.
    """
    assert config is not None
    dataset = config['dataset_selected']
    k_per_dataset = {
        'inshop': [1, 10, 20, 30, 50],
        'sop': [1, 10, 100, 1000],
    }
    K = k_per_dataset.get(dataset, [1, 2, 4, 8])
    if dataset == 'inshop':
        dl_query = lib.data.loader.make(config, model, 'eval', inshop_type='query')
        dl_gallery = lib.data.loader.make(config, model, 'eval', inshop_type='gallery')
        return lib.utils.evaluate_in_shop(
            model, dl_query=dl_query, dl_gallery=dl_gallery,
            use_penultimate=False, backend=backend, K=K)
    return lib.utils.evaluate(
        model, dataloaders['eval'], use_penultimate=False,
        backend=backend, K=K)


def train_batch(model, criterion, opt, cfg, batch, dset, epoch):
    """Run one optimization step on a single batch; return the scalar loss.

    Until roughly 8/19 of the finetune epoch, the embedding is split
    into `nb_clusters` chunks and only the chunk owned by this batch's
    cluster (`dset.id`) is trained with that cluster's criterion.
    """
    images = batch[0].cuda(non_blocking=True)
    labels = batch[1].cuda(non_blocking=True)
    _image_ids = batch[2]  # kept for parity with the loader output; unused

    opt.zero_grad()
    embedding = model(images)

    # Before the switch point, train only this cluster's embedding slice;
    # afterwards the full embedding is optimized jointly.
    if epoch < cfg['finetune_epoch'] * 8 / 19:
        chunks = embedding.split(cfg['sz_embedding'] // cfg['nb_clusters'], dim=1)
        embedding = chunks[dset.id]

    embedding = torch.nn.functional.normalize(embedding, p=2, dim=1)
    loss = criterion[dset.id](embedding, labels)
    loss.backward()
    opt.step()
    return loss.item()


def get_criterion(cfg):
    """Build one MarginLoss per cluster, sized for the train classes."""
    ds_name = cfg['dataset_selected']
    nb_classes = len(cfg['dataset'][ds_name]['classes']['train'])
    logging.debug('Create margin loss. #classes={}'.format(nb_classes))
    return [
        lib.loss.MarginLoss(nb_classes, **cfg).cuda()
        for _ in range(cfg['nb_clusters'])
    ]


def get_optimizer(config, model, criterion):
    """Create the optimizer for the configured backbone.

    resnet50 uses Adam with per-parameter-group settings taken from
    config['opt']; the inception-style backbones use Adamax with fixed
    hyperparameters. `criterion` is accepted for interface compatibility
    but its parameters are not optimized here.

    Raises:
        ValueError: if config['backbone'] is not a known backbone.
    """
    backbone = config['backbone']
    if backbone == 'resnet50':
        return torch.optim.Adam([
            {'params': model.parameters_dict['backbone'], **config['opt']['backbone']},
            {'params': model.parameters_dict['embedding'], **config['opt']['embedding']},
        ])
    # bninception and googlenet previously had identical duplicate branches.
    if backbone in ('bninception', 'googlenet'):
        return torch.optim.Adamax(model.parameters(), lr=1e-5, weight_decay=1e-4)
    # Fail loudly instead of the original implicit UnboundLocalError.
    raise ValueError('Unknown backbone: {}'.format(backbone))

def prepare_model(cfg):
    """Instantiate the configured backbone and move it to GPU(s).

    For 'bninception' (actually Inception-V3 here) the crop/resize sizes
    are adjusted in-place in cfg. If cfg['state_path'] is set, a
    checkpoint saved from a DataParallel model is loaded: the 'module.'
    prefix (7 characters) is stripped from every key, and for inception
    the auxiliary-classifier weights are skipped.

    Raises:
        ValueError: if cfg['backbone'] is not a known backbone.
    """
    if cfg['backbone'] == 'bninception':
        # Inception-V3 expects larger inputs than the default transform.
        cfg['transform_parameters']['sz_crop'] = 299
        cfg['transform_parameters']['sz_resize'] = 320
        from lib.inceptionv3 import inception_v3
        model = inception_v3(True)
        if cfg['state_path']:
            state = torch.load(cfg['state_path'])
            state2 = collections.OrderedDict()
            for k, v in state.items():
                if k.startswith('module.AuxLogits'):
                    continue  # aux classifier head is not loaded
                state2[k[7:]] = v  # drop the 'module.' DataParallel prefix
            model.load_state_dict(state2)
        model = torch.nn.DataParallel(model).cuda()
    elif cfg['backbone'] == 'googlenet':
        from lib.googlenet import googlenet
        model = googlenet(True)
        model = torch.nn.DataParallel(model).cuda()
    elif cfg['backbone'] == 'resnet50':
        model = lib.model.make(cfg).cuda()
        if cfg['state_path']:
            state = torch.load(cfg['state_path'])
            state2 = collections.OrderedDict()
            for k, v in state.items():
                state2[k[7:]] = v  # drop the 'module.' DataParallel prefix
            model.features.load_state_dict(state2)
        model.features = torch.nn.DataParallel(model.features).cuda()
    else:
        # Previously fell through and raised UnboundLocalError on return.
        raise ValueError('Unknown backbone: {}'.format(cfg['backbone']))
    return model

def start(cfg):
    """Run the full training pipeline described by config dict `cfg`.

    Sets up logging, seeding, the model and clustered dataloaders, then
    trains for cfg['nb_epochs'] epochs with periodic reclustering and a
    finetuning phase, evaluating after every epoch and checkpointing on
    a new best R@1.
    """
    # tkinter not installed on this system, use non-GUI backend
    matplotlib.use('agg')
    """
    Import `plt` after setting `matplotlib` backend to `agg`, because `tkinter`
    missing. If `agg` set, when this module is imported, then plots can not
    be displayed in jupyter notebook, because backend can be set only once.
    """
    import matplotlib.pyplot as plt

    # reserve GPU memory for faiss if faiss-gpu used
    faiss_reserver = lib.faissext.MemoryReserver()

    # create logging directory
    os.makedirs(cfg['log']['path'], exist_ok=True)

    # warn if log file exists already and append underscore
    fpath = os.path.join(cfg['log']['path'], cfg['log']['name'])
    if os.path.exists(fpath):
        warnings.warn('Log file exists already: {}'.format(fpath))
        print('Appending underscore to log file and database')
        cfg['log']['name'] += '_'

    # initialize logger
    logging.basicConfig(
        format="%(asctime)s %(message)s",
        level=logging.DEBUG if cfg['verbose'] else logging.INFO,
        handlers=[
            logging.FileHandler("{0}/{1}.log".format(cfg['log']['path'], cfg['log']['name'])),
            logging.StreamHandler()
        ])

    # print summary of config
    logging.info(json_dumps(obj=cfg, indent=4, cls=JSONEncoder, sort_keys=True))

    # NOTE(review): redundant — os.makedirs(..., exist_ok=True) above
    # already guarantees the directory exists.
    if not os.path.isdir(cfg['log']['path']):
        os.mkdir(cfg['log']['path'])

    # set random seed for all gpus
    seed = cfg['random_seed']
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    faiss_reserver.lock(cfg['backend'])

    model = prepare_model(cfg)

    # Record the checkpoint epoch the model was resumed from; state paths
    # are expected to look like '<name>@<epoch>.pt' (see save path below).
    from lib.config import _global_cfg as CFG
    if CFG.state_path:
        e = CFG.state_path.split('@')[-1]
        e = e.split('.')[0]
        CFG.e = e
    else:
        CFG.e = -1
    # TODO resume epoch
    # create init and eval dataloaders; init used for creating clustered DLs
    dataloaders = {}
    for dl_type in ['init', 'eval']:
        if cfg['dataset_selected'] == 'inshop':
            # query and gallery initialized in `make_clustered_dataloaders`
            if dl_type == 'init':
                dataloaders[dl_type] = lib.data.loader.make(cfg, model, dl_type, inshop_type='train')
        else:
            dataloaders[dl_type] = lib.data.loader.make(cfg, model, dl_type)

    criterion = get_criterion(cfg)
    opt = get_optimizer(cfg, model, criterion)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(opt, cfg['nb_epochs'])

    # evaluate the initial (pretrained/resumed) model to get a baseline
    faiss_reserver.release()
    logging.info("Evaluating initial model...")
    metrics = {}
    metrics[-1] = {
        'score': __eval(model, dataloaders, logging, backend=cfg['backend'], config=cfg)
    }
    recall_curr = metrics[-1]['score']['recall'][0]  # take R@1
    best_epoch = -1
    best_recall = recall_curr

    # C, T, I: cluster assignments, targets and image ids of the clustering
    dataloaders['train'], C, T, I = make_clustered_dataloaders(model, dataloaders['init'], cfg, logging=logging)
    faiss_reserver.lock(cfg['backend'])

    metrics[-1].update({'C': C, 'T': T, 'I': I})

    if cfg['verbose']:
        print('Printing only first 200 classes (because of SOProducts)')
        for c in range(cfg['nb_clusters']):
            print(np.bincount(np.array(dataloaders['train'][c].dataset.ys))[:200])
            plt.hist(np.array(dataloaders['train'][c].dataset.ys), bins=100)
            plt.show()

    logging.info("Training for {} epochs.".format(cfg['nb_epochs']))
    losses = []
    train_begin = time.time()

    for e in range(0, cfg['nb_epochs']):
        is_best = False
        CFG.e = e
        metrics[e] = {}
        epoch_begin = time.time()
        losses_per_epoch = []

        if e >= cfg['finetune_epoch']:
            # switch to a single cluster and finetune the full embedding
            if e == cfg['finetune_epoch'] or e == 0:
                logging.info('Starting to finetune model...')
                cfg['nb_clusters'] = 1
                logging.debug("config['nb_clusters']: {})".format(cfg['nb_clusters']))
                faiss_reserver.release()
                dataloaders['train'], C, T, I = make_clustered_dataloaders(model, dataloaders['init'], cfg, logging=logging)
                assert len(dataloaders['train']) == 1
        elif e > 0 and cfg['recluster']['enabled'] and cfg['nb_clusters'] > 0:
            # periodically recompute the clustering with the current model
            if e % cfg['recluster']['mod_epoch'] == 0:
                logging.info("Reclustering dataloaders...")
                faiss_reserver.release()
                dataloaders['train'], C, T, I = make_clustered_dataloaders(model, dataloaders['init'], cfg,
                                                                           reassign=True, C_prev=C, I_prev=I, logging=logging)
                faiss_reserver.lock(cfg['backend'])
                if cfg['verbose']:
                    for c in range(cfg['nb_clusters']):
                        print(np.bincount(np.array(dataloaders['train'][c].dataset.ys))[:200])

                metrics[e].update({'C': C, 'T': T, 'I': I})

        # merge dataloaders (created from clusters) into one dataloader
        mdl = lib.data.loader.merge(dataloaders['train'])

        # calculate number of batches for tqdm
        max_len_dataloaders = max([len(dl) for dl in dataloaders['train']])
        num_batches_approx = max_len_dataloaders * len(dataloaders['train'])

        for batch, dset in tqdm(mdl,
                                total=num_batches_approx,
                                disable=num_batches_approx < 100,
                                desc='Train epoch {}.'.format(e)):
            # batch: [images, labels, image_ids]
            loss = train_batch(model, criterion, opt, cfg, batch, dset, e)
            losses_per_epoch.append(loss)

        epoch_end = time.time()
        # epoch loss is reported as the mean over the last 20 batches only
        losses.append(np.mean(losses_per_epoch[-20:]))
        time_per_epoch = epoch_end - epoch_begin
        logging.info("Epoch: {}, loss: {:.4f}, time: {:.2f}s.".format(e, losses[-1], time_per_epoch))

        # release reserved GPU memory before the faiss-based evaluation
        faiss_reserver.release()
        eval_begin = time.time()
        metrics[e].update({
            'score': __eval(model, dataloaders, logging, backend=cfg['backend'], config=cfg),
            'loss': { 'train': losses[-1] }
        })
        logging.debug('Evaluation total elapsed time: {:.2f} s'.format(time.time() - eval_begin))
        faiss_reserver.lock(cfg['backend'])

        # NOTE(review): CosineAnnealingLR.step() interprets its argument as
        # an epoch index, not a metric — passing the loss here looks like a
        # bug; confirm intent. get_lr() is deprecated in newer torch in
        # favor of get_last_lr().
        scheduler.step(losses[-1])
        logging.info(f'Current lr: {scheduler.get_lr()}')
        recall_curr = metrics[e]['score']['recall'][0]  # take R@1
        if recall_curr > best_recall:
            best_recall = recall_curr
            best_epoch = e
            is_best = True
            logging.info(f'Best epoch@{e} R@1: {best_recall*100:.3f}')

        # save metrics etc. to shelve file (*.dat, *.bak, *.dir)
        with shelve.open(os.path.join(cfg['log']['path'], cfg['log']['name']), writeback=True) as f:
            if 'config' not in f:
                f['config'] = cfg
            if 'metrics' not in f:
                f['metrics'] = {}
                # if initial model evaluated, append metrics
                if -1 in metrics:
                    f['metrics'][-1] = metrics[-1]
            f['metrics'][e] = metrics[e]

        # checkpoint only on a new best R@1
        if cfg['save_model'] and is_best:
            state_save_path = os.path.join(cfg['log']['path'], cfg['log']['name'] + f'@{e}.pt')
            if cfg['backbone']=='resnet50':
                torch.save(model.features.state_dict(), state_save_path)
            elif cfg['backbone']=='bninception' or cfg['backbone']=='googlenet':
                torch.save(model.state_dict(), state_save_path)

            logging.info(f'Save the checkpoint: {state_save_path}!')

    logging.info("Total training time (minutes): {:.2f}.".format((time.time() - train_begin) / 60))
    logging.info("Best R@1 = {:.3f} at epoch {}.".format(best_recall*100, best_epoch))