import os
import sys
import random

import yaml
import numpy as np
import torch
from torch.utils.data.dataloader import DataLoader

from WSOL.model import build_detector
from WSOL.dataset import build_dataset, build_preprocess_op
from WSOL.log import build_logger
from WSOL.utils import get_predict_metrices, Meter, build_optimizer, build_scheduler


def train(cfg):
    """Train a WSOL detector and evaluate localization/classification each epoch.

    Args:
        cfg: parsed YAML config dict with top-level keys 'Exp', 'Model',
            'Dataset', 'Train_cfg' and 'Test_cfg'.

    Side effects:
        Moves the model to the current CUDA device, streams metrics through
        the configured logger, and saves checkpoints via the model logger.
    """
    exp_cfg = cfg['Exp']
    model_cfg = cfg['Model']
    dataset_cfg = cfg['Dataset']
    train_cfg = cfg['Train_cfg']
    test_cfg = cfg['Test_cfg']

    ''' TRAINING SET INIT '''
    train_set = build_dataset(dataset_cfg)

    data_pipe_cfgs = train_cfg['data_pipe']
    optimizer_cfg = train_cfg['optimizer']
    epoch = train_cfg['epoch']
    batch_size = train_cfg['batch_size']
    log_cfg = train_cfg['log']

    # Build the preprocessing pipeline from the per-op configs.
    data_pipe = []
    for data_preprocess_cfg in data_pipe_cfgs:
        data_pipe.append(build_preprocess_op(data_preprocess_cfg))
    train_set.set_data_pipe(data_pipe)

    train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True, drop_last=True)

    ''' LOGGER INIT '''
    log_interval = int(log_cfg['interval'])
    logger = build_logger({**log_cfg['logger'], **exp_cfg})
    logger.log([{'Exp': exp_cfg, 'Model': model_cfg, 'Dataset': dataset_cfg, 'Train': train_cfg}])
    model_logger = build_logger({**log_cfg['model_logger'], **exp_cfg})

    ''' TEST SET INIT '''
    # NOTE(review): this mutates the shared dataset_cfg dict in place; the
    # training set above was already built, so it is unaffected — but any
    # later reuse of cfg['Dataset'] will see mode='test'.
    dataset_cfg['mode'] = 'test'
    loc_test_set = build_dataset(dataset_cfg)
    cls_test_set = build_dataset(dataset_cfg)

    loc_data_pipe_cfgs = test_cfg['loc_data_pipe']
    cls_data_pipe_cfgs = test_cfg['cls_data_pipe']
    batch_size = test_cfg['batch_size']
    # Wrapped in a list because the model's 'loc' mode expects a list of thresholds.
    thres = [test_cfg['thres']]

    data_pipe = []
    for data_preprocess_cfg in loc_data_pipe_cfgs:
        data_pipe.append(build_preprocess_op(data_preprocess_cfg))
    loc_test_set.set_data_pipe(data_pipe)

    loc_test_loader = DataLoader(dataset=loc_test_set, batch_size=batch_size, shuffle=False, drop_last=False)

    data_pipe = []
    for data_preprocess_cfg in cls_data_pipe_cfgs:
        data_pipe.append(build_preprocess_op(data_preprocess_cfg))
    cls_test_set.set_data_pipe(data_pipe)

    cls_test_loader = DataLoader(dataset=cls_test_set, batch_size=batch_size, shuffle=False, drop_last=False)

    ''' MODEL INIT '''
    model = build_detector(model_cfg).cuda()
    model.init_weights()

    ''' SCHEDULER AND OPTIMIZER INIT '''
    scheduler_cfg = optimizer_cfg.pop('scheduler')
    scheduler = build_scheduler(scheduler_cfg)
    params = optimizer_cfg.pop('params')
    lrs = optimizer_cfg.pop('lr')

    def _submodule(root, path):
        # Resolve a dotted attribute path on the model; '' selects the model
        # itself. Replaces the previous eval() on config strings, which would
        # execute arbitrary code embedded in the YAML file.
        # Assumes paths are plain dotted attribute names (no indexing like
        # 'layers[0]') — TODO confirm against the config files.
        for attr in path.split('.') if path else ():
            root = getattr(root, attr)
        return root

    # One optimizer parameter group (with its own learning rate) per
    # configured sub-module path.
    optimizer_cfg['params'] = [
        {'params': _submodule(model, param).parameters(), 'lr': lr}
        for param, lr in zip(params, lrs)]
    optimizer = build_optimizer(optimizer_cfg)

    ''' Record average loss of recent N batch(es) '''
    train_loss = Meter(limit=200)

    ''' Train '''
    for e in range(epoch):
        model.train()
        for i, data in enumerate(train_loader):
            input_data, annotation, size, name = data
            input_data = input_data.cuda()
            # First annotation column is the class id; the rest is the bbox.
            cls_label = annotation[:, 0]
            cls_label = cls_label.cuda().long()
            cls, loss = model(input_data, cls_label, 'train')

            optimizer.zero_grad()
            loss.backward()
            # Scheduler adjusts the lr in-place on the optimizer before the step.
            scheduler.schedule(e + 1, optimizer)
            optimizer.step()

            loss_val = loss.cpu().item()
            train_loss.update(loss_val)
            metric_dict = dict()
            metric_dict['loss'] = round(train_loss.avg, 5)
            metric_dict['lr'] = round(optimizer.param_groups[0]['lr'], 5)
            metric_dict['step'] = i
            metric_dict['epoch'] = e + 1
            if i % log_interval == 0:
                logger.log(metric_dict)

        # model_logger decides whether this epoch's weights should be saved.
        if model_logger.log(model.state_dict(), e + 1):
            logger.log('ModelLogger: epoch{:d}.pth saved'.format(e + 1))

        ''' Test '''
        # NOTE(review): `e % 1 == 0` is always true, so evaluation runs every
        # epoch — presumably a placeholder for a configurable interval.
        if e % 1 == 0:
            gt_loc_acc, top1_loc, top1_cls = Meter(), Meter(), Meter()
            model.eval()
            # The two test loaders iterate the same dataset in lockstep:
            # one pipeline for localization, one for classification.
            for idx, (loc_data, cls_data) in enumerate(zip(loc_test_loader, cls_test_loader)):
                input_data, annotation, size, name = loc_data
                input_data = input_data.cuda()
                cls_label = annotation[:, 0]
                cls_label = cls_label.cuda().long()
                bbox_label = annotation[:, 1:].numpy()
                _, predict_bbox_list, _ = model(feature=input_data, label=cls_label, mode="loc", thres=thres)

                input_cls, _, _, name = cls_data
                # NOTE(review): channel count is taken from the loc batch
                # (input_data.size(1)); flattening dim 0 suggests the cls
                # pipeline yields multiple crops per image — confirm shapes.
                input_cls = input_cls.view(-1, input_data.size(1), input_cls.size(2), input_cls.size(3)).cuda()
                # Average predictions over the crops of the (single) image.
                predict_cls = model(feature=input_cls, mode="cls").unsqueeze(0).mean(1)

                metric_dict = get_predict_metrices(sizes=size, input_size=input_data.shape[-2:],
                                                   predict_bboxes=predict_bbox_list[0],
                                                   bbox_labels=bbox_label,
                                                   predict_clses=predict_cls.cpu().detach().numpy(),
                                                   cls_labels=cls_label.cpu().detach().numpy())
                gt_loc_acc.update(metric_dict["gt_loc_acc"])
                top1_loc.update(metric_dict["top1_loc"])
                top1_cls.update(metric_dict["top1_cls"])
            metric_dict = {"threshold": thres[0], "gt_loc_acc": round(gt_loc_acc.avg, 5),
                           "top1_loc": round(top1_loc.avg, 5), "top1_cls": round(top1_cls.avg, 5)}
            logger.log(metric_dict)


def setup_seed(seed):
    """Seed every RNG source used by the project for reproducible runs.

    Seeds Python's `random`, NumPy, and torch (CPU and all CUDA devices),
    and forces cuDNN into deterministic mode.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True


if __name__ == '__main__':
    # argv[1] selects the CUDA device index for this run.
    torch.cuda.set_device(int(sys.argv[1]))
    setup_seed(0)
    config = "./configs/cub/sal_div_cam_detector_cub.yaml"
    print('Config file: {}'.format(config))
    # Context manager guarantees the config file is closed even if parsing
    # raises (the original used a bare open()/close() pair).
    with open(config, 'r', encoding='utf-8') as f:
        cfg = yaml.safe_load(f)
    train(cfg)
