import os
import argparse
import builtins
import random
import shutil
import time
import warnings
import torch
import torch.nn.functional as F
import torch.nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data.distributed


from args import get_args, extra_args

from utils.model import CECL
from tqdm import tqdm
from backbone.net import *
from utils.builder import build_cifarn_dataset_loader
from utils.util import Config, load_from_cfg, linear_rampup2
from utils.utils_algo import *
from utils.utils_loss import SupConLoss, EntropyLoss, CE_Soft_Label
from logger import get_logger

from dataset.cifar_dataloader import CifarDataloader


def main_worker():
    """Seed all RNGs, build model/optimizer/dataloaders, then run the
    warm-up + training loop, evaluating on the test set after each epoch.

    Relies on the module-level ``cfg`` namespace assigned in ``__main__``.
    """
    logger = get_logger(os.path.join('./log', cfg.name))
    cudnn.benchmark = True
    # Seed every RNG source for reproducibility.
    random.seed(cfg.seed)
    torch.manual_seed(cfg.seed)
    torch.cuda.manual_seed(cfg.seed)
    torch.cuda.manual_seed_all(cfg.seed)
    np.random.seed(cfg.seed)
    cudnn.deterministic = True

    # create model
    model = CECL(cfg, SupConNet).cuda()

    # set optimizer
    # BUGFIX: SGD's positional order is (params, lr, momentum, dampening,
    # weight_decay) -- the original passed cfg.weight_decay positionally,
    # which set *dampening* and left weight decay at 0. Use keywords.
    optimizer_model = torch.optim.SGD(model.parameters(), lr=cfg.lr,
                                      momentum=cfg.momentum,
                                      weight_decay=cfg.weight_decay)
    dataloader = CifarDataloader(cfg)
    warmup_loader = dataloader.warmup()

    test_loader = dataloader.test()

    loss_fn_soft = CE_Soft_Label()
    loss_cont_fn = SupConLoss()

    print('\nStart Training\n')

    all_time = time.time()
    for epoch in range(1, cfg.epochs + 1):
        # LR schedule is 0-indexed, hence epoch - 1.
        adjust_learning_rate(cfg, optimizer_model, epoch - 1)
        end = time.time()
        if epoch <= cfg.warm:
            # warm-up: no sample selection yet
            warm(warmup_loader, model, loss_fn_soft, loss_cont_fn, optimizer_model, epoch, logger, cfg)
        else:
            # training with clean-sample selection
            train(warmup_loader, model, loss_fn_soft, loss_cont_fn, optimizer_model, epoch, logger, cfg)
        logger.info(f'Epoch[{epoch}] Time {time.time() - end:.3f}s')
        end = time.time()
        # test
        evaluate(test_loader, model, logger)
        logger.info(f'Epoch[{epoch}] Test Time {time.time() - end:.3f}s')
    logger.info(f'All Time {time.time() - all_time:.3f}s')


def warm(train_loader, model, loss_fn, loss_cont_fn, optimizer, epoch, logger, cfg=None):
    """Run one warm-up epoch: supervised soft-CE plus contrastive loss,
    with every sample in the batch participating (no selection yet).
    """
    cls_meter = AverageMeter('Loss@Cls', ':2.2f')
    cont_meter = AverageMeter('Loss@Cont', ':2.2f')

    # switch to train mode
    model.train()

    # --------------- start ---------------
    for step, (idx, views, labels, _) in enumerate(train_loader):
        x, x_weak, _ = views
        labels = labels.long().detach().cuda()

        # batch size is only needed for bookkeeping / mask shapes
        n = labels.size(0)

        output_x, moco_queue = model(x, x_weak, labels, None, None, cfg, 's0')
        feats = moco_queue['feature']
        targets = moco_queue['target']

        # positive-pair mask: batch anchors vs the whole queue,
        # 1 where the (noisy) labels agree
        mask = torch.eq(targets[:n].unsqueeze(dim=1), targets.unsqueeze(dim=1).T).float().cuda()

        # one-hot encode the labels for the soft-CE loss
        onehot = torch.zeros(n, cfg.num_class).cuda().scatter_(1, labels.view(-1, 1), 1)
        loss_cls = loss_fn(output_x, onehot).mean()
        # all-ones index: during warm-up every sample is treated as clean
        loss_cont = loss_cont_fn(features=feats, index=torch.ones(n).bool(), mask=mask,
                                 batch_size=n)
        loss = loss_cls + cfg.weight_cl * loss_cont

        cls_meter.update(loss_cls.item())
        cont_meter.update(loss_cont.item())

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 100 == 0:
            logger.info(f'Epoch[{epoch}] Classification Loss {cls_meter.avg}\t'
                        f'Contrastive Loss {cont_meter.avg}')

    logger.info(f'Epoch[{epoch}] Classification Loss {cls_meter.avg}\t'
                f'Contrastive Loss {cont_meter.avg}')


def train(train_loader, model, loss_fn, loss_cont_fn, optimizer, epoch, logger, cfg=None):
    """Run one post-warm-up epoch: the model additionally returns a
    ``selected`` mask of presumed-clean samples, and both losses are
    restricted to (or indexed by) that selection.
    """
    cls_meter = AverageMeter('Loss@Cls', ':2.2f')
    cont_meter = AverageMeter('Loss@Cont', ':2.2f')

    model.train()
    # --------------- start ---------------
    for step, (views, labels, _) in enumerate(train_loader):
        x, x_weak, _ = views
        labels = labels.long().detach().cuda()
        n = labels.size(0)

        output_x, selected, moco_queue = model(x, x_weak, labels, cfg=cfg)

        feats = moco_queue['feature']
        targets = moco_queue['target']
        is_id = moco_queue['IDindex']

        # same-label pairs between batch anchors and queue entries,
        # gated by the queue's in-distribution indicator
        anchors = targets[:n].unsqueeze(dim=1)
        queue = targets[n:].unsqueeze(dim=1).T
        mask = torch.eq(anchors, queue).float().cuda().mul(is_id[n:])

        # one-hot labels; classification loss only over selected samples
        onehot = torch.zeros(n, cfg.num_class).cuda().scatter_(1, labels.view(-1, 1), 1)
        loss_cls = loss_fn(output_x[selected], onehot[selected]).mean()

        loss_cont = loss_cont_fn(features=feats, index=selected, mask=mask, batch_size=n)

        loss = loss_cls + cfg.weight_cl * loss_cont

        cls_meter.update(loss_cls.item())
        cont_meter.update(loss_cont.item())

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 100 == 0:
            logger.info(f'Epoch[{epoch}] Classification Loss {cls_meter.avg}\t'
                        f'Contrastive Loss {cont_meter.avg}')

    logger.info(f'Epoch[{epoch}] Classification Loss {cls_meter.avg}\t'
                f'Contrastive Loss {cont_meter.avg}')


best_acc1, best_acc5 = 0, 0


@torch.no_grad()
def evaluate(dataloader, model, logger):
    """Evaluate the model on the test loader and log loss / top-1 / top-5,
    keeping track of the best accuracies across calls via module globals.
    """
    global best_acc1, best_acc5
    model.eval()
    start = time.time()
    loss_meter = AverageMeter('Loss', ':.4f')
    acc1_meter = AverageMeter('Acc@1', ':6.2f')
    acc5_meter = AverageMeter('Acc@5', ':6.2f')

    for images, labels in dataloader:
        images = images.cuda(non_blocking=True)
        labels = labels.cuda(non_blocking=True)
        # model returns a 3-tuple at eval time; logits are the last element
        _, _, logits = model(images)
        loss = F.cross_entropy(logits, labels)
        acc1, acc5 = accuracy(logits.cpu(), labels.cpu(), topk=(1, 5))
        n = images.size(0)
        acc1_meter.update(acc1[0], n)
        acc5_meter.update(acc5[0], n)
        loss_meter.update(loss.item(), n)

    best_acc1 = max(best_acc1, acc1_meter.avg)
    best_acc5 = max(best_acc5, acc5_meter.avg)
    logger.info(f'Evaluate Summary '
                f'Time {time.time() - start:.3f}s\t'
                f'Loss {loss_meter.avg:.3f}\t'
                f'Acc@1 {acc1_meter.avg:.3f}(Best: {best_acc1:.3f})\t'
                f'Acc@5 {acc5_meter.avg:.3f}(Best: {best_acc5:.3f})\t')


if __name__ == '__main__':
    args = extra_args(get_args())

    # exist_ok avoids the TOCTOU race between an exists() check and makedirs().
    os.makedirs('./log', exist_ok=True)

    # Run name encodes dataset + backbone + noise configuration.
    name = f'cifar{args.dataset}-{args.backbone}-{args.r_ood}-{args.r_id}-{args.r_imb}'
    if args.asym:
        name += '-asym'
    os.makedirs(os.path.join('./log', name), exist_ok=True)
    args.name = name

    # Parse "100,150" -> [100, 150]: epochs at which the LR is decayed.
    args.lr_decay_epochs = [int(it) for it in args.lr_decay_epochs.split(',')]
    # CIFAR-10 / CIFAR-100: the dataset id doubles as the class count.
    args.num_class = args.dataset
    cfg = args
    main_worker()
