import torch
from torch.optim import AdamW
from src.metrics import cusMetric, accMetric, Metrics
from tqdm import tqdm
from timm.models import create_model
from timm.scheduler.cosine_lr import CosineLRScheduler
from timm.loss import LabelSmoothingCrossEntropy
from argparse import ArgumentParser
from src.utils import (
    get_config,
    partial_load,
    set_seed,
    mkdirss,
    merge_config,
    partial_save,
)
from src.logger import create_logger, LOGGER
from src.criterion import MoECtn
from src.models import vision_transformer_mol  # noqa: F401
from datetime import datetime
from typing import Tuple
import numpy as np

# torch.autograd.set_detect_anomaly(True)
SAVE_KEYS = ["mol", "head"]


def mixup_data(x, y, alpha=1.0, use_cuda=True):
    """Blend a batch with a randomly permuted copy of itself (mixup).

    Draws lam ~ Beta(alpha, alpha) (lam = 1 when alpha <= 0, i.e. no
    mixing) and returns the blended inputs, the original labels, the
    permuted labels, and lam.
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1

    n = x.size()[0]
    index = torch.randperm(n)
    if use_cuda:
        index = index.cuda()

    partner = x[index, :]
    mixed_x = lam * x + (1 - lam) * partner
    return mixed_x, y, y[index], lam


def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Convex combination of the criterion evaluated on both label sets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b


def args_parser():
    """Build and parse the command-line arguments for MoE training."""
    parser = ArgumentParser()

    # model / experiment selection
    parser.add_argument("--model_ckp", type=str,
                        default="./released_models/ViT-B_16.npz")
    parser.add_argument("--model", type=str,
                        default="vit_base_patch16_224_in21k_mol")
    parser.add_argument("--task", type=str, default="vtab")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--dataset", type=str, default="cifar")
    parser.add_argument("--model_type", type=str, default="vit_mol")
    parser.add_argument("--tuning_mode", type=str, default="mol")

    # optimization hyper-parameters
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--wd", type=float, default=1e-4)
    parser.add_argument("--dpr", type=float, default=0.1)
    parser.add_argument("--epochs", type=int, default=None)

    # NOTE: store_false means both flags default to True and passing
    # the switch turns the behavior OFF.
    parser.add_argument("--eval", action="store_false")
    parser.add_argument("--resume", action="store_false")

    # MoE structure
    parser.add_argument("--num_experts", type=int, default=None)
    parser.add_argument("--rank", type=int, default=None)
    parser.add_argument("--alpha", type=int, default=None)
    parser.add_argument("--poststr", type=str, default=None)

    # routing loss configuration
    parser.add_argument("--loss_coef", type=float, default=1e-4)
    parser.add_argument("--top_k", type=int, default=None)
    parser.add_argument("--jitter_noise", type=float, default=1e-2)
    parser.add_argument("--rloss", type=str, default="zloss",
                        help="Options include [zloss, lbloss]")

    return parser.parse_args()


def finetune(config, model, dls, logger, epoch):
    """Finetune only the classification head after the main MoE training.

    Freezes every parameter whose name does not contain "head", trains
    with plain SGD + cross-entropy for ``epoch`` epochs, and every 5
    epochs evaluates on the test loader, checkpointing the best model.

    Args:
        config: experiment config; reads/updates ``best_acc`` and reads
            ``exp_base_path``.
        model: the model to finetune (moved to CUDA during training).
        dls: (train_dataloader, test_dataloader) tuple.
        logger: project logger.
        epoch: number of finetuning epochs.

    Returns:
        The finetuned model, moved back to CPU.
    """
    # Only head parameters remain trainable.
    trainable = []
    for n, p in model.named_parameters():
        if "head" in n:
            p.requires_grad = True
            trainable.append(p)
        else:
            p.requires_grad = False
    for n, p in model.named_parameters():
        if p.requires_grad:
            logger.info(str(n))

    opt = torch.optim.SGD(trainable, lr=1e-4, weight_decay=1e-5)
    # Hoisted out of the batch loop: the loss module is stateless, so a
    # single instance is equivalent and avoids per-step construction.
    ce_loss = torch.nn.CrossEntropyLoss()

    loss_metric = Metrics(
        [
            cusMetric(name="Total loss"),
        ],
        prefix="Loss Report",
    )
    acc_metric = Metrics([accMetric()], prefix="Epoch Finished")
    process_bar = tqdm(range(epoch))
    train_dl, test_dl = dls
    for ep in process_bar:
        loss_metric.reset()
        acc_metric.reset()

        model.train()
        model = model.cuda()
        # NOTE: the original called torch.cuda.empty_cache() every batch;
        # that forces a device sync and slows training while PyTorch's
        # caching allocator already reuses freed blocks, so it was removed.
        for i, batch in enumerate(train_dl):
            x, y = batch[0].cuda(), batch[1].cuda()
            out = model(x)

            loss = ce_loss(out, y)
            loss_metric[0].update(loss.data, x.size(0))
            acc_metric[0].update(out.argmax(dim=1).view(-1), y)

            opt.zero_grad()
            loss.backward()
            opt.step()

        # get learning rate (constant for this SGD, but kept consistent
        # with the readout in train())
        lr = opt.param_groups[0]["lr"]
        process_bar.set_description(
            "Avg Epoch Loss: "
            + str(round(loss_metric[0].result(), 3))
            + f" lr: {lr:.2e}"
        )

        if (ep + 1) % 5 == 0:
            logger.info(loss_metric.title(f"Epoch {ep:>2d}"))
            logger.info(loss_metric.format_str)
            logger.info(acc_metric.format_str)

            memory_used = torch.cuda.max_memory_allocated() / (
                1024.0 * 1024.0
            )
            acc_t_metric = test(model, test_dl, use_bp=False)
            acc = acc_t_metric[0].result()
            if acc > config.best_acc:
                config.best_acc = acc
                # NOTE(review): ``scheduler`` is not defined in this
                # function; it resolves to the module-level scheduler
                # created in __main__ — confirm this is intentional.
                partial_save(
                    SAVE_KEYS,
                    model,
                    opt,
                    scheduler,
                    acc,
                    ep,
                    config.exp_base_path,
                    ckp_name="ckp_best_finetune",
                )
            logger.info(acc_t_metric.format_str)
            logger.info(
                f"epoch: {ep:>2d}-th acc: {acc*100:.2f}% memory: {memory_used:.2f}MB"
            )
    model = model.cpu()
    return model


def train(
    config,
    model,
    criterion: MoECtn,
    dls: Tuple,
    opt: torch.optim.Optimizer,
    scheduler: CosineLRScheduler,
    logger: LOGGER,
):
    """Main MoE training loop with class augmentation.

    Runs epochs from ``config.cur_ep`` to ``config.epochs``; every 10th
    epoch it logs metrics, evaluates via ``test()`` and checkpoints the
    best model with ``partial_save``. Relies on the module-level names
    ``nbCls``, ``classAug``, ``test`` and ``SAVE_KEYS``.
    """
    loss_metrics = Metrics(
        [
            cusMetric(name="Total loss"),
            cusMetric(name="Routing loss"),
            # NOTE(review): "Addtional" is a typo, but it is a runtime
            # log label, so it is left unchanged here.
            cusMetric(name="Addtional loss"),
        ],
        prefix="Loss Report",
    )
    acc_metric = Metrics([accMetric()], prefix="Epoch Finished")
    # config.cur_ep may be non-zero when resuming from a checkpoint
    process_bar = tqdm(range(config.cur_ep, config.epochs))
    train_dl, test_dl = dls
    for ep in process_bar:
        loss_metrics.reset()
        acc_metric.reset()
        # clears the routing/additional losses the criterion accumulates
        # (via the forward hooks registered in __main__)
        criterion.reset_loss()

        model.train()
        model = model.cuda()
        for i, batch in enumerate(train_dl):
            x, y = batch[0].cuda(), batch[1].cuda()
            # enlarge the batch with mixed samples labeled as synthetic
            # pair-classes (see classAug / generate_label)
            x_aug, y_aug = classAug(x, y)
            out = model(x_aug)

            # criterion stores its loss terms internally; backward()
            # below consumes them rather than a returned tensor
            criterion(out, y_aug)
            acc_metric[0].update(out.argmax(dim=1).view(-1), y_aug)  # type: ignore
            loss_metrics.update(
                [
                    criterion.value,
                    criterion.route_value,
                    criterion.additional_value,
                ],
                x.size(0),
            )
            opt.zero_grad()
            criterion.backward()
            opt.step()

        if scheduler is not None:
            # timm CosineLRScheduler steps by epoch index
            scheduler.step(ep)

        # get learning rate
        lr = opt.param_groups[0]["lr"]
        process_bar.set_description(
            "Avg Epoch Loss: "
            + str(round(loss_metrics[0].result(), 3))
            + f" lr: {lr:.2e}"
        )

        if ep % 10 == 9:
            logger.info(loss_metrics.title(f"Epoch {ep:>2d}"))
            logger.info(loss_metrics.format_str)
            logger.info(acc_metric.format_str)
            memory_used = torch.cuda.max_memory_allocated() / (
                1024.0 * 1024.0
            )

            # presumably collapses the augmented head back to the real
            # nbCls classes before evaluation — confirm in the model code
            model.update_classifier(nbCls)
            acc_t_metric = test(model, test_dl, use_bp=True)
            acc = acc_t_metric[0].result()
            if acc > config.best_acc:
                config.best_acc = acc
                partial_save(
                    SAVE_KEYS,
                    model,
                    opt,
                    scheduler,
                    acc,
                    ep,
                    config.exp_base_path,
                )
            logger.info(acc_t_metric.format_str)
            logger.info(
                f"epoch: {ep:>2d}-th acc: {acc*100:.2f}% memory: {memory_used:.2f}MB"
            )


@torch.no_grad()
def test(model, dl, use_bp) -> Metrics:
    """Evaluate ``model`` on dataloader ``dl``; return a top-1 accuracy Metrics.

    ``use_bp`` is forwarded as the second argument of the model's forward.
    """
    metric = Metrics([accMetric()], prefix="Test Finished")
    model.eval()
    model = model.cuda()
    for batch in dl:
        inputs = batch[0].cuda()
        labels = batch[1].cuda()
        preds = model(inputs, use_bp).data.argmax(dim=1).view(-1)
        metric[0].update(preds, labels)
    return metric


def generate_label(y_a, y_b, nb_cls=None):
    """Map an unordered pair of distinct base-class ids to a synthetic label.

    Each pair (a, b) with a < b in [0, nb_cls) gets a unique index in
    [0, nb_cls*(nb_cls-1)/2), offset by ``nb_cls`` so the synthetic
    "mixed" labels follow the real class labels.

    Args:
        y_a: first base-class id (int).
        y_b: second base-class id (int), must differ from ``y_a``.
        nb_cls: number of base classes; defaults to the module-level
            ``nbCls`` set in ``__main__`` (backward compatible).

    Returns:
        The integer label of the synthetic pair-class.

    Raises:
        ValueError: if ``y_a == y_b`` (a pair must mix two classes).
    """
    if nb_cls is None:
        nb_cls = nbCls  # module-level global set in __main__
    if y_a == y_b:
        # explicit check instead of assert (asserts vanish under -O)
        raise ValueError("generate_label requires two distinct class ids")
    if y_a > y_b:
        y_a, y_b = y_b, y_a
    # (2*nb_cls - y_a - 1) * y_a is always even, so // is exact and the
    # result stays an int (the original's true division returned float).
    label_index = ((2 * nb_cls - y_a - 1) * y_a) // 2 + (y_b - y_a) - 1
    return label_index + nb_cls


def classAug(x, y, alpha=20.0, mix_times=4):
    """Augment a batch with samples mixing pairs of different classes.

    For ``mix_times`` random permutations of the batch, every position
    whose partner has a different label produces a blended sample
    (lam ~ Beta(alpha, alpha), clamped to 0.5 outside [0.3, 0.7]) with a
    synthetic pair-class label from ``generate_label``.

    Args:
        x: input batch tensor, first dim is batch size.
        y: integer label tensor of length batch size.
        alpha: Beta distribution parameter for the mixing weight.
        mix_times: number of permutation rounds.

    Returns:
        (x, y) with the mixed samples / synthetic labels appended.
    """
    batch_size = x.size()[0]
    device = x.device
    mix_data = []
    mix_target = []
    for _ in range(mix_times):
        index = torch.randperm(batch_size).to(device)
        # Hoisted: the original indexed the whole batch (x[index, :][i],
        # y[index][i]) inside the inner loop, copying the full permuted
        # batch once per sample.
        x_perm = x[index]
        y_perm = y[index]
        for i in range(batch_size):
            if y[i] != y_perm[i]:
                new_label = generate_label(
                    y[i].item(), y_perm[i].item()
                )
                lam = np.random.beta(alpha, alpha)
                if lam < 0.3 or lam > 0.7:
                    lam = 0.5
                mix_data.append(lam * x[i] + (1 - lam) * x_perm[i])
                mix_target.append(new_label)

    new_target = torch.Tensor(mix_target)
    y = torch.cat((y, new_target.to(device).long()), 0)
    if mix_data:
        # single stack+cat instead of one cat per mixed sample (O(k^2))
        x = torch.cat((x, torch.stack(mix_data)), 0)
    return x, y


if __name__ == "__main__":
    args = args_parser()
    # load the task/dataset-specific default config
    config = get_config("model_mol", args.task, args.dataset)
    # replace the default config with the command line arguments
    config = merge_config(args, config)
    # NOTE(review): asserts are stripped under ``python -O``; explicit
    # checks would be safer for required config fields.
    assert hasattr(config, "num_experts")
    assert hasattr(config, "loss_coef")
    set_seed(config.seed)
    # experiment name encodes the key hyper-parameters for bookkeeping
    config.name = (
        "Aug_"
        + config.name
        + "_lr_%.1e" % (config.lr)
        + "_nbEx_%d" % (config.num_experts)
        + "_rloss_%s" % (config.rloss)
        + "_rank_%d" % (config.rank)
        + "_dpr_%.1f" % (config.dpr)
        + "_topk_%s" % (config.top_k)
        + "_jnoise_%.1e" % (config.jitter_noise)
        + "_lcoef_%.1e" % (config.loss_coef)
    )
    if args.poststr is not None:
        config.name = config.name + "_" + args.poststr
    config.exp_base_path = "./output/%s/%s/%s" % (
        config.model_type,
        config.task,
        config.name,
    )
    mkdirss(config.exp_base_path)
    logger = create_logger(
        output_dir=config.exp_base_path, name=config.name
    )
    logger.format_info(vars(config))
    # NOTE(review): ``global`` at module level is a no-op statement;
    # nbCls is simply a module-level name read by train/generate_label.
    global nbCls
    nbCls = config.class_num

    # prepare training data
    if config.task == "vtab":
        from vtab import get_data

        basedir = "./data/vtab-1k"
    elif config.task == "fgvc":
        # from fgvc import *
        raise NotImplementedError
    # NOTE(review): any other task value leaves ``basedir``/``get_data``
    # undefined and fails below with NameError — confirm intended.

    train_dl, test_dl = get_data(
        basedir,
        config.dataset,
        logger,
        evaluate=config.eval,
        train_aug=config.train_aug,
        batch_size=config.batch_size,
    )

    if "swin" in config.model:
        model = create_model(
            config.model,
            pretrained=False,
            drop_path_rate=config.dpr,
            tuning_mode=config.tuning_mode,
        )
        # non-strict load: the checkpoint does not contain adapt modules
        model.load_state_dict(
            torch.load(config.model_ckp)["model"], False
        )  # not include adapt module
    else:
        model = create_model(
            config.model,
            checkpoint_path=config.model_ckp,
            drop_path_rate=config.dpr,
            # cus args
            tuning_mode=config.tuning_mode,
            num_experts=config.num_experts,
            rank=config.rank,
            alpha=config.alpha,
            jitter_noise=config.jitter_noise,
            top_k=config.top_k,
        )

    # head size: real classes plus slots for classAug's synthetic
    # pair-classes (n*(n+1)/2 slots, slightly more than the n*(n-1)/2
    # distinct pairs generate_label can emit — extras stay unused)
    config.aug_cls = config.class_num + int(
        config.class_num * (config.class_num + 1) / 2
    )
    model.reset_classifier(config.aug_cls)

    logger.info(str(model))
    device = "cuda:0"
    model = model.to(device)
    config.best_acc = 0.0
    config.cur_ep = 0
    # NOTE(review): self-assignment is a no-op
    config.task = config.task

    # ############## #
    # Training Stage #
    # ############## #
    # only MoE ("mol") and classifier ("head") parameters are trained
    trainable = []
    for n, p in model.named_parameters():
        if "mol" in n or "head" in n:
            p.requires_grad = True
            trainable.append(p)
        else:
            p.requires_grad = False

    for n, p in model.named_parameters():
        if p.requires_grad:
            logger.info(str(n))

    opt = AdamW(trainable, lr=config.lr, weight_decay=config.wd)
    if hasattr(config, "cycle_decay"):
        cycle_decay = config.cycle_decay
    else:
        cycle_decay = 0.1
    scheduler = CosineLRScheduler(
        opt,
        t_initial=config.epochs,
        warmup_t=config.warmup_epochs,
        lr_min=1e-5,
        warmup_lr_init=1e-6,
        cycle_decay=cycle_decay,
    )

    # ############## #
    # Resuming Stage #
    # ############## #
    if config.resume:
        # partial_load returns an Exception instance (not raised) when
        # no usable checkpoint is found
        ckps = partial_load(config.exp_base_path)
        if not isinstance(ckps, Exception):
            model_st, opt_st, scheduler_st, acc, ep = ckps
            config.best_acc = acc
            config.cur_ep = ep
            model.load_state_dict(model_st, strict=False)
            opt.load_state_dict(opt_st)
            scheduler.load_state_dict(scheduler_st)
            model.update_classifier(nbCls)
        else:
            logger.info(ckps)

    n_parameters = sum(
        p.numel() for p in model.parameters() if p.requires_grad
    )
    logger.info(f"number of extra params: {n_parameters}")

    # ############## #
    # Create Loss #
    # ############## #
    criterion = MoECtn(loss_coef=config.loss_coef)
    if config.labelsmoothing > 0.0:
        # label smoothing
        criterion.add(
            LabelSmoothingCrossEntropy(
                smoothing=config.labelsmoothing
            )
        )
        logger.info("label smoothing")
    else:
        criterion.add(torch.nn.CrossEntropyLoss())
        logger.info("CrossEntropyLoss")

    # attach the chosen routing loss as forward hooks on every module
    # whose name ends with "router"
    if config.rloss == "zloss":
        print("\nRegistering Z-Routing Loss Hook Funcs")
        for k, v in model.named_modules():
            if k.endswith("router"):
                v.register_forward_hook(criterion.compute_zloss)
                print(
                    f"Register routing loss hook func on module {k}"
                )
    elif config.rloss == "lbloss":
        print("\nRegistering Load-Balance Loss Hook Funcs")
        for k, v in model.named_modules():
            if k.endswith("router"):
                v.register_forward_hook(
                    criterion.compute_balance_loss
                )
                print(
                    f"Register routing loss hook func on module {k}"
                )
    else:
        raise NotImplementedError

    train(
        config,
        model,
        criterion,
        (train_dl, test_dl),
        opt,
        scheduler,
        logger,
    )
    cur_time = datetime.now().strftime("%Y%m%d%H%M%S")
    bp_name = f"{cur_time}_{config.best_acc*100:.2f}_train"
    logger.backup(bp_name)

    print(config.best_acc)
    # collapse the augmented classifier back to the real class count
    model.update_classifier(nbCls, inplace=True)

    # second stage: head-only finetuning for epochs // 10 epochs
    finetune(
        config,
        model,
        (train_dl, test_dl),
        logger,
        config.epochs // 10,
    )
    bp_name = f"{cur_time}_{config.best_acc*100:.2f}_ft"
    logger.backup(bp_name)

    logger.info("end")
