import torch
from torch.optim import AdamW
from avalanche.evaluation.metrics.accuracy import Accuracy
from avalanche.evaluation.metrics.loss import LossMetric
from tqdm import tqdm
from timm.models import create_model
from timm.scheduler.cosine_lr import CosineLRScheduler
from timm.loss import LabelSmoothingCrossEntropy
from argparse import ArgumentParser
from utils import (
    get_config,
    set_seed,
    mkdirss,
    merge_config,
    partial_save,
    # partial_load,
)
from logger import create_logger
from src.models import vision_transformer_lora  # noqa: F401

# Parameter-name substrings whose weights partial_save persists.
# Fix: the original listed "linear_a" twice, so the LoRA "linear_b"
# projections were never saved even though they are trained.
# NOTE(review): "head" is also trainable (see the requires_grad filter in
# __main__) but is not saved here — confirm whether that is intentional.
SAVE_KEYS = ["linear_a", "linear_b"]


def train(
    config, model, criterion, dl, opt, scheduler, logger, epoch
):
    """Fine-tune ``model`` on dataloader ``dl`` for ``epoch`` epochs on GPU.

    Every 10th epoch the model is evaluated (via the module-level ``test``
    on the module-level ``test_dl``); when accuracy improves on
    ``config.best_acc`` the LoRA/head weights are written out with
    ``partial_save``. The model is returned on the CPU.

    NOTE(review): this function reads the globals ``test`` and ``test_dl``,
    so it only works when driven by this script's ``__main__`` section.

    Args:
        config: namespace with ``best_acc``, ``model_type``, ``task``, ``name``.
        model: the network to fine-tune (moved to CUDA here).
        criterion: loss function mapping (logits, targets) -> scalar loss.
        dl: training dataloader yielding (inputs, targets) batches.
        opt: optimizer over the trainable parameters.
        scheduler: per-epoch LR scheduler, or None to keep a fixed LR.
        logger: logger for periodic accuracy/memory reports.
        epoch: total number of epochs to run.

    Returns:
        The trained model, moved back to CPU.
    """
    model = model.cuda()
    process_bar = tqdm(range(epoch))
    for ep in process_bar:
        lossMetric = LossMetric()
        # test() below switches the model to eval mode, so training mode
        # must be re-enabled at the start of every epoch.
        model.train()
        for batch in dl:
            x, y = batch[0].cuda(), batch[1].cuda()
            out = model(x)

            loss = criterion(out, y)
            # detach() instead of the legacy .data attribute.
            lossMetric.update(loss.detach(), x.size(0))
            opt.zero_grad()
            loss.backward()
            opt.step()

        if scheduler is not None:
            scheduler.step(ep)
        # current learning rate, for the progress-bar readout
        lr = opt.param_groups[0]["lr"]
        process_bar.set_description(
            "Avg Epoch Loss: "
            + str(round(lossMetric.result(), 3))
            # ".2e" (2 decimals, scientific); the original "2e" was a
            # minimum field width of 2, which printed full precision.
            + f" lr: {lr:.2e}"
        )

        # evaluate and checkpoint every 10 epochs
        if ep % 10 == 9:
            memory_used = torch.cuda.max_memory_allocated() / (
                1024.0 * 1024.0
            )
            acc = test(model, test_dl)
            if acc > config.best_acc:
                config.best_acc = acc
                partial_save(
                    SAVE_KEYS,
                    model,
                    config.model_type,
                    config.task,
                    config.name,
                    acc,
                    ep,
                )
            logger.info(
                f"epoch: {ep:>2d}-th acc: {acc*100:.2f}% memory: {memory_used:.2f}MB"
            )
    model = model.cpu()
    return model


@torch.no_grad()
def test(model, dl):
    """Return top-1 accuracy of ``model`` over dataloader ``dl`` (on GPU).

    Args:
        model: network producing class logits; moved to CUDA and set to eval.
        dl: dataloader yielding (inputs, targets) batches.

    Returns:
        Accuracy in [0, 1] as reported by avalanche's Accuracy metric.
    """
    model.eval()
    acc = Accuracy()
    model = model.cuda()
    # NOTE: the original called torch.cuda.empty_cache() every batch,
    # which forces a synchronization per batch for no benefit — removed.
    for batch in dl:
        x, y = batch[0].cuda(), batch[1].cuda()
        # no_grad is active, so the output carries no autograd graph and
        # the legacy .data access is unnecessary.
        out = model(x)
        acc.update(out.argmax(dim=1).view(-1), y)
    return acc.result()


if __name__ == "__main__":
    # NOTE: kept at module scope (not wrapped in a main()) on purpose —
    # train() reads the module-level global ``test_dl``.
    parser = ArgumentParser()
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--wd", type=float, default=1e-4)
    parser.add_argument("--eval", type=str, default="True")
    parser.add_argument("--dpr", type=float, default=0.1)
    parser.add_argument(
        "--model_ckp",
        type=str,
        default="./released_models/ViT-B_16.npz",
    )
    parser.add_argument(
        "--model", type=str, default="vit_base_patch16_224_in21k_lora"
    )
    parser.add_argument("--task", type=str, default="vtab")
    parser.add_argument("--dataset", type=str, default="cifar")
    parser.add_argument("--model_type", type=str, default="vit_lora")
    parser.add_argument("--tuning_mode", type=str, default="lora")
    parser.add_argument("--topN", type=int, default=None)

    args = parser.parse_args()
    config = get_config("model_lora", args.task, args.dataset)
    # command-line arguments override the default config values
    config = merge_config(args, config)
    print(config)
    set_seed(config.seed)

    exp_base_path = (
        f"./output/{config.model_type}/{config.task}/{config.name}"
    )
    mkdirss(exp_base_path)
    logger = create_logger(output_dir=exp_base_path, name=config.name)

    logger.info(args)
    logger.info(config)

    # --eval is a string flag: exactly "True" enables evaluation mode
    evalflag = config.eval == "True"

    if config.task == "vtab":
        from vtab import get_data

        basedir = "./data/vtab-1k"
    elif config.task == "fgvc":
        raise NotImplementedError
    else:
        # fail fast; the original fell through to a NameError on
        # basedir/get_data below for unknown tasks
        raise ValueError(f"unknown task: {config.task}")

    # default train_aug to False when absent from the config
    train_aug = getattr(config, "train_aug", False)

    train_dl, test_dl = get_data(
        basedir,
        config.dataset,
        logger,
        evaluate=evalflag,
        train_aug=train_aug,
        batch_size=config.batch_size,
    )

    if "swin" in config.model:
        model = create_model(
            config.model,
            pretrained=False,
            drop_path_rate=config.dpr,
            tuning_mode=config.tuning_mode,
        )
        # non-strict load: the checkpoint does not include the newly
        # added adapt/tuning modules
        model.load_state_dict(
            torch.load(config.model_ckp)["model"], False
        )
    else:
        model = create_model(
            config.model,
            checkpoint_path=config.model_ckp,
            drop_path_rate=config.dpr,
            tuning_mode=config.tuning_mode,
            topN=config.topN,
        )

    model.reset_classifier(config.class_num)

    logger.info(str(model))

    config.best_acc = 0.0

    # freeze everything except the LoRA projections and classifier head
    trainable = []
    for n, p in model.named_parameters():
        if "linear_a" in n or "linear_b" in n or "head" in n:
            trainable.append(p)
            logger.info(str(n))
        else:
            p.requires_grad = False

    opt = AdamW(trainable, lr=config.lr, weight_decay=config.wd)

    cycle_decay = getattr(config, "cycle_decay", 0.1)

    scheduler = CosineLRScheduler(
        opt,
        t_initial=config.epochs,
        warmup_t=config.warmup_epochs,
        lr_min=1e-5,
        warmup_lr_init=1e-6,
        cycle_decay=cycle_decay,
    )

    # count only the parameters left trainable after the freeze above
    n_parameters = sum(
        p.numel() for p in model.parameters() if p.requires_grad
    )

    logger.info(f"number of extra params: {n_parameters}")

    if config.labelsmoothing > 0.0:
        criterion = LabelSmoothingCrossEntropy(
            smoothing=config.labelsmoothing
        )
        logger.info("label smoothing")
    else:
        criterion = torch.nn.CrossEntropyLoss()
        logger.info("CrossEntropyLoss")

    model = train(
        config,
        model,
        criterion,
        train_dl,
        opt,
        scheduler,
        logger,
        config.epochs,
    )
    print(config.best_acc)

    logger.info("end")
