import torch
import torch.nn as nn

from transformers import get_linear_schedule_with_warmup
from transformers import get_constant_schedule_with_warmup

import torch
from torch.optim import Optimizer
from loguru import logger


class PriorWD(Optimizer):
    """Optimizer wrapper applying weight decay toward the *initial*
    (pre-trained) parameter values instead of toward zero.

    Standard decoupled weight decay pulls weights toward 0; for fine-tuning,
    decaying toward the pre-trained prior can regularize better. The wrapper
    zeroes each group's own ``weight_decay`` so the wrapped optimizer does not
    also apply it, then applies the decay itself in :meth:`step`.

    Args:
        optim: wrapped optimizer. Its param groups are shared (not copied).
        use_prior_wd: if False, :meth:`step` is a plain passthrough.
        exclude_last_group: if True, the last param group decays toward zero
            (plain weight decay) instead of toward the prior.
    """

    def __init__(self, optim, use_prior_wd=False, exclude_last_group=True):
        super(PriorWD, self).__init__(optim.param_groups, optim.defaults)

        # param_groups is intentionally shared with the wrapped optimizer
        # (python dicts/lists do not copy by default), so edits affect both.
        self.param_groups = optim.param_groups
        self.optim = optim
        self.use_prior_wd = use_prior_wd
        self.exclude_last_group = exclude_last_group

        # Remember each group's weight decay, then disable it in the wrapped
        # optimizer so decay is applied exactly once (by us, in step()).
        self.weight_decay_by_group = []
        for group in self.param_groups:
            self.weight_decay_by_group.append(group["weight_decay"])
            group["weight_decay"] = 0

        # Snapshot of the initial (pre-trained) parameter values.
        self.prior_params = {}
        for group in self.param_groups:
            for p in group["params"]:
                self.prior_params[id(p)] = p.detach().clone()

    def step(self, closure=None):
        """Apply prior weight decay (if enabled), then step the wrapped optimizer."""
        if self.use_prior_wd:
            for i, group in enumerate(self.param_groups):
                # Hoist the loop-invariant decay factor out of the param loop.
                decay = group["lr"] * self.weight_decay_by_group[i]
                for p in group["params"]:
                    # BUG FIX: original compared i == len(self.param_groups),
                    # which enumerate() can never produce, so the last group
                    # was never actually excluded. Compare against len - 1.
                    # Also use the modern add_(tensor, alpha=...) signature;
                    # the positional-alpha form is deprecated.
                    if self.exclude_last_group and i == len(self.param_groups) - 1:
                        # Plain (decoupled) weight decay toward zero.
                        p.data.add_(p.data, alpha=-decay)
                    else:
                        # Decay toward the pre-trained prior.
                        p.data.add_(p.data - self.prior_params[id(p)], alpha=-decay)
        loss = self.optim.step(closure)

        return loss

    def compute_distance_to_prior(self, param):
        """
        Compute the L2-norm between the current parameter value to its initial (pre-trained) value.
        """
        assert id(param) in self.prior_params, "parameter not in PriorWD optimizer"
        return (param.data - self.prior_params[id(param)]).pow(2).sum().sqrt()


def get_optimizer_grouped_parameters(
    args, model, layerwise_learning_rate_decay=1.0, model_type="bert"
):
    """Build optimizer parameter groups, optionally with layer-wise LR decay.

    With ``layerwise_learning_rate_decay == 1.0`` (no decay), returns two
    groups: one with weight decay and one without (biases / LayerNorm
    weights). Otherwise builds one decay/no-decay pair per transformer layer,
    with the learning rate shrinking by the decay factor from the top layer
    down to the embeddings; the classifier/pooler head keeps the full LR.

    Args:
        args: namespace with ``weight_decay`` and ``learning_rate``.
        model: model exposing ``named_parameters()``; for layer-wise decay it
            must also expose ``model.<model_type>.embeddings`` and
            ``model.<model_type>.encoder.layer``.
        layerwise_learning_rate_decay: multiplicative per-layer LR factor.
        model_type: backbone attribute name ("bert", "roberta", "electra").

    Returns:
        List of param-group dicts suitable for a torch optimizer.

    Raises:
        NotImplementedError: for unsupported ``model_type`` values when
            layer-wise decay is requested.
    """
    no_decay = ["bias", "LayerNorm.weight"]
    if layerwise_learning_rate_decay == 1.0:
        optimizer_grouped_parameters = [
            {
                "params": [
                    p
                    for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)
                ],
                "weight_decay": args.weight_decay,
                "lr": args.learning_rate,
            },
            {
                "params": [
                    p
                    for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                "weight_decay": 0.0,
                "lr": args.learning_rate,
            },
        ]
    else:
        # The task head always trains at the full learning rate.
        optimizer_grouped_parameters = [
            {
                "params": [
                    p
                    for n, p in model.named_parameters()
                    if "classifier" in n or "pooler" in n
                ],
                "weight_decay": 0.0,
                "lr": args.learning_rate,
            },
        ]

        if model_type in ["bert", "roberta", "electra"]:
            # BUG FIX: the original read `getattr(model, args.model_type)`
            # while dispatching on the `model_type` parameter; use the
            # parameter consistently. (Also removed the unused `num_layers`.)
            backbone = getattr(model, model_type)
            layers = [backbone.embeddings] + list(backbone.encoder.layer)
            # Iterate top layer -> embeddings so deeper-from-output layers
            # get progressively smaller learning rates.
            layers.reverse()
            lr = args.learning_rate
            for layer in layers:
                lr *= layerwise_learning_rate_decay
                optimizer_grouped_parameters += [
                    {
                        "params": [
                            p
                            for n, p in layer.named_parameters()
                            if not any(nd in n for nd in no_decay)
                        ],
                        "weight_decay": args.weight_decay,
                        "lr": lr,
                    },
                    {
                        "params": [
                            p
                            for n, p in layer.named_parameters()
                            if any(nd in n for nd in no_decay)
                        ],
                        "weight_decay": 0.0,
                        "lr": lr,
                    },
                ]
        else:
            raise NotImplementedError
    return optimizer_grouped_parameters


def configure_optimizers(model, args, total_steps, use_pd=False):
    """Create the optimizer and learning-rate scheduler for training.

    Args:
        model: model whose parameters will be optimized.
        args: namespace with ``optimizer`` ("AdamW" or "SGD"),
            ``learning_rate``, ``weight_decay``, ``warmup_ratio`` and
            ``warmup_type`` ("linear" or anything else for constant).
        total_steps: total number of training steps, used to size warmup
            and the linear schedule.
        use_pd: kept for compatibility with the (disabled) PriorWD wrapper.

    Returns:
        Tuple ``(optimizer, scheduler)``.

    Raises:
        ValueError: if ``args.optimizer`` is not a supported name.
    """
    # logger.info("Optimizer: {}, Lr: {}, Warmup: {}".format(args.optimizer, args.learning_rate, args.warmup_type))

    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_params = [
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": args.weight_decay,
        },
        # BUG FIX: the original built only the decay group, so bias and
        # LayerNorm parameters were never passed to the optimizer and were
        # never updated. They belong in a second group with weight_decay=0.
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]

    #### IF use bert!!
    # optimizer_grouped_params = get_optimizer_grouped_parameters(args, model)

    if args.optimizer == "AdamW":
        optimizer = torch.optim.AdamW(
            optimizer_grouped_params,
            lr=args.learning_rate,
            eps=1e-8,
            # Per-group weight_decay above overrides this default.
            weight_decay=args.weight_decay,
        )
    elif args.optimizer == "SGD":
        optimizer = torch.optim.SGD(
            optimizer_grouped_params,
            lr=args.learning_rate,
        )
    else:
        # Fail loudly instead of hitting an UnboundLocalError below.
        raise ValueError(f"Unsupported optimizer: {args.optimizer!r}")

    warmup_steps = args.warmup_ratio * total_steps
    if args.warmup_type == "linear":
        scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=int(warmup_steps),
            num_training_steps=total_steps,
        )
    else:
        scheduler = get_constant_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=int(warmup_steps),
            # num_training_steps=total_steps,
        )

    #### IF use PriorWD
    # optimizer = PriorWD(optimizer, use_prior_wd=use_pd)

    # return [optimizer], [scheduler]
    return optimizer, scheduler


def main():
    """Script entry point — currently a stub with no side effects.

    The original kept a commented-out total-step computation here as a
    reference for wiring up a training loop; preserved below:

        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = (
                args.max_steps
                // (len(train_dataloader) // args.gradient_accumulation_steps)
                + 1
            )
        else:
            t_total = (
                len(train_dataloader)
                // args.gradient_accumulation_steps
                * args.num_train_epochs
            )
    """


# Run main() only when this file is executed directly (not on import).
if __name__ == "__main__":
    main()
