"""
Reference:
https://github.com/hshustc/CVPR19_Incremental_Learning/blob/master/cifar100-class-incremental/modified_linear.py
"""

import math
import torch
from torch import nn
from torch.nn import functional as F
from timm.layers.weight_init import trunc_normal_
from copy import deepcopy


class SimpleContinualLinear(nn.Module):
    """Continual-learning classifier built from one cosine head per task.

    Each head is a bias-free ``nn.Linear`` (optionally preceded by a
    ``LayerNorm``); ``forward`` L2-normalizes features and weights and
    concatenates the per-task cosine logits along dim 1.
    """

    def __init__(
        self, embed_dim, nb_classes, feat_expand=False, with_norm=False
    ):
        super().__init__()

        self.embed_dim = embed_dim
        self.feat_expand = feat_expand
        self.with_norm = with_norm
        self.heads = nn.ModuleList([self._make_head(nb_classes)])
        for m in self.modules():
            if isinstance(m, nn.Linear):
                # torch's built-in trunc_normal_ replaces the timm import;
                # both implement the same truncated-normal initialization.
                nn.init.trunc_normal_(m.weight, std=0.02)

    def _make_head(self, nb_classes):
        """Build one task head: [LayerNorm ->] Linear(embed_dim, nb_classes)."""
        layers = []
        if self.with_norm:
            layers.append(nn.LayerNorm(self.embed_dim))
        layers.append(nn.Linear(self.embed_dim, nb_classes, bias=False))
        return nn.Sequential(*layers)

    def backup(self):
        """Snapshot the full state dict so training can be rolled back."""
        self.old_state_dict = deepcopy(self.state_dict())

    def recall(self):
        """Restore the state captured by the last backup()."""
        self.load_state_dict(self.old_state_dict)

    def update(self, nb_classes, freeze_old=True):
        """Append a head for a new task, optionally freezing all old heads."""
        new_head = self._make_head(nb_classes)
        nn.init.trunc_normal_(new_head[-1].weight, std=0.02)

        if freeze_old:
            for p in self.heads.parameters():
                p.requires_grad = False

        self.heads.append(new_head)

    def forward(self, x, feat_expand=None):
        """Return {"logits": [B, total_classes]} cosine logits.

        Args:
            x: shared features [B, embed_dim], or per-task features indexed
               as x[ti] when feat_expand is true.
            feat_expand: overrides self.feat_expand when not None.
        """
        feat_expand = (
            feat_expand if feat_expand is not None else self.feat_expand
        )
        out = []
        for ti, head in enumerate(self.heads):
            fc_inp = x[ti] if feat_expand else x
            # Index [-1] picks the Linear even when a LayerNorm sits at [0]
            # (the old [0] indexing crashed for with_norm=True).
            # NOTE(review): the LayerNorm is never applied here — only the
            # fc weight is used for the cosine logits.
            fc_weight = head[-1].weight  # type: ignore
            out.append(
                F.linear(
                    F.normalize(fc_inp, p=2, dim=1),
                    F.normalize(fc_weight, p=2, dim=1),
                )
            )
        out = {"logits": torch.cat(out, dim=1)}
        return out

    def after_task(self):
        pass

    def update_intermedium(self):
        pass


class ABLinear(nn.Module):
    """Abstract base for the linear heads in this module.

    Holds the shared bookkeeping (feature sizes, proxy count) and exposes
    no-op hooks that concrete subclasses override.
    """

    def __init__(
        self,
        in_features,
        out_features,
        nb_proxy=1,
        to_reduce=False,
    ):
        super().__init__()
        self.in_features = in_features
        # With proxies, each class owns nb_proxy output columns.
        self.out_features = out_features * nb_proxy
        self.nb_proxy = nb_proxy
        self.to_reduce = to_reduce

    # --- subclass hooks: intentionally no-ops at this level ---

    def reset_parameters(self):
        pass

    def reset_parameters_to_zero(self):
        pass

    def forward(self):
        pass

    def forward_reweight_single(self):
        pass

    def forward_reweight(self):
        pass

    def after_task(self):
        pass

    def update_intermedium(self):
        pass

    @staticmethod
    def _prob(x):
        """Map cosine similarities to pseudo-probabilities.

        Negative similarities are damped by 0.75 first, then
        (pi - acos(x)) / pi sends similarity 1 -> 1 and -1 -> near 0.

        Args:
            x: similarity tensor (values expected within [-1, 1]).
        """
        damped = torch.where(x < 0, x * 0.75, x)
        pi = torch.tensor(math.pi)
        return (pi - torch.acos(damped)) / pi


class MARCLinear(ABLinear):
    """A wrapper for nn.Linear with support of MARC method.

    Logits are rescaled per class with a learnable slope ``a`` and an
    offset ``b`` times the per-class weight norm; the wrapped classifier
    itself contributes no gradients (its forward runs under no_grad).
    """

    def __init__(self, in_features, out_features):
        super().__init__(
            in_features,
            out_features,
        )
        self.fc = nn.Linear(in_features, out_features)
        self.a = nn.Parameter(torch.ones(1, out_features))
        self.b = nn.Parameter(torch.zeros(1, out_features))

    def forward(self, input, *args):
        """Return calibrated logits [B, out_features] (raw tensor, not dict)."""
        # Only a and b learn; base logits and weight norms are constants here.
        with torch.no_grad():
            base_logits = self.fc(input)
            weight_norms = self.fc.weight.norm(dim=1)
        return self.a * base_logits + self.b * weight_norms


class Linear(ABLinear):
    """Plain linear head with optional proxy reduction.

    NOTE(review): ``self.out_features`` is ``out_features * nb_proxy`` but the
    weight is allocated with only ``out_features`` rows — confirm callers pass
    the proxy-expanded class count when nb_proxy > 1.
    """

    def __init__(
        self,
        in_features,
        out_features,
        nb_proxy=1,
        to_reduce=False,
        bias=True,
        device=None,
        dtype=None,
    ):
        super(Linear, self).__init__(
            in_features,
            out_features,
            nb_proxy,
            to_reduce,
        )
        self.nb_proxy = nb_proxy
        self.to_reduce = to_reduce

        self.in_features = in_features
        self.out_features = out_features * nb_proxy
        factory_kwargs = {"device": device, "dtype": dtype}
        self.weight = nn.Parameter(
            torch.empty((out_features, in_features), **factory_kwargs)
        )
        if bias:
            self.bias = nn.Parameter(
                torch.empty(out_features, **factory_kwargs)
            )
        else:
            self.register_parameter("bias", None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Initialize exactly like torch.nn.Linear."""
        # Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
        # uniform(-1/sqrt(in_features), 1/sqrt(in_features)). For details, see
        # https://github.com/pytorch/pytorch/issues/57109
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, input):
        """Return {"logits": ...}; proxies are reduced when to_reduce is set."""
        out = F.linear(input, self.weight, self.bias)

        if self.to_reduce:
            # Reduce_proxy: collapse nb_proxy columns per class.
            out = reduce_proxies(out, self.nb_proxy)

        return {"logits": out}

    def forward_reweight_single(
        self,
        input,
        cur_task,
        alpha=0.1,
        beta=0.0,
        init_cls=10,
        inc=10,
        out_dim=768,
        use_init_ptm=False,
    ):
        """Score each task's feature slice against that task's class rows.

        Args:
            input: per-task features flattened along dim 1,
                i.e. [B, (cur_task + 1) * out_dim] — task i uses columns
                [i*out_dim, (i+1)*out_dim).
            cur_task: index of the latest task (inclusive).
            init_cls / inc: class counts of the first / subsequent tasks.

        NOTE(review): alpha, beta and use_init_ptm are accepted for signature
        parity with forward_reweight but are unused here.
        """
        outs = []
        for i in range(cur_task + 1):
            # Class-id range owned by task i.
            start_cls = 0 if i == 0 else init_cls + (i - 1) * inc
            end_cls = init_cls if i == 0 else start_cls + inc

            # Task i's feature slice: [B, out_dim].
            feats = input[:, i * out_dim : (i + 1) * out_dim]

            task_weight = self.weight[start_cls:end_cls, :]
            # Guard: the head may have been built with bias=False
            # (the old code crashed slicing a None bias).
            task_bias = (
                None if self.bias is None else self.bias[start_cls:end_cls]
            )

            outs.append(F.linear(feats, task_weight, task_bias))

        # Single cat replaces the old incremental cat with its dead
        # `if i != 0 else out` ternary inside the else-branch.
        return {"logits": torch.cat(outs, dim=1)}


class CosineLinear(ABLinear):
    """Cosine-similarity classifier head (bias-free, L2-normalized)."""

    def __init__(
        self,
        in_features,
        out_features,
    ):
        super(CosineLinear, self).__init__(in_features, out_features)
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(
            torch.Tensor(self.out_features, in_features)
        )

        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(in_features), 1/sqrt(in_features)]."""
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def reset_parameters_to_zero(self):
        """Zero the weight (e.g. before distillation/re-init schemes)."""
        self.weight.data.fill_(0)

    def forward(self, input):
        """Return {"logits": [B, out_features]} cosine similarities in [-1, 1].

        (Removed the redundant `* 1.0` scaling of the original.)
        """
        out = F.linear(
            F.normalize(input, p=2, dim=1),
            F.normalize(self.weight, p=2, dim=1),
        )

        return {"logits": out}

    def forward_reweight(
        self,
        input,
        cur_task,
        alpha=0.1,
        beta=0.0,
        init_cls=10,
        inc=10,
        out_dim=768,
        use_init_ptm=False,
    ):
        """Cosine logits with cross-task contributions down-weighted.

        Args:
            input: per-task features [nb_tasks, B, out_dim], where
                nb_tasks == in_features // out_dim.
            cur_task: index of the latest task (inclusive).
            alpha: scale applied to non-matching task/feature pairs.
            use_init_ptm: when true, task i's "own" feature slot is i+1
                (slot 0 holds the pretrained-model features).

        NOTE(review): beta is unused; cross-task terms divide by cur_task,
        which would be a division by zero at cur_task == 0 if that branch is
        reached — presumably this method is only called for cur_task >= 1.
        """
        outs = []
        for i in range(cur_task + 1):
            # Class-id range owned by task i.
            start_cls = 0 if i == 0 else init_cls + (i - 1) * inc
            end_cls = init_cls if i == 0 else start_cls + inc

            out = 0.0
            # Accumulate contributions from every task's feature slot.
            for j in range(self.in_features // out_dim):
                feats = F.normalize(input[j, :, :], p=2, dim=1)
                task_weight = F.normalize(
                    self.weight[
                        start_cls:end_cls, j * out_dim : (j + 1) * out_dim
                    ],
                    p=2,
                    dim=1,
                )
                own_slot = (i + 1) if use_init_ptm else i
                if j == own_slot:
                    out = out + F.linear(feats, task_weight)
                else:
                    out = out + alpha * F.linear(feats, task_weight) / cur_task

            outs.append(out)

        # Single cat replaces the old incremental cat with its dead
        # `if i != 0 else out` ternary inside the else-branch.
        return {"logits": torch.cat(outs, dim=1)}


class IDCosineLinear(CosineLinear):
    """CosineLinear augmented with one learnable 'ider' direction.

    After update_intermedium() is called, logits are scaled by the
    _prob-mapped cosine similarity between the input and the ider vector.
    """

    ider: torch.nn.Parameter

    def update_intermedium(self, freeze=True):
        """Attach the ider parameter once; optionally freeze everything else."""
        if hasattr(self, "ider"):
            return
        print("Update Adapter with ID")
        if freeze:
            for name, param in self.named_parameters():
                if name != "ider":
                    print(f"{'':4s}freeze param {name}")
                    param.requires_grad = False
        new_param = nn.parameter.Parameter(
            torch.Tensor(1, self.in_features)
        )
        self.register_parameter("ider", new_param)
        nn.init.kaiming_normal_(self.ider)
        self.to(self.weight.device)

    def forward(self, input):
        """Cosine logits; once ider exists, also similarity-scaled logits.

        Args:
            input: [batch, in_features]
        """
        logits = F.linear(
            F.normalize(input, p=2, dim=1),
            F.normalize(self.weight, p=2, dim=1),
        )

        if not hasattr(self, "ider"):
            return {"logits": logits}

        sim = self._cal_sim(input)
        return {"logits": sim * logits, "sim": sim, "pure_logits": logits}

    def _cal_sim(self, input):
        """Per-sample similarity to the ider vector, mapped by _prob.

        Returns a scalar 1.0 before ider exists, else a [batch, 1] tensor.
        """
        if not hasattr(self, "ider"):
            return torch.tensor(1.0)
        batch = input.shape[0]
        raw_sim = torch.einsum(
            "bd,bd->b",
            F.normalize(input, p=2, dim=-1),
            F.normalize(self.ider.expand(batch, -1), p=2, dim=-1),
        )
        return self._prob(raw_sim).unsqueeze(-1)


class MultiIDCosineLinear(CosineLinear):
    """CosineLinear with three learnable 'ider' directions.

    Logits are scaled by the best (max) of the three _prob-mapped
    similarities between the input and the ider vectors.
    """

    ider: torch.nn.Parameter

    def update_intermedium(self, freeze=True):
        """Attach the 3-row ider parameter once; optionally freeze the rest."""
        if hasattr(self, "ider"):
            return
        print("Update Adapter with ID")
        if freeze:
            for name, param in self.named_parameters():
                if name != "ider":
                    print(f"{'':4s}freeze param {name}")
                    param.requires_grad = False
        new_param = nn.parameter.Parameter(
            torch.Tensor(3, self.in_features)
        )
        self.register_parameter("ider", new_param)
        nn.init.kaiming_normal_(self.ider)
        self.to(self.weight.device)

    def forward(self, input):
        """Cosine logits, optionally scaled by the best ider similarity.

        Args:
            input: [batch, in_features]
            sim: [batch, 3] similarities; their max ([batch, 1]) scales logits.
        """
        logits = F.linear(
            F.normalize(input, p=2, dim=1),
            F.normalize(self.weight, p=2, dim=1),
        )

        if not hasattr(self, "ider"):
            return {"logits": logits}

        sim_full = self._cal_sim(input)
        best_sim = sim_full.max(dim=-1, keepdim=True).values
        return {
            "logits": best_sim * logits,
            "sim": sim_full,
            "pure_logits": logits,
        }

    def _cal_sim(self, input):
        """[batch, 3] similarities to the ider rows, mapped by _prob.

        Returns a scalar 1.0 before ider exists.
        """
        if not hasattr(self, "ider"):
            return torch.tensor(1.0)
        batch = input.shape[0]
        raw_sim = torch.einsum(
            "bd,bld->bl",
            F.normalize(input, p=2, dim=-1),
            F.normalize(
                self.ider.unsqueeze(0).expand(batch, -1, -1), p=2, dim=-1
            ),
        )
        return self._prob(raw_sim)


class DualCosineLinear(CosineLinear):
    """Like MultiIDCosineLinear, but the three 'ider' directions are created
    eagerly in __init__ and trained alongside the cosine weight."""

    ider: torch.nn.Parameter

    def __init__(
        self,
        in_features,
        out_features,
        nb_proxy=1,
        to_reduce=False,
        sigma=True,
    ):
        # nb_proxy / to_reduce / sigma are accepted for signature
        # compatibility with sibling heads but unused here.
        super().__init__(in_features, out_features)
        self.ider = nn.parameter.Parameter(
            torch.Tensor(3, self.in_features)
        )
        nn.init.kaiming_normal_(self.ider)

    def forward(self, input):
        """Cosine logits scaled by the best ider similarity.

        Args:
            input: [batch, in_features]
            sim: [batch, 3] similarities; their max ([batch, 1]) scales logits.
        """
        logits = F.linear(
            F.normalize(input, p=2, dim=1),
            F.normalize(self.weight, p=2, dim=1),
        )

        # ider is set in __init__, so this branch is normally always taken;
        # the guard is kept for parity with the lazy-ider siblings.
        if not hasattr(self, "ider"):
            return {"logits": logits}

        sim_full = self._cal_sim(input)
        best_sim = sim_full.max(dim=-1, keepdim=True).values
        return {
            "logits": best_sim * logits,
            "sim": sim_full,
            "pure_logits": logits,
        }

    def _cal_sim(self, input):
        """[batch, 3] similarities to the ider rows, mapped by _prob."""
        if not hasattr(self, "ider"):
            return torch.tensor(1.0)
        batch = input.shape[0]
        raw_sim = torch.einsum(
            "bd,bld->bl",
            F.normalize(input, p=2, dim=-1),
            F.normalize(
                self.ider.unsqueeze(0).expand(batch, -1, -1), p=2, dim=-1
            ),
        )
        return self._prob(raw_sim)


class ContinualCosineLinear(nn.Module):
    """Container of per-task cosine heads.

    Fixed: ``heads`` and ``feat_expand`` used to be mutable CLASS attributes,
    so every instance shared one ModuleList and its parameters were never
    registered as submodules (invisible to .parameters()/optimizers). They
    are now per-instance attributes set in __init__.
    """

    heads: nn.ModuleList
    feat_expand: bool

    def __init__(self):
        super().__init__()
        self.heads = nn.ModuleList()
        self.feat_expand = True

    def after_train(self, head: "IDCosineLinear", freeze=True):
        """Optionally freeze the existing heads, then append the new one."""
        if freeze:
            for p in self.heads.parameters():
                p.requires_grad = False
        self.heads.append(head)

    def forward(self, x, feat_expand=None):
        """Concatenate per-task logits along dim 1.

        Args:
            x: shared features, or per-task features indexed as x[ti]
               when feat_expand is true.
            feat_expand: overrides self.feat_expand when not None.
        """
        feat_expand = (
            feat_expand if feat_expand is not None else self.feat_expand
        )
        out = []
        for ti, head in enumerate(self.heads):
            fc_inp = x[ti] if feat_expand else x
            out.append(head(fc_inp)["logits"])

        return {"logits": torch.cat(out, dim=1)}

    def forward_sim(self, x, feat_expand=None):
        """Similarity of every task's features against every head's ider.

        Returns {"sim": [nb_tasks * B, nb_heads]} (rows stacked per task).
        """
        feat_expand = (
            feat_expand if feat_expand is not None else self.feat_expand
        )
        sim = []
        for ti in range(len(self.heads)):
            # Fixed: previously read self.feat_expand here, silently
            # ignoring the feat_expand argument.
            fc_inp = x[ti] if feat_expand else x
            per_head = [
                head._cal_sim(fc_inp) for head in self.heads  # type: ignore
            ]
            sim.append(torch.cat(per_head, dim=1))

        return {"sim": torch.cat(sim, dim=0)}

    def after_task(self):
        """Freeze every head after finishing the current task."""
        for p in self.heads.parameters():
            p.requires_grad = False


class SCLinear(ABLinear):
    """Minimal cosine classifier: a single L2-normalized weight matrix."""

    def __init__(self, embed_dim, nb_classes):
        super().__init__(embed_dim, nb_classes)
        self.embed_dim = embed_dim
        self.nb_classes = nb_classes
        self.weight = nn.Parameter(torch.Tensor(nb_classes, embed_dim))

        self.reset_parameters()

    def forward(self, input):
        """Cosine logits.

        Args:
            input: [batch, embed_dim]
        """
        normed_feats = F.normalize(input, p=2, dim=1)
        normed_weight = F.normalize(self.weight, p=2, dim=1)
        return {"logits": F.linear(normed_feats, normed_weight)}

    def reset_parameters(self):
        """Uniform init scaled by 1/sqrt(embed_dim)."""
        bound = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)


class SContinualCls(nn.Module):
    """Container of per-task SCLinear heads.

    Fixed: ``heads`` and ``feat_expand`` used to be mutable CLASS attributes,
    so every instance shared one ModuleList and its parameters were never
    registered as submodules (invisible to .parameters()/optimizers). They
    are now per-instance attributes set in __init__.
    """

    heads: nn.ModuleList
    feat_expand: bool

    def __init__(self):
        super().__init__()
        self.heads = nn.ModuleList()
        self.feat_expand = True

    def forward(self, x, feat_expand=None):
        """Concatenate per-task logits along dim 1.

        Args:
            x: shared features, or per-task features indexed as x[ti]
               when feat_expand is true.
            feat_expand: overrides self.feat_expand when not None.
        """
        feat_expand = (
            feat_expand if feat_expand is not None else self.feat_expand
        )
        out = []
        for ti, head in enumerate(self.heads):
            fc_inp = x[ti] if feat_expand else x
            out.append(head(fc_inp)["logits"])

        return {"logits": torch.cat(out, dim=1)}

    def after_train(self, head: "SCLinear", freeze=True):
        """Optionally freeze the existing heads, then append the new one."""
        if freeze:
            for p in self.heads.parameters():
                p.requires_grad = False
        self.heads.append(head)

    def after_task(self):
        """Freeze every head after finishing the current task."""
        for p in self.heads.parameters():
            p.requires_grad = False


def reduce_proxies(out, nb_proxy):
    """Collapse per-proxy logits into per-class logits.

    Each class owns nb_proxy adjacent columns; a softmax over those proxies
    produces attention weights, and the class logit is the attention-weighted
    sum of its proxy logits.

    Args:
        out: [batch, nb_classes * nb_proxy] raw logits.
        nb_proxy: proxies per class; 1 returns the input unchanged.

    Returns:
        [batch, nb_classes] reduced logits.
    """
    if nb_proxy == 1:
        return out
    batch = out.shape[0]
    nb_classes, remainder = divmod(out.shape[1], nb_proxy)
    assert remainder == 0, "Shape error"

    per_class = out.view(batch, nb_classes, nb_proxy)
    attn = F.softmax(per_class, dim=-1)

    return (attn * per_class).sum(-1)