from typing import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from .wrapper import MarginCalibrationModule


class CosineClf(nn.Module):
    """Cosine-similarity classifier head.

    Scores each input embedding by its cosine similarity to a learned
    per-class weight vector, optionally scaled by a learnable per-dimension
    temperature.

    Device moves are handled by ``nn.Module`` itself: the previous
    ``to``/``cuda``/``cpu`` overrides re-assigned the registered Parameter
    with a plain Tensor, which ``nn.Module.__setattr__`` rejects with a
    TypeError whenever the device actually changes, so they were removed.

    Args:
        embed_dim: dimensionality of the input embeddings (D).
        nb_classes: number of output classes (C).
        **fc_kwargs: must contain ``"fc_temperture"`` (key kept as spelled by
            callers); truthy makes the temperature trainable, otherwise it is
            frozen at all-ones.
    """

    def __init__(self, embed_dim, nb_classes, **fc_kwargs):
        super().__init__()
        self.nb_classes = nb_classes
        self.embed_dim = embed_dim
        self.weight = nn.Parameter(
            torch.randn(self.nb_classes, self.embed_dim)
        )
        # Single creation path: the two original branches built identical
        # parameters and differed only in requires_grad.
        self.temperature = nn.Parameter(
            torch.ones(self.embed_dim).float(),
            requires_grad=bool(fc_kwargs["fc_temperture"]),
        )

        self.__init_weights()

    def __init_weights(self):
        nn.init.xavier_uniform_(self.weight)
        # NOTE: xavier_uniform_ requires a >=2-D tensor; applying it to the
        # 1-D temperature (as the original code did when the temperature was
        # trainable) raised a ValueError. The temperature keeps its all-ones
        # initialization instead.

    def forward(self, logits):
        """Return temperature-scaled cosine similarities.

        Args:
            logits: [B, D] input embeddings (D == ``embed_dim``).

        Returns:
            dict with key ``"logits"``: [B, C] similarity scores, where each
            entry is cos(input, class weight) scaled per-dimension by
            ``self.temperature``.
        """
        logits = F.normalize(logits, p=2, dim=-1)
        weight = F.normalize(self.weight, p=2, dim=-1) * self.temperature
        dists = F.linear(logits, weight)
        return {"logits": dists}


class ContinualClf(nn.Module):
    """Container of per-task classifier heads for continual learning.

    Each head maps an embedding to the logits of its own task's classes;
    ``forward`` concatenates the logits of all heads along the class
    dimension.

    Args:
        embed_dim: dimensionality of the input embeddings.
        fc_func: factory callable used by callers to build new heads
            (stored only; not invoked here).
        feat_expand: when True, ``x`` carries one feature slice per task
            ([B, T, D]) and head ``ti`` consumes ``x[:, ti]``.
        **fc_kwargs: extra head options; ``"fc_margin"`` (required by
            ``process``/``update``) controls whether heads are wrapped in
            ``MarginCalibrationModule``.
    """

    def __init__(
        self,
        embed_dim,
        fc_func: Callable,
        feat_expand=False,
        **fc_kwargs,
    ):
        super().__init__()
        self.fc_func = fc_func
        self.embed_dim = embed_dim
        self.feat_expand = feat_expand
        self.fc_kwargs = fc_kwargs

        # True while the most recently added head still awaits process().
        self.unprocess = False

        self.heads = nn.ModuleList()

    def forward(self, x, feat_expand=None):
        """Run every head and concatenate their logits.

        Args:
            x: [B, D] embeddings, or [B, T, D] when feature expansion is on.
            feat_expand: optional per-call override of ``self.feat_expand``.

        Returns:
            dict with key ``"logits"``: [B, sum of per-task class counts].
        """
        feat_expand = (
            feat_expand if feat_expand is not None else self.feat_expand
        )
        out = [
            head(x[:, ti] if feat_expand else x)["logits"]
            for ti, head in enumerate(self.heads)
        ]
        return {"logits": torch.cat(out, dim=1)}

    def update_intermedium(self):
        # Hook for subclasses; intentionally a no-op here.
        pass

    def after_train(
        self, freeze=True, train_params=("beta", "omega")
    ):
        """Finalize the newest head after training: process it if pending,
        then freeze all heads except the listed calibration parameters."""
        if self.unprocess:
            self.process()
        self.update(fc=None, freeze=freeze, train_params=train_params)

    def after_task(self):
        # Hook for subclasses; intentionally a no-op here.
        pass

    def add(self, fc):
        """Append a new task head; it must be process()-ed before update()."""
        self.heads.append(fc)
        self.unprocess = True

    def process(self):
        """Mark the newest head as processed, wrapping it in
        ``MarginCalibrationModule`` (on the heads' device) when margin
        calibration is enabled.

        ``unprocess`` is cleared regardless of ``fc_margin``: the original
        code cleared it only inside the margin branch, so with ``fc_margin``
        falsy every subsequent ``update()`` failed its assertion.
        """
        if self.unprocess:
            if self.fc_kwargs["fc_margin"]:
                device = next(self.heads[0].parameters()).device
                self.heads[-1] = MarginCalibrationModule(self.heads[-1])
                self.heads[-1].to(device)
            self.unprocess = False

    def update(self, fc=None, freeze=True, train_params=("beta", "omega")):
        """Optionally append a head, freeze head parameters, then re-enable
        the named calibration parameters.

        Args:
            fc: optional new head; wrapped in ``MarginCalibrationModule``
                when margin calibration is enabled.
            freeze: freeze every head parameter before re-enabling.
            train_params: parameter names kept trainable. Matched against
                the full dotted name or its leaf component —
                ``named_parameters()`` yields names like ``"heads.0.beta"``,
                so a bare ``"beta"`` would otherwise never match.
        """
        assert not self.unprocess, "Please process the latest head first"

        if fc:
            _fc = (
                fc
                if not self.fc_kwargs["fc_margin"]
                else MarginCalibrationModule(fc)
            )
            self.heads.append(_fc)
        if freeze:
            for p in self.heads.parameters():
                p.requires_grad = False

        for k, v in self.named_parameters():
            # Match either the exact dotted name or its leaf name.
            if k in train_params or k.rsplit(".", 1)[-1] in train_params:
                v.requires_grad = True

    def backup(self):
        """Snapshot the current parameters for a later recall()."""
        self.old_state_dict = copy.deepcopy(self.state_dict())

    def recall(self):
        """Restore the parameters saved by the last backup()."""
        self.load_state_dict(self.old_state_dict)
