from typing import Callable
import torch
import torch.nn as nn
import copy


class NCM_Euler_Classifier(nn.Module):
    """Nearest-class-mean classifier with learnable per-channel scaling.

    Holds one learnable mean vector per class plus a learnable per-channel
    temperature. Logits are the negative (halved) Euclidean distances
    between a scaled embedding and each scaled class mean.
    """

    def __init__(self, embed_dim, nb_classes, **fc_kwargs):
        super(NCM_Euler_Classifier, self).__init__()
        self.nb_classes = nb_classes
        self.embed_dim = embed_dim
        # One learnable prototype (mean) per class, shape [C, D].
        self._class_mean = nn.Parameter(
            torch.randn(self.nb_classes, self.embed_dim)
        )
        # Channel-wise divisor applied before measuring distance, shape [D].
        self.temperature = nn.Parameter(torch.ones(self.embed_dim).float())

        self.__init_weights()

    def __init_weights(self):
        nn.init.xavier_uniform_(self._class_mean)

    def forward(self, x):
        """Score a batch of embeddings against every class mean.

        Args:
            x: [B, D] batch of embeddings, D = self.embed_dim.

        Returns:
            dict with key "logits": [B, C] tensor of -distance/2 scores,
            so the closest class mean gets the highest score.
        """
        # Pairwise differences broadcast to [B, C, D], then channel-wise
        # scaled by the temperature before collapsing to distances.
        diff = x[:, None, :] - self._class_mean[None, :, :]
        scaled = torch.div(diff, self.temperature[None, None])
        dists = torch.sqrt(torch.square(scaled).sum(dim=-1))
        # Scores are the negative of the distances themselves (halved).
        return {"logits": -dists / 2}


class ContinualCls(nn.Module):
    """Multi-head classifier container for continual learning.

    Maintains one classification head per task in an ``nn.ModuleList``.
    Each head is produced by ``fc_func(embed_dim, nb_classes, **fc_kwargs)``
    and is expected to return ``{"logits": ...}`` from its forward pass.
    The container's forward concatenates all heads' logits class-wise.
    """

    def __init__(
        self,
        embed_dim,
        nb_classes,
        fc_func: Callable,
        feat_expand=False,
        **fc_kwargs,
    ):
        super().__init__()
        self.fc_func = fc_func
        self.embed_dim = embed_dim
        self.feat_expand = feat_expand

        # Start with a single head covering the first task's classes.
        self.heads = nn.ModuleList(
            [fc_func(embed_dim, nb_classes, **fc_kwargs)]
        )

    def backup(self):
        # Snapshot all parameters/buffers so recall() can restore them.
        self.old_state_dict = copy.deepcopy(self.state_dict())

    def recall(self):
        # Restore the snapshot taken by the last backup() call.
        self.load_state_dict(self.old_state_dict)

    def update(self, nb_classes, freeze_old=True, **fc_kwargs):
        """Append a fresh head for the next task.

        Args:
            nb_classes: number of classes handled by the new head.
            freeze_old: when True, stop gradients for every existing head.
        """
        new_head = self.fc_func(self.embed_dim, nb_classes, **fc_kwargs)

        if freeze_old:
            for param in self.heads.parameters():
                param.requires_grad = False

        self.heads.append(new_head)

    def forward(self, x, feat_expand=None):
        """Run every head and concatenate their logits along dim 1.

        Args:
            x: a single feature tensor shared by all heads, or — when
               feat_expand — a per-head indexable collection of features.
            feat_expand: optional per-call override of the instance default.
        """
        if feat_expand is None:
            feat_expand = self.feat_expand

        logits = [
            head(x[idx] if feat_expand else x)["logits"]
            for idx, head in enumerate(self.heads)
        ]
        return {"logits": torch.cat(logits, dim=1)}

    def after_task(self):
        # Hook for subclasses; nothing to do by default.
        pass

    def update_intermedium(self):
        # Hook for subclasses; nothing to do by default.
        pass
