from typing import Dict, Any
import torch
import torch.nn as nn
import copy
from classifier.base import BaseClassifier, build_base_classifier


class ABContinualClf(nn.Module):
    """Abstract base for continual-learning classifiers.

    Keeps one classification head per task in ``self.heads``; subclasses
    append a new head per task via ``update``. Each head is a callable
    returning ``{"logits": Tensor}``; ``forward`` concatenates the per-head
    logits along dim 1.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        fc_func: str,
        freeze_old: bool = True,
        constant_dim: bool = True,
        **fc_kwargs: Any,
    ) -> None:
        """
        Args:
            in_features: embedding dimension fed to each head.
            out_features: output dimension of each head.
            fc_func: identifier of the head builder (consumed by subclasses
                via ``build_base_classifier``).
            freeze_old: if True, previously added heads are frozen when a
                new one is appended (enforced by subclasses' ``update``).
            constant_dim: if True, every head receives the same [B, D]
                input; if False, the input is [B, T, D] and head ``t`` gets
                slice ``x[:, t]``.
            **fc_kwargs: extra keyword arguments forwarded to the head
                builder.
        """
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features

        self.constant_dim = constant_dim
        self.freeze_old = freeze_old

        self.fc_func = fc_func
        self.fc_kwargs = fc_kwargs

        # One classification head per task; ModuleList needs no element-type
        # parameter in its annotation.
        self.heads = nn.ModuleList()

    def forward(self, x: torch.Tensor, mode: str = "all") -> Dict[str, torch.Tensor]:
        """
        Args:
            x: [B, T, D] where T is the number of tasks
                or [B, D] where D is the embedding dimension
            mode: "all" or "cur". Does not matter if constant_dim is True

        Returns:
            ``{"logits": Tensor}`` — the per-head logits concatenated
            along dim 1.

        Raises:
            ValueError: if ``mode`` is neither "all" nor "cur".
        """
        mode = mode.lower()
        if mode not in ("all", "cur"):
            # Fail loudly up front: the original fell through both branches
            # and crashed inside torch.cat([]) with a cryptic RuntimeError.
            raise ValueError(f"Unknown mode: {mode!r}; expected 'all' or 'cur'.")

        out = []
        if mode == "all":
            # Each head sees its own task slice (or the shared input when
            # constant_dim is True).
            for task_id in range(len(self.heads)):
                task_input = x[:, task_id] if not self.constant_dim else x
                out.append(self.heads[task_id](task_input)["logits"])
        else:  # mode == "cur"
            # Every head sees the current (last) task's slice.
            for task_id in range(len(self.heads)):
                task_input = x[:, -1] if not self.constant_dim else x
                out.append(self.heads[task_id](task_input)["logits"])

        return {"logits": torch.cat(out, dim=1)}

    def backup(self) -> None:
        """Snapshot the current parameters so ``recall`` can restore them."""
        self.old_state_dict = copy.deepcopy(self.state_dict())

    def recall(self) -> None:
        """Restore the parameters saved by the last ``backup`` call."""
        self.load_state_dict(self.old_state_dict)

    def update(self, *args: Any, **kwargs: Any) -> None:
        """Hook for adding a new task head; implemented by subclasses."""
        pass

    def after_medium(self) -> None:
        """Hook called mid-training; no-op in the base class."""
        pass

    def after_train(self) -> None:
        """Propagate the after-train hook to every registered head."""
        for head in self.heads:
            # Heads must be BaseClassifier instances exposing after_train.
            if isinstance(head, BaseClassifier) and hasattr(head, "after_train"):
                head.after_train()
            else:
                raise ValueError(
                    f"Head {head} does not have an after_train method or is not a BaseClassifier instance."
                )

    def before_task(self) -> None:
        """Hook called before each task; no-op in the base class."""
        pass

    def after_task(self) -> None:
        """Hook called after each task; no-op in the base class."""
        pass

    def reset_parameters(self) -> None:
        """Hook for re-initializing parameters; no-op in the base class."""
        pass

    def reset_parameters_to_zero(self) -> None:
        """Hook for zeroing parameters; no-op in the base class."""
        pass

    def freeze(self) -> None:
        """Hook for freezing the module; no-op in the base class."""
        pass


class ContinualClf(ABContinualClf):
    """Continual classifier that builds and appends one fresh head per task."""

    def __init__(
        self,
        in_features: int,
        out_features: int,
        fc_func: str,
        freeze_old: bool = True,
        constant_dim: bool = True,
        **fc_kwargs: Any,
    ) -> None:
        super().__init__(
            in_features,
            out_features,
            fc_func,
            freeze_old,
            constant_dim,
            **fc_kwargs,
        )

    def before_task(self) -> None:
        """Build a new head for the incoming task and register it."""
        fc = build_base_classifier(
            self.fc_func, self.in_features, self.out_features, **self.fc_kwargs
        )
        self.update(fc=fc, freeze_old=self.freeze_old)

    def update(self, fc=None, freeze=False, freeze_old=True, train_params=None):
        """Append head ``fc`` and adjust which parameters stay trainable.

        Args:
            fc: the new head module; must not be None.
            freeze: if True, freeze ALL heads (including the new one).
            freeze_old: if True, freeze the heads present before this call.
            train_params: parameter names to force-unfreeze afterwards
                (None means none; avoids the mutable-default-list pitfall).

        Raises:
            ValueError: if ``fc`` is None.
        """
        # Validate before mutating any state. `fc is None` rather than
        # `if fc:` — truthiness would wrongly reject valid modules whose
        # __len__ is 0 (e.g. an empty nn.ModuleList).
        if fc is None:
            raise ValueError("fc is None")

        if freeze_old:
            for p in self.heads.parameters():
                p.requires_grad = False

        self.heads.append(fc)

        if freeze:
            for p in self.heads.parameters():
                p.requires_grad = False

        for name, param in self.named_parameters():
            if name in (train_params or []):
                param.requires_grad = True

    def _add(self, fc):
        # Register a head without touching any requires_grad flags.
        self.heads.append(fc)


from classifier.utils import RouterWeights, SoftRouter


class MoFClf(ABContinualClf):
    """Mixture-of-Features classifier.

    A ``SoftRouter`` maps the input to per-expert feature slices; head ``t``
    consumes the slice of expert ``t + init_experts - 1``. One expert is
    added per task after the first.
    """

    def __init__(
        self,
        in_features,
        out_features,
        fc_func: str,
        init_experts,
        constant_dim=True,
        **fc_kwargs,
    ):
        """
        classifier for feature-set classification

        Args:
            in_features: embedding dimension fed to the router/heads.
            out_features: output dimension of each head.
            fc_func: identifier of the head builder.
            init_experts: number of router experts for the first task.
            constant_dim: accepted for interface compatibility; see NOTE
                below — forward always uses routed per-expert inputs.
            **fc_kwargs: extra keyword arguments for the head builder.
        """
        # BUGFIX: the original passed `constant_dim` positionally into the
        # parent's `freeze_old` slot (parent signature is
        # (in_features, out_features, fc_func, freeze_old, constant_dim, ...)),
        # silently corrupting `freeze_old`. Bind it by keyword instead.
        super().__init__(
            in_features,
            out_features,
            fc_func,
            constant_dim=constant_dim,
            **fc_kwargs,
        )
        self.task_count = 1
        # NOTE: forward always routes through experts, so constant_dim is
        # forced off regardless of the constructor argument.
        self.constant_dim = False
        self.nb_experts = init_experts
        self.init_experts = init_experts

    def forward(self, x, mode="all"):
        """
        Args:
            x: [B, T, D] where T is the number of tasks
                or [B, D] where D is the embedding dimension
            mode: "all" or "cur". Does not matter if constant_dim is True

        Returns:
            ``{"logits": Tensor}`` — per-head logits concatenated on dim 1.

        Raises:
            ValueError: if ``mode`` is neither "all" nor "cur".
        """
        mode = mode.lower()
        if mode not in ("all", "cur"):
            # Fail loudly instead of letting torch.cat([]) raise a cryptic
            # RuntimeError further down.
            raise ValueError(f"Unknown mode: {mode!r}; expected 'all' or 'cur'.")

        soft_x = self.router(x, apply_jitter=False)["logits"]
        out = []
        if mode == "all":
            # One expert slice per task head, offset by the initial experts.
            assert len(self.heads) == soft_x.size(1) - self.init_experts + 1
            for task_id in range(len(self.heads)):
                task_input = soft_x[:, task_id + self.init_experts - 1]
                out.append(self.heads[task_id](task_input)["logits"])
        else:  # "cur": every head sees the newest expert's slice
            for task_id in range(len(self.heads)):
                out.append(self.heads[task_id](soft_x[:, -1])["logits"])

        return {"logits": torch.cat(out, dim=1)}

    def update(self, fc=None, freeze=False, freeze_old=True, train_params=None):
        """Append a new head and grow/create the router accordingly.

        Args:
            fc: the new head module; must not be None.
            freeze: if True, freeze ALL heads (including the new one).
            freeze_old: if True, freeze the heads present before this call.
            train_params: parameter names to force-unfreeze afterwards
                (None means none; avoids the mutable-default-list pitfall).

        Raises:
            ValueError: if ``fc`` is None.
        """
        # Validate before mutating state; identity check instead of `if fc:`
        # so modules with __len__ == 0 are not wrongly rejected.
        if fc is None:
            raise ValueError("fc is None")

        if freeze_old:
            for p in self.heads.parameters():
                p.requires_grad = False

        self.heads.append(fc)

        if freeze:
            for p in self.heads.parameters():
                p.requires_grad = False

        for name, param in self.named_parameters():
            if name in (train_params or []):
                param.requires_grad = True

        if self.task_count == 1:
            # First task: create the router with the initial expert count.
            self.router_weights = RouterWeights(
                input_dim=self.in_features, num_experts=self.nb_experts
            )
            self.router = SoftRouter(self.router_weights, top_k=2)
        else:
            # Later tasks: add one expert and refresh the router's weights.
            self.router_weights.add_expert()
            self.router.update_weights(self.router_weights)

    def after_task(self):
        # Bookkeeping for the next task: one more expert will be needed.
        self.task_count += 1
        self.nb_experts += 1
