from typing import Any, Dict, Optional

import torch
from torch import Tensor, nn

from utils.projectors import _PROJECTORS

from .base import BASE_CLF, BaseModel
from .components.classifier.continual import ContinualClf, MoFClf


class BaseMulti(BaseModel[ContinualClf]):
    """Base class for multi-backbone models.

    Implements a model with multiple feature extraction pathways
    for incremental learning tasks.

    Attributes:
        fc (ContinualClf): The classifier layer
    """

    # Feature dimensionality is not fixed across tasks for multi-backbone models.
    constant_dim = False

    def forward(self, x: Tensor, mode: str = "", ca: bool = False) -> Dict[str, Any]:
        """Forward pass through the multi-backbone model.

        Args:
            x: Input tensor
            mode: Mode for feature extraction ("", "all", "cur", etc.)
            ca: If True, performs classifier alignment forward pass —
                ``x`` is passed straight to the classifier, skipping the backbone

        Returns:
            Dict containing model outputs with logits and features
        """
        if ca:
            # Classifier-alignment pass: input is treated as pre-extracted features.
            return self.fc(x, mode)

        feats = self.extract_token(x, mode)
        out = self.fc(feats, mode)

        out.update({"features": feats})
        return out

    def _backbone_method(self, suffix: str, mode: str):
        """Resolve the backbone method ``forward_{mode}_{suffix}``.

        Shared dispatch logic for :meth:`extract_token` and :meth:`extract_feats`.

        Args:
            suffix: Either "token" or "feats"
            mode: Mode for feature extraction ("", "all", "cur", etc.)

        Returns:
            The bound backbone method.

        Raises:
            AttributeError: If the backbone doesn't have the requested method
        """
        method_name = f"forward_{mode.lower()}_{suffix}"  # Create the method name dynamically
        if not hasattr(self.backbone, method_name):
            raise AttributeError(
                f"Backbone does not have method {method_name}. "
                f"Make sure you are using a compatible backbone."
            )
        return getattr(self.backbone, method_name)

    def extract_token(self, x: Tensor, mode: str = "") -> Tensor:
        """Extract cls token features from the vit backbone.

        Args:
            x: Input tensor
            mode: Mode for feature extraction ("", "all", "cur", etc.)

        Returns:
            torch.Tensor: Extracted token features [B, T, D]

        Raises:
            AttributeError: If the backbone doesn't have the requested method
        """
        return self._backbone_method("token", mode)(x)

    def extract_feats(self, x: Tensor, mode: str = "") -> Tensor:
        """Extract all (cls token and patch token) features from the vit backbone.

        Args:
            x: Input tensor
            mode: Mode for feature extraction ("", "all", "cur", etc.)

        Returns:
            torch.Tensor: Extracted features [B, T, L, D]

        Raises:
            AttributeError: If the backbone doesn't have the requested method
        """
        return self._backbone_method("feats", mode)(x)

    def generate_fc(self, in_dim: int, out_dim: int, *args, **kwargs) -> ContinualClf:
        """Generate a new classifier layer.

        Args:
            in_dim: Input dimension
            out_dim: Output dimension

        Returns:
            A new classifier layer

        Raises:
            KeyError: If ``self.fc_func`` does not name a known entry in BASE_CLF
        """
        fc = ContinualClf(
            in_dim,
            out_dim,
            BASE_CLF[self.fc_func.lower()],
            constant_dim=self.constant_dim,
        )
        return fc


class MultiRoute(BaseModel[MoFClf]):
    """Multi-route model implementation.

    Uses routing mechanism to direct inputs through different pathways.

    Attributes:
        fc (MoFClf): Mixture of experts classifier
    """

    fc: MoFClf
    # Feature dimensionality is not fixed across tasks for multi-route models.
    constant_dim = False

    def forward(self, x: Tensor, mode: str = "", ca: bool = False) -> Dict[str, Any]:
        """Forward pass through the multi-backbone model.

        Args:
            x: Input tensor
            mode: Mode for feature extraction ("", "all", "cur", etc.)
            ca: If True, performs classifier alignment forward pass —
                ``x`` is passed straight to the classifier, skipping the backbone

        Returns:
            Dict containing model outputs with logits and features
        """
        if ca:
            # Classifier-alignment pass: input is treated as pre-extracted features.
            return self.fc(x, mode)

        feats = self.extract_token(x, mode)
        out = self.fc(feats, mode)

        out.update({"features": feats})
        return out

    def _backbone_method(self, suffix: str, mode: str):
        """Resolve the backbone method ``forward_{mode}_{suffix}``.

        Shared dispatch logic for :meth:`extract_token` and :meth:`extract_feats`.

        Args:
            suffix: Either "token" or "feats"
            mode: Mode for feature extraction ("", "all", "cur", etc.)

        Returns:
            The bound backbone method.

        Raises:
            AttributeError: If the backbone doesn't have the requested method
        """
        method_name = f"forward_{mode.lower()}_{suffix}"  # Create the method name dynamically
        if not hasattr(self.backbone, method_name):
            raise AttributeError(
                f"Backbone does not have method {method_name}. "
                f"Make sure you are using a compatible backbone."
            )
        return getattr(self.backbone, method_name)

    def extract_token(self, x: Tensor, mode: str = "") -> Tensor:
        """Extract cls token features from the vit backbone.

        Args:
            x: Input tensor
            mode: Mode for feature extraction ("", "all", "cur", etc.)

        Returns:
            torch.Tensor: Extracted token features [B, T, D]

        Raises:
            AttributeError: If the backbone doesn't have the requested method
        """
        return self._backbone_method("token", mode)(x)

    def extract_feats(self, x: Tensor, mode: str = "") -> Tensor:
        """Extract all (cls token and patch token) features from the vit backbone.

        Args:
            x: Input tensor
            mode: Mode for feature extraction ("", "all", "cur", etc.)

        Returns:
            torch.Tensor: Extracted features [B, T, L, D]

        Raises:
            AttributeError: If the backbone doesn't have the requested method
        """
        return self._backbone_method("feats", mode)(x)

    def update_fc(
        self,
        nb_classes: int,
        freeze_old: bool = True,
        fc_kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Update the classifier layer with new classes.

        Args:
            nb_classes: Number of new classes
            freeze_old: Whether to freeze old classifier weights
            fc_kwargs: Additional arguments for classifier initialization;
                must contain "init_experts"

        Raises:
            KeyError: If init_experts is not provided in fc_kwargs
        """
        # Default to a fresh dict instead of a shared mutable default argument.
        fc_kwargs = {} if fc_kwargs is None else fc_kwargs

        if "init_experts" not in fc_kwargs:
            raise KeyError("init_experts must be provided in fc_kwargs for MultiRoute")

        if getattr(self, "fc", None) is None:
            # Pass init_experts as a keyword: generate_fc reads it from **kwargs,
            # so passing it positionally (into *args) would never be seen.
            self.fc = self.generate_fc(
                self.feature_dim, nb_classes, init_experts=fc_kwargs["init_experts"]
            )

        # NOTE(review): fc_kwargs (including init_experts) is forwarded to the base
        # classifier constructor as well — verify the base classifier accepts it.
        self._fc = BASE_CLF[self.fc_func.lower()](
            self.feature_dim, nb_classes, **fc_kwargs
        )
        self.fc.update(fc=self._fc, freeze=False, freeze_old=freeze_old)

    def generate_fc(self, in_dim: int, out_dim: int, *args, **kwargs) -> MoFClf:
        """Generate a new mixture of experts classifier layer.

        Args:
            in_dim: Input dimension
            out_dim: Output dimension

        Keyword Args:
            init_experts: Initial number of experts (required)

        Returns:
            A new mixture of experts classifier

        Raises:
            ValueError: If init_experts is not provided in kwargs
        """
        init_experts = kwargs.get("init_experts", None)
        if init_experts is None:
            # Raise instead of assert: asserts are stripped under `python -O`.
            raise ValueError("init_experts must be provided in kwargs")

        fc = MoFClf(
            in_features=in_dim,
            out_features=out_dim,
            fc_func=BASE_CLF[self.fc_func.lower()],
            init_experts=init_experts,
            constant_dim=self.constant_dim,
        )
        return fc


class MultiSSL(BaseMulti):
    """Multi-backbone model with self-supervised learning capabilities.

    Attributes:
        fc (ContinualClf): The classifier layer
        projector: Projection module for self-supervised learning
    """

    fc: ContinualClf
    projector: nn.Module

    def __init__(self, args: Dict[str, Any]) -> None:
        """Initialize MultiSSL model.

        Args:
            args: Configuration dictionary; must contain a "projector" key

        Raises:
            KeyError: If projector is not specified in args
        """
        super().__init__(args)
        if "projector" not in args:
            raise KeyError("projector must be specified in args for MultiSSL")
        projector_cls = _PROJECTORS[args["projector"]]
        # Both the feature and bottleneck widths are set to the model's out_dim.
        self.projector = projector_cls(
            ft_dim=self.out_dim,
            bottleneck_dim=self.out_dim,
        )

    def extract_token_ssl(self, x: Tensor, proj_mode: str = "") -> Tensor:
        """Extract cls token features from the vit backbone,
        and project them for Self-Supervised Learning.

        Args:
            x: Input tensor
            proj_mode: Projection mode — "all" projects every token,
                "cur" projects only the last token

        Returns:
            torch.Tensor: Projected token features

        Raises:
            ValueError: If proj_mode is not supported
        """
        # Tokens are always extracted in "all" mode; proj_mode only controls
        # which of them get projected.
        tokens = self.extract_token(x, mode="all")

        if proj_mode == "all":
            return self.projector(tokens)

        if proj_mode == "cur":
            # Project only the latest token; earlier tokens pass through unchanged.
            projected_last = self.projector(tokens[:, -1]).unsqueeze(1)
            return torch.cat([tokens[:, :-1], projected_last], dim=1)

        raise ValueError(f"Unsupported projection mode: {proj_mode}")
