import copy
from torch import nn, Tensor
from abc import ABC, abstractmethod
from typing import Dict, Any, Union, TypeVar, Type, Generic
import torch
from .components.classifier.base import CosineClf, LinearClf
from .components.classifier.continual import ABContinualClf

from .components.backbone import get_backbone

# Type aliases to improve code readability
# A single-session classifier is the class object of one of the base heads.
SingleSessionClf = Union[Type[CosineClf], Type[LinearClf]]

# Covariant type variable for classifier heads that accumulate state across
# continual-learning sessions (bounded by the abstract continual classifier).
MultiSessionClf = TypeVar("MultiSessionClf", bound=ABContinualClf, covariant=True)

# Registry mapping a lowercase ``fc_func`` key to its classifier class.
BASE_CLF: Dict[str, SingleSessionClf] = {
    name: clf for name, clf in (("cosine", CosineClf), ("linear", LinearClf))
}


# abstract class
class BaseModel(ABC, nn.Module, Generic[MultiSessionClf]):
    """BaseModel abstract class for all DIL_Frame models.

    This class defines the common interface and functionality that all models
    must implement, enforcing a consistent API across different model types.

    Attributes:
        fc: Classifier component responsible for final predictions
        fc_func: Type of classifier function to use
        constant_dim: Whether feature dimension remains constant across tasks
        backbone: Feature-extraction backbone network
    """

    fc: MultiSessionClf  # continual (multi-session) classifier head
    fc_func: str  # key into BASE_CLF selecting the per-session classifier type
    backbone: nn.Module  # feature-extraction network
    _device: torch.device  # primary device (first entry of args["device"])
    init_cls: int  # number of classes in the initial session
    inc_cls: int  # number of classes added per incremental session
    out_dim: int  # backbone output feature dimension
    constant_dim: bool  # whether the feature dim stays constant across tasks

    def __init__(self, args: Dict[str, Any]) -> None:
        """Initialize the model: build the backbone and read configuration.

        Args:
            args: Configuration mapping. Must contain "device" (a sequence of
                devices; the first is used), "init_cls", "inc_cls", and a
                non-None "fc_func" naming the classifier type.

        Raises:
            ValueError: If "fc_func" is missing from ``args`` or is None.
        """
        super().__init__()

        print("This is for the BaseNet initialization.")
        self.backbone = get_backbone(args)
        print("After BaseNet initialization.")
        self._device = args["device"][0]
        self.init_cls = args["init_cls"]
        self.inc_cls = args["inc_cls"]
        self.out_dim = self.backbone.out_dim

        if "fc_func" not in args or args["fc_func"] is None:
            raise ValueError("fc_func must be specified in args")
        self.fc_func = args["fc_func"]

    @property
    def feature_dim(self) -> int:
        """Get the dimension of feature vectors produced by the model.

        Returns:
            int: Feature dimension size
        """
        return self.out_dim

    @abstractmethod
    def forward(self, x: Tensor, *args, **kwargs) -> Dict[str, Any]:
        """Forward pass through the model.

        Args:
            x: Input tensor
            *args: Additional positional arguments for specialized behavior
            **kwargs: Additional keyword arguments for specialized behavior

        Returns:
            Dict containing model outputs including logits and features
        """
        pass

    @abstractmethod
    def extract_feats(self, x: Tensor) -> Tensor:
        """Extract detailed features from the input.

        Args:
            x: Input tensor

        Returns:
            Tensor containing extracted features
        """
        pass

    @abstractmethod
    def extract_token(self, x: Tensor) -> Tensor:
        """Extract token representations from the input.

        Args:
            x: Input tensor

        Returns:
            Tensor containing extracted tokens
        """
        pass

    @abstractmethod
    def generate_fc(
        self, in_dim: int, out_dim: int, *args, **kwargs
    ) -> MultiSessionClf:
        """Generate a new classifier layer.

        Args:
            in_dim: Input dimension
            out_dim: Output dimension

        Returns:
            A new classifier layer

        Raises:
            ValueError: If fc_func is not set
        """
        pass

    def update_fc(
        self,
        nb_classes: int,
        freeze_old: bool = True,
        fc_kwargs: Union[Dict[str, Any], None] = None,
    ) -> None:
        """Update the classifier layer with new classes.

        Args:
            nb_classes: Number of new classes
            freeze_old: Whether to freeze old classifier weights
            fc_kwargs: Additional arguments for classifier initialization.
                Defaults to an empty mapping when None.
        """
        # Avoid a mutable default argument: a shared {} created at function
        # definition time would be reused across every call site.
        if fc_kwargs is None:
            fc_kwargs = {}

        # Lazily create the continual classifier on first use.
        if getattr(self, "fc", None) is None:
            self.fc = self.generate_fc(self.feature_dim, nb_classes)

        # Build a fresh single-session head and merge it into the continual
        # classifier, optionally freezing the previously-learned weights.
        self._fc = BASE_CLF[self.fc_func.lower()](
            self.feature_dim, nb_classes, **fc_kwargs
        )
        self.fc.update(fc=self._fc, freeze=False, freeze_old=freeze_old)

    def copy(self) -> "BaseModel":
        """Create a deep copy of the model.

        Returns:
            A deep copy of the model
        """
        return copy.deepcopy(self)

    def freeze(self) -> "BaseModel":
        """Freeze all model parameters and switch to eval mode.

        Returns:
            Self with all parameters frozen
        """
        for param in self.parameters():
            param.requires_grad = False
        self.eval()

        return self

    def show_trainable_params(self) -> None:
        """Display all trainable parameters (name and element count)."""
        for name, param in self.named_parameters():
            if param.requires_grad:
                print(name, param.numel())

    def after_train(self) -> None:
        """Perform operations after training.

        Delegates to the backbone and classifier hooks; assumes both expose
        an ``after_train`` method.
        """
        self.backbone.after_train()  # type: ignore
        self.fc.after_train()  # type: ignore

    def after_task(self) -> None:
        """Perform operations after completing a task.

        Delegates to the backbone and classifier hooks; assumes both expose
        an ``after_task`` method.
        """
        self.backbone.after_task()  # type: ignore
        self.fc.after_task()  # type: ignore

    def after_medium(self) -> None:
        """Perform operations after completing a medium task.

        Delegates to the backbone and classifier hooks; assumes both expose
        an ``after_medium`` method.
        """
        self.backbone.after_medium()  # type: ignore
        self.fc.after_medium()  # type: ignore