from abc import ABC, abstractmethod
from typing import Dict, Any, Tuple, Optional, TypeVar, Generic
import torch
from torch.utils.data import DataLoader

# TypeVars parameterizing the interface below.
T = TypeVar("T")  # network/model type handled by a learner
D = TypeVar("D")  # data-manager type handled by a learner


class LearnerInterface(Generic[T, D], ABC):
    """Contract that every incremental-learning model must satisfy.

    Concrete learners implement the complete task lifecycle: preparing
    for a task, training on it, evaluating it, and maintaining rehearsal
    memory across tasks.

    Type Parameters:
        T: the network/model type used by this learner
        D: the data-manager type used by this learner
    """

    # -- Task lifecycle -------------------------------------------------

    @abstractmethod
    def incremental_train(self, *args, **kwargs) -> None:
        """Run the full training procedure for the current task.

        Drives one incremental task end to end, including the
        surrounding before/after hooks.

        Args:
            *args, **kwargs: implementation-specific extras.
        """
        ...

    @abstractmethod
    def train_task(
        self, *args, **kwargs
    ) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]:
        """Convenience wrapper over the whole per-task lifecycle.

        Chains incremental_train, eval_task, and after_task into one
        call.

        Args:
            *args, **kwargs: implementation-specific extras.

        Returns:
            A (cnn_accuracy, nme_accuracy) tuple; the NME part may be
            None when NME evaluation is unavailable.
        """
        ...

    @abstractmethod
    def before_task(self, *args, **kwargs) -> None:
        """Hook invoked when a new task begins, before any training.

        Typical duties: advancing the task counter, growing the model
        architecture, and similar bookkeeping.

        Args:
            *args, **kwargs: implementation-specific extras.
        """
        ...

    @abstractmethod
    def before_train(self, *args, **kwargs) -> None:
        """Hook invoked just before the training loop of a task starts.

        Typical duties: data preparation and metric initialization.

        Args:
            *args, **kwargs: implementation-specific extras.
        """
        ...

    @abstractmethod
    def after_train(self, *args, **kwargs) -> None:
        """Hook invoked right after the training loop of a task ends.

        Typical duties: model post-processing and prototype computation.

        Args:
            *args, **kwargs: implementation-specific extras.
        """
        ...

    @abstractmethod
    def after_task(self, *args, **kwargs) -> None:
        """Hook invoked once the current task is fully completed.

        Typical duties: backing up model state and updating class
        counters.

        Args:
            *args, **kwargs: implementation-specific extras.
        """
        ...

    # -- Feature extraction ---------------------------------------------

    @abstractmethod
    def _extract_token(self, inputs: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """Extract feature tokens for a single batch of inputs.

        Args:
            inputs: batch tensor to featurize.
            *args, **kwargs: implementation-specific extras.

        Returns:
            The feature tensor produced from ``inputs``.
        """
        ...

    @abstractmethod
    def _extract_tokens(
        self, loader: DataLoader, *args, **kwargs
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract features and labels for every sample in a loader.

        Runs the whole dataset through the network, collecting feature
        representations alongside their targets.

        Args:
            loader: DataLoader yielding the data to featurize.
            *args, **kwargs: implementation-specific extras.

        Returns:
            A (features, targets) tensor pair.
        """
        ...

    # -- Model forward passes -------------------------------------------

    @abstractmethod
    def forward_train(
        self, model: T, inputs: torch.Tensor, *args, **kwargs
    ) -> Dict[str, Any]:
        """Forward pass used during training.

        Args:
            model: model to run the forward pass with.
            inputs: batch tensor.
            *args, **kwargs: implementation-specific extras.

        Returns:
            Dict of model outputs.
        """
        ...

    @abstractmethod
    def forward_test(
        self, model: T, inputs: torch.Tensor, *args, **kwargs
    ) -> Dict[str, Any]:
        """Forward pass used during testing.

        Args:
            model: model to run the forward pass with.
            inputs: batch tensor.
            *args, **kwargs: implementation-specific extras.

        Returns:
            Dict of model outputs.
        """
        ...

    @abstractmethod
    def forward_ca(
        self, model: T, inputs: torch.Tensor, *args, **kwargs
    ) -> Dict[str, Any]:
        """Forward pass used for classifier alignment.

        Args:
            model: model to run the forward pass with.
            inputs: batch tensor.
            *args, **kwargs: implementation-specific extras.

        Returns:
            Dict of model outputs.
        """
        ...

    # -- Evaluation ------------------------------------------------------

    @abstractmethod
    def eval_task(
        self, *args, **kwargs
    ) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]:
        """Evaluate the model after the current task.

        Uses CNN classification and, when available,
        nearest-mean-exemplar (NME) classification.

        Args:
            *args, **kwargs: implementation-specific extras.

        Returns:
            A tuple of CNN accuracy metrics and optional NME accuracy
            metrics (None when NME is not used).
        """
        ...

    @abstractmethod
    def eval_current_task(self, *args, **kwargs) -> Dict[str, float]:
        """Evaluate only on data from the current task.

        Past-task data is deliberately excluded.

        Args:
            *args, **kwargs: implementation-specific extras.

        Returns:
            Metric-name to value mapping for the current task.
        """
        ...

    @abstractmethod
    def evaluate(
        self, data_loaders: Dict[int, DataLoader], task_id: int, *args, **kwargs
    ) -> Dict[str, Any]:
        """Evaluate the model across several tasks at once.

        Args:
            data_loaders: task-id -> DataLoader mapping.
            task_id: the current task id.
            *args, **kwargs: implementation-specific extras.

        Returns:
            Dict of evaluation metrics.
        """
        ...

    # -- Rehearsal memory -------------------------------------------------

    @abstractmethod
    def build_rehearsal_memory(
        self, data_manager: D, per_class: int, *args, **kwargs
    ) -> None:
        """Select and store exemplars for rehearsal-based learning.

        Args:
            data_manager: source of dataset access.
            per_class: number of exemplars kept per class.
            *args, **kwargs: implementation-specific extras.
        """
        ...

    # -- State accessors ---------------------------------------------------

    @property
    @abstractmethod
    def _cur_task(self) -> int:
        """Index of the task currently being learned."""
        ...

    @property
    @abstractmethod
    def _known_classes(self) -> int:
        """Number of classes accumulated from previous tasks."""
        ...

    @property
    @abstractmethod
    def _total_classes(self) -> int:
        """Total class count including the current task's classes."""
        ...
