"""
Base class for classification heads in continual learning.

Classification heads are specialized modules that convert feature representations
from backbone networks or adapters into task-specific outputs (typically class logits).
In continual learning, heads can be:

1. Shared across all tasks (with a unified output space)
2. Task-specific (with separate parameters for each task)
3. Dynamically expanded (growing with each new task)

This module defines the base interface for all head implementations, ensuring
consistent integration with the rest of the continual learning framework.
"""

from abc import abstractmethod
from typing import List, Optional, Dict, Any
import torch
import torch.nn as nn
from continuallearning.interfaces import MultiHeadInterface
from continuallearning.utils.logging import get_logger
from continuallearning.models.pefts.common.context import (
    get_current_task_ids,
)

logger = get_logger(__name__)


class BaseHead(MultiHeadInterface):
    """
    Base class for all classification heads in continual learning.

    Classification heads take feature representations from backbone networks or adapters
    and transform them into task-specific outputs (typically class logits). In continual
    learning, heads play a critical role in managing task-specific knowledge and can be
    implemented in various ways:

    - Multi-head: Separate classifiers for each task
    - Single-head: Unified output space across all tasks
    - Dynamic head: Growing or adapting with each new task

    The choice of head architecture significantly impacts how a model handles the
    stability-plasticity trade-off in continual learning.

    Args:
        clf_cfg (Dict[str, Any]): Configuration dictionary for the classifier head.
            Interpretation of its keys is left to concrete subclasses.
    """

    def __init__(self, clf_cfg: Dict[str, Any]) -> None:
        super().__init__()
        self.clf_cfg = clf_cfg
        # -1 means "no task selected yet"; set via set_task()/prepare_task().
        self._current_task = -1
        self._registered_tasks: List[int] = []
        # Maps str(task_id) -> per-task classifier. nn.ModuleDict requires
        # string keys, hence the str() conversion used throughout this class.
        self.classifiers = nn.ModuleDict()

    @abstractmethod
    def _create_classifier(self, **kwargs) -> nn.Module:
        """
        Create a task-specific classifier module.

        Called during task registration (see ``register_task``) with the keyword
        arguments forwarded from the caller. Subclasses decide which keys they
        need (typically the input feature dimension and number of classes).

        Returns:
            nn.Module: The classifier for the newly registered task.
        """
        ...

    def forward(
        self,
        features: torch.Tensor,
        task_ids: Optional[List[int]] = None,
        *args,
        **kwargs,
    ) -> torch.Tensor:
        """
        Forward pass through the classification head.

        This method transforms input features into task-specific outputs (logits).
        Dispatch depends on ``task_ids``:

        - ``None`` or empty: use all registered tasks (task-agnostic inference);
          outputs of every classifier are concatenated along dim 1.
        - Single element: route through that task's classifier only.
        - Multiple elements: use exactly those tasks, concatenated along dim 1
          in sorted task-id order.

        Args:
            features (torch.Tensor): Input features of shape [B, in_features].
            task_ids (List[int], optional): Task identifiers. If None or empty,
                all registered tasks are used.

        Returns:
            torch.Tensor: Logits. For a single task, shape [B, num_classes[task]];
                for several tasks, the per-task logits concatenated on dim 1.

        Raises:
            RuntimeError: If no tasks are registered and ``task_ids`` is empty/None.
            ValueError: If any requested task is not registered.
        """
        if task_ids is None or len(task_ids) < 1:
            # Default behavior: use all registered tasks.
            active_task_ids = self.registered_tasks
            if not active_task_ids:
                raise RuntimeError(
                    "No tasks registered. Please register at least one task before forward pass."
                )
        elif len(task_ids) == 1:
            # Single-task mode: route directly through that task's classifier.
            if task_ids[0] not in self.registered_tasks:
                raise ValueError(
                    f"Task {task_ids} is not registered. Available tasks: {self.registered_tasks}"
                )
            return self.process_with_task(features, task_ids[0], *args, **kwargs)
        else:
            # Multiple specific tasks mode: validate every id up front.
            active_task_ids = task_ids
            unregistered = set(active_task_ids) - set(self.registered_tasks)
            if unregistered:
                raise ValueError(
                    f"Tasks {unregistered} are not registered. Available tasks: {self.registered_tasks}"
                )

        # Multi-task processing (concatenated logits).
        return self.process_with_tasks(features, active_task_ids, *args, **kwargs)

    # task irrelevant interface methods
    def get_parameters(self) -> List[nn.Parameter]:
        """
        Get all parameters for this head.

        Returns:
            List[nn.Parameter]: List of all parameters (trainable or not).
        """
        return list(self.parameters())

    def get_trainable_parameters(self) -> List[nn.Parameter]:
        """
        Get trainable parameters managed by this head.

        Returns:
            List[nn.Parameter]: Parameters with ``requires_grad=True``.
        """
        return [param for param in self.parameters() if param.requires_grad]

    # task aware interface methods
    def prepare_task(self, task_id: int, **kwargs) -> None:
        """
        Prepare the head for incremental training on a new task.

        Registers the task, makes it the current task, and ensures only its
        parameters are trainable (all other tasks are frozen).

        Args:
            task_id: Identifier for the new task.
            **kwargs: Forwarded to ``_create_classifier`` via ``register_task``.
        """
        self.register_task(task_id, **kwargs)
        self.set_task(task_id)
        self.set_trainable_tasks(task_id)

    def register_task(self, task_id: int, **kwargs) -> None:
        """
        Register a new task with this head.

        Creates (via ``_create_classifier``) and stores the task's classifier.

        Args:
            task_id: Identifier for the new task.
            **kwargs: Forwarded to ``_create_classifier``.

        Raises:
            ValueError: If ``task_id`` is already registered. Allowing a
                duplicate would desynchronize ``_registered_tasks`` (which
                appends) from ``classifiers`` (which overwrites the key).
        """
        if task_id in self._registered_tasks:
            raise ValueError(
                f"Task {task_id} is already registered. "
                f"Available tasks: {self._registered_tasks}"
            )
        self._registered_tasks.append(task_id)
        self.classifiers[str(task_id)] = self._create_classifier(**kwargs)

    def set_task(self, task_id: int) -> None:
        """
        Set the current task ID for the classification head.

        This method configures the head to perform classification for a specific task.
        For multi-head implementations, this activates the appropriate classifier.
        For dynamic heads, this may adjust routing or parameter selection.

        This is typically called during both training and task-aware evaluation
        scenarios to ensure the correct output space is used.

        Args:
            task_id (int): Task identifier

        Raises:
            ValueError: If the task is not registered.
        """
        if task_id not in self._registered_tasks:
            raise ValueError(
                f"Task {task_id} is not registered. "
                f"Available tasks: {self._registered_tasks}"
            )
        self._current_task = task_id

    def set_trainable_tasks(self, task_ids: int | List[int], **kwargs) -> None:
        """
        Set which tasks should be trainable (unfrozen) while freezing all others.

        This method configures the trainable state for parameters associated
        with the specified tasks. All other registered tasks will be frozen.

        Args:
            task_ids: Task identifier(s) to make trainable.
            **kwargs: Additional configuration parameters (unused here; kept
                for subclass extension).

        Raises:
            ValueError: If any task in ``task_ids`` is not registered.
        """
        # Convert single task_id to list for uniform processing.
        if isinstance(task_ids, int):
            task_ids = [task_ids]

        # Validate all task IDs are registered before mutating anything.
        for task_id in sorted(task_ids):
            if task_id not in self._registered_tasks:
                raise ValueError(
                    f"Task {task_id} is not registered. "
                    f"Available tasks: {self._registered_tasks}"
                )

        # All registered tasks except the specified ones get frozen.
        tasks_to_freeze = [tid for tid in self._registered_tasks if tid not in task_ids]

        self._freeze(tasks_to_freeze)
        self._unfreeze(task_ids)

        logger.debug(
            "Set trainable tasks: %s, frozen tasks: %s", task_ids, tasks_to_freeze
        )

    def _set_requires_grad(
        self, task_ids: int | List[int], requires_grad: bool
    ) -> None:
        """
        Set ``requires_grad`` on the classifiers of the given tasks.

        Shared implementation behind ``_freeze`` and ``_unfreeze``.

        Args:
            task_ids: Task identifier(s) whose classifier parameters to update.
            requires_grad: Target trainability state.

        Raises:
            ValueError: If a task has no registered classifier. Membership in
                the ModuleDict is checked (task IDs need not be contiguous, so
                comparing against ``len(self.classifiers)`` would be wrong).
        """
        if isinstance(task_ids, int):
            task_ids = [task_ids]

        verb = "Unfreezing" if requires_grad else "Freezing"
        for task_id in sorted(task_ids):
            task_key = str(task_id)
            if task_key not in self.classifiers:
                raise ValueError(
                    f"Task ID {task_id} has no classifier. "
                    f"Registered classifiers: {list(self.classifiers.keys())}"
                )
            logger.debug("%s parameters for task %s", verb, task_id)
            for param in self.classifiers[task_key].parameters():
                param.requires_grad = requires_grad

    def _freeze(self, task_ids: int | List[int]) -> None:
        """
        Freeze parameters for specific tasks.

        Sets ``requires_grad=False`` for all parameters of the classifiers
        associated with the specified task IDs. Subclasses may override to
        implement additional task-specific freezing.

        Args:
            task_ids: Task identifier(s) to freeze parameters for.
        """
        self._set_requires_grad(task_ids, requires_grad=False)

    def _unfreeze(self, task_ids: int | List[int]) -> None:
        """
        Unfreeze parameters for specific tasks.

        Sets ``requires_grad=True`` for all parameters of the classifiers
        associated with the specified task IDs. Subclasses may override to
        implement additional task-specific unfreezing.

        Args:
            task_ids: Task identifier(s) to unfreeze parameters for.
        """
        self._set_requires_grad(task_ids, requires_grad=True)

    @property
    def current_task(self) -> int:
        """Get the current task ID (-1 if no task has been set)."""
        return self._current_task

    @current_task.setter
    def current_task(self, value: int) -> None:
        self._current_task = value

    @property
    def registered_tasks(self) -> List[int]:
        """
        Get a copy of the registered task IDs.

        Raises:
            RuntimeError: If the internal bookkeeping has desynchronized
                (task list and classifier dict disagree in size). An explicit
                raise is used instead of ``assert`` so the check survives
                ``python -O``.
        """
        if len(self._registered_tasks) != len(self.classifiers):
            raise RuntimeError(
                "Registered tasks must match the number of classifiers"
            )
        return self._registered_tasks.copy()

    def process_with_task(
        self, x: torch.Tensor, task_id: int, *args, **kwargs
    ) -> torch.Tensor:
        """
        Process input with a specific task's classifier.

        Args:
            x (torch.Tensor): Input features.
            task_id (int): Registered task identifier.

        Returns:
            torch.Tensor: Logits of that task's classifier.

        Raises:
            ValueError: If the task is not registered.
        """
        task_key = str(task_id)
        if task_key not in self.classifiers:
            raise ValueError(f"Task {task_id} is not registered")

        # Single task doesn't need router/combiner.
        return self.classifiers[task_key].forward(x, *args, **kwargs)

    def process_with_tasks(
        self, x: torch.Tensor, task_ids: Optional[List[int]] = None, *args, **kwargs
    ) -> torch.Tensor:
        """
        Process input among multiple tasks.

        Args:
            x (torch.Tensor): Input features.
            task_ids (List[int], optional): Tasks to evaluate; defaults to all
                registered tasks. Outputs are ordered by sorted task ID.

        Returns:
            torch.Tensor: Per-task logits concatenated along dim 1.

        Raises:
            ValueError: If any requested task is not registered.
        """
        if task_ids is None:
            task_ids = self.registered_tasks

        outputs = []
        for task_id in sorted(task_ids):
            task_key = str(task_id)
            if task_key not in self.classifiers:
                raise ValueError(f"Task {task_id} is not registered")

            outputs.append(self.classifiers[task_key].forward(x, *args, **kwargs))

        return torch.cat(outputs, dim=1)
