"""
Implementation of the standard continual learning model.

This module implements the concrete ContinualModel class that combines backbone networks,
adapters, and heads into a cohesive continual learning system following the
composition pattern.
"""

from typing import List, Optional, Dict
import torch
import torch.nn as nn

from continuallearning.interfaces.types.outputs import ModelOutput
from continuallearning.models.core.output_utils import (
    standardize_model_output,
)
from continuallearning.interfaces import (
    ModelInterface,
)
from continuallearning.registry import (
    BACKBONE_REGISTRY,
    HEAD_REGISTRY,
    ADAPTER_REGISTRY,
)


class ContinualModel(ModelInterface):
    """
    Standard implementation of a continual learning model.

    Combines a frozen feature-extraction backbone with a task-aware adapter
    and task-specific heads using the composition pattern. Components are
    instantiated from the project registries, and forward passes return
    standardized ``ModelOutput`` objects, following Hugging Face's design
    philosophy of clear component separation.
    """

    def __init__(
        self,
        backbone_config: Dict,
        head_config: Dict,
        adapter_config: Dict,
    ) -> None:
        """
        Initialize a continual learning model from registry configurations.

        Args:
            backbone_config: Dict with keys ``backbone_name`` (registry key)
                and ``kwargs`` (constructor arguments) for the backbone.
            head_config: Dict with keys ``head_name`` and ``kwargs`` for the
                classification head.
            adapter_config: Dict with keys ``adapter_name`` and ``kwargs`` for
                the adapter. The backbone instance is injected into the
                adapter automatically as the ``backbone`` keyword argument.
        """
        super().__init__()
        backbone = BACKBONE_REGISTRY.create(
            backbone_config["backbone_name"], **backbone_config["kwargs"]
        )
        self.head = HEAD_REGISTRY.create(
            head_config["head_name"], **head_config["kwargs"]
        )
        # The adapter wraps the backbone so it can hook into / modulate the
        # backbone's forward pass.
        self.adapter = ADAPTER_REGISTRY.create(
            adapter_config["adapter_name"],
            backbone=backbone,
            **adapter_config["kwargs"],
        )
        self.backbone = backbone
        # No task is active until prepare_task()/set_task() is called.
        # getattr guard: don't clobber a value a parent class may have set.
        self._current_task: Optional[int] = getattr(self, "_current_task", None)
        self._freeze_backbone()

    def _freeze_backbone(self) -> None:
        """
        Freeze backbone parameters and switch the backbone to eval mode.

        Keeping the backbone fixed is common practice in continual learning
        to retain previously learned features; eval mode additionally fixes
        normalization/dropout behavior during training.
        """
        for param in self.backbone.parameters():
            param.requires_grad = False
        self.backbone.eval()

    def forward(
        self, x: torch.Tensor, task_ids: Optional[List[int]] = None, **kwargs
    ) -> ModelOutput:
        """
        Forward pass: adapter (feature extraction) followed by head (logits).

        Args:
            x: Input tensor.
            task_ids: Optional per-sample task identifiers forwarded to the
                adapter and head; semantics of ``None`` are defined by those
                components (presumably "use the current task" — confirm
                against the adapter/head implementations).
            **kwargs: Additional arguments passed through to adapter and head.

        Returns:
            ModelOutput: Standardized output carrying logits and features.
            The returned object supports both dataclass-style access
            (``output.logits``) and tensor-like access for backward
            compatibility.
        """
        adapter_output = self.adapter(x, task_ids=task_ids, **kwargs)
        logits = self.head(adapter_output.features, task_ids=task_ids, **kwargs)
        return standardize_model_output(adapter_output, logits=logits)

    def get_features(
        self, x: torch.Tensor, task_ids: Optional[List[int]] = None, **kwargs
    ) -> torch.Tensor:
        """
        Extract features from input without applying the classification head.

        Args:
            x: Input tensor.
            task_ids: Optional per-sample task identifiers for the adapter.
            **kwargs: Additional arguments passed to the adapter.

        Returns:
            torch.Tensor: Feature representations from the adapter output.
        """
        adapter_output = self.adapter(x, task_ids=task_ids, **kwargs)
        return adapter_output.features

    def freeze_backbone(self) -> None:
        """
        Freeze backbone parameters while keeping task-specific parameters
        trainable.

        NOTE: Unlike the private ``_freeze_backbone`` used at init time, this
        public method intentionally does NOT switch the backbone to eval mode.
        """
        for param in self.backbone.parameters():
            param.requires_grad = False

    # TaskAwareInterface methods
    def prepare_task(self, task_id: int, **kwargs) -> None:
        """
        Prepare the model for incremental training on a new task:
        register it, make it the current task, and mark it trainable.

        Args:
            task_id: Identifier for the task to prepare.
            **kwargs: Extra arguments forwarded to ``register_task``.
        """
        self.register_task(task_id, **kwargs)
        self.set_task(task_id)
        self.set_trainable_tasks(task_id)

    def register_task(self, task_id: int, **kwargs) -> None:
        """
        Register a new task with the adapter and the head.

        Args:
            task_id: Identifier for the new task.
            **kwargs: Optional task-specific configuration forwarded to both
                components.
        """
        self.adapter.register_task(task_id, **kwargs)
        self.head.register_task(task_id, **kwargs)

    def set_task(self, task_id: int) -> None:
        """
        Configure the model for a specific task.

        Args:
            task_id: Task identifier to make current.
        """
        self._current_task = task_id
        self.adapter.set_task(task_id)
        self.head.set_task(task_id)

    def set_trainable_tasks(self, task_ids: int | List[int], **kwargs) -> None:
        """
        Mark one or more tasks' parameters as trainable in adapter and head.

        Args:
            task_ids: Single task identifier or list of identifiers.
            **kwargs: Additional parameters forwarded to both components.
        """
        self.adapter.set_trainable_tasks(task_ids, **kwargs)
        self.head.set_trainable_tasks(task_ids, **kwargs)

    def _freeze(self, task_ids: int | List[int]) -> None:
        """
        Freeze adapter and head parameters for the given task(s).

        Args:
            task_ids: Single task identifier or list of identifiers.
        """
        self.adapter._freeze(task_ids)
        self.head._freeze(task_ids)

    def _unfreeze(self, task_ids: int | List[int]) -> None:
        """
        Unfreeze adapter and head parameters for the given task(s).

        Args:
            task_ids: Single task identifier or list of identifiers.
        """
        self.adapter._unfreeze(task_ids)
        self.head._unfreeze(task_ids)

    def process_with_task(
        self, x: torch.Tensor, task_id: int, **kwargs
    ) -> torch.Tensor:
        """Process input with a single specific task; returns raw logits."""
        adapter_output = self.adapter(x, task_ids=[task_id], **kwargs)
        logits = self.head(adapter_output.features, task_ids=[task_id], **kwargs)
        return logits

    def process_with_tasks(
        self, x: torch.Tensor, task_ids: Optional[List[int]] = None, **kwargs
    ) -> torch.Tensor:
        """Process input among multiple tasks; returns raw logits."""
        adapter_output = self.adapter(x, task_ids=task_ids, **kwargs)
        logits = self.head(adapter_output.features, task_ids=task_ids, **kwargs)
        return logits

    @property
    def current_task(self) -> Optional[int]:
        """Current task ID, or None if no task has been set yet."""
        return self._current_task

    @property
    def registered_tasks(self) -> List[int]:
        """
        Get the list of registered task IDs.

        Adapter and head must agree on the registered tasks; a mismatch
        indicates a registration bug (sanity-checked via assert).
        """
        rt_adapter = self.adapter.registered_tasks
        rt_head = self.head.registered_tasks
        assert rt_head == rt_adapter, "Registered tasks do not match"
        return rt_adapter

    # Task-irrelevant interface methods
    def get_parameters(self) -> List[nn.Parameter]:
        """
        Get the parameters managed by this model.

        Returns ALL backbone parameters (frozen or not) plus the trainable
        parameters of the adapter and the head.

        Returns:
            List[nn.Parameter]: Combined parameter list.
        """
        params = list(self.backbone.parameters())
        params.extend(self.adapter.get_trainable_parameters())
        params.extend(self.head.get_trainable_parameters())
        return params

    def get_trainable_parameters(self) -> List[nn.Parameter]:
        """
        Get all trainable parameters of backbone, adapter, and head.

        The backbone is a plain ``nn.Module`` (see ``get_parameters``), so its
        trainable subset is obtained by filtering on ``requires_grad`` rather
        than assuming a ``get_trainable_parameters`` method exists on it.
        Since the backbone is frozen at init, this is normally empty for it.

        Returns:
            List[nn.Parameter]: Combined trainable parameter list.
        """
        params: List[nn.Parameter] = [
            p for p in self.backbone.parameters() if p.requires_grad
        ]
        params.extend(self.adapter.get_trainable_parameters())
        params.extend(self.head.get_trainable_parameters())
        return params
