"""
Base method classes for continual learning.

This module defines abstract base classes and interfaces for continual learning methods,
which control how models adapt to new tasks while preserving knowledge of previously learned tasks.
"""

from abc import abstractmethod
from typing import Dict, List, Optional, Any, Union
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

from continuallearning.registry import MethodInterface


class BaseMethod(MethodInterface):
    """
    Abstract base class for continual learning methods.

    A method encapsulates the strategy used to adapt a model to a sequence of
    tasks while mitigating forgetting. It exposes lifecycle hooks for the
    training loop (task / epoch / batch boundaries), defines how the loss is
    computed, and decides which parameters the optimizer should receive.

    Args:
        **kwargs: Additional arguments specific to the method
    """

    def __init__(self, **kwargs) -> None:
        """
        Initialize common state and delegate extra arguments to `_prepare_method`.

        Args:
            **kwargs: Additional method-specific arguments
        """
        # Identifier of the task currently being learned.
        self.current_task: int = 0
        # Identifiers of every task seen so far (including the current one).
        self.seen_tasks: set = set()
        # Flag tracked through (de)serialization; subclasses may toggle it.
        self._initialized: bool = False
        # Hook for subclass-specific setup; no-op in the base class.
        self._prepare_method(**kwargs)

    def _prepare_method(self, **kwargs) -> None:
        """
        Handle method-specific initialization.

        Subclasses override this to consume their own keyword arguments;
        the base implementation does nothing.

        Args:
            **kwargs: Method-specific arguments
        """
        pass

    def on_task_start(
        self, task_id: int, task_dataloader: Optional[DataLoader] = None
    ) -> None:
        """
        Hook invoked when training on a new task begins.

        Records the task as current/seen; subclasses may additionally set up
        task-specific parameters or buffers.

        Args:
            task_id: Task identifier
            task_dataloader: Optional dataloader with task data
        """
        self.seen_tasks.add(task_id)
        self.current_task = task_id

    def on_task_end(
        self, task_id: int, task_dataloader: Optional[DataLoader] = None
    ) -> None:
        """
        Hook invoked once training on a task has finished.

        Subclasses typically use this to update importance weights or store
        exemplars; the base implementation does nothing.

        Args:
            task_id: Task identifier
            task_dataloader: Optional dataloader with task data
        """
        pass

    def on_epoch_start(self, epoch: int) -> None:
        """
        Hook invoked at the beginning of an epoch. No-op by default.

        Args:
            epoch: Epoch number
        """
        pass

    def on_epoch_end(self, epoch: int) -> None:
        """
        Hook invoked at the end of an epoch. No-op by default.

        Args:
            epoch: Epoch number
        """
        pass

    def on_batch_start(self, batch: Any, batch_idx: int) -> None:
        """
        Hook invoked before a batch is processed. No-op by default.

        Args:
            batch: Batch data
            batch_idx: Batch index
        """
        pass

    def on_batch_end(self, batch: Any, batch_idx: int, outputs: Any) -> None:
        """
        Hook invoked after a batch has been processed. No-op by default.

        Args:
            batch: Batch data
            batch_idx: Batch index
            outputs: Model outputs
        """
        pass

    @abstractmethod
    def compute_loss(
        self,
        model: nn.Module,
        criterion: nn.Module,
        output: Any,
        targets: torch.Tensor,
        task_id: int,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """
        Compute the training loss for the given outputs and targets.

        Every concrete method must implement this to define its continual
        learning objective.

        Args:
            model: The model being trained
            criterion: The base criterion (loss function)
            output: Model output (logits or dict with logits)
            targets: Target labels
            task_id: Task identifier
            **kwargs: Additional arguments

        Returns:
            Dictionary with loss values (total loss under key 'loss')
        """
        pass

    def get_parameters_to_optimize(
        self, model: nn.Module
    ) -> Union[List[nn.Parameter], List[Dict]]:
        """
        Return the parameters the optimizer should update for the current task.

        Override to restrict or group parameters, e.g., for parameter-efficient
        fine-tuning; the default optimizes everything.

        Args:
            model: The model being trained

        Returns:
            List of parameters or list of parameter groups
        """
        return [*model.parameters()]

    def state_dict(self) -> Dict[str, Any]:
        """
        Serialize the method's bookkeeping state.

        Returns:
            Dictionary containing method state
        """
        state: Dict[str, Any] = {}
        state["current_task"] = self.current_task
        # Sets are not generally serializable; persist as a list.
        state["seen_tasks"] = list(self.seen_tasks)
        state["_initialized"] = self._initialized
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """
        Restore the method's bookkeeping state.

        Missing keys fall back to fresh-instance defaults.

        Args:
            state_dict: Dictionary containing method state
        """
        self._initialized = state_dict.get("_initialized", False)
        self.seen_tasks = set(state_dict.get("seen_tasks", []))
        self.current_task = state_dict.get("current_task", 0)


class RegularizationMethod(BaseMethod):
    """
    Base class for regularization-based continual learning methods.

    These methods add a penalty term to the loss function that discourages
    changes to parameters deemed important for previously learned tasks.

    Args:
        regularization_weight: Weight of the regularization loss
        **kwargs: Additional arguments
    """

    def __init__(self, regularization_weight: float = 1.0, **kwargs) -> None:
        """
        Initialize the regularization method.

        Args:
            regularization_weight: Weight of the regularization loss
            **kwargs: Additional method-specific arguments
        """
        self.regularization_weight = regularization_weight
        # Task-specific parameter importance maintained by subclasses.
        # NOTE: this must be set BEFORE super().__init__, because
        # BaseMethod.__init__ invokes the _prepare_method hook, and a
        # subclass hook that touches self.parameter_importance would
        # otherwise fail with AttributeError.
        self.parameter_importance: Dict[int, Dict[str, torch.Tensor]] = {}
        super().__init__(**kwargs)

    def compute_loss(
        self,
        model: nn.Module,
        criterion: nn.Module,
        output: Any,
        targets: torch.Tensor,
        task_id: int,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """
        Compute the task loss plus the weighted regularization penalty.

        Args:
            model: The model being trained
            criterion: The base criterion (loss function)
            output: Model output (logits or dict with a 'logits' key)
            targets: Target labels
            task_id: Task identifier
            **kwargs: Additional arguments

        Returns:
            Dictionary with task loss ('task_loss'), regularization loss
            ('reg_loss'), and total loss ('loss')
        """
        # Extract logits if output is a dictionary
        if isinstance(output, dict) and "logits" in output:
            logits = output["logits"]
        else:
            logits = output

        # Compute the basic task loss
        task_loss = criterion(logits, targets)

        # Regularization only applies once at least one earlier task exists;
        # the first task (task_id == 0) has nothing to preserve.
        reg_loss = torch.tensor(0.0, device=task_loss.device)
        if self.seen_tasks and task_id > 0:
            reg_loss = self.compute_regularization_loss(model, task_id)

        # Combine losses
        total_loss = task_loss + self.regularization_weight * reg_loss

        return {"loss": total_loss, "task_loss": task_loss, "reg_loss": reg_loss}

    @abstractmethod
    def compute_regularization_loss(
        self, model: nn.Module, task_id: int
    ) -> torch.Tensor:
        """
        Compute the regularization loss for the current task.

        Subclasses define how the penalty is derived from the stored
        parameter importance.

        Args:
            model: The model being trained
            task_id: Current task identifier

        Returns:
            Regularization loss tensor
        """
        pass

    @abstractmethod
    def update_parameter_importance(
        self, model: nn.Module, task_id: int, task_dataloader: DataLoader
    ) -> None:
        """
        Update parameter importance after learning a task.

        Subclasses define how parameter importance is estimated.

        Args:
            model: The model being trained
            task_id: Task identifier
            task_dataloader: Dataloader with task data
        """
        pass

    def on_task_end(
        self, task_id: int, task_dataloader: Optional[DataLoader] = None
    ) -> None:
        """
        Record the completed task; importance estimation happens in subclasses.

        Args:
            task_id: Task identifier
            task_dataloader: Dataloader with task data
        """
        super().on_task_end(task_id, task_dataloader)
        # Note: Actual implementation will happen in subclass where model is available

    def state_dict(self) -> Dict[str, Any]:
        """Get state dictionary including parameter importance (tensors on CPU)."""
        state = super().state_dict()
        state["parameter_importance"] = {
            k: {name: tensor.cpu() for name, tensor in v.items()}
            for k, v in self.parameter_importance.items()
        }
        state["regularization_weight"] = self.regularization_weight
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Load state dictionary including parameter importance."""
        super().load_state_dict(state_dict)
        if "parameter_importance" in state_dict:
            # state_dict() stores importance tensors on CPU, so no device
            # transfer is needed here. (The previous implementation inferred
            # the target device from the first stored tensor and then moved
            # every tensor to the device it was already on — a no-op.)
            # Subclasses move tensors to the model's device when computing
            # the regularization loss.
            self.parameter_importance = {
                task: dict(importances)
                for task, importances in state_dict["parameter_importance"].items()
            }
        if "regularization_weight" in state_dict:
            self.regularization_weight = state_dict["regularization_weight"]


class ReplayMethod(BaseMethod):
    """
    Base class for replay-based continual learning methods.

    These methods store examples from previous tasks in a bounded memory
    and replay them while learning new tasks to prevent forgetting.

    Args:
        memory_size: Maximum number of examples to store
        **kwargs: Additional arguments
    """

    def __init__(self, memory_size: int = 200, **kwargs) -> None:
        """
        Initialize the replay method.

        Args:
            memory_size: Maximum number of examples to store
            **kwargs: Additional method-specific arguments
        """
        self.memory_size = memory_size
        # Parallel lists: memory_data['inputs'][i] is a tensor whose label is
        # memory_data['targets'][i] and which came from memory_data['task_ids'][i].
        self.memory_data = {"inputs": [], "targets": [], "task_ids": []}
        super().__init__(**kwargs)

    @abstractmethod
    def update_memory(self, task_id: int, dataloader: DataLoader) -> None:
        """
        Update memory with examples from the current task.

        Subclasses define the selection strategy and are responsible for
        respecting `self.memory_size`.

        Args:
            task_id: Task identifier
            dataloader: Dataloader with task data
        """
        pass

    def on_task_end(
        self, task_id: int, task_dataloader: Optional[DataLoader] = None
    ) -> None:
        """
        Refresh the replay memory at the end of a task.

        Args:
            task_id: Task identifier
            task_dataloader: Dataloader with task data; if None, memory is
                left unchanged
        """
        super().on_task_end(task_id, task_dataloader)

        if task_dataloader is not None:
            self.update_memory(task_id, task_dataloader)

    def get_memory_samples(
        self, batch_size: int = 64, exclude_task: Optional[int] = None
    ) -> Optional[Dict[str, torch.Tensor]]:
        """
        Get a random batch of examples from memory for replay.

        Samples without replacement when enough examples are available,
        otherwise uniformly with replacement.

        Args:
            batch_size: Number of examples to retrieve
            exclude_task: Optional task to exclude from replay

        Returns:
            Dict with 'inputs', 'targets', and 'task_ids' or None if memory
            is empty (or contains only excluded-task examples)
        """
        if not self.memory_data["inputs"]:
            return None

        # Filter out excluded task if needed
        if exclude_task is not None:
            valid_indices = [
                i
                for i, task in enumerate(self.memory_data["task_ids"])
                if task != exclude_task
            ]
        else:
            valid_indices = list(range(len(self.memory_data["inputs"])))

        if not valid_indices:
            return None

        if len(valid_indices) < batch_size:
            # Not enough distinct examples: sample uniformly WITH replacement.
            # (The previous deterministic cycling `i % len` over-represented
            # low-index entries and returned an ordered, non-random batch.)
            picks = torch.randint(len(valid_indices), (batch_size,)).tolist()
        else:
            # Enough examples: sample without replacement.
            picks = torch.randperm(len(valid_indices))[:batch_size].tolist()
        indices = [valid_indices[i] for i in picks]

        # Gather examples
        return {
            "inputs": torch.stack([self.memory_data["inputs"][i] for i in indices]),
            "targets": torch.tensor([self.memory_data["targets"][i] for i in indices]),
            "task_ids": torch.tensor(
                [self.memory_data["task_ids"][i] for i in indices]
            ),
        }

    def state_dict(self) -> Dict[str, Any]:
        """Get state dictionary including memory data (input tensors on CPU)."""
        state = super().state_dict()
        # Copy the lists so later memory mutations cannot corrupt a saved
        # checkpoint, and move input tensors to CPU for storage.
        state["memory_data"] = {
            "inputs": [x.cpu() for x in self.memory_data["inputs"]],
            "targets": list(self.memory_data["targets"]),
            "task_ids": list(self.memory_data["task_ids"]),
        }
        state["memory_size"] = self.memory_size
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Load state dictionary including memory data."""
        super().load_state_dict(state_dict)
        if "memory_data" in state_dict:
            # Keep tensors on CPU until needed
            self.memory_data = state_dict["memory_data"]
        if "memory_size" in state_dict:
            self.memory_size = state_dict["memory_size"]
