"""
Strategy-specific interfaces for continual learning.

This module defines interfaces specifically for continual learning strategies,
including loss computation, memory management, and batch processing.
"""

from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, TypeVar, runtime_checkable, Generic
import torch

from continuallearning.events.core.event_types import EventType, BaseEventData
from continuallearning.interfaces import ComponentInterface

# Type variables
E = TypeVar("E", bound=BaseEventData)
T = TypeVar("T")


# ========== Strategy Event Handling ==========


@runtime_checkable
class StrategyEventHandlerInterface(Generic[E], ComponentInterface):
    """
    Specialized interface for continual learning strategy event handlers.

    This interface defines the protocol for strategy-specific event handling
    with additional capabilities beyond basic event handling.
    """

    def can_handle(self, event: E) -> bool:
        """Check if this handler can process the given event."""
        ...

    def handle_event(self, event: E) -> Optional[E]:
        """Handle an event and optionally return a response."""
        ...

    @property
    def priority(self) -> int:
        """Handler priority (higher numbers execute first)."""
        ...

    @property
    def name(self) -> str:
        """Handler name for identification."""
        ...

    def get_strategy_name(self) -> str:
        """Get the name of this strategy."""
        ...

    def get_strategy_priority(self) -> int:
        """Get the execution priority of this strategy."""
        ...

    def get_handled_event_types(self) -> List[EventType]:
        """Get the event types this strategy handles."""
        ...


# ========== Loss Computation Interfaces ==========


class LossComputationInterface(ABC):
    """Contract for producing strategy-specific training loss terms."""

    @abstractmethod
    def compute_loss(
        self,
        model: torch.nn.Module,
        outputs: torch.Tensor,
        targets: torch.Tensor,
        task_id: int,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """
        Produce the named loss components for the current step.

        Args:
            model: Network under training.
            outputs: Predictions emitted by ``model``.
            targets: Ground-truth labels.
            task_id: Identifier of the task currently being learned.
            **kwargs: Extra strategy-specific keyword options.

        Returns:
            Mapping from loss-component name to its tensor value.
        """
        ...

    @abstractmethod
    def get_loss_weights(self, task_id: int) -> Dict[str, float]:
        """Return the per-component weighting applied when combining losses."""
        ...


class RegularizationInterface(ABC):
    """Contract for regularization-based continual learning strategies."""

    @abstractmethod
    def compute_regularization_loss(
        self,
        model: torch.nn.Module,
        task_id: int,
        **kwargs,
    ) -> torch.Tensor:
        """Return the regularization penalty for the current task.

        Args:
            model: Network under training.
            task_id: Identifier of the task currently being learned.
            **kwargs: Extra strategy-specific keyword options.
        """
        ...

    @abstractmethod
    def update_regularization_params(
        self, model: torch.nn.Module, task_id: int
    ) -> None:
        """Refresh the regularizer's internal state once a task finishes."""
        ...


# ========== Memory Management Interfaces ==========


class MemoryManagerInterface(ABC):
    """Contract for rehearsal-memory management in continual learning."""

    @abstractmethod
    def update_memory(self, data: Any, targets: Any, task_id: int) -> None:
        """
        Insert new examples into the memory.

        Args:
            data: Inputs to store.
            targets: Labels matching ``data``.
            task_id: Task the examples belong to.
        """
        ...

    @abstractmethod
    def sample_from_memory(
        self, num_samples: int, task_id: Optional[int] = None
    ) -> Optional[Any]:
        """
        Draw examples back out of the memory.

        Args:
            num_samples: How many samples to draw.
            task_id: When given, restrict sampling to that task.

        Returns:
            The drawn samples, or ``None`` when the memory holds nothing.
        """
        ...

    @abstractmethod
    def get_memory_size(self) -> int:
        """Report how many items the memory currently holds."""
        ...

    @abstractmethod
    def clear_memory(self, task_id: Optional[int] = None) -> None:
        """Drop stored items — all of them, or just one task's when given."""
        ...


class ExperienceReplayInterface(ABC):
    """Contract for experience-replay buffers."""

    @abstractmethod
    def store_experience(
        self,
        data: Any,
        targets: Any,
        task_id: int,
        importance_score: Optional[float] = None,
    ) -> None:
        """Add one experience to the replay buffer.

        Args:
            data: Inputs for the experience.
            targets: Labels matching ``data``.
            task_id: Task the experience came from.
            importance_score: Optional priority used by the buffer.
        """
        ...

    @abstractmethod
    def replay_batch(self, batch_size: int) -> Optional[Any]:
        """Assemble a batch of stored experiences for replay."""
        ...

    @abstractmethod
    def update_importance_scores(self, task_id: int) -> None:
        """Recompute priorities of buffered experiences for ``task_id``."""
        ...


# ========== Batch Processing Interfaces ==========


class BatchProcessorInterface(ABC):
    """Contract for per-batch preprocessing and augmentation."""

    @abstractmethod
    def preprocess_batch(
        self, batch: Any, batch_idx: int, task_id: int, **kwargs
    ) -> Optional[Any]:
        """
        Transform a batch before it reaches the model.

        Args:
            batch: Raw input batch.
            batch_idx: Position of the batch within the epoch.
            task_id: Identifier of the current task.
            **kwargs: Extra processing options.

        Returns:
            The transformed batch, or ``None`` to drop it entirely.
        """
        ...

    @abstractmethod
    def augment_batch(self, batch: Any, task_id: int) -> Any:
        """Return an augmented view of ``batch`` for the given task."""
        ...


class TaskBoundaryProcessorInterface(ABC):
    """Contract for reacting to task-boundary transitions."""

    @abstractmethod
    def on_task_start(self, task_id: int, task_metadata: Dict[str, Any]) -> None:
        """React to a task beginning, given its id and metadata."""
        ...

    @abstractmethod
    def on_task_end(self, task_id: int, task_results: Dict[str, Any]) -> None:
        """React to a task finishing, given its id and result summary."""
        ...

    @abstractmethod
    def on_task_switch(self, from_task_id: int, to_task_id: int) -> None:
        """React to execution moving from one task to another."""
        ...


# ========== Model Adaptation Interfaces ==========


class ModelAdapterInterface(ABC):
    """Contract for adapting a model's architecture across tasks."""

    @abstractmethod
    def adapt_model(
        self,
        model: torch.nn.Module,
        task_id: int,
        adaptation_data: Optional[Any] = None,
    ) -> torch.nn.Module:
        """Return ``model`` adapted for ``task_id``.

        Args:
            model: Network to adapt.
            task_id: Identifier of the new task.
            adaptation_data: Optional data used to guide adaptation.
        """
        ...

    @abstractmethod
    def get_task_specific_parameters(self, task_id: int) -> List[str]:
        """List the parameter names dedicated to the given task."""
        ...

    @abstractmethod
    def freeze_shared_parameters(self, model: torch.nn.Module) -> None:
        """Stop gradient updates on parameters shared across tasks."""
        ...
