"""
Strategy interface integration with the enhanced event system.

This module provides the base classes and interfaces for implementing
continual learning strategies using the event-driven architecture.
"""

from abc import abstractmethod
from typing import Any, Dict, List, Optional, Set, Union
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

from continuallearning.events.core.event_types import (
    BaseEventData,
    EventType,
    StrategyEvent,
    TaskEvent,
    EventFactory,
)
from continuallearning.events.core.handlers import BaseEventHandler, with_error_handling
from continuallearning.events.core.dispatcher import EventDispatcher, global_dispatcher


class ContinualLearningStrategy(BaseEventHandler[StrategyEvent]):
    """
    Base class for continual learning strategies using event-driven architecture.

    This class provides a framework for implementing CL strategies that can
    respond to various events in the training lifecycle while maintaining
    clean separation of concerns.

    Subclasses must implement :meth:`compute_strategy_loss`; the remaining
    hooks (``on_task_start``, ``preprocess_batch``, ...) have no-op or
    bookkeeping-only defaults and may be overridden as needed.
    """

    def __init__(
        self,
        name: Optional[str] = None,
        priority: int = 100,  # Higher priority for strategies
        enabled: bool = True,
        track_metrics: bool = True,  # Enable metrics for strategies
        event_dispatcher: Optional[EventDispatcher] = None,
    ):
        """
        Initialize the continual learning strategy.

        Args:
            name: Optional name for this strategy
            priority: Handler priority (higher numbers execute first)
            enabled: Whether this strategy is enabled
            track_metrics: Whether to record handler metrics for this strategy
            event_dispatcher: Event dispatcher to use (defaults to global)
        """
        super().__init__(name, priority, enabled, track_metrics)
        self.event_dispatcher = event_dispatcher or global_dispatcher

        # Strategy state
        self.current_task = 0
        self.seen_tasks: Set[int] = set()
        self.task_data: Dict[int, Dict[str, Any]] = {}

        # Register with dispatcher so lifecycle events reach this strategy.
        self.event_dispatcher.register_handler(self)

        self._logger.info(f"Initialized strategy: {self.name}")

    def get_handled_events(self) -> List[EventType]:
        """
        Return the list of event types this strategy handles.

        Default implementation handles core strategy events.
        Override to add strategy-specific events.
        """
        return [
            EventType.TASK_STARTED,
            EventType.TASK_COMPLETED,
            EventType.LOSS_COMPUTATION_REQUESTED,
            EventType.BATCH_PREPROCESSING_REQUESTED,
            EventType.EPOCH_COMPLETED,
            EventType.TRAINING_COMPLETED,
        ]

    def _handle_event_impl(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """
        Handle strategy events and dispatch to appropriate methods.

        Args:
            event: The strategy event to handle

        Returns:
            Optional response event
        """
        # Dispatch table keeps the mapping between event types and the
        # private handler methods in one place.
        handler_map = {
            EventType.TASK_STARTED: self._handle_task_start,
            EventType.TASK_COMPLETED: self._handle_task_end,
            EventType.LOSS_COMPUTATION_REQUESTED: self._handle_loss_compute_request,
            EventType.BATCH_PREPROCESSING_REQUESTED: self._handle_batch_preprocess_request,
            EventType.EPOCH_COMPLETED: self._handle_epoch_end,
            EventType.TRAINING_COMPLETED: self._handle_training_end,
        }

        handler_method = handler_map.get(event.event_type)
        if handler_method:
            return handler_method(event)

        # Unhandled event types are ignored (no response event).
        return None

    @abstractmethod
    def compute_strategy_loss(
        self,
        model: nn.Module | None,
        criterion: nn.Module | None,
        outputs: torch.Tensor | None,
        targets: torch.Tensor | None,
        task_id: int | None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """
        Compute strategy-specific loss.

        Args:
            model: The model being trained
            criterion: The base loss criterion
            outputs: Model outputs
            targets: Target labels
            task_id: Current task ID
            **kwargs: Additional arguments

        Returns:
            Dictionary containing loss components
        """
        pass

    def preprocess_batch(
        self, batch: Any, batch_idx: int, task_id: int, **kwargs
    ) -> Optional[Any]:
        """
        Preprocess a batch before training.

        Override this method to implement batch preprocessing
        (e.g., adding replay samples, data augmentation).

        Args:
            batch: Input batch
            batch_idx: Batch index
            task_id: Current task ID
            **kwargs: Additional arguments

        Returns:
            Processed batch or None if no processing needed
        """
        return None

    def on_task_start(self, task_id: int, **kwargs) -> None:
        """
        Called when a new task starts.

        Updates the current-task pointer and the set of seen tasks.

        Args:
            task_id: The starting task ID
            **kwargs: Additional arguments
        """
        self.current_task = task_id
        self.seen_tasks.add(task_id)
        self._logger.info(f"Starting task {task_id}")

    def on_task_end(self, task_id: int, **kwargs) -> None:
        """
        Called when a task ends.

        Args:
            task_id: The ending task ID
            **kwargs: Additional arguments
        """
        self._logger.info(f"Ending task {task_id}")

    def on_epoch_end(self, epoch: int, task_id: int, **kwargs) -> None:
        """
        Called at the end of each epoch.

        Args:
            epoch: Epoch number
            task_id: Current task ID
            **kwargs: Additional arguments
        """
        pass

    def on_training_end(self, **kwargs) -> None:
        """
        Called when training ends.

        Args:
            **kwargs: Additional arguments
        """
        self._logger.info("Training ended")

    def get_parameters_to_optimize(
        self, model: nn.Module
    ) -> Union[List[nn.Parameter], List[Dict]]:
        """
        Get parameters that should be optimized for the current task.

        Args:
            model: The model being trained

        Returns:
            List of parameters or parameter groups
        """
        return list(model.parameters())

    def state_dict(self) -> Dict[str, Any]:
        """
        Get strategy state for serialization.

        Returns:
            Dictionary containing strategy state
        """
        return {
            "current_task": self.current_task,
            # Sets are not JSON/pickle-friendly everywhere; store as list.
            "seen_tasks": list(self.seen_tasks),
            "task_data": self.task_data,
        }

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """
        Load strategy state from dictionary.

        Missing keys fall back to fresh defaults so older checkpoints load.

        Args:
            state_dict: Dictionary containing strategy state
        """
        self.current_task = state_dict.get("current_task", 0)
        self.seen_tasks = set(state_dict.get("seen_tasks", []))
        self.task_data = state_dict.get("task_data", {})

    def _handle_task_start(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle task start event."""
        task_id = event.task_id or 0
        self.on_task_start(task_id)

        return (
            EventFactory.strategy(
                EventType.STRATEGY_STATE_CHANGED,
                strategy_name=self.name,
            )
            .task_id(task_id)
            .source(f"{self.name}._handle_task_start")
            .response_data({"action": "task_started", "task_id": task_id})
            .build()
        )

    def _handle_task_end(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle task end event."""
        # Default a missing task id to 0, consistent with _handle_task_start,
        # so hooks typed `task_id: int` never receive None.
        task_id = event.task_id or 0
        self.on_task_end(task_id)

        return (
            EventFactory.strategy(
                EventType.STRATEGY_STATE_CHANGED,
                strategy_name=self.name,
            )
            .task_id(task_id)
            .source(f"{self.name}._handle_task_end")
            .response_data({"action": "task_ended", "task_id": task_id})
            .build()
        )

    def _handle_loss_compute_request(
        self, event: StrategyEvent
    ) -> Optional[StrategyEvent]:
        """Handle loss computation request."""
        # NOTE: do not test these with truthiness/`all([...])` -- `bool(tensor)`
        # raises RuntimeError for multi-element tensors and a zero scalar
        # tensor is falsy, so valid payloads would be rejected or crash.
        required = (event.model, event.criterion, event.outputs, event.targets)
        if any(item is None for item in required):
            self._logger.warning("Incomplete data for loss computation")
            return None

        # Default a missing task id to 0, consistent with the other handlers.
        task_id = event.task_id or 0
        try:
            loss_dict = self.compute_strategy_loss(
                model=event.model,
                criterion=event.criterion,
                outputs=event.outputs,
                targets=event.targets,
                task_id=task_id,
            )
            return (
                EventFactory.strategy(
                    EventType.LOSS_COMPUTATION_COMPLETED,
                    strategy_name=self.name,
                )
                .task_id(task_id)
                .source(f"{self.name}._handle_loss_compute_request")
                .response_data(loss_dict)
                .build()
            )

        except Exception as e:
            # Loss failures are logged but must not kill the dispatch loop.
            self._logger.error(f"Error computing loss: {e}", exc_info=True)
            return None

    def _handle_batch_preprocess_request(
        self, event: StrategyEvent
    ) -> Optional[StrategyEvent]:
        """Handle batch preprocessing request."""
        if event.batch is None:
            return None

        processed_batch = self.preprocess_batch(
            batch=event.batch,
            batch_idx=event.batch_idx or 0,
            task_id=event.task_id or 0,
        )

        if processed_batch is not None:
            return (
                EventFactory.strategy(
                    EventType.BATCH_PREPROCESSING_COMPLETED,
                    strategy_name=self.name,
                )
                .task_id(event.task_id or 0)
                .source(f"{self.name}._handle_batch_preprocess_request")
                .response_data({"batch": processed_batch, "augmented": True})
                .build()
            )

        return None

    def _handle_epoch_end(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle epoch end event."""
        self.on_epoch_end(
            epoch=event.current_epoch or 0,
            task_id=event.task_id or 0,
        )
        return None

    def _handle_training_end(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle training end event."""
        self.on_training_end()
        return None

    def __repr__(self) -> str:
        """String representation of the strategy."""
        return f"{self.__class__.__name__}(name='{self.name}', current_task={self.current_task})"


class BaselineStrategy(ContinualLearningStrategy):
    """
    Baseline strategy that performs standard training without any CL modifications.

    This strategy serves as a reference implementation and testing baseline.
    """

    def compute_strategy_loss(
        self,
        model: nn.Module | None,
        criterion: nn.Module | None,
        outputs: torch.Tensor | None,
        targets: torch.Tensor | None,
        task_id: int | None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Compute standard cross-entropy loss."""
        # Unwrap dict-style model outputs down to a logits tensor; reject
        # dicts that carry neither recognized key.
        logits = (
            outputs.get("logits", outputs.get("predictions"))
            if isinstance(outputs, dict)
            else outputs
        )
        if isinstance(outputs, dict) and logits is None:
            raise ValueError(
                "Outputs dict must contain 'logits' or 'predictions' key"
            )

        assert criterion is not None
        ce_loss = criterion(logits, targets)

        # The same tensor appears under both keys: there is no extra term.
        return {"loss": ce_loss, "base_loss": ce_loss}


class RegularizationStrategy(ContinualLearningStrategy):
    """
    Base class for regularization-based continual learning strategies.

    Provides common functionality for strategies that add regularization terms
    to the loss function (e.g., EWC, L2, etc.).
    """

    def __init__(
        self, regularization_weight: float = 1.0, name: Optional[str] = None, **kwargs
    ):
        """
        Initialize regularization strategy.

        Args:
            regularization_weight: Weight for the regularization term
            name: Optional name for this strategy
            **kwargs: Additional arguments for parent class
        """
        super().__init__(name, **kwargs)
        self.regularization_weight = regularization_weight

    @abstractmethod
    def compute_regularization_loss(
        self, model: nn.Module, task_id: int
    ) -> torch.Tensor:
        """
        Compute the regularization loss.

        Args:
            model: The model being trained
            task_id: Current task ID

        Returns:
            Regularization loss tensor
        """
        pass

    def compute_strategy_loss(
        self,
        model: nn.Module | None,
        criterion: nn.Module | None,
        outputs: torch.Tensor | None,
        targets: torch.Tensor | None,
        task_id: int | None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Compute loss with regularization."""
        # Base loss. Fail loudly on an unrecognized outputs dict instead of
        # silently feeding the whole dict into `criterion` (which would
        # produce an obscure downstream error); this matches BaselineStrategy.
        if isinstance(outputs, dict):
            logits = outputs.get("logits", outputs.get("predictions"))
            if logits is None:
                raise ValueError(
                    "Outputs dict must contain 'logits' or 'predictions' key"
                )
        else:
            logits = outputs

        assert criterion is not None
        base_loss = criterion(logits, targets)

        # Regularization loss
        reg_loss = torch.tensor(0.0, device=base_loss.device)
        if task_id is not None and model is not None and task_id > 0:  # Only apply regularization after first task
            reg_loss = self.compute_regularization_loss(model, task_id)

        total_loss = base_loss + self.regularization_weight * reg_loss

        return {
            "loss": total_loss,
            "base_loss": base_loss,
            "regularization_loss": reg_loss,
        }


class MemoryBasedStrategy(ContinualLearningStrategy):
    """
    Base class for memory-based continual learning strategies.

    Provides common functionality for strategies that maintain
    exemplars or replay buffers (e.g., Experience Replay, GEM, etc.).
    """

    def __init__(self, memory_size: int = 1000, name: Optional[str] = None, **kwargs):
        """
        Initialize memory-based strategy.

        Args:
            memory_size: Maximum number of exemplars to store
            name: Optional name for this strategy
            **kwargs: Additional arguments for parent class
        """
        super().__init__(name, **kwargs)
        self.memory_size = memory_size
        # Parallel lists: index i across the three lists is one exemplar.
        self.memory: Dict[str, List[Any]] = {
            "inputs": [],
            "targets": [],
            "task_ids": [],
        }

    @abstractmethod
    def select_exemplars(
        self, dataloader: DataLoader, task_id: int, num_exemplars: int
    ) -> Dict[str, List[Any]]:
        """
        Select exemplars from the current task data.

        Args:
            dataloader: DataLoader for current task
            task_id: Current task ID
            num_exemplars: Number of exemplars to select

        Returns:
            Dictionary containing selected exemplars
        """
        pass

    def update_memory(self, dataloader: DataLoader, task_id: int) -> None:
        """
        Update memory with exemplars from the current task.

        Args:
            dataloader: DataLoader for current task
            task_id: Current task ID
        """
        # Split the budget evenly across all tasks seen so far.
        num_tasks = len(self.seen_tasks)
        exemplars_per_task = self.memory_size // max(num_tasks, 1)

        # Select new exemplars
        new_exemplars = self.select_exemplars(dataloader, task_id, exemplars_per_task)

        # Update memory
        for key in self.memory:
            self.memory[key].extend(new_exemplars.get(key, []))

        # Trim memory if needed
        self._trim_memory()

    def _trim_memory(self) -> None:
        """Trim memory to maintain size constraints."""
        current_size = len(self.memory["inputs"])
        if current_size > self.memory_size:
            # Keep the most recent exemplars
            for key in self.memory:
                self.memory[key] = self.memory[key][-self.memory_size :]

    def state_dict(self) -> Dict[str, Any]:
        """
        Get strategy state for serialization, including the replay memory.

        The base-class state_dict omits ``self.memory``, which would
        silently drop the replay buffer across checkpoint/restore.
        """
        state = super().state_dict()
        state["memory"] = self.memory
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """
        Load strategy state, restoring the replay memory when present.

        Checkpoints written before memory was serialized simply keep the
        current in-memory buffer.
        """
        super().load_state_dict(state_dict)
        self.memory = state_dict.get("memory", self.memory)

    def on_task_end(self, task_id: int, **kwargs) -> None:
        """Update memory when task ends."""
        super().on_task_end(task_id, **kwargs)

        # Update memory if dataloader is provided.
        # NOTE: explicit None check -- `if dataloader:` invokes
        # DataLoader.__len__, which raises TypeError for iterable-style
        # datasets and skips a valid-but-empty loader.
        dataloader = kwargs.get("dataloader")
        if dataloader is not None:
            self.update_memory(dataloader, task_id)
