"""
Example implementations of continual learning strategies using the advanced event system.

This module demonstrates how to implement CL strategies following SOLID principles
and using the event-driven architecture for clean separation of concerns.
"""

import random
from collections import defaultdict
from typing import Any, Dict, List, Optional

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

# EventType / StrategyEvent / EventFactory are used throughout this module;
# they are imported from the same package that provides global_dispatcher.
from continuallearning.events import EventFactory, EventType, StrategyEvent
from continuallearning.events.interfaces import (
    StrategyEventHandlerInterface as StrategyEventHandler,
    LossComputationInterface,
    BatchProcessorInterface,
    MemoryManagerInterface,
)
from continuallearning.registry import strategy


@strategy("EWC")
class AdvancedEWCStrategy(StrategyEventHandler, LossComputationInterface):
    """
    Advanced EWC implementation using the event system.

    Elastic Weight Consolidation penalizes deviation from parameters that were
    important for previously learned tasks. Importance is estimated as the
    diagonal of the Fisher Information Matrix, accumulated from squared
    gradients of the log-likelihood over samples of the completed task.

    This implementation demonstrates how to create a clean, maintainable
    strategy that integrates seamlessly with the event system.
    """

    def __init__(
        self,
        regularization_weight: float = 5000.0,
        fisher_estimation_samples: int = 1000,
        name: Optional[str] = None,
        priority: int = 100,
        enabled: bool = True,
        **kwargs,
    ):
        """
        Args:
            regularization_weight: Multiplier (lambda) applied to the EWC
                quadratic penalty when it is added to the base loss.
            fisher_estimation_samples: Maximum number of samples used when
                estimating the Fisher diagonal after a task completes.
            name: Handler name; defaults to "EWCStrategy".
            priority: Handler priority forwarded to the event system.
            enabled: Whether the handler starts enabled.
            **kwargs: Extra arguments forwarded to the base handler.
        """
        super().__init__(
            strategy_name="EWC",
            strategy_priority=priority,
            name=name or "EWCStrategy",
            priority=priority,
            enabled=enabled,
            **kwargs,
        )

        self.regularization_weight = regularization_weight
        self.fisher_estimation_samples = fisher_estimation_samples

        # EWC-specific state: the most recent Fisher diagonal, the parameter
        # snapshot it anchors to, and per-task Fisher copies for inspection.
        self.fisher_information: Dict[str, torch.Tensor] = {}
        self.optimal_parameters: Dict[str, torch.Tensor] = {}
        self.task_fisher_info: Dict[int, Dict[str, torch.Tensor]] = {}

        # Performance tracking
        self.ewc_loss_history: List[float] = []
        self.regularization_strength_history: List[float] = []

    def get_handled_events(self) -> List[EventType]:
        """Return EWC-specific event types."""
        return [
            EventType.TASK_STARTED,
            EventType.TASK_COMPLETED,
            EventType.LOSS_COMPUTATION_REQUESTED,
            EventType.PARAMETER_IMPORTANCE_UPDATE_REQUESTED,
        ]

    def _handle_event_impl(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Dispatch EWC-specific events to their dedicated handlers."""
        handler_map = {
            EventType.TASK_STARTED: self._handle_task_started,
            EventType.TASK_COMPLETED: self._handle_task_completed,
            EventType.LOSS_COMPUTATION_REQUESTED: self._handle_loss_computation,
            EventType.PARAMETER_IMPORTANCE_UPDATE_REQUESTED: self._handle_parameter_importance_update,
        }

        handler_method = handler_map.get(event.event_type)
        if handler_method:
            return handler_method(event)

        return None

    def _resolve_task_id(self, event: StrategyEvent) -> int:
        """Return the event's task id, falling back to the current task.

        Uses an explicit ``is None`` check so that a legitimate task id of 0
        is not silently replaced by ``self.current_task``.
        """
        return event.task_id if event.task_id is not None else self.current_task

    def _handle_task_started(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle task start event."""
        task_id = self._resolve_task_id(event)
        self.update_task_context(task_id)

        self._logger.info(f"EWC strategy started for task {task_id}")

        return (
            EventFactory.strategy(EventType.STRATEGY_STATE_CHANGED, self.strategy_name)
            .source(f"{self.name}._handle_task_started")
            .build()
        )

    def _handle_task_completed(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle task completion - compute Fisher information."""
        task_id = self._resolve_task_id(event)

        # Explicit None check: truth-testing an nn.Module can be misleading
        # (an empty nn.Sequential has len() == 0 and is falsy).
        if event.model is not None and hasattr(event, "dataloader"):
            self._compute_fisher_information(event.model, event.dataloader, task_id)
            self._store_optimal_parameters(event.model, task_id)

            self._logger.info(f"EWC: Computed Fisher information for task {task_id}")

            # Store task-specific Fisher information
            self.task_fisher_info[task_id] = self.fisher_information.copy()

        return (
            EventFactory.strategy(
                EventType.PARAMETER_IMPORTANCE_UPDATE_COMPLETED, self.strategy_name
            )
            .task_id(task_id)
            .source(f"{self.name}._handle_task_completed")
            .build()
        )

    def _handle_loss_computation(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Compute EWC-regularized loss.

        Note: the attributes are compared against ``None`` explicitly.
        Truth-testing a multi-element tensor (e.g. ``event.targets``) raises
        ``RuntimeError: Boolean value of Tensor ... is ambiguous`` in PyTorch,
        so ``all([...])`` over these values is not safe.
        """
        required = (event.model, event.criterion, event.model_outputs, event.targets)
        if any(item is None for item in required):
            return None

        # Compute base loss
        base_loss = event.criterion(event.model_outputs, event.targets)

        # Compute EWC regularization loss
        ewc_loss = self._compute_ewc_regularization_loss(event.model)

        # Total loss
        total_loss = base_loss + self.regularization_weight * ewc_loss

        # Track metrics
        self.ewc_loss_history.append(ewc_loss.item())
        self.regularization_strength_history.append(self.regularization_weight)

        # Create response
        response_data = {
            "loss": total_loss,
            "base_loss": base_loss,
            "ewc_regularization_loss": ewc_loss,
            "regularization_weight": self.regularization_weight,
            "fisher_diagonal_norm": self._get_fisher_diagonal_norm(),
        }

        return (
            EventFactory.strategy(
                EventType.LOSS_COMPUTATION_COMPLETED, self.strategy_name
            )
            .response_data(response_data)
            .source(f"{self.name}._handle_loss_computation")
            .build()
        )

    def _handle_parameter_importance_update(
        self, event: StrategyEvent
    ) -> Optional[StrategyEvent]:
        """Handle parameter importance update request."""
        if event.model is not None and hasattr(event, "dataloader"):
            task_id = self._resolve_task_id(event)
            self._compute_fisher_information(event.model, event.dataloader, task_id)

            return (
                EventFactory.strategy(
                    EventType.PARAMETER_IMPORTANCE_UPDATE_COMPLETED, self.strategy_name
                )
                .task_id(task_id)
                .source(f"{self.name}._handle_parameter_importance_update")
                .build()
            )

        return None

    def compute_loss(
        self,
        model: nn.Module,
        outputs: torch.Tensor,
        targets: torch.Tensor,
        task_id: int,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Compute EWC loss (interface implementation).

        Args:
            model: Model whose parameters are regularized.
            outputs: Model predictions for the current batch.
            targets: Ground-truth labels for the current batch.
            task_id: Current task identifier (unused here, part of interface).
            **kwargs: May contain ``criterion``; defaults to CrossEntropyLoss.

        Returns:
            Dict with "loss" (total), "base_loss" and "ewc_loss" tensors.
        """
        criterion = kwargs.get("criterion", nn.CrossEntropyLoss())

        base_loss = criterion(outputs, targets)
        ewc_loss = self._compute_ewc_regularization_loss(model)
        total_loss = base_loss + self.regularization_weight * ewc_loss

        return {
            "loss": total_loss,
            "base_loss": base_loss,
            "ewc_loss": ewc_loss,
        }

    def _compute_ewc_regularization_loss(self, model: nn.Module) -> torch.Tensor:
        """Compute the EWC quadratic penalty sum_i F_i * (theta_i - theta*_i)^2."""
        if not self.fisher_information or not self.optimal_parameters:
            return torch.tensor(0.0, device=next(model.parameters()).device)

        ewc_loss = torch.tensor(0.0, device=next(model.parameters()).device)

        for name, param in model.named_parameters():
            if name in self.fisher_information and name in self.optimal_parameters:
                fisher = self.fisher_information[name]
                optimal = self.optimal_parameters[name]

                # Ensure tensors are on the same device
                fisher = fisher.to(param.device)
                optimal = optimal.to(param.device)

                # Quadratic penalty. Out-of-place accumulation keeps the
                # autograd graph intact (in-place += on the non-graph
                # accumulator is fragile with requires-grad operands).
                diff = param - optimal
                ewc_loss = ewc_loss + (fisher * diff.pow(2)).sum()

        return ewc_loss

    def _compute_fisher_information(
        self, model: nn.Module, dataloader: torch.utils.data.DataLoader, task_id: int
    ) -> None:
        """Estimate the Fisher Information Matrix diagonal.

        Accumulates squared gradients of the NLL loss over up to
        ``fisher_estimation_samples`` samples, then normalizes by the number
        of samples actually processed. The model's training/eval mode is
        restored afterwards.
        """
        was_training = model.training
        model.eval()

        # Initialize Fisher information accumulator
        fisher_accum = {}
        for name, param in model.named_parameters():
            if param.requires_grad:
                fisher_accum[name] = torch.zeros_like(param.data)

        # Sample-based Fisher information estimation
        samples_processed = 0

        with torch.enable_grad():
            for batch_idx, batch in enumerate(dataloader):
                if samples_processed >= self.fisher_estimation_samples:
                    break

                # Extract inputs and targets
                if isinstance(batch, (list, tuple)):
                    inputs, targets = batch[0], batch[1]
                else:
                    inputs, targets = batch["input"], batch["target"]

                device = next(model.parameters()).device
                inputs = inputs.to(device)
                targets = targets.to(device)

                # Forward pass
                model.zero_grad()
                outputs = model(inputs)

                # Compute log-likelihood loss
                log_likelihood = F.log_softmax(outputs, dim=1)
                loss = F.nll_loss(log_likelihood, targets)

                # Backward pass
                loss.backward()

                # Accumulate squared gradients (Fisher diagonal approximation)
                for name, param in model.named_parameters():
                    if param.grad is not None and name in fisher_accum:
                        fisher_accum[name] += param.grad.data**2

                samples_processed += inputs.size(0)

        # Don't leave Fisher-estimation gradients behind for the optimizer.
        model.zero_grad()
        if was_training:
            model.train()

        # Normalize by number of samples; guard against an empty dataloader
        # (dividing by zero would raise ZeroDivisionError).
        if samples_processed > 0:
            for name in fisher_accum:
                fisher_accum[name] /= samples_processed
        else:
            self._logger.warning(
                "Fisher estimation saw no samples; keeping zero Fisher values"
            )

        # Store Fisher information
        self.fisher_information = fisher_accum

        self._logger.debug(
            f"Computed Fisher information using {samples_processed} samples"
        )

    def _store_optimal_parameters(self, model: nn.Module, task_id: int) -> None:
        """Snapshot current trainable parameters as the anchor for the task."""
        self.optimal_parameters = {}
        for name, param in model.named_parameters():
            if param.requires_grad:
                self.optimal_parameters[name] = param.data.clone()

        self._logger.debug(f"Stored optimal parameters for task {task_id}")

    def _get_fisher_diagonal_norm(self) -> float:
        """Get the summed norm of the Fisher diagonal for monitoring."""
        if not self.fisher_information:
            return 0.0

        total_norm = 0.0
        for fisher_diag in self.fisher_information.values():
            total_norm += torch.norm(fisher_diag).item()

        return total_norm

    def get_strategy_metrics(self) -> Dict[str, Any]:
        """Get strategy-specific metrics."""
        return {
            "ewc_loss_history": self.ewc_loss_history[-100:],  # Last 100 values
            "regularization_strength": self.regularization_weight,
            "fisher_diagonal_norm": self._get_fisher_diagonal_norm(),
            "num_stored_tasks": len(self.task_fisher_info),
            "avg_ewc_loss": (
                np.mean(self.ewc_loss_history) if self.ewc_loss_history else 0.0
            ),
        }


@strategy(
    name="ExperienceReplay",
    version="2.0.0",
    description="Experience Replay strategy with advanced memory management",
    tags=["memory_replay", "rehearsal", "episodic_memory"],
    categories=["memory_based", "rehearsal"],
)
class AdvancedExperienceReplayStrategy(
    StrategyEventHandler, BatchProcessorInterface, MemoryManagerInterface
):
    """
    Advanced Experience Replay implementation with sophisticated memory management.

    Stores individual samples from past tasks in an episodic memory and mixes
    them into incoming batches, supporting several memory-update policies
    (reservoir, ring buffer, priority) and sampling strategies.
    """

    def __init__(
        self,
        memory_size: int = 1000,
        replay_ratio: float = 0.5,
        sampling_strategy: str = "random",  # random, herding, gradient_based
        memory_update_strategy: str = "reservoir",  # reservoir, ring_buffer, priority
        name: Optional[str] = None,
        priority: int = 100,
        enabled: bool = True,
        **kwargs,
    ):
        """
        Args:
            memory_size: Maximum number of samples kept in episodic memory.
            replay_ratio: Fraction of the incoming batch size to draw from
                memory when augmenting a batch.
            sampling_strategy: How replay samples are drawn ("random",
                "herding", "gradient_based"; unknown values fall back to
                random sampling).
            memory_update_strategy: How new samples enter memory
                ("reservoir", "ring_buffer", "priority"; unknown values fall
                back to reservoir sampling).
            name: Handler name; defaults to "ExperienceReplayStrategy".
            priority: Handler priority forwarded to the event system.
            enabled: Whether the handler starts enabled.
            **kwargs: Extra arguments forwarded to the base handler.
        """
        super().__init__(
            strategy_name="ExperienceReplay",
            strategy_priority=priority,
            name=name or "ExperienceReplayStrategy",
            priority=priority,
            enabled=enabled,
            **kwargs,
        )

        self.memory_size = memory_size
        self.replay_ratio = replay_ratio
        self.sampling_strategy = sampling_strategy
        self.memory_update_strategy = memory_update_strategy

        # Memory storage: parallel lists indexed together per stored sample.
        self.memory_inputs: List[torch.Tensor] = []
        self.memory_targets: List[torch.Tensor] = []
        self.memory_task_ids: List[int] = []
        self.memory_priorities: List[float] = []

        # Memory management
        self.samples_seen = 0
        self.memory_usage_history: List[int] = []

        # Performance tracking
        self.replay_effectiveness_history: List[float] = []

    def get_handled_events(self) -> List[EventType]:
        """Return Experience Replay specific event types."""
        return [
            EventType.BATCH_PREPROCESSING_REQUESTED,
            EventType.MEMORY_UPDATE_REQUESTED,
            EventType.TASK_COMPLETED,
        ]

    def _handle_event_impl(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Dispatch Experience Replay events to their dedicated handlers."""
        handler_map = {
            EventType.BATCH_PREPROCESSING_REQUESTED: self._handle_batch_preprocessing,
            EventType.MEMORY_UPDATE_REQUESTED: self._handle_memory_update,
            EventType.TASK_COMPLETED: self._handle_task_completed,
        }

        handler_method = handler_map.get(event.event_type)
        if handler_method:
            return handler_method(event)

        return None

    def _resolve_task_id(self, event: StrategyEvent) -> int:
        """Return the event's task id, falling back to the current task.

        Uses an explicit ``is None`` check so that a legitimate task id of 0
        is not silently replaced by ``self.current_task``.
        """
        return event.task_id if event.task_id is not None else self.current_task

    def _handle_batch_preprocessing(
        self, event: StrategyEvent
    ) -> Optional[StrategyEvent]:
        """Handle batch preprocessing - add replay samples."""
        if not self.memory_inputs or self.current_task == 0:
            return None  # No replay for first task

        original_batch = event.batch_data
        if original_batch is None:
            return None

        # Extract original batch components
        if isinstance(original_batch, (list, tuple)):
            orig_inputs, orig_targets = original_batch[0], original_batch[1]
        else:
            orig_inputs = original_batch["input"]
            orig_targets = original_batch["target"]

        # Sample replay data
        replay_batch_size = int(orig_inputs.size(0) * self.replay_ratio)
        replay_samples = self.sample_from_memory(replay_batch_size)

        if replay_samples:
            replay_inputs, replay_targets, replay_task_ids = replay_samples

            # Align devices before concatenation: memory samples may live on
            # a different device (e.g. CPU) than the incoming batch.
            replay_inputs = replay_inputs.to(orig_inputs.device)
            replay_targets = replay_targets.to(orig_targets.device)

            # Combine original and replay batches
            combined_inputs = torch.cat([orig_inputs, replay_inputs], dim=0)
            combined_targets = torch.cat([orig_targets, replay_targets], dim=0)

            # Create augmented batch
            if isinstance(original_batch, (list, tuple)):
                augmented_batch = [combined_inputs, combined_targets]
            else:
                augmented_batch = {
                    "input": combined_inputs,
                    "target": combined_targets,
                    "task_id": torch.cat(
                        [
                            torch.full((orig_inputs.size(0),), self.current_task),
                            replay_task_ids,
                        ]
                    ),
                }

            response_data = {
                "batch": augmented_batch,
                "augmented": True,
                "replay_ratio": len(replay_inputs) / len(combined_inputs),
                "original_size": orig_inputs.size(0),
                "replay_size": replay_inputs.size(0),
            }

            return (
                EventFactory.strategy(
                    EventType.BATCH_PREPROCESSING_COMPLETED, self.strategy_name
                )
                .response_data(response_data)
                .source(f"{self.name}._handle_batch_preprocessing")
                .build()
            )

        return None

    def _handle_memory_update(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle memory update request.

        Note: ``batch_data`` is compared against ``None`` explicitly;
        truth-testing it would raise for multi-element tensors and would also
        wrongly skip empty-but-present containers.
        """
        if event.batch_data is not None:
            batch = event.batch_data
            task_id = self._resolve_task_id(event)

            # Extract batch components
            if isinstance(batch, (list, tuple)):
                inputs, targets = batch[0], batch[1]
            else:
                inputs, targets = batch["input"], batch["target"]

            # Update memory
            self.update_memory(inputs, targets, task_id)

            response_data = {
                "memory_size": self.get_memory_size(),
                "memory_usage": len(self.memory_inputs),
                "task_id": task_id,
            }

            return (
                EventFactory.strategy(
                    EventType.MEMORY_UPDATE_COMPLETED, self.strategy_name
                )
                .response_data(response_data)
                .source(f"{self.name}._handle_memory_update")
                .build()
            )

        return None

    def _handle_task_completed(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle task completion - memory consolidation."""
        task_id = self._resolve_task_id(event)

        # Perform memory consolidation if needed
        self._consolidate_memory(task_id)

        # Track memory usage
        self.memory_usage_history.append(len(self.memory_inputs))

        self._logger.info(
            f"Experience Replay: Memory size after task {task_id}: {len(self.memory_inputs)}"
        )

        return (
            EventFactory.strategy(EventType.STRATEGY_STATE_CHANGED, self.strategy_name)
            .task_id(task_id)
            .source(f"{self.name}._handle_task_completed")
            .build()
        )

    def preprocess_batch(
        self, batch: Any, batch_idx: int, task_id: int, **kwargs
    ) -> Optional[Any]:
        """Preprocess batch by adding replay samples (interface implementation).

        Returns the augmented batch, or None when no augmentation applies
        (first task, empty memory, or no replay samples drawn).
        """
        if not self.memory_inputs or task_id == 0:
            return None

        # Extract inputs and targets
        if isinstance(batch, (list, tuple)):
            inputs, targets = batch[0], batch[1]
        else:
            inputs, targets = batch["input"], batch["target"]

        # Sample replay data
        replay_batch_size = int(inputs.size(0) * self.replay_ratio)
        replay_samples = self.sample_from_memory(replay_batch_size)

        if replay_samples:
            replay_inputs, replay_targets, _ = replay_samples

            # Align devices before concatenation (memory may live on CPU).
            replay_inputs = replay_inputs.to(inputs.device)
            replay_targets = replay_targets.to(targets.device)

            # Combine batches
            combined_inputs = torch.cat([inputs, replay_inputs], dim=0)
            combined_targets = torch.cat([targets, replay_targets], dim=0)

            if isinstance(batch, (list, tuple)):
                return [combined_inputs, combined_targets]
            else:
                return {
                    "input": combined_inputs,
                    "target": combined_targets,
                }

        return None

    def update_memory(self, data: Any, targets: Any, task_id: int) -> None:
        """Update memory with new data (interface implementation).

        Splits the batch into individual samples and feeds each to the
        configured update policy. Unknown policy names fall back to reservoir
        sampling (mirroring sample_from_memory's fallback to random) instead
        of silently discarding the samples.
        """
        # Convert to individual samples
        for i in range(data.size(0)):
            sample_input = data[i].clone()
            sample_target = targets[i].clone() if targets.dim() > 0 else targets.clone()

            if self.memory_update_strategy == "ring_buffer":
                self._ring_buffer_update(sample_input, sample_target, task_id)
            elif self.memory_update_strategy == "priority":
                self._priority_based_update(sample_input, sample_target, task_id)
            else:
                # "reservoir" and any unrecognized strategy
                self._reservoir_sampling_update(sample_input, sample_target, task_id)

            self.samples_seen += 1

    def sample_from_memory(
        self, num_samples: int, task_id: Optional[int] = None
    ) -> Optional[tuple]:
        """Sample data from memory (interface implementation).

        Returns a (inputs, targets, task_ids) tuple of stacked tensors, or
        None when the memory is empty or no samples can be drawn.
        """
        if not self.memory_inputs or num_samples <= 0:
            return None

        available_samples = len(self.memory_inputs)
        actual_samples = min(num_samples, available_samples)

        if self.sampling_strategy == "herding":
            # Implement herding strategy (simplified)
            indices = self._herding_sampling(actual_samples, task_id)
        elif self.sampling_strategy == "gradient_based":
            # Implement gradient-based sampling (simplified)
            indices = self._gradient_based_sampling(actual_samples)
        else:
            # "random" and any unrecognized strategy
            indices = random.sample(range(available_samples), actual_samples)

        # Herding with a task filter can legitimately return no indices;
        # torch.stack on an empty list would raise.
        if not indices:
            return None

        # Gather samples
        sampled_inputs = torch.stack([self.memory_inputs[i] for i in indices])
        sampled_targets = torch.stack([self.memory_targets[i] for i in indices])
        sampled_task_ids = torch.tensor([self.memory_task_ids[i] for i in indices])

        return sampled_inputs, sampled_targets, sampled_task_ids

    def get_memory_size(self) -> int:
        """Get current memory usage (interface implementation)."""
        return len(self.memory_inputs)

    def _reservoir_sampling_update(
        self, sample_input: torch.Tensor, sample_target: torch.Tensor, task_id: int
    ) -> None:
        """Update memory using reservoir sampling.

        Each of the first t samples ends up in memory with probability
        memory_size / t: the current (0-based) sample index is samples_seen,
        and randint(0, samples_seen) draws uniformly over t candidates.
        """
        if len(self.memory_inputs) < self.memory_size:
            # Memory not full, just add
            self.memory_inputs.append(sample_input)
            self.memory_targets.append(sample_target)
            self.memory_task_ids.append(task_id)
            self.memory_priorities.append(1.0)
        else:
            # Memory full, reservoir sampling
            replace_idx = random.randint(0, self.samples_seen)
            if replace_idx < self.memory_size:
                self.memory_inputs[replace_idx] = sample_input
                self.memory_targets[replace_idx] = sample_target
                self.memory_task_ids[replace_idx] = task_id
                self.memory_priorities[replace_idx] = 1.0

    def _ring_buffer_update(
        self, sample_input: torch.Tensor, sample_target: torch.Tensor, task_id: int
    ) -> None:
        """Update memory using a ring buffer (FIFO overwrite) strategy."""
        if len(self.memory_inputs) < self.memory_size:
            self.memory_inputs.append(sample_input)
            self.memory_targets.append(sample_target)
            self.memory_task_ids.append(task_id)
            self.memory_priorities.append(1.0)
        else:
            # Overwrite oldest sample
            oldest_idx = self.samples_seen % self.memory_size
            self.memory_inputs[oldest_idx] = sample_input
            self.memory_targets[oldest_idx] = sample_target
            self.memory_task_ids[oldest_idx] = task_id
            self.memory_priorities[oldest_idx] = 1.0

    def _priority_based_update(
        self, sample_input: torch.Tensor, sample_target: torch.Tensor, task_id: int
    ) -> None:
        """Update memory by evicting the lowest-priority stored sample."""
        priority = self._compute_sample_priority(sample_input, sample_target, task_id)

        if len(self.memory_inputs) < self.memory_size:
            self.memory_inputs.append(sample_input)
            self.memory_targets.append(sample_target)
            self.memory_task_ids.append(task_id)
            self.memory_priorities.append(priority)
        else:
            # Find lowest priority sample to replace
            min_priority_idx = min(
                range(len(self.memory_priorities)),
                key=lambda i: self.memory_priorities[i],
            )

            if priority > self.memory_priorities[min_priority_idx]:
                self.memory_inputs[min_priority_idx] = sample_input
                self.memory_targets[min_priority_idx] = sample_target
                self.memory_task_ids[min_priority_idx] = task_id
                self.memory_priorities[min_priority_idx] = priority

    def _compute_sample_priority(
        self, sample_input: torch.Tensor, sample_target: torch.Tensor, task_id: int
    ) -> float:
        """Compute priority for a sample (simplified implementation)."""
        # In a real implementation, this could use gradient norm, uncertainty, etc.
        return random.random()  # Placeholder implementation

    def _herding_sampling(
        self, num_samples: int, task_id: Optional[int] = None
    ) -> List[int]:
        """Herding-based sampling strategy (simplified).

        When task_id is given, only indices belonging to that task are
        eligible; the result may therefore hold fewer than num_samples
        indices (possibly none).
        """
        # In a real implementation, this would use class centroids
        available_indices = list(range(len(self.memory_inputs)))
        if task_id is not None:
            available_indices = [
                i for i in available_indices if self.memory_task_ids[i] == task_id
            ]

        return random.sample(
            available_indices, min(num_samples, len(available_indices))
        )

    def _gradient_based_sampling(self, num_samples: int) -> List[int]:
        """Gradient-based sampling strategy (simplified)."""
        # In a real implementation, this would use gradient diversity.
        # Clamp to the memory size: random.sample raises ValueError when
        # asked for more elements than the population holds.
        population = len(self.memory_inputs)
        return random.sample(range(population), min(num_samples, population))

    def _consolidate_memory(self, task_id: int) -> None:
        """Perform memory consolidation after task completion."""
        # Remove duplicate samples, balance task representation, etc.
        # This is a simplified implementation
        self._logger.debug(f"Memory consolidation completed for task {task_id}")

    def get_strategy_metrics(self) -> Dict[str, Any]:
        """Get strategy-specific metrics."""
        task_distribution = defaultdict(int)
        for task_id in self.memory_task_ids:
            task_distribution[task_id] += 1

        return {
            "memory_usage": len(self.memory_inputs),
            "memory_capacity": self.memory_size,
            # Guard against a zero-capacity configuration.
            "memory_utilization": (
                len(self.memory_inputs) / self.memory_size
                if self.memory_size > 0
                else 0.0
            ),
            "task_distribution": dict(task_distribution),
            "samples_seen": self.samples_seen,
            "replay_ratio": self.replay_ratio,
            "sampling_strategy": self.sampling_strategy,
            "memory_update_strategy": self.memory_update_strategy,
        }


# Register strategies with global dispatcher
def register_example_strategies():
    """Register example strategies with the global event dispatcher."""
    from continuallearning.events import global_dispatcher

    # Instantiate the example strategies and hand each one to the dispatcher.
    example_strategies = (
        AdvancedEWCStrategy(),
        AdvancedExperienceReplayStrategy(),
    )
    for strategy_instance in example_strategies:
        global_dispatcher.register_handler(strategy_instance)

    print("Registered example strategies with global dispatcher")


if __name__ == "__main__":
    # Example usage: register the strategies, then exercise the dispatcher.
    register_example_strategies()

    # Create sample events and test strategies. EventType is imported here
    # alongside the factory/dispatcher; the original import omitted it,
    # which raised a NameError below.
    from continuallearning.events import EventFactory, EventType, global_dispatcher

    # Create a task start event
    task_event = EventFactory.task(EventType.TASK_STARTED, task_id=1).build()

    # Dispatch the event
    result = global_dispatcher.dispatch(task_event)

    print(f"Event processing result: {result}")
