"""
Concrete implementations of continual learning strategies using the event system.

This module provides ready-to-use implementations of popular CL strategies.
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from typing import Any, Dict, List, Optional, Tuple
import random
import numpy as np

from .base import RegularizationStrategy, MemoryBasedStrategy
from ..core.event_types import EventType, EventFactory


class EWCStrategy(RegularizationStrategy):
    """
    Elastic Weight Consolidation (EWC) strategy implementation.

    Protects important parameters from previous tasks by adding a quadratic
    penalty based on a diagonal Fisher Information Matrix estimated from
    squared gradients of the task loss.
    """

    def __init__(
        self,
        regularization_weight: float = 5000.0,
        fisher_estimation_samples: int = 1000,
        name: Optional[str] = None,
        **kwargs,
    ):
        """
        Initialize EWC strategy.

        Args:
            regularization_weight: Weight for the EWC regularization term
            fisher_estimation_samples: Number of samples for Fisher estimation
            name: Optional name for this strategy
            **kwargs: Additional arguments for parent class
        """
        super().__init__(regularization_weight, name or "EWC", **kwargs)
        self.fisher_estimation_samples = fisher_estimation_samples
        # Diagonal Fisher estimate per parameter name, updated after each task.
        self.fisher_information: Dict[str, torch.Tensor] = {}
        # Parameter snapshot taken at the end of the most recent task.
        self.optimal_params: Dict[str, torch.Tensor] = {}

    def compute_regularization_loss(
        self, model: nn.Module, task_id: int
    ) -> torch.Tensor:
        """Compute the EWC penalty: sum_i F_i * (theta_i - theta*_i)^2.

        Returns a scalar tensor on the model's device; zero when no Fisher
        information has been collected yet (e.g. during the first task).
        """
        ewc_loss = torch.tensor(0.0, device=next(model.parameters()).device)

        for name, param in model.named_parameters():
            # Only penalize parameters that have both a Fisher estimate and a
            # reference value; newly added parameters stay unconstrained.
            if name in self.fisher_information and name in self.optimal_params:
                fisher = self.fisher_information[name]
                optimal = self.optimal_params[name]
                ewc_loss += (fisher * (param - optimal) ** 2).sum()

        return ewc_loss

    def on_task_end(self, task_id: int, **kwargs) -> None:
        """Update Fisher information and optimal parameters at task end.

        Expects ``model`` and ``dataloader`` entries in ``kwargs``; logs a
        warning and skips the update when either is missing.
        """
        super().on_task_end(task_id, **kwargs)

        # Get dataloader from kwargs if available
        dataloader = kwargs.get("dataloader")
        model = kwargs.get("model")

        if dataloader is not None and model is not None:
            self._update_fisher_information(model, dataloader)
        else:
            self._logger.warning(
                "DataLoader or model not provided for Fisher information update"
            )

    def _update_fisher_information(
        self, model: nn.Module, dataloader: DataLoader
    ) -> None:
        """Update the diagonal Fisher Information Matrix from current task data.

        Accumulates squared gradients of the cross-entropy loss over up to
        ``fisher_estimation_samples`` samples, then normalizes by the number of
        samples actually processed. Also snapshots the current parameters as
        the reference point for the quadratic penalty.
        """
        self._logger.info(f"Updating Fisher information for task {self.current_task}")

        # Store optimal parameters (reference point for the penalty).
        for name, param in model.named_parameters():
            self.optimal_params[name] = param.data.clone()

        # Remember the current mode so we can restore it afterwards instead of
        # unconditionally forcing train mode (avoids clobbering eval models).
        was_training = model.training
        model.eval()

        # Hoist the device lookup out of the batch loop.
        device = next(model.parameters()).device

        # Initialize Fisher information accumulators.
        fisher_info = {
            name: torch.zeros_like(param.data)
            for name, param in model.named_parameters()
        }

        # Sample data for Fisher estimation.
        sample_count = 0
        with torch.enable_grad():
            for batch in dataloader:
                if sample_count >= self.fisher_estimation_samples:
                    break

                # Extract inputs and targets from tuple-style or
                # attribute-style batches.
                if isinstance(batch, (tuple, list)):
                    inputs, targets = batch[:2]
                else:
                    inputs, targets = batch.inputs, batch.targets

                inputs = inputs.to(device)
                targets = targets.to(device)

                # Forward pass
                model.zero_grad()
                outputs = model(inputs)

                # Extract logits; models may return a raw tensor or a dict.
                # NOTE(review): if the dict has neither key, this falls back to
                # the dict itself and cross_entropy would fail — assumes models
                # always expose "logits" or "predictions"; confirm upstream.
                if isinstance(outputs, dict):
                    logits = outputs.get("logits", outputs.get("predictions", outputs))
                else:
                    logits = outputs

                # Compute loss for gradient calculation
                loss = F.cross_entropy(logits, targets)
                loss.backward()

                # Accumulate squared gradients (diagonal Fisher approximation).
                for name, param in model.named_parameters():
                    if param.grad is not None:
                        fisher_info[name] += param.grad.data**2

                sample_count += inputs.size(0)

        if sample_count == 0:
            # Empty dataloader: avoid a ZeroDivisionError during normalization
            # and keep any previously stored Fisher information untouched.
            self._logger.warning(
                "Empty dataloader: Fisher information was not updated"
            )
            model.train(was_training)
            return

        # Normalize Fisher information by the number of samples seen.
        for name in fisher_info:
            fisher_info[name] /= sample_count

        # Store Fisher information (overwrites per-parameter entries).
        self.fisher_information.update(fisher_info)

        # Restore the mode the model was in before the update.
        model.train(was_training)
        self._logger.info(f"Fisher information updated with {sample_count} samples")

    def state_dict(self) -> Dict[str, Any]:
        """Get EWC strategy state with tensors moved to CPU for portability."""
        state = super().state_dict()
        state.update(
            {
                "fisher_information": {
                    k: v.cpu() for k, v in self.fisher_information.items()
                },
                "optimal_params": {k: v.cpu() for k, v in self.optimal_params.items()},
                "regularization_weight": self.regularization_weight,
            }
        )
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Load EWC strategy state, keeping tensors on the current device."""
        super().load_state_dict(state_dict)

        # Reuse the device of any already-present Fisher tensors; default CPU.
        device = (
            next(iter(self.fisher_information.values())).device
            if self.fisher_information
            else "cpu"
        )

        self.fisher_information = {
            k: v.to(device) for k, v in state_dict.get("fisher_information", {}).items()
        }
        self.optimal_params = {
            k: v.to(device) for k, v in state_dict.get("optimal_params", {}).items()
        }
        self.regularization_weight = state_dict.get(
            "regularization_weight", self.regularization_weight
        )


class ExperienceReplayStrategy(MemoryBasedStrategy):
    """
    Experience Replay strategy implementation.

    Maintains a buffer of past experiences and replays them
    during training to prevent catastrophic forgetting.
    """

    def __init__(
        self,
        memory_size: int = 1000,
        replay_ratio: float = 0.5,
        selection_strategy: str = "random",
        name: Optional[str] = None,
        **kwargs,
    ):
        """
        Initialize Experience Replay strategy.

        Args:
            memory_size: Maximum number of exemplars to store
            replay_ratio: Ratio of replay samples to current batch samples
            selection_strategy: Strategy for selecting exemplars ("random", "balanced")
            name: Optional name for this strategy
            **kwargs: Additional arguments for parent class
        """
        super().__init__(memory_size, name or "ExperienceReplay", **kwargs)
        self.replay_ratio = replay_ratio
        self.selection_strategy = selection_strategy

    def select_exemplars(
        self, dataloader: DataLoader, task_id: int, num_exemplars: int
    ) -> Dict[str, List[Any]]:
        """Select exemplars using the configured selection strategy.

        Args:
            dataloader: Source of the current task's data
            task_id: Identifier stored alongside each selected exemplar
            num_exemplars: Maximum number of exemplars to select

        Returns:
            Dict with "inputs", "targets", "task_ids" lists (empty when the
            dataloader yields no data).

        Raises:
            ValueError: If ``selection_strategy`` is unknown.
        """
        exemplars = {"inputs": [], "targets": [], "task_ids": []}

        # Collect all data from the dataloader (tuple- or attribute-style).
        all_inputs, all_targets = [], []
        for batch in dataloader:
            if isinstance(batch, (tuple, list)):
                inputs, targets = batch[:2]
            else:
                inputs, targets = batch.inputs, batch.targets

            all_inputs.append(inputs)
            all_targets.append(targets)

        if not all_inputs:
            return exemplars

        # Concatenate all data
        all_inputs = torch.cat(all_inputs, dim=0)
        all_targets = torch.cat(all_targets, dim=0)

        # Select exemplar indices based on strategy
        if self.selection_strategy == "random":
            indices = torch.randperm(len(all_inputs))[:num_exemplars]
        elif self.selection_strategy == "balanced":
            indices = self._balanced_selection(all_targets, num_exemplars)
        else:
            raise ValueError(f"Unknown selection strategy: {self.selection_strategy}")

        # Extract selected exemplars as per-sample tensors
        selected_inputs = all_inputs[indices]
        selected_targets = all_targets[indices]

        exemplars["inputs"] = [selected_inputs[i] for i in range(len(selected_inputs))]
        exemplars["targets"] = [
            selected_targets[i] for i in range(len(selected_targets))
        ]
        exemplars["task_ids"] = [task_id] * len(selected_inputs)

        return exemplars

    def _balanced_selection(
        self, targets: torch.Tensor, num_exemplars: int
    ) -> torch.Tensor:
        """Select exemplar indices balanced across the classes in ``targets``.

        Each class gets ``num_exemplars // num_classes`` slots; the remainder
        is distributed one-per-class to the first classes so that fewer
        exemplars than classes still yields a non-empty selection (previously
        the integer division silently produced zero picks in that case).
        """
        unique_classes = torch.unique(targets)
        num_classes = len(unique_classes)
        if num_classes == 0:
            # No data at all: return an empty index tensor instead of letting
            # torch.cat fail on an empty list.
            return torch.empty(0, dtype=torch.long)

        base_quota = num_exemplars // num_classes
        remainder = num_exemplars % num_classes

        selected_indices = []
        for class_pos, class_id in enumerate(unique_classes):
            # First `remainder` classes absorb the leftover slots.
            quota = base_quota + (1 if class_pos < remainder else 0)
            class_indices = torch.where(targets == class_id)[0]
            if len(class_indices) > quota:
                selected = class_indices[
                    torch.randperm(len(class_indices))[:quota]
                ]
            else:
                selected = class_indices
            selected_indices.append(selected)

        return torch.cat(selected_indices)

    def preprocess_batch(
        self, batch: Any, batch_idx: int, task_id: int, **kwargs
    ) -> Optional[Any]:
        """Augment the incoming batch with samples replayed from memory.

        Returns the combined batch in the same format as the input, or None
        when no replay is needed (first task or empty memory).
        """
        if not self.memory["inputs"] or task_id == 0:
            return None  # No replay needed for first task or empty memory

        # Extract current batch data
        if isinstance(batch, (tuple, list)):
            current_inputs, current_targets = batch[:2]
        else:
            current_inputs, current_targets = batch.inputs, batch.targets

        # Calculate number of replay samples (at least one).
        batch_size = current_inputs.size(0)
        num_replay = max(1, int(batch_size * self.replay_ratio))

        # Sample from memory
        replay_samples = self._sample_from_memory(num_replay)
        if not replay_samples:
            return None

        # Combine current batch with replay samples
        replay_inputs = torch.stack(replay_samples["inputs"])
        replay_targets = torch.stack(replay_samples["targets"])

        # Ensure same device as the current batch
        replay_inputs = replay_inputs.to(current_inputs.device)
        replay_targets = replay_targets.to(current_targets.device)

        # Concatenate along the batch dimension
        combined_inputs = torch.cat([current_inputs, replay_inputs], dim=0)
        combined_targets = torch.cat([current_targets, replay_targets], dim=0)

        # Return combined batch in same format as input
        if isinstance(batch, (tuple, list)):
            result = [combined_inputs, combined_targets]
            if len(batch) > 2:
                result.extend(list(batch[2:]))
            return tuple(result)
        else:
            # Assume batch is an object with inputs/targets attributes.
            # NOTE(review): assumes the batch type has a no-argument
            # constructor and that copying attributes via dir() is safe
            # (bound methods are copied too) — confirm with the batch class.
            combined_batch = type(batch)()
            combined_batch.inputs = combined_inputs
            combined_batch.targets = combined_targets
            # Copy other public attributes across
            for attr in dir(batch):
                if not attr.startswith("_") and attr not in ["inputs", "targets"]:
                    setattr(combined_batch, attr, getattr(batch, attr))
            return combined_batch

    def _sample_from_memory(self, num_samples: int) -> Dict[str, List[torch.Tensor]]:
        """Sample up to ``num_samples`` stored exemplars uniformly at random.

        Returns an empty dict when memory is empty.
        """
        if not self.memory["inputs"]:
            return {}

        memory_size = len(self.memory["inputs"])
        # Sample without replacement, capped at the available memory size.
        sample_indices = random.sample(
            range(memory_size), min(num_samples, memory_size)
        )

        return {
            "inputs": [self.memory["inputs"][i] for i in sample_indices],
            "targets": [self.memory["targets"][i] for i in sample_indices],
            "task_ids": [self.memory["task_ids"][i] for i in sample_indices],
        }

    def compute_strategy_loss(
        self,
        model: nn.Module | None,
        criterion: nn.Module | None,
        outputs: torch.Tensor | None,
        targets: torch.Tensor | None,
        task_id: int | None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Compute the standard loss (replay mixing happens in preprocessing).

        Returns a zero loss on the model's device (or CPU) when any of
        ``outputs``, ``targets`` or ``criterion`` is missing.
        """
        # Validate inputs
        if outputs is None or targets is None or criterion is None:
            device = next(model.parameters()).device if model is not None else "cpu"
            return {"loss": torch.tensor(0.0, device=device)}

        # Extract logits if outputs is a dict
        if isinstance(outputs, dict):
            logits = outputs.get("logits", outputs.get("predictions", outputs))
        else:
            logits = outputs

        loss = criterion(logits, targets)

        return {
            "loss": loss,
            "replay_loss": loss,  # All loss is replay loss in this strategy
        }

    def state_dict(self) -> Dict[str, Any]:
        """Get Experience Replay strategy state with tensors moved to CPU."""
        state = super().state_dict()
        state.update(
            {
                "replay_ratio": self.replay_ratio,
                "selection_strategy": self.selection_strategy,
                "memory": {
                    "inputs": [
                        tensor.cpu() if torch.is_tensor(tensor) else tensor
                        for tensor in self.memory["inputs"]
                    ],
                    "targets": [
                        tensor.cpu() if torch.is_tensor(tensor) else tensor
                        for tensor in self.memory["targets"]
                    ],
                    "task_ids": self.memory["task_ids"],
                },
            }
        )
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Load Experience Replay strategy state."""
        super().load_state_dict(state_dict)

        self.replay_ratio = state_dict.get("replay_ratio", self.replay_ratio)
        self.selection_strategy = state_dict.get(
            "selection_strategy", self.selection_strategy
        )

        memory_data = state_dict.get("memory", {})
        self.memory = {
            "inputs": memory_data.get("inputs", []),
            "targets": memory_data.get("targets", []),
            "task_ids": memory_data.get("task_ids", []),
        }


class L2RegularizationStrategy(RegularizationStrategy):
    """
    Simple L2 regularization strategy.

    Penalizes the squared L2 distance between the model's current parameters
    and a snapshot of the parameters taken at the start of the first task.
    """

    def __init__(
        self, regularization_weight: float = 0.01, name: Optional[str] = None, **kwargs
    ):
        """Initialize L2 regularization strategy."""
        super().__init__(regularization_weight, name or "L2Regularization", **kwargs)
        # Anchor parameters captured when task 0 starts, keyed by name.
        self.initial_params: Dict[str, torch.Tensor] = {}

    def on_task_start(self, task_id: int, **kwargs) -> None:
        """Snapshot model parameters when the very first task begins."""
        super().on_task_start(task_id, **kwargs)

        # Only the first task defines the anchor point.
        if task_id != 0:
            return
        model = kwargs.get("model")
        if model is None:
            return
        self.initial_params.update(
            (param_name, param.data.clone())
            for param_name, param in model.named_parameters()
        )

    def compute_regularization_loss(
        self, model: nn.Module, task_id: int
    ) -> torch.Tensor:
        """Return the squared L2 distance from the stored initial parameters."""
        device = next(model.parameters()).device
        penalty = torch.tensor(0.0, device=device)

        for param_name, param in model.named_parameters():
            anchor = self.initial_params.get(param_name)
            if anchor is None:
                # Parameters without a stored anchor are left unpenalized.
                continue
            penalty = penalty + torch.norm(param - anchor, p=2) ** 2

        return penalty

    def state_dict(self) -> Dict[str, Any]:
        """Get L2 strategy state with anchor tensors moved to CPU."""
        state = super().state_dict()
        state["initial_params"] = {
            key: tensor.cpu() for key, tensor in self.initial_params.items()
        }
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Load L2 strategy state, keeping anchors on the current device."""
        super().load_state_dict(state_dict)

        # Reuse the device of any anchors already present; default to CPU.
        if self.initial_params:
            target_device = next(iter(self.initial_params.values())).device
        else:
            target_device = "cpu"

        loaded_anchors = state_dict.get("initial_params", {})
        self.initial_params = {
            key: tensor.to(target_device) for key, tensor in loaded_anchors.items()
        }
