"""
Complete event-driven continual learning strategy implementations.

This module demonstrates how to implement sophisticated CL strategies using the
enhanced event system, showing full integration with the ContinualModule and
Lightning training pipeline.
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Any, Dict, List, Optional, Set, Union
import logging
from collections import defaultdict
from dataclasses import dataclass, field

from continuallearning.events.core.event_types import (
    EventFactory,
    EventSeverity,
    EventType,
    StrategyEvent,
    TaskEvent,
)
from continuallearning.events.core.handlers import BaseEventHandler
from continuallearning.events.core.dispatcher import (
    EventDispatcher as global_dispatcher,
)
from ..strategies.base import ContinualLearningStrategy


@dataclass
class EWCState:
    """State information for EWC strategy.

    Accumulated across tasks by ``EventDrivenEWC``.  Keys in the two tensor
    dictionaries are task-qualified as ``"{param_name}_task_{task_id}"``.
    """

    # Diagonal Fisher information per parameter, keyed per parameter and task.
    fisher_information: Dict[str, torch.Tensor] = field(default_factory=dict)
    # Parameter snapshots taken when a task completed, same key scheme.
    optimal_params: Dict[str, torch.Tensor] = field(default_factory=dict)
    # Per-task weighting of the penalty; decayed on task transitions.
    task_importance: Dict[int, float] = field(default_factory=dict)
    # IDs of tasks whose snapshot/Fisher state has been recorded.
    completed_tasks: Set[int] = field(default_factory=set)
    # Global multiplier applied to the quadratic EWC penalty.
    regularization_weight: float = 5000.0


class EventDrivenEWC(ContinualLearningStrategy):
    """
    Event-driven Elastic Weight Consolidation (EWC) implementation.

    This implementation showcases how to build a sophisticated CL strategy
    using the event system for all interactions with the training pipeline:
    task completion triggers parameter snapshots and Fisher-information
    estimation, and loss-computation requests are answered with the base
    task loss plus a quadratic penalty anchoring previously important
    weights.
    """

    def __init__(
        self,
        regularization_weight: float = 5000.0,
        fisher_estimation_samples: int = 1000,
        name: str = "EventDrivenEWC",
    ):
        """
        Args:
            regularization_weight: Global multiplier for the EWC penalty term.
            fisher_estimation_samples: Maximum number of samples used to
                estimate the Fisher information after each task.
            name: Strategy name reported in emitted events and log records.
        """
        super().__init__(name=name, priority=100)

        self.state = EWCState(regularization_weight=regularization_weight)
        self.fisher_estimation_samples = fisher_estimation_samples

        self._logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")

        # Register for specific events we care about
        self.register_event_handlers()

    def register_event_handlers(self) -> None:
        """Register event handlers for EWC-specific events."""
        # NOTE(review): ``global_dispatcher`` is imported as an alias of the
        # ``EventDispatcher`` *class*, so this call passes ``self`` (the
        # strategy) where the dispatcher instance is normally expected unless
        # ``register_handler`` is a classmethod — confirm against the
        # dispatcher module.
        global_dispatcher.register_handler(self)

        self._logger.info(f"Registered {self.name} strategy with event system")

    def get_handled_events(self) -> List[EventType]:
        """Return the event types this strategy handles."""
        return [
            EventType.TASK_COMPLETED,
            EventType.LOSS_COMPUTATION_REQUESTED,
            EventType.PARAMETER_IMPORTANCE_UPDATE_REQUESTED,
            EventType.STRATEGY_STATE_CHANGED,
        ]

    def _handle_event_impl(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Dispatch an incoming strategy event to its type-specific handler."""
        if event.event_type == EventType.TASK_COMPLETED:
            return self._handle_task_completion(event)
        elif event.event_type == EventType.LOSS_COMPUTATION_REQUESTED:
            return self._handle_loss_computation(event)
        elif event.event_type == EventType.PARAMETER_IMPORTANCE_UPDATE_REQUESTED:
            return self._handle_parameter_importance_update(event)
        elif event.event_type == EventType.STRATEGY_STATE_CHANGED:
            return self._handle_strategy_state_change(event)

        return None

    def _handle_task_completion(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle task completion by computing Fisher information.

        Snapshots the model parameters, estimates the Fisher information if
        a dataloader is attached to the event, and marks the task completed.

        Returns:
            A ``PARAMETER_IMPORTANCE_UPDATE_COMPLETED`` event on success, a
            ``STRATEGY_STATE_CHANGED`` error event on failure, or ``None``
            when required data is missing.
        """
        if event.task_id is None:
            self._logger.warning("Task completion event missing task_id")
            return None

        task_id = event.task_id
        model = event.model

        if model is None:
            self._logger.warning("Task completion event missing model")
            return None

        try:
            # Store optimal parameters for this task
            self._store_optimal_parameters(model, task_id)

            # Compute Fisher information if we have the data
            if hasattr(event, "dataloader") and event.dataloader is not None:
                self._compute_fisher_information(model, event.dataloader, task_id)

            # Mark task as completed; new tasks start at full importance.
            self.state.completed_tasks.add(task_id)
            self.state.task_importance[task_id] = 1.0

            self._logger.info(f"EWC: Completed processing for task {task_id}")

            # Return completion confirmation event
            return (
                EventFactory.strategy(
                    EventType.PARAMETER_IMPORTANCE_UPDATE_COMPLETED,
                    strategy_name=self.name,
                )
                .task_id(task_id)
                .response_data(
                    {
                        "fisher_computed": True,
                        "parameters_stored": True,
                        "task_completed": True,
                    }
                )
                .build()
            )

        except Exception as e:
            self._logger.error(
                f"Error handling task completion for task {task_id}: {e}"
            )

            # Return error event
            return (
                EventFactory.strategy(
                    EventType.STRATEGY_STATE_CHANGED, strategy_name=self.name
                )
                .task_id(task_id)
                .response_data({"error": str(e), "success": False})
                .severity(EventSeverity.ERROR)
                .build()
            )

    def _handle_loss_computation(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Handle loss computation request with EWC regularization.

        Computes ``criterion(logits, targets)`` plus the EWC penalty and
        returns both in a ``LOSS_COMPUTATION_COMPLETED`` response event.
        """
        try:
            request_data = event.request_data or {}
            model = request_data.get("model")
            criterion = request_data.get("criterion")
            targets = request_data.get("targets")

            # Explicit ``is None`` checks: ``all([model, criterion, targets])``
            # would call bool() on the targets tensor, which raises a
            # RuntimeError for tensors with more than one element.
            if model is None or criterion is None or targets is None:
                self._logger.warning("Loss computation event missing required data")
                return None

            # Get model outputs from event
            model_outputs = event.model_outputs
            if model_outputs is None:
                self._logger.warning("Loss computation event missing model outputs")
                return None

            # Accept either a raw logits tensor or a dict containing one.
            if isinstance(model_outputs, torch.Tensor):
                logits = model_outputs
            elif isinstance(model_outputs, dict) and "logits" in model_outputs:
                logits = model_outputs["logits"]
            else:
                self._logger.warning("Unable to extract logits from model outputs")
                return None

            base_loss = criterion(logits, targets)

            # Compute EWC regularization loss
            ewc_loss = self._compute_ewc_loss(model)

            # Combine losses
            total_loss = base_loss + ewc_loss

            loss_dict = {
                "loss": total_loss,
                "base_loss": base_loss,
                "ewc_loss": ewc_loss,
            }

            # Return response event with computed losses
            return (
                EventFactory.strategy(
                    EventType.LOSS_COMPUTATION_COMPLETED, strategy_name=self.name
                )
                .task_id(event.task_id)
                .batch_idx(event.batch_idx)
                .response_data({"loss_dict": loss_dict, "computation_successful": True})
                .build()
            )

        except Exception as e:
            self._logger.error(f"Error computing EWC loss: {e}")

            # Return error response
            return (
                EventFactory.strategy(
                    EventType.LOSS_COMPUTATION_COMPLETED, strategy_name=self.name
                )
                .task_id(event.task_id)
                .response_data({"error": str(e), "computation_successful": False})
                .severity(EventSeverity.ERROR)
                .build()
            )

    def _handle_parameter_importance_update(
        self, event: StrategyEvent
    ) -> Optional[StrategyEvent]:
        """Handle parameter importance update requests.

        Recomputes the Fisher information for ``event.task_id`` using the
        model and dataloader carried in the request data.
        """
        try:
            request_data = event.request_data or {}
            model = request_data.get("model")
            dataloader = request_data.get("dataloader")
            task_id = event.task_id

            # ``is None`` checks instead of truthiness: an empty DataLoader
            # has len() == 0 and would otherwise look "missing".
            if model is None or dataloader is None or task_id is None:
                self._logger.warning(
                    "Parameter importance update event missing required data"
                )
                return None

            # Compute Fisher information
            self._compute_fisher_information(model, dataloader, task_id)

            # Return success response
            return (
                EventFactory.strategy(
                    EventType.PARAMETER_IMPORTANCE_UPDATE_COMPLETED,
                    strategy_name=self.name,
                )
                .task_id(task_id)
                .response_data({"fisher_updated": True, "task_id": task_id})
                .build()
            )

        except Exception as e:
            self._logger.error(f"Error updating parameter importance: {e}")
            return None

    def _handle_strategy_state_change(
        self, event: StrategyEvent
    ) -> Optional[StrategyEvent]:
        """Handle strategy state change notifications.

        On ``"task_transition"`` actions the importance of the previous task
        is decayed so older tasks constrain training less over time.
        """
        request_data = event.request_data or {}
        action = request_data.get("action")

        if action == "task_transition":
            previous_task = request_data.get("previous_task")
            new_task = request_data.get("new_task")

            self._logger.info(
                f"EWC: Task transition from {previous_task} to {new_task}"
            )

            # Adjust importance weights if needed
            if (
                previous_task is not None
                and previous_task in self.state.task_importance
            ):
                # Gradually decrease importance of older tasks
                decay_factor = 0.9
                self.state.task_importance[previous_task] *= decay_factor

        return None

    def _store_optimal_parameters(self, model: nn.Module, task_id: int) -> None:
        """Snapshot current trainable parameters under task-qualified keys."""
        for name, param in model.named_parameters():
            if param.requires_grad:
                self.state.optimal_params[f"{name}_task_{task_id}"] = param.data.clone()

    def _compute_fisher_information(
        self, model: nn.Module, dataloader: torch.utils.data.DataLoader, task_id: int
    ) -> None:
        """Compute Fisher Information Matrix for the given task.

        Estimates the diagonal Fisher by sampling labels from the model's
        own predictive distribution and accumulating squared log-likelihood
        gradients over at most ``fisher_estimation_samples`` samples.
        """
        fisher_dict: Dict[str, torch.Tensor] = {}

        # Initialize Fisher information
        for name, param in model.named_parameters():
            if param.requires_grad:
                fisher_dict[name] = torch.zeros_like(param.data)

        was_training = model.training
        model.eval()
        samples_processed = 0

        # NOTE: autograd must stay enabled here — wrapping this loop in
        # torch.no_grad() (as a naive reading might suggest) would make
        # log_likelihood.backward() fail, since no graph would be recorded.
        for batch in dataloader:
            if samples_processed >= self.fisher_estimation_samples:
                break

            # Extract inputs from (inputs, targets, ...) tuples or
            # {"inputs": ...} dicts; ground-truth targets are not needed
            # because labels are sampled from the model below.
            if isinstance(batch, (tuple, list)):
                inputs = batch[0]
            elif isinstance(batch, dict):
                inputs = batch["inputs"]
            else:
                continue

            # Forward pass
            model.zero_grad()
            outputs = model(inputs)

            # Extract logits
            if isinstance(outputs, torch.Tensor):
                logits = outputs
            elif isinstance(outputs, dict) and "logits" in outputs:
                logits = outputs["logits"]
            else:
                continue

            # Sample from model distribution (the "true" Fisher, as opposed
            # to the empirical Fisher that uses ground-truth labels).
            probs = F.softmax(logits, dim=1)
            # squeeze(1), not squeeze(): a batch of size 1 would otherwise
            # collapse to a 0-dim target and break cross_entropy.
            sampled_targets = torch.multinomial(probs, 1).squeeze(1)

            # Compute log likelihood
            log_likelihood = F.cross_entropy(logits, sampled_targets)

            # Backward pass to get gradients
            log_likelihood.backward()

            # Accumulate squared gradients (Fisher information)
            for name, param in model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    fisher_dict[name] += param.grad.data**2

            samples_processed += inputs.size(0)

        # Restore the mode the caller left the model in.
        if was_training:
            model.train()

        # Guard against an empty/unusable dataloader: avoid division by zero
        # and leave any previously stored Fisher information untouched.
        if samples_processed == 0:
            self._logger.warning(
                "No usable samples while estimating Fisher information for "
                f"task {task_id}; skipping update"
            )
            return

        # Normalize Fisher information
        for name in fisher_dict:
            fisher_dict[name] /= samples_processed
            # Store with task-specific key
            self.state.fisher_information[f"{name}_task_{task_id}"] = fisher_dict[name]

        self._logger.info(
            f"Computed Fisher information for task {task_id} using {samples_processed} samples"
        )

    def _compute_ewc_loss(self, model: nn.Module) -> torch.Tensor:
        """Compute EWC regularization loss.

        Sums ``lambda * importance * F_i * (theta_i - theta_i*)**2`` over all
        trainable parameters and all completed tasks for which both Fisher
        information and a parameter snapshot were recorded.
        """
        ewc_loss = torch.tensor(0.0, device=next(model.parameters()).device)

        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue

            # Accumulate loss from all previous tasks
            for task_id in self.state.completed_tasks:
                # Fisher and snapshot share the same task-qualified key.
                key = f"{name}_task_{task_id}"
                fisher = self.state.fisher_information.get(key)
                optimal_param = self.state.optimal_params.get(key)
                if fisher is None or optimal_param is None:
                    continue

                task_importance = self.state.task_importance.get(task_id, 1.0)

                # EWC loss term
                loss_term = fisher * (param - optimal_param) ** 2
                ewc_loss += (
                    self.state.regularization_weight
                    * task_importance
                    * loss_term.sum()
                )

        return ewc_loss

    def get_parameters_to_optimize(self, model: nn.Module) -> List[torch.nn.Parameter]:
        """Return parameters that should be optimized (all of them for EWC)."""
        return list(model.parameters())

    def compute_loss(
        self,
        model: nn.Module,
        criterion: nn.Module,
        output: Union[torch.Tensor, Dict[str, torch.Tensor]],
        targets: torch.Tensor,
        task_id: int,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """
        Fallback method for direct loss computation (when events aren't used).

        Returns:
            Dict with ``"loss"`` (total), ``"base_loss"`` and ``"ewc_loss"``.

        Raises:
            ValueError: If logits cannot be extracted from ``output``.
        """
        # Extract logits
        if isinstance(output, torch.Tensor):
            logits = output
        elif isinstance(output, dict) and "logits" in output:
            logits = output["logits"]
        else:
            raise ValueError("Unable to extract logits from model output")

        # Compute base loss
        base_loss = criterion(logits, targets)

        # Compute EWC regularization
        ewc_loss = self._compute_ewc_loss(model)

        # Total loss
        total_loss = base_loss + ewc_loss

        return {"loss": total_loss, "base_loss": base_loss, "ewc_loss": ewc_loss}


class EventDrivenExperienceReplay(ContinualLearningStrategy):
    """
    Event-driven Experience Replay implementation.

    Keeps a FIFO buffer of past ``(input, target, task_id)`` samples and,
    on request, mixes a random draw from that buffer into incoming batches.
    All interaction with the training pipeline happens through events.
    """

    def __init__(
        self,
        memory_size: int = 1000,
        replay_batch_size: int = 32,
        name: str = "EventDrivenExperienceReplay",
    ):
        """
        Args:
            memory_size: Maximum number of samples retained in the buffer.
            replay_batch_size: Upper bound on samples mixed into each batch.
            name: Strategy name reported in emitted events and log records.
        """
        super().__init__(name=name, priority=90)

        self.memory_size = memory_size
        self.replay_batch_size = replay_batch_size

        # Three parallel lists: index i describes one stored sample.
        self.memory_buffer = []
        self.memory_targets = []
        self.memory_task_ids = []

        self._logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")

        # Hook this strategy into the event system immediately.
        self.register_event_handlers()

    def register_event_handlers(self) -> None:
        """Register this strategy with the global event dispatcher."""
        global_dispatcher.register_handler(self)
        self._logger.info(f"Registered {self.name} strategy with event system")

    def get_handled_events(self) -> List[EventType]:
        """Return the event types this strategy handles."""
        return [
            EventType.BATCH_PREPROCESSING_REQUESTED,
            EventType.MEMORY_UPDATE_REQUESTED,
            EventType.TASK_COMPLETED,
        ]

    def _handle_event_impl(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Route an incoming event to the matching handler, if any."""
        routing = {
            EventType.BATCH_PREPROCESSING_REQUESTED: self._handle_batch_preprocessing,
            EventType.MEMORY_UPDATE_REQUESTED: self._handle_memory_update,
            EventType.TASK_COMPLETED: self._handle_task_completion,
        }
        handler = routing.get(event.event_type)
        return handler(event) if handler is not None else None

    def _handle_batch_preprocessing(
        self, event: StrategyEvent
    ) -> Optional[StrategyEvent]:
        """Mix a random draw of stored samples into the incoming batch."""
        try:
            if not self.memory_buffer:
                # Nothing stored yet, so leave the batch untouched.
                return None

            batch = event.batch_data
            if batch is None:
                return None

            # Accept (inputs, targets[, task_ids]) sequences or dicts with
            # the corresponding keys; anything else passes through unchanged.
            if isinstance(batch, (tuple, list)):
                cur_inputs = batch[0]
                cur_targets = batch[1]
                cur_task_ids = batch[2] if len(batch) > 2 else None
            elif isinstance(batch, dict):
                cur_inputs = batch["inputs"]
                cur_targets = batch["targets"]
                cur_task_ids = batch.get("task_ids")
            else:
                return None

            # Draw up to ``replay_batch_size`` distinct buffer positions.
            chosen = torch.randperm(len(self.memory_buffer))[
                : self.replay_batch_size
            ]
            replay_inputs = torch.stack([self.memory_buffer[i] for i in chosen])
            replay_targets = torch.stack([self.memory_targets[i] for i in chosen])
            replay_task_ids = torch.tensor([self.memory_task_ids[i] for i in chosen])

            # Append the replayed samples after the live ones.
            merged_inputs = torch.cat([cur_inputs, replay_inputs], dim=0)
            merged_targets = torch.cat([cur_targets, replay_targets], dim=0)
            merged_task_ids = (
                torch.cat([cur_task_ids, replay_task_ids], dim=0)
                if cur_task_ids is not None
                else replay_task_ids
            )

            # Mirror the layout of the incoming batch in the merged one.
            if isinstance(batch, dict):
                merged_batch = {
                    "inputs": merged_inputs,
                    "targets": merged_targets,
                    "task_ids": merged_task_ids,
                }
            else:
                merged_batch = (merged_inputs, merged_targets, merged_task_ids)

            return (
                EventFactory.strategy(
                    EventType.BATCH_PREPROCESSING_COMPLETED, strategy_name=self.name
                )
                .task_id(event.task_id)
                .batch_idx(event.batch_idx)
                .batch_data(merged_batch)
                .response_data(
                    {
                        "replay_samples_added": len(chosen),
                        "original_batch_size": cur_inputs.size(0),
                        "final_batch_size": merged_inputs.size(0),
                    }
                )
                .build()
            )

        except Exception as e:
            self._logger.error(f"Error in batch preprocessing: {e}")
            return None

    def _handle_memory_update(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Append a batch of samples to the FIFO memory buffer."""
        try:
            payload = event.request_data or {}
            inputs = payload.get("inputs")
            targets = payload.get("targets")
            task_id = event.task_id

            if inputs is None or targets is None or task_id is None:
                return None

            batch_size = inputs.size(0)
            for idx in range(batch_size):
                # FIFO eviction: drop the oldest sample once at capacity.
                if len(self.memory_buffer) >= self.memory_size:
                    self.memory_buffer.pop(0)
                    self.memory_targets.pop(0)
                    self.memory_task_ids.pop(0)

                self.memory_buffer.append(inputs[idx].detach())
                self.memory_targets.append(targets[idx].detach())
                self.memory_task_ids.append(task_id)

            return (
                EventFactory.strategy(
                    EventType.MEMORY_UPDATE_COMPLETED, strategy_name=self.name
                )
                .task_id(task_id)
                .response_data(
                    {
                        "samples_added": batch_size,
                        "memory_size": len(self.memory_buffer),
                        "memory_capacity": self.memory_size,
                    }
                )
                .build()
            )

        except Exception as e:
            self._logger.error(f"Error updating memory: {e}")
            return None

    def _handle_task_completion(self, event: StrategyEvent) -> Optional[StrategyEvent]:
        """Log buffer statistics when a task finishes; emits no event."""
        task_id = event.task_id
        if task_id is None:
            return None

        memory_stats = {
            "task_completed": task_id,
            "memory_samples": len(self.memory_buffer),
            "task_distribution": self._get_task_distribution(),
        }
        self._logger.info(f"Task {task_id} completed. Memory stats: {memory_stats}")
        return None

    def _get_task_distribution(self) -> Dict[int, int]:
        """Count how many buffered samples belong to each task."""
        counts: Dict[int, int] = {}
        for tid in self.memory_task_ids:
            counts[tid] = counts.get(tid, 0) + 1
        return counts

    def get_parameters_to_optimize(self, model: nn.Module) -> List[torch.nn.Parameter]:
        """Optimize every model parameter; replay freezes nothing."""
        return [*model.parameters()]

    def compute_loss(
        self,
        model: nn.Module,
        criterion: nn.Module,
        output: Union[torch.Tensor, Dict[str, torch.Tensor]],
        targets: torch.Tensor,
        task_id: int,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """
        Fallback method for direct loss computation.

        Replay itself happens during batch preprocessing, so this is simply
        the criterion applied to the (possibly already-augmented) batch.
        """
        if isinstance(output, dict) and "logits" in output:
            logits = output["logits"]
        elif isinstance(output, torch.Tensor):
            logits = output
        else:
            raise ValueError("Unable to extract logits from model output")

        return {"loss": criterion(logits, targets)}


# Public API: the two event-driven strategies and the EWC state container.
__all__ = ["EventDrivenEWC", "EventDrivenExperienceReplay", "EWCState"]
