"""
Implementation of Elastic Weight Consolidation (EWC) for continual learning.

EWC is a regularization-based approach that prevents catastrophic forgetting by
constraining important parameters to stay close to their previous values.

Reference:
    Kirkpatrick, J. et al. "Overcoming catastrophic forgetting in neural networks."
    Proceedings of the National Academy of Sciences 114.13 (2017): 3521-3526.
"""

import logging
from typing import Dict, Any
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

from .base import RegularizationMethod
from ..registry import METHOD_REGISTRY
from ..events.event_types import EventType, TaskEvent
from ..events.handlers import on_event
from ..events.dispatcher import global_dispatcher


@METHOD_REGISTRY.register()
class EWC(RegularizationMethod):
    """
    Elastic Weight Consolidation (EWC) method.

    EWC prevents catastrophic forgetting by adding a penalty term to the loss
    function that discourages updates to parameters that were important for
    previous tasks. Importance is estimated with the diagonal of the Fisher
    information matrix (the expectation of squared loss gradients), which
    approximates the curvature of the loss surface around each task optimum.

    Args:
        regularization_weight: Weight of the regularization loss
        fisher_samples: Number of samples to use for Fisher estimation
        fisher_alpha: Decay factor for Fisher information across tasks
            (0 = no decay, 1 = complete replacement)
        **kwargs: Additional arguments forwarded to the base class
    """

    # Abort Fisher estimation after this many consecutive unusable batches.
    # Without a cap, a dataloader that keeps yielding malformed/failing batches
    # never advances `processed_samples` and the sampling loop spins forever.
    _MAX_CONSECUTIVE_SKIPS = 25

    def __init__(
        self,
        regularization_weight: float = 1.0,
        fisher_samples: int = 200,
        fisher_alpha: float = 0.4,
        **kwargs,
    ) -> None:
        """
        Initialize the EWC method.

        Args:
            regularization_weight: Weight of the regularization loss
            fisher_samples: Number of samples to use for Fisher estimation
            fisher_alpha: Decay factor for Fisher information across tasks
            **kwargs: Additional arguments forwarded to the base class
        """
        super().__init__(regularization_weight=regularization_weight, **kwargs)
        self.fisher_samples = fisher_samples
        self.fisher_alpha = fisher_alpha
        self.logger = logging.getLogger(__name__)

        # Snapshot of the parameters taken at the end of the previous task;
        # the regularization loss penalizes drift away from these values.
        self.prev_params: Dict[str, torch.Tensor] = {}

        # Register task end event handler
        self._register_event_handlers()

    def _register_event_handlers(self) -> None:
        """Register event handlers for task events."""
        # on_event(...) returns a decorator whose application performs the
        # registration as a side effect; the wrapped handler itself is not
        # needed here, so no local binding is kept.
        on_event(EventType.TASK_END, global_dispatcher, name="EWC_TaskEnd_Handler")(
            self._handle_task_end_event
        )

    def _handle_task_end_event(self, event: TaskEvent) -> None:
        """
        Handle task end events to update parameter importance.

        Silently skips the update when the event is missing a task id, a
        module, or a dataloader; any exception during the update is logged
        rather than propagated so training continues.
        """
        if event.task_id is None:
            self.logger.warning("Received task end event with no task_id")
            return

        if not event.pl_module or not event.dataloader:
            self.logger.debug(
                "Skipping importance update - missing module or dataloader"
            )
            return

        # Update parameter importance using event data
        self.logger.info(f"EWC updating parameter importance for task {event.task_id}")
        try:
            # Unwrap a Lightning-style wrapper if it exposes a `model` attribute.
            model = getattr(event.pl_module, "model", event.pl_module)
            self.update_parameter_importance(model, event.task_id, event.dataloader)
        except Exception as e:
            self.logger.error(f"Error updating parameter importance: {str(e)}")

    def compute_regularization_loss(
        self, model: nn.Module, task_id: int
    ) -> torch.Tensor:
        """
        Compute EWC regularization loss.

        The penalty is the sum over previous tasks of
        ``F_i * (theta_i - theta*_i)^2`` — Fisher-weighted squared deviation of
        each parameter from its value at the end of the earlier task — scaled
        by ``regularization_weight``.

        Args:
            model: The model being trained
            task_id: Current task identifier

        Returns:
            Scalar regularization loss tensor on the model's device
        """
        device = self._get_device(model)

        # No regularization on the first task or before any importance exists.
        if task_id == 0 or not self.parameter_importance:
            return torch.tensor(0.0, device=device)

        loss = torch.tensor(0.0, device=device)

        # Nothing to anchor against if no parameter snapshot has been stored.
        if not self.prev_params:
            return loss

        # Accumulate the penalty over every previous task with stored importance.
        for prev_task_id in range(task_id):
            if prev_task_id not in self.parameter_importance:
                continue
            importance = self.parameter_importance[prev_task_id]
            for name, param in model.named_parameters():
                if (
                    param.requires_grad
                    and name in self.prev_params
                    and name in importance
                ):
                    # Weight importance (Fisher) * squared difference from previous value
                    fisher = importance[name]
                    loss = loss + (
                        fisher * (param - self.prev_params[name]).pow(2)
                    ).sum()

        return loss * self.regularization_weight

    def _get_device(self, model: nn.Module) -> torch.device:
        """Helper to get the device from a model (CPU if it has no parameters)."""
        try:
            return next(model.parameters()).device
        except StopIteration:
            return torch.device("cpu")

    def update_parameter_importance(
        self, model: nn.Module, task_id: int, task_dataloader: DataLoader
    ) -> None:
        """
        Update parameter importance after learning a task.

        Snapshots the current trainable parameters, estimates the diagonal
        Fisher information over up to ``fisher_samples`` samples from
        ``task_dataloader``, blends it with earlier tasks' importance using
        ``fisher_alpha``, and stores the result under ``task_id``.

        Args:
            model: The model being trained
            task_id: Task identifier
            task_dataloader: Dataloader with task data
        """
        if task_dataloader is None:
            self.logger.warning(
                f"No dataloader provided for task {task_id}, skipping importance update"
            )
            return

        # Check if dataloader has data. len() raises TypeError for iterable
        # datasets without __len__; in that case proceed and let the sampling
        # loop bound the work by `fisher_samples`.
        try:
            dataset_size = len(task_dataloader.dataset)
        except TypeError:
            dataset_size = None
        if dataset_size == 0:
            self.logger.warning(
                f"Empty dataloader for task {task_id}, skipping importance update"
            )
            return

        # Snapshot current parameters as the anchor for the penalty term.
        self.prev_params = {
            name: param.detach().clone()
            for name, param in model.named_parameters()
            if param.requires_grad
        }

        # Compute Fisher in eval mode so dropout/batch-norm updates do not
        # perturb the gradient estimates; the original mode is restored below.
        original_mode = model.training
        model.eval()

        device = self._get_device(model)

        # Initialize Fisher diagonal approximation (one accumulator per
        # trainable parameter, same shape as the parameter).
        fisher_diag = {
            name: torch.zeros_like(param)
            for name, param in model.named_parameters()
            if param.requires_grad
        }

        # Create a criterion for Fisher calculation
        criterion = nn.CrossEntropyLoss()

        # Determine number of samples for Fisher estimation
        sample_count = (
            self.fisher_samples
            if dataset_size is None
            else min(self.fisher_samples, dataset_size)
        )
        self.logger.info(
            f"Computing Fisher information with {sample_count} samples for task {task_id}"
        )

        try:
            self._accumulate_fisher(
                model, task_dataloader, criterion, fisher_diag, sample_count, device
            )

            # Blend with previous tasks' importance (EMA controlled by alpha).
            self._apply_fisher_decay(fisher_diag, task_id)

            # Store Fisher information as parameter importance
            self.parameter_importance[task_id] = fisher_diag

            self.logger.info(
                f"Successfully updated parameter importance for task {task_id}"
            )

        except Exception as e:
            self.logger.error(f"Exception during parameter importance update: {str(e)}")
        finally:
            # Clear the gradients accumulated during Fisher estimation so they
            # cannot leak into the next optimizer step, then restore the
            # model's original train/eval mode.
            model.zero_grad()
            model.train(original_mode)

    def _accumulate_fisher(
        self,
        model: nn.Module,
        dataloader: DataLoader,
        criterion: nn.Module,
        fisher_diag: Dict[str, torch.Tensor],
        sample_count: int,
        device: torch.device,
    ) -> None:
        """Accumulate the diagonal Fisher estimate over up to ``sample_count`` samples."""
        iterator = iter(dataloader)
        processed_samples = 0
        consecutive_skips = 0

        while processed_samples < sample_count:
            # Fetch the next batch, restarting the dataloader when exhausted.
            try:
                batch = next(iterator)
            except StopIteration:
                iterator = iter(dataloader)
                batch = next(iterator)

            extracted = self._extract_batch(batch)
            if extracted is None:
                # Bound consecutive failures so a dataloader that never yields
                # a usable batch cannot loop forever.
                consecutive_skips += 1
                if consecutive_skips >= self._MAX_CONSECUTIVE_SKIPS:
                    self.logger.error(
                        "Aborting Fisher estimation: too many consecutive unusable batches"
                    )
                    return
                continue
            inputs, targets = extracted

            # Trim the final batch so exactly `sample_count` samples contribute.
            batch_size = inputs.size(0)
            if processed_samples + batch_size > sample_count:
                remaining = sample_count - processed_samples
                inputs = inputs[:remaining]
                targets = targets[:remaining]
                batch_size = inputs.size(0)

            inputs = inputs.to(device)
            targets = targets.to(device)

            # Forward + backward to obtain per-batch gradients.
            model.zero_grad()
            try:
                outputs = model(inputs)

                # Extract logits if output is a dictionary
                if isinstance(outputs, dict) and "logits" in outputs:
                    logits = outputs["logits"]
                else:
                    logits = outputs

                loss = criterion(logits, targets)
                loss.backward()

                # Fisher diagonal: running mean of squared gradients
                # (divide by sample_count here rather than at the end).
                for name, param in model.named_parameters():
                    if param.requires_grad and param.grad is not None:
                        fisher_diag[name] += param.grad.data.pow(2) / sample_count
            except Exception as e:
                self.logger.error(f"Error during Fisher computation: {str(e)}")
                consecutive_skips += 1
                if consecutive_skips >= self._MAX_CONSECUTIVE_SKIPS:
                    self.logger.error(
                        "Aborting Fisher estimation: too many consecutive failing batches"
                    )
                    return
                continue

            # Successful batch: reset the failure streak and advance the count.
            consecutive_skips = 0
            processed_samples += batch_size

    def _extract_batch(self, batch: Any):
        """
        Pull ``(inputs, targets)`` out of a dataloader batch.

        Supports ``(inputs, targets, ...)`` sequences and dicts with
        ``"inputs"``/``"targets"`` keys. Returns ``None`` (after logging)
        for any other format or on extraction error.
        """
        try:
            if isinstance(batch, (list, tuple)) and len(batch) >= 2:
                return batch[0], batch[1]
            if isinstance(batch, dict) and "inputs" in batch and "targets" in batch:
                return batch["inputs"], batch["targets"]
            self.logger.warning(f"Unsupported batch format, skipping: {type(batch)}")
        except Exception as e:
            self.logger.error(f"Error extracting batch data: {str(e)}")
        return None

    def _apply_fisher_decay(
        self, fisher_diag: Dict[str, torch.Tensor], task_id: int
    ) -> None:
        """
        Blend the fresh Fisher estimate with earlier tasks' stored importance.

        Applies ``alpha * new + (1 - alpha) * old`` once per previous task, in
        task order, mutating ``fisher_diag`` in place (matching the original
        per-task EMA formulation of this implementation).
        """
        if task_id <= 0:
            return
        for prev_task_id in range(task_id):
            if prev_task_id not in self.parameter_importance:
                continue
            prev_importance = self.parameter_importance[prev_task_id]
            for name in fisher_diag:
                if name in prev_importance:
                    fisher_diag[name] = (
                        self.fisher_alpha * fisher_diag[name]
                        + (1 - self.fisher_alpha) * prev_importance[name]
                    )

    def state_dict(self) -> Dict[str, Any]:
        """Get state dictionary including previous parameters and Fisher settings."""
        state = super().state_dict()
        # Store parameter snapshots on CPU so checkpoints are device-agnostic.
        state["prev_params"] = {
            name: tensor.cpu() for name, tensor in self.prev_params.items()
        }
        state["fisher_samples"] = self.fisher_samples
        state["fisher_alpha"] = self.fisher_alpha
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Load state dictionary including previous parameters and Fisher settings."""
        super().load_state_dict(state_dict)
        if "prev_params" in state_dict:
            saved = state_dict["prev_params"]
            # Co-locate all snapshots on one device (that of the first stored
            # tensor; CPU for checkpoints produced by `state_dict` above).
            device = (
                next(iter(saved.values())).device if saved else torch.device("cpu")
            )
            self.prev_params = {
                name: tensor.to(device) for name, tensor in saved.items()
            }
        if "fisher_samples" in state_dict:
            self.fisher_samples = state_dict["fisher_samples"]
        if "fisher_alpha" in state_dict:
            self.fisher_alpha = state_dict["fisher_alpha"]
