"""
Evaluation callbacks for continual learning.
"""

from typing import Dict, Optional
from collections import defaultdict
import torch.nn as nn

from ..task_callbacks import TaskCallback


class ContinualEvaluationCallback(TaskCallback):
    """
    Callback for comprehensive continual learning evaluation.

    This callback tracks various continual learning specific metrics:
    - Average accuracy over all tasks
    - Forgetting measure
    - Forward/backward transfer
    - Learning curve stability

    Args:
        compute_forgetting (bool): Whether to compute forgetting metric
        compute_transfer (bool): Whether to compute transfer metrics
        track_per_class (bool): Whether to track per-class metrics
            (NOTE: currently stored but not yet consumed by the metric
            computation below)
    """

    def __init__(
        self,
        compute_forgetting: bool = True,
        compute_transfer: bool = True,
        track_per_class: bool = True,
    ):
        super().__init__()
        self.compute_forgetting = compute_forgetting
        self.compute_transfer = compute_transfer
        self.track_per_class = track_per_class

        # Metrics storage
        # task_id -> most recent test accuracy observed for that task
        self.task_accuracies: Dict[int, float] = {}
        # task_id -> best test accuracy ever observed for that task
        self.best_accuracies: Dict[int, float] = {}
        # task_id -> {class_id -> accuracy}; reserved for per-class tracking
        self.class_accuracies: Dict[int, Dict[int, float]] = defaultdict(dict)
        # task_id -> forgetting = max(0, best - current) for past tasks
        self.forgetting_metrics: Dict[int, float] = {}
        # "forward"/"backward" -> average transfer value
        self.transfer_metrics: Dict[str, float] = {}
        self.current_task = 0

    def on_task_start(self, trainer, pl_module, task_id):
        """Record the id of the task that is about to be trained."""
        super().on_task_start(trainer, pl_module, task_id)
        self.current_task = task_id

    def on_train_start(self, trainer, pl_module):
        """Ensure the module exposes the attributes this callback relies on.

        FIXME: interface inconsistency with continual_module? We currently
        patch the missing attributes onto the module as a workaround, but
        this needs to be fixed properly in the future.
        """
        if not hasattr(pl_module, "track_metrics") or not pl_module.track_metrics:
            pl_module.track_metrics = True

            if not hasattr(pl_module, "test_acc"):
                # Per-task accuracy metric objects, keyed by str(task_id)
                pl_module.test_acc = nn.ModuleDict()
            if not hasattr(pl_module, "best_acc_per_task"):
                pl_module.best_acc_per_task = defaultdict(float)

    def on_test_epoch_end(self, trainer, pl_module):
        """Compute and log continual learning metrics after testing."""
        # Only compute metrics if the module has seen at least one task
        if not hasattr(pl_module, "seen_tasks") or len(pl_module.seen_tasks) == 0:
            return

        # Compute metrics from test results, then log them
        self._compute_continual_metrics(pl_module)
        self._log_metrics(trainer, pl_module)

    def _compute_continual_metrics(self, pl_module):
        """Update accuracy, forgetting and transfer metrics from test results."""
        current_task = pl_module.current_task
        seen_tasks = sorted(pl_module.seen_tasks)

        # Continual metrics are only meaningful once more than one task exists
        if len(seen_tasks) <= 1:
            return

        # Update current and best-ever per-task accuracies
        for task_id in seen_tasks:
            if str(task_id) in pl_module.test_acc:
                acc = pl_module.test_acc[str(task_id)].compute().item()
                self.task_accuracies[task_id] = acc
                if acc > self.best_accuracies.get(task_id, float("-inf")):
                    self.best_accuracies[task_id] = acc

        # Forgetting: drop from the best accuracy ever reached on a past task
        # (clamped at 0 so improvements don't show up as negative forgetting)
        if self.compute_forgetting:
            for task_id in seen_tasks:
                if task_id == current_task:
                    continue
                if task_id in self.best_accuracies and task_id in self.task_accuracies:
                    self.forgetting_metrics[task_id] = max(
                        0.0,
                        self.best_accuracies[task_id] - self.task_accuracies[task_id],
                    )

        # Transfer metrics
        if self.compute_transfer and current_task > 0:
            # Forward transfer: how the current task benefits from previous
            # tasks, relative to a from-scratch baseline. Placeholder until
            # baseline results are available.
            self.transfer_metrics["forward"] = 0.0

            # Backward transfer: how previous tasks benefit from the current
            # task. Positive means improvement, negative means forgetting.
            # (The earlier `len(seen_tasks) <= 1` guard already ensures
            # there is at least one previous task to average over.)
            backward_transfer = 0.0
            count = 0
            for task_id in seen_tasks:
                if task_id == current_task:
                    continue
                if task_id in self.task_accuracies and task_id in self.best_accuracies:
                    backward_transfer += (
                        self.task_accuracies[task_id] - self.best_accuracies[task_id]
                    )
                    count += 1
            if count > 0:
                self.transfer_metrics["backward"] = backward_transfer / count

    def _log_metrics(self, trainer, pl_module):
        """Log computed metrics through the trainer's logger, if any."""
        # Guard against trainers configured without a logger
        if getattr(trainer, "logger", None) is None:
            return

        # Average accuracy across all tasks seen so far
        if self.task_accuracies:
            avg_acc = sum(self.task_accuracies.values()) / len(self.task_accuracies)
            trainer.logger.log_metrics({"avg_accuracy": avg_acc})

        # Average and per-task forgetting
        if self.forgetting_metrics:
            avg_forgetting = sum(self.forgetting_metrics.values()) / len(
                self.forgetting_metrics
            )
            trainer.logger.log_metrics({"avg_forgetting": avg_forgetting})

            for task_id, forgetting in self.forgetting_metrics.items():
                trainer.logger.log_metrics({f"forgetting/task_{task_id}": forgetting})

        # Transfer metrics
        if "backward" in self.transfer_metrics:
            trainer.logger.log_metrics(
                {"backward_transfer": self.transfer_metrics["backward"]}
            )

        if "forward" in self.transfer_metrics:
            trainer.logger.log_metrics(
                {"forward_transfer": self.transfer_metrics["forward"]}
            )