"""
Metrics tracking callback for continual learning.

This module provides callbacks for tracking and computing various continual learning metrics
across multiple tasks, such as forgetting, knowledge transfer, and overall performance.
"""

from typing import Dict, List, Optional, Any, Union
import torch
import numpy as np
from collections import defaultdict

import pytorch_lightning as pl
from pytorch_lightning.callbacks import Callback


class ContinualMetricsCallback(Callback):
    """
    Callback for tracking continual learning-specific metrics.

    This callback keeps track of model performance across tasks and computes
    various metrics that are useful for analyzing continual learning scenarios:
    - Average accuracy across all tasks
    - Forgetting (performance drop on previous tasks)
    - Knowledge transfer (how new tasks benefit from prior learning)
    - Task-specific performance trends

    It expects the LightningModule to log per-task test metrics under keys of
    the form ``test/task_<id>/<metric>`` (e.g. ``test/task_0/acc``), and to
    expose the index of the task currently being trained as a
    ``current_task`` attribute.

    Args:
        monitor: Metric to track ('acc' or 'loss'). Anything other than
            'acc' is treated as a lower-is-better metric.
        compute_forgetting: Whether to compute forgetting metrics.
        verbose: Whether to print metrics during training.
    """

    def __init__(
        self,
        monitor: str = "acc",
        compute_forgetting: bool = True,
        verbose: bool = True,
    ):
        super().__init__()
        self.monitor = monitor
        self.compute_forgetting = compute_forgetting
        self.verbose = verbose

        # task_metrics[eval_task_id][train_task_id] -> list of metric values
        # observed for `eval_task_id` while training on `train_task_id`.
        self.task_metrics: Dict[int, Dict[int, List[float]]] = defaultdict(
            lambda: defaultdict(list)
        )
        # peak_metrics[eval_task_id] -> {"value": best value seen so far,
        # "task": train task id at which it was achieved}.
        self.peak_metrics: Dict[int, Dict[str, Any]] = defaultdict(dict)
        # Index of the task currently being trained; updated in on_fit_start.
        self.current_task_id: int = 0

    def on_test_epoch_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        """
        Compute continual learning metrics at the end of a test epoch.

        Scans ``trainer.callback_metrics`` for keys of the form
        ``test/task_<id>/<metric>`` matching ``self.monitor`` and feeds them
        into the continual-learning bookkeeping.

        Args:
            trainer: PyTorch Lightning trainer
            pl_module: The lightning module being trained
        """
        task_metrics: Dict[int, float] = {}

        # Collect per-task metrics from the logged test results.
        for key, value in trainer.callback_metrics.items():
            # Look for task-specific metrics like test/task_0/acc
            if not key.startswith("test/task_"):
                continue
            parts = key.split("/")
            if len(parts) < 3:
                continue

            # parts[1] is 'task_<id>'; skip keys whose suffix is not an
            # integer (e.g. aggregate keys like 'test/task_avg/acc') instead
            # of crashing the whole test epoch.
            try:
                task_id = int(parts[1].split("_")[1])
            except (IndexError, ValueError):
                continue

            if parts[2] == self.monitor:
                # callback_metrics values may be 0-dim tensors or plain
                # Python scalars depending on how they were logged;
                # float() handles both (tensor.__float__ == .item()).
                task_metrics[task_id] = float(value)

        if task_metrics:
            self._compute_metrics(task_metrics)

    def _compute_metrics(self, task_metrics: Dict[int, float]) -> None:
        """
        Compute and log continual learning metrics.

        Args:
            task_metrics: Dictionary mapping task IDs to their metric values
        """
        higher_is_better = self.monitor == "acc"

        # Record the latest metrics and update per-task peak performance.
        for task_id, value in task_metrics.items():
            self.task_metrics[task_id][self.current_task_id].append(value)

            peak = self.peak_metrics.get(task_id)
            is_new_peak = not peak or (
                value > peak["value"] if higher_is_better else value < peak["value"]
            )
            if is_new_peak:
                self.peak_metrics[task_id] = {
                    "value": value,
                    "task": self.current_task_id,
                }

        # Average performance across all tasks evaluated in this pass.
        avg_metric = sum(task_metrics.values()) / len(task_metrics)

        # Average forgetting over previously-trained tasks: how far each
        # task's current score is below (acc) / above (loss) its peak.
        # NOTE: intentionally left signed here (negative = backward
        # transfer); get_metrics_summary() reports the clamped variant.
        forgetting: Optional[float] = None
        if self.compute_forgetting and self.current_task_id > 0:
            forgetting_values: List[float] = []

            for prev_task_id in range(self.current_task_id):
                if prev_task_id not in self.task_metrics:
                    continue
                peak_value = self.peak_metrics.get(prev_task_id, {}).get("value")
                current_value = task_metrics.get(prev_task_id)
                if peak_value is None or current_value is None:
                    continue

                if higher_is_better:
                    # For accuracy, forgetting = peak - current
                    forgetting_values.append(peak_value - current_value)
                else:
                    # For loss, forgetting = current - peak
                    forgetting_values.append(current_value - peak_value)

            if forgetting_values:
                forgetting = sum(forgetting_values) / len(forgetting_values)

        if self.verbose:
            metric_name = "accuracy" if self.monitor == "acc" else "performance"
            print(
                f"Task {self.current_task_id} - Average {metric_name}: {avg_metric:.4f}"
            )

            if forgetting is not None:
                print(
                    f"Task {self.current_task_id} - Average forgetting: {forgetting:.4f}"
                )

            print("Task-specific metrics:")
            for task_id, value in task_metrics.items():
                print(f"  Task {task_id}: {value:.4f}")

            print("")

    def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        """
        Set the current task at the start of training.

        Args:
            trainer: PyTorch Lightning trainer
            pl_module: The lightning module being trained
        """
        # Try to get the current task ID from the module; modules that do not
        # expose one keep whatever task id was set previously (default 0).
        if hasattr(pl_module, "current_task"):
            self.current_task_id = pl_module.current_task

    def get_metrics_summary(self) -> Dict[str, Any]:
        """
        Get a summary of all tracked metrics.

        Returns:
            Dictionary with keys:
            - ``task_metrics``: nested dict of recorded values,
              ``{eval_task_id: {train_task_id: [values]}}``
            - ``peak_metrics``: per-task best value and the task at which
              it was achieved
            - ``average_metric``: mean of each task's final value under the
              current model (-1.0 if nothing recorded)
            - ``average_forgetting``: mean non-negative forgetting over
              previous tasks (-1.0 if not applicable)
        """
        summary: Dict[str, Any] = {
            # Convert the nested defaultdicts to plain dicts so the summary
            # is a clean, self-contained snapshot.
            "task_metrics": {
                task_id: dict(per_task)
                for task_id, per_task in self.task_metrics.items()
            },
            "peak_metrics": dict(self.peak_metrics),
            "average_metric": -1.0,
            "average_forgetting": -1.0,
        }

        # Latest value of each task under the final (current) model.
        final_metrics: Dict[int, float] = {}
        for task_id, per_task in self.task_metrics.items():
            if self.current_task_id in per_task:
                final_metrics[task_id] = per_task[self.current_task_id][-1]

        if final_metrics:
            summary["average_metric"] = sum(final_metrics.values()) / len(
                final_metrics
            )

        # Average forgetting, clamped at 0 per the standard definition
        # (negative values indicate backward transfer, not forgetting).
        if self.compute_forgetting and self.current_task_id > 0:
            forgetting_values: List[float] = []

            for prev_task_id in range(self.current_task_id):
                if (
                    prev_task_id not in self.task_metrics
                    or prev_task_id not in self.peak_metrics
                ):
                    continue
                peak_value = self.peak_metrics[prev_task_id]["value"]
                final_value = final_metrics.get(prev_task_id)
                if final_value is None:
                    continue

                if self.monitor == "acc":
                    forgetting_values.append(max(0, peak_value - final_value))
                else:
                    forgetting_values.append(max(0, final_value - peak_value))

            if forgetting_values:
                summary["average_forgetting"] = sum(forgetting_values) / len(
                    forgetting_values
                )

        return summary
