"""
Evaluation Manager for Domain Incremental Learning.

This module provides the EvaluationManager class which handles the evaluation
of incremental learning models at both epoch and task levels, calculating various metrics
including accuracy, forgetting, and other performance indicators.
"""

import logging
import torch
from typing import Dict, List, Any, Callable, Optional, Tuple, Union
from torch.utils.data import DataLoader
from learners.registry import register_evaluation_manager
from learners.interfaces import EvaluationInterface


@register_evaluation_manager(name="cil_evaluation_manager")
class EvaluationManager(EvaluationInterface):
    """
    Manager for incremental learning model evaluation.

    Provides evaluation at two granularities:

    * **Epoch level** (during training) via :meth:`evaluate_epoch` — quick
      accuracy on the current task, with best-accuracy tracking that can
      drive early stopping.
    * **Task level** (after a task finishes) via :meth:`evaluate_task` —
      accuracy on every task seen so far, average incremental accuracy,
      and a forgetting measure.

    Both standard classifier ("cnn") evaluation and nearest-mean-exemplar
    ("nme") evaluation are supported.

    Attributes:
        metrics (Dict[str, List[float]]): Task-level metric histories,
            appended to once per completed task.
        task_accuracy (Dict[int, Dict[int, float]]): Per-task accuracy
            breakdown, keyed by the task after which evaluation ran.
        epoch_metrics (Dict[int, Dict[int, Dict[str, float]]]): Epoch-level
            metrics indexed by ``[task_id][epoch_id]``.
        _best_accuracy (Dict[int, float]): Best epoch-level accuracy
            observed for each task (used for forgetting computation).
    """

    def __init__(self):
        """Initialize the evaluation manager with empty metric stores."""
        # Task-level metric histories; one entry is appended per call to
        # evaluate_task, so index i corresponds to the i-th evaluated task.
        self.metrics = {
            "accuracy": [],  # Current-task accuracy
            "forgetting": [],  # Average forgetting over previous tasks
            "task_accuracy": [],  # Reserved for per-task accuracy snapshots
            "avg_incremental_accuracy": [],  # Mean accuracy over seen tasks
        }
        self.task_accuracy = {}  # task_id -> {evaluated_task_id: accuracy}
        self._best_accuracy = {}  # task_id -> best epoch accuracy seen

        # Epoch-level metrics, indexed by [task_id][epoch_id].
        self.epoch_metrics = {}

    def evaluate_epoch(
        self,
        network: torch.nn.Module,
        data_loader: DataLoader,
        forward_fn: Callable,
        task_id: int,
        epoch_id: int,
        device: torch.device,
    ) -> Dict[str, float]:
        """
        Evaluate the network at the epoch level during training.

        Args:
            network: Network to evaluate (switched to eval mode here).
            data_loader: Test data loader for the current task.
            forward_fn: Forward-pass function; must return a dict with a
                ``"logits"`` entry.
            task_id: ID of the current task.
            epoch_id: Current epoch number.
            device: Device to run evaluation on.

        Returns:
            Dictionary with ``"accuracy"``, ``"loss"`` and ``"is_best"``.
            ``"is_best"`` is always present (True when this epoch set a new
            best accuracy for the task), so callers get a stable schema.
        """
        network.eval()

        # Same accuracy routine as task-level evaluation, for consistency.
        acc = self._compute_accuracy(network, data_loader, forward_fn, device)

        # NOTE: "loss" is kept for schema compatibility but is not computed
        # here; forward_fn only exposes logits.
        metrics = {
            "accuracy": acc,
            "loss": 0.0,
        }

        # Track best accuracy per task (useful for early stopping) and
        # record the flag unconditionally so the key is always present.
        is_best = task_id not in self._best_accuracy or acc > self._best_accuracy[task_id]
        if is_best:
            self._best_accuracy[task_id] = acc
        metrics["is_best"] = is_best

        # Store metrics in the [task_id][epoch_id] tracking structure.
        self.epoch_metrics.setdefault(task_id, {})[epoch_id] = metrics

        return metrics

    def evaluate_task(
        self,
        network: torch.nn.Module,
        data_loaders: Dict[int, DataLoader],
        forward_fn: Callable,
        task_id: int,
        device: torch.device,
        use_nme: bool = False,
        nme_forward_fn: Optional[Callable] = None,
        nme_k: int = -1,
        class_means: Optional[torch.Tensor] = None,
        known_classes: int = 0,
    ) -> Dict[str, Any]:
        """
        Evaluate the network across all completed tasks.

        Typically run after finishing a task's training; evaluates all
        tasks up to and including ``task_id`` to track metrics such as
        average incremental accuracy and forgetting.

        Args:
            network: Network to evaluate.
            data_loaders: Mapping of task IDs to test data loaders.
                Loaders for tasks beyond ``task_id`` are ignored.
            forward_fn: Forward-pass function returning ``{"logits": ...}``.
            task_id: ID of the current task.
            device: Device to run evaluation on.
            use_nme: Whether to also run nearest-mean-exemplar evaluation.
            nme_forward_fn: Feature-extraction function for NME
                (must return ``{"features": ...}``).
            nme_k: Top-k for NME classification (-1 means plain argmax).
            class_means: Class prototype vectors for NME.
            known_classes: Number of classes known from previous tasks
                (forwarded to NME evaluation).

        Returns:
            Summary dictionary with per-task accuracies, average accuracy,
            current-task accuracy and forgetting; NME variants are added
            when NME evaluation ran.
        """
        results = {"cnn": {}, "nme": {}}
        network.eval()

        for t_id, loader in data_loaders.items():
            # Skip tasks the model has not been trained on yet.
            if t_id > task_id:
                continue

            results["cnn"][t_id] = self._compute_accuracy(
                network, loader, forward_fn, device
            )

            if use_nme and nme_forward_fn is not None and class_means is not None:
                results["nme"][t_id] = self._compute_nme_accuracy(
                    network,
                    loader,
                    nme_forward_fn,
                    class_means,
                    known_classes,
                    device,
                    nme_k,
                )

        # Snapshot the per-task accuracies observed after this task.
        self.task_accuracy[task_id] = results["cnn"]

        # The loop above already filtered out t_id > task_id, so every
        # entry counts toward the average (no need to re-filter).
        num_tasks = len(results["cnn"])
        avg_acc = sum(results["cnn"].values()) / num_tasks if num_tasks > 0 else 0.0

        forgetting = self._compute_forgetting(task_id)

        # Guard with .get: the current task's loader may be absent from
        # data_loaders; fall back to 0.0 instead of raising KeyError.
        current_acc = results["cnn"].get(task_id, 0.0)

        self.metrics["accuracy"].append(current_acc)
        self.metrics["avg_incremental_accuracy"].append(avg_acc)
        self.metrics["forgetting"].append(forgetting)

        summary = {
            "task_accuracy": results["cnn"],
            "average_accuracy": avg_acc,
            "current_task_accuracy": current_acc,
            "forgetting": forgetting,
        }

        if use_nme and results["nme"]:
            nme_avg_acc = sum(results["nme"].values()) / len(results["nme"])
            summary["nme_task_accuracy"] = results["nme"]
            summary["nme_average_accuracy"] = nme_avg_acc
            summary["nme_current_task_accuracy"] = results["nme"].get(task_id, 0.0)

        return summary

    def evaluate_current_task(
        self,
        network: torch.nn.Module,
        data_loader: DataLoader,
        forward_fn: Callable,
        task_id: int,
        device: torch.device,
    ) -> Dict[str, float]:
        """
        Evaluate only the current task, useful for quick assessments.

        Args:
            network: Network to evaluate.
            data_loader: Test data loader for the current task.
            forward_fn: Forward-pass function returning ``{"logits": ...}``.
            task_id: ID of the current task (echoed in the result).
            device: Device to run evaluation on.

        Returns:
            Dictionary with ``"accuracy"`` and ``"task_id"``.
        """
        network.eval()
        accuracy = self._compute_accuracy(network, data_loader, forward_fn, device)
        return {"accuracy": accuracy, "task_id": task_id}

    @staticmethod
    def _compute_accuracy(
        network: torch.nn.Module,
        loader: DataLoader,
        forward_fn: Callable,
        device: torch.device,
    ) -> float:
        """
        Compute top-1 classification accuracy over a data loader.

        Args:
            network: Model to evaluate (assumed already in eval mode).
            loader: DataLoader yielding ``(inputs, targets, domains)``
                triples; the domain labels are not used here.
            forward_fn: Forward-pass function returning ``{"logits": ...}``.
            device: Device to run evaluation on.

        Returns:
            Accuracy percentage in [0, 100]; 0.0 for an empty loader.
        """
        correct, total = 0, 0

        with torch.no_grad():
            for inputs, targets, _ in loader:
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = forward_fn(network, inputs)

                preds = torch.argmax(outputs["logits"], dim=1)
                correct += (preds == targets).sum().item()
                total += targets.shape[0]

        return 100.0 * correct / total if total > 0 else 0.0

    @staticmethod
    def _compute_nme_accuracy(
        network: torch.nn.Module,
        loader: DataLoader,
        forward_fn: Callable,
        class_means: torch.Tensor,
        known_classes: int,
        device: torch.device,
        nme_k: int = -1,
    ) -> float:
        """
        Compute nearest-mean-exemplar (NME) accuracy.

        Classifies each sample by cosine similarity between its extracted
        features and the class prototype vectors.

        Args:
            network: Model to evaluate (assumed already in eval mode).
            loader: DataLoader yielding ``(inputs, targets, domains)``
                triples; the domain labels are not used here.
            forward_fn: Feature-extraction function returning
                ``{"features": ...}``.
            class_means: Tensor of class mean vectors (prototypes),
                one row per class.
            known_classes: Number of classes from previous tasks.
                NOTE: currently unused; kept for interface compatibility.
            device: Device to run evaluation on.
            nme_k: Top-k parameter (-1 means plain argmax). Since only the
                single most-similar class is used for the prediction either
                way, both branches yield the top-1 prototype.
            
        Returns:
            Accuracy percentage in [0, 100]; 0.0 for an empty loader.
        """
        correct, total = 0, 0
        # Move and normalize the prototypes once, outside the batch loop.
        means_norm = torch.nn.functional.normalize(class_means.to(device), p=2, dim=1)

        with torch.no_grad():
            for inputs, targets, _ in loader:
                inputs, targets = inputs.to(device), targets.to(device)

                features = forward_fn(network, inputs)["features"]
                features_norm = torch.nn.functional.normalize(features, p=2, dim=1)

                # Cosine similarity to every class prototype.
                similarity = torch.matmul(features_norm, means_norm.T)

                # The prediction is the single most-similar prototype in
                # both branches; the topk path is kept only to preserve the
                # original tie-breaking behavior when nme_k > 0.
                if nme_k > 0:
                    preds = similarity.topk(nme_k, dim=1).indices[:, 0]
                else:
                    preds = torch.argmax(similarity, dim=1)

                correct += (preds == targets).sum().item()
                total += targets.shape[0]

        return 100.0 * correct / total if total > 0 else 0.0

    def _compute_forgetting(self, task_id: int) -> float:
        """
        Compute average forgetting relative to each task's best accuracy.

        Forgetting for a previous task t is the (non-negative) drop from
        the best epoch-level accuracy recorded for t to its accuracy after
        training the current task. The metric is averaged over all previous
        tasks that have both records available.

        Args:
            task_id: Current task ID.

        Returns:
            Average forgetting across previous tasks; 0.0 for the first
            task or when no comparable records exist.
        """
        if task_id == 0:
            return 0.0

        current_results = self.task_accuracy.get(task_id, {})
        forgetting, count = 0.0, 0

        for t in range(task_id):
            # Both a best-accuracy record and a current measurement are
            # required to compute a drop for task t.
            if t in self._best_accuracy and t in current_results:
                drop = self._best_accuracy[t] - current_results[t]
                forgetting += max(0.0, drop)  # only count accuracy decreases
                count += 1

        return forgetting / count if count > 0 else 0.0

    def get_metrics(self) -> Dict[str, List[float]]:
        """
        Get all tracked task-level metrics.

        Returns:
            The internal metrics dictionary (not a copy); callers should
            treat it as read-only.
        """
        return self.metrics

    def get_epoch_metrics(self, task_id: Optional[int] = None) -> Dict:
        """
        Get epoch-level metrics, optionally filtered by task.

        Args:
            task_id: If provided, return metrics only for this task
                (an empty dict when the task has no records).

        Returns:
            Epoch metrics for the specified task, or for all tasks.
        """
        if task_id is not None:
            return self.epoch_metrics.get(task_id, {})
        return self.epoch_metrics

    def get_task_accuracy(self) -> Dict[int, Dict[str, float]]:
        """
        Get accuracy breakdowns by task.

        Returns:
            Mapping from task ID to that task's per-task accuracy dict.
        """
        return self.task_accuracy

    def get_best_accuracy(
        self, task_id: Optional[int] = None
    ) -> Union[Dict[int, float], float]:
        """
        Get the best accuracy achieved, for all tasks or a specific one.

        Args:
            task_id: If provided, return the best accuracy for this task
                (0.0 when the task has no record).

        Returns:
            A dict of best accuracies by task, or a single float.
        """
        if task_id is not None:
            return self._best_accuracy.get(task_id, 0.0)
        return self._best_accuracy

    def reset_epoch_metrics(self, task_id: int):
        """
        Reset epoch metrics for a specific task.

        Args:
            task_id: The task ID to reset metrics for. Unknown IDs are a
                no-op.
        """
        if task_id in self.epoch_metrics:
            self.epoch_metrics[task_id] = {}

    def get_epoch_learning_curve(self, task_id: int) -> Tuple[List[int], List[float]]:
        """
        Get learning-curve data (epoch vs. accuracy) for a task.

        Args:
            task_id: The task ID to get the learning curve for.

        Returns:
            Tuple of (sorted epoch numbers, matching accuracy values);
            two empty lists when the task has no epoch records.
        """
        task_records = self.epoch_metrics.get(task_id)
        if not task_records:
            return [], []

        epochs = sorted(task_records)
        accuracies = [task_records[e]["accuracy"] for e in epochs]
        return epochs, accuracies

    def get_summary(self) -> str:
        """
        Generate a human-readable summary of the latest metrics.

        Returns:
            Multi-line text summary of key performance metrics; sections
            are omitted when no data has been recorded yet.
        """
        lines = ["=== Evaluation Summary ==="]

        if self.metrics["accuracy"]:
            lines.append(f"Final Task Accuracy: {self.metrics['accuracy'][-1]:.2f}%")

        if self.metrics["avg_incremental_accuracy"]:
            lines.append(
                f"Average Incremental Accuracy: {self.metrics['avg_incremental_accuracy'][-1]:.2f}%"
            )

        if self.metrics["forgetting"]:
            lines.append(f"Average Forgetting: {self.metrics['forgetting'][-1]:.2f}%")

        return "\n".join(lines)

    def plot_metrics(self, save_path: Optional[str] = None) -> None:
        """
        Plot task-level metrics as a line graph.

        Requires matplotlib; logs a warning and returns if it is missing.

        Args:
            save_path: Path to save the plot; if None, the plot is shown
                interactively instead.
        """
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            logging.warning("Cannot plot: please install matplotlib")
            return

        plt.figure(figsize=(10, 6))

        x = list(range(len(self.metrics["accuracy"])))

        plt.plot(x, self.metrics["accuracy"], "b-o", label="Current Task Accuracy")
        plt.plot(
            x,
            self.metrics["avg_incremental_accuracy"],
            "g-s",
            label="Average Accuracy",
        )
        plt.plot(x, self.metrics["forgetting"], "r-^", label="Forgetting")

        plt.xlabel("Task")
        plt.ylabel("Percentage")
        plt.title("Incremental Learning Performance")
        plt.legend()
        plt.grid(True)

        if save_path:
            plt.savefig(save_path)
            # Lazy %-formatting keeps the message cheap when the log
            # level filters it out.
            logging.info("Metrics plot saved to %s", save_path)
        else:
            plt.show()