import abc
import torch
import torch.nn as nn
import copy
import logging
from typing import Callable, Dict, Any, Optional
from learners.components.evaluation_manager import EvaluationManager
from learners.interfaces import TrainingInterface
from torch.optim.lr_scheduler import LRScheduler as PyTorchScheduler
from utils.loss import AngularPenaltySMLoss


class TrainingStrategy(TrainingInterface):
    """
    Abstract base class for training strategies.

    Defines the interface for all training strategies and provides the
    shared machinery: the main epoch loop with best-model tracking
    (``train_task``), construction of optimizer / LR scheduler / loss
    criterion from configuration, and progress logging.

    Subclasses must implement ``_train_epoch`` to define how a single
    epoch is trained.
    """

    def __init__(self):
        """Initialize the training strategy with a clean training history."""
        # _reset_history() creates every history attribute (best_acc,
        # best_epoch, best_model, learning_curve, loss_curve), so the
        # attributes are deliberately not assigned twice here.
        self._reset_history()

    """State Management"""

    def _reset_history(self):
        """Reset training history when starting a new training session."""
        self.best_acc = 0.0  # highest test accuracy observed so far
        self.best_epoch = 0  # epoch index at which best_acc was reached
        self.best_model = None  # deep copy of the best model's state_dict
        self.learning_curve = []  # per-epoch test accuracy
        self.loss_curve = []  # per-epoch training loss

    def get_results(self) -> Dict[str, Any]:
        """Return the results of the training process.

        Returns:
            Dictionary with training metrics including best accuracy,
            best epoch, and learning curves.
        """
        return {
            "best_acc": self.best_acc,
            "best_epoch": self.best_epoch,
            "learning_curve": self.learning_curve,
            "loss_curve": self.loss_curve,
        }

    """Stateless training strategy interface"""

    def train_task(
        self,
        network: torch.nn.Module,
        train_loader: torch.utils.data.DataLoader,
        test_loader: torch.utils.data.DataLoader,
        cfgs: Dict[str, Any],
        device: torch.device,
        evaluation_manager: EvaluationManager,
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Train the network on the current task.

        Args:
            network: The neural network to train
            train_loader: DataLoader for training data
            test_loader: DataLoader for testing data
            cfgs: Configuration dictionary (epochs, optimizer, scheduler,
                criterion settings; optional "early_stop" flag)
            device: Device to train on
            evaluation_manager: Manager for evaluation metrics; a fresh
                one is created when None is passed
            **kwargs: Additional arguments. Must contain "cur_task"
                (current task id); may contain "hook_manager".

        Returns:
            Dictionary containing the trained model, best accuracy and
            epoch, best model state_dict, learning/loss curves, and the
            per-epoch metrics collected by the evaluation manager.

        Raises:
            ValueError: If "cur_task" is not supplied via kwargs.
        """
        # Reset training history for new task
        self._reset_history()

        # If no evaluation manager provided, create a temporary one
        hook_manager = kwargs.get("hook_manager", None)
        if evaluation_manager is None:
            evaluation_manager = EvaluationManager()

        # Extract common parameters. Raise (rather than assert) so the
        # check survives when running under `python -O`.
        cur_task = kwargs.get("cur_task", None)
        if cur_task is None:
            raise ValueError("Current task ID must be provided.")

        # Drop the keys we pass explicitly to _train_epoch; forwarding
        # them again via **kwargs would raise "got multiple values for
        # keyword argument 'cur_task'".
        extra_kwargs = {
            k: v for k, v in kwargs.items() if k not in ("cur_task", "hook_manager")
        }

        # Initialize training components
        epochs = self._get_epochs(cfgs, cur_task)
        optimizer = self._get_optimizer(network, cfgs, cur_task)
        scheduler = self._get_scheduler(optimizer, cfgs, cur_task)
        criterion = self._get_criterion(cfgs)

        # Call hook before training starts
        if hook_manager:
            hook_manager.call_hook(
                "before_training", network=network, cur_task=cur_task
            )

        # Main training loop
        for epoch in range(epochs):
            # Train one epoch (delegated to the concrete strategy)
            train_metrics = self._train_epoch(
                network=network,
                train_loader=train_loader,
                criterion=criterion,
                optimizer=optimizer,
                device=device,
                cur_task=cur_task,
                hook_manager=hook_manager,
                epoch=epoch,
                cfgs=cfgs,
                **extra_kwargs,
            )

            # Apply scheduler if provided (None means constant LR)
            if scheduler:
                scheduler.step()

            # Evaluate model and update best model if needed
            eval_results = self._evaluate_and_track_best_model(
                network=network,
                test_loader=test_loader,
                evaluation_manager=evaluation_manager,
                cur_task=cur_task,
                epoch=epoch,
                device=device,
                train_metrics=train_metrics,
                epochs=epochs,
                hook_manager=hook_manager,
            )

            # Store in learning history
            self.learning_curve.append(eval_results["test_acc"])
            self.loss_curve.append(train_metrics.get("loss", 0.0))

        # Apply early stopping if enabled: restore best weights found
        if cfgs.get("early_stop", False) and self.best_model is not None:
            network.load_state_dict(self.best_model)

        return {
            "model": network,
            "best_acc": self.best_acc,
            "best_epoch": self.best_epoch,
            "best_model": self.best_model,
            "learning_curve": self.learning_curve,
            "loss_curve": self.loss_curve,
            "metrics": evaluation_manager.get_epoch_metrics(cur_task),
        }

    def _evaluate_and_track_best_model(
        self,
        network,
        test_loader,
        evaluation_manager,
        cur_task,
        epoch,
        device,
        train_metrics,
        epochs=None,
        hook_manager=None,
    ) -> Dict[str, Any]:
        """Evaluate the model and track the best model based on accuracy.

        Logs epoch progress, snapshots the model's state_dict whenever a
        new best test accuracy is reached, and fires the "after_epoch"
        hook when a hook manager is supplied.

        Returns:
            Dictionary with key "test_acc" — the epoch's test accuracy.
        """
        # Evaluate the model using the evaluation manager
        eval_metrics = evaluation_manager.evaluate_epoch(
            network=network,
            data_loader=test_loader,
            forward_fn=lambda model, inputs: model(inputs),
            task_id=cur_task,
            epoch_id=epoch,
            device=device,
        )
        test_acc = eval_metrics["accuracy"]

        # Log progress
        train_loss = train_metrics.get("loss", 0.0)
        train_acc = train_metrics.get("accuracy", 0.0)
        self._log_progress(epoch, epochs, cur_task, train_loss, train_acc, test_acc)

        # Update best model if needed (strict > keeps the earliest best)
        if test_acc > self.best_acc:
            self.best_acc = test_acc
            self.best_epoch = epoch
            # Deep copy so later training steps don't mutate the snapshot
            self.best_model = copy.deepcopy(network.state_dict())

        # Call hook after epoch
        if hook_manager:
            hook_manager.call_hook(
                "after_epoch",
                network=network,
                epoch=epoch,
                cur_task=cur_task,
                train_acc=train_acc,
                test_acc=test_acc,
                loss=train_loss,
            )

        return {
            "test_acc": test_acc,
        }

    @abc.abstractmethod
    def _train_epoch(
        self,
        network,
        train_loader: torch.utils.data.DataLoader,
        criterion: Callable,
        optimizer: torch.optim.Optimizer,
        device: torch.device,
        hook_manager: Optional[Any],
        epoch: int,
        cfgs: Dict[str, Any],
        cur_task: int,
        **kwargs,
    ) -> Dict[str, float]:
        """Train for one epoch.

        Implemented by concrete strategies. Must return a metrics dict;
        train_task reads the optional "loss" and "accuracy" keys from it.
        """
        pass

    def _log_progress(self, epoch, epochs, cur_task, train_loss, train_acc, test_acc):
        """Log training progress for one epoch via the logging module."""
        # Lazy %-style args avoid formatting when INFO logging is disabled.
        logging.info(
            "Task %s, Epoch %s/%s => Loss %.3f, TrAcc %.2f, TeAcc %.2f",
            cur_task,
            epoch + 1,
            epochs,
            train_loss,
            train_acc,
            test_acc,
        )

    def _get_epochs(self, cfgs, cur_task):
        """Get number of epochs for the current task.

        Assumes cfgs["epochs"] is indexable per task id.
        """
        return cfgs["epochs"][cur_task]

    @staticmethod
    def _resolve_per_task_values(component_cfg: Dict[str, Any], cur_task: int) -> None:
        """Resolve per-task list values into the nested "cfg" dict.

        Top-level entries whose value is a list hold one setting per
        incremental session; the value for the current task is written
        into ``component_cfg["cfg"]``. Note: mutates the config in place,
        matching the established behavior of the callers.
        """
        for key, value in component_cfg.items():
            if isinstance(value, list):
                component_cfg["cfg"][key] = value[cur_task]

    def _get_optimizer(
        self, network: nn.Module, optimizer_cfg: Dict[str, Any], cur_task: int
    ) -> torch.optim.Optimizer:
        """Create optimizer based on configuration.

        Args:
            network: Neural network to train (only trainable params are
                passed to the optimizer)
            optimizer_cfg: Configuration with "optimizer" (name) and
                "cfg" (constructor kwargs; list values are resolved
                per task)
            cur_task: Current task ID

        Returns:
            Configured PyTorch optimizer

        Raises:
            ValueError: If unsupported optimizer is specified
        """
        # deal with changing optimizer kwargs during the incremental sessions
        trainable_params = [p for p in network.parameters() if p.requires_grad]
        optimizer_name = optimizer_cfg.get("optimizer", "").lower()
        self._resolve_per_task_values(optimizer_cfg, cur_task)

        optimizer_map = {
            "sgd": torch.optim.SGD,
            "adam": torch.optim.Adam,
            "adamw": torch.optim.AdamW,
        }

        # Validate the name up front: a broad try/except KeyError around
        # construction would also swallow unrelated KeyErrors (e.g. a
        # missing "cfg" key) and misreport them as an unknown optimizer.
        if optimizer_name not in optimizer_map:
            supported = list(optimizer_map.keys())
            raise ValueError(
                f"Unsupported optimizer: '{optimizer_name}'. "
                f"Choose from: {', '.join(supported)}"
            )

        return optimizer_map[optimizer_name](trainable_params, **optimizer_cfg["cfg"])

    def _get_scheduler(
        self,
        optimizer: torch.optim.Optimizer,
        scheduler_cfg: Dict[str, Any],
        cur_task: int,
    ) -> Optional[PyTorchScheduler]:
        """Create learning rate scheduler based on configuration.

        Args:
            optimizer: Optimizer to schedule
            scheduler_cfg: Configuration with "scheduler" (name) and
                "cfg" (constructor kwargs; list values are resolved
                per task)
            cur_task: Current task ID

        Returns:
            PyTorch learning rate scheduler, or None for a constant
            learning rate (scheduler name "", "none", or "constant")

        Raises:
            ValueError: If unsupported scheduler is specified
        """
        scheduler_name = scheduler_cfg.get("scheduler", "").lower()

        # Constant learning rate: train_task treats a None scheduler as
        # "skip scheduler.step()".
        if scheduler_name in ("", "none", "constant"):
            return None

        self._resolve_per_task_values(scheduler_cfg, cur_task)

        # Supported scheduler constructors
        scheduler_creators = {
            "cosine": torch.optim.lr_scheduler.CosineAnnealingLR,
            "steplr": torch.optim.lr_scheduler.MultiStepLR,
        }

        if scheduler_name not in scheduler_creators:
            raise ValueError(
                f"Unsupported scheduler: '{scheduler_name}'. "
                f"Supported schedulers: {list(scheduler_creators.keys())}"
            )

        return scheduler_creators[scheduler_name](optimizer, **scheduler_cfg["cfg"])

    def _get_criterion(self, criterion_cfg: Dict[str, Any]) -> nn.Module:
        """Create loss function based on configuration.

        Args:
            criterion_cfg: Configuration dictionary containing:
                - criterion: Loss function name ("ce", "bce", "mse",
                  "angular")
                - cfg: Keyword arguments for the criterion constructor

        Returns:
            PyTorch loss function

        Raises:
            ValueError: If unsupported criterion is specified
        """
        criterion_name = criterion_cfg.get("criterion", "").lower()
        cfg = criterion_cfg.get("cfg", {})
        criterion_map = {
            "ce": nn.CrossEntropyLoss,
            "bce": nn.BCEWithLogitsLoss,
            "mse": nn.MSELoss,
            "angular": AngularPenaltySMLoss,
        }

        if criterion_name not in criterion_map:
            raise ValueError(
                f"Unsupported criterion: '{criterion_name}'. "
                f"Supported criteria: {list(criterion_map.keys())}"
            )

        return criterion_map[criterion_name](**cfg)
