"""
Training Management Interface for Continual Learning

This interface defines methods for managing the training process in continual learning,
including optimizer configuration, loss functions, and training strategies.
"""

from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, Tuple
import torch
import torch.nn as nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import DataLoader


class TrainingManager(ABC):
    """Interface for managing the training process in continual learning.

    Implementations drive training at three granularities: a whole task
    (``train_task``), a single epoch (``train_epoch``), and a single
    batch (``train_batch``).
    """

    @abstractmethod
    def train_task(
        self,
        train_loader: DataLoader,
        test_loader: Optional[DataLoader] = None
    ) -> dict[str, Any]:
        """
        Train the model on the current task.

        Args:
            train_loader: Training data loader
            test_loader: Optional test data loader for validation

        Returns:
            Training results and metrics
        """
        pass

    @abstractmethod
    def train_epoch(
        self,
        train_loader: DataLoader,
        optimizer: Optimizer,
        criterion: nn.Module
    ) -> dict[str, float]:
        """
        Train for one epoch.

        Args:
            train_loader: Training data loader
            optimizer: Optimizer instance
            criterion: Loss function

        Returns:
            Epoch training metrics
        """
        pass

    @abstractmethod
    def train_batch(
        self,
        batch: tuple[Any, ...],
        optimizer: Optimizer,
        criterion: nn.Module
    ) -> dict[str, float]:
        """
        Train on a single batch.

        Args:
            batch: Training batch data
            optimizer: Optimizer instance
            criterion: Loss function

        Returns:
            Batch training metrics
        """
        pass


class OptimizerFactory(ABC):
    """Abstract factory that builds optimizer instances on demand."""

    @abstractmethod
    def create_optimizer(
        self,
        parameters: Any,
        lr: float,
        optimizer_type: str = "sgd",
        **kwargs
    ) -> Optimizer:
        """
        Build a configured optimizer.

        Args:
            parameters: Model parameters the optimizer will update
            lr: Base learning rate
            optimizer_type: Optimizer family to construct (e.g. sgd, adam, adamw)
            **kwargs: Extra keyword arguments forwarded to the optimizer

        Returns:
            A ready-to-use optimizer instance
        """
        ...

    @abstractmethod
    def get_supported_optimizers(self) -> list[str]:
        """Return the optimizer type names this factory can create."""
        ...


class SchedulerFactory(ABC):
    """Abstract factory that builds learning-rate schedulers."""

    # NOTE(review): _LRScheduler is a private torch name; newer torch versions
    # expose a public LRScheduler alias — confirm the minimum supported torch
    # version before migrating the annotation.
    @abstractmethod
    def create_scheduler(
        self,
        optimizer: Optimizer,
        scheduler_type: str = "cosine",
        epochs: int = 100,
        **kwargs
    ) -> _LRScheduler:
        """
        Build a learning-rate scheduler bound to the given optimizer.

        Args:
            optimizer: Optimizer whose learning rate will be scheduled
            scheduler_type: Scheduler family to construct
            epochs: Total number of training epochs
            **kwargs: Extra keyword arguments forwarded to the scheduler

        Returns:
            A configured learning-rate scheduler
        """
        ...

    @abstractmethod
    def get_supported_schedulers(self) -> list[str]:
        """Return the scheduler type names this factory can create."""
        ...


class LossFactory(ABC):
    """Abstract factory that builds loss-function modules."""

    @abstractmethod
    def create_loss_function(
        self,
        loss_type: str = "crossentropy",
        num_classes: Optional[int] = None,
        **kwargs
    ) -> nn.Module:
        """
        Build a loss-function module.

        Args:
            loss_type: Loss family to construct
            num_classes: Number of classes, when the loss requires it
            **kwargs: Extra keyword arguments forwarded to the loss

        Returns:
            A loss function as an ``nn.Module``
        """
        ...

    @abstractmethod
    def get_supported_losses(self) -> list[str]:
        """Return the loss type names this factory can create."""
        ...


class TrainingStrategy(ABC):
    """Interface for different continual-learning training strategies.

    A strategy prepares per-task training configuration, computes the
    training loss, and performs any post-step bookkeeping.
    """

    @abstractmethod
    def prepare_training(self, task_id: int) -> dict[str, Any]:
        """
        Prepare for training a specific task.

        Args:
            task_id: Task identifier

        Returns:
            Training configuration
        """
        pass

    @abstractmethod
    def compute_loss(
        self,
        outputs: torch.Tensor,
        targets: torch.Tensor,
        **kwargs
    ) -> torch.Tensor:
        """
        Compute the training loss.

        Args:
            outputs: Model outputs
            targets: Ground truth targets
            **kwargs: Additional parameters

        Returns:
            Computed loss tensor
        """
        pass

    @abstractmethod
    def post_training_step(self, task_id: int) -> None:
        """Perform post-processing after a training step for the given task."""
        pass


class RegularizationManager(ABC):
    """Abstract manager for regularization techniques in continual learning."""

    @abstractmethod
    def apply_regularization(
        self,
        loss: torch.Tensor,
        model: nn.Module,
        task_id: int
    ) -> torch.Tensor:
        """
        Combine the base loss with a regularization term.

        Args:
            loss: Base loss
            model: Model instance
            task_id: Current task ID

        Returns:
            Regularized loss
        """
        ...

    @abstractmethod
    def compute_regularization_loss(
        self,
        model: nn.Module,
        task_id: int
    ) -> torch.Tensor:
        """
        Compute only the regularization component of the loss.

        Args:
            model: Model instance
            task_id: Current task ID

        Returns:
            Regularization loss
        """
        ...


class MetricsManager(ABC):
    """Interface for accumulating, reporting, and resetting training metrics."""

    @abstractmethod
    def update_metrics(self, predictions: torch.Tensor, targets: torch.Tensor) -> None:
        """Update accumulated metrics with new predictions and targets."""
        pass

    @abstractmethod
    def get_metrics(self) -> dict[str, float]:
        """Return the current metric values keyed by metric name."""
        pass

    @abstractmethod
    def reset_metrics(self) -> None:
        """Reset all accumulated metrics."""
        pass

    @abstractmethod
    def log_metrics(self, epoch: int, phase: str = "train") -> None:
        """Log metrics for the given epoch and phase (e.g. train or test)."""
        pass
