"""
Multi-GPU Training Strategy - Clean Strategy Pattern Implementation

This module provides a comprehensive, SOLID-compliant strategy pattern implementation
for multi-GPU training in continual learning scenarios. It abstracts multi-GPU concerns
completely from learner business logic, enabling clean separation of concerns and
maintainable code.

Key Features:
    - Strategy pattern for pluggable multi-GPU implementations
    - Complete abstraction of DataParallel complexities
    - Context manager for clean resource management
    - Type-safe interfaces with proper error handling
    - Extensible design for future DDP support
    - Zero coupling between learners and multi-GPU logic

Classes:
    MultiGPUTrainingStrategy: Abstract base strategy interface
    SingleGPUStrategy: No-op strategy for single GPU scenarios
    DataParallelStrategy: PyTorch DataParallel implementation
    CPUStrategy: Pass-through strategy for CPU-only training
    DistributedDataParallelStrategy: Future DDP implementation (placeholder)
    MultiGPUContext: Context manager for strategy lifecycle

Functions:
    create_multi_gpu_strategy: Factory function for strategy creation

Usage:
    >>> strategy = create_multi_gpu_strategy([0, 1, 2, 3])
    >>> with MultiGPUContext(strategy) as ctx:
    ...     network = ctx.setup_training(network)
    ...     # Training code using ctx methods
    ...     network = ctx.teardown_training(network)
"""

from abc import ABC, abstractmethod
from typing import Any, List, Union, Dict
import logging
import torch
from torch import nn


class MultiGPUTrainingStrategy(ABC):
    """
    Strategy interface that hides multi-GPU mechanics from training code.

    Concrete strategies (single-GPU, DataParallel, future DDP, ...) implement
    this contract so that learners never interact with wrappers such as
    ``nn.DataParallel`` directly. Because the interface holds no state about
    the network itself, one strategy instance can be reused across training
    sessions, swapped at runtime, or replaced by a mock in tests.

    Implementations must provide seven operations covering:
        * lifecycle management (``setup_training`` / ``teardown_training``),
        * safe method invocation on a possibly-wrapped network,
        * safe attribute read/write on a possibly-wrapped network,
        * status reporting (``is_multi_gpu_active`` / ``get_device_info``).
    """

    @abstractmethod
    def setup_training(self, network: nn.Module) -> nn.Module:
        """
        Wrap/move ``network`` so it is ready for this strategy's training mode.

        Args:
            network (nn.Module): A plain single-device network (CPU or
                primary GPU).

        Returns:
            nn.Module: The training-ready network. May be the same object or
                a wrapper; callers should treat it as opaque and interact
                with it only through the strategy methods.

        Raises:
            RuntimeError: On hardware/configuration failures during setup.
            ValueError: If ``network`` is in an invalid state for setup.
        """
        ...

    @abstractmethod
    def teardown_training(self, network: nn.Module) -> nn.Module:
        """
        Strip any multi-GPU wrapper and hand back the bare network.

        Args:
            network (nn.Module): A network previously prepared by
                ``setup_training``.

        Returns:
            nn.Module: The unwrapped network, suitable for saving,
                evaluation, or a fresh setup (possibly on a different device).

        Raises:
            ValueError: If ``network`` is not in a valid state for teardown.
        """
        ...

    @abstractmethod
    def execute_method(
        self, network: nn.Module, method_name: str, *args, **kwargs
    ) -> Any:
        """
        Invoke ``method_name`` on the underlying model, not the wrapper.

        Args:
            network (nn.Module): The (possibly wrapped) network.
            method_name (str): Name of the method to invoke.
            *args: Positional arguments forwarded to the method.
            **kwargs: Keyword arguments forwarded to the method.

        Returns:
            Any: Whatever the invoked method returns.

        Raises:
            AttributeError: If the underlying network lacks the method.
            TypeError: If the arguments do not match the method signature.

        Example:
            >>> strategy.execute_method(network, 'train', True)
        """
        ...

    @abstractmethod
    def get_property(
        self, network: nn.Module, property_name: str, default: Any = None
    ) -> Any:
        """
        Read an attribute from the underlying model, bypassing any wrapper.

        Args:
            network (nn.Module): The (possibly wrapped) network.
            property_name (str): Attribute name to read.
            default (Any, optional): Fallback when the attribute is missing.
                Defaults to None.

        Returns:
            Any: The attribute value, or ``default`` if absent.

        Example:
            >>> num_classes = strategy.get_property(network, 'num_classes', 10)
        """
        ...

    @abstractmethod
    def set_property(self, network: nn.Module, property_name: str, value: Any) -> None:
        """
        Write an attribute on the underlying model, bypassing any wrapper.

        Args:
            network (nn.Module): The (possibly wrapped) network.
            property_name (str): Attribute name to assign.
            value (Any): Value to store.

        Raises:
            AttributeError: If the attribute cannot be set.

        Example:
            >>> strategy.set_property(network, 'num_classes', 100)
        """
        ...

    @abstractmethod
    def is_multi_gpu_active(self) -> bool:
        """
        Report whether multiple GPUs are currently in use.

        Returns:
            bool: True only when a multi-GPU strategy is set up and actually
                spanning more than one device; single-device strategies
                always return False, even after ``setup_training``.
        """
        ...

    @abstractmethod
    def get_device_info(self) -> Dict[str, Any]:
        """
        Describe the devices this strategy targets.

        Returns:
            Dict[str, Any]: At minimum the keys ``strategy`` (str name),
                ``device_count`` (int), ``primary_device`` (str), and
                ``devices`` (List[str]); strategies may add extra keys.

        Example:
            >>> info = strategy.get_device_info()
            >>> print(f"Using {info['device_count']} devices: {info['devices']}")
        """
        ...

class SingleGPUStrategy(MultiGPUTrainingStrategy):
    """
    Pass-through strategy for training on a single device.

    Every operation delegates straight to the network — no wrapper is ever
    applied — so this strategy adds zero overhead while still satisfying the
    ``MultiGPUTrainingStrategy`` contract. It suits development and
    debugging, small models, deterministic baselines, and comparisons
    against the parallel strategies.

    Attributes:
        device (Union[str, torch.device]): Device the network is moved to
            during setup.
        logger (logging.Logger): Logger used for debug output.

    Example:
        >>> strategy = SingleGPUStrategy(device="cuda:0")
        >>> network = strategy.setup_training(network)  # Just moves to device
    """

    def __init__(self, device: Union[str, torch.device] = "cuda:0"):
        """
        Initialize single GPU strategy.

        Args:
            device (Union[str, torch.device], optional): Target device,
                either a string like "cuda:0" or a torch.device object.
                Defaults to "cuda:0".
        """
        self.device = device
        self.logger = logging.getLogger(f"{__name__}.SingleGPU")

    def setup_training(self, network: nn.Module) -> nn.Module:
        """
        Move ``network`` onto the configured device; no wrapper is applied.

        Args:
            network (nn.Module): The network to prepare.

        Returns:
            nn.Module: The same network, now on ``self.device``.
        """
        self.logger.debug(f"Setting up single GPU training on {self.device}")
        return network.to(self.device)

    def teardown_training(self, network: nn.Module) -> nn.Module:
        """
        No-op teardown: nothing was wrapped, so the network is returned as-is.

        Args:
            network (nn.Module): The network to clean up.

        Returns:
            nn.Module: The same network, unchanged.
        """
        return network

    def execute_method(
        self, network: nn.Module, method_name: str, *args, **kwargs
    ) -> Any:
        """
        Invoke ``method_name`` directly on the network.

        Args:
            network (nn.Module): Target of the call.
            method_name (str): Method to invoke.
            *args: Positional arguments forwarded to the method.
            **kwargs: Keyword arguments forwarded to the method.

        Returns:
            Any: Whatever the invoked method returns.

        Raises:
            AttributeError: If ``network`` lacks the requested method.
        """
        if hasattr(network, method_name):
            return getattr(network, method_name)(*args, **kwargs)
        raise AttributeError(f"Network does not have method '{method_name}'")

    def get_property(
        self, network: nn.Module, property_name: str, default: Any = None
    ) -> Any:
        """
        Read ``property_name`` from the network, or ``default`` when absent.

        Args:
            network (nn.Module): Network to read from.
            property_name (str): Attribute name to look up.
            default (Any, optional): Fallback when the attribute is missing.

        Returns:
            Any: The attribute value or ``default``.
        """
        return getattr(network, property_name, default)

    def set_property(self, network: nn.Module, property_name: str, value: Any) -> None:
        """
        Assign ``value`` to ``property_name`` directly on the network.

        Args:
            network (nn.Module): Network to modify.
            property_name (str): Attribute name to assign.
            value (Any): Value to store.
        """
        setattr(network, property_name, value)

    def is_multi_gpu_active(self) -> bool:
        """
        Report multi-GPU status.

        Returns:
            bool: Always False — this strategy never parallelizes.
        """
        return False

    def get_device_info(self) -> Dict[str, Any]:
        """
        Describe the single configured device.

        Returns:
            Dict[str, Any]: Info dict in the common strategy format.
        """
        device_name = str(self.device)
        return {
            "strategy": "single_gpu",
            "device_count": 1,
            "primary_device": device_name,
            "devices": [device_name],
        }


class DataParallelStrategy(MultiGPUTrainingStrategy):
    """
    PyTorch DataParallel strategy for multi-GPU training.

    This strategy implements data parallelism using PyTorch's nn.DataParallel,
    which replicates the model on multiple GPUs and splits input batches across
    devices. It provides automatic handling of the DataParallel wrapper complexity
    while maintaining a clean interface for learners.

    Key Features:
        - Automatic DataParallel wrapper management
        - Intelligent single vs multi-GPU detection
        - Safe property and method access through wrapper
        - Proper device management and primary device selection
        - Comprehensive error handling and logging

    Limitations:
        - Limited to single-node training (no multi-node support)
        - Can have load balancing issues with uneven batch sizes
        - Python GIL limitations may affect performance scaling

    Attributes:
        device_ids (List[int]): List of GPU device IDs to use for training.
        primary_device (str): The primary device identifier (first in device_ids).
        is_setup (bool): Whether the strategy is currently active.
        logger (logging.Logger): Logger instance for monitoring and debugging.

    Example:
        >>> strategy = DataParallelStrategy([0, 1, 2, 3])
        >>> network = strategy.setup_training(network)  # Wraps with DataParallel
        >>> # Use strategy methods to interact with wrapped network
        >>> network = strategy.teardown_training(network)  # Removes wrapper
    """

    def __init__(self, device_ids: List[int]):
        """
        Initialize DataParallel strategy.

        Args:
            device_ids (List[int]): Non-empty list of non-negative GPU device
                IDs. The first device becomes the primary device where
                gradients are accumulated.

        Raises:
            ValueError: If device_ids is empty or contains anything other
                than non-negative integers.

        Example:
            >>> strategy = DataParallelStrategy([0, 1, 2])  # Use GPUs 0, 1, 2
            >>> strategy = DataParallelStrategy([0])        # Single GPU mode
        """
        if not device_ids:
            raise ValueError("DataParallel strategy requires at least one device ID")

        # Validate up front: a bad entry would otherwise surface much later
        # as a nonsense device string such as "cuda:-1" or "cuda:1.5".
        # (bool is excluded explicitly because it is a subclass of int.)
        invalid = [
            d for d in device_ids
            if not isinstance(d, int) or isinstance(d, bool) or d < 0
        ]
        if invalid:
            raise ValueError(
                f"DataParallel strategy received invalid device IDs {invalid}; "
                "expected non-negative integers"
            )

        # Defensive copy: if the caller mutates their list afterwards, it
        # must not desynchronize device_ids from the derived primary_device.
        self.device_ids = list(device_ids)
        self.primary_device = f"cuda:{self.device_ids[0]}"
        self.is_setup = False
        self.logger = logging.getLogger(f"{__name__}.DataParallel")

    def setup_training(self, network: nn.Module) -> nn.Module:
        """
        Setup DataParallel wrapper for multi-GPU training.

        Moves the network to the primary device and, when more than one
        device is configured, wraps it with nn.DataParallel. With a single
        device this behaves like plain single-GPU training; an already
        wrapped network is left untouched.

        Args:
            network (nn.Module): The network to prepare for training.
                Should be on CPU or a single GPU initially.

        Returns:
            nn.Module: The network prepared for multi-GPU training. Wrapped
                with DataParallel when multiple GPUs are in use.
        """
        if len(self.device_ids) == 1:
            self.logger.debug("Single device in list, using single GPU mode")
            network = network.to(self.primary_device)
        elif not isinstance(network, nn.DataParallel):
            self.logger.info(
                f"Wrapping network with DataParallel on devices: {self.device_ids}"
            )
            network = network.to(self.primary_device)
            network = nn.DataParallel(network, device_ids=self.device_ids)
        else:
            self.logger.debug("Network already wrapped with DataParallel")

        self.is_setup = True
        return network

    def teardown_training(self, network: nn.Module) -> nn.Module:
        """
        Remove DataParallel wrapper and return clean network.

        Args:
            network (nn.Module): The network to unwrap. Should be a network
                that was previously wrapped by setup_training().

        Returns:
            nn.Module: The clean, unwrapped network ready for single-GPU use
                (saving, evaluation, inference). If the network is not
                wrapped, it is returned unchanged. The result stays on the
                primary device.
        """
        if isinstance(network, nn.DataParallel):
            self.logger.debug("Unwrapping network from DataParallel")
            network = network.module

        self.is_setup = False
        return network

    def execute_method(
        self, network: nn.Module, method_name: str, *args, **kwargs
    ) -> Any:
        """
        Execute method on the underlying network, bypassing DataParallel wrapper.

        Ensures the call reaches the actual model implementation rather than
        the DataParallel wrapper, which may not expose all methods.

        Args:
            network (nn.Module): The (potentially wrapped) network.
            method_name (str): Name of the method to execute.
            *args: Positional arguments for the method.
            **kwargs: Keyword arguments for the method.

        Returns:
            Any: The return value from the method call.

        Raises:
            AttributeError: If the underlying network doesn't have the method.

        Example:
            >>> strategy.execute_method(network, 'eval')
        """
        target_network = self._get_underlying_network(network)

        if not hasattr(target_network, method_name):
            raise AttributeError(f"Network does not have method '{method_name}'")

        return getattr(target_network, method_name)(*args, **kwargs)

    def get_property(
        self, network: nn.Module, property_name: str, default: Any = None
    ) -> Any:
        """
        Get property from the underlying network, bypassing DataParallel wrapper.

        Args:
            network (nn.Module): The (potentially wrapped) network.
            property_name (str): Name of the property to retrieve.
            default (Any, optional): Default value if property doesn't exist.

        Returns:
            Any: The property value from the underlying network, or
                ``default`` when the property is absent.
        """
        target_network = self._get_underlying_network(network)
        return getattr(target_network, property_name, default)

    def set_property(self, network: nn.Module, property_name: str, value: Any) -> None:
        """
        Set property on the underlying network, bypassing DataParallel wrapper.

        Args:
            network (nn.Module): The (potentially wrapped) network.
            property_name (str): Name of the property to set.
            value (Any): Value to assign to the property.
        """
        target_network = self._get_underlying_network(network)
        setattr(target_network, property_name, value)

    def is_multi_gpu_active(self) -> bool:
        """
        Check if multi-GPU training is currently active.

        Returns:
            bool: True if using multiple GPUs and setup is complete.
        """
        return len(self.device_ids) > 1 and self.is_setup

    def get_device_info(self) -> Dict[str, Any]:
        """
        Get comprehensive device information for this strategy.

        Returns:
            Dict[str, Any]: Device information including strategy name,
                device_count, primary_device, devices, and is_setup.
        """
        return {
            "strategy": "data_parallel",
            "device_count": len(self.device_ids),
            "primary_device": self.primary_device,
            "devices": [f"cuda:{device_id}" for device_id in self.device_ids],
            "is_setup": self.is_setup,
        }

    def _get_underlying_network(self, network: nn.Module) -> nn.Module:
        """
        Get the underlying model, bypassing a DataParallel wrapper if present.

        Args:
            network (nn.Module): The potentially wrapped network.

        Returns:
            nn.Module: The underlying network implementation. Internal use only.
        """
        return network.module if isinstance(network, nn.DataParallel) else network


class CPUStrategy(MultiGPUTrainingStrategy):
    """
    Pass-through strategy for CPU-only training.

    All operations delegate directly to the network with no wrapper, making
    this strategy handy for development, debugging, and hosts without any
    GPU hardware.

    Attributes:
        logger (logging.Logger): Logger used for debug output.

    Example:
        >>> strategy = CPUStrategy()
        >>> network = strategy.setup_training(network)  # Just ensures it's on CPU
    """

    def __init__(self):
        """Initialize CPU strategy."""
        self.logger = logging.getLogger(f"{__name__}.CPU")

    def setup_training(self, network: nn.Module) -> nn.Module:
        """
        Ensure the network lives on the CPU.

        Args:
            network (nn.Module): The network to prepare.

        Returns:
            nn.Module: The same network on CPU.
        """
        self.logger.debug("Setting up CPU training")
        return network.to("cpu")

    def teardown_training(self, network: nn.Module) -> nn.Module:
        """
        No-op teardown: nothing was wrapped, so the network is returned as-is.

        Args:
            network (nn.Module): The network to clean up.

        Returns:
            nn.Module: The same network, unchanged.
        """
        return network

    def execute_method(
        self, network: nn.Module, method_name: str, *args, **kwargs
    ) -> Any:
        """
        Invoke ``method_name`` directly on the network.

        Args:
            network (nn.Module): Target of the call.
            method_name (str): Method to invoke.
            *args: Positional arguments forwarded to the method.
            **kwargs: Keyword arguments forwarded to the method.

        Returns:
            Any: Whatever the invoked method returns.

        Raises:
            AttributeError: If ``network`` lacks the requested method.
        """
        if hasattr(network, method_name):
            return getattr(network, method_name)(*args, **kwargs)
        raise AttributeError(f"Network does not have method '{method_name}'")

    def get_property(
        self, network: nn.Module, property_name: str, default: Any = None
    ) -> Any:
        """
        Read ``property_name`` from the network, or ``default`` when absent.

        Args:
            network (nn.Module): Network to read from.
            property_name (str): Attribute name to look up.
            default (Any, optional): Fallback when the attribute is missing.

        Returns:
            Any: The attribute value or ``default``.
        """
        return getattr(network, property_name, default)

    def set_property(self, network: nn.Module, property_name: str, value: Any) -> None:
        """
        Assign ``value`` to ``property_name`` directly on the network.

        Args:
            network (nn.Module): Network to modify.
            property_name (str): Attribute name to assign.
            value (Any): Value to store.
        """
        setattr(network, property_name, value)

    def is_multi_gpu_active(self) -> bool:
        """
        Report multi-GPU status.

        Returns:
            bool: Always False — CPU training never parallelizes across GPUs.
        """
        return False

    def get_device_info(self) -> Dict[str, Any]:
        """
        Describe the CPU device in the common info-dict format.

        Returns:
            Dict[str, Any]: Information about the CPU device.
        """
        # NOTE(review): device_count is 0 (meaning "zero GPUs") even though
        # devices lists one entry, which diverges from the base-class
        # contract ("number of devices being used") and from
        # SingleGPUStrategy. Preserved as-is in case callers key off 0 to
        # detect CPU mode — confirm before unifying.
        return {
            "strategy": "cpu",
            "device_count": 0,
            "primary_device": "cpu",
            "devices": ["cpu"],
        }


class DistributedDataParallelStrategy(MultiGPUTrainingStrategy):
    """
    Placeholder strategy for future DistributedDataParallel (DDP) training.

    Reserved for high-performance, multi-node distributed training built on
    PyTorch's DistributedDataParallel, which scales better than DataParallel
    thanks to per-process model replicas and efficient gradient
    communication. A complete implementation would add:
        - Multi-node distributed training capability
        - Better memory efficiency and communication patterns
        - Gradient synchronization optimizations
        - Integration with torch.distributed backends (NCCL, Gloo, MPI)

    Attributes:
        local_rank (int): Rank of this process within its node.
        world_size (int): Total process count across all nodes.
        logger (logging.Logger): Logger used for monitoring.

    Note:
        Every operation currently raises NotImplementedError; the class only
        pins down the interface that the eventual implementation will follow.

    Example (Future Usage):
        >>> strategy = DistributedDataParallelStrategy(local_rank=0, world_size=4)
        >>> # Will be implemented in future versions
    """

    def __init__(self, local_rank: int, world_size: int):
        """
        Record the distributed topology (placeholder).

        Args:
            local_rank (int): Rank of this process within the current node.
            world_size (int): Total number of processes across all nodes.

        Note:
            A real implementation would also initialize the process group
            and assign devices here.
        """
        self.local_rank = local_rank
        self.world_size = world_size
        self.logger = logging.getLogger(f"{__name__}.DDP")

    def _not_implemented(self) -> Any:
        """Shared raise for the not-yet-implemented operations below."""
        raise NotImplementedError("DDP strategy not implemented")

    def setup_training(self, network: nn.Module) -> nn.Module:
        """
        Setup DDP wrapper (placeholder).

        Raises:
            NotImplementedError: DDP strategy is not yet implemented.
        """
        # Dedicated message here: this is the entry point users hit first.
        raise NotImplementedError(
            "DistributedDataParallel strategy is not yet implemented. "
            "This is reserved for future high-performance multi-node training."
        )

    def teardown_training(self, network: nn.Module) -> nn.Module:
        """
        Teardown DDP wrapper (placeholder).

        Raises:
            NotImplementedError: DDP strategy is not yet implemented.
        """
        return self._not_implemented()

    def execute_method(
        self, network: nn.Module, method_name: str, *args, **kwargs
    ) -> Any:
        """
        Execute method on DDP network (placeholder).

        Raises:
            NotImplementedError: DDP strategy is not yet implemented.
        """
        return self._not_implemented()

    def get_property(
        self, network: nn.Module, property_name: str, default: Any = None
    ) -> Any:
        """
        Get property from DDP network (placeholder).

        Raises:
            NotImplementedError: DDP strategy is not yet implemented.
        """
        return self._not_implemented()

    def set_property(self, network: nn.Module, property_name: str, value: Any) -> None:
        """
        Set property on DDP network (placeholder).

        Raises:
            NotImplementedError: DDP strategy is not yet implemented.
        """
        self._not_implemented()

    def is_multi_gpu_active(self) -> bool:
        """
        Check if DDP multi-GPU is active (placeholder).

        Raises:
            NotImplementedError: DDP strategy is not yet implemented.
        """
        return self._not_implemented()

    def get_device_info(self) -> Dict[str, Any]:
        """
        Get DDP device information (placeholder).

        Raises:
            NotImplementedError: DDP strategy is not yet implemented.
        """
        return self._not_implemented()


def create_multi_gpu_strategy(
    device_ids: Union[List, int, str, torch.device, None] = None, strategy_type: str = "auto"
) -> MultiGPUTrainingStrategy:
    """
    Factory function to create the appropriate multi-GPU training strategy.

    Parses the many accepted device specifications into a normalized list of
    CUDA device indices, then selects a strategy either automatically (based
    on the device count) or explicitly via ``strategy_type``.

    Args:
        device_ids (Union[List, int, str, torch.device, None]): Device
            specification in various formats:
            - List[int]: Explicit list of GPU device IDs, e.g., [0, 1, 2, 3]
            - List[torch.device]: Device objects; an all-CPU list selects CPU
            - int: Single device ID, e.g., 0 (converted to [0])
            - str: "cuda" (default device 0), "cuda:N", or "cpu"
            - torch.device: PyTorch device object (CUDA or CPU)
            - None: Use CPU (equivalent to "cpu")
        strategy_type (str, optional): Strategy selection mode. Defaults to
            "auto". Options:
            - "auto": Select based on device count
            - "single": Force single GPU/CPU strategy
            - "dataparallel": Force DataParallel strategy
            - "ddp": Force DDP strategy (not yet implemented)

    Returns:
        MultiGPUTrainingStrategy: CPUStrategy for CPU configurations,
        SingleGPUStrategy for one GPU, DataParallelStrategy for several.

    Raises:
        ValueError: If device_ids format is invalid or strategy_type is unknown.
        NotImplementedError: If the "ddp" strategy is requested.

    Examples:
        >>> strategy = create_multi_gpu_strategy([0, 1, 2, 3])  # DataParallel
        >>> strategy = create_multi_gpu_strategy("cuda:2")      # SingleGPU, dev 2
        >>> strategy = create_multi_gpu_strategy(None)          # CPU mode

    Note:
        The "auto" mode provides intelligent defaults:
        - None or "cpu" → CPUStrategy
        - Single device → SingleGPUStrategy
        - Multiple devices → DataParallelStrategy
    """
    # CPU fast paths: None, the string "cpu", or a CPU torch.device all map
    # to the CPU-only strategy.
    if device_ids is None:
        return CPUStrategy()
    if isinstance(device_ids, str) and device_ids == "cpu":
        return CPUStrategy()
    if isinstance(device_ids, torch.device) and device_ids.type == "cpu":
        return CPUStrategy()

    # An empty list, or a list containing only CPU torch.device objects
    # (which could come from trainer.py), is also a CPU configuration.
    if isinstance(device_ids, list):
        if not device_ids:
            return CPUStrategy()
        if all(isinstance(d, torch.device) and d.type == "cpu" for d in device_ids):
            return CPUStrategy()

    def convert_device(device: Union[int, str, torch.device]) -> int:
        """Normalize a single GPU specifier to an integer CUDA device index."""
        if isinstance(device, int):
            return device
        if isinstance(device, str):
            # Bare "cuda" mirrors torch.device("cuda"), whose .index is None
            # and therefore maps to the default device 0.
            if device == "cuda":
                return 0
            if device.startswith("cuda:"):
                try:
                    return int(device.split(":")[1])
                except ValueError as exc:
                    # Re-raise with the factory's own message rather than the
                    # cryptic int() error (e.g. for "cuda:abc").
                    raise ValueError(f"Invalid device string format: {device}") from exc
            raise ValueError(f"Invalid device string format: {device}")
        if isinstance(device, torch.device):
            if device.type == "cuda":
                return device.index if device.index is not None else 0
            raise ValueError(f"Unsupported device type: {device.type}")
        raise ValueError(f"Invalid device type: {type(device)}")

    # Normalize device_ids to a list of integer CUDA indices.
    if isinstance(device_ids, list):
        if not all(isinstance(d, (int, str, torch.device)) for d in device_ids):
            raise ValueError(
                "All elements in device_ids must be int, str, or torch.device"
            )
        device_ids = [convert_device(d) for d in device_ids]
    elif isinstance(device_ids, (int, str, torch.device)):
        device_ids = [convert_device(device_ids)]
    else:
        raise ValueError(
            "device_ids must be a List of int, str, torch.device or a single int, str, torch.device, or None for CPU"
        )

    # Auto mode: one device -> single-GPU, several -> DataParallel.
    if strategy_type == "auto":
        strategy_type = "single" if len(device_ids) == 1 else "dataparallel"

    if strategy_type == "single":
        # device_ids is guaranteed non-empty here (empty lists returned early).
        return SingleGPUStrategy(f"cuda:{device_ids[0]}")
    if strategy_type == "dataparallel":
        return DataParallelStrategy(device_ids)
    if strategy_type == "ddp":
        raise NotImplementedError("DDP strategy not yet available")
    raise ValueError(f"Unknown strategy type: {strategy_type}")


class MultiGPUContext:
    """
    Context manager for multi-GPU training lifecycle management.

    Provides a clean, RAII-style interface over a MultiGPUTrainingStrategy:
    setup and teardown of (potentially wrapped) networks, safe method and
    property access on wrapped networks, and automatic cleanup on context
    exit or exception. Learners depend only on this interface, never on a
    concrete multi-GPU implementation (Dependency Inversion Principle).

    Attributes:
        strategy (MultiGPUTrainingStrategy): Strategy all operations delegate to.
        _managed_network: The network as returned by the strategy's setup
            (i.e. potentially wrapped); None when no network is managed.

    Example:
        >>> strategy = create_multi_gpu_strategy([0, 1, 2, 3])
        >>> with MultiGPUContext(strategy) as ctx:
        ...     network = ctx.setup_training(network)
        ...     result = ctx.execute_method(network, 'forward', inputs)
        ...     network = ctx.teardown_training(network)

    Note:
        Teardown may be called explicitly (preferred for clarity) or left to
        context exit, which performs it automatically for exception safety.
    """

    def __init__(self, strategy: MultiGPUTrainingStrategy):
        """
        Initialize the context with a strategy.

        Args:
            strategy (MultiGPUTrainingStrategy): Any concrete strategy
                implementation to delegate multi-GPU operations to.
        """
        self.strategy = strategy
        self._managed_network = None

    def __enter__(self):
        """
        Enter the context.

        Returns:
            MultiGPUContext: Self, for use in ``with`` statements.

        Note:
            Network wrapping is deferred to an explicit setup_training()
            call so callers control exactly when it happens.
        """
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Exit the context, tearing down any still-managed network.

        Args:
            exc_type: Exception type (if any).
            exc_val: Exception value (if any).
            exc_tb: Exception traceback (if any).

        Note:
            This is a safety net for exceptions; explicit teardown_training()
            calls are preferred for clarity.
        """
        if self._managed_network is not None:
            self.teardown_training()

    def setup_training(self, network: nn.Module) -> nn.Module:
        """
        Prepare a network for multi-GPU training via the strategy.

        Args:
            network (nn.Module): The network to prepare.

        Returns:
            nn.Module: The prepared network (may be wrapped).

        Note:
            Only one network can be managed per context instance; a second
            call replaces the previously tracked network.
        """
        prepared = self.strategy.setup_training(network)
        # Fix: track the *prepared* network rather than the raw module, so
        # that automatic teardown on context exit hands the strategy the
        # wrapper it created. Tracking the pre-wrap module made automatic
        # cleanup a no-op for wrapping strategies such as DataParallel.
        self._managed_network = prepared
        return prepared

    def teardown_training(self, network: Union[nn.Module, None] = None) -> nn.Module:
        """
        Teardown multi-GPU training and return the clean network.

        Args:
            network (Union[nn.Module, None], optional): The network to tear
                down. If None, the network tracked since setup_training()
                (the strategy's prepared/wrapped network) is used.

        Returns:
            nn.Module: The clean, unwrapped network.

        Raises:
            ValueError: If no network is available for teardown.

        Note:
            After teardown, the context no longer tracks any network.
        """
        target = network if network is not None else self._managed_network
        if target is None:
            raise ValueError("No network available for teardown")

        clean = self.strategy.teardown_training(target)
        self._managed_network = None
        return clean

    def execute_method(self, network: nn.Module, method_name: str, *args, **kwargs):
        """
        Execute a method on the network through the strategy.

        Args:
            network (nn.Module): The (potentially wrapped) network.
            method_name (str): Name of the method to call.
            *args: Positional arguments for the method.
            **kwargs: Keyword arguments for the method.

        Returns:
            Any: The return value from the method call.

        Example:
            >>> output = ctx.execute_method(network, 'forward', input_tensor)
        """
        return self.strategy.execute_method(network, method_name, *args, **kwargs)

    def get_property(self, network: nn.Module, property_name: str, default=None):
        """
        Get a property from the network through the strategy.

        Args:
            network (nn.Module): The (potentially wrapped) network.
            property_name (str): Name of the property to retrieve.
            default: Default value if the property doesn't exist.

        Returns:
            Any: The property value or the default.

        Example:
            >>> num_classes = ctx.get_property(network, 'num_classes', 10)
        """
        return self.strategy.get_property(network, property_name, default)

    def set_property(self, network: nn.Module, property_name: str, value):
        """
        Set a property on the network through the strategy.

        Args:
            network (nn.Module): The (potentially wrapped) network.
            property_name (str): Name of the property to set.
            value: Value to assign to the property.

        Example:
            >>> ctx.set_property(network, 'num_classes', 100)
        """
        return self.strategy.set_property(network, property_name, value)

    def is_multi_gpu_active(self) -> bool:
        """
        Report whether multi-GPU training is currently active.

        Returns:
            bool: True if multiple GPUs are being used, False otherwise.
        """
        return self.strategy.is_multi_gpu_active()

    def get_device_info(self):
        """
        Get device information from the strategy.

        Returns:
            Dict[str, Any]: Comprehensive device information.
        """
        return self.strategy.get_device_info()