"""
Multi-GPU Management Interface for Continual Learning

This interface defines methods for managing multi-GPU training in continual learning,
providing abstraction for distributed training strategies.
"""

from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Union, Callable
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel, DistributedDataParallel


class MultiGPUStrategy(ABC):
    """Contract for a multi-GPU training strategy.

    A concrete strategy knows how to wrap a model for parallel execution,
    how to unwrap it afterwards, and reports whether it is engaged.
    """

    @abstractmethod
    def setup(self, model: nn.Module, device_ids: List[int]) -> nn.Module:
        """Prepare ``model`` for multi-GPU training.

        Args:
            model: The model to parallelize.
            device_ids: GPU device indices to train on.

        Returns:
            The model wrapped for multi-GPU execution.
        """
        ...

    @abstractmethod
    def cleanup(self, model: nn.Module) -> nn.Module:
        """Undo the wrapping applied by :meth:`setup`.

        Args:
            model: The model carrying the multi-GPU wrapper.

        Returns:
            The bare underlying model, without the wrapper.
        """
        ...

    @abstractmethod
    def is_active(self) -> bool:
        """Return True while this strategy is currently engaged."""
        ...

    @abstractmethod
    def get_strategy_name(self) -> str:
        """Return a human-readable identifier for this strategy."""
        ...


class MultiGPUManager(ABC):
    """Contract for orchestrating multi-GPU operations.

    Implementations own the lifecycle of the parallel wrapper and proxy
    method and attribute access through it to the underlying network.
    """

    @abstractmethod
    def setup_multi_gpu_training(self) -> None:
        """Prepare the multi-GPU training environment."""
        ...

    @abstractmethod
    def teardown_multi_gpu_training(self) -> None:
        """Dismantle the multi-GPU training environment."""
        ...

    @abstractmethod
    def is_multi_gpu_active(self) -> bool:
        """Return True while multi-GPU training is active."""
        ...

    @abstractmethod
    def call_network_method(self, method_name: str, *args, **kwargs) -> Any:
        """Invoke a method on the network, seeing through any wrapper.

        Args:
            method_name: Name of the method to invoke.
            *args: Positional arguments forwarded to the method.
            **kwargs: Keyword arguments forwarded to the method.

        Returns:
            Whatever the invoked method returns.
        """
        ...

    @abstractmethod
    def get_network_property(self, property_name: str, default: Any = None) -> Any:
        """Read an attribute of the network, seeing through any wrapper.

        Args:
            property_name: Attribute name to read.
            default: Value returned when the attribute is absent.

        Returns:
            The attribute's value, or ``default`` when it is missing.
        """
        ...

    @abstractmethod
    def set_network_property(self, property_name: str, value: Any) -> None:
        """Write an attribute of the network, seeing through any wrapper.

        Args:
            property_name: Attribute name to write.
            value: New value for the attribute.
        """
        ...


class DistributedTrainingManager(ABC):
    """Contract for managing process-group based distributed training."""

    @abstractmethod
    def init_distributed_training(
        self,
        backend: str = "nccl",
        init_method: str = "env://"
    ) -> None:
        """Bring up the distributed process group.

        Args:
            backend: Communication backend to use (defaults to "nccl").
            init_method: Rendezvous URL describing how peers find each
                other (defaults to environment-variable based discovery).
        """
        ...

    @abstractmethod
    def cleanup_distributed_training(self) -> None:
        """Tear down the distributed process group."""
        ...

    @abstractmethod
    def get_world_size(self) -> int:
        """Return the total number of participating processes."""
        ...

    @abstractmethod
    def get_rank(self) -> int:
        """Return the rank of the calling process."""
        ...

    @abstractmethod
    def is_main_process(self) -> bool:
        """Return True when the calling process is the main one."""
        ...

    @abstractmethod
    def barrier(self) -> None:
        """Block until every process has reached this point."""
        ...


class DeviceManager(ABC):
    """Contract for device discovery, tensor placement and cache control."""

    @abstractmethod
    def get_available_devices(self) -> List[str]:
        """Return identifiers of every usable device."""
        ...

    @abstractmethod
    def get_device_count(self) -> int:
        """Return how many devices are available."""
        ...

    @abstractmethod
    def get_primary_device(self) -> str:
        """Return the identifier of the main training device."""
        ...

    @abstractmethod
    def move_to_device(self, tensor: torch.Tensor, device: Optional[str] = None) -> torch.Tensor:
        """Return ``tensor`` placed on ``device``; implementations pick a
        default target when ``device`` is None."""
        ...

    @abstractmethod
    def get_device_memory_info(self, device: str) -> Dict[str, int]:
        """Return memory statistics for ``device``."""
        ...

    @abstractmethod
    def clear_device_cache(self, device: Optional[str] = None) -> None:
        """Release cached memory for ``device`` (implementation-defined
        scope when ``device`` is None)."""
        ...


class DataParallelStrategy(MultiGPUStrategy):
    """Strategy for DataParallel multi-GPU training.

    Implements the MultiGPUStrategy interface with
    ``torch.nn.parallel.DataParallel`` (single-process, multi-threaded
    replication). The previous stub inherited the abstract methods
    without implementing them, so the class could never be instantiated.
    """

    def __init__(self) -> None:
        # True between a successful setup() and the matching cleanup().
        self._active = False

    def setup(self, model: nn.Module, device_ids: List[int]) -> nn.Module:
        """Wrap ``model`` in DataParallel over ``device_ids``.

        Args:
            model: Model to replicate. NOTE(review): DataParallel expects
                the module's parameters on ``device_ids[0]`` — callers
                should place the model there first.
            device_ids: GPU indices to replicate across; must be non-empty.

        Returns:
            The DataParallel-wrapped model.

        Raises:
            ValueError: If ``device_ids`` is empty.
        """
        if not device_ids:
            raise ValueError("device_ids must contain at least one GPU id")
        wrapped = DataParallel(model, device_ids=device_ids)
        self._active = True
        return wrapped

    def cleanup(self, model: nn.Module) -> nn.Module:
        """Return the underlying module, unwrapping DataParallel if present.

        Tolerates an already-unwrapped model so teardown is idempotent.
        """
        self._active = False
        return model.module if isinstance(model, DataParallel) else model

    def is_active(self) -> bool:
        """Whether setup() has run without a matching cleanup()."""
        return self._active

    def get_strategy_name(self) -> str:
        """Identifier for this strategy."""
        return "DataParallel"


class DistributedDataParallelStrategy(MultiGPUStrategy):
    """Strategy for DistributedDataParallel multi-GPU training.

    Implements the MultiGPUStrategy interface with
    ``torch.nn.parallel.DistributedDataParallel`` (one process per GPU).
    The previous stub inherited the abstract methods without implementing
    them, so the class could never be instantiated.
    """

    def __init__(self) -> None:
        # True between a successful setup() and the matching cleanup().
        self._active = False

    def setup(self, model: nn.Module, device_ids: List[int]) -> nn.Module:
        """Wrap ``model`` in DistributedDataParallel.

        Args:
            model: Model to wrap. NOTE(review): DDP requires the default
                process group to be initialized (see
                ``torch.distributed.init_process_group``) and the model to
                already reside on this process's device.
            device_ids: Device index list for this process; must be
                non-empty (typically a single entry per DDP process).

        Returns:
            The DistributedDataParallel-wrapped model.

        Raises:
            ValueError: If ``device_ids`` is empty.
        """
        if not device_ids:
            raise ValueError("device_ids must contain at least one GPU id")
        wrapped = DistributedDataParallel(model, device_ids=device_ids)
        self._active = True
        return wrapped

    def cleanup(self, model: nn.Module) -> nn.Module:
        """Return the underlying module, unwrapping DDP if present.

        Tolerates an already-unwrapped model so teardown is idempotent.
        """
        self._active = False
        return model.module if isinstance(model, DistributedDataParallel) else model

    def is_active(self) -> bool:
        """Whether setup() has run without a matching cleanup()."""
        return self._active

    def get_strategy_name(self) -> str:
        """Identifier for this strategy."""
        return "DistributedDataParallel"


class MultiGPUContext:
    """Context manager for multi-GPU operations.

    When a ``model`` (and optionally ``device_ids``) is supplied, the
    strategy's ``setup`` runs on entry and ``cleanup`` is guaranteed on
    exit, even if the body raises. Called with only a strategy — the
    original calling convention — the context simply yields the strategy,
    preserving backward compatibility.

    Fixes a defect in the original implementation: ``_original_model``
    was initialized to None and never assigned, so the cleanup branch in
    ``__exit__`` was unreachable dead code.
    """

    def __init__(
        self,
        strategy: "MultiGPUStrategy",
        model: Optional[nn.Module] = None,
        device_ids: Optional[List[int]] = None,
    ):
        self.strategy = strategy
        self.model = model
        # Copy to decouple from the caller's list; empty when not given.
        self.device_ids = list(device_ids) if device_ids else []
        # Holds the wrapped model between __enter__ and __exit__;
        # non-None means a cleanup is still owed.
        self._original_model = None

    def __enter__(self) -> "MultiGPUStrategy":
        if self.model is not None:
            # Remember the wrapped model so __exit__ can hand it to cleanup().
            self._original_model = self.strategy.setup(self.model, self.device_ids)
        return self.strategy

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._original_model is not None:
            self.strategy.cleanup(self._original_model)
            self._original_model = None
        return False  # never suppress exceptions from the with-body


class GPUMemoryManager(ABC):
    """Contract for monitoring and managing GPU memory."""

    @abstractmethod
    def monitor_memory_usage(self) -> Dict[str, float]:
        """Return current memory-usage figures across GPUs."""
        ...

    @abstractmethod
    def optimize_memory_allocation(self) -> None:
        """Rebalance or otherwise optimize memory allocation across GPUs."""
        ...

    @abstractmethod
    def get_memory_summary(self) -> str:
        """Return a human-readable summary of memory usage."""
        ...

    @abstractmethod
    def clear_memory_cache(self) -> None:
        """Release cached memory on every GPU."""
        ...


class LoadBalancer(ABC):
    """Contract for distributing work across multiple GPUs."""

    @abstractmethod
    def balance_load(self, batch_size: int, num_gpus: int) -> List[int]:
        """Split a batch into per-GPU shares.

        Args:
            batch_size: Total batch size to divide.
            num_gpus: Number of GPUs sharing the batch.

        Returns:
            Per-GPU batch sizes, one entry per GPU.
        """
        ...

    @abstractmethod
    def get_optimal_batch_distribution(
        self,
        total_samples: int,
        gpu_capabilities: List[Dict[str, Any]]
    ) -> Dict[int, int]:
        """Assign samples to GPUs according to their capabilities.

        Args:
            total_samples: Number of samples to distribute.
            gpu_capabilities: One capability dictionary per GPU.

        Returns:
            Mapping from GPU id to the number of samples it receives.
        """
        ...