import logging
import torch
import os
import gc
import functools
import time
import threading
from typing import Union, Optional, Tuple, Dict, Any, Callable

from qtorch.core.exceptions import GPUMemoryError, GPUOperationError, PrecisionConversionError

class DeviceManager:
    """
    Device manager: detects and manages the available compute devices.

    Supports CPU, MPS (Apple Silicon) and CUDA (NVIDIA GPU). Unless
    ``force_cpu`` is set, the optimal device is selected with priority
    CUDA > MPS > CPU.
    """
    def __init__(self, force_cpu: bool = False):
        """
        Initialize the device manager.

        Args:
            force_cpu: use the CPU even when a GPU is available.
        """
        self.force_cpu = force_cpu
        self.device_name = "cpu"
        self.device = torch.device("cpu")
        self.available_devices = {
            "cpu": True,
            "mps": False,
            "cuda": False
        }

        # Probe the system for usable accelerators.
        self._detect_devices()

        # Pick the best device unless the caller pinned us to the CPU.
        if not force_cpu:
            self._set_optimal_device()

        logging.info(f"初始化计算设备: {self.device_name}")

    def _detect_devices(self) -> None:
        """Detect which compute devices are available on this system."""
        # CUDA (NVIDIA GPUs).
        self.available_devices["cuda"] = torch.cuda.is_available()

        # MPS (Apple Silicon GPU). The backend may be absent on older torch
        # builds and probing it can raise, so stay defensive — but never use
        # a bare `except:` that would also swallow KeyboardInterrupt.
        try:
            mps_backend = getattr(getattr(torch, 'backends', None), 'mps', None)
            if mps_backend is not None and hasattr(mps_backend, 'is_available'):
                self.available_devices["mps"] = mps_backend.is_available()
        except Exception:
            self.available_devices["mps"] = False

        logging.info(f"可用计算设备: CPU{'✓' if self.available_devices['cpu'] else '✗'}, "
                    f"MPS{'✓' if self.available_devices['mps'] else '✗'}, "
                    f"CUDA{'✓' if self.available_devices['cuda'] else '✗'}")

    def _set_optimal_device(self) -> None:
        """Select the best available device (priority: CUDA > MPS > CPU)."""
        if self.available_devices["cuda"]:
            self.device_name = "cuda"
            self.device = torch.device("cuda")
            # Configure the CUDA environment as a side effect.
            self._setup_cuda_env()
        elif self.available_devices["mps"]:
            self.device_name = "mps"
            self.device = torch.device("mps")
        else:
            self.device_name = "cpu"
            self.device = torch.device("cpu")

    def _setup_cuda_env(self) -> None:
        """Log CUDA device info and set CUDA environment variables."""
        n_gpus = torch.cuda.device_count()
        logging.info(f"检测到 {n_gpus} 个CUDA设备")

        # Log name and total memory of each visible CUDA device.
        for i in range(n_gpus):
            device_props = torch.cuda.get_device_properties(i)
            logging.info(f"CUDA:{i} - {device_props.name}, "
                        f"内存: {device_props.total_memory / 1024**3:.1f}GB")

        # Required by cuBLAS for deterministic computation; only set when the
        # user has not already chosen a value.
        if "CUBLAS_WORKSPACE_CONFIG" not in os.environ:
            os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"

    def get_device(self) -> torch.device:
        """Return the currently selected torch.device."""
        return self.device

    def get_device_name(self) -> str:
        """Return the name of the currently selected device ("cpu"/"mps"/"cuda")."""
        return self.device_name

    def to_device(self, tensor_or_module: Union[torch.Tensor, torch.nn.Module]) -> Union[torch.Tensor, torch.nn.Module]:
        """
        Move a tensor or module to the current device.

        Args:
            tensor_or_module: the tensor or module to move.

        Returns:
            The tensor or module placed on the current device.
        """
        return tensor_or_module.to(self.device)

    def is_gpu_available(self) -> bool:
        """Return True when a CUDA or MPS accelerator was detected."""
        return self.available_devices["cuda"] or self.available_devices["mps"]

# Global DeviceManager singleton, lazily created by get_device_manager().
_DEVICE_MANAGER = None

def get_device_manager(force_cpu: bool = False) -> DeviceManager:
    """
    Return the global DeviceManager singleton, creating it on first use.

    A fresh manager is also built whenever the requested ``force_cpu`` flag
    differs from the one the cached instance was created with.

    Args:
        force_cpu: whether the CPU must be used even if a GPU is available.

    Returns:
        The shared DeviceManager instance.
    """
    global _DEVICE_MANAGER
    needs_rebuild = _DEVICE_MANAGER is None or _DEVICE_MANAGER.force_cpu != force_cpu
    if needs_rebuild:
        _DEVICE_MANAGER = DeviceManager(force_cpu=force_cpu)
    return _DEVICE_MANAGER

def get_device() -> torch.device:
    """Return the torch.device chosen by the global device manager."""
    manager = get_device_manager()
    return manager.get_device()

def to_device(tensor_or_module: Union[torch.Tensor, torch.nn.Module]) -> Union[torch.Tensor, torch.nn.Module]:
    """Move a tensor or module onto the current optimal device."""
    manager = get_device_manager()
    return manager.to_device(tensor_or_module)

def is_gpu_available() -> bool:
    """Return True when a CUDA or MPS accelerator can be used."""
    manager = get_device_manager()
    return manager.is_gpu_available()

# Data conversion utilities
def tensor_to_device(data: Any, device: Optional[torch.device] = None) -> Any:
    """
    Recursively move every tensor inside a (possibly nested) data structure
    to the given device.

    Supports nested lists, tuples (including namedtuples) and dicts; any
    other object is returned unchanged.

    Args:
        data: the data structure to convert.
        device: target device; if None, the auto-detected optimal device is used.

    Returns:
        The same structure with all contained tensors moved to ``device``.
    """
    if device is None:
        device = get_device()

    if isinstance(data, torch.Tensor):
        return data.to(device)
    elif isinstance(data, tuple) and hasattr(data, '_fields'):
        # namedtuple: its constructor takes the fields as positional
        # arguments, not a single iterable, so unpack explicitly.
        return type(data)(*(tensor_to_device(x, device) for x in data))
    elif isinstance(data, (list, tuple)):
        return type(data)(tensor_to_device(x, device) for x in data)
    elif isinstance(data, dict):
        return {k: tensor_to_device(v, device) for k, v in data.items()}
    else:
        # Non-container, non-tensor values pass through untouched.
        return data

# Mixed-precision training utilities
def get_autocast_context(enabled: bool = True, dtype: Optional[torch.dtype] = None):
    """
    Build an autocast (mixed precision) context suited to the current device.

    Args:
        enabled: whether mixed precision is enabled; when False a no-op
            context manager is returned without touching the device manager.
        dtype: precision to use; if None a per-device default is chosen
            (float16 on cuda/mps, bfloat16 on cpu).

    Returns:
        A torch.autocast context manager (contextlib.nullcontext when disabled).

    Raises:
        PrecisionConversionError: when the device does not support the
            requested precision.
    """
    if not enabled:
        # Mixed precision disabled: return an inert context manager without
        # initializing the device manager.
        from contextlib import nullcontext
        return nullcontext()

    device_name = get_device_manager().get_device_name()

    if dtype is None:
        # Per-device defaults: half precision on GPUs, bfloat16 on CPU.
        if device_name == "cuda":
            dtype = torch.float16
        elif device_name == "mps":
            dtype = torch.float16
        else:
            dtype = torch.bfloat16

    try:
        # Sanity-check that the selected backend is actually usable.
        if device_name == "cuda" and not torch.cuda.is_available():
            raise PrecisionConversionError("CUDA设备不可用，无法使用混合精度",
                                          target_dtype=str(dtype))

        mps_backend = getattr(getattr(torch, 'backends', None), 'mps', None)
        if device_name == "mps" and not (mps_backend is not None and
                                         hasattr(mps_backend, 'is_available') and
                                         mps_backend.is_available()):
            raise PrecisionConversionError("MPS设备不可用，无法使用混合精度",
                                          target_dtype=str(dtype))

        # bfloat16 requires hardware support on CUDA (Ampere or newer).
        if device_name == "cuda" and dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
            raise PrecisionConversionError("当前CUDA设备不支持bfloat16精度",
                                          target_dtype="bfloat16")

        return torch.autocast(device_type=device_name, dtype=dtype)
    except PrecisionConversionError:
        raise
    except Exception as e:
        # Wrap any unexpected failure in the project's precision error type.
        raise PrecisionConversionError(f"混合精度设置失败: {str(e)}",
                                      source_dtype="float32",
                                      target_dtype=str(dtype))

# Performance benchmark utilities
def benchmark_device(tensor_size: Tuple[int, ...] = (1000, 1000),
                    n_iterations: int = 100) -> Dict[str, float]:
    """
    Run a simple matrix-multiplication benchmark on the current device.

    Args:
        tensor_size: shape of the test tensors.
        n_iterations: number of timed iterations.

    Returns:
        Dict with benchmark results; key "matmul_time" holds the average
        seconds per matmul.

    Raises:
        GPUOperationError: if the operation fails on a GPU device.
    """
    # NOTE: `time` is already imported at module level; the old local
    # `import time` shadowed it redundantly and has been removed.
    results: Dict[str, float] = {}
    device_mgr = get_device_manager()
    device = device_mgr.get_device()
    device_name = device_mgr.get_device_name()

    def _synchronize() -> None:
        # GPU kernels execute asynchronously; block until they finish so
        # that wall-clock timing is meaningful. No-op on CPU.
        if device_name == "cuda":
            torch.cuda.synchronize()
        elif device_name == "mps" and hasattr(torch, 'mps') and hasattr(torch.mps, 'synchronize'):
            torch.mps.synchronize()

    try:
        a = torch.randn(*tensor_size, device=device)
        b = torch.randn(*tensor_size, device=device)

        # Warm up (kernel compilation, caches, clock ramp-up) before timing.
        for _ in range(10):
            _ = torch.matmul(a, b)
        _synchronize()

        # Timed loop; perf_counter is monotonic and high resolution.
        start_time = time.perf_counter()
        for _ in range(n_iterations):
            _ = torch.matmul(a, b)
        _synchronize()

        results["matmul_time"] = (time.perf_counter() - start_time) / n_iterations
        return results
    except Exception as e:
        if device_name in ["cuda", "mps"]:
            # Failures on a GPU device are re-raised as GPUOperationError.
            operation = f"矩阵乘法 (大小: {tensor_size})"
            raise GPUOperationError(
                message=f"GPU基准测试失败: {str(e)}",
                device_name=device_name,
                operation=operation
            ) from e
        # Anything else propagates unchanged.
        raise


class MemoryMonitor:
    """
    GPU memory monitor: tracks and records GPU memory usage in real time.

    Supports CUDA and MPS devices; on CPU it is effectively a no-op. Can
    also be used as a context manager that logs a usage summary on exit.
    """
    def __init__(self, device_name: Optional[str] = None,
                 warning_threshold: float = 0.8,
                 critical_threshold: float = 0.9,
                 check_interval: float = 1.0):
        """
        Initialize the memory monitor.

        Args:
            device_name: device to monitor; None means the current device.
            warning_threshold: warn when usage exceeds this fraction of total.
            critical_threshold: emergency-clean when usage exceeds this fraction.
            check_interval: polling interval (seconds) of the monitor thread.
        """
        if device_name is None:
            device_name = get_device_manager().get_device_name()

        self.device_name = device_name
        self.warning_threshold = warning_threshold
        self.critical_threshold = critical_threshold
        self.check_interval = check_interval

        # Memory statistics, in bytes.
        self.peak_memory = 0
        self.current_memory = 0
        self.total_memory = 0
        self.start_memory = 0

        # Background-monitoring state.
        self.is_monitoring = False
        self.monitor_thread = None

        self._init_memory_stats()

    @staticmethod
    def _mps_module():
        """Return torch.mps when it exposes memory stats, else None.

        torch.mps does not exist on every torch build, so probe with
        getattr instead of touching the attribute directly.
        """
        mps = getattr(torch, 'mps', None)
        if mps is not None and hasattr(mps, 'current_allocated_memory'):
            return mps
        return None

    def _init_memory_stats(self):
        """Initialize the memory statistics for the monitored device."""
        if self.device_name == "cuda":
            if torch.cuda.is_available():
                device_idx = torch.cuda.current_device()
                # Total and currently allocated device memory.
                self.total_memory = torch.cuda.get_device_properties(device_idx).total_memory
                self.current_memory = torch.cuda.memory_allocated(device_idx)
                self.start_memory = self.current_memory
                self.peak_memory = self.current_memory
            else:
                logging.warning("CUDA设备不可用，无法监控显存")
        elif self.device_name == "mps":
            mps = self._mps_module()
            if mps is not None:
                self.current_memory = mps.current_allocated_memory()
                self.start_memory = self.current_memory
                self.peak_memory = self.current_memory
                # MPS has no total-memory API; Apple Silicon uses unified
                # memory, so estimate the budget as half of system RAM.
                import psutil
                self.total_memory = psutil.virtual_memory().total * 0.5
            else:
                logging.warning("MPS设备不支持显存监控")
        else:
            logging.info("CPU设备无需监控显存")

    def get_memory_usage(self) -> Dict[str, Any]:
        """Return a snapshot dict of current/peak/total memory usage."""
        self._update_memory_stats()

        return {
            "current": self.current_memory,
            "peak": self.peak_memory,
            "total": self.total_memory,
            # Percentages are 0 when total memory is unknown (e.g. CPU).
            "current_percent": self.current_memory / self.total_memory if self.total_memory > 0 else 0,
            "peak_percent": self.peak_memory / self.total_memory if self.total_memory > 0 else 0,
            "device": self.device_name
        }

    def _update_memory_stats(self):
        """Refresh current memory usage and track the peak."""
        if self.device_name == "cuda" and torch.cuda.is_available():
            device_idx = torch.cuda.current_device()
            self.current_memory = torch.cuda.memory_allocated(device_idx)
            self.peak_memory = max(self.peak_memory, self.current_memory)
        elif self.device_name == "mps":
            mps = self._mps_module()
            if mps is not None:
                self.current_memory = mps.current_allocated_memory()
                self.peak_memory = max(self.peak_memory, self.current_memory)

    def start_monitoring(self):
        """Start the background monitoring thread (GPU devices only)."""
        if self.is_monitoring:
            return

        if self.device_name not in ["cuda", "mps"]:
            logging.info(f"设备 {self.device_name} 不支持显存监控")
            return

        self.is_monitoring = True
        # Daemon thread so it never blocks interpreter shutdown.
        self.monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True)
        self.monitor_thread.start()
        logging.info(f"已启动 {self.device_name} 显存监控")

    def stop_monitoring(self):
        """Stop the background monitoring thread."""
        self.is_monitoring = False
        if self.monitor_thread:
            self.monitor_thread.join(timeout=2.0)
            self.monitor_thread = None
        logging.info("已停止显存监控")

    def _monitor_loop(self):
        """Polling loop executed by the monitoring thread."""
        while self.is_monitoring:
            self._update_memory_stats()

            # Compare against the thresholds only when total memory is known.
            if self.total_memory > 0:
                usage_percent = self.current_memory / self.total_memory

                if usage_percent >= self.critical_threshold:
                    self._handle_critical_memory()
                elif usage_percent >= self.warning_threshold:
                    self._handle_warning_memory()

            time.sleep(self.check_interval)

    def _handle_warning_memory(self):
        """React to memory usage crossing the warning threshold."""
        logging.warning(f"显存使用率达到警告阈值: {self.current_memory / self.total_memory:.1%}")

    def _handle_critical_memory(self):
        """React to memory usage crossing the critical threshold.

        Performs an emergency cleanup and raises GPUMemoryError when the
        usage is still above the critical threshold afterwards.
        """
        usage_percent = self.current_memory / self.total_memory
        logging.error(f"显存使用率达到临界阈值: {usage_percent:.1%}，执行紧急清理")

        self.clear_memory()

        # Re-check after the cleanup pass.
        self._update_memory_stats()
        usage_percent_after = self.current_memory / self.total_memory

        if usage_percent_after >= self.critical_threshold:
            raise GPUMemoryError(
                message="显存不足，即使在紧急清理后仍超过临界阈值",
                device_name=self.device_name,
                usage_percent=usage_percent_after
            )

    def clear_memory(self):
        """Free cached GPU memory and run a garbage-collection pass."""
        if self.device_name == "cuda":
            torch.cuda.empty_cache()
            gc.collect()
        elif self.device_name == "mps":
            mps = getattr(torch, 'mps', None)
            if mps is not None and hasattr(mps, 'empty_cache'):
                mps.empty_cache()
            gc.collect()

        self._update_memory_stats()
        # Guard the percentage: total_memory is 0 on CPU or when the device
        # exposes no memory statistics, which previously caused a
        # ZeroDivisionError here.
        usage = self.current_memory / self.total_memory if self.total_memory > 0 else 0.0
        logging.info(f"显存清理完成，当前使用: {usage:.1%}")

    def __enter__(self):
        """Context-manager entry: reset the baseline statistics."""
        self._init_memory_stats()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: stop monitoring and log a usage summary."""
        self.stop_monitoring()

        usage = self.get_memory_usage()
        logging.info(f"显存使用统计: 峰值 {usage['peak'] / 1024**2:.1f}MB ({usage['peak_percent']:.1%}), "
                    f"净增加 {(usage['current'] - self.start_memory) / 1024**2:.1f}MB")

        # On failure, additionally clean up the device memory.
        if exc_type is not None:
            logging.warning(f"检测到异常 {exc_type.__name__}，执行显存清理")
            self.clear_memory()
    
def memory_safety(threshold: float = 0.9, auto_clear: bool = True, monitor_interval: float = 0.5):
    """
    Decorator that guards a function's GPU memory usage.

    While the wrapped function runs, a MemoryMonitor watches the device and
    cleans up when usage crosses ``threshold``. After the call (success or
    failure) monitoring stops and, when ``auto_clear`` is set, memory is
    cleared; a peak-usage summary is logged either way.

    Args:
        threshold: memory-usage fraction that triggers an emergency clean.
        auto_clear: whether to clear memory after the wrapped call finishes.
        monitor_interval: polling interval of the monitor, in seconds.

    Usage:
        @memory_safety(threshold=0.8)
        def my_gpu_function():
            ...  # GPU code
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            device_name = get_device_manager().get_device_name()

            # Memory protection only applies to GPU devices.
            if device_name not in ("cuda", "mps"):
                return func(*args, **kwargs)

            monitor = MemoryMonitor(
                device_name=device_name,
                critical_threshold=threshold,
                check_interval=monitor_interval,
            )
            monitor.start_monitoring()

            try:
                return func(*args, **kwargs)
            except (GPUMemoryError, GPUOperationError, PrecisionConversionError) as e:
                # GPU-specific failures get a dedicated log line.
                logging.error(f"函数 {func.__name__} 执行出现GPU异常: {e.__class__.__name__}: {str(e)}")
                monitor.clear_memory()
                raise
            except Exception as e:
                logging.error(f"函数 {func.__name__} 执行出错: {str(e)}")
                monitor.clear_memory()
                raise
            finally:
                # Always stop the monitor, optionally clean up, and report
                # the peak usage observed during the call.
                monitor.stop_monitoring()

                if auto_clear:
                    monitor.clear_memory()

                usage = monitor.get_memory_usage()
                logging.info(f"函数 {func.__name__} 显存使用: "
                            f"峰值 {usage['peak'] / 1024**2:.1f}MB ({usage['peak_percent']:.1%})")

        return wrapper

    return decorator