#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
功能：判断CUDA是否可用，并打印设备数量和ID
作者：Claude AI Assistant
日期：创建于用户请求时
"""

import os
import platform
import re
import shutil
import subprocess
import sys


def _collect_device_info(torch, index):
    """Gather name/capability/memory stats for one CUDA device.

    Errors are recorded in the returned dict under "error" rather than
    raised, so one bad device does not abort the whole scan.

    Args:
        torch: the imported torch module (passed in to avoid re-importing).
        index (int): CUDA device index.

    Returns:
        dict: per-device information (possibly partial, plus "error").
    """
    info = {}
    try:
        info["index"] = index
        info["name"] = torch.cuda.get_device_name(index)
        info["capability"] = torch.cuda.get_device_capability(index)

        props = torch.cuda.get_device_properties(index)
        info["total_memory"] = f"{props.total_memory / 1024 / 1024 / 1024:.2f} GB"
        info["multi_processor_count"] = props.multi_processor_count
        info["is_integrated"] = props.is_integrated
        info["is_multi_gpu_board"] = props.is_multi_gpu_board

        # Memory accounting, all converted to GB.
        total_mem = props.total_memory / 1024 / 1024 / 1024
        reserved_mem = torch.cuda.memory_reserved(index) / 1024 / 1024 / 1024
        allocated_mem = torch.cuda.memory_allocated(index) / 1024 / 1024 / 1024
        # "free" here means not yet reserved by the CUDA caching allocator.
        free_mem = total_mem - reserved_mem

        info["memory"] = {
            "total": f"{total_mem:.2f} GB",
            "reserved": f"{reserved_mem:.2f} GB",
            "allocated": f"{allocated_mem:.2f} GB",
            "free": f"{free_mem:.2f} GB"
        }
    except Exception as e:
        info["error"] = str(e)
    return info


def check_cuda_with_torch():
    """Check CUDA availability via PyTorch.

    Returns:
        dict: always contains "available" (bool) and "device_count" (int).
        May also contain "torch_version", "devices" (list of per-device
        dicts), "cuda_version", "current_device", environment-variable
        diagnostics, "cuda_unavailable_reason" (list of hints), or
        "error" if PyTorch is missing or the probe itself failed.
    """
    result = {
        "available": False,
        "device_count": 0
    }

    try:
        import torch

        result["torch_version"] = torch.__version__

        cuda_available = torch.cuda.is_available()
        result["available"] = cuda_available

        device_count = torch.cuda.device_count()
        result["device_count"] = device_count

        # Collect details for every visible device.
        if device_count > 0:
            result["devices"] = [
                _collect_device_info(torch, i) for i in range(device_count)
            ]

        # When CUDA is unavailable (or no devices), record diagnostic hints.
        if not cuda_available or device_count == 0:
            result["cuda_unavailable_reason"] = []

            # CUDA_VISIBLE_DEVICES set to "" or "-1" hides every GPU.
            if "CUDA_VISIBLE_DEVICES" in os.environ:
                cuda_visible_devices = os.environ["CUDA_VISIBLE_DEVICES"]
                result["cuda_visible_devices"] = cuda_visible_devices
                if cuda_visible_devices == "" or cuda_visible_devices == "-1":
                    result["cuda_unavailable_reason"].append("环境变量CUDA_VISIBLE_DEVICES被设置为禁用所有GPU")

            # Was this PyTorch build compiled with CUDA support at all?
            # (Call once instead of twice as the original did.)
            cuda_built = torch.backends.cuda.is_built()
            result["torch_cuda_built"] = cuda_built
            if not cuda_built:
                result["cuda_unavailable_reason"].append("PyTorch没有使用CUDA编译")

            # On Windows, missing CUDA DLL directories on PATH is a common cause.
            if platform.system() == "Windows":
                result["cuda_unavailable_reason"].append("Windows系统可能需要确保PATH中包含CUDA库路径")

        # When CUDA works, record the active device and toolkit version.
        if cuda_available and device_count > 0:
            result["current_device"] = torch.cuda.current_device()
            result["cuda_version"] = torch.version.cuda

            if "CUDA_VISIBLE_DEVICES" in os.environ:
                result["cuda_visible_devices"] = os.environ["CUDA_VISIBLE_DEVICES"]

        return result
    except ImportError:
        result["error"] = "PyTorch未安装"
        return result
    except Exception as e:
        # Best-effort probe: report the failure instead of crashing the caller.
        result["error"] = str(e)
        return result


def check_cuda_command_line():
    """Probe for CUDA via command-line tools (nvidia-smi and nvcc).

    Returns:
        dict: always contains "available", "nvidia_smi_available" and
        "nvcc_available" (bools). May also contain raw tool output,
        parsed "cuda_version" / "driver_version" / "cuda_version_nvcc",
        "gpu_devices" (list of {"id", "name"}), environment-variable
        diagnostics, and error strings for each failed probe.
    """
    result = {
        "available": False,
        "nvidia_smi_available": False,
        "nvcc_available": False
    }

    try:
        # Record CUDA-related environment variables for diagnostics.
        for env_var, key in (("CUDA_HOME", "cuda_home"),
                             ("CUDA_PATH", "cuda_path"),
                             ("CUDA_VISIBLE_DEVICES", "cuda_visible_devices")):
            if env_var in os.environ:
                result[key] = os.environ[env_var]

        # shutil.which is cross-platform; it replaces the original
        # shell=True "where"/"which" subprocess calls (safer, no shell).
        has_nvidia_smi = shutil.which("nvidia-smi") is not None
        result["nvidia_smi_available"] = has_nvidia_smi

        if has_nvidia_smi:
            try:
                # List form (shell=False) avoids shell quoting/injection issues.
                nvidia_smi_output = subprocess.check_output(["nvidia-smi"]).decode("utf-8")
                result["available"] = True
                result["nvidia_smi_output"] = nvidia_smi_output

                lines = nvidia_smi_output.split("\n")

                # Extract the CUDA version. Take only the first token:
                # the original .strip() kept the trailing table border
                # ("12.2      |") because "|" is not whitespace.
                for line in lines:
                    if "CUDA Version:" in line:
                        tokens = line.split("CUDA Version:")[1].split()
                        if tokens:
                            result["cuda_version"] = tokens[0]
                        break

                # Extract the driver version (first whitespace-separated token).
                for line in lines:
                    if "Driver Version:" in line:
                        result["driver_version"] = line.split("Driver Version:")[1].strip().split()[0]
                        break

                # Parse GPU table rows into id/name pairs.
                try:
                    gpu_lines = re.findall(r"\|\s+(\d+)\s+[^|]+\|\s+([^|]+)\|", nvidia_smi_output)
                    if gpu_lines:
                        result["gpu_devices"] = [
                            {"id": int(gpu_id), "name": gpu_name.strip()}
                            for gpu_id, gpu_name in gpu_lines
                        ]
                except Exception as e:
                    result["gpu_parse_error"] = str(e)
            except subprocess.CalledProcessError:
                result["nvidia_smi_error"] = "nvidia-smi命令执行失败"
        else:
            result["nvidia_smi_error"] = "未找到nvidia-smi命令"

        # Probe the nvcc compiler; FileNotFoundError covers a missing binary
        # now that we no longer go through a shell.
        try:
            nvcc_output = subprocess.check_output(["nvcc", "--version"]).decode("utf-8")
            result["nvcc_available"] = True
            result["nvcc_output"] = nvcc_output

            # The release line looks like "... release 12.2, V12.2.140".
            for line in nvcc_output.split("\n"):
                if "release" in line and "V" in line:
                    version_match = re.search(r"V(\d+\.\d+\.\d+)", line)
                    if version_match:
                        result["cuda_version_nvcc"] = version_match.group(1)
        except (subprocess.CalledProcessError, FileNotFoundError):
            result["nvcc_error"] = "未找到nvcc命令或执行失败"

    except Exception as e:
        # Best-effort probe: report the failure instead of crashing the caller.
        result["error"] = str(e)

    return result


def main():
    """
    Entry point: run both CUDA probes (PyTorch and command-line tools)
    and print a human-readable report plus a final conclusion.
    """
    print("=== CUDA可用性检查 ===")
    
    # Probe CUDA through PyTorch first.
    print("\n[检查PyTorch中的CUDA]")
    torch_result = check_cuda_with_torch()
    
    is_available = torch_result.get("available", False)
    device_count = torch_result.get("device_count", 0)
    
    print(f"  torch.cuda.is_available(): {is_available}")
    print(f"  torch.cuda.device_count(): {device_count}")
    
    if "cuda_visible_devices" in torch_result:
        print(f"  CUDA_VISIBLE_DEVICES: {torch_result['cuda_visible_devices']}")
    
    if is_available and device_count > 0:
        print("✅ PyTorch检测到CUDA可用!")
        print(f"  - CUDA版本: {torch_result.get('cuda_version', '未知')}")
        print(f"  - PyTorch版本: {torch_result.get('torch_version', '未知')}")
        print(f"  - 当前设备ID: {torch_result.get('current_device', '未知')}")
        
        # Print the detail record for every detected device.
        print("\n  [GPU设备列表]")
        if "devices" in torch_result:
            for device in torch_result["devices"]:
                device_id = device.get("index", "未知")
                device_name = device.get("name", "未知")
                print(f"  - GPU {device_id}: {device_name}")
                
                if "capability" in device:
                    print(f"    · 计算能力: {device['capability'][0]}.{device['capability'][1]}")
                if "multi_processor_count" in device:
                    print(f"    · 多处理器数量: {device['multi_processor_count']}")
                
                # Memory breakdown (total / allocated / reserved / free).
                if "memory" in device:
                    mem = device["memory"]
                    print(f"    · 总内存: {mem['total']}")
                    print(f"    · 已分配: {mem['allocated']}")
                    print(f"    · 已预留: {mem['reserved']}")
                    print(f"    · 可用: {mem['free']}")
                
                # Per-device probe error, if any was recorded.
                if "error" in device:
                    print(f"    · 错误: {device['error']}")
    else:
        print("❌ PyTorch未检测到CUDA!")
        
        if "error" in torch_result:
            print(f"  - 错误: {torch_result['error']}")
            
        if "cuda_unavailable_reason" in torch_result and torch_result["cuda_unavailable_reason"]:
            print("  - 可能的原因:")
            for reason in torch_result["cuda_unavailable_reason"]:
                print(f"    · {reason}")
                
        if "torch_cuda_built" in torch_result:
            if torch_result["torch_cuda_built"]:
                print("  - PyTorch已使用CUDA编译")
            else:
                print("  - PyTorch未使用CUDA编译，考虑重新安装支持CUDA的PyTorch版本")
    
    # Second probe: command-line tools (nvidia-smi, nvcc).
    print("\n[检查命令行CUDA工具]")
    cmd_result = check_cuda_command_line()
    
    if cmd_result["available"]:
        print("✅ 系统中检测到NVIDIA GPU!")
        if "driver_version" in cmd_result:
            print(f"  - NVIDIA驱动版本: {cmd_result['driver_version']}")
        if "cuda_version" in cmd_result:
            print(f"  - CUDA版本(nvidia-smi): {cmd_result['cuda_version']}")
        
        # Devices parsed from the nvidia-smi table output.
        if "gpu_devices" in cmd_result:
            print("\n  [nvidia-smi检测到的GPU设备]")
            for device in cmd_result["gpu_devices"]:
                print(f"  - GPU {device['id']}: {device['name']}")
        
        if "nvcc_available" in cmd_result and cmd_result["nvcc_available"]:
            print("\n✅ 检测到NVCC编译器!")
            if "cuda_version_nvcc" in cmd_result:
                print(f"  - CUDA版本(nvcc): {cmd_result['cuda_version_nvcc']}")
    else:
        print("❌ 系统中未检测到NVIDIA GPU!")
        
        if "nvidia_smi_available" in cmd_result and not cmd_result["nvidia_smi_available"]:
            print("  - 未找到nvidia-smi命令")
            
        if "nvidia_smi_error" in cmd_result:
            print(f"  - nvidia-smi错误: {cmd_result['nvidia_smi_error']}")
            
        if "nvcc_error" in cmd_result:
            print(f"  - NVCC错误: {cmd_result['nvcc_error']}")
            
        # Show environment-variable diagnostics collected by the probe.
        if "cuda_home" in cmd_result:
            print(f"  - CUDA_HOME={cmd_result['cuda_home']}")
        if "cuda_path" in cmd_result:
            print(f"  - CUDA_PATH={cmd_result['cuda_path']}")
        if "cuda_visible_devices" in cmd_result:
            print(f"  - CUDA_VISIBLE_DEVICES={cmd_result['cuda_visible_devices']}")
    
    # Final verdict is based on the PyTorch probe, since that is what
    # GPU-accelerated Python programs will actually see.
    print("\n[结论]")
    if is_available and device_count > 0:
        print(f"✅ CUDA可用。检测到 {device_count} 个GPU设备，可以运行需要GPU加速的程序。")
    else:
        print("❌ CUDA不可用。只能使用CPU进行计算。")
        print("可能的原因:")
        print("  1. 没有安装NVIDIA GPU或GPU不支持CUDA")
        print("  2. NVIDIA驱动程序未正确安装或版本过旧")
        print("  3. CUDA运行时未正确安装")
        print("  4. PyTorch没有使用CUDA编译")
        print("  5. 环境变量CUDA_VISIBLE_DEVICES被设置为禁用所有GPU")
        print("\n建议解决方案:")
        print("  1. 确认系统中是否有NVIDIA GPU (使用设备管理器或lspci命令)")
        print("  2. 安装/更新NVIDIA驱动程序")
        print("  3. 安装/更新CUDA工具包")
        print("  4. 重新安装支持CUDA的PyTorch版本:")
        print("     - 访问 https://pytorch.org/get-started/locally/ 选择适合您的安装命令")
        print("  5. 确认环境变量CUDA_VISIBLE_DEVICES没有被设置为-1或空字符串")

# Run the checks only when executed as a script, not on import.
if __name__ == "__main__":
    main()
