import torch
import sys

def check_pytorch_version():
    """Print a short banner with the PyTorch and Python versions."""
    banner_lines = [
        "=== PyTorch版本信息 ===",
        f"PyTorch版本: {torch.__version__}",
        f"Python版本: {sys.version}",
        "",
    ]
    for line in banner_lines:
        print(line)

def check_cuda_availability():
    """Report whether CUDA is usable and, if so, basic GPU details."""
    print("=== CUDA可用性检查 ===")
    cuda_ok = torch.cuda.is_available()
    print(f"CUDA是否可用: {cuda_ok}")
    if not cuda_ok:
        print("CUDA不可用，将使用CPU进行计算")
    else:
        # CUDA runtime version, device count, and the active device's identity.
        print(f"CUDA版本: {torch.version.cuda}")
        print(f"GPU数量: {torch.cuda.device_count()}")
        print(f"当前GPU: {torch.cuda.current_device()}")
        print(f"GPU名称: {torch.cuda.get_device_name()}")
    print()

def check_cudnn_info():
    """Report cuDNN availability and, when available, its version.

    Fix: the original printed ``torch.backends.cudnn.enabled`` under the label
    "cuDNN是否可用" (is cuDNN available). ``enabled`` is only a user-settable
    opt-in flag and defaults to True even on CPU-only builds with no cuDNN at
    all; ``torch.backends.cudnn.is_available()`` actually checks whether the
    cuDNN library can be used, which is what the label claims.
    """
    print("=== cuDNN信息 ===")
    cudnn_available = torch.backends.cudnn.is_available()
    print(f"cuDNN是否可用: {cudnn_available}")
    if cudnn_available:
        # version() returns an int (e.g. 8902); it is None when unavailable,
        # so only print it inside the availability guard.
        print(f"cuDNN版本: {torch.backends.cudnn.version()}")
    print()

def demonstrate_device_placement():
    """Show how tensors are created on CPU and moved to/from the GPU."""
    print("=== 设备放置演示 ===")

    # A tensor lives on the CPU by default.
    host_tensor = torch.tensor([1., 2., 3.])
    print(f"CPU张量: {host_tensor} (设备: {host_tensor.device})")

    if not torch.cuda.is_available():
        print("由于CUDA不可用，只能在CPU上进行计算")
    else:
        # Copy to the GPU, compute there, then bring the result back.
        device_tensor = host_tensor.to('cuda')
        print(f"GPU张量: {device_tensor} (设备: {device_tensor.device})")

        doubled = device_tensor * 2
        print(f"GPU计算结果: {doubled} (设备: {doubled.device})")

        back_on_host = doubled.to('cpu')
        print(f"移回CPU的结果: {back_on_host} (设备: {back_on_host.device})")
    print()

def explain_cuda_packages():
    """Print an overview of what each CUDA-related package provides."""
    print("=== CUDA相关包的作用解释 ===")

    # (package name, role, feature list) — printed in order.
    packages = (
        (
            "PyTorch CUDA版本",
            "包含针对NVIDIA GPU优化的PyTorch实现",
            (
                "GPU上的张量操作",
                "神经网络层的GPU实现",
                "自动求导机制的GPU支持",
            ),
        ),
        (
            "cudatoolkit",
            "NVIDIA CUDA工具包，提供GPU计算基础库",
            (
                "CUDA运行时和驱动API",
                "cuBLAS - GPU线性代数库",
                "cuFFT - GPU快速傅里叶变换库",
                "cuRAND - GPU随机数生成库",
                "cuSOLVER - GPU数值计算库",
            ),
        ),
        (
            "cuDNN",
            "NVIDIA深度神经网络库，专门优化深度学习操作",
            (
                "优化的卷积操作",
                "池化层实现",
                "激活函数GPU实现",
                "归一化层优化",
            ),
        ),
        (
            "NCCL",
            "NVIDIA集合通信库，用于多GPU和分布式训练",
            (
                "多GPU间的高效通信",
                "分布式训练支持",
                "集合操作优化（all-reduce, broadcast等）",
            ),
        ),
    )

    for name, role, features in packages:
        print(f"\n{name}:")
        print(f"  作用: {role}")
        print("  功能:")
        for feature in features:
            print(f"    - {feature}")

def explain_cuda_installation():
    """Print the common pip/conda installation commands for PyTorch."""
    print("\n=== CUDA安装选项 ===")

    # (section title, description, [(variant label, command), ...])
    options = (
        (
            "官方PyTorch安装命令",
            "从pytorch.org获取的官方安装命令",
            (
                ("CPU版本", "pip install torch torchvision torchaudio"),
                ("CUDA 11.8", "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118"),
                ("CUDA 11.7", "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117"),
                ("ROCm 5.4 (AMD GPU)", "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4"),
            ),
        ),
        (
            "Conda安装",
            "使用conda包管理器安装",
            (
                ("CPU版本", "conda install pytorch torchvision torchaudio cpuonly -c pytorch"),
                ("CUDA版本", "conda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia"),
            ),
        ),
    )

    for title, description, commands in options:
        print(f"\n{title}:")
        print(f"  说明: {description}")
        for label, command in commands:
            print(f"  {label}: {command}")

def explain_cuda_compatibility():
    """Print notes on matching PyTorch, CUDA driver, and toolkit versions."""
    print("\n=== CUDA兼容性说明 ===")

    notes = (
        ("版本匹配", (
            "PyTorch版本需要与CUDA版本兼容",
            "不同PyTorch版本支持的CUDA版本不同",
            "通常PyTorch会标明支持的CUDA版本",
        )),
        ("向后兼容", (
            "新版本CUDA驱动通常支持旧版本CUDA工具包",
            "但旧版本驱动不支持新版本工具包",
            "建议使用PyTorch官方推荐的CUDA版本",
        )),
        ("常见问题", (
            "CUDA版本不匹配会导致运行时错误",
            "缺少cuDNN库会导致深度学习操作无法加速",
            "多版本CUDA共存可能引起冲突",
        )),
    )

    for heading, bullets in notes:
        print(f"\n{heading}:")
        for bullet in bullets:
            print(f"  - {bullet}")

def demonstrate_performance_difference():
    """Benchmark a square matrix multiply on CPU vs GPU and print the speedup.

    Fixes over the original:
    - uses ``time.perf_counter()`` (monotonic, high-resolution) instead of
      ``time.time()``, which is the wrong clock for benchmarking;
    - guards against ZeroDivisionError when the measured GPU time rounds to
      zero at timer resolution;
    - hoists ``import time`` out of the CUDA-only branch to the function top.
    """
    import time

    print("\n=== GPU与CPU性能差异演示 ===")

    if not torch.cuda.is_available():
        print("CUDA不可用，无法演示GPU加速效果")
        return

    # Moderately large square matrices so the GPU advantage is visible.
    size = 1000
    x_cpu = torch.randn(size, size)
    y_cpu = torch.randn(size, size)

    # --- CPU timing ---
    start = time.perf_counter()
    torch.mm(x_cpu, y_cpu)
    cpu_time = time.perf_counter() - start
    print(f"CPU计算时间: {cpu_time:.4f}秒")

    # --- GPU timing ---
    x_gpu = x_cpu.to('cuda')
    y_gpu = y_cpu.to('cuda')

    # Warm-up: the first CUDA kernel launch includes one-time setup cost,
    # so run the op once (and wait for it) before timing.
    _ = torch.mm(x_gpu, y_gpu)
    torch.cuda.synchronize()

    start = time.perf_counter()
    torch.mm(x_gpu, y_gpu)
    torch.cuda.synchronize()  # CUDA ops are asynchronous; wait for completion
    gpu_time = time.perf_counter() - start
    print(f"GPU计算时间: {gpu_time:.4f}秒")

    # Only report a ratio when the denominator is meaningful.
    if gpu_time > 0:
        print(f"加速比: {cpu_time/gpu_time:.2f}x")

def explain_memory_management():
    """Summarize PyTorch GPU memory management and show current usage."""
    print("\n=== GPU内存管理 ===")

    sections = (
        ("内存分配", (
            "PyTorch自动管理GPU内存分配",
            "张量创建时分配GPU内存",
            "张量销毁时释放GPU内存",
        )),
        ("内存优化", (
            "torch.cuda.empty_cache() - 释放未使用的缓存",
            "torch.cuda.memory_summary() - 显示内存使用摘要",
            "合理复用张量避免频繁内存分配",
        )),
        ("内存监控", (
            "torch.cuda.memory_allocated() - 已分配内存",
            "torch.cuda.max_memory_allocated() - 最大分配内存",
            "torch.cuda.memory_reserved() - 已保留内存",
        )),
    )

    for heading, bullets in sections:
        print(f"\n{heading}:")
        for bullet in bullets:
            print(f"  - {bullet}")

    if torch.cuda.is_available():
        # Report live allocator statistics, converted from bytes to MB.
        mb = 1024 ** 2
        print("\n当前GPU内存使用情况:")
        print(f"  已分配内存: {torch.cuda.memory_allocated() / mb:.2f} MB")
        print(f"  已保留内存: {torch.cuda.memory_reserved() / mb:.2f} MB")

def main():
    """Run every demonstration section of the walkthrough in order."""
    print("PyTorch CUDA支持详解")
    print("=" * 50)

    # Version/availability checks first, then demos, then explanations.
    sections = (
        check_pytorch_version,
        check_cuda_availability,
        check_cudnn_info,
        demonstrate_device_placement,
        explain_cuda_packages,
        explain_cuda_installation,
        explain_cuda_compatibility,
        demonstrate_performance_difference,
        explain_memory_management,
    )
    for section in sections:
        section()

    print("\n" + "=" * 50)
    print("程序执行完成!")

# Script entry point: run the full walkthrough only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()