#!/usr/bin/env python3
"""
GPU检测脚本 - 验证PyTorch的GPU功能
"""

import torch
import sys
import time
import numpy as np

def check_gpu_availability():
    """Print PyTorch/accelerator diagnostics and report availability.

    Probes both CUDA and Apple MPS backends, prints per-device details
    when CUDA is present, and returns True if either backend is usable.
    """
    banner = "=" * 50
    print(banner)
    print("PyTorch GPU功能检测")
    print(banner)

    # Installed PyTorch version.
    print(f"PyTorch版本: {torch.__version__}")

    # Probe both accelerator backends up front; the hasattr guard keeps
    # this working on PyTorch builds that predate the MPS backend.
    has_cuda = torch.cuda.is_available()
    print(f"CUDA可用: {has_cuda}")

    has_mps = hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
    print(f"MPS可用（苹果Metal加速）: {has_mps}")

    if has_cuda:
        # Enumerate every visible CUDA device with its key properties.
        n_devices = torch.cuda.device_count()
        print(f"GPU数量: {n_devices}")

        for idx in range(n_devices):
            props = torch.cuda.get_device_properties(idx)
            print(f"\nGPU {idx}:")
            print(f"  名称: {torch.cuda.get_device_name(idx)}")
            print(f"  计算能力: {torch.cuda.get_device_capability(idx)}")
            print(f"  总内存: {props.total_memory / 1024**3:.2f} GB")

        print(f"\n当前GPU: {torch.cuda.current_device()}")
    elif has_mps:
        print("检测到苹果MPS（Metal Performance Shaders）支持")
        print("将使用Metal进行GPU加速计算")
    else:
        print("警告: 未检测到GPU支持，将使用CPU进行计算")

    return has_cuda or has_mps

def test_tensor_operations():
    """Benchmark a 1000x1000 matrix multiply on CPU vs. the accelerator.

    Times the CPU matmul, host->device transfer, device matmul, and
    device->host transfer, then reports the max element-wise difference
    and the speedup. Prints a notice and returns early when neither
    CUDA nor MPS is available. Returns None.
    """
    print("\n" + "=" * 50)
    print("张量操作性能测试")
    print("=" * 50)

    size = (1000, 1000)
    print(f"测试张量大小: {size}")

    # CPU baseline. perf_counter is monotonic and high-resolution;
    # time.time() can jump with system clock adjustments.
    start_time = time.perf_counter()
    cpu_tensor = torch.randn(size)
    cpu_result = cpu_tensor @ cpu_tensor.T
    cpu_time = time.perf_counter() - start_time
    print(f"CPU矩阵乘法时间: {cpu_time:.4f} 秒")

    # Pick the best available accelerator.
    if torch.cuda.is_available():
        device = torch.device('cuda')
        device_name = "CUDA GPU"
    elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        device = torch.device('mps')
        device_name = "Apple MPS"
    else:
        print("GPU不可用，跳过GPU测试")
        return

    def _sync():
        # Device copies/kernels are queued asynchronously; synchronize
        # before reading the clock so timings cover completed work.
        if device.type == 'cuda':
            torch.cuda.synchronize()
        elif device.type == 'mps':
            torch.mps.synchronize()

    print(f"使用设备: {device_name}")

    # Host -> device transfer. Without the sync the async copy would be
    # under-counted (only the enqueue time would be measured).
    start_time = time.perf_counter()
    device_tensor = cpu_tensor.to(device)
    _sync()
    transfer_time = time.perf_counter() - start_time
    print(f"数据传输到{device_name}时间: {transfer_time:.4f} 秒")

    # Warm-up: the first kernel launch carries one-time setup cost that
    # would otherwise pollute the measurement below.
    _ = device_tensor @ device_tensor.T
    _sync()

    # Device compute, synchronized so the kernel has actually finished.
    start_time = time.perf_counter()
    device_result = device_tensor @ device_tensor.T
    _sync()
    device_time = time.perf_counter() - start_time
    print(f"{device_name}矩阵乘法时间: {device_time:.4f} 秒")

    # Device -> host transfer (.cpu() blocks until the data arrives).
    start_time = time.perf_counter()
    device_result_cpu = device_result.cpu()
    transfer_back_time = time.perf_counter() - start_time
    print(f"数据传回CPU时间: {transfer_back_time:.4f} 秒")

    # Cross-check numerical agreement between CPU and device results.
    diff = torch.abs(cpu_result - device_result_cpu).max().item()
    print(f"CPU/{device_name}结果差异: {diff:.6f}")

    # Report speedup, guarding against a division by zero.
    if device_time > 0:
        speedup = cpu_time / device_time
        print(f"{device_name}加速比: {speedup:.2f}x")
    else:
        print(f"{device_name}时间过短，无法计算准确加速比")

def test_memory_operations():
    """Exercise allocation and release of a large tensor on the accelerator.

    On CUDA, reports allocator statistics before and after a 5000x5000
    allocation; on MPS (which exposes limited memory introspection),
    reports timings only. Prints a notice and returns early when no
    accelerator is available. Returns None.
    """
    print("\n" + "=" * 50)
    print("内存操作测试")
    print("=" * 50)

    # Pick the best available accelerator.
    if torch.cuda.is_available():
        device = torch.device('cuda')
        device_name = "CUDA GPU"
    elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        device = torch.device('mps')
        device_name = "Apple MPS"
    else:
        print("GPU不可用，跳过内存操作测试")
        return

    print(f"使用设备: {device_name}")

    if device.type == 'cuda':
        # Current state of the CUDA caching allocator.
        print("GPU内存状态:")
        print(f"  已分配: {torch.cuda.memory_allocated() / 1024**2:.2f} MB")
        print(f"  已缓存: {torch.cuda.memory_reserved() / 1024**2:.2f} MB")

        print("\n内存分配测试:")
        start_mem = torch.cuda.memory_allocated()

        # Allocate directly on the device: randn(...).to(device) would
        # first materialize a ~100 MB tensor on the host and copy it.
        large_tensor = torch.randn(5000, 5000, device=device)
        allocated_mem = torch.cuda.memory_allocated() - start_mem
        print(f"  分配内存: {allocated_mem / 1024**2:.2f} MB")

        # Release the tensor and return cached blocks to the driver so
        # memory_allocated reflects the drop.
        del large_tensor
        torch.cuda.empty_cache()
        final_mem = torch.cuda.memory_allocated()
        print(f"  释放后内存: {final_mem / 1024**2:.2f} MB")

        print(f"  内存释放成功: {final_mem <= start_mem}")
    elif device.type == 'mps':
        print("MPS内存状态:")
        print("  注意: MPS内存监控功能有限，主要依赖系统内存管理")

        print("\n内存分配测试:")

        # Time the allocation + transfer (perf_counter is monotonic,
        # unlike time.time).
        start_time = time.perf_counter()
        large_tensor = torch.randn(5000, 5000).to(device)
        allocation_time = time.perf_counter() - start_time
        print(f"  分配时间: {allocation_time:.4f} 秒")

        # Run a matmul and synchronize so the kernel actually finishes
        # before the clock is read.
        start_time = time.perf_counter()
        result = large_tensor @ large_tensor.T
        torch.mps.synchronize()
        operation_time = time.perf_counter() - start_time
        print(f"  操作时间: {operation_time:.4f} 秒")

        # Dropping the Python references lets the backend reclaim the
        # buffers; MPS offers no allocator counters to verify with.
        start_time = time.perf_counter()
        del large_tensor, result
        release_time = time.perf_counter() - start_time
        print(f"  释放时间: {release_time:.4f} 秒")

        print("  MPS内存管理由系统自动处理")

def main():
    """Run the full diagnostic suite: detection, compute, and memory tests."""
    print("开始PyTorch GPU功能测试...")

    # Detection also tells us whether the memory test is worth running.
    gpu_available = check_gpu_availability()

    # The tensor benchmark handles the no-GPU case internally.
    test_tensor_operations()

    # The memory test only makes sense with an accelerator present.
    if gpu_available:
        test_memory_operations()

    banner = "=" * 50
    print("\n" + banner)
    print("测试完成!")
    print(banner)

# Entry point: run all diagnostics only when executed as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()
