import torch
import time
import numpy as np


def check_gpu_availability():
    """
    Print the CUDA devices visible to PyTorch and report whether any exist.

    Returns:
        bool: True when at least one CUDA GPU is available, False otherwise.
    """
    if not torch.cuda.is_available():
        print("未检测到可用的GPU设备，将只使用CPU进行计算")
        return False

    n_devices = torch.cuda.device_count()
    print(f"检测到 {n_devices} 个可用的GPU设备")
    for idx in range(n_devices):
        print(f"GPU {idx}: {torch.cuda.get_device_name(idx)}")
    return True


def compare_performance(size=10000, iterations=5):
    """
    Compare CPU vs GPU matrix-multiplication performance.

    Times ``iterations`` square matmuls on the CPU, and (when CUDA is
    available) on the GPU after a warm-up pass, then prints the average
    times, the speedup, and a numerical-agreement check.

    Args:
        size: side length of the square random matrices.
        iterations: number of timed repetitions; the mean is reported
            to reduce measurement noise.
    """
    has_gpu = check_gpu_availability()

    # Create a random matrix on the host.
    print(f"\n创建大小为 {size}x{size} 的随机矩阵...")
    matrix_cpu = torch.randn(size, size)

    # --- CPU timing ---
    print("\n开始CPU计算...")
    cpu_times = []
    result_cpu = None
    for i in range(iterations):
        # perf_counter is a monotonic, high-resolution clock — better
        # suited to interval timing than time.time().
        start_time = time.perf_counter()
        result_cpu = torch.matmul(matrix_cpu, matrix_cpu)
        # NOTE: no torch.cuda.synchronize() here — CPU matmul completes
        # synchronously, and syncing the GPU would only add unrelated
        # CUDA overhead to the CPU measurement (bug in the original).
        elapsed_time = time.perf_counter() - start_time
        cpu_times.append(elapsed_time)
        print(f"CPU迭代 {i+1}/{iterations}: {elapsed_time:.4f} 秒")

    avg_cpu_time = np.mean(cpu_times)
    print(f"\nCPU平均计算时间: {avg_cpu_time:.4f} 秒")

    # --- GPU timing (if available) ---
    if has_gpu:
        # Move the matrix to the GPU.
        matrix_gpu = matrix_cpu.cuda()

        # Warm-up: the first CUDA matmul pays one-time context and
        # kernel-initialization costs; run it untimed so those costs
        # do not skew the first measured iteration.
        torch.matmul(matrix_gpu, matrix_gpu)
        torch.cuda.synchronize()

        print("\n开始GPU计算...")
        gpu_times = []
        result_gpu = None
        for i in range(iterations):
            start_time = time.perf_counter()
            result_gpu = torch.matmul(matrix_gpu, matrix_gpu)
            torch.cuda.synchronize()  # wait for the async kernel to finish
            elapsed_time = time.perf_counter() - start_time
            gpu_times.append(elapsed_time)
            print(f"GPU迭代 {i+1}/{iterations}: {elapsed_time:.4f} 秒")

        avg_gpu_time = np.mean(gpu_times)
        print(f"\nGPU平均计算时间: {avg_gpu_time:.4f} 秒")

        # Speedup ratio.
        speedup = avg_cpu_time / avg_gpu_time
        print(f"\n性能对比:")
        print(f"CPU平均时间: {avg_cpu_time:.4f} 秒")
        print(f"GPU平均时间: {avg_gpu_time:.4f} 秒")
        print(f"GPU比CPU快 {speedup:.2f} 倍")

        # Verify CPU and GPU agree (within float tolerance), reusing the
        # results from the timing loops instead of recomputing two more
        # O(size^3) products as the original did.
        is_equal = torch.allclose(result_cpu, result_gpu.cpu(), rtol=1e-5, atol=1e-5)
        print(f"\n计算结果验证: {'一致' if is_equal else '不一致'}")


def compare_memory_usage(size=10000):
    """
    Print the memory footprint of a size x size random tensor on the CPU,
    plus the GPU copy and CUDA allocator statistics when a GPU is available.
    """
    bytes_per_mb = 1024 * 1024

    # Host-side tensor and its size in MB.
    host_tensor = torch.randn(size, size)
    host_mb = host_tensor.nelement() * host_tensor.element_size() / bytes_per_mb
    print(f"\n内存使用情况:")
    print(f"CPU矩阵内存: {host_mb:.2f} MB")

    # Without CUDA there is nothing more to report.
    if not torch.cuda.is_available():
        return

    device_tensor = host_tensor.cuda()
    device_mb = device_tensor.nelement() * device_tensor.element_size() / bytes_per_mb
    print(f"GPU矩阵内存: {device_mb:.2f} MB")

    # Current and peak memory tracked by the CUDA caching allocator.
    current_mb = torch.cuda.memory_allocated() / bytes_per_mb
    peak_mb = torch.cuda.max_memory_allocated() / bytes_per_mb
    print(f"当前GPU内存占用: {current_mb:.2f} MB")
    print(f"峰值GPU内存占用: {peak_mb:.2f} MB")


def run_comparison_tests():
    """
    Drive the performance and memory comparisons across several matrix sizes.
    """
    print("========== CPU vs GPU 性能对比工具 ==========")

    # One performance + memory pass per matrix scale.
    for test_size in [1000, 5000, 10000]:
        print(f"\n\n========== 测试规模: {test_size}x{test_size} ==========")
        compare_performance(size=test_size, iterations=3)
        compare_memory_usage(size=test_size)
        print("=====================================")


if __name__ == "__main__":
    run_comparison_tests()