import time

import numpy as np
import pycuda.autoinit
import pycuda.cumath as cumath
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray

def proper_gpu_performance_test():
    """Benchmark CPU vs GPU matrix multiplication across several sizes.

    Measures three things per size: CPU time, GPU time including
    host<->device transfers, and GPU compute-only time. Prints the
    averages and the compute-only speedup.
    """
    print("=== 正确的GPU性能测试 ===\n")

    # Warm up the GPU: the first kernel launches carry one-time context /
    # compilation overhead that must not pollute the measurements below.
    print("预热GPU...")
    warm = np.random.randn(100, 100).astype(np.float32)
    warm_a = gpuarray.to_gpu(warm)
    warm_b = gpuarray.to_gpu(warm)
    for _ in range(3):
        _ = gpuarray.dot(warm_a, warm_b)
    # Kernel launches are asynchronous -- block here so the warm-up work
    # is actually finished before the first timed run starts.
    cuda.Context.synchronize()

    sizes = [100, 500, 1000, 2000]
    iterations = 10  # average over several runs to reduce jitter

    for size in sizes:
        print(f"\n测试数据规模: {size}x{size}")

        # float32 test matrices (matches GPU-native single precision).
        a_cpu = np.random.randn(size, size).astype(np.float32)
        b_cpu = np.random.randn(size, size).astype(np.float32)

        # CPU baseline. perf_counter() is monotonic and high-resolution;
        # time.time() can be too coarse for the small sizes.
        cpu_times = []
        for _ in range(iterations):
            start = time.perf_counter()
            np.dot(a_cpu, b_cpu)
            cpu_times.append(time.perf_counter() - start)

        cpu_time_avg = np.mean(cpu_times[1:])  # discard the first (cold) run

        # GPU including transfer time. The .get() copy back to the host
        # blocks until the kernel finishes, so it doubles as a sync point.
        gpu_times_with_transfer = []
        for _ in range(iterations):
            start = time.perf_counter()
            a_gpu = gpuarray.to_gpu(a_cpu)
            b_gpu = gpuarray.to_gpu(b_cpu)
            result_gpu = gpuarray.dot(a_gpu, b_gpu)
            result_gpu.get()  # device -> host copy (implicit synchronize)
            gpu_times_with_transfer.append(time.perf_counter() - start)

        gpu_time_with_transfer_avg = np.mean(gpu_times_with_transfer[1:])

        # GPU compute only -- upload the data once, outside the timed loop.
        a_gpu = gpuarray.to_gpu(a_cpu)
        b_gpu = gpuarray.to_gpu(b_cpu)

        gpu_times_compute_only = []
        for _ in range(iterations):
            start = time.perf_counter()
            gpuarray.dot(a_gpu, b_gpu)
            cuda.Context.synchronize()  # wait for the async kernel to finish
            gpu_times_compute_only.append(time.perf_counter() - start)

        # Discard the first sample here too, consistent with the CPU and
        # with-transfer averages above.
        gpu_time_compute_avg = np.mean(gpu_times_compute_only[1:])

        print(f"CPU平均时间: {cpu_time_avg:.6f}s")
        print(f"GPU总时间(含传输): {gpu_time_with_transfer_avg:.6f}s")
        print(f"GPU纯计算时间: {gpu_time_compute_avg:.6f}s")
        print(f"加速比(纯计算): {cpu_time_avg/gpu_time_compute_avg:.2f}x")

        if cpu_time_avg < gpu_time_with_transfer_avg:
            print("⚠️  GPU总时间比CPU慢，数据传输开销太大")

def neural_network_performance_test():
    """Benchmark the forward pass of a small MNIST-like MLP on CPU vs GPU.

    Runs a sigmoid-activated fully connected network (784-128-64-10) at
    several batch sizes and prints CPU time, GPU time (including data
    transfer), and their ratio, with advice when the GPU loses.
    """
    print("\n=== 神经网络性能测试 ===\n")

    # MNIST-like fully connected network layout.
    layer_sizes = [784, 128, 64, 10]
    batch_sizes = [1, 32, 128]

    for batch_size in batch_sizes:
        print(f"\n批量大小: {batch_size}")

        # Synthetic batch. `targets` is generated but unused -- only the
        # forward pass is benchmarked here.
        inputs = np.random.randn(batch_size, layer_sizes[0]).astype(np.float32)
        targets = np.random.randn(batch_size, layer_sizes[-1]).astype(np.float32)

        # One random weight matrix per layer transition, shaped (out, in).
        weights = []
        for i in range(len(layer_sizes) - 1):
            w = np.random.randn(layer_sizes[i + 1], layer_sizes[i]).astype(np.float32)
            weights.append(w)

        # CPU forward pass (perf_counter: monotonic, high resolution).
        start = time.perf_counter()
        activation = inputs.T
        for w in weights:
            activation = np.dot(w, activation)
            activation = 1 / (1 + np.exp(-activation))  # sigmoid
        cpu_time = time.perf_counter() - start

        # GPU forward pass, timed including the host->device uploads and
        # the final device->host copy of the result.
        start = time.perf_counter()
        # .astype() also forces a contiguous copy -- inputs.T is a view and
        # may not upload correctly otherwise.
        inputs_gpu = gpuarray.to_gpu(inputs.T.astype(np.float32))
        weights_gpu = [gpuarray.to_gpu(w) for w in weights]

        activation_gpu = inputs_gpu
        for w_gpu in weights_gpu:
            activation_gpu = gpuarray.dot(w_gpu, activation_gpu)
            # BUGFIX: elementwise exp for GPUArrays lives in pycuda.cumath,
            # not pycuda.gpuarray -- `gpuarray.exp` raised AttributeError.
            activation_gpu = 1 / (1 + cumath.exp(-activation_gpu))  # sigmoid

        result = activation_gpu.get()  # blocks until the GPU is done
        gpu_time = time.perf_counter() - start

        print(f"CPU时间: {cpu_time:.6f}s")
        print(f"GPU时间: {gpu_time:.6f}s")
        print(f"GPU/CPU时间比: {gpu_time/cpu_time:.2f}x")

        if gpu_time > cpu_time:
            print("⚠️  GPU比CPU慢！建议：")
            if batch_size == 1:
                print("  - 使用更大的批量大小")
            else:
                print("  - 检查数据传输频率")

def check_gpu_setup():
    """Sanity-check the GPU: report memory and device info, then run a tiny dot product."""
    print("=== GPU设置检查 ===\n")

    try:
        # Free vs. total device memory, reported in GiB.
        free, total = cuda.mem_get_info()
        print(f"GPU内存: 可用 {free/1024**3:.1f}GB / 总共 {total/1024**3:.1f}GB")

        # Identify the first CUDA device and its compute capability.
        device = cuda.Device(0)
        print(f"GPU设备: {device.name()}")
        print(f"计算能力: {device.compute_capability()}")

        # Minimal end-to-end probe: upload two small vectors, dot them on
        # the device, and pull everything back to the host.
        print("\n执行简单GPU计算测试...")
        a, b = (
            gpuarray.to_gpu(np.array(values, dtype=np.float32))
            for values in ([1, 2, 3], [4, 5, 6])
        )
        result = gpuarray.dot(a, b)
        print(f"GPU计算测试: {a.get()} · {b.get()} = {result.get()}")
        print("✅ GPU基本功能正常")

    except Exception as e:
        # Deliberately broad: this is a diagnostic probe, and any failure
        # (driver, context, memory) should be reported rather than raised.
        print(f"❌ GPU设置有问题: {e}")

if __name__ == "__main__":
    # Run the setup sanity check first, then the two benchmarks in order.
    for step in (
        check_gpu_setup,
        proper_gpu_performance_test,
        neural_network_performance_test,
    ):
        step()