import torch
import torch.nn as nn
import time
import numpy as np

# 简单的神经网络模型用于性能测试
class SimpleNet(nn.Module):
    """Small fully connected classifier used as the benchmark workload.

    Architecture: flatten -> 784 -> 512 -> ReLU -> 256 -> ReLU -> 10.
    """

    def __init__(self):
        super().__init__()
        # Layers kept as individual attributes (same names as before) so
        # state_dict keys and any external references remain stable.
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(28 * 28, 512)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        # Expects input flattenable to (batch, 784), e.g. (batch, 1, 28, 28).
        hidden = self.relu(self.fc1(self.flatten(x)))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)

def test_device_performance(device, input_data, model, iterations=100):
    """
    测试指定设备上的性能
    """
    print(f"测试设备: {device}")
    
    # 将模型和数据移到指定设备
    model = model.to(device)
    input_data = input_data.to(device)
    
    # 预热运行
    for _ in range(10):
        _ = model(input_data)
    
    # 同步设备（如果是MPS或CUDA）
    if device.type != 'cpu':
        torch.mps.synchronize() if device.type == 'mps' else torch.cuda.synchronize()
    
    # 开始计时
    start_time = time.time()
    
    # 执行多次前向传播
    for _ in range(iterations):
        output = model(input_data)
        # 同步设备以确保计算完成
        if device.type != 'cpu':
            torch.mps.synchronize() if device.type == 'mps' else torch.cuda.synchronize()
    
    # 结束计时
    end_time = time.time()
    
    # 计算平均时间
    avg_time = (end_time - start_time) / iterations
    
    return avg_time

def _benchmark_devices(devices, input_data, model_factory, iterations):
    """Benchmark every device; return ``{device.type: avg seconds}``.

    A fresh model is built per device via ``model_factory`` so state from one
    run cannot leak into the next; the input is cloned for the same reason.
    Per-device failures are reported and skipped rather than raised, so one
    broken backend doesn't abort the whole comparison.
    """
    results = {}
    for device in devices:
        try:
            avg_time = test_device_performance(device, input_data.clone(),
                                               model_factory(), iterations)
            results[device.type] = avg_time
            print(f"{device.type.upper()} 平均前向传播时间: {avg_time*1000:.4f} ms")
        except Exception as e:
            print(f"测试 {device.type} 时出错: {e}")
    return results


def main():
    """Compare SimpleNet forward-pass latency on MPS vs CPU and print a report."""
    print("PyTorch MPS vs CPU 性能测试")
    print("=" * 40)

    # Collect available devices; MPS only when the backend reports support.
    devices = []
    if torch.backends.mps.is_available():
        devices.append(torch.device("mps"))
        print("MPS设备可用")
    else:
        print("MPS设备不可用")

    devices.append(torch.device("cpu"))
    print("CPU设备可用")

    # Standard-size test batch.
    batch_size = 64
    input_data = torch.randn(batch_size, 1, 28, 28)
    print(f"\n测试数据: 批次大小={batch_size}, 输入形状={input_data.shape}")

    model = SimpleNet()
    print("模型: 简单全连接网络 (784 -> 512 -> 256 -> 10)")

    # Benchmark each device (100 timed iterations at the standard batch size).
    results = _benchmark_devices(devices, input_data, model.__class__, 100)

    # Report the relative speed of MPS vs CPU when both were measured.
    print("\n性能比较结果:")
    print("-" * 30)
    if 'mps' in results and 'cpu' in results:
        if results['mps'] < results['cpu']:
            speedup = results['cpu'] / results['mps']
            print(f"MPS 比 CPU 快 {speedup:.2f}x")
        else:
            slowdown = results['mps'] / results['cpu']
            print(f"MPS 比 CPU 慢 {slowdown:.2f}x")
    elif 'mps' in results:
        print("仅测试了 MPS 性能")
    elif 'cpu' in results:
        print("仅测试了 CPU 性能")

    # Repeat with a larger batch to see how the gap scales with workload size.
    print("\n大规模数据测试:")
    print("-" * 30)
    large_batch_size = 512
    large_input_data = torch.randn(large_batch_size, 1, 28, 28)
    print(f"大规模测试数据: 批次大小={large_batch_size}")
    _benchmark_devices(devices, large_input_data, model.__class__, 50)

# Run the benchmark only when executed as a script (not on import).
if __name__ == "__main__":
    main()