import os
import sys
import time

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

# Pin CPU thread counts to 1 to improve MPS stability and reduce
# scheduling noise while benchmarking.
torch.set_num_threads(1)
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'

class SimpleCNN(nn.Module):
    """Small two-conv CNN for 28x28 single-channel inputs, 10 output classes.

    Layout: conv1 -> relu -> conv2 -> relu -> maxpool -> dropout ->
    flatten -> fc1 -> relu -> dropout -> fc2 (raw logits).
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        # 28x28 -> 26 (conv1) -> 24 (conv2) -> 12 (pool); 64 * 12 * 12 = 9216.
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return (N, 10) logits for a (N, 1, 28, 28) batch."""
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = self.dropout1(F.max_pool2d(h, 2))
        h = torch.flatten(h, 1)
        h = self.dropout2(F.relu(self.fc1(h)))
        return self.fc2(h)

class ComplexCNN(nn.Module):
    """Deeper four-conv CNN for 28x28 single-channel inputs, 10 output classes.

    Two conv/conv/pool stages followed by a two-layer classifier head.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.conv3 = nn.Conv2d(64, 128, 3, 1)
        self.conv4 = nn.Conv2d(128, 256, 3, 1)
        # Channel-wise dropout on the 4-D feature maps.
        self.dropout1 = nn.Dropout2d(0.25)
        # FIX: was nn.Dropout2d(0.5). dropout2 is applied to the flattened
        # 2-D (N, features) tensor in forward(); non-4D input to Dropout2d
        # is deprecated and emits a warning — plain Dropout is correct here.
        self.dropout2 = nn.Dropout(0.5)
        # 28x28 -> 26 (conv1) -> 24 (conv2) -> 12 (pool) -> 10 (conv3)
        # -> 8 (conv4) -> 4 (pool); 256 * 4 * 4 = 4096 flattened features.
        self.fc1 = nn.Linear(4096, 512)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        """Return (N, 10) logits for a (N, 1, 28, 28) batch."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)

        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)

        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = self.dropout2(x)
        x = self.fc2(x)
        return x

def test_device_performance(device, input_data, model, iterations=30, warmup=5):
    """
    测试指定设备上的性能
    """
    print(f"测试设备: {device}")
    
    # 将模型和数据移到指定设备
    model = model.to(device)
    input_data = input_data.to(device)
    
    # 确保模型在评估模式下
    model.eval()
    
    # 预热运行
    with torch.no_grad():
        for _ in range(warmup):
            _ = model(input_data)
        
        # 同步设备（如果是MPS或CUDA）
        if device.type != 'cpu':
            if hasattr(torch, 'mps') and device.type == 'mps':
                torch.mps.synchronize()
            elif hasattr(torch, 'cuda') and device.type == 'cuda':
                torch.cuda.synchronize()
        
        # 开始计时
        start_time = time.time()
        
        # 执行多次前向传播
        for _ in range(iterations):
            output = model(input_data)
            # 同步设备以确保计算完成
            if device.type != 'cpu':
                if hasattr(torch, 'mps') and device.type == 'mps':
                    torch.mps.synchronize()
                elif hasattr(torch, 'cuda') and device.type == 'cuda':
                    torch.cuda.synchronize()
        
        # 结束计时
        end_time = time.time()
    
    # 计算平均时间
    avg_time = (end_time - start_time) / iterations
    
    return avg_time

def test_training_performance(device, input_data, labels, model, iterations=20, warmup=3):
    """
    测试训练过程的性能
    """
    print(f"训练性能测试设备: {device}")
    
    # 将模型和数据移到指定设备
    model = model.to(device)
    input_data = input_data.to(device)
    labels = labels.to(device)
    
    # 定义损失函数和优化器
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    
    # 确保模型在训练模式下
    model.train()
    
    # 预热运行
    for _ in range(warmup):
        optimizer.zero_grad()
        outputs = model(input_data)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        
        # 同步设备（如果是MPS或CUDA）
        if device.type != 'cpu':
            if hasattr(torch, 'mps') and device.type == 'mps':
                torch.mps.synchronize()
            elif hasattr(torch, 'cuda') and device.type == 'cuda':
                torch.cuda.synchronize()
    
    # 开始计时
    start_time = time.time()
    
    # 执行多次训练迭代
    for _ in range(iterations):
        optimizer.zero_grad(set_to_none=True)
        outputs = model(input_data)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        
        # 同步设备以确保计算完成
        if device.type != 'cpu':
            if hasattr(torch, 'mps') and device.type == 'mps':
                torch.mps.synchronize()
            elif hasattr(torch, 'cuda') and device.type == 'cuda':
                torch.cuda.synchronize()
    
    # 结束计时
    end_time = time.time()
    
    # 计算平均时间
    avg_time = (end_time - start_time) / iterations
    
    return avg_time

def main():
    """Run forward and training benchmarks for each model/batch-size combo
    on every available device, then print MPS-vs-CPU speedup summaries."""
    print("PyTorch MPS vs CPU 综合性能测试")
    print("=" * 50)

    # Collect available devices; CPU is always present.
    devices = []
    if torch.backends.mps.is_available():
        devices.append(torch.device("mps"))
        print("MPS设备可用")
    else:
        print("MPS设备不可用")

    devices.append(torch.device("cpu"))
    print("CPU设备可用")

    # Batch sizes and iteration counts to sweep.
    test_configs = [
        {"name": "小规模数据", "batch_size": 8, "iterations": 50},
        {"name": "中等规模数据", "batch_size": 32, "iterations": 30},
        {"name": "大规模数据", "batch_size": 64, "iterations": 20}
    ]

    # Model complexities to sweep.
    models = [
        {"name": "简单CNN", "model": SimpleCNN()},
        {"name": "复杂CNN", "model": ComplexCNN()}
    ]

    print(f"\n测试环境信息:")
    print(f"PyTorch版本: {torch.__version__}")
    # FIX: original printed torch.version (a module object, rendered as its
    # repr); report the interpreter version instead.
    print(f"Python版本: {sys.version.split()[0]}")

    # results[model_name][config_name][device_type] -> {"forward": s, "train": s}
    results = {}

    for model_info in models:
        model_name = model_info["name"]
        model = model_info["model"]
        print(f"\n{'='*20} {model_name} {'='*20}")

        total_params = sum(p.numel() for p in model.parameters())
        print(f"模型参数数量: {total_params:,}")

        results[model_name] = {}

        for config in test_configs:
            config_name = config["name"]
            batch_size = config["batch_size"]
            iterations = config["iterations"]

            print(f"\n--- {config_name} (批次大小: {batch_size}) ---")

            # MNIST-shaped random data; labels are 10-class integers.
            input_data = torch.randn(batch_size, 1, 28, 28)
            labels = torch.randint(0, 10, (batch_size,))

            results[model_name][config_name] = {}

            # Forward-pass benchmark. A fresh model instance per device so
            # weight placement from a previous device doesn't leak in.
            print("前向传播性能测试:")
            for device in devices:
                try:
                    avg_time = test_device_performance(
                        device, input_data.clone(), model.__class__(),
                        iterations, warmup=5)
                    results[model_name][config_name][device.type] = {
                        "forward": avg_time
                    }
                    print(f"  {device.type.upper()}: {avg_time*1000:.4f} ms")
                except Exception as e:
                    print(f"  {device.type.upper()} 测试出错: {e}")

            # Training benchmark (half the forward iterations — each step is
            # roughly 2-3x the cost of a forward pass).
            print("训练性能测试:")
            for device in devices:
                try:
                    avg_time = test_training_performance(
                        device, input_data.clone(), labels.clone(), model.__class__(),
                        iterations//2, warmup=3)
                    results[model_name][config_name].setdefault(
                        device.type, {})["train"] = avg_time
                    print(f"  {device.type.upper()}: {avg_time*1000:.4f} ms")
                except Exception as e:
                    print(f"  {device.type.upper()} 训练测试出错: {e}")

    # Summary: MPS vs CPU ratio per model/config/metric.
    print(f"\n{'='*20} 性能比较结果 {'='*20}")

    def _compare(config_results, metric, fast_msg, slow_msg):
        # FIX: the original gate skipped the "mps" check whenever MPS was
        # unavailable, then unconditionally read config_results["mps"] and
        # raised KeyError on CPU-only machines. Require BOTH measurements.
        if not all(dev in config_results and metric in config_results[dev]
                   for dev in ("cpu", "mps")):
            return
        cpu_time = config_results["cpu"][metric]
        mps_time = config_results["mps"][metric]
        if mps_time < cpu_time:
            print(fast_msg.format(cpu_time / mps_time))
        else:
            print(slow_msg.format(mps_time / cpu_time))

    for model_name in results:
        print(f"\n{model_name}:")
        print("-" * 40)

        for config_name in results[model_name]:
            print(f"\n  {config_name}:")

            config_results = results[model_name][config_name]
            _compare(config_results, "forward",
                     "    前向传播: MPS 比 CPU 快 {:.2f}x",
                     "    前向传播: MPS 比 CPU 慢 {:.2f}x")
            _compare(config_results, "train",
                     "    训练过程: MPS 比 CPU 快 {:.2f}x",
                     "    训练过程: MPS 比 CPU 慢 {:.2f}x")

# Script entry point: run the full benchmark suite when executed directly.
if __name__ == "__main__":
    main()