import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import numpy as np

# GoogLeNet Inception module
class Inception(nn.Module):
    """GoogLeNet-style Inception block: four parallel branches whose outputs
    are concatenated along the channel dimension.

    Branches:
        1. 1x1 convolution
        2. 1x1 reduction followed by a 3x3 convolution
        3. 1x1 reduction followed by a 5x5 convolution
        4. stride-1 3x3 max pooling followed by a 1x1 projection

    Output channels = ch1x1 + ch3x3 + ch5x5 + pool_proj; spatial size is
    preserved (all convolutions/pooling are padded accordingly).
    """

    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()

        # Branch 1: plain 1x1 convolution.
        self.branch1 = nn.Conv2d(in_channels, ch1x1, kernel_size=1)

        # Branch 2: 1x1 reduction, then 3x3 convolution (padding keeps H/W).
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_channels, ch3x3red, kernel_size=1),
            nn.Conv2d(ch3x3red, ch3x3, kernel_size=3, padding=1),
        )

        # Branch 3: 1x1 reduction, then 5x5 convolution (padding keeps H/W).
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_channels, ch5x5red, kernel_size=1),
            nn.Conv2d(ch5x5red, ch5x5, kernel_size=5, padding=2),
        )

        # Branch 4: stride-1 3x3 max pool, then 1x1 channel projection.
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_channels, pool_proj, kernel_size=1),
        )

    def forward(self, x):
        """Apply ReLU to each branch's output and concatenate on channels."""
        branches = (self.branch1, self.branch2, self.branch3, self.branch4)
        return torch.cat([F.relu(branch(x)) for branch in branches], 1)

# Simplified GoogLeNet for performance testing
class SimpleGoogLeNet(nn.Module):
    """A scaled-down GoogLeNet used to benchmark forward-pass speed.

    Expects single-channel (grayscale) image input, e.g. (N, 1, 224, 224).

    Args:
        num_classes: size of the final classification layer (default 10).
    """

    def __init__(self, num_classes=10):
        super(SimpleGoogLeNet, self).__init__()

        # Stem: 7x7 stride-2 conv -> 3x3 stride-2 max pool -> batch norm.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, padding=1)
        self.batch_norm1 = nn.BatchNorm2d(32)

        # Simplified Inception stages. Each Inception block emits
        # ch1x1 + ch3x3 + ch5x5 + pool_proj channels; that sum must match
        # the next layer's in_channels or the forward pass raises at runtime.
        self.inception3a = Inception(32, 32, 48, 64, 8, 16, 16)    # out: 32+64+16+16 = 128
        # BUGFIX: inception3a emits 128 channels (was wrongly 120).
        self.inception3b = Inception(128, 64, 64, 96, 16, 48, 32)  # out: 64+96+48+32 = 240
        self.maxpool3 = nn.MaxPool2d(3, stride=2, padding=1)

        self.inception4a = Inception(240, 96, 48, 104, 8, 24, 32)  # out: 96+104+24+32 = 256
        # BUGFIX: inception4a emits 256 channels (was wrongly 260).
        self.inception4b = Inception(256, 80, 56, 112, 12, 32, 32) # out: 80+112+32+32 = 256
        self.maxpool4 = nn.MaxPool2d(3, stride=2, padding=1)

        # Global average pooling and classifier head.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.4)
        # BUGFIX: inception4b emits 256 channels (was wrongly 260).
        self.fc = nn.Linear(256, num_classes)

    def forward(self, x):
        """Run a forward pass; returns (batch, num_classes) logits."""
        # Stem
        x = F.relu(self.conv1(x))
        x = self.maxpool1(x)
        x = self.batch_norm1(x)

        # Inception stages
        x = self.inception3a(x)
        x = self.inception3b(x)
        x = self.maxpool3(x)

        x = self.inception4a(x)
        x = self.inception4b(x)
        x = self.maxpool4(x)

        # Head: global average pool -> flatten -> dropout -> linear classifier.
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.dropout(x)
        x = self.fc(x)

        return x

def test_device_performance(device, input_data, model, iterations=50):
    """
    测试指定设备上的性能
    """
    print(f"测试设备: {device}")
    
    # 将模型和数据移到指定设备
    model = model.to(device)
    input_data = input_data.to(device)
    
    # 确保模型在评估模式下
    model.eval()
    
    # 预热运行
    with torch.no_grad():
        for _ in range(5):
            _ = model(input_data)
        
        # 同步设备（如果是MPS或CUDA）
        if device.type != 'cpu':
            if device.type == 'mps':
                torch.mps.synchronize()
            else:
                torch.cuda.synchronize()
        
        # 开始计时
        start_time = time.time()
        
        # 执行多次前向传播
        for _ in range(iterations):
            output = model(input_data)
            # 同步设备以确保计算完成
            if device.type != 'cpu':
                if device.type == 'mps':
                    torch.mps.synchronize()
                else:
                    torch.cuda.synchronize()
        
        # 结束计时
        end_time = time.time()
    
    # 计算平均时间
    avg_time = (end_time - start_time) / iterations
    
    return avg_time

def main():
    """Benchmark the simplified GoogLeNet on MPS (when available) and CPU."""
    print("GoogLeNet MPS vs CPU 性能测试")
    print("=" * 40)

    # Collect the devices to test: MPS first when supported, CPU always.
    devices = []
    if torch.backends.mps.is_available():
        devices.append(torch.device("mps"))
        print("MPS设备可用")
    else:
        print("MPS设备不可用")
    devices.append(torch.device("cpu"))
    print("CPU设备可用")

    # Build the benchmark input (smaller size suits the simplified model).
    batch_size = 16
    input_data = torch.randn(batch_size, 1, 224, 224)
    print(f"\n测试数据: 批次大小={batch_size}, 输入形状={input_data.shape}")

    # Instantiate the simplified GoogLeNet.
    model = SimpleGoogLeNet()
    print("模型: 简化版GoogLeNet")

    # Report the total parameter count.
    total_params = sum(p.numel() for p in model.parameters())
    print(f"模型参数数量: {total_params:,}")

    # Benchmark each device; a fresh model instance per device avoids any
    # state carrying over between runs.
    results = {}
    for device in devices:
        try:
            avg_time = test_device_performance(device, input_data.clone(),
                                               model.__class__(), 30)
        except Exception as e:
            print(f"测试 {device.type} 时出错: {e}")
        else:
            results[device.type] = avg_time
            print(f"{device.type.upper()} 平均前向传播时间: {avg_time*1000:.4f} ms")

    # Summarize the comparison.
    print("\n性能比较结果:")
    print("-" * 30)
    have_mps = 'mps' in results
    have_cpu = 'cpu' in results
    if have_mps and have_cpu:
        if results['mps'] < results['cpu']:
            speedup = results['cpu'] / results['mps']
            print(f"MPS 比 CPU 快 {speedup:.2f}x")
        else:
            slowdown = results['mps'] / results['cpu']
            print(f"MPS 比 CPU 慢 {slowdown:.2f}x")
    elif have_mps:
        print("仅测试了 MPS 性能")
    elif have_cpu:
        print("仅测试了 CPU 性能")

    # Repeat with a smaller batch, closer to a typical training step.
    print("\n小批次数据测试 (更接近实际训练):")
    print("-" * 40)
    small_batch_size = 8
    small_input_data = torch.randn(small_batch_size, 1, 224, 224)
    print(f"小批次测试数据: 批次大小={small_batch_size}")

    for device in devices:
        try:
            avg_time = test_device_performance(device, small_input_data.clone(),
                                               model.__class__(), 20)
        except Exception as e:
            print(f"测试 {device.type} 时出错: {e}")
        else:
            print(f"{device.type.upper()} 平均前向传播时间: {avg_time*1000:.4f} ms")
# Script entry point: run the benchmark only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()