import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import time
import os

# Limit intra-op threading; a single thread improves MPS-backend stability.
torch.set_num_threads(1)
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'

# Pick the compute device: Apple-Silicon MPS if available, otherwise CPU.
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
print(f"PyTorch版本: {torch.__version__}")
print(f"设备: {device}")

# Input pipeline: convert images to tensors and normalize with the
# standard MNIST mean/std (0.1307 / 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

# Download (if needed) and load the MNIST train and test splits.
print("加载MNIST数据集...")
train_dataset = torchvision.datasets.MNIST(
    root='./data', train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.MNIST(
    root='./data', train=False, download=True, transform=transform)

# Fairly large batch size to make good use of the MPS device.
batch_size = 256
train_loader = DataLoader(
    train_dataset, 
    batch_size=batch_size, 
    shuffle=True,
    num_workers=0,        # single-process loading avoids multiprocessing issues
    pin_memory=False,     # reduce memory pressure
    persistent_workers=False
)
test_loader = DataLoader(
    test_dataset, 
    batch_size=batch_size, 
    shuffle=False,
    num_workers=0,        # single-process loading avoids multiprocessing issues
    pin_memory=False,     # reduce memory pressure
    persistent_workers=False
)

# 高性能CNN模型
class HighPerformanceCNN(nn.Module):
    """Deep CNN for 28x28 single-channel MNIST digits.

    Three conv stages (64 -> 128 -> 256 channels), each stage being two
    3x3 convolutions with batch norm, followed by 2x2 max pooling and
    spatial dropout; a two-layer fully connected head produces 10 logits.
    Spatial sizes: 28 -> 14 -> 7 -> 3, so the head sees 256*3*3 features.
    """

    def __init__(self):
        super(HighPerformanceCNN, self).__init__()
        # Stage 1: 1 -> 64 channels, 28x28 -> 14x14 after pooling.
        self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.dropout1 = nn.Dropout2d(0.25)

        # Stage 2: 64 -> 128 channels, 14x14 -> 7x7 after pooling.
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.conv4 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(128)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.dropout2 = nn.Dropout2d(0.25)

        # Stage 3: 128 -> 256 channels, 7x7 -> 3x3 after pooling.
        self.conv5 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.bn5 = nn.BatchNorm2d(256)
        self.conv6 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn6 = nn.BatchNorm2d(256)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.dropout3 = nn.Dropout2d(0.25)

        # Classifier head.
        self.fc1 = nn.Linear(256 * 3 * 3, 512)
        self.dropout4 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(512, 10)

    @staticmethod
    def _stage(x, conv_a, bn_a, conv_b, bn_b, pool, drop):
        """One conv stage: two conv+BN+ReLU layers, then pool and dropout."""
        x = F.relu(bn_a(conv_a(x)))
        x = F.relu(bn_b(conv_b(x)))
        return drop(pool(x))

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        x = self._stage(x, self.conv1, self.bn1, self.conv2, self.bn2,
                        self.pool1, self.dropout1)
        x = self._stage(x, self.conv3, self.bn3, self.conv4, self.bn4,
                        self.pool2, self.dropout2)
        x = self._stage(x, self.conv5, self.bn5, self.conv6, self.bn6,
                        self.pool3, self.dropout3)

        x = torch.flatten(x, 1)          # (N, 256*3*3)
        x = F.relu(self.fc1(x))
        x = self.dropout4(x)
        return self.fc2(x)

def train_model(model, train_loader, test_loader, epochs=5):
    """Train ``model`` with SGD, evaluating on the test set after every epoch.

    Args:
        model: network to train; it is moved to the module-level ``device``.
        train_loader: DataLoader yielding ``(data, target)`` training batches.
        test_loader: DataLoader passed to ``evaluate_model`` each epoch.
        epochs: number of full passes over the training set.

    Returns:
        The trained model (same object, resident on ``device``).
    """
    model = model.to(device)
    criterion = nn.CrossEntropyLoss()
    # SGD chosen over adaptive optimizers for stability on the MPS backend.
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)

    print(f"开始训练，共 {epochs} 轮，训练集大小: {len(train_loader.dataset)}，批次大小: {train_loader.batch_size}")

    for epoch in range(epochs):
        start_time = time.time()

        # Training phase.
        model.train()
        train_loss = 0.0
        correct_train = 0
        total_train = 0
        counted_batches = 0  # batches that actually contributed to the stats

        num_batches = len(train_loader)

        for batch_idx, (data, target) in enumerate(train_loader):
            # Move the batch to the compute device.
            data, target = data.to(device, non_blocking=False), target.to(device, non_blocking=False)

            optimizer.zero_grad(set_to_none=True)

            # Forward pass.
            output = model(data)
            loss = criterion(output, target)

            # Skip batches with NaN/Inf loss instead of corrupting the weights.
            if not torch.isfinite(loss):
                print(f"警告: 检测到无效损失值 {loss.item()}，跳过此批次")
                continue

            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()

            # Accumulate running statistics.
            train_loss += loss.item()
            counted_batches += 1
            _, predicted = output.max(1)
            total_train += target.size(0)
            correct_train += predicted.eq(target).sum().item()

            # Text progress bar.
            progress = (batch_idx + 1) / num_batches
            bar_length = 30
            block = int(round(bar_length * progress))
            bar = "#" * block + "-" * (bar_length - block)
            print(f"\rEpoch {epoch+1}/{epochs} [{bar}] {batch_idx+1}/{num_batches} ({int(progress*100)}%)", end="")

        print()  # terminate the progress line

        # Synchronize so all queued device work is finished before timing/eval.
        if device.type == 'mps' and hasattr(torch.mps, 'synchronize'):
            torch.mps.synchronize()
        elif device.type == 'cuda':
            torch.cuda.synchronize()

        # BUG FIX: average the loss over batches that were actually used;
        # dividing by len(train_loader) under-reports the loss when any
        # non-finite batches were skipped. max(..., 1) also guards against
        # division by zero if every batch was skipped.
        train_acc = 100. * correct_train / max(total_train, 1)
        avg_train_loss = train_loss / max(counted_batches, 1)

        # Evaluation phase.
        test_acc = evaluate_model(model, test_loader)

        epoch_time = time.time() - start_time

        print(f'Epoch {epoch+1}/{epochs} 完成:')
        print(f'  训练损失: {avg_train_loss:.6f}, 训练准确率: {train_acc:.2f}%, '
              f'测试准确率: {test_acc:.2f}%, 耗时: {epoch_time:.2f}秒')

        # Release cached device memory between epochs.
        if device.type == 'mps' and hasattr(torch.mps, 'empty_cache'):
            torch.mps.empty_cache()
        elif device.type == 'cuda':
            torch.cuda.empty_cache()

    return model

def evaluate_model(model, test_loader):
    """Return the classification accuracy (in percent) of ``model`` on ``test_loader``."""
    model.eval()
    correct = 0
    total = 0

    num_batches = len(test_loader)

    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(test_loader):
            try:
                data = data.to(device, non_blocking=False)
                target = target.to(device, non_blocking=False)
                predicted = model(data).max(1)[1]
                total += target.size(0)
                correct += (predicted == target).sum().item()
            except Exception as e:
                # Best-effort evaluation: report and skip a failing batch
                # rather than aborting the whole pass.
                print(f"评估过程中出现错误: {e}")
                continue

            # Text progress bar.
            done = batch_idx + 1
            frac = done / num_batches
            filled = int(round(20 * frac))
            bar = "#" * filled + "-" * (20 - filled)
            print(f"\r测试中 [{bar}] {done}/{num_batches} ({int(frac*100)}%)", end="")

    print()  # terminate the progress line

    return 100. * correct / total

def benchmark_performance():
    """Benchmark forward-pass latency of a small MLP at several batch sizes.

    Returns:
        dict mapping batch size -> average forward-pass time in seconds.
    """
    print("\n开始性能基准测试...")

    batch_sizes = [32, 64, 128, 256]
    results = {}

    def _sync():
        # Block until queued device work finishes so timings are accurate.
        if device.type == 'mps' and hasattr(torch.mps, 'synchronize'):
            torch.mps.synchronize()
        elif device.type == 'cuda':
            torch.cuda.synchronize()

    for batch_size in batch_sizes:
        print(f"\n测试批次大小: {batch_size}")

        test_loader_perf = DataLoader(
            test_dataset, 
            batch_size=batch_size, 
            shuffle=False,
            num_workers=0,
            pin_memory=False,
            persistent_workers=False
        )

        # Small MLP used purely to measure device throughput.
        simple_model = nn.Sequential(
            nn.Flatten(),
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 10)
        ).to(device)

        simple_model.eval()

        # BUG FIX: fetch the batch ONCE, outside the timed region. The
        # original called next(iter(loader)) on every iteration, so the
        # reported "forward pass time" also included iterator construction,
        # data loading/normalization and the host-to-device copy.
        data, _ = next(iter(test_loader_perf))
        data = data.to(device)

        # Warm-up passes (kernel compilation, cache warming).
        with torch.no_grad():
            for _ in range(5):
                _ = simple_model(data)
                _sync()

        # Timed measurement.
        num_iterations = 20
        start_time = time.time()

        with torch.no_grad():
            for i in range(num_iterations):
                _ = simple_model(data)
                _sync()

                # Text progress bar.
                progress = (i + 1) / num_iterations
                bar_length = 20
                block = int(round(bar_length * progress))
                bar = "#" * block + "-" * (bar_length - block)
                print(f"\r基准测试中 [{bar}] {i+1}/{num_iterations} ({int(progress*100)}%)", end="")

        print()  # terminate the progress line

        end_time = time.time()
        avg_time = (end_time - start_time) / num_iterations

        results[batch_size] = avg_time
        print(f"  平均前向传播时间: {avg_time*1000:.2f} ms")

    return results

def main():
    """Entry point: build, benchmark, train, evaluate and save the MNIST model."""
    print("MNIST手写数字识别 - 高性能MPS版本")
    print("=" * 50)

    # (Device-info display intentionally omitted to avoid potential issues.)

    # Build the network.
    print("\n创建高性能CNN模型...")
    net = HighPerformanceCNN()

    # Report parameter counts.
    total_params = sum(p.numel() for p in net.parameters())
    trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print(f"模型参数数量: {total_params:,} (可训练: {trainable_params:,})")

    # Throughput benchmark before training.
    perf_results = benchmark_performance()

    # Train the model.
    print("\n开始训练模型...")
    trained_model = train_model(net, train_loader, test_loader, epochs=5)

    # Final held-out evaluation.
    print("\n最终模型评估...")
    final_accuracy = evaluate_model(trained_model, test_loader)
    print(f"最终测试准确率: {final_accuracy:.2f}%")

    # Persist the learned weights.
    torch.save(trained_model.state_dict(), 'high_performance_mnist_model.pth')
    print("\n模型已保存为 'high_performance_mnist_model.pth'")

    # Benchmark summary.
    print("\n性能测试结果总结:")
    print("-" * 30)
    for batch_size, avg_time in perf_results.items():
        print(f"批次大小 {batch_size:3d}: {avg_time*1000:.2f} ms/批次")

    print(f"\n训练完成! 最终准确率: {final_accuracy:.2f}%")

# Run the full pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()