import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

# Fix the global RNG seed so weight init and shuffling are reproducible.
torch.manual_seed(42)

# Neural network model definition.
class NeuralNet(nn.Module):
    """Two-layer MLP for MNIST: 784 -> 512 (ReLU) -> 10 class logits."""

    def __init__(self):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 512)  # flattened image -> hidden layer
        self.fc2 = nn.Linear(512, 10)       # hidden layer -> output logits
        self.relu = nn.ReLU()

    def forward(self, x):
        """Flatten a batch of images and return raw (unnormalized) logits."""
        flat = x.view(-1, 28 * 28)
        hidden = self.relu(self.fc1(flat))
        return self.fc2(hidden)

# MNIST dataset loading.
def load_mnist_data():
    """Download MNIST (if missing) and return (train_loader, test_loader).

    Both loaders use batch size 64; the training loader reshuffles every
    epoch while the test loader preserves dataset order.
    """
    # Convert pixels to tensors in [0, 1], then normalize to roughly [-1, 1].
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
    )

    datasets = {
        split: torchvision.datasets.MNIST(
            root='./data',
            train=(split == 'train'),
            download=True,
            transform=transform,
        )
        for split in ('train', 'test')
    }

    train_loader = torch.utils.data.DataLoader(
        datasets['train'], batch_size=64, shuffle=True
    )
    test_loader = torch.utils.data.DataLoader(
        datasets['test'], batch_size=64, shuffle=False
    )
    return train_loader, test_loader

# Model training.
def train_model(model, train_loader, criterion, optimizer, num_epochs=10,
                test_loader=None):
    """Train ``model`` and record per-epoch train/test loss and accuracy.

    Args:
        model: network mapping a batch of images to class logits.
        train_loader: iterable of ``(images, labels)`` training batches.
        criterion: loss function (e.g. ``nn.CrossEntropyLoss``).
        optimizer: optimizer constructed over ``model.parameters()``.
        num_epochs: number of full passes over ``train_loader``.
        test_loader: optional evaluation loader. If omitted, falls back to a
            module-level ``test_loader`` global for backward compatibility;
            if neither exists, test metrics are recorded as ``nan``.

    Returns:
        ``(train_losses, train_accuracies, test_losses, test_accuracies)``,
        each a list with one entry per epoch (accuracies in percent).
    """
    if test_loader is None:
        # BUG FIX: the original body referenced ``test_loader`` without it
        # being a parameter, silently depending on a global created in the
        # __main__ block (NameError when called from anywhere else). Accept
        # it explicitly, but keep the global lookup so the existing call
        # site behaves exactly as before.
        test_loader = globals().get('test_loader')

    train_losses, train_accuracies = [], []
    test_losses, test_accuracies = [], []

    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0

        for images, labels in train_loader:
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            # Predicted class is the arg-max over the logits.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        train_loss = running_loss / len(train_loader)
        train_accuracy = 100 * correct / total
        train_losses.append(train_loss)
        train_accuracies.append(train_accuracy)

        # Per-epoch evaluation on the test set (skipped when unavailable).
        if test_loader is not None:
            test_loss, test_accuracy = evaluate_model(model, test_loader, criterion)
        else:
            test_loss, test_accuracy = float('nan'), float('nan')
        test_losses.append(test_loss)
        test_accuracies.append(test_accuracy)

        print(f'Epoch [{epoch+1}/{num_epochs}], '
              f'Train Loss: {train_loss:.4f}, Train Acc: {train_accuracy:.2f}%, '
              f'Test Loss: {test_loss:.4f}, Test Acc: {test_accuracy:.2f}%')

    return train_losses, train_accuracies, test_losses, test_accuracies

# Model evaluation.
def evaluate_model(model, test_loader, criterion):
    """Run ``model`` over ``test_loader``; return (mean batch loss, accuracy %).

    The model is put into eval mode and gradients are disabled; the loss
    reported is the average of per-batch losses.
    """
    model.eval()
    loss_sum = 0.0
    hits = 0
    seen = 0

    with torch.no_grad():
        for images, labels in test_loader:
            logits = model(images)
            loss_sum += criterion(logits, labels).item()
            # Predicted class per sample is the arg-max over the logits.
            predictions = logits.data.max(1)[1]
            seen += labels.size(0)
            hits += (predictions == labels).sum().item()

    return loss_sum / len(test_loader), 100 * hits / seen

# Visualize the training process.
def visualize_training(train_losses, train_accuracies, test_losses, test_accuracies):
    """Draw side-by-side per-epoch loss and accuracy curves, then show them."""
    plt.figure(figsize=(12, 5))

    # (subplot position, title, y-label, [(series, legend label), ...])
    panels = [
        (1, 'Training and Test Loss', 'Loss',
         [(train_losses, 'Train Loss'), (test_losses, 'Test Loss')]),
        (2, 'Training and Test Accuracy', 'Accuracy (%)',
         [(train_accuracies, 'Train Accuracy'), (test_accuracies, 'Test Accuracy')]),
    ]
    for position, title, ylabel, series in panels:
        plt.subplot(1, 2, position)
        for values, label in series:
            plt.plot(values, label=label)
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.legend()

    plt.tight_layout()
    plt.show()

# Visualize prediction results.
def visualize_predictions(model, test_loader):
    """Show the first six test images with predicted vs. actual labels."""
    model.eval()
    images, labels = next(iter(test_loader))

    with torch.no_grad():
        predicted = model(images).max(1)[1]

    # Plot the images alongside their predicted and true labels.
    plt.figure(figsize=(10, 4))
    for idx in range(6):
        plt.subplot(2, 3, idx + 1)
        plt.imshow(images[idx].numpy().squeeze(), cmap='gray')
        plt.title(f'Predicted: {predicted[idx]}, Actual: {labels[idx]}')
        plt.axis('off')
    plt.tight_layout()
    plt.show()

if __name__ == "__main__":
    # 初始化模型、损失函数和优化器
    model = NeuralNet()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    
    # 加载数据
    train_loader, test_loader = load_mnist_data()
    
    # 训练模型
    print("开始训练...")
    train_losses, train_accuracies, test_losses, test_accuracies = train_model(
        model, train_loader, criterion, optimizer, num_epochs=10
    )
    
    # 可视化训练过程
    visualize_training(train_losses, train_accuracies, test_losses, test_accuracies)
    
    # 可视化预测结果
    visualize_predictions(model, test_loader)
    
    # 最终测试集准确率
    final_test_loss, final_test_accuracy = evaluate_model(model, test_loader, criterion)
    print(f'最终测试集准确率: {final_test_accuracy:.2f}%')