import torch
from torch import optim, nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt

# Assumes the LeNet5 class is defined in LeNet5.py alongside this script.
from LeNet5 import LeNet5

# Hyperparameters
batch_size = 32
learning_rate = 0.01
epochs = 40

# Run everything on the CPU
device = torch.device('cpu')

# Preprocessing: convert images to tensors and normalize with the
# standard MNIST mean/std (0.1307, 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

# Download (if needed) and load the MNIST train/test splits.
# NOTE: this runs at import time and may hit the network.
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

# Batch iterators; only the training set is shuffled.
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

# Instantiate the model and move it to the target device
model = LeNet5().to(device)

# Loss function and optimizer (SGD with momentum)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)

# Per-epoch metrics collected by train()/main() for plotting at the end
train_losses = []
test_accuracies = []

# Training loop for a single epoch
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch and append the epoch's mean batch loss to train_losses.

    Uses the module-level `criterion` for the loss and logs progress every
    100 batches.
    """
    model.train()
    running_loss = 0.0
    for step, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        logits = model(inputs)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
        if step % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(inputs), len(train_loader.dataset),
                100. * step / len(train_loader), batch_loss.item()))
    # Mean loss per batch over this epoch, recorded for plotting later.
    train_losses.append(running_loss / len(train_loader))

# Evaluation on the held-out test set
def evaluate(model, device, test_loader):
    """Evaluate `model` on `test_loader`; print metrics and return accuracy in %.

    Args:
        model: a classifier producing per-class logits of shape (N, C).
        device: torch.device to run inference on.
        test_loader: DataLoader yielding (data, target) batches.

    Returns:
        float: classification accuracy as a percentage of the dataset.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # BUG FIX: the original summed per-batch *mean* losses and divided
            # by the dataset size, understating the loss by ~batch_size.
            # Sum per-sample losses instead, so dividing by the dataset size
            # below yields the true per-sample average.
            test_loss += nn.functional.cross_entropy(
                output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    test_accuracy = 100. * correct / len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), test_accuracy))
    return test_accuracy

# Visualization of the collected training metrics
def plot_metrics(train_losses, test_accuracies):
    """Show the training-loss and test-accuracy curves in one side-by-side figure."""
    fig, (loss_ax, acc_ax) = plt.subplots(1, 2, figsize=(12, 5))

    # Left panel: training loss per epoch
    loss_ax.plot(train_losses, label='Training Loss')
    loss_ax.set_xlabel('Epoch')
    loss_ax.set_ylabel('Loss')
    loss_ax.set_title('Training Loss Curve')
    loss_ax.legend()

    # Right panel: test accuracy per epoch
    acc_ax.plot(test_accuracies, label='Test Accuracy', color='orange')
    acc_ax.set_xlabel('Epoch')
    acc_ax.set_ylabel('Accuracy (%)')
    acc_ax.set_title('Test Accuracy Curve')
    acc_ax.legend()

    fig.tight_layout()
    plt.show()

# Entry point: train, checkpoint the best model, then report and plot
def main():
    """Train for `epochs` epochs, keeping the best checkpoint by test accuracy,
    then reload it, report the final accuracy, and plot the metric curves."""
    best_accuracy = 0.0
    for epoch in range(1, epochs + 1):
        train(model, device, train_loader, optimizer, epoch)
        accuracy = evaluate(model, device, test_loader)
        test_accuracies.append(accuracy)
        # Checkpoint whenever the test accuracy improves on the best so far.
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            torch.save(model.state_dict(), 'best_mnist_model.pth')
            print('Saved new best model with accuracy: {:.2f}%'.format(best_accuracy))

    # Restore the best checkpoint before the final evaluation.
    model.load_state_dict(torch.load('best_mnist_model.pth', map_location=torch.device('cpu')))

    final_test_accuracy = evaluate(model, device, test_loader)
    print('Final test accuracy:', final_test_accuracy)

    # Visualize the per-epoch training loss and test accuracy.
    plot_metrics(train_losses, test_accuracies)

# Run the full train/evaluate/plot pipeline only when executed as a script.
if __name__ == '__main__':
    main()