import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

# Model definition
class SimpleNet(nn.Module):
    """A small fully connected classifier for 28x28 grayscale digit images.

    Architecture: 784 -> 128 -> 64 -> 10, with ReLU after each hidden layer.
    The forward pass returns raw class logits (no softmax), as expected by
    ``nn.CrossEntropyLoss``.
    """

    def __init__(self):
        super().__init__()
        # Three fully connected layers mapping the flattened image to logits.
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for a batch of images."""
        flat = x.view(-1, 28 * 28)      # flatten each image to a 784-vector
        hidden = self.fc1(flat).relu()  # first hidden layer + ReLU
        hidden = self.fc2(hidden).relu()  # second hidden layer + ReLU
        return self.fc3(hidden)          # output layer (logits)

# Load the MNIST dataset.
# Normalize((0.5,), (0.5,)) maps ToTensor's [0, 1] pixel values to [-1, 1].
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
train_set = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)  # training split (downloads on first run)
test_set = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)  # held-out test split

train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)   # shuffled mini-batches for training
test_loader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=False)    # deterministic order for evaluation

# Initialize the model, loss function, and optimizer.
model = SimpleNet()                                         # the network defined above
criterion = nn.CrossEntropyLoss()                            # cross-entropy over the 10 digit classes (expects raw logits)
optimizer = optim.Adam(model.parameters(), lr=0.001)         # Adam optimizer with the common default learning rate

# Training loop
def train(model, train_loader, criterion, optimizer, epochs=5):
    """Train ``model`` on ``train_loader`` for ``epochs`` epochs.

    Args:
        model: network to optimize; put into training mode here.
        train_loader: iterable of (inputs, labels) batches.
        criterion: loss function applied to (logits, labels).
        optimizer: optimizer updating ``model``'s parameters.
        epochs: number of passes over the training data (default 5).

    Prints the average batch loss after each epoch.
    """
    model.train()  # enable training-mode behavior (dropout, batch norm, ...)
    for epoch in range(epochs):
        epoch_loss = 0.0
        for batch_inputs, batch_labels in train_loader:
            optimizer.zero_grad()                              # clear stale gradients
            loss = criterion(model(batch_inputs), batch_labels)  # forward pass + loss
            loss.backward()                                    # backpropagate
            optimizer.step()                                   # apply the update
            epoch_loss += loss.item()
        print(f"Epoch {epoch+1}, Loss: {epoch_loss/len(train_loader)}")

# 测试模型
def test(model, test_loader):                                   # 测试函数
    model.eval()                                                # 设置模型为评估模式
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_loader:                      # 遍历测试数据集
            outputs = model(inputs)                             # 正向传播
            _, predicted = torch.max(outputs.data, 1)            # 获取预测结果
            total += labels.size(0)
            correct += (predicted == labels).sum().item()       # 统计正确预测数量
    accuracy = correct / total                                  # 计算准确率
    print(f"Accuracy on test set: {accuracy}")

# Train the model on MNIST.
train(model, train_loader, criterion, optimizer)

# Evaluate on the held-out test set.
test(model, test_loader)

# Persist only the learned parameters (state_dict), not the whole module.
torch.save(model.state_dict(), 'trained_model.pth')            # save trained weights


# Reload the parameters into a fresh instance to demonstrate round-tripping.
# NOTE(review): torch.load without weights_only=True unpickles arbitrary
# objects; fine for a file this script just wrote, but never load untrusted
# checkpoints this way.
model = SimpleNet()
model.load_state_dict(torch.load('trained_model.pth'))
model.eval()  # switch to inference mode before predicting

# Single-image inference
def predict(model, image):
    """Classify a single 28x28 grayscale image and return the predicted digit.

    Args:
        model: trained network producing class logits.
        image: either a raw PIL image / ndarray (preprocessed here with the
            same transform used for training) or an already-transformed
            tensor of shape (1, 28, 28), e.g. a sample taken directly from
            the transformed MNIST dataset.

    Returns:
        int: index of the highest-scoring class.
    """
    # Bug fix: the original unconditionally re-applied ToTensor(), which
    # requires a PIL Image or ndarray and raises TypeError for tensor inputs
    # such as samples from the transformed dataset (exactly what the demo
    # below passes in). Only preprocess when the input is not yet a tensor.
    if not torch.is_tensor(image):
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
        image = transform(image)
    image = image.unsqueeze(0)  # add the batch dimension: (1, 28, 28) -> (1, 1, 28, 28)
    with torch.no_grad():
        output = model(image)
        _, predicted = torch.max(output, 1)
    return predicted.item()

# Example: classify one random test image and display the result.
# NOTE(review): these imports sit mid-file to keep the demo self-contained;
# by PEP 8 convention they would normally live at the top of the module.
import matplotlib.pyplot as plt
import numpy as np

# Pick a random sample from the test set.
index = np.random.randint(0, len(test_set))
image, label = test_set[index]

# Predict its label.
# NOTE(review): `image` here is already a normalized tensor (the dataset
# applies `transform`), yet predict() re-applies ToTensor, which expects a
# PIL image/ndarray — confirm this call works with the installed torchvision.
predicted_label = predict(model, image)

# Show the image with the true and predicted labels.
plt.imshow(image.squeeze(), cmap='gray')
plt.title(f"True Label: {label}, Predicted Label: {predicted_label}")
plt.show()