import torch
import torch.nn as nn
import torchvision.datasets as normal_datasets
import torchvision.transforms as transforms
from torch.autograd import Variable

# Number of samples per mini-batch for both loaders.
batch_size = 100

# Shared transform: convert PIL images to float tensors in [0, 1].
to_tensor = transforms.ToTensor()

# MNIST splits from torchvision; downloaded into ./mnist/ on first run.
train_dataset = normal_datasets.MNIST(
    root='./mnist/',        # on-disk location of the dataset
    train=True,             # training split (60k images)
    transform=to_tensor,
    download=True)          # fetch the files if they are missing

test_dataset = normal_datasets.MNIST(
    root='./mnist/',
    train=False,            # test split (10k images)
    transform=to_tensor)

# Iterable mini-batch loaders; only the training data is shuffled.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

# A plain multilayer perceptron with a single hidden layer.
class NN(nn.Module):
    """Fully connected net: input -> hidden (ReLU) -> output logits."""

    def __init__(self, input_data, hidden_layer, output_data):
        super(NN, self).__init__()
        self.f1 = nn.Linear(input_data, hidden_layer)
        self.f2 = nn.Linear(hidden_layer, output_data)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Hidden projection, non-linearity, then the output projection.
        # Returns raw logits (no softmax) for use with CrossEntropyLoss.
        return self.f2(self.relu(self.f1(x)))

# Layer sizes, epoch count and optimizer step size.
input_data = 784        # 28*28 pixels, flattened
hidden_layer = 1000     # hidden-layer width
output_data = 10        # one logit per digit class
epoch_n = 20            # passes over the training set
learning_rate = 0.001

# Model, loss criterion and optimizer.
model = NN(input_data, hidden_layer, output_data)
loss_fn = nn.CrossEntropyLoss()     # cross-entropy over the raw logits
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model.
for epoch in range(epoch_n):
    print("Epoch  {}/{}".format(epoch+1, epoch_n))
    print("-"*10)

    for i, (images, labels) in enumerate(train_loader):
        # Flatten each 28x28 image to a 784-dim vector. Using -1 for the
        # batch dimension (instead of a hard-coded 100) also handles a
        # final partial batch or a changed batch_size.
        x = images.reshape(-1, 28*28)
        y = labels

        y_pred = model(x)   # raw logits, shape (batch, 10)
        # CrossEntropyLoss expects the raw (batch, 10) logits and integer
        # class labels. The previous code collapsed the logits with
        # torch.max(...).values and cast the labels to float, which is not
        # a valid input for this criterion.
        loss = loss_fn(y_pred, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, epoch_n, loss.item()))

# Classify the test set and report overall accuracy.
print("\nTraining Done ")
model.eval()  # switch to inference mode (good practice, no-op for this net)
with torch.no_grad():
    correct, total = 0, 0
    for images_test, labels_test in test_loader:
        x_test = images_test.reshape(-1, 28*28)

        y_test_pred = model(x_test)
        # Predicted class = index of the largest logit per row.
        _, pred = torch.max(y_test_pred, 1)

        correct += (pred == labels_test).sum().item()
        # BUG FIX: count this test batch's labels; the original read the
        # stale training-loop variable `labels`, skewing the denominator.
        total += labels_test.size(0)

    print("{} correct, the accuracy of total {} images: {}%".
            format(correct, total, 100 * correct/total))