import torch
from torch import nn
import read_mnist_data as data

# Samples per mini-batch. NOTE: this value is also baked into the reshape
# calls below, so every batch processed must be exactly this size.
batch_size = 100

class LeNet5(nn.Module):
    """LeNet-5 style CNN for 28x28 single-channel images (e.g. MNIST).

    Input:  float tensor of shape (N, 1, 28, 28).
    Output: tensor of shape (N, 10) — raw class scores (logits);
            pair with nn.CrossEntropyLoss, which applies softmax itself.
    """

    def __init__(self):
        super(LeNet5, self).__init__()
        # Feature extractor: 28x28 -conv5-> 24x24 -pool-> 12x12
        #                    -conv5-> 8x8 -pool-> 4x4, 16 channels.
        self.conv_unit = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
        )
        # Classifier head: 16*4*4 = 256 flattened features -> 120 -> 84 -> 10.
        self.fc_unit = nn.Sequential(
            nn.Linear(16 * 4 * 4, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10),
        )

    def forward(self, x):
        """Forward pass for any batch size."""
        x = self.conv_unit(x)
        # BUG FIX: flatten with the actual batch dimension instead of the
        # module-level `batch_size` global, so a final partial batch or
        # single-sample inference no longer crashes.
        x = x.view(x.size(0), -1)
        x = self.fc_unit(x)
        return x

# Hyper-parameters.
num_epochs = 10
lr = 0.001

# Model, objective and optimizer.
model = LeNet5()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=lr)

# MNIST arrays (numpy) loaded through the project helper module.
train_images = data.load_train_images()
train_labels = data.load_train_labels()
test_images = data.load_test_images()
test_labels = data.load_test_labels()
# 60,000 training samples; with batch_size = 100 that is 600 mini-batches per epoch.

num_train, num_test = 60000, 10000
train_batches = num_train // batch_size
test_batches = num_test // batch_size

for epoch in range(num_epochs):
    # ---- training pass over the full training set ----
    model.train()
    for i in range(train_batches):
        # Slice one mini-batch out of the numpy arrays and convert to tensors.
        images = torch.from_numpy(train_images[i * batch_size:(i + 1) * batch_size]).to(torch.float32)
        labels = torch.from_numpy(train_labels[i * batch_size:(i + 1) * batch_size]).to(torch.long)
        images = images.reshape((batch_size, 1, 28, 28))

        out = model(images)
        error = loss(out, labels)
        optimizer.zero_grad()
        error.backward()
        optimizer.step()

        if i % 20 == 0:
            print("epoch: {}, 进度: {:.2f}, loss: {:.4f}".format(epoch, i / train_batches * 100, error.item()))

    # ---- evaluation pass over the test set ----
    # BUG FIX: the original sliced train_images/train_labels here, so the
    # reported "test" metrics were actually computed on training data.
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for i in range(test_batches):
            images = torch.from_numpy(test_images[i * batch_size:(i + 1) * batch_size]).to(torch.float32)
            labels = torch.from_numpy(test_labels[i * batch_size:(i + 1) * batch_size]).to(torch.long)
            images = images.reshape((batch_size, 1, 28, 28))

            out = model(images)
            test_loss += loss(out, labels).item()

            pred = out.argmax(dim=1)
            # Count correct predictions in this batch.
            correct += pred.eq(labels).sum().item()

    # BUG FIX: CrossEntropyLoss already averages over the batch, so the sum of
    # per-batch means must be divided by the batch count, not by num_test.
    print("第{}轮,  测试集平均损失: {:.4f}, 正确率: {:.2f}".format(epoch, test_loss / test_batches, correct / num_test * 100))

# Quick sanity check: print predicted vs. true labels for the first batch.
# NOTE(review): this uses *training* data, as the original did — switch to
# test_images/test_labels for an unbiased spot check.
model.eval()
with torch.no_grad():  # inference only, no autograd graph needed
    sample = torch.from_numpy(train_images[:batch_size]).to(torch.float32)
    sample = sample.reshape((batch_size, 1, 28, 28))
    pred = model(sample).argmax(dim=1)
print(pred)
print(train_labels[:batch_size])