import torch
from torch import nn
import read_mnist_data as data
import torch.nn.functional as F
# dense layer = fully-connected / linear layer
# Hyperparameters and raw parameters for a 2-hidden-layer MLP on MNIST
# (784 -> 256 -> 256 -> 10), trained with SGD + cross-entropy.

num_inputs, num_outputs, num_hiddens = 784, 10, 256

# Scale the init by 0.01: plain randn (std = 1) over 784 inputs yields
# pre-activations with std ~28, which makes the initial cross-entropy explode.
# nn.Parameter already sets requires_grad=True, so no need to pass it to randn.
W1 = nn.Parameter(torch.randn(num_inputs, num_hiddens) * 0.01)   # (784, 256)
b1 = nn.Parameter(torch.zeros(num_hiddens))

# Use num_hiddens instead of the previous hard-coded 256 so the hidden width
# can be changed in one place.
W2 = nn.Parameter(torch.randn(num_hiddens, num_hiddens) * 0.01)  # (256, 256)
b2 = nn.Parameter(torch.zeros(num_hiddens))

W3 = nn.Parameter(torch.randn(num_hiddens, num_outputs) * 0.01)  # (256, 10)
b3 = nn.Parameter(torch.zeros(num_outputs))

params = [W1, b1, W2, b2, W3, b3]
loss = nn.CrossEntropyLoss()
dropout1 = nn.Dropout(0.3)  # after first hidden layer
dropout2 = nn.Dropout(0.6)  # after second hidden layer
num_epochs, lr = 10, 0.001
batch_size = 200
updater = torch.optim.SGD(params, lr=lr)

def net(X, is_training=True):
    """Forward pass of the MLP; returns raw logits.

    Args:
        X: image batch, any shape flattenable to (batch, 784).
        is_training: when True (default, matching the old behavior) the two
            dropout layers are applied; pass False for evaluation/inference.
            Previously the module-level nn.Dropout layers were never switched
            to eval mode, so dropout also fired during testing.

    Returns:
        Logits of shape (batch, 10). No softmax is applied here because
        nn.CrossEntropyLoss expects raw logits.
    """
    X = X.reshape((-1, num_inputs))       # (batch, 784)
    H = F.relu(torch.mm(X, W1) + b1)      # (batch, 256)
    if is_training:
        H = dropout1(H)
    H = F.relu(torch.mm(H, W2) + b2)      # (batch, 256)
    if is_training:
        H = dropout2(H)
    return torch.mm(H, W3) + b3           # (batch, 10)

train_images = data.load_train_images()
train_labels = data.load_train_labels()
# 60000 training samples with batch_size = 200 -> 300 mini-batches per epoch.
num_train_batches = len(train_images) // batch_size

# Bug fix: the evaluation loop below used to slice the *training* arrays, so
# "test" metrics were measured on training data. Load the real test split,
# and do it once instead of re-loading it every epoch.
test_images = data.load_test_images()
test_labels = data.load_test_labels()
num_test_batches = len(test_images) // batch_size

for epoch in range(num_epochs):
    # --- training ---
    for i in range(num_train_batches):
        X = torch.from_numpy(train_images[i * batch_size: (i + 1) * batch_size]).to(torch.float32)
        y = torch.from_numpy(train_labels[i * batch_size: (i + 1) * batch_size]).to(torch.long)

        out = net(X)
        error = loss(out, y)
        updater.zero_grad()
        error.backward()
        updater.step()

        if i % 20 == 0:
            # Progress fraction derived from the actual batch count instead of
            # the previously hard-coded 300.
            print("epoch: {}, 进度: {:.2f}, loss: {}".format(
                epoch, i / num_train_batches, error.item()))

    # --- evaluation on the held-out test set ---
    test_loss = 0.0
    correct = 0
    # Put the dropout layers into eval mode so they act as identity maps
    # during testing, and skip autograd bookkeeping entirely.
    dropout1.eval()
    dropout2.eval()
    with torch.no_grad():
        for i in range(num_test_batches):
            X = torch.from_numpy(test_images[i * batch_size: (i + 1) * batch_size]).to(torch.float32)
            y = torch.from_numpy(test_labels[i * batch_size: (i + 1) * batch_size]).to(torch.long)
            out = net(X)
            test_loss += loss(out, y).item()
            pred = out.argmax(dim=1)
            correct += pred.eq(y).sum().item()
    # Restore training mode for the next epoch.
    dropout1.train()
    dropout2.train()

    # test_loss accumulates per-batch *mean* losses, so the average is over
    # batches (the old code divided by 10000); accuracy is over the number of
    # samples actually evaluated.
    num_eval = num_test_batches * batch_size
    print("第{}轮,  测试集平均损失: {}, 正确率: {}".format(
        epoch, test_loss / num_test_batches, correct / num_eval))

# Sanity check: predict the first 20 training images and print them next to
# the ground-truth labels.
input = torch.from_numpy(train_images[: 20]).to(torch.float32)
labels = train_labels[: 20]
# Bug fix: the dropout layers were still in training mode here, making these
# "final" predictions stochastic; switch to eval mode and skip autograd.
dropout1.eval()
dropout2.eval()
with torch.no_grad():
    out = net(input)
pred = out.argmax(dim=1)
print(pred)
print(labels)