from mxnet import nd
from mxnet.gluon import loss as gloss
import d2lzh as d2l

# Minibatch size shared by the training and test data iterators.
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

# Layer sizes: 28x28 Fashion-MNIST images flattened to 784 inputs,
# one hidden layer of 256 units, 10 output classes.
num_inputs, num_hiddens, num_outputs = 784, 256, 10

# Weights drawn from a zero-mean Gaussian (std 0.01); biases start at zero.
w1 = nd.random.normal(scale=0.01, shape=(num_inputs, num_hiddens))
b1 = nd.zeros(num_hiddens)
w2 = nd.random.normal(scale=0.01, shape=(num_hiddens, num_outputs))
b2 = nd.zeros(num_outputs)

params = [w1, b1, w2, b2]
# Allocate a gradient buffer for every parameter so autograd can
# record gradients with respect to each of them during training.
for p in params:
    p.attach_grad()

def relu(x):
    """Rectified linear unit, applied element-wise.

    Returns an array of the same shape as *x* in which every negative
    entry is replaced by zero.
    """
    activated = nd.maximum(x, 0)
    return activated

def net(x):
    """Forward pass of the one-hidden-layer MLP.

    Flattens each example to a length-``num_inputs`` vector, applies the
    hidden affine transform followed by ReLU, and returns the raw output
    scores (no softmax here — it is handled inside the loss).
    """
    flat = x.reshape((-1, num_inputs))
    hidden = relu(nd.dot(flat, w1) + b1)
    scores = nd.dot(hidden, w2) + b2
    return scores

# Gluon's loss combines the softmax and cross-entropy computation,
# so the network outputs raw scores rather than probabilities.
loss = gloss.SoftmaxCrossEntropyLoss()

# Name the training hyperparameters instead of passing bare literals.
num_epochs, lr = 5, 0.5

# Pass the shared `batch_size` variable rather than a duplicated literal 256,
# so the training call stays consistent if the batch size defined above changes.
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params, lr)
