import random

from matplotlib import pyplot as plt
from mxnet import autograd, gluon, init, nd

# ---- synthetic data for linear regression: y = Xw + b + noise ----
ture_w = [2, -3.4]   # ground-truth weight (name kept as-is; used below)
ture_b = 4.2         # ground-truth bias

num_in = 2           # number of input features
exam = 1000          # number of examples
batch_size = 10      # mini-batch size
lr = 0.03            # learning rate for SGD
epoch = 5            # total number of training epochs

# features: (exam, num_in) matrix drawn from a standard normal
x = nd.random.normal(shape=(exam, num_in))
# labels from the true linear model, plus small gaussian noise
y = ture_w[0] * x[:, 0] + ture_w[1] * x[:, 1] + ture_b
y += nd.random.normal(scale=0.01, shape=y.shape)

# pair features with labels and serve shuffled mini-batches
dataset = gluon.data.ArrayDataset(x, y)
data_iter = gluon.data.DataLoader(dataset, batch_size, shuffle=True)

# ---- model definition ----
net = gluon.nn.Sequential()   # sequential container for layers
net.add(gluon.nn.Dense(1))    # one output unit: a plain linear regression

# initialize weights from N(0, 0.01^2); the mid-file `from mxnet import init`
# was hoisted to the top of the file with the other mxnet imports
net.initialize(init.Normal(sigma=0.01))

# squared-error loss (gluon's L2Loss)
loss = gluon.loss.L2Loss()

# mini-batch SGD over every parameter the network holds
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
# ---- training loop ----
# use a distinct loop name so the total-epoch count `epoch` is not shadowed
for ep in range(1, epoch + 1):
    for x1, y1 in data_iter:
        with autograd.record():        # record ops for autodiff
            l = loss(net(x1), y1)
        l.backward()
        # step() needs the batch size so the summed gradient is averaged
        trainer.step(batch_size)
    # evaluate the loss on the full data set after each epoch;
    # asscalar() extracts a Python float (printing an ndarray with %f is
    # deprecated in modern numpy)
    l = loss(net(x), y)
    print('epoch %d,loss %f' % (ep, l.mean().asscalar()))

# compare the learned parameters against the ground truth
print(ture_w, net[0].weight.data())
print(ture_b, net[0].bias.data())