# coding=utf-8
##
## Author: jmdvirus@aliyun.com
##
## Create: 2019年02月11日 星期一 11时34分06秒
##

import logging
import math
import random
import mxnet as mx
import numpy as np

# Emit DEBUG-level messages from the root logger (mxnet's fit() logs through it).
logging.getLogger().setLevel(logging.DEBUG)

## Hyperparameters
n_sample = 1000        # number of synthetic training examples
batch_size = 1         # one example per SGD step
learning_rate = 0.1    # SGD step size
n_epoch = 1            # passes over the training set

## Training inputs: n_sample rows, each a pair of uniform-random values in [0, 1).
train_in = [[ random.uniform(0, 1) for _ in range(2) ] for _ in range(n_sample) ]

## Regression target: the larger of the two inputs of each row.
## (Single comprehension replaces the original zero-init list + index loop.)
train_out = [max(pair[0], pair[1]) for pair in train_in]

## Iterator feeding shuffled (data, 'reg_label') mini-batches to Module.fit.
train_iter = mx.io.NDArrayIter(data = np.array(train_in),
        label = {'reg_label':np.array(train_out)},
        batch_size = batch_size, shuffle = True)

## Model graph: data -> one fully-connected unit -> linear-regression loss.
## A single neuron with 2 inputs is enough to approximate max(a, b) roughly.
input_sym = mx.sym.Variable('data')

# Hidden/output layer: one fully-connected unit (2 weights + 1 bias).
dense = mx.sym.FullyConnected(data = input_sym, num_hidden = 1, name = 'fc')

# Squared-error regression head; expects labels under 'reg_label'.
reg_out = mx.sym.LinearRegressionOutput(data = dense, name = 'reg')

module = mx.mod.Module(symbol = reg_out, label_names = ['reg_label'])

def epoch_callback(epoch, symbol, arg_params, aux_params):
    print("epoch callback ==>")
    for k in arg_params:
        print(k)
        print(arg_params[k].asnumpy())


## Train with plain SGD minimizing mean-squared error for n_epoch passes.
fit_options = dict(
        eval_data = None,
        eval_metric = mx.metric.create('mse'),  ## mse, mean squared error
        optimizer = 'sgd',                      ## stochastic gradient descent
        optimizer_params = {'learning_rate': learning_rate},
        num_epoch = n_epoch,
        batch_end_callback = mx.callback.Speedometer(batch_size, 1),
        epoch_end_callback = epoch_callback,
        )
module.fit(train_iter, **fit_options)

