#!/usr/bin/env python3
# -*- encoding: utf-8 -*-


import sys
sys.path.append('..')
from dataset.mnist import load_mnist
from ch04.two_layer_net import TwoLayerNet
import time
import matplotlib.pyplot as plt


# hyper parameters
iter_num = 20        # total number of SGD iterations (mini-batch updates, not epochs)
batch_size = 100     # samples per mini-batch
debug = True         # print one-time dataset/batch-size info before training
debug_iter = True    # print per-iteration progress (batch range, timings)
learning_rate = 0.1  # SGD step size


if __name__ == '__main__':
    # Load MNIST: pixels normalized to [0, 1], images flattened to 784-d
    # vectors, labels one-hot encoded. shuffle=True randomizes sample order
    # so the sequential mini-batches below are effectively random batches.
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True,
                                                      one_hot_label=True, shuffle=True)
    # Two-layer net: 784 inputs -> 50 hidden units -> 10 output classes.
    net = TwoLayerNet(28 * 28, 50, 10)

    # mini-batch train net
    train_size = x_train.shape[0]
    if debug:
        print('train data size is %d' % train_size)
        print('mini-batch size is %d' % batch_size)

    # Per-iteration training loss, recorded after each parameter update.
    # (The accuracy lists previously declared here were never populated
    # or read, so they have been removed.)
    lost_list = []

    start_idx = 0
    for i in range(iter_num):
        print()
        print('iter_num = %d' % i)

        # Fetch the next sequential mini-batch, clamping at the end of the
        # training set; the last batch of an epoch may be smaller.
        stop_idx = min(start_idx + batch_size, train_size)
        x_tmp = x_train[start_idx : stop_idx]
        t_tmp = t_train[start_idx : stop_idx]
        if debug_iter:
            print('(start_idx, stop_idx) = (%d, %d)' % (start_idx, stop_idx))
        start_idx = stop_idx

        # Numerical gradient — very slow (one loss evaluation per parameter
        # element), hence the timing instrumentation around it.
        if debug_iter:
            print('Calculating numerical gradient ...')
        grad_time_start = time.time()
        grad = net.numerical_gradient(x_tmp, t_tmp)
        grad_time_stop = time.time()
        if debug_iter:
            print('Done')
            print('gradient time consumed %g' % (grad_time_stop - grad_time_start))

        # Vanilla SGD update: theta <- theta - lr * grad(theta).
        if debug_iter:
            print('Updating parameters ...')
        for key in net.params:
            net.params[key] = net.params[key] - learning_rate * grad[key]
        if debug_iter:
            print('Done')

        # Record the mini-batch loss after the update.
        if debug_iter:
            print('Calculating loss ...')
        lost_list.append(net.loss(x_tmp, t_tmp))
        if debug_iter:
            print('Done')

        # Epoch boundary reached: wrap back to the start of the training set.
        if stop_idx == train_size:
            start_idx = 0

    # Plot the training-loss curve.
    plt.plot(range(len(lost_list)), lost_list)
    plt.xlabel('iteration')
    plt.ylabel('loss')
    plt.show()
