#!/usr/bin/env python3
#-*- coding:utf8 -*-
# Created on 2020-06-13 00:48:57

import os
import random
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D,Pool2D,Linear
import numpy as np
from PIL import Image
import gzip
import json

def load_data(model='train', datafile='./mnist.json.gz'):
    """Build a batched data generator over one split of the MNIST dataset.

    :param model: which split to serve: 'train', 'valid' or 'eval'.
        (Parameter name kept for backward compatibility; it selects the mode.)
    :param datafile: path to the gzipped JSON file that holds the three
        splits as ``[train_set, val_set, eval_set]``, each a pair of
        ``[images, labels]`` with 784-value flat images.
    :returns: a zero-argument generator function; each call yields
        ``(images, labels)`` batches of up to 100 samples, images as
        float32 arrays of shape (B, 1, 28, 28) and labels as int64
        arrays of shape (B, 1).  The 'train' split is reshuffled on
        every pass so each epoch sees a new order.
    :raises ValueError: if *model* names an unknown split.
    """
    print('loading mnist dataset from {} .......'.format(datafile))
    # Close the archive deterministically instead of leaking the handle.
    with gzip.open(datafile) as f:
        data = json.load(f)
    train_set, val_set, eval_set = data
    IMG_ROWS = 28
    IMG_COLS = 28
    if model == 'train':
        imgs = train_set[0]
        labels = train_set[1]
    elif model == 'valid':
        imgs = val_set[0]
        labels = val_set[1]
    elif model == 'eval':
        imgs = eval_set[0]
        labels = eval_set[1]
    else:
        # Fail fast with a clear message instead of a NameError below.
        raise ValueError(
            "model should be 'train', 'valid' or 'eval', got {!r}".format(model))
    imgs_length = len(imgs)
    assert imgs_length == len(labels), \
        "length of train_imgs ({}) should be the same as train_labels({})".format(
            len(imgs), len(labels))
    index_list = list(range(imgs_length))
    BATCHSIZE = 100

    def data_generator():
        # Reshuffle per call so successive training epochs differ.
        if model == 'train':
            random.shuffle(index_list)
        imgs_list = []
        labels_list = []
        for i in index_list:
            img = np.reshape(imgs[i], [1, IMG_ROWS, IMG_COLS]).astype('float32')
            label = np.reshape(labels[i], [1]).astype('int64')
            imgs_list.append(img)
            labels_list.append(label)
            if len(imgs_list) == BATCHSIZE:
                yield np.array(imgs_list), np.array(labels_list)
                imgs_list = []
                labels_list = []
        # Emit the final, possibly short, batch.
        if len(imgs_list) > 0:
            yield np.array(imgs_list), np.array(labels_list)
    return data_generator
class MNIST(fluid.dygraph.Layer):
    """LeNet-style convolutional classifier for 28x28 MNIST digits.

    Two conv+max-pool stages followed by a softmax fully-connected
    layer producing 10 class probabilities.
    """

    def __init__(self):
        """Create layers: conv(1->20) -> pool -> conv(20->20) -> pool -> fc."""
        super(MNIST, self).__init__()
        # filter_size=5 with padding=2 preserves spatial size; each pool halves it.
        self.conv1 = Conv2D(num_channels=1, num_filters=20, filter_size=5,
                            stride=1, padding=2, act='relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = Conv2D(num_channels=20, num_filters=20, filter_size=5,
                            stride=1, padding=2, act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # Two 2x2 poolings shrink 28x28 to 7x7, so 20 * 7 * 7 = 980 features.
        self.fc = Linear(input_dim=980, output_dim=10, act='softmax')

    def forward(self, inputs):
        """Map a (N, 1, 28, 28) batch to (N, 10) softmax probabilities."""
        feat = self.pool1(self.conv1(inputs))
        feat = self.pool2(self.conv2(feat))
        flat = fluid.layers.reshape(feat, [feat.shape[0], 980])
        return self.fc(flat)

with fluid.dygraph.guard():
    # Train a fresh MNIST classifier with plain SGD in dygraph mode.
    net = MNIST()
    net.train()
    loader = load_data('train')
    sgd = fluid.optimizer.SGDOptimizer(learning_rate=0.01,
                                       parameter_list=net.parameters())
    EPOCH_NUM = 5
    for epoch_id in range(EPOCH_NUM):
        for batch_id, (image_data, label_data) in enumerate(loader()):
            # Wrap the numpy batch as dygraph variables.
            image = fluid.dygraph.to_variable(image_data)
            label = fluid.dygraph.to_variable(label_data)
            predict = net(image)
            avg_loss = fluid.layers.mean(
                fluid.layers.cross_entropy(predict, label))
            # Log progress every 200 batches.
            if batch_id % 200 == 0:
                print("epoch:{},batch:{},loss is:{}".format(
                    epoch_id, batch_id, avg_loss.numpy()))
            avg_loss.backward()
            sgd.minimize(avg_loss)
            net.clear_gradients()
    # Persist the trained weights for later evaluation / inference.
    fluid.save_dygraph(net.state_dict(), 'mnist_cross_entropy')

        
