"""
手写体识别：本地文件
"""
import gzip
import json

import paddle
from paddle.vision.transforms import Normalize
from paddle.io import Dataset
import matplotlib.pyplot as plt
import paddle.nn.functional as F


# datafile = './work/mnist.json.gz'
# 定义图像归一化处理方法，这里的CHW指图像格式需为[C通道数，H图像高度，W图像宽度]
transform = Normalize(mean=[127.5],std=[127.5],data_format='CHW')

class MNISTDataset(Dataset):
    """
    Map-style MNIST dataset (step 1: subclass paddle.io.Dataset).

    The data file is a gzip-compressed JSON document holding three splits:
    [train_set, val_set, eval_set], each a pair [images, labels].
    """

    def __init__(self, datafile, mode='train', transform=None):
        """
        Step 2: constructor — load the requested split into memory.

        :param datafile: path to the gzipped JSON MNIST dump.
        :param mode: which split to expose: 'train', 'valid' or 'test'.
        :param transform: optional callable applied to each image in
            ``__getitem__``; when None the raw sample is returned unchanged.
        :raises ValueError: if ``mode`` is not one of the accepted values.
        """
        super().__init__()

        # Validate up front so we fail before doing any (potentially slow) I/O.
        # ValueError is a subclass of Exception, so existing callers that
        # caught the old generic Exception still work.
        if mode not in ('train', 'valid', 'test'):
            raise ValueError("mode can only be one of ['train','valid','test']")

        self.mode = mode
        self.transform = transform

        print('loading mnist dataset from {} ......'.format(datafile))
        # The file is gzip-compressed JSON; json.load accepts the binary
        # file object returned by gzip.open.
        data = json.load(gzip.open(datafile))
        print('mnist dataset load done')

        # The document contains the train / validation / test splits in order.
        train_set, val_set, eval_set = data

        split = {'train': train_set, 'valid': val_set, 'test': eval_set}[mode]
        self.imgs, self.labels = split[0], split[1]

    def __getitem__(self, index):
        """
        Step 3: return the (image, label) pair at ``index``.

        :param index: position within the selected split.
        :return: (transformed image, label) tuple.
        """
        data = self.imgs[index]
        label = self.labels[index]

        # Fix: only apply the transform when one was supplied — the default
        # transform=None used to crash here with "'NoneType' is not callable".
        if self.transform is not None:
            data = self.transform(data)

        return data, label

    def __len__(self):
        """
        Step 4: return the number of samples in the selected split.
        """
        return len(self.imgs)


class MNIST(paddle.nn.Layer):
    """Minimal model: a single fully connected layer with scalar output."""

    def __init__(self):
        super().__init__()
        # One linear layer mapping the 784 pixel values of a flattened
        # 28x28 image to a single output value.
        self.fc = paddle.nn.Linear(in_features=784, out_features=1)

    def forward(self, inputs):
        """Forward pass: apply the linear layer to the input batch."""
        return self.fc(inputs)

datafile = './work/mnist.json.gz'

# Build the train/test datasets from the local file.
train_dataset = MNISTDataset(datafile,mode='train',transform=transform)
test_dataset = MNISTDataset(datafile,mode='test',transform=transform)

# Use the len() builtin rather than calling __len__() directly.
print('train images:',len(train_dataset),',test images:',len(test_dataset))

# Uncomment to preview the first sample:
# for data in train_dataset:
#     image,label = data
#     print('shape of image:',image.shape)
#     plt.title(str(label))
#     plt.imshow(image[0])
#     plt.show()
#     break

# Data reader: batches of 64, shuffled, incomplete last batch dropped.
train_loader = paddle.io.DataLoader(train_dataset,batch_size=64,shuffle=True,num_workers=1,drop_last=True)
print('step num:',len(train_loader))

def train(model):
    """Train ``model`` with SGD on mean square error, then save the weights.

    Reads batches from the module-level ``train_loader`` and writes the
    final parameters to 'mnist2.pdparams'.

    :param model: a paddle.nn.Layer whose output has shape [batch, 1].
    """
    print('train:')
    model.train()
    opt = paddle.optimizer.SGD(learning_rate=0.001,parameters=model.parameters())
    EPOCH_NUM = 3
    for epoch_id in range(EPOCH_NUM):
        print('epoch:',epoch_id)
        for batch_id, data in enumerate(train_loader()):
            images, labels = data
            images = paddle.to_tensor(images).astype('float32')
            # Fix: the dataset yields scalar labels, so the batched labels
            # come out as shape [batch]; predicts is [batch, 1]. Without the
            # reshape, square_error_cost would silently broadcast the pair
            # into a [batch, batch] matrix. reshape([-1, 1]) is a no-op if
            # labels already have shape [batch, 1].
            labels = paddle.to_tensor(labels).astype('float32').reshape([-1, 1])

            # Forward pass.
            predicts = model(images)

            # Per-element squared error, averaged over the batch.
            loss = F.square_error_cost(predicts,labels)
            avg_loss = paddle.mean(loss)

            # Report the current loss every 200 batches.
            if batch_id % 200 == 0:
                print("epoch: {}, batch: {}, loss is: {}".format(epoch_id,batch_id,avg_loss.numpy()))

            # Backward pass and parameter update.
            avg_loss.backward()
            opt.step()
            opt.clear_grad()

    paddle.save(model.state_dict(),'mnist2.pdparams')

# Create the model.
print('create model:')
model = MNIST()
# Launch the training loop.
train(model)