#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time    : 2019/7/14 21:49
# @Email  : jtyoui@qq.com
# @Software: PyCharm
import paddle
from paddle import fluid
from functools import reduce
import numpy


def load_data(image_file, label_file):
    """Build a Paddle-style reader over raw MNIST idx files.

    Args:
        image_file: path to an idx3-ubyte image file (16-byte header,
            then 28*28 uint8 pixels per image).
        label_file: path to an idx1-ubyte label file (8-byte header,
            then one uint8 label per image).

    Returns:
        A zero-argument generator function yielding
        (image, label) pairs, where image is a float (28, 28) array
        scaled to [-1, 1] and label is a uint8 digit in [0, 9].
    """
    def __reader__():
        # The idx files are binary: they MUST be opened in 'rb' mode.
        # Text mode breaks numpy.fromfile on Python 3 (it needs a raw
        # byte stream and a byte-accurate seek offset).
        with open(image_file, 'rb') as f:
            f.seek(16)  # skip idx3 header: magic, count, rows, cols
            image = numpy.reshape(numpy.fromfile(f, dtype='uint8'), [-1, 28, 28])
            image = image / 255.0 * 2.0 - 1.0  # scale grayscale to [-1, 1]
        with open(label_file, 'rb') as f:
            f.seek(8)  # skip idx1 header: magic, count
            labels = numpy.fromfile(f, dtype='uint8')  # labels in [0, 9]
        for idx in range(len(labels)):
            yield image[idx, :], labels[idx]

    return __reader__


# reader = paddle.dataset.mnist.train()  # alternative: paddle's built-in MNIST dataset
reader = load_data(image_file='./数据/train-images', label_file='./数据/train-labels')
# Graph input placeholders: one 1x28x28 float32 image and one int64 class label.
images = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

# Convolution layers: two stacked conv + max-pool stages (LeNet-style feature extractor).
# Stage 1: 20 filters of size 5x5 over the input image, then 2x2 max-pool (stride 2).
pool_1 = fluid.nets.simple_img_conv_pool(
    input=images,
    filter_size=5,
    num_filters=20,
    pool_size=2,
    pool_stride=2,
    act='relu')

# Stage 2: 50 filters of size 5x5 over the stage-1 feature maps, same pooling.
pool_2 = fluid.nets.simple_img_conv_pool(
    input=pool_1,
    filter_size=5,
    num_filters=50,
    pool_size=2,
    pool_stride=2,
    act='relu')

size = 10  # number of output classes (digits 0-9)
input_shape = pool_2.shape
# [flattened feature count, class count]; the product of all non-batch dims
# of pool_2 gives the fan-in used to derive the init standard deviation below.
param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [size]
scale = (2.0 / (param_shape[0] ** 2 * size)) ** 0.5

predict = fluid.layers.fc(  # fully connected softmax output layer
    input=pool_2,
    size=size,
    act='softmax',
    param_attr=fluid.initializer.NormalInitializer(loc=0.0, scale=scale))

cost = fluid.layers.cross_entropy(input=predict, label=label)  # per-sample cross-entropy loss
avg_cost = fluid.layers.mean(x=cost)  # mean loss over the batch
# NOTE(review): the original comment said "Adam", but this is the Adamax optimizer.
opt = fluid.optimizer.AdamaxOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999)  # Adamax optimizer
opt.minimize(avg_cost)

batch_size = fluid.layers.create_tensor(dtype='int64')  # receives the batch count used to average accuracy
acc = fluid.layers.accuracy(input=predict, label=label, total=batch_size)

train_batch = paddle.batch(reader=paddle.reader.shuffle(reader, buf_size=500), batch_size=64)  # shuffled mini-batches of 64

# place = fluid.CUDAPlace(0)  # run on GPU
place = fluid.CPUPlace()  # run on CPU
exe = fluid.Executor(place=place)  # create the executor
exe.run(fluid.default_startup_program())

# Tell the network the fed data has two parts: the image values and the label values.
# NOTE(review): `feeder` is never used below — the training loop builds the feed dict
# by hand with numpy; confirm whether this was meant to replace that conversion.
feeder = fluid.DataFeeder(feed_list=[images, label], place=place)

for epoch in range(10):  # train for 10 epochs
    # Collect per-batch accuracy in acc_set and per-batch loss in loss_set
    # so that epoch-level averages can be reported afterwards.
    acc_set, loss_set = [], []
    for index, data in enumerate(train_batch()):
        # Each element of `data` is an (image, label) pair from the reader;
        # stack them into the [N, 1, 28, 28] / [N, 1] arrays the graph expects.
        image_data = numpy.array([x[0].reshape([1, 28, 28]) for x in data]).astype('float32')
        # (renamed from the original typo `label_date`)
        label_data = numpy.array([x[1] for x in data]).reshape([len(image_data), 1]).astype('int64')
        loss, accuracy = exe.run(program=fluid.default_main_program(),
                                 feed={'image': image_data, 'label': label_data},
                                 fetch_list=[avg_cost, acc])
        if index % 100 == 0:
            print(f'epoch={epoch},batch={index},loss={loss},accuracy={accuracy}')
        acc_set.append(accuracy)
        loss_set.append(loss)
    acc_mean = numpy.array(acc_set).mean()
    loss_mean = numpy.array(loss_set).mean()
    print(f'第{epoch}次Epoch，平均损失值是：{loss_mean}，平均准确率是：{acc_mean}')

# Save the trained inference graph to ./model: feed the 'image' variable,
# fetch the softmax `predict` output.
fluid.io.save_inference_model(dirname='./model',
                              feeded_var_names=["image"],
                              target_vars=[predict],
                              executor=exe)
