import os
import tensorflow as tf
from tensorflow.keras import layers, models
from src.utils.load_emnist import *

'''
Runtime environment: Python 3.7, TensorFlow 2.0.0b0.
'''

# Run configuration for the EMNIST "digits" training setup.
config = dict(
    check_path="./ckpt/cp-{epoch:04d}.ckpt",  # checkpoint file pattern, one per epoch
    class_num=10,                             # number of output classes
    type='digits',                            # which EMNIST split this config targets
    data_set_path='./../../data_set_emnist_digits/',  # data dir, relative to this file
    train_size=240000,                        # expected number of training images
    test_size=40000,                          # expected number of test images
    data_sets=[                               # order: train labels, train images, test labels, test images
        'emnist-digits-train-labels-idx1-ubyte.gz',
        'emnist-digits-train-images-idx3-ubyte.gz',
        'emnist-digits-test-labels-idx1-ubyte.gz',
        'emnist-digits-test-images-idx3-ubyte.gz',
    ],
)


class CNN(object):
    """LeNet-style convolutional network for 28x28 single-channel images.

    The built (and summarized) Keras model is exposed as ``self.model``.
    """

    def __init__(self):
        # Layer stack: conv1 (6 filters, 5x5) -> 2x2 max-pool
        #           -> conv2 (16 filters, 3x3) -> 2x2 max-pool
        #           -> flatten -> fc1 (120) -> fc2 (84) -> softmax output.
        net = models.Sequential([
            layers.Conv2D(6, (5, 5), activation=tf.nn.relu,
                          input_shape=(28, 28, 1), name='conv1'),
            layers.MaxPooling2D((2, 2), strides=(2, 2), name='max_pool1'),
            layers.Conv2D(16, (3, 3), activation=tf.nn.relu, name='conv2'),
            layers.MaxPooling2D((2, 2), strides=(2, 2), name='max_pool2'),
            layers.Flatten(),
            layers.Dense(120, activation=tf.nn.relu, name='fc1'),
            layers.Dense(84, activation=tf.nn.relu, name='fc2'),
            # Output layer: one probability per class; config['class_num']
            # is 10 for the digits setup configured above.
            layers.Dense(config['class_num'], activation=tf.nn.softmax,
                         name='predictions'),
        ])

        net.summary()

        self.model = net


class DataSourceMnist(object):
    """
    Load the gzipped idx1/idx3 (MNIST-format) EMNIST data set.

    Exposes four attributes: ``train_images``/``train_labels`` and
    ``test_images``/``test_labels``, with images reshaped to
    (n, 28, 28, 1) and pixel values scaled into [0, 1].
    """

    def __init__(self):
        # Resolve the data directory relative to this source file.
        # BUGFIX: the original concatenated with '+'
        # (".../src" + "./../../data...") which produces a "src." path
        # component that does not exist on disk; os.path.join inserts the
        # separator so the relative traversal resolves correctly.
        dir_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), config['data_set_path'])
        paths = [os.path.join(dir_path, name) for name in config['data_sets']]

        # config['data_sets'] is ordered: train labels, train images,
        # test labels, test images (see the config block above).
        train_labels = read_idx1(paths[0])
        train_images = read_idx3(paths[1])
        test_labels = read_idx1(paths[2])
        test_images = read_idx3(paths[3])

        # Add the trailing channel axis. Using -1 lets NumPy infer the
        # sample count, so this also works for data sets whose size differs
        # from config['train_size'] / config['test_size'].
        train_images = train_images.reshape((-1, 28, 28, 1))
        test_images = test_images.reshape((-1, 28, 28, 1))

        # Map pixel values from [0, 255] into [0, 1].
        train_images, test_images = train_images / 255.0, test_images / 255.0

        self.train_images, self.train_labels = train_images, train_labels
        self.test_images, self.test_labels = test_images, test_labels


class Train:
    """Wire the CNN model to the EMNIST data source and run training."""

    def __init__(self):
        self.cnn = CNN()
        self.data = DataSourceMnist()

    def train(self):
        """Compile the model, fit for 5 epochs with checkpointing, then evaluate."""
        ckpt_path = config['check_path']
        # Persist weights only (not the full model) every 5 epochs.
        checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(
            ckpt_path, save_weights_only=True, verbose=1, period=5)

        model = self.cnn.model
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.fit(self.data.train_images, self.data.train_labels,
                  epochs=5, callbacks=[checkpoint_cb])

        test_loss, test_acc = model.evaluate(
            self.data.test_images, self.data.test_labels)
        # Report accuracy and the number of evaluated test images.
        print("准确率: %.4f，共测试了%d张图片 " % (test_acc, len(self.data.test_labels)))


if __name__ == "__main__":
    # Script entry point: build the trainer and run one train/evaluate cycle.
    trainer = Train()
    trainer.train()
