#!/usr/bin/python
# coding:utf-8
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from keras import backend as K
import tensorflow as tf

num_classes = 10  # ten digit classes (0-9) in MNIST
img_rows, img_cols = 28, 28  # MNIST images are 28x28 grayscale pixels

class Keras1(object):
    """End-to-end MNIST classifier: load data, build a small CNN,
    train with SGD, and evaluate on the test split."""

    def pre_process(self):
        """Load MNIST and prepare tensors/labels for training.

        Sets self.input_shape, self.trainX/testX (float32 in [0, 1])
        and self.trainY/testY (one-hot encoded).
        """
        (rawTrainX, rawTrainY), (rawTestX, rawTestY) = mnist.load_data()

        # The channel axis position depends on the active backend.
        if K.image_data_format() == "channels_first":
            # MNIST is grayscale, hence a single channel.
            self.input_shape = (1, img_rows, img_cols)
        else:
            self.input_shape = (img_rows, img_cols, 1)
        rawTrainX = rawTrainX.reshape((rawTrainX.shape[0],) + self.input_shape)
        rawTestX = rawTestX.reshape((rawTestX.shape[0],) + self.input_shape)

        # Scale pixel values from 0..255 into real numbers in [0, 1].
        self.trainX = rawTrainX.astype("float32") / 255.0
        self.testX = rawTestX.astype("float32") / 255.0

        # Convert integer labels to the required one-hot format.
        self.trainY = keras.utils.to_categorical(rawTrainY, num_classes)
        self.testY = keras.utils.to_categorical(rawTestY, num_classes)

    def creat_model(self):
        """Build the LeNet-style convolutional network."""
        self.model = Sequential([
            # 32 filters of size 5x5, followed by 2x2 max pooling.
            Conv2D(32, kernel_size=(5, 5), activation="relu",
                   input_shape=self.input_shape),
            MaxPooling2D(pool_size=(2, 2)),
            # Second conv/pool stage with 64 filters.
            Conv2D(64, kernel_size=(5, 5), activation="relu"),
            MaxPooling2D(pool_size=(2, 2)),
            # Flatten feature maps, then classify through dense layers.
            Flatten(),
            Dense(500, activation="relu"),
            Dense(num_classes, activation="softmax"),
        ])

    def cross(self):
        """Configure the training objective.

        compile() specifies the loss function, the optimizer, and the
        metrics tracked during training.
        """
        self.model.compile(
            loss=keras.losses.categorical_crossentropy,
            optimizer=keras.optimizers.SGD(),
            metrics=["accuracy"],
        )

    def train_fit(self):
        """Train on the training split, validating against the test split."""
        self.model.fit(
            self.trainX,
            self.trainY,
            batch_size=128,   # samples per gradient update
            epochs=20,        # full passes over the training data
            validation_data=(self.testX, self.testY),
        )

    def test_fit(self):
        """Evaluate on the test split, print metrics, and dump the graph."""
        testLoss, testAcc = self.model.evaluate(self.testX, self.testY)
        print("test loss", testLoss)
        print("test accuracy:", testAcc)

        # NOTE(review): tf.summary.FileWriter / tf.get_default_graph are
        # TensorFlow 1.x APIs — confirm the runtime TF version before reuse.
        writer = tf.summary.FileWriter("../log/keras", tf.get_default_graph())
        writer.close()

    def run(self):
        """Execute the full pipeline: data -> model -> compile -> train -> test."""
        self.pre_process()
        self.creat_model()
        self.cross()
        self.train_fit()
        self.test_fit()

if __name__ == '__main__':
    # Script entry point: run the full train/evaluate pipeline.
    Keras1().run()

