'''Trains a simple convnet on the Fashion-MNIST dataset.

(The "99.25% test accuracy after 12 epochs" figure comes from the
original MNIST version of this example; Fashion-MNIST is harder and
will reach a lower accuracy with the same architecture.)
16 seconds per epoch on a GRID K520 GPU.
'''

from __future__ import print_function
import tensorflow as tf
import keras
import os
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K


# Training hyper-parameters.
batch_size = 128   # samples per gradient update
num_classes = 10   # number of target categories
epochs = 12        # full passes over the training set

# Input image dimensions (rows x cols).
img_rows, img_cols = 28, 28

# Load the Fashion-MNIST train/test split bundled with Keras.
(x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()

# Human-readable names for the 10 Fashion-MNIST classes.
# NOTE(review): not referenced anywhere below — kept for reference/plotting.
class_names = [
    'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
    'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot',
]

# 返回默认图像数据格式约定 ('channels_first' 或 'channels_last')。
# Backends disagree on whether the channel axis comes first or last;
# build the per-sample shape the configured backend expects, then
# reshape both splits to add the explicit single grayscale channel.
if K.image_data_format() == 'channels_first':
    input_shape = (1, img_rows, img_cols)   # (channels, rows, cols)
else:
    input_shape = (img_rows, img_cols, 1)   # (rows, cols, channels)

x_train = x_train.reshape((x_train.shape[0],) + input_shape)
x_test = x_test.reshape((x_test.shape[0],) + input_shape)

# Scale pixel intensities from [0, 255] integers to [0, 1] float32.
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# One-hot encode the integer labels into (n_samples, num_classes)
# binary matrices, e.g. label 4 -> [0, 0, 0, 0, 1, 0, 0, 0, 0, 0].
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Small convnet: two conv layers -> max-pool -> dropout -> dense head.
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu',
           input_shape=input_shape),           # only the first layer needs input_shape
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),                             # regularization: drop 25% of activations
    Flatten(),                                 # 2-D feature maps -> flat vector
    Dense(128, activation='relu'),             # fully-connected hidden layer, 128 units
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),  # per-class probabilities
])

# 用于配置训练模型。
# Configure the training procedure: Adadelta optimizer, categorical
# cross-entropy loss (matches the one-hot labels), accuracy metric.
# NOTE(review): no explicit learning rate — Adadelta's default differs
# between standalone Keras (1.0) and tf.keras (0.001); verify which
# backend is in use if training stalls.
model.compile(
    optimizer=keras.optimizers.Adadelta(),
    loss=keras.losses.categorical_crossentropy,
    metrics=['accuracy'],
)

# Train for a fixed number of epochs, reporting held-out metrics after
# each one. verbose: 0 = silent, 1 = progress bar, 2 = one line/epoch.
model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)

# Final held-out evaluation: score is [loss, accuracy].
score = model.evaluate(x_test, y_test, verbose=0)
# Persist the trained model (architecture + weights) to disk.
model.save('listm.h5')

print('Test loss:', score[0])
print('Test accuracy:', score[1])
