import matplotlib as mpl
import matplotlib.pyplot as plt

import numpy as np
import sklearn  # NOTE: imported but never used in this script
import pandas as pd
import os
import sys
import time
import tensorflow as tf

from tensorflow.keras import Sequential, layers, datasets


# Preprocessing helper for the handwritten-digit (MNIST) images and labels
def preprocess(x, y):
    """Normalize an image batch and one-hot encode its labels.

    Args:
        x: image batch of shape [b, 28, 28] with integer pixel values 0-255.
        y: integer class labels of shape [b].

    Returns:
        Tuple of (float32 images scaled into [0, 1],
        one-hot label vectors of depth 10).
    """
    # Scale pixel intensities into [0, 1] as float32.
    images = tf.cast(x, dtype=tf.float32) / 255.
    # Convert integer labels to 10-way one-hot vectors.
    labels = tf.one_hot(tf.cast(y, dtype=tf.int32), depth=10)
    return images, labels


# Fetch the MNIST handwritten-digit splits (downloads on first use).
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
# Scale pixel values from [0, 255] into [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0
# Append a trailing channel axis: [b, 28, 28] -> [b, 28, 28, 1] for Conv2D.
x_train = x_train[..., np.newaxis]
x_test = x_test[..., np.newaxis]

print("train shape:", x_train.shape)
print("test shape:", x_test.shape)


# CNN: three conv stages (32 -> 64 -> 128 filters) followed by a dense head.
model = Sequential([
    # Stage 1: 32 5x5 filters; 'same' padding keeps the 28x28 spatial size.
    layers.Conv2D(32, kernel_size=5, padding='same', activation='relu', strides=1,
                  input_shape=(28, 28, 1)),
    layers.MaxPooling2D(pool_size=2),  # 28x28 -> 14x14

    # Stage 2: two layers of 64 3x3 filters.
    layers.Conv2D(64, kernel_size=3, padding='same', activation='relu', strides=1),
    layers.Conv2D(64, kernel_size=3, padding='same', activation='relu', strides=1),
    layers.MaxPooling2D(pool_size=2),  # 14x14 -> 7x7

    # Stage 3: two layers of 128 3x3 filters.
    layers.Conv2D(128, kernel_size=3, padding='same', activation='relu', strides=1),
    layers.Conv2D(128, kernel_size=3, padding='same', activation='relu', strides=1),
    layers.MaxPooling2D(pool_size=2),  # 7x7 -> 3x3

    layers.Flatten(),  # flatten feature maps for the fully connected layers

    layers.Dense(512, activation='sigmoid'),
    layers.Dropout(0.25),  # dropout regularization (not an activation)

    layers.Dense(512, activation='sigmoid'),
    layers.Dropout(0.25),

    layers.Dense(256, activation='sigmoid'),
    layers.Dropout(0.1),

    # BUG FIX: the output must be a probability distribution over 10 classes.
    # The original used 'sigmoid', whose outputs do not sum to 1, which is
    # inconsistent with SparseCategoricalCrossentropy(from_logits=False)
    # used at compile time; 'softmax' is the correct output activation.
    layers.Dense(10, activation='softmax')
])

# Loss object: labels are integer class ids, so use the sparse variant of
# categorical cross-entropy (the instance is called directly during training).
losses = tf.keras.losses.SparseCategoricalCrossentropy()
# Adam(lr=0.001) reached ~99.30% acc (batch 128), ~99.33% after 50 epochs;
# SGD(learning_rate=0.01, momentum=0.95) reached ~99.08% in earlier runs.
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

model.compile(loss=losses,
              optimizer=optimizer,
              metrics=['accuracy'])

# Train on the training split.
model.fit(x=x_train,
          y=y_train,
          batch_size=256,
          epochs=50)

model.summary()

# Step 5: evaluate generalization on the held-out test split.
loss, acc = model.evaluate(x_test, y_test)
# BUG FIX: the original message said "train model, accuracy" although `acc`
# is the test-set accuracy returned by evaluate() on (x_test, y_test).
print("test accuracy:{:5.2f}%".format(100 * acc))