import tensorflow as tf
from tensorflow.keras import layers, losses, optimizers, metrics, activations
import numpy as np
from sklearn.model_selection import train_test_split
import os

# Reproducibility: pin both the NumPy and TensorFlow RNG seeds.
np.random.seed(777)
tf.random.set_seed(777)

# 1. CNN on the MNIST dataset
# (1) Load the data and print the array shapes for a sanity check.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)

# (3) Preprocess: scale uint8 pixels into [0, 1] as float32 and append a
#     channel axis so each image is (28, 28, 1), as Conv2D expects.
x_train = x_train.astype(np.float32) / 255.
x_test = x_test.astype(np.float32) / 255.
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)

# (2) Hold out 10% of the training images as a validation split.
batch_size = 128
x_train, x_val, y_train, y_val = train_test_split(
    x_train, y_train, random_state=777, train_size=0.9)

# ②	模型处理
# 1)	创建模型
# 2)	做两次卷积，卷积核尺寸3*3，卷积核大小32，使用relu激活
# 3)	配合最大池化，池化和2*2
# 4)	设计随机失活比例0.25
# 5)	将维度展开
# 6)	创建两个全连接层，神经元数量分别为128,10
# (2) Model definition:
#     two 3x3 conv blocks (32 filters each, ReLU), each followed by 2x2
#     max pooling and 25% dropout, then flatten into two dense layers
#     (128 hidden units, 10-way softmax output).
dropout_rate = 0.25
model = tf.keras.Sequential([
    layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
                  padding='valid', activation='relu',
                  input_shape=(28, 28, 1)),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
    layers.Dropout(dropout_rate),
    layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
                  padding='valid', activation='relu'),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
    layers.Dropout(dropout_rate),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax'),
])

# Training configuration: TensorBoard logging, early stopping, Adam optimizer.
ver = 'v1.0'
alpha = 0.001  # Adam learning rate
filename = os.path.basename(__file__)
logdir = os.path.join('_log', filename, ver)
tb_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir, update_freq='batch')
# BUG FIX: with this compile() configuration Keras logs the validation loss as
# 'val_loss' and the metric as 'val_sparse_categorical_accuracy'; the key
# 'val_sparse_categorical_crossentropy' is never produced, so EarlyStopping
# only emitted a "metric not available" warning and never triggered. Monitor
# 'val_loss' — the same quantity under its actual logged name.
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                              min_delta=1e-3,
                                              patience=2,
                                              verbose=1)
model.compile(
    optimizer=optimizers.Adam(learning_rate=alpha),
    loss=losses.sparse_categorical_crossentropy,
    metrics=[metrics.sparse_categorical_accuracy]
)

# 7)	设置合理训练次数，保证测试集准确率高于80%
n_epochs = 2
model.fit(x_train,
          y_train,
          batch_size=batch_size, epochs=n_epochs,verbose=1,
          callbacks=[tb_callback, early_stop],
          validation_data=(x_val, y_val)
          )

print('测试集：')
model.evaluate(x_test, y_test, verbose=1)
