#coding=utf-8
# Three-layer neural network for the MNIST dataset, using TF 2.0
# Model built with the Keras functional API
# Parameters updated automatically by the built-in compile/fit loop

import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
import time

time_start = time.time()  # wall-clock timer for the whole run

# First run downloads the data; it is cached under ~/.keras/datasets afterwards.
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()

# Optional: shrink the training set for quick experiments.
# cutSize = 5000
# x_train = x_train[:cutSize, :, :]
# y_train = y_train[:cutSize]

print(x_train.shape)
print(y_train.shape)

# Cast to float32 FIRST, then scale to [0, 1] — the original divided the uint8
# array by 255.0, creating a throwaway float64 intermediate before the cast.
# Flatten each 28x28 image to a 784-vector: train -> (60000, 784), test -> (10000, 784).
x_train = (x_train.astype(np.float32) / 255.0).reshape(x_train.shape[0], -1)
x_test = (x_test.astype(np.float32) / 255.0).reshape(x_test.shape[0], -1)
# One-hot encode labels to match the CategoricalCrossentropy loss used below.
y_train = tf.one_hot(y_train, depth=10)
y_test = tf.one_hot(y_test, depth=10)

train_size, num_feas = x_train.shape  # (60000, 784)
labels = 10                           # number of classes

# Hyper-parameters ##############################
learning_rate = 0.01      # Adam step size
training_epochs = 10      # passes over the training set
batch_size = 300          # mini-batch size
#################################################
tf.random.set_seed(2021)  # reproducible shuffling / weight init

# Shuffle over the whole training set (buffer == train_size), then batch.
train_iter = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_iter.shuffle(buffer_size=train_size).batch(batch_size=batch_size)


# Method 1: build the model with the functional API: 784 -> 128 (sigmoid) -> 10 (softmax).
# NOTE: shape must be a tuple — (num_feas) is just an int in parentheses, so the
# original relied on Keras silently promoting an int; (num_feas,) is explicit.
inputs = keras.Input(shape=(num_feas,), name="input")
x = layers.Dense(128, activation="sigmoid", name="hidden")(inputs)
outputs = layers.Dense(labels, activation="softmax", name="output")(x)
model = keras.Model(inputs=inputs, outputs=outputs)


# Method 2: equivalent Sequential definition.
# model = keras.Sequential([
#     keras.layers.Dense(128, input_shape=[num_feas, ], activation='sigmoid', name="hidden"),  # 784*128
#     keras.layers.Dense(labels, activation='softmax')  # 128*10
# ])


####################
model.summary()
model.compile(
    optimizer=tf.optimizers.Adam(learning_rate=learning_rate),
    loss=tf.losses.CategoricalCrossentropy(),  # targets are one-hot, so categorical (not sparse)
    metrics=['accuracy'],
)

history = model.fit(train_data, epochs=training_epochs)

# Equivalent: fit directly on the arrays and let Keras batch/shuffle.
# history = model.fit(x_train, y_train, batch_size=batch_size, epochs=training_epochs, shuffle=True)

plt.plot(history.history['loss'])  # training-loss curve, shown at the end

# model.save() does not create missing directories — ensure ./data exists first.
os.makedirs('./data', exist_ok=True)
model.save('./data/course8_1_model.h5')

time_end = time.time()
print('训练用时：', time_end - time_start, 's')

print(model.evaluate(x_test, y_test, batch_size=2000))   # test set (loss, accuracy)
print(model.evaluate(x_train, y_train, batch_size=2000)) # training set (loss, accuracy)
plt.show()
print('done')