#coding=utf-8
# Modern LeNet-5-style network (3 convolutional layers) built with the
# TF2.x Keras API, trained here on CIFAR-10.
# Includes TensorBoard logging and periodic model checkpointing.
import os

# Reduce TensorFlow's C++ logging verbosity; must be set before
# `import tensorflow` ('1' filters out INFO messages, keeps warnings/errors).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import Sequential, layers, losses, optimizers, datasets
import datetime
import matplotlib.pyplot as plt
import time

time_start = time.time()  # wall-clock start; compared with time_end at the bottom of the script

# Data preprocessing function
def preprocess(x, y):
    """Scale image pixels from [0, 255] to [0.0, 1.0]; pass labels through.

    Labels stay as sparse integer class ids (no one-hot encoding), which
    matches the SparseCategoricalCrossentropy loss configured below.
    """
    images = tf.cast(x, tf.float32) / 255.0
    return images, y


# Load dataset ############
# Seed both RNGs: np.random.seed alone does NOT make tf.data's shuffle
# reproducible, because Dataset.shuffle draws from TensorFlow's own RNG.
np.random.seed(2021)
tf.random.set_seed(2021)
batch_size = 200

# CIFAR-10: 50000 train / 10000 test RGB images of shape (32, 32, 3);
# labels are integer class ids in [0, 9] with shape (N, 1).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

# Build input pipelines ############
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_db = train_db.shuffle(10000)  # NOTE(review): buffer < 50000 samples, so shuffling is only partial
train_db = train_db.batch(batch_size)
train_db = train_db.map(preprocess)  # normalization runs once per batch (map after batch)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.batch(1000)  # no shuffle for evaluation
test_db = test_db.map(preprocess)


_, *input_shape = x_train.shape  # per-image shape: [32, 32, 3]

# Build the model: a LeNet-5-style CNN with 3 conv layers ################
model = keras.Sequential([
    # Conv block 1: 6 filters of 5x5 over the 3-channel 32x32 input
    # -> 6 feature maps of 28x28 (valid padding, stride 1)
    keras.layers.Conv2D(6, 5, input_shape=input_shape),
    keras.layers.ReLU(),  # ReLU activation
    keras.layers.MaxPooling2D(pool_size=2, strides=2),  # 2x2 max pool: 28x28 -> 14x14

    # Conv block 2: 16 filters of 5x5 -> 16 feature maps of 10x10
    keras.layers.Conv2D(16, 5),
    keras.layers.ReLU(),  # ReLU activation
    keras.layers.MaxPooling2D(pool_size=2, strides=2),  # 2x2 max pool: 10x10 -> 5x5

    # Conv block 3: 120 filters of 5x5 over 5x5 maps -> (None, 1, 1, 120)
    keras.layers.Conv2D(120, 5),
    keras.layers.ReLU(),

    # Flatten (None, 1, 1, 120) down to (None, 120)
    keras.layers.Flatten(),
    # Fully connected layer 1: 120 -> 84
    keras.layers.Dense(84, activation='relu'),
    # Output layer: 84 -> 10 class probabilities
    keras.layers.Dense(10, activation='softmax')
])
model.summary()

# Optional: resume from previously saved weights (disabled)
# model.load_weights('./logs./Lenet5/model_ep-004_acc-0.301040.h5')
# model.load_weights('./content/drive/MyDrive/tf/lenet5_model.h5')

# Configure training: plain SGD, sparse-label cross-entropy, accuracy metric.
sgd_optimizer = keras.optimizers.SGD(learning_rate=0.01)
model.compile(optimizer=sgd_optimizer,
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

# TensorBoard logging #####################
# One run directory per launch, named by timestamp: ./logs/YYYYmmdd-HHMMSS
parent_dir = './logs'
sub_dir = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log_dir = os.path.join(parent_dir, sub_dir)
print(log_dir)
# makedirs creates parent and leaf in one call; exist_ok=True makes it
# race-free (the original exists()-then-mkdir pair could fail if the
# directory appeared between the check and the mkdir).
os.makedirs(log_dir, exist_ok=True)

tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir,
                                                   histogram_freq=1,
                                                   profile_batch=100000)
# profile_batch is pushed past the number of batches actually run so the
# profiler never triggers (profiling exhausted GPU memory on a laptop).

# Periodic model checkpointing #####################
# Checkpoint filename embeds the epoch number and the training accuracy.
model_name = 'model_ep-{epoch:03d}_acc-{accuracy:03f}.h5'
model_dir = "./logs/course9_2"
model_path = os.path.join(model_dir, model_name)
print(model_dir)
# os.mkdir fails when './logs' does not exist yet; makedirs creates the
# whole path and tolerates a pre-existing directory.
os.makedirs(model_dir, exist_ok=True)


checkpoint = keras.callbacks.ModelCheckpoint(
    filepath=model_path,
    monitor='accuracy',         # with a validation set, prefer 'val_accuracy'
    verbose=1,
    save_weights_only=True,     # save weights only, not the full model
    save_best_only=True,        # write a file only when the monitored metric improves
    mode='max',                 # 'accuracy' is to be maximized
    save_freq='epoch',
    # NOTE(review): `period` is deprecated in TF2 in favor of save_freq;
    # kept to preserve the original save-every-4-epochs cadence.
    period=4
)
# EarlyStopping defaults to monitor='val_loss', but fit() below gets no
# validation data, so the default would only log a warning every epoch and
# never stop. Monitor the training loss instead so the callback works.
early_stop = keras.callbacks.EarlyStopping(monitor='loss',
                                           patience=5, min_delta=1e-3)

# Training ################
training_epoches = 100
# shuffle=False: shuffling is already done inside train_db; the fit-level
# shuffle argument is ignored for tf.data datasets anyway.
history = model.fit(train_db, epochs=training_epoches,
                    verbose=1, shuffle=False,
                    callbacks=[tensorboard_callback, checkpoint, early_stop])

# Training-loss curve over epochs
plt.plot(history.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')


# Evaluate on the held-out test set
model.evaluate(test_db)
# print(model.evaluate(train_db))

# Create the target directory first: model.save raises when './data' is
# missing instead of creating it.
os.makedirs('./data', exist_ok=True)
model.save('./data/course9_2.h5')
# model.save('./content/drive/MyDrive/tf/lenet5_model.h5')

time_end = time.time()
print('time cost', time_end - time_start, 's')
plt.show()
