#coding=utf-8
# LeNet-5-style CNN for CIFAR-10 (header previously said MNIST, but the
# script loads keras.datasets.cifar10), built with the TF2 Keras API.
# Three convolutional stages followed by two dense layers.
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import Sequential, layers, losses, optimizers, datasets
import datetime
import matplotlib.pyplot as plt
import time

time_start = time.time()  # wall-clock start for the end-of-run timing report


# ---- Load dataset -------------------------------------------------------
# Seed both NumPy and TensorFlow so runs are reproducible: np.random.seed
# alone does NOT affect TF ops such as Dataset.shuffle below.
np.random.seed(2021)
tf.random.set_seed(2021)
batch_size = 200

# CIFAR-10: 50k training / 10k test RGB images, 10 classes.
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# x_train.shape = (50000, 32, 32, 3)       y_train.shape = (50000, 1)
# x_test.shape  = (10000, 32, 32, 3)       y_test.shape  = (10000, 1)
#
# NOTE: labels stay as integer class ids of shape (N, 1); one-hot encoding
# is unnecessary because the model is compiled with
# SparseCategoricalCrossentropy. (A direct tf.one_hot on the (N, 1) labels
# would produce (N, 1, 10) — squeeze axis 1 first if one-hot is ever needed.)

# 生成数据库 ############
def preprocess(image, label):
    """Scale uint8 pixel values to float32 in [0, 1]; labels pass through unchanged."""
    image = tf.cast(image, dtype=tf.float32) / 255.
    return image, label

# Training pipeline: shuffle every epoch, batch, then normalise the pixels.
train_db = (tf.data.Dataset
            .from_tensor_slices((x_train, y_train))
            .shuffle(10000)          # shuffle buffer over the training samples
            .batch(batch_size)
            .map(preprocess))

# Test pipeline: no shuffling, larger fixed-size batches.
test_db = (tf.data.Dataset
           .from_tensor_slices((x_test, y_test))
           .batch(1000)
           .map(preprocess))


# Per-image shape, e.g. (32, 32, 3); the leading sample count is dropped.
_, *input_shape = x_train.shape

# ---- Build the model ----------------------------------------------------
# LeNet-5-style network: three conv stages followed by a two-layer dense head.
model = Sequential()

# Conv stage 1: six 5x5 filters over the 32x32x3 input -> (28, 28, 6),
# then 2x2 max-pool -> (14, 14, 6).
model.add(layers.Conv2D(6, 5, input_shape=input_shape))
model.add(layers.ReLU())
model.add(layers.MaxPooling2D(pool_size=2, strides=2))

# Conv stage 2: sixteen 5x5 filters -> (10, 10, 16),
# then 2x2 max-pool -> (5, 5, 16).
model.add(layers.Conv2D(16, 5))
model.add(layers.ReLU())
model.add(layers.MaxPooling2D(pool_size=2, strides=2))

# Conv stage 3: 120 filters of 5x5 -> (1, 1, 120).
model.add(layers.Conv2D(120, 5))
model.add(layers.ReLU())

# Flatten (None, 1, 1, 120) -> (None, 120).
model.add(layers.Flatten())
# Dense head: 120 -> 84 -> 10 class probabilities.
model.add(layers.Dense(84, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))

model.summary()
# input()
# Compile: plain SGD; sparse cross-entropy matches the integer labels.
model.compile(optimizer=optimizers.SGD(0.01),
              loss=losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

# ---- Training -----------------------------------------------------------
training_epoches = 30
# The tf.data pipeline already shuffles each epoch, so fit's own shuffling
# is disabled.
history = model.fit(train_db, epochs=training_epoches,
                    verbose=1, shuffle=False)


# ---- Loss curve ---------------------------------------------------------
plt.plot(history.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')


# ---- Evaluate on the held-out test set ----------------------------------
model.evaluate(test_db)
# print(model.evaluate(train_db))

# Ensure the target directory exists before saving; model.save would
# otherwise fail if ./data is missing.
os.makedirs('./data', exist_ok=True)
model.save('./data/course9_1_model.h5')  # persist the trained model
# model.save('./content/drive/MyDrive/tf/lenet5_model.h5')

time_end = time.time()
print('time cost', time_end - time_start, 's')
plt.show()
