"""
内容：搭建卷积神经网络模型
日期：2020年6月30日
作者：Howie
"""
# 调用要使用的包
from keras.models import Sequential, load_model
from keras.layers import Dense, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard, EarlyStopping
import numpy as np
import os
import matplotlib.pyplot as plt

# Fix the random seed for reproducibility
np.random.seed(3)

# Paths and run configuration
DATASET_DIR = '../dataset/tri_and_rect/'  # dataset root (has 'train'/'test' subdirs)
HIST_PATH = './logs/Demo3_Acc-Loss Curve.pdf'
MODEL_PATH = './infer_model/'  # directory where the model file is saved
GRAPH_SAVE_PATH = './graph/'  # TensorBoard log directory
tb_hist = TensorBoard(
    log_dir=GRAPH_SAVE_PATH,
    histogram_freq=0,
    write_graph=True,
    write_images=True)
early_stopping = EarlyStopping(  # stop training when validation accuracy plateaus
    monitor='val_accuracy',
    patience=8
)
CALL_BACK_FUNC = [tb_hist, early_stopping]  # callbacks intended for training
N_CLASSES = 1  # binary classification -> a single sigmoid output unit
N_TRAIN_SAMPLES = 30  # number of training samples
N_TEST_SAMPLES = 10  # number of validation samples
IMG_WIDTH = 128  # image width in pixels
IMG_HEIGHT = 128  # image height in pixels
IMG_CHANNELS = 3  # number of image channels (RGB)
IMG_DIM = (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
AUG_MULTIPLE = 100 # augmentation multiplier applied to the training set

# Hyper-parameters
BATCH_SIZE = 5  # mini-batch size
EPOCHS = 100  # number of training epochs
conv_filter_dim = {
    'conv_filter_1': [(3, 3), 32],
    'conv_filter_2': [(3, 3), 64]
}  # conv layers: [kernel_size, n_filters]
pool_filter_dim = {
    'pool_filter_1': (2, 2),
    'pool_filter_2': (2, 2)
}  # max-pooling window sizes
dense_units = {
    'dense_dim_1': 128,
    'dense_dim_2': N_CLASSES
}  # dense layer widths

# 定义问题
"""
问题类型：二元分类问题
输入：三角形与矩形图像
输出：体现三角形和矩形概率的向量
"""


def hist_plot(hist):
    """Plot train/val loss and accuracy curves, save to HIST_PATH, and show.

    :param hist: Keras History object returned by the fit call
    :return: None
    """
    fig, loss_axis = plt.subplots()
    acc_axis = loss_axis.twinx()  # shared x-axis, second y-axis for accuracy
    history = hist.history
    # Per-epoch training and validation loss
    loss_axis.plot(history['loss'], 'y', label='train loss')
    loss_axis.plot(history['val_loss'], 'r', label='val loss')
    # Per-epoch training and validation accuracy
    acc_axis.plot(history['accuracy'], 'b', label='train acc')
    acc_axis.plot(history['val_accuracy'], 'g', label='val acc')
    # Axis labels
    loss_axis.set_xlabel('epoch')
    loss_axis.set_ylabel('loss')
    acc_axis.set_ylabel('accuracy')
    # Legends for both axes
    loss_axis.legend(loc='upper left')
    acc_axis.legend(loc='lower left')
    # Persist the figure, then display it
    plt.savefig(HIST_PATH)
    plt.show()


def load_dataset(dataset_dir):
    """Create the train, test, and augmented-train image generators.

    :param dataset_dir: dataset root containing 'train' and 'test' subdirs
    :return: (train_generator, test_generator, train_aug_generator)
    """
    def _flow(datagen, subdir):
        # Stream image batches labelled by their class subdirectory.
        return datagen.flow_from_directory(
            directory=os.path.join(dataset_dir, subdir),
            target_size=(IMG_HEIGHT, IMG_WIDTH),  # resize every image
            batch_size=BATCH_SIZE,
            class_mode='binary'  # 1D binary labels
        )

    print("Training set: ", end="")
    train_generator = _flow(ImageDataGenerator(rescale=1. / 255), 'train')
    print("Test set: ", end="")
    test_generator = _flow(ImageDataGenerator(rescale=1. / 255), 'test')
    print("Training set after Augmentation: ", end="")
    # Randomized geometric transforms for on-the-fly augmentation
    aug_datagen = ImageDataGenerator(rescale=1. / 255,
                                     rotation_range=10,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2,
                                     shear_range=0.7,
                                     zoom_range=[0.9, 2.2],
                                     horizontal_flip=True,
                                     vertical_flip=True,
                                     fill_mode='nearest')
    train_aug_generator = _flow(aug_datagen, 'train')
    return train_generator, test_generator, train_aug_generator


def model_building():
    """Build the CNN (two Conv+MaxPool blocks, then a dense classifier).

    Also writes an architecture diagram and a snapshot of the model file.
    NOTE: the model saved here is still UNTRAINED — it is only an initial
    snapshot of the architecture and random weights.

    :return: the (uncompiled) Keras Sequential model
    """
    model = Sequential()
    # First conv block
    model.add(Conv2D(filters=conv_filter_dim['conv_filter_1'][1],
                     kernel_size=conv_filter_dim['conv_filter_1'][0],
                     input_shape=IMG_DIM,
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=pool_filter_dim['pool_filter_1']))
    # Second conv block
    model.add(Conv2D(filters=conv_filter_dim['conv_filter_2'][1],
                     kernel_size=conv_filter_dim['conv_filter_2'][0],
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=pool_filter_dim['pool_filter_2']))
    # Classifier head
    model.add(Flatten())
    model.add(Dense(units=dense_units['dense_dim_1'], activation='relu'))
    # Single sigmoid unit for binary classification
    model.add(Dense(units=dense_units['dense_dim_2'], activation='sigmoid'))
    # Make sure the output directories exist before writing any files
    # (plot_model/model.save fail if the target directory is missing).
    os.makedirs('./logs', exist_ok=True)
    os.makedirs(MODEL_PATH, exist_ok=True)
    # Visualize the model structure
    plot_model(
        model=model,
        to_file='./logs/Demo3_model_2.pdf',
        show_shapes=True)
    # Save the initial (untrained) model snapshot
    model.save(os.path.join(MODEL_PATH, 'shape_mlp_model.h5'))
    return model


def model_training():
    """Train the CNN on the augmented training stream and evaluate it.

    Saves the fitted model to MODEL_PATH and plots the learning curves.

    :return: None
    """
    train_generator, test_generator, train_aug_generator = load_dataset(
        DATASET_DIR)
    model = model_building()
    # Configure the optimization problem
    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        metrics=['accuracy'])
    # Train the model
    hist = model.fit_generator(
        generator=train_aug_generator,  # augmented training data stream
        steps_per_epoch=N_TRAIN_SAMPLES // BATCH_SIZE * AUG_MULTIPLE,  # batches per epoch
        epochs=EPOCHS,  # maximum number of passes over the training data
        validation_data=test_generator,  # validation data stream
        validation_steps=N_TEST_SAMPLES // BATCH_SIZE,  # validation batches per epoch
        callbacks=CALL_BACK_FUNC  # FIX: TensorBoard/EarlyStopping were defined but never wired in
    )
    # FIX: persist the TRAINED weights, overwriting the untrained snapshot,
    # so that model_application() loads a fitted model.
    model.save(os.path.join(MODEL_PATH, 'shape_mlp_model.h5'))
    # Evaluate the model
    print("----Evaluate----")
    hist_plot(hist)  # visualize the training curves
    # FIX: evaluate exactly one pass over the test set (was hard-coded steps=5)
    scores = model.evaluate_generator(
        test_generator, steps=N_TEST_SAMPLES // BATCH_SIZE)
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))


def model_application(infer_model_dir):
    """Load the saved model and print its predictions on the test set.

    :param infer_model_dir: directory containing 'shape_mlp_model.h5'
    :return: None
    """
    print("----Predict----")
    _, test_generator, _ = load_dataset(DATASET_DIR)
    model_file = os.path.join(infer_model_dir, 'shape_mlp_model.h5')
    model = load_model(model_file)
    n_steps = N_TEST_SAMPLES // BATCH_SIZE
    output = model.predict_generator(test_generator, steps=n_steps)
    print(test_generator.class_indices)  # class name -> label index mapping
    print(output)


if __name__ == '__main__':
    # Train (and save) the model, then run inference with it
    model_training()
    # Apply the saved model to the test set
    model_application(MODEL_PATH)
