"""
Created on 2021/12/15
note  : Train and validation with VGG19 model
author: Yuze Xuan, Xiaohu Hao, Xuan Wang, Sida Wang
"""

# * Imports
import copy
import glob
import random
from typing import List, Tuple

import cv2
import matplotlib.pyplot as plt
import numpy as np
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, History, TensorBoard

from config import *
from vgg19 import vgg19


# * Collect dataset file information
def get_img_info(path: str, image_ext_ls: List[str]) -> Tuple[List[str], List[str], int]:
    """Collect the paths and integer labels of all images under ``path``.

    Each immediate sub-directory of ``path`` is treated as one class; its
    label is the sub-directory's index in sorted name order.

    :param path: root directory of the training dataset.
    :param image_ext_ls: list of image file extensions, e.g. ['.jpg', '.png']
        (a single extension string is also accepted).
    :return: tuple of (image path list, label list, number of classes).
    """
    img_path_ls: List[str] = []
    lb_ls: List[int] = []
    # Defensive: tolerate a single extension string as well as a list.
    ext_ls = [image_ext_ls] if isinstance(image_ext_ls, str) else list(image_ext_ls)
    sub_dirs = os.listdir(path)
    if '.DS_Store' in sub_dirs:  # drop the macOS Finder metadata entry
        sub_dirs.remove('.DS_Store')
    sub_dirs.sort()  # deterministic class -> label mapping
    print(sub_dirs)
    for index, dir_name in enumerate(sub_dirs):
        sub_dir = os.path.join(path, dir_name)
        print("dir --> label : {} --> {}".format(sub_dir, index))
        # BUG FIX: the old code formatted the whole extension LIST into one
        # glob pattern ("dir/*['.jpg', '.png']"); '[' / ']' are glob character
        # classes, so that pattern matched nothing useful. Glob per extension.
        matches: List[str] = []
        for ext in ext_ls:
            matches.extend(glob.glob(os.path.join(sub_dir, "*" + ext)))
        matches.sort()  # glob order is filesystem-dependent; sort for reproducibility
        img_path_ls += matches
        # ints are immutable, so no deepcopy is needed for the label list.
        lb_ls += [index] * len(matches)
    return img_path_ls, lb_ls, len(sub_dirs)


image_path_ls, label_ls, classes_num = get_img_info(os.path.join(DATASET_BASE_PATH, 'train_and_val'), IMAGE_EXT)

# Shuffle the path list and the label list with the SAME seed so both are
# permuted identically and the (path, label) pairing is preserved.
SEED = random.random()
random.seed(SEED)
random.shuffle(image_path_ls)
random.seed(SEED)
random.shuffle(label_ls)
# One-hot encode: one length-`classes_num` vector per image.
labels_ls: List[np.ndarray] = list(keras.utils.to_categorical(label_ls, classes_num))


# * Image augmentation and batch generator
def image_load_enhancer(img_path: str, **kwargs) -> np.ndarray:
    """Load one image, normalize it and apply random augmentation.

    :param img_path: path of the image file.
    :param kwargs: augmentation keyword arguments —
        'crop_probability' / 'crop_fix_range' control a random top-left crop;
        'flip_probability' plus 'horizontal_flip' / 'vertical_flip' /
        'diagonal_flip' (probabilities, presumably summing to 1 —
        TODO confirm against IMAGE_ENHANCER_KWARGS in config) control a
        random mirror.
    :return: float32 image array scaled to [0, 1], resized to IMAGE_SIZE.
    :raises FileNotFoundError: if the image cannot be read.
    """
    img: np.ndarray = cv2.imread(img_path)
    # FIX: cv2.imread silently returns None on a missing/corrupt file, which
    # previously surfaced later as an opaque AttributeError — fail fast instead.
    if img is None:
        raise FileNotFoundError("cannot read image: {}".format(img_path))
    img = cv2.resize(img, dsize=IMAGE_SIZE, interpolation=cv2.INTER_AREA)
    img = img.astype("float32")
    img /= 255.  # scale pixel values to [0, 1]

    # Random crop: drop a random strip from the top/left edges, resize back.
    if random.random() < kwargs.get('crop_probability', 0):
        x_crop = random.randint(0, kwargs.get('crop_fix_range', 0))
        y_crop = random.randint(0, kwargs.get('crop_fix_range', 0))
        img = img[x_crop:, y_crop:, :]
        img = cv2.resize(img, dsize=IMAGE_SIZE, interpolation=cv2.INTER_AREA)
    # Random mirror: cv2.flip code 1 / 0 / -1 = horizontal / vertical / both.
    if random.random() < kwargs.get('flip_probability', 0):
        flip_type = np.random.choice([1, 0, -1], p=[kwargs['horizontal_flip'],
                                                    kwargs['vertical_flip'],
                                                    kwargs['diagonal_flip']])
        img = cv2.flip(img, flip_type, dst=None)
        img = cv2.resize(img, dsize=IMAGE_SIZE, interpolation=cv2.INTER_AREA)
    return np.array(img)


def image_generator(img_paths: List[str], labels: List[np.ndarray], batch_size: int, **kwargs):
    """Endless batch generator for Keras ``fit``.

    Yields (images, labels) batches of at most ``batch_size`` items; when an
    epoch's worth of data has been consumed, both input lists are reshuffled
    in place with a shared seed (keeping the pairing intact) and iteration
    restarts from the beginning.

    :param img_paths: list of image file paths (shuffled in place).
    :param labels: list of one-hot label vectors (shuffled in place).
    :param batch_size: number of images per batch.
    :param kwargs: augmentation keyword arguments forwarded to
        ``image_load_enhancer``.
    :return: generator of (np.ndarray images, np.ndarray labels) pairs.
    """
    total = len(img_paths)
    start = 0
    while start < total:
        stop = min(start + batch_size, total)
        batch_images = [image_load_enhancer(p, **kwargs) for p in img_paths[start:stop]]
        batch_labels = labels[start:stop]
        yield np.array(batch_images), np.array(batch_labels)
        if start + batch_size < total:
            start += batch_size
        else:
            # Epoch boundary: reshuffle both lists with one shared seed so
            # paths and labels stay aligned, then restart.
            seed = random.random()
            for seq in (img_paths, labels):
                random.seed(seed)
                random.shuffle(seq)
            start = 0


# * Dataset split
# Partition the shuffled data into training and validation subsets.
train_num = int(TRAIN_RATIO * len(image_path_ls))  # number of training images
train_x = image_path_ls[:train_num]
train_y = labels_ls[:train_num]
val_x = image_path_ls[train_num:]
val_y = labels_ls[train_num:]

# * Build the model
# Instantiate the VGG19 network with `classes_num` output classes.
model = vgg19(classes_num)


# * Training and validation
def train_and_val(**kwargs) -> History:
    """Compile and train the module-level ``model``, with validation.

    :param kwargs: training hyper-parameters — expects keys 'lr',
        'batch_size', 'epochs' and 'patienceEpoch'.
    :return: tensorflow.keras.callbacks.History object; its ``history``
        attribute records per-epoch loss and accuracy.
    """
    # Configure the learning process.
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(learning_rate=kwargs['lr'], decay=0.),
                  metrics=['acc'])

    # Callbacks: TensorBoard logging, best-model checkpointing, early stop.
    tb_callback = TensorBoard(log_dir=os.path.join(SAVE_BASE_PATH, "tensorboard"),
                              histogram_freq=0, write_graph=True,
                              write_images=True)
    model_checkpoint = ModelCheckpoint(
        # FIX: removed the stray "./" segment that used to sit inside the
        # joined path ('checkpoints/./vgg_...').
        filepath=os.path.join(SAVE_BASE_PATH, 'checkpoints', "vgg_{epoch:02d}-{val_loss:.2f}-{val_acc:.2f}.hdf5"),
        verbose=0, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_acc', patience=kwargs['patienceEpoch'])

    # Print a short summary of the run configuration.
    print("Train and Validation Information:\n", "-" * 50)
    print("class num : {}, train num : {}, validation num : {}".format(classes_num, len(train_x), len(val_x)))
    print("batch : {}, epochs : {}\n".format(kwargs['batch_size'], kwargs['epochs']), "-" * 50)

    # FIX: guard against zero steps when a split holds fewer images than one
    # batch — Keras rejects steps_per_epoch / validation_steps == 0.
    train_steps = max(1, len(train_x) // kwargs['batch_size'])
    val_steps = max(1, len(val_x) // kwargs['batch_size'])
    history = model.fit(x=image_generator(train_x, train_y, kwargs['batch_size'], **IMAGE_ENHANCER_KWARGS),
                        epochs=kwargs['epochs'], steps_per_epoch=train_steps,
                        validation_data=image_generator(val_x, val_y, kwargs['batch_size'], **IMAGE_ENHANCER_KWARGS),
                        validation_steps=val_steps,
                        callbacks=[early_stopping, tb_callback, model_checkpoint])
    return history


# Run training with the hyper-parameters from config; keep the History for plotting.
train_history = train_and_val(**TRAIN_KWARGS)


# * Training-history analysis
def plot_train_history(history: History) -> None:
    """Plot, save and display the accuracy and loss curves of a training run.

    :param history: tensorflow.keras.callbacks.History returned by ``fit``.
    :return: None
    """
    # One spec per figure: (metric key, title, y-label, legend position, file).
    curve_specs = [
        ('acc', 'model accuracy', 'accuracy', 'lower right', 'acc_curve.png'),
        ('loss', 'model loss', 'loss', 'upper right', 'loss_curve.png'),
    ]
    for metric, title, y_label, legend_loc, file_name in curve_specs:
        plt.figure()  # fresh figure for each metric
        plt.plot(history.history[metric], label='training ' + metric)
        plt.plot(history.history['val_' + metric], label='val ' + metric)
        plt.title(title)
        plt.ylabel(y_label)
        plt.xlabel('epoch')
        plt.legend(loc=legend_loc)
        plt.savefig(os.path.join(SAVE_BASE_PATH, 'figures', file_name))
        plt.show()


# Visualize the accuracy/loss curves of the finished run.
plot_train_history(train_history)
