import sys
import os

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
from datetime import datetime
from pprint import pprint
from sklearn.metrics import classification_report
from tensorflow.keras.optimizers import Adadelta, Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
import json
import shutil

from src.image_classification import data_analysis, data_processor, model_generator

##############################################
# Hyperparameters
learning_rate = 1e-1  # initial learning rate
min_learning_rate = 1e-5  # lowest learning rate allowed
learning_rate_reduction_factor = 0.5  # factor by which the learning rate is reduced
patience = 3  # how many epochs to wait after the loss plateaus before reducing the learning rate
verbose = 1  # controls verbosity of the output
image_size = (100, 100)  # (width, height) of the images used
input_shape = image_size + (3,)  # expected input shape of the trained model; images are 100 x 100 RGB, hence (width, height, 3)

use_label_file = False  # True: load label names from a file; the file should contain the label names used, one per line
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))  # project root, derived from this file's path
base_dir = '/'.join(base_dir.split('\\'))  # normalize Windows backslashes to forward slashes
label_file = base_dir + '/src/image_classification/labels.txt'
test_dir = base_dir + '/data/Testing'
train_dir = base_dir + '/data/Training'
retrain_dir = base_dir + '/data/Retraining'  # folder of images re-added for training
retest_dir = base_dir + '/data/Retesting'  # folder of images re-added for testing
output_dir = base_dir + '/src/image_classification/output_files'  # root folder for output files; all outputs are saved under output_dir


##############################################
def get_num_classes(directory: str) -> list:
    """
    Return all training label names found in a directory.

    Each entry of *directory* is treated as one label (the data layout uses
    one subfolder per class); hidden entries whose name starts with '.'
    (e.g. ``.DS_Store``) are skipped.

    Note: despite its name, this function returns the list of labels, not
    their count (the name is kept for backward compatibility).

    :param directory: path of the folder containing one entry per label
    :return: list of label names, in ``os.listdir`` order
    """
    # Original used a manual append loop and an invalid `-> []` annotation;
    # a comprehension preserves the listdir order and the dot-file filter.
    return [label for label in os.listdir(directory) if not label.startswith('.')]


if not os.path.exists(output_dir):
    os.makedirs(output_dir)
# To train on a subset of the fruit classes instead of all of them, set
# use_label_file to True and put the classes to train into label_file,
# one per line.
if not os.path.exists(base_dir + '/data/all_labels.txt'):
    # First run: record the label set discovered from the training folder.
    labels = get_num_classes(train_dir)
    with open(base_dir + '/data/all_labels.txt', mode='w', encoding='utf-8') as f:
        for label in labels:
            f.write('{}\n'.format(label))
else:
    # A label list from a previous run exists: load it and verify that this
    # run's training classes match the ones used last time.
    labels = []
    with open(base_dir + '/data/all_labels.txt', mode='r', encoding='utf-8') as f:
        for line in f.readlines():
            labels.append(line.strip())
    now_labels = get_num_classes(train_dir)
    difference = set(now_labels).difference(set(labels))
    if difference:
        # key_val.json maps label names to their ids; used here only to build
        # a readable error message listing the unexpected labels.
        with open(base_dir + '/data/key_val.json', mode='r', encoding='utf-8') as f:
            key_val = json.loads(f.read())
        string = ''
        for d in difference:
            try:
                string += '{}:{}\n'.format(d, key_val[d])
            except KeyError:
                raise Exception('key_val.json未更新！<{}>找不到！'.format(d))
        raise Exception('上次的训练类别与本次不同，请确认！\n不同的标签:\n{}\n确认后请删除all_labels.txt再重新运行程序（建议备份一下，以免之前的模型无法运行）'.format(string))
if use_label_file:
    # Optional override: restrict training to the labels listed in label_file.
    with open(label_file, "r") as f:
        labels = [x.strip() for x in f.readlines()]
num_classes = len(labels)


def train_and_evaluate_model(model, input_dir='', args=None, name="", epochs=25, batch_size=50, verbose=verbose):
    """
    Run all the steps of setting up the data, training and testing the model,
    and plotting the results.

    :param model: any trainable Keras model; its input shape and number of
                  outputs depend on the dataset used, e.g. 100x100 RGB input
                  and a softmax output layer with one probability per class
    :param input_dir: folder (under output_dir) of a previously saved model to
                      continue training from; '' trains from scratch
    :param args: command-line arguments (see parse()); when given, training
                 resumes from args.input_dir using the Retraining/Retesting data
    :param name: used to name the output folder that holds the classification
                 report (f1 scores), the loss/accuracy plots and the confusion
                 matrix
    :param epochs: number of training epochs
    :param batch_size: number of images passed through the network at once;
                       steps per epoch are derived as (images // batch_size) + 1
    :param verbose: verbosity of the Keras output
    :return: None
    """
    print(model.summary())
    model_out_dir = output_dir + '/' + name
    if not os.path.exists(model_out_dir):
        os.makedirs(model_out_dir)
    if args is not None:
        model.load_weights(output_dir + '/' + args.input_dir + "/model.h5")
        # Bug fix: read epochs/batch_size from the CLI args BEFORE building the
        # generators; previously the generators were built with the default
        # batch_size parameter instead of args.batch_size.
        epochs = args.epochs
        batch_size = args.batch_size
        train_gen, valid_gen, test_gen = data_processor.build_data_generators(retrain_dir, retest_dir, labels=labels,
                                                                              image_size=image_size,
                                                                              batch_size=batch_size)
        # Generator over the original test set, evaluated separately below.
        _, _, origin_test_gen = data_processor.build_data_generators(train_dir, test_dir, labels=labels,
                                                                     image_size=image_size,
                                                                     batch_size=batch_size)
    else:
        if input_dir != '':
            model.load_weights(output_dir + '/' + input_dir + "/model.h5")
        train_gen, valid_gen, test_gen = data_processor.build_data_generators(train_dir, test_dir, labels=labels,
                                                                              image_size=image_size,
                                                                              batch_size=batch_size)
    optimizer = Adadelta(learning_rate=learning_rate)
    # optimizer = Adam(learning_rate=learning_rate)
    model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
    # Reduce the learning rate (down to min_learning_rate) when the training
    # loss has plateaued for `patience` epochs.
    learning_rate_reduction = ReduceLROnPlateau(monitor='loss', patience=patience, verbose=verbose,
                                                factor=learning_rate_reduction_factor, min_lr=min_learning_rate)
    # Keep only the checkpoint with the lowest training loss seen so far.
    save_model = ModelCheckpoint(filepath=model_out_dir + "/model.h5", monitor='loss', verbose=verbose,
                                 save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch')

    history = model.fit(train_gen,
                        epochs=epochs,
                        validation_data=valid_gen,
                        steps_per_epoch=(train_gen.n // batch_size) + 1,
                        verbose=verbose,
                        callbacks=[learning_rate_reduction, save_model])

    # Reload the best checkpoint before evaluating.
    model.load_weights(model_out_dir + "/model.h5")

    train_gen.reset()  # rewind the generator to its first batch
    loss_t, accuracy_t = model.evaluate(train_gen, steps=(train_gen.n // batch_size) + 1, verbose=verbose)
    loss, accuracy = model.evaluate(test_gen, steps=(test_gen.n // batch_size) + 1, verbose=verbose)
    print("Train: accuracy = %f  ;  loss_v = %f" % (accuracy_t, loss_t))
    print("Test: accuracy = %f  ;  loss_v = %f" % (accuracy, loss))
    data_analysis.plot_model_history(history, out_path=model_out_dir)
    test_gen.reset()  # rewind the generator to its first batch
    y_pred = model.predict(test_gen, steps=(test_gen.n // batch_size) + 1, verbose=verbose)
    y_true = test_gen.classes[test_gen.index_array]
    if args is not None:
        data_analysis.plot_confusion_matrix(y_true, y_pred.argmax(axis=-1), get_num_classes(retrain_dir),
                                            out_path=model_out_dir)
        origin_test_gen.reset()  # rewind the generator to its first batch
        y_pred_origin = model.predict(origin_test_gen, steps=(origin_test_gen.n // batch_size) + 1, verbose=verbose)
        y_true_origin = origin_test_gen.classes[origin_test_gen.index_array]
        class_report = classification_report(y_true_origin, y_pred_origin.argmax(axis=-1), target_names=labels)
        with open(model_out_dir + "/classification_report_origin.txt", "w") as text_file:
            text_file.write("%s" % class_report)
        class_report = classification_report(y_true, y_pred.argmax(axis=-1), target_names=get_num_classes(retrain_dir))
        with open(model_out_dir + "/classification_report.txt", "w") as text_file:
            text_file.write("%s" % class_report)
    else:
        # Snapshot the label list and the label->id mapping next to the model
        # so a later retraining run can check consistency against them.
        shutil.copyfile(base_dir + '/data/all_labels.txt', model_out_dir + "/all_labels.txt")
        shutil.copyfile(base_dir + '/data/key_val.json', model_out_dir + "/key_val.json")
        class_report = classification_report(y_true, y_pred.argmax(axis=-1), target_names=labels)
        with open(model_out_dir + "/classification_report.txt", "w") as text_file:
            text_file.write("%s" % class_report)
        data_analysis.plot_confusion_matrix(y_true, y_pred.argmax(axis=-1), labels, out_path=model_out_dir)


def parse(argv=None):
    """
    Parse the command-line arguments used to further train an already trained
    model:
    -input_dir: folder of the saved model, e.g. rnn_model-2021-10-03-17-50-06/
    -epochs / -batch_size: training epochs and batch size

    :param argv: optional list of argument strings; defaults to sys.argv[1:]
                 (added for testability, backward compatible)
    :return: parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(description="use trained model to predict new pictures")
    parser.add_argument('-input_dir', type=str, default='None',
                        help='directory of the saved model, for example:rnn_model-2021-10-03-17-50-06')
    # Use real int defaults; the original passed strings ('25', '50') and
    # relied on argparse re-parsing string defaults through `type`.
    parser.add_argument('-epochs', type=int, default=25, help='training epochs')
    parser.add_argument('-batch_size', type=int, default=50, help='training batch_size')
    args = parser.parse_args(argv)
    return args


if __name__ == '__main__':
    # PyCharm launch: just run this file directly.
    name = "cnn-自己拍摄的数据-11种-normalization" + datetime.now().strftime("-%Y-%m-%d-%H-%M-%S")
    pprint(labels)
    print(num_classes)
    model = model_generator.network_cnn_normalization(input_shape=input_shape, num_classes=300)  # num_classes=300 means the model can distinguish at most 300 classes
    train_and_evaluate_model(model, name=name, epochs=5, batch_size=41)

    # # Command-line launch
    # # NOTE(review): as written this passes args positionally into the
    # # input_dir parameter — it should be
    # # train_and_evaluate_model(model, args=args, name=name).
    # name = "rnn_model" + datetime.now().strftime("-%Y-%m-%d-%H-%M-%S")
    # args = parse()
    # model = model_generator.network_rnn(input_shape=input_shape, num_classes=num_classes)
    # train_and_evaluate_model(model, args, name)
