# -*- coding: utf-8 -*-
"""
******* 文档说明 ******
模型训练

# 当前项目: Cifar10-Classification
# 创建时间: 2019/6/22 13:41
# 开发作者: vincent
# 创建平台: PyCharm Community Edition
# 版    本: V1.0
"""
import sys
import os
import json
import time
import tensorflow as tf
import importlib
from sklearn.metrics import classification_report, confusion_matrix

from CODE import argument_init         # 参数初始化
from CODE import val_data, TrainData   # 导入训练数据、验证数据


# Print model information (structure + trainable parameters) and optionally
# save the architecture to a JSON file.
def model_info(model, model_structure=None, logger=None):
    """
    :param model:   the model (must provide ``summary`` / ``layers`` / ``to_json``)
    :param model_structure:   path of the JSON file to save the architecture to;
                              if None nothing is saved
    :param logger:  logger object; if None messages are printed to stdout
    :return: None
    """
    def _print(string, level='INFO'):
        # Route output either to stdout or to the supplied logger.
        if logger is None:
            print('---> ', string)
        elif level == 'INFO':
            logger.info(string)
        else:
            logger.debug(string)

    # Print the model structure.
    model.summary(line_length=150, print_fn=_print)

    # Print the trainable parameters: name and shape of every weight that
    # belongs to a trainable layer actually owning weights.
    _print('Trainable params:  ')
    for layer in model.layers:
        # get_config() is called once per layer (the original called it twice);
        # dict.get covers both "key missing" and "trainable is False".
        if layer.get_config().get('trainable') and layer.weights:
            _print('       '.join('{}  {}'.format(w_i.name, w_i.shape) for w_i in layer.weights))
    _print('_' * 98)

    # Save the model architecture as pretty-printed JSON.
    if model_structure is not None:
        with open(model_structure, "w", encoding='utf-8') as f_json:
            json.dump(json.loads(model.to_json()), f_json, indent=4, sort_keys=True, ensure_ascii=False)


# TensorBoard monitoring helper.
def tensorboard_monitor(accuracy, loss):
    """Register scalar summaries for the tracked metrics and return the merged op.

    :param accuracy: scalar tensor/placeholder fed with the accuracy value
    :param loss:     scalar tensor/placeholder fed with the loss value
    :return: the merged summary operation (``tf.summary.merge_all()``)
    """
    # Track how the evaluation metrics evolve over training
    # (validation accuracy and loss value).
    with tf.name_scope('Evaluation'):
        # tf.summary.scalar('learning_rate', learning_rate)
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('accuracy', accuracy)

    # Merge every registered summary into a single op and hand it back.
    return tf.summary.merge_all()


# Main training entry point.
def main(argv):
    """Parse the command-line arguments, build the network, then either just
    inspect its structure (``Check`` pattern) or run the full training,
    checkpointing and evaluation pipeline.

    :param argv: list of command-line arguments (typically ``sys.argv[1:]``)
    :return: None
    """
    # Initialise the run arguments
    args = argument_init(argv)

    # GPU usage settings (Session Config)
    session_config = tf.ConfigProto(
        allow_soft_placement=args.session__allow_soft_placement,
        log_device_placement=args.session__log_device_placement)
    # Allowing GPU memory growth
    session_config.gpu_options.allow_growth = args.session__allow_growth
    # only allocate 80% of the total memory of each GPU
    session_config.gpu_options.per_process_gpu_memory_fraction = args.session__per_process_gpu_memory_fraction

    with tf.Session(config=session_config).as_default() as sess:
        # Import the module that defines the network
        try:
            network = importlib.import_module(args.model__name)
        except Exception as error:  # import failed -- abort immediately
            args.Logger.error("Import Module Failure:  {}".format(error))
            raise Exception("Import Module Failure:  {}".format(error))

        # Build the model
        model = network.inference(input_shape=args.model__input_image_shape,
                                  output_shape=args.model__output_image_label,
                                  drop_out=args.train__dropout)

        # Print the model structure
        model_info(model, model_structure=args.model_structure, logger=args.Logger)

        #  ##############################################################################
        # "Check" pattern: only inspect the model structure
        if args.train__pattern == "Check":
            args.Logger.info("Check The TF Keras Model [{}].".format(args.model__name))
            # Initialise all variables
            sess.run(tf.global_variables_initializer())
            # Save the graph structure; view with:  tensorboard --logdir=log_path
            tf.summary.FileWriter(args.Tensorboard_log_path, sess.graph)

        #  ##############################################################################
        # Otherwise: train the model
        else:
            # #################################  Initialise the model parameters
            if args.train__pretrained_model is not None:
                args.Logger.info("Fine Tuning From :  {}".format(args.train__pretrained_model))
                model.load_weights(args.train__pretrained_model, by_name=True)

                # # Manually assign values to certain tensors   # ResNet50 parameters
                # print(model.get_layer("bn_conv1").get_weights())
                # weight_temp = model.get_layer("bn_conv1").get_weights()
                # weight_temp[0] += 900
                # model.get_layer("bn_conv1").set_weights(weight_temp)
                # # Fetch the model parameters
                # print(model.get_layer("bn_conv1").get_weights()[0])

                # # Manually assign values to certain tensors   # DenseNet121 parameters
                # print(model.get_layer("conv1/bn").get_weights())
                # weight_temp = model.get_layer("conv1/bn").get_weights()
                # weight_temp[0] += 900
                # model.get_layer("conv1/bn").set_weights(weight_temp)
                # # Fetch the model parameters
                # print(model.get_layer("conv1/bn").get_weights()[0])

            # #################################  Optimizer
            from tensorflow import keras

            if args.train__optimizer == 'SGD':
                # LearningRate = LearningRate * 1/(1 + decay * epoch)
                optimizer = keras.optimizers.SGD(
                    lr=args.train__learning_rate, decay=args.train__learning_rate_decay_rate)
            elif args.train__optimizer == 'RMSPROP':
                optimizer = keras.optimizers.RMSprop()
            elif args.train__optimizer == 'ADADELTA':
                optimizer = keras.optimizers.Adadelta()
            elif args.train__optimizer == 'ADAGRAD':
                optimizer = keras.optimizers.Adagrad()
            else:
                optimizer = keras.optimizers.Adam()

            # #################################  Compile the model
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizer,
                          metrics=['accuracy'])

            # #################################  TensorBoard monitoring
            # TensorBoard writer object
            tensorboard_writer = tf.summary.FileWriter(args.Tensorboard_log_path, sess.graph)

            # Placeholders fed with the per-batch loss/accuracy values below
            loss_tensorboard = tf.placeholder(tf.float32, [])
            acc_tensorboard = tf.placeholder(tf.float32, [])

            merged = tensorboard_monitor(acc_tensorboard, loss_tensorboard)

            # #################################  Model training
            step = 0

            # Training-data reader
            train_data = TrainData(args.trainData__path, logger=args.Logger)

            # Save the data/model config (label mapping + start time)
            config = {'label_mapping': train_data.label_mapping,
                      'time': args.Start_time_str}
            with open(args.model_config, 'w', encoding='utf-8') as json_file:
                json.dump(config, json_file, ensure_ascii=True, indent=4, sort_keys=True)

            # Load the validation data
            x_val, y_val = val_data(args.valData__path, input_image_shape=args.model__input_image_shape,
                                    label_mapping=train_data.label_mapping, logger=args.Logger)

            for epoch_i in range(1, args.trainData__epochs + 1):
                train_data_generator = train_data.generators(input_image_shape=args.model__input_image_shape,
                                                             output_image_label=args.model__output_image_label,
                                                             batch_size=args.trainData__batch_size)
                # Train batch by batch
                loss = list()
                acc = list()
                for batch_i, (x_train, y_train) in enumerate(train_data_generator):
                    loss_i, acc_i = model.train_on_batch(x_train, y_train)
                    print("\r{:2d}/{:2d}  {:5d}    Loss:{:.3f}    Acc:{:.3f}            ".
                          format(epoch_i, args.trainData__epochs, batch_i, loss_i, acc_i), end='')

                    loss.append(loss_i)
                    acc.append(acc_i)

                    # Write every training step into TensorBoard
                    summary = sess.run(merged, feed_dict={acc_tensorboard: acc_i, loss_tensorboard: loss_i})
                    tensorboard_writer.add_summary(summary, step)
                    step += 1

                # Model validation: predict in batches, with a batch size 5x the training one
                val_loss, val_acc = model.evaluate(x_val, y_val, verbose=args.train__verbose,
                                                   batch_size=args.trainData__batch_size*5)

                batch_len = len(acc)
                # {epoch} BatchLen:{number of batches}  LossMean:{mean train loss}  AccMean:{mean train acc}  ValLoss:{validation loss}  ValAcc:{validation acc}
                print_format = '{} BatchLen:{}  LossMean:{:.3f}  AccMean:{:.3f}  ValLoss:{:.3f}  ValAcc:{:.3f}'
                args.Logger.info(print_format.format(
                    epoch_i, batch_len, sum(loss) / batch_len, sum(acc) / batch_len, val_loss, val_acc))

                # When only the latest/best model is kept, the checkpoint name carries no epoch suffix
                if args.train__checkpoint_save_best_only:
                    checkpoint_model_name = "{}_{}_lately.h5".format(args.model__name, args.Start_time_str)
                else:
                    checkpoint_model_name = "{}_{}_{:02d}.h5".format(args.model__name, args.Start_time_str, epoch_i)
                # Save the model
                model.save(os.path.join(args.Train_check_point_path, checkpoint_model_name), overwrite=True)

            args.Logger.info("Model Train Over!!!  UsedTime:{:.1f} Second.".format(
                time.time() - args.Start_time_float))

            #  ##############################################################################
            # Model testing
            args.Logger.info("Model Test:{}".format('-' * 50))
            y_predict = model.predict(x_val)

            # Classification report
            dev_report = classification_report(y_val.argmax(axis=1), y_predict.argmax(axis=1), digits=3)
            args.Logger.info('Evaluation Report: \n{}'.format(dev_report))
            # Confusion matrix of the classification results
            dev_confusion_matrix = confusion_matrix(y_val.argmax(axis=1), y_predict.argmax(axis=1))
            args.Logger.info('Evaluation Confusion_matrix: \n{}\n\n'.format(dev_confusion_matrix))

            # ################## Write the run arguments into TensorBoard's Text panel
            # TODO print the parameters that deserve attention
            args_info = "【learning_rate】:{:.5f}  【dropout_keep_p】:{:.2f}  【epochs】:{:3d}  【batch_size】:{}" \
                        "【optimizer】:{} <br/>【pretrained_model】:{}". \
                format(args.train__learning_rate, args.train__dropout, args.trainData__epochs,
                       args.trainData__batch_size, args.train__optimizer, args.train__pretrained_model)

            argument_info = sess.run(tf.summary.text("ArgumentInfo",
                                                     tf.convert_to_tensor(args_info.replace(' ', '&nbsp;'))))
            tensorboard_writer.add_summary(argument_info)


if __name__ == '__main__':
    # NOTE: the raw string below is a no-op literal kept as a scratch pad of
    # example command-line argument presets for debugging -- a Windows
    # ResNet50 run and a Linux DenseNet121 run.
    r"""  模型调试参数   
    --train__pattern Train
    --model__output_image_label 10
    --model__input_image_shape 224
    --model__name model.resnet
    --trainData__epochs 2
    --train__checkpoint_epoch 1
    --train__checkpoint_save_best_only
    --trainData__batch_size 5
    --train__pretrained_model D:\Desktop\CatsDogsWar\resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
    --trainData__path D:\Desktop\Cifar10-Classification\Data\Image\train_data.csv
    --valData__path D:\Desktop\Cifar10-Classification\Data\Image\test_data.csv    
    --result__path D:\_temp\__result
    --log__path D:\_temp\__log

    --train__pattern Train
    --model__output_image_label 10
    --model__input_image_shape 224
    --model__name model.denseNet121
    --trainData__epochs 2
    --train__checkpoint_epoch 1
    --train__checkpoint_save_best_only
    --trainData__batch_size 5
    --train__pretrained_model /home/vincent/Project/PreTrainedModel/tf_keras/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5  
    --trainData__path /home/vincent/Project/Cifar10-Classification/Data/Image/test_data.csv
    --valData__path /home/vincent/Project/Cifar10-Classification/Data/Image/train_data.csv
    """

    # Run the training pipeline with the real command-line arguments.
    main(sys.argv[1:])
