import datetime
import os
from functools import partial

import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import (EarlyStopping, LearningRateScheduler,
                                        TensorBoard)
from tensorflow.keras.optimizers import SGD, Adam

from nets.unet1 import Unet

from nets.unet_training import (CE, Focal_Loss, dice_loss_with_CE,
                                dice_loss_with_Focal_Loss, get_lr_scheduler)
from utils.callbacks import (EvalCallback, ExponentDecayScheduler, LossHistory,
                             ModelCheckpoint)
from utils.dataloader import UnetDataset
from utils.utils import show_config
from utils.utils_fit import fit_one_epoch
from utils.utils_metrics import Iou_score, f_score
from keras.callbacks import LambdaCallback
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


if __name__ == "__main__":
    # ---------------------------------------------------------------------#
    #   Whether to train in eager mode
    # ---------------------------------------------------------------------#
    eager = False
    # ---------------------------------------------------------------------#
    #   train_gpu   GPUs used for training.
    #               Defaults to the first card; use [0, 1] for two cards and
    #               [0, 1, 2] for three. With multiple GPUs, the batch on each
    #               card is the total batch divided by the number of cards.
    #               NOTE(review): currently an empty list, which sets
    #               CUDA_VISIBLE_DEVICES to "" below (i.e. CPU-only) — confirm intended.
    # ---------------------------------------------------------------------#
    # train_gpu = [0, ]
    train_gpu =[]
    # ---------------------------------------------------------------------#
    #   Must be changed when training on your own dataset:
    #   the number of classes you need + 1, e.g. 2 + 1.
    # ---------------------------------------------------------------------#
    num_classes = 2
    # -------------------------------#
    #   Backbone network choice:
    #   vgg
    #   resnet50
    # -------------------------------#
    backbone = "resnet50"
    # ----------------------------------------------------------------------------------------------------------------------------#
    #   When model_path = '' no whole-model weights are loaded.
    #
    #   The weights used here are for the entire model, so they are loaded here in train.py.
    #   To start from the backbone's pretrained weights, set model_path to the backbone
    #   weights file; only the backbone is loaded in that case.
    #   To train from scratch, set model_path = '' and Freeze_Train = False below;
    #   training then starts from zero with no frozen-backbone phase.
    # ----------------------------------------------------------------------------------------------------------------------------#
    model_path =''
        # "model_data/unet_resnet_voc.h5"
    # ---------------------------------------------------------#
    #   input_shape  Size of the input image; must be a multiple of 32.
    # ---------------------------------------------------------#
    input_shape = [256, 256]

    # ----------------------------------------------------------------------------------------------------------------------------#
    #   Training has two phases: a frozen phase and an unfrozen phase. The frozen phase
    #   exists for users whose machines lack the resources for full training.
    #   Frozen training needs less GPU memory; on a very weak card you can set
    #   Freeze_Epoch equal to UnFreeze_Epoch to do frozen training only.
    # ------------------------------------------------------------------#
    #   Frozen-phase training parameters.
    #   The backbone is frozen here, so the feature-extraction network does not change.
    #   Memory usage is low; only the rest of the network is fine-tuned.
    #   Init_Epoch          Epoch to start from; may be greater than Freeze_Epoch, e.g.
    #                       Init_Epoch = 60, Freeze_Epoch = 50, UnFreeze_Epoch = 100
    #                       skips the frozen phase, starts directly at epoch 60 and
    #                       adjusts the learning rate accordingly.
    #                       (Used when resuming from a checkpoint.)
    #   Freeze_Epoch        Number of epochs of frozen training.
    #                       (Ignored when Freeze_Train=False.)
    #   Freeze_batch_size   Batch size during frozen training.
    #                       (Ignored when Freeze_Train=False.)
    # ------------------------------------------------------------------#
    Init_Epoch = 0
    Freeze_Epoch = 10
    Freeze_batch_size = 2
    # ------------------------------------------------------------------#
    #   Unfrozen-phase training parameters.
    #   The backbone is no longer frozen, so the feature-extraction network changes.
    #   Memory usage is high; all network parameters are updated.
    #   UnFreeze_Epoch          Total number of training epochs.
    #   Unfreeze_batch_size     Batch size after unfreezing.
    # ------------------------------------------------------------------#
    UnFreeze_Epoch = 200
    Unfreeze_batch_size = 8
    # ------------------------------------------------------------------#
    #   Freeze_Train    Whether to use frozen training.
    #                   Default behavior is to freeze the backbone first,
    #                   then unfreeze for the rest of training.
    # ------------------------------------------------------------------#

    Freeze_Train = False

    # ------------------------------------------------------------------#
    #   Other training parameters: learning rate, optimizer, LR decay.
    # ------------------------------------------------------------------#
    # ------------------------------------------------------------------#
    #   Init_lr         Maximum learning rate of the model.
    #                   Suggested Init_lr=1e-4 with the Adam optimizer.
    #                   Suggested Init_lr=1e-2 with the SGD optimizer.
    #   Min_lr          Minimum learning rate; defaults to 0.01 x the maximum.
    # ------------------------------------------------------------------#
    Init_lr = 1e-4
    Min_lr = Init_lr * 0.01
    # ------------------------------------------------------------------#
    #   optimizer_type  Optimizer to use: adam or sgd.
    #                   Suggested Init_lr=1e-4 with the Adam optimizer.
    #                   Suggested Init_lr=1e-2 with the SGD optimizer.
    #   momentum        Momentum parameter used inside the optimizer.
    # ------------------------------------------------------------------#
    optimizer_type = "adam"
    momentum = 0.9
    # ------------------------------------------------------------------#
    #   lr_decay_type   Learning-rate decay schedule: 'step' or 'cos'.
    # ------------------------------------------------------------------#
    lr_decay_type = 'cos'
    # ------------------------------------------------------------------#
    #   save_period     Save the weights every this many epochs.
    # ------------------------------------------------------------------#
    save_period = 2
    # ------------------------------------------------------------------#
    #   save_log_dir    Folder where loss/TensorBoard logs are saved.
    #   save_dir        Folder where the weight files are saved.
    # ------------------------------------------------------------------#
    save_log_dir = 'logs_tqh'
    save_dir = 'pt'
    # ------------------------------------------------------------------#
    #   eval_flag       Whether to evaluate on the validation set during training.
    #   eval_period     Evaluate every this many epochs; frequent evaluation is
    #                   not recommended, as it is time-consuming and slows
    #                   training down considerably.
    #   The mAP obtained here differs from the one from get_map.py for two reasons:
    #   (1) Here the mAP is computed on the validation set.
    #   (2) Conservative evaluation settings are used here to speed evaluation up.
    # ------------------------------------------------------------------#
    eval_flag = True
    eval_period = 1

    # ------------------------------------------------------------------#
    #   VOCdevkit_path  Dataset root path.
    # ------------------------------------------------------------------#
    VOCdevkit_path = 'VOCdevkit'
    # ------------------------------------------------------------------#
    #   Suggested settings for dice_loss:
    #   Few classes (a handful): set to True.
    #   Many classes (dozen+) with a large batch_size (>10): set to True.
    #   Many classes (dozen+) with a small batch_size (<10): set to False.
    # ------------------------------------------------------------------#
    dice_loss = True
        # False
    # ------------------------------------------------------------------#
    #   Whether to use focal loss to counter positive/negative sample imbalance.
    # ------------------------------------------------------------------#
    focal_loss = True
    # ------------------------------------------------------------------#
    #   Whether to give different classes different loss weights; balanced by default.
    #   If set, use a numpy array whose length equals num_classes, e.g.:
    #   num_classes = 3
    #   cls_weights = np.array([1, 2, 3], np.float32)
    # ------------------------------------------------------------------#
    cls_weights = np.ones([num_classes], np.float32)
    # -------------------------------------------------------------------#
    #   Whether to use multi-threaded data loading; 1 disables multithreading.
    #   Enabling it speeds up data reading but uses more memory.
    #   Enable only when I/O is the bottleneck, i.e. the GPU computes far
    #   faster than images can be read.
    # -------------------------------------------------------------------#
    num_workers = 2

    # ------------------------------------------------------#
    #   Set which GPUs are visible to TensorFlow
    # ------------------------------------------------------#
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in train_gpu)
    ngpus_per_node = len(train_gpu)

    # Enable on-demand GPU memory growth instead of grabbing all memory up front.
    gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)

    # ------------------------------------------------------#
    #   Check the requested GPU count against the GPUs
    #   actually present on the machine
    # ------------------------------------------------------#
    if ngpus_per_node > 1 and ngpus_per_node > len(gpus):
        raise ValueError("The number of GPUs specified for training is more than the GPUs on the machine")

    # Use a mirrored (data-parallel) strategy only for multi-GPU runs.
    if ngpus_per_node > 1:
        strategy = tf.distribute.MirroredStrategy()
    else:
        strategy = None
    print('Number of devices: {}'.format(ngpus_per_node))

    if ngpus_per_node > 1:
        with strategy.scope():
            # ------------------------------------------------------#
            #   Build the model
            # ------------------------------------------------------#
            model = Unet([input_shape[0], input_shape[1], 3], num_classes, backbone)
            # if model_path != '':
            #     # ------------------------------------------------------#
            #     #   Load pretrained weights
            #     # ------------------------------------------------------#
            #     model.load_weights(model_path, by_name=True, skip_mismatch=True)
    else:
        # ------------------------------------------------------#
        #   Build the model
        # ------------------------------------------------------#
        model = Unet([input_shape[0], input_shape[1], 3], num_classes, backbone)
        # if model_path != '':
        #     # ------------------------------------------------------#
        #     #   Load pretrained weights
        #     # ------------------------------------------------------#
        #     model.load_weights(model_path, by_name=True, skip_mismatch=True)

    # ---------------------------#
    #   Read the dataset split txt files
    # ---------------------------#
    with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/train.txt"), "r") as f:
        train_lines = f.readlines()
    with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"), "r") as f:
        val_lines = f.readlines()
    num_train = len(train_lines)
    num_val = len(val_lines)

    # --------------------------#
    #   Select the loss function to use,
    #   based on the focal_loss / dice_loss flags set above.
    # --------------------------#
    if focal_loss:
        if dice_loss:
            loss = dice_loss_with_Focal_Loss(cls_weights)
        else:
            loss = Focal_Loss(cls_weights)
    else:
        if dice_loss:
            loss = dice_loss_with_CE(cls_weights)
        else:
            loss = CE(cls_weights)

    # Print a summary of the effective training configuration.
    show_config(
        num_classes=num_classes, backbone=backbone, model_path=model_path, input_shape=input_shape, \
        Init_Epoch=Init_Epoch, Freeze_Epoch=Freeze_Epoch, UnFreeze_Epoch=UnFreeze_Epoch, Freeze_batch_size=Freeze_batch_size, Unfreeze_batch_size=Unfreeze_batch_size,
        Freeze_Train=Freeze_Train, \
        Init_lr=Init_lr, Min_lr=Min_lr, optimizer_type=optimizer_type, momentum=momentum, lr_decay_type=lr_decay_type, \
        save_period=save_period, save_dir=save_dir, num_workers=num_workers, num_train=num_train, num_val=num_val
    )

    # ------------------------------------------------------#
    #   Backbone features are generic; frozen training speeds training up
    #   and can keep pretrained weights from being wrecked early on.
    #   Init_Epoch is the starting epoch.
    #   Freeze_Epoch is the number of frozen-training epochs.
    #   UnFreeze_Epoch is the total number of training epochs.
    #   On OOM / insufficient GPU memory, reduce Batch_size.
    # ------------------------------------------------------#
    if True:
        if Freeze_Train:
            # ------------------------------------#
            #   Freeze the first N layers (the backbone) for training
            # ------------------------------------#
            if backbone == "vgg":
                freeze_layers = 17
            elif backbone == "resnet50":
                freeze_layers = 172
            else:
                raise ValueError('Unsupported backbone - `{}`, Use vgg, resnet50.'.format(backbone))
            for i in range(freeze_layers): model.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(freeze_layers, len(model.layers)))

        # -------------------------------------------------------------------#
        #   Without frozen training, batch_size is simply Unfreeze_batch_size.
        #   NOTE(review): the freeze-aware conditional is commented out below,
        #   so Unfreeze_batch_size is used even during the frozen phase — confirm intended.
        # -------------------------------------------------------------------#
        batch_size =Unfreeze_batch_size
            # Freeze_batch_size if Freeze_Train else Unfreeze_batch_size

        # -------------------------------------------------------------------#
        #   Adapt the learning rate to the current batch_size
        #   (linear scaling against a nominal batch size nbs, then clamped).
        # -------------------------------------------------------------------#
        nbs = 16
        lr_limit_max = 1e-4 if optimizer_type == 'adam' else 1e-1
        lr_limit_min = 1e-4 if optimizer_type == 'adam' else 5e-4
        Init_lr_fit = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)
        Min_lr_fit = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)

        # ---------------------------------------#
        #   Steps per epoch for train/val
        # ---------------------------------------#
        epoch_step = num_train // batch_size
        epoch_step_val = num_val // batch_size
        # lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)
        # Can we switch between training and validation here?

        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError('数据集过小，无法进行训练，请扩充数据集。')
        # Data access: build the keras Sequence-style dataloaders.

        train_dataloader = UnetDataset(train_lines, input_shape, batch_size, num_classes, True, VOCdevkit_path)
        val_dataloader = UnetDataset(val_lines, input_shape, batch_size, num_classes, False, VOCdevkit_path)

        # NOTE(review): the optimizer is built with Init_lr rather than the
        # batch-size-adapted Init_lr_fit computed above — confirm intended.
        optimizer = {
            'adam': Adam(learning_rate=Init_lr, beta_1=momentum),
            'sgd': SGD(learning_rate=Init_lr, momentum=momentum, nesterov=True)
        }[optimizer_type]
        if eager:
            # Eager-mode training path. Everything except the start epoch is
            # currently commented out, so with eager = True this branch only
            # sets start_epoch and then falls through without training.
            start_epoch = Init_Epoch
        #     end_epoch = UnFreeze_Epoch
        #     UnFreeze_flag = False
        #
        #     gen = tf.data.Dataset.from_generator(partial(train_dataloader.generate), (tf.float32, tf.float32))
        #     gen_val = tf.data.Dataset.from_generator(partial(val_dataloader.generate), (tf.float32, tf.float32))
        #
        #     gen = gen.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)
        #     gen_val = gen_val.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)
        #
        #     if ngpus_per_node > 1:
        #         gen = strategy.experimental_distribute_dataset(gen)
        #         gen_val = strategy.experimental_distribute_dataset(gen_val)
        #
        #     time_str = datetime.datetime.strftime(datetime.datetime.now(), '%Y_%m_%d_%H_%M_%S')
        #     log_dir = os.path.join(save_dir, "loss_" + str(time_str))
        #     loss_history = LossHistory(log_dir)
        #     eval_callback = EvalCallback(model, input_shape, num_classes, val_lines, VOCdevkit_path, log_dir, \
        #                                  eval_flag=eval_flag, period=eval_period)
        #     # ---------------------------------------#
        #     #   Start model training
        #     # ---------------------------------------#
        #     for epoch in range(start_epoch, end_epoch):
        #         # ---------------------------------------#
        #         #   If part of the model was frozen,
        #         #   unfreeze it and reset the parameters
        #         # ---------------------------------------#
        #         if epoch >= Freeze_Epoch and not UnFreeze_flag and Freeze_Train:
        #             batch_size = Unfreeze_batch_size
        #
        #             # -------------------------------------------------------------------#
        #             #   Adapt the learning rate to the current batch_size
        #             # -------------------------------------------------------------------#
        #             nbs = 16
        #             lr_limit_max = 1e-4 if optimizer_type == 'adam' else 1e-1
        #             lr_limit_min = 1e-4 if optimizer_type == 'adam' else 5e-4
        #             Init_lr_fit = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)
        #             Min_lr_fit = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)
        #             # ---------------------------------------#
        #             #   Obtain the LR decay schedule
        #             # ---------------------------------------#
        #             lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)
        #
        #             for i in range(len(model.layers)):
        #                 model.layers[i].trainable = True
        #
        #             epoch_step = num_train // batch_size
        #             epoch_step_val = num_val // batch_size
        #
        #             if epoch_step == 0 or epoch_step_val == 0:
        #                 raise ValueError("数据集过小，无法继续进行训练，请扩充数据集。")
        #
        #             train_dataloader.batch_size = batch_size
        #             val_dataloader.batch_size = batch_size
        #             print('train_dataloader' * 10, train_dataloader.__getitem__(0)[0].shape)
        #
        #             gen = tf.data.Dataset.from_generator(partial(train_dataloader.generate), (tf.float32, tf.float32))
        #             gen_val = tf.data.Dataset.from_generator(partial(val_dataloader.generate), (tf.float32, tf.float32))
        #
        #             gen = gen.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)
        #             gen_val = gen_val.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)
        #
        #             if ngpus_per_node > 1:
        #                 gen = strategy.experimental_distribute_dataset(gen)
        #                 gen_val = strategy.experimental_distribute_dataset(gen_val)
        #
        #             UnFreeze_flag = True
        #
        #         lr = lr_scheduler_func(epoch)
        #         K.set_value(optimizer.lr, lr)
        #
        #         fit_one_epoch(model, loss, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val,
        #                       end_epoch, f_score(), save_period, save_dir, strategy)
        #
        #         train_dataloader.on_epoch_end()
        #         val_dataloader.on_epoch_end()
        #
        else:
            # Graph-mode training via model.fit (the active path, since eager = False).
            start_epoch = Init_Epoch
            end_epoch = Freeze_Epoch if Freeze_Train else UnFreeze_Epoch

            # Compile inside the strategy scope for multi-GPU, otherwise plainly.
            if ngpus_per_node > 1:
                with strategy.scope():
                    model.compile(loss=loss,
                                  optimizer=optimizer,
                                  metrics=[f_score()])
            else:
                model.compile(loss=loss,
                              optimizer=optimizer,
                              metrics=[f_score()])
            # -------------------------------------------------------------------------------#
            #   Training-callback setup:
            #   logging         sets the TensorBoard save location
            #   checkpoint      controls weight saving; period sets how many epochs between saves
            #   lr_scheduler    sets how the learning rate decays
            #   early_stopping  early stop: training ends automatically when val_loss stops
            #                   improving for several epochs, i.e. the model has roughly converged
            # -------------------------------------------------------------------------------#
            time_str = datetime.datetime.strftime(datetime.datetime.now(), '%Y_%m_%d_%H_%M_%S')
            log_dir = os.path.join(save_log_dir, "loss_" + str(time_str))
            logging = TensorBoard(log_dir)
            loss_history = LossHistory(log_dir)
            checkpoint = ModelCheckpoint(os.path.join(save_dir, "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5"),
                                         monitor='val_loss', save_weights_only=True, save_best_only=False, period=save_period)
            checkpoint_last = ModelCheckpoint(os.path.join(save_dir, "last_epoch_weights.h5"),
                                              monitor='val_loss', save_weights_only=True, save_best_only=False, period=1)
            checkpoint_best = ModelCheckpoint(os.path.join(save_dir, "best_epoch_weights.h5"),
                                              monitor='val_loss', save_weights_only=True, save_best_only=True, period=1)
            # NOTE(review): early_stopping and lr_scheduler are constructed below
            # but never added to the callbacks list; lr_callback drives the LR
            # instead — confirm this is deliberate.
            early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
            lr_scheduler = LearningRateScheduler(get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch), verbose=1)
            eval_callback = EvalCallback(model, input_shape, num_classes, val_lines, VOCdevkit_path, log_dir, \
                                         eval_flag=eval_flag, period=eval_period)
            # NOTE(review): this assigns the scheduled LR at the END of each epoch
            # (so epoch `start_epoch` runs at the optimizer's initial LR), and it
            # passes the current lr as a second positional argument to the callable
            # returned by get_lr_scheduler — confirm the returned scheduler accepts it.
            lr_callback = LambdaCallback(
                on_epoch_end=lambda epoch, logs: model.optimizer.lr.assign(
                    get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)(epoch, model.optimizer.lr))
                #  on_epoch_end = lambda epoch, logs: model.optimizer.lr.assign(
                # get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)(epoch))

            )
            callbacks = [logging, loss_history, checkpoint, checkpoint_last, checkpoint_best,
                         lr_callback,
                         # lr_scheduler,
                         eval_callback]

            if start_epoch < end_epoch:
                print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
                model.fit(
                    x=train_dataloader,
                    steps_per_epoch=epoch_step,
                    validation_data=val_dataloader,
                    validation_steps=epoch_step_val,
                    epochs=end_epoch,
                    initial_epoch=start_epoch,
                    use_multiprocessing=True if num_workers > 1 else False,
                    workers=num_workers,
                    callbacks=callbacks
                )
            # ---------------------------------------#
            #   If part of the model was frozen,
            #   unfreeze it and set the parameters
            #   (second training stage — currently commented out)
            # ---------------------------------------#
            # if Freeze_Train:
            #     batch_size = Unfreeze_batch_size
            #     start_epoch = Freeze_Epoch if start_epoch < Freeze_Epoch else start_epoch
            #     end_epoch = UnFreeze_Epoch
            #
            #     # -------------------------------------------------------------------#
            #     #   Adapt the learning rate to the current batch_size
            #     # -------------------------------------------------------------------#
            #     nbs = 16
            #     lr_limit_max = 1e-4 if optimizer_type == 'adam' else 1e-1
            #     lr_limit_min = 1e-4 if optimizer_type == 'adam' else 5e-4
            #     Init_lr_fit = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)
            #     Min_lr_fit = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)
            #     # ---------------------------------------#
            #     #   Obtain the LR decay schedule
            #     # ---------------------------------------#
            #     lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)
            #     lr_scheduler = LearningRateScheduler(lr_scheduler_func, verbose=1)
            #     callbacks = [logging, loss_history, checkpoint, checkpoint_last, checkpoint_best, lr_scheduler, eval_callback]
            #
            #     for i in range(len(model.layers)):
            #         model.layers[i].trainable = True
            #     if ngpus_per_node > 1:
            #         with strategy.scope():
            #             model.compile(loss=loss,
            #                           optimizer=optimizer,
            #                           metrics=[f_score()])
            #     else:
            #         model.compile(loss=loss,
            #                       optimizer=optimizer,
            #                       metrics=[f_score()])
            #
            #     epoch_step = num_train // batch_size
            #     epoch_step_val = num_val // batch_size
            #
            #     if epoch_step == 0 or epoch_step_val == 0:
            #         raise ValueError("数据集过小，无法继续进行训练，请扩充数据集。")
            #
            #     train_dataloader.batch_size = Unfreeze_batch_size
            #     val_dataloader.batch_size = Unfreeze_batch_size
            #
            #     print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
            #     model.fit(
            #         x=train_dataloader,
            #         steps_per_epoch=epoch_step,
            #         validation_data=val_dataloader,
            #         validation_steps=epoch_step_val,
            #         epochs=end_epoch,
            #         initial_epoch=start_epoch,
            #         use_multiprocessing=True if num_workers > 1 else False,
            #         workers=num_workers,
            #         callbacks=callbacks
            #     )
