import os
import mindspore.dataset as ds
from mindspore import Tensor
import numpy as np
from HEC_metric import HEC
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore.dataset.transforms.py_transforms import Compose
from mydataset import DatasetGenerator
from mindspore import nn
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train.model import Model
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.callback import LossMonitor, TimeMonitor, ModelCheckpoint, CheckpointConfig
from mindspore import context
from nestedunet import NestedUNet
from FWIoU_metric import EvalCallBack, FWIoU
from loss import WeightedBCELoss
from UNet import UNet
from attention_Unet import AttU_Net
# import moxing as mox
import learning_rates
from config import cfg
import mindspore as MS
from mindspore.train.callback import Callback
from logger import get_logger


class EvalCallBack(Callback):
    """Callback that runs validation every `epochs_to_eval` epochs and saves a
    checkpoint whenever the "Hec_dice" metric improves.

    NOTE(review): this local class shadows the `EvalCallBack` imported from
    FWIoU_metric at the top of the file — confirm the local one is intended.
    """

    def __init__(self, model, net, eval_dataset, epochs_to_eval, per_eval, dataset_sink_mode):
        """
        Args:
            model: mindspore Model whose `.eval()` computes the metrics.
            net: the underlying network, passed to `save_checkpoint`.
            eval_dataset: validation dataset fed to `model.eval`.
            epochs_to_eval (int): run validation every this many epochs.
            per_eval (dict): history accumulator with "epoch" and "dice" lists.
            dataset_sink_mode (bool): forwarded to `model.eval`.
        """
        super().__init__()  # BUG FIX: base Callback initializer was never called
        self.model = model
        self.net = net
        self.eval_dataset = eval_dataset
        # epochs_to_eval is an int: validate every `epochs_to_eval` epochs
        self.epochs_to_eval = epochs_to_eval
        self.per_eval = per_eval
        self.dataset_sink_mode = dataset_sink_mode
        self.best = 0  # best Dice score observed so far

    def epoch_end(self, run_context):
        """At the end of each qualifying epoch: evaluate, record the Dice
        score, and checkpoint the net when the score is a new best."""
        # current epoch number, provided by the training loop
        cb_param = run_context.original_args()
        cur_epoch = cb_param.cur_epoch_num
        # only validate on epochs that are multiples of epochs_to_eval
        if cur_epoch % self.epochs_to_eval == 0:
            # the Model was built with metrics={"Hec_dice": HEC()}
            acc = self.model.eval(self.eval_dataset, dataset_sink_mode=self.dataset_sink_mode)
            dice = acc["Hec_dice"]
            self.per_eval["epoch"].append(cur_epoch)
            self.per_eval["dice"].append(dice)
            if dice > self.best:
                self.best = dice
                # BUG FIX: the original format string contained no "{}"
                # placeholders, so the epoch/score arguments were discarded and
                # every improved checkpoint overwrote one fixed file name.
                MS.save_checkpoint(self.net, "./output_train/attunet/unet_{}-{}.ckpt".format(
                    cur_epoch, round(dice, 2)
                ))
            # `logger` is the module-level logger created in the __main__ block
            logger.info("------------Hec_dice为: {} ------------".format(dice))


def train():
    """Build datasets, network, loss, optimizer and callbacks, then run
    training with per-epoch Dice evaluation.

    Relies on the module-level `cfg` object for all hyper-parameters and on
    the global `logger` created in the ``__main__`` block.
    """
    logger.info("-----------------start training!------------------")

    # Training set
    train_dataset_generator = DatasetGenerator(cfg.cases_path)
    # Validation set
    # NOTE(review): this uses the SAME path as the training set
    # (os.path.join with a single argument is a no-op) — validation is
    # therefore performed on the training data; confirm whether a separate
    # validation path was intended.
    valid_dataset_generator = DatasetGenerator(
        os.path.join(cfg.cases_path)
    )

    train_dataset = ds.GeneratorDataset(train_dataset_generator,
                                        ["image", "label"],
                                        shuffle=True)
    valid_dataset = ds.GeneratorDataset(valid_dataset_generator,
                                        ["image", "label"],
                                        shuffle=False)

    train_dataset = train_dataset.batch(cfg.BATCH_SIZE, num_parallel_workers=1)
    valid_dataset = valid_dataset.batch(cfg.BATCH_SIZE, num_parallel_workers=1)

    # loss = WeightedBCELoss(w0=1.39, w1=1.69)
    loss = nn.BCEWithLogitsLoss()
    # loss = nn.DiceLoss()
    # Keep the loss computation in fp32 even though the network runs under
    # mixed precision (amp_level="O3" below).
    loss.add_flags_recursive(fp32=True)
    # loss = nn.FocalLoss(weight=Tensor([0.28, 0.28, 0.44]), gamma=2.0, reduction='mean')
    # loss = WeightedMCLoss()
    # train_net = AttU_Net()
    train_net = UNet()

    # Different networks can be selected here (UNet / AttU_Net / NestedUNet).
    # Load a pretrained model if a checkpoint path is configured.
    if cfg.CKPT_PRE_TRAINED:
        param_dict = load_checkpoint(cfg.CKPT_PRE_TRAINED)
        load_param_into_net(train_net, param_dict)
# ---------------------------------------------------------
    # Optimizer: build the per-step learning-rate schedule first.
    iters_per_epoch = train_dataset.get_dataset_size()
    total_train_steps = iters_per_epoch * cfg.EPOCHS
    if cfg.LR_TYPE == 'cos':
        lr_iter = learning_rates.cosine_lr(cfg.BASE_LR,
                                           total_train_steps,
                                           total_train_steps)
    # default
    elif cfg.LR_TYPE == 'poly':
        lr_iter = learning_rates.poly_lr(cfg.BASE_LR,
                                         total_train_steps,
                                         total_train_steps,
                                         end_lr=0.0,
                                         power=0.9)
    elif cfg.LR_TYPE == 'exp':
        # Exponential decay: base LR (e.g. 1e-2), decay step (e.g. 40000),
        # decay rate (e.g. 0.1), stepped (staircase) rather than continuous.
        lr_iter = learning_rates.exponential_lr(cfg.BASE_LR,
                                                cfg.LR_DECAY_STEP,
                                                cfg.LR_DECAY_RATE,
                                                total_train_steps,
                                                staircase=True)
    else:
        raise ValueError('unknown learning rate type')

    # ---------------------------------------------------

    # Stochastic gradient descent with momentum and weight decay.
    opt = nn.SGD(params=train_net.trainable_params(),
                 learning_rate=lr_iter,
                 momentum=0.9,
                 weight_decay=0.0001,
                 loss_scale=cfg.LOSS_SCALE)

    # Fixed loss scale (e.g. 3072.0); must match the optimizer's loss_scale.
    manager_loss_scale = FixedLossScaleManager(cfg.LOSS_SCALE,
                                               drop_overflow_update=False)
    model = Model(train_net,
                  optimizer=opt,
                  amp_level="O3",
                  loss_fn=loss,
                  metrics={"Hec_dice": HEC()},
                  loss_scale_manager=manager_loss_scale)
    # epoch_per_eval = {"epoch": [], "FWIou": []}

    # Callbacks: timing and loss reporting.
    time_cb = TimeMonitor(data_size=iters_per_epoch)
    loss_cb = LossMonitor()

    # Checkpoint-saving policy.
    config_ckpt = CheckpointConfig(
        # save_checkpoint_steps: save once every this many steps
        save_checkpoint_steps=cfg.SAVE_CHECKPOINT_STEPS,
        # keep_checkpoint_max: maximum number of checkpoint files retained
        keep_checkpoint_max=cfg.KEEP_CHECKPOINT_MAX
    )

    # # Apply the checkpoint-saving parameters
    # ckpoint = ModelCheckpoint(prefix="lenet", directory="./lenet", config=config_ckpt)

    # prefix: file-name prefix of generated checkpoints; directory: output dir.
    cbs_1 = ModelCheckpoint(prefix=cfg.PREFIX,
                            directory=cfg.OUTPUT_DIR,
                            config=config_ckpt)
    # eval_cb = EvalCallBack(model, valid_dataset, cfg.EVAL_PER_EPOCH, epoch_per_eval)
    # NOTE(review): evaluation uses dataset_sink_mode=True while training
    # below uses dataset_sink_mode=False (PYNATIVE mode on CPU) — confirm
    # sink mode is actually supported in this configuration.
    eval_dice = EvalCallBack(model=model, net=train_net, eval_dataset=valid_dataset, epochs_to_eval=1,
                             per_eval={"epoch": [], "dice": []}, dataset_sink_mode=True)
    cbs = [time_cb, loss_cb, cbs_1, eval_dice]
    # Run the training loop.
    model.train(cfg.EPOCHS, train_dataset, callbacks=cbs, dataset_sink_mode=False)
    # mox.file.copy_parallel(src_url=cfg.OUTPUT_DIR,
    #                        dst_url='./image-segment/output_train')
    # mox.file.copy_parallel(src_url=cfg.SUMMARY_DIR,
    #                        dst_url='obs://image-segment/summary_log')


if __name__ == "__main__":
    # mox.file.copy_parallel(src_url='obs://mushroom-data/MD_DATA',
    #                        dst_url='./MD_DATA')

    # Module-level logger, also referenced by train() and EvalCallBack.
    log_path = 'logs/train'
    logger = get_logger('{}.log'.format(log_path))
    # Run eagerly (PyNative) on CPU; change device_target for Ascend/GPU.
    context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
    train()
