
import glob
import os

import mindspore as ms
from PIL import Image
from mindspore import nn, ops, ParallelMode

from models.ChangeFormer2.ChangeFormer import ChangeFormerV6
from models.unet_model import UNetMedical


import numpy as np
import mindspore.dataset as ds
from mindspore import Model
from util.loss import CrossEntropyWithLogits
from mindvision.engine.callback import LossMonitor

from util.monitor import StepLossTimeMonitor

from util.eval_metric import ChangeFormerEval, CF_metric, TempLoss, EvalCallBack, apply_eval

from utils import get_loader, get_loaders, get_loader_multi_gpu
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from mindspore.communication.management import init, get_rank, get_group_size


# Restrict this process to GPUs 0 and 1 (comma-separated device ids);
# must be set before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

def linear_lr(base_lr, max_epochs, steps_per_epoch):
    """Build a per-step linearly decaying learning-rate schedule.

    Epoch ``e`` (0-based) uses a constant learning rate of
    ``base_lr * (1 - e / (max_epochs + 1))``, repeated for every step in
    that epoch.  Note the decay never reaches zero because the divisor is
    ``max_epochs + 1``.

    Args:
        base_lr: initial learning rate at epoch 0.
        max_epochs: total number of training epochs.
        steps_per_epoch: number of optimizer steps per epoch.

    Returns:
        np.ndarray of float32 with shape ``(max_epochs * steps_per_epoch,)``,
        one learning rate per training step.
    """
    # Per-epoch linear decay factors, then repeat each for a full epoch
    # of steps — replaces the original nested append loop.
    scale_factors = 1.0 - np.arange(max_epochs) / float(max_epochs + 1)
    return np.repeat(base_lr * scale_factors, steps_per_epoch).astype(np.float32)


def train_net(epoch_size=5, batch_size=2, multi_scale_train=False, run_Eval=False, is_distributed=False):
    """Train ChangeFormerV6 on the LEVIR change-detection dataset with MindSpore.

    Args:
        epoch_size: number of training epochs.
        batch_size: per-device mini-batch size for the training loader.
        multi_scale_train: forwarded to CrossEntropyWithLogits; presumably
            enables a multi-scale joint loss — confirm in util.loss.
        run_Eval: when True, attach an EvalCallBack that evaluates on the
            validation set each epoch and saves the best checkpoint.
        is_distributed: when True, initialise NCCL data-parallel training
            across the visible GPUs; otherwise use single-GPU loaders.
    """

    # define network
    net = ChangeFormerV6(embed_dim=256)
    # net = UNetMedical(n_channels=6, n_classes=2)


    # Optionally resume from a pretrained checkpoint.
    # NOTE(review): resume is hard-coded False and param_dict_path is None,
    # so the resume branch is currently dead code — set both to enable it.
    resume = False
    param_dict_path = None
    if resume:
        param_dict = load_checkpoint((param_dict_path))
        load_param_into_net(net, param_dict)
        print("load pretrained weight file: {}".format(os.path.split(param_dict_path)[-1]))
    else:
        print("training from scratch ...")

    # define dataset name
    data_name = 'LEVIR'
    dataset = 'CDDataset'

    if is_distributed:
        # NCCL must be initialised before querying rank/group size or
        # building the sharded loader.
        init("nccl")
        rank_id = get_rank()

        # Number of devices participating in data-parallel training.
        device_num = get_group_size()
        ms.common.set_seed(6)
        ms.context.set_auto_parallel_context(device_num=device_num, gradients_mean=True,
                                             parallel_mode=ParallelMode.DATA_PARALLEL, parameter_broadcast=True)

        # Training data is sharded across devices; validation runs unsharded
        # with batch size 1.
        train_loader = get_loader_multi_gpu(data_name=data_name, dataset=dataset, split='train', batch_size=batch_size, img_size=256, num_shards=device_num, shard_id=rank_id)
        val_loader = get_loader(data_name, img_size=256, batch_size=1, split='val', is_train=False, dataset=dataset)

        print("GPU nums: ", device_num, " load dataset successfully...")
    else:
        data_loaders = get_loaders(data_name=data_name, dataset='CDDataset', split='train', batch_size=batch_size, img_size=256)
        train_loader = data_loaders['train']
        val_loader = data_loaders['val']
        print("Single GPU Mode load dataset successfully...")

    # Dataset sizes are in steps (batches) per epoch.
    train_data_size = train_loader.get_dataset_size()
    print("train dataset length is:", train_data_size)

    val_data_size = val_loader.get_dataset_size()
    print("val dataset length is:", val_data_size)

    output_ckpt_path = './checkpoints'
    ckpt_save_dir = os.path.join(output_ckpt_path, 'cf_multi_gpu_dynamic_lr')

    # Save a checkpoint once per epoch (every train_data_size steps),
    # keeping at most the 10 most recent.
    save_ck_steps = train_data_size
    ckpt_config = CheckpointConfig(save_checkpoint_steps=save_ck_steps,
                                   keep_checkpoint_max=10)
    ckpoint_cb = ModelCheckpoint(prefix='cf_adamw_lr_0.0001_levircd',
                                 directory=ckpt_save_dir,
                                 config=ckpt_config)


    # CD_ChangeFormerV6_LEVIR_b16_lr0.0001_adamw_train_test_200_linear
    # _ce_multi_train_True_multi_infer_False_shuffle_AB_False_embed_dim_256

    # Per-step linearly decaying LR schedule over the whole run.
    lr = linear_lr(base_lr=0.0001, max_epochs=epoch_size,steps_per_epoch=save_ck_steps)

    # define optimizer
    optimizer = nn.AdamWeightDecay(net.trainable_params(), learning_rate=ms.Tensor(lr, dtype=ms.float32),
                                           beta1=0.9, beta2=0.999, weight_decay=0.01)
    criterion = CrossEntropyWithLogits(multi_scale_train=multi_scale_train)

    # TODO: device target is hard-coded here; adjust per deployment.
    device_target = 'GPU'
    # Fixed loss scale of 1024 without the optimizer applying the scale
    # (drop_overflow_update=False); no mixed precision on GPU (O0).
    loss_scale_manager = ms.train.loss_scale_manager.FixedLossScaleManager(1024., False)
    amp_level = "O0" if device_target == "GPU" else "O3"
    model = ms.Model(net, loss_fn=criterion, loss_scale_manager=loss_scale_manager, optimizer=optimizer,
                  amp_level=amp_level)


    print("============== Starting Training ==============")
    # NOTE(review): TimeMonitor's data_size is normally the number of steps
    # per epoch (train_data_size); passing batch_size here looks like a
    # mix-up — confirm intended print interval.
    time_cb = ms.TimeMonitor(data_size=batch_size)
    loss_cb = LossMonitor(lr_init=lr.tolist(), per_print_times=1)
    cbs = [time_cb, loss_cb]
    cbs.append(ckpoint_cb)

    if run_Eval:
        # Wrap the network for evaluation (argmax over logits) and track the
        # best validation score, saving the best checkpoint separately.
        eval_model = Model(ChangeFormerEval(net, 'argmax'), loss_fn=TempLoss(),
                           metrics={"CF_metric": CF_metric(num_class=2)})
        eval_param_dict = {"model": eval_model, "dataset": val_loader, "metrics_name": "CF_metric"}

        eval_cb = EvalCallBack(apply_eval, eval_param_dict, interval=1, eval_start_epoch=0, save_best_ckpt=True,
                               ckpt_directory='./checkpoints/best_multi_gpu_cf', besk_ckpt_name='best_cf.ckpt',
                               metrics_name='CF_metric')
        cbs.append(eval_cb)

    model.train(epoch=epoch_size, train_dataset=train_loader, callbacks=cbs)
    print("finished .......")


if __name__ == '__main__':

    # Run in graph mode on GPU.
    ms.context.set_context(mode=ms.GRAPH_MODE, device_target="GPU")

    epoch_size = 200
    batch_size = 8
    multi_scale_train = False  # whether to train jointly on multi-scale outputs

    is_distributed = True    # whether to use distributed (multi-GPU) training
    train_net(epoch_size, batch_size, multi_scale_train, run_Eval=False, is_distributed=is_distributed)