import glob
import os

import mindspore as ms
from PIL import Image
from mindspore import nn, ops


from models.ChangeFormer2.ChangeFormer import ChangeFormerV6
import numpy as np
import mindspore.dataset as ds

from util.loss import CrossEntropyWithLogits
from mindvision.engine.callback import LossMonitor

from util.monitor import StepLossTimeMonitor
from utils import get_loader, get_loaders
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from mindspore.communication.management import init, get_rank, get_group_size
from mindspore import ParallelMode




def linear_lr(base_lr, max_epochs, steps_per_epoch):
    """Build a per-step linearly decaying learning-rate schedule.

    The learning rate for epoch ``e`` is
    ``base_lr * (1 - e / (max_epochs + 1))`` and is held constant for all
    ``steps_per_epoch`` optimizer steps of that epoch.

    Args:
        base_lr (float): learning rate at epoch 0.
        max_epochs (int): total number of training epochs.
        steps_per_epoch (int): optimizer steps per epoch.

    Returns:
        np.ndarray: float32 array of length ``max_epochs * steps_per_epoch``
        with one learning-rate value per training step.
    """
    # Vectorized form of the original per-epoch loop: one scale factor per
    # epoch, then each epoch's lr repeated once per step.
    epoch_scales = 1.0 - np.arange(max_epochs) / float(max_epochs + 1)
    return np.repeat(base_lr * epoch_scales, steps_per_epoch).astype(np.float32)


def train_net(epoch_size=5, batch_size=2, base_lr=0.0001, multi_scale_train=False,
              resume=True,
              pretrained_ckpt_path='/data1/haojj/MindSpore_Project/ChangeFormerV6_3/checkpoints/changeformer_multi_scale_aug/ckpt_changeformer_graph_2-200_1780.ckpt',
              data_name='LEVIR',
              output_ckpt_path='checkpoints',
              device_target='GPU'):
    """Train ChangeFormerV6 on a change-detection dataset.

    Builds the network, optionally restores pretrained weights, constructs the
    training dataset, a linearly decaying per-step LR schedule, the AdamW
    optimizer and checkpoint callbacks, then runs ``Model.train``.

    Args:
        epoch_size (int): number of training epochs.
        batch_size (int): samples per batch (also passed to the monitors).
        base_lr (float): initial learning rate for the linear schedule.
        multi_scale_train (bool): forwarded to ``CrossEntropyWithLogits``.
        resume (bool): if True, load weights from ``pretrained_ckpt_path``.
        pretrained_ckpt_path (str): checkpoint file used when ``resume`` is True.
        data_name (str): dataset name passed to ``get_loader``.
        output_ckpt_path (str): root directory for saved checkpoints.
        device_target (str): 'GPU' keeps amp level O0; anything else uses O3.
    """
    # Define the network.
    net = ChangeFormerV6(embed_dim=256)

    # Optionally restore pretrained weights.
    if resume:
        param_dict = load_checkpoint(pretrained_ckpt_path)
        load_param_into_net(net, param_dict)
        print("load pretrained pth successfully...")
    else:
        print("training from scratch ...")

    # Build the training dataset.
    train_loader = get_loader(data_name, img_size=256, batch_size=batch_size,
                              split='train', is_train=True, dataset='CDDataset')

    train_data_size = train_loader.get_dataset_size()
    print("train dataset length is:", train_data_size)

    # Checkpoint save directory and interval (one checkpoint per epoch:
    # save every `train_data_size` steps).
    ckpt_save_dir = os.path.join(output_ckpt_path, 'cf_dynamic_lr')
    save_ck_steps = train_data_size

    # Per-step linearly decaying learning-rate schedule.
    lr_steps = linear_lr(base_lr=base_lr, max_epochs=epoch_size, steps_per_epoch=save_ck_steps)
    print(lr_steps)

    # Checkpoint callback: keep at most 10 checkpoints, save on exception too.
    ckpt_config = CheckpointConfig(save_checkpoint_steps=save_ck_steps,
                                   keep_checkpoint_max=10, exception_save=True)
    ckpoint_cb = ModelCheckpoint(prefix='ckpt_cf_levircd_graph',
                                 directory=ckpt_save_dir,
                                 config=ckpt_config)

    # AdamW optimizer driven by the per-step LR schedule.
    optimizer = nn.AdamWeightDecay(net.trainable_params(), learning_rate=lr_steps,
                                   beta1=0.9, beta2=0.999, weight_decay=0.01)

    criterion = CrossEntropyWithLogits(multi_scale_train=multi_scale_train)

    # Fixed loss scale (no dynamic adjustment); full precision on GPU.
    loss_scale_manager = ms.train.loss_scale_manager.FixedLossScaleManager(1024., False)
    amp_level = "O0" if device_target == "GPU" else "O3"
    model = ms.Model(net, loss_fn=criterion, loss_scale_manager=loss_scale_manager,
                     optimizer=optimizer, amp_level=amp_level)

    print("============== Starting Training ==============")
    # NOTE(review): TimeMonitor's data_size and LossMonitor's per_print_times
    # are given batch_size here; usually these take the number of steps per
    # epoch (train_data_size) — confirm this is intentional.
    time_cb = ms.TimeMonitor(data_size=batch_size)
    loss_cb = LossMonitor(lr_init=lr_steps.tolist(), per_print_times=batch_size)
    cbs = [time_cb, loss_cb, ckpoint_cb]

    model.train(epoch=epoch_size, train_dataset=train_loader, callbacks=cbs)



if __name__ == '__main__':
    # Single-GPU graph-mode training on device 2.
    ms.context.set_context(mode=ms.GRAPH_MODE, device_target="GPU", device_id=2)

    # Distributed data-parallel setup, kept for reference:
    # ms.context.set_context(mode=ms.GRAPH_MODE, device_target="GPU")
    # init("nccl")
    # rank_id = get_rank()
    # device_num = get_group_size()  # number of GPUs in the group
    # ms.context.set_auto_parallel_context(device_num=device_num, gradients_mean=True,
    #                                      parallel_mode=ParallelMode.DATA_PARALLEL)

    # Launch training with multi-scale supervision enabled.
    train_net(epoch_size=100,
              batch_size=4,
              base_lr=0.0001,
              multi_scale_train=True)