import glob
import os

import mindspore as ms
from PIL import Image
from mindspore import nn, ops, ParallelMode

from models.ChangeFormer2.ChangeFormer import ChangeFormerV6
import numpy as np
import mindspore.dataset as ds
from mindspore import Model
from util.loss import CrossEntropyWithLogits
from mindvision.engine.callback import LossMonitor
from mindspore.communication.management import init, get_rank, get_group_size
from util.monitor import StepLossTimeMonitor
from utils import get_loader, get_loaders
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.communication.management import init, get_rank, get_group_size
from util.eval_metric import ChangeFormerEval, CF_metric, TempLoss, EvalCallBack, apply_eval


def linear_lr(base_lr, max_epochs, steps_per_epoch):
    """Build a per-step, linearly decaying learning-rate schedule.

    The learning rate for epoch ``e`` is ``base_lr * (1 - e / (max_epochs + 1))``
    and is held constant for every step inside that epoch, matching the
    'linear' lr policy of the original ChangeFormer training setup.

    Args:
        base_lr (float): initial learning rate (epoch 0).
        max_epochs (int): total number of training epochs.
        steps_per_epoch (int): optimizer steps per epoch.

    Returns:
        np.ndarray: float32 array of length ``max_epochs * steps_per_epoch``
        with one learning-rate value per training step.
    """
    # Per-epoch decay factors: 1 - e/(max_epochs+1) for e in [0, max_epochs).
    scale = 1.0 - np.arange(max_epochs, dtype=np.float64) / float(max_epochs + 1)
    per_epoch_lr = (base_lr * scale).astype(np.float32)
    # Hold each epoch's lr constant across all of its steps.
    return np.repeat(per_epoch_lr, steps_per_epoch)


def train_net(epoch_size=5, batch_size=2, multi_scale_train=False, run_Eval=False):
    """Train ChangeFormerV6 on the LEVIR change-detection dataset.

    Builds the network, dataloaders, optimizer (AdamW with a linearly
    decaying per-step lr), and MindSpore ``Model``; trains with checkpoint
    saving and, optionally, per-epoch validation that keeps the best ckpt.

    Args:
        epoch_size (int): number of training epochs.
        batch_size (int): samples per training step.
        multi_scale_train (bool): whether the loss aggregates the network's
            multi-scale outputs (passed to CrossEntropyWithLogits).
        run_Eval (bool): run validation each epoch and save the best
            checkpoint by the CF_metric.
    """
    # define network
    net = ChangeFormerV6(embed_dim=256)

    # Optionally resume from a pretrained checkpoint.
    resume = False
    param_dict_path = None  # TODO: set to a .ckpt path when resume is True
    if resume:
        param_dict = load_checkpoint(param_dict_path)
        load_param_into_net(net, param_dict)
        print("load pretrained weight file: {}".format(os.path.split(param_dict_path)[-1]))
    else:
        print("training from scratch ...")

    # Build the train/val dataloaders for the LEVIR-CD dataset.
    data_name = 'LEVIR'
    data_loaders = get_loaders(data_name=data_name, dataset='CDDataset', split='val', batch_size=batch_size, img_size=256)

    train_loader = data_loaders['train']
    val_loader = data_loaders['val']

    train_data_size = train_loader.get_dataset_size()
    print("train dataset length is:", train_data_size)

    val_data_size = val_loader.get_dataset_size()
    print("val dataset length is:", val_data_size)

    # Checkpoints are written once per epoch (every `train_data_size` steps).
    output_ckpt_path = './checkpoints'
    ckpt_save_dir = os.path.join(output_ckpt_path, 'close_official_changeformer')

    save_ck_steps = train_data_size
    ckpt_config = CheckpointConfig(save_checkpoint_steps=save_ck_steps,
                                   keep_checkpoint_max=10)
    ckpoint_cb = ModelCheckpoint(prefix='official-demo',
                                 directory=ckpt_save_dir,
                                 config=ckpt_config)

    # Reference run name (original ChangeFormer configuration):
    # CD_ChangeFormerV6_LEVIR_b16_lr0.0001_adamw_train_test_200_linear
    # _ce_multi_train_True_multi_infer_False_shuffle_AB_False_embed_dim_256

    # One lr value per step, linearly decaying across epochs.
    lr = linear_lr(base_lr=0.0001, max_epochs=epoch_size, steps_per_epoch=save_ck_steps)

    # define optimizer
    optimizer = nn.AdamWeightDecay(net.trainable_params(), learning_rate=ms.Tensor(lr, dtype=ms.float32),
                                   beta1=0.9, beta2=0.999, weight_decay=0.01)
    criterion = CrossEntropyWithLogits(multi_scale_train=multi_scale_train)

    # TODO: set according to the actual execution environment.
    device_target = 'GPU'

    # Fixed loss scaling; mixed precision (O3) only on Ascend, full fp32 on GPU.
    loss_scale_manager = ms.train.loss_scale_manager.FixedLossScaleManager(1024., False)
    amp_level = "O0" if device_target == "GPU" else "O3"
    model = ms.Model(net, loss_fn=criterion, loss_scale_manager=loss_scale_manager, optimizer=optimizer,
                     amp_level=amp_level)

    print("============== Starting Training ==============")
    # NOTE(review): TimeMonitor's data_size is usually the number of steps per
    # epoch, not the batch size — confirm whether batch_size is intended here.
    time_cb = ms.TimeMonitor(data_size=batch_size)
    loss_cb = LossMonitor(lr_init=lr.tolist(), per_print_times=1)
    cbs = [time_cb, loss_cb, ckpoint_cb]

    if run_Eval:
        # Wrap the network with an argmax head for metric computation; the
        # loss is a no-op placeholder since only the metric matters here.
        eval_model = ms.Model(ChangeFormerEval(net, 'argmax'), loss_fn=TempLoss(),
                              metrics={"CF_metric": CF_metric(num_class=2)})
        eval_param_dict = {"model": eval_model, "dataset": val_loader, "metrics_name": "CF_metric"}

        # `besk_ckpt_name` is the (misspelled) parameter name of the project's
        # EvalCallBack — keep it as-is to match that signature.
        eval_cb = EvalCallBack(apply_eval, eval_param_dict, interval=1, eval_start_epoch=0, save_best_ckpt=True,
                               ckpt_directory=ckpt_save_dir, besk_ckpt_name='best_ascend_cf.ckpt',
                               metrics_name='CF_metric')
        cbs.append(eval_cb)

    model.train(epoch=epoch_size, train_dataset=train_loader, callbacks=cbs)




if __name__ == '__main__':
    # Run eagerly (PyNative mode) on the first GPU device.
    ms.context.set_context(mode=ms.PYNATIVE_MODE, device_target="GPU", device_id=0)

    train_net(epoch_size=200,
              batch_size=4,
              multi_scale_train=True,
              run_Eval=True)