import os

from misc.logger_tool import Logger, Timer
from models.ChangeFormer import ChangeFormerV6

import mindspore as ms
from mindspore import context
from mindspore import nn
from misc.metric_tool import ConfuseMatrixMeter
from util.loss import CrossEntropyWithLogits
import numpy as np
from util.custom_with_cell import CustomWithTrainCell, CustomWithEvalCell


class CDTrainer():
    """Trainer for change detection with ChangeFormerV6 on MindSpore.

    Owns the network, optimizer, pixel loss, metric bookkeeping, and the
    epoch-level train/eval loop. The latest and best (by validation F1)
    checkpoints are written under ``./ckpt_test/``; per-epoch evaluation
    results are appended to ``./eval.txt``.
    """

    def __init__(self, args, dataloaders):
        """Build the network, optimizer, loss, logger, and training state.

        Args:
            args: parsed command-line namespace; must provide n_class,
                embed_dim, gpu_ids, lr, optimizer, checkpoint_dir, vis_dir,
                batch_size, max_epochs, shuffle_AB, multi_scale_train,
                multi_scale_infer, multi_pred_weights, loss.
            dataloaders: dict with 'train' and 'val' MindSpore datasets.

        Raises:
            ValueError: if args.optimizer is not one of sgd/adam/adamw.
            NotImplementedError: if args.loss is not 'ce'.
        """
        self.args = args
        self.dataloaders = dataloaders

        self.n_class = args.n_class
        self.net_G = ChangeFormerV6(embed_dim=args.embed_dim)

        # Select execution device: a non-negative id selects that GPU,
        # anything else falls back to CPU (graph mode in both cases).
        self.device_id = int(args.gpu_ids)
        if self.device_id >= 0:
            context.set_context(mode=ms.GRAPH_MODE, device_target='GPU', device_id=self.device_id)
        else:
            context.set_context(mode=ms.GRAPH_MODE, device_target='CPU')

        self.lr = args.lr
        # define optimizers
        if args.optimizer == "sgd":
            self.optimizer_G = nn.SGD(self.net_G.trainable_params(), learning_rate=self.lr,
                                      momentum=0.9,
                                      weight_decay=5e-4)
        elif args.optimizer == "adam":
            self.optimizer_G = nn.Adam(self.net_G.trainable_params(), learning_rate=self.lr,
                                       weight_decay=0)
        elif args.optimizer == "adamw":
            self.optimizer_G = nn.AdamWeightDecay(self.net_G.trainable_params(), learning_rate=self.lr,
                                                  beta1=0.9, beta2=0.999, weight_decay=0.01)
        else:
            # Fail fast with a clear message instead of an AttributeError
            # the first time self.optimizer_G is used.
            raise ValueError('unsupported optimizer: %s' % args.optimizer)

        # schedulers
        self.running_metric = ConfuseMatrixMeter(n_class=2)

        self.checkpoint_dir = args.checkpoint_dir
        self.vis_dir = args.vis_dir
        # Create output dirs BEFORE the logger opens a file inside
        # checkpoint_dir; makedirs also tolerates missing parents,
        # unlike the bare os.mkdir used previously.
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        os.makedirs(self.vis_dir, exist_ok=True)

        # define logger file
        logger_path = os.path.join(args.checkpoint_dir, 'log.txt')
        self.logger = Logger(logger_path)
        self.logger.write_dict_str(args.__dict__)
        # define timer
        self.timer = Timer()
        self.batch_size = args.batch_size

        #  training log
        self.epoch_acc = 0
        self.best_val_acc = 0.0
        self.best_epoch_id = 0
        self.epoch_to_start = 0
        self.max_num_epochs = args.max_epochs

        self.global_step = 0
        self.steps_per_epoch = dataloaders['train'].get_dataset_size()
        self.total_steps = (self.max_num_epochs - self.epoch_to_start) * self.steps_per_epoch

        # Per-batch scratch state filled in during training.
        self.G_pred = None
        self.pred_vis = None
        self.batch = None
        self.G_loss = None
        self.is_training = False
        self.batch_id = 0
        self.epoch_id = 0

        self.shuffle_AB = args.shuffle_AB

        # define the loss functions
        self.multi_scale_train = args.multi_scale_train
        self.multi_scale_infer = args.multi_scale_infer
        self.weights = tuple(args.multi_pred_weights)
        if args.loss == 'ce':
            self._pxl_loss = CrossEntropyWithLogits()
        else:
            # Fixed: NotImplemented is a sentinel value, not an exception;
            # raising it would produce a TypeError.
            raise NotImplementedError(args.loss)

        # Resume accuracy curves from disk if previous runs left them behind.
        self.VAL_ACC = np.array([], np.float32)
        if os.path.exists(os.path.join(self.checkpoint_dir, 'val_acc.npy')):
            self.VAL_ACC = np.load(os.path.join(self.checkpoint_dir, 'val_acc.npy'))
        self.TRAIN_ACC = np.array([], np.float32)
        if os.path.exists(os.path.join(self.checkpoint_dir, 'train_acc.npy')):
            self.TRAIN_ACC = np.load(os.path.join(self.checkpoint_dir, 'train_acc.npy'))

    def _load_checkpoint(self, ckpt_name='last_ckpt.ckpt'):
        """Load network weights from checkpoint_dir if present; else train from scratch."""
        print("\n")
        ckpt_path = os.path.join(self.checkpoint_dir, ckpt_name)
        if os.path.exists(ckpt_path):
            self.logger.write('loading last checkpoints...\n')
            param_dict = ms.load_checkpoint(ckpt_path)
            ms.load_param_into_net(self.net_G, param_dict)
        else:
            print("training from scratch ......")

    def _timer_update(self):
        """Placeholder; not implemented yet."""
        pass

    def _visualize_pred(self):
        """Placeholder; not implemented yet."""
        pass

    def _save_checkpoint(self):
        """Placeholder; checkpoints are currently saved inline in train_models."""
        pass

    def _update_lr_schedulers(self):
        """Placeholder; no LR schedule is applied yet."""
        pass

    def _update_metric(self):
        """Placeholder; not implemented yet."""
        pass

    def _collect_running_batch_states(self):
        """Placeholder; not implemented yet."""
        pass

    def _collect_epoch_states(self):
        """Placeholder; not implemented yet."""
        pass

    def _update_checkpoints(self):
        """Placeholder; not implemented yet."""
        pass

    def _update_training_acc_curve(self):
        """Placeholder; not implemented yet."""
        pass

    def _update_val_val_acc_curve(self):
        """Placeholder; not implemented yet."""
        pass

    def _clear_cache(self):
        """Placeholder; not implemented yet."""
        pass

    def _forward_pass(self):
        """Placeholder; the forward pass is driven by CustomWithTrainCell."""
        pass

    def _backward_G(self):
        """Placeholder; backprop is handled by nn.TrainOneStepCell."""
        pass

    def train_models(self):
        """Run the full training loop: per epoch, train then evaluate.

        After each epoch the latest weights and hyper-params are saved to
        ./ckpt_test/, and the model is additionally saved as the "best"
        checkpoint whenever the validation F1 of the change class improves.
        """
        self._load_checkpoint()

        # The hard-coded save directory must exist or ms.save_checkpoint fails.
        os.makedirs('./ckpt_test', exist_ok=True)

        best_f1 = 0.001
        for self.epoch_id in range(self.epoch_to_start, self.max_num_epochs):
            self._clear_cache()
            self.is_training = True

            # Wrap the forward net with the loss, then with the
            # single-step update cell (forward + backward + optimizer).
            net_with_loss = CustomWithTrainCell(self.net_G, self._pxl_loss)
            train_net = nn.TrainOneStepCell(net_with_loss, self.optimizer_G)

            train_data_loader = self.dataloaders['train']
            steps = train_data_loader.get_dataset_size()
            step = 0

            train_net.set_train(True)

            # NOTE(review): assumes each batch dict carries 'imgA', 'imgB'
            # and 'label' tensors shaped as the network expects — confirm
            # against the dataloader definition.
            for data in train_data_loader.create_dict_iterator():
                result = train_net(data["imgA"], data['imgB'], data["label"])
                print(f"Epoch: [{self.epoch_id} / {self.max_num_epochs}], "
                      f"step: [{step} / {steps}], "
                      f"loss: {result}")
                step = step + 1

            # Persist the latest weights plus the hyper-params needed to resume.
            save_list = [{"name": "lr", "data": ms.Tensor(self.lr, ms.float32)},
                         {"name": "train_epoch", "data": ms.Tensor(self.epoch_id, ms.int32)}]
            ms.save_checkpoint(self.net_G, './ckpt_test/ChangeFormer_embed256_latest.ckpt')
            ms.save_checkpoint(save_list, './ckpt_test/hyper_param.ckpt')
            print('finished save ckpt...')

            # Metrics used for model selection on the validation set.
            val_loss = nn.Loss()
            acc = nn.Accuracy()
            f1_score = nn.F1()

            val_loss.clear()
            acc.clear()
            f1_score.clear()

            eval_net = CustomWithEvalCell(self.net_G, self._pxl_loss)
            eval_net.set_train(False)

            val_data_loader = self.dataloaders['val']
            val_data_size = val_data_loader.get_dataset_size()
            val_step = 1
            for d in val_data_loader.create_dict_iterator():
                label = d['label']
                # outputs[0] is the loss, outputs[-1] the prediction
                # (presumably logits; verify against CustomWithEvalCell).
                outputs = eval_net(d['imgA'], d['imgB'], d['label'])
                seg_loss = ms.Tensor(outputs[0])
                val_loss.update(seg_loss)
                pred = outputs[-1]
                acc.update(pred, label)
                f1_score.update(pred, label)
                # Fixed: the progress line was printed twice per step.
                print('IS EVALUATING: epoch: {}  step: {}/{} '.format(self.epoch_id, val_step, val_data_size))
                val_step += 1
            print('eval process finished...')

            mLoss = val_loss.eval()
            mAcc = acc.eval()
            mf1_score = f1_score.eval()
            print('mLoss', mLoss)
            print('mAcc', mAcc)
            print('mF1-score', mf1_score)

            # Append one record per epoch; the trailing newline keeps
            # successive records on separate lines (previously missing).
            # The with-block closes the file; no explicit close needed.
            with open('./eval.txt', mode='a') as f:
                f.write('IS_evaluating: epoch:{} mLoss: {} mAcc: {} F1-Score-0: {} F1-score-1:{}\n'.format(
                    self.epoch_id, mLoss, mAcc, mf1_score[0], mf1_score[1]))

            if mf1_score[-1] > best_f1:
                # Fixed: track the new best so later epochs must actually
                # improve on it instead of always beating the initial 0.001.
                best_f1 = mf1_score[-1]
                ms.save_checkpoint(self.net_G, './ckpt_test/ChangeFormer_embed256_best.ckpt')
                print('finished save ckpt...')