
import glob
import multiprocessing
import os

import mindspore as ms
from PIL import Image
from mindspore import nn
import mindspore.dataset as ds
from dataset.CD_dataset import CDDataset

# from models.ChangeFormer import ChangeFormerV6
from models.ChangeFormer2.ChangeFormer import ChangeFormerV6

from util.custom_with_cell import CustomWithEvalCell
from util.loss import CrossEntropyWithLogits
from mindspore.common.initializer import One, Normal
from mindspore import context
from utils import get_loader
import numpy as np
from mindspore import ops
from mindspore import save_checkpoint

from mindspore import dtype as mstype
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from misc.metric_tool import ConfuseMatrixMeter

from models.unet_model import UNetMedical


def net_eval(data_name='LEVIR',
             ckpt_path=r'D:\pythonDemo\ChangeFormer_Server\ckpt_test\pt_2_ckpt\torch_cf_result.ckpt',
             img_size=256,
             batch_size=1):
    """Evaluate a trained ChangeFormerV6 change-detection model on the
    validation split and report confusion-matrix-based scores.

    Args:
        data_name (str): dataset name forwarded to ``get_loader``.
        ckpt_path (str): path to the MindSpore checkpoint to load.
        img_size (int): side length of the (square) input images.
        batch_size (int): evaluation batch size.

    Returns:
        dict: score dictionary from ``ConfuseMatrixMeter.get_scores()``.
    """
    # ms.set_context(mode=ms.GRAPH_MODE, device_target="GPU", pynative_synchronize=True)
    ms.set_context(mode=ms.GRAPH_MODE, device_target="CPU")

    val_loader = get_loader(data_name, img_size=img_size, batch_size=batch_size,
                            split='val', is_train=False, dataset='CDDataset')

    # Load the pretrained weights from the checkpoint file.
    param_dict = load_checkpoint(ckpt_path)

    net = ChangeFormerV6(input_nc=3, output_nc=2)
    # net = UNetMedical(n_channels=6, n_classes=2)

    load_param_into_net(net, param_dict)
    # Switch Dropout/BatchNorm layers to inference behavior; without this the
    # network evaluates in training mode and the metrics are unreliable.
    net.set_train(False)

    print("load checkpoint file successfully...")

    # Confusion-matrix accumulator over the whole validation set.
    running_metric = ConfuseMatrixMeter(n_class=2)

    # Per-pixel class prediction = argmax over the channel axis.
    argmax = ops.Argmax(axis=1, output_type=mstype.int32)

    for d in val_loader.create_dict_iterator():
        imgAB = d['imgAB']
        label = d['label']
        outputs = net(imgAB)

        # ChangeFormer returns multiple tensors; the last entry holds the
        # final prediction logits — reduce it to hard labels.
        pred_L = argmax(outputs[-1]).asnumpy()

        current_score = running_metric.update_cm(pred_L, gt=label.asnumpy())
        print("current mF1:", current_score)

    scores_dict = running_metric.get_scores()
    print(scores_dict)
    return scores_dict
# Script entry point: run the full validation-set evaluation when this file
# is executed directly (no effect when imported as a module).
if __name__ == '__main__':
    net_eval()










