import os
import math
import torch
import torch.nn.functional as F
import numpy as np
import h5py
import nibabel as nib
from medpy import metric
from models.CMUnet import CMUNet
from models.Unet import UNet


def calculate_metric_percase(pred, gt):
    """Compute overlap and surface metrics between a predicted mask and ground truth.

    Both inputs are treated as binary masks by medpy (any nonzero voxel is foreground).

    Returns:
        tuple: (dice, jaccard, hd95, asd) where
            dice    - Dice similarity coefficient,
            jaccard - Jaccard index (IoU),
            hd95    - 95th-percentile Hausdorff distance,
            asd     - average surface distance.
    """
    dice_score = metric.binary.dc(pred, gt)
    jaccard_score = metric.binary.jc(pred, gt)
    hausdorff95 = metric.binary.hd95(pred, gt)
    avg_surface_dist = metric.binary.asd(pred, gt)
    return dice_score, jaccard_score, hausdorff95, avg_surface_dist


def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
    print(image.shape)
    c, ww, hh, dd = image.shape # 通道数、宽度、高度和深度（或者说是图像的三个维度的大小）

    sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1 # 计算了图像在 x 方向上划分的块数 sx
    sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
    sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
    # print("{}, {}, {}".format(sx, sy, sz))
    # 数组的形状是 (num_classes, ww, hh, dd)
    # image.shape[1:] 只包含图像数据的维度信息，所以在前面加上 num_classes，得到完整的形状
    score_map = np.zeros((num_classes, ) + image.shape[1:]).astype(np.float32)
    cnt = np.zeros(image.shape[1:]).astype(np.float32) # 统计每个像素点被覆盖的次数

    for x in range(0, sx):
        xs = min(stride_xy*x, ww-patch_size[0])
        for y in range(0, sy):
            ys = min(stride_xy * y,hh-patch_size[1])
            for z in range(0, sz):
                zs = min(stride_z * z, dd-patch_size[2])
                test_patch = image[:,xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]]
                test_patch = np.expand_dims(test_patch,axis=0).astype(np.float32)
                test_patch = torch.from_numpy(test_patch).cuda()
                y1 = net(test_patch)
                y = F.softmax(y1, dim=1)
                y = y.cpu().data.numpy()
                y = y[0,:,:,:,:]
                # 将当前预测结果 y 加到分数地图 score_map 的对应位置上。具体来说，它使用了切片操作，将 y 加到了 score_map 的特定区域上，实现了分数的累加
                score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                  = score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y
                # 将当前处理的块覆盖的像素点计数加一。具体来说，它使用了切片操作，将 cnt 中当前块覆盖的区域的计数加一，以统计每个像素点被覆盖的次数
                cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                  = cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1
    score_map = score_map/np.expand_dims(cnt,axis=0)
    label_map = np.argmax(score_map, axis = 0)
    return label_map, score_map

def test_all_case(net, image_list, num_classes=2, patch_size=(112, 112, 80), stride_xy=18, stride_z=4, save_result=True, test_save_path=None, preproc_fn=None):
    """Evaluate the network over a list of HDF5 volumes and return averaged metrics.

    Args:
        net: segmentation network (already in eval mode, on its target device).
        image_list: paths to HDF5 files, each with 'image' (C, W, H, D) and
            'label' (W, H, D) datasets.
        num_classes: number of segmentation classes.
        patch_size: sliding-window patch size passed to test_single_case.
        stride_xy: in-plane sliding stride.
        stride_z: depth sliding stride.
        save_result: if True, write prediction/image/label NIfTI files.
        test_save_path: output directory prefix for saved NIfTI files.
        preproc_fn: optional callable applied to each image before inference.

    Returns:
        numpy array of shape (4,): mean (dice, jaccard, hd95, asd) over all cases.
    """
    total_metric = np.zeros(4, dtype=np.float64)  # accumulates (dice, jc, hd95, asd)
    for ith, image_path in enumerate(image_list):
        # Context manager guarantees the HDF5 handle is closed
        # (the original code leaked one handle per volume).
        with h5py.File(image_path, 'r') as h5f:
            image = h5f['image'][:]
            label = h5f['label'][:]
        label[label == 4] = 3  # BraTS convention: remap label 4 (enhancing tumor) to 3
        if preproc_fn is not None:
            image = preproc_fn(image)
        prediction, score_map = test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
        print(np.unique(prediction), np.unique(label))

        if np.sum(prediction) == 0:
            # Empty prediction: surface distances are undefined, so score zeros.
            single_metric = (0, 0, 0, 0)
        else:
            single_metric = calculate_metric_percase(prediction, label)
        print('%03d,\t%.5f, %.5f, %.5f, %.5f' % (ith, single_metric[0], single_metric[1], single_metric[2], single_metric[3]))
        total_metric += np.asarray(single_metric)

        if save_result:
            # Identity affine: volumes are saved in voxel space, not scanner space.
            nib.save(nib.Nifti1Image(prediction.astype(np.float32), np.eye(4)), test_save_path + "%03d_pred.nii.gz" % (ith))
            # Save only the first modality of the multi-channel input image.
            nib.save(nib.Nifti1Image(image[0].astype(np.float32), np.eye(4)), test_save_path + "%03d_img.nii.gz" % (ith))
            nib.save(nib.Nifti1Image(label.astype(np.float32), np.eye(4)), test_save_path + "%03d_gt.nii.gz" % (ith))
    avg_metric = total_metric / len(image_list)  # mean over all evaluated cases
    print('average metric is {}'.format(avg_metric))

    return avg_metric


if __name__ == '__main__':
    # Dataset root (contains test.txt) and the directory holding the .h5 volumes.
    data_path = '/root/autodl-tmp/BrainTumorSegmentation1/data_set/BraTS2021'
    data_path1 = '/root/autodl-tmp/BrainTumorSegmentation1/data_set/BraTS2021/dataset'
    test_save_path = '/root/autodl-tmp/BrainTumorSegmentation1/predictions/CMUNet2/'
    save_mode_path = '/root/autodl-tmp/BrainTumorSegmentation1/result/CMUNet2.pth'

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Instantiate CMUNet (4 input modalities, 4 output classes) on the target device.
    net = CMUNet(in_channels=4, num_classes=4).to(device)
    # map_location lets a GPU-saved checkpoint load on CPU-only machines too
    # (without it, torch.load raises when CUDA is unavailable).
    net.load_state_dict(torch.load(save_mode_path, map_location=device)['model'])
    print("init weight from {}".format(save_mode_path))
    net.eval()

    # Build the list of test volume paths. A uniform strip() also removes stray
    # '\r' from Windows-edited list files (the original kept it on all but the
    # last line, producing broken paths), and blank lines are skipped.
    with open(os.path.join(data_path, 'test.txt'), 'r') as f:
        image_list = [os.path.join(data_path1, line.strip()) for line in f if line.strip()]
    print(len(image_list))

    # Sliding-window evaluation; predictions are written to test_save_path.
    avg_metric = test_all_case(net, image_list, num_classes=4,
                               patch_size=(160, 160, 128), stride_xy=32, stride_z=16,
                               save_result=True, test_save_path=test_save_path)
