import os
import torch
import random
import numpy as np
import SimpleITK as sitk
from models import ResUNet
from torch.utils.data import Dataset
from calculate import hippocampus_left_right, hippocampus_2_to_1
from train_test import test_average_calculate, test_average_calculate_all


# Data preparation: dataset backed by SimpleITK image files
class ITKDataset(Dataset):
    """Dataset that lazily loads medical image volumes with SimpleITK.

    Each item is read from disk on access, converted to a float32
    tensor, and optionally passed through ``transform``.
    """

    def __init__(self, image_paths, transform=None):
        self.image_paths = image_paths
        self.transform = transform

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        # Read the volume from disk and convert it to a numpy array.
        volume = sitk.GetArrayFromImage(sitk.ReadImage(self.image_paths[idx]))

        # Float tensor for the network.
        tensor = torch.from_numpy(volume).float()

        # Optional on-the-fly transform (currently unused by callers).
        if self.transform:
            tensor = self.transform(tensor)

        return tensor


# Run inference on the test set and save predictions as .nii.gz files
def test_save(test_loader, move_path_files, model,
              save_dir='D:/ahaaaaaaaa/Python_PyCharm/V1.0.4/save/predicted'):
    """Run inference on every test volume and save the predicted mask.

    For each sample the central 64 axial slices are fed to the model,
    the argmax segmentation is pasted back into a zero-filled full-size
    volume, and the result is written next to the source file's name as
    ``<name>_mask.nii.gz`` with the source image's spatial metadata.

    Parameters
    ----------
    test_loader : iterable
        Yields one image tensor per sample; each item must reshape to
        (1, 176, 256, 224) — assumes ITKDataset output, TODO confirm.
    move_path_files : list[str]
        Source image paths, indexed in the same order as ``test_loader``;
        used for output naming and for origin/spacing/direction metadata.
    model : torch.nn.Module
        Trained network producing (1, 3, 64, 224, 176) class logits.
    save_dir : str
        Output directory for predicted masks. Default keeps the original
        hard-coded location for backward compatibility.
    """
    device_cpu = torch.device('cpu')

    # Full-size volumes; only slices 76:140 are predicted by the model,
    # everything else stays zero (background).
    predicted_left_all = torch.zeros((256, 224, 176), device=device_cpu)
    predicted_right_all = torch.zeros((256, 224, 176), device=device_cpu)
    predicted_all = torch.zeros((256, 224, 176), device=device_cpu)

    # Make sure the output directory exists before the first write.
    os.makedirs(save_dir, exist_ok=True)

    model.eval()  # evaluation mode (also sets model.training = False)
    with torch.no_grad():
        print('model begin to test')
        for test_files_num, data in enumerate(test_loader):  # batch size 1
            # (176, 256, 224) -> (1, 176, 256, 224)
            images = data.reshape(1, 176, 256, 224)

            # (1, 176, 256, 224) -> (256, 224, 176), crop 64 central axial
            # slices, then add batch/channel dims: (1, 1, 64, 224, 176)
            images = images.permute(0, 2, 3, 1)[0][76:140] \
                .reshape(1, 1, 64, 224, 176).to(device_cpu)

            outputs = model(images)  # (1, 3, 64, 224, 176) class logits

            # Highest-scoring class index per voxel is the prediction.
            predicted = torch.argmax(outputs, dim=1).reshape(64, 224, 176)

            # Split into left/right hippocampus maps and paste the crop
            # back into the full-size volumes. The left/right volumes are
            # kept for an optional per-side export (currently disabled).
            predicted_left, predicted_right = hippocampus_left_right(predicted)
            predicted_left_all[76:140] = predicted_left
            predicted_right_all[76:140] = predicted_right
            predicted_all[76:140] = predicted

            # (256, 224, 176) -> (176, 256, 224) so SimpleITK writes the
            # array in the same axis order as the source image.
            hai_all = predicted_all.permute(2, 0, 1).to(device_cpu).numpy()

            # Source image provides the spatial metadata for the output.
            move_image = sitk.ReadImage(move_path_files[test_files_num])

            # Strip the double '.nii.gz' extension from the source name.
            file_name = os.path.basename(move_path_files[test_files_num])
            file_name = os.path.splitext(os.path.splitext(file_name)[0])[0]

            # Convert the prediction to a NIfTI image and copy the source
            # image's geometry so it overlays correctly in viewers.
            predicted_hai_nii = sitk.GetImageFromArray(hai_all.astype(np.uint8))
            predicted_hai_nii.SetOrigin(move_image.GetOrigin())
            predicted_hai_nii.SetSpacing(move_image.GetSpacing())
            predicted_hai_nii.SetDirection(move_image.GetDirection())

            sitk.WriteImage(
                predicted_hai_nii,
                os.path.join(save_dir, file_name + '_mask.nii.gz'),
            )
            print(test_files_num)  # progress indicator


# --- Script entry: build the test set, load the trained model, run it ---
device_cpu = torch.device('cpu')  # inference runs on CPU

# Skull-stripped test images; sorted for a deterministic processing order.
move_path = 'C:/SYBdataset/TEST_IMGS_brain'
move_path_files = sorted(
    os.path.join(move_path, f)
    for f in os.listdir(move_path)
    if f.endswith('.nii.gz')
)

model = ResUNet(in_channel=1, out_channel=3, training=False).to(device_cpu)

# Saved model weights from training.
model_path = 'D:/ahaaaaaaaa/Python_PyCharm/V1.0.4/models/unet++_model_V1.0.4(tverskyLoss).pth'
if os.path.isfile(model_path):
    # map_location lets CUDA-trained weights load on a CPU-only machine.
    model_parameters = torch.load(model_path, map_location=device_cpu)
    model.load_state_dict(model_parameters)
    print('model_save is load')
else:
    print('model_save is not exist')

test_loader = ITKDataset(move_path_files)
test_save(test_loader, move_path_files, model)
