import copy
import os

import numpy as np
import torch
from PIL import Image
from medpy import metric
from scipy.ndimage import zoom
import torch.nn as nn
import SimpleITK as sitk
import torchvision.transforms.functional as TF
import random
from tqdm import tqdm

from matplotlib import pyplot as plt



class DiceLoss(nn.Module):
    """Multi-class soft Dice loss.

    Integer labels are one-hot encoded, a smoothed soft Dice score is
    computed per class against the (optionally softmaxed) predictions,
    and the weighted mean Dice loss over all classes (background
    included) is returned.
    """

    def __init__(self, n_classes):
        super(DiceLoss, self).__init__()
        self.n_classes = n_classes

    def _one_hot_encoder(self, input_tensor):
        # One boolean plane per class, stacked along the channel dim.
        planes = [(input_tensor == c).unsqueeze(1) for c in range(self.n_classes)]
        return torch.cat(planes, dim=1).float()

    def _dice_loss(self, score, target):
        # Soft Dice: 1 - (2*|A.B| + eps) / (|A|^2 + |B|^2 + eps);
        # the epsilon guards against 0/0 when both masks are empty.
        target = target.float()
        smooth = 1e-5
        overlap = torch.sum(score * target)
        denom = torch.sum(score * score) + torch.sum(target * target)
        return 1 - (2 * overlap + smooth) / (denom + smooth)

    def forward(self, inputs, target, weight=None, softmax=False):
        """Return the mean weighted Dice loss over all classes.

        Args:
            inputs: (B, C, ...) predicted scores or logits.
            target: (B, ...) integer class labels.
            weight: optional per-class weights; defaults to all ones.
            softmax: apply softmax over dim 1 before scoring.
        """
        if softmax:
            inputs = torch.softmax(inputs, dim=1)
        target = self._one_hot_encoder(target)
        if weight is None:
            weight = [1] * self.n_classes
        assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(), target.size())
        total = 0.0
        for c in range(self.n_classes):
            total = total + self._dice_loss(inputs[:, c], target[:, c]) * weight[c]
        # Average over classes (note: weights are not re-normalized).
        return total / self.n_classes


def calculate_metric_percase(pred, gt):
    """Compute (dice, hd95) for one binary prediction/ground-truth pair.

    Fix: the original thresholded ``pred``/``gt`` in place
    (``pred[pred > 0] = 1``), silently mutating the caller's arrays;
    binarization now happens on copies.

    Args:
        pred: array-like prediction mask; any value > 0 counts as foreground.
        gt: array-like ground-truth mask, same shape as ``pred``.

    Returns:
        (dice, hd95) when both masks have foreground;
        (1, 0) when only the prediction has foreground (legacy convention —
        arguably a false positive should score 0, kept for compatibility);
        (0, 0) when the prediction is empty.
    """
    pred = (np.asarray(pred) > 0).astype(np.uint8)
    gt = (np.asarray(gt) > 0).astype(np.uint8)
    if pred.sum() > 0 and gt.sum() > 0:
        dice = metric.binary.dc(pred, gt)
        hd95 = metric.binary.hd95(pred, gt)
        return dice, hd95
    elif pred.sum() > 0 and gt.sum() == 0:
        return 1, 0
    else:
        return 0, 0







def test_single_volume(image, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None, z_spacing=1):
    """Run slice-wise inference on one volume and compute per-class metrics.

    Args:
        image: tensor of shape (1, D, H, W) for a 3-D volume or (1, H, W)
            for a single 2-D image; the leading batch dim is squeezed away.
        label: ground-truth tensor with matching spatial shape.
        net: segmentation network; inputs are moved to CUDA, so a GPU is required.
        classes: number of classes; metrics cover foreground classes 1..classes-1.
        patch_size: (H, W) the network expects; each slice is zoomed to it.
        test_save_path: directory to write image/prediction/label NIfTI files to,
            or None to skip saving.
        case: case identifier used in the output file names.
        z_spacing: z voxel spacing written into the NIfTI headers.

    Returns:
        List of calculate_metric_percase results, one per foreground class.
    """
    image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
    if len(image.shape) == 3:
        # 3-D volume: predict slice by slice along the first axis.
        prediction = np.zeros_like(label)
        for ind in range(image.shape[0]):
            slice = image[ind, :, :]
            x, y = slice.shape[0], slice.shape[1]
            if x != patch_size[0] or y != patch_size[1]:
                # Cubic interpolation to the network input size.
                slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3)  # previous using 0
            input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().cuda()
            net.eval()
            with torch.no_grad():
                outputs = net(input)
                out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
                out = out.cpu().detach().numpy()
                if x != patch_size[0] or y != patch_size[1]:
                    # Nearest-neighbour back to the original size so labels stay discrete.
                    pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
                else:
                    pred = out
                prediction[ind] = pred
    else:
        # 2-D input: single forward pass. NOTE(review): this path does not
        # resize to patch_size — confirm callers already pass matching sizes.
        input = torch.from_numpy(image).unsqueeze(
            0).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)
            prediction = out.cpu().detach().numpy()
    metric_list = []
    for i in range(1, classes):
        # Per-class binary masks; class 0 (background) is skipped.
        metric_list.append(calculate_metric_percase(prediction == i, label == i))

    if test_save_path is not None:
        # Persist all three volumes as NIfTI for later inspection.
        img_itk = sitk.GetImageFromArray(image.astype(np.float32))
        prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32))
        lab_itk = sitk.GetImageFromArray(label.astype(np.float32))
        img_itk.SetSpacing((1, 1, z_spacing))
        prd_itk.SetSpacing((1, 1, z_spacing))
        lab_itk.SetSpacing((1, 1, z_spacing))
        sitk.WriteImage(prd_itk, test_save_path + '/'+case + "_pred.nii.gz")
        sitk.WriteImage(img_itk, test_save_path + '/'+ case + "_img.nii.gz")
        sitk.WriteImage(lab_itk, test_save_path + '/'+ case + "_gt.nii.gz")
    return metric_list



# This earlier version is known to work (kept commented out for reference).
# def test_single_volume_success(image, label, net, classes, patch_size=[224, 224], test_save_path=None, z_spacing=1):
#     # 将图像和标签转换为 NumPy 数组
#     image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
#
#     # 保持图像为 RGB 格式，不进行灰度转换
#     if len(image.shape) == 3 and image.shape[0] == 3:
#         pass  # 保持图像为 RGB 格式
#
#     # 初始化 prediction 数组，确保其形状为 (H, W)
#     prediction = np.zeros(image.shape[1:], dtype=np.float32)
#
#     if len(image.shape) == 3:
#         # 如果图像是 RGB 的 (3, height, width)
#         x, y = image.shape[1:]
#
#         # 如果当前图像的尺寸与 patch_size 不一致，则调整尺寸
#         if x != patch_size[0] or y != patch_size[1]:
#             image = np.stack([zoom(image[c], (patch_size[0] / x, patch_size[1] / y), order=3) for c in range(3)], axis=0)  # 对每个通道进行三次样条插值
#
#         # 转换为张量并添加额外的维度，并将其发送到 GPU 上
#         input = torch.from_numpy(image).unsqueeze(0).float().cuda()
#         net.eval()
#         with torch.no_grad():
#             outputs = net(input)
#             out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
#             out = out.cpu().detach().numpy()
#
#             # 恢复预测结果到原始尺寸
#             if x != patch_size[0] or y != patch_size[1]:
#                 pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
#             else:
#                 pred = out
#
#             # 将预测结果存储到 prediction 数组中
#             prediction = pred
#     else:
#         # 如果图像是二维的
#         input = torch.from_numpy(image).unsqueeze(0).unsqueeze(0).float().cuda()
#         net.eval()
#         with torch.no_grad():
#             out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)
#             prediction = out.cpu().detach().numpy()
#
#     # 将 label 从 (1, 224, 224) 压缩为 (224, 224)
#     label = label.squeeze(0)
#
#     # 确保 prediction 和 label 的形状一致
#     assert prediction.shape == label.shape, f"Shape mismatch: prediction.shape = {prediction.shape}, label.shape = {label.shape}"
#
#     # 计算指标
#     metric_list = []
#     for i in range(1, classes):
#         # 确保计算前的输入为布尔数组
#         pred_bool = (prediction == i)
#         label_bool = (label == i)
#
#         if np.any(pred_bool) and np.any(label_bool):  # 确保有前景存在以避免距离变换错误
#             metric_list.append(calculate_metric_percase(pred_bool, label_bool))
#         else:
#             # 如果没有前景，直接添加零结果
#             metric_list.append([0, 0])
#
#     # 保存预测结果
#     if test_save_path is not None:
#         # 保存为 PNG 格式
#         os.makedirs(test_save_path, exist_ok=True)
#         plt.imsave(os.path.join(test_save_path, "_pred.png"), prediction, cmap="gray")
#         plt.imsave(os.path.join(test_save_path, "_img.png"), image.transpose(1, 2, 0).astype(np.uint8))  # 保存为 RGB 图像
#         plt.imsave(os.path.join(test_save_path, "_gt.png"), label, cmap="gray")
#
#     return metric_list

from sklearn.metrics import confusion_matrix
# Compute evaluation metrics
def calculate_metrics(pred_bool, label_bool):
    """Compute precision, recall and IoU for one binary mask pair.

    Fix: the original built a full sklearn ``confusion_matrix`` just to
    read four counts, and computed an F1 score that was never returned.
    The counts are now taken directly with numpy (identical values, no
    sklearn dependency on this path); the dead F1 computation is removed.

    Args:
        pred_bool: boolean (or 0/1) prediction mask.
        label_bool: boolean (or 0/1) ground-truth mask, same shape.

    Returns:
        (precision, recall, miou) as floats; each term is 0.0 when its
        denominator would be zero.
    """
    pred = np.asarray(pred_bool).astype(bool).ravel()
    label = np.asarray(label_bool).astype(bool).ravel()

    tp = np.count_nonzero(pred & label)   # true positives
    fp = np.count_nonzero(pred & ~label)  # false positives
    fn = np.count_nonzero(~pred & label)  # false negatives

    # Precision = TP / (TP + FP)
    precision = tp / (tp + fp) if (tp + fp) != 0 else 0.0
    # Recall = TP / (TP + FN)
    recall = tp / (tp + fn) if (tp + fn) != 0 else 0.0
    # IoU = TP / (TP + FP + FN)
    miou = tp / (tp + fp + fn) if (tp + fp + fn) != 0 else 0.0

    return precision, recall, miou

def test_single_volume_success(image, label, net, classes, patch_size=[224, 224], test_save_path=None, z_spacing=1):
    """Run inference on one (RGB or 2-D) image and compute per-class metrics.

    Args:
        image: tensor of shape (1, 3, H, W) for an RGB image or (1, H, W)
            for a 2-D image; the leading batch dim is squeezed away.
        label: ground-truth tensor of shape (1, 1, H, W) or (1, H, W)
            (squeezed twice below).
        net: segmentation network; inputs are moved to CUDA, so a GPU is required.
        classes: number of classes; metrics cover foreground classes 1..classes-1.
        patch_size: (H, W) the network expects.
        test_save_path: directory for PNG outputs, or None to skip saving.
        z_spacing: unused on this path (kept for signature compatibility).

    Returns:
        List of [dice, hd95, precision, recall, miou] per foreground class.
    """
    # Convert image and label to NumPy arrays.
    image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()

    # Prediction buffer with the spatial shape of the image.
    prediction = np.zeros(image.shape[1:], dtype=np.float32)

    if len(image.shape) == 3:
        # RGB image (3, H, W): resize each channel to patch_size if needed.
        x, y = image.shape[1:]
        if x != patch_size[0] or y != patch_size[1]:
            image = np.stack([zoom(image[c], (patch_size[0] / x, patch_size[1] / y), order=3) for c in range(3)], axis=0)

        input = torch.from_numpy(image).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            outputs = net(input)
            out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
            out = out.cpu().detach().numpy()

            # Restore the prediction to the original spatial size
            # (nearest-neighbour keeps class labels discrete).
            if x != patch_size[0] or y != patch_size[1]:
                pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
            else:
                pred = out

            prediction = pred
    else:
        # 2-D image: single forward pass without resizing.
        input = torch.from_numpy(image).unsqueeze(0).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)
            prediction = out.cpu().detach().numpy()

    # Squeeze label from (1, H, W) down to (H, W).
    label = label.squeeze(0)
    assert prediction.shape == label.shape, f"Shape mismatch: prediction.shape = {prediction.shape}, label.shape = {label.shape}"

    metric_list = []
    for i in range(1, classes):
        pred_bool = (prediction == i)
        label_bool = (label == i)

        # Only score classes present in both masks; hd95 needs foreground
        # in each, otherwise the distance transform would fail.
        if np.any(pred_bool) and np.any(label_bool):
            dice = calculate_metric_percase(pred_bool, label_bool)  # Assume this function exists for Dice and HD95
            precision, recall, miou = calculate_metrics(pred_bool, label_bool)
            metric_list.append([dice[0], dice[1], precision, recall, miou])
        else:
            metric_list.append([0, 0, 0, 0, 0])

    # Save prediction, (resized) image, and ground truth as PNGs.
    if test_save_path is not None:
        os.makedirs(test_save_path, exist_ok=True)
        # NOTE(review): fixed file names — each call overwrites the previous
        # case's outputs; confirm whether a per-case name was intended.
        plt.imsave(os.path.join(test_save_path, "_pred.png"), prediction, cmap="gray")
        plt.imsave(os.path.join(test_save_path, "_img.png"), image.transpose(1, 2, 0).astype(np.uint8))
        plt.imsave(os.path.join(test_save_path, "_gt.png"), label, cmap="gray")

    return metric_list












#
# import numpy as np
# import torch
# from scipy.ndimage import zoom
# import SimpleITK as sitk
#
# import numpy as np
# import torch
# from scipy.ndimage import zoom
# import SimpleITK as sitk
#
# def test_single_volume(image, label, net, classes, patch_size=[224, 224], test_save_path=None, z_spacing=1):
#     # 将图像和标签转换为 NumPy 数组
#     image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
#
#     # 初始化 prediction 数组，确保其形状与 image 一致
#     depth, height, width = image.shape if len(image.shape) == 3 else (1, *image.shape)
#     prediction = np.zeros((depth, height, width), dtype=np.float32)
#
#     if len(image.shape) == 3:
#         # 如果图像是三维的 (3, height, width)
#         for ind in range(image.shape[0]):
#             slice = image[ind, :, :]
#             x, y = slice.shape
#
#             # 如果当前切片的尺寸与 patch_size 不一致，则调整切片的尺寸
#             if x != patch_size[0] or y != patch_size[1]:
#                 slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3)  # 三次样条插值
#
#             # 转换为张量并添加额外的维度，并将其发送到 GPU 上
#             input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().cuda()
#             net.eval()
#             with torch.no_grad():
#                 outputs = net(input)
#                 out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
#                 out = out.cpu().detach().numpy()
#
#                 # 恢复预测结果到原始尺寸
#                 if x != patch_size[0] or y != patch_size[1]:
#                     pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
#                 else:
#                     pred = out
#
#                 # 将预测结果存储到 prediction 数组中
#                 prediction[ind, :, :] = pred
#     else:
#         # 如果图像是二维的
#         input = torch.from_numpy(image).unsqueeze(0).unsqueeze(0).float().cuda()
#         net.eval()
#         with torch.no_grad():
#             out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)
#             prediction = out.cpu().detach().numpy()
#
#     # 将 prediction 转换为单通道的类别索引图
#     if len(prediction.shape) == 3 and prediction.shape[0] == 3:
#         prediction = np.argmax(prediction, axis=0)
#
#     # 将 label 从 (1, 224, 224) 压缩为 (224, 224)
#     label = label.squeeze(0)
#     # 确保 prediction 和 label 的形状一致
#     assert prediction.shape == label.shape, f"Shape mismatch: prediction.shape = {prediction.shape}, label.shape = {label.shape}"
#
#     # 计算指标
#     metric_list = []
#     for i in range(1, classes):
#         # 确保计算前的输入为布尔数组
#         pred_bool = (prediction == i)
#         label_bool = (label == i)
#
#         if np.any(pred_bool) and np.any(label_bool):  # 确保有前景存在以避免距离变换错误
#             metric_list.append(calculate_metric_percase(pred_bool, label_bool))
#         else:
#             # 如果没有前景，直接添加零结果
#             metric_list.append([0, 0])
#
#     # 保存预测结果
#     if test_save_path is not None:
#         img_itk = sitk.GetImageFromArray(image.astype(np.float32))
#         prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32))
#         lab_itk = sitk.GetImageFromArray(label.astype(np.float32))
#         img_itk.SetSpacing((1, 1, z_spacing))
#         prd_itk.SetSpacing((1, 1, z_spacing))
#         lab_itk.SetSpacing((1, 1, z_spacing))
#         sitk.WriteImage(prd_itk, test_save_path + '/' + "_pred.nii.gz")
#         sitk.WriteImage(img_itk, test_save_path + '/' + "_img.nii.gz")
#         sitk.WriteImage(lab_itk, test_save_path + '/' + "_gt.nii.gz")
#
#     return metric_list



def one_hot_encode(label, num_channels):
    """Return the one-hot encoding of an integer label array.

    Args:
        label (ndarray): input label array of class indices.
        num_channels (int): number of channels (classes) to produce.

    Returns:
        ndarray: float32 array of shape (num_channels, *label.shape)
        where channel c is 1.0 wherever ``label == c``.
    """
    planes = [(label == channel).astype(np.float32) for channel in range(num_channels)]
    return np.stack(planes, axis=0)
def testISIC_single_volume(image, label, net, classes, patch_size=[224, 224], test_save_path=None, z_spacing=1):
    """ISIC variant of test_single_volume: one-hot encodes the label first.

    NOTE(review): ``label`` is one-hot encoded to shape (3, H, W) while the
    3-D branch treats ``image``'s first axis as slices and writes 2-D
    predictions into ``prediction = np.zeros_like(label)``; the later
    ``label == i`` comparison then runs against one-hot floats rather than
    class indices. Verify this produces the intended metrics for RGB input.
    """
    image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
    label = one_hot_encode(label, 3)
    if len(image.shape) == 3:
        prediction = np.zeros_like(label)  # zero-filled buffer for the predictions
        for ind in range(image.shape[0]):
            slice = image[ind, :, :]  # current slice
            x, y = slice.shape[0], slice.shape[1]  # slice height and width
            if x != patch_size[0] or y != patch_size[1]:  # resize when the slice does not match patch_size
                slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3)  # previous using 0
                # Convert to a tensor, add batch/channel dims, and move to the GPU.
            input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().cuda()
            net.eval()
            with torch.no_grad():
                outputs = net(input)
                out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
                out = out.cpu().detach().numpy()
                if x != patch_size[0] or y != patch_size[1]:
                    # Nearest-neighbour back to the original size to keep labels discrete.
                    pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
                else:
                    pred = out
                prediction[ind] = pred
    else:
        # 2-D input: single forward pass without resizing.
        input = torch.from_numpy(image).unsqueeze(
            0).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)
            prediction = out.cpu().detach().numpy()
    metric_list = []
    for i in range(1, classes):
        metric_list.append(calculate_metric_percase(prediction == i, label == i))

    if test_save_path is not None:
        # Persist outputs as NIfTI. NOTE(review): file names have no case
        # prefix, so successive calls overwrite each other.
        img_itk = sitk.GetImageFromArray(image.astype(np.float32))
        prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32))
        lab_itk = sitk.GetImageFromArray(label.astype(np.float32))
        img_itk.SetSpacing((1, 1, z_spacing))
        prd_itk.SetSpacing((1, 1, z_spacing))
        lab_itk.SetSpacing((1, 1, z_spacing))
        sitk.WriteImage(prd_itk, test_save_path + '/' + "_pred.nii.gz")
        sitk.WriteImage(img_itk, test_save_path + '/' + "_img.nii.gz")
        sitk.WriteImage(lab_itk, test_save_path + '/' + "_gt.nii.gz")
    return metric_list
"""
    处理图片使用
"""
class myToTensor:
    """Convert an (image, mask) pair of HWC arrays into CHW torch tensors."""

    def __init__(self):
        pass

    def __call__(self, data):
        img, msk = data
        img_chw = torch.tensor(img).permute(2, 0, 1)
        msk_chw = torch.tensor(msk).permute(2, 0, 1)
        return img_chw, msk_chw

# Resize
class myResize:
    """Resize both image and mask of a pair to a fixed (size_h, size_w)."""

    def __init__(self, size_h=256, size_w=256):
        self.size_h = size_h
        self.size_w = size_w

    def __call__(self, data):
        img, msk = data
        target = [self.size_h, self.size_w]
        return TF.resize(img, target), TF.resize(msk, target)

class myResize_npz:
    """Resize an .npz-style sample dict ({'image', 'label'}) to (x, y).

    Both arrays are converted to tensors and resized with torchvision.
    """

    def __init__(self, x, y):
        self.size_h = x
        self.size_w = y

    def __call__(self, data):
        img = torch.from_numpy(data['image'])
        msk = torch.from_numpy(data['label'])
        target = [self.size_h, self.size_w]
        return TF.resize(img, target), TF.resize(msk, target)

# Random horizontal flip
class myRandomHorizontalFlip:
    """With probability p, horizontally flip both image and mask together."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, data):
        img, msk = data
        # Single random draw decides the flip for both tensors.
        if random.random() >= self.p:
            return img, msk
        return TF.hflip(img), TF.hflip(msk)

# Random vertical flip
class myRandomVerticalFlip:
    """With probability p, vertically flip both image and mask together."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, data):
        img, msk = data
        # Single random draw decides the flip for both tensors.
        if random.random() >= self.p:
            return img, msk
        return TF.vflip(img), TF.vflip(msk)

# Random rotation
class myRandomRotation:
    """With probability p, rotate both image and mask by a random angle.

    Fix: the original drew the angle once in ``__init__``, so every call
    rotated by the same fixed angle for the transform's whole lifetime.
    The angle is now resampled on each call, which matches the usual
    data-augmentation contract.
    """

    def __init__(self, p=0.5, degree=[0, 360]):
        self.degree = degree
        self.p = p
        # Kept for backward compatibility with code that reads ``.angle``;
        # the effective rotation angle is resampled per call.
        self.angle = random.uniform(degree[0], degree[1])

    def __call__(self, data):
        image, mask = data
        if random.random() < self.p:
            # Fresh angle each call; the same angle is applied to both
            # image and mask so they stay aligned.
            angle = random.uniform(self.degree[0], self.degree[1])
            return TF.rotate(image, angle), TF.rotate(mask, angle)
        else:
            return image, mask

# Normalization
class myNormalize:
    """Standardize an image with dataset-specific mean/std, then rescale to [0, 255].

    Statistics are chosen by dataset name and train/test split; unknown
    dataset names fall back to per-channel RGB statistics.
    """

    # (train_stats, test_stats) per dataset, each as (mean, std).
    _SCALAR_STATS = {
        'ISIC2018': ((157.561, 26.706), (149.034, 32.022)),
        'ISIC2017': ((159.922, 28.871), (148.429, 25.748)),
        'ISIC2016': ((159.922, 28.871), (148.429, 25.748)),  # parameters not tuned yet
        'DSB2018': ((159.922, 28.871), (148.429, 25.748)),   # parameters not tuned yet
    }

    def __init__(self, data_name, train=True):
        stats = self._SCALAR_STATS.get(data_name)
        if stats is not None:
            self.mean, self.std = stats[0] if train else stats[1]
        elif train:
            # Fallback: per-channel RGB statistics.
            self.mean = [0.40059128, 0.26921213, 0.18386859]
            self.std = [0.29772735, 0.20417021, 0.13873906]
        else:
            self.mean = [0.29772735, 0.20417021, 0.13873906]
            self.std = [0.29772735, 0.20417021, 0.13873906]

    def __call__(self, data):
        img, msk = data
        # Standardize, then min-max rescale the result into [0, 255].
        standardized = (img - self.mean) / self.std
        lo = np.min(standardized)
        hi = np.max(standardized)
        rescaled = (standardized - lo) / (hi - lo) * 255.
        return rescaled, msk


