import random

import numpy as np
import torch
from PIL import Image, ImageFilter
from typing import Tuple

from matplotlib import pyplot as plt


def mask_to_image(mask: torch.Tensor) -> Image.Image:
    """
    Convert a PyTorch tensor into a PIL image.

    Args:
        mask (torch.Tensor): input tensor of shape (C, H, W) or (H, W).

    Returns:
        Image.Image: the converted PIL image.
    """
    if mask.dim() == 3:
        # move channels last for PIL
        mask = mask.permute(1, 2, 0)
        if mask.shape[-1] == 1:
            # Image.fromarray rejects a trailing singleton channel (H, W, 1)
            # for uint8 data, so drop it and hand PIL a plain (H, W) array.
            mask = mask.squeeze(-1)
    # 2-D input is already a valid grayscale layout; the old code added a
    # (H, W, 1) channel here, which made fromarray fail.

    # detach + cpu so tensors that require grad or live on GPU convert cleanly
    array = mask.detach().cpu().numpy().astype(np.uint8)
    return Image.fromarray(array)



def PIL_Image_ToTensor(img):
    """
    Convert a PIL image (or any array-like) to a long tensor.

    Color images come back channels-first (C, H, W); a single-channel
    3-D input is squeezed down to 2-D (H, W).
    """
    tensor = torch.from_numpy(np.array(img))

    if tensor.dim() == 3:  # color image: move channels to the front
        tensor = tensor.permute(2, 0, 1)
        if tensor.shape[0] == 1:  # grayscale with explicit channel: drop it
            tensor = tensor.squeeze(0)

    # masks/labels are consumed as integer class ids
    return tensor.to(torch.long)

def convert_pred_to_mask(pred):
    """
    Convert a prediction tensor into a predicted mask.

    Parameters:
    - pred (torch.Tensor): prediction tensor of shape (batch_size, num_classes, h, w).

    Returns:
    - torch.Tensor: predicted mask of shape (batch_size, h, w), dtype long.
    """
    # argmax over the class axis picks the highest-scoring class per pixel
    return pred.argmax(dim=1).to(torch.long)


def get_scores(pred: torch.Tensor, real_mask: torch.Tensor, device) -> Tuple[torch.Tensor, torch.Tensor]:
    '''
    Per-class intersection and union pixel counts for one batch.

    :param pred: model output, shape (b, c, h, w)
    :param real_mask: ground-truth mask, shape (b, h, w)
    :param device: device on which the accumulator tensors are created
    :return: (intersections, unions), each an int64 tensor of shape (c,)
    '''
    batch_size, num_classes, h, w = pred.shape
    # argmax over the class dimension gives the predicted mask (b, h, w)
    pred_mask = torch.argmax(pred, dim=1)

    total_intersections = torch.zeros(num_classes, dtype=torch.int64, device=device)
    total_unions = torch.zeros(num_classes, dtype=torch.int64, device=device)

    for c in range(num_classes):
        pred_c = pred_mask == c
        real_c = real_mask == c
        # assign directly on-device: no per-class .item() host sync needed
        total_intersections[c] = torch.sum(pred_c & real_c)
        total_unions[c] = torch.sum(pred_c | real_c)
    return total_intersections, total_unions


def dice_coeff(input: torch.Tensor, target: torch.Tensor, reduce_batch_first: bool = False, epsilon=1e-6):
    """
    Dice coefficient between `input` and `target`.

    :param input: prediction, shape (H, W) or (B, H, W); must match target
    :param target: ground truth, same shape as input
    :param reduce_batch_first: flatten the whole batch into one volume
    :param epsilon: smoothing term (now also forwarded to per-sample calls)
    :raises ValueError: if reduce_batch_first is set on a 2-D tensor
    """
    assert input.size() == target.size()
    if input.dim() == 2 and reduce_batch_first:
        raise ValueError(f'Reduce batch but got tensor without batch dimension (shape {input.shape})')

    if input.dim() == 2 or reduce_batch_first:
        inter = torch.dot(input.reshape(-1), target.reshape(-1))
        sets_sum = torch.sum(input) + torch.sum(target)
        if sets_sum.item() == 0:
            # both masks empty: make the ratio below evaluate to 1
            sets_sum = 2 * inter

        return (2 * inter + epsilon) / (sets_sum + epsilon)

    # average per-sample Dice; forward epsilon (the original dropped it here,
    # silently ignoring a caller-supplied smoothing value)
    dice = 0
    for i in range(input.shape[0]):
        dice += dice_coeff(input[i, ...], target[i, ...], epsilon=epsilon)
    return dice / input.shape[0]

def multiclass_dice_coeff(input: torch.Tensor, target: torch.Tensor, reduce_batch_first: bool = False, epsilon=1e-6):
    """Average of the per-channel Dice coefficients over dim 1."""
    assert input.size() == target.size()
    num_channels = input.shape[1]
    channel_total = sum(
        dice_coeff(input[:, ch, ...], target[:, ch, ...], reduce_batch_first, epsilon)
        for ch in range(num_channels)
    )
    return channel_total / num_channels

def dice_loss(input: torch.Tensor, target: torch.Tensor, multiclass: bool = False):
    """Dice loss = 1 - Dice coefficient, reducing over the batch first."""
    assert input.size() == target.size()
    if multiclass:
        return 1 - multiclass_dice_coeff(input, target, reduce_batch_first=True)
    return 1 - dice_coeff(input, target, reduce_batch_first=True)
######################################
import torch
import torch.nn.functional as F
from torch.autograd import Variable

# Lovasz-Softmax Loss Functions
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None, **kwargs):
    """
    Multi-class Lovasz-Softmax loss for class probabilities.
    """
    if not per_image:
        # single pass over the whole flattened batch
        return lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)

    # compute one loss per image, then average them
    per_image_losses = [
        lovasz_softmax_flat(
            *flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
            classes=classes,
        )
        for prob, lab in zip(probas, labels)
    ]
    return torch.mean(torch.stack(per_image_losses))

def lovasz_softmax_flat(probas, labels, classes='present'):
    """
    Flat Lovasz-Softmax loss over flattened predictions.

    :param probas: (P, C) class probabilities per pixel
    :param labels: (P,) ground-truth class ids
    :param classes: 'all', 'present', or an explicit list of class ids
    """
    if probas.numel() == 0:
        # only void pixels — zero loss with gradient flow preserved
        return probas * 0.

    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground for class c
        if classes == 'present' and fg.sum() == 0:
            continue
        class_pred = probas[:, c] if C > 1 else probas[:, 0]
        errors = (fg - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, descending=True)
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, lovasz_grad(fg_sorted)))
    if not losses:
        # classes='present' and no class found: torch.stack([]) would raise,
        # so return a zero scalar that still participates in autograd
        return probas.sum() * 0.
    return torch.mean(torch.stack(losses))

def flatten_probas(probas, labels, ignore=None):
    """
    Flatten predictions in the batch to (P, C) and labels to (P,).

    :param probas: (B, C, H, W) probabilities, or (B, H, W) for binary
    :param labels: (B, H, W) ground-truth class ids
    :param ignore: optional void label to filter out
    """
    if probas.dim() == 3:
        # binary case: add an explicit single-class channel
        B, H, W = probas.size()
        probas = probas.view(B, 1, H, W)
    B, C, H, W = probas.size()
    probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    labels = labels.view(-1)
    if ignore is None:
        return probas, labels
    valid = labels != ignore
    # boolean-mask indexing keeps probas 2-D even with a single valid pixel;
    # the old `valid.nonzero().squeeze()` collapsed that case to shape (C,)
    return probas[valid], labels[valid]

def lovasz_grad(gt_sorted):
    """
    Gradient of the Lovasz extension w.r.t. sorted errors (Alg. 1 of the
    Lovasz-Softmax paper).
    """
    n = len(gt_sorted)
    total_fg = gt_sorted.sum()
    cum_fg = gt_sorted.float().cumsum(0)
    cum_bg = (1 - gt_sorted).float().cumsum(0)
    jaccard = 1.0 - (total_fg - cum_fg) / (total_fg + cum_bg)
    if n > 1:
        # first differences turn cumulative IoU into per-position gradients
        jaccard[1:n] = jaccard[1:n] - jaccard[0:n - 1]
    return jaccard
######################################
def focal_loss(logits, labels, gamma=2, reduction="mean", **kwargs):
    r"""
    Focal loss for multi-class classification (concise version).

    `https://arxiv.org/pdf/1708.02002.pdf`

    FL(p_t) = -alpha * (1 - p_t)^gamma * ln(p_t)

    :param logits: raw (pre-softmax) scores
    :param labels: ground-truth class indices
    :param gamma: focusing parameter for hard-example mining
    :param reduction: "mean" or "sum"
    :raises ValueError: for any other reduction
    """
    # Keep per-sample CE losses (reduction="none").  The previous "sum"
    # collapsed the batch to a scalar *before* weighting, so the focal
    # factor (1 - p_t)^gamma was computed from one degenerate aggregate
    # instead of per sample — which is the whole point of focal loss.
    ce_loss = F.cross_entropy(logits, labels, reduction="none")
    log_pt = -ce_loss
    pt = torch.exp(log_pt)
    weights = (1 - pt) ** gamma
    fl = weights * ce_loss

    if reduction == "sum":
        fl = fl.sum()
    elif reduction == "mean":
        fl = fl.mean()
    else:
        raise ValueError(f"reduction '{reduction}' is not valid")
    return fl


def balanced_focal_loss(logits, labels, alpha=0.25, gamma=2, reduction="mean"):
    r"""
    Focal loss with a balancing factor.  In true multi-class use `alpha`
    should be a per-class weight vector; for simplicity a single scalar
    (0.25) is applied uniformly here.  For long-tailed datasets, build a
    proper alpha vector and adapt the focal loss function accordingly.
    """
    fl = focal_loss(logits, labels, gamma, reduction)
    return alpha * fl



def focal_lossv1(logits, labels, gamma=2):
    r"""
    Focal loss for multi-class classification (first version).

    FL(p_t) = -alpha * (1 - p_t)^gamma * ln(p_t)
    """
    # log_softmax is the numerically safe route: a raw softmax followed by
    # log can overflow/underflow (see the usual log-sum-exp trick).
    log_probs = F.log_softmax(logits, dim=-1)
    probs = torch.exp(log_probs)  # softmax probabilities

    # gather p_t of the true class for every sample (one-hot would also work)
    labels = labels.view(-1, 1)
    target_probs = probs.gather(1, labels)

    ce_loss = -torch.log(target_probs)
    modulating = (1 - target_probs) ** gamma
    # elementwise product, then the conventional mean (not sum) reduction
    return (modulating * ce_loss).mean()




import torch
import torch.nn as nn
# Module-level default device, used by the loss classes and the __main__ demo.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MultiClassFocalLossWithAlpha(nn.Module):
    """Multi-class focal loss with per-class alpha weights, for dense (N, C, H, W) logits."""

    def __init__(self, alpha=(0.1, 0.4, 0.3, 0.2), gamma=2, reduction='sum'):
        """
        :param alpha: per-class weight coefficients (e.g. 0.1/0.4/0.3/0.2 for 4 classes);
                      a tuple default avoids the shared-mutable-default pitfall
        :param gamma: focusing parameter for hard-example mining
        :param reduction: 'mean', 'sum', or anything else for per-element losses
        """
        # nn.Module's __init__ must run before attributes are set on the module
        super().__init__()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.alpha = torch.tensor(alpha).to(self.device)
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, pred, target):
        # pred: (N, C, H, W) raw logits; target: (N, H, W) class indices
        alpha = self.alpha[target]  # per-pixel class weight, shape (N, H, W)
        log_softmax = torch.log_softmax(pred, dim=1).to(self.device)  # (N, C, H, W)

        index = target.unsqueeze(1).to(self.device)  # (N, 1, H, W)
        # log-probability of each pixel's true class
        logpt = torch.gather(log_softmax, dim=1, index=index).to(self.device)

        logpt = logpt.view(-1)  # flatten to (N*H*W,)
        ce_loss = -logpt        # per-pixel cross entropy
        pt = torch.exp(logpt)   # per-pixel probability of the true class

        # focal loss: alpha * (1 - p_t)^gamma * CE
        focal_loss = alpha.view(-1) * (1 - pt) ** self.gamma * ce_loss
        if self.reduction == "mean":
            return torch.mean(focal_loss)
        if self.reduction == "sum":
            return torch.sum(focal_loss)
        return focal_loss






def sp_noiseImg(img, prob=None, **kwargs):
    '''
    Add salt-and-pepper noise to a 2-D grayscale uint8 image.

    :param img: numpy array of shape (H, W)
    :param prob: per-pixel corruption probability for each side (salt and
                 pepper).  When None (default), a random value in
                 [0.03, 0.07] is drawn — matching the historic behaviour.
                 The old signature had prob=0.04 but unconditionally
                 overwrote it, so an explicit argument was silently ignored.
    :return: noisy copy of the image (numpy array, uint8)
    '''
    if prob is None:
        # historic behaviour: pick a random corruption rate per call
        prob = random.uniform(0.03, 0.07)
    thres = 1 - prob
    output = np.zeros(img.shape, np.uint8)
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            rdn = random.random()
            if rdn < prob:
                output[i][j] = random.randint(0, 16)      # pepper
            elif rdn > thres:
                output[i][j] = random.randint(240, 255)   # salt
            else:
                output[i][j] = img[i][j]

    return output  # numpy


"""对图像和标签同时进行CutMix操作,通过collate_fn载入DataLoader使用"""
def custom_cutmix(images, masks):
    """
    Apply CutMix to a batch of images and their masks simultaneously.

    A random rectangular patch is copied from a shuffled view of the batch
    into every sample; the same patch is applied to the masks so the labels
    stay aligned with the pixels.
    """
    batch_size, _, H, W = images.size()

    # shuffled indices deciding which sample each image is mixed with
    shuffle = torch.randperm(batch_size)

    # side ratios are drawn independently, bounded away from degenerate
    # (empty / whole-image) patches
    low, high = 0.2, 0.8
    ratio_h = np.random.uniform(low, high)  # first draw -> patch height
    ratio_w = np.random.uniform(low, high)  # second draw -> patch width

    cut_w = int(W * ratio_w)
    cut_h = int(H * ratio_h)

    # random patch centre, then clip the box to the image bounds
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    x1 = np.clip(cx - cut_w // 2, 0, W)
    y1 = np.clip(cy - cut_h // 2, 0, H)
    x2 = np.clip(cx + cut_w // 2, 0, W)
    y2 = np.clip(cy + cut_h // 2, 0, H)

    mixed_images = images.clone()
    mixed_masks = masks.clone()

    # paste the patch from the shuffled batch into every target sample
    mixed_images[:, :, y1:y2, x1:x2] = images[shuffle, :, y1:y2, x1:x2]
    mixed_masks[:, :, y1:y2, x1:x2] = masks[shuffle, :, y1:y2, x1:x2]

    return mixed_images, mixed_masks



def collate_fn(batch, p=0.3):
    '''
    Custom collate for DataLoader (DataLoader itself cannot pass extra args).
    With probability `p`, CutMix is applied to the whole batch.
    This runs once per batch (dataset size / batch_size times), so a larger
    batch_size tolerates a larger p.
    '''
    images, masks, image_names = zip(*batch)
    images = torch.stack(images)   # tuple -> (batch, C, H, W)
    masks = torch.stack(masks)     # (batch, H, W)
    image_names = str(image_names)

    if random.random() < p:
        # custom_cutmix expects 4-D masks: add a channel axis, mix, drop it
        masks = masks.unsqueeze(1)
        images, masks = custom_cutmix(images, masks)
        masks = masks.squeeze(1)

    return images, masks, image_names

# Visualize one batch from a dataset DataLoader
def visualize_batch(data_loader, p=4):
    """Show the first `p` image/label pairs of one batch: images on row 1, labels on row 2."""
    # grab a single batch from the loader
    images, labels, _ = next(iter(data_loader))
    images = images.numpy()
    labels = labels.numpy()

    fig, axes = plt.subplots(nrows=2, ncols=p, figsize=(12, 6))
    axes = axes.flatten()

    for idx in range(p):
        img = images[idx].transpose((1, 2, 0))  # (C, H, W) -> (H, W, C)
        lbl = labels[idx].squeeze()             # drop a singleton channel if present

        axes[idx].imshow(img, cmap='gray')
        axes[idx].set_title(f'Image {idx+1}')
        axes[idx].axis('off')

        axes[idx + p].imshow(lbl, cmap='gray')
        axes[idx + p].axis('off')

    plt.show()


# Unsharp masking (sharpen by amplifying the high-frequency residual)
def ace_enhancement_numpy_1(image_np, amount=1.5, **kwargs):
    """
    Unsharp masking: amplify the high-frequency residual of a Gaussian blur.

    :param image_np: uint8 numpy image
    :param amount: strength of the high-frequency boost
    :return: enhanced uint8 numpy image, same shape as the input
    """
    image = Image.fromarray(image_np)  # PIL
    blurred = image.filter(ImageFilter.GaussianBlur(radius=0.6))

    # Work in float: subtracting uint8 arrays wraps around on negative
    # results (e.g. 10 - 20 == 246), corrupting the high-frequency band.
    blurred_array = np.asarray(blurred, dtype=np.float64)
    source = image_np.astype(np.float64)

    # high-frequency residual, boosted and recombined
    high_freq = source - blurred_array
    enhanced_array = source + amount * high_freq

    # clip back into the valid uint8 range
    return np.clip(enhanced_array, 0, 255).astype(np.uint8)

def ace_enhancement_numpy(image_np, C=4, MaxCG=5, **kwargs):
    """
    Adaptive contrast enhancement: boost the high-frequency residual with a
    gain derived from the global-to-local standard-deviation ratio.

    :param image_np: uint8 numpy image; per-channel gains for (H, W, C) input,
                     a single scalar gain for grayscale (H, W)
    :param C: overall enhancement strength
    :param MaxCG: upper bound on the gain coefficient
    :return: enhanced uint8 numpy image, same shape as the input
    """
    image = Image.fromarray(image_np)
    blurred = image.filter(ImageFilter.GaussianBlur(radius=0.6))

    # Float arithmetic: uint8 subtraction would wrap around on negatives.
    blurred_array = np.asarray(blurred, dtype=np.float64)
    source = image_np.astype(np.float64)

    # high-frequency residual (local detail)
    high_freq = source - blurred_array

    # residual variance per channel: (C,) for color, a scalar for grayscale
    var_mask = (high_freq ** 2).mean(axis=(0, 1))
    std_mask = np.sqrt(var_mask)

    # global standard deviation of the original image
    std_global = np.std(source)

    # gain = global/local std ratio, capped at MaxCG.  np.minimum works for
    # both scalar and per-channel gains (boolean-index assignment, as used
    # before, fails on 0-d grayscale results); the epsilon avoids a
    # divide-by-zero on perfectly flat images.
    gain_coefficient = np.minimum(std_global / (std_mask + 1e-8), MaxCG)

    # recombine: blurred base + amplified detail, scaled by C
    enhanced_array = blurred_array + C * gain_coefficient * high_freq

    return np.clip(enhanced_array, 0, 255).astype(np.uint8)





if __name__ == '__main__':
    # Smoke test for get_scores: one-hot predictions against a slightly
    # perturbed copy of the labels.  The original demo was broken three ways:
    # torch.onehot does not exist (it is F.one_hot), b[1, 1, 1, :] indexed
    # past batch dim 1, and get_scores was called without its device arg.
    labels = torch.randint(low=0, high=4, size=(1, 5, 5), dtype=torch.long)
    # F.one_hot yields (B, H, W, C); permute to the (B, C, H, W) layout
    pred = F.one_hot(labels, num_classes=4).permute(0, 3, 1, 2).float()
    perturbed = labels.clone()
    perturbed[0, 1, :] = 1  # corrupt one row of the ground truth
    print(get_scores(pred, perturbed, device))