import torch
import torch.nn.functional as F
import kornia.filters as filters
import torch.nn as nn
# Shared loss primitives used throughout this module.
cos=torch.nn.CosineEmbeddingLoss(reduction='mean')  # cosine-similarity loss used by angle()
MSELoss = nn.MSELoss(reduction= 'mean')  # shared MSE loss for histogram / decomposition losses
sobel=filters.sobel  # kornia Sobel gradient-magnitude filter, aliased for brevity

def fu_ls_v0(image_gray,image_ir,generate_img):
    """Fusion loss v0: intensity L1 plus Sobel-edge L1.

    (v0 replaces the hand-written Sobel convolution of the original fusion
    loss with kornia's built-in ``sobel`` filter.)
    Returns (total_loss, intensity_loss, edge_loss).
    """
    # Intensity target: per-pixel max of the visible and infrared images.
    intensity_target = torch.max(image_gray, image_ir)
    intensity_loss = F.l1_loss(intensity_target, generate_img)

    # Edge target: per-pixel max of the two Sobel magnitude maps.
    edge_target = torch.max(sobel(image_gray), sobel(image_ir))
    edge_loss = F.l1_loss(edge_target, sobel(generate_img))

    total = 10 * edge_loss + intensity_loss
    return total, intensity_loss, edge_loss

def fu_ls(image_gray,image_ir,generate_img):
    """Fusion loss: intensity L1 plus Canny-edge L1 (default Canny params)."""
    # Intensity term against the element-wise max of the two source images.
    target_intensity = torch.max(image_gray, image_ir)
    loss_intensity = F.l1_loss(target_intensity, generate_img)

    # kornia's canny returns (magnitude, edge_map); only the edge map is used.
    edge_vis = filters.canny(image_gray)[1]
    edge_ir = filters.canny(image_ir)[1]
    edge_gen = filters.canny(generate_img)[1]
    loss_edges = F.l1_loss(torch.max(edge_vis, edge_ir), edge_gen)

    return loss_edges * 10 + loss_intensity, loss_intensity, loss_edges

def fu_ls_v1(image_gray,image_ir,generate_img):
    """Fusion loss v1 (default Canny params): Canny edge loss plus an
    intensity loss in which pixels under the joint edge map are
    colour-inverted before comparison."""
    # Joint Canny edge map (second return value of kornia's canny).
    edge_vis = filters.canny(image_gray)[1]
    edge_ir = filters.canny(image_ir)[1]
    edge_joint = torch.max(edge_vis, edge_ir)
    edge_gen = filters.canny(generate_img)[1]
    loss_edge = F.l1_loss(edge_joint, edge_gen)

    # Intensity target: off-edge pixels keep the joint max, on-edge pixels
    # are replaced by their colour inverse.
    intensity = torch.max(image_gray, image_ir)
    inverted_on_edge = 1 - intensity * edge_joint
    kept_off_edge = intensity * (1 - edge_joint)
    loss_in = F.l1_loss(inverted_on_edge + kept_off_edge, generate_img)

    return loss_edge * 10 + loss_in, loss_in, loss_edge

def fu_ls_v2(image_gray,image_ir,generate_img):
    """Fusion loss v2: edge-inverted intensity loss only, no edge loss.

    The joint Canny edge map is used solely to colour-invert the intensity
    target under edge pixels.  Returns (total, intensity_loss,
    intensity_loss) to preserve the 3-tuple interface of the other
    fu_ls_* variants.
    """
    # Joint Canny edge map of the two inputs (second canny return value).
    # The canny(generate_img) call present in other variants is dropped here:
    # its result was computed but never used.
    _, gray_edge = filters.canny(image_gray)
    _, ir_edge = filters.canny(image_ir)
    joint_edge = torch.max(gray_edge, ir_edge)

    # Intensity target: invert the pixels under the edge map, keep the rest.
    joint_in = torch.max(image_gray, image_ir)
    joint_in = (1 - joint_in * joint_edge) + joint_in * (1 - joint_edge)
    loss_in = F.l1_loss(joint_in, generate_img)

    return loss_in, loss_in, loss_in

def fu_ls_v3(image_gray,image_ir,generate_img):
    """Fusion loss v3 (default Canny params): off-edge intensity L1 plus a
    direct L1 between the colour-inverted intensity on edge pixels and the
    generated image restricted to those same pixels."""
    intensity = torch.max(image_gray, image_ir)

    # Joint edge map from the two Canny edge maps.
    edge_map = torch.max(filters.canny(image_gray)[1], filters.canny(image_ir)[1])

    # Edge term: inverted intensity on edges vs generated pixels on edges.
    loss_edge = F.l1_loss(1 - intensity * edge_map, generate_img * edge_map)

    # Intensity term restricted to the non-edge region.
    off_edge = 1 - edge_map
    loss_in = F.l1_loss(intensity * off_edge, generate_img * off_edge)

    return loss_edge * 10 + loss_in, loss_in, loss_edge

#canny = filters.Canny(low_threshold=0.2, high_threshold=0.25)
# Module-level Canny operator used by fu_ls_v4 (these thresholds are kornia's defaults).
canny = filters.Canny(low_threshold=0.1, high_threshold=0.2)
def fu_ls_v4(image_gray,image_ir,generate_img):
    """Same as fu_ls_v3, but uses the tunable module-level ``canny`` operator
    (currently set to kornia's defaults: low 0.1, high 0.2)."""
    intensity = torch.max(image_gray, image_ir)
    edges = torch.max(canny(image_gray)[1], canny(image_ir)[1])

    # Inverted intensity on edge pixels vs the generated image on those pixels.
    loss_edge = F.l1_loss(1 - intensity * edges, generate_img * edges)

    # Plain intensity L1 restricted to the off-edge region.
    mask = 1 - edges
    loss_in = F.l1_loss(intensity * mask, generate_img * mask)

    return 10 * loss_edge + loss_in, loss_in, loss_edge


def fu_ls_v5(image_gray,image_ir,generate_img):
    """Same as fu_ls_v3 but with Sobel gradient magnitudes instead of Canny."""
    intensity = torch.max(image_gray, image_ir)
    edges = torch.max(sobel(image_gray), sobel(image_ir))

    # Inverted intensity on (soft) edge pixels vs generated pixels there.
    loss_edge = F.l1_loss(1 - intensity * edges, generate_img * edges)

    # Intensity L1 on the complementary region.
    non_edge = 1 - edges
    loss_in = F.l1_loss(intensity * non_edge, generate_img * non_edge)

    return loss_edge * 10 + loss_in, loss_in, loss_edge

#-------- Build the "lost edge" mask from the difference between max(ir_edge, vis_edge)
# and the edge map of the joint intensity image, instead of constructing the loss
# via colour inversion alone.
# NOTE(review): this Canny operator is identical to the one defined above fu_ls_v4;
# the re-assignment is redundant but harmless (same thresholds).
canny = filters.Canny(low_threshold=0.1, high_threshold=0.2)

def edge_ls(image_gray,image_ir,generate_img):
    """Edge loss combining Canny and Sobel terms.

    Canny term: on "lost edge" pixels (present in max(vis_edge, ir_edge) but
    absent from the joint-intensity edge map) the inverted intensity is
    L1-matched to the generated image.  Sobel term: gradient maps are
    L1-matched with the lost-edge pixels masked out.  The intensity L1 is
    restricted to the same kept region.
    Returns (total, intensity_loss, edge_loss).
    """
    intensity = torch.max(image_gray, image_ir)

    # Lost Canny edges: in max(vis, ir) edge map but not in the fused one.
    vis_edge = canny(image_gray)[1]
    ir_edge = canny(image_ir)[1]
    fused_edge = canny(intensity)[1]
    lost = torch.clamp(torch.abs(torch.max(vis_edge, ir_edge) - fused_edge), min=0, max=1)
    loss_canny = F.l1_loss((1 - intensity) * lost, generate_img * lost)

    # Sobel comparison with the lost-edge pixels masked out.
    keep = 1 - lost
    target_grad = torch.max(sobel(image_gray), sobel(image_ir)) * keep
    loss_sobel = F.l1_loss(target_grad, sobel(generate_img) * keep)

    loss_edge = 5 * loss_canny + loss_sobel

    # Intensity term restricted to the kept region.
    loss_in = F.l1_loss(intensity * keep, generate_img * keep)

    return loss_edge * 10 + loss_in, loss_in, loss_edge

def edge_ls_v1(image_gray,image_ir,generate_img):
    """Variant of edge_ls where the inverted Canny edges become part of the
    intensity (content) target: the edge-removed intensity image plus the
    inverted lost edges is compared against the generated image directly."""
    intensity = torch.max(image_gray, image_ir)

    # Lost Canny edges: in max(vis, ir) edge map but not in the fused one.
    lost = torch.clamp(
        torch.abs(torch.max(canny(image_gray)[1], canny(image_ir)[1]) - canny(intensity)[1]),
        min=0, max=1)
    inverted_edges = (1 - intensity) * lost

    # Sobel gradient comparison with the lost-edge pixels masked out.
    keep = 1 - lost
    loss_edge = F.l1_loss(torch.max(sobel(image_gray), sobel(image_ir)) * keep,
                          sobel(generate_img) * keep)

    # Content target: edge-removed intensity + inverted lost edges.
    loss_in = F.l1_loss(intensity * keep + inverted_edges, generate_img)

    return loss_edge * 10 + loss_in, loss_in, loss_edge

def edge_ls_v2(image_gray,image_ir,generate_img):
    """Same as edge_ls, except the Sobel term is NOT masked by the lost
    Canny edges: the full gradient maps are compared directly."""
    intensity = torch.max(image_gray, image_ir)

    # Lost Canny edges: in max(vis, ir) edge map but not in the fused one.
    lost = torch.clamp(
        torch.abs(torch.max(canny(image_gray)[1], canny(image_ir)[1]) - canny(intensity)[1]),
        min=0, max=1)
    loss_canny = F.l1_loss((1 - intensity) * lost, generate_img * lost)

    # Unmasked Sobel comparison (the difference from edge_ls).
    loss_sobel = F.l1_loss(torch.max(sobel(image_gray), sobel(image_ir)),
                           sobel(generate_img))

    loss_edge = 5 * loss_canny + loss_sobel

    # Intensity term restricted to the non-lost region.
    keep = 1 - lost
    loss_in = F.l1_loss(intensity * keep, generate_img * keep)

    return loss_edge * 10 + loss_in, loss_in, loss_edge

def edge_ls_sobel(image_gray,image_ir,generate_img):
    """Sobel-only edge loss with a soft "lost edge" mask.

    The mask is clamp(tanh(4.5 * (max(vis_grad, ir_grad) - fused_grad)), 0, 1).
    On the mask, the inverted intensity is matched against the generated
    image's *Sobel map* (unlike edge_ls_sobel_V0/V01, which use the image
    itself); off the mask, gradient maps are matched, and the intensity L1 is
    restricted to the off-mask region.
    Returns (total, intensity_loss, edge_loss).
    """
    joint_in = torch.max(image_gray, image_ir)
    gray_sobel = sobel(image_gray)
    ir_sobel = sobel(image_ir)
    gen_sobel = sobel(generate_img)
    joint_sobel = sobel(joint_in)
    max_sobel = torch.max(gray_sobel, ir_sobel)
    # torch.tanh replaces the deprecated F.tanh (identical values).
    diff_sobel = torch.clamp(torch.tanh(4.5 * (max_sobel - joint_sobel)), min=0, max=1)
    loss_rvedge = F.l1_loss((1 - joint_in) * diff_sobel, gen_sobel * diff_sobel)

    # Gradient match outside the soft mask.
    joint_edge = max_sobel * (1 - diff_sobel)
    gen_edge = gen_sobel * (1 - diff_sobel)
    loss_noedge = F.l1_loss(joint_edge, gen_edge)

    loss_edge = loss_noedge + loss_rvedge

    # Intensity term on the off-mask region.
    loss_in = F.l1_loss(joint_in * (1 - diff_sobel), generate_img * (1 - diff_sobel))

    loss_total = loss_edge * 10 + loss_in
    return loss_total, loss_in, loss_edge

def edge_ls_sobel_V0(image_gray,image_ir,generate_img):
    """Sobel-only edge loss, V0.

    Same soft mask as edge_ls_sobel — clamp(tanh(4.5 * (max_grad -
    fused_grad)), 0, 1) — but on the mask the inverted intensity is compared
    against the generated *image* rather than its Sobel map.
    Returns (total, intensity_loss, edge_loss).
    """
    joint_in = torch.max(image_gray, image_ir)
    gray_sobel = sobel(image_gray)
    ir_sobel = sobel(image_ir)

    joint_sobel = sobel(joint_in)
    max_sobel = torch.max(gray_sobel, ir_sobel)
    # torch.tanh replaces the deprecated F.tanh (identical values).
    diff_sobel = torch.clamp(torch.tanh(4.5 * (max_sobel - joint_sobel)), min=0, max=1)
    loss_rvedge = F.l1_loss((1 - joint_in) * diff_sobel, generate_img * diff_sobel)

    # Gradient match outside the soft mask.
    gen_sobel = sobel(generate_img)
    joint_edge = max_sobel * (1 - diff_sobel)
    gen_edge = gen_sobel * (1 - diff_sobel)
    loss_noedge = F.l1_loss(joint_edge, gen_edge)

    loss_edge = loss_noedge + loss_rvedge

    # Intensity term on the off-mask region.
    loss_in = F.l1_loss(joint_in * (1 - diff_sobel), generate_img * (1 - diff_sobel))

    loss_total = loss_edge * 10 + loss_in
    return loss_total, loss_in, loss_edge

def edge_ls_sobel_V01(image_gray,image_ir,generate_img):
    """Sobel-only edge loss, V01: identical to edge_ls_sobel_V0 except the
    soft-mask tanh gain is 8 instead of 4.5 (a sharper mask).
    Returns (total, intensity_loss, edge_loss).
    """
    joint_in = torch.max(image_gray, image_ir)
    gray_sobel = sobel(image_gray)
    ir_sobel = sobel(image_ir)

    joint_sobel = sobel(joint_in)
    max_sobel = torch.max(gray_sobel, ir_sobel)
    # torch.tanh replaces the deprecated F.tanh (identical values).
    diff_sobel = torch.clamp(torch.tanh(8 * (max_sobel - joint_sobel)), min=0, max=1)
    loss_rvedge = F.l1_loss((1 - joint_in) * diff_sobel, generate_img * diff_sobel)

    # Gradient match outside the soft mask.
    gen_sobel = sobel(generate_img)
    joint_edge = max_sobel * (1 - diff_sobel)
    gen_edge = gen_sobel * (1 - diff_sobel)
    loss_noedge = F.l1_loss(joint_edge, gen_edge)

    loss_edge = loss_noedge + loss_rvedge

    # Intensity term on the off-mask region.
    loss_in = F.l1_loss(joint_in * (1 - diff_sobel), generate_img * (1 - diff_sobel))

    loss_total = loss_edge * 10 + loss_in
    return loss_total, loss_in, loss_edge

def edge_ls_sobel_V1(image_gray,image_ir,generate_img):
    """Sobel-only edge loss, V1: same as edge_ls_sobel but the inverted-edge
    term is weighted by 5 inside the edge loss.
    Returns (total, intensity_loss, edge_loss).
    """
    joint_in = torch.max(image_gray, image_ir)
    gray_sobel = sobel(image_gray)
    ir_sobel = sobel(image_ir)
    gen_sobel = sobel(generate_img)
    joint_sobel = sobel(joint_in)
    max_sobel = torch.max(gray_sobel, ir_sobel)
    # torch.tanh replaces the deprecated F.tanh (identical values).
    diff_sobel = torch.clamp(torch.tanh(4.5 * (max_sobel - joint_sobel)), min=0, max=1)
    loss_rvedge = F.l1_loss((1 - joint_in) * diff_sobel, gen_sobel * diff_sobel)

    # Gradient match outside the soft mask.
    joint_edge = max_sobel * (1 - diff_sobel)
    gen_edge = gen_sobel * (1 - diff_sobel)
    loss_noedge = F.l1_loss(joint_edge, gen_edge)

    # Inverted-edge term weighted 5x (the difference from edge_ls_sobel).
    loss_edge = loss_noedge + 5 * loss_rvedge

    # Intensity term on the off-mask region.
    loss_in = F.l1_loss(joint_in * (1 - diff_sobel), generate_img * (1 - diff_sobel))

    loss_total = loss_edge * 10 + loss_in
    return loss_total, loss_in, loss_edge

#--------
#角度相似度
def angle(a, b):
    """Mean angular (cosine-embedding) loss between two batches of
    single-channel images.

    Args:
        a, b: tensors of shape (batch, H, W).

    Returns:
        Scalar tensor: mean over the batch of 1 - cosine_similarity(a_i, b_i)
        with each image flattened to a vector.
    """
    # reshape (not .view) so non-contiguous inputs — e.g. the channel slices
    # If[:, c, :, :] passed in by color_angle_loss — are accepted without a
    # contiguity error.
    a_flat = a.reshape(a.size(0), a.size(1) * a.size(2))
    b_flat = b.reshape(b.size(0), b.size(1) * b.size(2))

    # Target +1 for every pair: the loss reduces to mean(1 - cos_sim).
    # F.cosine_embedding_loss with its default reduction='mean' is equivalent
    # to the module-level CosineEmbeddingLoss(reduction='mean') instance.
    target = torch.ones(a.size(0), device=a_flat.device)
    return F.cosine_embedding_loss(a_flat, b_flat, target)

def color_angle_loss(If, vi_3):
    """Colour-angle loss: sum of the per-channel (R, G, B) angular losses
    between the fused image `If` and the visible image `vi_3`."""
    # One angular loss per colour channel.
    per_channel = [angle(If[:, c, :, :], vi_3[:, c, :, :]) for c in range(3)]
    summed = per_channel[0] + per_channel[1] + per_channel[2]
    # torch.mean of the scalar sum reproduces the original return exactly
    # (each angle() result is already reduced to a scalar).
    return torch.mean(summed)

#直方图损失
def histogram_norm_loss(input_tensor, target_tensor, num_bins=256):
    """Histogram L1 loss between two image tensors.

    Per channel, builds `num_bins`-bin histograms of input and target over a
    shared value range, L1-normalises each, and sums the absolute bin
    differences; the result is averaged over channels.

    Args:
        input_tensor:  image tensor of shape [b, c, h, w].
        target_tensor: image tensor of the same shape.
        num_bins:      number of histogram bins.

    Returns:
        Scalar tensor with the channel-averaged histogram loss.

    NOTE(review): torch.histc is not differentiable, so this term supplies no
    gradient — confirm that is intended if used as a training loss.
    """
    # The bin range is computed from the *whole* tensors (not per channel),
    # so it is loop-invariant: compute it once instead of once per channel.
    min_val = min(input_tensor.min(), target_tensor.min()).item()
    max_val = max(input_tensor.max(), target_tensor.max()).item()

    total_loss = 0.0
    for channel in range(input_tensor.size(1)):
        # Extract the current channel from both tensors.
        input_channel = input_tensor[:, channel, :, :]
        target_channel = target_tensor[:, channel, :, :]

        # Histogram each channel over the shared range.
        input_hist = torch.histc(input_channel, bins=num_bins, min=min_val, max=max_val)
        target_hist = torch.histc(target_channel, bins=num_bins, min=min_val, max=max_val)

        # Normalise to probability mass (epsilon guards against a zero sum).
        input_hist = input_hist / (input_hist.sum() + 1e-6)
        target_hist = target_hist / (target_hist.sum() + 1e-6)

        # Accumulate the per-channel L1 difference.
        total_loss += torch.sum(torch.abs(input_hist - target_hist))

    # Average over channels.
    total_loss /= input_tensor.size(1)

    return total_loss


def histogram_clamp_norm_loss(input_tensor, target_tensor, num_bins=256):
    """Histogram L1 loss between two image tensors, with both tensors first
    clamped to the [0, 1] range.

    Per channel, builds `num_bins`-bin histograms over a shared value range,
    L1-normalises each, and sums the absolute bin differences; the result is
    averaged over channels.

    Args:
        input_tensor:  image tensor of shape [b, c, h, w].
        target_tensor: image tensor of the same shape.
        num_bins:      number of histogram bins.

    Returns:
        Scalar tensor with the channel-averaged histogram loss.

    NOTE(review): torch.histc is not differentiable, so this term supplies no
    gradient — confirm that is intended if used as a training loss.
    """
    # Restrict both tensors to [0, 1].
    input_tensor = torch.clamp(input_tensor, 0, 1)
    target_tensor = torch.clamp(target_tensor, 0, 1)

    # The bin range is computed from the *whole* clamped tensors, so it is
    # loop-invariant: compute it once instead of once per channel.
    min_val = min(input_tensor.min(), target_tensor.min()).item()
    max_val = max(input_tensor.max(), target_tensor.max()).item()

    total_loss = 0.0
    for channel in range(input_tensor.size(1)):
        # Extract the current channel from both tensors.
        input_channel = input_tensor[:, channel, :, :]
        target_channel = target_tensor[:, channel, :, :]

        # Histogram each channel over the shared range.
        input_hist = torch.histc(input_channel, bins=num_bins, min=min_val, max=max_val)
        target_hist = torch.histc(target_channel, bins=num_bins, min=min_val, max=max_val)

        # Normalise to probability mass (epsilon guards against a zero sum).
        input_hist = input_hist / (input_hist.sum() + 1e-6)
        target_hist = target_hist / (target_hist.sum() + 1e-6)

        # Accumulate the per-channel L1 difference.
        total_loss += torch.sum(torch.abs(input_hist - target_hist))

    # Average over channels.
    total_loss /= input_tensor.size(1)

    return total_loss




def histogram_clamp_MSE(input_tensor, target_tensor, num_bins=256):
    """Histogram MSE loss between two image tensors, with both tensors first
    clamped to the [0, 1] range.

    Per channel, builds `num_bins`-bin normalised histograms over a shared
    value range and takes their mean-squared error; the result is averaged
    over channels.

    Args:
        input_tensor:  image tensor of shape [b, c, h, w].
        target_tensor: image tensor of the same shape.
        num_bins:      number of histogram bins.

    Returns:
        Scalar tensor with the channel-averaged histogram MSE.

    NOTE(review): torch.histc is not differentiable, so this term supplies no
    gradient — confirm that is intended if used as a training loss.
    """
    # Restrict both tensors to [0, 1].
    input_tensor = torch.clamp(input_tensor, 0, 1)
    target_tensor = torch.clamp(target_tensor, 0, 1)

    # The bin range is computed from the *whole* clamped tensors, so it is
    # loop-invariant: compute it once instead of once per channel.
    min_val = min(input_tensor.min(), target_tensor.min()).item()
    max_val = max(input_tensor.max(), target_tensor.max()).item()

    total_loss = 0.0
    for channel in range(input_tensor.size(1)):
        # Extract the current channel from both tensors.
        input_channel = input_tensor[:, channel, :, :]
        target_channel = target_tensor[:, channel, :, :]

        # Histogram each channel over the shared range.
        input_hist = torch.histc(input_channel, bins=num_bins, min=min_val, max=max_val)
        target_hist = torch.histc(target_channel, bins=num_bins, min=min_val, max=max_val)

        # Normalise to probability mass (epsilon guards against a zero sum).
        input_hist = input_hist / (input_hist.sum() + 1e-6)
        target_hist = target_hist / (target_hist.sum() + 1e-6)

        # Per-channel MSE between the normalised histograms.
        # F.mse_loss with its default reduction='mean' is equivalent to the
        # module-level MSELoss(reduction='mean') instance and keeps this
        # function consistent with the F.l1_loss style used elsewhere.
        channel_loss = F.mse_loss(input_hist, target_hist)
        total_loss += channel_loss

    # Average over channels.
    total_loss /= input_tensor.size(1)

    return total_loss
'''
def color_angle_loss(If, vi_3):
    # 计算两个张量的点积和模
    dot_product = (If * vi_3).sum(dim=1, keepdim=True)
    norm_if = If.norm(dim=1, keepdim=True)
    norm_vi_3 = vi_3.norm(dim=1, keepdim=True)
    
    # 计算夹角的余弦值
    cos_theta = dot_product / (norm_if * norm_vi_3)
    # 由于acos函数在cos值接近1时计算较慢，我们可以通过减去一个小的值来加速
    # 这个值需要足够小，不会对结果产生显著影响
    epsilon = 1e-8
    cos_theta = torch.clamp(cos_theta, -1.0 + epsilon, 1.0 - epsilon)
    theta = torch.acos(cos_theta)
    
    # 计算颜色角度损失，并对所有通道和空间位置求平均
    mean_angle_loss = torch.mean(theta)
    print('mean_angle_loss',mean_angle_loss)
    return mean_angle_loss

# 假设If和vi_3是形状为[N, 3, 480, 640]的PyTorch张量
# If = torch.randn(N, 3, 480, 640)
# vi_3 = torch.randn(N, 3, 480, 640)

# 计算颜色角度损失
# loss = color_angle_loss(If, vi_3)
'''
import torch
import numpy as np
import kornia as K
from kornia.filters import gaussian_blur2d, spatial_gradient
from kornia.losses import*

def scale_space_orientation_decomposition(image_tensor, scales= [7, 9, 11], orientations=[0, np.pi/4, np.pi/2]):
    """Decompose an image into oriented gradient responses at several
    Gaussian scales.

    For each kernel size in `scales` the image is Gaussian-blurred and its
    spatial gradient (dx, dy) is taken once; for each angle theta in
    `orientations`, the directional response cos(theta)*dx + sin(theta)*dy is
    passed through ReLU.  All responses are stacked along a new leading
    dimension, ordered scale-major then orientation.

    NOTE(review): the grayscale check tests shape[0] == 3, which for a 4-D
    (B, C, H, W) tensor is the *batch* dimension — confirm whether shape[1]
    (channels) was intended, as in wavelet_transform below.
    """
    # Collapse to a single channel when the leading dimension is 3.
    if image_tensor.shape[0] == 3:
        image_tensor = torch.mean(image_tensor, 0, True)
    
    features = []
    orientations_tensor = torch.tensor(orientations)
    # Scale-space decomposition.
    for scale in scales:
        # Gaussian blur at this scale.
        # NOTE(review): kernel_size is passed as a bare int; older kornia
        # releases require a (kh, kw) tuple — verify against the pinned version.
        blurred_image = gaussian_blur2d(image_tensor, kernel_size =scale,sigma= (1.5, 1.5))
        
        # The spatial gradient does not depend on the orientation, so compute
        # it once per scale instead of once per (scale, orientation) pair.
        dxy= spatial_gradient(blurred_image)
        dx=dxy[:,:,0,:,:]
        dy=dxy[:,:,1,:,:]
        
        # Orientation filtering via the directional derivative.
        for orientation in orientations_tensor:
            # Directional response: cos(theta) * dx + sin(theta) * dy.
            response = torch.cos(orientation) * dx + torch.sin(orientation) * dy
            
            # ReLU keeps only positive (filter-aligned) responses.
            filtered_image = torch.relu(response)
            
            features.append(filtered_image.unsqueeze(0))
    
    # Stack all responses along a new leading dimension.
    return torch.cat(features, dim=0)

# 读取图像并转换为PyTorch张量
# 注意：cv2.imread返回的是BGR格式，而PyTorch预期的是RGB

def vif_loss(img,ref): 
    """MSE between the scale/orientation feature decompositions of `img` and `ref`."""
    # Decompose both images into oriented multi-scale features, then compare.
    return MSELoss(scale_space_orientation_decomposition(img),
                   scale_space_orientation_decomposition(ref))

from kornia.filters import filter2d
from skimage.filters import gabor_kernel
frequency = np.pi/2.0  # Gabor carrier frequency used by chan_dcom_filt

def chan_dcom_filt(image_tensor, scales= [(7,7), (9,9), (11,11)], orientations=[0, np.pi/4, np.pi/2]):
    """Decompose an image by Gaussian smoothing at several scales followed by
    the real part of a Gabor filter at several orientations.

    Args:
        image_tensor: input tensor; shape[0] is used as the batch size.
        scales:       Gaussian kernel sizes (kh, kw) to apply.
        orientations: Gabor filter angles in radians (carrier frequency is
                      the module-level `frequency` constant).

    Returns:
        All filtered responses stacked along a new leading dimension, one
        entry per (scale, orientation) pair, scale-major.

    NOTE(review): the grayscale check tests shape[0] == 3, which for a 4-D
    (B, C, H, W) tensor is the *batch* dimension — confirm whether shape[1]
    (channels) was intended.
    """
    batch_size=image_tensor.shape[0]
    if image_tensor.shape[0] == 3:
        image_tensor = torch.mean(image_tensor, 0, True)
    
    # The Gabor kernels depend only on the orientation (the frequency is a
    # module constant), so build them once instead of once per
    # (scale, orientation) pair.  Only the real part of the complex kernel
    # is used; the imaginary part was never applied in the original code.
    kernels = [torch.tensor(gabor_kernel(frequency=frequency, theta=orientation)).real
               for orientation in orientations]
    
    features = []
    # Scale-space decomposition followed by orientation filtering.
    for scale in scales:
        blurred_image = gaussian_blur2d(image_tensor, kernel_size =scale,sigma= (1.5, 1.5))
        
        for kernel in kernels:
            filtered_real = filter2d(blurred_image, kernel.expand(batch_size, -1, -1))
            features.append(filtered_real.unsqueeze(0))
    
    # Stack all responses along a new leading dimension.
    return torch.cat(features, dim=0)

# 读取图像并转换为PyTorch张量
# 注意：cv2.imread返回的是BGR格式，而PyTorch预期的是RGB

def dcom_loss(img,ref,scales= [(7,7), (9,9), (11,11)]): 
    """MSE between the Gabor channel decompositions of `img` and `ref`."""
    # Decompose both images at the requested scales, then compare features.
    feats_img = chan_dcom_filt(img, scales)
    feats_ref = chan_dcom_filt(ref, scales)
    return MSELoss(feats_img, feats_ref)


import torch
import torch.nn.functional as F
from pywt import wavedec2  # 假设使用PyWavelets库进行小波变换

def wavelet_transform(image_tensor, wavelet='haar', level=1):
    """Multilevel 2-D wavelet decomposition (PyWavelets) of an image tensor.

    RGB input (channel dimension of size 3) is averaged to grayscale first.
    Returns PyWavelets' coefficient list [cA_n, (cH_n, cV_n, cD_n), ...] of
    numpy arrays — NOT a torch tensor.

    NOTE(review): `.numpy()` requires a CPU tensor with requires_grad=False,
    so this raises on CUDA/grad inputs and cannot back-propagate — confirm it
    is intended for evaluation only.
    """
    # Convert the image to grayscale (if it is RGB).
    if image_tensor.shape[1] == 3:
        image_tensor = torch.mean(image_tensor, 1, keepdim=True)
    
    # Apply the wavelet transform.
    coeffs = wavedec2(image_tensor.numpy(), wavelet, level=level)
    return coeffs

def chan_wavelet_filt(image_tensor, scales=[1, 2, 3], wavelet='haar'):
    """Multi-scale wavelet feature extraction.

    Downsamples the image for each entry of `scales` (area interpolation) and
    applies `wavelet_transform` at the matching decomposition level.

    NOTE(review): `wavelet_transform` returns PyWavelets' coefficient list
    (numpy arrays), so `features` becomes a list of lists and the final
    torch.cat call will raise a TypeError — this function looks unfinished;
    confirm it is unused before relying on it.
    """
    batch_size, channels, height, width = image_tensor.shape
    
    # Reduce RGB input to a single grayscale channel.
    if channels == 3:
        image_tensor = torch.mean(image_tensor, 1, keepdim=True)
    
    # Wavelet feature list, one entry per scale.
    features = []
    
    # Apply the wavelet transform at every scale.
    for scale in scales:
        # Resize the image according to the scale (factor 2**scale).
        scaled_height, scaled_width = height // (2 ** scale), width // (2 ** scale)
        resized_image = F.interpolate(image_tensor, size=(scaled_height, scaled_width), mode='area')
        
        # Wavelet-decompose the resized image.
        coeffs = wavelet_transform(resized_image, wavelet=wavelet, level=scale)
        
        # Collect the wavelet coefficients.
        features.append(coeffs)
    
    # Stack all features (see NOTE above: this cat fails on coefficient lists).
    return torch.cat(features, dim=1)

# 示例用法
# 假设img_tensor是您的输入图像张量
# wavelet_features = chan_wavelet_filt(img_tensor)

def dcom_lossV1(img,ref): 
    """MSE between the wavelet decompositions of `img` and `ref`.

    NOTE(review): `wavelet_transform` returns a list of numpy coefficient
    arrays, which nn.MSELoss cannot consume — as written this call raises;
    the coefficients would need to be converted to tensors first.  Confirm
    the intended usage.
    """
    fet_img=wavelet_transform(img)
    fet_reg=wavelet_transform(ref)
    loss=MSELoss(fet_img,fet_reg)
    return loss