import torch
import torch.nn as nn
import torch.nn.functional as F

class AverageGradientLoss(nn.Module):
    """Per-pixel Sobel gradient-magnitude map.

    NOTE(review): despite the "Loss" suffix, ``forward`` returns a full
    gradient-magnitude map with the same shape as the input rather than a
    scalar — presumably the caller reduces/weights it; confirm at call sites.
    """

    def __init__(self):
        super(AverageGradientLoss, self).__init__()
        # 3x3 Sobel kernels, shaped (out_ch=1, in_ch=1, 3, 3) for F.conv2d.
        sobel_kernel_x = torch.tensor([[-1, 0, 1],
                                       [-2, 0, 2],
                                       [-1, 0, 1]], dtype=torch.float32).unsqueeze(0).unsqueeze(0)
        sobel_kernel_y = torch.tensor([[-1, -2, -1],
                                       [0, 0, 0],
                                       [1, 2, 1]], dtype=torch.float32).unsqueeze(0).unsqueeze(0)
        # Frozen Parameters (not buffers) so the kernels move with .to()/.cuda()
        # without being trainable; attribute names kept for state_dict compatibility.
        self.sobel_kernel_x = nn.Parameter(sobel_kernel_x, requires_grad=False)
        self.sobel_kernel_y = nn.Parameter(sobel_kernel_y, requires_grad=False)

    def forward(self, image):
        """Return the Sobel gradient magnitude of ``image`` (N, C, H, W).

        Bug fix: the magnitude is now sqrt(gx^2 + gy^2), consistent with
        EdgeStrengthLoss, instead of ``grad_x + grad_y``, which let
        horizontal and vertical responses of opposite sign cancel.
        """
        channel_maps = []
        for c in range(image.shape[1]):
            channel = image[:, c:c + 1, :, :]
            grad_x = F.conv2d(channel, self.sobel_kernel_x, padding=1)
            grad_y = F.conv2d(channel, self.sobel_kernel_y, padding=1)
            # Small constant keeps sqrt differentiable where the gradient vanishes.
            channel_maps.append(torch.sqrt(grad_x ** 2 + grad_y ** 2 + 1e-10))
        return torch.cat(channel_maps, dim=1)


class EdgeStrengthLoss(nn.Module):
    """Negative mean Sobel edge strength.

    Minimising this loss maximises the average gradient magnitude of the
    input, i.e. rewards images with strong edges.
    """

    def __init__(self):
        super(EdgeStrengthLoss, self).__init__()
        # Sobel kernels shaped (1, 1, 3, 3) for single-channel conv2d.
        kx = torch.tensor([[-1.0, 0.0, 1.0],
                           [-2.0, 0.0, 2.0],
                           [-1.0, 0.0, 1.0]]).reshape(1, 1, 3, 3)
        ky = torch.tensor([[-1.0, -2.0, -1.0],
                           [0.0, 0.0, 0.0],
                           [1.0, 2.0, 1.0]]).reshape(1, 1, 3, 3)
        # Frozen parameters so the kernels follow the module's device/dtype.
        self.sobel_kernel_x = nn.Parameter(kx, requires_grad=False)
        self.sobel_kernel_y = nn.Parameter(ky, requires_grad=False)

    def forward(self, image):
        """Return -(sum of per-channel mean gradient magnitude) / num_channels."""
        _, num_channels, height, width = image.shape
        pixel_count = height * width

        strength = 0.0
        for idx in range(num_channels):
            band = image[:, idx:idx + 1, :, :]
            gx = F.conv2d(band, self.sobel_kernel_x, padding=1)
            gy = F.conv2d(band, self.sobel_kernel_y, padding=1)
            # 1e-10 keeps sqrt differentiable where the gradient vanishes.
            magnitude = torch.sqrt(gx ** 2 + gy ** 2 + 1e-10)
            strength = strength + magnitude.sum() / pixel_count

        # Negative to convert maximization to minimization.
        return -strength / num_channels


class MutualInformationLoss(nn.Module):
    """Negative mutual information between two images, averaged over channels.

    NOTE: ``torch.histc`` is not differentiable, so no gradient flows through
    this module; it is only meaningful as an evaluation/monitoring term.
    """

    def __init__(self, num_bins=256, eps=1e-10):
        super(MutualInformationLoss, self).__init__()
        self.num_bins = num_bins  # intensity quantization levels per image
        self.eps = eps            # numerical floor for log() and normalization

    def forward(self, image1, image2):
        """Return -MI(image1, image2) in nats, averaged over channels.

        Inputs are assumed to lie in [0, 1] (values outside are clamped into
        the first/last bin) — TODO confirm with callers.
        """
        batch_size, num_channels, height, width = image1.size()
        total_mi = 0.0

        for c in range(num_channels):
            flat_image1 = image1[:, c, :, :].reshape(-1)
            flat_image2 = image2[:, c, :, :].reshape(-1)

            # Bug fix: quantize intensities to integer bin indices before
            # building the joint histogram. The previous code histogrammed
            # raw float values (range ~[0, num_bins + 1]) over num_bins**2
            # unit-width bins, collapsing nearly all mass into the first
            # histogram row and producing meaningless MI values.
            idx1 = torch.clamp((flat_image1 * self.num_bins).long(), 0, self.num_bins - 1)
            idx2 = torch.clamp((flat_image2 * self.num_bins).long(), 0, self.num_bins - 1)
            joint_idx = (idx1 * self.num_bins + idx2).float()

            joint_hist = torch.histc(joint_idx, bins=self.num_bins ** 2, min=0,
                                     max=self.num_bins ** 2)
            joint_hist = joint_hist.view(self.num_bins, self.num_bins)
            # Bug fix: normalize by sum(hist) + eps, not sum(hist + eps),
            # which inflated the denominator by eps * num_bins**2.
            joint_hist = joint_hist / (torch.sum(joint_hist) + self.eps)

            # Marginal distributions.
            hist1 = torch.sum(joint_hist, dim=1)
            hist2 = torch.sum(joint_hist, dim=0)

            # Entropies (natural log); eps avoids log(0) on empty bins.
            h1 = -torch.sum(hist1 * torch.log(hist1 + self.eps))
            h2 = -torch.sum(hist2 * torch.log(hist2 + self.eps))
            h12 = -torch.sum(joint_hist * torch.log(joint_hist + self.eps))

            # Mutual information for this channel.
            total_mi += h1 + h2 - h12

        avg_mi = total_mi / num_channels
        return -avg_mi  # Negative to convert maximization to minimization



def gaussian_pyramid(img, levels):
    """Build an average-pooling pyramid with ``levels`` entries.

    Level 0 is ``img`` itself; each subsequent level halves H and W via
    2x2 average pooling. (Despite the name, no Gaussian blur is applied.)
    """
    current = img
    pyramid = [current]
    for _ in range(levels - 1):
        current = F.avg_pool2d(current, kernel_size=2, stride=2)
        pyramid.append(current)
    return pyramid

class VIFLoss(nn.Module):
    """Negative multi-scale Visual Information Fidelity (VIF) score.

    Each channel is decomposed into an average-pooling pyramid; at every
    scale, local statistics under a 3x3 box filter feed the VIF
    information ratio. Higher fidelity => lower loss.
    """

    def __init__(self, levels=4, eps=1e-10):
        super(VIFLoss, self).__init__()
        self.levels = levels  # number of pyramid scales
        self.eps = eps        # numerical floor for clamps and divisions

    def forward(self, ref, dist):
        """Return -VIF(ref, dist), averaged over channels and scales."""
        _, num_channels, _, _ = ref.size()
        # 3x3 box filter used for all local statistics (same at every scale).
        box_kernel = torch.ones(1, 1, 3, 3, device=ref.device) / 9.0
        sigma_nsq = self.eps  # noise-variance term of the VIF model

        total = 0.0
        for c in range(num_channels):
            ref_levels = gaussian_pyramid(ref[:, c:c + 1, :, :], self.levels)
            dist_levels = gaussian_pyramid(dist[:, c:c + 1, :, :], self.levels)

            score = 0.0
            for ref_l, dist_l in zip(ref_levels, dist_levels):
                # Local means, variances and covariance under the box filter.
                mu_r = F.conv2d(ref_l, box_kernel, padding=1)
                mu_d = F.conv2d(dist_l, box_kernel, padding=1)
                var_r = F.conv2d(ref_l * ref_l, box_kernel, padding=1) - mu_r * mu_r
                var_d = F.conv2d(dist_l * dist_l, box_kernel, padding=1) - mu_d * mu_d
                cov_rd = F.conv2d(ref_l * dist_l, box_kernel, padding=1) - mu_r * mu_d

                # Guard against negative variances from floating-point error.
                var_r = torch.clamp(var_r, min=self.eps)
                var_d = torch.clamp(var_d, min=self.eps)

                gain = cov_rd / (var_r + self.eps)
                # Residual variance uses the *unclamped* gain, as before.
                noise_var = var_d - gain * cov_rd

                gain = torch.clamp(gain, min=0)
                noise_var = torch.clamp(noise_var, min=self.eps)

                info_num = torch.log10(1 + (var_r * gain * gain) / (noise_var + sigma_nsq))
                info_den = torch.log10(1 + var_r / sigma_nsq)
                score = score + torch.sum(info_num) / (torch.sum(info_den) + self.eps)

            total = total + score / len(ref_levels)

        # Negative to convert maximization to minimization.
        return -(total / num_channels)

class PSNRLoss(nn.Module):
    """Negative peak signal-to-noise ratio (PSNR) in dB.

    Minimising this loss maximises PSNR between the two inputs.
    """

    def __init__(self, max_val=1.0, eps=1e-10):
        super(PSNRLoss, self).__init__()
        self.max_val = max_val  # peak value of the input range (1.0 or 255)
        self.eps = eps          # floor so sqrt/log10 never see an exact zero

    def forward(self, img1, img2):
        """Return -PSNR(img1, img2)."""
        squared_error = (img1 - img2) ** 2
        mse = squared_error.mean() + self.eps
        # Negative to convert maximization to minimization.
        return -(20 * torch.log10(self.max_val / torch.sqrt(mse)))


class QabfLoss(nn.Module):
    """Negative fusion-quality index over local windows.

    For each channel, compares local contrast of the two source images
    against the fused image (Q1, Q2) and the cross-covariances of both
    sources with the fused image (Q3); returns -mean(Q1*Q2*Q3).
    """

    def __init__(self, window_size=11, eps=1e-10):
        super(QabfLoss, self).__init__()
        self.window_size = window_size  # side of the uniform averaging window
        self.eps = eps                  # numerical floor for sqrt/divisions
        self.window = self.create_window(window_size)

    def create_window(self, window_size):
        """Build a (1, 1, ws, ws) uniform averaging window (outer product of 1-D boxes)."""
        one_d = torch.ones(window_size) / window_size
        two_d = one_d.unsqueeze(1).mm(one_d.unsqueeze(0))
        return two_d.unsqueeze(0).unsqueeze(0)

    def forward(self, img1, img2, fused_img):
        """Return the negative quality index, averaged over channels."""
        _, num_channels, _, _ = img1.size()
        window = self.window.to(img1.device)
        pad = self.window_size // 2

        def box(t):
            # Local mean under the uniform window.
            return F.conv2d(t, window, padding=pad, groups=1)

        total = 0.0
        for c in range(num_channels):
            a = img1[:, c:c + 1, :, :]
            b = img2[:, c:c + 1, :, :]
            f = fused_img[:, c:c + 1, :, :]

            mu_a, mu_b, mu_f = box(a), box(b), box(f)

            # Local standard deviations (eps keeps sqrt away from zero).
            sd_a = torch.sqrt(box(a * a) - mu_a * mu_a + self.eps)
            sd_b = torch.sqrt(box(b * b) - mu_b * mu_b + self.eps)
            sd_f = torch.sqrt(box(f * f) - mu_f * mu_f + self.eps)

            # Local covariances.
            cov_ab = box(a * b) - mu_a * mu_b  # kept for parity; unused by the Q-maps below
            cov_af = box(a * f) - mu_a * mu_f
            cov_bf = box(b * f) - mu_b * mu_f

            # Guard against divisions by (near-)zero deviations.
            sd_a = torch.clamp(sd_a, min=self.eps)
            sd_b = torch.clamp(sd_b, min=self.eps)
            sd_f = torch.clamp(sd_f, min=self.eps)

            q1 = (2 * sd_a * sd_f + self.eps) / (sd_a ** 2 + sd_f ** 2 + self.eps)
            q2 = (2 * sd_b * sd_f + self.eps) / (sd_b ** 2 + sd_f ** 2 + self.eps)
            q3 = (cov_af * cov_bf + self.eps) / (sd_a * sd_b + self.eps)

            total = total + torch.mean(q1 * q2 * q3)

        # Negative to convert maximization to minimization.
        return -(total / num_channels)


class EntropyLoss(nn.Module):
    """Negative Shannon entropy (in nats) of an image, averaged over channels.

    NOTE: ``torch.histc`` is not differentiable, so no gradient flows through
    this module.
    """

    def __init__(self, bins=256):
        super(EntropyLoss, self).__init__()
        self.bins = bins  # histogram resolution

    def forward(self, image):
        """Return -H(image); the input is clamped to [0, 1] first."""
        image = torch.clamp(image, 0.0, 1.0)
        batch_size, num_channels, _, _ = image.size()

        total = 0.0
        for idx in range(num_channels):
            flat = image[:, idx, :, :].view(batch_size, -1)
            counts = torch.histc(flat, bins=self.bins, min=0, max=1)
            probs = counts / torch.sum(counts)  # empirical distribution
            # 1e-10 guards log(0) for empty bins.
            total += -torch.sum(probs * torch.log(probs + 1e-10))

        # Negative to convert maximization to minimization.
        return -(total / num_channels)



class SSIMLoss(nn.Module):
    """Single-scale SSIM computed with a separable Gaussian window.

    NOTE(review): despite the "Loss" name, ``forward`` returns the per-sample
    SSIM index (higher is better), not ``1 - SSIM`` — confirm callers negate
    or invert it as needed.
    """

    def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False):
        '''
        :param window_size: the size of gauss kernel
        :param window_sigma: sigma of normal distribution
        :param data_range: value range of input images. (usually 1.0 or 255)
        :param channel: input channels (default: 3); must match the channel dim of the inputs
        :param use_padding: padding image before conv
        '''
        super().__init__()
        assert window_size % 2 == 1, 'Window size must be odd.'
        window = self.create_window(window_size, window_sigma, channel)
        # Bug fix: register the kernel as a buffer instead of a plain attribute
        # so it follows the module across .to()/.cuda()/.half(); persistent=False
        # keeps it out of state_dict, so existing checkpoints still load.
        self.register_buffer('window', window, persistent=False)
        self.data_range = data_range
        self.use_padding = use_padding

    def create_window(self, window_size: int, sigma: float, channel: int):
        '''
        Create 1-D gauss kernel, shaped (channel, 1, 1, window_size) for grouped conv.
        :param window_size: the size of gauss kernel
        :param sigma: sigma of normal distribution
        :param channel: input channel
        :return: 1D kernel
        '''
        coords = torch.arange(window_size, dtype=torch.float)
        coords -= window_size // 2

        # Unnormalized Gaussian, then normalize so weights sum to 1.
        g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
        g /= g.sum()

        g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1)
        return g

    def _gaussian_filter(self, x, window_1d, use_padding: bool):
        '''
        Blur input with 1-D kernel, applied horizontally then vertically
        (separable 2-D Gaussian in two cheap passes).
        :param x: batch of tensors to be blured
        :param window_1d: 1-D gauss kernel
        :param use_padding: padding image before conv
        :return: blured tensors
        '''
        C = x.shape[1]
        padding = 0
        if use_padding:
            window_size = window_1d.shape[3]
            padding = window_size // 2
        out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C)
        out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C)
        return out

    def ssim(self, X, Y, window, data_range: float, use_padding: bool = False):
        '''
        Calculate ssim index for X and Y
        :param X: images
        :param Y: images
        :param window: 1-D gauss kernel
        :param data_range: value range of input images. (usually 1.0 or 255)
        :param use_padding: padding image before conv
        :return: (ssim per sample, contrast-structure term per sample)
        '''

        K1 = 0.01
        K2 = 0.03
        compensation = 1.0

        # Standard SSIM stabilizing constants, scaled by the dynamic range.
        C1 = (K1 * data_range) ** 2
        C2 = (K2 * data_range) ** 2

        mu1 = self._gaussian_filter(X, window, use_padding)
        mu2 = self._gaussian_filter(Y, window, use_padding)
        sigma1_sq = self._gaussian_filter(X * X, window, use_padding)
        sigma2_sq = self._gaussian_filter(Y * Y, window, use_padding)
        sigma12 = self._gaussian_filter(X * Y, window, use_padding)

        mu1_sq = mu1.pow(2)
        mu2_sq = mu2.pow(2)
        mu1_mu2 = mu1 * mu2

        sigma1_sq = compensation * (sigma1_sq - mu1_sq)
        sigma2_sq = compensation * (sigma2_sq - mu2_sq)
        sigma12 = compensation * (sigma12 - mu1_mu2)

        cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
        # Fixed the issue that the negative value of cs_map caused ms_ssim to output Nan.
        cs_map = F.relu(cs_map)
        ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map

        ssim_val = ssim_map.mean(dim=(1, 2, 3))  # reduce along CHW
        cs = cs_map.mean(dim=(1, 2, 3))

        return ssim_val, cs

    def forward(self, X, Y):
        '''Return the SSIM index for each sample in the batch (shape (N,)).'''
        r = self.ssim(X, Y, window=self.window, data_range=self.data_range, use_padding=self.use_padding)
        return r[0]




class MS_SSIMLoss(nn.Module):
    """Multi-scale SSIM (MS-SSIM) computed with a separable Gaussian window.

    NOTE(review): despite the "Loss" name, ``forward`` returns the MS-SSIM
    similarity (higher is better), not ``1 - MS-SSIM`` — confirm callers
    negate or invert it as needed.
    """

    def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False, weights=None, levels=None, eps=1e-8):
        '''
        class for ms-ssim
        :param window_size: the size of gauss kernel
        :param window_sigma: sigma of normal distribution
        :param data_range: value range of input images. (usually 1.0 or 255)
        :param channel: input channels
        :param use_padding: padding image before conv
        :param weights: weights for different levels. (default [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
        :param levels: number of downsampling
        :param eps: Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf.
        '''
        super().__init__()
        assert window_size % 2 == 1, 'Window size must be odd.'
        self.data_range = data_range
        self.use_padding = use_padding
        self.eps = eps

        window = self.create_window(window_size, window_sigma, channel)
        self.register_buffer('window', window)

        if weights is None:
            weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
        weights = torch.tensor(weights, dtype=torch.float)

        # Truncate and renormalize when fewer levels are requested.
        if levels is not None:
            weights = weights[:levels]
            weights = weights / weights.sum()

        self.register_buffer('weights', weights)

    def create_window(self, window_size: int, sigma: float, channel: int):
        '''
        Create 1-D gauss kernel, shaped (channel, 1, 1, window_size) for grouped conv.
        :param window_size: the size of gauss kernel
        :param sigma: sigma of normal distribution
        :param channel: input channel
        :return: 1D kernel
        '''
        coords = torch.arange(window_size, dtype=torch.float)
        coords -= window_size // 2

        # Unnormalized Gaussian, then normalize so weights sum to 1.
        g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
        g /= g.sum()

        g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1)
        return g

    def _gaussian_filter(self, x, window_1d, use_padding: bool):
        '''
        Blur input with 1-D kernel, applied horizontally then vertically
        (separable 2-D Gaussian in two cheap passes).

        Bug fix: this helper was missing from MS_SSIMLoss (only SSIMLoss
        defined it), so ssim() raised AttributeError on every call.
        :param x: batch of tensors to be blured
        :param window_1d: 1-D gauss kernel
        :param use_padding: padding image before conv
        :return: blured tensors
        '''
        C = x.shape[1]
        padding = 0
        if use_padding:
            window_size = window_1d.shape[3]
            padding = window_size // 2
        out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C)
        out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C)
        return out

    def ms_ssim(self, X, Y, window, data_range: float, weights, use_padding: bool = False, eps: float = 1e-8):
        '''
        interface of ms-ssim
        :param X: a batch of images, (N,C,H,W)
        :param Y: a batch of images, (N,C,H,W)
        :param window: 1-D gauss kernel
        :param data_range: value range of input images. (usually 1.0 or 255)
        :param weights: weights for different levels
        :param use_padding: padding image before conv
        :param eps: use for avoid grad nan.
        :return: MS-SSIM per sample
        '''
        weights = weights[:, None]

        levels = weights.shape[0]
        vals = []
        for i in range(levels):
            ss, cs = self.ssim(X, Y, window=window, data_range=data_range, use_padding=use_padding)

            # Intermediate levels contribute the contrast-structure term;
            # only the coarsest level contributes the full SSIM.
            if i < levels - 1:
                vals.append(cs)
                X = F.avg_pool2d(X, kernel_size=2, stride=2, ceil_mode=True)
                Y = F.avg_pool2d(Y, kernel_size=2, stride=2, ceil_mode=True)
            else:
                vals.append(ss)

        vals = torch.stack(vals, dim=0)
        # Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf.
        vals = vals.clamp_min(eps)
        # The origin ms-ssim op.
        ms_ssim_val = torch.prod(vals[:-1] ** weights[:-1] * vals[-1:] ** weights[-1:], dim=0)
        return ms_ssim_val

    def ssim(self, X, Y, window, data_range: float, use_padding: bool = False):
        '''
        Calculate ssim index for X and Y
        :param X: images
        :param Y: images
        :param window: 1-D gauss kernel
        :param data_range: value range of input images. (usually 1.0 or 255)
        :param use_padding: padding image before conv
        :return: (ssim per sample, contrast-structure term per sample)
        '''

        K1 = 0.01
        K2 = 0.03
        compensation = 1.0

        # Standard SSIM stabilizing constants, scaled by the dynamic range.
        C1 = (K1 * data_range) ** 2
        C2 = (K2 * data_range) ** 2

        mu1 = self._gaussian_filter(X, window, use_padding)
        mu2 = self._gaussian_filter(Y, window, use_padding)
        sigma1_sq = self._gaussian_filter(X * X, window, use_padding)
        sigma2_sq = self._gaussian_filter(Y * Y, window, use_padding)
        sigma12 = self._gaussian_filter(X * Y, window, use_padding)

        mu1_sq = mu1.pow(2)
        mu2_sq = mu2.pow(2)
        mu1_mu2 = mu1 * mu2

        sigma1_sq = compensation * (sigma1_sq - mu1_sq)
        sigma2_sq = compensation * (sigma2_sq - mu2_sq)
        sigma12 = compensation * (sigma12 - mu1_mu2)

        cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
        # Fixed the issue that the negative value of cs_map caused ms_ssim to output Nan.
        cs_map = F.relu(cs_map)
        ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map

        ssim_val = ssim_map.mean(dim=(1, 2, 3))  # reduce along CHW
        cs = cs_map.mean(dim=(1, 2, 3))

        return ssim_val, cs

    def forward(self, X, Y):
        '''Return the MS-SSIM similarity for each sample in the batch (shape (N,)).'''
        return self.ms_ssim(X, Y, window=self.window, data_range=self.data_range, weights=self.weights,
                       use_padding=self.use_padding, eps=self.eps)
