import torch
import torch.nn as nn
from mam_fusion import MambaFusion
from my_ssim import SSIM
from my_loss import AverageGradientLoss
import torch.nn.functional as F

class L_color(nn.Module):
    """Chrominance-consistency loss.

    Converts both images to YCbCr and penalizes the L1 distance between
    their Cb and Cr channels, so the fused image keeps the visible image's
    color while the luminance is free to change.
    """

    def __init__(self):
        super().__init__()

    def forward(self, image_visible, image_fused):
        """Return the summed L1 loss over the Cb and Cr channels."""
        vis_ycbcr = self.rgb_to_ycbcr(image_visible)
        fus_ycbcr = self.rgb_to_ycbcr(image_fused)
        # Channels 1 and 2 are Cb and Cr; channel 0 (luma) is ignored.
        loss_color = sum(
            F.l1_loss(vis_ycbcr[:, ch, :, :], fus_ycbcr[:, ch, :, :])
            for ch in (1, 2)
        )
        return loss_color

    def rgb_to_ycbcr(self, image):
        """Convert an (N, 3, H, W) RGB tensor to YCbCr (BT.601 analog form).

        Cb/Cr are centered at 0 here (no +0.5 offset).
        """
        red = image[:, 0, :, :]
        green = image[:, 1, :, :]
        blue = image[:, 2, :, :]

        luma = 0.299 * red + 0.587 * green + 0.114 * blue
        chroma_b = -0.168736 * red - 0.331264 * green + 0.5 * blue
        chroma_r = 0.5 * red - 0.418688 * green - 0.081312 * blue

        return torch.stack((luma, chroma_b, chroma_r), dim=1)

class spation_frequency_fusion_model(nn.Module):
    """Visible/infrared image fusion wrapper around MambaFusion.

    Fuses the Y (luma) channel of the visible image with the first channel
    of the infrared image, and exposes an SSIM + gradient + max-intensity
    training loss. At inference time the fused luma is recombined with the
    visible image's chroma to produce an RGB result.
    """

    def __init__(self, device):
        super().__init__()
        self.mambafusion = MambaFusion(device=device)
        self.AverageGradientLoss = AverageGradientLoss()
        self.color_loss = L_color()
        # Single-channel SSIM: operates on the fused/reference luma planes.
        self.ssim_loss = SSIM(data_range=1., channel=1)

    def forward_loss(self, fusion, vis, ir):
        """Compute the three training losses.

        Args:
            fusion: fused single-channel image (N, 1, H, W) — assumed; TODO confirm.
            vis: visible luma channel (N, 1, H, W).
            ir: infrared image; only its first channel is used.

        Returns:
            Tuple of (ssim_loss, grad_loss, max_loss) scalar tensors.
        """
        ir = ir[:, 0:1, :, :]
        _, _, H, W = vis.shape

        # SSIM against both sources; each term is (1 - similarity).
        ssim_loss_temp_ir = self.ssim_loss(fusion.float(), ir.float())
        ssim_loss_temp_vis = self.ssim_loss(fusion.float(), vis.float())
        ssim_loss = ((1 - ssim_loss_temp_vis) + (1 - ssim_loss_temp_ir))

        # Pixel-wise maximum of the two sources as the intensity target,
        # normalized by spatial size (sum also runs over the batch dim).
        max_img = torch.max(ir, vis)
        max_loss = (torch.sum(torch.abs(max_img - fusion))) / (H * W)

        # Match the fused image's average gradient to the sum of the
        # sources' average gradients.
        x_grad = self.AverageGradientLoss(fusion)
        vis_grad = self.AverageGradientLoss(vis)
        ir_grad = self.AverageGradientLoss(ir)
        f_grad = (vis_grad + ir_grad)
        grad_loss = torch.sum(torch.abs(x_grad - f_grad)) / (H * W)
        return ssim_loss, grad_loss, max_loss

    def YCbCr2RGB(self, Y, Cb, Cr):
        """Recombine luma and chroma planes (each (N, 1, H, W)) into RGB.

        Inverse of RGB2YCrCb; output is clamped to [0, 1].
        """
        ycrcb = torch.cat([Y, Cr, Cb], dim=1)
        B, C, W, H = ycrcb.shape
        # Flatten to (N*H*W, 3) rows of (Y, Cr, Cb) for a single matmul.
        im_flat = ycrcb.transpose(1, 3).transpose(1, 2).reshape(-1, 3)
        # Build the conversion constants on the input's device AND dtype:
        # the original float32 CPU tensors crashed the matmul for
        # fp16/fp64 inputs and paid a host->device copy on every call.
        mat = torch.tensor([[1.0, 1.0, 1.0],
                            [1.403, -0.714, 0.0],
                            [0.0, -0.344, 1.773]],
                           device=Y.device, dtype=Y.dtype)
        bias = torch.tensor([0.0, -0.5, -0.5], device=Y.device, dtype=Y.dtype)
        temp = (im_flat + bias).mm(mat)
        out = temp.reshape(B, W, H, C).transpose(1, 3).transpose(2, 3)
        out = out.clamp(0, 1.0)
        return out

    def RGB2YCrCb(self, rgb_image):
        """Split an (N, 3, H, W) RGB tensor into Y, Cb, Cr planes.

        Chroma planes are offset by +0.5 and detached: gradients only flow
        through the luma channel during training.
        """
        R = rgb_image[:, 0:1]
        G = rgb_image[:, 1:2]
        B = rgb_image[:, 2:3]
        Y = 0.299 * R + 0.587 * G + 0.114 * B
        Cr = (R - Y) * 0.713 + 0.5
        Cb = (B - Y) * 0.564 + 0.5

        Y = Y.clamp(0.0, 1.0)
        Cr = Cr.clamp(0.0, 1.0).detach()
        Cb = Cb.clamp(0.0, 1.0).detach()
        return Y, Cb, Cr

    def forward(self, vis, ir, mode='train'):
        """Fuse `vis` (RGB) with `ir`.

        Returns the three training losses when mode == 'train', otherwise
        the fused RGB image.
        """
        Y_vi, Cb_vi, Cr_vi = self.RGB2YCrCb(vis)
        fusion = self.mambafusion(Y_vi, ir[:, 0:1, :, :])
        if mode == 'train':
            ssim_loss, grad_loss, max_loss = self.forward_loss(fusion, Y_vi, ir)
            return ssim_loss, grad_loss, max_loss
        else:
            rgb_fusion = self.YCbCr2RGB(fusion, Cb_vi, Cr_vi)
            return rgb_fusion