
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from module.repa_module import conv_bn



class IlluNet(nn.Module):
    """Predict a per-pixel illumination map and use it to brighten the input.

    forward(x) returns clip(x / illumination, 0, 1) — a Retinex-style
    enhancement where the illumination map is the sigmoid conv output plus
    an identity shortcut, clamped away from zero so the division is safe.
    """

    # Channel-expansion factor for the conv branch (ResNet convention).
    # BUGFIX: the original referenced self.expansion on the line below
    # without ever defining it, raising AttributeError at construction.
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        """
        Args:
            in_planes: input channel count. Kept for interface compatibility;
                the identity shortcut implies in_planes == planes —
                TODO confirm with callers.
            planes: channel count consumed/produced by the conv branch.
            stride: unused; kept for signature compatibility.
        """
        super(IlluNet, self).__init__()

        # Identity residual connection.
        self.shortcut = nn.Identity()

        # conv_bn is a project-local conv+batchnorm helper; the Sigmoid keeps
        # the predicted illumination in (0, 1).
        self.conv2 = nn.Sequential(
            conv_bn(in_channels=planes, out_channels=self.expansion * planes,
                    kernel_size=3, stride=1, padding=1),
            nn.Sigmoid())

    def forward(self, x):
        """Return the enhanced image, same shape as x, values in [0, 1]."""
        # Illumination estimate: sigmoid(conv(x)) + x (residual).
        out = self.conv2(x)
        out = out + self.shortcut(x)

        # Clamp away from zero so the division below cannot blow up.
        out = torch.clamp(out, 0.0001, 1)
        raw_enhance = torch.clip(x / out, 0, 1)
        return raw_enhance



class IlluNet_with_Quad(nn.Module):
    """Wrap a (possibly pretrained) IlluNet and modulate its output with a
    learnable quadratic tone curve driven by a luma-deviation map.
    """

    def __init__(self, in_planes, planes, pre_weight, stride=1):
        """
        Args:
            in_planes, planes: channel configuration forwarded to IlluNet.
            pre_weight: path to a pretrained IlluNet state_dict, or None.
            stride: unused; kept for signature compatibility.
        """
        super(IlluNet_with_Quad, self).__init__()
        self.pre_weight_p = pre_weight
        self.illu_net = IlluNet(in_planes, planes)
        # Learnable coefficients of the quadratic mapping a*x^2 + b*x + c.
        # (requires_grad=True inside nn.Parameter was redundant — Parameters
        # require grad by default.)
        self.a = nn.Parameter(torch.tensor(0.6))
        self.b = nn.Parameter(torch.tensor(-1.3))
        self.c = nn.Parameter(torch.tensor(1.5))
        # BT.601 luma coefficients; the order suggests a BGR channel layout —
        # TODO confirm against the data pipeline. Registered as a
        # NON-persistent buffer so .to(device)/.cuda() moves it while the
        # state_dict keys stay unchanged (pretrained loading unaffected).
        self.register_buffer('y_coff',
                             torch.tensor([24.966, 128.553, 65.481]),
                             persistent=False)
        self.pool_scale = 4
        self.pool = nn.MaxPool2d(kernel_size=(self.pool_scale, self.pool_scale),
                                 stride=2, padding=self.pool_scale // 2)
        self.mapping_func = lambda x: self.a * x ** 2 + self.b * x + self.c
        self.load_illu_net()

    def load_illu_net(self):
        """Load pretrained IlluNet weights if a path was given; no-op otherwise."""
        if self.pre_weight_p is None:  # was `== None` — use identity test
            return
        # map_location='cpu' so loading works on CPU-only hosts;
        # load_state_dict copies onto the module's current device anyway.
        weight = torch.load(self.pre_weight_p, map_location='cpu')
        self.illu_net.load_state_dict(weight)

    def calculate_adapt_map(self, img):
        """Return a [0,1]-normalized map of per-pixel luma deviation from the
        batch-wide median luma.

        Args:
            img: (B, C, H, W) tensor; assumes C == 3 — TODO confirm.
        Returns:
            (B, 1, H, W) tensor, min-max normalized to [0, 1].
        """
        B, C, H, W = img.shape
        # Broadcast the 3-vector as (1,1,1,3,1) instead of materializing a
        # (B,H,W,3,1) repeat — matmul broadcasting gives the same result
        # without the large intermediate.
        coff = self.y_coff.to(img.device).view(1, 1, 1, 3, 1)
        # (B, H, W, 1, 3)
        img_trans = img.permute(0, 2, 3, 1).unsqueeze(3)

        img_y = img_trans @ coff + 16.0          # ITU-R style luma + offset
        img_y = img_y.squeeze(-1).permute(0, 3, 1, 2)   # (B, 1, H, W)

        # Median over the whole batch, not per image.
        y_median = torch.median(img_y)

        diff_img = img_y - y_median
        # NOTE(review): a constant-luma batch makes max == min here and
        # produces NaNs — confirm inputs always have luma variation.
        diff_img_norm = (diff_img - diff_img.min()) / (diff_img.max() - diff_img.min())

        return diff_img_norm

    def forward(self, x):
        """Enhance x: IlluNet output scaled by a clipped quadratic curve of
        the luma-deviation map. Returns values in [0, 1]."""
        b, c, h, w = x.shape

        raw_enhance = self.illu_net(x)
        diff_img_norm = self.calculate_adapt_map(x)

        # The following two lines can be optionally omitted to avoid the
        # risk of image blurring.
        diff_img_norm = self.pool(diff_img_norm)
        diff_img_norm = F.interpolate(diff_img_norm, size=(h, w))

        modulated_coff = self.mapping_func(diff_img_norm).double()

        # Cap the gain at 3.5, and force a gain of at least 1.0 wherever the
        # deviation map is bright (> 0.35). `&` on bool masks replaces the
        # original torch.bitwise_and — identical semantics, idiomatic form.
        modulated_coff = torch.where(modulated_coff > 3.5, 3.5, modulated_coff)
        modulated_coff = torch.where((diff_img_norm > 0.35) & (modulated_coff < 1.0),
                                     1.0, modulated_coff)

        enhance = torch.clip(raw_enhance * modulated_coff, 0, 1)

        return enhance