import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import scipy.stats as st

class SA(nn.Module):
    """Soft Attention: smooths an attention map with a learnable Gaussian kernel.

    A 2-D Gaussian kernel of shape (lamda, lamda) is generated once at
    construction and stored as ``self.guass_kernel`` (attribute name kept
    as-is for backward compatibility with existing callers). The forward
    pass blurs the attention map with this kernel, min-max normalizes the
    result, takes the element-wise maximum with the original attention,
    and uses that to gate ``x``.
    """

    def __init__(self, lamda, sigma, grad=True):
        """
        Args:
            lamda: kernel size (number of taps). Should be odd so that
                ``padding = lamda // 2`` preserves spatial size in forward().
            sigma: spread of the Gaussian used to generate the kernel.
            grad: whether the kernel receives gradient updates during training.
        """
        super(SA, self).__init__()
        kernel = self.gen_guass_kernel(lamda, sigma)
        # Reshape to (1, 1, lamda, lamda) so it is directly usable as an
        # F.conv2d weight on single-channel attention maps.
        kernel = torch.from_numpy(kernel).unsqueeze(0).unsqueeze(0)
        # Register as a Parameter; `grad` controls trainability.
        self.guass_kernel = nn.Parameter(kernel, requires_grad=grad)

    def forward(self, attention, x):
        """Gate ``x`` by a smoothed version of ``attention``.

        Args:
            attention: attention map of shape (N, 1, H, W) — must have one
                channel to match the kernel's single input channel.
            x: feature map to be gated, broadcast-compatible with attention.

        Returns:
            ``x`` scaled element-wise by ``max(normalized_blur, attention)``.
        """
        # Integer floor-division keeps padding an int; preserves H, W only
        # for odd kernel sizes (documented requirement on `lamda`).
        pad = self.guass_kernel.size(2) // 2
        soft_attention = F.conv2d(attention, self.guass_kernel, padding=pad)
        soft_attention = self.norm(soft_attention)
        # Explicit element-wise maximum (same semantics as the implicit
        # Tensor.max(other) form): keep the stronger of the smoothed and
        # original attention at each location.
        return torch.mul(x, torch.max(soft_attention, attention))

    def gen_guass_kernel(self, lamda, sigma):
        """Generate a normalized 2-D Gaussian kernel of shape (lamda, lamda).

        The 1-D profile is the per-interval probability mass obtained by
        differencing the normal CDF over ``lamda`` equal-width intervals
        spanning roughly [-sigma, sigma]; the square root of its outer
        product gives the 2-D kernel, normalized to sum to 1.
        """
        interval = (2 * sigma + 1) / lamda
        x = np.linspace(-sigma - interval / 2, sigma + interval / 2, lamda + 1)
        # Probability mass of each interval via CDF differences.
        kernel1d = np.diff(st.norm.cdf(x))
        # sqrt of the outer product, cast to float32 for torch interop.
        kernel = np.float32(np.sqrt(np.outer(kernel1d, kernel1d)))
        # Normalize so the kernel sums to 1.
        return kernel / kernel.sum()

    def norm(self, x):
        """Min-max normalize ``x`` per (batch, channel) over the H, W dims."""
        # Renamed from `max`/`min` to avoid shadowing the builtins.
        x_max = x.max(3)[0].max(2)[0].unsqueeze(2).unsqueeze(3).expand_as(x)
        x_min = x.min(3)[0].min(2)[0].unsqueeze(2).unsqueeze(3).expand_as(x)
        # Epsilon avoids division by zero when the map is constant.
        return (x - x_min) / (x_max - x_min + 1e-8)
    
if __name__ == '__main__':
    # Quick smoke check: build the module and inspect its Gaussian kernel.
    sa_module = SA(3, 4)
    kernel = sa_module.guass_kernel
    print(kernel)
    print(kernel.shape)