
import torch 
import torch.nn as nn
import torch.nn.functional as F 
import math
from sparsemax import Sparsemax
"""
假设输入的维度是二维 每一行表示一个distance  
输出是一个sparse attention result  要保证梯度是可以反向传播的      
"""

class TemperingSoftmaxWithThreshold(nn.Module):
    """Softmax with temperature followed by threshold pruning.

    Computes softmax(x / t), zeroes every probability at or below
    ``threshold`` via ReLU, and renormalises the surviving mass so each
    row sums to 1 again. ReLU keeps the result differentiable (a.e.), so
    gradients can back-propagate through the surviving entries.
    """

    def __init__(self, t, threshold):
        """
        t: softmax temperature (> 0); smaller values sharpen the distribution.
        threshold: probabilities <= threshold are pruned to exactly 0.
        """
        super().__init__()
        self.t = t
        self.threshold = threshold
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        x_t = x.div(self.t)
        pb = self.softmax(x_t)
        pb_with_threshold = F.relu(pb - self.threshold)
        # Bug fix: keepdim=True keeps the row sums as (batch, 1) so the
        # division broadcasts per row; without it, (batch, n) / (batch,)
        # is wrong (or an error) whenever n != batch.
        # NOTE(review): if an entire row is pruned the sum is 0 and the row
        # becomes NaN — same as the original; confirm callers never prune all.
        pb_normalize = pb_with_threshold.div(pb_with_threshold.sum(dim=-1, keepdim=True))
        return pb_normalize

class GraduallyTemperingSoftmaxWithThreshold(nn.Module):
    """Thresholded tempered softmax whose temperature follows a schedule.

    Like TemperingSoftmaxWithThreshold, but the temperature is recomputed
    on every forward pass by calling ``gradually_sparse(self.training)``
    (e.g. a closure from ``exponential_decay``).
    """

    def __init__(self, gradually_sparse, threshold):
        """
        gradually_sparse: callable mapping ``training`` (bool) to the
            current temperature; advances its own schedule when training.
        threshold: probabilities <= threshold are pruned to exactly 0.
        """
        super().__init__()
        self.gradually_sparse = gradually_sparse
        self.threshold = threshold
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        # Refresh the temperature from the schedule each call.
        self.t = self.gradually_sparse(self.training)
        x_t = x.div(self.t)
        pb = self.softmax(x_t)
        pb_with_threshold = F.relu(pb - self.threshold)
        # Bug fix: keepdim=True keeps the row sums as (batch, 1) so the
        # renormalising division broadcasts per row (see
        # TemperingSoftmaxWithThreshold for details).
        pb_normalize = pb_with_threshold.div(pb_with_threshold.sum(dim=-1, keepdim=True))
        return pb_normalize

class GraduallyTopKSparseSoftmax(nn.Module):
    """Top-k sparse softmax whose keep-fraction follows a decay schedule.

    Bug fixes vs. the original:
    - the class did not inherit from ``nn.Module``, so ``self.training``
      did not exist and the object could not be called like a module;
    - ``forward`` read ``self.x`` instead of its argument ``x``;
    - the renormalising division used a sum without ``keepdim``, which
      broadcasts incorrectly for 2-D input.
    """

    def __init__(self, gradually_sparse, k):
        """
        gradually_sparse: callable mapping ``training`` (bool) to the
            current keep-fraction in (0, 1].
        k: initial k (kept for interface compatibility; ``forward``
            recomputes it from ``gradually_sparse`` on every call).
        """
        super().__init__()
        self.gradually_sparse = gradually_sparse
        self.k = k
        self.softmax = nn.Softmax(dim=-1)

    def generateMask(self, x, k):
        # 1 for entries strictly greater than the (k+1)-th largest of their row.
        return generate_topk_mask(x, k)

    def forward(self, x):
        n = x.shape[1]
        self.k = int(n * self.gradually_sparse(self.training))
        mask = self.generateMask(x, self.k)
        p = self.softmax(x)
        p_with_mask = mask * p
        # keepdim=True keeps the row sums as (batch, 1) so the division
        # broadcasts per row.
        return p_with_mask.div(p_with_mask.sum(dim=-1, keepdim=True))

def generate_topk_mask(v, k):
    """Binary mask selecting (up to) the top-k entries of each row of v.

    An entry is 1 iff it is strictly greater than the (k+1)-th largest
    value of its row — so with ties fewer than k entries may be selected.

    v: 2-D tensor of shape (batch, n).
    k: int with 0 < k < n.
    Raises ValueError for an out-of-range k. (The original used
    ``assert``, which is stripped under ``python -O`` and raised
    AssertionError with an exception instance as its message.)
    """
    if not (0 < k < v.shape[1]):
        raise ValueError("k is error")
    sort_v = torch.sort(v, dim=1, descending=True).values
    # (k+1)-th largest value per row, shaped (batch, 1) for broadcasting.
    topk = sort_v[:, k].reshape(-1, 1)
    mask = torch.zeros_like(v)
    mask[torch.gt(v, topk)] = 1
    return mask

class TopKSparseSoftmax(nn.Module):
    """Softmax restricted to each row's top-k logits via a 0/1 mask."""

    def __init__(self, k):
        """
        k: number of entries kept per row; must be an integer with
           0 < k < n (enforced by generate_topk_mask).
        """
        super().__init__()
        self.k = k
        self.softmax = nn.Softmax(dim=-1)

    def generateMask(self, x):
        # 1 for entries strictly above the (k+1)-th largest of their row.
        return generate_topk_mask(x, self.k)

    def forward(self, x):
        mask = self.generateMask(x)
        p = self.softmax(x)
        p_with_mask = mask * p
        # Bug fix: keepdim=True keeps the row sums as (batch, 1) so the
        # renormalising division broadcasts per row; without it the
        # division misbroadcasts (or errors) whenever n != batch.
        return p_with_mask.div(p_with_mask.sum(dim=-1, keepdim=True))

class Sparsemax(nn.Module):
    """Thin nn.Module wrapper around the third-party sparsemax operator."""

    def __init__(self):
        super().__init__()
        # Bug fix: this class shadows the module-level
        # ``from sparsemax import Sparsemax``, so ``Sparsemax(dim=1)`` here
        # resolved to this very class and failed (its __init__ takes no
        # ``dim``). Import the package explicitly to reach the real op.
        import sparsemax as _sparsemax_pkg
        self.sparsemax = _sparsemax_pkg.Sparsemax(dim=1)

    def forward(self, x):
        return self.sparsemax(x)

def meanSparsity(sparse_attention_weight):
    """Fraction of exactly-zero entries per row, averaged over the batch."""
    n_cols = sparse_attention_weight.shape[1]
    zeros_per_row = sparse_attention_weight.eq(0).sum(dim=-1)
    return (zeros_per_row / n_cols).mean()


def similarity_qk_cosine(q, ks):
    """Pairwise cosine similarity between query rows and key rows.

    q: (t, n) queries; ks: (m, n) keys. Returns (t, m) with entry [i, j]
    = q_i . ks_j / (||q_i|| ||ks_j||).

    Bug fix: the original computed ``qnormal @ ksnormal`` with both norms
    shaped (t, 1) and (m, 1), an invalid matmul for m > 1. The norm
    product must be the outer product (t, 1) * (1, m).
    """
    att_weight = F.linear(q, ks)  # (t, m) = q @ ks.T
    ks_norm = ks.square().sum(dim=1).sqrt().reshape(1, -1)  # (1, m)
    q_norm = q.square().sum(dim=1).sqrt().reshape(-1, 1)    # (t, 1)
    total_norm = q_norm * ks_norm  # (t, m) via broadcasting
    return att_weight.div(total_norm)

def similarity_qk_self_attention(q, ks):
    """Scaled dot-product attention scores: (q @ ks.T) / sqrt(d_m)."""
    d_m = q.shape[1]                # shared feature dimension
    scores = F.linear(q, ks)        # (t, m) = q @ ks.T
    return scores.div(math.sqrt(d_m))

def difference_zz_l1(z, rec_z):
    """Mean (over the batch) L1 distance between z and its reconstruction."""
    residual = z - rec_z
    return residual.abs().sum(dim=-1).mean()

def difference_zz_l2(z, rec_z):
    """Mean (over the batch) squared-L2 distance — note: no square root."""
    residual = z - rec_z
    return residual.square().sum(dim=-1).mean()

def difference_zz_linfinte(z, rec_z):
    """Mean (over the batch) L-infinity distance between z and rec_z.

    Bug fix: ``Tensor.max(dim=...)`` returns a (values, indices)
    namedtuple, so the original's ``.max(dim=-1).mean()`` raised at
    runtime; take ``.values`` before averaging.
    (Name keeps the original's spelling for caller compatibility.)
    """
    return (z - rec_z).abs().max(dim=-1).values.mean()

def difference_zz_cosine(z, rec_z):
    """Mean (over the batch) cosine similarity between z and rec_z rows.

    z, rec_z: (b, repr_dim).

    Bug fix: the original kept ``sim = z * rec_z`` elementwise, shape
    (b, d), then divided by the (b,)-shaped norm product — a broadcast
    error (and not a cosine). The dot product needs a sum over the
    feature dimension first.
    """
    sim = (z * rec_z).sum(dim=-1)                # (b,) row-wise dot products
    z_norm = z.pow(2).sum(dim=-1).sqrt()         # (b,)
    rec_z_norm = rec_z.pow(2).sum(dim=-1).sqrt() # (b,)
    return sim.div(z_norm * rec_z_norm).mean()

def difference_zz_jsdivergence(att,rec_att):
    """Placeholder: Jensen-Shannon divergence between two attention maps.

    TODO: not implemented yet; currently returns None.
    """
    pass

def difference_zz_mutualinformation(att,rec_att):
    """Placeholder: mutual information between two attention maps.

    TODO: not implemented yet; currently returns None.
    """
    pass

def exponential_decay(start, end, update_steps=1, temperature=20):
    """Closure-based exponential decay schedule from ``start`` toward ``end``.

    Returns ``get_value(training)``. Each call made with ``training=True``
    advances an internal call counter; every ``update_steps`` such calls
    the decay step advances by one. The returned value is
    ``(start - end) * exp(-step / temperature) + end``, so it starts at
    ``start`` and decays toward ``end``. Calls with ``training=False``
    only read the current value.
    """
    decay_step = 0
    call_count = 0

    def get_value(training):
        nonlocal decay_step, call_count
        if training:
            call_count += 1
            # Advance the decay step once per `update_steps` training calls.
            if call_count % update_steps == update_steps - 1:
                decay_step += 1
        return (start - end) * math.exp(-decay_step / temperature) + end

    return get_value

def linear_decay(start,end,update_steps):
    """Placeholder for a linear decay schedule (cf. exponential_decay).

    TODO: not implemented yet; currently returns None.
    """
    pass


# nn.Dropout