import torch
import torch.nn as nn 
import torch.nn.functional as F


class ProductAttentionScore(nn.Module):
    """Scaled dot-product attention weights (no value aggregation).

    Computes softmax(q @ k^T / temperature) over the key axis and returns
    only the attention map, leaving the weighted sum over values to the
    caller.
    """

    def __init__(self, temperature):
        super().__init__()
        # Scaling factor; typically sqrt(head_dim).
        self.temperature = temperature

    def forward(self, q, k):
        # q, k: [batch, n_heads, seq_len, head_dim]
        # logits: [batch, n_heads, len_q, len_k]
        logits = torch.matmul(q / self.temperature, k.transpose(2, 3))
        return F.softmax(logits, dim=-1)

class gate(nn.Module):
    """GRU-style update gate that fuses two tensors along their last dim.

    Given the current scores ``s1`` and the prior scores ``s2``, applies the
    GRU equations (reset gate, candidate state, update gate) and returns a
    gated blend of ``s2`` and the candidate state.
    """

    def __init__(self, dim):
        """
        dim: size of the last dimension of both inputs (inputs are
             concatenated, so the linear layers map dim*2 -> dim).
        """
        super().__init__()
        self.dim = dim
        self.l1 = nn.Linear(dim * 2, dim)  # reset gate
        self.l2 = nn.Linear(dim * 2, dim)  # candidate state
        self.l3 = nn.Linear(dim * 2, dim)  # update gate
        self.sig = nn.Sigmoid()

    def forward(self, s1, s2):
        """Blend s1 (current) with s2 (prior); returns a tensor shaped like s2."""
        # Reset gate decides how much of the prior state enters the candidate.
        rt = self.sig(self.l1(torch.cat([s2, s1], dim=-1)))
        ht_1 = s2 * rt
        # Candidate state. torch.tanh replaces the deprecated F.tanh.
        ht_2 = torch.tanh(self.l2(torch.cat([ht_1, s1], dim=-1)))
        # Update gate. BUG FIX: the original reused self.l2 here, which tied the
        # update gate to the candidate projection and left self.l3 unused.
        zt = self.sig(self.l3(torch.cat([s2, s1], dim=-1)))
        ht = zt * s2 + (1 - zt) * ht_2
        return ht

class ResAttention(nn.Module):
    """Multi-head scaled dot-product attention whose attention map can be
    fused with the previous layer's map through a GRU-style ``gate``.

    Returns both the attended output and the (possibly gated) attention map
    so the map can be fed forward as ``prior_score`` to the next layer.
    """

    def __init__(self, emb_dim, len_q, n_heads, q_dim=None, v_dim=None, dropout_rate=0.1, device = None):
        """
        emb_dim: embedding dimension of the inputs.
        len_q: query sequence length; the gate's linear layers operate on
               attention-score rows of this size, so self-attention with
               len_k == len_q is assumed when prior_score is used.
        n_heads: number of attention heads.
        q_dim / v_dim: projection sizes for Q(K) and V; default to emb_dim.
        dropout_rate: dropout applied to the attention map before aggregation.
        device: device the gate submodule is moved to; defaults to cuda:0
                when available, else cpu.
        """
        super().__init__()
        self.device = device if device is not None else torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.emb_dim = emb_dim
        self.n_heads = n_heads

        self.q_dim = q_dim if q_dim is not None else emb_dim
        self.k_dim = self.q_dim
        self.v_dim = v_dim if v_dim is not None else emb_dim
        self.dropout = nn.Dropout(dropout_rate)

        # Projection matrices for q, k, v and the output projection.
        self.q_linear = nn.Linear(emb_dim, self.q_dim)
        self.k_linear = nn.Linear(emb_dim, self.k_dim)
        self.v_linear = nn.Linear(emb_dim, self.v_dim)
        self.fc = nn.Linear(self.v_dim, emb_dim)

        # Update gate fusing the current attention map with the prior one.
        self.gate = gate(len_q).to(self.device)

        assert (self.q_dim % self.n_heads == 0), "查询Q的最后维度不能整除n_heads, 还玩个屁。"
        assert (self.v_dim % self.n_heads == 0), "V的维度不能整除n_heads呀好哥哥, 长点心吧。"
        # Temperature sqrt(head_dim), per standard scaled dot-product attention.
        self.get_score = ProductAttentionScore((self.q_dim//n_heads)**0.5)

        self.head_dim1 = self.q_dim//self.n_heads
        self.head_dim2 = self.v_dim//self.n_heads

    def forward(self, q, k = None, v=None, prior_score = None):
        """
        q: [batch, len_q, emb_dim]; k/v default to q (self-attention).
        prior_score: optional attention map from the previous layer,
                     [batch, n_heads, len_q, len_k], fused via the gate.
        Returns (output [batch, len_q, emb_dim], score [batch, n_heads, len_q, len_k]).
        """
        sz_b, len_q = q.size(0), q.size(1)

        # BUG FIX: the original applied .view(...) only on the self-attention
        # branch of the conditional, so explicitly supplied k/v stayed 3-D and
        # broke the downstream transpose/matmul. Project and reshape uniformly.
        k_src = q if k is None else k
        v_src = q if v is None else v
        len_k, len_v = k_src.size(1), v_src.size(1)

        # [batch, len, n_heads, head_dim]
        q = self.q_linear(q).view(sz_b, len_q, self.n_heads, self.head_dim1)
        k = self.k_linear(k_src).view(sz_b, len_k, self.n_heads, self.head_dim1)
        v = self.v_linear(v_src).view(sz_b, len_v, self.n_heads, self.head_dim2)
        # [batch, n_heads, len, head_dim]
        q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
        score = self.get_score(q, k)
        # Fold in the previous layer's attention map, if any.
        if prior_score is not None:
            score = self.gate(score, prior_score)

        # output: [batch, len_q, v_dim] after merging heads.
        output = torch.matmul(self.dropout(score), v).transpose(1, 2).contiguous().view(sz_b, len_q, -1)
        output = self.fc(output)

        return output, score

class FeedForward(nn.Module):
    """Position-wise two-layer feed-forward block with residual + LayerNorm.

    Applies LayerNorm(x + Dropout(W2 relu(W1 x))), the post-norm transformer
    feed-forward sublayer.
    """

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_in, d_hid)  # position-wise expansion
        self.w_2 = nn.Linear(d_hid, d_in)  # position-wise projection
        self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        projected = self.dropout(self.w_2(hidden))
        # Residual connection followed by normalization (post-norm).
        return self.layer_norm(projected + x)

class ResAttenLayer(nn.Module):
    def __init__(self, emb_dim, len_q, resattenlayer, n_heads, q_dim=None, v_dim=None, 
                feed_hid=None, dropout_rate=0.1, device = None):
        """
        emb_dim: dimension of the flattened image features.
        len_q: query sequence length.
        resattenlayer: number of residual-attention layers; 1 means plain attention.
        n_heads: number of attention heads.
        q_dim: projection size for Q; must be divisible by n_heads.
        v_dim: projection size for V; must be divisible by n_heads.
        feed_hid: hidden size of the feed-forward sublayer, ideally > emb_dim.
                  NOTE(review): currently unused by this class — confirm intent.
        dropout_rate: dropout probability.
        device: cpu or gpu.
        Example:
        x = torch.rand((10, 6, 200))
        layer = ResAttenLayer(emb_dim=200, len_q=6, resattenlayer=3, n_heads=5, q_dim=200, v_dim=200)
        The final output has the same shape as x: [batch, channels, height*width].
        """
        super().__init__()
        self.resattenlayer = resattenlayer
        # One ResAttention per layer; simplified from the original
        # range(n-1)-plus-one construction, which built the same list.
        self.layer_stack = nn.ModuleList(
            ResAttention(emb_dim, len_q, n_heads, q_dim=q_dim, v_dim=v_dim,
                         dropout_rate=dropout_rate, device=device)
            for _ in range(self.resattenlayer)
        )

    def forward(self, x):
        # Thread each layer's attention map into the next layer's gate.
        # BUG FIX: the original had a typo (`socre`) so score was never
        # updated, and it passed score positionally into the `k` parameter
        # instead of `prior_score` — the residual mechanism never ran.
        score = None
        for layer in self.layer_stack:
            x, score = layer(x, prior_score=score)
        return x