import numpy as np
import torch
from torch import nn


class PoswiseFeedForwardNet(nn.Module):
    """Position-wise feed-forward network.

    Applies the same two-layer MLP (d_model -> d_ff -> d_model, ReLU in
    between) independently at every position, followed by dropout, a
    residual connection and LayerNorm.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        """
        :param d_model: model (embedding) dimension of the input/output
        :param d_ff: hidden dimension of the inner expansion layer
        :param dropout: dropout probability applied after the projection back
        """
        super().__init__()
        # Expansion -> non-linearity -> projection back -> dropout,
        # e.g. 512 -> 2048 -> 512 in the original Transformer.
        self.fc = nn.Sequential(
            nn.Linear(d_model, d_ff, bias=False),
            nn.ReLU(),
            nn.Linear(d_ff, d_model, bias=False),
            nn.Dropout(dropout),
        )
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, inputs):
        """
        :param inputs: shape (batch, context_len, d_model)
        :return: shape (batch, context_len, d_model)
        """
        # Residual add then normalization, as in post-LN Transformers.
        transformed = self.fc(inputs)
        return self.layer_norm(transformed + inputs)


class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: Attention(Q, K, V) = softmax(QK'/sqrt(d_k)) V.

    Q: queries, K: keys, V: values; d_k is the query/key dimension,
    d_v is the value dimension.
    """

    def __init__(self, d_k, dropout):
        """
        :param d_k: dimension of queries and keys (used for the 1/sqrt(d_k) scaling)
        :param dropout: dropout probability applied to the attention weights
        """
        super().__init__()
        self.d_k = d_k
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, attention_mask):
        """
        :param q: shape (batch_size, n_heads, len_q, d_k)
        :param k: shape (batch_size, n_heads, len_k, d_k)
        :param v: shape (batch_size, n_heads, len_v, d_v)
        :param attention_mask: optional bool tensor with the same shape as the
            score matrix; True marks positions to be masked out
        :return: (context, attn) of shapes
            (batch_size, n_heads, len_q, d_v) and
            (batch_size, n_heads, len_q, len_k)
        """
        # Raw similarity scores: QK' scaled by sqrt(d_k)
        # -> shape (batch_size, n_heads, len_q, len_k).
        scale = np.sqrt(self.d_k)
        scores = torch.matmul(q, k.transpose(-1, -2)) / scale

        # Masked positions get a very large negative value so that their
        # softmax weight collapses to ~0 and the query ignores them.
        if attention_mask is not None:
            assert attention_mask.size() == scores.size()
            scores = scores.masked_fill(attention_mask, -1e9)

        # Normalize scores into attention weights, then regularize them.
        attn = self.dropout(self.softmax(scores))
        # Weighted sum of values -> (batch_size, n_heads, len_q, d_v).
        context = torch.matmul(attn, v)
        return context, attn


class MultiHeadAttention(nn.Module):
    """Multi-head attention.

    Projects the inputs into ``n_heads`` independent (q, k, v) subspaces,
    runs scaled dot-product attention per head, concatenates the heads,
    then applies a final linear projection, dropout, a residual connection
    and LayerNorm.
    """

    def __init__(self, d_model, n_heads, d_k, d_v, dropout):
        """
        :param d_model: feature size of each token embedding
        :param n_heads: number of attention heads
        :param d_k: per-head dimension of queries and keys
        :param d_v: per-head dimension of values
        :param dropout: dropout probability (used inside attention and on
            the output projection)
        """
        super(MultiHeadAttention, self).__init__()
        # w_q / w_k / w_v are plain linear layers that map the input to the
        # q, k, v of all heads at once.
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v

        self.scaled_dot_product_attention = ScaledDotProductAttention(self.d_k, dropout)
        self.w_q = nn.Linear(self.d_model, self.d_k * self.n_heads)
        self.w_k = nn.Linear(self.d_model, self.d_k * self.n_heads)
        self.w_v = nn.Linear(self.d_model, self.d_v * self.n_heads)

        self.linear = nn.Linear(self.d_v * self.n_heads, self.d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(self.d_model)

    def forward(self, q, k, v, attention_mask):
        """
        For self-attention, q, k, v are all the same position-encoded word
        embeddings (q = k = v). For cross-attention, k and v come from the
        encoder outputs while q comes from the masked-self-attention output
        of the decoder inputs.

        :param q: shape (batch_size, max_len, d_model)
        :param k: shape (batch_size, max_len, d_model)
        :param v: shape (batch_size, max_len, d_model)
        :param attention_mask: optional bool tensor,
            shape (batch_size, len_q, len_k); True marks positions to mask
        :return: (output, attention) with shapes
            (batch_size, max_len, d_model) and
            (batch_size, n_heads, len_q, len_k)
        """
        residual = q
        batch_size = q.size(0)

        # Split into heads. For self-attention q and k share dimension d_k
        # after the split. transpose(1, 2) moves the head axis forward:
        # (batch_size, n_heads, max_len, d_k).
        q_s = self.w_q(q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        k_s = self.w_k(k).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        # (batch_size, n_heads, max_len, d_v)
        v_s = self.w_v(v).view(batch_size, -1, self.n_heads, self.d_v).transpose(1, 2)

        # BUG FIX: `if attention_mask:` raises "Boolean value of Tensor with
        # more than one element is ambiguous" whenever a real mask tensor is
        # passed. Test against None instead, consistent with
        # ScaledDotProductAttention.
        if attention_mask is not None:
            # (batch_size, len_q, len_k) -> broadcast the same mask to every
            # head: (batch_size, n_heads, len_q, len_k).
            attention_mask = attention_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)

        # Per-head attention: new value vectors and attention weights.
        # context: (batch_size, n_heads, max_len, d_v)
        # attention: (batch_size, n_heads, len_q, len_k)
        context, attention = self.scaled_dot_product_attention(q_s, k_s, v_s, attention_mask)

        # Merge the heads back: (batch_size, max_len, n_heads * d_v),
        # then project to d_model and apply dropout.
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)
        output = self.dropout(self.linear(context))
        # output: (batch_size, max_len, d_model)

        # Residual add + LayerNorm.
        return self.layer_norm(output + residual), attention
