import math

import torch
from torch import nn
import torch.nn.functional as F


def self_attention(query, key, value, mask=None):
    """Scaled dot-product attention: softmax(Q·Kᵀ / sqrt(d_k)) · V.

    :param query: tensor (..., seq_q, d_k)
    :param key:   tensor (..., seq_k, d_k)
    :param value: tensor (..., seq_k, d_v)
    :param mask: optional tensor broadcastable to the score shape
        (..., seq_q, seq_k); positions where mask == 0 are suppressed.
    :return: tuple of (attended values (..., seq_q, d_v), attention weights).
    """
    d_k = query.size(-1)
    # Scale the raw scores by sqrt(d_k) to keep softmax gradients stable.
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Bug fix: the original called mask.cuda() and discarded the result
        # (a no-op that also crashed on CPU-only machines). Move the mask to
        # wherever the scores live instead.
        mask = mask.to(scores.device)
        # Fill masked (mask == 0) positions with -1e9 so softmax zeroes them.
        scores = scores.masked_fill(mask == 0, -1e9)
    self_attn = F.softmax(scores, dim=-1)
    return torch.matmul(self_attn, value), self_attn


class multi_head_attention(nn.Module):
    """Multi-head attention.

    Projects query/key/value into `head` parallel subspaces of size
    d_model // head, runs scaled dot-product attention in all heads at
    once, then concatenates the heads and projects back to d_model.
    """

    def __init__(self, d_model, head):
        """
        :param d_model: feature dimension of inputs and output
        :param head: number of attention heads; must divide d_model evenly
        """
        super(multi_head_attention, self).__init__()
        assert (d_model % head == 0)
        self.head = head
        self.d_model = d_model
        self.d_k = d_model // head  # per-head feature size
        self.linear_query = nn.Linear(d_model, d_model)
        self.linear_key = nn.Linear(d_model, d_model)
        self.linear_value = nn.Linear(d_model, d_model)
        self.linear_out = nn.Linear(d_model, d_model)
        self.attn = None  # attention weights from the most recent forward()

    def forward(self, query, key, value, mask=None):
        """
        :param query: tensor (batch, seq_q, d_model)
        :param key:   tensor (batch, seq_k, d_model)
        :param value: tensor (batch, seq_k, d_model)
        :param mask: optional mask over (seq_q, seq_k) score positions;
            a head axis is inserted so it broadcasts across all heads
        :return: tensor (batch, seq_q, d_model)
        """
        if mask is not None:
            # Insert a head dimension so one mask broadcasts over every head.
            mask = mask.unsqueeze(1)
        n_batch = query.size(0)
        # Project, then split the feature axis into (head, d_k) and move the
        # head axis forward so all heads are computed in one batched matmul.
        query = self.linear_query(query).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)
        key = self.linear_key(key).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)
        value = self.linear_value(value).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)

        # Bug fix: keep the returned attention weights instead of discarding
        # them, so self.attn (declared in __init__) is actually populated.
        x, self.attn = self_attention(query, key, value, mask=mask)
        # Concatenate heads: (batch, head, seq, d_k) -> (batch, seq, head*d_k).
        x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.head * self.d_k)

        return self.linear_out(x)


if __name__ == "__main__":
    # Quick shape sanity check for both attention implementations.
    batch, seq_len, d_model = 128, 64, 512
    features = torch.randn(batch, seq_len, d_model)  # (batch, context, feature)

    attended, _ = self_attention(features, features, features)  # (128, 64, 512)

    mha = multi_head_attention(d_model, 8)
    attended = mha(features, features, features)  # (128, 64, 512)
