import torch
from torch import nn
from transformer.DotProductAttention import DotProductAttention


class MultiHeadAttention(nn.Module):
    """Multi-head attention.

    Projects queries, keys, and values into a shared `hidden_dims` space,
    splits the projections into `num_head` parallel heads, runs scaled
    dot-product attention per head, then merges the heads and projects the
    result back to `query_size`.

    Args:
        query_size: feature size of the incoming queries (also the output size).
        key_size: feature size of the incoming keys.
        value_size: feature size of the incoming values.
        num_head: number of attention heads; must evenly divide `hidden_dims`.
        hidden_dims: total projection width shared by q/k/v (split across heads).
        dropout: dropout rate passed to the attention scoring module.

    Raises:
        ValueError: if `hidden_dims` is not divisible by `num_head`.
    """

    def __init__(self, query_size, key_size, value_size, num_head, hidden_dims, dropout=0.5):
        super().__init__()
        # Fail fast with a clear message instead of a cryptic reshape error
        # inside transpose_qkv at forward time.
        if hidden_dims % num_head != 0:
            raise ValueError(
                f"hidden_dims ({hidden_dims}) must be divisible by num_head ({num_head})"
            )
        self.num_head = num_head

        self.w_q = nn.Linear(query_size, hidden_dims)
        self.w_k = nn.Linear(key_size, hidden_dims)
        self.w_v = nn.Linear(value_size, hidden_dims)
        # Output projection back to the query feature size.
        self.w_o = nn.Linear(hidden_dims, query_size)

        self.attention = DotProductAttention(dropout)

    def forward(self, queries, keys, values, valid_lens):
        """Run multi-head attention.

        Args:
            queries: (batch, num_queries, query_size) tensor.
            keys: (batch, num_kv, key_size) tensor.
            values: (batch, num_kv, value_size) tensor.
            valid_lens: optional per-example valid lengths used for masking,
                or None to attend over everything.

        Returns:
            (batch, num_queries, query_size) tensor.
        """
        # After transpose_qkv the leading dim is batch * num_head.
        queries = transpose_qkv(self.w_q(queries), self.num_head)
        keys = transpose_qkv(self.w_k(keys), self.num_head)
        values = transpose_qkv(self.w_v(values), self.num_head)

        # Repeat valid_lens once per head so it lines up with the
        # head-expanded batch dimension.
        if valid_lens is not None:
            valid_lens = torch.repeat_interleave(valid_lens, repeats=self.num_head, dim=0)

        result = self.attention(queries, keys, values, valid_lens)
        # Merge heads back, then apply the output projection.
        return self.w_o(transpose_out(result, self.num_head))


# 注意力机制分头
def transpose_qkv(x, num_head):
    """Split the last dimension into heads and fold heads into the batch dim.

    Input shape (batch, seq_len, hidden) becomes
    (batch * num_head, seq_len, hidden / num_head).
    """
    batch, seq_len, _ = x.shape
    per_head = x.view(batch, seq_len, num_head, -1)
    # Bring the head axis next to the batch axis, then collapse the two.
    per_head = per_head.transpose(1, 2)
    return per_head.reshape(batch * num_head, seq_len, -1)


# 从注意力头的形态还原
def transpose_out(x, num_head):
    """Inverse of transpose_qkv: merge per-head outputs back together.

    Input shape (batch * num_head, seq_len, depth) becomes
    (batch, seq_len, num_head * depth).
    """
    seq_len, depth = x.shape[1], x.shape[2]
    heads = x.reshape(-1, num_head, seq_len, depth)
    # Move the head axis after the sequence axis so heads concatenate
    # along the feature dimension.
    heads = heads.transpose(1, 2)
    return heads.reshape(heads.shape[0], seq_len, num_head * depth)
