import torch
import torch.nn as nn

from utils import clone_module, attention


class MultiHeadedAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    The feature dimension is split evenly across ``num_head`` parallel
    heads; attention runs per head and the head outputs are concatenated
    back to ``feature_dim`` before a final linear projection.
    """

    def __init__(self, num_head, feature_dim, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        # Every head gets an equal slice of the features: feature_dim / num_head.
        assert feature_dim % num_head == 0
        self.d_k = feature_dim // num_head  # per-head sub-vector width
        self.num_head = num_head
        self.attn = None  # populated with attention weights on each forward pass
        # Four identical projections: three for Q/K/V, one for the output.
        self.linears = clone_module(nn.Linear(feature_dim, feature_dim), 4)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        """Apply multi-head attention.

        query, key, value: [batch_size, seq_len, feature_dim]
        mask: [batch, seq_len, seq_len], optional
        Returns a tensor of shape [batch_size, seq_len, feature_dim].
        """
        if mask is not None:
            # [batch, seq_len, seq_len] -> [batch, 1, seq_len, seq_len]
            # so the same mask broadcasts across every head.
            mask = mask.unsqueeze(1)
        n_batch = query.size(0)
        n_seq = query.size(1)

        def split_heads(layer, tensor):
            # [batch, seq, feature] -> [batch, num_head, seq, d_k]
            projected = layer(tensor)
            return projected.view(n_batch, -1, self.num_head, self.d_k).transpose(1, 2)

        query = split_heads(self.linears[0], query)
        key = split_heads(self.linears[1], key)
        value = split_heads(self.linears[2], value)

        # Per-head attention; weights are cached on self.attn for inspection.
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)

        # Merge heads back: [batch, num_head, seq, d_k] -> [batch, seq, num_head*d_k]
        x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.num_head * self.d_k)
        assert n_seq == x.size(1)

        return self.linears[-1](x)


if __name__ == '__main__':

    from utils import subsequent_mask

    batch_size, seq_len = 3, 4
    feature_dim, num_head = 512, 2
    dropout = 0.2

    # --- masking sanity check: future positions get a large negative score ---
    scores = torch.ones(batch_size, num_head, seq_len, seq_len)
    causal = subsequent_mask(size=seq_len).unsqueeze(0)  # broadcast over batch/heads
    scores = scores.masked_fill(causal == 0, -1e9)
    print(scores)

    # --- MultiHeadedAttention smoke test (self-attention) ---
    self_attn = MultiHeadedAttention(num_head=num_head, feature_dim=feature_dim, dropout=dropout)
    inputs = torch.randn(batch_size, seq_len, feature_dim)
    query = key = value = inputs
    mask = subsequent_mask(size=seq_len)
    result = self_attn(query, key, value, mask)
    print(result.size())  # expect [batch_size, seq_len, feature_dim]
