import math
import torch
from torch import nn
from d2l import torch as d2l


# @save
def transpose_qkv(X, num_heads):
    """Reshape X so all attention heads can be computed in parallel.

    Args:
        X: tensor of shape (batch_size, no. of queries or key-value
            pairs, num_hiddens).
        num_heads: number of attention heads; must evenly divide
            num_hiddens.

    Returns:
        Tensor of shape (batch_size * num_heads, no. of queries or
        key-value pairs, num_hiddens / num_heads).

    Raises:
        ValueError: if num_hiddens is not divisible by num_heads.
    """
    # Explicit validation instead of `assert`, which is stripped
    # when Python runs with the -O flag.
    if X.shape[2] % num_heads != 0:
        raise ValueError(
            f'num_hiddens ({X.shape[2]}) must be divisible by '
            f'num_heads ({num_heads})')
    # (batch_size, no. of pairs, num_heads, num_hiddens / num_heads)
    X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)
    # (batch_size, num_heads, no. of pairs, num_hiddens / num_heads)
    X = X.permute(0, 2, 1, 3)
    # Fuse the batch and head axes so every head is treated as an
    # independent batch element by the attention computation.
    return X.reshape(-1, X.shape[2], X.shape[3])


# @save
# @save
def transpose_output(X, num_heads):
    """Reverse the reshaping performed by transpose_qkv.

    Takes a tensor of shape (batch_size * num_heads, no. of queries,
    num_hiddens / num_heads) and returns one of shape
    (batch_size, no. of queries, num_hiddens).
    """
    merged_batch, num_items, head_dim = X.shape
    # Split the fused batch/head axis back into separate axes.
    out = X.reshape(-1, num_heads, num_items, head_dim)
    # Move the head axis next to the per-head feature axis...
    out = out.permute(0, 2, 1, 3)
    # ...and fold the heads back into a single hidden dimension.
    return out.reshape(out.shape[0], out.shape[1], -1)


class MultiHeadAttention(nn.Module):
    """Multi-head attention built on scaled dot-product attention."""

    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 num_heads, dropout, bias=False, **kwargs):
        super().__init__(**kwargs)
        self.num_heads = num_heads
        # One scaled dot-product attention instance shared by all heads;
        # the heads are parallelized via reshaping, not separate modules.
        self.attention = d2l.DotProductAttention(dropout)
        # Learned linear projections for queries, keys, values and the
        # final output.
        self.W_q = nn.Linear(query_size, num_hiddens, bias=bias)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=bias)
        self.W_v = nn.Linear(value_size, num_hiddens, bias=bias)
        self.W_o = nn.Linear(num_hiddens, num_hiddens, bias=bias)

    def forward(self, queries, keys, values, valid_lens):
        # Project each input and split its hidden dimension across heads:
        # result shape is (batch_size * num_heads, no. of items,
        # num_hiddens / num_heads).
        queries = transpose_qkv(self.W_q(queries), self.num_heads)
        keys = transpose_qkv(self.W_k(keys), self.num_heads)
        values = transpose_qkv(self.W_v(values), self.num_heads)

        if valid_lens is not None:
            # Repeat each valid length num_heads times along axis 0 so
            # every head of a batch element masks the same positions.
            valid_lens = torch.repeat_interleave(
                valid_lens, repeats=self.num_heads, dim=0)

        # Masked scaled dot-product attention per head; shape
        # (batch_size * num_heads, no. of queries, num_hiddens / num_heads).
        per_head = self.attention(queries, keys, values, valid_lens)
        # Concatenate the heads back: (batch_size, no. of queries,
        # num_hiddens), then apply the output projection.
        concatenated = transpose_output(per_head, self.num_heads)
        return self.W_o(concatenated)

"""
Q torch.Size torch.Size([32, 7, 20])
K torch.Size torch.Size([32, 13, 12])
V torch.Size torch.Size([32, 13, 12])
"""
batch_size = 32
num_qvpairs = 20
num_kvpairs = 12
num_vvpairs = 12
num_hiddens, num_heads = 100, 4
attention = MultiHeadAttention(num_kvpairs, num_qvpairs, num_vvpairs,
                               num_hiddens, num_heads, 0.5)
attention.eval()
# 设置遮蔽注意力的 valid_lens 有效长度 70
valid_lens = torch.tensor([[i for i in range(1, 8)]] * batch_size)
Q = torch.ones((batch_size, 7, num_qvpairs))
K = torch.ones((batch_size, 13, num_kvpairs))
V = torch.ones((batch_size, 13, num_vvpairs))
print(attention(Q, K, V, valid_lens).shape)
# torch.Size([32, 7, 100])