import torch
import torch.nn as nn
from torch.nn import functional


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention (Vaswani et al., "Attention Is All You Need").

    Queries/keys/values are projected with bias-free linear layers, split into
    ``num_heads`` parallel heads, combined as ``softmax(Q K^T / sqrt(d_head)) V``
    per head, then merged back and passed through a final output projection.
    None of the projections change the rank of the input, only its last dimension.
    """

    def __init__(self, num_heads: int = 2, input_depth: int = 768, total_key_depth: int = 768,
                 total_value_depth: int = 768, output_depth: int = 768, dropout: float = 0.1):
        """
        Args:
            num_heads: number of attention heads; must divide both depth totals.
            input_depth: feature size of the incoming queries/keys/values.
            total_key_depth: combined (all-heads) projection size for queries and keys.
            total_value_depth: combined (all-heads) projection size for values.
            output_depth: feature size of the returned tensor.
            dropout: dropout probability applied to the attention weights.

        Raises:
            ValueError: if a depth total is not divisible by ``num_heads``.
        """
        super().__init__()
        # Raise instead of assert: asserts are stripped under `python -O`,
        # silently allowing a misconfigured module.
        if total_key_depth % num_heads != 0:
            raise ValueError("total_key_depth must be divisible by num_heads")
        if total_value_depth % num_heads != 0:
            raise ValueError("total_value_depth must be divisible by num_heads")

        self.num_heads = num_heads
        # 1/sqrt(d_head) — standard scaling that keeps the logits in a range
        # where softmax stays well-behaved (0.0510... for the defaults).
        self.query_scale = (total_key_depth // num_heads) ** -0.5

        # Bias-free projections, as in the original Transformer formulation.
        self.query_linear = nn.Linear(input_depth, total_key_depth, bias=False)
        self.key_linear = nn.Linear(input_depth, total_key_depth, bias=False)
        self.value_linear = nn.Linear(input_depth, total_value_depth, bias=False)
        self.output_linear = nn.Linear(total_value_depth, output_depth, bias=False)

        self.dropout = nn.Dropout(dropout)

    def _split_heads(self, x):
        """Reshape [batch, seq, depth] -> [batch, num_heads, seq, depth // num_heads].

        Raises:
            ValueError: if ``x`` is not rank 3.
        """
        if len(x.shape) != 3:
            raise ValueError("x must have rank 3")
        batch, seq, depth = x.shape
        # view() carves the last dimension into num_heads slices; permute moves
        # the head axis in front of the sequence axis.
        return x.view(batch, seq, self.num_heads, depth // self.num_heads).permute(0, 2, 1, 3)

    def _merge_heads(self, x):
        """Reshape [batch, num_heads, seq, head_depth] -> [batch, seq, num_heads * head_depth].

        Inverse of :meth:`_split_heads`.

        Raises:
            ValueError: if ``x`` is not rank 4.
        """
        if len(x.shape) != 4:
            raise ValueError("x must have rank 4")
        batch, _, seq, head_depth = x.shape
        # contiguous() is required before view() because permute() returns a
        # non-contiguous view.
        return x.permute(0, 2, 1, 3).contiguous().view(batch, seq, head_depth * self.num_heads)

    def forward(self, queries, keys, values, mask=None):
        """Compute multi-head attention.

        Args:
            queries: float tensor [batch, query_len, input_depth].
            keys:    float tensor [batch, key_len, input_depth].
            values:  float tensor [batch, key_len, input_depth].
            mask: optional boolean tensor broadcastable to
                [batch, num_heads, query_len, key_len]; True entries are
                excluded from attention. Default None keeps the original
                unmasked behavior.

        Returns:
            Tensor of shape [batch, query_len, output_depth].
        """
        queries = self._split_heads(self.query_linear(queries))  # [b, h, q_len, d_head]
        keys = self._split_heads(self.key_linear(keys))          # [b, h, k_len, d_head]
        values = self._split_heads(self.value_linear(values))    # [b, h, k_len, d_head]

        # Scale out-of-place: the original `queries *= ...` mutated a permute()
        # view in place, which is fragile under autograd.
        queries = queries * self.query_scale

        # [batch, num_heads, query_len, key_len] attention logits.
        logits = torch.matmul(queries, keys.transpose(-2, -1))
        if mask is not None:
            # Masked positions get the most negative representable value so
            # their softmax weight is effectively zero (dtype-safe, unlike -1e18).
            logits = logits.masked_fill(mask, torch.finfo(logits.dtype).min)

        # Softmax over the key axis: each query's weights sum to 1.
        weights = functional.softmax(logits, dim=-1)
        weights = self.dropout(weights)

        contexts = torch.matmul(weights, values)  # [batch, num_heads, query_len, d_head]
        contexts = self._merge_heads(contexts)    # [batch, query_len, total_value_depth]
        return self.output_linear(contexts)       # [batch, query_len, output_depth]


if __name__ == '__main__':
    # Sanity checks for the attention scaling factor and batched matmul shapes.
    print((768 // 2) ** -0.5)  # 0.05103103630798288 — 1/sqrt(d_head) for the defaults
    print(1.2 * 1.2 + 3.3 * 2.4)  # 9.36 — a single dot-product entry by hand
    demo = torch.randn(2, 3, 4, 5)
    # matmul on 4-D tensors batches over the first two axes.
    product = torch.matmul(demo, demo.permute(0, 1, 3, 2))
    print(product.shape)  # torch.Size([2, 3, 4, 4])


if __name__ == '__main__':
    # softmax normalizes along the chosen axis; the other axis is untouched.
    demo_matrix = torch.tensor(
        [[1., 2., 3.],
         [4., 5., 6.]], dtype=torch.float
    )
    # dim=0: each COLUMN sums to 1.
    print(functional.softmax(demo_matrix, dim=0))
    # tensor([[0.0474, 0.0474, 0.0474],
    #         [0.9526, 0.9526, 0.9526]])
    # dim=1: each ROW sums to 1.
    print(functional.softmax(demo_matrix, dim=1))
    # tensor([[0.0900, 0.2447, 0.6652],
    #         [0.0900, 0.2447, 0.6652]])
