import torch
from torch import nn
import math
# # Define tensor x: a 2x4 tensor representing 2 samples, each with 4 features.
# # Applying layer normalization to x normalizes the 4 features of each row.
# x = torch.tensor([
#     [1.0,2.0,3.0,4.0],
#     [1.0,1.0,1.0,1.0]
# ])
# # Example of layer normalization:
# # the argument 4 is the normalized (input) dimension,
# # i.e. the 4 outputs of the neurons in that layer.
# layer_norm = nn.LayerNorm(4)
# # Compute the layer-normalization result
# output = layer_norm(x)
# print(output)

# Multi-head attention + residual add + LayerNorm
def attention(q, k, v, mask=None, dropout=None):
    """Scaled dot-product attention.

    Computes ``softmax(q @ k^T / sqrt(d_k)) @ v`` over the last two
    dimensions of the inputs.

    Args:
        q, k, v: tensors whose last dimension is the head size ``d_k``
            (k and v must share the same sequence length).
        mask: optional boolean tensor, broadcastable to the score shape;
            positions where it is True are suppressed before the softmax.
        dropout: optional dropout module applied to the attention weights.

    Returns:
        The attention-weighted combination of ``v``.
    """
    head_dim = k.size(-1)
    # Scale by sqrt(d_k) to keep the dot products in a numerically
    # stable range for the softmax.
    scores = (q @ k.transpose(-2, -1)) / math.sqrt(head_dim)
    if mask is not None:
        # Large negative score -> ~zero weight after softmax.
        scores = scores.masked_fill(mask, -1e9)
    weights = torch.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return weights @ v

class MultHeadedAttention(nn.Module):
    """Multi-head attention followed by a residual add and LayerNorm.

    The model dimension ``d_model`` is split into ``n_head`` independent
    heads of size ``d_model // n_head`` each; splitting lets the model
    attend to different parts of the sequence in parallel. The per-head
    outputs are concatenated and projected back to ``d_model``.
    """

    def __init__(self, d_model, n_head, dropout=0.1):
        super().__init__()
        self.dropout = nn.Dropout(dropout)

        # d_model must divide evenly across the heads.
        assert d_model % n_head == 0
        self.d_token = d_model // n_head  # per-head dimension
        self.n_head = n_head

        # Query/key/value projections (no bias).
        self.W_Q = nn.Linear(d_model, d_model, bias=False)
        self.W_K = nn.Linear(d_model, d_model, bias=False)
        self.W_V = nn.Linear(d_model, d_model, bias=False)

        # Output projection applied to the concatenated heads.
        self.linear = nn.Linear(d_model, d_model, bias=False)
        # Post-attention layer normalization.
        self.norm = nn.LayerNorm(d_model)

    def forward(self, q, k, v, mask=None):
        """Attend, then add the query residual and layer-normalize.

        q, k, v: (batch, seq_len, d_model). ``mask`` (optional) is given
        an extra head axis so it broadcasts over all heads. Returns a
        (batch, q_seq_len, d_model) tensor.
        """
        residual = q
        batch = q.size(0)

        def split_heads(proj, x):
            # (batch, seq, d_model) -> (batch, n_head, seq, d_token).
            # The head axis is moved ahead of seq so that matmul in the
            # attention operates on (seq, d_token) slices per head.
            return (
                proj(x)
                .view(batch, x.size(1), self.n_head, self.d_token)
                .transpose(1, 2)
            )

        q = split_heads(self.W_Q, q)
        k = split_heads(self.W_K, k)
        v = split_heads(self.W_V, v)

        # Insert a head axis so the same mask applies to every head.
        if mask is not None:
            mask = mask.unsqueeze(1)

        att = attention(q, k, v, mask, self.dropout)

        # Merge heads back: (batch, n_head, seq, d_token) ->
        # (batch, seq, d_model). contiguous() makes the transposed
        # tensor safely reshapeable (and contiguous in memory).
        merged = (
            att.transpose(1, 2)
            .contiguous()
            .reshape(batch, -1, self.n_head * self.d_token)
        )
        return self.norm(residual + self.linear(merged))


# Smoke test: run a random (batch=2, seq=3, d_model=8) input through a
# 2-head attention block and print the output and its shape. Guarded so
# that importing this module does not trigger the demo's side effects.
if __name__ == "__main__":
    mha = MultHeadedAttention(8, 2)
    q = k = v = torch.randn(2, 3, 8)
    mha_out = mha(q, k, v)
    print(mha_out)
    print(mha_out.shape)