import torch
from sentence_transformers import SentenceTransformer
from torch import nn

# Split the question "中国的首都在哪里？" into single characters so that each
# character acts as one "token" for this hand-rolled attention demo.
sentences = list("中国的首都在哪里？")

# Encode every character with a local BGE Chinese embedding model into an
# L2-normalized vector. embeddings: (9, 512) — 9 characters x 512 dims.
model = SentenceTransformer('./data/BAAI/bge-small-zh')
embeddings = model.encode(sentences, normalize_embeddings=True)

# Prepend a batch axis: X is (1, 9, 512), then record the three sizes.
X = torch.from_numpy(embeddings).unsqueeze(0)
batch_size, seq_len, embedding_dims = X.shape

hidden_dims = 512

# Randomly initialised projection matrices: Q/K/V map embedding_dims -> hidden_dims,
# w_o maps hidden_dims -> hidden_dims. requires_grad=True marks them learnable,
# though this demo never runs a backward pass.
qkv_shape = (batch_size, embedding_dims, hidden_dims)
w_q, w_k, w_v = (torch.randn(qkv_shape, requires_grad=True) for _ in range(3))
w_o = torch.randn((batch_size, hidden_dims, hidden_dims), requires_grad=True)

# Project the inputs into query/key/value spaces; each is (1, 9, 512).
Q, K, V = X @ w_q, X @ w_k, X @ w_v

# Multi-head attention: split the hidden dimension evenly across mHead heads.
mHead = 8
head_dims = hidden_dims // mHead  # 512 // 8 = 64 dims per head
Q = Q.reshape(batch_size, seq_len, mHead, head_dims)
K = K.reshape(batch_size, seq_len, mHead, head_dims)
V = V.reshape(batch_size, seq_len, mHead, head_dims)

# Move the head axis next to the batch axis so the two can be merged:
# (batch, seq, heads, head_dims) -> (batch, heads, seq, head_dims)
Q = Q.permute([0, 2, 1, 3])
K = K.permute([0, 2, 1, 3])
V = V.permute([0, 2, 1, 3])

# Merge batch and head axes so attention runs per head: (batch * heads, seq, head_dims)
Q = Q.reshape(-1, seq_len, head_dims)
K = K.reshape(-1, seq_len, head_dims)
V = V.reshape(-1, seq_len, head_dims)

# Causal mask: (seq, seq) with -inf above the diagonal, so position i
# cannot attend to any later position j > i.
atten_mask = nn.Transformer.generate_square_subsequent_mask(seq_len)

# Scaled dot-product attention.
# FIX: the scale factor is sqrt(d_k) where d_k is the PER-HEAD dimension
# (head_dims = 64), not the full embedding size (embedding_dims = 512).
# Dividing by sqrt(embedding_dims) over-shrinks the logits and flattens the
# softmax. The mask is added after scaling (its 0/-inf entries are unaffected).
Z = torch.softmax(Q @ K.transpose(1, 2) / head_dims ** 0.5 + atten_mask, dim=-1) @ V

# Undo the merge and permutation to restore (batch, seq, hidden).
Z = Z.reshape(batch_size, mHead, seq_len, head_dims)
Z = Z.permute([0, 2, 1, 3])
Z = Z.reshape(batch_size, seq_len, -1)

# Final output projection: (1, 9, 512).
O = Z @ w_o
print(O.shape)

# Z can also be restored with chunk/cat instead of reshape/permute.
# At that point Z has shape (batch * mHead, seq_len, head_dims) = (8, 9, 64):
# Z_list = torch.chunk(Z, mHead)  # split the merged batch axis back into per-head tensors
# Z = torch.cat(Z_list, dim=-1)   # (1, 9, 512) — the batch axis survives
