import os
import shutil as sh
import torch

# Hyper-parameters for a toy multi-head attention example.
b = 2                # batch size
s = 10               # sequence length
embedding_dim = 128  # width of the input embeddings
k_size = 96          # total query/key projection width (all heads)
v_size = 64          # total value projection width (all heads)
head = 8             # number of attention heads
k_head_dim = k_size // head  # per-head query/key width
v_head_dim = v_size // head  # per-head value width

# Random projection weights (shared across the batch) and a random input batch.
w_q = torch.rand(embedding_dim, k_size)
w_k = torch.rand(embedding_dim, k_size)
w_v = torch.rand(embedding_dim, v_size)
x = torch.rand(b, s, embedding_dim)

# Round-trip the tensors through disk as a serialization sanity check.
base = 'dump/multi'
# Recreate the dump directory from scratch on every run.
if os.path.exists(base):
    sh.rmtree(base)
# BUG FIX: os.mkdir(base) raises FileNotFoundError on a fresh checkout because
# the parent directory 'dump/' does not exist yet; makedirs creates it too.
os.makedirs(base)

torch.save(w_q, f'{base}/w_q.bin')
torch.save(w_k, f'{base}/w_k.bin')
torch.save(w_v, f'{base}/w_v.bin')
torch.save(x, f'{base}/x.bin')

# NOTE(review): loading files we just wrote ourselves is safe; for files from
# untrusted sources prefer torch.load(..., weights_only=True).
w_q = torch.load(f'{base}/w_q.bin')
w_k = torch.load(f'{base}/w_k.bin')
w_v = torch.load(f'{base}/w_v.bin')
x = torch.load(f'{base}/x.bin')


# Multi-head scaled dot-product attention with a causal (autoregressive) mask.
q = torch.matmul(x, w_q)  # (b, s, k_size)
k = torch.matmul(x, w_k)  # (b, s, k_size)
v = torch.matmul(x, w_v)  # (b, s, v_size)

# Split q/k/v into heads: (b, s, n*hd) -> (b, n, s, hd)
q = q.reshape(b, s, head, k_head_dim).transpose(1, 2)  # (b, n, s, hd_k)
k = k.reshape(b, s, head, k_head_dim).transpose(1, 2)  # (b, n, s, hd_k)
v = v.reshape(b, s, head, v_head_dim).transpose(1, 2)  # (b, n, s, hd_v)

score = torch.matmul(q, k.transpose(-1, -2))  # (b, n, s, s)
scale = q.shape[-1] ** 0.5
score = score / scale

# Causal mask: position i may attend to positions j <= i only.
# BUG FIX 1: torch.triu kept the strictly *future* positions (anti-causal);
# torch.tril keeps the current and past positions.
# BUG FIX 2: multiplying scores by a 0/1 mask sets masked scores to 0, which
# still receives weight exp(0) after softmax; masked positions must instead be
# set to -inf so softmax assigns them exactly zero weight.
mask = torch.tril(torch.ones(s, s, dtype=torch.bool))
print(f'mask={mask}')

# mask.shape=(s, s) broadcasts over score.shape=(b, n, s, s).
score = score.masked_fill(~mask, float('-inf'))
print(f'score={score}')

# safe_softmax: subtract the per-row max for numerical stability. Every row
# keeps at least its diagonal entry finite, so the max is never -inf.
attention_weight = torch.softmax(score - torch.max(score, dim=-1, keepdim=True)[0], dim=-1)

attention_out = torch.matmul(attention_weight, v)  # (b, n, s, hd_v)
# Merge the heads back: (b, n, s, hd_v) -> (b, s, n*hd_v) = (b, s, v_size)
attention_out = attention_out.transpose(1, 2).reshape(b, s, v_size)
print(f'attention_out={attention_out}')
torch.save(attention_out, f'{base}/out0.bin')


