import os
import torch

# ---- Problem dimensions ----
b, s = 2, 10              # batch size, sequence length
embedding_dim = 128       # input embedding width
k_size, v_size = 96, 64   # total query/key projection width, total value width
head = 8                  # number of attention heads
hd_k = k_size // head     # per-head query/key dim (12)
hd_v = v_size // head     # per-head value dim (8)

# ---- Random projection weights and input ----
# (torch.rand call order kept identical so the RNG stream is unchanged)
w_q = torch.rand(embedding_dim, k_size)
w_k = torch.rand(embedding_dim, k_size)
w_v = torch.rand(embedding_dim, v_size)

x = torch.rand(b, s, embedding_dim)


# Persist the random weights/input the first time, then always reload from
# disk so every run of the script works on the same tensors (reproducible
# comparison between the reference and the tiled implementation).
os.makedirs('dump', exist_ok=True)  # fix: torch.save raises FileNotFoundError if 'dump/' is missing
if not os.path.exists('dump/w_q.bin'):
    torch.save(w_q, 'dump/w_q.bin')
    torch.save(w_k, 'dump/w_k.bin')
    torch.save(w_v, 'dump/w_v.bin')
    torch.save(x, 'dump/x.bin')

w_q = torch.load('dump/w_q.bin')
w_k = torch.load('dump/w_k.bin')
w_v = torch.load('dump/w_v.bin')
x = torch.load('dump/x.bin')


# Project the input into query/key/value spaces.
q = torch.matmul(x, w_q)  # (b, s, k_size)  (2, 10, 96)
k = torch.matmul(x, w_k)  # (b, s, k_size)  (2, 10, 96)
v = torch.matmul(x, w_v)  # (b, s, v_size)  (2, 10, 64)

# Reshape q/k/v into multi-head layout: (b, head, s, head_dim)
q = q.reshape(b, s, head, hd_k).transpose(1, 2)  # (2, 8, 10, 12)
k = k.reshape(b, s, head, hd_k).transpose(1, 2)  # (2, 8, 10, 12)
v = v.reshape(b, s, head, hd_v).transpose(1, 2)  # (2, 8, 10, 8)

# 1. Define tile (block) sizes for flash-attention-style processing
b_c = 2  # column block size, applied to k/v
b_r = 3  # row block size, applied to q

# 2. Initialize the global accumulators for the tiled pass
go = torch.zeros(b, head, s, hd_v)  # running output; same layout as the head-split v
gl = torch.zeros(b, head, s, 1)     # running softmax denominator (row sums)
gm = torch.zeros(b, head, s, 1)     # running row maxima
# NOTE(review): flash attention conventionally initializes the row max to -inf,
# not 0 — confirm before using gm in the update rule.

# 3. Split q into row blocks and k/v into column blocks along the seq dim.
#    fix: these assignments originally had no right-hand side (SyntaxError).
q_list = torch.split(q, b_r, dim=2)  # tuple of (b, head, <=b_r, hd_k) tiles
t_r = len(q_list)                    # number of q row blocks = ceil(s / b_r)
k_list = torch.split(k, b_c, dim=2)  # tuple of (b, head, <=b_c, hd_k) tiles
v_list = torch.split(v, b_c, dim=2)  # tuple of (b, head, <=b_c, hd_v) tiles
t_c = len(k_list)                    # number of k/v column blocks = ceil(s / b_c)


#########################################
# Reference (non-tiled) attention, saved as ground truth for the tiled version.
score = torch.matmul(q, k.transpose(-1, -2))  # (b, n, s, s)
score = score / (q.shape[-1] ** 0.5)  # scale by sqrt(head_dim)

# Causal mask: position i may attend only to positions j <= i (lower triangle).
# fix 1: the original used torch.triu, which keeps only FUTURE positions.
# fix 2: multiplying scores by a 0/1 mask before softmax is wrong — a zeroed
#        score still receives weight exp(0) after softmax. Masked entries must
#        be set to -inf so they contribute exactly zero probability.
mask = torch.tril(torch.ones(s, s))
print(f'mask={mask}')

# mask.shape=(s, s) broadcasts against score.shape=(b, n, s, s)
score = score.masked_fill(mask == 0, float('-inf'))
print(f'score={score}')

attention_weight = torch.softmax(score, dim=-1)

attention_out = torch.matmul(attention_weight, v)  # (b, n, s, hd_v)
attention_out = attention_out.transpose(1, 2).reshape(b, s, v_size)  # merge heads back
print(f'attention_out={attention_out}')
torch.save(attention_out, 'dump/out0.bin')


