import os
import torch

# Model hyperparameters — expected to match the tensors dumped in dump/multi.
b = 2                        # batch size
s = 10                       # sequence length
embedding_dim = 128          # input embedding width
k_size = 96                  # total query/key projection width (all heads combined)
v_size = 64                  # total value projection width (all heads combined)
head = 8                     # number of attention heads
k_head_dim = k_size // head  # per-head query/key dim
v_head_dim = v_size // head  # per-head value dim


# Load the dumped tensors (projection weights, input, and later the reference
# output) produced elsewhere by the fused multi-head attention implementation.
# NOTE(review): torch.load uses pickle — fine for these local trusted dumps,
# but consider weights_only=True on newer torch versions.
base = 'dump/multi'
w_q = torch.load(f'{base}/w_q.bin')  # (embedding_dim, k_size)
w_k = torch.load(f'{base}/w_k.bin')  # (embedding_dim, k_size)
w_v = torch.load(f'{base}/w_v.bin')  # (embedding_dim, v_size)
x = torch.load(f'{base}/x.bin')      # (b, s, embedding_dim)


# Split the fused projection weights into per-head slices:
# (embedding_dim, n * hd) -> (n, embedding_dim, hd), where n is the head count
# and hd_k / hd_v are the per-head key/value dims.
w_q = w_q.reshape(embedding_dim, head, k_head_dim).permute(1, 0, 2)  # (n, embedding_dim, hd_k)
w_k = w_k.reshape(embedding_dim, head, k_head_dim).permute(1, 0, 2)  # (n, embedding_dim, hd_k)
w_v = w_v.reshape(embedding_dim, head, v_head_dim).permute(1, 0, 2)  # (n, embedding_dim, hd_v)

attention_out_list = []

# The mask is loop-invariant: build (and print) it once instead of on every
# one of the `head` iterations, as the original did.
# NOTE(review): multiplying scores by a 0/1 mask does NOT fully mask entries —
# exp(0) == 1, so zeroed positions still receive weight after softmax. Standard
# causal attention uses an additive -inf mask, and keeps the *lower* triangle
# (torch.tril), not the upper. Kept as-is deliberately so the result still
# matches the reference dump out0.bin, which was produced with the same scheme.
mask = torch.triu(torch.ones(s, s))
print(f'mask={mask}')

# Compute each head independently.
for i in range(head):
    w_q_i = w_q[i, :, :]  # (embedding_dim, hd_k)
    w_k_i = w_k[i, :, :]  # (embedding_dim, hd_k)
    w_v_i = w_v[i, :, :]  # (embedding_dim, hd_v)

    q_i = torch.matmul(x, w_q_i)  # (b, s, hd_k)
    k_i = torch.matmul(x, w_k_i)  # (b, s, hd_k)
    v_i = torch.matmul(x, w_v_i)  # (b, s, hd_v)

    # Scaled dot-product scores.
    score_i = torch.matmul(q_i, k_i.transpose(-1, -2))  # (b, s, s)
    scale = q_i.shape[-1] ** 0.5
    score_i = score_i / scale

    # mask is (s, s); broadcasting applies it across the batch dim of
    # score_i, which is (b, s, s).
    score_i = score_i * mask

    # "Safe" softmax: subtracting the row max is mathematically a no-op and
    # torch.softmax is already numerically stable — kept for the educational
    # point; it does not change the result.
    attention_weight_i = torch.softmax(score_i - torch.max(score_i, dim=-1, keepdim=True)[0], dim=-1)

    attention_out_i = torch.matmul(attention_weight_i, v_i)  # (b, s, hd_v)

    print(f'attention_out_i={attention_out_i}')
    attention_out_list.append(attention_out_i)  # n entries of (b, s, hd_v)

# Merge heads by concatenating along the last dim, then compare against the
# reference output of the fused computation.
out = torch.cat(attention_out_list, dim=-1)  # (b, s, v_size)
out0 = torch.load(f'{base}/out0.bin')

matches = torch.allclose(out0, out, rtol=0.001, atol=0.001)
print('===============' if matches else '!!!!!!!!!!!!!!!')


# Experiment: merge heads via stack + axis permutation instead of cat on the
# last dim — does it give the same result? Answer: yes.
# stack is equivalent to cat + reshape: it adds a new leading dimension,
# whereas cat keeps the number of dimensions unchanged.
out = torch.stack(attention_out_list)  # (n, b, s, hd_v)
out = out.permute(1, 2, 0, 3)          # (b, s, n, hd_v) — same as transpose(0,1) then transpose(1,2)
out = out.reshape(b, s, v_size)        # (b, s, v_size)

matches = torch.allclose(out0, out, rtol=0.001, atol=0.001)
print('===============' if matches else '!!!!!!!!!!!!!!!')


# TODO: In multi-head attention, splitting the weights per head, computing each
#  head independently, and concatenating the results matches the fused computation.
#  However, the intermediate score_i is still (b, s, s); for long sequences s this
#  consumes too much memory — which is what motivated flash-attention.