import torch
from xformers import ops as xops
import time

# Benchmark xformers memory-efficient (flash-style) attention on CUDA.
# Measures wall-clock kernel time and peak/current GPU memory for one
# causal self-attention call at a fixed problem size.
bs = 32          # batch size
seq_len = 512    # sequence length
n_head = 16      # number of attention heads
head_dim = 64    # per-head embedding dimension

device = "cuda:0"
# Allocate directly on the GPU: avoids a CPU allocation + host-to-device
# copy per tensor that `torch.randn(...).to(device)` would incur.
query_states = torch.randn((bs, n_head, seq_len, head_dim), dtype=torch.float16, device=device)
key_states = torch.randn((bs, n_head, seq_len, head_dim), dtype=torch.float16, device=device)
value_states = torch.randn((bs, n_head, seq_len, head_dim), dtype=torch.float16, device=device)

# xformers expects (batch, seq_len, n_head, head_dim), so swap the
# head and sequence axes from the (B, H, S, D) layout above.
flash_query_states = query_states.transpose(1, 2)
flash_key_states = key_states.transpose(1, 2)
flash_value_states = value_states.transpose(1, 2)

# CUDA kernel launches are asynchronous: synchronize before starting the
# clock so pending work (the randn fills) isn't attributed to attention,
# and again after the call so we time kernel completion, not just launch.
torch.cuda.synchronize(device)
start_time = time.time()

#  xformers memory-efficient attention with a causal (lower-triangular) mask
flash_attn_output = xops.memory_efficient_attention(
    flash_query_states, flash_key_states, flash_value_states,
    attn_bias=xops.LowerTriangularMask()
)
torch.cuda.synchronize(device)
print(f'flash attention time: {(time.time() - start_time) * 1000} ms')

# Peak allocation since process start vs. memory currently held, in MiB.
print(torch.cuda.max_memory_allocated(device) / 1024 ** 2)
print("=============================")
print(torch.cuda.memory_allocated(device) / 1024 ** 2)