import torch
import time
from model_flash_attention import FlashAttentionTransformer

# 1. Basic wall-clock timing with explicit CUDA synchronization.
# Build the model on the GPU and create a random token batch to feed it.
model = FlashAttentionTransformer(
    vocab_size=10000,
    d_model=128,
    num_heads=8,
    d_ff=512,
    num_layers=6,
    context_length=256,
    device="cuda",
    dtype=torch.float32,
).to("cuda")  # nn.Module.to returns the module itself, so chaining is safe

# One batch of 256 random token ids in [0, vocab_size), moved to the GPU.
input_tensor = torch.randint(0, 10000, (1, 256)).cuda()

# Warm-up: run a few forward passes first so one-time costs (CUDA context
# creation, kernel compilation/caching, allocator growth) don't pollute
# the measurement below.
with torch.no_grad():
    for _ in range(3):
        output = model(input_tensor)

# Actual measurement of one forward pass.
# Use time.perf_counter() instead of time.time(): perf_counter is monotonic
# and high-resolution, while time.time() is wall-clock time that can jump
# with system clock adjustments — unsuitable for benchmarking.
torch.cuda.synchronize()  # drain any pending GPU work before starting the clock
start_time = time.perf_counter()
with torch.no_grad():
    output = model(input_tensor)
torch.cuda.synchronize()  # CUDA launches are async; wait for the forward pass to finish
end_time = time.perf_counter()
print(f"模型前向传播时间: {(end_time - start_time)*1000:.2f} ms")

# 2. Detailed per-operator analysis.
# torch.autograd.profiler.profile(use_cuda=True) is the deprecated legacy
# API; the torch.profiler API with explicit activities is the supported
# replacement and yields the same key_averages() table.
with torch.profiler.profile(
    activities=[
        torch.profiler.ProfilerActivity.CPU,
        torch.profiler.ProfilerActivity.CUDA,
    ]
) as prof:
    with torch.no_grad():
        output = model(input_tensor)
# Print the 10 operators with the largest total CUDA time.
print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))

# # 3. More advanced profiling with torch.profiler (scheduling + TensorBoard trace)
# with torch.profiler.profile(
#     schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
#     on_trace_ready=torch.profiler.tensorboard_trace_handler('./logs'),
#     record_shapes=True,
#     profile_memory=True,
#     with_stack=True
# ) as prof:
#     for _ in range(10):
#         with torch.no_grad():
#             output = model(input_tensor)
#         prof.step()