name: 03_paged_attention
display_name: "Paged Attention Decode"
precision: bf16
regime: memory # decode is bandwidth-bound (KV cache streamed once per token)
# Effective FLOPs per call: QK^T + softmax*V across all batches/heads/seq.
# The formula is evaluated per shape; the benchmark substitutes batch, num_heads, seq_len, head_dim.
# 2 * B * H * L * D for QK^T plus 2 * B * H * L * D for AV => 4 * B * H * L * D
flops_formula: "4 * batch * num_heads * seq_len * head_dim"
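# Worked example (illustrative shape, assumed here -- not necessarily one the benchmark runs):
#   batch=32, num_heads=32, seq_len=4096, head_dim=128
#   4 * 32 * 32 * 4096 * 128 ≈ 2.15e9 FLOPs per decode step -- tiny for a modern GPU,
#   which is why the compute side is informational only (see regime: memory).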
# Bytes moved (the real bottleneck): the KV cache must be streamed end-to-end.
# K and V each: batch * seq_len * num_kv_heads * head_dim * 2 bytes (bf16)
# Q: batch * num_heads * head_dim * 2 (negligible vs KV)
# Out: same as Q
# So total ~ 2 * (B * L * Hkv * D * 2) + small.
bytes_formula: "2 * batch * seq_len * num_kv_heads * head_dim * 2 + batch * num_heads * head_dim * 2 * 2"
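# Worked example (same illustrative shape as above, with num_kv_heads=8 for GQA):
#   KV bytes   = 2 * 32 * 4096 * 8 * 128 * 2 ≈ 537 MB
#   Q/out      = 32 * 32 * 128 * 2 * 2       ≈ 0.5 MB
#   Arithmetic intensity ≈ 2.15e9 FLOPs / 5.37e8 B ≈ 4 FLOP/byte, far below the bf16
#   roofline ridge point, so the kernel is bandwidth-bound; at ~1.8 TB/s, streaming the
#   KV cache alone takes ~0.3 ms per decode step.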
hardware: [RTX_PRO_6000]
peak_tflops_key: bf16 # informational; regime=memory uses bandwidth
peak_bandwidth_key: dram # 1.8 TB/s GDDR7 on RTX PRO 6000 Blackwell
tolerance:
bfloat16: 0.02
float16: 0.02
# Forbidden ops -- any of these appearing in solution.py fails the post-hoc correctness check.
# vllm.attention / flashinfer.batch_decode_with_paged_kv_cache: dispatching to
# the SOTA library is the trivial cheat. SDPA is banned too, because the agent could
# gather the pages and then call SDPA, inheriting FlashAttention "for free".
# (A naive baseline that avoids all of these is sketched below the list.)
forbidden:
- "vllm.attention"
- "flashinfer.batch_decode_with_paged_kv_cache"
- "flashinfer.decode"
- "torch.nn.functional.scaled_dot_product_attention"
- "F.scaled_dot_product_attention"
sota:
name: "vLLM PagedAttention v2 / FlashInfer batch_decode_with_paged_kv_cache"
url: "https://github.com/vllm-project/vllm/blob/main/csrc/attention/paged_attention_v2.cu"
function: "vllm._C.ops.paged_attention_v2"
deps:
- "vllm>=0.6.0"
- "flashinfer>=0.2.0"
# Decode is memory-bound; the reference kernels reach ~70-85% of peak HBM bandwidth on H100.
reference_bandwidth_gbps_h100: 2400
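# Sanity check: 2400 / ~3350 GB/s (H100 SXM HBM3 peak) ≈ 0.72, the low end of that range.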
num_correct_trials: 3
num_perf_trials: 30