name: 02_kda_cutlass
display_name: "Kimi Delta Attention (chunk forward) via CUTLASS CuTe"
precision: bf16
regime: compute
# Approximate FLOPs per forward call. KDA forward (chunk form) breakdown per
# token and head:
# - intra-chunk Aqk (q @ k.T with decay): 2*BT*K
# - intra-chunk Akk + cumulative inverse: ~3*BT*K + BT^2
# - w = A @ (g.exp()*k): 2*BT*K
# - u = A @ v: 2*BT*V
# - inter-chunk q @ S: 2*K*V
# - intra-chunk output (Aqk @ v_i): 2*BT*V
# - state update S += k.T @ v_i: 2*K*V
# Aggregating with a constant of 4 (typical for chunked linear attention) gives
# the formula below. Used only for roofline reporting -- not graded directly.
flops_formula: "4 * B * T * H * (K*V + CHUNK_SIZE*K + CHUNK_SIZE*V)"
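# For a sense of scale, evaluating the formula at the B=2, T=2048, H=8, K=V=128
# shape quoted in the SOTA notes below, with CHUNK_SIZE=64 assumed as a
# placeholder (the chunk size is not pinned by this config):
#
#   B, T, H, K, V, CHUNK_SIZE = 2, 2048, 8, 128, 128, 64   # CHUNK_SIZE assumed
#   flops = 4 * B * T * H * (K * V + CHUNK_SIZE * K + CHUNK_SIZE * V)
#   print(f"{flops / 1e9:.2f} GFLOPs")                     # ~4.29 GFLOPs per call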
# Bytes moved (lower bound): one read each of q, k, v, g, beta and one write of o.
bytes_formula: "B*T*H*K*2 + B*T*H*K*2 + B*T*H*V*2 + B*T*H*K*4 + B*T*H*2 + B*T*H*V*2"
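# Plugging the same shape into the byte terms, a sketch (this is only the
# roofline lower bound on DRAM traffic; a real kernel typically moves additional
# intermediate data):
#
#   B, T, H, K, V = 2, 2048, 8, 128, 128
#   qk    = 2 * (B * T * H * K * 2)   # q and k reads, bf16
#   vo    = 2 * (B * T * H * V * 2)   # v read plus o write, bf16
#   gate  = B * T * H * K * 4         # g read, fp32
#   beta  = B * T * H * 2             # per-head beta read, bf16
#   total = qk + vo + gate + beta     # ~50.4 MB per forward call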
hardware: [RTX_PRO_6000]
peak_tflops_key: bf16   # KDA's heavy work is bf16 @ bf16 -> fp32 GEMM-equivalent
peak_bandwidth_key: dram
# The bf16 default tolerance in src/eval/correctness.py is 1e-2 atol/rtol; KDA's
# long recurrence accumulates more error than a single GEMM, so we relax it here.
tolerance:
  torch.bfloat16: 0.05  # was "5e-2" -- PyYAML parses exponent notation without a decimal point as a string
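# The quoting gotcha, demonstrated against PyYAML's YAML 1.1 resolver (a bare
# scalar needs a '.' before the exponent to resolve as a float):
#
#   import yaml
#   yaml.safe_load("tol: 5e-2")    # {'tol': '5e-2'}  -- silently a string
#   yaml.safe_load("tol: 5.0e-2")  # {'tol': 0.05}
#   yaml.safe_load("tol: 0.05")    # {'tol': 0.05}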
# Forbidden ops -- referencing any of these in solution.py fails the post-hoc
# correctness check (a minimal scan sketch follows the list). The whole point of
# the problem is to write the kernel; calling FLA's existing Triton chunk_kda is
# the canonical reward-hack to block.
forbidden:
  - "fla.ops.kda"
  - "fla.ops.chunk_kda"
  - "chunk_kda"
  - "fused_recurrent_kda"
  - "naive_chunk_kda"
  - "naive_recurrent_kda"
sota:
  name: "FLA chunk_kda (Triton)"
  url: "https://github.com/fla-org/flash-linear-attention/tree/main/fla/ops/kda"
  function: "fla.ops.kda.chunk_kda"
  deps:
    - "flash-linear-attention>=0.3"
# Documented H100 throughput (informational, not graded). FLA's KDA Triton
# kernel hits roughly 0.6-0.8x of FlashAttention-2 wall-clock on H100 at the
# B=2, T=2048, H=8, K=V=128 shape (per the Kimi Linear blog / FLA benchmarks).
reference_throughput_tflops_h100: null
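# A hedged sketch of calling the SOTA baseline for comparison. The (q, k, v, g,
# beta) argument order and the (output, final_state) return value mirror FLA's
# other delta-rule kernels and are assumptions here -- check the repo for the
# exact signature. Shapes and dtypes follow the bytes_formula above:
#
#   import torch
#   from fla.ops.kda import chunk_kda
#   B, T, H, K, V = 2, 2048, 8, 128, 128
#   q    = torch.randn(B, T, H, K, dtype=torch.bfloat16, device="cuda")
#   k    = torch.randn(B, T, H, K, dtype=torch.bfloat16, device="cuda")
#   v    = torch.randn(B, T, H, V, dtype=torch.bfloat16, device="cuda")
#   g    = -torch.rand(B, T, H, K, dtype=torch.float32, device="cuda")  # log-decay gate <= 0
#   beta = torch.rand(B, T, H, dtype=torch.bfloat16, device="cuda")     # update strength in (0, 1)
#   o, final_state = chunk_kda(q, k, v, g, beta)                        # assumed signature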
num_correct_trials: 3
num_perf_trials: 20