import torch
import triton
import triton.language as tl

DEVICE = "npu"  # target device string; set to "cuda" when running on NVIDIA GPUs

@triton.jit
def batch_matmul(
    a_ptr, b_ptr, c_ptr,
    B, M, N, K,
    stride_ab, stride_am, stride_ak,
    stride_bb, stride_bk, stride_bn,
    stride_cb, stride_cm, stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    # Batched matmul kernel: C[bid] = A[bid] @ B[bid].
    # Grid layout (set by the launcher): axis 0 = batch index,
    # axis 1 = flattened (M-tile, N-tile) index. Each program computes one
    # BLOCK_SIZE_M x BLOCK_SIZE_N tile of C for one batch.
    bid = tl.program_id(axis=0)  # batch index
    pid = tl.program_id(axis=1)  # flattened output-tile index
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Grouped ("swizzled") tile ordering: consecutive pids cover tiles within
    # a GROUP_SIZE_M-row group first — same scheme as the Triton matmul
    # tutorial, intended to improve data reuse across neighboring programs.
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)  # last group may be short
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    # Element offsets of this tile; offsets_k indexes within one K-block.
    offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offsets_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offsets_k = tl.arange(0, BLOCK_SIZE_K)
    # Pointers to the first (M, K)-block of A and (K, N)-block of B for this
    # batch. Strides come from the caller, so non-contiguous inputs work.
    a_ptrs = a_ptr + (bid * stride_ab + offsets_m[:, None] * stride_am + offsets_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (bid * stride_bb + offsets_k[:, None] * stride_bk + offsets_n[None, :] * stride_bn)
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)  # fp32 accumulation
    num_blocks = tl.cdiv(K, BLOCK_SIZE_K)
    for k in range(0, num_blocks):
        # Mask out rows/cols past M/N and the K-tail on the final iteration;
        # masked lanes load 0.0 so they contribute nothing to the dot product.
        a_mask = (offsets_m[:, None] < M) & (offsets_k[None, :] < K - k * BLOCK_SIZE_K)
        b_mask = (offsets_k[:, None] < K - k * BLOCK_SIZE_K) & (offsets_n[None, :] < N)
        a = tl.load(a_ptrs, mask=a_mask, other=0.0)
        b = tl.load(b_ptrs, mask=b_mask, other=0.0)
        accumulator += tl.dot(a, b)
        # Advance both pointer grids to the next K-block.
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk
    # Write the accumulated tile back, masking out-of-range rows/columns.
    # NOTE(review): the `bid < B` term is redundant when the launch grid has
    # exactly B programs on axis 0 — harmless, kept as a safety net.
    c_ptrs = c_ptr + (bid * stride_cb + offsets_m[:, None] * stride_cm + offsets_n[None, :] * stride_cn)
    offsets_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offsets_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_mask = (bid < B) & (offsets_cm[:, None] < M) & (offsets_cn[None, :] < N)
    tl.store(c_ptrs, accumulator, mask=c_mask)

def batch_matmul_ref(a, b):
    """Reference implementation: batched matrix multiply via ``torch.bmm``.

    Args:
        a: tensor of shape (B, M, K).
        b: tensor of shape (B, K, N).

    Returns:
        Tensor of shape (B, M, N) equal to ``torch.bmm(a, b)``.
    """
    product = torch.bmm(a, b)
    return product

def batch_matmul_triton(a, b):
    """Triton implementation of batched matrix multiply C = A @ B.

    Strides of both inputs are forwarded to the kernel, so non-contiguous
    tensors (e.g. permuted views) are supported without copying.

    Args:
        a: tensor of shape (B, M, K).
        b: tensor of shape (B, K, N), on the same device as ``a``.

    Returns:
        float32 tensor of shape (B, M, N) on ``a``'s device.

    Raises:
        ValueError: if the batch or inner (K) dimensions of ``a`` and ``b``
            do not match.
    """
    B, M, K = a.shape
    B_check, K_check, N = b.shape
    # Explicit raise instead of `assert`: asserts are stripped under -O and
    # must not be relied on for input validation.
    if B != B_check or K != K_check:
        raise ValueError(f"Shape mismatch: a=({B},{M},{K}), b=({B_check},{K_check},{N})")
    c = torch.empty((B, M, N), dtype=torch.float32, device=a.device)

    # Tile sizes; the kernel accumulates in fp32 regardless of input dtype.
    BLOCK_SIZE_M = 64
    BLOCK_SIZE_N = 64
    BLOCK_SIZE_K = 64
    GROUP_SIZE_M = 8

    # Grid: axis 0 enumerates batches, axis 1 enumerates output tiles
    # (num_pid_m * num_pid_n programs per batch).
    grid = (B, triton.cdiv(M, BLOCK_SIZE_M) * triton.cdiv(N, BLOCK_SIZE_N))

    batch_matmul[grid](
        a, b, c,
        B, M, N, K,
        stride_ab=a.stride(0), stride_am=a.stride(1), stride_ak=a.stride(2),
        stride_bb=b.stride(0), stride_bk=b.stride(1), stride_bn=b.stride(2),
        stride_cb=c.stride(0), stride_cm=c.stride(1), stride_cn=c.stride(2),
        BLOCK_SIZE_M=BLOCK_SIZE_M,
        BLOCK_SIZE_N=BLOCK_SIZE_N,
        BLOCK_SIZE_K=BLOCK_SIZE_K,
        GROUP_SIZE_M=GROUP_SIZE_M,
    )
    return c

if __name__ == "__main__":
    torch.manual_seed(42)  # fixed seed for reproducibility
    B, M, N, K = 2, 1024, 2112, 7168  # example shapes (typical Transformer FFN sizes)
    a = torch.randn(B, M, K, dtype=torch.float32, device=DEVICE)
    # Build b as (K, B, N) then permute to (B, K, N): the permuted view is
    # non-contiguous, which exercises the kernel's stride handling.
    b = torch.randn(K, B, N, dtype=torch.float32, device=DEVICE)
    b = torch.permute(b, (1, 0, 2))
    # PyTorch reference output
    c_ref = batch_matmul_ref(a, b)

    # Triton output
    c_triton = batch_matmul_triton(a, b)

    # Accuracy comparison
    abs_diff = torch.abs(c_ref - c_triton)
    max_abs_err = torch.max(abs_diff).item()
    rel_err = torch.max(abs_diff / (torch.abs(c_ref) + 1e-8)).item()
    allclose = torch.allclose(c_ref, c_triton, atol=1e-3, rtol=1e-3)

    print(f"输入形状: a=({B}, {M}, {K}), b=({B}, {K}, {N})")
    print(f"输出形状: c=({B}, {M}, {N})")
    print(f"Max Absolute Error: {max_abs_err:.6f}")
    print(f"Max Relative Error: {rel_err:.6f}")
    # BUGFIX: the message previously claimed atol/rtol=1e-5 while the check
    # above actually uses 1e-3 — report the tolerances really used.
    print(f"Allclose (atol=1e-3, rtol=1e-3): {allclose}")

    # Sample output (first batch, first row, first 5 columns)
    print(f"Reference [0, 0, :5]: {c_ref[0, 0, :5].cpu()}")
    print(f"Triton [0, 0, :5]: {c_triton[0, 0, :5].cpu()}")

    # Print success/failure message
    if allclose:
        print("测试通过！Triton 批次 MatMul 正确。")
    else:
        print("测试失败，请检查内核实现。")