# import torch
# import triton
# import triton.language as tl

# DEVICE = "npu"  # 若 NPU，改为 "npu"

# @triton.jit
# def pad_head_rotate_half_x_per_matmul_fused(
#     output_ptr, mm2_split2_ptr, sin2_ptr, cos2_ptr,
#     tokens: tl.constexpr, head_num: tl.constexpr, hidden_size_rope_q_head: tl.constexpr,
#     BLOCK_SIZE: tl.constexpr,  # 设为 hidden_size_rope_q_head (dim)
# ):
#     pid_tokens = tl.program_id(axis=0)
#     pid_head = tl.program_id(axis=1)
#     if pid_tokens >= tokens or pid_head >= head_num:
#         return
    
#     dim = hidden_size_rope_q_head
#     total_dim = dim * head_num
#     token_idx = pid_tokens
#     head_idx = pid_head
    
#     # 基址：sin/cos 共享，q/output per head
#     sin_cos_base = token_idx * dim
#     q_base = token_idx * total_dim + head_idx * dim
#     out_base = q_base  # 输出布局同 q
    
#     # 假设 BLOCK_SIZE == dim，offsets = tl.arange(0, dim)
#     offsets = tl.arange(0, BLOCK_SIZE)
#     mask = offsets < dim  # 若 BLOCK_SIZE > dim，mask 兜底
    
#     # 步骤1: pad_head_num - 加载 sin/cos (隐式 pad，复用)
#     sin_vals = tl.load(sin2_ptr + sin_cos_base + offsets, mask=mask, other=0.0)
#     cos_vals = tl.load(cos2_ptr + sin_cos_base + offsets, mask=mask, other=0.0)
    
#     # 步骤2: 加载 origin_q (q_vals)
#     q_vals = tl.load(mm2_split2_ptr + q_base + offsets, mask=mask, other=0.0)
    
#     # 步骤3: rotate_half_x - 向量化计算 rotated_vals
#     half_dim = dim // 2
#     is_second = offsets >= half_dim  # [BLOCK_SIZE] bool tensor
#     rotated_offsets = tl.where(is_second, offsets - half_dim, offsets + half_dim)  # 动态偏移
#     rotated_ptrs = mm2_split2_ptr + q_base + rotated_offsets  # 指针数组
#     rotated_base_vals = tl.load(rotated_ptrs, mask=mask, other=0.0)  # 加载旋转位置的值
#     rotated_sign = tl.where(is_second, 1.0, -1.0)  # 第二半正，first 半负
#     rotated_vals = rotated_sign * rotated_base_vals  # 应用符号
    
#     # 步骤4: per_matmul - element-wise 计算
#     output_vals = q_vals * cos_vals + rotated_vals * sin_vals
    
#     # 步骤5: 存储
#     tl.store(output_ptr + out_base + offsets, output_vals, mask=mask)

# # 参考实现：PyTorch 等价逻辑（用于验证）
# def reference_rope_fusion(mm2_split2, sin2, cos2):
#     """
#     等价实现：pad sin/cos + rotate_half_x + per_matmul。
#     mm2_split2: [tokens, head_num, dim]
#     sin2, cos2: [tokens, dim]
#     返回: [tokens, head_num, dim]
#     """
#     tokens, head_num, dim = mm2_split2.shape
#     # pad: 重复 sin/cos 到 [tokens, head_num, dim]
#     sin_pad = sin2.unsqueeze(1).repeat(1, head_num, 1)  # [tokens, head_num, dim]
#     cos_pad = cos2.unsqueeze(1).repeat(1, head_num, 1)
    
#     # rotate_half_x: 对每个 head 的 q 旋转
#     rotated = torch.zeros_like(mm2_split2)
#     half_dim = dim // 2
#     for h in range(head_num):
#         q_h = mm2_split2[:, h, :]  # [tokens, dim]
#         # 交换 + 负号：rotated[:, h, :half] = -q_h[:, half:], rotated[:, h, half:] = q_h[:, :half]
#         rotated[:, h, :half_dim] = -q_h[:, half_dim:]
#         rotated[:, h, half_dim:] = q_h[:, :half_dim]
    
#     # per_matmul: q * cos + rotated * sin
#     output = mm2_split2 * cos_pad + rotated * sin_pad
#     return output

# if __name__ == "__main__":
#     # 测试参数（小规模，便于调试）
#     tokens = 1024
#     head_num = 64
#     dim = 64  # hidden_size_rope_q_head
#     BLOCK_SIZE = dim  # 全 dim 加载
    
#     # 生成随机测试数据
#     torch.manual_seed(42)  # 固定种子
#     mm2_split2 = torch.randn(tokens, head_num, dim, dtype=torch.float32, device=DEVICE)
#     sin2 = torch.randn(tokens, dim, dtype=torch.float32, device=DEVICE)
#     cos2 = torch.randn(tokens, dim, dtype=torch.float32, device=DEVICE)
    
#     # Triton 输入：展平 mm2_split2 到 [tokens, head_num * dim]
#     mm2_flat = mm2_split2.reshape(tokens, head_num * dim)
#     output_triton = torch.empty_like(mm2_flat, dtype=torch.float32, device=DEVICE)
    
#     # 运行内核
#     grid = (tokens, head_num)
#     # 地址没有对齐
#     pad_head_rotate_half_x_per_matmul_fused[grid](
#         output_triton, mm2_flat, sin2, cos2,
#         tokens=tokens, head_num=head_num, hidden_size_rope_q_head=dim,
#         BLOCK_SIZE=BLOCK_SIZE
#     )
    
#     # 重塑 Triton 输出为 [tokens, head_num, dim]
#     output_triton = output_triton.reshape(tokens, head_num, dim)
    
#     # 计算参考输出
#     output_ref = reference_rope_fusion(mm2_split2, sin2, cos2)
    
#     # 精度对比
#     abs_diff = torch.abs(output_ref - output_triton)
#     max_abs_err = torch.max(abs_diff).item()
#     rel_err = torch.max(abs_diff / (torch.abs(output_ref) + 1e-8)).item()
#     allclose = torch.allclose(output_ref, output_triton, atol=1e-5, rtol=1e-5)
    
#     # 输出结果
#     print(f"测试形状: mm2_split2=({tokens}, {head_num}, {dim}), sin/cos=({tokens}, {dim})")
#     print(f"Max Absolute Error: {max_abs_err:.6f}")
#     print(f"Max Relative Error: {rel_err:.6f}")
#     print(f"Allclose (atol=1e-5, rtol=1e-5): {allclose}")
    
#     # 样本输出（第一个 token, 第一个 head）
#     print(f"Reference [0, 0, :]: {output_ref[0, 0, :].cpu()}")
#     print(f"Triton [0, 0, :]: {output_triton[0, 0, :].cpu()}")
    
#     # 若通过，打印成功信息
#     if allclose:
#         print("测试通过！内核逻辑正确。")
#     else:
#         print("测试失败，请检查内核实现。")


import torch
import triton
import triton.language as tl
import numpy as np
from mlapo_torch import main

DEVICE = "npu"  # 若 NPU，改为 "npu"


'''
GM -> VECIN 32B = 4 * 8
'''
# @triton.jit
# def pad_head_rotate_half_x_per_matmul_fused(
#     output_ptr, mm2_split2_ptr, sin2_ptr, cos2_ptr, mm2_split2_ptr_rotated,
#     tokens: tl.constexpr, head_num: tl.constexpr, hidden_size_rope_q_head: tl.constexpr,
#     BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE: tl.constexpr,  # BLOCK_SIZE_M for tokens, BLOCK_SIZE for dim
# ):
#     pid_m = tl.program_id(axis=0)
#     pid_head = tl.program_id(axis=1)
#     if pid_head >= head_num:
#         return
    
#     dim = hidden_size_rope_q_head
#     total_dim = dim * head_num
#     num_pid_m = tl.cdiv(tokens, BLOCK_SIZE_M)
#     if pid_m >= num_pid_m:
#         return
    
#     # Token 块偏移
#     offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
#     mask_m = offsets_m < tokens
    
#     # Head 固定
#     head_idx = pid_head
    
#     # 假设 BLOCK_SIZE == dim，offsets_n = tl.arange(0, dim)
#     offsets_n = tl.arange(0, BLOCK_SIZE)
#     mask_n = offsets_n < dim
#     offsets_q = head_idx * dim + offsets_n
#     mask_q = offsets_q < total_dim
#     # 广播 mask
#     mask = mask_m[:, None] and mask_n[None, :]
    
#     # 基址：sin/cos per token, q/output per (token block, head)
#     sin_cos_base = offsets_m[:, None] * dim + offsets_n[None, :]  # [BLOCK_M, BLOCK_SIZE]
#     q_base = offsets_m[:, None] * total_dim + offsets_q[None, :]  # [BLOCK_M, BLOCK_SIZE]
#     out_base = q_base  # 输出同 q
    
#     # 步骤1: pad_head_num - 加载 sin/cos (隐式 pad，复用 per token)
#     sin_vals = tl.load(sin2_ptr + sin_cos_base, mask=mask, other=0.0)  # [BLOCK_M, BLOCK_SIZE]
#     cos_vals = tl.load(cos2_ptr + sin_cos_base, mask=mask, other=0.0)
#     # sin_vals = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE), dtype=tl.float32)
#     # cos_vals = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE), dtype=tl.float32)
    
#     # 步骤2: 加载 origin_q (q_vals)
#     q_vals = tl.load(mm2_split2_ptr + q_base, mask=mask_m[:, None] and mask_q[None, :], other=0.0)
#     # q_vals = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE), dtype=tl.float32)

#     # 步骤3: rotate_half_x - 向量化计算 rotated_vals [BLOCK_M, BLOCK_SIZE]
#     half_dim = dim // 2
#     is_second = offsets_n >= half_dim  # [1, BLOCK_SIZE]
#     rotated_vals_bf = tl.where(is_second, -q_vals, q_vals)
#     rotated_offsets_n = tl.where(is_second, offsets_n - half_dim, offsets_n + half_dim)
#     rotated_offsets_n_all = head_idx * dim + rotated_offsets_n
#     mask_rotated_offsets_n_all = rotated_offsets_n_all < total_dim
#     rotated_n_base = offsets_m[:, None] * total_dim + rotated_offsets_n_all[None, :]  # [BLOCK_M, BLOCK_SIZE]
#     tl.store(mm2_split2_ptr_rotated + rotated_n_base, rotated_vals_bf, mask=mask_m[:, None] and mask_rotated_offsets_n_all[None, :])
#     # 这条命令存在问题
#     rotated_vals = tl.load(mm2_split2_ptr_rotated + q_base, mask=mask_m[:, None] and mask_q[None, :], other=0.0)
#     # rotated_vals = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE), dtype=tl.float32)
#     # rotated_sign = tl.where(is_second, 1.0, -1.0)  # [1, BLOCK_SIZE]
#     # rotated_vals = rotated_base_vals * rotated_sign  # 广播乘法
    
#     # 步骤4: per_matmul - element-wise [BLOCK_M, BLOCK_SIZE]
#     output_vals = q_vals * cos_vals + rotated_vals * sin_vals
    
#     # 步骤5: 存储
#     tl.store(output_ptr + out_base, output_vals, mask=mask_m[:, None] and mask_q[None, :])


@triton.jit
def pad_head_rotate_half_x_per_matmul_fused(
    output_ptr, mm2_split2_ptr, sin2_ptr, cos2_ptr, mm2_split2_ptr_rotated,
    tokens: tl.constexpr, head_num: tl.constexpr, hidden_size_rope_q_head: tl.constexpr,
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE: tl.constexpr, BLOCK_SIZE_FULL: tl.constexpr  # BLOCK_SIZE_M: token tile; BLOCK_SIZE: half head dim; BLOCK_SIZE_FULL: full head dim (kept for launch compatibility)
):
    """Fused RoPE: output = q * cos + rotate_half(q) * sin, per (token block, head).

    Memory layouts (row-major, contiguous):
      mm2_split2_ptr / output_ptr : [tokens, head_num * dim]
      sin2_ptr / cos2_ptr         : [tokens, dim], shared by all heads (implicit pad)
    mm2_split2_ptr_rotated is unused; it is kept so existing launch sites still work.
    """
    pid_m = tl.program_id(axis=0)
    pid_head = tl.program_id(axis=1)
    if pid_head >= head_num:
        return

    dim = hidden_size_rope_q_head
    # Integer division: '/' yields float constexprs, which would poison the
    # integer offset arithmetic and comparisons below.
    half_dim = dim // 2
    total_dim = dim * head_num
    if pid_m >= tl.cdiv(tokens, BLOCK_SIZE_M):
        return

    # Token-block rows.
    offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    mask_m = offsets_m < tokens

    # Column offsets for the first / second half of this head's dim.
    offsets_n_1 = tl.arange(0, BLOCK_SIZE)
    offsets_n_2 = BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask_n_1 = offsets_n_1 < half_dim
    mask_n_2 = offsets_n_2 < dim
    offsets_q_1 = pid_head * dim + offsets_n_1
    offsets_q_2 = pid_head * dim + offsets_n_2
    # Bound both halves by the full row width. The original compared the
    # first half against total_dim // 2, which masked out the first half of
    # every head in the upper half of the head range.
    mask_q_1 = offsets_q_1 < total_dim
    mask_q_2 = offsets_q_2 < total_dim

    # Elementwise '&' — Python 'and' is not defined for tensor operands.
    mask_1 = mask_m[:, None] & mask_n_1[None, :]
    mask_2 = mask_m[:, None] & mask_n_2[None, :]

    # sin/cos are indexed per (token, dim) and broadcast over heads.
    sin_cos_base_1 = offsets_m[:, None] * dim + offsets_n_1[None, :]  # [BLOCK_M, BLOCK_SIZE]
    sin_cos_base_2 = offsets_m[:, None] * dim + offsets_n_2[None, :]  # [BLOCK_M, BLOCK_SIZE]
    sin_vals_1 = tl.load(sin2_ptr + sin_cos_base_1, mask=mask_1, other=0.0)
    sin_vals_2 = tl.load(sin2_ptr + sin_cos_base_2, mask=mask_2, other=0.0)
    cos_vals_1 = tl.load(cos2_ptr + sin_cos_base_1, mask=mask_1, other=0.0)
    cos_vals_2 = tl.load(cos2_ptr + sin_cos_base_2, mask=mask_2, other=0.0)

    q_base_1 = offsets_m[:, None] * total_dim + offsets_q_1[None, :]  # [BLOCK_M, BLOCK_SIZE]
    q_base_2 = offsets_m[:, None] * total_dim + offsets_q_2[None, :]  # [BLOCK_M, BLOCK_SIZE]
    q_mask_1 = mask_m[:, None] & mask_q_1[None, :]
    q_mask_2 = mask_m[:, None] & mask_q_2[None, :]
    q_vals_1 = tl.load(mm2_split2_ptr + q_base_1, mask=q_mask_1, other=0.0)
    q_vals_2 = tl.load(mm2_split2_ptr + q_base_2, mask=q_mask_2, other=0.0)

    # rotate_half maps (q1, q2) -> (-q2, q1), so:
    #   out1 = q1 * cos1 - q2 * sin1
    #   out2 = q2 * cos2 + q1 * sin2   (the original subtracted here, breaking RoPE)
    output_vals_1 = cos_vals_1 * q_vals_1 - sin_vals_1 * q_vals_2
    output_vals_2 = cos_vals_2 * q_vals_2 + sin_vals_2 * q_vals_1

    tl.store(output_ptr + q_base_1, output_vals_1, mask=q_mask_1)
    tl.store(output_ptr + q_base_2, output_vals_2, mask=q_mask_2)
# 参考实现：PyTorch 等价逻辑（批量版，支持 BLOCK_M）

def pad_head_num(
    head_dim_sin_cos: torch.Tensor,
    head_num: int
    ) -> torch.Tensor:
    '''
    Broadcast a per-token sin/cos table across all heads.

    Repeats a [tokens, dim] tensor column-wise into [tokens, head_num * dim]
    so every head shares the same rotary table.
    '''
    return head_dim_sin_cos.repeat(1, head_num)

def rotate_half_x(
        q_temp: torch.Tensor,
        head_num: int
        ) -> torch.Tensor:
    '''
    RoPE half-rotation applied head by head.

    Splits [n, head_num * head_dim] into head_num slices; within each slice
    the halves (x1, x2) become (-x2, x1); the slices are concatenated back
    into the original layout.
    '''
    halves = [torch.chunk(head, 2, dim=1)
              for head in torch.chunk(q_temp, head_num, dim=1)]
    rotated = [torch.cat((-hi, lo), dim=1) for lo, hi in halves]
    return torch.cat(rotated, dim=1)

def reference_rope_fusion(mm2_split2, sin2, cos2, BLOCK_SIZE_M=64):
    """Vectorized PyTorch reference: pad sin/cos + rotate_half + elementwise RoPE.

    Args:
        mm2_split2: [tokens, head_num, dim] query tensor.
        sin2, cos2: [tokens, dim] rotary tables shared by all heads.
        BLOCK_SIZE_M: unused; kept for backward-compatible signature.

    Returns:
        [tokens, head_num, dim] float16 tensor, q * cos + rotate_half(q) * sin.
    """
    half_dim = mm2_split2.shape[-1] // 2
    # rotate_half per head along the last dim: (x1, x2) -> (-x2, x1).
    rotated = torch.cat(
        (-mm2_split2[..., half_dim:], mm2_split2[..., :half_dim]), dim=-1
    )
    # unsqueeze(1) broadcasts the per-token sin/cos over the head axis,
    # replacing the explicit tile + per-head Python loop.
    rope_res = mm2_split2 * cos2.unsqueeze(1) + rotated * sin2.unsqueeze(1)
    # Kernel output is fp16; keep the reference in the same dtype.
    return rope_res.to(torch.float16)

if __name__ == "__main__":
    # Accuracy test: fused Triton kernel vs. the PyTorch reference.
    # NOTE(review): UB (unified buffer) accesses may not be 32-byte aligned on NPU.
    torch.npu.set_device(4)
    torch.manual_seed(42)  # fixed seed for reproducibility

    # Real activations from the mlapo_torch pipeline: [tokens, head_num, dim].
    mm2_split2 = main()[0].npu()
    tokens, head_num, dim = mm2_split2.shape
    BLOCK_SIZE_M = 64       # token tile size per program
    BLOCK_SIZE = dim // 2   # half head dim: the kernel handles the two halves separately
    BLOCK_SIZE_FULL = dim
    sin2 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(tokens, dim))).to(torch.float16).npu()
    cos2 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(tokens, dim))).to(torch.float16).npu()

    # Kernel inputs: flatten q to [tokens, head_num * dim].
    mm2_flat = mm2_split2.reshape(tokens, head_num * dim)
    output_triton = torch.empty_like(mm2_flat, dtype=torch.float16, device=DEVICE)
    mm2_rotated = torch.empty_like(mm2_flat, dtype=torch.float16, device=DEVICE)  # scratch, unused by the kernel

    # Launch one program per (token block, head).
    grid = (triton.cdiv(tokens, BLOCK_SIZE_M), head_num)
    pad_head_rotate_half_x_per_matmul_fused[grid](
        output_triton, mm2_flat, sin2, cos2, mm2_rotated,
        tokens=tokens, head_num=head_num, hidden_size_rope_q_head=dim,
        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE=BLOCK_SIZE, BLOCK_SIZE_FULL=BLOCK_SIZE_FULL
    )

    # Reshape kernel output to [tokens, head_num, dim] for comparison.
    output_triton = output_triton.reshape(tokens, head_num, dim)
    output_ref = reference_rope_fusion(mm2_split2, sin2, cos2)

    # Accuracy comparison; fp16 inputs warrant loose tolerances.
    ATOL = 1.5e-1
    RTOL = 1.5e-1
    abs_diff = torch.abs(output_ref - output_triton)
    max_abs_err = torch.max(abs_diff).item()
    rel_err = torch.max(abs_diff / (torch.abs(output_ref) + 1e-8)).item()
    allclose = torch.allclose(output_ref, output_triton, atol=ATOL, rtol=RTOL)

    print(f"测试形状: mm2_split2=({tokens}, {head_num}, {dim}), sin/cos=({tokens}, {dim})")
    print(f"Grid: ({triton.cdiv(tokens, BLOCK_SIZE_M)}, {head_num})")
    print(f"Max Absolute Error: {max_abs_err:.6f}")
    print(f"Max Relative Error: {rel_err:.6f}")
    # Report the tolerances actually used (the old message claimed 1e-5).
    print(f"Allclose (atol={ATOL}, rtol={RTOL}): {allclose}")

    # Sample output: first token, first head.
    print(f"Reference [0, 0, :]: {output_ref[0, 0, :].cpu()}")
    print(f"Triton [0, 0, :]: {output_triton[0, 0, :].cpu()}")

    if allclose:
        print("测试通过！内核逻辑正确。")
    else:
        print("测试失败，请检查内核实现。")