import torch 
import torch_npu
import triton
import triton.language as tl
from mlapo_torch import process_deq_scale, QUANTMAX, QUANTMIN, MLAPO

epsilon = 1e-6
DEVICE = "npu"

'''
NOTE: the matmul conversion currently has an issue; the conversion
process is under investigation.
'''

# Quantized RMSNorm: one program normalizes one row and emits int8.
@triton.jit
def rms_norm_quant(
    input_ptr,
    gamma_ptr,
    beta_ptr,
    output_ptr,
    quant_scale_f,
    quant_offset_f,
    QUANTMAX: tl.constexpr,
    QUANTMIN: tl.constexpr,
    N: tl.constexpr,
    H: tl.constexpr,
    EPS: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
):
    """RMS-normalize row `program_id(0)` of an (N, H) input, apply the
    gamma/beta affine, then quantize to int8 with round-half-away-from-zero
    and clamping to [QUANTMIN, QUANTMAX]."""
    row = tl.program_id(axis=0)
    if row >= N:
        return

    # First pass: accumulate the row's sum of squares, tile by tile.
    lane = tl.arange(0, BLOCK_SIZE)
    n_tiles = tl.cdiv(H, BLOCK_SIZE)
    acc = 0.0
    for tile in range(0, n_tiles):
        cols = tile * BLOCK_SIZE + lane
        in_row = cols < H
        vals = tl.load(input_ptr + row * H + cols, mask=in_row, other=0.0)
        acc += tl.sum(vals * vals, axis=0)

    inv_rms = 1.0 / tl.sqrt(acc / H + EPS)

    # Second pass: normalize, affine-transform, quantize, and store.
    for tile in range(0, n_tiles):
        cols = tile * BLOCK_SIZE + lane
        in_row = cols < H
        vals = tl.load(input_ptr + row * H + cols, mask=in_row, other=0.0)
        g = tl.load(gamma_ptr + cols, mask=in_row, other=0.0)
        b = tl.load(beta_ptr + cols, mask=in_row, other=0.0)
        scaled = (vals * inv_rms * g + b) * quant_scale_f + quant_offset_f
        # Round half away from zero.
        rounded = tl.where(
            scaled >= 0.0,
            tl.floor(scaled + 0.5),
            tl.ceil(scaled - 0.5),
        )
        # Clamp into the representable quantization range.
        clamped = tl.where(
            rounded > QUANTMAX,
            float(QUANTMAX),
            tl.where(rounded < QUANTMIN, float(QUANTMIN), rounded),
        )
        tl.store(output_ptr + row * H + cols, tl.cast(clamped, tl.int8), mask=in_row)

# 量化版matmul
# @triton.jit
# def matmul_bias_scale(
#     a_ptr, b_ptr, c_ptr, bias_ptr, de_scale_ptr,
#     M, N, K,
#     stride_am, stride_ak,
#     stride_bk, stride_bn,
#     stride_cm, stride_cn,
#     BLOCK_SIZE_M: tl.constexpr,
#     BLOCK_SIZE_N: tl.constexpr,
#     BLOCK_SIZE_K: tl.constexpr,
#     GROUP_SIZE_M: tl.constexpr,
# ):
#     pid = tl.program_id(axis=0)
#     num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
#     num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
#     num_pid_in_group = GROUP_SIZE_M * num_pid_n
#     group_id = pid // num_pid_in_group
#     first_pid_m = group_id * GROUP_SIZE_M
#     group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
#     pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
#     pid_n = (pid % num_pid_in_group) // group_size_m
#     offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
#     offsets_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
#     offsets_k = tl.arange(0, BLOCK_SIZE_K)
#     a_ptrs = a_ptr + (offsets_m[:, None] * stride_am + offsets_k[None, :] * stride_ak)
#     b_ptrs = b_ptr + (offsets_k[:, None] * stride_bk + offsets_n[None, :] * stride_bn)
#     accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
#     num_blocks = tl.cdiv(K, BLOCK_SIZE_K)
#     for k in range(0, num_blocks):
#         # 在 for k 循环中
#         a = tl.load(a_ptrs, mask=offsets_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
#         b = tl.load(b_ptrs, mask=offsets_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
#         accumulator += tl.dot(a, b)
#         a_ptrs += BLOCK_SIZE_K * stride_ak
#         b_ptrs += BLOCK_SIZE_K * stride_bk
#     c_ptrs = c_ptr + offsets_m[:, None] * stride_cm + offsets_n[None, :] * stride_cn
#     offsets_bias = offsets_n[None, :]
#     offsets_de_scale = offsets_n
#     bias = tl.load(bias_ptr + offsets_bias, mask=offsets_bias < N, other=0.0)
#     de_scale = tl.load(de_scale_ptr + offsets_de_scale, mask=offsets_de_scale < N, other=1.0)
#     bias_float32 = tl.cast(bias, tl.float32)
#     accumulator = accumulator + bias_float32
#     accumulator = accumulator * de_scale
#     offsets_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
#     offsets_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
#     c_mask = (offsets_cm[:, None] < M) & (offsets_cn[None, :] < N)
#     tl.store(c_ptrs, accumulator, mask=c_mask)

@triton.jit
def matmul_bias_scale(
    a_ptr, b_ptr, c_ptr, bias_ptr, de_scale_ptr,
    M, N, K,
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Quantized matmul with fused epilogue: C = (A @ B + bias) * de_scale.

    A is (M, K). B tiles are loaded as (BLOCK_N, BLOCK_K) and transposed
    in-register before the dot. `bias` and `de_scale` are per-output-column
    vectors of length N. Programs are ordered in groups for L2 reuse.
    """
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Grouped ordering: consecutive programs share the same A-row tiles.
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offsets_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offsets_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs_base = a_ptr + (offsets_m[:, None] * stride_am + offsets_k[None, :] * stride_ak)
    b_ptrs_base = b_ptr + (offsets_k[None, :] * stride_bk + offsets_n[:, None] * stride_bn)
    msk_m = offsets_m < M
    msk_n = offsets_n < N
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    num_blocks = tl.cdiv(K, BLOCK_SIZE_K)
    for k in range(0, num_blocks):
        a_ptrs = a_ptrs_base + k * BLOCK_SIZE_K * stride_ak
        b_ptrs = b_ptrs_base + k * BLOCK_SIZE_K * stride_bk
        # BUGFIX: masks must be combined element-wise with `&`; Python `and`
        # is not valid between Triton tensors.
        k_mask = offsets_k[None, :] < K - k * BLOCK_SIZE_K
        a = tl.load(a_ptrs, mask=msk_m[:, None] & k_mask, other=0.0)
        b = tl.load(b_ptrs, mask=msk_n[:, None] & k_mask, other=0.0)
        # b tile is (BLOCK_N, BLOCK_K); transpose so the dot contracts over K.
        accumulator += tl.dot(a, tl.trans(b))
    c_ptrs = c_ptr + offsets_m[:, None] * stride_cm + offsets_n[None, :] * stride_cn
    # Epilogue: per-column bias add, then per-column dequantization scale.
    bias = tl.load(bias_ptr + offsets_n[None, :], mask=msk_n[None, :], other=0.0)
    de_scale = tl.load(de_scale_ptr + offsets_n, mask=msk_n, other=1.0)
    accumulator = (accumulator + tl.cast(bias, tl.float32)) * de_scale
    c_mask = msk_m[:, None] & msk_n[None, :]
    tl.store(c_ptrs, accumulator, mask=c_mask)

# Plain (non-quantized) tiled matmul.
@triton.jit
def matmul(
    a_ptr, b_ptr, c_ptr,
    M, N, K,
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Tiled GEMM C = A @ B with grouped program ordering; fp32 accumulator."""
    pid = tl.program_id(axis=0)
    grid_m = tl.cdiv(M, BLOCK_SIZE_M)
    grid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Map the linear pid to an (pid_m, pid_n) tile with L2-friendly grouping.
    programs_per_group = GROUP_SIZE_M * grid_n
    group = pid // programs_per_group
    base_m = group * GROUP_SIZE_M
    rows_in_group = min(grid_m - base_m, GROUP_SIZE_M)
    pid_m = base_m + ((pid % programs_per_group) % rows_in_group)
    pid_n = (pid % programs_per_group) // rows_in_group

    rm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    rn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    rk = tl.arange(0, BLOCK_SIZE_K)
    a_tile = a_ptr + rm[:, None] * stride_am + rk[None, :] * stride_ak
    b_tile = b_ptr + rk[:, None] * stride_bk + rn[None, :] * stride_bn

    acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for step in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        k_left = K - step * BLOCK_SIZE_K
        a = tl.load(a_tile, mask=(rm[:, None] < M) & (rk[None, :] < k_left), other=0.0)
        b = tl.load(b_tile, mask=(rk[:, None] < k_left) & (rn[None, :] < N), other=0.0)
        acc += tl.dot(a, b)
        a_tile += BLOCK_SIZE_K * stride_ak
        b_tile += BLOCK_SIZE_K * stride_bk

    c_tile = c_ptr + rm[:, None] * stride_cm + rn[None, :] * stride_cn
    tl.store(c_tile, acc, mask=(rm[:, None] < M) & (rn[None, :] < N))

@triton.jit
def batch_matmul(
    a_ptr, b_ptr, c_ptr,
    B, M, N, K,
    stride_ab, stride_am, stride_ak,
    stride_bb, stride_bk, stride_bn,
    stride_cb, stride_cm, stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Batched tiled GEMM C[b] = A[b] @ B[b]; grid axis 0 is the batch."""
    batch = tl.program_id(axis=0)
    pid = tl.program_id(axis=1)
    grid_m = tl.cdiv(M, BLOCK_SIZE_M)
    grid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # L2-friendly grouped ordering of the (pid_m, pid_n) tiles.
    programs_per_group = GROUP_SIZE_M * grid_n
    group = pid // programs_per_group
    base_m = group * GROUP_SIZE_M
    rows_in_group = min(grid_m - base_m, GROUP_SIZE_M)
    pid_m = base_m + ((pid % programs_per_group) % rows_in_group)
    pid_n = (pid % programs_per_group) // rows_in_group

    rm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    rn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    rk = tl.arange(0, BLOCK_SIZE_K)
    a_tile = a_ptr + (batch * stride_ab + rm[:, None] * stride_am + rk[None, :] * stride_ak)
    b_tile = b_ptr + (batch * stride_bb + rk[:, None] * stride_bk + rn[None, :] * stride_bn)

    acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for step in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        k_left = K - step * BLOCK_SIZE_K
        a = tl.load(a_tile, mask=(rm[:, None] < M) & (rk[None, :] < k_left), other=0.0)
        b = tl.load(b_tile, mask=(rk[:, None] < k_left) & (rn[None, :] < N), other=0.0)
        acc += tl.dot(a, b)
        a_tile += BLOCK_SIZE_K * stride_ak
        b_tile += BLOCK_SIZE_K * stride_bk

    c_tile = c_ptr + (batch * stride_cb + rm[:, None] * stride_cm + rn[None, :] * stride_cn)
    out_mask = (batch < B) & (rm[:, None] < M) & (rn[None, :] < N)
    tl.store(c_tile, acc, mask=out_mask)

'''
Before this op, sin has shape (tokens, hidden_size_rope_k);
after this op, it has shape (tokens, hidden_size_rope_k * head_num).
'''
@triton.jit
def pad_head_num(
    pad_sin2,
    pad_cos2,
    sin2,
    cos2,
    tokens: tl.constexpr,
    head_num: tl.constexpr,
    hidden_size_rope_k: tl.constexpr,
    total_output_dim: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,  # fixed to hidden_size_rope_k
):
    """Replicate each token's sin/cos row `head_num` times along the feature dim.

    Input rows are (hidden_size_rope_k,); output rows are
    (hidden_size_rope_k * head_num,). One program handles one token.
    """
    token = tl.program_id(axis=0)
    if token >= tokens:
        return

    # Load this token's full input row as one 1D block.
    cols = tl.arange(0, BLOCK_SIZE_K)
    valid = cols < hidden_size_rope_k
    src = token * hidden_size_rope_k + cols
    sin_row = tl.load(sin2 + src, mask=valid, other=0.0)
    cos_row = tl.load(cos2 + src, mask=valid, other=0.0)

    # Write the same row once per head into the padded output.
    for h in range(head_num):
        dst = token * total_output_dim + h * hidden_size_rope_k + cols
        tl.store(pad_sin2 + dst, sin_row, mask=valid)
        tl.store(pad_cos2 + dst, cos_row, mask=valid)

@triton.jit
def rotate_half_x(
    output_ptr,
    input_ptr,
    tokens: tl.constexpr,
    head_num: tl.constexpr,
    hidden_size_rope_q_head: tl.constexpr,
    BLOCK_SIZE_M: tl.constexpr,   # token-dimension tile size
    BLOCK_SIZE_N: tl.constexpr,   # feature-dimension tile size
):
    """RoPE half rotation: per head, map q = [q1, q2] to [-q2, q1].

    Input and output are (tokens, head_num * hidden_size_rope_q_head);
    each head's dim is split into a front and a back half.
    """
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)
    # BUGFIX: row offsets must use BLOCK_SIZE_M and column offsets
    # BLOCK_SIZE_N; the original mixed the two constants, which was only
    # correct when they happened to be equal.
    block_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    block_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)

    total_dim = head_num * hidden_size_rope_q_head
    mask_m = block_m < tokens
    mask_n = block_n < total_dim
    mask = mask_m[:, None] & mask_n[None, :]

    # Load the q tile.
    ptr = input_ptr + (block_m[:, None] * total_dim + block_n[None, :])
    q = tl.load(ptr, mask=mask, other=0.0)

    # Decompose the flat column index into (head, position within head).
    half_dim = hidden_size_rope_q_head // 2
    head_id = block_n // hidden_size_rope_q_head
    local_n = block_n % hidden_size_rope_q_head
    is_second_half = local_n >= half_dim

    # Negate only the second half of each head.
    rotated = tl.where(is_second_half, -q, q)

    # Swap the halves: second half moves to the front, first half to the back.
    new_local_n = tl.where(is_second_half, local_n - half_dim, local_n + half_dim)
    new_block_n = head_id * hidden_size_rope_q_head + new_local_n

    # Scatter to the swapped positions (new_block_n is valid iff block_n is).
    out_ptr = output_ptr + (block_m[:, None] * total_dim + new_block_n[None, :])
    tl.store(out_ptr, rotated, mask=mask)

@triton.jit
def per_mamtul(
    output_ptr,
    origin_q_ptr,
    pad_cos_ptr,
    rotate_q_ptr,
    pad_sin_ptr,
    M, N,
    stride_m, stride_n,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Element-wise RoPE combine: out = origin_q * cos + rotate_q * sin.

    All five tensors share the same (M, N) layout and strides.
    """
    pid = tl.program_id(axis=0)
    grid_m = tl.cdiv(M, BLOCK_SIZE_M)
    grid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Grouped ordering (same scheme as the matmul kernels).
    programs_per_group = GROUP_SIZE_M * grid_n
    group = pid // programs_per_group
    base_m = group * GROUP_SIZE_M
    rows_in_group = min(grid_m - base_m, GROUP_SIZE_M)
    pid_m = base_m + ((pid % programs_per_group) % rows_in_group)
    pid_n = (pid % programs_per_group) // rows_in_group

    rows = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    cols = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    tile = rows[:, None] * stride_m + cols[None, :] * stride_n
    mask = (rows[:, None] < M) & (cols[None, :] < N)

    q = tl.load(origin_q_ptr + tile, mask=mask, other=0.0)
    c = tl.load(pad_cos_ptr + tile, mask=mask, other=0.0)
    rq = tl.load(rotate_q_ptr + tile, mask=mask, other=0.0)
    s = tl.load(pad_sin_ptr + tile, mask=mask, other=0.0)
    tl.store(output_ptr + tile, q * c + rq * s, mask=mask)

# @triton.jit
# def pad_head_rotate_half_x_per_matmul_fused(
#     output_ptr, mm2_split2_ptr, sin2_ptr, cos2_ptr, mm2_split2_ptr_rotated,
#     tokens: tl.constexpr, head_num: tl.constexpr, hidden_size_rope_q_head: tl.constexpr,
#     BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE: tl.constexpr,  # BLOCK_SIZE_M for tokens, BLOCK_SIZE for dim
# ):
#     pid_m = tl.program_id(axis=0)
#     pid_head = tl.program_id(axis=1)
#     if pid_head >= head_num:
#         return
    
#     dim = hidden_size_rope_q_head
#     total_dim = dim * head_num
#     num_pid_m = tl.cdiv(tokens, BLOCK_SIZE_M)
#     if pid_m >= num_pid_m:
#         return
    
#     # Token 块偏移
#     offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
#     mask_m = offsets_m < tokens
#     # Head 固定
#     head_idx = pid_head
#     # 假设 BLOCK_SIZE == dim，offsets_n = tl.arange(0, dim)
#     offsets_n = tl.arange(0, BLOCK_SIZE)
#     mask_n = offsets_n < dim
#     offsets_q = head_idx * dim + offsets_n
#     mask_q = offsets_q < total_dim
#     # 广播 mask
#     mask = mask_m[:, None] and mask_n[None, :]
#     # 基址：sin/cos per token, q/output per (token block, head)
#     sin_cos_base = offsets_m[:, None] * dim + offsets_n[None, :]  # [BLOCK_M, BLOCK_SIZE]
#     q_base = offsets_m[:, None] * total_dim + offsets_q[None, :]  # [BLOCK_M, BLOCK_SIZE]
#     out_base = q_base  # 输出同 q
#     # 步骤1: pad_head_num - 加载 sin/cos (隐式 pad，复用 per token)
#     sin_vals = tl.load(sin2_ptr + sin_cos_base, mask=mask, other=0.0)  # [BLOCK_M, BLOCK_SIZE]
#     cos_vals = tl.load(cos2_ptr + sin_cos_base, mask=mask, other=0.0)
#     # sin_vals = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE), dtype=tl.float32)
#     # cos_vals = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE), dtype=tl.float32)
#     # 步骤2: 加载 origin_q (q_vals)
#     q_vals = tl.load(mm2_split2_ptr + q_base, mask=mask_m[:, None] and mask_q[None, :], other=0.0)
#     # q_vals = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE), dtype=tl.float32)
#     # 步骤3: rotate_half_x - 向量化计算 rotated_vals [BLOCK_M, BLOCK_SIZE]
#     half_dim = dim // 2
#     is_second = offsets_n >= half_dim  # [1, BLOCK_SIZE]
#     rotated_vals_bf = tl.where(is_second, -q_vals, q_vals)
#     rotated_offsets_n = tl.where(is_second, offsets_n - half_dim, offsets_n + half_dim)
#     rotated_offsets_n_all = head_idx * dim + rotated_offsets_n
#     mask_rotated_offsets_n_all = rotated_offsets_n_all < total_dim
#     rotated_n_base = offsets_m[:, None] * total_dim + rotated_offsets_n_all[None, :]  # [BLOCK_M, BLOCK_SIZE]
#     tl.store(mm2_split2_ptr_rotated + rotated_n_base, rotated_vals_bf, mask=mask_m[:, None] and mask_rotated_offsets_n_all[None, :])
#     # 这条命令存在问题
#     rotated_vals = tl.load(mm2_split2_ptr_rotated + q_base, mask=mask_m[:, None] and mask_q[None, :], other=0.0)
#     # rotated_vals = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE), dtype=tl.float32)
#     # rotated_sign = tl.where(is_second, 1.0, -1.0)  # [1, BLOCK_SIZE]
#     # rotated_vals = rotated_base_vals * rotated_sign  # 广播乘法
#     # 步骤4: per_matmul - element-wise [BLOCK_M, BLOCK_SIZE]
#     output_vals = q_vals * cos_vals + rotated_vals * sin_vals
#     # 步骤5: 存储
#     tl.store(output_ptr + out_base, output_vals, mask=mask_m[:, None] and mask_q[None, :])


@triton.jit
def pad_head_rotate_half_x_per_matmul_fused(
    output_ptr, mm2_split2_ptr, sin2_ptr, cos2_ptr,
    tokens: tl.constexpr, head_num: tl.constexpr, hidden_size_rope_q_head: tl.constexpr,
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE: tl.constexpr, BLOCK_SIZE_FULL: tl.constexpr  # BLOCK_SIZE_M for tokens, BLOCK_SIZE for half a head dim
):
    """Fused pad_head_num + rotate_half_x + element-wise RoPE combine.

    One program handles one (token block, head) pair. With q = [q1, q2]
    (front/back halves of one head) the outputs are:
        out1 = cos1 * q1 - sin1 * q2
        out2 = cos2 * q2 + sin2 * q1
    which equals q * cos + rotate_half(q) * sin with rotate_half(q) = [-q2, q1].
    BLOCK_SIZE_FULL is kept for call-site compatibility but is unused.
    """
    pid_m = tl.program_id(axis=1)
    pid_head = tl.program_id(axis=0)
    if pid_head >= head_num:
        return

    dim = hidden_size_rope_q_head
    # BUGFIX: integer division — the original `/ 2` produced a float, making
    # the derived offsets and mask comparisons float-typed.
    half_dim = hidden_size_rope_q_head // 2
    total_dim = hidden_size_rope_q_head * head_num
    num_pid_m = tl.cdiv(tokens, BLOCK_SIZE_M)
    if pid_m >= num_pid_m:
        return

    # Token-block offsets.
    offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    mask_m = offsets_m < tokens
    head_idx = pid_head
    # Front (n_1) and back (n_2) halves of this head's rope dimension.
    offsets_n_1 = tl.arange(0, BLOCK_SIZE)
    offsets_n_2 = BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask_n_1 = offsets_n_1 < half_dim
    mask_n_2 = offsets_n_2 < dim
    offsets_q_1 = head_idx * dim + offsets_n_1
    offsets_q_2 = head_idx * dim + offsets_n_2
    mask_q_1 = offsets_q_1 < total_dim
    mask_q_2 = offsets_q_2 < total_dim
    # BUGFIX: combine masks element-wise with `&`; Python `and` is not valid
    # between Triton tensors.
    mask_1 = mask_m[:, None] & mask_n_1[None, :]
    mask_2 = mask_m[:, None] & mask_n_2[None, :]
    # sin/cos are stored per token (row stride = dim); q/output rows have
    # stride total_dim and this head's slice starts at head_idx * dim.
    sin_cos_base_1 = offsets_m[:, None] * dim + offsets_n_1[None, :]
    sin_cos_base_2 = offsets_m[:, None] * dim + offsets_n_2[None, :]
    sin_vals_1 = tl.load(sin2_ptr + sin_cos_base_1, mask=mask_1, other=0.0)
    sin_vals_2 = tl.load(sin2_ptr + sin_cos_base_2, mask=mask_2, other=0.0)
    cos_vals_1 = tl.load(cos2_ptr + sin_cos_base_1, mask=mask_1, other=0.0)
    cos_vals_2 = tl.load(cos2_ptr + sin_cos_base_2, mask=mask_2, other=0.0)

    q_base_1 = offsets_m[:, None] * total_dim + offsets_q_1[None, :]
    q_base_2 = offsets_m[:, None] * total_dim + offsets_q_2[None, :]

    q_vals_1 = tl.load(mm2_split2_ptr + q_base_1, mask=mask_m[:, None] & mask_q_1[None, :], other=0.0)
    q_vals_2 = tl.load(mm2_split2_ptr + q_base_2, mask=mask_m[:, None] & mask_q_2[None, :], other=0.0)

    output_vals_1 = cos_vals_1 * q_vals_1 - sin_vals_1 * q_vals_2
    output_vals_2 = cos_vals_2 * q_vals_2 + sin_vals_2 * q_vals_1

    tl.store(output_ptr + q_base_1, output_vals_1, mask=mask_m[:, None] & mask_q_1[None, :])
    tl.store(output_ptr + q_base_2, output_vals_2, mask=mask_m[:, None] & mask_q_2[None, :])

# @triton.jit
# def fused_batch_matmul_rope(
#     a_non_ptr, a_rope_ptr, b_ptr, sin_ptr, cos_ptr, c_ptr, stride_bb,
#     hidden_size_nope_q, hidden_size_wuk_head, hidden_size_rope_q_head, head_num, tokens,
#     stride_a_m, stride_a_head, stride_a_k,
#     stride_b_k, stride_b_n,
#     stride_sin_m, stride_sin_n,
#     stride_cos_m, stride_cos_n,
#     stride_c_m, stride_c_head, stride_c_n,
#     BLOCK_SIZE_M: tl.constexpr,
#     BLOCK_SIZE_N: tl.constexpr,
#     BLOCK_SIZE_K: tl.constexpr,
#     BLOCK_SIZE_HALF: tl.constexpr,
# ):
#     pid_head = tl.program_id(0)
#     pid_m = tl.program_id(1)
#     pid_n = tl.program_id(2)
#     if pid_head >= head_num:
#         return
#     num_pid_m = tl.cdiv(tokens, BLOCK_SIZE_M)
#     if pid_m >= num_pid_m:
#         return
#     num_pid_n_total = tl.cdiv(hidden_size_wuk_head + hidden_size_rope_q_head, BLOCK_SIZE_N)
#     if pid_n >= num_pid_n_total:
#         return

#     offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
#     offsets_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
#     mask_m = offsets_m < tokens
#     mask_n = offsets_n < (hidden_size_wuk_head + hidden_size_rope_q_head)
#     mask = mask_m[:, None] & mask_n[None, :]

#     num_pid_n_non = tl.cdiv(hidden_size_wuk_head, BLOCK_SIZE_N)
#     if pid_n < num_pid_n_non:
#         # Matmul mode for non-rope part
#         offsets_k = tl.arange(0, BLOCK_SIZE_K)
#         accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
#         num_blocks_k = tl.cdiv(hidden_size_nope_q, BLOCK_SIZE_K)
#         a_ptrs_base = a_non_ptr + offsets_m[:, None] * stride_a_m + pid_head * stride_a_head + offsets_k[None, :] * stride_a_k
#         b_ptrs_base = b_ptr + pid_head * stride_bb + offsets_k[None, :] * stride_b_k + offsets_n[:, None] * stride_b_n
#         for k in range(0, num_blocks_k):
#             k_start = k * BLOCK_SIZE_K
#             mask_k = offsets_k < (hidden_size_nope_q - k_start)
#             a_ptrs = a_ptrs_base + k_start * stride_a_k
#             b_ptrs = b_ptrs_base + k_start * stride_b_k
#             a = tl.load(a_ptrs, mask=mask_m[:, None] & mask_k[None, :], other=0.0)
#             b = tl.load(b_ptrs, mask=mask_k[None, :] & mask_n[:, None], other=0.0)
#             b_trans = tl.trans(b)
#             accumulator += tl.dot(a, b_trans)
#         c_ptrs = c_ptr + offsets_m[:, None] * stride_c_m + pid_head * stride_c_head + offsets_n[None, :] * stride_c_n
#         tl.store(c_ptrs, accumulator, mask=mask)
#     else:
#         # Rope mode for rope part
#         half_dim = hidden_size_rope_q_head // 2
#         offsets_n_1 = tl.arange(0, BLOCK_SIZE_HALF)
#         offsets_n_2 = half_dim + tl.arange(0, BLOCK_SIZE_HALF)
#         mask_n_1 = offsets_n_1 < half_dim
#         mask_n_2 = offsets_n_2 < hidden_size_rope_q_head
#         mask_1 = mask_m[:, None] & mask_n_1[None, :]
#         mask_2 = mask_m[:, None] & mask_n_2[None, :]

#         # Load q1 and q2
#         q_base_1 = offsets_m[:, None] * stride_a_m + pid_head * stride_a_head + offsets_n_1[None, :] * stride_a_k
#         q_base_2 = offsets_m[:, None] * stride_a_m + pid_head * stride_a_head + offsets_n_2[None, :] * stride_a_k
#         q_vals_1 = tl.load(a_rope_ptr + q_base_1, mask=mask_1, other=0.0)
#         q_vals_2 = tl.load(a_rope_ptr + q_base_2, mask=mask_2, other=0.0)

#         # Load sin1, cos1, sin2, cos2
#         sin_cos_base_1 = offsets_m[:, None] * stride_sin_m + offsets_n_1[None, :] * stride_sin_n
#         sin_cos_base_2 = offsets_m[:, None] * stride_sin_m + offsets_n_2[None, :] * stride_sin_n
#         sin_vals_1 = tl.load(sin_ptr + sin_cos_base_1, mask=mask_1, other=0.0)
#         sin_vals_2 = tl.load(sin_ptr + sin_cos_base_2, mask=mask_2, other=0.0)
#         cos_vals_1 = tl.load(cos_ptr + offsets_m[:, None] * stride_cos_m + offsets_n_1[None, :] * stride_cos_n, mask=mask_1, other=0.0)
#         cos_vals_2 = tl.load(cos_ptr + offsets_m[:, None] * stride_cos_m + offsets_n_2[None, :] * stride_cos_n, mask=mask_2, other=0.0)

#         # Compute output
#         output_vals_1 = cos_vals_1 * q_vals_1 - sin_vals_1 * q_vals_2
#         output_vals_2 = cos_vals_2 * q_vals_2 + sin_vals_2 * q_vals_1

#         # Store to c: first half
#         c_ptrs_1 = c_ptr + offsets_m[:, None] * stride_c_m + pid_head * stride_c_head + (hidden_size_wuk_head + offsets_n_1)[None, :] * stride_c_n
#         tl.store(c_ptrs_1, output_vals_1, mask=mask_1)
#         # Second half
#         c_ptrs_2 = c_ptr + offsets_m[:, None] * stride_c_m + pid_head * stride_c_head + (hidden_size_wuk_head + offsets_n_2)[None, :] * stride_c_n
#         tl.store(c_ptrs_2, output_vals_2, mask=mask_2)


@triton.jit
def rms_norm_rope_rac_cal_kernel(
    kv_input, 
    gamma, 
    cos, 
    sin, 
    slot_mapping, 
    kv_cache, 
    tokens : tl.constexpr, 
    hidden_size_kv : tl.constexpr, 
    hidden_size_rope_k : tl.constexpr, 
    epsilon : tl.constexpr, 
    BLOCK_SIZE : tl.constexpr
):
    """Fused RMSNorm + RoPE + cache scatter for the K path.

    For each token row of `kv_input` (width hidden_size_kv):
      * the leading hidden_size_kv - hidden_size_rope_k elements are
        RMS-normalized with `gamma` and written to the `kv_cache` row
        selected by `slot_mapping[token]`;
      * the trailing hidden_size_rope_k elements get rotary embedding
        (cos/sin are per token, width hidden_size_rope_k) and are written
        to the same cache row after the normalized part.
    """
    program_idx = tl.program_id(axis = 0)
    program_num = tl.num_programs(axis = 0)
    if program_idx >= tokens: 
        return
    
    hidden_size_rms_kv = hidden_size_kv - hidden_size_rope_k
    
    # Stride over token rows so a grid smaller than `tokens` still covers all rows.
    for row_idx in tl.range(program_idx, tokens, program_num): 
        ## destination slot in the cache for this token
        slot_offset = row_idx
        slot_mask = slot_offset < tokens
        slot = tl.load(slot_mapping + slot_offset, mask = slot_mask)

        ## rms_norm: first pass accumulates the sum of squares
        square_sum = 0.0

        input_rms_ptr = kv_input + row_idx * hidden_size_kv
        gamma_ptr = gamma
        output_rms_ptr = kv_cache + slot * hidden_size_kv

        rms_block_num = tl.cdiv(hidden_size_rms_kv, BLOCK_SIZE)
        for rms_block_idx in tl.range(0, rms_block_num): 
            start = rms_block_idx * BLOCK_SIZE
            offsets = start + tl.arange(0, BLOCK_SIZE)
            mask = offsets < hidden_size_rms_kv
            # BUGFIX: `other = 0.0` — without it, masked-out lanes are
            # undefined and would pollute the reduction on a partial block.
            block = tl.load(input_rms_ptr + offsets, mask = mask, other = 0.0)
            square_block = block * block
            square_block_sum = tl.sum(square_block)
            square_sum += square_block_sum

        square_sum_mean = square_sum / hidden_size_rms_kv
        denominator = tl.sqrt(square_sum_mean + epsilon)
        factor = 1.0 / denominator

        # Second pass: normalize, scale by gamma, scatter into the cache row.
        for rms_block_idx in tl.range(0, rms_block_num): 
            start = rms_block_idx * BLOCK_SIZE
            offsets = start + tl.arange(0, BLOCK_SIZE)
            mask = offsets < hidden_size_rms_kv
            block_input = tl.load(input_rms_ptr + offsets, mask = mask, other = 0.0)
            block_gamma = tl.load(gamma_ptr + offsets, mask = mask, other = 0.0)
            block_output = block_input * block_gamma * factor
            tl.store(output_rms_ptr + offsets, block_output, mask = mask)
        
        ## rope: compute in fp32, store back in the input element type
        input_type = kv_input.type.element_ty
        cal_type = tl.float32

        input_rope_ptr  = row_idx * hidden_size_kv + hidden_size_rms_kv + kv_input
        output_rope_ptr = slot    * hidden_size_kv + hidden_size_rms_kv + kv_cache
        cos_ptr         = row_idx * hidden_size_rope_k + cos
        sin_ptr         = row_idx * hidden_size_rope_k + sin
        
        hidden_size_rope_k_half = hidden_size_rope_k // 2
        rope_block_num_half = tl.cdiv(hidden_size_rope_k_half, BLOCK_SIZE)

        # rotate_half convention: out_left  = cos*left  - sin*right
        #                         out_right = cos*right + sin*left
        for rope_block_idx in tl.range(0, rope_block_num_half): 
            input_rope_start = rope_block_idx * BLOCK_SIZE
            offsets_left = input_rope_start + tl.arange(0, BLOCK_SIZE)
            offsets_right= input_rope_start + tl.arange(0, BLOCK_SIZE) + hidden_size_rope_k_half

            mask_left = offsets_left < hidden_size_rope_k_half
            mask_right = offsets_right < hidden_size_rope_k 

            block_input_left  = tl.load(input_rope_ptr + offsets_left,  mask = mask_left, other = 0.0).cast(cal_type)
            block_input_right = tl.load(input_rope_ptr + offsets_right, mask = mask_right, other = 0.0).cast(cal_type)

            block_cos_left = tl.load(cos_ptr + offsets_left, mask = mask_left, other = 0.0).cast(cal_type)
            block_sin_left = tl.load(sin_ptr + offsets_left, mask = mask_left, other = 0.0).cast(cal_type)
            block_output_left = (block_cos_left * block_input_left - block_sin_left * block_input_right).cast(input_type)
            tl.store(output_rope_ptr + offsets_left, block_output_left, mask = mask_left)

            block_cos_right = tl.load(cos_ptr + offsets_right, mask = mask_right, other = 0.0).cast(cal_type)
            block_sin_right = tl.load(sin_ptr + offsets_right, mask = mask_right, other = 0.0).cast(cal_type)
            block_output_right = (block_cos_right * block_input_right + block_sin_right * block_input_left).cast(input_type)
            tl.store(output_rope_ptr + offsets_right, block_output_right, mask = mask_right)


def mlapo_triton(
    input1, gamma1, beta1, quant_scale1, quant_offset1, wdqkv, de_scale1, bias1,
    gamma2, beta2, quant_scale2, quant_offset2, wuq, de_scale2, bias2, 
    gamma3, cos1, sin1, cos2, sin2, wuk, key_cache, slot_mapping,
    Tokens, head_num, block_size, data_type, cache_mode=0, epsilon = epsilon,
    hidden_size=7168, hidden_size_wdq=1536, hidden_size_wuk_head=512, hidden_size_rope_q_head=64, hidden_size_rope_k=64
):
    """Fused MLA pre-processing (MLAPO) pipeline built from Triton kernels.

    Stages, each a separate kernel launch:
      1. ``rms_norm_quant`` on ``input1``            -> int8 activations
      2. ``matmul_bias_scale`` with ``wdqkv``        -> mm1_out (fp32, then fp16)
      3. split mm1_out; ``rms_norm_quant`` on the wdq half, then
         ``matmul_bias_scale`` with ``wuq``          -> mm2_out
      4. ``rms_norm_rope_rac_cal_kernel``: RMSNorm + RoPE on the kv half,
         scattered into ``key_cache`` via ``slot_mapping``  -> key_out
      5. ``batch_matmul`` of the nope-q part with ``wuk``   -> bmm_out
      6. ``pad_head_rotate_half_x_per_matmul_fused``: RoPE on the rope-q
         part, concatenated with bmm_out             -> q_out

    Returns a list of CPU tensors: ``[q_out, key_out]``, or the four
    nope/rope slices of each when ``cache_mode == 1``.

    NOTE(review): ``Tokens`` and ``block_size`` are accepted for interface
    compatibility but not read here; token count and hidden size are taken
    from ``input1.shape`` — confirm against callers.
    """
    tokens = input1.shape[0]
    # Override the hidden_size default with the actual input width so a
    # mismatched default cannot corrupt the launch configuration.
    hidden_size = input1.shape[1]

    # Shared tile sizes for every launch below (all GEMMs use the same tiling).
    TILE_SIZE_RMS = 1024  # columns reduced per step in rms_norm_quant; tunable
    BLOCK_SIZE_M = 64
    BLOCK_SIZE_N = 64
    BLOCK_SIZE_K = 64
    GROUP_SIZE_M = 8

    # ---- stage 1: quantized RMSNorm of the raw input -----------------------
    scale_val1 = 1 / quant_scale1.item()
    offset_val1 = quant_offset1.item()
    rms_quant_out1 = torch.empty((tokens, hidden_size), dtype=torch.int8, device=DEVICE)
    grid_rms = (tokens,)  # one program per row
    rms_norm_quant[grid_rms](
        input1.to(torch.float32).npu(),
        gamma1.to(torch.float32).npu(),
        beta1.to(torch.float32).npu(),
        rms_quant_out1,
        quant_scale_f=scale_val1, quant_offset_f=offset_val1,
        QUANTMAX=QUANTMAX, QUANTMIN=QUANTMIN,
        N=tokens, H=hidden_size, EPS=epsilon,
        BLOCK_SIZE=TILE_SIZE_RMS,
    )

    # ---- stage 2: GEMM with wdqkv (bias + dequant scale fused) -------------
    M = rms_quant_out1.shape[0]
    N, K = wdqkv.shape  # weight stored as [N, K]
    mm1_out = torch.empty((M, N), dtype=torch.float32, device=DEVICE)
    grid_matmul = (triton.cdiv(M, BLOCK_SIZE_M) * triton.cdiv(N, BLOCK_SIZE_N), )
    matmul_bias_scale[grid_matmul](
        rms_quant_out1.to(torch.float32), wdqkv.to(torch.float32).npu(), mm1_out,
        bias1.npu(), de_scale1.npu(),
        M, N, K,
        stride_am=rms_quant_out1.stride(0), stride_ak=rms_quant_out1.stride(1),
        # Swapping stride(1)/stride(0) reads the [N, K] weight as if it were
        # transposed — no materialized wdqkv.T needed.
        stride_bk=wdqkv.stride(1), stride_bn=wdqkv.stride(0),
        stride_cm=mm1_out.stride(0), stride_cn=mm1_out.stride(1),
        BLOCK_SIZE_M=BLOCK_SIZE_M,
        BLOCK_SIZE_N=BLOCK_SIZE_N,
        BLOCK_SIZE_K=BLOCK_SIZE_K,
        GROUP_SIZE_M=GROUP_SIZE_M,
    )
    # Downstream stages consume fp16; shape is [tokens, N] (e.g. [1024, 2112]).
    mm1_out = mm1_out.to(torch.float16)

    # ---- stage 3: split, second quantized RMSNorm, GEMM with wuq -----------
    hidden_size_wdkv = mm1_out.shape[-1] - hidden_size_wdq
    mm1_split1, mm1_split2 = torch.split(mm1_out, [hidden_size_wdkv, hidden_size_wdq], dim=1)
    scale_val2 = 1 / quant_scale2.item()
    offset_val2 = quant_offset2.item()
    rms_quant_out2 = torch.empty((tokens, hidden_size_wdq), dtype=torch.int8, device=DEVICE)
    rms_norm_quant[grid_rms](
        mm1_split2.to(torch.float32).npu(),
        gamma2.to(torch.float32).npu(),
        beta2.to(torch.float32).npu(),
        rms_quant_out2,
        quant_scale_f=scale_val2, quant_offset_f=offset_val2,
        QUANTMAX=QUANTMAX, QUANTMIN=QUANTMIN,
        N=tokens, H=hidden_size_wdq, EPS=epsilon,
        BLOCK_SIZE=TILE_SIZE_RMS,
    )

    M = rms_quant_out2.shape[0]
    N, K = wuq.shape  # [N, K] weight, same implicit transpose as stage 2
    mm2_out = torch.empty((M, N), dtype=torch.float32, device=DEVICE)
    grid_matmul = (triton.cdiv(M, BLOCK_SIZE_M) * triton.cdiv(N, BLOCK_SIZE_N), )
    matmul_bias_scale[grid_matmul](
        rms_quant_out2.to(torch.float32), wuq.to(torch.float32).npu(), mm2_out,
        bias2.npu(), de_scale2.npu(),
        M, N, K,
        stride_am=rms_quant_out2.stride(0), stride_ak=rms_quant_out2.stride(1),
        stride_bk=wuq.stride(1), stride_bn=wuq.stride(0),
        stride_cm=mm2_out.stride(0), stride_cn=mm2_out.stride(1),
        BLOCK_SIZE_M=BLOCK_SIZE_M,
        BLOCK_SIZE_N=BLOCK_SIZE_N,
        BLOCK_SIZE_K=BLOCK_SIZE_K,
        GROUP_SIZE_M=GROUP_SIZE_M,
    )

    # ---- stage 4: RMSNorm + RoPE + reshape-and-cache for the kv half -------
    # NOTE(review): grid width of 20 looks hardware-specific (core count?);
    # the kernel appears to stride over rows — confirm before changing.
    grid_rms_rope = (20, )
    BLOCK_SIZE_RMS_ROPE = 128
    key_out = key_cache.npu()  # kernel scatters results into the cache in place
    rms_norm_rope_rac_cal_kernel[grid_rms_rope](
        mm1_split1.contiguous().npu(),
        gamma3.npu(),
        cos1.npu(),
        sin1.npu(),
        slot_mapping.npu(),
        key_out,
        tokens,
        mm1_split1.shape[-1],
        hidden_size_rope_k,
        epsilon,
        BLOCK_SIZE_RMS_ROPE
    )

    # ---- stage 5: batched GEMM of the nope-q part with wuk -----------------
    hidden_size_wuq_head = mm2_out.shape[-1] // head_num
    mm2_out = mm2_out.reshape(tokens, head_num, hidden_size_wuq_head)
    hidden_size_nope_q = hidden_size_wuq_head - hidden_size_rope_q_head
    mm2_split1, mm2_split2 = torch.split(mm2_out, [hidden_size_nope_q, hidden_size_rope_q_head], dim=2)

    bmm_out_non_permute = torch.empty((head_num, tokens, hidden_size_wuk_head), dtype=torch.float32, device=DEVICE)
    batch_num = head_num
    M = tokens
    N = hidden_size_wuk_head
    K = hidden_size_nope_q
    batch_grid_matmul = (batch_num, triton.cdiv(M, BLOCK_SIZE_M) * triton.cdiv(N, BLOCK_SIZE_N))
    # Batch dim moved first so each program on axis 0 owns one head.
    mm2_split1_permute = torch.permute(mm2_split1, (1, 0, 2))
    batch_matmul[batch_grid_matmul](
        mm2_split1_permute.to(torch.float32).npu(),
        wuk.to(torch.float32).npu(),
        bmm_out_non_permute,
        batch_num, M, N, K,
        stride_ab=mm2_split1_permute.stride(0),
        stride_am=mm2_split1_permute.stride(1),
        stride_ak=mm2_split1_permute.stride(2),
        stride_bb=wuk.stride(0),
        stride_bk=wuk.stride(1),
        stride_bn=wuk.stride(2),
        stride_cb=bmm_out_non_permute.stride(0),
        stride_cm=bmm_out_non_permute.stride(1),
        stride_cn=bmm_out_non_permute.stride(2),
        BLOCK_SIZE_M=BLOCK_SIZE_M,
        BLOCK_SIZE_N=BLOCK_SIZE_N,
        BLOCK_SIZE_K=BLOCK_SIZE_K,
        GROUP_SIZE_M=GROUP_SIZE_M,
    )
    bmm_out = torch.permute(bmm_out_non_permute, (1, 0, 2)).to(data_type)

    # ---- stage 6: fused pad/rotate-half/elementwise RoPE on the rope-q part
    # reshape copies the non-contiguous split result; the previous
    # .cpu()/.npu() round-trip added a host bounce for no benefit.
    mm2_split2_reshape = mm2_split2.reshape(tokens, head_num * hidden_size_rope_q_head)
    BLOCK_SIZE_FULL = hidden_size_rope_q_head   # one full head dim per tile
    BLOCK_SIZE = hidden_size_rope_q_head // 2   # rotate-half operates on halves
    grid_fused = (head_num, triton.cdiv(tokens, BLOCK_SIZE_M))
    rope_res = torch.empty_like(mm2_split2_reshape, dtype=mm2_split2_reshape.dtype, device=DEVICE)
    pad_head_rotate_half_x_per_matmul_fused[grid_fused](
        rope_res, mm2_split2_reshape.npu(), sin2.npu(), cos2.npu(),
        tokens=tokens, head_num=head_num, hidden_size_rope_q_head=hidden_size_rope_q_head,
        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE=BLOCK_SIZE, BLOCK_SIZE_FULL=BLOCK_SIZE_FULL
    )

    rope_res = rope_res.reshape(tokens, head_num, hidden_size_rope_q_head).to(data_type)
    q_out = torch.cat((bmm_out, rope_res), dim=2)

    if cache_mode == 1:
        # NOTE(review): split points assume head dims of 512 (nope) + 64
        # (rope); confirm against hidden_size_wuk_head /
        # hidden_size_rope_q_head before relying on this mode.
        return [
            q_out[..., 0:512].cpu(),
            key_out[..., 0:512].cpu(),
            q_out[..., 512:576].cpu(),
            key_out[..., 512:576].cpu()
        ]
    return [q_out.cpu(), key_out.cpu()]