import torch 
import torch_npu
import triton
import triton.language as tl
from mlapo_torch import process_deq_scale, QUANTMAX, QUANTMIN, MLAPO

epsilon = 1e-6
DEVICE = "npu"

'''
NOTE: the matmul conversion currently has a known issue; the conversion
process is still under investigation.
'''

# Quantized RMSNorm kernel
@triton.jit
def rms_norm_quant(
    input_ptr,       # [N, H] float input, row-major
    gamma_ptr,       # [H] RMSNorm scale weights
    beta_ptr,        # [H] RMSNorm shift weights
    output_ptr,      # [N, H] int8 quantized output
    quant_scale_f,   # scalar quantization scale (multiplied in)
    quant_offset_f,  # scalar quantization offset (added after scaling)
    QUANTMAX: tl.constexpr,   # upper clamp bound for the quantized value
    QUANTMIN: tl.constexpr,   # lower clamp bound for the quantized value
    N: tl.constexpr,          # number of rows (tokens)
    H: tl.constexpr,          # hidden size (elements per row)
    EPS: tl.constexpr,        # epsilon added to the mean square for stability
    BLOCK_SIZE: tl.constexpr, # columns processed per inner-loop iteration
):
    """Fused RMSNorm + affine + int8 quantization; one program per row.

    Pass 1 accumulates the row's sum of squares block-by-block; pass 2
    normalizes, applies gamma/beta, quantizes with round-half-away-from-zero,
    clamps to [QUANTMIN, QUANTMAX], and stores as int8.
    """
    # Row index: each program (block) handles one full row.
    row_idx = tl.program_id(axis=0)
    if row_idx >= N:
        return
    # Pass 1: reduce the sum of squares over the row in BLOCK_SIZE chunks.
    sum_square = 0.0
    num_blocks = tl.cdiv(H, BLOCK_SIZE)
    for i in range(0, num_blocks):
        col_offsets = tl.arange(0, BLOCK_SIZE)
        col_mask = i * BLOCK_SIZE + col_offsets < H
        x = tl.load(input_ptr + row_idx * H + i * BLOCK_SIZE + col_offsets, mask=col_mask, other=0.0)
        x_square = x * x
        block_square = tl.sum(x_square, axis=0)
        sum_square += block_square
    mean_square = sum_square / H
    inv_rms = 1.0 / tl.sqrt(mean_square + EPS)
    
    # Pass 2: normalize, apply the affine transform, quantize, clamp, store.
    for i in range(0, num_blocks):
        col_offsets = tl.arange(0, BLOCK_SIZE)
        col_mask = i * BLOCK_SIZE + col_offsets < H
        x = tl.load(input_ptr + row_idx * H + i * BLOCK_SIZE + col_offsets, mask=col_mask, other=0.0)
        gamma = tl.load(gamma_ptr + i * BLOCK_SIZE + col_offsets, mask=col_mask, other=0.0)
        beta = tl.load(beta_ptr + i * BLOCK_SIZE + col_offsets, mask=col_mask, other=0.0)
        norm = x * inv_rms * gamma + beta
        quant_float = norm * quant_scale_f + quant_offset_f
        # Round half away from zero (floor(x+0.5) for x>=0, ceil(x-0.5) otherwise).
        is_positive = quant_float >= 0.0
        rounded_float = tl.where(
            is_positive,
            tl.floor(quant_float + 0.5),
            tl.ceil(quant_float - 0.5)
        )
        # Clamp into the representable quantization range before the int8 cast.
        quant_clamped_float = tl.where(
            rounded_float < QUANTMIN,
            float(QUANTMIN),
            tl.where(rounded_float > QUANTMAX, float(QUANTMAX), rounded_float)
        )
        quant_clamped_int8 = tl.cast(quant_clamped_float, tl.int8)
        tl.store(output_ptr + row_idx * H + i * BLOCK_SIZE + col_offsets, quant_clamped_int8, mask=col_mask)

# Quantized matmul (with bias add and dequantization scaling)
# @triton.jit
# def matmul_bias_scale(
#     a_ptr, b_ptr, c_ptr, bias_ptr, de_scale_ptr,
#     M, N, K,
#     stride_am, stride_ak,
#     stride_bk, stride_bn,
#     stride_cm, stride_cn,
#     BLOCK_SIZE_M: tl.constexpr,
#     BLOCK_SIZE_N: tl.constexpr,
#     BLOCK_SIZE_K: tl.constexpr,
#     GROUP_SIZE_M: tl.constexpr,
# ):
#     pid = tl.program_id(axis=0)
#     num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
#     num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
#     num_pid_in_group = GROUP_SIZE_M * num_pid_n
#     group_id = pid // num_pid_in_group
#     first_pid_m = group_id * GROUP_SIZE_M
#     group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
#     pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
#     pid_n = (pid % num_pid_in_group) // group_size_m
#     offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
#     offsets_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
#     offsets_k = tl.arange(0, BLOCK_SIZE_K)
#     a_ptrs = a_ptr + (offsets_m[:, None] * stride_am + offsets_k[None, :] * stride_ak)
#     b_ptrs = b_ptr + (offsets_k[:, None] * stride_bk + offsets_n[None, :] * stride_bn)
#     accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
#     num_blocks = tl.cdiv(K, BLOCK_SIZE_K)
#     for k in range(0, num_blocks):
#         # 在 for k 循环中
#         a = tl.load(a_ptrs, mask=offsets_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
#         b = tl.load(b_ptrs, mask=offsets_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
#         accumulator += tl.dot(a, b)
#         a_ptrs += BLOCK_SIZE_K * stride_ak
#         b_ptrs += BLOCK_SIZE_K * stride_bk
#     c_ptrs = c_ptr + offsets_m[:, None] * stride_cm + offsets_n[None, :] * stride_cn
#     offsets_bias = offsets_n[None, :]
#     offsets_de_scale = offsets_n
#     bias = tl.load(bias_ptr + offsets_bias, mask=offsets_bias < N, other=0.0)
#     de_scale = tl.load(de_scale_ptr + offsets_de_scale, mask=offsets_de_scale < N, other=1.0)
#     bias_float32 = tl.cast(bias, tl.float32)
#     accumulator = accumulator + bias_float32
#     accumulator = accumulator * de_scale
#     offsets_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
#     offsets_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
#     c_mask = (offsets_cm[:, None] < M) & (offsets_cn[None, :] < N)
#     tl.store(c_ptrs, accumulator, mask=c_mask)

@triton.jit
def matmul_bias_scale(
    a_ptr, b_ptr, c_ptr, bias_ptr, de_scale_ptr,
    M, N, K,
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Quantized matmul epilogue kernel: C = (A @ B + bias) * de_scale.

    A is (M, K). The B tile is addressed n-major (rows = n, cols = k) and
    transposed in-register before tl.dot, so the caller passes an [N, K]
    weight with swapped strides. bias and de_scale are per-output-column
    (length-N) vectors. Uses grouped program ordering for cache reuse.
    """
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Grouped ordering: sweep GROUP_SIZE_M tile-rows together.
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offsets_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offsets_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs_base = a_ptr + (offsets_m[:, None] * stride_am + offsets_k[None, :] * stride_ak)
    # B tile is loaded with n as the row axis and k as the column axis.
    b_ptrs_base = b_ptr + (offsets_k[None, :] * stride_bk + offsets_n[:, None] * stride_bn)
    msk_m = offsets_m < M
    msk_n = offsets_n < N
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    num_blocks = tl.cdiv(K, BLOCK_SIZE_K)
    for k in range(0, num_blocks):
        a_ptrs = a_ptrs_base + k * BLOCK_SIZE_K * stride_ak
        b_ptrs = b_ptrs_base + k * BLOCK_SIZE_K * stride_bk
        # BUG FIX: element-wise mask combination must use `&`; the Python
        # `and` operator is not a valid element-wise op on Triton tensors.
        k_mask = offsets_k[None, :] < K - k * BLOCK_SIZE_K
        a = tl.load(a_ptrs, mask=msk_m[:, None] & k_mask, other=0.0)
        b = tl.load(b_ptrs, mask=msk_n[:, None] & k_mask, other=0.0)
        b_trans = tl.trans(b)
        accumulator += tl.dot(a, b_trans)
    c_ptrs = c_ptr + offsets_m[:, None] * stride_cm + offsets_n[None, :] * stride_cn
    offsets_bias = offsets_n[None, :]
    offsets_de_scale = offsets_n
    bias = tl.load(bias_ptr + offsets_bias, mask=offsets_bias < N, other=0.0)
    de_scale = tl.load(de_scale_ptr + offsets_de_scale, mask=offsets_de_scale < N, other=1.0)
    bias_float32 = tl.cast(bias, tl.float32)
    # Per-column epilogue: add bias, then apply the dequantization scale.
    accumulator = (accumulator + bias_float32) * de_scale
    c_mask = msk_m[:, None] & msk_n[None, :]
    tl.store(c_ptrs, accumulator, mask=c_mask)

# Plain (non-quantized) matmul kernel
@triton.jit
def matmul(
    a_ptr, b_ptr, c_ptr,
    M, N, K,
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Plain fp32-accumulating matmul: C = A @ B with grouped tile ordering."""
    prog = tl.program_id(axis=0)
    tiles_m = tl.cdiv(M, BLOCK_SIZE_M)
    tiles_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Map the linear program id onto a (tile_m, tile_n) pair, grouping
    # GROUP_SIZE_M tile-rows per column sweep for better locality.
    group_span = GROUP_SIZE_M * tiles_n
    grp = prog // group_span
    grp_row0 = grp * GROUP_SIZE_M
    rows_in_grp = min(tiles_m - grp_row0, GROUP_SIZE_M)
    tile_m = grp_row0 + ((prog % group_span) % rows_in_grp)
    tile_n = (prog % group_span) // rows_in_grp
    offs_m = tile_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_n = tile_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_tile_ptrs = a_ptr + offs_m[:, None] * stride_am + offs_k[None, :] * stride_ak
    b_tile_ptrs = b_ptr + offs_k[:, None] * stride_bk + offs_n[None, :] * stride_bn
    acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    # Walk the K dimension one BLOCK_SIZE_K slab at a time.
    for step in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        k_left = K - step * BLOCK_SIZE_K
        lhs = tl.load(a_tile_ptrs, mask=(offs_m[:, None] < M) & (offs_k[None, :] < k_left), other=0.0)
        rhs = tl.load(b_tile_ptrs, mask=(offs_k[:, None] < k_left) & (offs_n[None, :] < N), other=0.0)
        acc += tl.dot(lhs, rhs)
        a_tile_ptrs += BLOCK_SIZE_K * stride_ak
        b_tile_ptrs += BLOCK_SIZE_K * stride_bk
    # Write the finished tile back with bounds masking.
    out_ptrs = c_ptr + offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn
    out_mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
    tl.store(out_ptrs, acc, mask=out_mask)

@triton.jit
def batch_matmul(
    a_ptr, b_ptr, c_ptr,
    B, M, N, K,
    stride_ab, stride_am, stride_ak,
    stride_bb, stride_bk, stride_bn,
    stride_cb, stride_cm, stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Batched matmul: C[b] = A[b] @ B[b] for each batch index b.

    Grid axis 0 selects the batch; axis 1 enumerates (M, N) tiles with the
    same grouped ordering as the 2D matmul kernel. Accumulates in float32.
    """
    # Batch index and linear tile id.
    bid = tl.program_id(axis=0)
    pid = tl.program_id(axis=1)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Grouped ordering: GROUP_SIZE_M tile-rows are swept together for reuse.
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offsets_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offsets_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + (bid * stride_ab + offsets_m[:, None] * stride_am + offsets_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (bid * stride_bb + offsets_k[:, None] * stride_bk + offsets_n[None, :] * stride_bn)
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    num_blocks = tl.cdiv(K, BLOCK_SIZE_K)
    for k in range(0, num_blocks):
        # Inside the k loop: mask out-of-range rows/cols of the current K slab.
        a_mask = (offsets_m[:, None] < M) & (offsets_k[None, :] < K - k * BLOCK_SIZE_K)
        b_mask = (offsets_k[:, None] < K - k * BLOCK_SIZE_K) & (offsets_n[None, :] < N)
        a = tl.load(a_ptrs, mask=a_mask, other=0.0)
        b = tl.load(b_ptrs, mask=b_mask, other=0.0)
        accumulator += tl.dot(a, b)
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk
    c_ptrs = c_ptr + (bid * stride_cb + offsets_m[:, None] * stride_cm + offsets_n[None, :] * stride_cn)
    offsets_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offsets_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    # bid < B guards stores when the batch grid is over-provisioned.
    c_mask = (bid < B) & (offsets_cm[:, None] < M) & (offsets_cn[None, :] < N)
    tl.store(c_ptrs, accumulator, mask=c_mask)

'''
Before this op, sin has shape (tokens, hidden_size_rope_k);
after it, the shape is (tokens, hidden_size_rope_k * head_num).
'''
@triton.jit
def pad_head_num(
    pad_sin2,        # output: [tokens, head_num * hidden_size_rope_k]
    pad_cos2,        # output: [tokens, head_num * hidden_size_rope_k]
    sin2,            # input:  [tokens, hidden_size_rope_k]
    cos2,            # input:  [tokens, hidden_size_rope_k]
    tokens: tl.constexpr,
    head_num: tl.constexpr,
    hidden_size_rope_k: tl.constexpr,
    total_output_dim: tl.constexpr,  # head_num * hidden_size_rope_k
    BLOCK_SIZE_K: tl.constexpr,      # fixed to hidden_size_rope_k by the caller
):
    """Tile each token's sin/cos row head_num times along the last dim.

    Input rows of length hidden_size_rope_k are repeated head_num times, so
    the outputs have shape (tokens, head_num * hidden_size_rope_k).
    One program per token.
    """
    pid = tl.program_id(axis=0)
    if pid >= tokens:
        return

    token_idx = pid  # one token per program

    # Load the token's full sin/cos rows as 1D blocks of hidden_size_rope_k.
    offsets_k = tl.arange(0, BLOCK_SIZE_K)
    mask_k = offsets_k < hidden_size_rope_k
    sin2_row = tl.load(sin2 + token_idx * hidden_size_rope_k + offsets_k, mask=mask_k, other=0.0)
    cos2_row = tl.load(cos2 + token_idx * hidden_size_rope_k + offsets_k, mask=mask_k, other=0.0)

    # Store the same row once per head; mask_k is loop-invariant, so it is
    # reused instead of being recomputed on every iteration.
    for i in range(head_num):
        out_start = token_idx * total_output_dim + i * hidden_size_rope_k
        tl.store(pad_sin2 + out_start + offsets_k, sin2_row, mask=mask_k)
        tl.store(pad_cos2 + out_start + offsets_k, cos2_row, mask=mask_k)

@triton.jit
def rotate_half_x(
    output_ptr,
    input_ptr,
    tokens: tl.constexpr,
    head_num: tl.constexpr,
    hidden_size_rope_q_head: tl.constexpr,
    BLOCK_SIZE_M: tl.constexpr,   # tile size along the token axis
    BLOCK_SIZE_N: tl.constexpr,   # tile size along the flattened head dim
):
    """RoPE rotate-half: per head, map [first, second] -> [-second, first].

    Input/output are (tokens, head_num * hidden_size_rope_q_head). Each
    element is negated if it lies in the second half of its head and written
    to the swapped half position within the same head.
    """
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)
    # BUG FIX: row offsets must be built from BLOCK_SIZE_M and column offsets
    # from BLOCK_SIZE_N; the original mixed the two constants (it only worked
    # because the caller sets both to the same value).
    block_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    block_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)

    mask_m = block_m < tokens
    mask_n = block_n < (head_num * hidden_size_rope_q_head)
    mask = mask_m[:, None] & mask_n[None, :]

    # Load the Q tile.
    ptr = input_ptr + (block_m[:, None] * (head_num * hidden_size_rope_q_head) + block_n[None, :])
    q = tl.load(ptr, mask=mask, other=0.0)

    # Decompose the flat column index into (head_id, position within head),
    # mirroring a per-head torch.chunk along the last dimension.
    half_dim = hidden_size_rope_q_head // 2
    head_id = block_n // hidden_size_rope_q_head
    local_n = block_n % hidden_size_rope_q_head
    is_second_half = local_n >= half_dim

    # Negate only elements drawn from the second half of each head.
    rotated = tl.where(is_second_half, -q, q)

    # Swap halves: second half moves to the front, first half to the back.
    new_local_n = tl.where(is_second_half, local_n - half_dim, local_n + half_dim)
    new_block_n = head_id * hidden_size_rope_q_head + new_local_n

    # Scatter to the swapped column positions (row stride unchanged).
    out_ptr = output_ptr + (block_m[:, None] * (head_num * hidden_size_rope_q_head) + new_block_n[None, :])
    tl.store(out_ptr, rotated, mask=mask)

@triton.jit
def per_mamtul(
    output_ptr,
    origin_q_ptr,
    pad_cos_ptr,
    rotate_q_ptr,
    pad_sin_ptr,
    M, N,
    stride_m, stride_n,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Element-wise RoPE combine: out = origin_q * cos + rotate_q * sin.

    All five tensors share the same (M, N) layout and strides. Despite the
    name, this is a per-element multiply/add, not a matrix multiply.
    """
    prog = tl.program_id(axis=0)
    tiles_m = tl.cdiv(M, BLOCK_SIZE_M)
    tiles_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Grouped tile ordering (same scheme as the matmul kernels).
    span = GROUP_SIZE_M * tiles_n
    grp = prog // span
    row0 = grp * GROUP_SIZE_M
    rows = min(tiles_m - row0, GROUP_SIZE_M)
    tile_m = row0 + ((prog % span) % rows)
    tile_n = (prog % span) // rows
    offs_m = tile_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_n = tile_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    # One shared offset grid and bounds mask for every operand.
    grid_off = offs_m[:, None] * stride_m + offs_n[None, :] * stride_n
    bounds = (offs_m[:, None] < M) & (offs_n[None, :] < N)
    q_val = tl.load(origin_q_ptr + grid_off, mask=bounds, other=0.0)
    cos_val = tl.load(pad_cos_ptr + grid_off, mask=bounds, other=0.0)
    rot_val = tl.load(rotate_q_ptr + grid_off, mask=bounds, other=0.0)
    sin_val = tl.load(pad_sin_ptr + grid_off, mask=bounds, other=0.0)
    tl.store(output_ptr + grid_off, q_val * cos_val + rot_val * sin_val, mask=bounds)

def mlapo_triton(
    input1, gamma1, beta1, quant_scale1, quant_offset1, wdqkv, de_scale1, bias1,
    gamma2, beta2, quant_scale2, quant_offset2, wuq, de_scale2, bias2,
    gamma3, cos1, sin1, cos2, sin2, wuk, key_cache, slot_mapping,
    Tokens, head_num, block_size, data_type, cache_mode=0, epsilon=epsilon,
    hidden_size=7168, hidden_size_wdq=1536, hidden_size_wuk_head=512, hidden_size_rope_q_head=64, hidden_size_rope_k=64
):
    """Triton implementation of the MLA preprocessing (MLAPO) pipeline.

    Pipeline:
      1. RMSNorm + int8 quantization of the hidden states.
      2. Quantized matmul with wdqkv (dequant via de_scale1 / bias1).
      3. Split; second RMSNorm + quantization; quantized matmul with wuq.
      4. Key path: RMSNorm + RoPE + cache write (currently delegated to the
         golden MLAPO reference implementation).
      5. Query path: per-head batched matmul with wuk, RoPE (pad sin/cos,
         rotate-half, elementwise combine), then concat nope/rope parts.

    Returns [q_nope, k_nope, q_rope, k_rope] slices when cache_mode == 1,
    otherwise [q_out, key_out]; all outputs are moved to CPU.
    """
    tokens = input1.shape[0]
    hidden_size = input1.shape[1]
    TILE_SIZE_RMS = 1024  # inner-loop tile for the RMSNorm kernels (tunable)

    # --- Stage 1: RMSNorm + int8 quantization of the input hidden states ---
    scale_val1 = 1 / quant_scale1.item()
    offset_val1 = quant_offset1.item()
    rms_quant_out1 = torch.empty((tokens, hidden_size), dtype=torch.int8, device=DEVICE)
    grid_rms = (tokens,)
    rms_norm_quant[grid_rms](
        input1.to(torch.float32).npu(),
        gamma1.to(torch.float32).npu(),
        beta1.to(torch.float32).npu(),
        rms_quant_out1,
        quant_scale_f=scale_val1, quant_offset_f=offset_val1,
        QUANTMAX=QUANTMAX, QUANTMIN=QUANTMIN,
        N=tokens, H=hidden_size, EPS=epsilon,
        BLOCK_SIZE=TILE_SIZE_RMS,
    )

    # --- Stage 2: quantized matmul with wdqkv (weight stored as [N, K]) ---
    BLOCK_SIZE_M = 64
    BLOCK_SIZE_N = 64
    BLOCK_SIZE_K = 64
    GROUP_SIZE_M = 8
    M = rms_quant_out1.shape[0]
    N, K = wdqkv.shape
    mm1_out = torch.empty((M, N), dtype=torch.float32, device=DEVICE)
    grid_matmul = (triton.cdiv(M, BLOCK_SIZE_M) * triton.cdiv(N, BLOCK_SIZE_N),)
    # The kernel reads the weight n-major and transposes in-register, so the
    # [N, K] tensor is passed as-is with swapped strides.
    wdqkv_T = wdqkv
    matmul_bias_scale[grid_matmul](
        rms_quant_out1.to(torch.float32), wdqkv_T.to(torch.float32).npu(), mm1_out,
        bias1.npu(), de_scale1.npu(),
        M, N, K,
        stride_am=rms_quant_out1.stride(0), stride_ak=rms_quant_out1.stride(1),
        stride_bk=wdqkv_T.stride(1), stride_bn=wdqkv_T.stride(0),
        stride_cm=mm1_out.stride(0), stride_cn=mm1_out.stride(1),
        BLOCK_SIZE_M=BLOCK_SIZE_M,
        BLOCK_SIZE_N=BLOCK_SIZE_N,
        BLOCK_SIZE_K=BLOCK_SIZE_K,
        GROUP_SIZE_M=GROUP_SIZE_M,
    )
    # Shape: [tokens, hidden_size_wdkv + hidden_size_wdq] (e.g. [1024, 2112]).
    mm1_out = mm1_out.to(torch.float16)

    # --- Stage 3: split, second RMSNorm + quantization, matmul with wuq ---
    hidden_size_wdkv = mm1_out.shape[-1] - hidden_size_wdq
    mm1_split1, mm1_split2 = torch.split(mm1_out, [hidden_size_wdkv, hidden_size_wdq], dim=1)
    scale_val2 = 1 / quant_scale2.item()
    offset_val2 = quant_offset2.item()
    rms_quant_out2 = torch.empty((tokens, hidden_size_wdq), dtype=torch.int8, device=DEVICE)
    grid_rms = (tokens,)
    rms_norm_quant[grid_rms](
        mm1_split2.to(torch.float32).npu(),
        gamma2.to(torch.float32).npu(),
        beta2.to(torch.float32).npu(),
        rms_quant_out2,
        quant_scale_f=scale_val2, quant_offset_f=offset_val2,
        QUANTMAX=QUANTMAX, QUANTMIN=QUANTMIN,
        N=tokens, H=hidden_size_wdq, EPS=epsilon,
        BLOCK_SIZE=TILE_SIZE_RMS,
    )

    M = rms_quant_out2.shape[0]
    N, K = wuq.shape
    mm2_out = torch.empty((M, N), dtype=torch.float32, device=DEVICE)
    grid_matmul = (triton.cdiv(M, BLOCK_SIZE_M) * triton.cdiv(N, BLOCK_SIZE_N),)
    wuq_T = wuq
    matmul_bias_scale[grid_matmul](
        rms_quant_out2.to(torch.float32), wuq_T.to(torch.float32).npu(), mm2_out,
        bias2.npu(), de_scale2.npu(),
        M, N, K,
        stride_am=rms_quant_out2.stride(0), stride_ak=rms_quant_out2.stride(1),
        stride_bk=wuq_T.stride(1), stride_bn=wuq_T.stride(0),
        stride_cm=mm2_out.stride(0), stride_cn=mm2_out.stride(1),
        BLOCK_SIZE_M=BLOCK_SIZE_M,
        BLOCK_SIZE_N=BLOCK_SIZE_N,
        BLOCK_SIZE_K=BLOCK_SIZE_K,
        GROUP_SIZE_M=GROUP_SIZE_M,
    )

    # --- Stage 4: key path (RMSNorm + RoPE + cache write, golden impl) ---
    hidden_size_rms_kv = mm1_split1.shape[-1] - hidden_size_rope_k
    mm1_split1_cpu = mm1_split1.clone().cpu()
    mm1_split1_1, mm1_split1_2 = torch.split(mm1_split1_cpu, [hidden_size_rms_kv, hidden_size_rope_k], dim=1)
    key_norm_out = mm1_split1_1.reshape(tokens, 1, hidden_size_rms_kv)
    mlapo = MLAPO()
    key_out = mlapo.rms_norm_rope_reshape_cache_golden(
        key_norm_out, gamma3, mm1_split1_2, cos1, sin1, slot_mapping,
        key_cache, tokens, hidden_size_rope_k, block_size, epsilon, data_type
    )

    # --- Stage 5: query path ---
    hidden_size_wuq_head = mm2_out.shape[-1] // head_num
    mm2_out = mm2_out.reshape(tokens, head_num, hidden_size_wuq_head)
    hidden_size_nope_q = hidden_size_wuq_head - hidden_size_rope_q_head
    mm2_split1, mm2_split2 = torch.split(mm2_out, [hidden_size_nope_q, hidden_size_rope_q_head], dim=2)

    # Batched matmul of the nope part with wuk: one batch per head.
    bmm_out_non_permute = torch.empty((head_num, tokens, hidden_size_wuk_head), dtype=torch.float32, device=DEVICE)
    batch_num = head_num
    M = tokens
    N = hidden_size_wuk_head
    K = hidden_size_nope_q
    batch_grid_matmul = (batch_num, triton.cdiv(M, BLOCK_SIZE_M) * triton.cdiv(N, BLOCK_SIZE_N),)
    mm2_split1_permute = torch.permute(mm2_split1, (1, 0, 2))
    batch_matmul[batch_grid_matmul](
        mm2_split1_permute.float().npu(), wuk.float().npu(), bmm_out_non_permute,
        batch_num, M, N, K,
        stride_ab=mm2_split1_permute.stride(0),
        stride_am=mm2_split1_permute.stride(1),
        stride_ak=mm2_split1_permute.stride(2),
        stride_bb=wuk.stride(0),
        stride_bk=wuk.stride(1),
        stride_bn=wuk.stride(2),
        stride_cb=bmm_out_non_permute.stride(0),
        stride_cm=bmm_out_non_permute.stride(1),
        stride_cn=bmm_out_non_permute.stride(2),
        BLOCK_SIZE_M=BLOCK_SIZE_M,
        BLOCK_SIZE_N=BLOCK_SIZE_N,
        BLOCK_SIZE_K=BLOCK_SIZE_K,
        GROUP_SIZE_M=GROUP_SIZE_M,
    )
    bmm_out = torch.permute(bmm_out_non_permute, (1, 0, 2))

    # Tile sin/cos across heads: [tokens, d] -> [tokens, head_num * d].
    # (Both outputs are produced by a single kernel launch.)
    tokens, hidden_size_rope_k = sin2.shape
    grid_pad = (tokens,)
    pad_hidden_size_rope_k = hidden_size_rope_k * head_num
    pad_sin2 = torch.empty((tokens, pad_hidden_size_rope_k), dtype=sin2.dtype, device=DEVICE)
    pad_cos2 = torch.empty((tokens, pad_hidden_size_rope_k), dtype=cos2.dtype, device=DEVICE)
    pad_head_num[grid_pad](
        pad_sin2, pad_cos2, sin2.npu(), cos2.npu(), tokens=tokens,
        head_num=head_num, hidden_size_rope_k=hidden_size_rope_k,
        total_output_dim=pad_hidden_size_rope_k, BLOCK_SIZE_K=hidden_size_rope_k
    )

    # RoPE rotate-half of the rope part of q.
    mm2_split2_reshape = mm2_split2.reshape(tokens, head_num * hidden_size_rope_q_head)
    tokens, pad_hidden_size_rope_q_head = mm2_split2_reshape.shape
    hidden_size_rope_q_head = pad_hidden_size_rope_q_head // head_num
    rotate_mm2_split2 = torch.empty_like(mm2_split2_reshape, dtype=mm2_split2_reshape.dtype, device=DEVICE)
    grid_rope_half = (triton.cdiv(tokens, BLOCK_SIZE_M), triton.cdiv(pad_hidden_size_rope_q_head, BLOCK_SIZE_N))
    rotate_half_x[grid_rope_half](
        rotate_mm2_split2, mm2_split2_reshape, tokens=tokens,
        head_num=head_num, hidden_size_rope_q_head=hidden_size_rope_q_head,
        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N
    )

    # The data generator guarantees hidden_size_rope_k == hidden_size_rope_q_head.
    # BUG FIX: the original wrote assert(condition, "msg"), which asserts a
    # non-empty tuple and is therefore always true; the message belongs after
    # a comma as the assert's second operand.
    assert hidden_size_rope_k == hidden_size_rope_q_head, "hidden_size_rope_q_head must equal to hidden_size_rope_k"

    # Elementwise RoPE combine: rope_res = q * cos + rotate(q) * sin.
    # pad_sin2 / pad_cos2 / rotate_mm2_split2 / mm2_split2_reshape all have
    # shape [tokens, head_num * hidden_size_rope_q_head].
    M, N = pad_cos2.shape
    grid_per_matmul = (triton.cdiv(M, BLOCK_SIZE_M) * triton.cdiv(N, BLOCK_SIZE_N),)
    rope_res = torch.empty_like(mm2_split2_reshape, dtype=mm2_split2_reshape.dtype, device=DEVICE)
    per_mamtul[grid_per_matmul](
        rope_res, mm2_split2_reshape, pad_cos2, rotate_mm2_split2, pad_sin2,
        M=M, N=N, stride_m=pad_cos2.stride(0), stride_n=pad_cos2.stride(1),
        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, GROUP_SIZE_M=GROUP_SIZE_M
    )
    tokens, pad_headNum = rope_res.shape
    rope_res = rope_res.reshape(tokens, head_num, pad_headNum // head_num)
    rope_res = rope_res.to(data_type)
    q_out = torch.cat((bmm_out.to(data_type), rope_res), dim=2)

    # Select the output layout by cache mode.
    if cache_mode == 1:
        # TODO(review): the 512/576 slice boundaries assume the default
        # hidden_size_wuk_head=512 and rope head size 64; confirm for other
        # configurations.
        return [
            q_out[..., 0:512].cpu(),
            key_out[..., 0:512].cpu(),
            q_out[..., 512:576].cpu(),
            key_out[..., 512:576].cpu()
        ]
    else:
        return [q_out.cpu(), key_out.cpu()]