# import torch
# import torch.nn.functional as F
# import torch_npu
# import numpy as np
# import random

# # 常量定义
# QUANTMAX = 127
# QUANTMIN = -128

# # 辅助函数：处理 dequantize scale（针对 float16 特殊处理）
# def process_deq_scale(deq_scale: torch.Tensor) -> torch.Tensor:
#     ret = torch.frombuffer(deq_scale.numpy().tobytes(), dtype=torch.int32).to(torch.int64)
#     return ret

# # 辅助函数：向上取整
# def round_up(val: int, align: int) -> int:
#     if align == 0:
#         return 0
#     return -(val // -align) * align

# # 辅助函数：转置数据（用于 matmul 权重，模拟 NPU 格式）
# def transdata(nd_mat, block_size: tuple = (16, 16)):
#     r = round_up(nd_mat.shape[0], block_size[0])
#     c = round_up(nd_mat.shape[1], block_size[1])
#     r_pad = r - nd_mat.shape[0]
#     c_pad = c - nd_mat.shape[1]
#     nd_mat = F.pad(nd_mat, ((0, r_pad, 0, c_pad)))
#     nz_mat = torch.permute(
#         torch.reshape(nd_mat, (r // block_size[0], block_size[0], c // block_size[1], block_size[1])), [2, 0, 1, 3]
#     )
#     nz_mat = torch.reshape(nz_mat, (nz_mat.shape[0], nz_mat.shape[1] * nz_mat.shape[2], nz_mat.shape[3]))
#     return nz_mat

# # RMS Norm 黄金计算（非量化版）
# def rms_norm_golden(x: torch.Tensor, gamma: torch.Tensor, epsilon: float = 1e-6, hidden_size: int = None) -> torch.Tensor:
#     if hidden_size is None:
#         hidden_size = x.shape[-1]
#     x_float32 = x.to(torch.float32)
#     square_sum = torch.sum(torch.square(x_float32), dim=-1, keepdims=True)
#     rms = 1.0 / torch.sqrt(square_sum / hidden_size + epsilon)
#     gamma_float32 = gamma.to(torch.float32)
#     rms_norm = rms * x_float32 * gamma_float32
#     return rms_norm.to(x.dtype)

# # RMS Norm 量化计算（用于 MatMul 输入）
# def rms_norm_quant_calc(input_tensor: torch.Tensor, gamma: torch.Tensor, beta: torch.Tensor,
#                         quant_scale: torch.Tensor, quant_offset: torch.Tensor, epsilon: float = 1e-6,
#                         hidden_size: int = None, quant_max: int = QUANTMAX, quant_min: int = QUANTMIN) -> torch.Tensor:
#     if hidden_size is None:
#         hidden_size = input_tensor.shape[-1]
#     out_shape = input_tensor.shape
#     scale = 1.0 / quant_scale.item()
#     offset = quant_offset.item()
#     input_scale = torch.tensor(scale, dtype=torch.float32)
#     input_offset = torch.tensor(offset, dtype=torch.float32)
#     input0 = input_tensor.float()
#     input1 = gamma.float()
#     # 这里dim=-1 一行一行的处理内容
#     square_sum = torch.sum(torch.square(input0), dim=-1, keepdims=True)
#     factor = 1.0 / torch.sqrt(square_sum / hidden_size + epsilon)
#     output = input0 * factor * input1
#     output = (output + beta.float()) * input_scale + input_offset
#     # 去整函数
#     output = torch.round(output)
#     # 量化到int8的空间中
#     output = torch.clamp(output, quant_min, quant_max).to(torch.int8)
#     return output

# # RoPE 旋转半部分（Q 专用）
# def rotate_half_x(q_temp: torch.Tensor, head_num: int) -> torch.Tensor:
#     q_splits = torch.chunk(q_temp, head_num, dim=1)
#     processed_q_splits = []
#     for q_split in q_splits:
#         first_half, second_half = torch.chunk(q_split, 2, dim=1)
#         processed_q_split = torch.cat((-second_half, first_half), dim=1)
#         processed_q_splits.append(processed_q_split)
#     return torch.cat(processed_q_splits, dim=1)

# # RoPE 旋转半部分（K 专用）
# def rotate_half(k_temp: torch.Tensor) -> torch.Tensor:
#     first_half, second_half = torch.chunk(k_temp, 2, dim=1)
#     return torch.cat((-second_half, first_half), dim=1)

# # Pad Head Num（扩展 sin/cos 到 head_num 维度）
# def pad_head_num(headdim_sin_cos: torch.Tensor, head_num: int) -> torch.Tensor:
#     return torch.tile(headdim_sin_cos, (1, head_num))

# # RoPE Concat 黄金计算（Q 部分）
# def rope_concat_golden(q: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor, concat_input: torch.Tensor,
#                        head_num: int, rope_hidden_size: int, input_token_num: int, dtype: torch.dtype) -> torch.Tensor:
#     pad_sin = pad_head_num(sin, head_num)
#     pad_cos = pad_head_num(cos, head_num)
#     rope_res = q * pad_cos + rotate_half_x(q, head_num) * pad_sin
#     rope_res = rope_res.reshape(input_token_num, head_num, rope_hidden_size)
#     rope_res = rope_res.to(dtype)
#     return torch.cat((concat_input.to(dtype), rope_res), dim=2)

# # RoPE 黄金计算（K 部分）
# def rope_golden(key_rope: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
#     return key_rope * cos + rotate_half(key_rope) * sin

# # Reshape & Cache 黄金计算
# def rac_golden(key_rac: torch.Tensor, slot_mapping: torch.Tensor, key_cache_in: torch.Tensor, block_size: int) -> torch.Tensor:
#     key_cache_out = key_cache_in.clone()
#     for i, slot in enumerate(slot_mapping):
#         if slot < 0:
#             continue
#         block_index = slot // block_size
#         block_offset = slot % block_size
#         token_key = key_rac[i]
#         key_cache_out[block_index][block_offset] = token_key
#     return key_cache_out

# # 组合 RMS Norm + RoPE + Reshape & Cache 黄金计算（K 部分）
# def rms_norm_rope_reshape_cache_golden(x: torch.Tensor, gamma: torch.Tensor, key_rope: torch.Tensor,
#                                        cos: torch.Tensor, sin: torch.Tensor, slot_mapping: torch.Tensor,
#                                        key_cache_in: torch.Tensor, input_token_num: int, rope_hidden_size: int,
#                                        block_size: int, epsilon: float = 1e-6, dtype: torch.dtype = torch.float16) -> torch.Tensor:
#     rms_norm_output = rms_norm_golden(x, gamma, epsilon, hidden_size=x.shape[-1])
#     rope_output = rope_golden(key_rope, sin, cos)
#     rope_reshape = rope_output.reshape(input_token_num, 1, rope_hidden_size)
#     key_rac = torch.cat((rms_norm_output, rope_reshape), dim=-1)
#     return rac_golden(key_rac, slot_mapping, key_cache_in, block_size).to(dtype)

# # 主 Golden 计算函数：MLA Preprocess 的完整黄金输出
# def mla_preprocess_golden(input1: torch.Tensor, gamma1: torch.Tensor, beta1: torch.Tensor, quant_scale1: torch.Tensor,
#                           quant_offset1: torch.Tensor, wdqkv: torch.Tensor, de_scale1: torch.Tensor, bias1: torch.Tensor,
#                           gamma2: torch.Tensor, beta2: torch.Tensor, quant_scale2: torch.Tensor, quant_offset2: torch.Tensor,
#                           wuq: torch.Tensor, de_scale2: torch.Tensor, bias2: torch.Tensor, gamma3: torch.Tensor,
#                           cos1: torch.Tensor, sin1: torch.Tensor, wuk: torch.Tensor, key_cache: torch.Tensor,
#                           slot_mapping: torch.Tensor, Tokens: int, head_num: int, block_size: int = 128,
#                           data_type: torch.dtype = torch.float16, cache_mode: int = 0, epsilon: float = 1e-6,
#                           hidden_size: int = 7168, rope_hidden_size: int = 64, rms_hidden_size: int = 512) -> list:
#     """
#     MLA Preprocess 的 Golden 计算入口函数。
#     :param input1: 输入张量 (Tokens, 7168)
#     :param gamma1, beta1, quant_scale1, quant_offset1: RMS Norm1 参数
#     :param wdqkv: dqkv 权重 (2112, 7168)
#     :param de_scale1, bias1: dqkv dequantize scale 和 bias
#     :param gamma2, beta2, quant_scale2, quant_offset2: RMS Norm2 参数
#     :param wuq: uq 权重 (head_num*192, 1536)
#     :param de_scale2, bias2: uq dequantize scale 和 bias
#     :param gamma3: RMS Norm3 gamma (512)
#     :param cos1, sin1: RoPE cos/sin (Tokens, 64)
#     :param wuk: K 投影权重 (head_num, 128, 512)
#     :param key_cache: K 缓存 (block_num, block_size, 1, 576)
#     :param slot_mapping: 槽位映射 (Tokens)
#     :param Tokens: 序列长度
#     :param head_num: 头数
#     :param block_size: 块大小
#     :param data_type: 数据类型 (float16 或 bfloat16)
#     :param cache_mode: 缓存模式 (0: 无 split; 1: split Q/K)
#     :param epsilon: RMS Norm epsilon
#     :param hidden_size: 隐藏维度 (7168)
#     :param rope_hidden_size: RoPE 隐藏维度 (64)
#     :param rms_hidden_size: RMS 隐藏维度 (512)
#     :return: [q_out, key_out] 或 split 版本
#     """
#     input_token_num = Tokens
#     headdim = 576  # Q/K head dim

#     # Step 1: RMS Norm 量化 (input1 -> dqkv 输入)
#     rms_quant_out1 = rms_norm_quant_calc(input1, gamma1, beta1, quant_scale1, quant_offset1, epsilon, hidden_size)

#     # Step 2: MatMul (dqkv)
#     mm1_out = torch.matmul(rms_quant_out1.to(torch.float32), wdqkv.transpose(0, 1).to(torch.float32))
#     mm1_out = mm1_out.to(torch.int32) + bias1
#     mm1_out = (mm1_out.to(torch.float32) * de_scale1).to(data_type)
#     if data_type == torch.float16:
#         de_scale1 = process_deq_scale(de_scale1)
#     return mm1_out

#     # Split dqkv: [512 (key_norm), 64 (key_rope), 1536 (value_norm)]
#     mm1_split1, mm1_split2, mm1_split3 = torch.split(mm1_out, [512, 64, 1536], dim=1)

#     # Step 3: RMS Norm 量化 (value_norm -> uq 输入)
#     rms_quant_out2 = rms_norm_quant_calc(mm1_split3, gamma2, beta2, quant_scale2, quant_offset2, epsilon, 1536)

#     # Step 4: MatMul (uq)
#     mm2_out = torch.matmul(rms_quant_out2.to(torch.float32), wuq.transpose(0, 1).to(torch.float32))
#     mm2_out = mm2_out.to(torch.int32) + bias2
#     mm2_out = (mm2_out.to(torch.float32) * de_scale2).to(data_type)
#     mm2_out = mm2_out.reshape(Tokens, head_num, 192)

#     # Split uq: [128 (q_base), 64 (q_rope)]
#     mm2_split1, mm2_split2 = torch.split(mm2_out, [128, 64], dim=2)

#     # Step 5: EinSum / BMM for q_base
#     bmm_out = torch.permute(
#         torch.matmul(torch.permute(mm2_split1, (1, 0, 2)).float(), wuk.float()),
#         (1, 0, 2),
#     )

#     # Step 6: RoPE Concat for Q (Tokens, head_num, 576)
#     q_out = rope_concat_golden(
#         mm2_split2.reshape(Tokens, head_num * 64), sin1, cos1, bmm_out,
#         head_num, rope_hidden_size, input_token_num, data_type
#     )

#     # Step 7: RMS Norm + RoPE + Reshape & Cache for K
#     key_norm_out = mm1_split1.reshape(Tokens, 1, rms_hidden_size)
#     key_out = rms_norm_rope_reshape_cache_golden(
#         key_norm_out, gamma3, mm1_split2, cos1, sin1, slot_mapping,
#         key_cache, input_token_num, rope_hidden_size, block_size, epsilon, data_type
#     )

#     # 根据 cache_mode 返回输出
#     if cache_mode == 1:
#         # Split: q_base (Tokens, head_num, 512), key_base (block_num, block_size, 1, 512),
#         #        q_rope (Tokens, head_num, 64), key_rope (block_num, block_size, 1, 64)
#         return [
#             q_out[..., :512], key_out[..., :512],
#             q_out[..., 512:], key_out[..., 512:]
#         ]
#     else:
#         return [q_out, key_out]

# # 示例调用（用于测试 Golden 函数）
# if __name__ == "__main__":
#     # 设置随机种子
#     seed = 12
#     random.seed(seed)
#     np.random.seed(seed)
#     torch.manual_seed(seed)

#     # 参数
#     Tokens = 1024
#     head_num = random.choice([32, 128, 16, 64])
#     block_size = random.choice([128, 256])      
#     data_type = torch.float16  # 或 torch.bfloat16
#     block_num = 192
#     headdim = 576

#     # 生成模拟输入（实际使用时替换为真实数据）
#     hidden_size = 7168
#     input1 = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(Tokens, hidden_size))).to(data_type)
#     gamma1 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(hidden_size))).to(data_type)
#     beta1 = torch.from_numpy(np.random.randint(-2, 2, (hidden_size)).astype(np.float16)).to(data_type)
#     quant_scale1 = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(1))).to(data_type)
#     quant_offset1 = torch.from_numpy(np.random.uniform(-128.0, 127.0, size=(1))).to(torch.int8)
#     wdqkv = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(2112, hidden_size))).to(torch.int8)
#     de_scale1 = torch.rand((2112), dtype=torch.float32) / 1000
#     bias1 = torch.from_numpy(np.random.randint(-10, 10, (1, 2112)).astype(np.int32)).to(torch.int32)

#     gamma2 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(1536))).to(data_type)
#     beta2 = torch.from_numpy(np.random.randint(-2, 2, (1536)).astype(np.float16)).to(data_type)
#     quant_scale2 = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(1))).to(data_type)
#     quant_offset2 = torch.from_numpy(np.random.uniform(-128.0, 127.0, size=(1))).to(torch.int8)
#     wuq = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(head_num * 192, 1536))).to(torch.int8)
#     de_scale2 = torch.rand((head_num * 192), dtype=torch.float32) / 1000
#     bias2 = torch.from_numpy(np.random.randint(-10, 10, (1, head_num * 192)).astype(np.int32)).to(torch.int32)

#     gamma3 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(512))).to(data_type)
#     cos1 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(Tokens, 64))).to(data_type)
#     sin1 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(Tokens, 64))).to(data_type)
#     wuk = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(head_num, 128, 512))).to(data_type)
#     key_cache = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(block_num, block_size, 1, headdim))).to(data_type)
#     slot_mapping = torch.from_numpy(np.random.choice(192 * 128, Tokens, replace=False).astype(np.int32)).to(torch.int32)

#     # 调用 Golden 计算
#     golden_outputs = mla_preprocess_golden(
#         input1, gamma1, beta1, quant_scale1, quant_offset1, wdqkv, de_scale1, bias1,
#         gamma2, beta2, quant_scale2, quant_offset2, wuq, de_scale2, bias2, gamma3,
#         cos1, sin1, wuk, key_cache, slot_mapping,
#         Tokens=Tokens, head_num=head_num, block_size=block_size, data_type=data_type, cache_mode=0
#     )

#     print(f"Golden 输出形状: {[out.shape for out in golden_outputs]}")
#     print("Golden 计算完成！")

import torch  ## 需要导入CANN的环境变量set_env.sh才能生效torch_npu
import torch.nn.functional as F
import torch_npu
import numpy as np
import random


'''

'''

# Fix all RNG seeds so the golden data is reproducible across runs
seed = 12
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
## int8 quantization range used by the RMSNorm+Quant golden path
QUANTMAX = 127
QUANTMIN = -128


def process_deq_scale(
        deq_scale: torch.Tensor
        ) -> torch.Tensor:
    '''
    Prepare de_scale1 / de_scale2 for the atb MLAPreprocessOperation.

    Reinterprets the raw float32 bits of ``deq_scale`` as int32 and widens
    the result to int64 (a bit-cast, not a numeric conversion):

        reinterpret_cast<int32>(deq_scale).to(int64)

    In the original golden flow this is applied to the already-consumed
    de_scale1 / de_scale2 when data_type == float16, before handing them
    to atb. For atb MLAPreprocessOperation:
      - data_type == float16  -> de_scale must be int64
      - data_type == bfloat16 -> de_scale stays float32

    NOTE: the previous return annotation said ``np.ndarray``; the function
    actually returns a torch.Tensor (fixed here).
    '''
    # .numpy().tobytes() serializes the float32 payload; frombuffer re-reads
    # the identical bytes as int32, preserving the bit pattern exactly.
    ret = torch.frombuffer(deq_scale.numpy().tobytes(), dtype=torch.int32).to(torch.int64)
    return ret


def round_up(
        val: int,
        align: int
        ) -> int:
    '''
    Round ``val`` up to the nearest multiple of ``align``.

    ``align == 0`` yields 0 (guards the division below). The ceiling
    division is expressed as ``-(val // -align)``.
    '''
    return 0 if align == 0 else -(val // -align) * align

def transdata(
        nd_mat,
        block_size: tuple = (16, 16)
        ):
    '''
    2-D data: ND -> NZ.

    Used on wdqkv and wuq before feeding atb MLAPreprocessOperation, which
    requires those two weight matrices in NZ format.
    wuk may be either ND or NZ:
      - ND: shape = [head_num_q, hidden_size_nope_q_head = 128, hidden_size_wuk_head = 512]
      - NZ: shape = [head_num_q, 32, 128, 16]

    Called on wdqkv and wuq (if a future comparison against atb is needed).
    '''
    r = round_up(nd_mat.shape[0], block_size[0])
    c = round_up(nd_mat.shape[1], block_size[1])
    r_pad = r - nd_mat.shape[0]
    c_pad = c - nd_mat.shape[1]
    # BUGFIX: F.pad consumes pad pairs starting from the LAST dimension, so
    # the first pair pads the columns and the second pads the rows. The old
    # order (0, r_pad, 0, c_pad) swapped them; it only went unnoticed because
    # the shapes used so far are 16-aligned (both pads were 0).
    nd_mat = F.pad(nd_mat, (0, c_pad, 0, r_pad))
    nz_mat = torch.permute(
        torch.reshape(nd_mat, (r // block_size[0], block_size[0], c // block_size[1], block_size[1])), [2, 0, 1, 3]
    )
    nz_mat = torch.reshape(nz_mat, (nz_mat.shape[0], nz_mat.shape[1] * nz_mat.shape[2], nz_mat.shape[3]))
    return nz_mat

def transdata_3d(
        nd_mat,
        block_size: tuple = (16, 16)
        ):
    '''
    3-D ND -> NZ: apply the 2-D transdata to every slice along dim 0.

    Called on wuk (if a future comparison against atb is needed).

    :raises ValueError: if ``nd_mat`` is not 3-dimensional.
    '''
    if nd_mat.ndim != 3:
        raise ValueError("Expected a 3-dimensional input array.")
    # torch.stack takes `dim`, not numpy's `axis` alias; iterating a tensor
    # yields its slices along dim 0.
    return torch.stack([transdata(slice_2d, block_size) for slice_2d in nd_mat], dim=0)




class MLAPO: 

    def __init__(self):
        """Stateless container; all methods are pure golden-computation helpers."""
        pass
        

    def rms_norm_golden(
            self, 
            x: torch.Tensor, 
            gamma: torch.Tensor, 
            epsilon: float = 1e-6, 
            hidden_size: int = None
            ) -> torch.Tensor:
        '''
        RMSNorm 无量化，用于对 输入tokens 经 RMSNormQuant 降维后切分为KV后其中的V部分计算

        gamma * x_float / x的均方根（1方和2均2.5加epsilon3根）
        result = gamma_float * x_float 
                                        /  
                                        sqrt(
                                                sum(
                                                    square(x_float), axis = -1
                                                                                ) ## sum
                                                                                / hidden_size + epsilon 
                                                                                                                    ) ## sqrt
        x_float = x.to(float)
        gamma_float = gamma.to(float)
        '''
        if hidden_size is None:
            hidden_size = x.shape[-1]
        x_float32 = x.to(torch.float32)
        square_sum = torch.sum(torch.square(x_float32), dim=-1, keepdims=True)
        rms = 1.0 / torch.sqrt(square_sum / hidden_size + epsilon)
        gamma_float32 = gamma.to(torch.float32)
        rms_norm = rms * x_float32 * gamma_float32
        return rms_norm.to(x.dtype)

    def rms_norm_quant_calc(
            self,
            input_tensor: torch.Tensor,
            gamma: torch.Tensor,
            beta: torch.Tensor,
            quant_scale: torch.Tensor,
            quant_offset: torch.Tensor,
            epsilon: float = 1e-6,
            hidden_size: int = None,
            quant_max: int = QUANTMAX,
            quant_min: int = QUANTMIN
            ) -> torch.Tensor:
        '''
        RMSNorm + static per-tensor asymmetric quantization to int8, used to
        produce MatMul inputs:
          1. input tokens -> RMSNormQuant -> matmul with wdqkv (down-project)
          2. the Q split of the down-projected tokens -> RMSNormQuant ->
             matmul with wuq (up-project)

        output = clamp(round(RMSNorm(input) + beta) / quant_scale
                       + quant_offset), quant_min, quant_max).to(int8)
        where RMSNorm is computed in float32 as in rms_norm_golden.

        Fixes vs. the previous version: the local holding ``gamma.float()``
        was named ``input`` (shadowing the builtin); the unused ``out_shape``
        local is removed.
        '''
        if hidden_size is None:
            hidden_size = input_tensor.shape[-1]
        # quant_scale is a divisor in the quant formula; fold it into a
        # multiplier once, as float32 scalar tensors.
        scale = 1.0 / quant_scale.item()
        offset = quant_offset.item()
        input_scale = torch.tensor(scale, dtype=torch.float32)
        input_offset = torch.tensor(offset, dtype=torch.float32)
        x_f32 = input_tensor.float()
        gamma_f32 = gamma.float()
        # dim=-1: each token row is normalized independently
        square_sum = torch.sum(torch.square(x_f32), dim=-1, keepdims=True)
        factor = 1.0 / torch.sqrt(square_sum / hidden_size + epsilon)
        output = x_f32 * factor * gamma_f32
        output = (output + beta.float()) * input_scale + input_offset
        # round to nearest (torch.round: ties to even)
        output = torch.round(output)
        # clamp into the int8 range, then cast
        output = torch.clamp(output, quant_min, quant_max).to(torch.int8)
        return output

    
    def rotate_half_x(
            self, 
            q_temp: torch.Tensor, 
            head_num: int
            ) -> torch.Tensor:
        '''
        Q head RoPE 部分：旋转半部分
        '''
        # 拆分成 head_num 个 [n,head_dim] 的二维向量
        q_splits = torch.chunk(q_temp, head_num, dim=1)
        # 对每个 [n,head_dim] 向量的第二维进行分割，并对第二块乘以 -1再拼回到第一块前面
        processed_q_splits = []
        for q_split in q_splits:
            # 分割第二维
            first_half, second_half = torch.chunk(q_split, 2, dim=1)
            # 拼接回 [n,head_dim] 的二维向量
            processed_q_split = torch.cat((-second_half, first_half), dim=1)
            processed_q_splits.append(processed_q_split)
        # 将所有处理后的 [n,head_dim] 向量拼回 [n,head_num*head_dim] 的二维向量
        return torch.cat(processed_q_splits, dim=1)

    # RoPE rotate-half (K-specific)
    def rotate_half(
            self, 
            k_temp: torch.Tensor
            ) -> torch.Tensor:
        '''
        K Rope
        '''
        
        first_half, second_half = torch.chunk(k_temp, 2, dim=1)
        processed_k_split = torch.cat((-second_half, first_half), dim=1)
        return processed_k_split

    
    def pad_head_num(
            self, 
            head_dim_sin_cos: torch.Tensor, 
            head_num: int
            ) -> torch.Tensor:
        '''
        Pad Head Num（扩展 sin/cos 到 head_num 维度）
        '''
        # 
        return torch.tile(head_dim_sin_cos, (1, head_num))

    def rope_concat_golden(
            self, 
            q: torch.Tensor, 
            sin: torch.Tensor, 
            cos: torch.Tensor, 
            concat_input: torch.Tensor,
            head_num: int, 
            rope_hidden_size: int, 
            input_token_num: int, 
            dtype: torch.dtype
            ) -> torch.Tensor:
        '''
        输入 tokens 降维切分后，Q 部分经过 RMSNormQuant后升维再切分为 RoPE 部分和 NoPE 部分，
        RoPE 部分做 RoPE
        NoPE 部分做升维
        这里是 RoPE部分做RoPE + 与NoPE部分升维结果 做 Concat 计算
        '''
        pad_sin = self.pad_head_num(sin, head_num)
        pad_cos = self.pad_head_num(cos, head_num)
        rope_res = q * pad_cos + self.rotate_half_x(q, head_num) * pad_sin
        rope_res = rope_res.reshape(input_token_num, head_num, rope_hidden_size)
        rope_res = rope_res.to(dtype)
        return torch.cat((concat_input.to(dtype), rope_res), dim=2)

    def rope_golden(
            self, 
            key_rope: torch.Tensor, 
            sin: torch.Tensor, 
            cos: torch.Tensor
            ) -> torch.Tensor:
        '''
        输入 tokens 降维切分后，对 K 部分计算 RoPE
        '''
        return key_rope * cos + self.rotate_half(key_rope) * sin

    
    def rac_golden(
            self, 
            key_rac: torch.Tensor, 
            slot_mapping: torch.Tensor, 
            key_cache_in: torch.Tensor, 
            block_size: int
            ) -> torch.Tensor:
        '''
        将K部分RoPE结果与KV部分RMSNorm结果进行处理，加入cache后返回
        '''
        
        key_cache_out = key_cache_in.clone()
        for i, slot in enumerate(slot_mapping):
            if slot < 0:
                continue
            ## block_size 是128、256随机选择
            block_index = slot // block_size
            block_offset = slot % block_size
            token_key = key_rac[i]
            key_cache_out[block_index][block_offset] = token_key
        return key_cache_out

    def rms_norm_rope_reshape_cache_golden(
            self, 
            x: torch.Tensor, 
            gamma: torch.Tensor, 
            key_rope: torch.Tensor,
            cos: torch.Tensor, 
            sin: torch.Tensor, 
            slot_mapping: torch.Tensor,
            key_cache_in: torch.Tensor, 
            input_token_num: int, 
            rope_hidden_size: int,
            block_size: int, 
            epsilon: float = 1e-6, 
            dtype: torch.dtype = torch.float16
            ) -> torch.Tensor:
        '''
        对K部分做RoPE，对V部分做RMSNorm，
        reshape后调用rac_golden计算cache
        '''
        rms_norm_output = self.rms_norm_golden(x, gamma, epsilon, hidden_size=x.shape[-1])
        rope_output = self.rope_golden(key_rope, sin, cos)
        rope_reshape = rope_output.reshape(input_token_num, 1, rope_hidden_size)
        key_rac = torch.cat((rms_norm_output, rope_reshape), dim=-1)
        return self.rac_golden(key_rac, slot_mapping, key_cache_in, block_size).to(dtype)

    def mla_preprocess_calc(
            self, 
            input : torch.Tensor, 
            gamma1 : torch.Tensor, 
            beta1 : torch.Tensor, 
            quant_scale1 : torch.Tensor,
            quant_offset1 : torch.Tensor, 
            wdqkv : torch.Tensor, 
            de_scale1 : torch.Tensor, 
            bias1 : torch.Tensor,
            # 
            gamma2 : torch.Tensor, 
            beta2 : torch.Tensor, 
            quant_scale2 : torch.Tensor, 
            quant_offset2 : torch.Tensor,
            wuq : torch.Tensor, 
            de_scale2 : torch.Tensor, 
            bias2 : torch.Tensor, 
            # 
            gamma3 : torch.Tensor,
            cos1 : torch.Tensor, 
            sin1 : torch.Tensor, 
            cos2 : torch.Tensor, 
            sin2 : torch.Tensor, 
            wuk : torch.Tensor, 
            key_cache : torch.Tensor,
            slot_mapping : torch.Tensor, 
            # 
            tokens : int, 
            head_num_q : int, 
            block_size : int, 
            data_type : torch.dtype = torch.float16, 
            cache_mode : int = 0, 
            epsilon : float = 1e-6, 
            hidden_size : int = 7168, 
            hidden_size_wdq : int = 1536, 
            hidden_size_rope_q_head : int = 64, 
            hidden_size_rope_k : int = 64
            ) -> list:
        """
        MLA Preprocess 全计算流程。

        以main函数传参为例：

        ## 元数据：

        tokens = 1024
        head_num_q = random.choice([32, 128, 16, 64])
        block_size = random.choice([128, 256])
        block_num = 192
        data_type = torch.float16  # 或 torch.bfloat16，只有两个取值
        hidden_size = 7168

        hidden_size_wdqkv = 2112                                    ## 输入 tokens 经过降维后hidden_size
        hidden_size_wdq = 1536                                      ## 降维后拆分的Q的hidden_size，剩余为KV
        hidden_size_wdkv = hidden_size_wdqkv - hidden_size_wdq      ## d是降维，u是升维
        hidden_size_rope_k = 64                                     ## KV传入RoPE的hidden_size（只是K的），剩余为K剩余部分+V
        hidden_size_rms_kv = hidden_size_wdkv - hidden_size_rope_k 
        hidden_size_wuq_head = 192                                  ## 输入tokens经降维+切出Q后升维，其中单头hidden_size
        hidden_size_rope_q_head = hidden_size_rope_k = 64           ## Q单头传入RoPE的hidden_size
        hidden_size_nope_q_head = hidden_size_wuq_head - hidden_size_rope_q_head
        hidden_size_wuk_head = 512                                  ## 输入tokens经降维+切出Q后升维，其中单头切分为RoPE和NoPE，这里是NoPE升维后hidden_size

        hidden_size_output_kv = hidden_size_rope_k + hidden_size_rms_kv  ## 576
        hidden_size_output_q = hidden_size_rope_q_head + hidden_size_wuk_head  ## 576

        epsilon = 1e-6
        cache_mode = 0
        
        ## 待添加参数？
        q_rotary_coeff Q旋转系数
        k_rotary_coeff K旋转系数（RoPE的两种实现方式。atb和当前测试都未实现）
        transpose_wdq wdqkv是否转置
        transpose_wuq wuq是否转置
        transpose_wuk wuk是否转置（当前默认都是未转置，代码中调用transpose自行转置。atb也未实现）
        quantMode 指定RMSNorm量化的类型。
                0-PER_TENSOR_QUANT_ASYMM，per_tensor静态非对称量化，默认量化类型。
                1-PER_TOKEN_QUANT_SYMM：per_token 动态对称量化，未实现。
                2-PER_TOKEN_QUANT_ASYMM：per_token 动态非对称量化，未实现。
                3-UNQUANT：不量化，浮点输出，未实现。
                1-3在abt和当前测试都未实现
        cache_mode 指定最终QKV结果输出格式
                - cacheMode 为 0 时，kcache 和 q 均经过拼接后输出。
                - cacheMode 为 1 时，输入输出的 kvCcache 拆分为 krope 和 ctkv，q 拆分为 qrope 和 qnope。
                - cacheMode 为 2 时，krope 和 ctkv 转为 NZ 格式输出，ctkv 和 qnope 经过 per_head 静态对称量化为 int8 类型。
                - cacheMode 为 3 时，krope 和 ctkv 转为 NZ 格式输出。
        
        ## 输出结果：
        qOut 输出 tensor 数据类型data_type｡ cacheMode = 2 时数据类型为 int8
            cacheMode 为 0 : [tokenNum, headNum,576]；
            cacheMode 为 1 : [tokenNum, headNum,64]；
            cacheMode 为 2 : [tokenNum, headNum,512]；
            cacheMode 为 3 : [tokenNum, headNum,512]

        kvCacheOut 输出 tensor  数据类型data_type cacheMode = 2 时数据类型为 int8 ，格式为 NZ 。 cacheMode = 3 时，格式为 NZ ｡
            cacheMode 为 0 : [blockNum, blockSize,1, 576]；
            cacheMode 为 1 : [blockNum, blockSize,1, 512]；
            cacheMode 为 2 : [blockNum, headNum_512/32,block_size, 32]；
            cacheMode 为 3 : [blockNum, headNum_512/16,block_size, 16]

        kvCacheRopeOut 为 0 时输出此 tensor ｡数据类型 = data_type｡ cacheMode = 2 时数据格式为 NZ ｡ cacheMode = 3 时，格式为 NZ ｡
            cacheMode 不为 0 : [tokenNum,headNum,64]；
            cacheMode 为 1 : [blockNum, blockSize,1, 64]；
            cacheMode 为 2 : [blockNum, headNum_64 / 16 ,block_size, 16]；
            cacheMode 为 3 : [blockNum, headNum_64 / 16 ,block_size, 16]
                
        ## 入参：

        :param input : 输入 tokens（包含QKV）                      shape = (tokens, hidden_size), type = data_type
        :param gamma1 : 对输入 tokens 做 RMSNormQuant 参数          shape = (hidden_size), type = data_type
        :param beta1 : 对输入 tokens 做 RMSNormQuant 参数           shape = (hidden_size), type = data_type
        :param quant_scale1 : 对输入 tokens 做 RMSNormQuant 参数    shape = 1, type = data_type
        :param quant_offset1 : 对输入 tokens 做 RMSNormQuant 参数   shape = 1, type = int8
        :param wdqkv : 输入 tokens 的降维矩阵用以matmul             shape = (hidden_size_wdqkv, hidden_size), type = int8
        :param de_scale1 : 输入 tokens 降维后缩放参数               shape = 1, type = float32
        :param bias1 : 输入 tokens 降维后偏移                       shape = (1, hidden_size_wdqkv), type = int32
        
        :param gamma2 : Q部分 RMSNormQuant 参数                     shape = (hidden_size_wdq), type = data_type
        :param beta2 : Q部分 RMSNormQuant 参数                      shape = (hidden_size_wdq), type = data_type
        :param quant_scale2 : Q部分 RMSNormQuant 参数               shape = 1, type = data_type
        :param quant_offset2 : Q部分 RMSNormQuant 参数              shape = 1, type = int8
        :param wuq : Q部分的升维矩阵用以matmul                      shape = (head_num_q * hidden_size_wuq_head, hidden_size_wdq), type = int8
        :param de_scale2 : Q部分升维后缩放参数                      shape = 1, type = float32
        :param bias2 : Q部分升维后偏移                              shape = (1, head_num_q * hidden_size_wuq_head)， type = int32
        
        :param gamma3 : KV 部分 RMSNorm 参数                         shape = (hidden_size_rms_kv), type = data_type
        :param cos1 : K 部分 RoPE 参数                              shape = (tokens, hidden_size_rope_k), type = data_type
        :param sin1 : K 部分 RoPE 参数                              shape = (tokens, hidden_size_rope_k), type = data_type
        :param cos2 : Q 部分单头64维度 RoPE 参数                    数据同sin2
        :param sin2 : Q 部分单头64维度 RoPE 参数                    数据同sin1
        :param wuk : Q 部分单头128维度 升维矩阵用以matmul           shape = (head_num_q, hidden_size_nope_q_head, hidden_size_wuk_head), type = data_type
        :param key_cache : K（和V）缓存？                           shape = (block_num, block_size, 1, hidden_size_output_kv), type = data_type 
        :param slot_mapping: 槽位映射 (tokens)                      shape = 1, type = int32

        :param tokens : 序列长度        
        :param head_num : 头数
        :param block_size : 块大小
        :param data_type : 数据类型 (float16 或 bfloat16等)
        :param cache_mode : 缓存模式 (0: 无 split; 1: split Q/KV)
        :param epsilon : RMS Norm epsilon
        :param hidden_size : 隐藏维度 (7168)
        :param hidden_size_wdq : 降维后的tokens切分k维度 1536
        :param hidden_size_rope_q_head : q 单头 RoPE 隐藏维度 64
        :param hidden_size_rope_k : k RoPE 隐藏维度 64
        :return : [q_out, key_out] 或 split 版本

        以上所有数据中，原test代码内只有 key_cache 会在计算过程中被修改，然后作为最终结果return
        这里使用了clone来计算，没有修改原 key_cache 
        """

        # part 1: RMSNorm + Quant 处理输入 tokens
        rms_norm_quant_tokens = self.rms_norm_quant_calc(input, gamma1, beta1, quant_scale1, quant_offset1, epsilon, hidden_size)

        # Step 2: Matmul + LinearOperation( bias + DeQuant) 对 RMSNormQuant处理的tokens降维
        wdqkv = wdqkv.transpose(0, 1)
        # mm1_out = torch.matmul(rms_norm_quant_tokens.to(torch.float32), wdqkv_T.transpose(0, 1).to(torch.float32))
        mm1_out = torch.matmul(rms_norm_quant_tokens.to(torch.float32), wdqkv.to(torch.float32))
        mm1_out = mm1_out.to(torch.int32) + bias1
        mm1_out = (mm1_out.to(torch.float32) * de_scale1).to(data_type)

        # ==============TEST============
        # return [rms_norm_quant_tokens, mm1_out]
        ## if data_type == torch.float16: 
        ##     self.de_scale1 = process_deq_scale(de_scale1)

        # Split 经过RMSNormQuant处理后降维的tokens [2112(qkv)] [576(kv), 1536 (q)]
        hidden_size_wdkv = mm1_out.shape[-1] - hidden_size_wdq
        mm1_split1, mm1_split2 = torch.split(mm1_out, [hidden_size_wdkv, hidden_size_wdq], dim=1)

        # Step 3: RMS Norm 处理经过RMSNormQuant处理后降维后tokens中的q部分
        rms_quant_out2 = self.rms_norm_quant_calc(mm1_split2, gamma2, beta2, quant_scale2, quant_offset2, epsilon, hidden_size_wdq)

        # Step 4: MatMul + LinearOperation( bias + DeQuant ) 对 经过RMSNormQuant处理后降维后tokens中的q部分做RMSNormQuant后 升维，总维度为 head_num_q * wuq.shape[-1]
        wuq = wuq.transpose(0, 1)
        # mm2_out = torch.matmul(rms_quant_out2.to(torch.float32), wuq_T.transpose(0, 1).to(torch.float32))
        mm2_out = torch.matmul(rms_quant_out2.to(torch.float32), wuq.to(torch.float32))
        mm2_out = mm2_out.to(torch.int32) + bias2
        mm2_out = (mm2_out.to(torch.float32) * de_scale2).to(data_type)

        # ==============TEST============
        # return [rms_norm_quant_tokens, rms_quant_out2]
        ## if data_type == torch.float16: 
        ##     self.de_scale2 = process_deq_scale(de_scale2)

        # Step 5: RMSNorm + RoPE + Reshape & Cache 处理经过RMSNormQuant处理后降维后tokens中的kv部分
        hidden_size_rms_kv = mm1_split1.shape[-1] - hidden_size_rope_k
        mm1_split1_1, mm1_split1_2 = torch.split(mm1_split1, [hidden_size_rms_kv, hidden_size_rope_k], dim = 1)
        key_norm_out = mm1_split1_1.reshape(tokens, 1, hidden_size_rms_kv)
        key_out = self.rms_norm_rope_reshape_cache_golden(
            key_norm_out, gamma3, mm1_split1_2, cos1, sin1, slot_mapping,
            key_cache, tokens, hidden_size_rope_k, block_size, epsilon, data_type
        )

        # Split 经过RMSNormQuant处理后降维后tokens中的q部分做RMSNormQuant后升维的单头，[128 (q_base), 64 (q_rope)]
        hidden_size_wuq_head = mm2_out.shape[-1] // head_num_q
        print("hidden_size_wuq_head: ", hidden_size_wuq_head)
        mm2_out = mm2_out.reshape(tokens, head_num_q, hidden_size_wuq_head)
        hidden_size_nope_q = hidden_size_wuq_head - hidden_size_rope_q_head
        mm2_split1, mm2_split2 = torch.split(mm2_out, [hidden_size_nope_q, hidden_size_rope_q_head], dim=2)

        # Step 6: Matmul，对Q切分后的单头中的NoPE部分升维
        bmm_out = torch.permute(
            torch.matmul(torch.permute(mm2_split1, (1, 0, 2)).float(), wuk.float()),
            (1, 0, 2),
        )
        return [mm2_split2, key_out]
        # Step 7: RoPE + Concat，对 Q 部分升维后单头的RoPE部分做RoPE，与升维后的NoPE部分拼接 
        q_out = self.rope_concat_golden(
            mm2_split2.reshape(tokens, head_num_q * hidden_size_rope_q_head), sin2, cos2, bmm_out,
            head_num_q, hidden_size_rope_k, tokens, data_type
        )

        # 根据 cache_mode 返回输出
        if cache_mode == 1:
            ## 待确认
            return [
                q_out[..., 0:512], 
                key_out[..., 0:512],
                q_out[..., 512:576], 
                key_out[..., 512:576]
            ]
        else:
            return [q_out, key_out]
        

    '''
    Unit-test logic below.
    '''

    def test_mlapo_torch(
            self, 
            input : torch.Tensor, 
            gamma1 : torch.Tensor, 
            beta1 : torch.Tensor, 
            quant_scale1 : torch.Tensor,
            quant_offset1 : torch.Tensor, 
            wdqkv : torch.Tensor, 
            de_scale1 : torch.Tensor, 
            bias1 : torch.Tensor,
            #
            gamma2 : torch.Tensor, 
            beta2 : torch.Tensor, 
            quant_scale2 : torch.Tensor, 
            quant_offset2 : torch.Tensor,
            wuq : torch.Tensor, 
            de_scale2 : torch.Tensor, 
            bias2 : torch.Tensor, 
            #
            gamma3 : torch.Tensor,
            cos1 : torch.Tensor, 
            sin1 : torch.Tensor, 
            cos2 : torch.Tensor, 
            sin2 : torch.Tensor, 
            wuk : torch.Tensor, 
            key_cache : torch.Tensor,
            slot_mapping : torch.Tensor, 
            #
            tokens : int, 
            head_num_q : int, 
            block_size : int, 
            data_type : torch.dtype = torch.float16, 
            cache_mode : int = 0, 
            #
            epsilon : float = 1e-6, 
            hidden_size : int = 7168, 
            hidden_size_wdq : int = 1536, 
            hidden_size_rope_q_head : int = 64, 
            hidden_size_rope_k : int = 512
            ) -> list:
        """Unit-test entry point for the MLA-preprocess golden computation.

        Every argument is forwarded unchanged to ``mla_preprocess_calc``;
        see that method's docstring for the shape/dtype contract of each
        parameter.  Returns whatever list that method produces (q/key
        outputs, or their split variants depending on ``cache_mode``).
        """
        # Thin wrapper: the complete mlapo logic lives in mla_preprocess_calc.
        return self.mla_preprocess_calc(
            input, gamma1, beta1, quant_scale1, quant_offset1,
            wdqkv, de_scale1, bias1,
            gamma2, beta2, quant_scale2, quant_offset2,
            wuq, de_scale2, bias2,
            gamma3, cos1, sin1, cos2, sin2,
            wuk, key_cache, slot_mapping,
            tokens=tokens,
            head_num_q=head_num_q,
            block_size=block_size,
            data_type=data_type,
            cache_mode=cache_mode,
            epsilon=epsilon,
            hidden_size=hidden_size,
            hidden_size_wdq=hidden_size_wdq,
            hidden_size_rope_q_head=hidden_size_rope_q_head,
            hidden_size_rope_k=hidden_size_rope_k,
        )


def main():
    """Build randomized MLA-preprocess inputs and run the golden computation.

    Generates simulated tensors with the DeepSeek-style MLA dimensions
    (hidden 7168, down-projected qkv 2112, per-head up-projected q 192, ...)
    and feeds them through ``MLAPO.test_mlapo_torch``.

    :return: the output list produced by ``MLAPO.test_mlapo_torch``
             (q/key outputs, or their split variants per ``cache_mode``).
    """
    # Problem sizes.
    tokens = 1024
    head_num_q = 128
    block_size = random.choice([128, 256])
    block_num = 192
    data_type = random.choice([torch.float16, torch.bfloat16])

    hidden_size = 7168
    # "d" marks down-projection widths, "u" marks up-projection widths.
    hidden_size_wdqkv = 2112
    hidden_size_wdq = 1536
    hidden_size_wdkv = hidden_size_wdqkv - hidden_size_wdq  # 576
    hidden_size_rope_k = 64
    hidden_size_rms_kv = hidden_size_wdkv - hidden_size_rope_k  # 512
    hidden_size_wuq_head = 192
    hidden_size_rope_q_head = 64  # matches hidden_size_rope_k by construction
    hidden_size_nope_q_head = hidden_size_wuq_head - hidden_size_rope_q_head
    hidden_size_wuk_head = 512  # (q output width would be 64 + 512 = 576)

    hidden_size_output_kv = hidden_size_rope_k + hidden_size_rms_kv  # 576

    epsilon = 1e-6
    cache_mode = 0

    # Simulated inputs (replace with real data for actual accuracy checks).
    # Named `input_tokens` to avoid shadowing the builtin `input`.
    input_tokens = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(tokens, hidden_size))).to(data_type)
    gamma1 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(hidden_size))).to(data_type)
    beta1 = torch.from_numpy(np.random.randint(-2, 2, (hidden_size)).astype(np.float16)).to(data_type)
    quant_scale1 = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(1))).to(data_type)
    quant_offset1 = torch.from_numpy(np.random.uniform(-128.0, 127.0, size=(1))).to(torch.int8)
    # int8 weight, stored (out_features, in_features); transposed inside the calc.
    wdqkv = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(hidden_size_wdqkv, hidden_size))).to(torch.int8)
    de_scale1 = torch.rand((hidden_size_wdqkv), dtype=torch.float32) / 1000
    bias1 = torch.from_numpy(np.random.randint(-10, 10, (1, hidden_size_wdqkv)).astype(np.int32)).to(torch.int32)

    gamma2 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(hidden_size_wdq))).to(data_type)
    beta2 = torch.from_numpy(np.random.randint(-2, 2, (hidden_size_wdq)).astype(np.float16)).to(data_type)
    quant_scale2 = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(1))).to(data_type)
    quant_offset2 = torch.from_numpy(np.random.uniform(-128.0, 127.0, size=(1))).to(torch.int8)
    wuq = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(head_num_q * hidden_size_wuq_head, hidden_size_wdq))).to(torch.int8)
    de_scale2 = torch.rand((head_num_q * hidden_size_wuq_head), dtype=torch.float32) / 1000
    bias2 = torch.from_numpy(np.random.randint(-10, 10, (1, head_num_q * hidden_size_wuq_head)).astype(np.int32)).to(torch.int32)

    gamma3 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(hidden_size_rms_kv))).to(data_type)
    cos1 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(tokens, hidden_size_rope_k))).to(data_type)
    sin1 = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(tokens, hidden_size_rope_k))).to(data_type)
    cos2 = cos1
    sin2 = sin1
    wuk = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(head_num_q, hidden_size_nope_q_head, hidden_size_wuk_head))).to(data_type)
    key_cache = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(block_num, block_size, 1, hidden_size_output_kv))).to(data_type)
    # Unique slot indices over the whole cache.  Derived from
    # block_num * block_size instead of a hard-coded 192 * 128, so the full
    # slot range is usable when block_size == 256.
    slot_mapping = torch.from_numpy(np.random.choice(block_num * block_size, tokens, replace=False).astype(np.int32)).to(torch.int32)

    # Compute the golden reference.
    mlapo = MLAPO()
    golden_outputs = mlapo.test_mlapo_torch(
        input_tokens, 
        gamma1, 
        beta1, 
        quant_scale1, 
        quant_offset1, 
        wdqkv, 
        de_scale1, 
        bias1,
        # 
        gamma2, 
        beta2, 
        quant_scale2, 
        quant_offset2, 
        wuq, 
        de_scale2, 
        bias2, 
        # 
        gamma3,
        cos1, 
        sin1, 
        cos2, 
        sin2, 
        wuk, 
        key_cache, 
        slot_mapping,
        # 
        tokens=tokens, 
        head_num_q=head_num_q, 
        block_size=block_size, 
        data_type=data_type, 
        cache_mode=cache_mode, 
        # 
        epsilon=epsilon, 
        hidden_size=hidden_size, 
        hidden_size_wdq=hidden_size_wdq, 
        hidden_size_rope_q_head=hidden_size_rope_q_head, 
        hidden_size_rope_k=hidden_size_rope_k
    )
    return golden_outputs

# Example invocation (smoke test of the golden implementation).
if __name__ == "__main__" : 
    golden_outputs = main()

    print(f"Golden 输出形状: {[out.shape for out in golden_outputs]}")
    print("Golden 计算完成！")