// System / POSIX headers
#include <sys/types.h>
#include <sched.h>
#include <unistd.h>  // Linux
#include <numa.h>
#include <omp.h>

// C compatibility headers
#include <ctype.h>
#include <cassert>
#include <climits>   // INT_MAX (env-var parsing)
#include <cmath>
#include <csignal>
#include <cstdio>    // popen/pclose/fgets
#include <cstdlib>   // getenv/exit/strtol
#include <cstring>   // strstr

// C++ standard library
#include <algorithm> // std::sort (get_affinity_cpus)
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Project headers
#include "cpu_types.hpp"
// #include "tensor.h"
#include "quantize.h"

// CPU ids parsed from CUSTOM_CPU_AFFINITY (see get_affinity_cpus); empty means no pinning.
std::vector<int> cpu_ids = {};

/* Split `str` on `delimiter`, dropping empty fields. */
std::vector<std::string> split_str(const std::string & str, char delimiter) {
    std::vector<std::string> pieces;
    std::istringstream in(str);
    for (std::string piece; std::getline(in, piece, delimiter); ) {
        if (!piece.empty()) {
            pieces.push_back(piece);
        }
    }
    return pieces;
}

/* Number of worker threads, taken from OMP_NUM_THREADS (defaults to 1).
 * Malformed or non-positive values fall back to 1 instead of aborting:
 * the previous std::stoi would throw std::invalid_argument / out_of_range
 * on values like "auto" or "99999999999". */
int get_total_thread_num() {
    int total_thread_num = 1;
    const char* env = getenv("OMP_NUM_THREADS");
    if (env && *env) {
        char* end = nullptr;
        long parsed = strtol(env, &end, 10);
        // Accept only a parse that consumed digits and yields a usable count.
        if (end != env && parsed > 0 && parsed <= INT_MAX) {
            total_thread_num = static_cast<int>(parsed);
        }
    }
    return total_thread_num;
}

/* Parse a CPU-list spec into ids. Comma-separated grammar:
 *   "a"      -> a
 *   "a-b"    -> a, a+1, ..., b
 *   "a-b:s"  -> a, a+s, ..., while <= b
 * Malformed pieces are skipped. Fixes: a step of 0 previously caused an
 * infinite loop; the tokenization is now inlined (istringstream) so the
 * function no longer depends on split_str. */
std::vector<int> parse_sequence(const std::string& input) {
    std::vector<int> result;
    std::istringstream part_stream(input);
    std::string part;
    while (std::getline(part_stream, part, ',')) {
        if (part.empty()) {
            continue;
        }
        size_t colon_pos = part.find(':');
        if (colon_pos != std::string::npos) {
            // "start-end:step"
            std::string range = part.substr(0, colon_pos);
            size_t dash_pos = range.find('-');
            if (dash_pos == std::string::npos) {
                continue;  // no range before the colon: skip the piece
            }
            int start = std::stoi(range.substr(0, dash_pos));
            int end = std::stoi(range.substr(dash_pos + 1));
            int step = std::stoi(part.substr(colon_pos + 1));
            if (step <= 0) {
                continue;  // guard: step 0 looped forever, negative ran backwards
            }
            for (int num = start; num <= end; num += step) {
                result.push_back(num);
            }
        } else if ((colon_pos = part.find('-')) != std::string::npos) {
            // "start-end" with an implicit step of 1
            int start = std::stoi(part.substr(0, colon_pos));
            int end = std::stoi(part.substr(colon_pos + 1));
            for (int i = start; i <= end; ++i) {
                result.push_back(i);
            }
        } else {
            // single id
            result.push_back(std::stoi(part));
        }
    }
    return result;
}

/* One-shot initializer: fill `cpu_ids` from CUSTOM_CPU_AFFINITY (sorted
 * ascending). Subsequent calls are no-ops. */
static void get_affinity_cpus(std::vector<int>& cpu_ids) {
    static bool done = false;
    if (done) {
        return;
    }
    done = true;
    const char* env = getenv("CUSTOM_CPU_AFFINITY");
    if (env && *env) {
        cpu_ids = parse_sequence(env);
    }
    std::sort(cpu_ids.begin(), cpu_ids.end());
}

/* Pin the calling OpenMP thread to one CPU from the global cpu_ids list.
 * With OMP_NUM_THREADS == k * cpu_ids.size(), each group of k consecutive
 * thread ids shares one CPU. No-op when cpu_ids is empty.
 * Fixes: error paths used exit(0), which reports success to the shell;
 * the diagnostic was never newline-terminated; int/size_t comparison. */
static void init_process_affinity() {
    if (cpu_ids.empty()) {
        return;
    }
    int total_thread_num = get_total_thread_num();
    if (total_thread_num % static_cast<int>(cpu_ids.size()) != 0) {
        std::cout << "Error! OMP_NUM_THREADS: " << total_thread_num << ", ";
        std::cout << "CPU_IDs: ";
        for (size_t i = 0; i < cpu_ids.size(); ++i) {
            std::cout << cpu_ids[i] << " ";
        }
        std::cout << std::endl;  // terminate + flush the diagnostic before exiting
        exit(1);  // was exit(0): a failed configuration must not report success
    }
    int current_thread_id = omp_get_thread_num();
    int per_cpu = total_thread_num / static_cast<int>(cpu_ids.size());

    // Bind this thread to cpu_ids[thread_id / per_cpu].
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(cpu_ids[current_thread_id / per_cpu], &mask);

    if (sched_setaffinity(0, sizeof(cpu_set_t), &mask) == -1) {
        std::cout << "Error! Binding core failed!\n";
        exit(1);  // was exit(0)
    }
}

typedef unsigned int UINT32;
typedef unsigned long long UINT64;
typedef float f32;
// typedef unsigned char bool;
#define GENERAL_ARCH_BAICHUAN "baichuan"
#define EPSILON 1e-6f   /* two floats a1, a2 are treated as equal when |a1 - a2| < EPSILON */

// Forward declarations for the f16 attention helpers defined later in this file.
extern void transpose_v(f16 *vt, const f16 *v, int n_tokens, int dim, int qkv_dim);
extern void prefill_attention(f16 *out_ptr, const f16 *qkv_ptr, const f16 *vt_ptr, int N_tokens, int N_seqs, const int *seq_lens);

// Lookup table: expf() of an f16 value, indexed by its raw 16-bit pattern.
// NOTE(review): used by the attention softmax below; the code that fills it
// is not in this file -- confirm it runs before inference starts.
float expf_f16_table[65536];

// Quantization mode string, read from the environment at static-init time.
char* quantization_bit_code_str = getenv("SYSHAX_QUANTIZE");
bool i8mm_flag = false;  // set by init_i8mm_flag()

// Detect ARM i8mm instruction-set support by grepping `lscpu` output.
// Returns false (with a warning) when the command cannot be launched.
bool detect_i8mm_support() {
    FILE* pipe = popen("lscpu | grep i8mm", "r");
    if (!pipe) {
        std::cerr << "Warning: Failed to execute lscpu command, assuming i8mm not supported" << std::endl;
        return false;
    }

    std::string output;
    char chunk[128];
    while (fgets(chunk, sizeof(chunk), pipe)) {
        output.append(chunk);
    }

    // grep exits 0 only when it matched something; require output too.
    const bool supported = (pclose(pipe) == 0) && !output.empty();

    std::cout << "i8mm detection: " << (supported ? "supported" : "not supported") << std::endl;

    return supported;
}

// Lazily populate the global i8mm_flag exactly once and return it.
bool init_i8mm_flag() {
    static bool probed = false;
    if (probed) {
        return i8mm_flag;
    }
    i8mm_flag = detect_i8mm_support();
    probed = true;
    return i8mm_flag;
}

/* Maps a scalar type to the vectorized load/compute types used by the
 * paged-attention kernel. The primary template deliberately uses `void`
 * so that instantiating the kernel with an unsupported scalar type fails
 * to compile at the point of use. */
template<class scaler_t>
struct KernelVecType{
  using q_load_vec_t = void;
  using k_load_vec_t = void;
  using v_load_vec_t = void;
  using q_k_v_vec_t = void;
  using accum_vec_t = void;
};

/* f16 specialization: 8-wide query loads, 16-wide key/value loads and
 * accumulation (vector types come from cpu_types.hpp's vec_op). */
template<>
struct KernelVecType<f16>{
  using q_load_vec_t = vec_op::FP16Vec8;
  using k_load_vec_t = vec_op::FP16Vec16;
  using v_load_vec_t = vec_op::FP16Vec16;
  using q_k_v_vec_t = vec_op::FP16Vec16;
  using accum_vec_t = vec_op::FP16Vec16;
};

/* Records the data type (an enum ggml_type value) of each tensor kind in
 * the loaded model weights. */
typedef struct {
    int token_embd_weight;
    int attn_k_weight;
    int attn_k_bias;
    int attn_norm_weight;
    int attn_q_weight;
    int attn_q_bias;
    int attn_v_weight;
    int attn_v_bias;
    int ffn_down_weight;
    int ffn_gate_weight;
    int ffn_norm_weight;
    int ffn_up_weight;
    int attn_output_weight;
    int output_weight;
    int output_norm_weight;
} WeightTypes;

WeightTypes weight_types;


/* Tensor data / quantization formats. Numeric values mirror ggml's type
 * ids (hence the gaps at 4-5) and are used to index g_BlockDataInfo below
 * -- keep both in sync. */
enum ggml_type {
    GGML_TYPE_F32     = 0,
    GGML_TYPE_F16     = 1,
    GGML_TYPE_Q4_0    = 2,
    GGML_TYPE_Q4_1    = 3,
    GGML_TYPE_Q5_0    = 6,
    GGML_TYPE_Q5_1    = 7,
    GGML_TYPE_Q8_0    = 8,
    GGML_TYPE_Q8_1    = 9,
    GGML_TYPE_Q2_K    = 10,
    GGML_TYPE_Q3_K    = 11,
    GGML_TYPE_Q4_K    = 12,
    GGML_TYPE_Q5_K    = 13,
    GGML_TYPE_Q6_K    = 14,
    GGML_TYPE_Q8_K    = 15,
    GGML_TYPE_IQ2_XXS = 16,
    GGML_TYPE_IQ2_XS  = 17,
    GGML_TYPE_IQ3_XXS = 18,
    GGML_TYPE_IQ1_S   = 19,
    GGML_TYPE_IQ4_NL  = 20,
    GGML_TYPE_IQ3_S   = 21,
    GGML_TYPE_IQ2_S   = 22,
    GGML_TYPE_IQ4_XS  = 23,
    GGML_TYPE_I8      = 24,
    GGML_TYPE_I16     = 25,
    GGML_TYPE_I32     = 26,
    GGML_TYPE_I64     = 27,
    GGML_TYPE_F64     = 28,
    GGML_TYPE_IQ1_M   = 29,
    GGML_TYPE_COUNT,
};

// Work-division descriptor: the calling thread's coordinates in the
// (NUMA node x thread) grid, filled in by init_work_divider().
typedef struct WorkDivider {
    int num_threads;       // total OpenMP threads
    int tid;               // this thread's global id
    int num_numas;         // number of NUMA nodes
    int threads_per_numa;  // num_threads / num_numas (must divide evenly)
    int my_numa;           // NUMA node this thread belongs to
    int tid_in_numa;       // thread id within its NUMA node
} WorkDivider;

// Work-range structs: one for single-NUMA distribution, one for multi-NUMA.
// "thread" fields are item indices assigned to the calling thread.
typedef struct SingleNumaWorkRange {
    int begin_thread;
    int end_thread;
    int work_per_thread;
} SingleNumaWorkRange;

typedef struct MultiNumaWorkRange {
    int begin_numa;        // first item handled by this thread's NUMA node
    int end_numa;          // one past the last item of this NUMA node
    int work_per_numa;
    int begin_thread;      // item offsets relative to the node's share
    int end_thread;
    int work_per_thread;
} MultiNumaWorkRange;


/* Per-type codec descriptor for quantized weight blocks. */
typedef struct {
    const char *pcTypeName;     /* short type name, e.g. "q4_0" */
    UINT32 uiblkSize;           /* elements per quantization block */
    UINT32 uiTypeSize;          /* bytes per block struct */
    ggml_from_float_t quantize;
    ggml_to_float_t dequantize;
    enum ggml_type VecDotType;  /* dot-product compute type */
    ggml_vec_dot_t VecDotFunc;  /* dot-product compute function */
} BLOCK_DATA_INFO;

const char *g_ModelArch = "qwen2"; /* model architecture */

/* Codec table indexed by enum ggml_type. Rows 4-5 (the enum gap) and the
 * unsupported types are zero-filled placeholders.
 * NOTE(review): the array only has entries up to Q8_0 (index 8); indexing
 * it with a larger ggml_type (as Quantize/Dequantize do unchecked) reads
 * out of bounds -- confirm callers never pass higher types. */
BLOCK_DATA_INFO g_BlockDataInfo[] = {
    {"f32", 1, sizeof(float), NULL, NULL, GGML_TYPE_F32, NULL},
    {"f16", 1, sizeof(uint16_t), (ggml_from_float_t)ggml_fp32_to_fp16_row, (ggml_to_float_t)ggml_fp16_to_fp32_row, GGML_TYPE_F16, (ggml_vec_dot_t)ggml_vec_dot_f16},
    {"q4_0", QK4_0, sizeof(block_q4_0), (ggml_from_float_t)quantize_row_q4_0, (ggml_to_float_t)dequantize_row_q4_0, GGML_TYPE_Q8_0, (ggml_vec_dot_t)ggml_vec_dot_q4_0_q8_0},
    {"q4_1", QK4_1, sizeof(block_q4_1), (ggml_from_float_t)quantize_row_q4_1, (ggml_to_float_t)dequantize_row_q4_1, GGML_TYPE_Q8_1, (ggml_vec_dot_t)ggml_vec_dot_q4_1_q8_1},
    {"", 0, 0, NULL},
    {"", 0, 0, NULL},
    {"", 0, 0, NULL},
    {"", 0, 0, NULL},
    {"q8_0", QK8_0, sizeof(block_q8_0), (ggml_from_float_t)quantize_row_q8_0, (ggml_to_float_t)dequantize_row_q8_0, GGML_TYPE_Q8_0, (ggml_vec_dot_t)ggml_vec_dot_q8_0_q8_0},
    // {"q8_1", QK8_1, sizeof(block_q8_1), (ggml_from_float_t)quantize_row_q8_1},
    // {"q2_K", QK_K, sizeof(block_q2_K), (ggml_from_float_t)quantize_row_q2_K, (ggml_to_float_t)dequantize_row_q2_K, GGML_TYPE_Q8_K, (ggml_vec_dot_t)ggml_vec_dot_q2_K_q8_K},  //10
    // {"q3_K", QK_K, sizeof(block_q3_K), (ggml_from_float_t)quantize_row_q3_K, NULL, GGML_TYPE_Q8_K, (ggml_vec_dot_t)ggml_vec_dot_q3_K_q8_K},
    // {"", 0, 0, NULL},
    // {"", 0, 0, NULL},  //13
    // {"q6_K", QK_K, sizeof(block_q6_K), (ggml_from_float_t)quantize_row_q6_K, NULL, GGML_TYPE_Q8_K, (ggml_vec_dot_t)ggml_vec_dot_q6_K_q8_K},
    // {"q8_K", QK_K, sizeof(block_q8_K), (ggml_from_float_t)quantize_row_q8_K},
};

/* A tensor's data type plus its data pointer at one of several indirection
 * levels (flat buffer, array of pointers, array of arrays of pointers). */
typedef struct {
    enum ggml_type DataType;  /* tensor data type */
    union {
        void *tensor1;
        void **tensor2;
        void ***tensor3;
    } Data;
} TENSOR_INFO;

/* Where the model weights are attached after loading. */
typedef struct weight {
    TENSOR_INFO token_embedding;
    TENSOR_INFO rms_att_norm; // (layer, dim) rmsnorm weights
    TENSOR_INFO rms_ffn_norm; // (layer, dim)
    TENSOR_INFO Wqkv;         // fused Q/K/V projection
    TENSOR_INFO wo; // (layer, n_head * head_size, dim)
    // weights for bias
    TENSOR_INFO qkv_bias;
    // weights for ffn
    TENSOR_INFO w1w3;         // fused gate/up projection
    TENSOR_INFO ffn_down; // (layer, dim, hidden_dim)
    TENSOR_INFO output;       //output linear
    TENSOR_INFO output_norm;  //output RMS norm
} WEIGHT;


/* Scratch buffers used while running the model (one activation set). */
typedef struct {
    void *Token_Ori;    /* raw token activations */
    void *Token_Norm;   /* activations after normalization */
    float *K;            /* K matrix */
    float *Q;            /* Q matrix */
    float *V;            /* V matrix */
    float *QK;           /* Q * K^T result */
    float *Attn_out;     /* attention output */
    f16 *Attn_out_f16;
    float *ffn_Gate;     /* ffn gate output */
    float *ffn_up;       /* ffn up-projection output */
    float *logits;       /* logits for sampling */
    float **key_cache;    /* K cache */
    float **value_cache;  /* V cache */
    void **temp_output_vec_numa;
    void **tmp_vec_numa;
    f16 *seq_qkv;      /* new: packed qkv output for the sequence */
    float *add_weight;   /* new: additional weight buffer */
    float *output_f32;
} MODEL_RUN_STATE;

/* Model hyper-parameters read from the model file. */
typedef struct {
    int dim;            /* embedding dimension */
    int n_head;        /* number of attention heads */
    int n_kv_heads;     /* number of key/value heads (GQA) */
    int hidden_dim;     /* FFN hidden-layer dimension */
    int n_layers;       /* number of transformer layers */
    int context_length; /* maximum context length */
    float norm_rms_eps; /* RMS-norm epsilon */
    int n_vocab;        /* vocabulary size */
    float rope_freq_base; /* RoPE base frequency */
    f16 *cos_sin_cache;  /* precomputed per-position cos/sin table for RoPE */
    int n_rotary;        /* number of rotary dimensions */
    bool is_neox_style;  /* RoPE style: NeoX vs GPT-J */
    double attn_scale;   /* attention scaling factor */
} MODEL_HYPE_PARA;

/* Per-thread scratch for one row of attention logits (softmax workspace).
 * NOTE(review): 131072 entries caps the supported sequence length -- confirm
 * against context_length. */
__thread f16 qk_tmp_storage[131072];

int g_numas = numa_num_configured_nodes();  // NUMA node count (queried at static init)
WEIGHT g_pstWeight;                 // global model weights
MODEL_HYPE_PARA g_pstModelHypePara; // global model hyper-parameters

// f16<->f32 conversion via the compiler's implicit conversion; presumably f16
// is a native half-float type from cpu_types.hpp -- TODO confirm.
float f16_to_f32(f16 h){return h;}
f16 f32_to_f16(float h){return h;}

/* Dot product of two f16 vectors of length M.
 * NEON path accumulates into 8 partial f16 vectors (8x unroll, 8 lanes each),
 * reduces them pairwise, then finishes the horizontal sum in f32. The scalar
 * tail handles the remainder -- and the whole vector on non-NEON builds.
 * NOTE(review): accumulating in f16 can lose precision for large M --
 * presumably acceptable for attention logits; confirm. */
__attribute__((noinline))
f16 DOTPRODUCT_vv_f16(int M, const f16 *src0_ptr, const f16 *src1_ptr)
{
    // Warm the caches for both operands before the main loop.
    __builtin_prefetch(&src0_ptr[0], 0 , 0);
    __builtin_prefetch(&src1_ptr[0], 0 , 0);
    if (M >= 128) {
        __builtin_prefetch(&src0_ptr[32], 0 , 0);
        __builtin_prefetch(&src1_ptr[32], 0 , 0);
        __builtin_prefetch(&src0_ptr[64], 0 , 0);
        __builtin_prefetch(&src1_ptr[64], 0 , 0);
        __builtin_prefetch(&src0_ptr[96], 0 , 0);
        __builtin_prefetch(&src1_ptr[96], 0 , 0);
    }
    float sumf = 0.0f;
    int j = 0;
#ifdef __ARM_NEON
    const int M_UNROLL = 8;
    const int M_SIMD = 8;
    // Aggregate init: sum[0] set explicitly, sum[1..7] zero-initialized.
    float16x8_t sum[M_UNROLL] = {vdupq_n_f16(0.0f)};
    for (; j <= M - M_UNROLL * M_SIMD; j += M_UNROLL * M_SIMD) {
        // Prefetch a few cache lines ahead of the 64-element stride.
        __builtin_prefetch(&src0_ptr[j + 192], 0 , 0);
        __builtin_prefetch(&src1_ptr[j + 192], 0 , 0);
        __builtin_prefetch(&src0_ptr[j + 224], 0 , 0);
        __builtin_prefetch(&src1_ptr[j + 224], 0 , 0);
        __builtin_prefetch(&src0_ptr[j + 256], 0 , 0);
        __builtin_prefetch(&src1_ptr[j + 256], 0 , 0);
        __builtin_prefetch(&src0_ptr[j + 288], 0 , 0);
        __builtin_prefetch(&src1_ptr[j + 288], 0 , 0);
        __builtin_prefetch(&src0_ptr[j + 320], 0 , 0);
        __builtin_prefetch(&src1_ptr[j + 320], 0 , 0);
        for (int ss = 0; ss < M_UNROLL; ss++) {
            sum[ss] = vfmaq_f16(sum[ss], vld1q_f16(&src0_ptr[j + ss * M_SIMD]), vld1q_f16(&src1_ptr[j + ss * M_SIMD]));
        }
    }

    // 8-wide tail: fold leftover full vectors into sum[0].
    for (; j <= M - 8; j += 8) {
        sum[0] = vfmaq_f16(sum[0], vld1q_f16(&src0_ptr[j]), vld1q_f16(&src1_ptr[j]));
    }
    // Pairwise reduction of the 8 partial accumulators into sum[0].
    sum[0] = vaddq_f16(vaddq_f16(sum[0], sum[2]), vaddq_f16(sum[1], sum[3]));
    if (M_UNROLL > 4) {
        sum[4] = vaddq_f16(vaddq_f16(sum[4], sum[6]), vaddq_f16(sum[5], sum[7]));
        sum[0] = vaddq_f16(sum[0], sum[4]);
    }

    // Horizontal sum: widen the two f16 halves to f32, then add across lanes.
    float32x4_t t0 = vcvt_f32_f16(vget_low_f16(sum[0]));
    float32x4_t t1 = vcvt_f32_f16(vget_high_f16(sum[0]));
    sumf = vaddvq_f32(vaddq_f32(t0, t1));
#endif
    // Scalar tail (or the full loop when NEON is unavailable).
    for (; j < M; j++) {
        sumf += (f16_to_f32(src0_ptr[j]) * f16_to_f32(src1_ptr[j]));
    }

    return sumf;
}

/* Transpose the V slice of a packed QKV buffer:
 * vt[j * n_tokens + i] = v[i * qkv_dim + j] for i in [0, n_tokens), j in [0, dim).
 * `qkv_dim` is the row stride of the source (V occupies the first `dim`
 * columns of each row). Fix: removed a dead shadowed local `int j = 0;`. */
void transpose_v(f16 *vt, const f16 *v, int n_tokens, int dim, int qkv_dim)
{
    for (int i = 0; i < n_tokens; i++) {
        for (int j = 0; j < dim; j++) {
            vt[j * n_tokens + i] = v[i * qkv_dim + j];
        }
    }
}

/* Causal self-attention for the prefill phase over a batch of packed
 * sequences. qkv_ptr holds per-token rows of [Q | K | V]; vt_ptr is V
 * pre-transposed by transpose_v so the weighted sum reads contiguously.
 * seq_lens[0..N_seqs) gives each sequence's token count; tokens of all
 * sequences are packed back-to-back.
 * NOTE(review): omp_get_num_threads() returns 1 outside a parallel region,
 * which would make num_threads 1/4 == 0 here -- presumably this is called
 * with a larger active team; confirm.
 * NOTE(review): per-row logits go into qk_tmp_storage (131072 entries) --
 * sequences longer than that overflow the scratch buffer. */
void prefill_attention(f16 *out_ptr, const f16 *qkv_ptr, const f16 *vt_ptr, int N_tokens, int N_seqs, const int *seq_lens)
{
    int N_gqa = g_pstModelHypePara.n_head / g_pstModelHypePara.n_kv_heads, dim_gqa = g_pstModelHypePara.dim / N_gqa;
    int qkv_stride = g_pstModelHypePara.dim + 2 * dim_gqa, head_size = g_pstModelHypePara.dim / g_pstModelHypePara.n_head;
    const f16 *q_ptr = qkv_ptr, *k_ptr = q_ptr + g_pstModelHypePara.dim;

    // Prefix sums of sequence lengths -> token offsets of each sequence.
    std::vector<int> seqlen_prefix_sum;
    seqlen_prefix_sum.push_back(0);
    for (int i = 0, sum_seq_lens = 0; i < N_seqs; i++) {
        sum_seq_lens += seq_lens[i];
        seqlen_prefix_sum.push_back(sum_seq_lens);
    }

    int num_threads = omp_get_num_threads();
    num_threads = num_threads / 4;

    // One task per (sequence, query head) pair.
    #pragma omp parallel for collapse(2) schedule(dynamic, 1) num_threads(num_threads)
    for (int seq = 0; seq < N_seqs; seq++) {
        for (int h_q = 0; h_q < g_pstModelHypePara.n_head; h_q++) {
            f16 *qk_tmp = (f16 *)qk_tmp_storage;
            int seq_t_begin = seqlen_prefix_sum[seq], seq_t_end = seqlen_prefix_sum[seq + 1];
            int h_kv = h_q / N_gqa;  // GQA: several query heads share one KV head

            for (int t = seq_t_begin; t < seq_t_end; t++) {
                const f16 *q_head_ptr = q_ptr + t * qkv_stride + h_q * head_size;
                int token_idx_in_seq = t - seq_t_begin;
                // Causal logits: q.k for all keys up to and including this token,
                // tracking the row max for a numerically stable softmax.
                f16 row_max = -INFINITY;
                for (int i = 0; i <= token_idx_in_seq; i++) {
                    const f16 *k_head_ptr = k_ptr + (seq_t_begin + i) *qkv_stride + h_kv *head_size;
                    qk_tmp[i] = DOTPRODUCT_vv_f16(head_size, q_head_ptr, k_head_ptr);
                    row_max = qk_tmp[i] > row_max ? qk_tmp[i] : row_max;
                }
                // exp(x - max) via the precomputed table keyed on the raw f16 bits.
                f32 sumexp = 0.0f;
                for (int i = 0; i <= token_idx_in_seq; i++) {
                    f16 diff = qk_tmp[i] - row_max;
                    f32 exp_result = expf_f16_table[*(uint16_t *)&diff];
                    qk_tmp[i] = exp_result;
                    sumexp += exp_result;
                }

                // Normalize to softmax weights.
                for (int i = 0; i <= token_idx_in_seq; i++) {
                    qk_tmp[i] /= sumexp;
                }

                // Weighted sum over V (transposed layout: one dot per output dim).
                for (int iv = 0; iv < head_size; iv++) {
                    const f16 *vt_seq_ptr = vt_ptr + h_kv * head_size * N_tokens + iv *N_tokens + seq_t_begin;
                    out_ptr[t * g_pstModelHypePara.dim + h_q * head_size + iv] = DOTPRODUCT_vv_f16(token_idx_in_seq + 1, vt_seq_ptr, qk_tmp);
                }
            }
        }
    }
}

/* Decode-phase paged attention over a block-structured KV cache
 * (vLLM-style layout, see the per-argument shape comments below).
 * block_tables maps each sequence's logical block index to a physical
 * cache block. One (sequence, head) pair per OpenMP task: compute QK
 * logits block by block, softmax them in qk_tmp_storage, then accumulate
 * the softmax-weighted sum over V in 16-element head partitions.
 * NOTE(review): same num_threads/4 == 0 concern as prefill_attention when
 * called outside a parallel region -- confirm the call site.
 * NOTE(review): group_accums/accums are default-constructed accumulators;
 * this relies on accum_vec_t's default constructor zeroing -- confirm in
 * cpu_types.hpp. */
template<class scalar_t>
void paged_attention_v1_impl(      scalar_t* __restrict__ out,            // [num_seqs, num_heads, head_size]
      const scalar_t* __restrict__ q,        // [num_seqs, num_heads, head_size]
      const scalar_t* __restrict__ k_cache,  // [num_blocks, num_kv_heads, head_size/x, block_size, x]
      const scalar_t* __restrict__ v_cache,  // [num_blocks, num_kv_heads, head_size, block_size]
      const int num_kv_heads,
      const int* __restrict__ block_tables,  // [num_seqs, max_num_blocks_per_seq]
      const int* __restrict__ seq_lens,      // [num_seqs]
      const int max_num_blocks_per_seq,
      const int q_stride, const int kv_block_stride, const int kv_head_stride,
      const int num_seqs, const int num_heads, const int HEAD_SIZE)
{
    using q_load_vec_t = typename KernelVecType<scalar_t>::q_load_vec_t;
    using k_load_vec_t = typename KernelVecType<scalar_t>::k_load_vec_t;
    using v_load_vec_t = typename KernelVecType<scalar_t>::v_load_vec_t;
    using q_k_v_vec_t = typename KernelVecType<scalar_t>::q_k_v_vec_t;
    using accum_vec_t = typename KernelVecType<scalar_t>::accum_vec_t;
    using accum_scalar_t = scalar_t;

    // x = scalars per 16-byte K-cache sub-row; TOKEN_PER_GROUP tokens are
    // processed per vector load.
    constexpr int BLOCK_SIZE = 16;
    constexpr int x = BLOCK_SIZE / sizeof(scalar_t);
    static_assert(k_load_vec_t::get_elem_num() % x == 0);
    static_assert(q_load_vec_t::get_elem_num() * sizeof(scalar_t) == 16);

    constexpr int TOKEN_PER_GROUP = k_load_vec_t::get_elem_num() / x;
    constexpr int MAX_GROUP_NUM = 16 / TOKEN_PER_GROUP;
    static_assert(MAX_GROUP_NUM == 8 || MAX_GROUP_NUM == 4);

    const int N_gqa = num_heads / num_kv_heads;  // query heads per KV head

    int num_threads = omp_get_num_threads();
    num_threads = num_threads / 4;

#pragma omp parallel for collapse(2) schedule(dynamic, 1) num_threads(num_threads)
    for (int seq_idx = 0; seq_idx < num_seqs; ++seq_idx) {
      for (int head_idx = 0; head_idx < num_heads; ++head_idx) {
        accum_scalar_t *qk_tmp = (accum_scalar_t *)qk_tmp_storage;
        int seq_len = seq_lens[seq_idx];
        const int* seq_block_table = block_tables + max_num_blocks_per_seq * seq_idx;
        const int block_num = (seq_len + BLOCK_SIZE - 1) / BLOCK_SIZE;
        const int64_t kv_head_idx = head_idx / N_gqa;
        const int last_block_token_num = seq_len - (block_num - 1) * BLOCK_SIZE;
        const scalar_t* __restrict__ q_vec_ptr = q + seq_idx * q_stride + head_idx * HEAD_SIZE;

        // Compute logits
        for (int block_idx = 0; block_idx < block_num; ++block_idx) {
            const int64_t physical_block_idx = seq_block_table[block_idx];
            const scalar_t* __restrict__ k_block_cache_ptr =
                k_cache + physical_block_idx * kv_block_stride +
                kv_head_idx * kv_head_stride;
            const int token_num = (block_idx == block_num - 1) ? last_block_token_num : BLOCK_SIZE;
            const int group_num = (token_num + TOKEN_PER_GROUP - 1) / TOKEN_PER_GROUP;
            accum_vec_t group_accums[MAX_GROUP_NUM];
            // Walk the head dimension x scalars at a time, accumulating q.k
            // for TOKEN_PER_GROUP tokens per group accumulator.
            for (int q_offset = 0; q_offset < HEAD_SIZE; q_offset +=x, k_block_cache_ptr += x * BLOCK_SIZE) {
                q_load_vec_t q_load_group_vec(q_vec_ptr + q_offset);
                q_k_v_vec_t q_group_vec(q_load_group_vec);

                for (int token_group_idx = 0; token_group_idx < group_num; token_group_idx++) {
                    k_load_vec_t k_load_group_vec(k_block_cache_ptr + token_group_idx * x * TOKEN_PER_GROUP);
                    q_k_v_vec_t k_group_vec(k_load_group_vec);
                    vec_op::fma(group_accums[token_group_idx], q_group_vec, k_group_vec);
                    vec_op::prefetch(k_block_cache_ptr + x *BLOCK_SIZE + token_group_idx * x *TOKEN_PER_GROUP);
                }
            }
            // Reduce each token's lanes into a scalar logit.
            for (int token_group_idx = 0; token_group_idx < group_num; token_group_idx++) {
                for (int token_idx = 0; token_idx < TOKEN_PER_GROUP; token_idx++) {
                    accum_scalar_t dot_v =
                        group_accums[token_group_idx].
                        template reduce_sub_sum<accum_vec_t::get_elem_num() / TOKEN_PER_GROUP>(token_idx);
                    qk_tmp[block_idx * BLOCK_SIZE + token_group_idx * TOKEN_PER_GROUP + token_idx] = dot_v;
                }
            }
        }

        // Stable softmax over the seq_len logits (exp via the f16 bit table).
        f32 max = qk_tmp[0], sum = 0.0;
        for (int i = 1; i < seq_len; i++) {
            max = max >= qk_tmp[i] ? max : qk_tmp[i];
        }

        for (int i = 0; i < seq_len; i++) {
            f16 diff = qk_tmp[i] - max;
            qk_tmp[i] = expf_f16_table[*(uint16_t *)&diff];
            sum += qk_tmp[i];
        }
        int i = 0;
        for (; i < seq_len; i++) {
            qk_tmp[i] /= sum;
        }
        // Zero the padding of the last partial block so it contributes nothing.
        for (; i < block_num * BLOCK_SIZE; i++) {
            qk_tmp[i] = 0;
        }

        // Weighted sum over V, 16 head elements per partition.
        constexpr int head_elem_num_per_partition = 16;
        assert(HEAD_SIZE % head_elem_num_per_partition == 0);
        int head_partition_num = HEAD_SIZE / head_elem_num_per_partition;
        for (int head_part_idx = 0; head_part_idx < head_partition_num; ++head_part_idx) {
          accum_vec_t accums[head_elem_num_per_partition];
          scalar_t* __restrict__ out_ptr =
              out + seq_idx * num_heads * HEAD_SIZE + head_idx * HEAD_SIZE +
              head_part_idx * head_elem_num_per_partition;
          for (int block_idx = 0; block_idx < block_num; ++block_idx) {
            const int64_t physical_block_idx = seq_block_table[block_idx];
            const scalar_t* __restrict__ v_block_cache_ptr =
                v_cache + physical_block_idx * kv_block_stride +
                kv_head_idx * kv_head_stride + BLOCK_SIZE * head_part_idx *
                head_elem_num_per_partition;

                accum_vec_t qk_vec(qk_tmp + block_idx * BLOCK_SIZE);
                for (int head_elem_idx = 0; head_elem_idx < head_elem_num_per_partition; head_elem_idx++) {
                    v_load_vec_t v_load_vec(v_block_cache_ptr + BLOCK_SIZE * head_elem_idx);
                    accum_vec_t v_vec(v_load_vec);
                    vec_op::fma(accums[head_elem_idx], qk_vec, v_vec);
                }

                // Prefetch the next physical block's V rows while computing.
                if (block_idx != block_num - 1) {
                  const int64_t next_physical_block_idx =
                      seq_block_table[block_idx + 1];
                  const scalar_t* __restrict__ next_v_block_cache_ptr =
                      v_cache + next_physical_block_idx * kv_block_stride +
                      kv_head_idx * kv_head_stride +
                      BLOCK_SIZE * head_part_idx * head_elem_num_per_partition;

                  for (int head_elem_idx = 0; head_elem_idx < head_elem_num_per_partition; head_elem_idx += 2) {
                      vec_op::prefetch(next_v_block_cache_ptr + BLOCK_SIZE * head_elem_idx);
                  }
                }
          }

          // Final horizontal reduction per output element.
          for (int head_elem_idx = 0; head_elem_idx < head_elem_num_per_partition; head_elem_idx++) {
              float value = accums[head_elem_idx].reduce_sum();
              vec_op::storeFP32(value, out_ptr + head_elem_idx);
          }
      }
    }
  }
}

/* Quantize `size` floats from src into Dst using the codec registered for
 * DataType in g_BlockDataInfo.
 * NOTE(review): no guard against a NULL quantize pointer (placeholder rows)
 * or a DataType beyond the table's last entry -- confirm callers only pass
 * supported types. */
void Quantize(void *Dst, float *src, enum ggml_type DataType, int size)
{
    g_BlockDataInfo[DataType].quantize(src, Dst, size);
}

// Fill `divider` with the calling thread's coordinates in the
// (NUMA node x thread) grid. Aborts when the OpenMP thread count is not
// an exact multiple of the NUMA node count.
void init_work_divider(WorkDivider *divider, int numas) {
    divider->num_numas = numas;
    divider->num_threads = omp_get_num_threads();
    if (divider->num_threads % divider->num_numas != 0) {
        fprintf(stderr, "nthreads (%d) %% numas (%d) != 0\n", divider->num_threads, divider->num_numas);
        exit(1);
    }
    divider->tid = omp_get_thread_num();
    divider->threads_per_numa = divider->num_threads / divider->num_numas;
    // Decompose the flat tid into (node, offset-within-node).
    divider->my_numa = divider->tid / divider->threads_per_numa;
    divider->tid_in_numa = divider->tid - divider->my_numa * divider->threads_per_numa;
}

/* RMS normalization: Dst[j] = Weight[j] * Src[j] / rms(Src),
 * where rms(Src) = sqrt(mean(Src^2) + eps). */
void RmsNorm(float *DstData, float *SrcData, float *SrcWeight, float eps, int dataNum)
{
    float sum_sq = 0.0f;
    for (int i = 0; i < dataNum; i++) {
        sum_sq += SrcData[i] * SrcData[i];
    }
    sum_sq /= dataNum;
    sum_sq += eps;
    const float inv_rms = 1.0f / sqrtf(sum_sq);
    for (int i = 0; i < dataNum; i++) {
        DstData[i] = SrcWeight[i] * (inv_rms * SrcData[i]);
    }
}

/* Split total_workitems evenly across every thread: the first
 * (total % num_threads) threads take one extra item. Writes this thread's
 * [begin, end) item range into pstSingleRange. */
void divide_all_work(const WorkDivider *divider, int total_workitems, SingleNumaWorkRange *pstSingleRange)
{
    const int base = total_workitems / divider->num_threads;
    const int extra = total_workitems % divider->num_threads;
    const int tid = divider->tid;
    // Threads [0, extra) carry base+1 items, the rest carry base.
    const int my_count = base + (tid < extra ? 1 : 0);
    const int begin = tid * base + (tid < extra ? tid : extra);
    pstSingleRange->begin_thread = begin;
    pstSingleRange->end_thread = begin + my_count;
    pstSingleRange->work_per_thread = my_count;
}

// Distribute total_workitems among the threads of NUMA node 0 only;
// threads on other nodes receive an empty range.
// NOTE(review): indexes by the global tid rather than tid_in_numa --
// presumably valid because node 0's threads have tids 0..threads_per_numa-1;
// confirm against init_work_divider.
void divide_work_first_numa(const WorkDivider *divider, int total_workitems, SingleNumaWorkRange *pstSingleRange)
{
    if (divider->my_numa != 0) {
        pstSingleRange->begin_thread = 0;
        pstSingleRange->end_thread = 0;
        pstSingleRange->work_per_thread = 0;
        return;
    }

    const int base = total_workitems / divider->threads_per_numa;
    const int extra = total_workitems % divider->threads_per_numa;
    const int tid = divider->tid;
    // Threads [0, extra) carry base+1 items, the rest carry base.
    const int my_count = base + (tid < extra ? 1 : 0);
    const int begin = tid * base + (tid < extra ? tid : extra);
    pstSingleRange->begin_thread = begin;
    pstSingleRange->end_thread = begin + my_count;
    pstSingleRange->work_per_thread = my_count;
}

// Two-level split: ceil-divide the items across NUMA nodes, then
// ceil-divide this node's share across its threads. begin/end_thread are
// offsets relative to the node's share; begin/end_numa are absolute.
void divide_work_all_numas(const WorkDivider *divider, int total_workitems, MultiNumaWorkRange *pstNulRange)
{
    const int per_numa = (total_workitems - 1) / divider->num_numas + 1;
    const int numa_begin = divider->my_numa * per_numa;
    int numa_end = numa_begin + per_numa;
    if (numa_end > total_workitems) {
        numa_end = total_workitems;  // last node may get a short share
    }

    const int my_numa_items = numa_end - numa_begin;
    const int per_thread = (my_numa_items - 1) / divider->threads_per_numa + 1;
    const int t_begin = divider->tid_in_numa * per_thread;
    int t_end = t_begin + per_thread;
    if (t_end > my_numa_items) {
        t_end = my_numa_items;  // last thread may get a short share
    }

    pstNulRange->begin_numa = numa_begin;
    pstNulRange->end_numa = numa_end;
    pstNulRange->work_per_numa = per_numa;
    pstNulRange->begin_thread = t_begin;
    pstNulRange->end_thread = t_end;
    pstNulRange->work_per_thread = t_end - t_begin;
}

/* Dequantize: convert quantized SrcData into float DstData using the codec
 * registered for the token-embedding tensor's type. Only runs when the
 * embedding is quantized (not F32) while the attention-norm weights are F32
 * -- presumably the marker for a quantized model with fp32 norms; TODO
 * confirm with the loader. */
void Dequantize(void *DstData, void *SrcData, WEIGHT *pstWeight, int dataNum)
{
    enum ggml_type TokenType = pstWeight->token_embedding.DataType;

    /* dequantize only when the type combination requires it */
    if (TokenType != GGML_TYPE_F32 && pstWeight->rms_att_norm.DataType == GGML_TYPE_F32) {
        g_BlockDataInfo[TokenType].dequantize(SrcData, static_cast<float*>(DstData), dataNum);
    }
}

/* Distribute KV-cache heads across NUMA nodes and threads.
 * total_workitems (head count) must be a multiple of the NUMA node count.
 * Two regimes: with at most one head per thread, each active thread takes
 * exactly one head and surplus threads get an empty range; otherwise each
 * node's heads are split across its threads with the remainder spread over
 * the first threads.
 * NOTE(review): the early-return paths never set work_per_thread -- callers
 * must not read it after this function, or it should be zeroed; confirm. */
void divide_kv_cache_numa(const WorkDivider * divider, int total_workitems,
                          SingleNumaWorkRange * pstSingleRange)
{
    int work_per_thread;
    int work_remaining;
    int work_per_numa;
    int NumaNum = divider->num_numas;
    int threads_per_numa = divider->threads_per_numa;
    int head_per_numa;
    int tid_group, tid_use, head_add;

    if (total_workitems % NumaNum != 0) {
        fprintf(stderr, "kv cache: heads (%d) %% numas (%d) != 0\n", total_workitems, NumaNum);
        exit(1);
    }

    work_per_numa = total_workitems / NumaNum;
    if (total_workitems <= divider->num_threads) {
        /* park the threads that have no head to work on */
        if (divider->tid % threads_per_numa >= work_per_numa) {
            pstSingleRange->begin_thread = 0;
            pstSingleRange->end_thread = 0;
            return;
        }

        /* one head per active thread */
        pstSingleRange->begin_thread = divider->tid_in_numa + divider->my_numa * work_per_numa;
        pstSingleRange->end_thread = pstSingleRange->begin_thread + 1;
    } else {
        tid_group = divider->tid / threads_per_numa;
        tid_use = divider->tid % threads_per_numa;
        head_per_numa = total_workitems / NumaNum;
        work_per_thread = head_per_numa / threads_per_numa;
        work_remaining = head_per_numa % threads_per_numa;
        head_add = head_per_numa * tid_group;  // offset of this node's first head
        if (work_remaining == 0) {
            pstSingleRange->begin_thread = tid_use * work_per_thread + head_add;
            pstSingleRange->end_thread = tid_use * work_per_thread + work_per_thread + head_add;
        } else if (tid_use < work_remaining) {
            /* first `work_remaining` threads take one extra head */
            pstSingleRange->begin_thread = tid_use * work_per_thread + tid_use + head_add;
            pstSingleRange->end_thread = (tid_use + 1) * work_per_thread + (tid_use + 1) + head_add;
        } else {
            pstSingleRange->begin_thread = tid_use * work_per_thread + work_remaining + head_add;
            pstSingleRange->end_thread = (tid_use + 1) * work_per_thread + work_remaining + head_add;
        }
    }
    return;
}

/* Apply rotary position embedding in place to Q (and, within the KV range,
 * K) for n_tokens tokens starting at position `pos`. Two layouts:
 * non-qwen architectures rotate adjacent pairs (GPT-J style); qwen rotates
 * pairs split half a head apart (NeoX style).
 * NOTE(review): in the qwen branch the frequency uses `i` while the element
 * index uses I = i/2 -- presumably intentional to match qwen's rotary
 * layout; confirm against the reference implementation. */
void Rope_embedding(MODEL_RUN_STATE *pstRunState, MODEL_HYPE_PARA *pstModelPara, int pos, int n_tokens)
{
    int dim = pstModelPara->dim;
    int n_kv_heads = pstModelPara->n_kv_heads;
    int n_head = pstModelPara->n_head;
    long kv_dim = (dim * n_kv_heads) / n_head;
    int head_size = dim / n_head;
    // A rope_freq_base of 0 (unset) falls back to the conventional 10000.
    float rope_freq_base = (fabsf(pstModelPara->rope_freq_base - 0.0f) < EPSILON)
        ? 10000.0f
        : pstModelPara->rope_freq_base;

    bool ropetype = strstr(g_ModelArch, "qwen") != NULL;
    for (int k = 0; k < n_tokens; k++, pos++) {
        if (!ropetype){
            for (int i = 0; i < dim; i += 2) {
                int head_dim = i % head_size;
                float freq = 1.0f / powf(rope_freq_base, head_dim / (float)head_size);
                float val = pos * freq;
                float fcr = cosf(val);
                float fci = sinf(val);
                int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
                for (int v = 0; v < rotn; v++) {
                    float *vec = v == 0 ? (float *)pstRunState->Q + k * dim : (float *)pstRunState->K + k * kv_dim; // the vector to rotate (query or key)
                    float v0 = vec[i];
                    float v1 = vec[i+1];
                    vec[i]   = v0 * fcr - v1 * fci;
                    vec[i+1] = v0 * fci + v1 * fcr;
                }
            }
        }else{
            // qwen: per head, rotate element I with its partner half a head away.
            for (int j = 0; j < dim / head_size; j++){
                for (int i = 0; i < head_size; i += 2) {
                    int I = i / 2;
                    float freq = 1.0f / powf(rope_freq_base, i / (float)head_size);
                    float val = pos * freq;
                    float fcr = cosf(val);
                    float fci = sinf(val);
                    int rotn = i + j * head_size < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
                    for (int v = 0; v < rotn; v++) {
                        float *vec = v == 0 ? (float *)pstRunState->Q + k * dim : (float *)pstRunState->K + k * kv_dim; // the vector to rotate (query or key)
                        float v0 = vec[I + j * head_size];
                        float v1 = vec[I + j * head_size + head_size / 2];
                        vec[I + j * head_size]                 = v0 * fcr - v1 * fci;
                        vec[I + j * head_size + head_size / 2] = v0 * fci + v1 * fcr;
                    }
                }
            }
        }
    }
}

/*
 * SwiGLU activation for the FFN: for every token, compute silu(gate) * up.
 * w1w3 stores, per token, the gate projection (first hidden_dim values)
 * immediately followed by the up projection (next hidden_dim values).
 * Writes an n_tokens x hidden_dim result into dst.
 */
void Active_Silu(f16 *dst, f16 *w1w3, int hidden_dim, int n_tokens)
{
    const int row_stride = hidden_dim << 1;   /* gate + up per token row */

    for (int t = 0; t < n_tokens; t++) {
        f16 *gate = w1w3 + t * row_stride;
        f16 *up   = gate + hidden_dim;
        f16 *out  = dst + t * hidden_dim;
        for (int i = 0; i < hidden_dim; i++) {
            float x = gate[i];
            /* silu(x) = x * sigmoid(x) = x * (1 / (1 + e^-x)),
             * then scaled by the matching up-projection element. */
            float sig = 1.0f / (1.0f + expf(-x));
            out[i] = (f16)(x * sig * (float)up[i]);
        }
    }
}

/*
 * Apply rotary position embedding (RoPE) in place to one head-sized f16
 * vector (a query or key head), using a precomputed cos/sin table.
 *
 * cos_sin_cache holds one row of n_rotary f16 values per position; within a
 * row the first n_rotary/2 entries are used as cos terms and the second half
 * as sin terms (this is how both branches index it below).
 *
 * rope_type == true  -> NeoX-style pairing: element i rotates with element
 *                       i + n_rotary/2.
 * rope_type == false -> GPT-J-style pairing: adjacent elements (2j, 2j+1)
 *                       rotate together.
 *
 * noinline keeps this helper out of its large callers — presumably a
 * code-size/profiling choice; confirm before removing.
 */
__attribute__((noinline))
void Rope_embedding_impl(bool rope_type, int n_rotary, f16 *head_ptr, const f16 *cos_sin_cache, int position)
{
    /* Row of the cos/sin table for this token position. */
    const f16 *cos_sin_ptr = cos_sin_cache + position * n_rotary;
    int embed_dim = n_rotary >> 1;

    /* rope_neox: rotate pairs (x[i], x[i + embed_dim]) */
    if (rope_type == true) {
        int xx = 0, yy = embed_dim;
        /* NEON fast path: 8 f16 lanes per iteration. */
        for (; xx <= embed_dim - 8; xx += 8, yy += 8) {
            /* Prefetch 32 elements ahead for writing (rw=1, locality=2). */
            __builtin_prefetch(&head_ptr[xx + 32], 1, 2);
            __builtin_prefetch(&head_ptr[yy + 32], 1, 2);
            const float16x8_t qx = vld1q_f16(&head_ptr[xx]), qy = vld1q_f16(&head_ptr[yy]);
            const float16x8_t csx = vld1q_f16(&cos_sin_ptr[xx]), csy = vld1q_f16(&cos_sin_ptr[yy]);
            /* x' = x*cos - y*sin ; y' = y*cos + x*sin
             * (vfmaq_f16(a, b, c) computes a + b*c). */
            vst1q_f16(&head_ptr[xx], vfmaq_f16(vmulq_f16(qx, csx), vnegq_f16(qy), csy));
            vst1q_f16(&head_ptr[yy], vfmaq_f16(vmulq_f16(qy, csx), qx, csy));
        }
        /* Scalar tail for the remainder when embed_dim % 8 != 0. */
        for (; xx < embed_dim; xx++, yy++) {
            const f16 qx = head_ptr[xx], qy = head_ptr[yy];
            head_ptr[xx] = qx * cos_sin_ptr[xx] - qy * cos_sin_ptr[yy];
            head_ptr[yy] = qy * cos_sin_ptr[xx] + qx * cos_sin_ptr[yy];
        }
     } else { /* rope_gptj: rotate adjacent pairs (x[2j], x[2j+1]) */
        for (int j = 0; j < embed_dim; j++) {
            const f16 qx = head_ptr[2 * j], qy = head_ptr[2 * j + 1];
            const f16 cos = cos_sin_ptr[j], sin = cos_sin_ptr[embed_dim + j];
            head_ptr[2 * j] = qx * cos - qy * sin;
            head_ptr[2 * j + 1] = qy * cos + qx * sin;
        }
     }
}

/*
 * Convert an f16 weight tensor of `Size` elements at `src` into the storage
 * format selected by `quantization_bit_code`, writing the result to `dst`.
 *
 *  - GGML_TYPE_F32: dequantize f16 -> f32 in one pass.
 *  - GGML_TYPE_F16: plain memcpy (source is already f16).
 *  - any other code: requantize row by row (f16 -> f32 temp -> target type),
 *    one row being kv_dim elements.
 *
 * Size must be a multiple of kv_dim for the requantize path (rows are whole).
 */
void quantization_weight_strategy(void *dst, void *src, int64_t quantization_bit_code, size_t Size)
{
    int kv_dim = g_pstModelHypePara.dim * g_pstModelHypePara.n_kv_heads / g_pstModelHypePara.n_head;
    /* std::vector instead of the previous VLA `float Buffer[kv_dim]`:
     * VLAs are a non-standard C++ extension, and kv_dim scales with the
     * model dimension, which risked overflowing the stack. */
    std::vector<float> Buffer(kv_dim);
    int block_num = Size / kv_dim;

    /* Dequantize: f16 source -> f32 destination. */
    if (quantization_bit_code == GGML_TYPE_F32) {
        g_BlockDataInfo[GGML_TYPE_F16].dequantize(src, static_cast<float*>(dst), Size);
    } else {
        if (quantization_bit_code != GGML_TYPE_F16) {
            /* Bytes per destination row of kv_dim elements. */
            int offset = kv_dim / g_BlockDataInfo[quantization_bit_code].uiblkSize * g_BlockDataInfo[quantization_bit_code].uiTypeSize;
            for (int i = 0; i < block_num; i++) {
                /* Widen to size_t BEFORE multiplying: i * kv_dim in int
                 * arithmetic can overflow for large weight tensors. */
                g_BlockDataInfo[GGML_TYPE_F16].dequantize((char *)src + (size_t)i * kv_dim * sizeof(f16), Buffer.data(), kv_dim);
                g_BlockDataInfo[quantization_bit_code].quantize(Buffer.data(), (char *)dst + (size_t)i * offset, kv_dim);
            }
        } else { /* Same type: copy the weights through unchanged. */
            memcpy(dst, src, Size * sizeof(f16));
        }
    }
}

/*
 * One-time model initialisation: records the hyper-parameters into the
 * global g_pstModelHypePara, selects a storage type per weight tensor,
 * precomputes the f16 expf lookup table, allocates NUMA-local storage for
 * every weight, and converts/copies the torch tensors into that storage via
 * quantization_weight_strategy().
 *
 * NUMA layout (established by the allocation loops below):
 *  - token embedding, per-layer attn/ffn norms, qkv bias and the output norm
 *    are allocated on NUMA node 0;
 *  - Wqkv / wo / w1w3 / ffn_down and the lm_head are row-split into g_numas
 *    equal slices, one slice allocated and filled per NUMA node.
 *
 * Guarded by a function-local static flag: only the first call does work.
 */
void load_weight_and_malloc_active_tensor(
    int64_t dim,  // MODEL_HYPE_PARA.dim embedding dimension
    int64_t hidden_dim,  // MODEL_HYPE_PARA.hidden_dim FFN hidden-layer dimension
    int64_t n_layers,  // MODEL_HYPE_PARA.n_layers number of model layers
    int64_t n_vocab,  // MODEL_HYPE_PARA.n_vocab vocabulary size
    int64_t n_head,  // MODEL_HYPE_PARA.n_head number of attention heads
    int64_t n_kv_heads,  // MODEL_HYPE_PARA.n_kv_heads number of KV heads
    int64_t context_length,   // MODEL_HYPE_PARA.context_length context length
    double norm_rms_eps,   // MODEL_HYPE_PARA.norm_rms_eps RMS-norm epsilon
    double rope_freq_base,   // MODEL_HYPE_PARA.rope_freq_base RoPE frequency base
    double attn_scale,
    int64_t is_neox_style,
    int64_t quantization_bit_code,

    torch::Tensor const &cos_sin_cache,
    torch::Tensor token_embedding,   // WEIGHT.token_embedding
    torch::Tensor rms_att_norm,   // WEIGHT.rms_att_norm
    torch::Tensor rms_ffn_norm,   // WEIGHT.rms_ffn_norm
    torch::Tensor wqkv,
    torch::Tensor wo,   // WEIGHT.wo
    torch::Tensor qkv_bias,
    torch::Tensor w1w3,
    torch::Tensor ffn_down,   // WEIGHT.ffn_down
    torch::Tensor output_norm,  // WEIGHT.output_norm
    torch::Tensor lm_head       // WEIGHT.output
){
    // Idempotence guard: subsequent calls are no-ops.
    static bool already_called = false;
    if (already_called) {
        return;
    }
    already_called = true;

    // Initialize i8mm instruction-set detection
    init_i8mm_flag();

    g_pstModelHypePara.dim = dim;            /* embedding dimension */
    g_pstModelHypePara.n_head = n_head;        /* number of attention heads */
    g_pstModelHypePara.n_kv_heads = n_kv_heads;     /* number of KV heads */
    g_pstModelHypePara.hidden_dim = hidden_dim;     /* FFN hidden-layer dimension */
    g_pstModelHypePara.n_layers = n_layers;       /* number of model layers */
    g_pstModelHypePara.context_length = context_length; /* context length */
    g_pstModelHypePara.norm_rms_eps = norm_rms_eps; /* eps */
    g_pstModelHypePara.n_vocab = n_vocab;        /* vocabulary size */
    g_pstModelHypePara.rope_freq_base = rope_freq_base; /* RoPE frequency base */
    g_pstModelHypePara.cos_sin_cache = (f16 *)cos_sin_cache.data_ptr();
    g_pstModelHypePara.n_rotary = cos_sin_cache.size(1);
    g_pstModelHypePara.is_neox_style = is_neox_style;
    g_pstModelHypePara.attn_scale = attn_scale;

    /* Per-tensor storage types: norms/biases/embedding stay float,
     * all matmul weights use the requested quantization code. */
    weight_types.token_embd_weight = GGML_TYPE_F32;
    weight_types.attn_k_weight = quantization_bit_code;
    weight_types.attn_k_bias = GGML_TYPE_F32;
    weight_types.attn_norm_weight = GGML_TYPE_F32;
    weight_types.attn_q_weight = quantization_bit_code;
    weight_types.attn_q_bias = GGML_TYPE_F32;
    weight_types.attn_v_weight = quantization_bit_code;
    weight_types.attn_v_bias = GGML_TYPE_F32;
    weight_types.ffn_down_weight = quantization_bit_code;
    weight_types.ffn_gate_weight = quantization_bit_code;
    weight_types.ffn_norm_weight = GGML_TYPE_F32;
    weight_types.ffn_up_weight = quantization_bit_code;
    weight_types.attn_output_weight = quantization_bit_code;
    weight_types.output_weight = quantization_bit_code;
    weight_types.output_norm_weight = GGML_TYPE_F32;

    /* Precompute expf() for every possible f16 bit pattern so f16 exp can
     * later be a table lookup.
     * NOTE(review): "*(f16*)(&i)" reinterprets the low two bytes of the int
     * as an f16 bit pattern — assumes a little-endian target and relies on
     * type punning through a pointer cast; confirm this is safe for all
     * supported targets/build flags. */
    for(int i = 0; i < (1 << 16); ++i) {
        float f = f16_to_f32(*(f16*)(&i));
        expf_f16_table[i] = f32_to_f16(expf(f));
    }

    assert(wqkv.dtype() == torch::kFloat16);
    int N_gqa = n_head / n_kv_heads;   /* query heads per KV head (GQA group size) */
    int kv_dim = dim / N_gqa;          /* width of K (and V) projection output */

    /* Byte sizes: elements / block-size * bytes-per-block for each type. */
    size_t tokens_embedding_weight_size = (size_t)dim * n_vocab / g_BlockDataInfo[weight_types.token_embd_weight].uiblkSize * g_BlockDataInfo[weight_types.token_embd_weight].uiTypeSize;

    size_t attention_q_size_per_layer = (size_t)dim * dim / g_BlockDataInfo[weight_types.attn_q_weight].uiblkSize * g_BlockDataInfo[weight_types.attn_q_weight].uiTypeSize;
    size_t attention_k_size_per_layer = (size_t)dim * kv_dim / g_BlockDataInfo[weight_types.attn_k_weight].uiblkSize * g_BlockDataInfo[weight_types.attn_k_weight].uiTypeSize;
    size_t attention_v_size_per_layer = (size_t)dim * kv_dim / g_BlockDataInfo[weight_types.attn_v_weight].uiblkSize * g_BlockDataInfo[weight_types.attn_v_weight].uiTypeSize;
    size_t attention_size_per_layer = attention_q_size_per_layer + attention_k_size_per_layer + attention_v_size_per_layer;

    size_t bias_q_size_per_layer = (size_t)dim / g_BlockDataInfo[weight_types.attn_q_bias].uiblkSize * g_BlockDataInfo[weight_types.attn_q_bias].uiTypeSize;
    size_t bias_k_size_per_layer = (size_t)kv_dim / g_BlockDataInfo[weight_types.attn_k_bias].uiblkSize * g_BlockDataInfo[weight_types.attn_k_bias].uiTypeSize;
    size_t bias_v_size_per_layer = (size_t)kv_dim / g_BlockDataInfo[weight_types.attn_v_bias].uiblkSize * g_BlockDataInfo[weight_types.attn_v_bias].uiTypeSize;
    size_t bias_qkv_size_per_layer = bias_q_size_per_layer + bias_k_size_per_layer + bias_v_size_per_layer;

    size_t attention_norm_size_per_layer = (size_t)dim / g_BlockDataInfo[weight_types.attn_norm_weight].uiblkSize * g_BlockDataInfo[weight_types.attn_norm_weight].uiTypeSize;

    size_t ffn_down_size_per_layer = (size_t)dim * hidden_dim / g_BlockDataInfo[weight_types.ffn_down_weight].uiblkSize * g_BlockDataInfo[weight_types.ffn_down_weight].uiTypeSize;
    size_t ffn_gate_size_per_layer = (size_t)dim * hidden_dim / g_BlockDataInfo[weight_types.ffn_gate_weight].uiblkSize * g_BlockDataInfo[weight_types.ffn_gate_weight].uiTypeSize;
    size_t ffn_norm_size_per_layer = (size_t)dim / g_BlockDataInfo[weight_types.ffn_norm_weight].uiblkSize * g_BlockDataInfo[weight_types.ffn_norm_weight].uiTypeSize;
    size_t ffn_up_size_per_layer = (size_t)dim * hidden_dim / g_BlockDataInfo[weight_types.ffn_up_weight].uiblkSize * g_BlockDataInfo[weight_types.ffn_up_weight].uiTypeSize;
    size_t w1w3_size_per_layer = ffn_gate_size_per_layer + ffn_up_size_per_layer;

    size_t attention_output_size_per_layer = (size_t)dim * dim / g_BlockDataInfo[weight_types.attn_output_weight].uiblkSize * g_BlockDataInfo[weight_types.attn_output_weight].uiTypeSize;
    size_t output_size = (size_t)dim * n_vocab / g_BlockDataInfo[weight_types.output_weight].uiblkSize * g_BlockDataInfo[weight_types.output_weight].uiTypeSize;
    size_t output_norm_size = (size_t)dim / g_BlockDataInfo[weight_types.output_norm_weight].uiblkSize * g_BlockDataInfo[weight_types.output_norm_weight].uiTypeSize;

    /* Small per-layer tensors (norms, qkv bias) live on NUMA node 0. */
    g_pstWeight.rms_att_norm.Data.tensor2 = static_cast<void**>(numa_alloc_onnode(n_layers * sizeof(float *), 0));
    g_pstWeight.rms_ffn_norm.Data.tensor2 = static_cast<void**>(numa_alloc_onnode(n_layers * sizeof(float *), 0));
    g_pstWeight.qkv_bias.Data.tensor2 = static_cast<void**>(numa_alloc_onnode(n_layers * sizeof(float *), 0));

    for (int i = 0; i < n_layers; i++) {
        g_pstWeight.rms_att_norm.Data.tensor2[i] = numa_alloc_onnode(attention_norm_size_per_layer, 0);
        g_pstWeight.rms_ffn_norm.Data.tensor2[i] = numa_alloc_onnode(ffn_norm_size_per_layer, 0);
        g_pstWeight.qkv_bias.Data.tensor2[i] = numa_alloc_onnode(bias_qkv_size_per_layer, 0);
    }
    g_pstWeight.output_norm.Data.tensor1 = numa_alloc_onnode(output_norm_size, 0);

    /* Large matmul weights: top-level pointer arrays, then one 1/g_numas
     * slice per NUMA node (indexed [numa][layer]). */
    g_pstWeight.Wqkv.Data.tensor3 = static_cast<void***>(numa_alloc_onnode(g_numas * sizeof(void **), 0));
    g_pstWeight.wo.Data.tensor3 = static_cast<void***>(numa_alloc_onnode(g_numas * sizeof(void **), 0));
    g_pstWeight.w1w3.Data.tensor3 = static_cast<void***>(numa_alloc_onnode(g_numas * sizeof(void **), 0));
    g_pstWeight.ffn_down.Data.tensor3 = static_cast<void***>(numa_alloc_onnode(g_numas * sizeof(void **), 0));
    g_pstWeight.output.Data.tensor2 = static_cast<void**>(numa_alloc_onnode(g_numas * sizeof(void *), 0));
    g_pstWeight.token_embedding.Data.tensor1 = numa_alloc_onnode(tokens_embedding_weight_size, 0);

    for (int i = 0; i < g_numas; i++) {
        g_pstWeight.Wqkv.Data.tensor3[i] = static_cast<void**>(numa_alloc_onnode(n_layers * sizeof(void *), i));
        g_pstWeight.wo.Data.tensor3[i] = static_cast<void**>(numa_alloc_onnode(n_layers * sizeof(void *), i));
        g_pstWeight.w1w3.Data.tensor3[i] = static_cast<void**>(numa_alloc_onnode(n_layers * sizeof(void *), i));
        g_pstWeight.ffn_down.Data.tensor3[i] = static_cast<void**>(numa_alloc_onnode(n_layers * sizeof(void *), i));
        g_pstWeight.output.Data.tensor2[i] = (void *)numa_alloc_onnode(output_size / g_numas, i);

        for (int j = 0; j < n_layers; j++) {
            g_pstWeight.Wqkv.Data.tensor3[i][j] = numa_alloc_onnode(attention_size_per_layer / g_numas, i);
            g_pstWeight.wo.Data.tensor3[i][j] = numa_alloc_onnode(attention_output_size_per_layer / g_numas, i);
            g_pstWeight.w1w3.Data.tensor3[i][j] = numa_alloc_onnode(w1w3_size_per_layer / g_numas, i);
            g_pstWeight.ffn_down.Data.tensor3[i][j] = numa_alloc_onnode(ffn_down_size_per_layer / g_numas, i);
        }
    }

    std::cout << "load_weight start ..." << std::endl;

    /* Quantize/copy weights: per layer, convert norms and bias directly,
     * and fill each NUMA node's row slice of the large matrices (the torch
     * source pointer is offset by j slices of rows * dim elements). */
    for (int layerNum = 0; layerNum < n_layers; layerNum++) {
        quantization_weight_strategy(g_pstWeight.rms_att_norm.Data.tensor2[layerNum], rms_att_norm.index(torch::indexing::TensorIndex(layerNum)).data_ptr(),
                                     weight_types.attn_norm_weight, dim);
        quantization_weight_strategy(g_pstWeight.rms_ffn_norm.Data.tensor2[layerNum], rms_ffn_norm.index(torch::indexing::TensorIndex(layerNum)).data_ptr(),
                                     weight_types.ffn_norm_weight, dim);
        int qkv_dim = dim + 2 * kv_dim;   /* fused Q+K+V output rows */
        for (int j = 0; j < g_numas; ++j) {
            f16 *qkv_pointer = (f16 *)wqkv.index(torch::indexing::TensorIndex(layerNum)).data_ptr() + qkv_dim / g_numas * dim * j;
            quantization_weight_strategy(g_pstWeight.Wqkv.Data.tensor3[j][layerNum], (char *)qkv_pointer, weight_types.attn_k_weight,
                                         qkv_dim / g_numas * dim);

            f16 *wo_pointer = (f16 *)wo.index(torch::indexing::TensorIndex(layerNum)).data_ptr() + dim / g_numas * dim * j;
            quantization_weight_strategy(g_pstWeight.wo.Data.tensor3[j][layerNum], (char *)wo_pointer,
                                         weight_types.attn_output_weight, dim * dim / g_numas);

            f16 *w1w3_pointer = (f16 *)w1w3.index(torch::indexing::TensorIndex(layerNum)).data_ptr() + 2 * hidden_dim / g_numas * dim * j;
            quantization_weight_strategy(g_pstWeight.w1w3.Data.tensor3[j][layerNum], (char *)w1w3_pointer,
                                         weight_types.ffn_up_weight, 2 * hidden_dim / g_numas * dim);

            f16 *ffn_down_pointer = (f16 *)ffn_down.index(torch::indexing::TensorIndex(layerNum)).data_ptr() + hidden_dim / g_numas * dim * j;
            quantization_weight_strategy(g_pstWeight.ffn_down.Data.tensor3[j][layerNum], (char *)ffn_down_pointer,
                                         weight_types.ffn_down_weight, dim * hidden_dim / g_numas);
        }

        quantization_weight_strategy(g_pstWeight.qkv_bias.Data.tensor2[layerNum], qkv_bias.index(torch::indexing::TensorIndex(layerNum)).data_ptr(),
                                     weight_types.attn_q_bias, qkv_dim);
    }

    /* lm_head: one vocabulary slice per NUMA node. */
    for (int i = 0; i < g_numas; i++) {
        f16 *output_pointer = (f16 *)lm_head.data_ptr() + n_vocab / g_numas * dim * i;
        quantization_weight_strategy(g_pstWeight.output.Data.tensor2[i], output_pointer,
                                     weight_types.output_weight, dim * n_vocab /  g_numas);
    }

    quantization_weight_strategy(g_pstWeight.token_embedding.Data.tensor1, token_embedding.data_ptr(), weight_types.token_embd_weight, dim * n_vocab);
    quantization_weight_strategy(g_pstWeight.output_norm.Data.tensor1, output_norm.data_ptr(), weight_types.output_norm_weight, dim);

    std::cout << "load_weight end." << std::endl;
}

// #define DEBUG_TIME 1
/*
 * Nanosecond timestamp for the DEBUG_TIME profiling counters.
 * Returns a monotonically increasing value; callers only ever take
 * differences of consecutive readings.
 */
static inline uint64_t get_time_ns(void)
{
    struct timespec ts;
    /* CLOCK_MONOTONIC instead of CLOCK_REALTIME: realtime can jump
     * (NTP/admin clock changes), which would corrupt the measured deltas. */
    clock_gettime(CLOCK_MONOTONIC, &ts);
    /* Widen BEFORE multiplying: tv_sec * 1000000000 previously multiplied
     * in time_t width and overflows where time_t is 32 bits. */
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

void get_next_token(void* output, MODEL_HYPE_PARA *pstModelHypePara, WEIGHT *pstLlama, MODEL_RUN_STATE *pstRunState,
                      bool is_prompt,
                      torch::Tensor& block_tables,
                      torch::Tensor& seq_lens,
                      torch::Tensor& slot_mapping,
                      void *hidden_state, int64_t *pos,
                      std::vector<torch::Tensor>& kv_caches,
                      int64_t block_size,
                      int n_tokens)
{
    int dim = pstModelHypePara->dim;
    int n_kv_heads = pstModelHypePara->n_kv_heads;
    int n_head = pstModelHypePara->n_head;
    int kv_dim = (dim * n_kv_heads) / n_head;
    int hidden_dim =  pstModelHypePara->hidden_dim;
    int layers = pstModelHypePara->n_layers;
    int n_vocab = pstModelHypePara->n_vocab;
    float eps = pstModelHypePara->norm_rms_eps;
    UINT32 srcBlockNum, srcBlocksize;
    UINT32 dstBlockNum, dstBlocksize;
    int srcType;
    enum ggml_type dstType;
    
    int qkv_dim = dim + kv_dim + kv_dim;
    int head_size = dim / n_head;
    int kv_head_dim = block_size * head_size;
    f16 *VT = (f16 *)numa_alloc_onnode(n_tokens * dim * sizeof(f16), 0);

    quantization_weight_strategy(pstRunState->Token_Norm, (char *)hidden_state, weight_types.token_embd_weight, n_tokens * dim);

#ifdef DEBUG_TIME
    uint64_t t0 = get_time_ns();
    uint64_t time[25] = {0};
    uint64_t tt1 = get_time_ns();
#endif

    int total_thread_num = get_total_thread_num();
    std::vector<bool> is_init_process_affinity(total_thread_num, false);

    for(int L = 0; L < layers; L++) {
#pragma omp parallel
{
        int current_thread_num = omp_get_thread_num();
        if(!is_init_process_affinity[current_thread_num]) {
            init_process_affinity();
            is_init_process_affinity[current_thread_num] = true;
        }
        WorkDivider work;
        init_work_divider(&work, g_numas);
        SingleNumaWorkRange srange;
        MultiNumaWorkRange mrange;

        divide_work_first_numa(&work, n_tokens, &srange);
        if (work.my_numa == 0) {
            for (int i = srange.begin_thread; i < srange.end_thread; i++) {
                for (int j= 0; j < dim; j++) {
                    ((f32 *)pstRunState->add_weight)[i * dim + j] += ((f32 *)pstRunState->Token_Norm)[i * dim + j];
                }
                RmsNorm((f32 *)pstRunState->Token_Ori + i * dim, (f32 *)pstRunState->add_weight + i * dim,
                        (f32 *)pstLlama->rms_att_norm.Data.tensor2[L], eps, dim);
            }
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[0] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

#pragma omp barrier
        srcType = weight_types.attn_k_weight;
        dstType = g_BlockDataInfo[srcType].VecDotType;
        srcBlockNum = g_BlockDataInfo[srcType].uiblkSize;
        srcBlocksize = g_BlockDataInfo[srcType].uiTypeSize;  
        dstBlockNum = g_BlockDataInfo[dstType].uiblkSize;
        dstBlocksize = g_BlockDataInfo[dstType].uiTypeSize;

        divide_work_first_numa(&work, n_tokens * dim / dstBlockNum, &srange);
        Quantize((char *)pstRunState->tmp_vec_numa[0] + srange.begin_thread * dstBlocksize, 
                 (f32 *)pstRunState->Token_Ori + srange.begin_thread * dstBlockNum,
                 dstType, srange.work_per_thread * dstBlockNum);

        if (work.my_numa == 0) {
            for (int i = 1; i < g_numas; i++) {
                memcpy((char *)pstRunState->tmp_vec_numa[i] + srange.begin_thread * dstBlocksize,
                        (char *)pstRunState->tmp_vec_numa[0] + srange.begin_thread * dstBlocksize,
                        srange.work_per_thread * dstBlocksize);
            }
        }

#ifdef DEBUG_TIME
if (work.tid == 0) {
    time[1] += get_time_ns() - tt1;
    tt1 = get_time_ns();
}
#endif

#pragma omp barrier
        /* 计算qkv */
        divide_work_all_numas(&work, qkv_dim, &mrange);
        if ((quantization_bit_code_str != nullptr && std::string(quantization_bit_code_str) == "f16") || !i8mm_flag) {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i++) {
                for (int k = 0; k < n_tokens; k++) {
                    __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * qkv_dim + i, 1, 2);
                    g_BlockDataInfo[srcType].VecDotFunc(dim,
                        (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * qkv_dim + i, 0,
                        (char *)g_pstWeight.Wqkv.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, 0,
                        (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, 0, 1);
                }
            }
        }
        else {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i += 2) {
                int nrc_i = ((mrange.end_thread - i) >= 2) ? 2 : 1;    
                if (nrc_i == 2) {
                    for (int k = 0; k < n_tokens; k += 2) {
                        int nrc_k = ((n_tokens - k) >= 2) ? 2 : 1;
                        if (nrc_k == 2) {
                            __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * qkv_dim + i, 1, 2);
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                    (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * qkv_dim + i, qkv_dim,
                                    (char *)g_pstWeight.Wqkv.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);
                        }
                        else if (nrc_k == 1) {
                            __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * qkv_dim + i, 1, 2);
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                    (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * qkv_dim + i, qkv_dim,
                                    (char *)g_pstWeight.Wqkv.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);
                        
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                    (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * qkv_dim + i+1, qkv_dim,
                                    (char *)g_pstWeight.Wqkv.Data.tensor3[work.my_numa][L] + (i + 1) * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);
                        }

                    }
                } 
                else if (nrc_i == 1) {
                    for (int k = 0; k < n_tokens; k += 1) {
                        __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * qkv_dim + i, 1, 2);
                        g_BlockDataInfo[srcType].VecDotFunc(dim,
                                (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * qkv_dim + i, qkv_dim,
                                (char *)g_pstWeight.Wqkv.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, 1);
                    }
                }
            }
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[2] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

#pragma omp barrier
        if (work.tid == 1) {
            for (int k = 0; k < n_tokens; k++) {
                for (int i = 0; i < qkv_dim; i++) {
                    pstRunState->Attn_out[k * qkv_dim + i] += ((f32 *)g_pstWeight.qkv_bias.Data.tensor2[L])[i];
                }
            }
            g_BlockDataInfo[GGML_TYPE_F16].quantize(pstRunState->Attn_out, pstRunState->Attn_out_f16, n_tokens * qkv_dim);
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[3] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

// 中间数据f32 -> f16, 减少attention修改
#pragma omp barrier
        f16 *q_ptr = pstRunState->Attn_out_f16, *k_ptr = pstRunState->Attn_out_f16 + dim, *v_ptr = pstRunState->Attn_out_f16 + dim + kv_dim;
        f16 *kcache_ptr = (f16 *)kv_caches[L][0].data_ptr(), *vcache_ptr = (f16 *)kv_caches[L][1].data_ptr();
        int64_t *slot_mapping_ptr = (int64_t *)slot_mapping.data_ptr();
        int kv_cache_block_elem_num = n_kv_heads * head_size * block_size;

        divide_all_work(&work, n_tokens * n_kv_heads, &srange);
        for (int i = srange.begin_thread; i < srange.end_thread; i++) {
            int t = i / n_kv_heads;
            int h = i % n_kv_heads;
            const int64_t slot = slot_mapping_ptr[t];
            if (slot < 0) {
                continue;
            }
            int64_t block_idx = slot / block_size, block_offset = slot % block_size;
            f16 *k_head_ptr = k_ptr + t * qkv_dim + h * head_size;
            f16 *kcache_head_ptr = kcache_ptr + kv_cache_block_elem_num * block_idx + h * block_size * head_size;
            const f16 *v_head_ptr = v_ptr + t * qkv_dim + h * head_size;
            f16 *vcache_head_ptr = vcache_ptr + kv_cache_block_elem_num * block_idx + h * block_size * head_size;
            Rope_embedding_impl(g_pstModelHypePara.is_neox_style, g_pstModelHypePara.n_rotary, k_head_ptr, g_pstModelHypePara.cos_sin_cache,
                                pos[t]);
            for (int idx = 0; idx < head_size; idx += 8) {   //8 = 16 / sizeof(f16)
                for (int vidx = idx; vidx < idx + 8; vidx++) {
                    vcache_head_ptr[vidx * block_size + block_offset] = v_head_ptr[vidx];
                }
                std::copy_n(k_head_ptr + idx, 8, kcache_head_ptr + idx * block_size + block_offset * 8);
            }
        }
        divide_all_work(&work, n_tokens * n_head, &srange);
        for (int i = srange.begin_thread; i < srange.end_thread; i++) {
            int t = i / n_head;
            int h = i % n_head;
            Rope_embedding_impl(g_pstModelHypePara.is_neox_style, g_pstModelHypePara.n_rotary, q_ptr + t *qkv_dim + h * head_size,
                                g_pstModelHypePara.cos_sin_cache, pos[t]);
            for (int j = 0; j < head_size; j++) {
                q_ptr[t * qkv_dim + h * head_size + j] *= g_pstModelHypePara.attn_scale;
            }
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[4] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

}
        //divide_kv_cache_numa(&work, n_head, &srange);
        if (is_prompt == true) {
            f16 *v_ptr = (f16 *)pstRunState->Attn_out_f16 + dim + kv_dim;
            transpose_v(VT, v_ptr, n_tokens, kv_dim, qkv_dim);
            prefill_attention(pstRunState->seq_qkv, pstRunState->Attn_out_f16, VT, n_tokens, seq_lens.size(0),
                              (int *)seq_lens.data_ptr());
        } else {
            int kv_block_row = kv_caches[L][0].stride(0);
            paged_attention_v1_impl(pstRunState->seq_qkv, pstRunState->Attn_out_f16, (f16 *)kv_caches[L][0].data_ptr(),
                                    (f16 *)kv_caches[L][1].data_ptr(), n_kv_heads, (int *)block_tables.data_ptr(),
                                    (int *)seq_lens.data_ptr(), block_tables.size(1), qkv_dim, kv_block_row,
                                    kv_head_dim, n_tokens, n_head, head_size);
        }

#ifdef DEBUG_TIME
    time[5] += get_time_ns() - tt1;
    tt1 = get_time_ns();
#endif

// 数据转换f16 —> f32
#pragma omp parallel 
{
        WorkDivider work;
        init_work_divider(&work, g_numas);
        SingleNumaWorkRange srange;
        MultiNumaWorkRange mrange;

        if (work.tid == 1) {
            g_BlockDataInfo[GGML_TYPE_F16].dequantize(pstRunState->seq_qkv, pstRunState->Attn_out, n_tokens * dim);
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[6] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

#pragma omp barrier

        srcType = weight_types.attn_output_weight;
        dstType = g_BlockDataInfo[srcType].VecDotType;
        srcBlockNum = g_BlockDataInfo[srcType].uiblkSize;
        srcBlocksize = g_BlockDataInfo[srcType].uiTypeSize;
        dstBlockNum = g_BlockDataInfo[dstType].uiblkSize;
        dstBlocksize = g_BlockDataInfo[dstType].uiTypeSize;

        divide_work_first_numa(&work, n_tokens * dim / dstBlockNum, &srange);
        Quantize((char *)pstRunState->tmp_vec_numa[0] + srange.begin_thread * dstBlocksize,
                 (f32 *)pstRunState->Attn_out + srange.begin_thread * dstBlockNum,
                 dstType, srange.work_per_thread * dstBlockNum);

        if (work.my_numa == 0) {
            for (int i= 1; i < g_numas; i++) {
                memcpy((char *)pstRunState->tmp_vec_numa[i] + srange.begin_thread * dstBlocksize,
                        (char *)pstRunState->tmp_vec_numa[0] + srange.begin_thread * dstBlocksize,
                        srange.work_per_thread * dstBlocksize);
            }
        }
#pragma omp barrier
        divide_work_all_numas(&work, dim, &mrange);
        if ((quantization_bit_code_str != nullptr && std::string(quantization_bit_code_str) == "f16") || !i8mm_flag) {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i++) {
                for (int k = 0; k < n_tokens; k++) {
                    __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * dim + i, 1, 2);
                    g_BlockDataInfo[srcType].VecDotFunc(dim,
                        (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * dim + i, 0,
                        (char *)g_pstWeight.wo.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, 0,
                        (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, 0, 1);
                }
            }
        }
        else {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i += 2) {
                int nrc_i = ((mrange.end_thread - i) >= 2) ? 2 : 1;
                if (nrc_i == 2) {
                    for (int k = 0; k < n_tokens; k += 2) {
                        int nrc_k = ((n_tokens - k) >= 2) ? 2 : 1;
                        if (nrc_k == 2) {
                            __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * dim + i, 1, 2);
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                    (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * dim + i, dim,
                                    (char *)g_pstWeight.wo.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);
                        }
                        else if (nrc_k == 1) {
                            __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * dim + i, 1, 2);
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                    (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * dim + i, dim,
                                    (char *)g_pstWeight.wo.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);

                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                    (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * dim + i + 1, dim,
                                    (char *)g_pstWeight.wo.Data.tensor3[work.my_numa][L] + (i + 1) * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);
                        }

                    }
                }
                else if (nrc_i == 1) {
                    for (int k = 0; k < n_tokens; k += 1) {
                        __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * dim + i, 1, 2);
                        g_BlockDataInfo[srcType].VecDotFunc(dim,
                                (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * dim + i, dim,
                                (char *)g_pstWeight.wo.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, 1);
                    }
                }
            }
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[7] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

#pragma omp barrier
        divide_work_first_numa(&work, n_tokens, &srange);
        if (work.my_numa == 0) {
            for (int i = srange.begin_thread; i < srange.end_thread; i++) {
                for (int j= 0; j < dim; j++) {
                    ((f32 *)pstRunState->add_weight)[i * dim + j] += ((f32 *)pstRunState->Attn_out)[i * dim + j];
                }
                RmsNorm((f32 *)pstRunState->Attn_out + i * dim, (f32 *)pstRunState->add_weight + i * dim,
                        (f32 *)pstLlama->rms_ffn_norm.Data.tensor2[L], eps, dim);
            }
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[8] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

#pragma omp barrier
        srcType = weight_types.ffn_up_weight;
        dstType = g_BlockDataInfo[srcType].VecDotType;
        srcBlockNum = g_BlockDataInfo[srcType].uiblkSize;
        srcBlocksize = g_BlockDataInfo[srcType].uiTypeSize;
        dstBlockNum = g_BlockDataInfo[dstType].uiblkSize;
        dstBlocksize = g_BlockDataInfo[dstType].uiTypeSize;

        divide_work_first_numa(&work, n_tokens * dim / dstBlockNum, &srange);
        Quantize((char *)pstRunState->tmp_vec_numa[0] + srange.begin_thread * dstBlocksize,
                 (f32 *)pstRunState->Attn_out + srange.begin_thread * dstBlockNum,
                 dstType, srange.work_per_thread * dstBlockNum);

        if (work.my_numa == 0) {
            for (int i = 1; i < g_numas; i++) {
                memcpy((char *)pstRunState->tmp_vec_numa[i] + srange.begin_thread * dstBlocksize,
                        (char *)pstRunState->tmp_vec_numa[0] + srange.begin_thread * dstBlocksize,
                        srange.work_per_thread * dstBlocksize);
            }
        }
#pragma omp barrier
        /* w1/w3 数据类型一样 */
        int total_hidden_dim = hidden_dim * 2;
        divide_work_all_numas(&work, total_hidden_dim, &mrange);
        if ((quantization_bit_code_str != nullptr && std::string(quantization_bit_code_str) == "f16") || !i8mm_flag) {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i++) {
                for (int k = 0; k < n_tokens; k++) {
                    __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * total_hidden_dim + i, 1, 2);
                    /* 单线程的结果 */
                    g_BlockDataInfo[srcType].VecDotFunc(dim,
                        (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * total_hidden_dim + i, 0,
                        (char *)g_pstWeight.w1w3.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, 0,
                        (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, 0, 1);
                }
            }
        }
        else {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i += 2) {
                int nrc_i = ((mrange.end_thread - i) >= 2) ? 2 : 1;
                if (nrc_i == 2) {
                    for (int k = 0; k < n_tokens; k += 2) {
                        int nrc_k = ((n_tokens - k) >= 2) ? 2 : 1;
                        if (nrc_k == 2) {
                            __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * total_hidden_dim + i, 1, 2);
                            /* 单线程的结果 */
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * total_hidden_dim + i, total_hidden_dim,
                                (char *)g_pstWeight.w1w3.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);
                        }
                        else if (nrc_k == 1) {
                            __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * total_hidden_dim + i, 1, 2);
                            /* 单线程的结果 */
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * total_hidden_dim + i, total_hidden_dim,
                                (char *)g_pstWeight.w1w3.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);
                            
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * total_hidden_dim + i + 1, total_hidden_dim,
                                (char *)g_pstWeight.w1w3.Data.tensor3[work.my_numa][L] + (i + 1) * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);
                        }
                    }
                }
                else if (nrc_i == 1) {
                    for (int k = 0; k < n_tokens; k += 1) {
                        __builtin_prefetch(pstRunState->Attn_out + mrange.begin_numa + k * total_hidden_dim + i, 1, 2);
                        /* 单线程的结果 */
                        g_BlockDataInfo[srcType].VecDotFunc(dim,
                            (f32 *)pstRunState->Attn_out + mrange.begin_numa + k * total_hidden_dim + i, total_hidden_dim,
                            (char *)g_pstWeight.w1w3.Data.tensor3[work.my_numa][L] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                            (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, 1);
                    }
                } 
            }            
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[9] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

#pragma omp barrier
        /* silu激活函数 */
        divide_work_first_numa(&work, n_tokens * hidden_dim, &srange);
        if (work.my_numa == 0) {
            for (int item = srange.begin_thread; item < srange.end_thread; item++) {
                int i = item / hidden_dim, j = item % hidden_dim;
                f32 *w1 = pstRunState->Attn_out + i * hidden_dim * 2, *w3 = pstRunState->Attn_out + i * hidden_dim * 2 + hidden_dim;
                f32 *result = pstRunState->ffn_Gate + i * hidden_dim;
                f16 neg_w1 = -w1[j];
                f32 silu_f32 = w1[j] / (1.0 + expf_f16_table[*(uint16_t *)&neg_w1]);
                result[j] = silu_f32 * w3[j];
            }
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[10] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

#pragma omp barrier
        /* w2 */
        srcType = weight_types.ffn_down_weight;
        dstType = g_BlockDataInfo[srcType].VecDotType;
        srcBlockNum = g_BlockDataInfo[srcType].uiblkSize;
        srcBlocksize = g_BlockDataInfo[srcType].uiTypeSize;
        dstBlockNum = g_BlockDataInfo[dstType].uiblkSize;
        dstBlocksize = g_BlockDataInfo[dstType].uiTypeSize;

        divide_work_first_numa(&work, n_tokens * hidden_dim / dstBlockNum, &srange);
        Quantize((char *)pstRunState->tmp_vec_numa[0] + srange.begin_thread * dstBlocksize, 
                 (f32 *)pstRunState->ffn_Gate + srange.begin_thread * dstBlockNum,
                 dstType, srange.work_per_thread * dstBlockNum);

        if (work.my_numa == 0) {
            for (int i = 1; i < g_numas; i++) {
                memcpy((char *)pstRunState->tmp_vec_numa[i] + srange.begin_thread * dstBlocksize,
                        (char *)pstRunState->tmp_vec_numa[0] + srange.begin_thread * dstBlocksize,
                        srange.work_per_thread * dstBlocksize);
            }
        }
#pragma omp barrier
        divide_work_all_numas(&work, dim, &mrange);
        if ((quantization_bit_code_str != nullptr && std::string(quantization_bit_code_str) == "f16") || !i8mm_flag) {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i++) {
                for (int k = 0; k < n_tokens; k++) {
                    __builtin_prefetch((f16 *)pstRunState->Token_Norm + mrange.begin_numa + k * dim + i, 1, 2);
                    g_BlockDataInfo[srcType].VecDotFunc(hidden_dim,
                        (f32 *)pstRunState->Token_Norm + mrange.begin_numa + k * dim + i, 0,
                        (char *)g_pstWeight.ffn_down.Data.tensor3[work.my_numa][L] + i * hidden_dim / srcBlockNum * srcBlocksize, 0,
                        (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * hidden_dim / dstBlockNum * dstBlocksize, 0, 1);
                }
            }
        }
        else {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i += 2) {
                int nrc_i = ((mrange.end_thread - i) >= 2) ? 2 : 1;
                if (nrc_i == 2) {
                    for (int k = 0; k < n_tokens; k += 2) {
                        int nrc_k = ((n_tokens - k) >= 2) ? 2 : 1;
                        if (nrc_k == 2) {
                            __builtin_prefetch((f16 *)pstRunState->Token_Norm + mrange.begin_numa + k * dim + i, 1, 2);
                            g_BlockDataInfo[srcType].VecDotFunc(hidden_dim,
                                    (f32 *)pstRunState->Token_Norm + mrange.begin_numa + k * dim + i, dim,
                                    (char *)g_pstWeight.ffn_down.Data.tensor3[work.my_numa][L] + i * hidden_dim / srcBlockNum * srcBlocksize, hidden_dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * hidden_dim / dstBlockNum * dstBlocksize, hidden_dim / dstBlockNum * dstBlocksize, nrc_k);
                        }
                        else if (nrc_k == 1) {
                            __builtin_prefetch((f16 *)pstRunState->Token_Norm + mrange.begin_numa + k * dim + i, 1, 2);
                            g_BlockDataInfo[srcType].VecDotFunc(hidden_dim,
                                    (f32 *)pstRunState->Token_Norm + mrange.begin_numa + k * dim + i, dim,
                                    (char *)g_pstWeight.ffn_down.Data.tensor3[work.my_numa][L] + i * hidden_dim / srcBlockNum * srcBlocksize, hidden_dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * hidden_dim / dstBlockNum * dstBlocksize, hidden_dim / dstBlockNum * dstBlocksize, nrc_k);

                            g_BlockDataInfo[srcType].VecDotFunc(hidden_dim,
                                    (f32 *)pstRunState->Token_Norm + mrange.begin_numa + k * dim + i + 1, dim,
                                    (char *)g_pstWeight.ffn_down.Data.tensor3[work.my_numa][L] + (i + 1) * hidden_dim / srcBlockNum * srcBlocksize, hidden_dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * hidden_dim / dstBlockNum * dstBlocksize, hidden_dim / dstBlockNum * dstBlocksize, nrc_k);
                        }
                    }
                }
                else if (nrc_i == 1) {
                    for (int k = 0; k < n_tokens; k += 1) {
                        __builtin_prefetch((f16 *)pstRunState->Token_Norm + mrange.begin_numa + k * dim + i, 1, 2);
                        g_BlockDataInfo[srcType].VecDotFunc(hidden_dim,
                                (f32 *)pstRunState->Token_Norm + mrange.begin_numa + k * dim + i, dim,
                                (char *)g_pstWeight.ffn_down.Data.tensor3[work.my_numa][L] + i * hidden_dim / srcBlockNum * srcBlocksize, hidden_dim / srcBlockNum * srcBlocksize,
                                (char *)pstRunState->tmp_vec_numa[work.my_numa] + k * hidden_dim / dstBlockNum * dstBlocksize, hidden_dim / dstBlockNum * dstBlocksize, 1);
                    }
                }
            }
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[11] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

} //end omp
    }

        std::vector<int> last_token_indices;
        if (is_prompt == true) {
            for (int i = 0, sum_seq_lens = 0; i < seq_lens.size(0); i++) {
                sum_seq_lens += ((int *)seq_lens.data_ptr())[i];
                last_token_indices.push_back(sum_seq_lens - 1);
            }
        } else {
            for (int i = 0; i < n_tokens; i++) {
                last_token_indices.push_back(i);
            }
        }

#pragma omp parallel
{
        WorkDivider work;
        init_work_divider(&work, g_numas);
        SingleNumaWorkRange srange;
        MultiNumaWorkRange mrange;

        divide_work_first_numa(&work, last_token_indices.size(), &srange);
        if (work.my_numa == 0) {
            for (int i = srange.begin_thread; i < srange.end_thread; i++) {
                int last_token = last_token_indices[i];
                for (int j = 0; j < dim; j++) {
                    ((f32 *)pstRunState->Token_Norm)[last_token * dim + j] += ((f32 *)pstRunState->add_weight)[last_token * dim + j];
                }
                RmsNorm((f32 *)pstRunState->Token_Ori + i * dim, (f32 *)pstRunState->Token_Norm + last_token * dim,
                        (f32 *)pstLlama->output_norm.Data.tensor1, eps, dim);
            }
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[12] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

#pragma omp barrier
        /* 外层linear */
        srcType = weight_types.output_weight;
        dstType = g_BlockDataInfo[srcType].VecDotType;
        srcBlockNum = g_BlockDataInfo[srcType].uiblkSize;
        srcBlocksize = g_BlockDataInfo[srcType].uiTypeSize;
        dstBlockNum = g_BlockDataInfo[dstType].uiblkSize;
        dstBlocksize = g_BlockDataInfo[dstType].uiTypeSize;

        divide_work_first_numa(&work, last_token_indices.size() * dim / dstBlockNum, &srange);
        Quantize((char *)pstRunState->temp_output_vec_numa[0] + srange.begin_thread * dstBlocksize, 
                 (f32 *)pstRunState->Token_Ori + srange.begin_thread * dstBlockNum,
                 dstType, srange.work_per_thread * dstBlockNum);

        if (work.my_numa == 0) {
            for (int i = 1; i < g_numas; i++) {
                memcpy((char *)pstRunState->temp_output_vec_numa[i] + srange.begin_thread * dstBlocksize,
                        (char *)pstRunState->temp_output_vec_numa[0] + srange.begin_thread * dstBlocksize,
                        srange.work_per_thread * dstBlocksize);
            }
        }
#pragma omp barrier
        divide_work_all_numas(&work, n_vocab, &mrange);
        if ((quantization_bit_code_str != nullptr && std::string(quantization_bit_code_str) == "f16") || !i8mm_flag) {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i++) {
                for (int k = 0; k < last_token_indices.size(); k++) {
                    __builtin_prefetch((f16 *)output + mrange.begin_numa + k * n_vocab + i, 1, 2);
                    g_BlockDataInfo[srcType].VecDotFunc(dim,
                        (f32 *)pstRunState->output_f32 + mrange.begin_numa + k * n_vocab + i, 0,
                        (char *)g_pstWeight.output.Data.tensor2[work.my_numa] + i * dim / srcBlockNum * srcBlocksize, 0,
                        (char *)pstRunState->temp_output_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, 0, 1);
                }
            }
        }
        else {
            for (int i = mrange.begin_thread; i < mrange.end_thread; i += 2) {
                int nrc_i = ((mrange.end_thread - i) >= 2) ? 2 : 1;
                if (nrc_i == 2) {
                    for (int k = 0; k < last_token_indices.size(); k += 2) {
                        int nrc_k = ((last_token_indices.size() - k) >= 2) ? 2 : 1;
                        if (nrc_k == 2) {
                            __builtin_prefetch((f16 *)output + mrange.begin_numa + k * n_vocab + i, 1, 2);
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                    (f32 *)pstRunState->output_f32 + mrange.begin_numa + k * n_vocab + i, n_vocab,
                                    (char *)g_pstWeight.output.Data.tensor2[work.my_numa] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->temp_output_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);
                        }
                        else if (nrc_k == 1) {
                            __builtin_prefetch((f16 *)output + mrange.begin_numa + k * n_vocab + i, 1, 2);
                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                    (f32 *)pstRunState->output_f32 + mrange.begin_numa + k * n_vocab + i, n_vocab,
                                    (char *)g_pstWeight.output.Data.tensor2[work.my_numa] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->temp_output_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);

                            g_BlockDataInfo[srcType].VecDotFunc(dim,
                                    (f32 *)pstRunState->output_f32 + mrange.begin_numa + k * n_vocab + i + 1, n_vocab,
                                    (char *)g_pstWeight.output.Data.tensor2[work.my_numa] + (i + 1) * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                    (char *)pstRunState->temp_output_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, nrc_k);   
                        }

                    }
                }
                else if (nrc_i == 1) {
                    for (int k = 0; k < last_token_indices.size(); k += 1) {
                        __builtin_prefetch((f16 *)output + mrange.begin_numa + k * n_vocab + i, 1, 2);
                        g_BlockDataInfo[srcType].VecDotFunc(dim,
                                (f32 *)pstRunState->output_f32 + mrange.begin_numa + k * n_vocab + i, n_vocab,
                                (char *)g_pstWeight.output.Data.tensor2[work.my_numa] + i * dim / srcBlockNum * srcBlocksize, dim / srcBlockNum * srcBlocksize,
                                (char *)pstRunState->temp_output_vec_numa[work.my_numa] + k * dim / dstBlockNum * dstBlocksize, dim / dstBlockNum * dstBlocksize, 1);
                    }
                }
            } 
        }

#ifdef DEBUG_TIME
    if (work.tid == 0) {
        time[13] += get_time_ns() - tt1;
        tt1 = get_time_ns();
    }
#endif

}
    g_BlockDataInfo[GGML_TYPE_F16].quantize(pstRunState->output_f32, output, last_token_indices.size() * n_vocab);

#ifdef DEBUG_TIME
    time[14] += get_time_ns() - tt1;
    tt1 = get_time_ns();
#endif
    numa_free(VT, n_tokens * dim);

#ifdef DEBUG_TIME
    uint64_t t1 = get_time_ns();
    if (is_prompt == true) {
        fprintf(stderr, " bs=%d prefill=%.3f ms, %.3f token/s\n", n_tokens, (t1 - t0) / 1000000.0, 1.0 * n_tokens / ((t1 - t0) / 1000000000.0));
    } else {
        fprintf(stderr, " bs=%d decode=%.3f ms, %.3f token/s\n", n_tokens, (t1 - t0) / 1000000.0, 1.0 * n_tokens / ((t1 - t0) / 1000000000.0));
    }
    fprintf(stderr, "[0] first rms_norm ——> %8.3lf ms\n", time[0] / 1000000.0);
    fprintf(stderr, "[1] qkv quantize and memcpy ——> %8.3lf ms\n", time[1] / 1000000.0);
    fprintf(stderr, "[2] qkv matmul ——> %8.3lf ms\n", time[2] / 1000000.0);
    fprintf(stderr, "[3] qkv add and quantize f16 ——> %8.3lf ms\n", time[3] / 1000000.0);
    fprintf(stderr, "[4] rope operator ——> %8.3lf ms\n", time[4] / 1000000.0);
    fprintf(stderr, "[5] page attention operator ——> %8.3lf ms\n", time[5] / 1000000.0);
    fprintf(stderr, "[6] dequantize f32 ——> %8.3lf ms\n", time[6] / 1000000.0);
    fprintf(stderr, "[7] (wo)quantize-memcpy-matmul ——> %8.3lf ms\n", time[7] / 1000000.0);
    fprintf(stderr, "[8] ffn add and rmsnorm ——> %8.3lf ms\n", time[8] / 1000000.0);
    fprintf(stderr, "[9] (w1w3)quantize-memcpy-matmul ——> %8.3lf ms\n", time[9] / 1000000.0);
    fprintf(stderr, "[10] silu activation function ——> %8.3lf ms\n", time[10] / 1000000.0);
    fprintf(stderr, "[11] (w2)quantize-memcpy-matmul ——> %8.3lf ms\n", time[11] / 1000000.0);
    fprintf(stderr, "[12] output_norm add and rmsnorm ——> %8.3lf ms\n", time[12] / 1000000.0);
    fprintf(stderr, "[13] (output)quantize-memcpy-matmul ——> %8.3lf ms\n", time[13] / 1000000.0);
    fprintf(stderr, "[14] output quantize f16 ——> %8.3lf ms\n\n", time[14] / 1000000.0);
#endif
}

/**
 * Allocate all per-forward-pass scratch buffers on NUMA node 0 (plus
 * per-node mirrors for the quantized activation vectors).
 *
 * @param para      model hyper-parameters (dims, vocab, context length)
 * @param is_prompt true for prefill: logits buffer is sized per-sequence
 *                  instead of per-token
 * @param N_tokens  number of tokens in this batch (decode logits sizing)
 * @param seq_num   number of sequences in this batch (prefill logits sizing)
 * @return          freshly allocated state; on allocation failure an error is
 *                  logged and the (partially filled) state is still returned —
 *                  callers keep the original best-effort contract.
 */
MODEL_RUN_STATE* create_run_state(const MODEL_HYPE_PARA* para, bool is_prompt, int64_t N_tokens, int64_t seq_num) {
    int64_t dim = para->dim;
    int64_t n_head = para->n_head;
    int64_t n_kv_heads = para->n_kv_heads;
    int64_t kv_dim = dim / n_head * n_kv_heads; // width of the K/V projections
    int64_t hidden_dim = para->hidden_dim;
    int64_t context_length = para->context_length;
    int64_t n_vocab = para->n_vocab;

    MODEL_RUN_STATE* state = new MODEL_RUN_STATE();
    memset(state, 0, sizeof(MODEL_RUN_STATE)); // zero every pointer so cleanup is safe after partial allocation

    // RMS-norm outputs
    state->Token_Ori = (f32*)numa_alloc_onnode(dim * context_length * sizeof(f32), 0);
    state->Token_Norm = (f32*)numa_alloc_onnode(dim * context_length * sizeof(f32), 0);

    // residual-add intermediate
    state->add_weight = (f32 *)numa_alloc_onnode(dim * context_length * sizeof(f32), 0);
//    if (state->add_weight) memset(state->add_weight, 0, dim * context_length * sizeof(f32));

    // per-NUMA-node quantized activation mirrors
    state->temp_output_vec_numa = (void **)numa_alloc_onnode(g_numas * sizeof(void *), 0);
    state->tmp_vec_numa = (void **)numa_alloc_onnode(g_numas * sizeof(void *), 0);
    // BUGFIX: only populate the per-node tables when both tables were actually
    // allocated; previously a failed table allocation was dereferenced here
    // (null-pointer store) before the diagnostic check at the bottom ran.
    if (state->temp_output_vec_numa && state->tmp_vec_numa) {
        for (int i = 0; i < g_numas; i++) {
            state->temp_output_vec_numa[i] = (void *)numa_alloc_onnode(dim * context_length * sizeof(f16), i);
            state->tmp_vec_numa[i] = (void *)numa_alloc_onnode(hidden_dim * context_length * sizeof(f16), i);
        }
    }

    // attention output — buffer is shared with the w1/w3 output, hence the
    // max(dim + 2*kv_dim, 2*hidden_dim) row width
    size_t attn_size = (dim + kv_dim + kv_dim >= 2 * hidden_dim ? dim + kv_dim + kv_dim : 2 * hidden_dim) * context_length;
    state->Attn_out = (f32*)numa_alloc_onnode(attn_size * sizeof(f32), 0);
    state->Attn_out_f16 = (f16*)numa_alloc_onnode(attn_size * sizeof(f16), 0);

    // FFN gate activations
    state->ffn_Gate = (f32 *)numa_alloc_onnode(hidden_dim * context_length * sizeof(f32), 0);

    // seq_qkv
    state->seq_qkv = (f16*)numa_alloc_onnode(seq_num * context_length * dim * sizeof(f16), 0);

    // f32 logits buffer.
    // BUGFIX: use int64_t — an int could overflow for large (tokens * n_vocab).
    int64_t output_tmp_size = (is_prompt ? seq_num : N_tokens) * n_vocab;
    state->output_f32 = (f32*)numa_alloc_onnode(output_tmp_size * sizeof(f32), 0);

    // diagnose any failed allocation (behavior kept: log, do not abort)
    if (!state->Token_Ori || !state->Token_Norm || !state->add_weight ||
        !state->temp_output_vec_numa || !state->tmp_vec_numa ||
        !state->Attn_out || !state->Attn_out_f16 || !state->ffn_Gate ||
        !state->seq_qkv || !state->output_f32) {
        fprintf(stderr, "Error: numa_alloc_onnode failed! (File: %s, Line: %d)\n", __FILE__, __LINE__);
    }

    return state;
}

/**
 * Release every buffer allocated by create_run_state and delete the state.
 * The size arguments passed to numa_free must mirror the sizes used at
 * allocation time, so the same expressions are recomputed here.
 *
 * @param state     state to destroy (null is tolerated and is a no-op)
 * @param para      same hyper-parameters used when the state was created
 * @param is_prompt must match the value passed to create_run_state
 * @param N_tokens  must match the value passed to create_run_state
 * @param seq_num   must match the value passed to create_run_state
 */
void destroy_run_state(MODEL_RUN_STATE* state, const MODEL_HYPE_PARA* para, bool is_prompt, int64_t N_tokens, int64_t seq_num) {
    if (state == nullptr) { // robustness: tolerate a null state
        return;
    }
    int64_t dim = para->dim;
    int64_t n_head = para->n_head;
    int64_t n_kv_heads = para->n_kv_heads;
    int64_t kv_dim = dim / n_head * n_kv_heads;
    int64_t hidden_dim = para->hidden_dim;
    int64_t context_length = para->context_length;
    int64_t n_vocab = para->n_vocab;

    // RMS-norm outputs
    if (state->Token_Ori) numa_free(state->Token_Ori, dim * context_length * sizeof(f32));
    if (state->Token_Norm) numa_free(state->Token_Norm, dim * context_length * sizeof(f32));

    // residual-add intermediate
    if (state->add_weight) numa_free(state->add_weight, dim * context_length * sizeof(f32));

    // per-NUMA-node quantized activation mirrors
    if (state->temp_output_vec_numa) {
        for (int i = 0; i < g_numas; i++) {
            if (state->temp_output_vec_numa[i]) numa_free(state->temp_output_vec_numa[i], dim * context_length * sizeof(f16));
        }
        numa_free(state->temp_output_vec_numa, g_numas * sizeof(void*));
    }
    if (state->tmp_vec_numa) {
        for (int i = 0; i < g_numas; i++) {
            if (state->tmp_vec_numa[i]) numa_free(state->tmp_vec_numa[i], hidden_dim * context_length * sizeof(f16));
        }
        numa_free(state->tmp_vec_numa, g_numas * sizeof(void*));
    }

    // attention output (same max(dim + 2*kv_dim, 2*hidden_dim) sizing as the alloc)
    size_t attn_size = (dim + kv_dim + kv_dim >= 2 * hidden_dim ? dim + kv_dim + kv_dim : 2 * hidden_dim) * context_length;
    if (state->Attn_out) numa_free(state->Attn_out, attn_size * sizeof(f32));
    if (state->Attn_out_f16) numa_free(state->Attn_out_f16, attn_size * sizeof(f16));

    // FFN gate activations
    if (state->ffn_Gate) numa_free(state->ffn_Gate, hidden_dim * context_length * sizeof(f32));

    // seq_qkv
    if (state->seq_qkv) numa_free(state->seq_qkv, seq_num * context_length * dim * sizeof(f16));

    // f32 logits buffer.
    // BUGFIX: use int64_t — an int could overflow for large (tokens * n_vocab),
    // producing a free size that no longer matches the allocation.
    int64_t output_tmp_size = (is_prompt ? seq_num : N_tokens) * n_vocab;
    if (state->output_f32) numa_free(state->output_f32, output_tmp_size * sizeof(f32));

    delete state;
}

/* Torch-facing entry point: pin CPU affinity, unpack the raw buffers from the
 * incoming tensors, build a per-call run state, execute one forward pass, and
 * tear the scratch state down again. */
void get_next_token_for_torch(
    torch::Tensor model_output,   // WEIGHT.token_embedding
    torch::Tensor hidden_stats,

    bool is_prompt,
    torch::Tensor block_tables,
    torch::Tensor seq_lens,
    torch::Tensor& slot_mapping,
    torch::Tensor positions,
    std::vector<torch::Tensor> kv_caches,
    int64_t block_size,
    int64_t N_tokens)
{
    get_affinity_cpus(cpu_ids);

    // raw views into the torch tensors (no ownership taken)
    void* out_buf = static_cast<void*>(model_output.data_ptr());
    void* hidden_buf = static_cast<void*>(hidden_stats.data_ptr());
    int64_t* pos_buf = static_cast<int64_t*>(positions.data_ptr());

    int64_t num_seqs = seq_lens.size(0);

    // scratch buffers live only for the duration of this call
    MODEL_RUN_STATE* run_state = create_run_state(&g_pstModelHypePara, is_prompt, N_tokens, num_seqs);

    get_next_token(out_buf,
        &g_pstModelHypePara, &g_pstWeight, run_state,
        is_prompt,
        block_tables,
        seq_lens,
        slot_mapping,
        hidden_buf,
        pos_buf,
        kv_caches,
        block_size,
        N_tokens
    );

    destroy_run_state(run_state, &g_pstModelHypePara, is_prompt, N_tokens, num_seqs);
}

