| |
| |
| |
|
|
| |
| #include "kai_matmul_clamp_f32_qsi8d32p_qsi4c32p_interface.h" |
| #include "kai_matmul_clamp_f32_qai8dxp_qsi8cxp_interface.h" |
| #include "kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod.h" |
| #include "kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod.h" |
| #include "kai_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod.h" |
| #include "kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm.h" |
| #include "kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot.h" |
| #include "kai_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa.h" |
| #include "kai_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa.h" |
| #include "kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot.h" |
| #include "kai_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod.h" |
| #include "kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod.h" |
| #include "kai_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod.h" |
| #include "kai_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm.h" |
| #include "kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm.h" |
| #include "kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod.h" |
| #include "kai_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa.h" |
|
|
| #include "kai_lhs_pack_bf16p2vlx2_f32_sme.h" |
| #include "kai_lhs_quant_pack_qsi8d32p_f32.h" |
| #include "kai_lhs_quant_pack_qsi8d32p4x8sb_f32_neon.h" |
| #include "kai_lhs_quant_pack_qsi8d32p_f32_neon.h" |
| #include "kai_lhs_quant_pack_qai8dxp_f32.h" |
|
|
| #include "kai_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme.h" |
| #include "kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0.h" |
| #include "kai_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon.h" |
| #include "kai_rhs_pack_nxk_qsi8cxp_qsi8cx_neon.h" |
| #include "kai_lhs_pack_f16pmrx2_f32_neon.h" |
|
|
| #include "kai_common.h" |
|
|
| #include "simd-mappings.h" |
|
|
| #define GGML_COMMON_DECL_CPP |
| #include "ggml-common.h" |
|
|
| #include "kernels.h" |
|
|
// Element count of a statically-sized array. Only valid on true arrays,
// not on pointers or decayed array parameters.
#define NELEMS(x) (sizeof(x) / sizeof(*x))
|
|
// Adapter: forwards all three arguments to a KleidiAI packed-offset getter
// whose native signature already takes three size_t parameters.
template<size_t(*Fn)(size_t,size_t,size_t)>
static inline size_t kernel_offs_fn3(size_t idx, size_t k, size_t bl) {
    return Fn(idx, k, bl);
}
|
|
// Adapter: drops the trailing argument of the common offset signature for
// KleidiAI getters that only take two parameters (no block length).
template<size_t(*Fn)(size_t,size_t)>
static inline size_t kernel_offs_fn2(size_t idx, size_t k, size_t /*unused*/) {
    return Fn(idx, k);
}
|
|
// Adapter: invokes a block-quantized matmul micro-kernel that consumes all
// eleven arguments of the common run signature; the opaque destination
// pointer is narrowed to float* as the kernel requires.
template<void(*Fn)(size_t,size_t,size_t,size_t,const void*,const void*,float*,size_t,size_t,float,float)>
static inline void kernel_run_fn11(size_t m, size_t n, size_t k, size_t bl,
                                   const void* lhs_p, const void* rhs_p, void* dst,
                                   size_t stride_row, size_t stride_col,
                                   float vmin, float vmax) {
    float * dst_f32 = static_cast<float*>(dst);
    Fn(m, n, k, bl, lhs_p, rhs_p, dst_f32, stride_row, stride_col, vmin, vmax);
}
|
|
// Adapter: invokes a micro-kernel whose native signature has no block-length
// parameter; the 'bl' slot of the common run signature is ignored.
template<void(*Fn)(size_t,size_t,size_t,const void*,const void*,void*,size_t,size_t,float,float)>
static inline void kernel_run_fn10(size_t m, size_t n, size_t k, size_t /*bl*/,
                                   const void* lhs_p, const void* rhs_p, void* dst,
                                   size_t stride_row, size_t stride_col,
                                   float vmin, float vmax) {
    Fn(m, n, k, lhs_p, rhs_p, dst, stride_row, stride_col, vmin, vmax);
}
|
|
// Adapter: like kernel_run_fn10 (no block length), but the kernel expects a
// typed float* destination, so the opaque pointer is narrowed here.
template<void(*Fn)(size_t,size_t,size_t,const void*,const void*,float*,size_t,size_t,float,float)>
static inline void kernel_run_float_fn10(size_t m, size_t n, size_t k, size_t /*bl*/,
                                         const void* lhs_p, const void* rhs_p, void* dst,
                                         size_t stride_row, size_t stride_col,
                                         float vmin, float vmax) {
    float * dst_f32 = static_cast<float*>(dst);
    Fn(m, n, k, lhs_p, rhs_p, dst_f32, stride_row, stride_col, vmin, vmax);
}
|
|
// Adapter: LHS packed-size getter that uses all six arguments, including the
// block length.
template<size_t(*Fn)(size_t,size_t,size_t,size_t,size_t,size_t)>
static inline size_t lhs_ps_fn6(size_t rows, size_t depth, size_t blk, size_t mr, size_t kr, size_t sr) {
    return Fn(rows, depth, blk, mr, kr, sr);
}
|
|
// Adapter: LHS packed-size getter without a block-length parameter; the 'bl'
// slot of the common signature is ignored.
template<size_t(*Fn)(size_t,size_t,size_t,size_t,size_t)>
static inline size_t lhs_ps_fn5(size_t rows, size_t depth, size_t /*bl*/, size_t mr, size_t kr, size_t sr) {
    return Fn(rows, depth, mr, kr, sr);
}
|
|
// Adapter: LHS packed-offset getter that uses all six arguments, including
// the block length.
template<size_t(*Fn)(size_t,size_t,size_t,size_t,size_t,size_t)>
static inline size_t lhs_offs_fn6(size_t row0, size_t depth, size_t blk, size_t mr, size_t kr, size_t sr) {
    return Fn(row0, depth, blk, mr, kr, sr);
}
|
|
// Adapter: LHS packed-offset getter without a block-length parameter; the
// 'bl' slot of the common signature is ignored.
template<size_t(*Fn)(size_t,size_t,size_t,size_t,size_t)>
static inline size_t lhs_offs_fn5(size_t row0, size_t depth, size_t /*bl*/, size_t mr, size_t kr, size_t sr) {
    return Fn(row0, depth, mr, kr, sr);
}
|
|
// Adapter: LHS quantize-and-pack routine that takes a block length and a
// typed float* source; the opaque source pointer is narrowed here.
template<void(*Fn)(size_t,size_t,size_t,size_t,size_t,size_t,size_t,const float*,size_t,void*)>
static inline void lhs_pack_float_fn10(size_t rows, size_t depth, size_t blk, size_t mr, size_t kr, size_t sr,
                                       size_t row0, const void* src, size_t src_stride, void* dst) {
    const float * src_f32 = static_cast<const float*>(src);
    Fn(rows, depth, blk, mr, kr, sr, row0, src_f32, src_stride, dst);
}
|
|
// Adapter: LHS pack routine with a block length and an untyped source;
// all ten arguments are forwarded unchanged.
template<void(*Fn)(size_t,size_t,size_t,size_t,size_t,size_t,size_t,const void*,size_t,void*)>
static inline void lhs_pack_void_fn10(size_t rows, size_t depth, size_t blk, size_t mr, size_t kr, size_t sr,
                                      size_t row0, const void* src, size_t src_stride, void* dst) {
    Fn(rows, depth, blk, mr, kr, sr, row0, src, src_stride, dst);
}
|
|
// Adapter: LHS pack routine without a block-length parameter and with an
// untyped source; the 'bl' slot of the common signature is ignored.
template<void(*Fn)(size_t,size_t,size_t,size_t,size_t,size_t,const void*,size_t,void*)>
static inline void lhs_pack_void_fn9(size_t rows, size_t depth, size_t /*bl*/, size_t mr, size_t kr, size_t sr,
                                     size_t row0, const void* src, size_t src_stride, void* dst) {
    Fn(rows, depth, mr, kr, sr, row0, src, src_stride, dst);
}
|
|
// Adapter: LHS pack routine without a block-length parameter that expects a
// typed float* source; narrows the opaque source pointer and drops 'bl'.
template<void(*Fn)(size_t,size_t,size_t,size_t,size_t,size_t,const float*,size_t,void*)>
static inline void lhs_pack_float_fn9_no_bl(size_t rows, size_t depth, size_t /*bl*/, size_t mr, size_t kr, size_t sr,
                                            size_t row0, const void * src, size_t src_stride, void * dst) {
    const float * src_f32 = static_cast<const float*>(src);
    Fn(rows, depth, mr, kr, sr, row0, src_f32, src_stride, dst);
}
|
|
// Adapter: RHS packed-size getter that uses all five arguments, including
// the block length.
template<size_t(*Fn)(size_t,size_t,size_t,size_t,size_t)>
static inline size_t rhs_ps_fn5(size_t cols, size_t depth, size_t nr, size_t kr, size_t blk) {
    return Fn(cols, depth, nr, kr, blk);
}
|
|
// Adapter: RHS packed-size getter that only depends on (n, k); the
// nr/kr/bl slots of the common signature are ignored.
template<size_t(*Fn)(size_t,size_t)>
static inline size_t rhs_ps_fn2(size_t cols, size_t depth, size_t /*nr*/, size_t /*kr*/, size_t /*bl*/) {
    return Fn(cols, depth);
}
|
|
// Adapter: RHS packed-stride getter that uses all four arguments, including
// the block length.
template<size_t(*Fn)(size_t,size_t,size_t,size_t)>
static inline size_t rhs_stride_fn4(size_t depth, size_t nr, size_t kr, size_t blk) {
    return Fn(depth, nr, kr, blk);
}
|
|
// Adapter: RHS packed-stride getter that only depends on k; the nr/kr/bl
// slots of the common signature are ignored.
template<size_t(*Fn)(size_t)>
static inline size_t rhs_stride_fn1(size_t depth, size_t /*nr*/, size_t /*kr*/, size_t /*bl*/) {
    return Fn(depth);
}
|
|
// Adapter: int4 RHS pack routine (qs4cxs1s0 params). The common signature's
// rhs_stride and scale slots are unused by this kernel family; the raw
// buffers are narrowed to the pointer types the routine expects.
template<void(*Fn)(size_t,size_t,size_t,size_t,size_t,size_t,size_t,const uint8_t*,const float*,void*,size_t,const struct kai_rhs_pack_qs4cxs1s0_param*)>
static inline void rhs_pack_fn12(size_t groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t blk,
                                 size_t /*rhs_stride*/, const void* rhs, const void* bias, const void* /*scale*/,
                                 void* dst, size_t extra_bytes, const void* params) {
    const uint8_t * rhs_u8   = static_cast<const uint8_t*>(rhs);
    const float   * bias_f32 = static_cast<const float*>(bias);
    const kai_rhs_pack_qs4cxs1s0_param * p = static_cast<const kai_rhs_pack_qs4cxs1s0_param*>(params);
    Fn(groups, n, k, nr, kr, sr, blk, rhs_u8, bias_f32, dst, extra_bytes, p);
}
|
|
// Adapter: int8 RHS pack routine (qsi8cx params) that also consumes a scale
// buffer. The common signature's bl and rhs_stride slots are unused here;
// raw buffers are narrowed to the pointer types the routine expects.
template<void(*Fn)(size_t,size_t,size_t,size_t,size_t,size_t,const int8_t*,const float*,const float*,void*,size_t,const struct kai_rhs_pack_qsi8cx_params*)>
static inline void rhs_pack_scale_fn12(size_t groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t /*bl*/,
                                       size_t /*rhs_stride*/, const void* rhs, const void* bias, const void* scale,
                                       void* dst, size_t extra_bytes, const void* params) {
    const int8_t * rhs_i8    = static_cast<const int8_t*>(rhs);
    const float  * bias_f32  = static_cast<const float*>(bias);
    const float  * scale_f32 = static_cast<const float*>(scale);
    const kai_rhs_pack_qsi8cx_params * p = static_cast<const kai_rhs_pack_qsi8cx_params*>(params);
    Fn(groups, n, k, nr, kr, sr, rhs_i8, bias_f32, scale_f32, dst, extra_bytes, p);
}
|
|
// Adapter: fully-opaque RHS pack routine that takes an explicit rhs_stride;
// only the common signature's bl slot is dropped, everything else forwards.
template<void(*Fn)(size_t,size_t,size_t,size_t,size_t,size_t,size_t,const void*,const void*,const void*,void*,size_t,const void*)>
static inline void rhs_pack_fn13(size_t groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t /*bl*/,
                                 size_t rhs_stride, const void* rhs, const void* bias, const void* scale,
                                 void* dst, size_t extra_bytes, const void* params) {
    Fn(groups, n, k, nr, kr, sr, rhs_stride, rhs, bias, scale, dst, extra_bytes, params);
}
|
|
// Layout constants for the packed int4 formats handled below.
static const size_t INT4_PER_BYTE   = 2;  // two 4-bit values per byte
static const size_t INT4_BITS       = 4;  // bit width of one int4 value
static const int    Q4_0_ZERO_POINT = 8;  // Q4_0 stores unsigned nibbles biased by 8
static const size_t INT4_PER_UINT16 = 4;  // four 4-bit values per uint16_t
|
|
// Dequantize one row of a qsi4c32pscalef16-packed RHS buffer into 'out'
// (nc floats).  Rows are packed in groups of nr_pack; within a group, each
// bl-wide block stores nr_pack scales (num_bytes_multiplier bytes each,
// read here as f16) followed by interleaved int4 segments of kr values.
// NOTE(review): the exact packed layout is assumed to match
// kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0 — confirm against KleidiAI.
static void dequantize_row_qsi4c32pscalef16(
    const void *packed_data,
    int32_t row_idx,
    int64_t nc,
    float *out,
    size_t nr_pack,
    size_t packed_row_stride,
    size_t kr,
    size_t bl,
    size_t num_bytes_multiplier
) {
    // Locate the group holding this row, and the row's slot inside it.
    size_t group_idx = row_idx / nr_pack;
    size_t row_in_group = row_idx % nr_pack;
    const uint8_t *packed_group = (const uint8_t *)packed_data + group_idx * packed_row_stride;
    size_t num_blocks = nc / bl;
    const uint8_t *block_ptr = packed_group;


    for (size_t b = 0; b < num_blocks; ++b) {
        // Per-row f16 scale stored at the head of each block.
        uint16_t scale_f16 = *((const uint16_t *)(block_ptr + row_in_group * num_bytes_multiplier));
        float scale = GGML_CPU_FP16_TO_FP32(scale_f16);


        // Quantized data follows the nr_pack scales, split into segments of
        // kr values (kr/2 bytes) interleaved across the nr_pack rows.
        const uint8_t *segment_ptr = block_ptr + nr_pack * num_bytes_multiplier;
        size_t num_segments = bl / kr;
        size_t num_bytes_per_segment = kr / INT4_PER_BYTE;


        for (size_t s = 0; s < num_segments; ++s) {
            const uint8_t *seg_base = segment_ptr + s * nr_pack * num_bytes_per_segment;
            const uint8_t *qbytes = seg_base + row_in_group * num_bytes_per_segment;
            for (size_t k = 0; k < num_bytes_per_segment; ++k) {
                // ^ 0x88 flips the top bit of both nibbles (undoes the
                // signed<->unsigned transform applied at pack time), then
                // each nibble is re-biased by the Q4_0 zero point.
                uint8_t byte = qbytes[k] ^ 0x88;
                int x0 = (byte & 0x0F) - Q4_0_ZERO_POINT;
                int x1 = (byte >> INT4_BITS) - Q4_0_ZERO_POINT;
                // Low nibbles fill the first half of the block, high nibbles
                // the second half (s16s0 ordering).
                out[b * bl + s * num_bytes_per_segment + k] = x0 * scale;
                out[b * bl + s * num_bytes_per_segment + k + bl/2] = x1 * scale;
            }
        }
        // Advance past this block: scales + all rows' segment data.
        block_ptr += nr_pack * num_bytes_multiplier + num_segments * nr_pack * num_bytes_per_segment;
    }
}
|
|
| static void dequantize_row_qsi4c32ps1s0scalef16( |
| const void *packed_data, |
| int32_t row_idx, |
| int64_t k, |
| float *out, |
| size_t nr, |
| size_t packed_row_stride, |
| size_t kr, |
| size_t bl, |
| size_t num_bytes_multiplier |
| ) { |
| const size_t num_blocks = k / bl; |
| const size_t bl4 = bl / INT4_PER_UINT16; |
|
|
| size_t group_idx = row_idx / nr; |
| size_t row_in_group = row_idx % nr; |
|
|
| const uint8_t *packed_group = (const uint8_t *)packed_data + group_idx * packed_row_stride; |
| const uint16_t *qdata = (const uint16_t *)packed_group; |
| const uint16_t *scales = (const uint16_t *)(packed_group + packed_row_stride - (nr * num_blocks * num_bytes_multiplier)); |
|
|
| for (size_t block_idx = 0; block_idx < num_blocks; ++block_idx) { |
| uint16_t scale_f16 = scales[row_in_group + block_idx * nr]; |
| float scale = GGML_CPU_FP16_TO_FP32(scale_f16); |
|
|
| for (size_t bl4_idx = 0; bl4_idx < bl4; ++bl4_idx) { |
| uint16_t q = qdata[(block_idx * bl4 + bl4_idx) * nr + row_in_group]; |
|
|
| for (size_t qidx = 0; qidx < INT4_PER_UINT16; ++qidx) { |
| int v = ((q >> (qidx * 4)) & 0xF) - Q4_0_ZERO_POINT; |
| out[block_idx * bl + bl4_idx * INT4_BITS + qidx] = v * scale; |
| } |
| } |
| } |
| GGML_UNUSED(kr); |
| } |
|
|
| static void dequantize_row_qsi8cxp( |
| const void *packed_data, |
| int32_t row_idx, |
| int64_t k, |
| float *out, |
| size_t nr, |
| size_t packed_row_stride, |
| size_t kr, |
| size_t bl, |
| size_t num_bytes_multiplier |
| ) { |
| GGML_UNUSED(bl); |
| GGML_UNUSED(num_bytes_multiplier); |
|
|
| const size_t k_internal = ((size_t) k + QK8_0 - 1) / QK8_0 * QK8_0; |
| const size_t group_idx = row_idx / nr; |
| const size_t row_in_group = row_idx % nr; |
|
|
| const uint8_t * group_ptr = static_cast<const uint8_t *>(packed_data) + group_idx * packed_row_stride; |
| const int8_t * data_base = reinterpret_cast<const int8_t *>(group_ptr); |
|
|
| const size_t num_blocks = k_internal / kr; |
|
|
| for (size_t block = 0; block < num_blocks; ++block) { |
| const int8_t * block_ptr = data_base + (block * nr + row_in_group) * kr; |
| for (size_t i = 0; i < kr; ++i) { |
| const size_t k_idx = block * kr + i; |
| if (k_idx < (size_t) k) { |
| out[k_idx] = static_cast<float>(block_ptr[i]); |
| } |
| } |
| } |
|
|
| const uint8_t * sums_ptr = group_ptr + nr * k_internal; |
| GGML_UNUSED(sums_ptr); |
|
|
| const float * scale_ptr = reinterpret_cast<const float *>(sums_ptr + nr * sizeof(int32_t)); |
| const float scale = scale_ptr[row_in_group]; |
|
|
| if (scale == 0.0f) { |
| for (size_t i = 0; i < (size_t) k; ++i) { |
| out[i] = 0.0f; |
| } |
| return; |
| } |
|
|
| for (size_t i = 0; i < (size_t) k; ++i) { |
| out[i] *= scale; |
| } |
| } |
|
|
// Table of KleidiAI kernel bundles, selected at runtime against the CPU's
// feature set.  Each entry groups: a GEMM kernel, its LHS packing routines,
// a GEMV kernel, its LHS packing routines, the RHS packing routines, the
// required CPU features, and the ggml tensor types the entry handles
// (field names/order defined in kernels.h — confirm there).
// The list is terminated by an empty sentinel entry.
static ggml_kleidiai_kernels gemm_gemv_kernels[] = {
#if defined(__ARM_FEATURE_SME)
    {
        // GEMM kernel: f16-packed LHS x 4-bit blockwise RHS (SME2 MOPA)
        {
            kai_get_m_step_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa,
            kai_get_n_step_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa,
            kai_get_mr_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa,
            kai_get_nr_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa,
            kai_get_kr_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa,
            kai_get_sr_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa,
            kai_get_dst_offset_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa,
            kai_get_dst_size_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_f16p1vlx2_qsi4c32p4vlx2_1vlx4vl_sme2_mopa>,
        },
        // GEMM LHS packing: f32 -> f16 packed
        {
            kai_get_lhs_offset_lhs_pack_f16pmrx2_f32_neon,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_pack_f16pmrx2_f32_neon>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_pack_f16pmrx2_f32_neon>,
            &lhs_pack_void_fn10<kai_run_lhs_pack_f16pmrx2_f32_neon>,
        },
        // GEMV kernel: int8 blockwise LHS x 4-bit blockwise RHS (SME2 SDOT)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot,
            kai_get_mr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot,
            kai_get_nr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot,
            kai_get_kr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot,
            kai_get_sr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot>,
        },
        // GEMV LHS packing: f32 -> int8 blockwise quantized
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32_neon,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32_neon>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32_neon>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p_f32_neon>,
        },
        // RHS packing: Q4_0 -> qsi4c32ps1s0scalef16, with matching dequantizer
        {
            kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon,
            dequantize_row_qsi4c32ps1s0scalef16,
            &rhs_ps_fn5<kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon>,
            &rhs_stride_fn4<kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon>,
            &rhs_pack_fn12<kai_run_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon>,
        },
        // Required CPU features, then the ggml tensor types handled
        CPU_FEATURE_SME,
        GGML_TYPE_F32,
        GGML_TYPE_Q4_0,
        GGML_TYPE_F32,
    },
    {
        // GEMM kernel: bf16-packed LHS x bf16-packed RHS (SME2 MOPA)
        {
            kai_get_m_step_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_n_step_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_mr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_nr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_kr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_sr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_dst_offset_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_dst_size_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            &kernel_offs_fn2<kai_get_lhs_packed_offset_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa>,
            &kernel_offs_fn2<kai_get_rhs_packed_offset_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa>,
            &kernel_run_fn10<kai_run_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa>,
        },
        // GEMM LHS packing: f32 -> bf16 packed
        {
            kai_get_lhs_offset_lhs_pack_bf16p2vlx2_f32_sme,
            &lhs_offs_fn5<kai_get_lhs_packed_offset_lhs_pack_bf16p2vlx2_f32_sme>,
            &lhs_ps_fn5<kai_get_lhs_packed_size_lhs_pack_bf16p2vlx2_f32_sme>,
            &lhs_pack_void_fn9<kai_run_lhs_pack_bf16p2vlx2_f32_sme>,
        },
        // GEMV slot: reuses the GEMM getters; offset/run hooks are null
        // (no dedicated GEMV kernel for this format)
        {
            kai_get_m_step_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_n_step_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_mr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_nr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_kr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_sr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_dst_offset_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            kai_get_dst_size_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa,
            nullptr,
            nullptr,
            nullptr,
        },
        // GEMV LHS packing: same as the GEMM LHS packing
        {
            kai_get_lhs_offset_lhs_pack_bf16p2vlx2_f32_sme,
            &lhs_offs_fn5<kai_get_lhs_packed_offset_lhs_pack_bf16p2vlx2_f32_sme>,
            &lhs_ps_fn5<kai_get_lhs_packed_size_lhs_pack_bf16p2vlx2_f32_sme>,
            &lhs_pack_void_fn9<kai_run_lhs_pack_bf16p2vlx2_f32_sme>,
        },
        // RHS packing: f32 -> bf16 packed; no dequantizer for this format
        {
            nullptr,
            nullptr,
            &rhs_ps_fn2<kai_get_rhs_packed_size_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme>,
            &rhs_stride_fn1<kai_get_rhs_packed_stride_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme>,
            &rhs_pack_fn13<kai_run_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme>,
        },
        // Required CPU features, then the ggml tensor types handled
        CPU_FEATURE_SME,
        GGML_TYPE_F32,
        GGML_TYPE_F16,
        GGML_TYPE_F32,
    },
#endif
#if defined(__APPLE__)
#if defined(__ARM_FEATURE_DOTPROD)
    {
        // GEMM kernel: int8 blockwise LHS x 4-bit RHS (NEON dotprod, 16x4)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_mr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_nr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_kr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_sr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod>,
        },
        // GEMM LHS packing: f32 -> int8 blockwise quantized
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p_f32>,
        },
        // GEMV kernel: single-row variant (NEON dotprod, 1x4)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_mr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_nr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_kr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_sr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod>,
        },
        // GEMV LHS packing: same quantize-and-pack as GEMM
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p_f32>,
        },
        // RHS packing: Q4_0 -> qsi4c32pscalef16, with matching dequantizer
        {
            kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
            dequantize_row_qsi4c32pscalef16,
            &rhs_ps_fn5<kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_stride_fn4<kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_pack_fn12<kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
        },
        // Required CPU features, then the ggml tensor types handled
        CPU_FEATURE_DOTPROD,
        GGML_TYPE_F32,
        GGML_TYPE_Q4_0,
        GGML_TYPE_F32,
    },
#endif
#if defined(__ARM_FEATURE_MATMUL_INT8)
    {
        // GEMM kernel: int8 blockwise LHS x 4-bit RHS (NEON i8mm, 16x4)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_mr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_nr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_kr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_sr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm>,
        },
        // GEMM LHS packing: f32 -> int8 blockwise, 4x8 sub-block layout
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p4x8sb_f32_neon,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p4x8sb_f32_neon>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p4x8sb_f32_neon>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p4x8sb_f32_neon>,
        },
        // GEMV kernel: single-row variant (NEON dotprod, 1x4x32)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_mr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_nr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_kr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_sr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod>,
        },
        // GEMV LHS packing: f32 -> int8 blockwise quantized
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p_f32>,
        },
        // RHS packing: Q4_0 -> qsi4c32pscalef16, with matching dequantizer
        {
            kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
            dequantize_row_qsi4c32pscalef16,
            &rhs_ps_fn5<kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_stride_fn4<kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_pack_fn12<kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
        },
        // Required CPU features, then the ggml tensor types handled
        CPU_FEATURE_I8MM,
        GGML_TYPE_F32,
        GGML_TYPE_Q4_0,
        GGML_TYPE_F32,
    },
#endif
#else
#if defined(__ARM_FEATURE_SVE)
    {
        // GEMM kernel: int8 blockwise LHS x 4-bit RHS (SVE i8mm, 16x8)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm,
            kai_get_mr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm,
            kai_get_nr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm,
            kai_get_kr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm,
            kai_get_sr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm>,
        },
        // GEMM LHS packing: f32 -> int8 blockwise, 4x8 sub-block layout
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p4x8sb_f32_neon,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p4x8sb_f32_neon>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p4x8sb_f32_neon>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p4x8sb_f32_neon>,
        },
        // GEMV kernel: single-row variant (SVE dotprod, 1x8)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod,
            kai_get_mr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod,
            kai_get_nr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod,
            kai_get_kr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod,
            kai_get_sr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod>,
        },
        // GEMV LHS packing: f32 -> int8 blockwise quantized
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p_f32>,
        },
        // RHS packing: Q4_0 -> qsi4c32pscalef16, with matching dequantizer
        {
            kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
            dequantize_row_qsi4c32pscalef16,
            &rhs_ps_fn5<kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_stride_fn4<kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_pack_fn12<kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
        },
        // Required CPU features, then the ggml tensor types handled
        CPU_FEATURE_SVE | CPU_FEATURE_I8MM | CPU_FEATURE_DOTPROD,
        GGML_TYPE_F32,
        GGML_TYPE_Q4_0,
        GGML_TYPE_F32,
    },
#endif
#if defined(__ARM_FEATURE_MATMUL_INT8)
    {
        // GEMM kernel: int8 blockwise LHS x 4-bit RHS (NEON i8mm, 16x4)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_mr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_nr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_kr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_sr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm>,
        },
        // GEMM LHS packing: f32 -> int8 blockwise, 4x8 sub-block layout
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p4x8sb_f32_neon,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p4x8sb_f32_neon>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p4x8sb_f32_neon>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p4x8sb_f32_neon>,
        },
        // GEMV kernel: single-row variant (NEON dotprod, 1x4x32)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_mr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_nr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_kr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_sr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod>,
        },
        // GEMV LHS packing: f32 -> int8 blockwise quantized
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p_f32>,
        },
        // RHS packing: Q4_0 -> qsi4c32pscalef16, with matching dequantizer
        {
            kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
            dequantize_row_qsi4c32pscalef16,
            &rhs_ps_fn5<kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_stride_fn4<kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_pack_fn12<kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
        },
        // Required CPU features, then the ggml tensor types handled
        CPU_FEATURE_I8MM,
        GGML_TYPE_F32,
        GGML_TYPE_Q4_0,
        GGML_TYPE_F32,
    },
#endif
#if defined(__ARM_FEATURE_DOTPROD)
    {
        // GEMM kernel: int8 blockwise LHS x 4-bit RHS (NEON dotprod, 16x4)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_mr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_nr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_kr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_sr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod>,
        },
        // GEMM LHS packing: f32 -> int8 blockwise quantized
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p_f32>,
        },
        // GEMV kernel: single-row variant (NEON dotprod, 1x4)
        {
            kai_get_m_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_n_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_mr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_nr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_kr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_sr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod,
            &kernel_offs_fn3<kai_get_lhs_packed_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod>,
            &kernel_offs_fn3<kai_get_rhs_packed_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod>,
            &kernel_run_fn11<kai_run_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod>,
        },
        // GEMV LHS packing: f32 -> int8 blockwise quantized
        {
            kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32,
            &lhs_offs_fn6<kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_ps_fn6<kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32>,
            &lhs_pack_float_fn10<kai_run_lhs_quant_pack_qsi8d32p_f32>,
        },
        // RHS packing: Q4_0 -> qsi4c32pscalef16, with matching dequantizer
        {
            kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0,
            dequantize_row_qsi4c32pscalef16,
            &rhs_ps_fn5<kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_stride_fn4<kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
            &rhs_pack_fn12<kai_run_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0>,
        },
        // Required CPU features, then the ggml tensor types handled
        CPU_FEATURE_DOTPROD,
        GGML_TYPE_F32,
        GGML_TYPE_Q4_0,
        GGML_TYPE_F32,
    },
#endif
#endif
    // Sentinel: empty entry terminates the table
    { }
};
|
|
// Kernel-description table for F32 = F32 x Q8_0 matrix multiplication.
// Each entry bundles a GEMM micro-kernel, a GEMV micro-kernel, the LHS
// packing routine used by each, and a shared RHS packing routine, plus the
// CPU feature the kernels require. Entries are ordered from most to least
// capable feature set so the selection helpers below pick the best kernel
// the current CPU supports. The table is terminated by an empty sentinel
// entry, which the NELEMS(...) - 1 loops in the selectors skip.
static ggml_kleidiai_kernels gemm_gemv_kernels_q8[] = {
#if defined(__ARM_FEATURE_SME)
    {
        // GEMM micro-kernel: SME2 outer-product accumulate (mopa), 1VLx4 LHS / 4VLx4 RHS tiles.
        {
            kai_get_m_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa,
            kai_get_n_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa,
            kai_get_mr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa,
            kai_get_nr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa,
            kai_get_kr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa,
            kai_get_sr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa,
            kai_get_dst_offset_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa,
            kai_get_dst_size_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa,
            &kernel_offs_fn2<kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa>,
            &kernel_offs_fn2<kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa>,
            &kernel_run_float_fn10<kai_run_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa>,
        },
        // LHS packing for the GEMM kernel: dynamic 8-bit quantization of F32 activations (qai8dxp).
        {
            kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32,
            &lhs_offs_fn5<kai_get_lhs_packed_offset_lhs_quant_pack_qai8dxp_f32>,
            &lhs_ps_fn5<kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f32>,
            &lhs_pack_float_fn9_no_bl<kai_run_lhs_quant_pack_qai8dxp_f32>,
        },
        // GEMV micro-kernel: SME2 dot-product variant for single-row (M=1) LHS.
        {
            kai_get_m_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot,
            kai_get_n_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot,
            kai_get_mr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot,
            kai_get_nr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot,
            kai_get_kr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot,
            kai_get_sr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot,
            kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot,
            kai_get_dst_size_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot,
            &kernel_offs_fn2<kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot>,
            &kernel_offs_fn2<kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot>,
            &kernel_run_float_fn10<kai_run_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot>,
        },
        // LHS packing for the GEMV kernel (same qai8dxp quantization scheme).
        {
            kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32,
            &lhs_offs_fn5<kai_get_lhs_packed_offset_lhs_quant_pack_qai8dxp_f32>,
            &lhs_ps_fn5<kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f32>,
            &lhs_pack_float_fn9_no_bl<kai_run_lhs_quant_pack_qai8dxp_f32>,
        },
        // RHS packing shared by both kernels: per-channel int8 weights (qsi8cxp), NEON implementation.
        {
            kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon,
            dequantize_row_qsi8cxp,
            &rhs_ps_fn5<kai_get_rhs_packed_size_rhs_pack_nxk_qsi8cxp_qsi8cx_neon>,
            &rhs_stride_fn4<kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon>,
            &rhs_pack_scale_fn12<kai_run_rhs_pack_nxk_qsi8cxp_qsi8cx_neon>,
        },
        CPU_FEATURE_SME,
        // NOTE(review): trailing fields assumed to be lhs_type, rhs_type, op_type
        // (matching ggml_kleidiai_select_kernels's comparisons against src[1],
        // src[0] and the dst tensor) — confirm against the struct declaration.
        GGML_TYPE_F32,   // F32 activations (src[1])
        GGML_TYPE_Q8_0,  // Q8_0 weights    (src[0])
        GGML_TYPE_F32,   // F32 output
    },
#endif
#if defined(__ARM_FEATURE_MATMUL_INT8)
    {
        // GEMM micro-kernel: NEON i8mm, 16x4 output tile.
        {
            kai_get_m_step_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm,
            kai_get_n_step_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm,
            kai_get_mr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm,
            kai_get_nr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm,
            kai_get_kr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm,
            kai_get_sr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm,
            kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm,
            kai_get_dst_size_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm,
            &kernel_offs_fn2<kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm>,
            &kernel_offs_fn2<kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm>,
            &kernel_run_float_fn10<kai_run_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm>,
        },
        // LHS packing for the GEMM kernel.
        {
            kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32,
            &lhs_offs_fn5<kai_get_lhs_packed_offset_lhs_quant_pack_qai8dxp_f32>,
            &lhs_ps_fn5<kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f32>,
            &lhs_pack_float_fn9_no_bl<kai_run_lhs_quant_pack_qai8dxp_f32>,
        },
        // GEMV micro-kernel: NEON dotprod, single-row LHS, 1x4 output tile.
        {
            kai_get_m_step_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod,
            kai_get_n_step_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod,
            kai_get_mr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod,
            kai_get_nr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod,
            kai_get_kr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod,
            kai_get_sr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod,
            &kernel_offs_fn2<kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod>,
            &kernel_offs_fn2<kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod>,
            &kernel_run_float_fn10<kai_run_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod>,
        },
        // LHS packing for the GEMV kernel.
        {
            kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32,
            &lhs_offs_fn5<kai_get_lhs_packed_offset_lhs_quant_pack_qai8dxp_f32>,
            &lhs_ps_fn5<kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f32>,
            &lhs_pack_float_fn9_no_bl<kai_run_lhs_quant_pack_qai8dxp_f32>,
        },
        // RHS packing shared by both kernels (same routine as the SME entry).
        {
            kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon,
            dequantize_row_qsi8cxp,
            &rhs_ps_fn5<kai_get_rhs_packed_size_rhs_pack_nxk_qsi8cxp_qsi8cx_neon>,
            &rhs_stride_fn4<kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon>,
            &rhs_pack_scale_fn12<kai_run_rhs_pack_nxk_qsi8cxp_qsi8cx_neon>,
        },
        CPU_FEATURE_I8MM,
        GGML_TYPE_F32,   // F32 activations (src[1])
        GGML_TYPE_Q8_0,  // Q8_0 weights    (src[0])
        GGML_TYPE_F32,   // F32 output
    },
#endif
#if defined(__ARM_FEATURE_DOTPROD)
    {
        // GEMM micro-kernel: NEON dotprod fallback, 16x4 output tile.
        {
            kai_get_m_step_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod,
            kai_get_n_step_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod,
            kai_get_mr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod,
            kai_get_nr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod,
            kai_get_kr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod,
            kai_get_sr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod,
            &kernel_offs_fn2<kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod>,
            &kernel_offs_fn2<kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod>,
            &kernel_run_float_fn10<kai_run_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod>,
        },
        // LHS packing for the GEMM kernel.
        {
            kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32,
            &lhs_offs_fn5<kai_get_lhs_packed_offset_lhs_quant_pack_qai8dxp_f32>,
            &lhs_ps_fn5<kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f32>,
            &lhs_pack_float_fn9_no_bl<kai_run_lhs_quant_pack_qai8dxp_f32>,
        },
        // GEMV micro-kernel: NEON dotprod, single-row LHS, 1x4 output tile.
        {
            kai_get_m_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod,
            kai_get_n_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod,
            kai_get_mr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod,
            kai_get_nr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod,
            kai_get_kr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod,
            kai_get_sr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod,
            kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod,
            kai_get_dst_size_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod,
            &kernel_offs_fn2<kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod>,
            &kernel_offs_fn2<kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod>,
            &kernel_run_float_fn10<kai_run_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod>,
        },
        // LHS packing for the GEMV kernel.
        {
            kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32,
            &lhs_offs_fn5<kai_get_lhs_packed_offset_lhs_quant_pack_qai8dxp_f32>,
            &lhs_ps_fn5<kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f32>,
            &lhs_pack_float_fn9_no_bl<kai_run_lhs_quant_pack_qai8dxp_f32>,
        },
        // RHS packing shared by both kernels.
        {
            kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon,
            dequantize_row_qsi8cxp,
            &rhs_ps_fn5<kai_get_rhs_packed_size_rhs_pack_nxk_qsi8cxp_qsi8cx_neon>,
            &rhs_stride_fn4<kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon>,
            &rhs_pack_scale_fn12<kai_run_rhs_pack_nxk_qsi8cxp_qsi8cx_neon>,
        },
        CPU_FEATURE_DOTPROD,
        GGML_TYPE_F32,   // F32 activations (src[1])
        GGML_TYPE_Q8_0,  // Q8_0 weights    (src[0])
        GGML_TYPE_F32,   // F32 output
    },
#endif
    // Sentinel terminator; the selector loops iterate NELEMS(...) - 1 to skip it.
    { }
};
|
|
| ggml_kleidiai_kernels * ggml_kleidiai_select_kernels(cpu_feature cpu_features, const ggml_tensor * tensor) { |
| ggml_kleidiai_kernels * kernel = nullptr; |
|
|
| if (tensor->op == GGML_OP_MUL_MAT && tensor->src[0] != nullptr && tensor->src[1] != nullptr) { |
| #if defined(__ARM_FEATURE_SME) || \ |
| defined(__ARM_FEATURE_DOTPROD) || \ |
| defined(__ARM_FEATURE_MATMUL_INT8) || \ |
| defined(__ARM_FEATURE_SVE) |
| auto try_table = [&](auto & table) { |
| for (size_t i = 0; i < NELEMS(table) - 1; ++i) { |
| if ((cpu_features & table[i].required_cpu) == table[i].required_cpu && |
| table[i].lhs_type == tensor->src[1]->type && |
| table[i].rhs_type == tensor->src[0]->type && |
| table[i].op_type == tensor->type) { |
| kernel = &table[i]; |
| return true; |
| } |
| } |
| return false; |
| }; |
|
|
| if (tensor->src[0]->type == GGML_TYPE_Q8_0) { |
| try_table(gemm_gemv_kernels_q8); |
| } else { |
| try_table(gemm_gemv_kernels); |
| } |
| #else |
| GGML_UNUSED(gemm_gemv_kernels); |
| GGML_UNUSED(gemm_gemv_kernels_q8); |
| GGML_UNUSED(cpu_features); |
| #endif |
| } |
|
|
| return kernel; |
| } |
|
|
| ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q4_0(cpu_feature features) { |
| ggml_kleidiai_kernels * kernels = nullptr; |
|
|
| #if defined(__ARM_FEATURE_SME) || \ |
| defined(__ARM_FEATURE_DOTPROD) || \ |
| defined(__ARM_FEATURE_MATMUL_INT8) || \ |
| defined(__ARM_FEATURE_SVE) |
| for (size_t i = 0; i < NELEMS(gemm_gemv_kernels) - 1; ++i) { |
| if ((features & gemm_gemv_kernels[i].required_cpu) == gemm_gemv_kernels[i].required_cpu) { |
| kernels = &gemm_gemv_kernels[i]; |
| break; |
| } |
| } |
| #else |
| GGML_UNUSED(features); |
| #endif |
|
|
| return kernels; |
| } |
|
|
| ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q8_0(cpu_feature features) { |
| ggml_kleidiai_kernels * kernels = nullptr; |
|
|
| #if defined(__ARM_FEATURE_SME) || defined(__ARM_FEATURE_DOTPROD) || defined(__ARM_FEATURE_MATMUL_INT8) |
| for (size_t i = 0; i < NELEMS(gemm_gemv_kernels_q8) - 1; ++i) { |
| if ((features & gemm_gemv_kernels_q8[i].required_cpu) == gemm_gemv_kernels_q8[i].required_cpu) { |
| kernels = &gemm_gemv_kernels_q8[i]; |
| break; |
| } |
| } |
| #else |
| GGML_UNUSED(features); |
| #endif |
|
|
| return kernels; |
| } |
|
|