$assert DATATYPE in ["F32", "QC4", "QC8"]
$assert NR == 2
$assert MR % 2 == 0
#include <assert.h>

$if DATATYPE in ["QC8", "QC4"]:
  #include <smmintrin.h>
$else:
  #include <xmmintrin.h>

#include <xnnpack/gemm.h>
$if DATATYPE == "QC8": |
|
#include <xnnpack/unaligned.h> |
|
|
|
|
|
$if DATATYPE in ["QC8", "QC4"]: |
|
$ISA = {2: "sse2", 4: "sse41"}[SSE] |
|
$else: |
|
$ISA = "sse" |
|
$DATATYPE_SPEC = {"F32": "f32", "QC8": "f32_qc8w", "QC4": "f32_qc4w"}[DATATYPE] |
|
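// GEMM microkernel: computes up to ${MR} rows x ${NR} columns of
// C = A * B (+ bias), with K unrolled in groups of 4 ("c4" packed weights)
// and the results clamped to [min, max].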
void xnn_${DATATYPE_SPEC}_gemm_minmax_ukernel_${MR}x${NR}c4__${ISA}(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    $if DATATYPE == "F32":
      const float* restrict w,
    $else:
      const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    $if DATATYPE == "QC4":
      const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
    $else:
      const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
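
  // Set up per-row pointers into A and C. Rows beyond mr alias the previous
  // row, so the full ${MR}-row tile can be computed unconditionally; the
  // duplicated rows just recompute and overwrite identical values.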
  const float* a0 = a;
  float* c0 = c;
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
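
  // Iterate over output columns, ${NR} at a time. Each accumulator keeps 4
  // K-lanes per (row, column) pair and is seeded with the ${NR} bias values
  // that precede each group of packed weights.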
  do {
    __m128 vacc0x0c4 = _mm_load_ss(w);
    $for N in range(1, NR):
      $if DATATYPE == "F32":
        __m128 vacc0x${N}c4 = _mm_load_ss(w + ${N});
      $else:
        __m128 vacc0x${N}c4 = _mm_load_ss((const float*) w + ${N});
    $for M in range(1, MR):
      $for N in range(NR):
        __m128 vacc${M}x${N}c4 = vacc0x${N}c4;
    $if DATATYPE == "F32":
      w += ${NR};
    $else:
      w = (const float*) w + ${NR};
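
    // Main K loop: multiply-accumulate 4 elements of K per iteration.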
    size_t k = kc;
    for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
      $for M in range(MR):
        const __m128 va${M} = _mm_loadu_ps(a${M});
        a${M} += 4;
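
      // Load a ${NR}x4 block of weights. F32 weights are stored as raw
      // floats; quantized weights are loaded as signed bytes, sign-extended
      // to 32 bits, and converted to float here.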
$if DATATYPE == "F32": |
|
const __m128 vb0 = _mm_loadu_ps(w); |
|
$for N in range(1, NR): |
|
const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4}); |
|
w += ${NR * 4}; |
|
$else: |
|
$if SSE >= 4: |
|
const __m128i vbi0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32(w))); |
|
$for N in range(1, NR): |
|
const __m128i vbi${N} = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + ${N * 4}))); |
|
$for N in range(NR): |
|
const __m128 vb${N} = _mm_cvtepi32_ps(vbi${N}); |
|
$else: |
|
$for N in range(0, NR, 2): |
|
const __m128i vb${N}${N+1} = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + ${N * 4})); |
|
$for N in range(0, NR, 2): |
|
const __m128i vbw${N}${N+1} = _mm_unpacklo_epi8(vb${N}${N+1}, vb${N}${N+1}); |
|
const __m128 vb${N} = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw${N}${N+1}, vbw${N}${N+1}), 24)); |
|
const __m128 vb${N+1} = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw${N}${N+1}, vbw${N}${N+1}), 24)); |
|
w = (const int8_t*) w + ${NR * 4}; |
|
|
|
$for M in range(MR): |
|
$for N in range(NR): |
|
vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(va${M}, vb${N})); |
|
} |
|
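    // Remainder: kc is not a multiple of 4 floats. The A loads read past the
    // end of the row (the kernel is declared XNN_OOB_READS), so A lanes where
    // the zero-padded B is 0.0f are masked off before the multiply-add to
    // keep garbage lanes out of the accumulators.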
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        const __m128 va${M} = _mm_loadu_ps(a${M});
        a${M} = (const float*) ((uintptr_t) a${M} + k);

      $if DATATYPE == "F32":
        const __m128 vb0 = _mm_loadu_ps(w);
        $for N in range(1, NR):
          const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4});
        w += ${NR * 4};
      $else:
        $if SSE >= 4:
          const __m128i vbi0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32(w)));
          $for N in range(1, NR):
            const __m128i vbi${N} = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + ${N * 4})));
          $for N in range(NR):
            const __m128 vb${N} = _mm_cvtepi32_ps(vbi${N});
        $else:
          $for N in range(0, NR, 2):
            const __m128i vb${N}${N+1} = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + ${N * 4}));
          $for N in range(0, NR, 2):
            const __m128i vbw${N}${N+1} = _mm_unpacklo_epi8(vb${N}${N+1}, vb${N}${N+1});
            const __m128 vb${N} = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw${N}${N+1}, vbw${N}${N+1}), 24));
            const __m128 vb${N+1} = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw${N}${N+1}, vbw${N}${N+1}), 24));
        w = (const int8_t*) w + ${NR * 4};

      $for N in range(NR):
        const __m128 vmask${N} = _mm_cmpeq_ps(_mm_setzero_ps(), vb${N});

      $for M in range(MR):
        $for N in range(NR):
          vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(_mm_andnot_ps(vmask${N}, va${M}), vb${N}));
    }
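
    // Reduce the c4 accumulators: add the 4 K-lanes pairwise down to a single
    // sum per (row, column), packing two rows x two columns into one __m128.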
    $for M in range(MR):
      const __m128 vacc${M}x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc${M}x0c4, vacc${M}x1c4), _mm_unpackhi_ps(vacc${M}x0c4, vacc${M}x1c4));

    $for M in range(0, MR, 2):
      __m128 vacc${M}${M+1}x01 = _mm_add_ps(_mm_movelh_ps(vacc${M}x01c2, vacc${M+1}x01c2), _mm_movehl_ps(vacc${M+1}x01c2, vacc${M}x01c2));
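
    // For quantized (QC8/QC4) weights, apply the two per-channel
    // dequantization scales that follow the packed weights.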
$if DATATYPE in ["QC8", "QC4"]: |
|
const __m128 vscalex01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w)); |
|
const __m128 vscale2x01 = _mm_movelh_ps(vscalex01, vscalex01); |
|
w = (const float*) w + 2; |
|
$for M in range(0, MR, 2): |
|
vacc${M}${M+1}x01 = _mm_mul_ps(vacc${M}${M+1}x01, vscale2x01); |
|
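    // Clamp the results to [min, max] from the microkernel parameters.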
    const __m128 vmax = _mm_load_ps(params->sse.max);
    $for M in range(0, MR, 2):
      vacc${M}${M+1}x01 = _mm_min_ps(vacc${M}${M+1}x01, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    $for M in range(0, MR, 2):
      vacc${M}${M+1}x01 = _mm_max_ps(vacc${M}${M+1}x01, vmin);
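
    // Store the ${NR}-column tile: the low half of each vector holds the even
    // row, the high half the odd row. Advance C by cn_stride and rewind A by
    // kc for the next column block; with a single remaining column (nc == 1),
    // store one float per row instead.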
    if XNN_LIKELY(nc >= ${NR}) {
      $for M in reversed(range(0, MR, 2)):
        _mm_storel_pi((__m64*) c${M}, vacc${M}${M+1}x01);
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
        a${M} = (const float*) ((uintptr_t) a${M} - kc);
        _mm_storeh_pi((__m64*) c${M+1}, vacc${M}${M+1}x01);
        c${M+1} = (float*) ((uintptr_t) c${M+1} + cn_stride);
        a${M+1} = (const float*) ((uintptr_t) a${M+1} - kc);

      nc -= ${NR};
    } else {
      assert(nc == 1);
      $for M in reversed(range(0, MR, 2)):
        _mm_store_ss(c${M}, vacc${M}${M+1}x01);
        _mm_store_ss(c${M+1}, _mm_movehl_ps(vacc${M}${M+1}x01, vacc${M}${M+1}x01));

      nc = 0;
    }
  } while (nc != 0);
}