$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$SIMD_TILE = BATCH_TILE // 16
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>

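// Converts a batch of f32 elements to f16 (IEEE binary16), ${BATCH_TILE} elements per
// main-loop iteration, using AVX512 instructions at the Skylake-X (SKX) feature level.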
void xnn_f32_f16_vcvt_ukernel__avx512skx_x${BATCH_TILE}(
    size_t batch,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  uint16_t* o = (uint16_t*) output;
  $if BATCH_TILE > 16:
    // Main loop: convert ${BATCH_TILE} elements (${SIMD_TILE} vectors of 16 floats) per iteration.
    for (; batch >= ${BATCH_TILE} * sizeof(float); batch -= ${BATCH_TILE} * sizeof(float)) {
      const __m512 vf0 = _mm512_loadu_ps(input);
      $for N in range(1, SIMD_TILE):
        const __m512 vf${N} = _mm512_loadu_ps(input + ${N * 16});
      input += ${BATCH_TILE};

      _mm256_storeu_si256((__m256i*) o, _mm512_cvtps_ph(vf0, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT));
      $for N in range(1, SIMD_TILE):
        _mm256_storeu_si256((__m256i*) (o + ${N * 16}), _mm512_cvtps_ph(vf${N}, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT));
      o += ${BATCH_TILE};
    }
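  // Convert full tiles of 16 elements.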
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m512 vf = _mm512_loadu_ps(input);
    input += 16;

    _mm256_storeu_si256((__m256i*) o, _mm512_cvtps_ph(vf, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT));
    o += 16;
  }
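  // Remainder: the last 1-15 elements are converted with a masked load and a masked store.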
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));

    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vf = _mm512_maskz_loadu_ps(vmask, input);
    const __m256i vh = _mm512_cvtps_ph(vf, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT);
    _mm256_mask_storeu_epi16(o, vmask, vh);
  }
}