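// Template note: lines beginning with `$` are directives for XNNPACK's code
// generator, and ${...} expressions are expanded at generation time.
// BATCH_TILE is the generation-time parameter: the number of f16 elements
// processed per main-loop iteration, required by the asserts below to be a
// multiple of 8.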
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>

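// Compute HardSwish, hswish(x) = x * min(max(x + 3, 0), 6) / 6, on a batch of
// IEEE half-precision (f16) values. F16C only provides FP16<->FP32
// conversion, so the arithmetic runs in FP32 and intermediates are rounded
// back to FP16 after each step, keeping results consistent with native FP16
// arithmetic.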
void xnn_f16_vhswish_ukernel__f16c_x${BATCH_TILE}(
    size_t batch,
    const void* restrict input,
    void* restrict output,
    const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;

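  // sixth and three are FP32 vectors of 1/6 and 3.0; six is loaded as eight
  // 16-bit integer lanes (the FP16 encoding of 6.0, as implied by the integer
  // min below), so the clamp can operate directly on raw half-precision bits.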
  const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
  const __m256 vthree = _mm256_load_ps(params->avx.three);
  const __m128i vsix = _mm_load_si128((const __m128i*) params->avx.six);
  const __m128i vzero = _mm_setzero_si128();

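  // Main loop, emitted only when BATCH_TILE > 8: an unrolled pass over
  // BATCH_TILE elements per iteration.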
  $if BATCH_TILE > 8:
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
      __m256 vx${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
      $for N in range(8, BATCH_TILE, 8):
        __m256 vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + ${N})));
      i += ${BATCH_TILE};

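      // acc = x + 3, rounded to FP16; x is scaled by 1/6 and round-tripped
      // through FP16 so it carries no extra FP32 precision into the final
      // multiply.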
      $for N in range(0, BATCH_TILE, 8):
        __m128i vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_add_ps(vx${ABC[N:N+8]}, vthree), _MM_FROUND_TO_NEAREST_INT);
        vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx${ABC[N:N+8]}, vsixth), _MM_FROUND_TO_NEAREST_INT));

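      // relu6-style clamp on the raw FP16 bits: finite non-negative halfs
      // order the same as signed 16-bit integers, and negative halfs have the
      // sign bit set (negative as int16), so max with 0 followed by min with
      // the bit pattern of 6.0 computes min(max(acc, 0), 6).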
      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm_max_epi16(vacc${ABC[N:N+8]}, vzero);

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm_min_epi16(vacc${ABC[N:N+8]}, vsix);

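      // acc = min(max(x + 3, 0), 6) * (x / 6), rounded back to FP16.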
      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[N:N+8]}), vx${ABC[N:N+8]}), _MM_FROUND_TO_NEAREST_INT);

      _mm_storeu_si128((__m128i*) o, vacc${ABC[0:8]});
      $for N in range(8, BATCH_TILE, 8):
        _mm_storeu_si128((__m128i*) (o + ${N}), vacc${ABC[N:N+8]});
      o += ${BATCH_TILE};
    }
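  // Remainder loop over full vectors of 8 elements.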
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_TO_NEAREST_INT);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_TO_NEAREST_INT));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_TO_NEAREST_INT);
    _mm_storeu_si128((__m128i*) o, vacc);
    o += 8;
  }
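  // Tail of 1-7 elements: load a full 8-element vector (the read past the end
  // of the buffer is what the XNN_OOB_READS annotation permits), compute as
  // usual, then store only the valid lanes in 4-, 2-, and 1-element pieces.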
  if XNN_UNLIKELY(batch != 0) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_TO_NEAREST_INT);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_TO_NEAREST_INT));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_TO_NEAREST_INT);

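    // After each partial store, shift the lanes already written out of the
    // register so the next store finds the remaining elements at the bottom.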
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vacc);
      vacc = _mm_unpackhi_epi64(vacc, vacc);
      o += 4;
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(o, vacc);
      vacc = _mm_srli_epi64(vacc, 32);
      o += 2;
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}