// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
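// Template source: lines beginning with $ are directives for XNNPACK's xngen
// code generator. BATCH_TILE sets how many fp16 elements the main loop handles
// per iteration; ABC supplies digit suffixes for the unrolled variable names.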
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
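// HardSwish over IEEE fp16 data using F16C up/down conversions:
//
//   hswish(x) = x * min(max(x + 3, 0), 6) / 6
//
// computed here as (x * 1/6) * clamp(x + 3, 0, 6), with intermediates rounded
// back to fp16 so results match native half-precision arithmetic.
//
// Usage sketch for an x8 instantiation (the params initializer named below is
// an assumption, not defined in this file; note that batch is given in bytes):
//
//   union xnn_f16_hswish_params params;
//   xnn_init_f16_hswish_avx_params(&params);  // assumed init helper
//   xnn_f16_vhswish_ukernel__f16c_x8(n * sizeof(uint16_t), x, y, &params);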
void xnn_f16_vhswish_ukernel__f16c_x${BATCH_TILE}(
size_t batch,
const void* restrict input,
void* restrict output,
const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
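// 1/6 and 3 are fp32 broadcast constants; 6 stays as packed fp16 halves so the
// clamp below can run as integer min/max directly on the fp16 bit patterns.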
const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
const __m256 vthree = _mm256_load_ps(params->avx.three);
const __m128i vsix = _mm_load_si128((const __m128i*) params->avx.six);
const __m128i vzero = _mm_setzero_si128();
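// Main loop, emitted only when BATCH_TILE > 8: BATCH_TILE halves per iteration.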
$if BATCH_TILE > 8:
for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
__m256 vx${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
$for N in range(8, BATCH_TILE, 8):
__m256 vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + ${N})));
i += ${BATCH_TILE};
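// vacc := x + 3, rounded to fp16; vx := x * 1/6, round-tripped through fp16 to
// match the precision of a native half-precision multiply.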
$for N in range(0, BATCH_TILE, 8):
__m128i vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_add_ps(vx${ABC[N:N+8]}, vthree), _MM_FROUND_TO_NEAREST_INT);
vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx${ABC[N:N+8]}, vsixth), _MM_FROUND_TO_NEAREST_INT));
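// Clamp vacc to [0, 6] on the raw fp16 bits: non-negative halves order the
// same as their signed 16-bit encodings, and negative halves (sign bit set)
// compare below zero, so integer max/min implements the float clamp.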
$for N in range(0, BATCH_TILE, 8):
vacc${ABC[N:N+8]} = _mm_max_epi16(vacc${ABC[N:N+8]}, vzero);
$for N in range(0, BATCH_TILE, 8):
vacc${ABC[N:N+8]} = _mm_min_epi16(vacc${ABC[N:N+8]}, vsix);
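// Final product: hswish(x) = (x * 1/6) * clamp(x + 3, 0, 6), rounded to fp16.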
$for N in range(0, BATCH_TILE, 8):
vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[N:N+8]}), vx${ABC[N:N+8]}), _MM_FROUND_TO_NEAREST_INT);
_mm_storeu_si128((__m128i*) o, vacc${ABC[0:8]});
$for N in range(8, BATCH_TILE, 8):
_mm_storeu_si128((__m128i*) (o + ${N}), vacc${ABC[N:N+8]});
o += ${BATCH_TILE};
}
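// Process remaining full vectors of 8 halves (this is the main loop when
// BATCH_TILE == 8).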
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
__m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_TO_NEAREST_INT);
vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_TO_NEAREST_INT));
vacc = _mm_max_epi16(vacc, vzero);
vacc = _mm_min_epi16(vacc, vsix);
vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_TO_NEAREST_INT);
_mm_storeu_si128((__m128i*) o, vacc);
o += 8;
}
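// Tail of 1 to 7 halves: the full 8-half load may read past the end of the
// input, which the XNN_OOB_READS annotation declares as intentional.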
if XNN_UNLIKELY(batch != 0) {
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_TO_NEAREST_INT);
vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_TO_NEAREST_INT));
vacc = _mm_max_epi16(vacc, vzero);
vacc = _mm_min_epi16(vacc, vsix);
vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_TO_NEAREST_INT);
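// Store 4, 2, and/or 1 lanes, shifting consumed lanes out of vacc as we go.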
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vacc);
vacc = _mm_unpackhi_epi64(vacc, vacc);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vacc);
vacc = _mm_srli_epi64(vacc, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vacc, 0);
}
}
}