#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>

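// Elementwise fp16 division with min/max clamping, vectorized with F16C:
// inputs are widened to fp32, divided, rounded back to fp16 precision, and
// clamped before the final fp16 store.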
void xnn_f16_vdiv_minmax_ukernel__f16c_x16(
    size_t batch,
    const void* restrict input_a,
    const void* restrict input_b,
    void* restrict output,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const uint16_t* a = (const uint16_t*) input_a;
  const uint16_t* b = (const uint16_t*) input_b;
  uint16_t* o = (uint16_t*) output;

  // Load the precomputed fp32 clamping bounds from params.
  const __m256 vy_min = _mm256_load_ps(params->avx.min);
  const __m256 vy_max = _mm256_load_ps(params->avx.max);

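  // Main loop: process 16 fp16 elements (32 bytes) per iteration.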
  for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
    const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
    const __m256 vb01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
    const __m256 va89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
    const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (b + 8)));
    a += 16;
    b += 16;

    // Divide in fp32, then round the quotient to fp16 precision.
    __m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va01234567, vb01234567), _MM_FROUND_TO_NEAREST_INT));
    __m256 vy89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va89ABCDEF, vb89ABCDEF), _MM_FROUND_TO_NEAREST_INT));

    // Clamp to [min, max].
    vy01234567 = _mm256_max_ps(vy01234567, vy_min);
    vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);

    vy01234567 = _mm256_min_ps(vy01234567, vy_max);
    vy89ABCDEF = _mm256_min_ps(vy89ABCDEF, vy_max);

    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
    _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
    o += 16;
  }
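  // Process 8 fp16 elements per iteration.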
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
    const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
    a += 8;
    b += 8;

    __m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));

    vy = _mm256_max_ps(vy, vy_min);
    vy = _mm256_min_ps(vy, vy_max);

    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
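  // Remainder of 1 to 7 elements: the full 8-element loads may read past the
  // end of the inputs, which XNN_OOB_READS on this function declares safe.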
  if XNN_UNLIKELY(batch != 0) {
    const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
    const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));

    __m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));

    vy = _mm256_max_ps(vy, vy_min);
    vy = _mm256_min_ps(vy, vy_max);

    // Store 4, 2, and 1 fp16 lanes as needed, shifting consumed lanes out of
    // the vector after each partial store.
    __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vh);
      vh = _mm_unpackhi_epi64(vh, vh);
      o += 4;
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(o, vh);
      vh = _mm_srli_epi64(vh, 32);
      o += 2;
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vh, 0);
    }
  }
}