$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
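// SIMD_TILE is the number of 8-element F16C vectors per main-loop iteration:
// each __m256 holds 8 converted fp32 values, so BATCH_TILE must be a multiple of 8.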
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

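
// Converts IEEE fp16 (binary16) values to fp32 with the F16C VCVTPH2PS
// instruction; the main loop handles ${BATCH_TILE} elements per iteration.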
void xnn_f16_f32_vcvt_ukernel__f16c_x${BATCH_TILE}(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
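  // `batch` is measured in bytes, so it must cover a whole number of fp16 elements.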

  const uint16_t* i = (const uint16_t*) input;
  $if BATCH_TILE > 8:
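    // Main loop: convert ${BATCH_TILE} fp16 elements (${SIMD_TILE} vectors of 8) per iteration.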
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
      const __m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
      $for N in range(1, SIMD_TILE):
        const __m256 vacc${N} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + ${N * 8})));
      i += ${BATCH_TILE};

      _mm256_storeu_ps(output, vacc0);
      $for N in range(1, SIMD_TILE):
        _mm256_storeu_ps(output + ${N * 8}, vacc${N});
      output += ${BATCH_TILE};
    }
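  // Convert any remaining full vectors of 8 elements.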
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;

    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
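  // Tail of 1-7 elements: one more 8-element vector is loaded and converted
  // (this may read past the end of the input, which XNN_OOB_READS declares),
  // and only the valid lanes are stored below.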
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint16_t));
    assert(batch <= 7 * sizeof(uint16_t));
    const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));

    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
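    // Store 4, 2, then 1 fp32 values according to the bits of the remaining byte count.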
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}