src/f16-f32-vcvt/sse-int16.c.in
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert SSE in [2, 4]
$assert not AVX or SSE == 4
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>
#include <${SSE_HEADER}>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
$ISA = "avx" if AVX else {2: "sse2", 4: "sse41"}[SSE]
void xnn_f16_f32_vcvt_ukernel__${ISA}_int16_x${BATCH_TILE}(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
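
  // Conversion constants preloaded into the params structure:
  //  - sign_mask (0x8000 in each 16-bit lane) isolates the fp16 sign bit,
  //  - exp_offset and exp_scale re-bias the exponent of normalized values
  //    from the fp16 range to the fp32 range,
  //  - magic_mask and magic_bias reconstruct denormalized values via a
  //    float subtraction,
  //  - denorm_cutoff separates the normalized and denormalized paths.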
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
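
  // Main loop: convert ${BATCH_TILE} fp16 elements per iteration.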
$if BATCH_TILE > 8:
for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
$for N in range(1, SIMD_TILE):
const __m128i vh${N} = _mm_loadu_si128((const __m128i*) (i + ${N * 8}));
i += ${BATCH_TILE};
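      // Split each fp16 value into its sign bit and its non-sign (magnitude) bits.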
$for N in range(SIMD_TILE):
const __m128i vsign${N} = _mm_and_si128(vh${N}, vsign_mask);
$for N in range(SIMD_TILE):
const __m128i vnonsign${N} = _mm_xor_si128(vh${N}, vsign${N});
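      // Position the fp16 bits for fp32: the low 16 bits of each future 32-bit
      // word are the non-sign bits shifted left by 13; the high 16 bits are the
      // non-sign bits shifted right by 3 plus the exponent re-bias offset.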
$for N in range(SIMD_TILE):
const __m128i vprenorm${2*N} = _mm_slli_epi16(vnonsign${N}, 13);
const __m128i vprenorm${2*N+1} = _mm_add_epi16(_mm_srli_epi16(vnonsign${N}, 3), vexp_offset);
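      // Interleave the prenorm halves into 32-bit words and multiply by
      // exp_scale to finish the exponent re-bias: the normalized-input result.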
$for N in range(SIMD_TILE):
const __m128i vnorm${2*N} = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm${2*N}, vprenorm${2*N+1})), vexp_scale));
const __m128i vnorm${2*N+1} = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm${2*N}, vprenorm${2*N+1})), vexp_scale));
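      // Denormal path: pair the mantissa bits with the magic mask to form a
      // small float, then subtract the magic bias to recover the exact value.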
$for N in range(SIMD_TILE):
const __m128i vdenorm${2*N} = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign${N}, vmagic_mask)), vmagic_bias));
const __m128i vdenorm${2*N+1} = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign${N}, vmagic_mask)), vmagic_bias));
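      // Per-element mask, true where the input is above the denormal cutoff
      // and the normalized result must be selected.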
$for N in range(SIMD_TILE):
const __m128i vmask${N} = _mm_cmpgt_epi16(vnonsign${N}, vdenorm_cutoff);
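      // Blend the normalized and denormalized results under the cutoff mask,
      // then merge in the sign bit, widened to the fp32 sign position.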
$for N in range(SIMD_TILE):
$if SSE == 4:
const __m128i vf${2*N} = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign${N}),
_mm_blendv_epi8(vdenorm${2*N}, vnorm${2*N}, _mm_cvtepi16_epi32(vmask${N})));
const __m128i vf${2*N+1} = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign${N}),
_mm_blendv_epi8(vdenorm${2*N+1}, vnorm${2*N+1}, _mm_unpackhi_epi16(vmask${N}, vmask${N})));
$else:
const __m128i vxmask${2*N} = _mm_unpacklo_epi16(vmask${N}, vmask${N});
const __m128i vf${2*N} = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign${N}),
_mm_or_si128(_mm_and_si128(vxmask${2*N}, vnorm${2*N}), _mm_andnot_si128(vxmask${2*N}, vdenorm${2*N})));
const __m128i vxmask${2*N+1} = _mm_unpackhi_epi16(vmask${N}, vmask${N});
const __m128i vf${2*N+1} = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign${N}),
_mm_or_si128(_mm_and_si128(vxmask${2*N+1}, vnorm${2*N+1}), _mm_andnot_si128(vxmask${2*N+1}, vdenorm${2*N+1})));
_mm_storeu_ps(output, _mm_castsi128_ps(vf0));
$for N in range(1, 2*SIMD_TILE):
_mm_storeu_ps(output + ${N * 4}, _mm_castsi128_ps(vf${N}));
output += ${BATCH_TILE};
}
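  // Convert 8 elements per iteration: split the sign, re-bias normalized
  // values, reconstruct denormals, and blend the two results by the cutoff mask.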
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vh = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vsign = _mm_and_si128(vh, vsign_mask);
const __m128i vnonsign = _mm_xor_si128(vh, vsign);
const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
$if SSE == 4:
const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
$else:
const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
$if SSE == 4:
const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
$else:
const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
_mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
_mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
output += 8;
}
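  // Remainder of 1-7 elements: load a full 16-byte vector (this kernel is
  // annotated XNN_OOB_READS, so reading past the end of the input is allowed),
  // convert it as above, and store only the valid lanes.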
if XNN_UNPREDICTABLE(batch != 0) {
const __m128i vh = _mm_loadu_si128((const __m128i*) i);
const __m128i vsign = _mm_and_si128(vh, vsign_mask);
const __m128i vnonsign = _mm_xor_si128(vh, vsign);
const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
$if SSE == 4:
__m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
$else:
const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
__m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
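    // Store the converted remainder in chunks of 4, 2, and finally 1 element.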
if (batch & (4 * sizeof(uint16_t))) {
_mm_storeu_ps(output, _mm_castsi128_ps(vf));
output += 4;
$if SSE == 4:
vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
$else:
const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
output += 2;
vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
}
if (batch & (1 * sizeof(uint16_t))) {
_mm_store_ss(output, _mm_castsi128_ps(vf));
}
}
}