// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


$WASM_V16X8_LANESELECT = "wasm_i16x8_relaxed_laneselect" if RELAXED else "wasm_v128_bitselect"
$ISA = "wasmrelaxedsimd" if RELAXED else "wasmsimd"
void xnn_f32_f16_vcvt_ukernel__${ISA}_x${BATCH_TILE}(
    size_t batch,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
  const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
  const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
  const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
  const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
  const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
  const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
  const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);

  uint16_t* o = (uint16_t*) output;
  $if BATCH_TILE > 8:
    for (; batch >= ${BATCH_TILE} * sizeof(float); batch -= ${BATCH_TILE} * sizeof(float)) {
      const v128_t vx0 = wasm_v128_load(input);
      $for N in range(1, 2*SIMD_TILE):
        const v128_t vx${N} = wasm_v128_load(input + ${N * 4});
      input += ${BATCH_TILE};

      $for N in range(2*SIMD_TILE):
        const v128_t vabsx${N} = wasm_f32x4_abs(vx${N});

      $for N in range(2*SIMD_TILE):
        const v128_t vsignx${N} = wasm_v128_xor(vx${N}, vabsx${N});

      $for N in range(2*SIMD_TILE):
        v128_t vbias${N} = wasm_i32x4_add(vabsx${N}, vexp_bias);

      $for N in range(2*SIMD_TILE):
        v128_t vf${N} = wasm_f32x4_mul(vabsx${N}, vscale_to_inf);

      $for N in range(2*SIMD_TILE):
        const v128_t vnanmaskw${N} = wasm_i32x4_gt(vabsx${N}, vexpw_max);

      $for N in range(2*SIMD_TILE):
        vbias${N} = wasm_v128_and(vbias${N}, vexpw_max);

      $for N in range(2*SIMD_TILE):
        vf${N} = wasm_f32x4_mul(vf${N}, vscale_to_zero);

      $for N in range(SIMD_TILE):
        const v128_t vnanmaskh${N} = wasm_i16x8_narrow_i32x4(vnanmaskw${2*N}, vnanmaskw${2*N+1});

      $for N in range(SIMD_TILE):
        const v128_t vsignh${N} = wasm_i16x8_narrow_i32x4(vsignx${2*N}, vsignx${2*N+1});

      $for N in range(2*SIMD_TILE):
        vbias${N} = wasm_i16x8_max(vbias${N}, vbias_min);

      $for N in range(2*SIMD_TILE):
        vf${N} = wasm_f32x4_add(vf${N}, vbias${N});

      $for N in range(2*SIMD_TILE):
        v128_t vexpw${N} = wasm_i32x4_shr(vf${N}, 13);

      $for N in range(2*SIMD_TILE):
        const v128_t vmantw${N} = wasm_v128_and(vf${N}, vmanth_mask);

      $for N in range(2*SIMD_TILE):
        vexpw${N} = wasm_v128_and(vexpw${N}, vexph_mask);

      $for N in range(2*SIMD_TILE):
        const v128_t vnonsignw${N} = wasm_i32x4_add(vmantw${N}, vexpw${N});

      $for N in range(SIMD_TILE):
        const v128_t vnonsignh${N} = wasm_i16x8_narrow_i32x4(vnonsignw${2*N}, vnonsignw${2*N+1});

      $for N in range(SIMD_TILE):
        const v128_t vabsh${N} = ${WASM_V16X8_LANESELECT}(vnanh, vnonsignh${N}, vnanmaskh${N});

      $for N in range(SIMD_TILE):
        const v128_t vh${N} = wasm_v128_or(vabsh${N}, vsignh${N});

      wasm_v128_store(o, vh0);
      $for N in range(1, SIMD_TILE):
        wasm_v128_store(o + ${N * 8}, vh${N});
      o += ${BATCH_TILE};
    }
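  // Note on the algorithm shared by all of the loops in this kernel: the
  // conversion is done with integer/float bit tricks rather than a native
  // conversion instruction. Per 32-bit lane, x is split into |x| and its sign
  // bit; (bits(|x|) + exp_bias) & expw_max pre-biases the exponent for f16;
  // multiplying |x| by scale_to_inf and then by scale_to_zero saturates
  // out-of-f16-range values to infinity; and adding the bit-cast bias as a
  // float makes the FPU round the mantissa to f16 precision, after which the
  // f16 exponent and mantissa fields are picked out of bits(f) with a 13-bit
  // shift and masks. NaN lanes (bits(|x|) > expw_max) are overridden with the
  // canonical f16 NaN, and the sign bit is OR'ed back in after narrowing.
  //
  // A scalar sketch of the same computation (illustrative only, not part of
  // the kernel: fp32_to_bits/fp32_from_bits stand for bit-casts, and the
  // constants are the values conventionally used with this trick, not values
  // read from params):
  //
  //   uint16_t f32_to_f16(float x) {
  //     const uint32_t w = fp32_to_bits(x);
  //     const uint32_t sign = w & 0x80000000;              // vsignx
  //     const uint32_t absw = w ^ sign;                    // vabsx
  //     uint32_t bias = (absw + 0x07800000) & 0x7F800000;  // exp_bias, expw_max
  //     if (bias < 0x40000000) bias = 0x40000000;          // bias_min
  //     float f = fp32_from_bits(absw) * 0x1.0p+112f * 0x1.0p-110f;  // scale_to_inf, scale_to_zero
  //     f += fp32_from_bits(bias);
  //     const uint32_t bits = fp32_to_bits(f);
  //     const uint32_t nonsign = ((bits >> 13) & 0x7C00) + (bits & 0x0FFF);  // exph_mask, manth_mask
  //     return (uint16_t) ((sign >> 16) | (absw > 0x7F800000 ? 0x7E00 : nonsign));  // nanh
  //   }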
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t vx_lo = wasm_v128_load(input);
    const v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);

    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);

    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);

    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);

    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);

    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);

    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = ${WASM_V16X8_LANESELECT}(vnanh, vnonsignh, vnanmaskh);

    const v128_t vh = wasm_v128_or(vabsh, vsignh);

    wasm_v128_store(o, vh);
    o += 8;
  }
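  // Remainder of 1..7 floats. Because the kernel is declared XNN_OOB_READS,
  // both halves are loaded as full vectors even when fewer elements remain;
  // the surplus lanes are converted but never stored. input_hi points
  // 4 floats past input only when at least 4 elements remain (batch & 16
  // bytes is nonzero); otherwise it aliases input and the low half is simply
  // converted twice. The converted lanes are then written out with
  // 64-/32-/16-bit lane stores, shifting the remaining results down to
  // lane 0 between stores.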
  if XNN_UNPREDICTABLE(batch != 0) {
    const v128_t vx_lo = wasm_v128_load(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    const v128_t vx_hi = wasm_v128_load(input_hi);

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);

    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);

    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);

    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);

    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);

    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);

    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = ${WASM_V16X8_LANESELECT}(vnanh, vnonsignh, vnanmaskh);

    v128_t vh = wasm_v128_or(vabsh, vsignh);

    if (batch & (4 * sizeof(float))) {
      wasm_v128_store64_lane(o, vh, 0);
      vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
      o += 4;
    }
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store32_lane(o, vh, 0);
      vh = wasm_i64x2_shr(vh, 32);
      o += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store16_lane(o, vh, 0);
    }
  }
}
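
// Example instantiation and call (a sketch: with BATCH_TILE=16 and RELAXED=0
// this template produces xnn_f32_f16_vcvt_ukernel__wasmsimd_x16; the
// params-initialization helper named below follows XNNPACK's usual naming and
// is an assumption, not an API confirmed by this file):
//
//   union xnn_f32_f16_cvt_params params;
//   xnn_init_f32_f16_cvt_wasmsimd_params(&params);
//   xnn_f32_f16_vcvt_ukernel__wasmsimd_x16(
//       batch_size * sizeof(float),  // batch is measured in bytes
//       input, output, &params);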