// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


// ELU with XNNPACK's prescale/alpha/beta extension:
//   y := beta * x                          for x >= 0
//   y := alpha * (exp(prescale * x) - 1)   for x < 0
// exp() is evaluated with "rr1" range reduction (z = n * ln2 + t, a single
// fused multiply-add by -ln2) and a degree-3 ("p3") polynomial in t.
void xnn_f16_velu_ukernel__neonfp16arith_rr1_p3_x${BATCH_TILE}(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float16x8_t vprescale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr1_p3.prescale));
  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr1_p3.sat_cutoff));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr1_p3.magic_bias));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr1_p3.log2e));
  const float16x8_t vminus_ln2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr1_p3.minus_ln2));
  const float16x8_t vc3 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr1_p3.c3));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr1_p3.c2));
  const float16x8_t vminus_alpha = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr1_p3.minus_alpha));
  const float16x8_t vbeta = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr1_p3.beta));

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  $if BATCH_TILE > 8:
    for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) {
      $for N in range(SIMD_TILE):
        float16x8_t vx${N} = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

      $for N in range(SIMD_TILE):
        float16x8_t vz${N} = vmulq_f16(vx${N}, vprescale);

      // Saturate very negative inputs: below sat_cutoff the negative-branch
      // output has already converged to -alpha in half precision.
      $for N in range(SIMD_TILE):
        vz${N} = vmaxq_f16(vz${N}, vsat_cutoff);

      $for N in range(SIMD_TILE):
        float16x8_t vn${N} = vfmaq_f16(vmagic_bias, vz${N}, vlog2e);

      // The magic bias leaves n = round(z / ln2) in the low bits of vn;
      // shifting by 10 (the number of f16 mantissa bits) moves it into the
      // exponent field, so vs = 2**n.
      $for N in range(SIMD_TILE):
        float16x8_t vs${N} = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn${N}), 10));
        vn${N} = vsubq_f16(vn${N}, vmagic_bias);

      $for N in range(SIMD_TILE):
        float16x8_t vt${N} = vfmaq_f16(vz${N}, vn${N}, vminus_ln2);

      $for N in range(SIMD_TILE):
        float16x8_t vp${N} = vfmaq_f16(vc2, vc3, vt${N});
        vp${N} = vmulq_f16(vp${N}, vt${N});

      $for N in range(SIMD_TILE):
        vt${N} = vmulq_f16(vt${N}, vs${N});
        vs${N} = vfmsq_f16(vminus_alpha, vs${N}, vminus_alpha);

      $for N in range(SIMD_TILE):
        vp${N} = vfmaq_f16(vt${N}, vp${N}, vt${N});

      // ve = alpha * (exp(z) - 1); vm selects lanes with negative x
      // (sign-bit test on the raw f16 bits reinterpreted as int16).
      $for N in range(SIMD_TILE):
        float16x8_t ve${N} = vfmsq_f16(vs${N}, vp${N}, vminus_alpha);
        const uint16x8_t vm${N} = vcltq_s16(vreinterpretq_s16_f16(vx${N}), vmovq_n_s16(0));

      $for N in range(SIMD_TILE):
        vx${N} = vmulq_f16(vx${N}, vbeta);

      $for N in range(SIMD_TILE):
        const float16x8_t vy${N} = vbslq_f16(vm${N}, ve${N}, vx${N});

      $for N in range(SIMD_TILE):
        vst1q_u16(o, vreinterpretq_u16_f16(vy${N})); o += 8;
    }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    float16x8_t vz = vmulq_f16(vx, vprescale);
    vz = vmaxq_f16(vz, vsat_cutoff);
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vlog2e);
    float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);
    float16x8_t vt = vfmaq_f16(vz, vn, vminus_ln2);
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vmulq_f16(vp, vt);
    vt = vmulq_f16(vt, vs);
    vs = vfmsq_f16(vminus_alpha, vs, vminus_alpha);
    vp = vfmaq_f16(vt, vp, vt);
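    // At this point vs = alpha * (s - 1) and vp = s * t * (1 + p), so the
    // next step reconstructs
    //   ve = vs + alpha * vp
    //      = alpha * (s * (1 + t * (1 + p)) - 1)
    //      ~ alpha * (exp(z) - 1).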
    float16x8_t ve = vfmsq_f16(vs, vp, vminus_alpha);
    const uint16x8_t vm = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
    vx = vmulq_f16(vx, vbeta);
    const float16x8_t vy = vbslq_f16(vm, ve, vx);
    vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1..7 elements: a full-vector load may read past the end
    // of the input, which is permitted because the kernel is declared
    // XNN_OOB_READS.
    float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    float16x8_t vz = vmulq_f16(vx, vprescale);
    vz = vmaxq_f16(vz, vsat_cutoff);
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vlog2e);
    float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);
    float16x8_t vt = vfmaq_f16(vz, vn, vminus_ln2);
    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vmulq_f16(vp, vt);
    vt = vmulq_f16(vt, vs);
    vs = vfmsq_f16(vminus_alpha, vs, vminus_alpha);
    vp = vfmaq_f16(vt, vp, vt);
    float16x8_t ve = vfmsq_f16(vs, vp, vminus_alpha);
    const uint16x8_t vm = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
    vx = vmulq_f16(vx, vbeta);
    float16x8_t vy = vbslq_f16(vm, ve, vx);

    // Store the remaining 1..7 elements: 4, then 2, then 1, rotating the
    // already-stored lanes out of vy_lo between steps.
    float16x4_t vy_lo = vget_low_f16(vy);
    if (batch & (4 * sizeof(uint16_t))) {
      vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
      vy_lo = vget_high_f16(vy);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
      vy_lo = vext_f16(vy_lo, vy_lo, 2);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
    }
  }
}
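
// Usage sketch (illustrative, not part of the kernel). Assumes the template
// is instantiated with BATCH_TILE=8, and that `params` has been filled in by
// the matching xnn_init_f16_elu_*_params helper (exact name not shown here):
//
//   union xnn_f16_elu_params params;
//   // ... initialize params (prescale, alpha, beta) ...
//   xnn_f16_velu_ukernel__neonfp16arith_rr1_p3_x8(
//       num_elements * sizeof(uint16_t),  // batch is in bytes
//       input_f16, output_f16, &params);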