// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
#include <xnnpack/vcvt.h>
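
// Converts a batch of FP16 values to FP32 using NEON-FP16 VCVT instructions,
// processing 16 elements per main-loop iteration.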
void xnn_f16_f32_vcvt_ukernel__neonfp16_x16(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
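  // Main loop: load 16 FP16 elements, widen to FP32, store 16 floats.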
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t vh0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vf0 = vcvt_f32_f16(vget_low_f16(vh0));
const float32x4_t vf1 = vcvt_f32_f16(vget_high_f16(vh0));
const float32x4_t vf2 = vcvt_f32_f16(vget_low_f16(vh1));
const float32x4_t vf3 = vcvt_f32_f16(vget_high_f16(vh1));
vst1q_f32(output, vf0); output += 4;
vst1q_f32(output, vf1); output += 4;
vst1q_f32(output, vf2); output += 4;
vst1q_f32(output, vf3); output += 4;
}
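  // Process 8 remaining elements at a time.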
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vh = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vf_lo = vcvt_f32_f16(vget_low_f16(vh));
const float32x4_t vf_hi = vcvt_f32_f16(vget_high_f16(vh));
vst1q_f32(output, vf_lo); output += 4;
vst1q_f32(output, vf_hi); output += 4;
}
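  // Tail of 1-7 elements: the full 8-element load is safe because the kernel
  // is declared XNN_OOB_READS.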
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vh = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float32x4_t vf = vcvt_f32_f16(vget_low_f16(vh));
if (batch & (4 * sizeof(uint16_t))) {
vst1q_f32(output, vf); output += 4;
vf = vcvt_f32_f16(vget_high_f16(vh));
}
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(uint16_t))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
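
// Computes the sum of a batch of FP16 elements, accumulating in FP32 across
// four independent accumulators (32 elements per main-loop iteration), then
// scales the result by params->scalar.scale and stores it as a single FP16 value.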
void xnn_f16_f32acc_rsum_ukernel__neonfp16_x32_acc4(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
float32x4_t vacc3 = vmovq_n_f32(0.0f);
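  // Main loop: 32 FP16 elements per iteration, widened to FP32 and spread
  // across 4 accumulators so the additions proceed independently.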
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vh01 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh23 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh45 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh67 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vt0 = vcvt_f32_f16(vget_low_f16(vh01));
const float32x4_t vt1 = vcvt_f32_f16(vget_high_f16(vh01));
const float32x4_t vt2 = vcvt_f32_f16(vget_low_f16(vh23));
const float32x4_t vt3 = vcvt_f32_f16(vget_high_f16(vh23));
const float32x4_t vt4 = vcvt_f32_f16(vget_low_f16(vh45));
const float32x4_t vt5 = vcvt_f32_f16(vget_high_f16(vh45));
const float32x4_t vt6 = vcvt_f32_f16(vget_low_f16(vh67));
const float32x4_t vt7 = vcvt_f32_f16(vget_high_f16(vh67));
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
vacc2 = vaddq_f32(vacc2, vt2);
vacc3 = vaddq_f32(vacc3, vt3);
vacc0 = vaddq_f32(vacc0, vt4);
vacc1 = vaddq_f32(vacc1, vt5);
vacc2 = vaddq_f32(vacc2, vt6);
vacc3 = vaddq_f32(vacc3, vt7);
}
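  // Reduce the 4 vector accumulators to a single float32x4_t.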
vacc0 = vaddq_f32(vacc0, vacc1);
vacc2 = vaddq_f32(vacc2, vacc3);
vacc0 = vaddq_f32(vacc0, vacc2);
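  // Process 4 remaining elements at a time.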
for (; batch >= 4 * sizeof(uint16_t); batch -= 4 * sizeof(uint16_t)) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc0 = vaddq_f32(vacc0, vt);
}
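  // Load the scale factor and fold the 4 accumulator lanes down to 2.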
  const float32x2_t vscale = vld1_dup_f32(&params->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
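  // Pairwise add collapses the 2 lanes into the final sum, replicated in both lanes.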
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_dup_u16(i));
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
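  // Apply the scale, narrow back to FP16, and store the single-element result.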
vacc = vmul_f32(vacc, vscale);
const float16x4_t vout = vcvt_f16_f32(vcombine_f32(vacc, vacc));
vst1_lane_u16(o, vreinterpret_u16_f16(vout), 0);
}
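
// Converts a batch of FP32 values to FP16 using NEON-FP16 VCVT instructions,
// processing 16 elements per main-loop iteration.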
void xnn_f32_f16_vcvt_ukernel__neonfp16_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
uint16_t* o = (uint16_t*) output;
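  // Main loop: load 16 floats, narrow to FP16, store 16 half-precision elements.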
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vf0 = vld1q_f32(input); input += 4;
const float32x4_t vf1 = vld1q_f32(input); input += 4;
const float32x4_t vf2 = vld1q_f32(input); input += 4;
const float32x4_t vf3 = vld1q_f32(input); input += 4;
const uint16x8_t vh0 = vreinterpretq_u16_f16(vcombine_f16(vcvt_f16_f32(vf0), vcvt_f16_f32(vf1)));
const uint16x8_t vh1 = vreinterpretq_u16_f16(vcombine_f16(vcvt_f16_f32(vf2), vcvt_f16_f32(vf3)));
vst1q_u16(o, vh0); o += 8;
vst1q_u16(o, vh1); o += 8;
}
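  // Process 4 remaining elements at a time.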
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vf = vld1q_f32(input); input += 4;
const uint16x4_t vh = vreinterpret_u16_f16(vcvt_f16_f32(vf));
vst1_u16(o, vh); o += 4;
}
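  // Tail of 1-3 elements: the full 4-element load is safe because the kernel
  // is declared XNN_OOB_READS.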
if XNN_UNLIKELY(batch != 0) {
assert(batch % sizeof(float) == 0);
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vf = vld1q_f32(input);
uint16x4_t vh = vreinterpret_u16_f16(vcvt_f16_f32(vf));
if (batch & (2 * sizeof(float))) {
vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
vh = vext_u16(vh, vh, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_u16(o, vh, 0);
}
}
}