// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["ADD", "DIV", "RDIV", "MAX", "MIN", "MUL", "SUB", "RSUB", "SQRDIFF"]
$assert ACTIVATION in ["LINEAR", "MINMAX"]
#include <assert.h>
#include <stddef.h>
$if ACTIVATION == "MINMAX":
  #include <string.h>

#include <arm_fp16.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>


$VOPH_F16 = {
$  "ADD": lambda x: "vaddh_f16(%s, vb)" % x,
$  "DIV": lambda x: "vdivh_f16(%s, vb)" % x,
$  "RDIV": lambda x: "vdivh_f16(vb, %s)" % x,
$  "MAX": lambda x: "vmaxnmh_f16(%s, vb)" % x,
$  "MIN": lambda x: "vminnmh_f16(%s, vb)" % x,
$  "MUL": lambda x: "vmulh_f16(%s, vb)" % x,
$  "SUB": lambda x: "vsubh_f16(%s, vb)" % x,
$  "RSUB": lambda x: "vsubh_f16(vb, %s)" % x,
$  "SQRDIFF": lambda x: "vsubh_f16(%s, vb)" % x,
$}[OP]
$SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
$PARAMS = {"LINEAR": "xnn_f16_default_params", "MINMAX": "xnn_f16_minmax_params"}[ACTIVATION]
void xnn_f16_v${OP.lower()}c${SUFFIX}_ukernel__fp16arith_x${BATCH_TILE}(
    size_t batch,
    const void* restrict input_a,
    const void* restrict input_b,
    void* restrict output,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float16_t) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float16_t* a = (const float16_t*) input_a;
  const float16_t* b = (const float16_t*) input_b;
  float16_t* o = (float16_t*) output;

  $if ACTIVATION == "MINMAX":
    float16_t vy_min, vy_max;
    memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
    memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
  // The second operand is a single scalar, broadcast across the whole batch.
  const float16_t vb = *b;

  $if BATCH_TILE > 1:
    // Main loop: process ${BATCH_TILE} elements per iteration.
    for (; batch >= ${BATCH_TILE} * sizeof(float16_t); batch -= ${BATCH_TILE} * sizeof(float16_t)) {
      $for N in range(BATCH_TILE):
        float16_t vacc${ABC[N]} = a[${N}];
      a += ${BATCH_TILE};

      $for N in range(BATCH_TILE):
        vacc${ABC[N]} = ${VOPH_F16("vacc" + ABC[N])};
      $if OP == "SQRDIFF":
        $for N in range(BATCH_TILE):
          vacc${ABC[N]} = vmulh_f16(vacc${ABC[N]}, vacc${ABC[N]});

      $if ACTIVATION == "MINMAX":
        $for N in range(BATCH_TILE):
          vacc${ABC[N]} = vmaxnmh_f16(vacc${ABC[N]}, vy_min);

        $for N in range(BATCH_TILE):
          vacc${ABC[N]} = vminnmh_f16(vacc${ABC[N]}, vy_max);

      $for N in range(BATCH_TILE):
        o[${N}] = vacc${ABC[N]};
      o += ${BATCH_TILE};
    }
    // Remainder: fewer than ${BATCH_TILE} elements are left.
    if XNN_UNLIKELY(batch != 0) {
      $if BATCH_TILE > 2:
        do {
          float16_t vacc = *a++;
          vacc = ${VOPH_F16("vacc")};
          $if OP == "SQRDIFF":
            vacc = vmulh_f16(vacc, vacc);
          $if ACTIVATION == "MINMAX":
            vacc = vmaxnmh_f16(vacc, vy_min);
            vacc = vminnmh_f16(vacc, vy_max);
          *o++ = vacc;
          batch -= sizeof(float16_t);
        } while (batch != 0);
      $else:
        float16_t vacc = *a;
        vacc = ${VOPH_F16("vacc")};
        $if OP == "SQRDIFF":
          vacc = vmulh_f16(vacc, vacc);
        $if ACTIVATION == "MINMAX":
          vacc = vmaxnmh_f16(vacc, vy_min);
          vacc = vminnmh_f16(vacc, vy_max);
        *o = vacc;
    }
  $else:
    // BATCH_TILE == 1: process one element per iteration.
    do {
      float16_t vacc = *a++;
      vacc = ${VOPH_F16("vacc")};
      $if OP == "SQRDIFF":
        vacc = vmulh_f16(vacc, vacc);
      $if ACTIVATION == "MINMAX":
        vacc = vmaxnmh_f16(vacc, vy_min);
        vacc = vminnmh_f16(vacc, vy_max);
      *o++ = vacc;
      batch -= sizeof(float16_t);
    } while (batch != 0);
}
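
// ---------------------------------------------------------------------------
// Illustrative expansion (not part of the template): a minimal sketch of what
// the generator would emit for OP=ADD, ACTIVATION=MINMAX, BATCH_TILE=2,
// assuming the same include set as above. The formatting and header set of
// the checked-in generated files may differ.
// ---------------------------------------------------------------------------

void xnn_f16_vaddc_minmax_ukernel__fp16arith_x2(
    size_t batch,
    const void* restrict input_a,
    const void* restrict input_b,
    void* restrict output,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float16_t) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float16_t* a = (const float16_t*) input_a;
  const float16_t* b = (const float16_t*) input_b;
  float16_t* o = (float16_t*) output;

  float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
  // The scalar operand is loaded once and reused for every element.
  const float16_t vb = *b;

  // Main loop: two elements per iteration.
  for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
    float16_t vacc0 = a[0];
    float16_t vacc1 = a[1];
    a += 2;

    vacc0 = vaddh_f16(vacc0, vb);
    vacc1 = vaddh_f16(vacc1, vb);

    vacc0 = vmaxnmh_f16(vacc0, vy_min);
    vacc1 = vmaxnmh_f16(vacc1, vy_min);

    vacc0 = vminnmh_f16(vacc0, vy_max);
    vacc1 = vminnmh_f16(vacc1, vy_max);

    o[0] = vacc0;
    o[1] = vacc1;
    o += 2;
  }
  // With BATCH_TILE == 2 the remainder is at most one element, so the
  // template emits the single-element tail rather than a do-while loop.
  if XNN_UNLIKELY(batch != 0) {
    float16_t vacc = *a;
    vacc = vaddh_f16(vacc, vb);
    vacc = vmaxnmh_f16(vacc, vy_min);
    vacc = vminnmh_f16(vacc, vy_max);
    *o = vacc;
  }
}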