// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
$assert ACTIVATION in ["LINEAR", "MINMAX"]
#include <assert.h>
$if ACTIVATION == "MINMAX":
  #include <string.h>

#include <arm_fp16.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>


$VOPH_F16 = {
$  "ADD": lambda x, y: "vaddh_f16(%s, %s)" % (x, y),
$  "DIV": lambda x, y: "vdivh_f16(%s, %s)" % (x, y),
$  "MAX": lambda x, y: "vmaxnmh_f16(%s, %s)" % (x, y),
$  "MIN": lambda x, y: "vminnmh_f16(%s, %s)" % (x, y),
$  "MUL": lambda x, y: "vmulh_f16(%s, %s)" % (x, y),
$  "SUB": lambda x, y: "vsubh_f16(%s, %s)" % (x, y),
$  "SQRDIFF": lambda x, y: "vsubh_f16(%s, %s)" % (x, y),
$}[OP]
$SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
$PARAMS = {"LINEAR": "xnn_f16_default_params", "MINMAX": "xnn_f16_minmax_params"}[ACTIVATION]
void xnn_f16_v${OP.lower()}${SUFFIX}_ukernel__fp16arith_x${BATCH_TILE}(
    size_t batch,
    const void* restrict input_a,
    const void* restrict input_b,
    void* restrict output,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float16_t) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float16_t* a = (const float16_t*) input_a;
  const float16_t* b = (const float16_t*) input_b;
  float16_t* o = (float16_t*) output;

  $if ACTIVATION == "MINMAX":
    // Load the clamping bounds from the packed microkernel parameters.
    float16_t vy_min, vy_max;
    memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
    memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));

  $if BATCH_TILE > 1:
    // Main loop: process BATCH_TILE elements per iteration.
    for (; batch >= ${BATCH_TILE} * sizeof(float16_t); batch -= ${BATCH_TILE} * sizeof(float16_t)) {
      $for N in range(BATCH_TILE):
        const float16_t va${ABC[N]} = *a++;

      $for N in range(BATCH_TILE):
        const float16_t vb${ABC[N]} = *b++;

      $for N in range(BATCH_TILE):
        float16_t vacc${ABC[N]} = ${VOPH_F16("va" + ABC[N], "vb" + ABC[N])};

      $if OP == "SQRDIFF":
        $for N in range(BATCH_TILE):
          vacc${ABC[N]} = vmulh_f16(vacc${ABC[N]}, vacc${ABC[N]});

      $if ACTIVATION == "MINMAX":
        $for N in range(BATCH_TILE):
          vacc${ABC[N]} = vmaxnmh_f16(vacc${ABC[N]}, vy_min);

        $for N in range(BATCH_TILE):
          vacc${ABC[N]} = vminnmh_f16(vacc${ABC[N]}, vy_max);

      $for N in range(BATCH_TILE):
        *o++ = vacc${ABC[N]};
    }
    // Remainder: fewer than BATCH_TILE elements left.
    if XNN_UNLIKELY(batch != 0) {
      $if BATCH_TILE > 2:
        do {
          const float16_t va = *a++;
          const float16_t vb = *b++;
          float16_t vacc = ${VOPH_F16("va", "vb")};
          $if OP == "SQRDIFF":
            vacc = vmulh_f16(vacc, vacc);
          $if ACTIVATION == "MINMAX":
            vacc = vmaxnmh_f16(vacc, vy_min);
            vacc = vminnmh_f16(vacc, vy_max);
          *o++ = vacc;
          batch -= sizeof(float16_t);
        } while (batch != 0);
      $else:
        const float16_t va = *a;
        const float16_t vb = *b;
        float16_t vacc = ${VOPH_F16("va", "vb")};
        $if OP == "SQRDIFF":
          vacc = vmulh_f16(vacc, vacc);
        $if ACTIVATION == "MINMAX":
          vacc = vmaxnmh_f16(vacc, vy_min);
          vacc = vminnmh_f16(vacc, vy_max);
        *o = vacc;
    }
  $else:
    do {
      const float16_t va = *a++;
      const float16_t vb = *b++;
      float16_t vacc = ${VOPH_F16("va", "vb")};
      $if OP == "SQRDIFF":
        vacc = vmulh_f16(vacc, vacc);
      $if ACTIVATION == "MINMAX":
        vacc = vmaxnmh_f16(vacc, vy_min);
        vacc = vminnmh_f16(vacc, vy_max);
      *o++ = vacc;
      batch -= sizeof(float16_t);
    } while (batch != 0);
}
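
// ---------------------------------------------------------------------------
// Illustrative expansion (a hand-written sketch, not actual generator output):
// instantiating the template above with OP=ADD, ACTIVATION=MINMAX and
// BATCH_TILE=2 would produce roughly the kernel below. All names, headers,
// and the params->fp16arith layout are taken from the template itself; the
// expansion is shown only to clarify how the $for/$if directives unroll.
// ---------------------------------------------------------------------------
//
// #include <assert.h>
// #include <string.h>
//
// #include <arm_fp16.h>
//
// #include <xnnpack/common.h>
// #include <xnnpack/math.h>
// #include <xnnpack/vbinary.h>
//
// void xnn_f16_vadd_minmax_ukernel__fp16arith_x2(
//     size_t batch,
//     const void* restrict input_a,
//     const void* restrict input_b,
//     void* restrict output,
//     const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
// {
//   assert(batch != 0);
//   assert(batch % sizeof(float16_t) == 0);
//
//   const float16_t* a = (const float16_t*) input_a;
//   const float16_t* b = (const float16_t*) input_b;
//   float16_t* o = (float16_t*) output;
//
//   // MINMAX activation: load scalar clamping bounds.
//   float16_t vy_min, vy_max;
//   memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
//   memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
//
//   // Main loop, unrolled by BATCH_TILE=2.
//   for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
//     const float16_t va0 = *a++;
//     const float16_t va1 = *a++;
//
//     const float16_t vb0 = *b++;
//     const float16_t vb1 = *b++;
//
//     float16_t vacc0 = vaddh_f16(va0, vb0);
//     float16_t vacc1 = vaddh_f16(va1, vb1);
//
//     vacc0 = vmaxnmh_f16(vacc0, vy_min);
//     vacc1 = vmaxnmh_f16(vacc1, vy_min);
//
//     vacc0 = vminnmh_f16(vacc0, vy_max);
//     vacc1 = vminnmh_f16(vacc1, vy_max);
//
//     *o++ = vacc0;
//     *o++ = vacc1;
//   }
//   if XNN_UNLIKELY(batch != 0) {
//     // BATCH_TILE == 2, so the $else branch applies: at most one element
//     // remains and no remainder loop is needed.
//     const float16_t va = *a;
//     const float16_t vb = *b;
//     float16_t vacc = vaddh_f16(va, vb);
//     vacc = vmaxnmh_f16(vacc, vy_min);
//     vacc = vminnmh_f16(vacc, vy_max);
//     *o = vacc;
//   }
// }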