// Auto-generated file. Do not edit!
//   Template: src/f16-vbinary/vop-fp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>

#include <arm_fp16.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>


void xnn_f16_vmax_ukernel__fp16arith_x1(
    size_t batch,
    const void* restrict input_a,
    const void* restrict input_b,
    void* restrict output,
    const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  // `batch` is given in bytes and must be a non-zero multiple of the
  // half-precision element size.
  assert(batch != 0);
  assert(batch % sizeof(float16_t) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float16_t* a = (const float16_t*) input_a;
  const float16_t* b = (const float16_t*) input_b;
  float16_t* o = (float16_t*) output;

  // Scalar loop: one half-precision element per iteration.
  do {
    const float16_t va = *a++;
    const float16_t vb = *b++;
    // vmaxnmh_f16 follows IEEE 754 maxNum semantics: if one operand is a
    // quiet NaN and the other is numeric, the numeric operand is returned.
    float16_t vacc = vmaxnmh_f16(va, vb);
    *o++ = vacc;
    batch -= sizeof(float16_t);
  } while (batch != 0);
}
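
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the generated kernel. It assumes an
// AArch64 target built with FP16 arithmetic support (e.g.
// -march=armv8.2-a+fp16) and the headers included above. The guard macro
// XNN_F16_VMAX_USAGE_EXAMPLE is hypothetical and only exists to keep this
// sketch out of normal builds. The kernel body never reads `params`, so an
// uninitialized instance is passed.
#ifdef XNN_F16_VMAX_USAGE_EXAMPLE
#include <stdio.h>

int main(void) {
  float16_t a[4]   = { 1.0f, -2.0f, 3.5f, 0.25f };
  float16_t b[4]   = { 0.5f,  2.0f, 3.0f, 0.75f };
  float16_t out[4];
  union xnn_f16_default_params params;  // unused by this kernel

  // `batch` is expressed in bytes, so scale the element count by the element size.
  xnn_f16_vmax_ukernel__fp16arith_x1(
      4 * sizeof(float16_t), a, b, out, &params);

  for (size_t i = 0; i < 4; i++) {
    printf("max(%f, %f) = %f\n", (double) a[i], (double) b[i], (double) out[i]);
  }
  return 0;
}
#endif  // XNN_F16_VMAX_USAGE_EXAMPLE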