// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$CHANNEL_SUBTILE = 4
$assert CHANNEL_TILE % CHANNEL_SUBTILE == 0
$CHANNEL_ROUND = 4
$assert MIDDLE_PASS_TILE <= LAST_PASS_TILE
$assert FIRST_PASS_TILE >= 1
$assert MIDDLE_PASS_TILE >= 1
$assert LAST_PASS_TILE >= 1
$assert ACCUMULATORS >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv_minmax_ukernel_${FIRST_PASS_TILE}f${MIDDLE_PASS_TILE}m${LAST_PASS_TILE}l${CHANNEL_TILE}c${CHANNEL_SUBTILE}s${CHANNEL_ROUND}r__${"neonfma" if FMA else "neon"}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    size_t kernel_size,
    float* buffer,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > ${FIRST_PASS_TILE});

  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);

  do {
    const float* w = weights;

    // First pass to process ${FIRST_PASS_TILE} inputs.
    {
      float* b = buffer;
      $for K in range(FIRST_PASS_TILE):
        const float* i${K} = input[${K}];
        assert(i${K} != NULL);
        if XNN_UNPREDICTABLE(i${K} != zero) {
          i${K} = (const float*) ((uintptr_t) i${K} + input_offset);
        }
      input += ${FIRST_PASS_TILE};

      // Process c channels and write to buffer.
      $if CHANNEL_TILE == 4:
        size_t c = 0;
        for (; c < channels; c += 4) {
          float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;

          $for K in range(FIRST_PASS_TILE):
            const float32x4_t vi${K}x0123 = vld1q_f32(i${K}); i${K} += 4;
            const float32x4_t vk${K}x0123 = vld1q_f32(w); w += 4;
            $if 1 <= K < ACCUMULATORS:
              float32x4_t vacc0123p${K} = vmulq_f32(vi${K}x0123, vk${K}x0123);
            $else:
              vacc0123p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc0123p${K % ACCUMULATORS}, vi${K}x0123, vk${K}x0123);

          $if ACCUMULATORS > 1:
            // Add up all accumulators to vacc0123p0
            $ACC_SLICE = 1
            $while ACC_SLICE < ACCUMULATORS:
              $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                $if A + ACC_SLICE < ACCUMULATORS:
                  vacc0123p${A} = vaddq_f32(vacc0123p${A}, vacc0123p${A + ACC_SLICE});
              $ACC_SLICE *= 2

          vst1q_f32(b, vacc0123p0); b += 4;
        }
      $else:
        size_t c = round_up_po2(channels, ${CHANNEL_ROUND});
        for (; c >= ${CHANNEL_TILE}; c -= ${CHANNEL_TILE}) {
          $for C in range(0, CHANNEL_TILE, 4):
            float32x4_t vacc${ABC[C:C+4]}p0 = vld1q_f32(w); w += 4;

          $for K in range(FIRST_PASS_TILE):
            $for C in range(0, CHANNEL_TILE, 4):
              const float32x4_t vi${K}x${ABC[C:C+4]} = vld1q_f32(i${K}); i${K} += 4;
            $for C in range(0, CHANNEL_TILE, 4):
              const float32x4_t vk${K}x${ABC[C:C+4]} = vld1q_f32(w); w += 4;
            $for C in range(0, CHANNEL_TILE, 4):
              $if 1 <= K < ACCUMULATORS:
                float32x4_t vacc${ABC[C:C+4]}p${K} = vmulq_f32(vi${K}x${ABC[C:C+4]}, vk${K}x${ABC[C:C+4]});
              $else:
                vacc${ABC[C:C+4]}p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc${ABC[C:C+4]}p${K % ACCUMULATORS}, vi${K}x${ABC[C:C+4]}, vk${K}x${ABC[C:C+4]});

          $if ACCUMULATORS > 1:
            // Add up all accumulators to vacc${ABC[0:4]}p0
            $ACC_SLICE = 1
            $while ACC_SLICE < ACCUMULATORS:
              $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                $if A + ACC_SLICE < ACCUMULATORS:
                  $for C in range(0, CHANNEL_TILE, 4):
                    vacc${ABC[C:C+4]}p${A} = vaddq_f32(vacc${ABC[C:C+4]}p${A}, vacc${ABC[C:C+4]}p${A + ACC_SLICE});
              $ACC_SLICE *= 2
          $for C in range(0, CHANNEL_TILE, 4):
            vst1q_f32(b, vacc${ABC[C:C+4]}p0); b += 4;
        }

        $if CHANNEL_TILE == 8:
          if (c != 0) {
            float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;

            $for K in range(FIRST_PASS_TILE):
              const float32x4_t vi${K}x0123 = vld1q_f32(i${K}); i${K} += 4;
              const float32x4_t vk${K}x0123 = vld1q_f32(w); w += 4;
              $if 1 <= K < ACCUMULATORS:
                float32x4_t vacc0123p${K} = vmulq_f32(vi${K}x0123, vk${K}x0123);
              $else:
                vacc0123p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc0123p${K % ACCUMULATORS}, vi${K}x0123, vk${K}x0123);

            $if ACCUMULATORS > 1:
              // Add up all accumulators to vacc0123p0
              $ACC_SLICE = 1
              $while ACC_SLICE < ACCUMULATORS:
                $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                  $if A + ACC_SLICE < ACCUMULATORS:
                    vacc0123p${A} = vaddq_f32(vacc0123p${A}, vacc0123p${A + ACC_SLICE});
                $ACC_SLICE *= 2

            vst1q_f32(b, vacc0123p0); b += 4;
          }
        $else:
          for (; c != 0; c -= 4) {
            float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;

            $for K in range(FIRST_PASS_TILE):
              const float32x4_t vi${K}x0123 = vld1q_f32(i${K}); i${K} += 4;
              const float32x4_t vk${K}x0123 = vld1q_f32(w); w += 4;
              $if 1 <= K < ACCUMULATORS:
                float32x4_t vacc0123p${K} = vmulq_f32(vi${K}x0123, vk${K}x0123);
              $else:
                vacc0123p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc0123p${K % ACCUMULATORS}, vi${K}x0123, vk${K}x0123);

            $if ACCUMULATORS > 1:
              // Add up all accumulators to vacc0123p0
              $ACC_SLICE = 1
              $while ACC_SLICE < ACCUMULATORS:
                $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                  $if A + ACC_SLICE < ACCUMULATORS:
                    vacc0123p${A} = vaddq_f32(vacc0123p${A}, vacc0123p${A + ACC_SLICE});
                $ACC_SLICE *= 2

            vst1q_f32(b, vacc0123p0); b += 4;
          }
    }

    // Middle pass to process ${MIDDLE_PASS_TILE} inputs in each iteration.
    for (size_t ks = kernel_size - ${FIRST_PASS_TILE}; ks > ${LAST_PASS_TILE}; ks -= ${MIDDLE_PASS_TILE}) {
      float* b = buffer;
      $for K in range(MIDDLE_PASS_TILE):
        const float* i${K} = input[${K}];
        assert(i${K} != NULL);
        if XNN_UNPREDICTABLE(i${K} != zero) {
          i${K} = (const float*) ((uintptr_t) i${K} + input_offset);
        }
      input += ${MIDDLE_PASS_TILE};

      $if CHANNEL_TILE == 4:
        size_t c = 0;
        for (; c < channels; c += 4) {
          float32x4_t vacc0123p0 = vld1q_f32(b);

          $for K in range(MIDDLE_PASS_TILE):
            const float32x4_t vi${K}x0123 = vld1q_f32(i${K}); i${K} += 4;
            const float32x4_t vk${K}x0123 = vld1q_f32(w); w += 4;
            $if 1 <= K < ACCUMULATORS:
              float32x4_t vacc0123p${K} = vmulq_f32(vi${K}x0123, vk${K}x0123);
            $else:
              vacc0123p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc0123p${K % ACCUMULATORS}, vi${K}x0123, vk${K}x0123);

          $if ACCUMULATORS > 1:
            // Add up all accumulators to vacc0123p0
            $ACC_SLICE = 1
            $while ACC_SLICE < ACCUMULATORS:
              $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                $if A + ACC_SLICE < ACCUMULATORS:
                  vacc0123p${A} = vaddq_f32(vacc0123p${A}, vacc0123p${A + ACC_SLICE});
              $ACC_SLICE *= 2

          vst1q_f32(b, vacc0123p0); b += 4;
        }
      $else:
        size_t c = round_up_po2(channels, ${CHANNEL_ROUND});
        for (; c >= ${CHANNEL_TILE}; c -= ${CHANNEL_TILE}) {
          float32x4_t vacc0123p0 = vld1q_f32(b);
          $for C in range(4, CHANNEL_TILE, 4):
            float32x4_t vacc${ABC[C:C+4]}p0 = vld1q_f32(b + ${C});

          $for K in range(MIDDLE_PASS_TILE):
            $for C in range(0, CHANNEL_TILE, 4):
              const float32x4_t vi${K}x${ABC[C:C+4]} = vld1q_f32(i${K}); i${K} += 4;
            $for C in range(0, CHANNEL_TILE, 4):
              const float32x4_t vk${K}x${ABC[C:C+4]} = vld1q_f32(w); w += 4;
            $for C in range(0, CHANNEL_TILE, 4):
              $if 1 <= K < ACCUMULATORS:
                float32x4_t vacc${ABC[C:C+4]}p${K} = vmulq_f32(vi${K}x${ABC[C:C+4]}, vk${K}x${ABC[C:C+4]});
              $else:
                vacc${ABC[C:C+4]}p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc${ABC[C:C+4]}p${K % ACCUMULATORS}, vi${K}x${ABC[C:C+4]}, vk${K}x${ABC[C:C+4]});
          $if ACCUMULATORS > 1:
            // Add up all accumulators to vacc${ABC[0:4]}p0
            $ACC_SLICE = 1
            $while ACC_SLICE < ACCUMULATORS:
              $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                $if A + ACC_SLICE < ACCUMULATORS:
                  $for C in range(0, CHANNEL_TILE, 4):
                    vacc${ABC[C:C+4]}p${A} = vaddq_f32(vacc${ABC[C:C+4]}p${A}, vacc${ABC[C:C+4]}p${A + ACC_SLICE});
              $ACC_SLICE *= 2

          $for C in range(0, CHANNEL_TILE, 4):
            vst1q_f32(b, vacc${ABC[C:C+4]}p0); b += 4;
        }

        $if CHANNEL_TILE == 8:
          if (c != 0) {
            float32x4_t vacc0123p0 = vld1q_f32(b);

            $for K in range(MIDDLE_PASS_TILE):
              const float32x4_t vi${K}x0123 = vld1q_f32(i${K}); i${K} += 4;
              const float32x4_t vk${K}x0123 = vld1q_f32(w); w += 4;
              $if 1 <= K < ACCUMULATORS:
                float32x4_t vacc0123p${K} = vmulq_f32(vi${K}x0123, vk${K}x0123);
              $else:
                vacc0123p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc0123p${K % ACCUMULATORS}, vi${K}x0123, vk${K}x0123);

            $if ACCUMULATORS > 1:
              // Add up all accumulators to vacc0123p0
              $ACC_SLICE = 1
              $while ACC_SLICE < ACCUMULATORS:
                $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                  $if A + ACC_SLICE < ACCUMULATORS:
                    vacc0123p${A} = vaddq_f32(vacc0123p${A}, vacc0123p${A + ACC_SLICE});
                $ACC_SLICE *= 2

            vst1q_f32(b, vacc0123p0); b += 4;
          }
        $else:
          for (; c != 0; c -= 4) {
            float32x4_t vacc0123p0 = vld1q_f32(b);

            $for K in range(MIDDLE_PASS_TILE):
              const float32x4_t vi${K}x0123 = vld1q_f32(i${K}); i${K} += 4;
              const float32x4_t vk${K}x0123 = vld1q_f32(w); w += 4;
              $if 1 <= K < ACCUMULATORS:
                float32x4_t vacc0123p${K} = vmulq_f32(vi${K}x0123, vk${K}x0123);
              $else:
                vacc0123p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc0123p${K % ACCUMULATORS}, vi${K}x0123, vk${K}x0123);

            $if ACCUMULATORS > 1:
              // Add up all accumulators to vacc0123p0
              $ACC_SLICE = 1
              $while ACC_SLICE < ACCUMULATORS:
                $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                  $if A + ACC_SLICE < ACCUMULATORS:
                    vacc0123p${A} = vaddq_f32(vacc0123p${A}, vacc0123p${A + ACC_SLICE});
                $ACC_SLICE *= 2

            vst1q_f32(b, vacc0123p0); b += 4;
          }
    }

    // Last pass to process up to ${LAST_PASS_TILE} inputs.
    {
      float* b = buffer;
      $for K in range(0, LAST_PASS_TILE):
        const float* i${K} = input[${K}];
        assert(i${K} != NULL);
        if XNN_UNPREDICTABLE(i${K} != zero) {
          i${K} = (const float*) ((uintptr_t) i${K} + input_offset);
        }

      size_t c = channels;
      $if CHANNEL_TILE > 4:
        for (; c >= ${CHANNEL_TILE}; c -= ${CHANNEL_TILE}) {
          $for C in range(0, CHANNEL_TILE, 4):
            float32x4_t vacc${ABC[C:C+4]}p0 = vld1q_f32(b); b += 4;

          $for K in range(LAST_PASS_TILE):
            $for C in range(0, CHANNEL_TILE, 4):
              const float32x4_t vi${K}x${ABC[C:C+4]} = vld1q_f32(i${K}); i${K} += 4;
            $for C in range(0, CHANNEL_TILE, 4):
              float32x4_t vk${K}x${ABC[C:C+4]} = vld1q_f32(w); w += 4;
            $for C in range(0, CHANNEL_TILE, 4):
              $if 1 <= K < ACCUMULATORS:
                float32x4_t vacc${ABC[C:C+4]}p${K} = vmulq_f32(vi${K}x${ABC[C:C+4]}, vk${K}x${ABC[C:C+4]});
              $else:
                vacc${ABC[C:C+4]}p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc${ABC[C:C+4]}p${K % ACCUMULATORS}, vi${K}x${ABC[C:C+4]}, vk${K}x${ABC[C:C+4]});

          $if ACCUMULATORS > 1:
            // Add up all accumulators to vacc${ABC[0:4]}p0
            $ACC_SLICE = 1
            $while ACC_SLICE < ACCUMULATORS:
              $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                $if A + ACC_SLICE < ACCUMULATORS:
                  $for C in range(0, CHANNEL_TILE, 4):
                    vacc${ABC[C:C+4]}p${A} = vaddq_f32(vacc${ABC[C:C+4]}p${A}, vacc${ABC[C:C+4]}p${A + ACC_SLICE});
              $ACC_SLICE *= 2

          $for C in range(0, CHANNEL_TILE, 4):
            float32x4_t vacc${ABC[C:C+4]} = vmaxq_f32(vacc${ABC[C:C+4]}p0, vmin);
          $for C in range(0, CHANNEL_TILE, 4):
            vacc${ABC[C:C+4]} = vminq_f32(vacc${ABC[C:C+4]}, vmax);

          $for C in range(0, CHANNEL_TILE, 4):
            vst1q_f32(output, vacc${ABC[C:C+4]}); output += 4;
        }

      for (; c >= 4; c -= 4) {
        float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;

        $for K in range(LAST_PASS_TILE):
          const float32x4_t vi${K}x0123 = vld1q_f32(i${K}); i${K} += 4;
          float32x4_t vk${K}x0123 = vld1q_f32(w); w += 4;
          $if 1 <= K < ACCUMULATORS:
            float32x4_t vacc0123p${K} = vmulq_f32(vi${K}x0123, vk${K}x0123);
          $else:
            vacc0123p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc0123p${K % ACCUMULATORS}, vi${K}x0123, vk${K}x0123);

        $if ACCUMULATORS > 1:
          // Add up all accumulators to vacc0123p0
          $ACC_SLICE = 1
          $while ACC_SLICE < ACCUMULATORS:
            $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
              $if A + ACC_SLICE < ACCUMULATORS:
                vacc0123p${A} = vaddq_f32(vacc0123p${A}, vacc0123p${A + ACC_SLICE});
            $ACC_SLICE *= 2

        float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
        vacc0123 = vminq_f32(vacc0123, vmax);

        vst1q_f32(output, vacc0123); output += 4;
      }

      if XNN_UNLIKELY(c != 0) {
        float32x4_t vacc0123p0 = vld1q_f32(b);

        $for K in range(LAST_PASS_TILE):
          const float32x4_t vi${K}x0123 = vld1q_f32(i${K});
          float32x4_t vk${K}x0123 = vld1q_f32(w); w += 4;
          $if 1 <= K < ACCUMULATORS:
            float32x4_t vacc0123p${K} = vmulq_f32(vi${K}x0123, vk${K}x0123);
          $else:
            vacc0123p${K % ACCUMULATORS} = ${VMULADDQ_F32}(vacc0123p${K % ACCUMULATORS}, vi${K}x0123, vk${K}x0123);

        $if ACCUMULATORS > 1:
          // Add up all accumulators to vacc${ABC[0:4]}p0
          $ACC_SLICE = 1
          $while ACC_SLICE < ACCUMULATORS:
            $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
              $if A + ACC_SLICE < ACCUMULATORS:
                vacc0123p${A} = vaddq_f32(vacc0123p${A}, vacc0123p${A + ACC_SLICE});
            $ACC_SLICE *= 2

        float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
        vacc0123 = vminq_f32(vacc0123, vmax);

        float32x2_t vacc01 = vget_low_f32(vacc0123);
        if (c & 2) {
          vst1_f32(output, vacc01); output += 2;
          vacc01 = vget_high_f32(vacc0123);
        }
        if (c & 1) {
          vst1_lane_f32(output, vacc01, 0); output += 1;
        }
      }
    }

    input = (const float**) ((uintptr_t) input + input_stride);
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}