// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/backends/arm/math/conv_block_utils.h"
#include "lite/backends/arm/math/conv_impl.h"
#include "lite/core/context.h"
#ifdef ARM_WITH_OMP
#include <omp.h>
#endif

namespace paddle {
namespace lite {
namespace arm {
namespace math {

// Register-tiling block sizes shared by the direct-conv kernels in this file:
// each tile covers OUT_C_BLOCK output channels x OUT_H_BLOCK output rows x
// OUT_W_BLOCK output columns. The write-back helpers below assume the
// channel-interleaved (c4) layout implied by OUT_C_BLOCK == 4.
const int OUT_C_BLOCK = 4;
const int OUT_H_BLOCK = 2;
const int OUT_W_BLOCK = 4;

// Computes the scratch-buffer size (in bytes) required by the 3x3 stride-2
// direct convolution kernel for the given conv param and ARM context.
//
// The workspace holds one pre-packed input block (hin_r_block rows of
// win_round * ic floats) plus one per-thread pre-output block of
// OUT_C_BLOCK * hout_r_block * wout_round floats.
//
// param: convolution parameters; only input/output dims are consulted.
// ctx:   ARM context supplying the thread count and last-level-cache size.
size_t conv3x3s2_direct_workspace_size(const operators::ConvParam& param,
                                       ARMContext* ctx) {
  auto dim_in = param.x->dims();
  auto dim_out = param.output->dims();
  const int threads = ctx->threads();
  // Budget the block sizes against the last-level cache, counted in floats.
  int llc_size = ctx->llc_size() / sizeof(float);
  int ow = dim_out[3];
  int oh = dim_out[2];
  int ic = dim_in[1];
  // Round the output width up to a whole number of OUT_W_BLOCK columns; the
  // corresponding input width for stride 2 with a 3x3 kernel is 2*w + 1.
  const int wout_round = ROUNDUP(ow, OUT_W_BLOCK);
  const int win_round = wout_round * 2 /*stride_w*/ + 1;
  // NOTE(review): hin_r_block is sized for a single OUT_H_BLOCK step
  // (2*OUT_H_BLOCK + 1 input rows), not for hout_r_block output rows —
  // confirm this matches the row count the paired kernel actually pre-packs.
  const int hin_r_block = OUT_H_BLOCK * 2 /*stride_h*/ + 1;

  // Choose the largest output-row block whose working set fits the LLC,
  // rounded down to a multiple of OUT_H_BLOCK, but at least one OUT_H_BLOCK.
  int hout_r_block =
      (llc_size - 2 * wout_round * ic - ic) /
      ((4 * wout_round + 2) * ic + wout_round * OUT_C_BLOCK * threads);
  hout_r_block = hout_r_block > oh ? oh : hout_r_block;
  hout_r_block = (hout_r_block / OUT_H_BLOCK) * OUT_H_BLOCK;
  hout_r_block = hout_r_block < OUT_H_BLOCK ? OUT_H_BLOCK : hout_r_block;

  int in_len = win_round * ic;
  int pre_in_size = hin_r_block * in_len;
  int pre_out_size = OUT_C_BLOCK * hout_r_block * wout_round;

  return sizeof(float) * (pre_in_size + threads * pre_out_size);
}

// Writes one 4-channel-interleaved (c4) tile of convolution results back to
// the NCHW output tensor, adding the per-channel bias and applying the
// requested activation.
//
// din:      c4 tile, layout [row][pixel][4 channels] (channel-innermost);
//           each row occupies w_round * 4 floats.
// dout:     base pointer of the full NCHW output tensor.
// cs, ce:   first/last output channel of the tile (ce is not read here; the
//           tile always carries exactly 4 channels).
// hs, he:   first/one-past-last output row; rows >= height are clipped.
// ws, we:   first/one-past-last output column; columns >= width are clipped.
// channel:  total channel count of dout (not read here).
// height, width: spatial dims of dout.
// flag_act: 0 = none, 1 = relu, 2 = relu6 (alpha is the clip value),
//           3 = leakyrelu (alpha is the negative slope).
// alpha:    activation parameter for relu6 / leakyrelu.
// bias:     4 per-channel bias values; must be readable (pass zeros when the
//           conv has no bias) since vld1q_f32 always loads 4 floats.
//
// NOTE(review): the asm pipelines below keep loads one 4-pixel group ahead of
// the stores, so each row reads up to 16 floats past the last processed
// pixel; this assumes din rows are padded to w_round (and that the final row
// tolerates the over-read) — confirm against the pre-output buffer sizing.
void write_to_oc4_fp32(const float* din,
                       float* dout,
                       int cs,
                       int ce,
                       int hs,
                       int he,
                       int ws,
                       int we,
                       int channel,
                       int height,
                       int width,
                       int flag_act,
                       float alpha,
                       const float* bias) {
  // Stride between consecutive channel planes of the NCHW output.
  int size_c_out = width * height;

  // Destination row pointers for the four consecutive output channels.
  float* doutc0r0 = dout + cs * size_c_out + hs * width + ws;
  float* doutc1r0 = doutc0r0 + size_c_out;
  float* doutc2r0 = doutc1r0 + size_c_out;
  float* doutc3r0 = doutc2r0 + size_c_out;

  int size_h = (he > height ? height : he) - hs;  // size_h == hei_n
  // Full (padded) tile row width in pixels; din rows advance by w_round * 4.
  int w_round = we - ws;

  // Clip the column range to the tensor width; split the valid span into
  // whole 4-pixel groups (cnt_col) plus a scalar remainder (remain).
  int valid_we = we > width ? width : we;
  int win = valid_we - ws;
  int w_in_stride = w_round << 2;
  int cnt_col = win >> 2;
  int remain = win % 4;
  float32x4_t vzero = vdupq_n_f32(0.f);
  float32x4_t vbias = vld1q_f32(bias);
  float32x4_t valpha = vdupq_n_f32(alpha);
  // Staging buffers for the final (possibly partial) 4-pixel group of each
  // row; only the first `remain` lanes are copied to the output.
  float tmp0[4] = {0.f, 0.f, 0.f, 0.f};
  float tmp1[4] = {0.f, 0.f, 0.f, 0.f};
  float tmp2[4] = {0.f, 0.f, 0.f, 0.f};
  float tmp3[4] = {0.f, 0.f, 0.f, 0.f};

  // Each asm block below: adds bias, transposes a 4-pixel-by-4-channel group
  // (c4 interleaved -> one vector per channel), applies the activation, and
  // stores one vector per channel row. Loads run one group ahead of stores;
  // the "2:" tail handles the last group into tmp0..tmp3. On aarch64 the
  // transpose yields channels in order c0,c2,c1,c3, hence the swapped store
  // targets (q9 -> doutc2r0, q10 -> doutc1r0).
  switch (flag_act) {
    case 0:  // no act
      for (int i = 0; i < size_h; i++) {
        float* doutc0_ptr = doutc0r0;
        float* doutc1_ptr = doutc1r0;
        float* doutc2_ptr = doutc2r0;
        float* doutc3_ptr = doutc3r0;
        const float* din_hei_ptr = din;
        int cnt = cnt_col;
#ifdef __aarch64__
        // Bias + 4x4 transpose (trn1/trn2 on .4s then .2d), no activation.
        asm volatile(
            "cmp %w[cnt], #1\n"
            "ldp  q0, q1, [%[din_ptr]], #32\n"
            "ldp  q2, q3, [%[din_ptr]], #32\n"
            "prfm pldl1keep, [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "fadd v0.4s, v0.4s, %[vbias].4s\n"
            "fadd v1.4s, v1.4s, %[vbias].4s\n"
            "fadd v2.4s, v2.4s, %[vbias].4s\n"
            "fadd v3.4s, v3.4s, %[vbias].4s\n"
            "trn1  v4.4s, v0.4s, v1.4s\n"
            "trn2  v5.4s, v0.4s, v1.4s\n"
            "trn1  v6.4s, v2.4s, v3.4s\n"
            "trn2  v7.4s, v2.4s, v3.4s\n"
            "ldp  q0, q1, [%[din_ptr]], #32\n"
            "trn1 v8.2d, v4.2d, v6.2d\n"
            "trn2 v9.2d, v4.2d, v6.2d\n"
            "ldp  q2, q3, [%[din_ptr]], #32\n"
            "trn1 v10.2d, v5.2d, v7.2d\n"
            "trn2 v11.2d, v5.2d, v7.2d\n"
            "subs %w[cnt], %w[cnt], #1\n"
            "str  q8, [%[doutc0r0]], #16 \n"
            "str  q9, [%[doutc2r0]], #16 \n"
            "str  q10, [%[doutc1r0]], #16 \n"
            "str  q11, [%[doutc3r0]], #16 \n"
            "bne 1b\n"
            "2: \n"
            "fadd v0.4s, v0.4s, %[vbias].4s\n"
            "fadd v1.4s, v1.4s, %[vbias].4s\n"
            "fadd v2.4s, v2.4s, %[vbias].4s\n"
            "fadd v3.4s, v3.4s, %[vbias].4s\n"
            "trn1 v4.4s, v0.4s, v1.4s\n"
            "trn2 v5.4s, v0.4s, v1.4s\n"
            "trn1 v6.4s, v2.4s, v3.4s\n"
            "trn2 v7.4s, v2.4s, v3.4s\n"
            "trn1 v8.2d, v4.2d, v6.2d\n"
            "trn2 v9.2d, v4.2d, v6.2d\n"
            "trn1 v10.2d, v5.2d, v7.2d\n"
            "trn2 v11.2d, v5.2d, v7.2d\n"
            "str  q8, [%[tmp0]]\n"
            "str  q9, [%[tmp2]]\n"
            "str  q10, [%[tmp1]]\n"
            "str  q11, [%[tmp3]]\n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [doutc1r0] "+r"(doutc1_ptr),
              [doutc2r0] "+r"(doutc2_ptr),
              [doutc3r0] "+r"(doutc3_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [tmp0] "r"(tmp0),
              [tmp1] "r"(tmp1),
              [tmp2] "r"(tmp2),
              [tmp3] "r"(tmp3)
            : "cc",
              "memory",
              "v0",
              "v1",
              "v2",
              "v3",
              "v4",
              "v5",
              "v6",
              "v7",
              "v8",
              "v9",
              "v10",
              "v11");
#else
        // armv7: bias + transpose via vtrn.32/vswp, no activation.
        asm volatile(
            "cmp %[cnt], #1\n"
            "vld1.32 {d8-d11}, [%[din_ptr]]!\n"
            "vld1.32 {d12-d15}, [%[din_ptr]]!\n"
            "pld [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "vadd.f32 q8, q4, %q[vbias]\n"
            "vadd.f32 q9, q5, %q[vbias]\n"
            "vadd.f32 q10, q6, %q[vbias]\n"
            "vadd.f32 q11, q7, %q[vbias]\n"
            "vtrn.32 q8, q9\n"
            "vtrn.32 q10, q11\n"
            "vswp    d17, d20\n"
            "vswp    d19, d22\n"
            "vld1.32 {d8-d11}, [%[din_ptr]]!\n"
            "subs   %[cnt], %[cnt], #1\n"
            "vst1.32  {d16-d17}, [%[doutc0r0]]!\n"
            "vst1.32  {d18-d19}, [%[doutc1r0]]!\n"
            "vld1.32 {d12-d15}, [%[din_ptr]]!\n"
            "vst1.32  {d20-d21}, [%[doutc2r0]]!\n"
            "vst1.32  {d22-d23}, [%[doutc3r0]]!\n"
            "bne 1b\n"
            "2: \n"
            "vadd.f32 q8, q4, %q[vbias]\n"
            "vadd.f32 q9, q5, %q[vbias]\n"
            "vadd.f32 q10, q6, %q[vbias]\n"
            "vadd.f32 q11, q7, %q[vbias]\n"
            "vtrn.32 q8, q9\n"
            "vtrn.32 q10, q11\n"
            "vswp    d17, d20\n"
            "vswp    d19, d22\n"
            "vst1.32  {d16-d17}, [%[tmp0]]\n"
            "vst1.32  {d18-d19}, [%[tmp1]]\n"
            "vst1.32  {d20-d21}, [%[tmp2]]\n"
            "vst1.32  {d22-d23}, [%[tmp3]]\n"
            "3: \n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [doutc1r0] "+r"(doutc1_ptr),
              [doutc2r0] "+r"(doutc2_ptr),
              [doutc3r0] "+r"(doutc3_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [tmp0] "r"(tmp0),
              [tmp1] "r"(tmp1),
              [tmp2] "r"(tmp2),
              [tmp3] "r"(tmp3)
            : "cc",
              "memory",
              "q3",
              "q4",
              "q5",
              "q6",
              "q7",
              "q8",
              "q9",
              "q10",
              "q11");
#endif
        // Advance the per-channel row pointers to the next output row.
        doutc0r0 += width;
        doutc1r0 += width;
        doutc2r0 += width;
        doutc3r0 += width;
        // Copy the valid lanes of the final partial group. doutc*_ptr were
        // advanced inside the asm and now point just past the last full store.
        if (remain) {
          for (int j = 0; j < remain; j++) {
            *doutc0_ptr++ = tmp0[j];
            *doutc1_ptr++ = tmp1[j];
            *doutc2_ptr++ = tmp2[j];
            *doutc3_ptr++ = tmp3[j];
          }
        }
        // Next tile row: w_round pixels * 4 channels.
        din += w_in_stride;
      }
      break;
    case 1:  // relu
      for (int i = 0; i < size_h; i++) {
        float* doutc0_ptr = doutc0r0;
        float* doutc1_ptr = doutc1r0;
        float* doutc2_ptr = doutc2r0;
        float* doutc3_ptr = doutc3r0;
        const float* din_hei_ptr = din;
        int cnt = cnt_col;
#ifdef __aarch64__
        // Same pipeline as case 0 plus fmax-with-zero (relu) before stores.
        asm volatile(
            "cmp %w[cnt], #1\n"
            "ldp  q0, q1, [%[din_ptr]], #32\n"
            "ldp  q2, q3, [%[din_ptr]], #32\n"
            "prfm pldl1keep, [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "fadd v0.4s, v0.4s, %[vbias].4s\n"
            "fadd v1.4s, v1.4s, %[vbias].4s\n"
            "fadd v2.4s, v2.4s, %[vbias].4s\n"
            "fadd v3.4s, v3.4s, %[vbias].4s\n"
            "trn1  v4.4s, v0.4s, v1.4s\n"
            "trn2  v5.4s, v0.4s, v1.4s\n"
            "trn1  v6.4s, v2.4s, v3.4s\n"
            "trn2  v7.4s, v2.4s, v3.4s\n"
            "ldp  q0, q1, [%[din_ptr]], #32\n"
            "trn1 v8.2d, v4.2d, v6.2d\n"
            "trn2 v9.2d, v4.2d, v6.2d\n"
            "ldp  q2, q3, [%[din_ptr]], #32\n"
            "trn1 v10.2d, v5.2d, v7.2d\n"
            "trn2 v11.2d, v5.2d, v7.2d\n"
            "fmax v8.4s, v8.4s, %[vzero].4s\n"
            "fmax v9.4s, v9.4s, %[vzero].4s\n"
            "fmax v10.4s, v10.4s, %[vzero].4s\n"
            "fmax v11.4s, v11.4s, %[vzero].4s\n"
            "subs %w[cnt], %w[cnt], #1\n"
            "str  q8, [%[doutc0r0]], #16 \n"
            "str  q9, [%[doutc2r0]], #16 \n"
            "str  q10, [%[doutc1r0]], #16 \n"
            "str  q11, [%[doutc3r0]], #16 \n"
            "bne 1b\n"
            "2: \n"
            "fadd v0.4s, v0.4s, %[vbias].4s\n"
            "fadd v1.4s, v1.4s, %[vbias].4s\n"
            "fadd v2.4s, v2.4s, %[vbias].4s\n"
            "fadd v3.4s, v3.4s, %[vbias].4s\n"
            "trn1 v4.4s, v0.4s, v1.4s\n"
            "trn2 v5.4s, v0.4s, v1.4s\n"
            "trn1 v6.4s, v2.4s, v3.4s\n"
            "trn2 v7.4s, v2.4s, v3.4s\n"
            "trn1 v8.2d, v4.2d, v6.2d\n"
            "trn2 v9.2d, v4.2d, v6.2d\n"
            "trn1 v10.2d, v5.2d, v7.2d\n"
            "trn2 v11.2d, v5.2d, v7.2d\n"
            "fmax v8.4s, v8.4s, %[vzero].4s\n"
            "fmax v9.4s, v9.4s, %[vzero].4s\n"
            "fmax v10.4s, v10.4s, %[vzero].4s\n"
            "fmax v11.4s, v11.4s, %[vzero].4s\n"
            "str  q8, [%[tmp0]]\n"
            "str  q9, [%[tmp2]]\n"
            "str  q10, [%[tmp1]]\n"
            "str  q11, [%[tmp3]]\n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [doutc1r0] "+r"(doutc1_ptr),
              [doutc2r0] "+r"(doutc2_ptr),
              [doutc3r0] "+r"(doutc3_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [tmp0] "r"(tmp0),
              [tmp1] "r"(tmp1),
              [tmp2] "r"(tmp2),
              [tmp3] "r"(tmp3)
            : "cc",
              "memory",
              "v0",
              "v1",
              "v2",
              "v3",
              "v4",
              "v5",
              "v6",
              "v7",
              "v8",
              "v9",
              "v10",
              "v11");
#else
        // armv7: bias + transpose + vmax-with-zero (relu).
        asm volatile(
            "cmp %[cnt], #1\n"
            "vld1.32 {d8-d11}, [%[din_ptr]]!\n"
            "vld1.32 {d12-d15}, [%[din_ptr]]!\n"
            "pld [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "vadd.f32 q8, q4, %q[vbias]\n"
            "vadd.f32 q9, q5, %q[vbias]\n"
            "vadd.f32 q10, q6, %q[vbias]\n"
            "vadd.f32 q11, q7, %q[vbias]\n"
            "vtrn.32 q8, q9\n"
            "vtrn.32 q10, q11\n"
            "vswp    d17, d20\n"
            "vswp    d19, d22\n"
            "vld1.32 {d8-d11}, [%[din_ptr]]!\n"
            "vmax.f32 q8, q8, %q[vzero]\n"
            "vmax.f32 q9, q9, %q[vzero]\n"
            "vmax.f32 q10, q10, %q[vzero]\n"
            "vmax.f32 q11, q11, %q[vzero]\n"
            "subs   %[cnt], %[cnt], #1\n"
            "vst1.32  {d16-d17}, [%[doutc0r0]]!\n"
            "vst1.32  {d18-d19}, [%[doutc1r0]]!\n"
            "vld1.32 {d12-d15}, [%[din_ptr]]!\n"
            "vst1.32  {d20-d21}, [%[doutc2r0]]!\n"
            "vst1.32  {d22-d23}, [%[doutc3r0]]!\n"
            "bne 1b\n"
            "2: \n"
            "vadd.f32 q8, q4, %q[vbias]\n"
            "vadd.f32 q9, q5, %q[vbias]\n"
            "vadd.f32 q10, q6, %q[vbias]\n"
            "vadd.f32 q11, q7, %q[vbias]\n"
            "vtrn.32 q8, q9\n"
            "vtrn.32 q10, q11\n"
            "vswp    d17, d20\n"
            "vswp    d19, d22\n"
            "vmax.f32 q8, q8, %q[vzero]\n"
            "vmax.f32 q9, q9, %q[vzero]\n"
            "vmax.f32 q10, q10, %q[vzero]\n"
            "vmax.f32 q11, q11, %q[vzero]\n"
            "vst1.32  {d16-d17}, [%[tmp0]]\n"
            "vst1.32  {d18-d19}, [%[tmp1]]\n"
            "vst1.32  {d20-d21}, [%[tmp2]]\n"
            "vst1.32  {d22-d23}, [%[tmp3]]\n"
            "3: \n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [doutc1r0] "+r"(doutc1_ptr),
              [doutc2r0] "+r"(doutc2_ptr),
              [doutc3r0] "+r"(doutc3_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [tmp0] "r"(tmp0),
              [tmp1] "r"(tmp1),
              [tmp2] "r"(tmp2),
              [tmp3] "r"(tmp3)
            : "cc",
              "memory",
              "q3",
              "q4",
              "q5",
              "q6",
              "q7",
              "q8",
              "q9",
              "q10",
              "q11");
#endif
        // Advance the per-channel row pointers to the next output row.
        doutc0r0 += width;
        doutc1r0 += width;
        doutc2r0 += width;
        doutc3r0 += width;
        // Copy the valid lanes of the final partial group.
        if (remain) {
          for (int j = 0; j < remain; j++) {
            *doutc0_ptr++ = tmp0[j];
            *doutc1_ptr++ = tmp1[j];
            *doutc2_ptr++ = tmp2[j];
            *doutc3_ptr++ = tmp3[j];
          }
        }
        din += w_in_stride;
      }
      break;
    case 2:  // relu6
      for (int i = 0; i < size_h; i++) {
        float* doutc0_ptr = doutc0r0;
        float* doutc1_ptr = doutc1r0;
        float* doutc2_ptr = doutc2r0;
        float* doutc3_ptr = doutc3r0;
        const float* din_hei_ptr = din;
        int cnt = cnt_col;
#ifdef __aarch64__
        // relu6: fmax with zero then fmin with valpha (the clip value).
        asm volatile(
            "cmp %w[cnt], #1\n"
            "ldp  q0, q1, [%[din_ptr]], #32\n"
            "ldp  q2, q3, [%[din_ptr]], #32\n"
            "prfm pldl1keep, [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "fadd v0.4s, v0.4s, %[vbias].4s\n"
            "fadd v1.4s, v1.4s, %[vbias].4s\n"
            "fadd v2.4s, v2.4s, %[vbias].4s\n"
            "fadd v3.4s, v3.4s, %[vbias].4s\n"
            "trn1  v4.4s, v0.4s, v1.4s\n"
            "trn2  v5.4s, v0.4s, v1.4s\n"
            "trn1  v6.4s, v2.4s, v3.4s\n"
            "trn2  v7.4s, v2.4s, v3.4s\n"
            "ldp  q0, q1, [%[din_ptr]], #32\n"
            "trn1 v8.2d, v4.2d, v6.2d\n"
            "trn2 v9.2d, v4.2d, v6.2d\n"
            "ldp  q2, q3, [%[din_ptr]], #32\n"
            "trn1 v10.2d, v5.2d, v7.2d\n"
            "trn2 v11.2d, v5.2d, v7.2d\n"
            "fmax v8.4s, v8.4s, %[vzero].4s\n"
            "fmax v9.4s, v9.4s, %[vzero].4s\n"
            "fmax v10.4s, v10.4s, %[vzero].4s\n"
            "fmax v11.4s, v11.4s, %[vzero].4s\n"
            "fmin v8.4s, v8.4s, %[valpha].4s\n"
            "fmin v9.4s, v9.4s, %[valpha].4s\n"
            "fmin v10.4s, v10.4s, %[valpha].4s\n"
            "fmin v11.4s, v11.4s, %[valpha].4s\n"
            "subs %w[cnt], %w[cnt], #1\n"
            "str  q8, [%[doutc0r0]], #16 \n"
            "str  q9, [%[doutc2r0]], #16 \n"
            "str  q10, [%[doutc1r0]], #16 \n"
            "str  q11, [%[doutc3r0]], #16 \n"
            "bne 1b\n"
            "2: \n"
            "fadd v0.4s, v0.4s, %[vbias].4s\n"
            "fadd v1.4s, v1.4s, %[vbias].4s\n"
            "fadd v2.4s, v2.4s, %[vbias].4s\n"
            "fadd v3.4s, v3.4s, %[vbias].4s\n"
            "trn1 v4.4s, v0.4s, v1.4s\n"
            "trn2 v5.4s, v0.4s, v1.4s\n"
            "trn1 v6.4s, v2.4s, v3.4s\n"
            "trn2 v7.4s, v2.4s, v3.4s\n"
            "trn1 v8.2d, v4.2d, v6.2d\n"
            "trn2 v9.2d, v4.2d, v6.2d\n"
            "trn1 v10.2d, v5.2d, v7.2d\n"
            "trn2 v11.2d, v5.2d, v7.2d\n"
            "fmax v8.4s, v8.4s, %[vzero].4s\n"
            "fmax v9.4s, v9.4s, %[vzero].4s\n"
            "fmax v10.4s, v10.4s, %[vzero].4s\n"
            "fmax v11.4s, v11.4s, %[vzero].4s\n"
            "fmin v8.4s, v8.4s, %[valpha].4s\n"
            "fmin v9.4s, v9.4s, %[valpha].4s\n"
            "fmin v10.4s, v10.4s, %[valpha].4s\n"
            "fmin v11.4s, v11.4s, %[valpha].4s\n"
            "str  q8, [%[tmp0]]\n"
            "str  q9, [%[tmp2]]\n"
            "str  q10, [%[tmp1]]\n"
            "str  q11, [%[tmp3]]\n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [doutc1r0] "+r"(doutc1_ptr),
              [doutc2r0] "+r"(doutc2_ptr),
              [doutc3r0] "+r"(doutc3_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [valpha] "w"(valpha),
              [tmp0] "r"(tmp0),
              [tmp1] "r"(tmp1),
              [tmp2] "r"(tmp2),
              [tmp3] "r"(tmp3)
            : "cc",
              "memory",
              "v0",
              "v1",
              "v2",
              "v3",
              "v4",
              "v5",
              "v6",
              "v7",
              "v8",
              "v9",
              "v10",
              "v11");
#else
        // armv7: bias + transpose + vmax/vmin clamp (relu6).
        asm volatile(
            "cmp %[cnt], #1\n"
            "vld1.32 {d8-d11}, [%[din_ptr]]!\n"
            "vld1.32 {d12-d15}, [%[din_ptr]]!\n"
            "pld [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "vadd.f32 q8, q4, %q[vbias]\n"
            "vadd.f32 q9, q5, %q[vbias]\n"
            "vadd.f32 q10, q6, %q[vbias]\n"
            "vadd.f32 q11, q7, %q[vbias]\n"
            "vtrn.32 q8, q9\n"
            "vtrn.32 q10, q11\n"
            "vswp    d17, d20\n"
            "vswp    d19, d22\n"
            "vld1.32 {d8-d11}, [%[din_ptr]]!\n"
            "vmax.f32 q8, q8, %q[vzero]\n"
            "vmax.f32 q9, q9, %q[vzero]\n"
            "vmax.f32 q10, q10, %q[vzero]\n"
            "vmax.f32 q11, q11, %q[vzero]\n"
            "vmin.f32 q8, q8, %q[valpha]\n"
            "vmin.f32 q9, q9, %q[valpha]\n"
            "vmin.f32 q10, q10, %q[valpha]\n"
            "vmin.f32 q11, q11, %q[valpha]\n"
            "subs   %[cnt], %[cnt], #1\n"
            "vst1.32  {d16-d17}, [%[doutc0r0]]!\n"
            "vst1.32  {d18-d19}, [%[doutc1r0]]!\n"
            "vld1.32 {d12-d15}, [%[din_ptr]]!\n"
            "vst1.32  {d20-d21}, [%[doutc2r0]]!\n"
            "vst1.32  {d22-d23}, [%[doutc3r0]]!\n"
            "bne 1b\n"
            "2: \n"
            "vadd.f32 q8, q4, %q[vbias]\n"
            "vadd.f32 q9, q5, %q[vbias]\n"
            "vadd.f32 q10, q6, %q[vbias]\n"
            "vadd.f32 q11, q7, %q[vbias]\n"
            "vtrn.32 q8, q9\n"
            "vtrn.32 q10, q11\n"
            "vswp    d17, d20\n"
            "vswp    d19, d22\n"
            "vmax.f32 q8, q8, %q[vzero]\n"
            "vmax.f32 q9, q9, %q[vzero]\n"
            "vmax.f32 q10, q10, %q[vzero]\n"
            "vmax.f32 q11, q11, %q[vzero]\n"
            "vmin.f32 q8, q8, %q[valpha]\n"
            "vmin.f32 q9, q9, %q[valpha]\n"
            "vmin.f32 q10, q10, %q[valpha]\n"
            "vmin.f32 q11, q11, %q[valpha]\n"
            "vst1.32  {d16-d17}, [%[tmp0]]\n"
            "vst1.32  {d18-d19}, [%[tmp1]]\n"
            "vst1.32  {d20-d21}, [%[tmp2]]\n"
            "vst1.32  {d22-d23}, [%[tmp3]]\n"
            "3: \n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [doutc1r0] "+r"(doutc1_ptr),
              [doutc2r0] "+r"(doutc2_ptr),
              [doutc3r0] "+r"(doutc3_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [valpha] "w"(valpha),
              [tmp0] "r"(tmp0),
              [tmp1] "r"(tmp1),
              [tmp2] "r"(tmp2),
              [tmp3] "r"(tmp3)
            : "cc",
              "memory",
              "q3",
              "q4",
              "q5",
              "q6",
              "q7",
              "q8",
              "q9",
              "q10",
              "q11");
#endif
        // Advance the per-channel row pointers to the next output row.
        doutc0r0 += width;
        doutc1r0 += width;
        doutc2r0 += width;
        doutc3r0 += width;
        // Copy the valid lanes of the final partial group.
        if (remain) {
          for (int j = 0; j < remain; j++) {
            *doutc0_ptr++ = tmp0[j];
            *doutc1_ptr++ = tmp1[j];
            *doutc2_ptr++ = tmp2[j];
            *doutc3_ptr++ = tmp3[j];
          }
        }
        din += w_in_stride;
      }
      break;
    case 3:  // leakyrelu
      for (int i = 0; i < size_h; i++) {
        float* doutc0_ptr = doutc0r0;
        float* doutc1_ptr = doutc1r0;
        float* doutc2_ptr = doutc2r0;
        float* doutc3_ptr = doutc3r0;
        const float* din_hei_ptr = din;
        int cnt = cnt_col;
#ifdef __aarch64__
        // leakyrelu: per lane select x >= 0 ? x : x * alpha, implemented as
        // fcmge (mask) + fmul (scaled value) + bif (bitwise insert if false).
        asm volatile(
            "cmp %w[cnt], #1\n"
            "ldp  q0, q1, [%[din_ptr]], #32\n"
            "ldp  q2, q3, [%[din_ptr]], #32\n"
            "prfm pldl1keep, [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "fadd v0.4s, v0.4s, %[vbias].4s\n"
            "fadd v1.4s, v1.4s, %[vbias].4s\n"
            "fadd v2.4s, v2.4s, %[vbias].4s\n"
            "fadd v3.4s, v3.4s, %[vbias].4s\n"
            "trn1  v4.4s, v0.4s, v1.4s\n"
            "trn2  v5.4s, v0.4s, v1.4s\n"
            "trn1  v6.4s, v2.4s, v3.4s\n"
            "trn2  v7.4s, v2.4s, v3.4s\n"
            "trn1 v8.2d, v4.2d, v6.2d\n"
            "trn2 v9.2d, v4.2d, v6.2d\n"
            "trn1 v10.2d, v5.2d, v7.2d\n"
            "trn2 v11.2d, v5.2d, v7.2d\n"
            "fcmge v0.4s, v8.4s, %[vzero].4s\n"
            "fmul v1.4s, v8.4s, %[valpha].4s\n"
            "fcmge v2.4s, v9.4s, %[vzero].4s\n"
            "fmul v3.4s, v9.4s, %[valpha].4s\n"
            "fcmge v4.4s, v10.4s, %[vzero].4s\n"
            "fmul v5.4s, v10.4s, %[valpha].4s\n"
            "bif v8.16b, v1.16b, v0.16b\n"
            "fcmge v6.4s, v11.4s, %[vzero].4s\n"
            "fmul v7.4s, v11.4s, %[valpha].4s\n"
            "bif v9.16b, v3.16b, v2.16b\n"
            "ldp  q0, q1, [%[din_ptr]], #32\n"
            "bif v10.16b, v5.16b, v4.16b\n"
            "bif v11.16b, v7.16b, v6.16b\n"
            "ldp  q2, q3, [%[din_ptr]], #32\n"
            "subs %w[cnt], %w[cnt], #1\n"
            "str  q8, [%[doutc0r0]], #16 \n"
            "str  q9, [%[doutc2r0]], #16 \n"
            "str  q10, [%[doutc1r0]], #16 \n"
            "str  q11, [%[doutc3r0]], #16 \n"
            "bne 1b\n"
            "2: \n"
            "fadd v0.4s, v0.4s, %[vbias].4s\n"
            "fadd v1.4s, v1.4s, %[vbias].4s\n"
            "fadd v2.4s, v2.4s, %[vbias].4s\n"
            "fadd v3.4s, v3.4s, %[vbias].4s\n"
            "trn1 v4.4s, v0.4s, v1.4s\n"
            "trn2 v5.4s, v0.4s, v1.4s\n"
            "trn1 v6.4s, v2.4s, v3.4s\n"
            "trn2 v7.4s, v2.4s, v3.4s\n"
            "trn1 v8.2d, v4.2d, v6.2d\n"
            "trn2 v9.2d, v4.2d, v6.2d\n"
            "trn1 v10.2d, v5.2d, v7.2d\n"
            "trn2 v11.2d, v5.2d, v7.2d\n"
            "fcmge v0.4s, v8.4s, %[vzero].4s\n"
            "fmul v1.4s, v8.4s, %[valpha].4s\n"
            "fcmge v2.4s, v9.4s, %[vzero].4s\n"
            "fmul v3.4s, v9.4s, %[valpha].4s\n"
            "fcmge v4.4s, v10.4s, %[vzero].4s\n"
            "fmul v5.4s, v10.4s, %[valpha].4s\n"
            "bif v8.16b, v1.16b, v0.16b\n"
            "fcmge v6.4s, v11.4s, %[vzero].4s\n"
            "fmul v7.4s, v11.4s, %[valpha].4s\n"
            "bif v9.16b, v3.16b, v2.16b\n"
            "bif v10.16b, v5.16b, v4.16b\n"
            "bif v11.16b, v7.16b, v6.16b\n"
            "str  q8, [%[tmp0]]\n"
            "str  q9, [%[tmp2]]\n"
            "str  q10, [%[tmp1]]\n"
            "str  q11, [%[tmp3]]\n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [doutc1r0] "+r"(doutc1_ptr),
              [doutc2r0] "+r"(doutc2_ptr),
              [doutc3r0] "+r"(doutc3_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [valpha] "w"(valpha),
              [tmp0] "r"(tmp0),
              [tmp1] "r"(tmp1),
              [tmp2] "r"(tmp2),
              [tmp3] "r"(tmp3)
            : "cc",
              "memory",
              "v0",
              "v1",
              "v2",
              "v3",
              "v4",
              "v5",
              "v6",
              "v7",
              "v8",
              "v9",
              "v10",
              "v11");
#else
        // armv7 leakyrelu: vcge (mask) + vmul (scaled) + vbif select.
        asm volatile(
            "cmp %[cnt], #1\n"
            "vld1.32 {d8-d11}, [%[din_ptr]]!\n"
            "vld1.32 {d12-d15}, [%[din_ptr]]!\n"
            "pld [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "vadd.f32 q8, q4, %q[vbias]\n"
            "vadd.f32 q9, q5, %q[vbias]\n"
            "vadd.f32 q10, q6, %q[vbias]\n"
            "vadd.f32 q11, q7, %q[vbias]\n"
            "vtrn.32 q8, q9\n"
            "vtrn.32 q10, q11\n"
            "vswp    d17, d20\n"
            "vswp    d19, d22\n"
            "vcge.f32 q4, q8, %q[vzero]\n"
            "vmul.f32 q5, q8, %q[valpha]\n"
            "vcge.f32 q6, q9, %q[vzero]\n"
            "vmul.f32 q7, q9, %q[valpha]\n"
            "vcge.f32 q12, q10, %q[vzero]\n"
            "vmul.f32 q13, q10, %q[valpha]\n"
            "vbif q8, q5, q4\n"
            "vcge.f32 q4, q11, %q[vzero]\n"
            "vmul.f32 q5, q11, %q[valpha]\n"
            "vbif q9, q7, q6\n"
            "vbif q10, q13, q12\n"
            "vbif q11, q5, q4\n"
            "vld1.32 {d8-d11}, [%[din_ptr]]!\n"
            "subs   %[cnt], %[cnt], #1\n"
            "vst1.32  {d16-d17}, [%[doutc0r0]]!\n"
            "vst1.32  {d18-d19}, [%[doutc1r0]]!\n"
            "vld1.32 {d12-d15}, [%[din_ptr]]!\n"
            "vst1.32  {d20-d21}, [%[doutc2r0]]!\n"
            "vst1.32  {d22-d23}, [%[doutc3r0]]!\n"
            "bne 1b\n"
            "2: \n"
            "vadd.f32 q8, q4, %q[vbias]\n"
            "vadd.f32 q9, q5, %q[vbias]\n"
            "vadd.f32 q10, q6, %q[vbias]\n"
            "vadd.f32 q11, q7, %q[vbias]\n"
            "vtrn.32 q8, q9\n"
            "vtrn.32 q10, q11\n"
            "vswp    d17, d20\n"
            "vswp    d19, d22\n"
            "vcge.f32 q4, q8, %q[vzero]\n"
            "vmul.f32 q5, q8, %q[valpha]\n"
            "vcge.f32 q6, q9, %q[vzero]\n"
            "vmul.f32 q7, q9, %q[valpha]\n"
            "vcge.f32 q12, q10, %q[vzero]\n"
            "vmul.f32 q13, q10, %q[valpha]\n"
            "vbif q8, q5, q4\n"
            "vcge.f32 q4, q11, %q[vzero]\n"
            "vmul.f32 q5, q11, %q[valpha]\n"
            "vbif q9, q7, q6\n"
            "vbif q10, q13, q12\n"
            "vbif q11, q5, q4\n"
            "vst1.32  {d16-d17}, [%[tmp0]]\n"
            "vst1.32  {d18-d19}, [%[tmp1]]\n"
            "vst1.32  {d20-d21}, [%[tmp2]]\n"
            "vst1.32  {d22-d23}, [%[tmp3]]\n"
            "3: \n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [doutc1r0] "+r"(doutc1_ptr),
              [doutc2r0] "+r"(doutc2_ptr),
              [doutc3r0] "+r"(doutc3_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [valpha] "w"(valpha),
              [tmp0] "r"(tmp0),
              [tmp1] "r"(tmp1),
              [tmp2] "r"(tmp2),
              [tmp3] "r"(tmp3)
            : "cc",
              "memory",
              "q3",
              "q4",
              "q5",
              "q6",
              "q7",
              "q8",
              "q9",
              "q10",
              "q11",
              "q12",
              "q13");
#endif
        // Advance the per-channel row pointers to the next output row.
        doutc0r0 += width;
        doutc1r0 += width;
        doutc2r0 += width;
        doutc3r0 += width;
        // Copy the valid lanes of the final partial group.
        if (remain) {
          for (int j = 0; j < remain; j++) {
            *doutc0_ptr++ = tmp0[j];
            *doutc1_ptr++ = tmp1[j];
            *doutc2_ptr++ = tmp2[j];
            *doutc3_ptr++ = tmp3[j];
          }
        }
        din += w_in_stride;
      }
      break;
  }
}

void write_to_oc1_fp32(const float* din,
                       float* dout,
                       int cs,
                       int ce,
                       int hs,
                       int he,
                       int ws,
                       int we,
                       int channel,
                       int height,
                       int width,
                       int flag_act,
                       float alpha,
                       const float* bias) {
  int size_c_out = width * height;

  float* doutc0r0 = dout + cs * size_c_out + hs * width + ws;

  int size_h = (he > height ? height : he) - hs;  // size_h == hei_n
  int w_round = we - ws;

  int valid_we = we > width ? width : we;
  int win = valid_we - ws;
  int cnt_col = win >> 2;
  int remain = win % 4;
  float32x4_t vzero = vdupq_n_f32(0.f);
  float32x4_t valpha = vdupq_n_f32(alpha);
  float32x4_t vbias = vdupq_n_f32(bias[0]);
  float tmp[4] = {0.f, 0.f, 0.f, 0.f};
  switch (flag_act) {
    case 0:  // no act
      for (int i = 0; i < size_h; i++) {
        float* doutc0_ptr = doutc0r0;
        const float* din_hei_ptr = din;
        int cnt = cnt_col;
#ifdef __aarch64__
        asm volatile(
            "cmp %w[cnt], #1\n"
            "ldr  q0, [%[din_ptr]], #16\n"
            "prfm pldl1keep, [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "fadd v1.4s, v0.4s, %[vbias].4s\n"
            "subs %w[cnt], %w[cnt], #1\n"
            "ldr  q0, [%[din_ptr]], #16\n"
            "str  q1, [%[doutc0r0]], #16 \n"
            "bne 1b\n"
            "2: \n"
            "fadd v1.4s, v0.4s, %[vbias].4s\n"
            "str  q1, [%[tmp]] \n"
            "3: \n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias), [tmp] "r"(tmp)
            : "cc", "memory", "v0", "v1", "v2");
#else
        asm volatile(
            "cmp %[cnt], #1\n"
            "vld1.32 {d8-d9}, [%[din_ptr]]!\n"
            "pld [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "vadd.f32 q5, q4, %q[vbias]\n"
            "subs   %[cnt], %[cnt], #1\n"
            "vld1.32 {d8-d9}, [%[din_ptr]]!\n"
            "vst1.32  {d10-d11}, [%[doutc0r0]]!\n"
            "bne 1b\n"
            "2: \n"
            "vadd.f32 q5, q4, %q[vbias]\n"
            "vst1.32  {d10-d11}, [%[tmp]]\n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias), [tmp] "r"(tmp)
            : "cc", "memory", "q3", "q4", "q5");
#endif
        doutc0r0 += width;
        din += w_round;
        if (remain) {
          for (int j = 0; j < remain; j++) {
            *doutc0_ptr++ = tmp[j];
          }
        }
      }
      break;
    case 1:  // relu
      for (int i = 0; i < size_h; i++) {
        float* doutc0_ptr = doutc0r0;
        const float* din_hei_ptr = din;
        int cnt = cnt_col;
#ifdef __aarch64__
        asm volatile(
            "cmp %w[cnt], #1\n"
            "ldr  q0, [%[din_ptr]], #16\n"
            "prfm pldl1keep, [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "fadd v1.4s, v0.4s, %[vbias].4s\n"
            "subs %w[cnt], %w[cnt], #1\n"
            "fmax v1.4s, v1.4s, %[vzero].4s\n"
            "ldr  q0, [%[din_ptr]], #16\n"
            "str  q1, [%[doutc0r0]], #16 \n"
            "bne 1b\n"
            "2: \n"
            "fadd v1.4s, v0.4s, %[vbias].4s\n"
            "fmax v1.4s, v1.4s, %[vzero].4s\n"
            "str  q1, [%[tmp]]\n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias), [vzero] "w"(vzero), [tmp] "r"(tmp)
            : "cc", "memory", "v0", "v1", "v2");
#else
        asm volatile(
            "cmp %[cnt], #1\n"
            "vld1.32 {d8-d9}, [%[din_ptr]]!\n"
            "pld [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "vadd.f32 q5, q4, %q[vbias]\n"
            "subs   %[cnt], %[cnt], #1\n"
            "vmax.f32 q5, q5, %q[vzero]\n"
            "vld1.32 {d8-d9}, [%[din_ptr]]!\n"
            "vst1.32  {d10-d11}, [%[doutc0r0]]!\n"
            "bne 1b\n"
            "2: \n"
            "vadd.f32 q5, q4, %q[vbias]\n"
            "vmax.f32 q5, q5, %q[vzero]\n"
            "vst1.32  {d10-d11}, [%[tmp]]\n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias), [vzero] "w"(vzero), [tmp] "r"(tmp)
            : "cc", "memory", "q3", "q4", "q5");
#endif
        doutc0r0 += width;
        din += w_round;
        if (remain) {
          for (int j = 0; j < remain; j++) {
            *doutc0_ptr++ = tmp[j];
          }
        }
      }
      break;
    case 2:  // relu6
      for (int i = 0; i < size_h; i++) {
        float* doutc0_ptr = doutc0r0;
        const float* din_hei_ptr = din;
        int cnt = cnt_col;
#ifdef __aarch64__
        asm volatile(
            "cmp %w[cnt], #1\n"
            "ldr  q0, [%[din_ptr]], #16\n"
            "prfm pldl1keep, [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "fadd v1.4s, v0.4s, %[vbias].4s\n"
            "subs %w[cnt], %w[cnt], #1\n"
            "fmax v1.4s, v1.4s, %[vzero].4s\n"
            "fmin v1.4s, v1.4s, %[valpha].4s\n"
            "ldr  q0, [%[din_ptr]], #16\n"
            "str  q1, [%[doutc0r0]], #16 \n"
            "bne 1b\n"
            "2: \n"
            "fadd v1.4s, v0.4s, %[vbias].4s\n"
            "fmax v1.4s, v1.4s, %[vzero].4s\n"
            "fmin v1.4s, v1.4s, %[valpha].4s\n"
            "str  q1, [%[tmp]] \n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [valpha] "w"(valpha),
              [tmp] "r"(tmp)
            : "cc", "memory", "v0", "v1", "v2");
#else
        asm volatile(
            "cmp %[cnt], #1\n"
            "vld1.32 {d8-d9}, [%[din_ptr]]!\n"
            "pld [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "vadd.f32 q5, q4, %q[vbias]\n"
            "subs   %[cnt], %[cnt], #1\n"
            "vmax.f32 q5, q5, %q[vzero]\n"
            "vmin.f32 q5, q5, %q[valpha]\n"
            "vld1.32 {d8-d9}, [%[din_ptr]]!\n"
            "vst1.32  {d10-d11}, [%[doutc0r0]]!\n"
            "bne 1b\n"
            "2: \n"
            "vadd.f32 q5, q4, %q[vbias]\n"
            "vmax.f32 q5, q5, %q[vzero]\n"
            "vmin.f32 q5, q5, %q[valpha]\n"
            "vst1.32  {d10-d11}, [%[tmp]]\n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [valpha] "w"(valpha),
              [tmp] "r"(tmp)
            : "cc", "memory", "q3", "q4", "q5");
#endif
        doutc0r0 += width;
        din += w_round;
        if (remain) {
          for (int j = 0; j < remain; j++) {
            *doutc0_ptr++ = tmp[j];
          }
        }
      }
      break;
    case 3:  // leakyrelu
      for (int i = 0; i < size_h; i++) {
        float* doutc0_ptr = doutc0r0;
        const float* din_hei_ptr = din;
        int cnt = cnt_col;
#ifdef __aarch64__
        asm volatile(
            "cmp %w[cnt], #1\n"
            "ldr  q0, [%[din_ptr]], #16\n"
            "prfm pldl1keep, [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "fadd v1.4s, v0.4s, %[vbias].4s\n"
            "subs %w[cnt], %w[cnt], #1\n"
            "fcmge v3.4s, v1.4s, %[vzero].4s\n"
            "fmul v4.4s, v1.4s, %[valpha].4s\n"
            "ldr  q0, [%[din_ptr]], #16\n"
            "bif v1.16b, v4.16b, v3.16b\n"
            "str  q1, [%[doutc0r0]], #16 \n"
            "bne 1b\n"
            "2: \n"
            "fadd v1.4s, v0.4s, %[vbias].4s\n"
            "fcmge v3.4s, v1.4s, %[vzero].4s\n"
            "fmul v4.4s, v1.4s, %[valpha].4s\n"
            "bif v1.16b, v4.16b, v3.16b\n"
            "str  q1, [%[tmp]] \n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [valpha] "w"(valpha),
              [tmp] "r"(tmp)
            : "cc", "memory", "v0", "v1", "v2", "v3", "v4");
#else
        asm volatile(
            "cmp %[cnt], #1\n"
            "vld1.32 {d8-d9}, [%[din_ptr]]!\n"
            "pld [%[din_ptr]]\n"
            "blt 2f\n"
            "1: \n"
            "vadd.f32 q5, q4, %q[vbias]\n"
            "subs   %[cnt], %[cnt], #1\n"
            "vcge.f32 q8, q5, %q[vzero]\n"
            "vmul.f32 q9, q5, %q[valpha]\n"
            "vld1.32 {d8-d9}, [%[din_ptr]]!\n"
            "vbif q5, q9, q8\n"
            "vst1.32  {d10-d11}, [%[doutc0r0]]!\n"
            "bne 1b\n"
            "2: \n"
            "vadd.f32 q5, q4, %q[vbias]\n"
            "vcge.f32 q8, q5, %q[vzero]\n"
            "vmul.f32 q9, q5, %q[valpha]\n"
            "vbif q5, q9, q8\n"
            "vst1.32  {d10-d11}, [%[tmp]]\n"
            : [doutc0r0] "+r"(doutc0_ptr),
              [cnt] "+r"(cnt),
              [din_ptr] "+r"(din_hei_ptr)
            : [vbias] "w"(vbias),
              [vzero] "w"(vzero),
              [valpha] "w"(valpha),
              [tmp] "r"(tmp)
            : "cc", "memory", "q3", "q4", "q5", "q8", "q9");
#endif
        doutc0r0 += width;
        din += w_round;
        if (remain) {
          for (int j = 0; j < remain; j++) {
            *doutc0_ptr++ = tmp[j];
          }
        }
      }
      break;
  }
}

#ifdef __aarch64__
// aarch64, 3x3 stride-2 direct conv, FIRST input-channel pass:
// loads 8 input values (+9th in d10/d12) from rows r0 and r2, then seeds the
// eight 4-wide output accumulators v15-v22 with fmul (w0 * even-indexed input
// lanes) instead of accumulating into previously stored partial sums.
// Label "2:" is the main loop head closed by RESULT_FIRST/RESULT.
#define INIT_FIRST                        \
  "ldp    q0, q1,   [%[r0]], #32\n"       \
  "ldr    d10,      [%[r0]]\n"            \
  "ldp    q4, q5,   [%[r2]], #32\n"       \
  "ldr    d12,      [%[r2]]\n"            \
  "2:\n"                                  \
  "fmul   v15.4s ,  %[w0].4s,  v0.s[0]\n" \
  "fmul   v16.4s ,  %[w0].4s,  v0.s[2]\n" \
  "fmul   v17.4s ,  %[w0].4s,  v1.s[0]\n" \
  "fmul   v18.4s ,  %[w0].4s,  v1.s[2]\n" \
  "fmul   v19.4s ,  %[w0].4s,  v4.s[0]\n" \
  "fmul   v20.4s ,  %[w0].4s,  v4.s[2]\n" \
  "fmul   v21.4s ,  %[w0].4s,  v5.s[0]\n" \
  "fmul   v22.4s ,  %[w0].4s,  v5.s[2]\n"

// aarch64, 3x3 stride-2 direct conv, SUBSEQUENT input-channel passes:
// same input loads as INIT_FIRST, but the accumulators v15-v22 are first
// reloaded from the partial-sum buffers ptr_out0/ptr_out1 and updated with
// fmla, so contributions from earlier input channels are preserved.
#define INIT                              \
  "ldp    q15, q16, [%[ptr_out0]]\n"      \
  "ldp    q17, q18, [%[ptr_out0], #32]\n" \
  "ldp    q0, q1,   [%[r0]], #32\n"       \
  "ldr    d10,      [%[r0]]\n"            \
  "ldp    q4, q5,   [%[r2]], #32\n"       \
  "ldr    d12,      [%[r2]]\n"            \
  "2:\n"                                  \
  "ldp    q19, q20, [%[ptr_out1]]     \n" \
  "ldp    q21, q22, [%[ptr_out1], #32]\n" \
  "fmla   v15.4s ,  %[w0].4s,  v0.s[0]\n" \
  "fmla   v16.4s ,  %[w0].4s,  v0.s[2]\n" \
  "fmla   v17.4s ,  %[w0].4s,  v1.s[0]\n" \
  "fmla   v18.4s ,  %[w0].4s,  v1.s[2]\n" \
  "fmla   v19.4s ,  %[w0].4s,  v4.s[0]\n" \
  "fmla   v20.4s ,  %[w0].4s,  v4.s[2]\n" \
  "fmla   v21.4s ,  %[w0].4s,  v5.s[0]\n" \
  "fmla   v22.4s ,  %[w0].4s,  v5.s[2]\n"

// aarch64 main accumulation body shared by both INIT variants: streams rows
// r1/r3/r4 while folding in the w1..w8 taps. Out-row0 (v15-v18, fed by
// r0/r1/r2) is completed and stored here; out-row1 (v19-v22, fed by
// r2/r3/r4) is finished later by RESULT/RESULT_FIRST. Also preloads the
// next iteration's r0/r2 data to overlap loads with arithmetic. Odd lanes
// (s[1], s[3]) and the shifted even lanes implement the stride-2 3-tap
// horizontal window; d10-d14 carry each row's 9th element.
#define COMPUTE                            \
  "ldp    q2, q3,   [%[r1]], #32      \n"  \
  "fmla   v15.4s ,  %[w6].4s,  v4.s[0]\n"  \
  "fmla   v16.4s ,  %[w6].4s,  v4.s[2]\n"  \
  "fmla   v17.4s ,  %[w6].4s,  v5.s[0]\n"  \
  "fmla   v18.4s ,  %[w6].4s,  v5.s[2]\n"  \
  "ldr    d11,      [%[r1]]\n"             \
  "fmla   v15.4s ,  %[w1].4s,  v0.s[1]\n"  \
  "fmla   v16.4s ,  %[w1].4s,  v0.s[3]\n"  \
  "fmla   v17.4s ,  %[w1].4s,  v1.s[1]\n"  \
  "fmla   v18.4s ,  %[w1].4s,  v1.s[3]\n"  \
  "fmla   v19.4s ,  %[w1].4s,  v4.s[1]\n"  \
  "fmla   v20.4s ,  %[w1].4s,  v4.s[3]\n"  \
  "fmla   v21.4s ,  %[w1].4s,  v5.s[1]\n"  \
  "fmla   v22.4s ,  %[w1].4s,  v5.s[3]\n"  \
  "ldp    q6, q7,   [%[r3]], #32      \n"  \
  "fmla   v15.4s ,  %[w7].4s,  v4.s[1]\n"  \
  "fmla   v16.4s ,  %[w7].4s,  v4.s[3]\n"  \
  "fmla   v17.4s ,  %[w7].4s,  v5.s[1]\n"  \
  "fmla   v18.4s ,  %[w7].4s,  v5.s[3]\n"  \
  "ldr    d13,      [%[r3]]\n"             \
  "fmla   v15.4s ,  %[w2].4s,  v0.s[2]\n"  \
  "fmla   v16.4s ,  %[w2].4s,  v1.s[0]\n"  \
  "fmla   v17.4s ,  %[w2].4s,  v1.s[2]\n"  \
  "fmla   v18.4s ,  %[w2].4s,  v10.s[0]\n" \
  "fmla   v19.4s ,  %[w2].4s,  v4.s[2]\n"  \
  "fmla   v20.4s ,  %[w2].4s,  v5.s[0]\n"  \
  "fmla   v21.4s ,  %[w2].4s,  v5.s[2]\n"  \
  "fmla   v22.4s ,  %[w2].4s,  v12.s[0]\n" \
  "ldp    q8, q9,   [%[r4]], #32      \n"  \
  "fmla   v15.4s ,  %[w8].4s,  v4.s[2]\n"  \
  "fmla   v16.4s ,  %[w8].4s,  v5.s[0]\n"  \
  "fmla   v17.4s ,  %[w8].4s,  v5.s[2]\n"  \
  "fmla   v18.4s ,  %[w8].4s,  v12.s[0]\n" \
  "ldr    d14,      [%[r4]]\n"             \
  "fmla   v15.4s ,  %[w3].4s,  v2.s[0]\n"  \
  "fmla   v16.4s ,  %[w3].4s,  v2.s[2]\n"  \
  "fmla   v17.4s ,  %[w3].4s,  v3.s[0]\n"  \
  "fmla   v18.4s ,  %[w3].4s,  v3.s[2]\n"  \
  "fmla   v19.4s ,  %[w3].4s,  v6.s[0]\n"  \
  "fmla   v20.4s ,  %[w3].4s,  v6.s[2]\n"  \
  "fmla   v21.4s ,  %[w3].4s,  v7.s[0]\n"  \
  "fmla   v22.4s ,  %[w3].4s,  v7.s[2]\n"  \
  "ldp    q0, q1,   [%[r0]], #32      \n"  \
  "fmla   v15.4s ,  %[w4].4s,  v2.s[1]\n"  \
  "fmla   v16.4s ,  %[w4].4s,  v2.s[3]\n"  \
  "fmla   v17.4s ,  %[w4].4s,  v3.s[1]\n"  \
  "fmla   v18.4s ,  %[w4].4s,  v3.s[3]\n"  \
  "fmla   v19.4s ,  %[w4].4s,  v6.s[1]\n"  \
  "fmla   v20.4s ,  %[w4].4s,  v6.s[3]\n"  \
  "fmla   v21.4s ,  %[w4].4s,  v7.s[1]\n"  \
  "fmla   v22.4s ,  %[w4].4s,  v7.s[3]\n"  \
  "ldr    d10,      [%[r0]]\n"             \
  "fmla   v15.4s ,  %[w5].4s,  v2.s[2]\n"  \
  "fmla   v16.4s ,  %[w5].4s,  v3.s[0]\n"  \
  "fmla   v17.4s ,  %[w5].4s,  v3.s[2]\n"  \
  "fmla   v18.4s ,  %[w5].4s,  v11.s[0]\n" \
  "ldp    q4, q5,   [%[r2]], #32      \n"  \
  "stp    q15, q16, [%[ptr_out0]], #32\n"  \
  "fmla   v19.4s ,  %[w5].4s,  v6.s[2]\n"  \
  "fmla   v20.4s ,  %[w5].4s,  v7.s[0]\n"  \
  "fmla   v21.4s ,  %[w5].4s,  v7.s[2]\n"  \
  "fmla   v22.4s ,  %[w5].4s,  v13.s[0]\n" \
  "ldr    d12,      [%[r2]]\n"             \
  "stp    q17, q18, [%[ptr_out0]], #32\n"  \
  "fmla   v19.4s ,  %[w6].4s,  v8.s[0]\n"  \
  "fmla   v20.4s ,  %[w6].4s,  v8.s[2]\n"  \
  "fmla   v21.4s ,  %[w6].4s,  v9.s[0]\n"  \
  "fmla   v22.4s ,  %[w6].4s,  v9.s[2]\n"

// aarch64 loop tail paired with INIT_FIRST: applies the last w7/w8 taps from
// r4 to out-row1 (v19-v22), stores both 8-float halves of row 1, decrements
// the loop counter and branches back to label "2:".
#define RESULT_FIRST                       \
  "fmla   v19.4s ,  %[w7].4s,  v8.s[1]\n"  \
  "fmla   v20.4s ,  %[w7].4s,  v8.s[3]\n"  \
  "fmla   v21.4s ,  %[w7].4s,  v9.s[1]\n"  \
  "fmla   v22.4s ,  %[w7].4s,  v9.s[3]\n"  \
  "fmla   v19.4s ,  %[w8].4s,  v8.s[2]\n"  \
  "fmla   v20.4s ,  %[w8].4s,  v9.s[0]\n"  \
  "fmla   v21.4s ,  %[w8].4s,  v9.s[2]\n"  \
  "fmla   v22.4s ,  %[w8].4s,  v14.s[0]\n" \
  "subs   %w[cnt], %w[cnt], #1\n"          \
  "stp    q19, q20, [%[ptr_out1]], #32\n"  \
  "stp    q21, q22, [%[ptr_out1]], #32\n"  \
  "bne    2b                          \n"

// aarch64 loop tail paired with INIT: same w7/w8 finishing and row-1 store
// as RESULT_FIRST, but additionally reloads the next iteration's out-row0
// partial sums (v15-v18) from ptr_out0 so the loop can keep accumulating.
#define RESULT                             \
  "ldp    q15, q16, [%[ptr_out0]]     \n"  \
  "fmla   v19.4s ,  %[w7].4s,  v8.s[1]\n"  \
  "fmla   v20.4s ,  %[w7].4s,  v8.s[3]\n"  \
  "fmla   v21.4s ,  %[w7].4s,  v9.s[1]\n"  \
  "fmla   v22.4s ,  %[w7].4s,  v9.s[3]\n"  \
  "ldp    q17, q18, [%[ptr_out0], #32]\n"  \
  "fmla   v19.4s ,  %[w8].4s,  v8.s[2]\n"  \
  "fmla   v20.4s ,  %[w8].4s,  v9.s[0]\n"  \
  "fmla   v21.4s ,  %[w8].4s,  v9.s[2]\n"  \
  "fmla   v22.4s ,  %[w8].4s,  v14.s[0]\n" \
  "subs   %w[cnt], %w[cnt], #1\n"          \
  "stp    q19, q20, [%[ptr_out1]], #32\n"  \
  "stp    q21, q22, [%[ptr_out1]], #32\n"  \
  "bne    2b                          \n"

// aarch64 single-output-channel variant, accumulating pass: ld2 deinterleaves
// the stride-2 even lanes (v0/v4) from the odd lanes (v1/v5); the two scalar
// accumulators q21/q22 (out rows 0/1) are reloaded from ptr_out0/ptr_out1
// before being updated with the w0 tap.
#define INIT_C1                                \
  "ldr    q21, [%[ptr_out0]]\n"                \
  "ld2  {v0.4s, v1.4s}, [%[r0]], #32\n"        \
  "ldr    d10,      [%[r0]]\n"                 \
  "ld2  {v4.4s, v5.4s}, [%[r2]], #32\n"        \
  "ldr    d12,      [%[r2]]\n"                 \
  "2:\n" /*  r0, r2, mul w0, get out r0, r1 */ \
  "ldr    q22, [%[ptr_out1]]\n"                \
  "fmla   v21.4s ,  %[w0].4s,  v0.4s\n"        \
  "fmla   v22.4s ,  %[w0].4s,  v4.4s\n"        \
  "ld2  {v2.4s, v3.4s}, [%[r1]], #32\n"

// aarch64 single-output-channel variant, first pass: as INIT_C1 but the
// accumulators q21/q22 are seeded with fmul (no partial-sum reload).
#define INIT_C1_FIRST                          \
  "ld2  {v0.4s, v1.4s}, [%[r0]], #32\n"        \
  "ldr    d10,      [%[r0]]\n"                 \
  "ld2  {v4.4s, v5.4s}, [%[r2]], #32\n"        \
  "ldr    d12,      [%[r2]]\n"                 \
  "2:\n" /*  r0, r2, mul w0, get out r0, r1 */ \
  "fmul   v21.4s ,  %[w0].4s,  v0.4s\n"        \
  "fmul   v22.4s ,  %[w0].4s,  v4.4s\n"        \
  "ld2  {v2.4s, v3.4s}, [%[r1]], #32\n"

// aarch64 single-output-channel main body: ext splices each row's 9th element
// (d10-d13) onto the even-lane vector to form the x+2 window column; rows
// r1/r3/r4 are streamed in, taps w1..w8 folded into q21 (out row 0, stored
// here) and q22 (out row 1, finished by RESULT_C1*); next r0/r2 preloaded.
#define COMPUTE_C1                                                         \
  /* r2 mul w6, get out r0*/                                               \
  "fmla   v21.4s ,  %[w6].4s,  v4.4s\n"                                    \
  "ldr    d11,      [%[r1]]\n"                                             \
  "ext    v15.16b, v0.16b, v10.16b, #4\n"                                  \
  "ext    v16.16b, v4.16b, v12.16b, #4\n" /*  r0, r2, mul w1, get out r0*/ \
  "fmla   v21.4s ,  %[w1].4s,  v1.4s\n"                                    \
  "fmla   v22.4s ,  %[w1].4s,  v5.4s\n"                                    \
  "ld2  {v6.4s, v7.4s}, [%[r3]], #32\n" /*  r2 mul w7, get out r0 */       \
  "fmla   v21.4s ,  %[w7].4s,  v5.4s\n"                                    \
  "ldr    d13,      [%[r3]]\n" /*  r0, r2, mul w2, get out r0, r1 */       \
  "fmla   v21.4s ,  %[w2].4s,  v15.4s\n"                                   \
  "fmla   v22.4s ,  %[w2].4s,  v16.4s\n"                                   \
  "ld2  {v8.4s, v9.4s}, [%[r4]], #32 \n" /*  r2, mul w8, get out r0 */     \
  "fmla   v21.4s ,  %[w8].4s,  v16.4s\n"                                   \
  "ldr    d14,      [%[r4]]\n" /* r1, r3, mul w3, get out r0, r1 */        \
  "fmla   v21.4s ,  %[w3].4s,  v2.4s\n"                                    \
  "fmla   v22.4s ,  %[w3].4s,  v6.4s\n"                                    \
  "ext    v15.16b, v2.16b, v11.16b, #4\n"                                  \
  "ext    v16.16b, v6.16b, v13.16b, #4\n"                                  \
  "ld2  {v0.4s, v1.4s}, [%[r0]], #32\n"                                    \
  "fmla   v21.4s ,  %[w4].4s,  v3.4s\n"                                    \
  "fmla   v22.4s ,  %[w4].4s,  v7.4s\n"                                    \
  "ldr    d10,      [%[r0]]\n" /*  r1, r3, mul w5, get out r0, r1 */       \
  "fmla   v21.4s ,  %[w5].4s,  v15.4s\n"                                   \
  "fmla   v22.4s ,  %[w5].4s,  v16.4s\n"                                   \
  "ld2  {v4.4s, v5.4s}, [%[r2]], #32 \n"                                   \
  "ldr    d12,      [%[r2]]\n"                                             \
  "str    q21, [%[ptr_out0]], #16\n"

// aarch64 c==1 loop tail, accumulating pass: finishes out row 1 with the
// r4 taps (w6/w7/w8), stores it, reloads both accumulators' partial sums
// for the next iteration and loops back to "2:".
#define RESULT_C1                         \
  /*  r4, mul w6, get out r1 */           \
  "fmla   v22.4s ,  %[w6].4s,  v8.4s  \n" \
  "ext    v15.16b, v8.16b, v14.16b, #4\n" \
  "ldr    q21, [%[ptr_out0]]          \n" \
  "fmla   v22.4s ,  %[w7].4s,  v9.4s  \n" \
  "fmla   v22.4s ,  %[w8].4s,  v15.4s \n" \
  "subs   %w[cnt], %w[cnt], #1        \n" \
  "str    q22, [%[ptr_out1]], #16     \n" \
  "ldr    q22, [%[ptr_out1]]\n"           \
  "bne    2b                          \n"

// aarch64 c==1 loop tail, first pass: as RESULT_C1 but without reloading
// partial sums (the INIT_C1_FIRST/COMPUTE_C1 path seeds them with fmul).
#define RESULT_C1_FIRST                   \
  /*  r4, mul w6, get out r1 */           \
  "fmla   v22.4s ,  %[w6].4s,  v8.4s  \n" \
  "ext    v15.16b, v8.16b, v14.16b, #4\n" \
  "fmla   v22.4s ,  %[w7].4s,  v9.4s  \n" \
  "fmla   v22.4s ,  %[w8].4s,  v15.4s \n" \
  "subs   %w[cnt], %w[cnt], #1        \n" \
  "str    q22, [%[ptr_out1]], #16     \n" \
  "bne    2b                          \n"

#else
// armv7 counterpart of INIT_FIRST: weights are streamed from memory via
// [%[wc0]] (q5/q6/q7 = w0/w1/w2) because armv7 lacks enough NEON registers
// to pin all nine taps; accumulators q8-q11 (out row 0) and q12-q15 (out
// row 1) are seeded with vmul. "0:" is the main loop label.
#define INIT_FIRST                                                             \
  "vld1.32    {d10-d13}, [%[wc0]]!       @ load w0, w1\n"                      \
  "vld1.32    {d14-d15}, [%[wc0]]!       @ load w2\n"                          \
  "vld1.32    {d0-d3}, [%[r0]]!          @ load r0\n"                          \
  "vld1.32    {d8},   [%[r0]]            @ load r0\n"   /* main loop */        \
  "0:                                    @ main loop\n" /* mul r0, with w0*/   \
  "vmul.f32   q8, q5, d0[0]              @ w0 * inr00\n"                       \
  "vmul.f32   q9, q5, d1[0]              @ w0 * inr02\n"                       \
  "vmul.f32   q10, q5, d2[0]             @ w0 * inr04\n"                       \
  "vmul.f32   q11, q5, d3[0]             @ w0 * inr06\n" /* mul r0, with w0*/  \
  "vld1.32    {d4-d7}, [%[r2]]!          @ load r2\n"                          \
  "vmla.f32   q8, q6, d0[1]              @ w1 * inr01\n"                       \
  "vmla.f32   q9, q6, d1[1]              @ w1 * inr03\n"                       \
  "vmla.f32   q10, q6, d2[1]             @ w1 * inr05\n"                       \
  "vmla.f32   q11, q6, d3[1]             @ w1 * inr07\n"                       \
  "vld1.32    {d9},   [%[r2]]            @ load r2, 9th float\n"               \
  "vmla.f32   q8, q7, d1[0]              @ w2 * inr02\n"                       \
  "vmla.f32   q9, q7, d2[0]              @ w2 * inr04\n"                       \
  "vmla.f32   q10, q7, d3[0]             @ w2 * inr06\n"                       \
  "vmla.f32   q11, q7, d8[0]             @ w2 * inr08\n"                       \
  "sub    %[r2], %[r2], #32              @ r2 - 32\n" /* mul r2, with w0, w1*/ \
  "vld1.32    {d0-d3}, [%[r1]]!          @ load r1\n"                          \
  "vmul.f32   q12, q5, d4[0]             @ w0 * inr20\n"                       \
  "vmul.f32   q13, q5, d5[0]             @ w0 * inr22\n"                       \
  "vmul.f32   q14, q5, d6[0]             @ w0 * inr24\n"                       \
  "vmul.f32   q15, q5, d7[0]             @ w0 * inr26\n"

// armv7 counterpart of INIT (accumulating passes): reloads out-row0
// partials into q8-q11 (rewinding ptr_out0 by 32 after the post-increment
// loads) and out-row1 partials into q12-q15 inside the loop, then
// accumulates with vmla instead of seeding with vmul.
#define INIT                                                                   \
  "vld1.32    {d16-d19}, [%[ptr_out0]]!  @ load outr0\n"                       \
  "vld1.32    {d20-d23}, [%[ptr_out0]]   @ load outr0\n"                       \
  "vld1.32    {d10-d13}, [%[wc0]]!       @ load w0, w1\n"                      \
  "vld1.32    {d14-d15}, [%[wc0]]!       @ load w2\n"                          \
  "vld1.32    {d0-d3}, [%[r0]]!          @ load r0\n"                          \
  "vld1.32    {d8},   [%[r0]]            @ load r0\n" /* main loop */          \
  "sub    %[ptr_out0], %[ptr_out0], #32  @ ptr_out0 -32\n"                     \
  "0:                                    @ main loop\n" /* mul r0*/            \
  "vld1.32    {d24-d27}, [%[ptr_out1]]!  @ load outr1\n"                       \
  "vmla.f32   q8, q5, d0[0]              @ w0 * inr00\n"                       \
  "vld1.32    {d28-d31}, [%[ptr_out1]]   @ load outr1\n"                       \
  "vmla.f32   q9, q5, d1[0]              @ w0 * inr02\n"                       \
  "vmla.f32   q10, q5, d2[0]             @ w0 * inr04\n"                       \
  "vmla.f32   q11, q5, d3[0]             @ w0 * inr06\n" /* mul r0, with w0*/  \
  "vld1.32    {d4-d7}, [%[r2]]!          @ load r2\n"                          \
  "vmla.f32   q8, q6, d0[1]              @ w1 * inr01\n"                       \
  "vmla.f32   q9, q6, d1[1]              @ w1 * inr03\n"                       \
  "vmla.f32   q10, q6, d2[1]             @ w1 * inr05\n"                       \
  "vmla.f32   q11, q6, d3[1]             @ w1 * inr07\n"                       \
  "vld1.32    {d9},   [%[r2]]            @ load r2, 9th float\n"               \
  "vmla.f32   q8, q7, d1[0]              @ w2 * inr02\n"                       \
  "vmla.f32   q9, q7, d2[0]              @ w2 * inr04\n"                       \
  "vmla.f32   q10, q7, d3[0]             @ w2 * inr06\n"                       \
  "vmla.f32   q11, q7, d8[0]             @ w2 * inr08\n"                       \
  "sub    %[r2], %[r2], #32              @ r2 - 32\n" /* mul r2, with w0, w1*/ \
  "vld1.32    {d0-d3}, [%[r1]]!          @ load r1\n"                          \
  "vmla.f32   q12, q5, d4[0]             @ w0 * inr20\n"                       \
  "vmla.f32   q13, q5, d5[0]             @ w0 * inr22\n"                       \
  "vmla.f32   q14, q5, d6[0]             @ w0 * inr24\n"                       \
  "vmla.f32   q15, q5, d7[0]             @ w0 * inr26\n"

// armv7 main body shared by both INIT variants: finishes r2's w1/w2
// contributions to out row 1, then streams w3/w4/w5 from wc0 and applies
// them to r1 (out row 0) and prepares r3 data; RESULT/RESULT_FIRST complete
// the remaining taps.
#define COMPUTE                                                                \
  "vld1.32    {d8},   [%[r1]]            @ load r1, 9th float\n"               \
  "vmla.f32   q12, q6, d4[1]             @ w1 * inr21\n"                       \
  "vmla.f32   q13, q6, d5[1]             @ w1 * inr23\n"                       \
  "vmla.f32   q14, q6, d6[1]             @ w1 * inr25\n"                       \
  "vmla.f32   q15, q6, d7[1]             @ w1 * inr27\n"                       \
  "vld1.32    {d10-d13}, [%[wc0]]!       @ load w3, w4, to q5, q6\n"           \
  "vmla.f32   q12, q7, d5[0]             @ w2 * inr22\n"                       \
  "vmla.f32   q13, q7, d6[0]             @ w2 * inr24\n"                       \
  "vmla.f32   q14, q7, d7[0]             @ w2 * inr26\n"                       \
  "vmla.f32   q15, q7, d9[0]             @ w2 * inr28\n"                       \
  "vld1.32    {d14-d15}, [%[wc0]]!       @ load w5, to q7\n" /* mul r1, with*/ \
  "vmla.f32   q8, q5, d0[0]              @ w3 * inr10\n"                       \
  "vmla.f32   q9, q5, d1[0]              @ w3 * inr12\n"                       \
  "vmla.f32   q10, q5, d2[0]             @ w3 * inr14\n"                       \
  "vmla.f32   q11, q5, d3[0]             @ w3 * inr16\n"                       \
  "vld1.32    {d4-d7}, [%[r3]]!          @ load r3, 8 float\n"                 \
  "vmla.f32   q8, q6, d0[1]              @ w4 * inr11\n"                       \
  "vmla.f32   q9, q6, d1[1]              @ w4 * inr13\n"                       \
  "vmla.f32   q10, q6, d2[1]             @ w4 * inr15\n"                       \
  "vmla.f32   q11, q6, d3[1]             @ w4 * inr17\n"                       \
  "vld1.32    {d9},   [%[r3]]            @ load r3, 9th float\n"               \
  "vmla.f32   q8, q7, d1[0]              @ w5 * inr12\n"                       \
  "vmla.f32   q9, q7, d2[0]              @ w5 * inr14\n"                       \
  "vmla.f32   q10, q7, d3[0]             @ w5 * inr16\n"                       \
  "vmla.f32   q11, q7, d8[0]             @ w5 * inr18\n"

// armv7 loop tail paired with INIT_FIRST: applies w3-w5 (r3/r4) and w6-w8
// (r2/r4) taps, stores both output rows, rewinds wc0 by 144 bytes
// (9 weights * 16B = one full 3x3 filter block) and reloads w0-w2 for the
// next loop iteration before branching back to "0:".
#define RESULT_FIRST                                                           \
  /* mul r3, with w3, w4, w5 */                                                \
  "vld1.32    {d0-d3}, [%[r2]]!          @ load r2\n"                          \
  "vmla.f32   q12, q5, d4[0]             @ w3 * inr30\n"                       \
  "vmla.f32   q13, q5, d5[0]             @ w3 * inr32\n"                       \
  "vmla.f32   q14, q5, d6[0]             @ w3 * inr34\n"                       \
  "vmla.f32   q15, q5, d7[0]             @ w3 * inr36\n"                       \
  "vld1.32    {d8},   [%[r2]]            @ load r2, 9th float\n"               \
  "vmla.f32   q12, q6, d4[1]             @ w4 * inr31\n"                       \
  "vmla.f32   q13, q6, d5[1]             @ w4 * inr33\n"                       \
  "vmla.f32   q14, q6, d6[1]             @ w4 * inr35\n"                       \
  "vmla.f32   q15, q6, d7[1]             @ w4 * inr37\n"                       \
  "vld1.32    {d10-d13}, [%[wc0]]!       @ load w6, w7\n"                      \
  "vmla.f32   q12, q7, d5[0]             @ w5 * inr32\n"                       \
  "vmla.f32   q13, q7, d6[0]             @ w5 * inr34\n"                       \
  "vmla.f32   q14, q7, d7[0]             @ w5 * inr36\n"                       \
  "vmla.f32   q15, q7, d9[0]             @ w5 * inr38\n"                       \
  "vld1.32    {d14-d15}, [%[wc0]]!       @ load w8\n" /* mul r2, with w6, w7*/ \
  "vmla.f32   q8, q5, d0[0]              @ w6 * inr20\n"                       \
  "vmla.f32   q9, q5, d1[0]              @ w6 * inr22\n"                       \
  "vmla.f32   q10, q5, d2[0]             @ w6 * inr24\n"                       \
  "vmla.f32   q11, q5, d3[0]             @ w6 * inr26\n"                       \
  "vld1.32    {d4-d7}, [%[r4]]!          @ load r4\n"                          \
  "vmla.f32   q8, q6, d0[1]              @ w7 * inr21\n"                       \
  "vmla.f32   q9, q6, d1[1]              @ w7 * inr23\n"                       \
  "vmla.f32   q10, q6, d2[1]             @ w7 * inr25\n"                       \
  "vmla.f32   q11, q6, d3[1]             @ w7 * inr27\n"                       \
  "vld1.32    {d9},   [%[r4]]            @ load r4, 9th float\n"               \
  "vmla.f32   q8, q7, d1[0]              @ w8 * inr22\n"                       \
  "vmla.f32   q9, q7, d2[0]              @ w8 * inr24\n"                       \
  "vmla.f32   q10, q7, d3[0]             @ w8 * inr26\n"                       \
  "vmla.f32   q11, q7, d8[0]             @ w8 * inr28\n"                       \
  "sub    %[wc0], %[wc0], #144           @ wc0 - 144\n" /* mul r4, with w6*/   \
  "vld1.32    {d0-d3}, [%[r0]]!          @ load r0\n"                          \
  "vmla.f32   q12, q5, d4[0]             @ w3 * inr40\n"                       \
  "vst1.32    {d16-d19}, [%[ptr_out0]]!  @ save r00, r01\n"                    \
  "vmla.f32   q13, q5, d5[0]             @ w3 * inr42\n"                       \
  "vst1.32    {d20-d23}, [%[ptr_out0]]!  @ save r02, r03\n"                    \
  "vmla.f32   q14, q5, d6[0]             @ w3 * inr44\n"                       \
  "vmla.f32   q15, q5, d7[0]             @ w3 * inr46\n"                       \
  "vld1.32    {d8},   [%[r0]]            @ load r0, 9th float\n"               \
  "vmla.f32   q12, q6, d4[1]             @ w4 * inr41\n"                       \
  "vmla.f32   q13, q6, d5[1]             @ w4 * inr43\n"                       \
  "vmla.f32   q14, q6, d6[1]             @ w4 * inr45\n"                       \
  "vmla.f32   q15, q6, d7[1]             @ w4 * inr47\n"                       \
  "vld1.32    {d10-d13}, [%[wc0]]!       @ load w0, w1\n"                      \
  "vmla.f32   q12, q7, d5[0]             @ w5 * inr42\n"                       \
  "vmla.f32   q13, q7, d6[0]             @ w5 * inr44\n"                       \
  "vmla.f32   q14, q7, d7[0]             @ w5 * inr46\n"                       \
  "vmla.f32   q15, q7, d9[0]             @ w5 * inr48\n"                       \
  "subs   %[cnt], #1                     @ loop count--\n"                     \
  "vld1.32    {d14-d15}, [%[wc0]]!       @ load w2\n"                          \
  "vst1.32    {d24-d27}, [%[ptr_out1]]!  @ save r10, r11\n"                    \
  "vst1.32    {d28-d31}, [%[ptr_out1]]!  @ save r12, r13\n"                    \
  "bne    0b                             @ jump to main loop\n"

// armv7 loop tail paired with INIT: same tap sequence and stores as
// RESULT_FIRST, but first rewinds ptr_out1 (it was advanced by the in-loop
// partial-sum loads) and, after storing, reloads the next iteration's
// out-row0 partials into q8-q11 so accumulation continues seamlessly.
#define RESULT                                                                 \
  "sub    %[ptr_out1], %[ptr_out1], #32  @ ptr_out1 - 32\n" /* mul r3, with */ \
  "vld1.32    {d0-d3}, [%[r2]]!          @ load r2\n"                          \
  "vmla.f32   q12, q5, d4[0]             @ w3 * inr30\n"                       \
  "vmla.f32   q13, q5, d5[0]             @ w3 * inr32\n"                       \
  "vmla.f32   q14, q5, d6[0]             @ w3 * inr34\n"                       \
  "vmla.f32   q15, q5, d7[0]             @ w3 * inr36\n"                       \
  "vld1.32    {d8},   [%[r2]]            @ load r2, 9th float\n"               \
  "vmla.f32   q12, q6, d4[1]             @ w4 * inr31\n"                       \
  "vmla.f32   q13, q6, d5[1]             @ w4 * inr33\n"                       \
  "vmla.f32   q14, q6, d6[1]             @ w4 * inr35\n"                       \
  "vmla.f32   q15, q6, d7[1]             @ w4 * inr37\n"                       \
  "vld1.32    {d10-d13}, [%[wc0]]!       @ load w6, w7\n"                      \
  "vmla.f32   q12, q7, d5[0]             @ w5 * inr32\n"                       \
  "vmla.f32   q13, q7, d6[0]             @ w5 * inr34\n"                       \
  "vmla.f32   q14, q7, d7[0]             @ w5 * inr36\n"                       \
  "vmla.f32   q15, q7, d9[0]             @ w5 * inr38\n"                       \
  "vld1.32    {d14-d15}, [%[wc0]]!       @ load w8\n" /* mul r2, with w6, w7*/ \
  "vmla.f32   q8, q5, d0[0]              @ w6 * inr20\n"                       \
  "vmla.f32   q9, q5, d1[0]              @ w6 * inr22\n"                       \
  "vmla.f32   q10, q5, d2[0]             @ w6 * inr24\n"                       \
  "vmla.f32   q11, q5, d3[0]             @ w6 * inr26\n"                       \
  "vld1.32    {d4-d7}, [%[r4]]!          @ load r4\n"                          \
  "vmla.f32   q8, q6, d0[1]              @ w7 * inr21\n"                       \
  "vmla.f32   q9, q6, d1[1]              @ w7 * inr23\n"                       \
  "vmla.f32   q10, q6, d2[1]             @ w7 * inr25\n"                       \
  "vmla.f32   q11, q6, d3[1]             @ w7 * inr27\n"                       \
  "vld1.32    {d9},   [%[r4]]            @ load r4, 9th float\n"               \
  "vmla.f32   q8, q7, d1[0]              @ w8 * inr22\n"                       \
  "vmla.f32   q9, q7, d2[0]              @ w8 * inr24\n"                       \
  "vmla.f32   q10, q7, d3[0]             @ w8 * inr26\n"                       \
  "vmla.f32   q11, q7, d8[0]             @ w8 * inr28\n"                       \
  "sub    %[wc0], %[wc0], #144           @ wc0 - 144\n" /* mul r4, with w6*/   \
  "vld1.32    {d0-d3}, [%[r0]]!          @ load r0\n"                          \
  "vmla.f32   q12, q5, d4[0]             @ w3 * inr40\n"                       \
  "vst1.32    {d16-d19}, [%[ptr_out0]]!  @ save r00, r01\n"                    \
  "vmla.f32   q13, q5, d5[0]             @ w3 * inr42\n"                       \
  "vst1.32    {d20-d23}, [%[ptr_out0]]!  @ save r02, r03\n"                    \
  "vmla.f32   q14, q5, d6[0]             @ w3 * inr44\n"                       \
  "vmla.f32   q15, q5, d7[0]             @ w3 * inr46\n"                       \
  "vld1.32    {d8},   [%[r0]]            @ load r0, 9th float\n"               \
  "vmla.f32   q12, q6, d4[1]             @ w4 * inr41\n"                       \
  "vmla.f32   q13, q6, d5[1]             @ w4 * inr43\n"                       \
  "vmla.f32   q14, q6, d6[1]             @ w4 * inr45\n"                       \
  "vmla.f32   q15, q6, d7[1]             @ w4 * inr47\n"                       \
  "vld1.32    {d10-d13}, [%[wc0]]!       @ load w0, w1\n"                      \
  "vmla.f32   q12, q7, d5[0]             @ w5 * inr42\n"                       \
  "vmla.f32   q13, q7, d6[0]             @ w5 * inr44\n"                       \
  "vmla.f32   q14, q7, d7[0]             @ w5 * inr46\n"                       \
  "vmla.f32   q15, q7, d9[0]             @ w5 * inr48\n"                       \
  "vld1.32    {d14-d15}, [%[wc0]]!       @ load w2\n"                          \
  "vst1.32    {d24-d27}, [%[ptr_out1]]!  @ save r10, r11\n"                    \
  "vst1.32    {d28-d31}, [%[ptr_out1]]!  @ save r12, r13\n"                    \
  "vld1.32    {d16-d19}, [%[ptr_out0]]!  @ load outr0\n"                       \
  "vld1.32    {d20-d23}, [%[ptr_out0]]   @ load outr0\n"                       \
  "sub    %[ptr_out0], %[ptr_out0], #32  @ ptr_out0 - 32\n"                    \
  "subs   %[cnt], #1                     @ loop count--\n"                     \
  "bne    0b                             @ jump to main loop\n"

// C1 (single-output-channel, ARMv7) accumulation prologue: reloads the two
// partial output rows from ptr_out0/ptr_out1 (d24-d27 / d28-d31) and begins
// the 3x3 stride-2 MACs on input row r2 with vmla. vld2.32 deinterleaves r2
// into even/odd lanes (q3/q5 even, q4/q6 odd) as required for stride 2.
// Row r2 feeds both output rows: via w2 for output row 0 (q12/q13) and via
// w0 for output row 1 (q14/q15).
#define INIT_C1                                                        \
  "0:                                @ main loop\n"                    \
  "vld1.32 {d24-d27}, [%[ptr_out0]]  @ load or00, or01\n"              \
  "vld1.32 {d28-d31}, [%[ptr_out1]]  @ load or10, or11\n"              \
  "vld2.32    {d6-d9},    [%[r2]]!   @ load r2\n"                      \
  "vld2.32    {d10-d13},  [%[r2]]!   @ load r2\n"                      \
  "vld1.32    {d22},  [%[r2]]        @ load 16th float\n" /* r2 * w2*/ \
  "vmla.f32   q12,    q4, %e[w2][1]  @ w21 * r2\n"                     \
  "vmla.f32   q13,    q6, %e[w2][1]  @ w21 * r2\n"                     \
  "vld2.32    {d14-d17},    [%[r0]]! @ load r0\n"                      \
  "vmla.f32   q14,    q4, %e[w0][1]  @ w01 * r2\n"                     \
  "vmla.f32   q15,    q6, %e[w0][1]  @ w01 * r2\n"

// Same as INIT_C1 but for the first input channel: uses vmul (overwrite)
// instead of vmla, so the accumulators are initialized directly and no
// previous partial sums are loaded from ptr_out0/ptr_out1.
#define INIT_C1_FIRST                                     \
  "0:                                @ main loop\n"       \
  "vld2.32    {d6-d9},    [%[r2]]!   @ load r2\n"         \
  "vld2.32    {d10-d13},  [%[r2]]!   @ load r2\n"         \
  "vld1.32    {d22},  [%[r2]]        @ load 16th float\n" \
  "vmul.f32   q12,    q4, %e[w2][1]  @ w21 * r2\n"        \
  "vmul.f32   q13,    q6, %e[w2][1]  @ w21 * r2\n"        \
  "vld2.32    {d14-d17},    [%[r0]]! @ load r0\n"         \
  "vmul.f32   q14,    q4, %e[w0][1]  @ w01 * r2\n"        \
  "vmul.f32   q15,    q6, %e[w0][1]  @ w01 * r2\n"

// C1 main body (ARMv7): finishes the 3x3 stride-2 window over rows r0..r4,
// producing 8 outputs per iteration for each of the two output rows
// (q12/q13 = output row 0 from r0/r1/r2, q14/q15 = output row 1 from
// r2/r3/r4), stores both rows and loops on [cnt]. vext builds the
// "shift-left-by-one" vectors needed for the third tap of each kernel row;
// q11 holds the 16th/overlap element loaded after each vld2 pair.
#define COMPUTE_C1                                                           \
  "vext.32    q4, q3, q5, #1         @ r2, shift left 1\n"                   \
  "vext.32    q6, q5, q11, #1        @ r2, shift left 1\n"                   \
  "vmla.f32   q12,    q3, %e[w2][0]  @ w20 * r2\n"                           \
  "vmla.f32   q13,    q5, %e[w2][0]  @ w20 * r2\n"                           \
  "vld2.32    {d18-d21},  [%[r0]]!   @ load r0\n"                            \
  "vmla.f32   q14,    q3, %e[w0][0]  @ w00 * r2\n"                           \
  "vmla.f32   q15,    q5, %e[w0][0]  @ w00 * r2\n"                           \
  "vld1.32    {d22},  [%[r0]]        @ load 16th float\n"                    \
  "vmla.f32   q12,    q4, %f[w2][0]  @ w22 * r2\n"                           \
  "vmla.f32   q14,    q4, %f[w0][0]  @ w02 * r2\n"                           \
  "vld2.32    {d6-d9},    [%[r3]]!   @ load r3\n"                            \
  "vmla.f32   q13,    q6, %f[w2][0]  @ w22 * r2\n"                           \
  "vmla.f32   q15,    q6, %f[w0][0]  @ w02 * r2\n"                           \
  "vld2.32    {d10-d13},  [%[r3]]!   @ load r3\n" /* r0 * w0, get or0, r3 */ \
  "vmla.f32   q12,    q8, %e[w0][1]      @ w01 * r0\n"                       \
  "vmla.f32   q13,    q10, %e[w0][1]     @ w01 * r0\n"                       \
  "vext.32    q8, q7, q9, #1             @ r0, shift left 1\n"               \
  "vext.32    q10, q9, q11, #1           @ r0, shift left 1\n"               \
  "vld1.32    {d22},  [%[r3]]            @ load 16th float\n"                \
  "vmla.f32   q14,    q4, %e[w1][1]      @ w11 * r3\n"                       \
  "vmla.f32   q15,    q6, %e[w1][1]      @ w11 * r3\n"                       \
  "vmla.f32   q12,    q7, %e[w0][0]      @ w00 * r0\n"                       \
  "vmla.f32   q13,    q9, %e[w0][0]      @ w00 * r0\n"                       \
  "vext.32    q4, q3, q5, #1             @ r3, shift left 1\n"               \
  "vext.32    q6, q5, q11, #1            @ r3, shift left 1\n"               \
  "vmla.f32   q14,    q3, %e[w1][0]      @ w10 * r3\n"                       \
  "vmla.f32   q15,    q5, %e[w1][0]      @ w10 * r3\n"                       \
  "vmla.f32   q12,    q8, %f[w0][0]      @ w02 * r0\n"                       \
  "vld2.32    {d14-d17},  [%[r1]]!       @ load r1\n"                        \
  "vmla.f32   q13,    q10,%f[w0][0]      @ w02 * r0\n"                       \
  "vld2.32    {d18-d21},  [%[r1]]!       @ load r1\n"                        \
  "vmla.f32   q14,    q4, %f[w1][0]      @ w12 * r3\n"                       \
  "vld2.32    {d6-d9},    [%[r4]]!       @ load r4\n"                        \
  "vmla.f32   q15,    q6, %f[w1][0]      @ w12 * r3\n"                       \
  "vld2.32    {d10-d13},  [%[r4]]!       @ load r4\n"                        \
  "vld1.32    {d22},  [%[r1]]            @ load 16th float\n" /* r1 * w1  */ \
  "vmla.f32   q12,    q8, %e[w1][1]      @ w11 * r1\n"                       \
  "vmla.f32   q13,    q10, %e[w1][1]     @ w11 * r1\n"                       \
  "vext.32    q8, q7, q9, #1             @ r1, shift left 1\n"               \
  "vext.32    q10, q9, q11, #1           @ r1, shift left 1\n"               \
  "vmla.f32   q14,    q4, %e[w2][1]      @ w21 * r4\n"                       \
  "vmla.f32   q15,    q6, %e[w2][1]      @ w21 * r4\n"                       \
  "vld1.32    {d22},  [%[r4]]            @ load 16th float\n"                \
  "vmla.f32   q12,    q7, %e[w1][0]      @ w10 * r1\n"                       \
  "vmla.f32   q13,    q9, %e[w1][0]      @ w10 * r1\n"                       \
  "vext.32    q4, q3, q5, #1             @ r1, shift left 1\n"               \
  "vext.32    q6, q5, q11, #1            @ r1, shift left 1\n"               \
  "vmla.f32   q14,    q3, %e[w2][0]      @ w20 * r4\n"                       \
  "vmla.f32   q15,    q5, %e[w2][0]      @ w20 * r4\n"                       \
  "vmla.f32   q12,    q8, %f[w1][0]      @ w12 * r1\n"                       \
  "vmla.f32   q13,    q10, %f[w1][0]     @ w12 * r1\n"                       \
  "vmla.f32   q14,    q4, %f[w2][0]      @ w22 * r4\n"                       \
  "vmla.f32   q15,    q6, %f[w2][0]      @ w22 * r4\n"                       \
  "vst1.32    {d24-d27},  [%[ptr_out0]]! @ save or0\n"                       \
  "vst1.32    {d28-d31},  [%[ptr_out1]]! @ save or0\n"                       \
  "subs   %[cnt], #1                     @ loop count -1\n"                  \
  "bne    0b                             @ jump to main loop\n"
#endif

//! 3x3 stride-2 direct convolution, fp32.
//!
//! Strategy: for each block of output rows, the (padded) input window is
//! packed into the context workspace (prepack_input_nxw); each OpenMP thread
//! then accumulates a private OUT_C_BLOCK x hout_r_block x wout_round output
//! tile with NEON inline assembly, and bias + activation are fused while the
//! tile is written back to o_data (write_to_oc4_fp32 / write_to_oc1_fp32).
//!
//! @param i_data  input activations, NCHW, bs * ic * ih * win floats
//! @param o_data  output activations, NCHW, bs * oc * oh * ow floats
//! @param bs      batch size
//! @param oc      number of output channels
//! @param oh      output height
//! @param ow      output width
//! @param ic      number of input channels
//! @param ih      input height
//! @param win     input width
//! @param weights conv weights; for the blocked channels they are
//!                OUT_C_BLOCK-interleaved (9 * OUT_C_BLOCK floats per input
//!                channel), as the wc0 stepping below shows
//! @param bias    per-output-channel bias (only read when param.bias != null)
//! @param param   conv params: paddings, bias flag, activation
//! @param ctx     ARM context: thread count, L2 cache size, workspace
//!
//! Fix vs. previous revision: removed the unused, uninitialized VLA
//! `float ptr_write[wout_round]` — it was never referenced.
void conv_3x3s2_direct_fp32(const float* i_data,
                            float* o_data,
                            int bs,
                            int oc,
                            int oh,
                            int ow,
                            int ic,
                            int ih,
                            int win,
                            const float* weights,
                            const float* bias,
                            const operators::ConvParam& param,
                            ARMContext* ctx) {
  //! 3x3s2 convolution, implemented by direct algorithm
  //! prepack input to tmp buffer
  //! write output to tmp buffer
  auto paddings = *param.paddings;
  auto act_param = param.activation_param;
  const int threads = ctx->threads();
  int l2_size = ctx->llc_size() / sizeof(float);
  const int pad_w = paddings[2];
  const int pad_h = paddings[0];
  // round output width up to a multiple of OUT_W_BLOCK (4)
  const int wout_round = ROUNDUP(ow, OUT_W_BLOCK);
  const int win_round = wout_round * 2 /*stride_w*/ + 1;
  bool flag_bias = param.bias != nullptr;

  //! get h block
  //! win_round * ic * hin_r_block + wout_round * OUT_C_BLOCK * hout_r_block
  //! * threads = l2_size
  //! win_round = 2 * wout_round + 1
  //! hin_r_block = 2 * hout_r_block + 1
  // NOTE: this sizing must stay in sync with
  // conv3x3s2_direct_workspace_size(), which repeats the same computation.
  int hout_r_block =
      (l2_size - 2 * wout_round * ic - ic) /
      ((4 * wout_round + 2) * ic + wout_round * OUT_C_BLOCK * threads);
  hout_r_block = hout_r_block > oh ? oh : hout_r_block;
  hout_r_block = (hout_r_block / OUT_H_BLOCK) * OUT_H_BLOCK;
  hout_r_block = hout_r_block < OUT_H_BLOCK ? OUT_H_BLOCK : hout_r_block;

  const int hin_r_block = hout_r_block * 2 /*stride_h*/ + 1;

  int in_len = win_round * ic;
  int pre_in_size = hin_r_block * in_len;
  int pre_out_size = OUT_C_BLOCK * hout_r_block * wout_round;

  float* tmp_work_space = ctx->workspace_data<float>();
  // zero row used both as zero-padding source and as zero bias
  float ptr_zero[win_round];  // NOLINT
  memset(ptr_zero, 0, sizeof(float) * win_round);

  //! l2_cache start
  float* pre_din = tmp_work_space;

  int size_in_channel = win * ih;
  int size_out_channel = ow * oh;
  int w_stride = ic * 9; /*kernel_w * kernel_h*/
  int w_stride_chin = OUT_C_BLOCK * 9;

  int ws = -pad_w;
  int we = ws + win_round;
  int w_loop = wout_round / 4;

  int c_remain = oc - (oc / OUT_C_BLOCK) * OUT_C_BLOCK;
  int c_round_down = (oc / OUT_C_BLOCK) * OUT_C_BLOCK;

  int out_row_stride = OUT_C_BLOCK * wout_round;
  auto act_type = act_param.active_type;
  float alpha = 0.f;
  int flag_act = 0x00;  // relu: 1, relu6: 2, leakey: 3
  if (act_param.has_active) {
    if (act_type == lite_api::ActivationType::kRelu) {
      flag_act = 0x01;
    } else if (act_type == lite_api::ActivationType::kRelu6) {
      flag_act = 0x02;
      alpha = act_param.Relu_clipped_coef;
    } else if (act_type == lite_api::ActivationType::kLeakyRelu) {
      flag_act = 0x03;
      alpha = act_param.Leaky_relu_alpha;
    }
  }

  for (int n = 0; n < bs; ++n) {
    const float* din_batch = i_data + n * ic * size_in_channel;
    float* dout_batch = o_data + n * oc * size_out_channel;
    // process the output in horizontal stripes of hout_r_block rows
    for (int h = 0; h < oh; h += hout_r_block) {
      int h_kernel = hout_r_block;
      if (h + hout_r_block > oh) {
        h_kernel = oh - h;
      }

      int hs = h * 2 /*stride_h*/ - pad_h;
      int he = hs + h_kernel * 2 /*stride_h*/ + 1;

      // pack the padded input window [hs, he) x [ws, we) for all ic channels
      prepack_input_nxw(
          din_batch, pre_din, 0, ic, hs, he, ws, we, ic, win, ih, ptr_zero);

      const float* cblock_inr0 = pre_din;
      const float* cblock_inr1 = cblock_inr0 + in_len;
      const float* cblock_inr2 = cblock_inr1 + in_len;
      const float* cblock_inr3 = cblock_inr2 + in_len;
      const float* cblock_inr4 = cblock_inr3 + in_len;

// main path: output channels in groups of OUT_C_BLOCK (4); each thread
// accumulates into its own pre_out tile placed after the shared packed input
#pragma omp parallel for num_threads(threads)
      for (int c = 0; c < c_round_down; c += OUT_C_BLOCK) {
#ifdef ARM_WITH_OMP
        float* pre_out =
            pre_din + pre_in_size + omp_get_thread_num() * pre_out_size;
#else
        float* pre_out = pre_din + pre_in_size;
#endif
        const float* block_inr0 = cblock_inr0;
        const float* block_inr1 = cblock_inr1;
        const float* block_inr2 = cblock_inr2;
        const float* block_inr3 = cblock_inr3;
        const float* block_inr4 = cblock_inr4;

        const float* weight_c = weights + c * w_stride;
        const float* bias_ptr = ptr_zero;
        if (flag_bias) {
          bias_ptr = bias + c;
        }

        // two output rows (OUT_H_BLOCK) per iteration, reading five input rows
        for (int hk = 0; hk < h_kernel; hk += OUT_H_BLOCK) {
          const float* wc0 = weight_c;

          const float* inr0 = block_inr0;
          const float* inr1 = block_inr1;
          const float* inr2 = block_inr2;
          const float* inr3 = block_inr3;
          const float* inr4 = block_inr4;

          float* pre_out0 = pre_out + hk * out_row_stride;
          float* pre_out1 = pre_out0 + out_row_stride;
#ifdef __aarch64__
          // first input channel: *_FIRST macros initialize the accumulators
          float* ptr_out0 = pre_out0;
          float* ptr_out1 = pre_out1;
          float32x4_t w0 = vld1q_f32(wc0);       // w0, v23
          float32x4_t w1 = vld1q_f32(wc0 + 4);   // w1, v24
          float32x4_t w2 = vld1q_f32(wc0 + 8);   // w2, v25
          float32x4_t w3 = vld1q_f32(wc0 + 12);  // w3, v26
          float32x4_t w4 = vld1q_f32(wc0 + 16);  // w4, v27
          float32x4_t w5 = vld1q_f32(wc0 + 20);  // w5, v28
          float32x4_t w6 = vld1q_f32(wc0 + 24);  // w6, v29
          float32x4_t w7 = vld1q_f32(wc0 + 28);  // w7, v30
          float32x4_t w8 = vld1q_f32(wc0 + 32);  // w8, v31
          const float* r0 = inr0;
          const float* r1 = inr1;
          const float* r2 = inr2;
          const float* r3 = inr3;
          const float* r4 = inr4;

          int cnt = w_loop;
          // clang-format off
          asm volatile(
            INIT_FIRST COMPUTE RESULT_FIRST
            : [cnt] "+r"(cnt), [r0] "+r"(r0), [r1] "+r"(r1),
              [r2] "+r"(r2),[r3] "+r"(r3), [r4] "+r"(r4),
              [ptr_out0] "+r"(ptr_out0),
              [ptr_out1] "+r"(ptr_out1)
            : [w0] "w"(w0),
              [w1] "w"(w1), [w2] "w"(w2),
              [w3] "w"(w3), [w4] "w"(w4),
              [w5] "w"(w5), [w6] "w"(w6),
              [w7] "w"(w7), [w8] "w"(w8)
            : "cc","memory","v0","v1","v2","v3","v4",
              "v5","v6","v7","v8","v9","v10","v11","v12","v13",
              "v14","v15","v16","v17","v18","v19","v20","v21","v22");
          // clang-format on
          wc0 += 9 * OUT_C_BLOCK;
          inr0 += win_round;
          inr1 += win_round;
          inr2 += win_round;
          inr3 += win_round;
          inr4 += win_round;

          // remaining input channels: accumulate onto the existing tile
          for (int i = 0; i < ic - 1; ++i) {
            ptr_out0 = pre_out0;
            ptr_out1 = pre_out1;

            w0 = vld1q_f32(wc0);       // w0, v23
            w1 = vld1q_f32(wc0 + 4);   // w1, v24
            w2 = vld1q_f32(wc0 + 8);   // w2, v25
            w3 = vld1q_f32(wc0 + 12);  // w3, v26
            w4 = vld1q_f32(wc0 + 16);  // w4, v27
            w5 = vld1q_f32(wc0 + 20);  // w5, v28
            w6 = vld1q_f32(wc0 + 24);  // w6, v29
            w7 = vld1q_f32(wc0 + 28);  // w7, v30
            w8 = vld1q_f32(wc0 + 32);  // w8, v31

            r0 = inr0;
            r1 = inr1;
            r2 = inr2;
            r3 = inr3;
            r4 = inr4;

            int cnt = w_loop;
            // clang-format off
            asm volatile(
            INIT COMPUTE RESULT
            : [cnt] "+r"(cnt), [r0] "+r"(r0), [r1] "+r"(r1),
              [r2] "+r"(r2),[r3] "+r"(r3), [r4] "+r"(r4),
              [ptr_out0] "+r"(ptr_out0),
              [ptr_out1] "+r"(ptr_out1)
            : [w0] "w"(w0),
              [w1] "w"(w1), [w2] "w"(w2),
              [w3] "w"(w3), [w4] "w"(w4),
              [w5] "w"(w5), [w6] "w"(w6),
              [w7] "w"(w7), [w8] "w"(w8)
            : "cc","memory","v0","v1","v2","v3","v4",
              "v5","v6","v7","v8","v9","v10","v11","v12","v13",
              "v14","v15","v16","v17","v18","v19","v20","v21","v22");
            // clang-format on
            wc0 += 9 * OUT_C_BLOCK;
            inr0 += win_round;
            inr1 += win_round;
            inr2 += win_round;
            inr3 += win_round;
            inr4 += win_round;
          }
#else   // not __aarch64__
          const float* wc00 = wc0;
          float* ptr_out0 = pre_out0;
          float* ptr_out1 = pre_out1;
          const float* r0 = inr0;
          const float* r1 = inr1;
          const float* r2 = inr2;
          const float* r3 = inr3;
          const float* r4 = inr4;
          int cnt = w_loop;
          // clang-format off
          asm volatile(
            INIT_FIRST COMPUTE RESULT_FIRST
          : [cnt] "+r"(cnt),
              [r0] "+r"(r0),[r1] "+r"(r1),
              [r2] "+r"(r2),[r3] "+r"(r3),
              [r4] "+r"(r4),
              [ptr_out0] "+r"(ptr_out0),
              [ptr_out1] "+r"(ptr_out1),
              [wc0] "+r"(wc00)
          :
          : "cc","memory","q0","q1","q2","q3","q4",
              "q5","q6","q7","q8","q9","q10",
              "q11","q12","q13","q14","q15"
          );
          // clang-format on
          wc0 += w_stride_chin;
          inr0 += win_round;
          inr1 += win_round;
          inr2 += win_round;
          inr3 += win_round;
          inr4 += win_round;

          // remaining input channels: accumulate onto the existing tile
          for (int i = 0; i < ic - 1; ++i) {
            wc00 = wc0;
            ptr_out0 = pre_out0;
            ptr_out1 = pre_out1;

            r0 = inr0;
            r1 = inr1;
            r2 = inr2;
            r3 = inr3;
            r4 = inr4;

            cnt = w_loop;
            // clang-format off
            asm volatile(
              INIT COMPUTE RESULT
            : [cnt] "+r"(cnt),
              [r0] "+r"(r0),[r1] "+r"(r1),
              [r2] "+r"(r2),[r3] "+r"(r3),
              [r4] "+r"(r4),
              [ptr_out0] "+r"(ptr_out0),
              [ptr_out1] "+r"(ptr_out1),
              [wc0] "+r"(wc00)
            :
            : "cc","memory","q0","q1","q2","q3","q4",
              "q5","q6","q7","q8","q9","q10",
              "q11","q12","q13","q14","q15"
            );
            // clang-format on
            wc0 += w_stride_chin;
            inr0 += win_round;
            inr1 += win_round;
            inr2 += win_round;
            inr3 += win_round;
            inr4 += win_round;
          }
#endif  // __aarch64__
          // slide the 5-row input window down by 4 rows
          // (OUT_H_BLOCK output rows * stride 2)
          block_inr0 = block_inr4;
          block_inr1 = block_inr0 + in_len;
          block_inr2 = block_inr1 + in_len;
          block_inr3 = block_inr2 + in_len;
          block_inr4 = block_inr3 + in_len;
        }
        // fuse bias + activation while writing the 4-channel tile to o_data
        write_to_oc4_fp32(pre_out,
                          dout_batch,
                          c,
                          c + OUT_C_BLOCK,
                          h,
                          h + h_kernel,
                          0,
                          wout_round,
                          oc,
                          oh,
                          ow,
                          flag_act,
                          alpha,
                          bias_ptr);
      }

// remainder path: leftover output channels (< OUT_C_BLOCK), one at a time;
// their weights stay OUT_C_BLOCK-interleaved, so lane c of each group of 4
// holds the valid weight (hence the wc0[c + 4*k] indexing below)
#pragma omp parallel for num_threads(threads)
      for (int c = 0; c < c_remain; ++c) {
#ifdef ARM_WITH_OMP
        float* pre_out =
            pre_din + pre_in_size + omp_get_thread_num() * pre_out_size;
#else
        float* pre_out = pre_din + pre_in_size;
#endif

        const float* block_inr0 = cblock_inr0;
        const float* block_inr1 = cblock_inr1;
        const float* block_inr2 = cblock_inr2;
        const float* block_inr3 = cblock_inr3;
        const float* block_inr4 = cblock_inr4;

        //! get weights ptr of remained
        const float* weight_c = weights + c_round_down * w_stride;

        //! fill bias to one channel
        const float* bias_ptr = ptr_zero;
        if (flag_bias) {
          bias_ptr = bias + c_round_down + c;
        }

        for (int hk = 0; hk < h_kernel; hk += OUT_H_BLOCK) {
          const float* wc0 = weight_c;

          const float* inr0 = block_inr0;
          const float* inr1 = block_inr1;
          const float* inr2 = block_inr2;
          const float* inr3 = block_inr3;
          const float* inr4 = block_inr4;

          float* pre_out0 = pre_out + hk * wout_round;
          float* pre_out1 = pre_out0 + wout_round;
#ifdef __aarch64__
          float* ptr_out0 = pre_out0;
          float* ptr_out1 = pre_out1;

          //! get valid weights of current output channel
          float32x4_t w0 = vdupq_n_f32(wc0[c]);       // w0, v23
          float32x4_t w1 = vdupq_n_f32(wc0[c + 4]);   // w1, v24
          float32x4_t w2 = vdupq_n_f32(wc0[c + 8]);   // w2, v25
          float32x4_t w3 = vdupq_n_f32(wc0[c + 12]);  // w3, v26
          float32x4_t w4 = vdupq_n_f32(wc0[c + 16]);  // w4, v27
          float32x4_t w5 = vdupq_n_f32(wc0[c + 20]);  // w5, v28
          float32x4_t w6 = vdupq_n_f32(wc0[c + 24]);  // w6, v29
          float32x4_t w7 = vdupq_n_f32(wc0[c + 28]);  // w7, v30
          float32x4_t w8 = vdupq_n_f32(wc0[c + 32]);  // w8, v31

          const float* r0 = inr0;
          const float* r1 = inr1;
          const float* r2 = inr2;
          const float* r3 = inr3;
          const float* r4 = inr4;

          int cnt = w_loop;
          // clang-format off
          asm volatile(
              INIT_C1_FIRST COMPUTE_C1 RESULT_C1_FIRST
          : [cnt] "+r"(cnt),
            [r0] "+r"(r0),[r1] "+r"(r1),
            [r2] "+r"(r2),[r3] "+r"(r3),
            [r4] "+r"(r4),
            [ptr_out0] "+r"(ptr_out0),
            [ptr_out1] "+r"(ptr_out1)
          : [w0] "w"(w0),[w1] "w"(w1),[w2] "w"(w2),
            [w3] "w"(w3),[w4] "w"(w4),[w5] "w"(w5),
            [w6] "w"(w6),[w7] "w"(w7),[w8] "w"(w8)
            : "cc","memory","v0","v1","v2","v3",
              "v4","v5","v6","v7","v8","v9","v10","v11",
              "v12","v13","v14","v15","v16","v21","v22");
          // clang-format on
          wc0 += 36;
          inr0 += win_round;
          inr1 += win_round;
          inr2 += win_round;
          inr3 += win_round;
          inr4 += win_round;
          for (int i = 0; i < ic - 1; ++i) {
            ptr_out0 = pre_out0;
            ptr_out1 = pre_out1;

            //! get valid weights of current output channel
            w0 = vdupq_n_f32(wc0[c]);       // w0, v23
            w1 = vdupq_n_f32(wc0[c + 4]);   // w1, v24
            w2 = vdupq_n_f32(wc0[c + 8]);   // w2, v25
            w3 = vdupq_n_f32(wc0[c + 12]);  // w3, v26
            w4 = vdupq_n_f32(wc0[c + 16]);  // w4, v27
            w5 = vdupq_n_f32(wc0[c + 20]);  // w5, v28
            w6 = vdupq_n_f32(wc0[c + 24]);  // w6, v29
            w7 = vdupq_n_f32(wc0[c + 28]);  // w7, v30
            w8 = vdupq_n_f32(wc0[c + 32]);  // w8, v31

            r0 = inr0;
            r1 = inr1;
            r2 = inr2;
            r3 = inr3;
            r4 = inr4;

            cnt = w_loop;
            // clang-format off
            asm volatile(
                INIT_C1 COMPUTE_C1 RESULT_C1
                : [cnt] "+r"(cnt),
                  [r0] "+r"(r0),[r1] "+r"(r1),
                  [r2] "+r"(r2),[r3] "+r"(r3),
                  [r4] "+r"(r4),
                  [ptr_out0] "+r"(ptr_out0),
                  [ptr_out1] "+r"(ptr_out1)
                : [w0] "w"(w0),[w1] "w"(w1),[w2] "w"(w2),
                  [w3] "w"(w3),[w4] "w"(w4),[w5] "w"(w5),
                  [w6] "w"(w6),[w7] "w"(w7),[w8] "w"(w8)
                : "cc","memory","v0","v1","v2","v3",
                  "v4","v5","v6","v7","v8","v9","v10","v11",
                  "v12","v13","v14","v15","v16","v21","v22");
            // clang-format on
            wc0 += 36;
            inr0 += win_round;
            inr1 += win_round;
            inr2 += win_round;
            inr3 += win_round;
            inr4 += win_round;
          }
#else   // not __aarch64__
          float* ptr_out0 = pre_out0;
          float* ptr_out1 = pre_out1;
          //! get valid weights of current output channel
          float w_tmp[12] = {wc0[c],
                             wc0[c + 4],
                             wc0[c + 8],
                             0.f,
                             wc0[c + 12],
                             wc0[c + 16],
                             wc0[c + 20],
                             0.f,
                             wc0[c + 24],
                             wc0[c + 28],
                             wc0[c + 32],
                             0.f};
          float32x4_t w0 = vld1q_f32(w_tmp);      // w0, w1, w2, q0
          float32x4_t w1 = vld1q_f32(w_tmp + 4);  // w3, w4, w5, q1
          float32x4_t w2 = vld1q_f32(w_tmp + 8);  // w6, w7, w8, q2

          const float* r0 = inr0;
          const float* r1 = inr1;
          const float* r2 = inr2;
          const float* r3 = inr3;
          const float* r4 = inr4;

          // the ARMv7 C1 asm consumes 8 outputs per iteration, so it runs
          // w_loop / 2 times; an odd w_loop leaves 4 outputs for scalar code
          int cnt = w_loop / 2;

          if (cnt > 0) {
            // clang-format off
            asm volatile(
              INIT_C1_FIRST COMPUTE_C1
              : [cnt] "+r"(cnt),
                [r0] "+r"(r0),[r1] "+r"(r1),[r2] "+r"(r2),
                [r3] "+r"(r3),[r4] "+r"(r4),
                [ptr_out0] "+r"(ptr_out0),
                [ptr_out1] "+r"(ptr_out1)
              : [w0] "w"(w0), [w1] "w"(w1), [w2] "w"(w2)
              : "cc","memory","q3","q4",
                "q5","q6","q7","q8","q9","q10",
                "q11","q12","q13","q14","q15"
            );
          }
          //! deal with remain ow
          if (w_loop & 1) {
              ptr_out0[0] =
                  r0[0] * w_tmp[0] + r0[1] * w_tmp[1] + r0[2] * w_tmp[2] +
                  r1[0] * w_tmp[4] + r1[1] * w_tmp[5] + r1[2] * w_tmp[6] +
                  r2[0] * w_tmp[8] + r2[1] * w_tmp[9] + r2[2] * w_tmp[10];

              ptr_out0[1] =
                  r0[2] * w_tmp[0] + r0[3] * w_tmp[1] + r0[4] * w_tmp[2] +
                  r1[2] * w_tmp[4] + r1[3] * w_tmp[5] + r1[4] * w_tmp[6] +
                  r2[2] * w_tmp[8] + r2[3] * w_tmp[9] + r2[4] * w_tmp[10];

              ptr_out0[2] =
                  r0[4] * w_tmp[0] + r0[5] * w_tmp[1] + r0[6] * w_tmp[2] +
                  r1[4] * w_tmp[4] + r1[5] * w_tmp[5] + r1[6] * w_tmp[6] +
                  r2[4] * w_tmp[8] + r2[5] * w_tmp[9] + r2[6] * w_tmp[10];

              ptr_out0[3] =
                  r0[6] * w_tmp[0] + r0[7] * w_tmp[1] + r0[8] * w_tmp[2] +
                  r1[6] * w_tmp[4] + r1[7] * w_tmp[5] + r1[8] * w_tmp[6] +
                  r2[6] * w_tmp[8] + r2[7] * w_tmp[9] + r2[8] * w_tmp[10];

              ptr_out1[0] =
                  r2[0] * w_tmp[0] + r2[1] * w_tmp[1] + r2[2] * w_tmp[2] +
                  r3[0] * w_tmp[4] + r3[1] * w_tmp[5] + r3[2] * w_tmp[6] +
                  r4[0] * w_tmp[8] + r4[1] * w_tmp[9] + r4[2] * w_tmp[10];

              ptr_out1[1] =
                  r2[2] * w_tmp[0] + r2[3] * w_tmp[1] + r2[4] * w_tmp[2] +
                  r3[2] * w_tmp[4] + r3[3] * w_tmp[5] + r3[4] * w_tmp[6] +
                  r4[2] * w_tmp[8] + r4[3] * w_tmp[9] + r4[4] * w_tmp[10];

              ptr_out1[2] =
                  r2[4] * w_tmp[0] + r2[5] * w_tmp[1] + r2[6] * w_tmp[2] +
                  r3[4] * w_tmp[4] + r3[5] * w_tmp[5] + r3[6] * w_tmp[6] +
                  r4[4] * w_tmp[8] + r4[5] * w_tmp[9] + r4[6] * w_tmp[10];

              ptr_out1[3] =
                  r2[6] * w_tmp[0] + r2[7] * w_tmp[1] + r2[8] * w_tmp[2] +
                  r3[6] * w_tmp[4] + r3[7] * w_tmp[5] + r3[8] * w_tmp[6] +
                  r4[6] * w_tmp[8] + r4[7] * w_tmp[9] + r4[8] * w_tmp[10];
          }
          wc0 += 36;
          inr0 += win_round;
          inr1 += win_round;
          inr2 += win_round;
          inr3 += win_round;
          inr4 += win_round;
          for (int i = 0; i < ic - 1; ++i) {
            ptr_out0 = pre_out0;
            ptr_out1 = pre_out1;

            //! get valid weights of current output channel
            float w_tmp[12] = {wc0[c],
                               wc0[c + 4],
                               wc0[c + 8],
                               0.f,
                               wc0[c + 12],
                               wc0[c + 16],
                               wc0[c + 20],
                               0.f,
                               wc0[c + 24],
                               wc0[c + 28],
                               wc0[c + 32],
                               0.f};
            w0 = vld1q_f32(w_tmp);      // w0, w1, w2, q0
            w1 = vld1q_f32(w_tmp + 4);  // w3, w4, w5, q1
            w2 = vld1q_f32(w_tmp + 8);  // w6, w7, w8, q2

            r0 = inr0;
            r1 = inr1;
            r2 = inr2;
            r3 = inr3;
            r4 = inr4;

            cnt = w_loop / 2;
            if (cnt > 0) {
              // clang-format off
              asm volatile(
                INIT_C1 COMPUTE_C1
                : [cnt] "+r"(cnt),
                  [r0] "+r"(r0),[r1] "+r"(r1),[r2] "+r"(r2),
                  [r3] "+r"(r3),[r4] "+r"(r4),
                  [ptr_out0] "+r"(ptr_out0),
                  [ptr_out1] "+r"(ptr_out1)
                : [w0] "w"(w0), [w1] "w"(w1), [w2] "w"(w2)
                : "cc","memory","q3","q4",
                  "q5","q6","q7","q8","q9","q10",
                  "q11","q12","q13","q14","q15"
             );
              // clang-format on
            }
            //! deal with remain ow
            if (w_loop & 1) {
              ptr_out0[0] +=
                  r0[0] * w_tmp[0] + r0[1] * w_tmp[1] + r0[2] * w_tmp[2] +
                  r1[0] * w_tmp[4] + r1[1] * w_tmp[5] + r1[2] * w_tmp[6] +
                  r2[0] * w_tmp[8] + r2[1] * w_tmp[9] + r2[2] * w_tmp[10];

              ptr_out0[1] +=
                  r0[2] * w_tmp[0] + r0[3] * w_tmp[1] + r0[4] * w_tmp[2] +
                  r1[2] * w_tmp[4] + r1[3] * w_tmp[5] + r1[4] * w_tmp[6] +
                  r2[2] * w_tmp[8] + r2[3] * w_tmp[9] + r2[4] * w_tmp[10];

              ptr_out0[2] +=
                  r0[4] * w_tmp[0] + r0[5] * w_tmp[1] + r0[6] * w_tmp[2] +
                  r1[4] * w_tmp[4] + r1[5] * w_tmp[5] + r1[6] * w_tmp[6] +
                  r2[4] * w_tmp[8] + r2[5] * w_tmp[9] + r2[6] * w_tmp[10];

              ptr_out0[3] +=
                  r0[6] * w_tmp[0] + r0[7] * w_tmp[1] + r0[8] * w_tmp[2] +
                  r1[6] * w_tmp[4] + r1[7] * w_tmp[5] + r1[8] * w_tmp[6] +
                  r2[6] * w_tmp[8] + r2[7] * w_tmp[9] + r2[8] * w_tmp[10];

              ptr_out1[0] +=
                  r2[0] * w_tmp[0] + r2[1] * w_tmp[1] + r2[2] * w_tmp[2] +
                  r3[0] * w_tmp[4] + r3[1] * w_tmp[5] + r3[2] * w_tmp[6] +
                  r4[0] * w_tmp[8] + r4[1] * w_tmp[9] + r4[2] * w_tmp[10];

              ptr_out1[1] +=
                  r2[2] * w_tmp[0] + r2[3] * w_tmp[1] + r2[4] * w_tmp[2] +
                  r3[2] * w_tmp[4] + r3[3] * w_tmp[5] + r3[4] * w_tmp[6] +
                  r4[2] * w_tmp[8] + r4[3] * w_tmp[9] + r4[4] * w_tmp[10];

              ptr_out1[2] +=
                  r2[4] * w_tmp[0] + r2[5] * w_tmp[1] + r2[6] * w_tmp[2] +
                  r3[4] * w_tmp[4] + r3[5] * w_tmp[5] + r3[6] * w_tmp[6] +
                  r4[4] * w_tmp[8] + r4[5] * w_tmp[9] + r4[6] * w_tmp[10];

              ptr_out1[3] +=
                  r2[6] * w_tmp[0] + r2[7] * w_tmp[1] + r2[8] * w_tmp[2] +
                  r3[6] * w_tmp[4] + r3[7] * w_tmp[5] + r3[8] * w_tmp[6] +
                  r4[6] * w_tmp[8] + r4[7] * w_tmp[9] + r4[8] * w_tmp[10];
            }

            wc0 += 36;
            inr0 += win_round;
            inr1 += win_round;
            inr2 += win_round;
            inr3 += win_round;
            inr4 += win_round;
          }
#endif  // __aarch64__
          // slide the 5-row input window down by 4 rows
          block_inr0 = block_inr4;
          block_inr1 = block_inr0 + in_len;
          block_inr2 = block_inr1 + in_len;
          block_inr3 = block_inr2 + in_len;
          block_inr4 = block_inr3 + in_len;
        }
        // fuse bias + activation while writing the single channel to o_data
        write_to_oc1_fp32(pre_out,
                          dout_batch,
                          c + c_round_down,
                          c + c_round_down + 1,
                          h,
                          h + h_kernel,
                          0,
                          wout_round,
                          oc,
                          oh,
                          ow,
                          flag_act,
                          alpha,
                          bias_ptr);
      }
    }
  }
}

}  // namespace math
}  // namespace arm
}  // namespace lite
}  // namespace paddle
