//
// Created by root on 20-8-9.
//

#include <libyuv/planar_functions.h>
#include "ext_convert.h"

#ifdef __cplusplus
namespace libyuv {
    extern "C" {
#endif
    // clang-format off
// BGRTOUV: compute 8 U and 8 V samples from per-channel 16-bit sums held in
// the given q registers (QB/QG/QR).  Assumes the caller preloaded q10..q14
// with the half-scale U/V coefficients and q15 with 0x8080 (+128 bias plus
// rounding), as BGR24ToUVRow_NEON below does.  16x2 input pixels produce
// 8x1 output samples; results land in d0 (U) and d1 (V).
#define BGRTOUV(QB, QG, QR)                                                 \
  "vmul.s16   q8, " #QB ", q10               \n" /* B                    */ \
  "vmls.s16   q8, " #QG ", q11               \n" /* G                    */ \
  "vmls.s16   q8, " #QR ", q12               \n" /* R                    */ \
  "vadd.u16   q8, q8, q15                    \n" /* +128 -> unsigned     */ \
  "vmul.s16   q9, " #QR ", q10               \n" /* R                    */ \
  "vmls.s16   q9, " #QG ", q14               \n" /* G                    */ \
  "vmls.s16   q9, " #QB ", q13               \n" /* B                    */ \
  "vadd.u16   q9, q9, q15                    \n" /* +128 -> unsigned     */ \
  "vqshrn.u16  d0, q8, #8                    \n" /* 16 bit to 8 bit U    */ \
  "vqshrn.u16  d1, q9, #8                    \n" /* 16 bit to 8 bit V    */
// clang-format on

    // Convert 8 pixels per iteration to 8 Y bytes:
    // Y = ((66*c0 + 129*c1 + 25*c2) >> 8, rounded) + 16, saturating.
    // NOTE(review): byte 0 is multiplied by the R coefficient (66) and byte 2
    // by the B coefficient (25) — i.e. this "BGR24" data is treated as
    // R,G,B in memory, consistent with MAKEROWY(BGR24, 0, 1, 2, 3) in this
    // file; confirm against the actual pixel source.
    // width must be a multiple of 8 (BGR24ToYRow_Any_NEON handles the rest).
    void BGR24ToYRow_NEON(const uint8_t *src_bgr, uint8_t *dst_y, int width) {
        asm volatile(
        "vmov.u8    d4, #25                        \n"  // B * 0.1016 coefficient
        "vmov.u8    d5, #129                       \n"  // G * 0.5078 coefficient
        "vmov.u8    d6, #66                        \n"  // R * 0.2578 coefficient
        "vmov.u8    d7, #16                        \n"  // Add 16 constant
        "1:                                        \n"
        "vld3.8     {d0, d1, d2}, [%0]!            \n"  // load 8 pixels of BGR24.
        "subs       %2, %2, #8                     \n"  // 8 processed per loop.
        "vmull.u8   q8, d0, d6                     \n"  // R
        "vmlal.u8   q8, d1, d5                     \n"  // G
        "vmlal.u8   q8, d2, d4                     \n"  // B
        "vqrshrn.u16 d0, q8, #8                    \n"  // 16 bit to 8 bit Y
        "vqadd.u8   d0, d7                         \n"  // saturating +16
        "vst1.8     {d0}, [%1]!                    \n"  // store 8 pixels Y.
        "bgt        1b                             \n"
        : "+r"(src_bgr),  // %0
        "+r"(dst_y),      // %1
        "+r"(width)       // %2
        :
        : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8");
    }

    // Subsample two rows of 16 pixels (2x2 average) down to 8 U and 8 V
    // bytes.  src_stride_bgr locates the second source row.  width must be a
    // multiple of 16 (BGR24ToUVRow_Any_NEON handles remainders).
    void BGR24ToUVRow_NEON(const uint8_t *src_bgr,
                           int src_stride_bgr,
                           uint8_t *dst_u,
                           uint8_t *dst_v,
                           int width) {
        asm volatile (
        "add        %1, %0, %1                     \n"  // src_stride + src_bgr = second row
        "vmov.s16   q10, #112 / 2                  \n"  // UB / VR 0.875 coefficient
        "vmov.s16   q11, #74 / 2                   \n"  // UG -0.5781 coefficient
        "vmov.s16   q12, #38 / 2                   \n"  // UR -0.2969 coefficient
        "vmov.s16   q13, #18 / 2                   \n"  // VB -0.1406 coefficient
        "vmov.s16   q14, #94 / 2                   \n"  // VG -0.7344 coefficient
        "vmov.u16   q15, #0x8080                   \n"  // 128.5
        "1:                                        \n"
        "vld3.8     {d0, d2, d4}, [%0]!            \n"  // load 8 BGR24 pixels.
        "vld3.8     {d1, d3, d5}, [%0]!            \n"  // load next 8 BGR24 pixels.
        "vpaddl.u8  q0, q0                         \n"  // R 16 bytes -> 8 shorts.
        "vpaddl.u8  q1, q1                         \n"  // G 16 bytes -> 8 shorts.
        "vpaddl.u8  q2, q2                         \n"  // B 16 bytes -> 8 shorts.
        "vld3.8     {d8, d10, d12}, [%1]!          \n"  // load 8 more BGR24 pixels.
        "vld3.8     {d9, d11, d13}, [%1]!          \n"  // load last 8 BGR24 pixels.
        "vpadal.u8  q0, q4                         \n"  // R 16 bytes -> 8 shorts.
        "vpadal.u8  q1, q5                         \n"  // G 16 bytes -> 8 shorts.
        "vpadal.u8  q2, q6                         \n"  // B 16 bytes -> 8 shorts.

        "vrshr.u16  q0, q0, #1                     \n"  // 2x average
        "vrshr.u16  q1, q1, #1                     \n"
        "vrshr.u16  q2, q2, #1                     \n"

        "subs       %4, %4, #16                    \n"  // 16 processed per loop.
        BGRTOUV(q2, q1, q0)
        "vst1.8     {d0}, [%2]!                    \n"  // store 8 pixels U.
        "vst1.8     {d1}, [%3]!                    \n"  // store 8 pixels V.
        "bgt        1b                             \n"
        : "+r"(src_bgr),  // %0
        "+r"(src_stride_bgr),  // %1
        "+r"(dst_u),     // %2
        "+r"(dst_v),     // %3
        "+r"(width)        // %4
        :
        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
        "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
        );
    }

// Round `width` samples up to the subsample grid: ceil(width / 2^shift).
#define SS(width, shift) (((width) + (1 << (shift)) - 1) >> (shift))

// Any 1 to 1: wraps a fixed-multiple row kernel so it accepts any width.
// The aligned body goes straight through ANY_SIMD; the tail (width & MASK)
// is staged through a zeroed scratch buffer so the kernel never touches
// bytes outside the caller's buffers.
#define ANY11(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, BPP, MASK)                \
  void NAMEANY(const uint8_t* src_ptr, uint8_t* dst_ptr, int width) {     \
    SIMD_ALIGNED(uint8_t temp[128 * 2]);                                  \
    memset(temp, 0, 128); /* for YUY2 and msan */                         \
    int r = width & MASK;                                                 \
    int n = width & ~MASK;                                                \
    if (n > 0) {                                                          \
      ANY_SIMD(src_ptr, dst_ptr, n);                                      \
    }                                                                     \
    memcpy(temp, src_ptr + (n >> UVSHIFT) * SBPP, SS(r, UVSHIFT) * SBPP); \
    ANY_SIMD(temp, temp + 128, MASK + 1);                                 \
    memcpy(dst_ptr + n * BPP, temp + 128, r * BPP);                       \
  }
    // Any-width Y row for the NEON kernel (which needs width % 8 == 0).
    ANY11(BGR24ToYRow_Any_NEON, BGR24ToYRow_NEON, 0, 3, 1, 7)
#undef ANY11

// Any 1 to 2 with source stride (2 rows of source).  Outputs UV planes.
// 128 byte row allows for 32 avx BGR24 pixels.
// Tail pixels (width & MASK) are staged through a zeroed scratch buffer;
// when width is odd the last pixel is repeated so the 2x2 subsample always
// has a right-hand neighbour.
#define ANY12S(NAMEANY, ANY_SIMD, UVSHIFT, BPP, MASK)                        \
  void NAMEANY(const uint8_t* src_ptr, int src_stride_ptr, uint8_t* dst_u,   \
               uint8_t* dst_v, int width) {                                  \
    SIMD_ALIGNED(uint8_t temp[128 * 4]);                                     \
    memset(temp, 0, 128 * 2); /* for msan */                                 \
    int r = width & MASK;                                                    \
    int n = width & ~MASK;                                                   \
    if (n > 0) {                                                             \
      ANY_SIMD(src_ptr, src_stride_ptr, dst_u, dst_v, n);                    \
    }                                                                        \
    memcpy(temp, src_ptr + (n >> UVSHIFT) * BPP, SS(r, UVSHIFT) * BPP);      \
    memcpy(temp + 128, src_ptr + src_stride_ptr + (n >> UVSHIFT) * BPP,      \
           SS(r, UVSHIFT) * BPP);                                            \
    if ((width & 1) && UVSHIFT == 0) { /* repeat last pixel for subsample */ \
      memcpy(temp + SS(r, UVSHIFT) * BPP, temp + SS(r, UVSHIFT) * BPP - BPP, \
             BPP);                                                           \
      memcpy(temp + 128 + SS(r, UVSHIFT) * BPP,                              \
             temp + 128 + SS(r, UVSHIFT) * BPP - BPP, BPP);                  \
    }                                                                        \
    ANY_SIMD(temp, 128, temp + 256, temp + 384, MASK + 1);                   \
    memcpy(dst_u + (n >> 1), temp + 256, SS(r, 1));                          \
    memcpy(dst_v + (n >> 1), temp + 384, SS(r, 1));                          \
  }
    // Any-width UV row for the NEON kernel (which needs width % 16 == 0).
    ANY12S(BGR24ToUVRow_Any_NEON, BGR24ToUVRow_NEON, 0, 3, 15)
#undef ANY12S
    // Fixed-point luma: Y = (66*R + 129*G + 25*B + 0x1080) >> 8, i.e.
    // 0.2578*R + 0.5078*G + 0.1016*B with a +16 offset and rounding bias.
    static __inline int RGBToY(uint8_t r, uint8_t g, uint8_t b) {
        const int acc = 25 * b + 129 * g + 66 * r;
        return (acc + 0x1080) >> 8;
    }

    // Fixed-point chroma U: (112*B - 74*G - 38*R + 0x8080) >> 8, i.e. a
    // blue-difference term biased to the unsigned 128 midpoint (with rounding).
    static __inline int RGBToU(uint8_t r, uint8_t g, uint8_t b) {
        const int acc = 112 * b - 74 * g - 38 * r;
        return (acc + 0x8080) >> 8;
    }
    // Fixed-point chroma V: (112*R - 94*G - 18*B + 0x8080) >> 8, i.e. a
    // red-difference term biased to the unsigned 128 midpoint (with rounding).
    static __inline int RGBToV(uint8_t r, uint8_t g, uint8_t b) {
        const int acc = 112 * r - 94 * g - 18 * b;
        return (acc + 0x8080) >> 8;
    }
// Scalar reference kernels (modeled on libyuv's ARGBToY_C / ARGBToUV_C).
// MAKEROWY(NAME, R, G, B, BPP) emits NAME##ToYRow_C and NAME##ToUVRow_C,
// where R/G/B are the byte offsets of each channel inside a BPP-byte pixel.
// ToUVRow_C averages each 2x2 block (second row via src_stride_rgb) before
// converting; an odd trailing column is averaged vertically only.
#define MAKEROWY(NAME, R, G, B, BPP)                                         \
  void NAME##ToYRow_C(const uint8_t* src_argb0, uint8_t* dst_y, int width) { \
    int x;                                                                   \
    for (x = 0; x < width; ++x) {                                            \
      dst_y[0] = RGBToY(src_argb0[R], src_argb0[G], src_argb0[B]);           \
      src_argb0 += BPP;                                                      \
      dst_y += 1;                                                            \
    }                                                                        \
  }                                                                          \
  void NAME##ToUVRow_C(const uint8_t* src_rgb0, int src_stride_rgb,          \
                       uint8_t* dst_u, uint8_t* dst_v, int width) {          \
    const uint8_t* src_rgb1 = src_rgb0 + src_stride_rgb;                     \
    int x;                                                                   \
    for (x = 0; x < width - 1; x += 2) {                                     \
      uint8_t ab = (src_rgb0[B] + src_rgb0[B + BPP] + src_rgb1[B] +          \
                    src_rgb1[B + BPP]) >>                                    \
                   2;                                                        \
      uint8_t ag = (src_rgb0[G] + src_rgb0[G + BPP] + src_rgb1[G] +          \
                    src_rgb1[G + BPP]) >>                                    \
                   2;                                                        \
      uint8_t ar = (src_rgb0[R] + src_rgb0[R + BPP] + src_rgb1[R] +          \
                    src_rgb1[R + BPP]) >>                                    \
                   2;                                                        \
      dst_u[0] = RGBToU(ar, ag, ab);                                         \
      dst_v[0] = RGBToV(ar, ag, ab);                                         \
      src_rgb0 += BPP * 2;                                                   \
      src_rgb1 += BPP * 2;                                                   \
      dst_u += 1;                                                            \
      dst_v += 1;                                                            \
    }                                                                        \
    if (width & 1) {                                                         \
      uint8_t ab = (src_rgb0[B] + src_rgb1[B]) >> 1;                         \
      uint8_t ag = (src_rgb0[G] + src_rgb1[G]) >> 1;                         \
      uint8_t ar = (src_rgb0[R] + src_rgb1[R]) >> 1;                         \
      dst_u[0] = RGBToU(ar, ag, ab);                                         \
      dst_v[0] = RGBToV(ar, ag, ab);                                         \
    }                                                                        \
  }

    // BGR24 here maps R = byte 0, G = byte 1, B = byte 2 in a 3-byte pixel,
    // i.e. an R,G,B memory layout — the same convention the NEON kernels in
    // this file use.
    MAKEROWY(BGR24, 0, 1, 2, 3)
#undef MAKEROWY

    // Convert an RGBA frame to NV21 (full-size Y plane plus a half-width,
    // half-height interleaved VU plane).  Row kernels are chosen at runtime
    // from the fastest compiled-in SIMD path (SSSE3/AVX2/NEON/MSA/MMI) with
    // C fallbacks; later #if blocks override earlier ones when both CPU
    // features are present.  A negative height flips the image vertically.
    // Returns 0 on success, -1 on invalid arguments.
    LIBYUV_API
    int RGBAToNV21(const uint8_t *src_rgba,
                   int src_stride_rgba,
                   uint8_t *dst_y,
                   int dst_stride_y,
                   uint8_t *dst_vu,
                   int dst_stride_vu,
                   int width,
                   int height) {
        int y;
        int halfwidth = (width + 1) >> 1;
        void (*RGBAToUVRow)(const uint8_t *src_rgba0, int src_stride_rgba, uint8_t *dst_u, uint8_t *dst_v, int width) = RGBAToUVRow_C;
        void (*RGBAToYRow)(const uint8_t *src_rgba, uint8_t *dst_y, int width) = RGBAToYRow_C;
        void (*MergeUVRow_)(const uint8_t *src_u, const uint8_t *src_v, uint8_t *dst_vu, int width) = MergeUVRow_C;
        if (!src_rgba || !dst_y || !dst_vu || width <= 0 || height == 0) {
            return -1;
        }
        // Negative height means invert the image.
        if (height < 0) {
            height = -height;
            src_rgba = src_rgba + (height - 1) * src_stride_rgba;
            src_stride_rgba = -src_stride_rgba;
        }
#if defined(HAS_RGBATOYROW_SSSE3) && defined(HAS_RGBATOUVROW_SSSE3)
        if (TestCpuFlag(kCpuHasSSSE3)) {
        RGBAToUVRow = RGBAToUVRow_Any_SSSE3;
        RGBAToYRow = RGBAToYRow_Any_SSSE3;
        if (IS_ALIGNED(width, 16)) {
          RGBAToUVRow = RGBAToUVRow_SSSE3;
          RGBAToYRow = RGBAToYRow_SSSE3;
        }
      }
#endif
#if defined(HAS_RGBATOYROW_AVX2) && defined(HAS_RGBATOUVROW_AVX2)
        if (TestCpuFlag(kCpuHasAVX2)) {
        RGBAToUVRow = RGBAToUVRow_Any_AVX2;
        RGBAToYRow = RGBAToYRow_Any_AVX2;
        if (IS_ALIGNED(width, 32)) {
          RGBAToUVRow = RGBAToUVRow_AVX2;
          RGBAToYRow = RGBAToYRow_AVX2;
        }
      }
#endif
#if defined(HAS_RGBATOYROW_NEON)
        if (TestCpuFlag(kCpuHasNEON)) {
            RGBAToYRow = RGBAToYRow_Any_NEON;
            if (IS_ALIGNED(width, 8)) {
                RGBAToYRow = RGBAToYRow_NEON;
            }
        }
#endif
#if defined(HAS_RGBATOUVROW_NEON)
        if (TestCpuFlag(kCpuHasNEON)) {
            RGBAToUVRow = RGBAToUVRow_Any_NEON;
            if (IS_ALIGNED(width, 16)) {
                RGBAToUVRow = RGBAToUVRow_NEON;
            }
        }
#endif
#if defined(HAS_RGBATOYROW_MSA)
        if (TestCpuFlag(kCpuHasMSA)) {
        RGBAToYRow = RGBAToYRow_Any_MSA;
        if (IS_ALIGNED(width, 16)) {
          RGBAToYRow = RGBAToYRow_MSA;
        }
      }
#endif
#if defined(HAS_RGBATOUVROW_MSA)
        if (TestCpuFlag(kCpuHasMSA)) {
        RGBAToUVRow = RGBAToUVRow_Any_MSA;
        if (IS_ALIGNED(width, 32)) {
          RGBAToUVRow = RGBAToUVRow_MSA;
        }
      }
#endif
#if defined(HAS_RGBATOYROW_MMI)
        if (TestCpuFlag(kCpuHasMMI)) {
        RGBAToYRow = RGBAToYRow_Any_MMI;
        if (IS_ALIGNED(width, 8)) {
          RGBAToYRow = RGBAToYRow_MMI;
        }
      }
#endif
#if defined(HAS_RGBATOUVROW_MMI)
        if (TestCpuFlag(kCpuHasMMI)) {
        RGBAToUVRow = RGBAToUVRow_Any_MMI;
        if (IS_ALIGNED(width, 16)) {
          RGBAToUVRow = RGBAToUVRow_MMI;
        }
      }
#endif

#if defined(HAS_MERGEUVROW_SSE2)
        if (TestCpuFlag(kCpuHasSSE2)) {
        MergeUVRow_ = MergeUVRow_Any_SSE2;
        if (IS_ALIGNED(halfwidth, 16)) {
          MergeUVRow_ = MergeUVRow_SSE2;
        }
      }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
        if (TestCpuFlag(kCpuHasAVX2)) {
        MergeUVRow_ = MergeUVRow_Any_AVX2;
        if (IS_ALIGNED(halfwidth, 32)) {
          MergeUVRow_ = MergeUVRow_AVX2;
        }
      }
#endif
#if defined(HAS_MERGEUVROW_NEON)
        if (TestCpuFlag(kCpuHasNEON)) {
            MergeUVRow_ = MergeUVRow_Any_NEON;
            if (IS_ALIGNED(halfwidth, 16)) {
                MergeUVRow_ = MergeUVRow_NEON;
            }
        }
#endif
#if defined(HAS_MERGEUVROW_MSA)
        if (TestCpuFlag(kCpuHasMSA)) {
        MergeUVRow_ = MergeUVRow_Any_MSA;
        if (IS_ALIGNED(halfwidth, 16)) {
          MergeUVRow_ = MergeUVRow_MSA;
        }
      }
#endif
#if defined(HAS_MERGEUVROW_MMI)
        if (TestCpuFlag(kCpuHasMMI)) {
        MergeUVRow_ = MergeUVRow_Any_MMI;
        if (IS_ALIGNED(halfwidth, 8)) {
          MergeUVRow_ = MergeUVRow_MMI;
        }
      }
#endif
        {
            // Allocate 2 aligned scratch rows: U in the first half, V in the second.
            align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
            uint8_t *row_v = row_u + ((halfwidth + 31) & ~31);

            for (y = 0; y < height - 1; y += 2) {
                RGBAToUVRow(src_rgba, src_stride_rgba, row_u, row_v, width);
                MergeUVRow_(row_v, row_u, dst_vu, halfwidth);  // V first: NV21 stores VU pairs.
                RGBAToYRow(src_rgba, dst_y, width);
                RGBAToYRow(src_rgba + src_stride_rgba, dst_y + dst_stride_y, width);
                src_rgba += src_stride_rgba * 2;
                dst_y += dst_stride_y * 2;
                dst_vu += dst_stride_vu;
            }
            if (height & 1) {
                // Odd trailing row: stride 0 makes UVRow average the row with itself.
                RGBAToUVRow(src_rgba, 0, row_u, row_v, width);
                MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
                RGBAToYRow(src_rgba, dst_y, width);
            }
            free_aligned_buffer_64(row_u);
        }
        return 0;
    }

    // Convert a BGRA frame to NV21 (full-size Y plane plus a half-width,
    // half-height interleaved VU plane).  Structure mirrors RGBAToNV21:
    // runtime kernel selection (SSSE3/AVX2/NEON/MSA/MMI with C fallbacks),
    // two rows converted per iteration, negative height flips vertically.
    // Returns 0 on success, -1 on invalid arguments.
    LIBYUV_API
    int BGRAToNV21(const uint8_t *src_bgra,
                   int src_stride_bgra,
                   uint8_t *dst_y,
                   int dst_stride_y,
                   uint8_t *dst_vu,
                   int dst_stride_vu,
                   int width,
                   int height) {
        int y;
        int halfwidth = (width + 1) >> 1;
        void (*BGRAToUVRow)(const uint8_t *src_bgra0, int src_stride_bgra, uint8_t *dst_u, uint8_t *dst_v, int width) = BGRAToUVRow_C;
        void (*BGRAToYRow)(const uint8_t *src_bgra, uint8_t *dst_y, int width) = BGRAToYRow_C;
        void (*MergeUVRow_)(const uint8_t *src_u, const uint8_t *src_v, uint8_t *dst_vu, int width) = MergeUVRow_C;
        if (!src_bgra || !dst_y || !dst_vu || width <= 0 || height == 0) {
            return -1;
        }
        // Negative height means invert the image.
        if (height < 0) {
            height = -height;
            src_bgra = src_bgra + (height - 1) * src_stride_bgra;
            src_stride_bgra = -src_stride_bgra;
        }
#if defined(HAS_BGRATOYROW_SSSE3) && defined(HAS_BGRATOUVROW_SSSE3)
        if (TestCpuFlag(kCpuHasSSSE3)) {
        BGRAToUVRow = BGRAToUVRow_Any_SSSE3;
        BGRAToYRow = BGRAToYRow_Any_SSSE3;
        if (IS_ALIGNED(width, 16)) {
          BGRAToUVRow = BGRAToUVRow_SSSE3;
          BGRAToYRow = BGRAToYRow_SSSE3;
        }
      }
#endif
#if defined(HAS_BGRATOYROW_AVX2) && defined(HAS_BGRATOUVROW_AVX2)
        if (TestCpuFlag(kCpuHasAVX2)) {
        BGRAToUVRow = BGRAToUVRow_Any_AVX2;
        BGRAToYRow = BGRAToYRow_Any_AVX2;
        if (IS_ALIGNED(width, 32)) {
          BGRAToUVRow = BGRAToUVRow_AVX2;
          BGRAToYRow = BGRAToYRow_AVX2;
        }
      }
#endif
#if defined(HAS_BGRATOYROW_NEON)
        if (TestCpuFlag(kCpuHasNEON)) {
            BGRAToYRow = BGRAToYRow_Any_NEON;
            if (IS_ALIGNED(width, 8)) {
                BGRAToYRow = BGRAToYRow_NEON;
            }
        }
#endif
#if defined(HAS_BGRATOUVROW_NEON)
        if (TestCpuFlag(kCpuHasNEON)) {
            BGRAToUVRow = BGRAToUVRow_Any_NEON;
            if (IS_ALIGNED(width, 16)) {
                BGRAToUVRow = BGRAToUVRow_NEON;
            }
        }
#endif
#if defined(HAS_BGRATOYROW_MSA)
        if (TestCpuFlag(kCpuHasMSA)) {
        BGRAToYRow = BGRAToYRow_Any_MSA;
        if (IS_ALIGNED(width, 16)) {
          BGRAToYRow = BGRAToYRow_MSA;
        }
      }
#endif
#if defined(HAS_BGRATOUVROW_MSA)
        if (TestCpuFlag(kCpuHasMSA)) {
        BGRAToUVRow = BGRAToUVRow_Any_MSA;
        if (IS_ALIGNED(width, 32)) {
          BGRAToUVRow = BGRAToUVRow_MSA;
        }
      }
#endif
#if defined(HAS_BGRATOYROW_MMI)
        if (TestCpuFlag(kCpuHasMMI)) {
        BGRAToYRow = BGRAToYRow_Any_MMI;
        if (IS_ALIGNED(width, 8)) {
          BGRAToYRow = BGRAToYRow_MMI;
        }
      }
#endif
#if defined(HAS_BGRATOUVROW_MMI)
        if (TestCpuFlag(kCpuHasMMI)) {
        BGRAToUVRow = BGRAToUVRow_Any_MMI;
        if (IS_ALIGNED(width, 16)) {
          BGRAToUVRow = BGRAToUVRow_MMI;
        }
      }
#endif

#if defined(HAS_MERGEUVROW_SSE2)
        if (TestCpuFlag(kCpuHasSSE2)) {
        MergeUVRow_ = MergeUVRow_Any_SSE2;
        if (IS_ALIGNED(halfwidth, 16)) {
          MergeUVRow_ = MergeUVRow_SSE2;
        }
      }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
        if (TestCpuFlag(kCpuHasAVX2)) {
        MergeUVRow_ = MergeUVRow_Any_AVX2;
        if (IS_ALIGNED(halfwidth, 32)) {
          MergeUVRow_ = MergeUVRow_AVX2;
        }
      }
#endif
#if defined(HAS_MERGEUVROW_NEON)
        if (TestCpuFlag(kCpuHasNEON)) {
            MergeUVRow_ = MergeUVRow_Any_NEON;
            if (IS_ALIGNED(halfwidth, 16)) {
                MergeUVRow_ = MergeUVRow_NEON;
            }
        }
#endif
#if defined(HAS_MERGEUVROW_MSA)
        if (TestCpuFlag(kCpuHasMSA)) {
        MergeUVRow_ = MergeUVRow_Any_MSA;
        if (IS_ALIGNED(halfwidth, 16)) {
          MergeUVRow_ = MergeUVRow_MSA;
        }
      }
#endif
#if defined(HAS_MERGEUVROW_MMI)
        if (TestCpuFlag(kCpuHasMMI)) {
        MergeUVRow_ = MergeUVRow_Any_MMI;
        if (IS_ALIGNED(halfwidth, 8)) {
          MergeUVRow_ = MergeUVRow_MMI;
        }
      }
#endif
        {
            // Allocate 2 aligned scratch rows: U in the first half, V in the second.
            align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
            uint8_t *row_v = row_u + ((halfwidth + 31) & ~31);

            for (y = 0; y < height - 1; y += 2) {
                BGRAToUVRow(src_bgra, src_stride_bgra, row_u, row_v, width);
                MergeUVRow_(row_v, row_u, dst_vu, halfwidth);  // V first: NV21 stores VU pairs.
                BGRAToYRow(src_bgra, dst_y, width);
                BGRAToYRow(src_bgra + src_stride_bgra, dst_y + dst_stride_y, width);
                src_bgra += src_stride_bgra * 2;
                dst_y += dst_stride_y * 2;
                dst_vu += dst_stride_vu;
            }
            if (height & 1) {
                // Odd trailing row: stride 0 makes UVRow average the row with itself.
                BGRAToUVRow(src_bgra, 0, row_u, row_v, width);
                MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
                BGRAToYRow(src_bgra, dst_y, width);
            }
            free_aligned_buffer_64(row_u);
        }
        return 0;
    }

    // Convert a 24-bit "BGR24" frame (see the layout NOTE on the row kernels
    // defined earlier in this file) to NV21.  Same shape as RGBAToNV21 /
    // BGRAToNV21: runtime kernel selection with C fallbacks, two rows per
    // iteration, negative height flips vertically.  The NEON row kernels at
    // the top of this file plug in via HAS_BGR24TOYROW_NEON /
    // HAS_BGR24TOUVROW_NEON.  Returns 0 on success, -1 on bad arguments.
    LIBYUV_API
    int BGR24ToNV21(const uint8_t *src_bgr,
                    int src_stride_bgr,
                    uint8_t *dst_y,
                    int dst_stride_y,
                    uint8_t *dst_vu,
                    int dst_stride_vu,
                    int width,
                    int height) {
        int y;
        int halfwidth = (width + 1) >> 1;
        void (*BGR24ToUVRow)(const uint8_t *src_bgr0, int src_stride_bgr, uint8_t *dst_u, uint8_t *dst_v, int width) = BGR24ToUVRow_C;
        void (*BGR24ToYRow)(const uint8_t *src_bgra, uint8_t *dst_y, int width) = BGR24ToYRow_C;
        void (*MergeUVRow_)(const uint8_t *src_u, const uint8_t *src_v, uint8_t *dst_vu, int width) = MergeUVRow_C;
        if (!src_bgr || !dst_y || !dst_vu || width <= 0 || height == 0) {
            return -1;
        }
        // Negative height means invert the image.
        if (height < 0) {
            height = -height;
            src_bgr = src_bgr + (height - 1) * src_stride_bgr;
            src_stride_bgr = -src_stride_bgr;
        }
#if defined(HAS_BGR24TOYROW_SSSE3) && defined(HAS_BGR24TOUVROW_SSSE3)
        if (TestCpuFlag(kCpuHasSSSE3)) {
        BGR24ToUVRow = BGR24ToUVRow_Any_SSSE3;
        BGR24ToYRow = BGR24ToYRow_Any_SSSE3;
        if (IS_ALIGNED(width, 16)) {
          BGR24ToUVRow = BGR24ToUVRow_SSSE3;
          BGR24ToYRow = BGR24ToYRow_SSSE3;
        }
      }
#endif
#if defined(HAS_BGR24TOYROW_AVX2) && defined(HAS_BGR24TOUVROW_AVX2)
        if (TestCpuFlag(kCpuHasAVX2)) {
        BGR24ToUVRow = BGR24ToUVRow_Any_AVX2;
        BGR24ToYRow = BGR24ToYRow_Any_AVX2;
        if (IS_ALIGNED(width, 32)) {
          BGR24ToUVRow = BGR24ToUVRow_AVX2;
          BGR24ToYRow = BGR24ToYRow_AVX2;
        }
      }
#endif
#if defined(HAS_BGR24TOYROW_NEON)
        if (TestCpuFlag(kCpuHasNEON)) {
            BGR24ToYRow = BGR24ToYRow_Any_NEON;
            if (IS_ALIGNED(width, 8)) {
                BGR24ToYRow = BGR24ToYRow_NEON;
            }
        }
#endif
#if defined(HAS_BGR24TOUVROW_NEON)
        if (TestCpuFlag(kCpuHasNEON)) {
            BGR24ToUVRow = BGR24ToUVRow_Any_NEON;
            if (IS_ALIGNED(width, 16)) {
                BGR24ToUVRow = BGR24ToUVRow_NEON;
            }
        }
#endif
#if defined(HAS_BGR24TOYROW_MSA)
        if (TestCpuFlag(kCpuHasMSA)) {
        BGR24ToYRow = BGR24ToYRow_Any_MSA;
        if (IS_ALIGNED(width, 16)) {
          BGR24ToYRow = BGR24ToYRow_MSA;
        }
      }
#endif
#if defined(HAS_BGR24TOUVROW_MSA)
        if (TestCpuFlag(kCpuHasMSA)) {
        BGR24ToUVRow = BGR24ToUVRow_Any_MSA;
        if (IS_ALIGNED(width, 32)) {
          BGR24ToUVRow = BGR24ToUVRow_MSA;
        }
      }
#endif
#if defined(HAS_BGR24TOYROW_MMI)
        if (TestCpuFlag(kCpuHasMMI)) {
        BGR24ToYRow = BGR24ToYRow_Any_MMI;
        if (IS_ALIGNED(width, 8)) {
          BGR24ToYRow = BGR24ToYRow_MMI;
        }
      }
#endif
#if defined(HAS_BGR24TOUVROW_MMI)
        if (TestCpuFlag(kCpuHasMMI)) {
        BGR24ToUVRow = BGR24ToUVRow_Any_MMI;
        if (IS_ALIGNED(width, 16)) {
          BGR24ToUVRow = BGR24ToUVRow_MMI;
        }
      }
#endif

#if defined(HAS_MERGEUVROW_SSE2)
        if (TestCpuFlag(kCpuHasSSE2)) {
        MergeUVRow_ = MergeUVRow_Any_SSE2;
        if (IS_ALIGNED(halfwidth, 16)) {
          MergeUVRow_ = MergeUVRow_SSE2;
        }
      }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
        if (TestCpuFlag(kCpuHasAVX2)) {
        MergeUVRow_ = MergeUVRow_Any_AVX2;
        if (IS_ALIGNED(halfwidth, 32)) {
          MergeUVRow_ = MergeUVRow_AVX2;
        }
      }
#endif
#if defined(HAS_MERGEUVROW_NEON)
        if (TestCpuFlag(kCpuHasNEON)) {
            MergeUVRow_ = MergeUVRow_Any_NEON;
            if (IS_ALIGNED(halfwidth, 16)) {
                MergeUVRow_ = MergeUVRow_NEON;
            }
        }
#endif
#if defined(HAS_MERGEUVROW_MSA)
        if (TestCpuFlag(kCpuHasMSA)) {
        MergeUVRow_ = MergeUVRow_Any_MSA;
        if (IS_ALIGNED(halfwidth, 16)) {
          MergeUVRow_ = MergeUVRow_MSA;
        }
      }
#endif
#if defined(HAS_MERGEUVROW_MMI)
        if (TestCpuFlag(kCpuHasMMI)) {
        MergeUVRow_ = MergeUVRow_Any_MMI;
        if (IS_ALIGNED(halfwidth, 8)) {
          MergeUVRow_ = MergeUVRow_MMI;
        }
      }
#endif
        {
            // Allocate 2 aligned scratch rows: U in the first half, V in the second.
            align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
            uint8_t *row_v = row_u + ((halfwidth + 31) & ~31);

            for (y = 0; y < height - 1; y += 2) {
                BGR24ToUVRow(src_bgr, src_stride_bgr, row_u, row_v, width);
                MergeUVRow_(row_v, row_u, dst_vu, halfwidth);  // V first: NV21 stores VU pairs.
                BGR24ToYRow(src_bgr, dst_y, width);
                BGR24ToYRow(src_bgr + src_stride_bgr, dst_y + dst_stride_y, width);
                src_bgr += src_stride_bgr * 2;
                dst_y += dst_stride_y * 2;
                dst_vu += dst_stride_vu;
            }
            if (height & 1) {
                // Odd trailing row: stride 0 makes UVRow average the row with itself.
                BGR24ToUVRow(src_bgr, 0, row_u, row_v, width);
                MergeUVRow_(row_v, row_u, dst_vu, halfwidth);
                BGR24ToYRow(src_bgr, dst_y, width);
            }
            free_aligned_buffer_64(row_u);
        }
        return 0;
    }

    // Copy an I420 frame into NV12: the Y plane is copied as-is and the
    // U/V planes are interleaved into a single UV plane (U byte first).
    // A negative height writes the output bottom-up (vertical flip).
    // Returns 0 on success, -1 on invalid arguments or an in-place Y copy.
    int ResearchI420ToNV12(const uint8_t *src_y, int src_stride_y,
                           const uint8_t *src_u, int src_stride_u,
                           const uint8_t *src_v, int src_stride_v,
                           uint8_t *dst_y, int dst_stride_y,
                           uint8_t *dst_uv, int dst_stride_uv,
                           int width, int height) {
        if (!src_y || !src_u || !src_v || !dst_y || !dst_uv || width <= 0 || height == 0) {
            return -1;
        }
        int halfwidth = (width + 1) / 2;
        int halfheight = height > 0 ? (height + 1) / 2 : (height - 1) / 2;
        {
            // ---- Y plane copy ----
            int y;
            void (*CopyRow)(const uint8_t *, uint8_t *, int);
            if (height < 0) {
                // Negative height: write the Y plane bottom-up.
                height = -height;
                dst_y = dst_y + (height - 1) * dst_stride_y;
                dst_stride_y = -dst_stride_y;
            }
            if (src_stride_y == width && dst_stride_y == width) {
                // Both planes contiguous: coalesce into one long row.
                width *= height;
                height = 1;
                src_stride_y = dst_stride_y = 0;
            }
            if (src_y == dst_y && src_stride_y == dst_stride_y) {
                return -1;  // in-place copy is unsupported
            }
            void ResearchCopyRow_NEON(const uint8_t *, uint8_t *, int);
            void ResearchCopyRow_Any_NEON(const uint8_t *, uint8_t *, int);
            // BUGFIX: ResearchCopyRow_NEON moves whole 32-byte chunks and
            // would overrun both buffers when width is not a multiple of 32;
            // use the Any wrapper (which stages the tail through a scratch
            // buffer) in that case.
            CopyRow = ((width & 31) == 0) ? ResearchCopyRow_NEON : ResearchCopyRow_Any_NEON;
            for (y = 0; y < height; ++y) {
                CopyRow(src_y, dst_y, width);
                src_y += src_stride_y;
                dst_y += dst_stride_y;
            }
        }
        // ---- UV plane merge (NV12 order: U byte first, then V) ----
        int y;
        void (*MergeUVRow)(const uint8_t *, const uint8_t *, uint8_t *, int);
        if (halfheight < 0) {
            // Negative height: write the UV plane bottom-up.
            halfheight = -halfheight;
            dst_uv = dst_uv + (halfheight - 1) * dst_stride_uv;
            dst_stride_uv = -dst_stride_uv;
        }
        if (src_stride_u == halfwidth && src_stride_v == halfwidth && dst_stride_uv == halfwidth * 2) {
            // All three planes contiguous: coalesce into one long row.
            halfwidth *= halfheight;
            halfheight = 1;
            src_stride_u = src_stride_v = dst_stride_uv = 0;
        }
        void ResearchMergeUVRow_NEON(const uint8_t *, const uint8_t *, uint8_t *, int);
        MergeUVRow = ResearchMergeUVRow_NEON;
        // BUGFIX: the NEON kernel interleaves 16 U/V pairs per iteration and
        // would overrun when halfwidth is not a multiple of 16; interleave
        // the remainder with a scalar tail loop instead.
        const int vecwidth = halfwidth & ~15;
        for (y = 0; y < halfheight; ++y) {
            if (vecwidth > 0) {
                MergeUVRow(src_u, src_v, dst_uv, vecwidth);
            }
            for (int x = vecwidth; x < halfwidth; ++x) {
                dst_uv[2 * x + 0] = src_u[x];
                dst_uv[2 * x + 1] = src_v[x];
            }
            src_u += src_stride_u;
            src_v += src_stride_v;
            dst_uv += dst_stride_uv;
        }
        return 0;
    }
    // Copy in 32-byte chunks.  vld1.8 allows unaligned access and is fastest
    // on a15.  width must be a multiple of 32 or the final iteration will
    // read/write past the buffers (ResearchCopyRow_Any_NEON handles tails).
    void ResearchCopyRow_NEON(const uint8_t *src, uint8_t *dst, int width) {
        asm volatile(
        "1:                                        \n"
        "vld1.8     {d0, d1, d2, d3}, [%0]!        \n"  // load 32 bytes
        "subs       %2, %2, #32                    \n"  // 32 processed per loop
        "vst1.8     {d0, d1, d2, d3}, [%1]!        \n"  // store 32 bytes
        "bgt        1b                             \n"
        : "+r"(src),                  // %0
        "+r"(dst),                  // %1
        "+r"(width)                 // %2  // Output registers
        :                             // Input registers
        // BUGFIX: d0-d3 are overwritten by the loop and must be declared as
        // clobbered, otherwise the compiler may keep live values in them.
        : "cc", "memory", "d0", "d1", "d2", "d3"  // Clobber List
        );
    }
    void ResearchCopyRow_Any_NEON(const uint8_t *src_ptr, uint8_t *dst_ptr, int width) {
        SIMD_ALIGNED(uint8_t temp[128 * 2]);
        memset(temp, 0, 128); /* for YUY2 and msan */
        int r = width & 31;
        int n = width & ~31;
        if (n > 0) {
            ResearchCopyRow_NEON(src_ptr, dst_ptr, n);
        }
        memcpy(temp, src_ptr + (n >> 0) * 1, (((r) + (1 << (0)) - 1) >> (0)) * 1);
        ResearchCopyRow_NEON(temp, temp + 128, 31 + 1);
        memcpy(dst_ptr + n * 1, temp + 128, r * 1);
    }
    // Reads 16 U's and 16 V's and writes out 16 interleaved UV pairs
    // (U first, NV12 order).  width must be a multiple of 16.
    void ResearchMergeUVRow_NEON(const uint8_t *src_u, const uint8_t *src_v, uint8_t *dst_uv, int width) {
        asm volatile(
        "1:                                        \n"
        "vld2.8     {d0, d2}, [%0]!                \n"  // load 16 U (even->d0, odd->d2)
        "vld2.8     {d1, d3}, [%1]!                \n"  // load 16 V (even->d1, odd->d3)
        "subs       %3, %3, #16                    \n"  // 16 pairs processed per loop
        "vst4.8     {d0, d1, d2, d3}, [%2]!        \n"  // store 16 pairs of UV
        "bgt        1b                             \n"
        : "+r"(src_u),              // %0
        "+r"(src_v),                // %1
        "+r"(dst_uv),               // %2
        "+r"(width)                 // %3  // Output registers
        :                           // Input registers
        // BUGFIX: d0-d3 are overwritten by the loop and must be declared as
        // clobbered, otherwise the compiler may keep live values in them.
        : "cc", "memory", "d0", "d1", "d2", "d3"  // Clobber List
        );
    }

#ifdef __cplusplus
    }  // extern "C"
}  // namespace libyuv
#endif