#ifndef TILE2NV12_H
#define TILE2NV12_H
#include <stdint.h>
#include <msa.h>
#include "convert.h"

/*
 * Minimum of two scalar expressions.
 * Both arguments are fully parenthesized so compound expressions
 * (e.g. MIN(x, y | z)) expand with the intended precedence.  Each
 * argument is still evaluated twice — avoid side effects (MIN(i++, n)).
 */
#define MIN(a, b) ((a) > (b) ? (b) : (a))

/*
 * Load vector i of a macroblock: v[i] = 16 bytes at p + i*16.
 * The offset operand of __builtin_msa_ld_b must fold to an immediate,
 * so i has to be a compile-time constant at every call site.
 * NOTE: the expansion deliberately ends with ';' (legacy convention,
 * kept for call-site compatibility) — use only in statement position.
 */
#define LOAD_MAC(v,p,i) (v)[i] = __builtin_msa_ld_b((p),(i) * 16);
/*
 * Store vector i of a macroblock to row i of the destination.
 * NOTE: this macro references 'nv12->stride' from the caller's scope —
 * a variable named 'nv12' (presumably a struct nv12_fmt *) must be
 * visible wherever STORE_MAC is used.
 */
#define STORE_MAC(v,p,i) __builtin_msa_st_b((v)[i],(p) + (i) * nv12->stride,0)

/*
 * One butterfly stage of an interleave-based transpose.
 * Writes the right/left interleave of vi[a] and vi[b] into vo[a]/vo[b];
 * 'type' selects the element width via the __msa_ilvr_b/h/w/d family.
 * Arguments are parenthesized for macro hygiene.
 */
#define exchange(type,vi,vo,a,b) do {                   \
        (vo)[a] = __msa_ilvr_##type((vi)[b],(vi)[a]);   \
        (vo)[b] = __msa_ilvl_##type((vi)[b],(vi)[a]);   \
    } while(0)

/*
 * Same butterfly stage as exchange(), but with the ilvr/ilvl operand
 * order swapped — used by the *_270 rotation paths to produce the
 * opposite row ordering.  Arguments are parenthesized for macro hygiene.
 */
#define exchange_r(type,vi,vo,a,b) do {                 \
        (vo)[a] = __msa_ilvr_##type((vi)[a],(vi)[b]);   \
        (vo)[b] = __msa_ilvl_##type((vi)[a],(vi)[b]);   \
    } while(0)

/*
 * Rotate a 16x16-byte luma macroblock held in v[0..15] by 90 degrees
 * (per the macro name) and store it to dst with the given row stride.
 *
 * The four exchange rounds (byte, halfword, word, doubleword interleave)
 * implement a 16x16 byte transpose.  The transpose leaves rows in v[] in
 * bit-reversed index order, which is why the stores walk
 * v[0],v[8],v[4],v[12],... while writing destination rows from
 * dst + 15*stride down to dst + 0*stride (transpose + reversed row order
 * = the rotation).
 *
 * If stride == 0 nothing is stored; the final exchange round has already
 * written the transposed data back into v[], so the caller can consume
 * it in-register.
 */
#define ymac_transport_90(v,dst,stride) do {        \
        v16u8 v1[16];                               \
        exchange(b,v,v1,0,1);                       \
        exchange(b,v,v1,2,3);                       \
        exchange(b,v,v1,4,5);                       \
        exchange(b,v,v1,6,7);                       \
        exchange(b,v,v1,8,9);                       \
        exchange(b,v,v1,10,11);                     \
        exchange(b,v,v1,12,13);                     \
        exchange(b,v,v1,14,15);                     \
                                                    \
        exchange(h,v1,v,0,2);                       \
        exchange(h,v1,v,1,3);                       \
        exchange(h,v1,v,4,6);                       \
        exchange(h,v1,v,5,7);                       \
        exchange(h,v1,v,8,10);                      \
        exchange(h,v1,v,9,11);                      \
        exchange(h,v1,v,12,14);                     \
        exchange(h,v1,v,13,15);                     \
                                                    \
        exchange(w,v,v1,0,4);                       \
        exchange(w,v,v1,2,6);                       \
        exchange(w,v,v1,1,5);                       \
        exchange(w,v,v1,3,7);                       \
        exchange(w,v,v1,8,12);                      \
        exchange(w,v,v1,10,14);                     \
        exchange(w,v,v1,9,13);                      \
        exchange(w,v,v1,11,15);                     \
                                                    \
        exchange(d,v1,v,0,8);                       \
        exchange(d,v1,v,4,12);                      \
        exchange(d,v1,v,2,10);                      \
        exchange(d,v1,v,6,14);                      \
        exchange(d,v1,v,1,9);                       \
        exchange(d,v1,v,5,13);                      \
        exchange(d,v1,v,3,11);                      \
        exchange(d,v1,v,7,15);                      \
        if(stride != 0) {                           \
            __msa_st_b(v[0], dst + 15 * stride,0);  \
            __msa_st_b(v[8], dst + 14 * stride,0);  \
            __msa_st_b(v[4], dst + 13 * stride,0);  \
            __msa_st_b(v[12],dst + 12 * stride,0);  \
            __msa_st_b(v[2], dst + 11 * stride,0);  \
            __msa_st_b(v[10],dst + 10 * stride,0);  \
            __msa_st_b(v[6], dst + 9 * stride,0);   \
            __msa_st_b(v[14],dst + 8 * stride,0);   \
            __msa_st_b(v[1], dst + 7 * stride,0);   \
            __msa_st_b(v[9], dst + 6 * stride,0);   \
            __msa_st_b(v[5], dst + 5 * stride,0);   \
            __msa_st_b(v[13],dst + 4 * stride,0);   \
            __msa_st_b(v[3], dst + 3 * stride,0);   \
            __msa_st_b(v[11],dst + 2 * stride,0);   \
            __msa_st_b(v[7], dst + 1 * stride,0);   \
            __msa_st_b(v[15],dst + 0 * stride,0);   \
        }                                           \
    }while(0)

/*
 * Rotate a 16x16-byte luma macroblock held in v[0..15] by 270 degrees
 * (per the macro name): same interleave transpose as ymac_transport_90,
 * but built from exchange_r (ilvr/ilvl operands swapped) and with the
 * destination rows written top-to-bottom (dst + 0*stride first) instead
 * of bottom-to-top — producing the opposite rotation direction.
 *
 * If stride == 0 nothing is stored; the final exchange_r round leaves
 * the transformed data in v[] for the caller.
 */
#define ymac_transport_270(v,dst,stride) do {       \
        v16u8 v1[16];                               \
        exchange_r(b,v,v1,0,1);                     \
        exchange_r(b,v,v1,2,3);                     \
        exchange_r(b,v,v1,4,5);                     \
        exchange_r(b,v,v1,6,7);                     \
        exchange_r(b,v,v1,8,9);                     \
        exchange_r(b,v,v1,10,11);                   \
        exchange_r(b,v,v1,12,13);                   \
        exchange_r(b,v,v1,14,15);                   \
                                                    \
        exchange_r(h,v1,v,0,2);                     \
        exchange_r(h,v1,v,1,3);                     \
        exchange_r(h,v1,v,4,6);                     \
        exchange_r(h,v1,v,5,7);                     \
        exchange_r(h,v1,v,8,10);                    \
        exchange_r(h,v1,v,9,11);                    \
        exchange_r(h,v1,v,12,14);                   \
        exchange_r(h,v1,v,13,15);                   \
                                                    \
        exchange_r(w,v,v1,0,4);                     \
        exchange_r(w,v,v1,2,6);                     \
        exchange_r(w,v,v1,1,5);                     \
        exchange_r(w,v,v1,3,7);                     \
        exchange_r(w,v,v1,8,12);                    \
        exchange_r(w,v,v1,10,14);                   \
        exchange_r(w,v,v1,9,13);                    \
        exchange_r(w,v,v1,11,15);                   \
                                                    \
        exchange_r(d,v1,v,0,8);                     \
        exchange_r(d,v1,v,4,12);                    \
        exchange_r(d,v1,v,2,10);                    \
        exchange_r(d,v1,v,6,14);                    \
        exchange_r(d,v1,v,1,9);                     \
        exchange_r(d,v1,v,5,13);                    \
        exchange_r(d,v1,v,3,11);                    \
        exchange_r(d,v1,v,7,15);                    \
        if(stride != 0) {                           \
            __msa_st_b(v[0], dst + 0 * stride,0);   \
            __msa_st_b(v[8], dst + 1 * stride,0);   \
            __msa_st_b(v[4], dst + 2 * stride,0);   \
            __msa_st_b(v[12],dst + 3 * stride,0);   \
            __msa_st_b(v[2], dst + 4 * stride,0);   \
            __msa_st_b(v[10],dst + 5 * stride,0);   \
            __msa_st_b(v[6], dst + 6 * stride,0);   \
            __msa_st_b(v[14],dst + 7 * stride,0);   \
            __msa_st_b(v[1], dst + 8 * stride,0);   \
            __msa_st_b(v[9], dst + 9 * stride,0);   \
            __msa_st_b(v[5], dst + 10 * stride,0);  \
            __msa_st_b(v[13],dst + 11 * stride,0);  \
            __msa_st_b(v[3], dst + 12 * stride,0);  \
            __msa_st_b(v[11],dst + 13 * stride,0);  \
            __msa_st_b(v[7], dst + 14 * stride,0);  \
            __msa_st_b(v[15],dst + 15 * stride,0);  \
        }                                           \
    }while(0)

/*
 * Rotate a 16x16-byte luma macroblock held in v[0..15] by 180 degrees:
 * each vector's 16 bytes are reversed with a vshf mirror shuffle, then
 * the rows themselves are emitted in reverse order.
 *
 * If stride != 0 the mirrored rows are stored straight to dst
 * (row i -> dst + (15-i)*stride).  If stride == 0 the result is instead
 * written back into v[] in reversed row order, giving the caller an
 * in-register 180-degree rotation.
 */
#define ymac_transport_180(v,dst,stride) do {                           \
        v16u8 v1[16];                                                   \
        const v16u8 v_mirror = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}; \
        v1[0] = __msa_vshf_b(v_mirror,v[0],v[0]);                       \
        v1[1] = __msa_vshf_b(v_mirror,v[1],v[1]);                       \
        v1[2] = __msa_vshf_b(v_mirror,v[2],v[2]);                       \
        v1[3] = __msa_vshf_b(v_mirror,v[3],v[3]);                       \
        v1[4] = __msa_vshf_b(v_mirror,v[4],v[4]);                       \
        v1[5] = __msa_vshf_b(v_mirror,v[5],v[5]);                       \
        v1[6] = __msa_vshf_b(v_mirror,v[6],v[6]);                       \
        v1[7] = __msa_vshf_b(v_mirror,v[7],v[7]);                       \
        v1[8] = __msa_vshf_b(v_mirror,v[8],v[8]);                       \
        v1[9] = __msa_vshf_b(v_mirror,v[9],v[9]);                       \
        v1[10] = __msa_vshf_b(v_mirror,v[10],v[10]);                    \
        v1[11] = __msa_vshf_b(v_mirror,v[11],v[11]);                    \
        v1[12] = __msa_vshf_b(v_mirror,v[12],v[12]);                    \
        v1[13] = __msa_vshf_b(v_mirror,v[13],v[13]);                    \
        v1[14] = __msa_vshf_b(v_mirror,v[14],v[14]);                    \
        v1[15] = __msa_vshf_b(v_mirror,v[15],v[15]);                    \
        if (stride != 0) {                                              \
            __msa_st_b(v1[0], dst + 15 * stride,0);                     \
            __msa_st_b(v1[1], dst + 14 * stride,0);                     \
            __msa_st_b(v1[2], dst + 13 * stride,0);                     \
            __msa_st_b(v1[3], dst + 12 * stride,0);                     \
            __msa_st_b(v1[4], dst + 11 * stride,0);                     \
            __msa_st_b(v1[5], dst + 10 * stride,0);                     \
            __msa_st_b(v1[6], dst + 9 * stride,0);                      \
            __msa_st_b(v1[7], dst + 8 * stride,0);                      \
            __msa_st_b(v1[8], dst + 7 * stride,0);                      \
            __msa_st_b(v1[9], dst + 6 * stride,0);                      \
            __msa_st_b(v1[10], dst + 5 * stride,0);                     \
            __msa_st_b(v1[11], dst + 4 * stride,0);                     \
            __msa_st_b(v1[12], dst + 3 * stride,0);                     \
            __msa_st_b(v1[13], dst + 2 * stride,0);                     \
            __msa_st_b(v1[14], dst + 1 * stride,0);                     \
            __msa_st_b(v1[15], dst + 0 * stride,0);                     \
        } else {                                                        \
            v[0] = v1[15];                                              \
            v[1] = v1[14];                                              \
            v[2] = v1[13];                                              \
            v[3] = v1[12];                                              \
            v[4] = v1[11];                                              \
            v[5] = v1[10];                                              \
            v[6] = v1[9];                                               \
            v[7] = v1[8];                                               \
            v[8] = v1[7];                                               \
            v[9] = v1[6];                                               \
            v[10] = v1[5];                                              \
            v[11] = v1[4];                                              \
            v[12] = v1[3];                                              \
            v[13] = v1[2];                                              \
            v[14] = v1[1];                                              \
            v[15] = v1[0];                                              \
        }                                                               \
    }while(0)

/*
 * Re-interleave 8 vectors of chroma data into NV12 byte order in place.
 *
 * Step 1 (ilvr_d/ilvl_d): for each pair of input vectors, gather the low
 * doublewords into v_u[] and the high doublewords into v_v[] — i.e. the
 * low 8 bytes of every input vector are treated as U samples and the
 * high 8 bytes as V samples.  NOTE(review): that U-low/V-high layout is
 * assumed from the variable names; confirm against the tile reader.
 *
 * Step 2 (ilvr_b/ilvl_b): interleave U and V byte-wise back into
 * v_uv[] as U0,V0,U1,V1,... (NV12 UV plane order).
 */
#define UV_MIX(v_uv) do {                                   \
        v16u8 v_v[4];                                       \
        v16u8 v_u[4];                                       \
        v_u[0] =  __builtin_msa_ilvr_d(v_uv[1],v_uv[0]);    \
        v_v[0] =  __builtin_msa_ilvl_d(v_uv[1],v_uv[0]);    \
                                                            \
        v_u[1] =  __builtin_msa_ilvr_d(v_uv[3],v_uv[2]);    \
        v_v[1] =  __builtin_msa_ilvl_d(v_uv[3],v_uv[2]);    \
                                                            \
        v_u[2] =  __builtin_msa_ilvr_d(v_uv[5],v_uv[4]);    \
        v_v[2] =  __builtin_msa_ilvl_d(v_uv[5],v_uv[4]);    \
                                                            \
        v_u[3] =  __builtin_msa_ilvr_d(v_uv[7],v_uv[6]);    \
        v_v[3] =  __builtin_msa_ilvl_d(v_uv[7],v_uv[6]);    \
                                                            \
        v_uv[0] = __builtin_msa_ilvr_b(v_v[0],v_u[0]);      \
        v_uv[1] = __builtin_msa_ilvl_b(v_v[0],v_u[0]);      \
                                                            \
        v_uv[2] = __builtin_msa_ilvr_b(v_v[1],v_u[1]);      \
        v_uv[3] = __builtin_msa_ilvl_b(v_v[1],v_u[1]);      \
                                                            \
        v_uv[4] = __builtin_msa_ilvr_b(v_v[2],v_u[2]);      \
        v_uv[5] = __builtin_msa_ilvl_b(v_v[2],v_u[2]);      \
                                                            \
        v_uv[6] = __builtin_msa_ilvr_b(v_v[3],v_u[3]);      \
        v_uv[7] = __builtin_msa_ilvl_b(v_v[3],v_u[3]);      \
    }while(0)

/*
 * Rotate an 8x8 block of UV halfword pairs by 90 degrees (each v[i]
 * holds 8 interleaved UV pairs treated as halfword units): three
 * interleave rounds (h/w/d) transpose the 8x8 halfword matrix, then the
 * rows are stored bottom-to-top.  The store order v1[0],v1[4],v1[2],...
 * undoes the bit-reversed row ordering the transpose leaves behind.
 *
 * NOTE(review): unlike ymac_transport_90, the final exchange round
 * writes into the local v1[], so when stride == 0 nothing is stored and
 * v[] is left holding intermediate values — the stride==0 path appears
 * unusable here; confirm no caller relies on it.
 */
#define uvmac_transport_90(v,dst,stride) do {       \
        v8u16 v1[8];                                \
        exchange(h,v,v1,0,1);                       \
        exchange(h,v,v1,2,3);                       \
        exchange(h,v,v1,4,5);                       \
        exchange(h,v,v1,6,7);                       \
        exchange(w,v1,v,0,2);                       \
        exchange(w,v1,v,1,3);                       \
        exchange(w,v1,v,4,6);                       \
        exchange(w,v1,v,5,7);                       \
        exchange(d,v,v1,0,4);                       \
        exchange(d,v,v1,2,6);                       \
        exchange(d,v,v1,1,5);                       \
        exchange(d,v,v1,3,7);                       \
        if(stride) {                                \
            __msa_st_b(v1[0], dst + 7 * stride,0);  \
            __msa_st_b(v1[4], dst + 6 * stride,0);  \
            __msa_st_b(v1[2], dst + 5 * stride,0);  \
            __msa_st_b(v1[6], dst + 4 * stride,0);  \
            __msa_st_b(v1[1], dst + 3 * stride,0);  \
            __msa_st_b(v1[5], dst + 2 * stride,0);  \
            __msa_st_b(v1[3], dst + 1 * stride,0);  \
            __msa_st_b(v1[7], dst + 0 * stride,0);  \
        }                                           \
    }while(0)

/*
 * Rotate an 8x8 block of UV halfword pairs by 270 degrees: same
 * halfword transpose as uvmac_transport_90 but built from exchange_r
 * (swapped ilvr/ilvl operands) and storing rows top-to-bottom, giving
 * the opposite rotation direction.
 *
 * NOTE(review): as with uvmac_transport_90, the result ends up in the
 * local v1[], so nothing useful survives when stride == 0; confirm no
 * caller relies on an in-place stride==0 path.
 */
#define uvmac_transport_270(v,dst,stride) do {      \
        v8u16 v1[8];                                \
        exchange_r(h,v,v1,0,1);                     \
        exchange_r(h,v,v1,2,3);                     \
        exchange_r(h,v,v1,4,5);                     \
        exchange_r(h,v,v1,6,7);                     \
        exchange_r(w,v1,v,0,2);                     \
        exchange_r(w,v1,v,1,3);                     \
        exchange_r(w,v1,v,4,6);                     \
        exchange_r(w,v1,v,5,7);                     \
        exchange_r(d,v,v1,0,4);                     \
        exchange_r(d,v,v1,2,6);                     \
        exchange_r(d,v,v1,1,5);                     \
        exchange_r(d,v,v1,3,7);                     \
        if(stride) {                                \
            __msa_st_b(v1[0], dst + 0 * stride,0);  \
            __msa_st_b(v1[4], dst + 1 * stride,0);  \
            __msa_st_b(v1[2], dst + 2 * stride,0);  \
            __msa_st_b(v1[6], dst + 3 * stride,0);  \
            __msa_st_b(v1[1], dst + 4 * stride,0);  \
            __msa_st_b(v1[5], dst + 5 * stride,0);  \
            __msa_st_b(v1[3], dst + 6 * stride,0);  \
            __msa_st_b(v1[7], dst + 7 * stride,0);  \
        }                                           \
    }while(0)


/*
 * Rotate an 8x8 block of UV halfword pairs by 180 degrees: reverse the
 * 8 halfwords (UV pairs) within each vector via a vshf mirror shuffle,
 * then store the rows in reverse order (row i -> dst + (7-i)*stride).
 *
 * NOTE(review): unlike ymac_transport_180, there is no stride==0
 * write-back branch — with stride == 0 this macro computes v1[] and
 * discards it (effectively a no-op on v[]).  Confirm whether an
 * in-place path was intended.
 */
#define uvmac_transport_180(v,dst,stride) do {      \
        v8u16 v1[8];                                \
        const v8u16 v_mirror = {7,6,5,4,3,2,1,0};   \
        v1[0] = __msa_vshf_h(v_mirror,v[0],v[0]);   \
        v1[1] = __msa_vshf_h(v_mirror,v[1],v[1]);   \
        v1[2] = __msa_vshf_h(v_mirror,v[2],v[2]);   \
        v1[3] = __msa_vshf_h(v_mirror,v[3],v[3]);   \
        v1[4] = __msa_vshf_h(v_mirror,v[4],v[4]);   \
        v1[5] = __msa_vshf_h(v_mirror,v[5],v[5]);   \
        v1[6] = __msa_vshf_h(v_mirror,v[6],v[6]);   \
        v1[7] = __msa_vshf_h(v_mirror,v[7],v[7]);   \
        if(stride) {                                \
            __msa_st_b(v1[0], dst + 7 * stride,0);  \
            __msa_st_b(v1[1], dst + 6 * stride,0);  \
            __msa_st_b(v1[2], dst + 5 * stride,0);  \
            __msa_st_b(v1[3], dst + 4 * stride,0);  \
            __msa_st_b(v1[4], dst + 3 * stride,0);  \
            __msa_st_b(v1[5], dst + 2 * stride,0);  \
            __msa_st_b(v1[6], dst + 1 * stride,0);  \
            __msa_st_b(v1[7], dst + 0 * stride,0);  \
        }                                           \
    }while(0)

/*
 * MSA-accelerated conversion of a tiled 4:2:0 frame (src_buf, passed by
 * value) to linear NV12 (written through dst_buf).  The suffix selects
 * the output rotation: none / 90 / 180 / 270 degrees.
 * NOTE(review): semantics inferred from the names; the struct layouts
 * come from "convert.h" and the implementations are not visible here —
 * confirm against the definitions.
 */
void _tile420_nv12_msa(struct tile420_fmt src_buf, struct nv12_fmt *dst_buf);
void _tile420_nv12_msa_90(struct tile420_fmt src_buf, struct nv12_fmt *dst_buf);
void _tile420_nv12_msa_180(struct tile420_fmt src_buf, struct nv12_fmt *dst_buf);
void _tile420_nv12_msa_270(struct tile420_fmt src_buf, struct nv12_fmt *dst_buf);

#endif
