#pragma once

#ifndef __GNUC__
#error "Unsupported Compiler"
#endif

/**
 * GNU C A64 Operand Modifiers
 *  w: %w0
 *  x: %x0
 *  B: %b0
 *  H: %h0
 *  S: %s0
 *  D: %d0
 *  V: %q0
 */


/**
 * Load one 16-byte vector from `addr`, post-incrementing the address by
 * `offset`:  LD1 { Vd.16B }, [Xn], <imm|Xm>
 * The "Kr" constraint lets `offset` be either an immediate ("K") or a
 * general register ("r").
 * NOTE(review): the immediate post-index form of LD1 {.16B} must be
 * exactly 16 — confirm callers pass 16 (or a register) here.
 * NOTE(review): the asm reads memory but declares no "memory" clobber,
 * so the compiler may move prior stores past the load — verify callers.
 */
#define A64_LD1(return_type, offset, addr) ({ \
  return_type __rv__; \
  asm volatile ("LD1 { %[dest].16B }, [%[src]], %[idx]": [dest]"=w"(__rv__): [src]"r"(addr), [idx]"Kr"(offset)); \
__rv__;})



/**
 * Broadcast a general-purpose register into every lane of a vector:
 *   DUP Vd.<dest_type>, <src_modifier>n
 * `src_modifier` picks the GP-register view (w = 32-bit, x = 64-bit —
 * see the operand-modifier table at the top of this file); `dest_type`
 * is the vector arrangement (e.g. 16B, 4S).
 */
#define A64_DUP(return_type, dest_type, src_modifier, value) ({ \
  return_type __rv__; \
  asm volatile ("DUP %[dest]." #dest_type ", %" #src_modifier "[src]": [dest]"=w"(__rv__): [src]"r"(value)); \
__rv__;})


/**
 * Generic three-operand vector op:  <instruction> Vd.T, Vn.T, Vm.T
 * The caller supplies the (already valid A64) mnemonic and a single
 * arrangement `modifier` shared by all three operands.
 */
#define A64_VVV(instruction, return_type, modifier, value1, value2) ({ \
  return_type __rv__; \
  asm volatile ("" #instruction " %[dest]." #modifier ", %[src1]." #modifier ", %[src2]." #modifier "": [dest]"=w"(__rv__): [src1]"w"(value1), [src2]"w"(value2)); \
__rv__;})


/**
 * Generic vector-by-immediate op:  <instruction> Vd.T, Vn.T, #imm
 * `constant` is constrained with "K"; it is substituted as a bare
 * number, which GNU as accepts in place of the canonical `#imm`.
 */
#define A64_VVI(instruction, return_type, modifier, value1, constant) ({ \
  return_type __rv__; \
  asm volatile ("" #instruction " %[dest]." #modifier ", %[src1]." #modifier ", %[imm]": [dest]"=w"(__rv__): [src1]"w"(value1), [imm]"K"(constant)); \
__rv__;})


/**
 * a64_vld1<type><Idx>(): load one full vector, post-incrementing the
 * source pointer by the compile-time constant Idx:
 *   ld1 { Vd.<type> }, [Xn], Idx
 * NOTE(review): the immediate post-index of LD1 (one register) must be
 * the number of bytes transferred (8 or 16); the default Idx = 0 would
 * not assemble if instantiated — presumably callers always pass 8/16.
 */
#define a64_vld1(type) \
template<typename Tr, unsigned char Idx = 0> \
inline Tr a64_vld1##type (const void* src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "ld1 { %[dst]." #type " }, [%[src]], %[idx]" \
    :[dst]"=w"(dst) \
    :[src]"r"(src), [idx]"K"(Idx) \
  ); \
  return dst; \
}
a64_vld1(8b)
a64_vld1(16b)
a64_vld1(4h)
a64_vld1(8h)
a64_vld1(2s)
a64_vld1(4s)
a64_vld1(1d)
a64_vld1(2d)
#undef a64_vld1


/**
 * a64_vld1<type>(src, idx): load one full vector, post-incrementing the
 * source pointer by the run-time amount in a register:
 *   ld1 { Vd.<type> }, [Xn], Xm
 */
#define a64_vld1(type) \
template<typename Tr> \
inline Tr a64_vld1##type (const void* src, unsigned long idx) noexcept { \
  Tr dst; \
  asm volatile ( \
    "ld1 { %[dst]." #type " }, [%[src]], %[idx]" \
    :[dst]"=w"(dst) \
    :[src]"r"(src), [idx]"r"(idx) \
  ); \
  return dst; \
}
a64_vld1(8b)
a64_vld1(16b)
a64_vld1(4h)
a64_vld1(8h)
a64_vld1(2s)
a64_vld1(4s)
a64_vld1(1d)
a64_vld1(2d)
#undef a64_vld1


/**
 * a64_vld1<type><index><Idx>(dst, src): load a single element into lane
 * `index` of an existing vector (other lanes preserved, hence "+w"),
 * post-incrementing the source pointer by the compile-time Idx:
 *   ld1 { Vd.<type> }[index], [Xn], Idx
 * NOTE(review): the immediate post-index of the single-lane LD1 must
 * equal the element size (1/2/4/8); the default Idx = 0 would not
 * assemble if instantiated — confirm call sites.
 */
#define a64_vld1(type, index) \
template<typename Tr, unsigned char Idx = 0> \
inline void a64_vld1##type##index (Tr& dst, const void* src) noexcept { \
  asm volatile ( \
    "ld1 { %[dst]." #type " }[" #index "], [%[src]], %[idx]" \
    :[dst]"+w"(dst) \
    :[src]"r"(src), [idx]"K"(Idx) \
  ); \
}
a64_vld1(b, 0)
a64_vld1(b, 1)
a64_vld1(b, 2)
a64_vld1(b, 3)
a64_vld1(b, 4)
a64_vld1(b, 5)
a64_vld1(b, 6)
a64_vld1(b, 7)
a64_vld1(b, 8)
a64_vld1(b, 9)
a64_vld1(b, 10)
a64_vld1(b, 11)
a64_vld1(b, 12)
a64_vld1(b, 13)
a64_vld1(b, 14)
a64_vld1(b, 15)
a64_vld1(h, 0)
a64_vld1(h, 1)
a64_vld1(h, 2)
a64_vld1(h, 3)
a64_vld1(h, 4)
a64_vld1(h, 5)
a64_vld1(h, 6)
a64_vld1(h, 7)
a64_vld1(s, 0)
a64_vld1(s, 1)
a64_vld1(s, 2)
a64_vld1(s, 3)
a64_vld1(d, 0)
a64_vld1(d, 1)
#undef a64_vld1


/**
 * a64_vld1<type><index>(dst, src, idx): load a single element into lane
 * `index` of an existing vector (other lanes preserved, hence "+w"),
 * post-incrementing the source pointer by the run-time register `idx`:
 *   ld1 { Vd.<type> }[index], [Xn], Xm
 */
#define a64_vld1(type, index) \
template<typename Tr> \
inline void a64_vld1##type##index (Tr& dst, const void* src, unsigned long idx) noexcept { \
  asm volatile ( \
    "ld1 { %[dst]." #type " }[" #index "], [%[src]], %[idx]" \
    :[dst]"+w"(dst) \
    :[src]"r"(src), [idx]"r"(idx) \
  ); \
}
a64_vld1(b, 0)
a64_vld1(b, 1)
a64_vld1(b, 2)
a64_vld1(b, 3)
a64_vld1(b, 4)
a64_vld1(b, 5)
a64_vld1(b, 6)
a64_vld1(b, 7)
a64_vld1(b, 8)
a64_vld1(b, 9)
a64_vld1(b, 10)
a64_vld1(b, 11)
a64_vld1(b, 12)
a64_vld1(b, 13)
a64_vld1(b, 14)
a64_vld1(b, 15)
a64_vld1(h, 0)
a64_vld1(h, 1)
a64_vld1(h, 2)
a64_vld1(h, 3)
a64_vld1(h, 4)
a64_vld1(h, 5)
a64_vld1(h, 6)
a64_vld1(h, 7)
a64_vld1(s, 0)
a64_vld1(s, 1)
a64_vld1(s, 2)
a64_vld1(s, 3)
a64_vld1(d, 0)
a64_vld1(d, 1)
#undef a64_vld1


/**
 * a64_vld1r<type><Idx>(): load one element and replicate it into every
 * lane, post-incrementing the source pointer by the compile-time Idx:
 *   ld1r { Vd.<type> }, [Xn], Idx
 * NOTE(review): unlike the a64_vld1 templates above, Idx has no `= 0`
 * default here — possibly deliberate (LD1R's immediate post-index must
 * equal the element size, so 0 is never valid); confirm intent.
 */
#define a64_vld1r(type) \
template<typename Tr, unsigned char Idx> \
inline Tr a64_vld1r##type (const void* src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "ld1r { %[dst]." #type " }, [%[src]], %[idx]" \
    :[dst]"=w"(dst) \
    :[src]"r"(src), [idx]"K"(Idx) \
  ); \
  return dst; \
}
a64_vld1r(8b)
a64_vld1r(16b)
a64_vld1r(4h)
a64_vld1r(8h)
a64_vld1r(2s)
a64_vld1r(4s)
a64_vld1r(1d)
a64_vld1r(2d)
#undef a64_vld1r


/**
 * a64_vld1r<type>(src, idx): load one element and replicate it into
 * every lane, post-incrementing the source pointer by the run-time
 * register `idx`:
 *   ld1r { Vd.<type> }, [Xn], Xm
 */
#define a64_vld1r(type) \
template<typename Tr> \
inline Tr a64_vld1r##type (const void* src, unsigned long idx) noexcept { \
  Tr dst; \
  asm volatile ( \
    "ld1r { %[dst]." #type " }, [%[src]], %[idx]" \
    :[dst]"=w"(dst) \
    :[src]"r"(src), [idx]"r"(idx) \
  ); \
  return dst; \
}
a64_vld1r(8b)
a64_vld1r(16b)
a64_vld1r(4h)
a64_vld1r(8h)
a64_vld1r(2s)
a64_vld1r(4s)
a64_vld1r(1d)
a64_vld1r(2d)
/* Was `#undef a64_vld1` (a no-op), which leaked the a64_vld1r macro
 * out of this section; undef the name actually defined above. */
#undef a64_vld1r


/**
 * Store one full 16-byte vector to `dst`:
 *   st1 { Vn.16b }, [Xn]
 * The "memory" clobber is required because the asm writes through `dst`:
 * without it the compiler is free to reorder or cache ordinary memory
 * accesses across the store.
 */
template<typename Ts>
inline void a64_vst1 (void* dst, Ts src) noexcept {
  asm volatile (
    "st1 { %[src].16b }, [%[dst]]"
    :
    :[dst]"r"(dst), [src]"w"(src)
    :"memory"
  );
}


/**
 * a64_vst1<type><index>(dst, src): store a single lane of a vector:
 *   st1 { Vn.<type> }[index], [Xn]
 * The "memory" clobber is required because the asm writes through `dst`
 * (see a64_vst1 above).
 */
#define a64_vst1(type, index) \
template<typename Ts> \
inline void a64_vst1##type##index (void* dst, Ts src) noexcept { \
  asm volatile ( \
    "st1 { %[src]." #type " }[" #index "], [%[dst]]" \
    : \
    :[dst]"r"(dst), [src]"w"(src) \
    :"memory" \
  ); \
}
a64_vst1(b, 0)
a64_vst1(b, 1)
a64_vst1(b, 2)
a64_vst1(b, 3)
a64_vst1(b, 4)
a64_vst1(b, 5)
a64_vst1(b, 6)
a64_vst1(b, 7)
a64_vst1(b, 8)
a64_vst1(b, 9)
a64_vst1(b, 10)
a64_vst1(b, 11)
a64_vst1(b, 12)
a64_vst1(b, 13)
a64_vst1(b, 14)
a64_vst1(b, 15)
a64_vst1(h, 0)
a64_vst1(h, 1)
a64_vst1(h, 2)
a64_vst1(h, 3)
a64_vst1(h, 4)
a64_vst1(h, 5)
a64_vst1(h, 6)
a64_vst1(h, 7)
a64_vst1(s, 0)
a64_vst1(s, 1)
a64_vst1(s, 2)
a64_vst1(s, 3)
a64_vst1(d, 0)
a64_vst1(d, 1)
#undef a64_vst1


/**
 * a64_vdup<width><type><index>(src): broadcast lane `index` of the
 * source vector into every lane of the destination:
 *   dup Vd.<width><type>, Vn.<type>[index]
 * (The template string previously omitted the comma between the two
 * operands, which does not assemble.)
 */
#define a64_vdup(width, type, index) \
template<typename Tr, typename Ts> \
inline Tr a64_vdup##width##type##index (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "dup %[dst]." #width #type ", %[src]." #type "[" #index "]" \
    :[dst]"=w"(dst) \
    :[src]"w"(src) \
  ); \
  return dst; \
}
a64_vdup(8, b, 0)
a64_vdup(8, b, 1)
a64_vdup(8, b, 2)
a64_vdup(8, b, 3)
a64_vdup(8, b, 4)
a64_vdup(8, b, 5)
a64_vdup(8, b, 6)
a64_vdup(8, b, 7)
a64_vdup(16, b, 0)
a64_vdup(16, b, 1)
a64_vdup(16, b, 2)
a64_vdup(16, b, 3)
a64_vdup(16, b, 4)
a64_vdup(16, b, 5)
a64_vdup(16, b, 6)
a64_vdup(16, b, 7)
a64_vdup(16, b, 8)
a64_vdup(16, b, 9)
a64_vdup(16, b, 10)
a64_vdup(16, b, 11)
a64_vdup(16, b, 12)
a64_vdup(16, b, 13)
a64_vdup(16, b, 14)
a64_vdup(16, b, 15)
a64_vdup(4, h, 0)
a64_vdup(4, h, 1)
a64_vdup(4, h, 2)
a64_vdup(4, h, 3)
a64_vdup(8, h, 0)
a64_vdup(8, h, 1)
a64_vdup(8, h, 2)
a64_vdup(8, h, 3)
a64_vdup(8, h, 4)
a64_vdup(8, h, 5)
a64_vdup(8, h, 6)
a64_vdup(8, h, 7)
a64_vdup(2, s, 0)
a64_vdup(2, s, 1)
a64_vdup(4, s, 0)
a64_vdup(4, s, 1)
a64_vdup(4, s, 2)
a64_vdup(4, s, 3)
a64_vdup(2, d, 0)
a64_vdup(2, d, 1)
#undef a64_vdup


/**
 * a64_vdup<type>(src): broadcast a general-purpose register into every
 * lane:  dup Vd.<type>, Wn|Xn
 * Fixes vs. the previous revision: the comma between the operands was
 * missing, and the source register view is now pinned with an explicit
 * operand modifier (`mod` = w for b/h/s lanes, x for d lanes) instead of
 * depending on the width of whatever type Ts happens to be.
 */
#define a64_vdup(type, mod) \
template<typename Tr, typename Ts> \
inline Tr a64_vdup##type (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "dup %[dst]." #type ", %" #mod "[src]" \
    :[dst]"=w"(dst) \
    :[src]"r"(src) \
  ); \
  return dst; \
}
a64_vdup(8b, w)
a64_vdup(16b, w)
a64_vdup(4h, w)
a64_vdup(8h, w)
a64_vdup(2s, w)
a64_vdup(4s, w)
a64_vdup(2d, x)
#undef a64_vdup


/**
 * Single-table byte lookup:  tbl Vd.16b, { Vn.16b }, Vm.16b
 * Each byte of `idx` selects a byte of `tbl`; per TBL semantics an
 * out-of-range index byte yields 0 in that lane.
 */
template<typename Tr, typename Ts, typename Ti>
inline Tr a64_vtbl1 (Ts tbl, Ti idx) noexcept {
  Tr dst;
  asm volatile (
    "tbl %[dst].16b, { %[tbl].16b }, %[idx].16b"
    :[dst]"=w"(dst)
    :[tbl]"w"(tbl), [idx]"w"(idx)
  );
  return dst;
}
/**
 * Two-table byte lookup:  tbl Vd.16b, { Vn.16b, Vn+1.16b }, Vm.16b
 * NOTE(review): the A64 register-list form requires tb0/tb1 to land in
 * consecutive V registers; plain independent "w" constraints do not
 * guarantee that, so this can fail to assemble depending on register
 * allocation — confirm against the intended toolchain.
 */
template<typename Tr, typename Ts, typename Ti>
inline Tr a64_vtbl2 (Ts tb0, Ts tb1, Ti idx) noexcept {
  Tr dst;
  asm volatile (
    "tbl %[dst].16b, { %[tb0].16b, %[tb1].16b }, %[idx].16b"
  	:[dst]"=w"(dst)
  	:[tb0]"w"(tb0), [tb1]"w"(tb1), [idx]"w"(idx)
  );
  return dst;
}
/**
 * Three-table byte lookup:  tbl Vd.16b, { Vn..Vn+2 }.16b, Vm.16b
 * NOTE(review): same consecutive-register-list caveat as a64_vtbl2 —
 * "w" constraints don't force tb0..tb2 into adjacent V registers.
 */
template<typename Tr, typename Ts, typename Ti>
inline Tr a64_vtbl3 (Ts tb0, Ts tb1, Ts tb2, Ti idx) noexcept {
  Tr dst;
  asm volatile (
    "tbl %[dst].16b, { %[tb0].16b, %[tb1].16b, %[tb2].16b }, %[idx].16b"
  	:[dst]"=w"(dst)
  	:[tb0]"w"(tb0), [tb1]"w"(tb1), [tb2]"w"(tb2), [idx]"w"(idx)
  );
  return dst;
}
/**
 * Four-table byte lookup:  tbl Vd.16b, { Vn..Vn+3 }.16b, Vm.16b
 * NOTE(review): same consecutive-register-list caveat as a64_vtbl2 —
 * "w" constraints don't force tb0..tb3 into adjacent V registers.
 */
template<typename Tr, typename Ts, typename Ti>
inline Tr a64_vtbl4 (Ts tb0, Ts tb1, Ts tb2, Ts tb3, Ti idx) noexcept {
  Tr dst;
  asm volatile (
    "tbl %[dst].16b, { %[tb0].16b, %[tb1].16b, %[tb2].16b, %[tb3].16b }, %[idx].16b"
  	:[dst]"=w"(dst)
  	:[tb0]"w"(tb0), [tb1]"w"(tb1), [tb2]"w"(tb2), [tb3]"w"(tb3), [idx]"w"(idx)
  );
  return dst;
}


/**
 * Vector shift by compile-time immediate:  <insn> Vd.T, Vn.T, Imm
 *
 * @param name  generated-function suffix, keeping the A32-intrinsic
 *              style `v` prefix (a64_vushr8b, ...).
 * @param insn  actual A64 mnemonic. A64 dropped the NEON `v` prefix, so
 *              the previous `#name`-as-mnemonic expansion (e.g. `vushr`)
 *              did not assemble.
 * @param type  vector arrangement.
 */
#define a64_vvi(name, insn, type) \
template<unsigned char Imm, typename Tr, typename Ts> \
inline Tr a64_##name##type (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #insn " %[dst]." #type ", %[src]." #type ", %[imm]" \
    :[dst]"=w"(dst) \
    :[src]"w"(src), [imm]"K"(Imm) \
  ); \
  return dst; \
}
a64_vvi(vushr, ushr, 8b)
a64_vvi(vushr, ushr, 16b)
a64_vvi(vushr, ushr, 4h)
a64_vvi(vushr, ushr, 8h)
a64_vvi(vushr, ushr, 2s)
a64_vvi(vushr, ushr, 4s)
a64_vvi(vushr, ushr, 2d)

/* A64 has no USHL-by-immediate (USHL takes a register shift only); the
 * immediate left shift is SHL. Function names are kept for source
 * compatibility with existing callers. */
a64_vvi(vushl, shl, 8b)
a64_vvi(vushl, shl, 16b)
a64_vvi(vushl, shl, 4h)
a64_vvi(vushl, shl, 8h)
a64_vvi(vushl, shl, 2s)
a64_vvi(vushl, shl, 4s)
a64_vvi(vushl, shl, 2d)
#undef a64_vvi


/**
 * Three-operand lane-wise vector ops:  <insn> Vd.T, Vn.T, Vm.T
 *
 * @param name  generated-function suffix, keeping the A32-intrinsic
 *              style `v` prefix (a64_vand8b, ...).
 * @param insn  actual A64 mnemonic. A64 dropped the NEON `v` prefix, so
 *              the previous `#name`-as-mnemonic expansion (e.g. `vand`)
 *              did not assemble.
 * @param type  vector arrangement shared by all three operands.
 */
#define a64_vvv(name, insn, type) \
template<typename Tr, typename T1, typename T2> \
inline Tr a64_##name##type (T1 op1, T2 op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #insn " %[dst]." #type ", %[op1]." #type ", %[op2]." #type \
    :[dst]"=w"(dst) \
    :[op1]"w"(op1), [op2]"w"(op2) \
  ); \
  return dst; \
}
/**
 * Same, but the two sources are swapped in the emitted instruction.
 * A64 only provides CMLE/CMLT against zero; the two-register forms are
 * expressed as  op1 <= op2  ==  op2 >= op1  (CMGE, swapped) and
 * op1 < op2  ==  op2 > op1  (CMGT, swapped).
 */
#define a64_vvv_swp(name, insn, type) \
template<typename Tr, typename T1, typename T2> \
inline Tr a64_##name##type (T1 op1, T2 op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #insn " %[dst]." #type ", %[op2]." #type ", %[op1]." #type \
    :[dst]"=w"(dst) \
    :[op1]"w"(op1), [op2]"w"(op2) \
  ); \
  return dst; \
}
a64_vvv(vand, and, 8b)
a64_vvv(vand, and, 16b)

a64_vvv(veor, eor, 8b)
a64_vvv(veor, eor, 16b)

a64_vvv(vorr, orr, 8b)
a64_vvv(vorr, orr, 16b)

a64_vvv(vorn, orn, 8b)
a64_vvv(vorn, orn, 16b)

a64_vvv(vzip1, zip1, 8b)
a64_vvv(vzip1, zip1, 16b)
a64_vvv(vzip1, zip1, 4h)
a64_vvv(vzip1, zip1, 8h)
a64_vvv(vzip1, zip1, 2s)
a64_vvv(vzip1, zip1, 4s)
a64_vvv(vzip1, zip1, 2d)

a64_vvv(vzip2, zip2, 8b)
a64_vvv(vzip2, zip2, 16b)
a64_vvv(vzip2, zip2, 4h)
a64_vvv(vzip2, zip2, 8h)
a64_vvv(vzip2, zip2, 2s)
a64_vvv(vzip2, zip2, 4s)
a64_vvv(vzip2, zip2, 2d)

a64_vvv(vuzp1, uzp1, 8b)
a64_vvv(vuzp1, uzp1, 16b)
a64_vvv(vuzp1, uzp1, 4h)
a64_vvv(vuzp1, uzp1, 8h)
a64_vvv(vuzp1, uzp1, 2s)
a64_vvv(vuzp1, uzp1, 4s)
a64_vvv(vuzp1, uzp1, 2d)

a64_vvv(vuzp2, uzp2, 8b)
a64_vvv(vuzp2, uzp2, 16b)
a64_vvv(vuzp2, uzp2, 4h)
a64_vvv(vuzp2, uzp2, 8h)
a64_vvv(vuzp2, uzp2, 2s)
a64_vvv(vuzp2, uzp2, 4s)
a64_vvv(vuzp2, uzp2, 2d)

a64_vvv(vtrn1, trn1, 8b)
a64_vvv(vtrn1, trn1, 16b)
a64_vvv(vtrn1, trn1, 4h)
a64_vvv(vtrn1, trn1, 8h)
a64_vvv(vtrn1, trn1, 2s)
a64_vvv(vtrn1, trn1, 4s)
a64_vvv(vtrn1, trn1, 2d)

a64_vvv(vtrn2, trn2, 8b)
a64_vvv(vtrn2, trn2, 16b)
a64_vvv(vtrn2, trn2, 4h)
a64_vvv(vtrn2, trn2, 8h)
a64_vvv(vtrn2, trn2, 2s)
a64_vvv(vtrn2, trn2, 4s)
a64_vvv(vtrn2, trn2, 2d)

a64_vvv(vcmeq, cmeq, 8b)
a64_vvv(vcmeq, cmeq, 16b)
a64_vvv(vcmeq, cmeq, 4h)
a64_vvv(vcmeq, cmeq, 8h)
a64_vvv(vcmeq, cmeq, 2s)
a64_vvv(vcmeq, cmeq, 4s)
a64_vvv(vcmeq, cmeq, 2d)

a64_vvv(vcmge, cmge, 8b)
a64_vvv(vcmge, cmge, 16b)
a64_vvv(vcmge, cmge, 4h)
a64_vvv(vcmge, cmge, 8h)
a64_vvv(vcmge, cmge, 2s)
a64_vvv(vcmge, cmge, 4s)
a64_vvv(vcmge, cmge, 2d)

a64_vvv(vcmgt, cmgt, 8b)
a64_vvv(vcmgt, cmgt, 16b)
a64_vvv(vcmgt, cmgt, 4h)
a64_vvv(vcmgt, cmgt, 8h)
a64_vvv(vcmgt, cmgt, 2s)
a64_vvv(vcmgt, cmgt, 4s)
a64_vvv(vcmgt, cmgt, 2d)

a64_vvv(vcmhi, cmhi, 8b)
a64_vvv(vcmhi, cmhi, 16b)
a64_vvv(vcmhi, cmhi, 4h)
a64_vvv(vcmhi, cmhi, 8h)
a64_vvv(vcmhi, cmhi, 2s)
a64_vvv(vcmhi, cmhi, 4s)
a64_vvv(vcmhi, cmhi, 2d)

a64_vvv(vcmhs, cmhs, 8b)
a64_vvv(vcmhs, cmhs, 16b)
a64_vvv(vcmhs, cmhs, 4h)
a64_vvv(vcmhs, cmhs, 8h)
a64_vvv(vcmhs, cmhs, 2s)
a64_vvv(vcmhs, cmhs, 4s)
a64_vvv(vcmhs, cmhs, 2d)

a64_vvv_swp(vcmle, cmge, 8b)
a64_vvv_swp(vcmle, cmge, 16b)
a64_vvv_swp(vcmle, cmge, 4h)
a64_vvv_swp(vcmle, cmge, 8h)
a64_vvv_swp(vcmle, cmge, 2s)
a64_vvv_swp(vcmle, cmge, 4s)
a64_vvv_swp(vcmle, cmge, 2d)

a64_vvv_swp(vcmlt, cmgt, 8b)
a64_vvv_swp(vcmlt, cmgt, 16b)
a64_vvv_swp(vcmlt, cmgt, 4h)
a64_vvv_swp(vcmlt, cmgt, 8h)
a64_vvv_swp(vcmlt, cmgt, 2s)
a64_vvv_swp(vcmlt, cmgt, 4s)
a64_vvv_swp(vcmlt, cmgt, 2d)

a64_vvv(vcmtst, cmtst, 8b)
a64_vvv(vcmtst, cmtst, 16b)
a64_vvv(vcmtst, cmtst, 4h)
a64_vvv(vcmtst, cmtst, 8h)
a64_vvv(vcmtst, cmtst, 2s)
a64_vvv(vcmtst, cmtst, 4s)
a64_vvv(vcmtst, cmtst, 2d)

a64_vvv(vaddp, addp, 8b)
a64_vvv(vaddp, addp, 16b)
a64_vvv(vaddp, addp, 4h)
a64_vvv(vaddp, addp, 8h)
a64_vvv(vaddp, addp, 2s)
a64_vvv(vaddp, addp, 4s)
a64_vvv(vaddp, addp, 2d)
#undef a64_vvv_swp
#undef a64_vvv


/**
 * Accumulating three-operand ops (dst is read-modify-write, hence "+w"):
 *   <insn> Vd.T, Vn.T, Vm.T
 * `name` keeps the `v`-prefixed function suffix; `insn` is the actual
 * A64 mnemonic (the previous `vmla`/`vmls` spellings are A32 NEON and
 * do not assemble on A64).
 */
#define a64_vvv(name, insn, type) \
template<typename Tr, typename T1, typename T2> \
inline void a64_##name##type (Tr& dst, T1 op1, T2 op2) noexcept { \
  asm volatile ( \
    "" #insn " %[dst]." #type ", %[op1]." #type ", %[op2]." #type \
    :[dst]"+w"(dst) \
    :[op1]"w"(op1), [op2]"w"(op2) \
  ); \
}
a64_vvv(vmla, mla, 8b)
a64_vvv(vmla, mla, 16b)
a64_vvv(vmla, mla, 4h)
a64_vvv(vmla, mla, 8h)
a64_vvv(vmla, mla, 2s)
a64_vvv(vmla, mla, 4s)

a64_vvv(vmls, mls, 8b)
a64_vvv(vmls, mls, 16b)
a64_vvv(vmls, mls, 4h)
a64_vvv(vmls, mls, 8h)
a64_vvv(vmls, mls, 2s)
a64_vvv(vmls, mls, 4s)
#undef a64_vvv


/**
 * Byte-wise extract:  <insn> Vd.T, Vn.T, Vm.T, Imm
 * Concatenates op1:op2 and extracts a vector starting at byte Imm.
 * `name` keeps the `v`-prefixed function suffix; `insn` is the actual
 * A64 mnemonic (the previous `vext` spelling is A32 NEON and does not
 * assemble on A64).
 */
#define a64_vvv(name, insn, type) \
template<unsigned char Imm, typename Tr, typename T1, typename T2> \
inline Tr a64_##name##type (T1 op1, T2 op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #insn " %[dst]." #type ", %[op1]." #type ", %[op2]." #type ", %[imm]" \
    :[dst]"=w"(dst) \
    :[op1]"w"(op1), [op2]"w"(op2), [imm]"K"(Imm) \
  ); \
  return dst; \
}
a64_vvv(vext, ext, 8b)
a64_vvv(vext, ext, 16b)
#undef a64_vvv


/**
 * Two-operand ops with (possibly) different source/destination
 * arrangements:  <insn> Vd.<typeB>, Vn.<typeA>
 * The generated function is named after the *source* arrangement
 * (a64_vxtn8h narrows an 8h vector to 8b). `insn` is the actual A64
 * mnemonic — the previous `vxtn`/`vnot` spellings are A32 NEON and do
 * not assemble on A64.
 */
#define a64_vv(name, insn, typeA, typeB) \
template<typename Tr, typename Ts> \
inline Tr a64_##name##typeA (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #insn " %[dst]." #typeB ", %[src]." #typeA \
    :[dst]"=w"(dst) \
    :[src]"w"(src) \
  ); \
  return dst; \
}
a64_vv(vxtn, xtn, 8h, 8b)
a64_vv(vxtn, xtn, 4s, 4h)
a64_vv(vxtn, xtn, 2d, 2s)

a64_vv(vnot, not, 8b, 8b)
a64_vv(vnot, not, 16b, 16b)
#undef a64_vv


/**
 * Narrowing into the upper half of an existing vector (dst is
 * read-modify-write, hence "+w"):  <insn> Vd.<typeB>, Vn.<typeA>
 * Named after the source arrangement (a64_vxtn28h narrows 8h into the
 * top half of a 16b vector). `insn` is the actual A64 mnemonic — the
 * previous `vxtn2` spelling is A32-style and does not assemble on A64.
 */
#define a64_vv(name, insn, typeA, typeB) \
template<typename Tr, typename Ts> \
inline void a64_##name##typeA (Tr& dst, Ts src) noexcept { \
  asm volatile ( \
    "" #insn " %[dst]." #typeB ", %[src]." #typeA \
    :[dst]"+w"(dst) \
    :[src]"w"(src) \
  ); \
}
a64_vv(vxtn2, xtn2, 8h, 16b)
a64_vv(vxtn2, xtn2, 4s, 8h)
a64_vv(vxtn2, xtn2, 2d, 4s)
#undef a64_vv
