#pragma once

#ifndef __GNUC__
#error "Unsupported Compiler"
#endif

// Constraint for general-purpose-register operands ("r": any integer register).
#define X86_GPR "r"
// Constraint for rotate/shift immediates ("K": signed 8-bit integer constant).
#define X86_IMM "K"

/**
 * GNU C x86 Operand Modifiers
 *  al: %b0
 *  ah: %h0
 *  ax: %w0
 *  eax: %k0
 *  rax: %q0
 *  xmm: %x0
 *  ymm: %t0
 *  zmm: %g0
 */


/**
 * x86_rr(name): defines `Tr x86_<name>(Ts src)` wrapping the
 * register-to-register form "<name> src, dst" (AT&T operand order).
 * The destination is a write-only register operand, so this fits
 * instructions that fully overwrite their destination.
 * NOTE(review): `volatile` prevents CSE/DCE of these pure computations;
 * presumably intentional — confirm before relaxing.
 */
#define x86_rr(name) \
template<typename Tr, typename Ts> \
inline Tr x86_##name (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[src], %[dst]" \
    :[dst]"=" X86_GPR(dst) \
    :[src]X86_GPR(src) \
  ); \
  return dst; \
}


/**
 * x86_rm(name): defines `Tr x86_<name>(const void *src)` wrapping the
 * memory-source form "<name> (src), dst".
 * Fix: the pointed-to data is not described by any operand, so a
 * "memory" clobber is required — without it GCC may leave pending
 * stores to *src unflushed when the asm reads it.
 */
#define x86_rm(name) \
template<typename Tr> \
inline Tr x86_##name (const void *src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " (%[src]), %[dst]" \
    :[dst]"=" X86_GPR(dst) \
    :[src]X86_GPR(src) \
    :"memory" \
  ); \
  return dst; \
}


/**
 * x86_mr(name): defines `void x86_<name>(void *dst, Ts src)` wrapping the
 * memory-destination form "<name> src, (dst)".
 * Fix: the store through dst is not described by any output operand, so
 * the "memory" clobber is mandatory — without it GCC may assume the
 * pointed-to memory is unchanged and cache/reorder reads across the asm.
 */
#define x86_mr(name) \
template<typename Ts> \
inline void x86_##name (void *dst, Ts src) noexcept { \
  asm volatile ( \
    "" #name " %[src], (%[dst])" \
    : \
    :[dst]X86_GPR(dst), [src]X86_GPR(src) \
    :"memory" \
  ); \
}


/**
 * x86_rrr(name): defines `Tr x86_<name>(T1 op1, T2 op2)` wrapping the
 * three-operand VEX form "<name> op2, op1, dst" (AT&T order: dst last).
 * Both sources are registers; the destination is write-only.
 */
#define x86_rrr(name) \
template<typename Tr, typename T1, typename T2> \
inline Tr x86_##name (T1 op1, T2 op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[op2], %[op1], %[dst]" \
    :[dst]"=" X86_GPR(dst) \
    :[op1]X86_GPR(op1), [op2]X86_GPR(op2) \
  ); \
  return dst; \
}


/**
 * x86_rrm(name): three-operand form with a memory second source:
 * "<name> (op2), op1, dst".
 * Fix: the data at *op2 is not described by any operand, so a "memory"
 * clobber is required for prior stores to be visible to the asm.
 */
#define x86_rrm(name) \
template<typename Tr, typename T1> \
inline Tr x86_##name (T1 op1, const void *op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " (%[op2]), %[op1], %[dst]" \
    :[dst]"=" X86_GPR(dst) \
    :[op1]X86_GPR(op1), [op2]X86_GPR(op2) \
    :"memory" \
  ); \
  return dst; \
}


/**
 * x86_rmr(name): three-operand form with a memory first source:
 * "<name> op2, (op1), dst".
 * Fix: the data at *op1 is not described by any operand, so a "memory"
 * clobber is required for prior stores to be visible to the asm.
 */
#define x86_rmr(name) \
template<typename Tr, typename T2> \
inline Tr x86_##name (const void *op1, T2 op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[op2], (%[op1]), %[dst]" \
    :[dst]"=" X86_GPR(dst) \
    :[op1]X86_GPR(op1), [op2]X86_GPR(op2) \
    :"memory" \
  ); \
  return dst; \
}


#ifdef __GCC_ASM_FLAG_OUTPUTS__
/* Flag-output variants: "=@cc<cond>" makes GCC read the named condition
 * flag directly after the asm (no setcc needed).  The '=' is mandatory —
 * flag outputs are ordinary output operands, and a bare "@cc" constraint
 * does not compile.  Do not add a "cc" clobber here: it is incompatible
 * with flag outputs. */
#define x86_crr(name, flag) \
template<typename T1, typename T2> \
inline int x86_##name (T1 op1, T2 op2) noexcept { \
  int dst; \
  asm volatile ( \
    "" #name " %[op2], %[op1]" \
    :[dst]"=@cc" #flag (dst) \
    :[op1]X86_GPR(op1), [op2]X86_GPR(op2) \
  ); \
  return dst; \
}
/* Memory-operand variant: btc/btr/bts read-modify-write the bit string at
 * (op1), which no operand describes, so a "memory" clobber is required. */
#define x86_cmr(name, flag) \
template<typename T2> \
inline int x86_##name (const void *op1, T2 op2) noexcept { \
  int dst; \
  asm volatile ( \
    "" #name " %[op2], (%[op1])" \
    :[dst]"=@cc" #flag (dst) \
    :[op1]X86_GPR(op1), [op2]X86_GPR(op2) \
    :"memory" \
  ); \
  return dst; \
}
#else
/* Fallback: materialize the flag with set<flag>.  The destination must be
 * a byte-addressable register — "q" (a/b/c/d on ia32, any GPR on x86-64)
 * rather than plain "r", which on 32-bit targets could pick esi/edi that
 * have no low-byte subregister. */
#define x86_crr(name, flag) \
template<typename T1, typename T2> \
inline int x86_##name (T1 op1, T2 op2) noexcept { \
  char dst; \
  asm volatile ( \
    "" #name " %[op2], %[op1]\n\t" \
    "set" #flag " %[dst]" \
    :[dst]"=q"(dst) \
    :[op1]X86_GPR(op1), [op2]X86_GPR(op2) \
  ); \
  return (int)dst; \
}
#define x86_cmr(name, flag) \
template<typename T2> \
inline int x86_##name (const void *op1, T2 op2) noexcept { \
  char dst; \
  asm volatile ( \
    "" #name " %[op2], (%[op1])\n\t" \
    "set" #flag " %[dst]" \
    :[dst]"=q"(dst) \
    :[op1]X86_GPR(op1), [op2]X86_GPR(op2) \
    :"memory" \
  ); \
  return (int)dst; \
}
#endif


/**
 * Move a value between integer types through a general-purpose register.
 * The empty asm with a matching "0" input constraint pins src and dst to
 * the same register, so this is a register-level reinterpretation rather
 * than a C-level conversion.
 */
template<typename Tr, typename Ts>
inline Tr x86_cast (Ts src) noexcept {
  Tr dst;
  asm volatile (
    ""
    :"=r"(dst)
    :"0"(src)
  );
  return dst;
}


// Logical AND NOT (BMI1): dst = ~op1 & op2
x86_rrr(andn)
x86_rrm(andn)

// Bit Field Extract (BMI1): op2 packs start bit (bits 7:0) and
// length (bits 15:8) — TODO confirm against callers' control words.
x86_rrr(bextr)
x86_rmr(bextr)

// Extract Lowest Set Isolated Bit (BMI1): dst = src & -src
x86_rr(blsi)
x86_rm(blsi)

// Get Mask Up to Lowest Set Bit (BMI1): dst = src ^ (src - 1)
x86_rr(blsmsk)
x86_rm(blsmsk)

// Reset Lowest Set Bit (BMI1): dst = src & (src - 1)
x86_rr(blsr)
x86_rm(blsr)

// Bit Scan Forward: index of the lowest set bit.
// NOTE: the destination is architecturally undefined when src == 0.
x86_rr(bsf)
x86_rm(bsf)

// Bit Scan Reverse: index of the highest set bit.
// NOTE: the destination is architecturally undefined when src == 0.
x86_rr(bsr)
x86_rm(bsr)

// Byte Swap: reverse the byte order of `val` in place.
// NOTE(review): bswap is defined only for 32- and 64-bit registers; a
// 16-bit operand yields an undefined result — confirm callers' types.
template<typename T>
inline void x86_bswap (T &val) noexcept {
  T v = val;
  asm volatile (
    "bswap %[v]"
    :[v]"+r"(v)
  );
  val = v;
}

// Bit Test: returns CF = bit op2 of op1, as int.
x86_crr(bt, c);
x86_cmr(bt, c);

// Bit Test and Complement: returns the original bit in CF.
// NOTE(review): the register form takes op1 by value, so the
// complemented value is discarded — only the tested bit is returned.
// The memory form does update the bit string in place.
x86_crr(btc, c);
x86_cmr(btc, c);

// Bit Test and Reset: returns the original bit in CF.
// NOTE(review): register form discards the cleared value (see btc).
x86_crr(btr, c);
x86_cmr(btr, c);

// Bit Test and Set: returns the original bit in CF.
// NOTE(review): register form discards the set value (see btc).
x86_crr(bts, c);
x86_cmr(bts, c);

// Zero High Bits Starting with Specified Bit Position (BMI2)
x86_rrr(bzhi)
x86_rmr(bzhi)

// CPU Identification.
// Fix: cpuid reads its leaf from eax and its sub-leaf from ecx, but the
// original declared all four registers as output-only, so the leaf that
// executed was whatever garbage happened to be in eax.  `a` and `c` are
// now read-write ("+a"/"+c"); callers set a (and c) to select the leaf,
// and all four references receive the result.
inline void x86_cpuid (unsigned int &a, unsigned int &b, unsigned int &c, unsigned int &d) noexcept {
  asm volatile (
    "cpuid"
    :[a]"+a"(a), [b]"=b"(b), [c]"+c"(c), [d]"=d"(d)
  );
}

// Accumulate CRC32 Value (CRC-32C, SSE4.2).
// Fix: crc32 is a read-modify-write instruction — the destination is the
// running CRC — so the generic x86_rr/x86_rm macros (write-only "=r"
// destination) fed it an uninitialized accumulator.  These dedicated
// templates make the accumulator an in/out operand; `acc` defaults to 0
// so existing single-argument callers keep compiling.  The %z operand
// modifier prints the size suffix (b/w/l/q), which the memory form needs
// (a bare memory operand leaves the operand size ambiguous).
template<typename Tr, typename Ts>
inline Tr x86_crc32 (Ts src, Tr acc = 0) noexcept {
  Tr dst = acc;
  asm volatile (
    "crc32%z[src] %[src], %[dst]"
    :[dst]"+r"(dst)
    :[src]"r"(src)
  );
  return dst;
}
// Memory-source form: folds one Tr-sized chunk at *src into the CRC.
// Tr must be a 32- or 64-bit unsigned type (crc32's destination widths).
template<typename Tr>
inline Tr x86_crc32 (const void *src, Tr acc = 0) noexcept {
  Tr dst = acc;
  asm volatile (
    "crc32%z[src] %[src], %[dst]"
    :[dst]"+r"(dst)
    :[src]"m"(*static_cast<const Tr*>(src))
  );
  return dst;
}

// Unsigned Divide: divides the double-width value rem:op1 by op2.
// Fix: div divides edx:eax (the high half lives in edx), but the original
// never initialized edx — garbage there produced wrong quotients or a #DE
// fault.  The high half is now zeroed via the "+d" operand, which also
// receives the remainder.  On return op1 holds the quotient; the
// remainder is returned.
// NOTE(review): fits 16/32/64-bit T; the 8-bit form uses ah:al, not dx.
template<typename T>
inline T x86_div (T &op1, T op2) noexcept {
  T rem = 0;  // high half of the dividend
  asm volatile (
    "div %[op2]"
    :"+a"(op1), "+d"(rem)
    :[op2]"r"(op2)
  );
  return rem;
}
// Signed Divide: divides the double-width value rem:op1 by op2.
// Fix: idiv divides edx:eax, but the original never initialized edx —
// garbage there produced wrong results or a #DE fault.  The dividend's
// sign is now extended into the high half ("+d"), which also receives
// the remainder.  On return op1 holds the quotient (truncated toward
// zero); the remainder is returned.
// NOTE(review): fits 16/32/64-bit T; the 8-bit form uses ah:al, not dx.
template<typename T>
inline T x86_idiv (T &op1, T op2) noexcept {
  T rem = op1 < 0 ? T(-1) : T(0);  // sign-extend dividend into the high half
  asm volatile (
    "idiv %[op2]"
    :"+a"(op1), "+d"(rem)
    :[op2]"r"(op2)
  );
  return rem;
}

// Count the Number of Leading Zero Bits (LZCNT).
// NOTE: on CPUs without LZCNT this encoding silently executes as BSR.
x86_rr(lzcnt)
x86_rm(lzcnt)

// Move Data After Swapping Bytes (MOVBE extension)
x86_rm(movbe)
x86_mr(movbe)

// Parallel Bits Deposit (BMI2)
x86_rrr(pdep)
x86_rrm(pdep)

// Parallel Bits Extract (BMI2)
x86_rrr(pext)
x86_rrm(pext)

// Return the Count of Number of Bits Set to 1 (POPCNT)
x86_rr(popcnt)
x86_rm(popcnt)

// Rotate through carry.
// NOTE(review): rcl/rcr rotate through CF, but GCC does not model the
// flags register across asm statements — the incoming carry is whatever
// the surrounding code happens to leave there, so the rotated-in bit is
// effectively unpredictable unless CF is established by adjacent asm.
// Confirm callers rely only on the bits that do not pass through CF.

// Rotate left through carry by a compile-time immediate count.
template<unsigned char Imm, typename Tr>
inline Tr x86_rcl (Tr val) noexcept {
  asm volatile (
    "rcl %[imm], %[val]"
    :[val]"+" X86_GPR(val)
    :[imm]X86_IMM(Imm)
  );
  return val;
}
// Rotate left through carry by a run-time count (count goes in cl).
template<typename Tr>
inline Tr x86_rcl (Tr dst, unsigned char src) noexcept {
  asm volatile (
    "rcl %[src], %[dst]"
    :[dst]"+" X86_GPR(dst)
    :[src]"c"(src)
  );
  return dst;
}
// Rotate right through carry by a compile-time immediate count.
template<unsigned char Imm, typename Tr>
inline Tr x86_rcr (Tr val) noexcept {
  asm volatile (
    "rcr %[imm], %[val]"
    :[val]"+" X86_GPR(val)
    :[imm]X86_IMM(Imm)
  );
  return val;
}
// Rotate right through carry by a run-time count (count goes in cl).
template<typename Tr>
inline Tr x86_rcr (Tr dst, unsigned char src) noexcept {
  asm volatile (
    "rcr %[src], %[dst]"
    :[dst]"+" X86_GPR(dst)
    :[src]"c"(src)
  );
  return dst;
}
// Rotate left by a compile-time count, encoded as an 8-bit immediate.
template<unsigned char Imm, typename Tr>
inline Tr x86_rol (Tr val) noexcept {
  Tr v = val;
  asm volatile (
    "rol %[n], %[v]"
    :[v]"+r"(v)
    :[n]"K"(Imm)
  );
  return v;
}
// Rotate left by a run-time count (the count register is cl).
template<typename Tr>
inline Tr x86_rol (Tr dst, unsigned char src) noexcept {
  asm volatile (
    "rol %[count], %[value]"
    :[value]"+r"(dst)
    :[count]"c"(src)
  );
  return dst;
}
// Rotate right by a compile-time count, encoded as an 8-bit immediate.
template<unsigned char Imm, typename Tr>
inline Tr x86_ror (Tr val) noexcept {
  Tr v = val;
  asm volatile (
    "ror %[n], %[v]"
    :[v]"+r"(v)
    :[n]"K"(Imm)
  );
  return v;
}
// Rotate right by a run-time count (the count register is cl).
template<typename Tr>
inline Tr x86_ror (Tr dst, unsigned char src) noexcept {
  asm volatile (
    "ror %[count], %[value]"
    :[value]"+r"(dst)
    :[count]"c"(src)
  );
  return dst;
}

// Read Time-Stamp Counter: returns the full 64-bit TSC.
// rdtsc leaves the high half in edx and the low half in eax.
inline unsigned long long x86_rdtsc () noexcept {
  unsigned int hi, lo;
  asm volatile (
    "rdtsc"
    :"=d"(hi), "=a"(lo)
  );
  return ((unsigned long long)hi << 32) | lo;
}

// Count the Number of Trailing Zero Bits (BMI1).
// NOTE: on CPUs without BMI1 this encoding silently executes as BSF.
x86_rr(tzcnt)
x86_rm(tzcnt)

// Exchange and Add (register form, non-atomic):
//   tmp = op1; op1 = op1 + op2; op2 = tmp;
template<typename T>
inline void x86_xadd (T &op1, T &op2) noexcept {
  asm volatile (
    "xadd %[addend], %[accum]"
    :[accum]"+r"(op1), [addend]"+r"(op2)
  );
}
// Exchange and Add (memory form): *op1 += op2; op2 = old *op1.
// Fix: the asm read-modify-writes *op1, which no operand describes, so a
// "memory" clobber is required — without it GCC may cache *op1 in a
// register across the asm and never see the update.
template<typename T>
inline void x86_xadd (T *op1, T &op2) noexcept {
  asm volatile (
    "xadd %[op2], (%[op1])"
    :[op2]"+r"(op2)
    :[op1]"r"(op1)
    :"memory"
  );
}

// Exchange Register/Memory with Register.
// Register form: swap the values of op1 and op2.
template<typename T>
inline void x86_xchg (T &op1, T &op2) noexcept {
  asm volatile (
    "xchg %[b], %[a]"
    :[a]"+r"(op1), [b]"+r"(op2)
  );
}
// Exchange memory with register: swap *op1 and op2.
// (xchg with a memory operand asserts the bus lock implicitly, so this
// swap is atomic on the hardware side.)
// Fix: the asm read-modify-writes *op1, which no operand describes, so a
// "memory" clobber is required for GCC to order accesses correctly.
template<typename T>
inline void x86_xchg (T *op1, T &op2) noexcept {
  asm volatile (
    "xchg %[op2], (%[op1])"
    :[op2]"+r"(op2)
    :[op1]"r"(op1)
    :"memory"
  );
}

// Lock Memory Bus.
// WARNING(review): `lock` is an instruction *prefix*, not a standalone
// instruction — emitted alone, it attaches to whatever instruction the
// compiler happens to generate next, which is unpredictable and may fault
// (#UD on non-lockable instructions).  Callers should not rely on this;
// prefer baking the lock prefix into the specific asm that needs it.
inline void x86_lock () noexcept {
  asm volatile ("lock");
}

// Break Point: raise a #BP trap (SIGTRAP on POSIX) to drop into an
// attached debugger.
inline void x86_break () noexcept {
  asm volatile ("int $3");
}



#undef x86_rr
#undef x86_rm
#undef x86_mr
#undef x86_rrr
#undef x86_rrm
#undef x86_rmr
#undef x86_crr
#undef x86_cmr