#pragma once

#include "x86.h"


// GNU statement-expression helpers wrapping single AVX/AVX2 instructions.
// NOTE(review): these asm statements carry no "memory" clobber, so the
// compiler is not told the load below observes memory — confirm call sites
// do not depend on ordering against preceding non-volatile stores.

// Broadcast one element of a vector register: VPBROADCASTB/W/D/Q.
// `suffix` selects the element size; the %x print-modifier forces the
// source to be spelled as an XMM register, as the encoding requires.
#define AVX2_VPBROADCAST(suffix, return_type, value) ({ \
  return_type __rv__; \
  asm volatile ("VPBROADCAST" #suffix " %x[src], %[dest]": [dest]"=x"(__rv__): [src]"x"(value)); \
__rv__;})


// Broadcast from memory at compile-time offset `offset(addr)`:
// VBROADCASTSS/SD/F128.  `offset` is pasted into the address expression.
#define AVX2_VBROADCAST(suffix, return_type, offset, addr) ({ \
  return_type __rv__; \
  asm volatile ("VBROADCAST" #suffix " " #offset "(%[src]), %[dest]": [dest]"=x"(__rv__): [src]"r"(addr)); \
__rv__;})


// Aligned vector load (VMOVAPS); `addr` must be 16/32-byte aligned for the
// chosen return_type or the instruction faults at run time.
#define AVX2_LOADALIGN(return_type, addr) ({ \
  return_type __rv__;  \
  asm volatile ("VMOVAPS (%[src]), %[dest]": [dest]"=x"(__rv__): [src]"r"(addr)); \
__rv__;})


// Aligned vector store (VMOVAPS); `addr` must be suitably aligned for the
// width of `value`.  The "memory" clobber is required: without it the
// compiler does not know the asm writes *addr and may keep stale cached
// values live or reorder ordinary accesses across the store.
#define AVX2_STOREALIGN(addr, value) ({ \
  asm volatile ("VMOVAPS %[src], (%[dest])":: [dest]"r"(addr), [src]"x"(value): "memory"); \
})


// Zero a vector register with no source operand: VPXOR dst,dst,dst.
// NOTE(review): the %t print-modifier hard-codes the YMM spelling of the
// register, so return_type is expected to be a 256-bit vector — confirm.
#define AVX2_ZERO(return_type) ({ \
  return_type __rv__; \
  asm volatile ("VPXOR %t[dest], %t[dest], %t[dest]": [dest]"=x"(__rv__)); \
__rv__;})


// Immediate shift: "VPS<suffix> $shift, src, dst" (suffix e.g. LLW, RAD,
// RLDQ).  `operand_modifier` picks the register-name print modifier (x or
// t) and `shift` must satisfy the "K" (signed 8-bit immediate) constraint.
#define AVX2_VPS(suffix, return_type, operand_modifier, shift, source) ({ \
  return_type __rv__; \
  asm volatile ("VPS" #suffix " %[imm], %" #operand_modifier "[src], %" #operand_modifier "[dest]": [dest]"=x"(__rv__): [src]"x"(source), [imm]"K"(shift)); \
__rv__;})



// Constraint strings for vector operands: "x" means any SSE/AVX register.
// NOTE(review): under clang, "t" and "g" are used here for YMM/ZMM
// operands, which does not match GCC's documented machine constraints
// (where "t" is the x87 stack top) — confirm against clang's inline-asm
// constraint documentation before using AVX_YMM/AVX_ZMM directly.  The
// non-clang branch falls back to plain "x" everywhere and lets the
// operand's C type decide the register width.
#if defined(__llvm__)
#define AVX_ANY "x"
#define AVX_XMM "x"
#define AVX_YMM "t"
#define AVX_ZMM "g"
#else
#define AVX_ANY "x"
#define AVX_XMM "x"
#define AVX_YMM "x"
#define AVX_ZMM "x"
#endif

namespace avx {

// Immediate predicate values (imm8[4:0]) for VCMPPD/VCMPPS/VCMPSD/VCMPSS.
// Naming follows the Intel SDM: O/U = ordered/unordered w.r.t. NaNs,
// S/Q = signaling/quiet (whether QNaN operands raise #IA).
enum FP_CMP {
  EQ_OQ = 0x0,
  LT_OS = 0x1,
  LE_OS = 0x2,
  UNORD_Q = 0x3,      // correct SDM spelling (was misspelled UNIRD_Q)
  UNIRD_Q = UNORD_Q,  // deprecated misspelling, kept for source compatibility
  NEQ_UQ = 0x4,
  NLT_US = 0x5,
  NLE_US = 0x6,
  ORD_Q = 0x7,
  EQ_UQ = 0x8,
  NGE_US = 0x9,
  NGT_US = 0xA,
  FALSE_OQ = 0xB,
  NEQ_OQ = 0xC,
  GE_OS = 0xD,
  GT_OS = 0xE,
  TRUE_UQ = 0xF,
  EQ_OS = 0x10,
  LT_OQ = 0x11,
  LE_OQ = 0x12,
  UNORD_S = 0x13,
  NEQ_US = 0x14,
  NLT_UQ = 0x15,
  NLE_UQ = 0x16,
  ORD_S = 0x17,
  EQ_US = 0x18,
  NGE_UQ = 0x19,
  NGT_UQ = 0x1A,
  FALSE_OS = 0x1B,
  NEQ_OS = 0x1C,
  GE_OQ = 0x1D,
  GT_OQ = 0x1E,
  TRUE_US = 0x1F,
};


// Immediate control fields for VPCMPESTRI/VPCMPESTRM/VPCMPISTRI/VPCMPISTRM.
// OR one value from each group: D_* = source data format, A_* = aggregation
// operation, P_* = result polarity, O_* = output selection.
enum STR_CMP {
  D_UB = 0x0,
  D_UW = 0x1,
  D_SB = 0x2,
  D_SW = 0x3,
  A_EQ_ANY = 0x0,
  A_RANGE = 0x4,
  A_EQ_EACH = 0x8,
  A_EQ_ORD = 0xC,
  P_POS = 0x00,
  P_NEG = 0x10,
  P_MPOS = 0x20,
  P_MNEG = 0x30,
  O_LI_BIT = 0x00,
  O_MI_BYTE = 0x40,
};

}

// avx_<name>(dst, src): store form, emits "<name> %src, (dst)".
// The "memory" clobber is required on all three store generators: without
// it the compiler does not know the asm writes through `dst` and may keep
// stale values cached in registers or reorder ordinary accesses across
// the store.
#define avx_mx(name) \
template<typename Ts> \
inline void avx_##name (void *dst, Ts src) noexcept { \
  asm volatile ( \
    "" #name " %[src], (%[dst])" \
    : \
    :[dst]X86_GPR(dst), [src]AVX_ANY(src) \
    :"memory" \
  ); \
}


// avx_<name>(dst, op1, op2): masked/two-source store form,
// emits "<name> %op2, %op1, (dst)" (e.g. vmaskmovps).
#define avx_mxx(name) \
template<typename T1, typename T2> \
inline void avx_##name (void *dst, T1 op1, T2 op2) noexcept { \
  asm volatile ( \
    "" #name " %[op2], %[op1], (%[dst])" \
    : \
    :[dst]X86_GPR(dst), [op1]AVX_ANY(op1), [op2]AVX_ANY(op2) \
    :"memory" \
  ); \
}


// avx_<name><Imm>(dst, src): extract-to-memory form,
// emits "<name> $Imm, %src, (dst)" (e.g. vpextrd, vextractf128).
#define avx_mxi(name) \
template<unsigned char Imm, typename Ts> \
inline void avx_##name (void *dst, Ts src) noexcept { \
  asm volatile ( \
    "" #name " %[imm], %[src], (%[dst])" \
    : \
    :[dst]X86_GPR(dst), [src]AVX_ANY(src), [imm]X86_IMM(Imm) \
    :"memory" \
  ); \
}


// avx_<name>_(src): vector -> general-purpose register (e.g. vmovmskps,
// vcvtsd2si).  The trailing underscore avoids overload ambiguity with the
// avx_rm memory-source form below when the argument is a pointer.
#define avx_rx(name) \
template<typename Tr, typename Ts> \
inline Tr avx_##name##_ (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[src], %[dst]" \
    :[dst]"=" X86_GPR(dst) \
    :[src]AVX_ANY(src) \
  ); \
  return dst; \
}


// avx_<name><Imm>(src): vector -> GPR with an imm8 selector
// (e.g. vpextrb/w/d/q, vextractps).
#define avx_rxi(name) \
template<unsigned char Imm, typename Tr, typename Ts> \
inline Tr avx_##name (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[imm], %[src], %[dst]" \
    :[dst]"=" X86_GPR(dst) \
    :[src]AVX_ANY(src), [imm]X86_IMM(Imm) \
  ); \
  return dst; \
}


// avx_<name>(src): memory -> GPR form of the same instructions.
#define avx_rm(name) \
template<typename Tr> \
inline Tr avx_##name (const void *src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " (%[src]), %[dst]" \
    :[dst]"=" X86_GPR(dst) \
    :[src]X86_GPR(src) \
  ); \
  return dst; \
}


// avx_<name>(src): unary vector -> vector form (e.g. vmovaps, vpabsd).
#define avx_xx(name) \
template<typename Tr, typename Ts> \
inline Tr avx_##name (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[src], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[src]AVX_ANY(src) \
  ); \
  return dst; \
}


// avx_<name>(src): general-purpose register -> vector (e.g. vmovd, vmovq).
#define avx_xr(name) \
template<typename Tr, typename Ts> \
inline Tr avx_##name (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[src], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[src]X86_GPR(src) \
  ); \
  return dst; \
}


// avx_<name>(src): memory -> vector load form.
#define avx_xm(name) \
template<typename Tr> \
inline Tr avx_##name (const void *src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " (%[src]), %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[src]X86_GPR(src) \
  ); \
  return dst; \
}


// avx_<name>(op1, op2): VEX three-operand form, dst = op1 <op> op2.
// In AT&T order the second source (op2) is emitted first.
#define avx_xxx(name) \
template<typename Tr, typename T1, typename T2> \
inline Tr avx_##name (T1 op1, T2 op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[op2], %[op1], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[op1]AVX_ANY(op1), [op2]AVX_ANY(op2) \
  ); \
  return dst; \
}


// avx_<name>(op1, op2): three-operand form whose second source is a
// general-purpose register (e.g. vcvtsi2sd).
#define avx_xxr(name) \
template<typename Tr, typename T1, typename T2> \
inline Tr avx_##name (T1 op1, T2 op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[op2], %[op1], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[op1]AVX_ANY(op1), [op2]X86_GPR(op2) \
  ); \
  return dst; \
}


// avx_<name>(op1, op2): three-operand form whose second source is loaded
// from memory.
#define avx_xxm(name) \
template<typename Tr, typename T1> \
inline Tr avx_##name (T1 op1, const void *op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " (%[op2]), %[op1], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[op1]AVX_ANY(op1), [op2]X86_GPR(op2) \
  ); \
  return dst; \
}


// avx_<name><Imm>(src): unary form with an imm8 (e.g. vpshufd, vroundps).
// The immediate is a compile-time template parameter so it can be encoded
// directly in the instruction.
#define avx_xxi(name) \
template<unsigned char Imm, typename Tr, typename Ts> \
inline Tr avx_##name (Ts src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[imm], %[src], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[src]AVX_ANY(src), [imm]X86_IMM(Imm) \
  ); \
  return dst; \
}


// avx_<name><Imm>(src): memory-source variant of avx_xxi.
#define avx_xmi(name) \
template<unsigned char Imm, typename Tr, typename Ts> \
inline Tr avx_##name (const void *src) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[imm], (%[src]), %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[src]X86_GPR(src), [imm]X86_IMM(Imm) \
  ); \
  return dst; \
}


// avx_<name>(op1, op2, op3): four-operand form where op3 is a vector
// selector (the vblendv* family: dst = blend(op1, op2) under mask op3).
#define avx_xxxx(name) \
template<typename Tr, typename T1, typename T2, typename T3> \
inline Tr avx_##name (T1 op1, T2 op2, T3 op3) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[op3], %[op2], %[op1], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[op1]AVX_ANY(op1), [op2]AVX_ANY(op2), [op3]AVX_ANY(op3) \
  ); \
  return dst; \
}


// avx_<name>(op1, op2, op3): four-operand form with the second source
// loaded from memory.
#define avx_xxmx(name) \
template<typename Tr, typename T1, typename T3> \
inline Tr avx_##name (T1 op1, const void *op2, T3 op3) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[op3], (%[op2]), %[op1], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[op1]AVX_ANY(op1), [op2]X86_GPR(op2), [op3]AVX_ANY(op3) \
  ); \
  return dst; \
}


// avx_<name><Imm>(op1, op2): three-operand form plus imm8
// (e.g. vshufps, vblendpd, vcmpps).
#define avx_xxxi(name) \
template<unsigned char Imm, typename Tr, typename T1, typename T2> \
inline Tr avx_##name (T1 op1, T2 op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[imm], %[op2], %[op1], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[op1]AVX_ANY(op1), [op2]AVX_ANY(op2), [imm]X86_IMM(Imm) \
  ); \
  return dst; \
}


// avx_<name><Imm>(op1, op2): insert-from-GPR form plus imm8
// (e.g. vpinsrb/w/d/q).
#define avx_xxri(name) \
template<unsigned char Imm, typename Tr, typename T1, typename T2> \
inline Tr avx_##name (T1 op1, T2 op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[imm], %[op2], %[op1], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[op1]AVX_ANY(op1), [op2]X86_GPR(op2), [imm]X86_IMM(Imm) \
  ); \
  return dst; \
}


// avx_<name><Imm>(op1, op2): memory-source variant of avx_xxxi.
#define avx_xxmi(name) \
template<unsigned char Imm, typename Tr, typename T1> \
inline Tr avx_##name (T1 op1, const void *op2) noexcept { \
  Tr dst; \
  asm volatile ( \
    "" #name " %[imm], (%[op2]), %[op1], %[dst]" \
    :[dst]"=" AVX_ANY(dst) \
    :[op1]AVX_ANY(op1), [op2]X86_GPR(op2), [imm]X86_IMM(Imm) \
  ); \
  return dst; \
}


// Reinterpret a vector value as another vector type without emitting any
// instruction: the "0" matching constraint pins src and dst to the same
// register and the empty asm template merely renames it.
template<typename Tr, typename Ts>
inline Tr avx_cast (Ts src) noexcept {
  Tr dst;
  asm volatile ("":"=" AVX_ANY(dst):"0"(src));
  return dst;
}


// Add Packed Double-Precision Floating-Point Values
avx_xxx(vaddpd)
avx_xxm(vaddpd)

// Add Packed Single-Precision Floating-Point Values
avx_xxx(vaddps)
avx_xxm(vaddps)

// Add Scalar Double-Precision Floating-Point Values
avx_xxx(vaddsd)
avx_xxm(vaddsd)

// Add Scalar Single-Precision Floating-Point Values
avx_xxx(vaddss)
avx_xxm(vaddss)

// Packed Double-FP Add/Subtract
avx_xxx(vaddsubpd)
avx_xxm(vaddsubpd)

// Packed Single-FP Add/Subtract
avx_xxx(vaddsubps)
avx_xxm(vaddsubps)

// Bitwise Logical AND of Packed Double-Precision Floating-Point Values
avx_xxx(vandpd)
avx_xxm(vandpd)

// Bitwise Logical AND of Packed Single-Precision Floating-Point Values
avx_xxx(vandps)
avx_xxm(vandps)

// Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values
avx_xxx(vandnpd)
avx_xxm(vandnpd)

// Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values
avx_xxx(vandnps)
avx_xxm(vandnps)

// Blend Packed Double Precision Floating-Point Values
avx_xxxi(vblendpd)
avx_xxmi(vblendpd)

// Blend Packed Single Precision Floating-Point Values
avx_xxxi(vblendps)
avx_xxmi(vblendps)

// Variable Blend Packed Double Precision Floating-Point Values
avx_xxxx(vblendvpd)
avx_xxmx(vblendvpd)

// Variable Blend Packed Single Precision Floating-Point Values
avx_xxxx(vblendvps)
avx_xxmx(vblendvps)

// Compare Packed Double-Precision Floating-Point Values
avx_xxxi(vcmppd)
avx_xxmi(vcmppd)

// Compare Packed Single-Precision Floating-Point Values
avx_xxxi(vcmpps)
avx_xxmi(vcmpps)

// Compare Scalar Double-Precision Floating-Point Value
avx_xxxi(vcmpsd)
avx_xxmi(vcmpsd)

// Compare Scalar Single-Precision Floating-Point Value
avx_xxxi(vcmpss)
avx_xxmi(vcmpss)

// Convert Packed Doubleword Integers to Packed Double-Precision Floating-Point Values
avx_xx(vcvtdq2pd)
avx_xm(vcvtdq2pd)

// Convert Packed Doubleword Integers to Packed Single-Precision Floating-Point Values
avx_xx(vcvtdq2ps)
avx_xm(vcvtdq2ps)

// Convert Packed Double-Precision Floating-Point Values to Packed Doubleword Integers
avx_xx(vcvtpd2dq)
avx_xm(vcvtpd2dq)

// Convert Packed Double-Precision Floating-Point Values to Packed Single-Precision Floating-Point Values
avx_xx(vcvtpd2ps)
avx_xm(vcvtpd2ps)

// Convert Packed Single-Precision Floating-Point Values to Packed Signed Doubleword Integer Values
avx_xx(vcvtps2dq)
avx_xm(vcvtps2dq)

// Convert Packed Single-Precision Floating-Point Values to Packed Double-Precision Floating-Point Values
avx_xx(vcvtps2pd)
avx_xm(vcvtps2pd)

// Convert Scalar Double-Precision Floating-Point Value to Doubleword Integer
avx_rx(vcvtsd2si)
avx_rm(vcvtsd2si)

// Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
avx_xxx(vcvtsd2ss)
avx_xxm(vcvtsd2ss)

// Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
avx_xxr(vcvtsi2sd)
avx_xxm(vcvtsi2sd)

// Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
avx_xxr(vcvtsi2ss)
avx_xxm(vcvtsi2ss)

// Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
avx_xxx(vcvtss2sd)
avx_xxm(vcvtss2sd)

// Convert Scalar Single-Precision Floating-Point Value to Doubleword Integer
avx_rx(vcvtss2si)
avx_rm(vcvtss2si)

// Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Doubleword Integers
avx_xx(vcvttpd2dq)
avx_xm(vcvttpd2dq)

// Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Signed Doubleword Integer Values
avx_xx(vcvttps2dq)
avx_xm(vcvttps2dq)

// Convert with Truncation Scalar Double-Precision Floating-Point Value to Signed Integer
avx_rx(vcvttsd2si)
avx_rm(vcvttsd2si)

// Convert with Truncation Scalar Single-Precision Floating-Point Value to Integer
avx_rx(vcvttss2si)
avx_rm(vcvttss2si)

// Divide Packed Double-Precision Floating-Point Values
avx_xxx(vdivpd)
avx_xxm(vdivpd)

// Divide Packed Single-Precision Floating-Point Values
avx_xxx(vdivps)
avx_xxm(vdivps)

// Divide Scalar Double-Precision Floating-Point Value
avx_xxx(vdivsd)
avx_xxm(vdivsd)

// Divide Scalar Single-Precision Floating-Point Value
avx_xxx(vdivss)
avx_xxm(vdivss)

// Dot Product of Packed Double Precision Floating-Point Values
avx_xxxi(vdppd)
avx_xxmi(vdppd)

// Dot Product of Packed Single Precision Floating-Point Values
avx_xxxi(vdpps)
avx_xxmi(vdpps)

// Extract Packed Floating-Point Values
avx_rxi(vextractps)
avx_mxi(vextractps)

// Packed Double-FP Horizontal Add
avx_xxx(vhaddpd)
avx_xxm(vhaddpd)

// Packed Single-FP Horizontal Add
avx_xxx(vhaddps)
avx_xxm(vhaddps)

// Packed Double-FP Horizontal Subtract
avx_xxx(vhsubpd)
avx_xxm(vhsubpd)

// Packed Single-FP Horizontal Subtract
avx_xxx(vhsubps)
avx_xxm(vhsubps)

// Insert Scalar Single-Precision Floating-Point Value
avx_xxxi(vinsertps)
avx_xxmi(vinsertps)

// Load Unaligned Integer
avx_xm(vlddqu)

// Load MXCSR Register
// Load MXCSR Register: reads the 32-bit control/status word from *src and
// installs it (rounding mode, exception masks, FTZ/DAZ).
inline void avx_vldmxcsr (const unsigned int *src) noexcept {
  asm volatile (
    "vldmxcsr (%[src])"
    :
    :[src]X86_GPR(src)
  );
}

// Store Selected Bytes of Double Quadword
template<typename T1, typename T2>
inline void avx_vmaskmovdqu (void *dst, T1 op1, T2 op2) noexcept {
  asm volatile (
    "vmaskmovdqu %[op2], %[op1]"
    :
    :[op1]AVX_XMM(op1), [op2]AVX_XMM(op2), "D"(dst)
  );
}

// Maximum of Packed Double-Precision Floating-Point Values
avx_xxx(vmaxpd)
avx_xxm(vmaxpd)

// Maximum of Packed Single-Precision Floating-Point Values
avx_xxx(vmaxps)
avx_xxm(vmaxps)

// Return Maximum Scalar Double-Precision Floating-Point Value
avx_xxx(vmaxsd)
avx_xxm(vmaxsd)

// Return Maximum Scalar Single-Precision Floating-Point Value
avx_xxx(vmaxss)
avx_xxm(vmaxss)

// Minimum of Packed Double-Precision Floating-Point Values
avx_xxx(vminpd)
avx_xxm(vminpd)

// Minimum of Packed Single-Precision Floating-Point Values
avx_xxx(vminps)
avx_xxm(vminps)

// Return Minimum Scalar Double-Precision Floating-Point Value
avx_xxx(vminsd)
avx_xxm(vminsd)

// Return Minimum Scalar Single-Precision Floating-Point Value
avx_xxx(vminss)
avx_xxm(vminss)

// Move Aligned Packed Single-Precision Floating-Point Values
avx_xx(vmovaps)
avx_xm(vmovaps)
avx_mx(vmovaps)

// Replicate Double FP Values
avx_xx(vmovddup)
avx_xm(vmovddup)

// Move Aligned Packed Integer Values
avx_xx(vmovdqa)
avx_xm(vmovdqa)
avx_mx(vmovdqa)

// Move Unaligned Packed Integer Values
avx_xx(vmovdqu)
avx_xm(vmovdqu)
avx_mx(vmovdqu)

// Move Packed Single-Precision Floating-Point Values High to Low
avx_xxx(vmovhlps)

// Move Packed Single-Precision Floating-Point Values Low to High
avx_xxx(vmovlhps)

// Move High Packed Single-Precision Floating-Point Values
avx_xxm(vmovhps)
avx_mx(vmovhps)

// Move Low Packed Single-Precision Floating-Point Values
avx_xxm(vmovlps)
avx_mx(vmovlps)

// Extract Packed Double-Precision Floating-Point Sign Mask
avx_rx(vmovmskpd)

// Extract Packed Single-Precision Floating-Point Sign Mask
avx_rx(vmovmskps)

// Move Doubleword/Move Quadword
avx_xr(vmovd)
avx_xm(vmovd)
avx_rx(vmovd)
avx_mx(vmovd)
avx_xr(vmovq)
avx_xm(vmovq)
avx_rx(vmovq)
avx_mx(vmovq)

// Move or Merge Scalar Double-Precision Floating-Point Value
avx_xxx(vmovsd)
avx_xm(vmovsd)
avx_mx(vmovsd)

// Replicate Single FP Values
avx_xx(vmovshdup)
avx_xm(vmovshdup)

// Replicate Single FP Values
avx_xx(vmovsldup)
avx_xm(vmovsldup)

// Move or Merge Scalar Single-Precision Floating-Point Value
avx_xxx(vmovss)
avx_xm(vmovss)
avx_mx(vmovss)

// Move Unaligned Packed Double-Precision Floating-Point Values
avx_xx(vmovups)
avx_xm(vmovups)
avx_mx(vmovups)

// Compute Multiple Packed Sums of Absolute Difference
avx_xxxi(vmpsadbw)
avx_xxmi(vmpsadbw)

// Multiply Packed Double-Precision Floating-Point Values
avx_xxx(vmulpd)
avx_xxm(vmulpd)

// Multiply Packed Single-Precision Floating-Point Values
avx_xxx(vmulps)
avx_xxm(vmulps)

// Multiply Scalar Double-Precision Floating-Point Value
avx_xxx(vmulsd)
avx_xxm(vmulsd)

// Multiply Scalar Single-Precision Floating-Point Value
avx_xxx(vmulss)
avx_xxm(vmulss)

// Bitwise Logical OR of Packed Single Precision Floating-Point Values
avx_xxx(vorps)
avx_xxm(vorps)

// Packed Absolute Value
avx_xx(vpabsb)
avx_xm(vpabsb)
avx_xx(vpabsw)
avx_xm(vpabsw)
avx_xx(vpabsd)
avx_xm(vpabsd)

// Pack with Signed Saturation
avx_xxx(vpacksswb)
avx_xxm(vpacksswb)
avx_xxx(vpackssdw)
avx_xxm(vpackssdw)

// Pack with Unsigned Saturation
avx_xxx(vpackuswb)
avx_xxm(vpackuswb)
avx_xxx(vpackusdw)
avx_xxm(vpackusdw)

// Add Packed Integers
avx_xxx(vpaddb)
avx_xxm(vpaddb)
avx_xxx(vpaddw)
avx_xxm(vpaddw)
avx_xxx(vpaddd)
avx_xxm(vpaddd)
avx_xxx(vpaddq)
avx_xxm(vpaddq)

// Add Packed Signed Integers with Signed Saturation
avx_xxx(vpaddsb)
avx_xxm(vpaddsb)
avx_xxx(vpaddsw)
avx_xxm(vpaddsw)

// Add Packed Unsigned Integers with Unsigned Saturation
avx_xxx(vpaddusb)
avx_xxm(vpaddusb)
avx_xxx(vpaddusw)
avx_xxm(vpaddusw)

// Packed Align Right
avx_xxxi(vpalignr)
avx_xxmi(vpalignr)

// Logical AND
avx_xxx(vpand)
avx_xxm(vpand)

// Logical AND NOT
avx_xxx(vpandn)
avx_xxm(vpandn)

// Average Packed Integers
avx_xxx(vpavgb)
avx_xxm(vpavgb)
avx_xxx(vpavgw)
avx_xxm(vpavgw)

// Variable Blend Packed Bytes
avx_xxxx(vpblendvb)
avx_xxmx(vpblendvb)

// Blend Packed Words/Dwords
avx_xxxi(vpblendw)
avx_xxmi(vpblendw)
avx_xxxi(vpblendd)
avx_xxmi(vpblendd)

// Compare Packed Data for Equal
avx_xxx(vpcmpeqb)
avx_xxm(vpcmpeqb)
avx_xxx(vpcmpeqw)
avx_xxm(vpcmpeqw)
avx_xxx(vpcmpeqd)
avx_xxm(vpcmpeqd)
avx_xxx(vpcmpeqq)
avx_xxm(vpcmpeqq)

// Packed Compare Explicit Length Strings, Return Index
// Packed Compare Explicit Length Strings, Return Index.
// Explicit lengths travel in the architectural EAX (l1) and EDX (l2)
// registers; the resulting index is produced in ECX, hence the "a"/"d"
// inputs and "=c" output.  Imm selects format/aggregation/polarity bits
// (see avx::STR_CMP).
template<unsigned char Imm, typename T1, typename T2>
inline unsigned int avx_vpcmpestri (T1 op1, T2 op2, unsigned int l1, unsigned int l2) noexcept {
  unsigned int dst;
  asm volatile (
    "vpcmpestri %[imm], %[op2], %[op1]"
    :"=c"(dst)
    :"a"(l1), "d"(l2), [op1]AVX_ANY(op1), [op2]AVX_ANY(op2), [imm]X86_IMM(Imm)
  );
  return dst;
}
// Memory-source overload: op2 is a pointer to the second string fragment.
template<unsigned char Imm, typename T1>
inline unsigned int avx_vpcmpestri (T1 op1, const void *op2, unsigned int l1, unsigned int l2) noexcept {
  unsigned int dst;
  asm volatile (
    "vpcmpestri %[imm], (%[op2]), %[op1]"
    :"=c"(dst)
    :"a"(l1), "d"(l2), [op1]AVX_ANY(op1), [op2]X86_GPR(op2), [imm]X86_IMM(Imm)
  );
  return dst;
}

// Packed Compare Explicit Length Strings, Return Mask
template<unsigned char Imm, typename T1, typename T2>
inline unsigned int avx_vpcmpestrm (T1 op1, T2 op2, unsigned int l1, unsigned int l2) noexcept {
  register unsigned int dst asm("xmm0");
  asm volatile (
    "vpcmpestrm %[imm], %[op2], %[op1]"
    :[dst]"=" AVX_XMM(dst)
    :"a"(l1), "d"(l2), [op1]AVX_ANY(op1), [op2]X86_GPR(op2), [imm]X86_IMM(Imm)
  );
  return dst;
}
// Memory-source overload: op2 points at the second string fragment.
// Result mask is produced in XMM0; only its low 32 bits are returned.
// NOTE(review): binding an `unsigned int` to xmm0 via register-asm relies
// on compiler support for scalar ints in SSE registers — confirm per
// toolchain.
template<unsigned char Imm, typename T1>
inline unsigned int avx_vpcmpestrm (T1 op1, const void *op2, unsigned int l1, unsigned int l2) noexcept {
  register unsigned int dst asm("xmm0");
  asm volatile (
    "vpcmpestrm %[imm], (%[op2]), %[op1]"
    :[dst]"=" AVX_XMM(dst)
    :"a"(l1), "d"(l2), [op1]AVX_ANY(op1), [op2]X86_GPR(op2), [imm]X86_IMM(Imm)
  );
  return dst;
}

// Compare Packed Signed Integers for Greater Than
avx_xxx(vpcmpgtb)
avx_xxm(vpcmpgtb)
avx_xxx(vpcmpgtw)
avx_xxm(vpcmpgtw)
avx_xxx(vpcmpgtd)
avx_xxm(vpcmpgtd)
avx_xxx(vpcmpgtq)
avx_xxm(vpcmpgtq)

// Packed Compare Explicit Length Strings, Return Index
// Packed Compare Implicit Length Strings, Return Index.
// Implicit-length variant (strings are NUL-terminated within the vector);
// the index is produced in ECX, hence the "=c" output.
template<unsigned char Imm, typename T1, typename T2>
inline unsigned int avx_vpcmpistri (T1 op1, T2 op2) noexcept {
  unsigned int dst;
  asm volatile (
    "vpcmpistri %[imm], %[op2], %[op1]"
    :"=c"(dst)
    :[op1]AVX_ANY(op1), [op2]AVX_ANY(op2), [imm]X86_IMM(Imm)
  );
  return dst;
}
// Memory-source overload: op2 points at the second string fragment.
template<unsigned char Imm, typename T1>
inline unsigned int avx_vpcmpistri (T1 op1, const void *op2) noexcept {
  unsigned int dst;
  asm volatile (
    "vpcmpistri %[imm], (%[op2]), %[op1]"
    :"=c"(dst)
    :[op1]AVX_ANY(op1), [op2]X86_GPR(op2), [imm]X86_IMM(Imm)
  );
  return dst;
}

// Packed Compare Explicit Length Strings, Return Mask
// Packed Compare Implicit Length Strings, Return Mask.
// The mask is produced in the architectural XMM0; `dst` is pinned there
// and only the low 32 bits are returned.  NOTE(review): binding an
// `unsigned int` to xmm0 via register-asm relies on compiler support for
// scalar ints in SSE registers — confirm per toolchain.
template<unsigned char Imm, typename T1, typename T2>
inline unsigned int avx_vpcmpistrm (T1 op1, T2 op2) noexcept {
  register unsigned int dst asm("xmm0");
  asm volatile (
    "vpcmpistrm %[imm], %[op2], %[op1]"
    :"=" AVX_XMM(dst)
    :[op1]AVX_ANY(op1), [op2]AVX_ANY(op2), [imm]X86_IMM(Imm)
  );
  return dst;
}
// Memory-source overload: op2 points at the second string fragment.
template<unsigned char Imm, typename T1>
inline unsigned int avx_vpcmpistrm (T1 op1, const void *op2) noexcept {
  register unsigned int dst asm("xmm0");
  asm volatile (
    "vpcmpistrm %[imm], (%[op2]), %[op1]"
    :"=" AVX_XMM(dst)
    :[op1]AVX_ANY(op1), [op2]X86_GPR(op2), [imm]X86_IMM(Imm)
  );
  return dst;
}

// Extract Byte/Word/Dword/Qword
avx_rxi(vpextrb)
avx_mxi(vpextrb)
avx_rxi(vpextrw)
avx_mxi(vpextrw)
avx_rxi(vpextrd)
avx_mxi(vpextrd)
avx_rxi(vpextrq)
avx_mxi(vpextrq)

// Packed Horizontal Add
avx_xxx(vphaddw)
avx_xxm(vphaddw)
avx_xxx(vphaddd)
avx_xxm(vphaddd)

// Packed Horizontal Add and Saturate
avx_xxx(vphaddsw)
avx_xxm(vphaddsw)

// Packed Horizontal Word Minimum
avx_xx(vphminposuw)
avx_xm(vphminposuw)

// Packed Horizontal Subtract
avx_xxx(vphsubw)
avx_xxm(vphsubw)
avx_xxx(vphsubd)
avx_xxm(vphsubd)

// Packed Horizontal Subtract and Saturate
avx_xxx(vphsubsw)
avx_xxm(vphsubsw)

// Insert Byte/Word/Dword/Qword
avx_xxri(vpinsrb)
avx_xxmi(vpinsrb)
avx_xxri(vpinsrw)
avx_xxmi(vpinsrw)
avx_xxri(vpinsrd)
avx_xxmi(vpinsrd)
avx_xxri(vpinsrq)
avx_xxmi(vpinsrq)

// Multiply and Add Packed Signed and Unsigned Bytes
avx_xxx(vpmaddubsw)
avx_xxm(vpmaddubsw)

// Multiply and Add Packed Integers
avx_xxx(vpmaddwd)
avx_xxm(vpmaddwd)

// Maximum of Packed Signed Integers
avx_xxx(vpmaxsb)
avx_xxm(vpmaxsb)
avx_xxx(vpmaxsw)
avx_xxm(vpmaxsw)
avx_xxx(vpmaxsd)
avx_xxm(vpmaxsd)

// Maximum of Packed Unsigned Integers
avx_xxx(vpmaxub)
avx_xxm(vpmaxub)
avx_xxx(vpmaxuw)
avx_xxm(vpmaxuw)
avx_xxx(vpmaxud)
avx_xxm(vpmaxud)

// Minimum of Packed Signed Integers
avx_xxx(vpminsb)
avx_xxm(vpminsb)
avx_xxx(vpminsw)
avx_xxm(vpminsw)
avx_xxx(vpminsd)
avx_xxm(vpminsd)

// Minimum of Packed Unsigned Integers
avx_xxx(vpminub)
avx_xxm(vpminub)
avx_xxx(vpminuw)
avx_xxm(vpminuw)
avx_xxx(vpminud)
avx_xxm(vpminud)

// Move Byte Mask
avx_rx(vpmovmskb)

// Packed Move with Sign Extend
avx_xx(vpmovsxbw)
avx_xm(vpmovsxbw)
avx_xx(vpmovsxbd)
avx_xm(vpmovsxbd)
avx_xx(vpmovsxbq)
avx_xm(vpmovsxbq)
avx_xx(vpmovsxwd)
avx_xm(vpmovsxwd)
avx_xx(vpmovsxwq)
avx_xm(vpmovsxwq)
avx_xx(vpmovsxdq)
avx_xm(vpmovsxdq)

// Packed Move with Zero Extend
avx_xx(vpmovzxbw)
avx_xm(vpmovzxbw)
avx_xx(vpmovzxbd)
avx_xm(vpmovzxbd)
avx_xx(vpmovzxbq)
avx_xm(vpmovzxbq)
avx_xx(vpmovzxwd)
avx_xm(vpmovzxwd)
avx_xx(vpmovzxwq)
avx_xm(vpmovzxwq)
avx_xx(vpmovzxdq)
avx_xm(vpmovzxdq)

// Multiply Packed Signed Dword Integers
avx_xxx(vpmuldq)
avx_xxm(vpmuldq)

// Multiply Packed Unsigned Doubleword Integers
avx_xxx(vpmuludq)
avx_xxm(vpmuludq)

// Packed Multiply High with Round and Scale
avx_xxx(vpmulhrsw)
avx_xxm(vpmulhrsw)

// Multiply Packed Unsigned Integers and Store High Result
avx_xxx(vpmulhuw)
avx_xxm(vpmulhuw)

// Multiply Packed Signed Integers and Store High Result
avx_xxx(vpmulhw)
avx_xxm(vpmulhw)

// Multiply Packed Signed Integers and Store Low Result
// (note: vpmullq below is an AVX-512DQ+VL instruction, not plain AVX/AVX2)
avx_xxx(vpmullw)
avx_xxm(vpmullw)
avx_xxx(vpmulld)
avx_xxm(vpmulld)
avx_xxx(vpmullq)
avx_xxm(vpmullq)

// Bitwise Logical OR
avx_xxx(vpor)
avx_xxm(vpor)

// Compute Sum of Absolute Differences
avx_xxx(vpsadbw)
avx_xxm(vpsadbw)

// Packed Shuffle Bytes
avx_xxx(vpshufb)
avx_xxm(vpshufb)

// Shuffle Packed Doublewords
avx_xxi(vpshufd)
avx_xmi(vpshufd)

// Shuffle Packed High Words
avx_xxi(vpshufhw)
avx_xmi(vpshufhw)

// Shuffle Packed Low Words
avx_xxi(vpshuflw)
avx_xmi(vpshuflw)

// Packed SIGN
avx_xxx(vpsignb)
avx_xxm(vpsignb)
avx_xxx(vpsignw)
avx_xxm(vpsignw)
avx_xxx(vpsignd)
avx_xxm(vpsignd)

// Shift Double Quadword Left Logical
avx_xxi(vpslldq)

// Shift Packed Data Left Logical
avx_xxx(vpsllw)
avx_xxi(vpsllw)
avx_xxx(vpslld)
avx_xxi(vpslld)
avx_xxx(vpsllq)
avx_xxi(vpsllq)

// Shift Packed Data Right Arithmetic
avx_xxx(vpsraw)
avx_xxi(vpsraw)
avx_xxx(vpsrad)
avx_xxi(vpsrad)

// Shift Double Quadword Right Logical
avx_xxi(vpsrldq)

// Shift Packed Data Right Logical
avx_xxx(vpsrlw)
avx_xxi(vpsrlw)
avx_xxx(vpsrld)
avx_xxi(vpsrld)
avx_xxx(vpsrlq)
avx_xxi(vpsrlq)

// Subtract Packed Integers
avx_xxx(vpsubb)
avx_xxm(vpsubb)
avx_xxx(vpsubw)
avx_xxm(vpsubw)
avx_xxx(vpsubd)
avx_xxm(vpsubd)
avx_xxx(vpsubq)
avx_xxm(vpsubq)

// Subtract Packed Signed Integers with Signed Saturation
avx_xxx(vpsubsb)
avx_xxm(vpsubsb)
avx_xxx(vpsubsw)
avx_xxm(vpsubsw)

// Subtract Packed Unsigned Integers with Unsigned Saturation
avx_xxx(vpsubusb)
avx_xxm(vpsubusb)
avx_xxx(vpsubusw)
avx_xxm(vpsubusw)

// Logical Compare (Packed Bit Test): sets ZF/CF from AND / ANDN of the operands
avx_xx(vptest)
avx_xm(vptest)

// Unpack High Data
avx_xxx(vpunpckhbw)
avx_xxm(vpunpckhbw)
avx_xxx(vpunpckhwd)
avx_xxm(vpunpckhwd)
avx_xxx(vpunpckhdq)
avx_xxm(vpunpckhdq)
avx_xxx(vpunpckhqdq)
avx_xxm(vpunpckhqdq)

// Unpack Low Data
avx_xxx(vpunpcklbw)
avx_xxm(vpunpcklbw)
avx_xxx(vpunpcklwd)
avx_xxm(vpunpcklwd)
avx_xxx(vpunpckldq)
avx_xxm(vpunpckldq)
avx_xxx(vpunpcklqdq)
avx_xxm(vpunpcklqdq)

// Logical Exclusive OR
avx_xxx(vpxor)
avx_xxm(vpxor)

// Compute Reciprocals of Packed Single-Precision Floating-Point Values
avx_xx(vrcpps)
avx_xm(vrcpps)

// Compute Reciprocals of Scalar Single-Precision Floating-Point Values
avx_xxx(vrcpss)
avx_xxm(vrcpss)

// Round Packed Double Precision Floating-Point Values
avx_xxi(vroundpd)
avx_xmi(vroundpd)

// Round Packed Single Precision Floating-Point Values
avx_xxi(vroundps)
avx_xmi(vroundps)

// Round Scalar Double Precision Floating-Point Values
avx_xxi(vroundsd)
avx_xmi(vroundsd)

// Round Scalar Single Precision Floating-Point Values
avx_xxi(vroundss)
avx_xmi(vroundss)

// Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values
avx_xx(vrsqrtps)
avx_xm(vrsqrtps)

// Compute Reciprocals of Square Roots of Scalar Single-Precision Floating-Point Values
avx_xxx(vrsqrtss)
avx_xxm(vrsqrtss)

// Shuffle Packed Double-Precision Floating-Point Values
avx_xxxi(vshufpd)
avx_xxmi(vshufpd)

// Shuffle Packed Single-Precision Floating-Point Values
avx_xxxi(vshufps)
avx_xxmi(vshufps)

// Compute Square Roots of Packed Double-Precision Floating-Point Values
avx_xx(vsqrtpd)
avx_xm(vsqrtpd)

// Compute Square Roots of Packed Single-Precision Floating-Point Values
avx_xx(vsqrtps)
avx_xm(vsqrtps)

// Compute Square Roots of Scalar Double-Precision Floating-Point Values
avx_xxx(vsqrtsd)
avx_xxm(vsqrtsd)

// Compute Square Roots of Scalar Single-Precision Floating-Point Values
avx_xxx(vsqrtss)
avx_xxm(vsqrtss)

// Store MXCSR Register State
inline void avx_vstmxcsr (unsigned int *dst) noexcept {
  asm volatile (
    "vstmxcsr (%[dst])"
    :
    :[dst]X86_GPR(dst)
  );
}

// Subtract Packed Double-Precision Floating-Point Values
avx_xxx(vsubpd)
avx_xxm(vsubpd)

// Subtract Packed Single-Precision Floating-Point Values
avx_xxx(vsubps)
avx_xxm(vsubps)

// Subtract Scalar Double-Precision Floating-Point Values
avx_xxx(vsubsd)
avx_xxm(vsubsd)

// Subtract Scalar Single-Precision Floating-Point Values
avx_xxx(vsubss)
avx_xxm(vsubss)

// Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS
avx_xx(vucomisd)
avx_xm(vucomisd)

// Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS
avx_xx(vucomiss)
avx_xm(vucomiss)

// Unpack and Interleave High Packed Double-Precision Floating-Point Values
avx_xxx(vunpckhpd)
avx_xxm(vunpckhpd)

// Unpack and Interleave High Packed Single-Precision Floating-Point Values
avx_xxx(vunpckhps)
avx_xxm(vunpckhps)

// Unpack and Interleave Low Packed Double-Precision Floating-Point Values
avx_xxx(vunpcklpd)
avx_xxm(vunpcklpd)

// Unpack and Interleave Low Packed Single-Precision Floating-Point Values
avx_xxx(vunpcklps)
avx_xxm(vunpcklps)

// Broadcast Floating-Point Data
avx_xm(vbroadcastss)
avx_xm(vbroadcastsd)
avx_xm(vbroadcastf128)

// Extract Packed Floating-Point Values
avx_xxi(vextractf128)
avx_mxi(vextractf128)

// Fused Multiply-Add of Packed Double-Precision Floating-Point Values
avx_xxx(vfmadd132pd)
avx_xxm(vfmadd132pd)
avx_xxx(vfmadd213pd)
avx_xxm(vfmadd213pd)
avx_xxx(vfmadd231pd)
avx_xxm(vfmadd231pd)

// Fused Multiply-Add of Packed Single-Precision Floating-Point Values
avx_xxx(vfmadd132ps)
avx_xxm(vfmadd132ps)
avx_xxx(vfmadd213ps)
avx_xxm(vfmadd213ps)
avx_xxx(vfmadd231ps)
avx_xxm(vfmadd231ps)

// Fused Multiply-Add of Scalar Double-Precision Floating-Point Values
avx_xxx(vfmadd132sd)
avx_xxm(vfmadd132sd)
avx_xxx(vfmadd213sd)
avx_xxm(vfmadd213sd)
avx_xxx(vfmadd231sd)
avx_xxm(vfmadd231sd)

// Fused Multiply-Add of Scalar Single-Precision Floating-Point Values
avx_xxx(vfmadd132ss)
avx_xxm(vfmadd132ss)
avx_xxx(vfmadd213ss)
avx_xxm(vfmadd213ss)
avx_xxx(vfmadd231ss)
avx_xxm(vfmadd231ss)

// Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values
avx_xxx(vfmaddsub132pd)
avx_xxm(vfmaddsub132pd)
avx_xxx(vfmaddsub213pd)
avx_xxm(vfmaddsub213pd)
avx_xxx(vfmaddsub231pd)
avx_xxm(vfmaddsub231pd)

// Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values
avx_xxx(vfmaddsub132ps)
avx_xxm(vfmaddsub132ps)
avx_xxx(vfmaddsub213ps)
avx_xxm(vfmaddsub213ps)
avx_xxx(vfmaddsub231ps)
avx_xxm(vfmaddsub231ps)

// Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values
avx_xxx(vfmsubadd132pd)
avx_xxm(vfmsubadd132pd)
avx_xxx(vfmsubadd213pd)
avx_xxm(vfmsubadd213pd)
avx_xxx(vfmsubadd231pd)
avx_xxm(vfmsubadd231pd)

// Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values
avx_xxx(vfmsubadd132ps)
avx_xxm(vfmsubadd132ps)
avx_xxx(vfmsubadd213ps)
avx_xxm(vfmsubadd213ps)
avx_xxx(vfmsubadd231ps)
avx_xxm(vfmsubadd231ps)

// Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values
avx_xxx(vfmsub132pd)
avx_xxm(vfmsub132pd)
avx_xxx(vfmsub213pd)
avx_xxm(vfmsub213pd)
avx_xxx(vfmsub231pd)
avx_xxm(vfmsub231pd)

// Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values
avx_xxx(vfmsub132ps)
avx_xxm(vfmsub132ps)
avx_xxx(vfmsub213ps)
avx_xxm(vfmsub213ps)
avx_xxx(vfmsub231ps)
avx_xxm(vfmsub231ps)

// Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values
avx_xxx(vfmsub132sd)
avx_xxm(vfmsub132sd)
avx_xxx(vfmsub213sd)
avx_xxm(vfmsub213sd)
avx_xxx(vfmsub231sd)
avx_xxm(vfmsub231sd)

// Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values
avx_xxx(vfmsub132ss)
avx_xxm(vfmsub132ss)
avx_xxx(vfmsub213ss)
avx_xxm(vfmsub213ss)
avx_xxx(vfmsub231ss)
avx_xxm(vfmsub231ss)

// Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values
avx_xxx(vfnmadd132pd)
avx_xxm(vfnmadd132pd)
avx_xxx(vfnmadd213pd)
avx_xxm(vfnmadd213pd)
avx_xxx(vfnmadd231pd)
avx_xxm(vfnmadd231pd)

// Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values
avx_xxx(vfnmadd132ps)
avx_xxm(vfnmadd132ps)
avx_xxx(vfnmadd213ps)
avx_xxm(vfnmadd213ps)
avx_xxx(vfnmadd231ps)
avx_xxm(vfnmadd231ps)

// Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values
avx_xxx(vfnmadd132sd)
avx_xxm(vfnmadd132sd)
avx_xxx(vfnmadd213sd)
avx_xxm(vfnmadd213sd)
avx_xxx(vfnmadd231sd)
avx_xxm(vfnmadd231sd)

// Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values
avx_xxx(vfnmadd132ss)
avx_xxm(vfnmadd132ss)
avx_xxx(vfnmadd213ss)
avx_xxm(vfnmadd213ss)
avx_xxx(vfnmadd231ss)
avx_xxm(vfnmadd231ss)

// Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values
avx_xxx(vfnmsub132pd)
avx_xxm(vfnmsub132pd)
avx_xxx(vfnmsub213pd)
avx_xxm(vfnmsub213pd)
avx_xxx(vfnmsub231pd)
avx_xxm(vfnmsub231pd)

// Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values
avx_xxx(vfnmsub132ps)
avx_xxm(vfnmsub132ps)
avx_xxx(vfnmsub213ps)
avx_xxm(vfnmsub213ps)
avx_xxx(vfnmsub231ps)
avx_xxm(vfnmsub231ps)

// Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values
avx_xxx(vfnmsub132sd)
avx_xxm(vfnmsub132sd)
avx_xxx(vfnmsub213sd)
avx_xxm(vfnmsub213sd)
avx_xxx(vfnmsub231sd)
avx_xxm(vfnmsub231sd)

// Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values
avx_xxx(vfnmsub132ss)
avx_xxm(vfnmsub132ss)
avx_xxx(vfnmsub213ss)
avx_xxm(vfnmsub213ss)
avx_xxx(vfnmsub231ss)
avx_xxm(vfnmsub231ss)

// Extract packed Integer Values
avx_xxi(vextracti128)
avx_mxi(vextracti128)

// Insert Packed Floating-Point Values
avx_xxxi(vinsertf128)
avx_xxmi(vinsertf128)

// Insert Packed Integer Values
avx_xxxi(vinserti128)
avx_xxmi(vinserti128)

// Conditional SIMD Packed Loads and Stores
avx_xxm(vmaskmovps)
avx_mxx(vmaskmovps)
avx_xxm(vmaskmovpd)
avx_mxx(vmaskmovpd)

// Broadcast Integer Data
avx_xx(vpbroadcastb)
avx_xm(vpbroadcastb)
avx_xx(vpbroadcastw)
avx_xm(vpbroadcastw)
avx_xx(vpbroadcastd)
avx_xm(vpbroadcastd)
avx_xx(vpbroadcastq)
avx_xm(vpbroadcastq)
avx_xm(vpbroadcasti128)

// Permute Floating-Point Values
avx_xxxi(vperm2f128)
avx_xxmi(vperm2f128)

// Permute Integer Values
avx_xxxi(vperm2i128)
avx_xxmi(vperm2i128)

// Full Doublewords Element Permutation
avx_xxx(vpermd)
avx_xxm(vpermd)

// Permute Double-Precision Floating-Point Elements
avx_xxi(vpermpd)
avx_xmi(vpermpd)

// Permute Single-Precision Floating-Point Elements
// Fix: VPERMPS takes its permute control in a vector register
// (VPERMPS ymm1, ymm2, ymm3/m256), exactly like VPERMD above — it has no
// imm8 form.  The previous avx_xxi/avx_xmi stamps emitted a
// "$imm, src, dst" operand pattern that cannot be assembled.
avx_xxx(vpermps)
avx_xxm(vpermps)

// Qwords Element Permutation
avx_xxi(vpermq)
avx_xmi(vpermq)

// Permute Double-Precision Floating-Point Values
avx_xxx(vpermilpd)
avx_xxm(vpermilpd)
avx_xxi(vpermilpd)
avx_xmi(vpermilpd)

// Permute Single-Precision Floating-Point Values
avx_xxx(vpermilps)
avx_xxm(vpermilps)
avx_xxi(vpermilps)
avx_xmi(vpermilps)

// Conditional SIMD Integer Packed Loads and Stores
avx_xxm(vpmaskmovd)
avx_mxx(vpmaskmovd)
avx_xxm(vpmaskmovq)
avx_mxx(vpmaskmovq)

// Variable Bit Shift Left Logical
avx_xxx(vpsllvd)
avx_xxm(vpsllvd)
avx_xxx(vpsllvq)
avx_xxm(vpsllvq)

// Variable Bit Shift Right Arithmetic
avx_xxx(vpsravd)
avx_xxm(vpsravd)

// Variable Bit Shift Right Logical
avx_xxx(vpsrlvd)
avx_xxm(vpsrlvd)
avx_xxx(vpsrlvq)
avx_xxm(vpsrlvq)

// Packed Bit Test
avx_xx(vtestpd)
avx_xm(vtestpd)
avx_xx(vtestps)
avx_xm(vtestps)

// Zero All YMM Registers
// Zero All YMM Registers (clears ymm0-ymm15 entirely).
inline void avx_vzeroall () noexcept {
  asm volatile ("vzeroall");
}

// Zero Upper Bits of YMM Registers: clears bits 255:128 of every ymm,
// avoiding the SSE/AVX transition penalty before legacy-SSE code runs.
inline void avx_vzeroupper () noexcept {
  asm volatile ("vzeroupper");
}

// Bitwise Logical XOR for Double-Precision Floating-Point Values
avx_xxx(vxorpd)
avx_xxm(vxorpd)

// Bitwise Logical XOR for Single-Precision Floating-Point Values
avx_xxx(vxorps)
avx_xxm(vxorps)

// Load Bytes With Mask
// Load Bytes With Mask.
// Expands the low 16 bits of `msk` into a byte mask (PDEP deposits one
// mask bit into the high bit of each of 8 bytes, done twice for the two
// 64-bit lanes), then blends bytes loaded from `src` over `raw` wherever
// the corresponding mask bit is set.  Requires BMI2 (PDEP) + AVX.
template<typename Tr>
inline Tr avx_vmskmovb128 (Tr raw, const void *src, unsigned long long msk) noexcept {
  // Fix: was `const static Tr dst;` — an uninitialized const is ill-formed
  // and a static local cannot serve as a "=&" asm output; a plain local is
  // what the asm below actually needs.
  Tr dst;
  unsigned long long tmp;
  asm volatile (
    "pdep %[imm], %[msk], %[tmp]\n\t"
    "vmovq %[tmp], %[dst]\n\t"
    "shr $8, %[msk]\n\t"
    "pdep %[imm], %[msk], %[tmp]\n\t"
    "vpinsrq $1, %[tmp], %[dst], %[dst]\n\t"
    "vpblendvb %[dst], (%[src]), %[raw], %[dst]"
    :[dst]"=&" AVX_XMM(dst), [tmp]"=&" X86_GPR(tmp), [msk]"+" X86_GPR(msk)
    :[raw]AVX_XMM(raw), [src]X86_GPR(src), [imm]X86_GPR(0x8080808080808080UL)
    :"memory"   // the blend reads 16 bytes at (%[src])
  );
  return dst;
}



#undef avx_mx
#undef avx_mxx
#undef avx_mxi
#undef avx_rx
#undef avx_rxi
#undef avx_rm
#undef avx_xx
#undef avx_xr
#undef avx_xm
#undef avx_xxx
#undef avx_xxr
#undef avx_xxm
#undef avx_xxi
#undef avx_xmi
#undef avx_xxxx
#undef avx_xxmx
#undef avx_xxxi
#undef avx_xxri
#undef avx_xxmi