#ifndef AVX_EXTRA_H
#define AVX_EXTRA_H

typedef unsigned long long __m64;

/* SSE2 _mm_slli_epi32: shift each 32-bit lane of a left by imm8.
 * Counts outside [0, 31] yield zero, matching x86 semantics.
 * NOTE: vshlq_n_s32 demands a compile-time-constant shift, so a runtime
 * imm8 does not compile; use the register form vshlq_s32 instead — the
 * same technique _mm_srli_epi64 in this file already uses. */
FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, int imm8)
{
    __m128i result;
    if (likely(imm8 >= 0 && imm8 < 32)) {
        result.vect_s32 = vshlq_s32(a.vect_s32, vdupq_n_s32(imm8));
    } else {
        result.vect_s32 = vdupq_n_s32(0);
    }
    return result;
}

/* SSE _mm_set1_ps: broadcast the scalar a into all four float lanes. */
FORCE_INLINE __m128 _mm_set1_ps (float a)
{
    return vdupq_n_f32(a);
}

/* SSE2 _mm_set1_epi8: replicate the byte w across all 16 lanes. */
FORCE_INLINE __m128i _mm_set1_epi8(char w)
{
    __m128i out;
    out.vect_s8 = vdupq_n_s8(w);
    return out;
}

/* SSE2 _mm_slli_epi64: shift both 64-bit lanes of a left by imm8.
 * Counts outside [0, 63] yield zero, matching x86 semantics.
 * NOTE: vshlq_n_s64 demands a compile-time-constant shift, so a runtime
 * imm8 does not compile; use the register form vshlq_s64 instead — the
 * same technique _mm_srli_epi64 in this file already uses. */
FORCE_INLINE __m128i _mm_slli_epi64 (__m128i a, int imm8)
{
    __m128i res;
    if (likely(imm8 >= 0 && imm8 < 64)){
        res.vect_s64 = vshlq_s64(a.vect_s64, vdupq_n_s64(imm8));
    } else {
        res.vect_s64 = vdupq_n_s64(0);
    }
    return res;
}

/* SSE2 _mm_srli_epi64: logical right shift of both 64-bit lanes by imm8.
 * NEON has no variable right-shift intrinsic, so we shift left by the
 * negated count. Counts outside [0, 63] produce zero, as on x86. */
FORCE_INLINE __m128i _mm_srli_epi64 (__m128i a, int imm8)
{
    __m128i out;
    if (imm8 < 0 || imm8 > 63) {
        out.vect_u64 = vdupq_n_u64(0);
        return out;
    }
    int64x2_t neg_count = vdupq_n_s64(-imm8);
    out.vect_u64 = vshlq_u64(a.vect_u64, neg_count);
    return out;
}

/* SSE2 _mm_set1_epi32: replicate the 32-bit value _i across all 4 lanes. */
FORCE_INLINE __m128i _mm_set1_epi32(int _i)
{
    __m128i out;
    out.vect_s32 = vdupq_n_s32(_i);
    return out;
}

/* SSE2 _mm_cmpeq_epi32: per-lane 32-bit equality; equal lanes become
 * all-ones, unequal lanes become zero. */
FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
{
    __m128i out;
    uint32x4_t eq_mask = vceqq_s32(a.vect_s32, b.vect_s32);
    out.vect_u32 = eq_mask;
    return out;
}

/* SSE2 _mm_cvtsi32_si128: place a in lane 0, zero the remaining lanes. */
FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
{
    __m128i out;
    int32x4_t zeros = vdupq_n_s32(0);
    out.vect_s32 = vsetq_lane_s32(a, zeros, 0);
    return out;
}

/* SSE2 _mm_xor_si128: bitwise XOR of the full 128-bit registers. */
FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
{
    __m128i out;
    out.vect_s32 = veorq_s32(a.vect_s32, b.vect_s32);
    return out;
}

/* SSE2 _mm_storeu_si128: store 128 bits to p; no alignment required
 * (NEON vst1q tolerates unaligned addresses on AArch64). */
FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
{
    vst1q_u8((uint8_t *) p, a.vect_u8);
}

/* SSE2 _mm_loadu_si128: load 128 bits from p; no alignment required
 * (NEON vld1q tolerates unaligned addresses on AArch64). */
FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p)
{
    __m128i out;
    out.vect_u8 = vld1q_u8((const uint8_t *) p);
    return out;
}

/* SSE2 _mm_andnot_si128: compute (~a) & b. NEON's BIC clears the bits
 * of its second operand from its first, so vbicq(b, a) == b & ~a. */
FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
{
    __m128i out;
    out.vect_s32 = vbicq_s32(b.vect_s32, a.vect_s32);
    return out;
}

/* SSE2 _mm_packs_epi16: narrow the 16-bit lanes of a and b to 8 bits
 * with signed saturation; a supplies the low half, b the high half. */
FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
{
    __m128i out;
    int8x8_t low_half = vqmovn_s16(a.vect_s16);
    int8x8_t high_half = vqmovn_s16(b.vect_s16);
    out.vect_s8 = vcombine_s8(low_half, high_half);
    return out;
}

/* SSE2 _mm_packs_epi32: narrow the 32-bit lanes of a and b to 16 bits
 * with signed saturation; a supplies the low half, b the high half. */
FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
{
    __m128i out;
    int16x4_t low_half = vqmovn_s32(a.vect_s32);
    int16x4_t high_half = vqmovn_s32(b.vect_s32);
    out.vect_s16 = vcombine_s16(low_half, high_half);
    return out;
}

/* SSE _mm_movemask_ps: collect the sign bit of each float lane into the
 * low 4 bits of the result (lane 0 -> bit 0, ..., lane 3 -> bit 3). */
FORCE_INLINE int _mm_movemask_ps(__m128 a)
{
    static const uint32x4_t movemask = { 1, 2, 4, 8 };
    static const uint32x4_t highbit = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
    /* Reinterpret the float bits as u32 so the sign bit can be tested. */
    uint32x4_t t0 = vreinterpretq_u32_f32(a);
    /* Lanes with the sign bit set become all-ones, others zero. */
    uint32x4_t t1 = vtstq_u32(t0, highbit);
    /* Keep only each lane's destination bit (1, 2, 4, 8). */
    uint32x4_t t2 = vandq_u32(t1, movemask);
    /* OR the four lanes together pairwise, then across the final pair. */
    uint32x2_t t3 = vorr_u32(vget_low_u32(t2), vget_high_u32(t2));
    return vget_lane_u32(t3, 0) | vget_lane_u32(t3,1);
}

/* SSE2 _mm_movemask_epi8: collect the most significant bit of each of the
 * 16 byte lanes into a 16-bit integer (byte 0 -> bit 0, ..., byte 15 ->
 * bit 15). Each half is processed as a uint8x8_t: the MSB is isolated,
 * shifted down to the byte's lane position (negative vshl counts shift
 * right), and three pairwise adds sum the 8 per-lane bits into lane 0. */
FORCE_INLINE int _mm_movemask_epi8(__m128i _a)
{
    /* Per-lane shift counts: lane i shifts right by (7 - i), so lane i's
     * 0x80 bit lands on bit position i. */
    static const int8_t __attribute__((aligned(16))) xr[8] = { -7, -6, -5, -4, -3, -2, -1, 0 };
    uint8x8_t mask_and = vdup_n_u8(0x80);
    int8x8_t mask_shift = vld1_s8(xr);

    uint8x8_t lo = vget_low_u8(_a.vect_u8);
    uint8x8_t hi = vget_high_u8(_a.vect_u8);

    /* Isolate each byte's MSB, then move it to that byte's bit slot. */
    lo = vand_u8(lo, mask_and);
    lo = vshl_u8(lo, mask_shift);

    hi = vand_u8(hi, mask_and);
    hi = vshl_u8(hi, mask_shift);

    /* Three pairwise adds fold the 8 lanes into lane 0 (bits are in
     * disjoint positions, so addition acts as OR). */
    lo = vpadd_u8(lo, lo);
    lo = vpadd_u8(lo, lo);
    lo = vpadd_u8(lo, lo);

    hi = vpadd_u8(hi, hi);
    hi = vpadd_u8(hi, hi);
    hi = vpadd_u8(hi, hi);

    /* Low half fills bits 0-7; high half fills bits 8-15. */
    return ((hi[0] << 8) | (lo[0] & 0xFF));
}

/* SSE2 _mm_sub_epi8: per-lane 8-bit subtraction a - b (wrapping). */
FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b)
{
    __m128i out;
    out.vect_s8 = vsubq_s8(a.vect_s8, b.vect_s8);
    return out;
}

/* SSE2 _mm_and_si128: bitwise AND of the full 128-bit registers. */
FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
{
    __m128i out;
    out.vect_s32 = vandq_s32(a.vect_s32, b.vect_s32);
    return out;
}

/* SSE2 _mm_cmpeq_epi8: per-lane byte equality; equal lanes become 0xFF,
 * unequal lanes become 0x00. */
FORCE_INLINE __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b)
{
    __m128i out;
    uint8x16_t eq_mask = vceqq_s8(a.vect_s8, b.vect_s8);
    out.vect_u8 = eq_mask;
    return out;
}

/* SSE2 _mm_setzero_si128: return an all-zero 128-bit register.
 * Prototype uses (void): in C, an empty parameter list declares a
 * function with unspecified arguments, not a zero-argument one. */
FORCE_INLINE __m128i _mm_setzero_si128(void)
{
    __m128i res;
    res.vect_s32 = vdupq_n_s32(0);
    return res;
}

/* SSE2 _mm_castsi128_ps: reinterpret the 128 integer bits as four floats
 * (bit pattern preserved, no conversion). */
FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
{
    return vreinterpretq_f32_s32(a.vect_s32);
}

/* SSE2 _mm_or_si128: bitwise OR of the full 128-bit registers. */
FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
{
    __m128i out;
    out.vect_s32 = vorrq_s32(a.vect_s32, b.vect_s32);
    return out;
}

/* SSE2 _mm_load_si128: load 128 bits from p. The x86 original requires
 * 16-byte alignment; NEON vld1q does not enforce it here. */
FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
{
    __m128i out;
    out.vect_s32 = vld1q_s32((const int32_t *) p);
    return out;
}

/* SSSE3 _mm_shuffle_epi8 (PSHUFB): per-byte shuffle of a by the low 4
 * bits of each control byte in b; a control byte with its high bit set
 * zeroes that destination byte.
 * Masking with 0x8F keeps the sign bit plus the 4 index bits: indices
 * with bit 7 set become >= 0x80, which are out of range for vqtbl1q_u8
 * and therefore yield 0 — exactly PSHUFB's zeroing rule.
 * The mask table is static const so it is not rebuilt on the stack at
 * every call. */
FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b)
{
    __m128i res;
    static const uint8_t __attribute__((aligned(16)))
    mask[16] = {0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F,
            0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F};
    uint8x16_t idx_masked = vandq_u8(b.vect_u8, vld1q_u8(mask));

    res.vect_u8 = vqtbl1q_u8(a.vect_u8, idx_masked);
    return res;
}

/* SSE2 _mm_max_epu8: per-lane unsigned byte maximum. */
FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b)
{
    __m128i out;
    out.vect_u8 = vmaxq_u8(a.vect_u8, b.vect_u8);
    return out;
}

/* SSE2 _mm_min_epu8: per-lane unsigned byte minimum. */
FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b)
{
    __m128i out;
    out.vect_u8 = vminq_u8(a.vect_u8, b.vect_u8);
    return out;
}

/* SSE2 _mm_adds_epu8: per-lane unsigned byte addition with saturation
 * at 0xFF. */
FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b)
{
    __m128i out;
    out.vect_u8 = vqaddq_u8(a.vect_u8, b.vect_u8);
    return out;
}

/* AVX-512 _mm512_setzero_si512: return an all-zero 512-bit register,
 * modelled as four zeroed 128-bit NEON lanes.
 * Prototype uses (void): in C, an empty parameter list declares a
 * function with unspecified arguments, not a zero-argument one. */
FORCE_INLINE __m512i _mm512_setzero_si512 (void)
{
    __m512i res;
    for (int i = 0; i < 4; i++) {
        res.vect_s32[i] = vdupq_n_s32(0);
    }
    return res;
}

/* SSE2 _mm_srli_si128: shift the whole 128-bit register right by imm8
 * BYTES, shifting in zeros. imm8 <= 0 returns a unchanged; imm8 > 15
 * returns zero.
 * NOTE: vextq_s8 requires a compile-time-constant lane count, so a
 * runtime imm8 cannot be passed directly; dispatch through a switch so
 * every vextq_s8 call sees a literal. */
FORCE_INLINE __m128i _mm_srli_si128 (__m128i a, int imm8)
{
    __m128i res;
    int8x16_t zero = vdupq_n_s8(0);
    if (imm8 <= 0) {
        return a;
    }
    switch (imm8) {
    case 1:  res.vect_s8 = vextq_s8(a.vect_s8, zero, 1);  break;
    case 2:  res.vect_s8 = vextq_s8(a.vect_s8, zero, 2);  break;
    case 3:  res.vect_s8 = vextq_s8(a.vect_s8, zero, 3);  break;
    case 4:  res.vect_s8 = vextq_s8(a.vect_s8, zero, 4);  break;
    case 5:  res.vect_s8 = vextq_s8(a.vect_s8, zero, 5);  break;
    case 6:  res.vect_s8 = vextq_s8(a.vect_s8, zero, 6);  break;
    case 7:  res.vect_s8 = vextq_s8(a.vect_s8, zero, 7);  break;
    case 8:  res.vect_s8 = vextq_s8(a.vect_s8, zero, 8);  break;
    case 9:  res.vect_s8 = vextq_s8(a.vect_s8, zero, 9);  break;
    case 10: res.vect_s8 = vextq_s8(a.vect_s8, zero, 10); break;
    case 11: res.vect_s8 = vextq_s8(a.vect_s8, zero, 11); break;
    case 12: res.vect_s8 = vextq_s8(a.vect_s8, zero, 12); break;
    case 13: res.vect_s8 = vextq_s8(a.vect_s8, zero, 13); break;
    case 14: res.vect_s8 = vextq_s8(a.vect_s8, zero, 14); break;
    case 15: res.vect_s8 = vextq_s8(a.vect_s8, zero, 15); break;
    default: /* imm8 > 15: everything shifted out */
        res.vect_s8 = zero;
        break;
    }
    return res;
}

/* SSE2 _mm_slli_si128: shift the whole 128-bit register left by imm8
 * BYTES, shifting in zeros. imm8 <= 0 returns a unchanged; imm8 > 15
 * returns zero.
 * NOTE: vextq_s8 requires a compile-time-constant lane count, so a
 * runtime imm8 cannot be passed directly; dispatch through a switch so
 * every vextq_s8 call sees a literal (16 - imm8 for a left shift). */
FORCE_INLINE __m128i _mm_slli_si128 (__m128i a, int imm8)
{
    __m128i res;
    int8x16_t zero = vdupq_n_s8(0);
    if (imm8 <= 0) {
        return a;
    }
    switch (imm8) {
    case 1:  res.vect_s8 = vextq_s8(zero, a.vect_s8, 15); break;
    case 2:  res.vect_s8 = vextq_s8(zero, a.vect_s8, 14); break;
    case 3:  res.vect_s8 = vextq_s8(zero, a.vect_s8, 13); break;
    case 4:  res.vect_s8 = vextq_s8(zero, a.vect_s8, 12); break;
    case 5:  res.vect_s8 = vextq_s8(zero, a.vect_s8, 11); break;
    case 6:  res.vect_s8 = vextq_s8(zero, a.vect_s8, 10); break;
    case 7:  res.vect_s8 = vextq_s8(zero, a.vect_s8, 9);  break;
    case 8:  res.vect_s8 = vextq_s8(zero, a.vect_s8, 8);  break;
    case 9:  res.vect_s8 = vextq_s8(zero, a.vect_s8, 7);  break;
    case 10: res.vect_s8 = vextq_s8(zero, a.vect_s8, 6);  break;
    case 11: res.vect_s8 = vextq_s8(zero, a.vect_s8, 5);  break;
    case 12: res.vect_s8 = vextq_s8(zero, a.vect_s8, 4);  break;
    case 13: res.vect_s8 = vextq_s8(zero, a.vect_s8, 3);  break;
    case 14: res.vect_s8 = vextq_s8(zero, a.vect_s8, 2);  break;
    case 15: res.vect_s8 = vextq_s8(zero, a.vect_s8, 1);  break;
    default: /* imm8 > 15: everything shifted out */
        res.vect_s8 = zero;
        break;
    }
    return res;
}

/* SSE2 _mm_cvtsi128_si32: extract the low 32-bit lane as an int. */
FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
{
    int low_lane = vgetq_lane_s32(a.vect_s32, 0);
    return low_lane;
}

#endif