#ifndef SMITH_AVXEXTRA_H
#define SMITH_AVXEXTRA_H

/* NEON emulation of AVX2 _mm256_max_epi32: element-wise signed 32-bit
 * maximum of a and b across both 128-bit halves. */
FORCE_INLINE __m256i _mm256_max_epi32 (__m256i a, __m256i b)
{
    __m256i out;
    int half;
    for (half = 0; half < 2; ++half) {
        out.vect_s32[half] = vmaxq_s32(a.vect_s32[half], b.vect_s32[half]);
    }
    return out;
}

/* NEON emulation of AVX2 _mm256_cmpgt_epi32: signed 32-bit compare a > b,
 * producing all-ones (0xFFFFFFFF) in each true lane and zero otherwise. */
FORCE_INLINE __m256i _mm256_cmpgt_epi32 (__m256i a, __m256i b)
{
    __m256i out;
    int half;
    for (half = 0; half < 2; ++half) {
        out.vect_u32[half] = vcgtq_s32(a.vect_s32[half], b.vect_s32[half]);
    }
    return out;
}

/* NEON emulation of _mm256_stream_si256. NEON has no non-temporal store
 * hint, so this degrades to a plain aligned 256-bit copy. */
FORCE_INLINE void _mm256_stream_si256 (__m256i * mem_addr, __m256i a)
{
    *mem_addr = a;
}

/* NEON emulation of AVX2 _mm256_packs_epi32: per-128-bit-lane saturating
 * pack of signed 32-bit elements into signed 16-bit; each result lane holds
 * the narrowed a-lane in its low half and the narrowed b-lane in its high half. */
FORCE_INLINE __m256i _mm256_packs_epi32(__m256i a, __m256i b)
{
    __m256i out;
    int lane;
    for (lane = 0; lane < 2; ++lane) {
        int16x4_t lo = vqmovn_s32(a.vect_s32[lane]);
        int16x4_t hi = vqmovn_s32(b.vect_s32[lane]);
        out.vect_s16[lane] = vcombine_s16(lo, hi);
    }
    return out;
}

/* NEON emulation of _mm256_permute2f128_si256: each 128-bit half of the
 * result is chosen from the four source halves by one nibble of imm8.
 * Within a nibble: bit 1 selects b over a, bit 0 selects the high half of
 * the chosen source, and bit 3 forces that result half to zero. */
FORCE_INLINE __m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)
{
    __m256i res;
    int lo = imm8 & 0xf;          /* control nibble for result half 0 */
    int hi = (imm8 >> 4) & 0xf;   /* control nibble for result half 1 */
    const __m256i *src;

    src = (lo & 0x2) ? &b : &a;
    res.vect_s32[0] = (lo & 0x8) ? vdupq_n_s32(0)
                                 : src->vect_s32[(lo & 0x1) ? 1 : 0];

    src = (hi & 0x2) ? &b : &a;
    res.vect_s32[1] = (hi & 0x8) ? vdupq_n_s32(0)
                                 : src->vect_s32[(hi & 0x1) ? 1 : 0];
    return res;
}

/* Aligned-allocation shim matching Intel's _mm_malloc contract.
 * Returns a pointer aligned to `align` bytes, or NULL on failure.
 * Memory must be released with _mm_free. */
FORCE_INLINE void* _mm_malloc (size_t size, size_t align)
{
    void *ptr = NULL;

    /* Every allocation is at least 1-byte aligned; plain malloc suffices. */
    if (align == 1) {
        return malloc(size);
    }
    /* posix_memalign requires align to be a multiple of sizeof(void *);
     * round small power-of-two alignments up to that minimum. */
    if (align == 2 || (sizeof(void *) == 8 && align == 4)) {
        align = sizeof(void *);
    }
    return (posix_memalign(&ptr, align, size) == 0) ? ptr : NULL;
}

/* NEON emulation of _mm512_mask_blend_epi32: for each of the 16 lanes,
 * a set bit in k selects the lane from b, a clear bit selects it from a.
 * g_mask_epi32 holds per-lane single-bit test masks (project table). */
FORCE_INLINE __m512i _mm512_mask_blend_epi32 (__mmask16 k, __m512i a, __m512i b)
{
    __m512i res;
    uint32x4_t bit_sel = vld1q_u32(g_mask_epi32);
    int i;
    /* Each 128-bit chunk consumes 4 consecutive mask bits. */
    for (i = 0; i < 4; ++i) {
        uint32x4_t take_b = vtstq_u32(vdupq_n_u32((uint32_t)k >> (4 * i)), bit_sel);
        res.vect_s32[i] = vbslq_s32(take_b, b.vect_s32[i], a.vect_s32[i]);
    }
    return res;
}

/* Bit-cast a 256-bit integer vector to a 256-bit float vector; the bits
 * are reinterpreted, not converted. */
FORCE_INLINE __m256 interpret(__m256i a)
{
    __m256 out;
    int half;
    for (half = 0; half < 2; ++half) {
        out.vect_f32[half] = vreinterpretq_f32_s32(a.vect_s32[half]);
    }
    return out;
}

/* Bit-cast a 256-bit float vector back to a 256-bit integer vector;
 * inverse of interpret(), no value conversion. */
FORCE_INLINE __m256i rev_interpret(__m256 a)
{
    __m256i out;
    int half;
    for (half = 0; half < 2; ++half) {
        out.vect_s32[half] = vreinterpretq_s32_f32(a.vect_f32[half]);
    }
    return out;
}
/*
FORCE_INLINE __m512 interpret512(__m512i a)
{
    __m512 res;
    res.vect_f32[0] = vreinterpretq_f32_s32(a.vect_s32[0]);
    res.vect_f32[1] = vreinterpretq_f32_s32(a.vect_s32[1]);
    res.vect_f32[2] = vreinterpretq_f32_s32(a.vect_s32[2]);
    res.vect_f32[3] = vreinterpretq_f32_s32(a.vect_s32[3]);
    return res;
}

FORCE_INLINE __m512i rev_interpret512(__m512 a)
{
    __m512i res;
    res.vect_s32[0] = vreinterpretq_s32_f32(a.vect_f32[0]);
    res.vect_s32[1] = vreinterpretq_s32_f32(a.vect_f32[1]);
    res.vect_s32[2] = vreinterpretq_s32_f32(a.vect_f32[2]);
    res.vect_s32[3] = vreinterpretq_s32_f32(a.vect_f32[3]);
    return res;
}
*/

/* NEON emulation of _mm512_setzero_si512: all 512 bits cleared. */
FORCE_INLINE __m512i _mm512_setzero_si512 ()
{
    __m512i zero;
    int i;
    for (i = 0; i < 4; ++i) {
        zero.vect_s32[i] = vdupq_n_s32(0);
    }
    return zero;
}

/* NEON emulation of _mm512_max_epi32: element-wise signed 32-bit maximum
 * across all four 128-bit chunks. */
FORCE_INLINE __m512i _mm512_max_epi32 (__m512i a, __m512i b)
{
    __m512i out;
    int i;
    for (i = 0; i < 4; ++i) {
        out.vect_s32[i] = vmaxq_s32(a.vect_s32[i], b.vect_s32[i]);
    }
    return out;
}

/* NEON emulation of _mm512_cmpgt_epi32_mask: signed 32-bit compare a > b
 * over 16 lanes, compressed into a 16-bit mask (one bit per lane). */
FORCE_INLINE __mmask16 _mm512_cmpgt_epi32_mask (__m512i a, __m512i b)
{
    __mmask16 sign;
    __mmask16 *k = &sign;   /* PICK_HB_32x16 writes its result through this pointer */
    __m512i res;
    /* Lane-wise compares yield all-ones (true) or all-zeros (false) lanes. */
    res.vect_u32[0] = vcgtq_s32(a.vect_s32[0], b.vect_s32[0]);
    res.vect_u32[1] = vcgtq_s32(a.vect_s32[1], b.vect_s32[1]);
    res.vect_u32[2] = vcgtq_s32(a.vect_s32[2], b.vect_s32[2]);
    res.vect_u32[3] = vcgtq_s32(a.vect_s32[3], b.vect_s32[3]);
    /* Project macro: presumably gathers the high bit of each 32-bit lane
     * of res into *k — defined elsewhere; confirm against its definition. */
    PICK_HB_32x16(res, k);
    return sign;
}

/* NEON emulation of _mm512_storeu_si512: unaligned 512-bit store of a
 * to mem_addr as four consecutive 128-bit pieces. */
FORCE_INLINE void _mm512_storeu_si512 (void* mem_addr, __m512i a)
{
    int64_t *dst = (int64_t *)mem_addr;
    int i;
    for (i = 0; i < 4; ++i) {
        vst1q_s64(dst + 2 * i, a.vect_s64[i]);
    }
}

/* NEON emulation of _mm512_stream_si512. NEON has no non-temporal store
 * hint, so this is an ordinary 512-bit store, identical to storeu. */
FORCE_INLINE void _mm512_stream_si512 (void* mem_addr, __m512i a)
{
    int64_t *dst = (int64_t *)mem_addr;
    int i;
    for (i = 0; i < 4; ++i) {
        vst1q_s64(dst + 2 * i, a.vect_s64[i]);
    }
}

/* NEON emulation of _mm512_movm_epi32: expand mask k into 16 lanes, each
 * lane all-ones when its mask bit is set and zero otherwise.
 * g_mask_epi32 holds per-lane single-bit test masks (project table). */
FORCE_INLINE __m512i _mm512_movm_epi32 (__mmask16 k)
{
    __m512i res;
    unsigned int bits = k;
    uint32x4_t bit_sel = vld1q_u32(g_mask_epi32);
    int i;
    /* Each 128-bit chunk tests 4 consecutive bits of the mask. */
    for (i = 0; i < 4; ++i) {
        res.vect_u32[i] = vtstq_u32(vdupq_n_u32(bits >> (4 * i)), bit_sel);
    }
    return res;
}

/* NEON emulation of _mm512_packs_epi32: per-128-bit-lane saturating pack
 * of signed 32-bit elements into signed 16-bit; each result lane holds the
 * narrowed a-lane in its low half and the narrowed b-lane in its high half. */
FORCE_INLINE __m512i _mm512_packs_epi32 (__m512i a, __m512i b)
{
    __m512i out;
    int lane;
    for (lane = 0; lane < 4; ++lane) {
        int16x4_t lo = vqmovn_s32(a.vect_s32[lane]);
        int16x4_t hi = vqmovn_s32(b.vect_s32[lane]);
        out.vect_s16[lane] = vcombine_s16(lo, hi);
    }
    return out;
}

/* NEON emulation of _mm512_permutex2var_epi32: for each of the 16 result
 * lanes, idx bits [3:0] pick an element index and bit 4 selects between
 * table a (clear) and table b (set); upper idx bits are ignored.
 *
 * Fix: the original also spilled the *uninitialized* res into ptr_r via
 * _mm256_convert_to_int32 before the loop — undefined behavior and dead
 * work, since the loop overwrites every ptr_r element. Those reads are
 * removed. */
FORCE_INLINE __m512i _mm512_permutex2var_epi32 (__m512i a, __m512i idx, __m512i b)
{
    __m512i res;
    int ptr_a[16], ptr_b[16], ptr_i[16], ptr_r[16];
    /* Spill the two source tables and the index vector to scalar arrays. */
    _mm256_convert_to_int32(ptr_a, a.vect_i256[0]);
    _mm256_convert_to_int32(ptr_a + 8, a.vect_i256[1]);
    _mm256_convert_to_int32(ptr_b, b.vect_i256[0]);
    _mm256_convert_to_int32(ptr_b + 8, b.vect_i256[1]);
    _mm256_convert_to_int32(ptr_i, idx.vect_i256[0]);
    _mm256_convert_to_int32(ptr_i + 8, idx.vect_i256[1]);
    int i;
    for (i = 0; i < 16; ++i) {
        int id = ptr_i[i] & 0x0f;                        /* element within table */
        ptr_r[i] = (ptr_i[i] & 0x10) ? ptr_b[id] : ptr_a[id];  /* bit 4: table select */
    }
    /* Reload the gathered scalars into the 512-bit result. */
    res.vect_s32[0] = vld1q_s32(ptr_r);
    res.vect_s32[1] = vld1q_s32(ptr_r + 4);
    res.vect_s32[2] = vld1q_s32(ptr_r + 8);
    res.vect_s32[3] = vld1q_s32(ptr_r + 12);
    return res;
}

/* Release memory obtained from _mm_malloc. free(NULL) is a no-op, so a
 * NULL guard is unnecessary. */
FORCE_INLINE void _mm_free(void* ptr)
{
    free(ptr);
}

#endif