//#define DEBUG_CODE

#ifdef DEBUG_CODE
#include <stdlib.h>
#include <stdio.h>
#endif
#include <stdint.h>
#include <math.h>
#include <immintrin.h>

#include "myexp.h"
#include "myexp_impl.h"

/* Force 32-byte alignment so the tables below can be read with aligned AVX loads. */
#define ALIGN_32 __attribute__((aligned(32)))

/* 0x1.8p45 = 1.5 * 2^45.  Adding this constant to z forces a rounding such
 * that the low mantissa bits of the sum hold round(z * 128) -- the classic
 * round-to-integer-by-addition trick used in exp_vec4_avx2. */
static const __m256d MYEXP_MASK_1p45 =  { 0x1.8p45, 0x1.8p45, 0x1.8p45, 0x1.8p45 }  ;

/* ln(2) = 0.69314718055994530942; LN2 itself comes from myexp_impl.h (presumably). */
static const __m256d MYEXP_LN2 = {LN2,LN2,LN2,LN2};

/* -ln(2): used by the FMA that computes the reduction residual r = x - z0*ln(2). */
static const __m256d MYEXP_NEG_LN2 = {-LN2,-LN2,-LN2,-LN2};

// 1/ln(2) = 0x1.71547652b82fe1p+0 (truncated to 0x1.71547652b82fep+0 here)
static const __m256d MYEXP_INVLN2 = 
    {0x1.71547652b82fep+0, 0x1.71547652b82fep+0, 0x1.71547652b82fep+0, 0x1.71547652b82fep+0};

// exp(0x1.6232bdd7abcd24p+9) = 2^1022.000000000000
// MYEXP_N1022xLN2[i] = 1022 * ln(2) = 0x1.6232bdd7abcd2p+9 = 0x4086232bdd7abcd2
// Stored as raw uint64 bit patterns: the fast-path range check compares the
// |x| bit pattern against this value entirely in the integer domain.
static const uint64_t MYEXP_N1022xLN2[4] ALIGN_32  =
    {LN2_X_1022_AS_UINT64, LN2_X_1022_AS_UINT64, LN2_X_1022_AS_UINT64, LN2_X_1022_AS_UINT64};

/* Polynomial coefficients for the exp(r)-1 tail; C2..C5 are defined in
 * myexp_impl.h (presumably). */
static const __m256d MYEXP_C2 = {C2, C2, C2, C2};
static const __m256d MYEXP_C3 = {C3, C3, C3, C3};
static const __m256d MYEXP_C4 = {C4, C4, C4, C4};
static const __m256d MYEXP_C5 = {C5, C5, C5, C5};

/* Four int64 1023s written as interleaved int32 pairs {lo,hi}: the IEEE-754
 * double exponent bias, loaded as one __m256i in search_table(). */
static const int MYEXP_N_1023[8] ALIGN_32 = {1023,0,1023,0,1023,0,1023,0};

/*
 * Assemble 2^ki * 2^(idx/128) for each of the four 64-bit lanes of zi.
 *
 * zi holds the raw bit pattern of the double  z + 0x1.8p45 ; per lane the
 * low bits encode round(z * 128), so
 *     ki  = lane >> 7     (integer part of z; logical shift -- the extra
 *                          high bits are discarded by the << 52 below)
 *     idx = lane & 127    (fractional part, table index 0..127)
 * The result double is built as  exp_data_52[idx] | ((ki + 1023) << 52) :
 * the table supplies the 52 mantissa bits of 2^(idx/128) and the biased
 * exponent supplies the 2^ki scaling.
 */
static __m256d search_table(__m256i zi)
{
    __m256i n_1023 = _mm256_load_si256((__m256i const *)(MYEXP_N_1023)); /* int64 {1023,1023,1023,1023} */
    __m256i n_127  = _mm256_srli_epi64(n_1023, 3);  /* 1023 >> 3 == 127: low-7-bit mask */
    __m256i ki     = _mm256_srli_epi64(zi, 7);      /* logical (unsigned) >> 7 */

    __m256i m256_idx = _mm256_and_si256(zi, n_127); /* idx = lane & 127 */

    int32_t idx0 = _mm256_extract_epi32(m256_idx, 0);
    int32_t idx1 = _mm256_extract_epi32(m256_idx, 2);
    int32_t idx2 = _mm256_extract_epi32(m256_idx, 4);
    int32_t idx3 = _mm256_extract_epi32(m256_idx, 6);

    /* Mantissa bits of 2^(idx/128), pre-positioned in the low 52 bits. */
    int64_t v0 = exp_data_52[idx0];
    int64_t v1 = exp_data_52[idx1];
    int64_t v2 = exp_data_52[idx2];
    int64_t v3 = exp_data_52[idx3];

    ki = _mm256_add_epi64(ki, n_1023); /* apply the IEEE-754 exponent bias */
    ki = _mm256_slli_epi64(ki, 52);    /* move into the double exponent field */

    /* Plain local instead of the former `#define body m256_idx` alias: that
     * macro was never #undef'd and leaked into the rest of the file. */
    __m256i m256_body = _mm256_setr_epi64x(v0, v1, v2, v3);
    m256_body = _mm256_or_si256(m256_body, ki);
    /* Portable bit-cast intrinsic instead of the GCC-only (__m256d) vector cast. */
    return _mm256_castsi256_pd(m256_body);
}


#ifdef DEBUG_CODE
/* Scratch buffers for dumping vector registers (debug builds only). */
int i32x4_tmp[4] ALIGN_16;
int i32x8_tmp[8] ALIGN_32;

/* Print a __m128i as four int32 values.  Wrapped in do { } while (0) so the
 * macro expands to a single statement and is safe inside un-braced
 * if/else bodies. */
#define DEBUG_MM128(prompt, MM)                        \
    do                                                 \
    {                                                  \
        _mm_store_si128((__m128i *)i32x4_tmp, (MM));   \
        printf("%s={", (prompt));                      \
        for (int i = 0; i < 4; i++)                    \
        {                                              \
            printf("%d,", i32x4_tmp[i]);               \
        }                                              \
        printf("}\n");                                 \
    } while (0)

/* Print a __m256i as eight int32 values; same single-statement wrapping. */
#define DEBUG_MM256(prompt, MM)                           \
    do                                                    \
    {                                                     \
        _mm256_store_si256((__m256i *)i32x8_tmp, (MM));   \
        printf("%s={", (prompt));                         \
        for (int i = 0; i < 8; i++)                       \
        {                                                 \
            printf("%d,", i32x8_tmp[i]);                  \
        }                                                 \
        printf("}\n");                                    \
    } while (0)

#endif

/*
 * Vectorized exp() over four doubles: pr[i] = exp(px[i]) for i = 0..3.
 *
 * Fast path covers |x| <= 1022*ln(2).  If any lane's |x| exceeds that
 * threshold (possible over/underflow, inf, NaN), all four lanes fall back
 * to the scalar exp_v1().  pr may be unaligned; px must hold 4 doubles.
 */
void exp_vec4_avx2(double *pr, const double *px)
{
    __m256i bit_mask;
    __m256i N1022xLN2;  /* 1022*ln(2) as raw uint64 bit patterns */
    __m256i i64x4;
    int is_full_zero;

    /* 0x7fffffffffffffff per lane: clears bit 63 (sign) to get |x| bits.
     * Built from set1(-1) rather than cmpeq on an uninitialized variable --
     * reading an uninitialized object is undefined behavior in C. */
    bit_mask = _mm256_srli_epi64(_mm256_set1_epi64x(-1), 1);

#ifdef DEBUG_CODE
    DEBUG_MM256("bit_mask", bit_mask);
#endif

    i64x4     = _mm256_lddqu_si256((__m256i const *)(px));
    N1022xLN2 = _mm256_load_si256((__m256i const *)(MYEXP_N1022xLN2));

    i64x4 = _mm256_and_si256(i64x4, bit_mask);    /* |x| bit patterns */
    i64x4 = _mm256_cmpgt_epi64(i64x4, N1022xLN2); /* all-ones where |x| > 1022*ln(2) (strict) */

    is_full_zero = _mm256_testz_si256(i64x4, i64x4); /* 1 when every lane is in range */
    if (unlikely(!is_full_zero))                     /* some x[i] outside the fast path */
    {
        _mm256_zeroupper();
        for (int i = 0; i < 4; i++)
            pr[i] = exp_v1(px[i]);
        return;
    }

    __m256d x  = _mm256_loadu_pd(px);
    __m256d z  = _mm256_mul_pd(x, MYEXP_INVLN2);    /* z = x / ln(2) */
    __m256d zi = _mm256_add_pd(z, MYEXP_MASK_1p45); /* low bits of zi = round(z*128) */

    /*
     * z0 is the value actually captured in zi's low bits; it has the form
     *     z0 = ki + idx/128.0,  -1022 < ki < 1024,  0 <= idx < 128
     *     z0 = zi.f - 0x1.8p45
     * and the reduction residual is  r = x - z0*ln(2).
     */
    __m256d MM_z0 = _mm256_sub_pd(zi, MYEXP_MASK_1p45);
    __m256d MM_r, MM_r2, MM_r4;
    __m256d MM_t1, MM_t2;

    MM_r = _mm256_fmadd_pd(MM_z0, MYEXP_NEG_LN2, x); /* r = x - z0*ln(2) */

    MM_r2 = _mm256_mul_pd(MM_r, MM_r);   /* r^2 */
    MM_r4 = _mm256_mul_pd(MM_r2, MM_r2); /* r^4 */

    /* Polynomial tail of exp(r)-1, split into two chains for ILP. */
    MM_t1 = _mm256_fmadd_pd(MM_r, MYEXP_C3, MYEXP_C2); /* t1 = r*C3 + C2 */
    MM_t2 = _mm256_fmadd_pd(MM_r, MYEXP_C5, MYEXP_C4); /* t2 = r*C5 + C4 */
    MM_t1 = _mm256_mul_pd(MM_t1, MM_r2);               /* t1 = (r*C3+C2)*r^2 */
    MM_t2 = _mm256_mul_pd(MM_t2, MM_r4);               /* t2 = (r*C5+C4)*r^4 */

    /* body = 2^ki * 2^(idx/128); portable bit-cast instead of (__m256i)zi. */
    __m256d MM_body = search_table(_mm256_castpd_si256(zi));

    /* tail = r + t1 + t2 ~= exp(r) - 1; result = body + body*tail.
     * (Plain variable replaces the former `#define MM_tail MM_t1` alias.) */
    __m256d MM_tail = _mm256_add_pd(MM_t1, MM_t2);
    MM_tail = _mm256_add_pd(MM_tail, MM_r);
    MM_tail = _mm256_fmadd_pd(MM_tail, MM_body, MM_body);
    _mm256_storeu_pd(pr, MM_tail);
    _mm256_zeroupper();
}
