/*
 * SSE.h
 *
 *  Created on: Jan 15, 2011
 *      Author: LyonsDesktop
 */

#ifndef SSE_H_
#define SSE_H_
#include <xmmintrin.h>
//#include <intrin.h>
#include <x86intrin.h>
#include <emmintrin.h>
//#include <pmmintrin.h>
//Nauful's SSE functions

//Based in part on Intel's SIMD Math Library and the cephes math library.
/* 16-byte alignment attribute, spelled per-toolchain. */
#ifndef __WIN32__
#define __align16  __attribute__((aligned(16) ))
#else
#define __align16 __declspec(align(16) )
#endif

/* Broadcast a scalar into a 16-byte-aligned 4-lane constant array.
 * NOTE: each expansion is already a complete, ';'-terminated declaration,
 * so invocations below must NOT add a trailing semicolon (a stray
 * file-scope ';' is a constraint violation in ISO C). */
#define P16CONST(name, v) \
static const __align16 float _p16_##name[4] = { (float)(v), (float)(v), (float)(v), (float)(v) };

#define P16CONST_type(name, type, v) \
static const __align16 type _p16_##name[4] = { (type)(v), (type)(v), (type)(v), (type)(v) };

/* Reinterpret a constant table as a packed SSE register.
 * NOTE(review): this type-puns through an incompatible pointer type
 * (strict-aliasing gray area); kept as-is since the whole header relies
 * on it and the arrays are 16-byte aligned. */
#define _mm_load_p16(name) (*(__m128*)(_p16_##name))
#define _mm_load_p16i(name) (*(__m128i*)(_p16_##name))

P16CONST(one, 1.0f)
P16CONST(half, 0.5f)
P16CONST_type(halfbyte, int, 127)           /* IEEE-754 single exponent bias */
/* Bit-pattern constants: stored as ints, loaded as floats for masking.
 * 0x00800000 is the bit pattern of the smallest normal float (FLT_MIN). */
P16CONST_type(min_norm_pos, int, 0x00800000)
P16CONST_type(mant_mask, int, 0x7f800000)   /* exponent field */
P16CONST_type(inv_mant_mask, int, ~0x7f800000)
P16CONST_type(sign_mask, int, 0x80000000)
P16CONST_type(inv_sign_mask, int, ~0x80000000)

/* sqrt(1/2): split point for the log mantissa normalization. */
P16CONST(sqrt2h, 0.707106781186547524)
/* Cephes logf polynomial coefficients (degree 8 minimax in x-1). */
P16CONST(log_p0, 7.0376836292E-2)
P16CONST(log_p1, -1.1514610310E-1)
P16CONST(log_p2, 1.1676998740E-1)
P16CONST(log_p3, -1.2420140846E-1)
P16CONST(log_p4, +1.4249322787E-1)
P16CONST(log_p5, -1.6668057665E-1)
P16CONST(log_p6, +2.0000714765E-1)
P16CONST(log_p7, -2.4999993993E-1)
P16CONST(log_p8, +3.3333331174E-1)
/* ln(2) split as q2 + q1 for extended precision: q2 is exactly
 * representable, q1 carries the residual. */
P16CONST(log_q1, -2.12194440e-4)
P16CONST(log_q2, 0.693359375)

/* expf argument clamp: beyond these the float result over/underflows. */
P16CONST(exp_high, 88.3762626647949f)
P16CONST(exp_low, -88.3762626647949f)

P16CONST(log_2, 1.44269504088896341)        /* log2(e) */

/* ln(2) split (same values as log_q2/log_q1) used by the exp reduction. */
P16CONST(exp_C1, 0.693359375)
P16CONST(exp_C2, -2.12194440E-4)
/* Cephes expf polynomial coefficients (degree 5). */
P16CONST(exp_p0, 1.9875691500E-4)
P16CONST(exp_p1, 1.3981999507E-3)
P16CONST(exp_p2, 8.3334519073E-3)
P16CONST(exp_p3, 4.1665795894E-2)
P16CONST(exp_p4, 1.6666665459E-1)
P16CONST(exp_p5, 5.0000001201E-1)

__inline __m128 _mm_abs_ps(__m128 _x)
{
	//Sign bit = 0

	const int AbsMask = 0x7FFFFFFF;
	return _mm_and_ps(_mm_load1_ps((float*)(&AbsMask)), _x);
}

__inline __m128 _mm_exp_ps(__m128 x)
{
    __m128 tmp = _mm_setzero_ps();
    __m128 one = _mm_load_p16(one);

    x = _mm_min_ps(x, _mm_load_p16(exp_high));
    x = _mm_max_ps(x, _mm_load_p16(exp_low));
    //exp(x) as exp(y + n*log(2));
    __m128 fx = _mm_add_ps(_mm_load_p16(half), _mm_mul_ps(x, _mm_load_p16(log_2)));

    //floorf
    tmp = _mm_cvtepi32_ps(_mm_cvttps_epi32(fx));

    __m128 submask = _mm_cmpgt_ps(tmp, fx);
    submask = _mm_and_ps(submask, one);
    fx = _mm_sub_ps(tmp, submask);

    tmp = _mm_mul_ps(fx, _mm_load_p16(exp_C1));
    __m128 z = _mm_mul_ps(fx, _mm_load_p16(exp_C2));
    x = _mm_sub_ps(x, _mm_add_ps(z, tmp));

    z = _mm_mul_ps(x, x);

    __m128 y = _mm_load_p16(exp_p0);
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(exp_p1));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(exp_p2));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(exp_p3));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(exp_p4));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(exp_p5));
    y = _mm_mul_ps(y, z);
    y = _mm_add_ps(y, x);
    y = _mm_add_ps(y, one);

    //2^n
    __m128i m0 = _mm_cvttps_epi32(fx);
    m0= _mm_add_epi32(m0, _mm_load_p16i(halfbyte));
    m0= _mm_slli_epi32(m0, 23);
    y = _mm_mul_ps(y, _mm_castsi128_ps(m0));

    return y;
}

__inline __m128 _mm_log_ps(__m128 x)
{
    __m128 one = _mm_load_p16(one);

    x = _mm_max_ps(x, _mm_load_p16(min_norm_pos));

    //x = frexp(x, e)
    __m128i m0 = _mm_srli_epi32(_mm_castps_si128(x), 23);

    x = _mm_and_ps(x, _mm_load_p16(inv_mant_mask));
    x = _mm_or_ps(x, _mm_load_p16(half));
    m0 = _mm_sub_epi32(m0, _mm_load_p16i(halfbyte));
    __m128 e = _mm_cvtepi32_ps(m0);
    e = _mm_add_ps(e, _mm_load_p16(one));

    /*
    if(x < sqrt2h)
    { e -= 1; x = x + x - 1.0; }
    else
    { x = x - 1.0; }
    */

    __m128 mask = _mm_cmplt_ps(x, _mm_load_p16(sqrt2h));
    __m128 tmp = _mm_and_ps(x, mask);
    x = _mm_sub_ps(x, one);
    e = _mm_sub_ps(e, _mm_and_ps(one, mask));
    x = _mm_add_ps(x, tmp);

    __m128 z = _mm_mul_ps(x, x);
    __m128 y = _mm_load_p16(log_p0);

    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(log_p1));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(log_p2));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(log_p3));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(log_p4));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(log_p5));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(log_p6));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(log_p7));
    y = _mm_mul_ps(y, x);
    y = _mm_add_ps(y, _mm_load_p16(log_p8));
    y = _mm_mul_ps(y, x);
    y = _mm_mul_ps(y, z);

    y = _mm_add_ps(y, _mm_mul_ps(e, _mm_load_p16(log_q1)));
    y = _mm_sub_ps(y, _mm_mul_ps(z, _mm_load_p16(half)));

    x = _mm_add_ps(x, y);
    x = _mm_add_ps(x, _mm_mul_ps(e, _mm_load_p16(log_q2)));

    return x;
}

#endif /* SSE_H_ */
