/*
 * Copyright 2003 Program of Computer Graphics, Cornell University
 *     580 Rhodes Hall
 *     Cornell University
 *     Ithaca NY 14853
 * Web: http://www.graphics.cornell.edu/
 * 
 * Not for commercial use. Do not redistribute without permission.
 */

#ifndef _H_SIMDSSE
#define _H_SIMDSSE

//legacy C-style fallback macros (header predates universal <cstddef>/bool
//usage); modern code should prefer nullptr/true/false instead
#ifndef NULL
#define NULL (0)
#endif
#ifndef FALSE
#define FALSE (0)
#endif
#ifndef TRUE
#define TRUE (1)
#endif

#include <cassert>

//use SSE and SSE2 instructions
#include <fvec.h>
#include <dvec.h>


//using shared triangles uses less memory (about 1/2) at the cost of some
//performance (~10%).  With shared triangles each triangle will appear once
//in the compiled list and be referenced via pointers, and also then the
//QuadPackedTriangleList is stored naked (without the usual PrimitiveList 
//header) to save more space.
#define USE_COMPILED_SHARED_TRIANGLE

//fast scalar helpers: absolute value, minimum, and maximum, overloaded for
//float, int, and double arguments.  On a tie (x == y) minv/maxv return the
//second argument, matching the behavior of the ternary form.
inline static float absv(float x) { if (x < 0) return -x; return x; }
inline static float minv(float x, float y) { if (x < y) return x; return y; }
inline static float maxv(float x, float y) { if (x > y) return x; return y; }
inline static int minv(int x, int y) { if (x < y) return x; return y; }
inline static int maxv(int x, int y) { if (x > y) return x; return y; }
inline static double absv(double x) { if (x < 0) return -x; return x; }
inline static double minv(double x, double y) { if (x < y) return x; return y; }
inline static double maxv(double x, double y) { if (x > y) return x; return y; }
//some definitions for allocating memory aligned to various sizes
//default malloc only aligns to 8 byte (eg double or longlong) boundaries
#define SIMD_SIZE       16      /* alignment for 128-bit SSE loads/stores */
#define CACHELINE_SIZE  64      /* typical x86 cache line size */
#define PAGE_SIZE       4096    /* typical x86 virtual-memory page size; NOTE: may clash with a system macro of the same name on some platforms */

// External constant __m128 values, defined in SIMDSSE.cpp. Thus if you
// include this file in the end you must link against SIMDSSE.o/obj
namespace brite {
	extern const Iu32vec4 FABS_MASK;	// presumably 0x7FffFFff in each lane (sign bit clear) -- see fabs() below; confirm in SIMDSSE.cpp
	extern const Is32vec4 VEC_0_N1_N1_N1;	// 0, -1, -1, -1
	extern const Is32vec4 VEC_0_N1_0_0;		// 0, -1,  0,  0
	extern const Is32vec4 VEC_0_0_N1_0;		// 0,  0, -1,  0
	extern const Is32vec4 VEC_0_0_0_N1;		// 0,  0,  0, -1
}

//allocate 'size' bytes aligned to an 'alignment'-byte boundary.
//alignment must be a positive power of two: _mm_malloc requires it, and the
//mask-based alignment check below is only valid for powers of two.
//Memory returned here must be released with free_aligned().
//Asserts (rather than returning NULL) if the allocation fails.
inline void *malloc_aligned(int size, int alignment) {
  //(alignment-1) works as a bit mask only when alignment is a power of two
  assert(alignment > 0 && (alignment & (alignment-1)) == 0);
  void * retval = _mm_malloc(size,alignment);
  assert(retval);
  assert(!( ((size_t)retval) & (size_t)(alignment-1) ));
  return retval;
}
//release memory obtained from malloc_aligned or any of the wrappers below
inline void free_aligned(void *block) {
  _mm_free(block);
}
//convenience wrappers for the common alignment granularities defined above
inline void *malloc_simd_aligned(int nbytes)      { return malloc_aligned(nbytes, SIMD_SIZE); }
inline void *malloc_cacheline_aligned(int nbytes) { return malloc_aligned(nbytes, CACHELINE_SIZE); }
inline void *malloc_page_aligned(int nbytes)      { return malloc_aligned(nbytes, PAGE_SIZE); }

//routines to control sse modes via the MXCSR control/status register.
//(the bit positions were previously #define'd and #undef'd around these
//functions; function-local typed constants avoid the macro-namespace churn)
//sets Denormals Are Zero mode and returns old mode so it can be restored
//forces SSE to treat all denormal inputs as zeros
inline unsigned int sseSetDAZMode() {
  const unsigned int DAZ_BIT = 6;    //MXCSR bit 6: Denormals-Are-Zero
  unsigned int csr = _mm_getcsr();
  _mm_setcsr(csr | (1<<DAZ_BIT));
  return csr;                        //caller passes this to sseRestoreMode
}
//sets Flush To Zero mode for denormals and returns old mode so it can be restored
//denormal results are set to zero on output (not as aggressive as DAZ mode)
inline unsigned int sseSetFTZMode() {
  const unsigned int FTZ_BIT = 15;   //MXCSR bit 15: Flush-To-Zero
  unsigned int csr = _mm_getcsr();
  _mm_setcsr(csr | (1<<FTZ_BIT));
  return csr;                        //caller passes this to sseRestoreMode
}
//restore an sse mode previously returned by one of the setters above
inline void sseRestoreMode(unsigned int mode) {
  _mm_setcsr(mode);
}

/* 16 byte alignment for SSE */
/* attach to a variable/member declaration to force its address alignment */
#if defined(_MSC_VER) || defined(__ICC)
#define ALIGN16 _MM_ALIGN16
#else
#define ALIGN16 __attribute__((aligned(16)))
#endif
/* 8 byte alignment for MMX */
#if defined(_MSC_VER) || defined(__ICC)
#define ALIGN8  __declspec(align(8))
#else
#define ALIGN8 __attribute__((aligned(8)))
#endif

/* fast float or double to int conversion (scalar) using the SSE/SSE2
   truncating-convert instructions; rounds toward zero, matching the
   semantics of a C/C++ cast */
inline int fast_ftoi(float value) {
  return _mm_cvttss_si32(_mm_set_ss(value));
}
inline int fast_dtoi(double value) {
  return _mm_cvttsd_si32(_mm_set_sd(value));
}

//convert 4 floats to 4 ints using default rounding mode (should be round to nearest)
inline Is32vec4 RoundToIs32vec4(const F32vec4 &a)
{ return Is32vec4(_mm_cvtps_epi32(a)); }
//convert 4 floats to 4 ints using truncation rounding mode (standard in C/C++ )
inline Is32vec4 TruncateToIs32vec4(const F32vec4 &a)
{ return Is32vec4(_mm_cvttps_epi32(a)); }
//extract a word (16bits) from a integer mmx register
//word indices are in 16-bit units: word 2 is the low half of 32-bit element 1,
//word 4 is the low half of 32-bit element 2; result is zero-extended to int
inline int ExtractWord2(const I32vec4 &a)
{ return _mm_extract_epi16(a,2); }
inline int ExtractWord4(const I32vec4 &a)
{ return _mm_extract_epi16(a,4); }

/* prefetch the cache line containing p (normal: T1 hint, and non-temporal:
   NTA hint, to minimize cache pollution).  These are hints only and have no
   effect on program semantics.  Uses static_cast instead of the old C-style
   (char *) cast, which silently stripped the const qualifier from p. */
inline void prefetch(const void *p) { _mm_prefetch(static_cast<const char *>(p),_MM_HINT_T1); }
inline void prefetch_nontemporal(const void *p) { _mm_prefetch(static_cast<const char *>(p),_MM_HINT_NTA); }

//returns a vector of all zeros without accessing memory
inline F32vec4 F32vec4Zero()
{ return _mm_setzero_ps(); }
inline F32vec1 F32vec1Zero()
{ return _mm_setzero_ps(); }
//load and store vector of 4 floats
//NOTE: p must be 16-byte aligned (_mm_load_ps/_mm_store_ps requirement)
inline F32vec4 Load_F32vec4(const float *p) { return _mm_load_ps(p); }
inline void Store_F32vec4(float *p, const F32vec4 &v) { _mm_store_ps(p,v); }
//store only the lowest (scalar) float; no alignment requirement on p
inline void Store_F32vec1(float *p, const F32vec1 &v) { _mm_store_ss(p,v); }
//broadcast one float from 4 vector to all four positions
//(position n means 32-bit element n of the __m128, element 0 lowest)
inline F32vec4 BroadcastFloat0(const F32vec4 &a)
{ return _mm_shuffle_ps(a,a,_MM_SHUFFLE(0,0,0,0)); }
inline F32vec4 BroadcastFloat1(const F32vec4 &a)
{ return _mm_shuffle_ps(a,a,_MM_SHUFFLE(1,1,1,1)); }
inline F32vec4 BroadcastFloat2(const F32vec4 &a)
{ return _mm_shuffle_ps(a,a,_MM_SHUFFLE(2,2,2,2)); }
inline F32vec4 BroadcastFloat3(const F32vec4 &a)
{ return _mm_shuffle_ps(a,a,_MM_SHUFFLE(3,3,3,3)); }
//convert F32vec1 to F32vec4 by broadcasting its value to all positions
inline F32vec4 BroadcastFloat(const F32vec1 &a) 
{ return _mm_shuffle_ps(a,a,_MM_SHUFFLE(0,0,0,0)); }

/* returns a where mask is ones and b where mask is zeros */
/* bitwise per-lane blend: (mask & a) | (~mask & b).  mask is normally the
   all-ones/all-zeros per-lane result of an SSE comparison */
inline F32vec4 Select(const F32vec4 &a, const F32vec4 &b, const F32vec4 &mask)
{ return( (mask & a) | F32vec4(_mm_andnot_ps(mask,b)));	}

//compute the cross-product of two 3D vectors stored in F32vec4's (SIMD vectors of four floats)
//fourth component of result will be set to zero
//cross(a,b) = (a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x);
//each shuffle builds one permutation (element 0 listed first below); the
//duplicated z in element 3 makes that lane cancel: a.z*b.z - a.z*b.z = 0
inline F32vec4 crossProduct(const F32vec4 &a, const F32vec4 &b) {
  F32vec4 aYZXZ = _mm_shuffle_ps(a,a,_MM_SHUFFLE(2,0,2,1));  //(a.y, a.z, a.x, a.z)
  F32vec4 bZXYZ = _mm_shuffle_ps(b,b,_MM_SHUFFLE(2,1,0,2));  //(b.z, b.x, b.y, b.z)
  F32vec4 aZXYZ = _mm_shuffle_ps(a,a,_MM_SHUFFLE(2,1,0,2));  //(a.z, a.x, a.y, a.z)
  F32vec4 bYZXZ = _mm_shuffle_ps(b,b,_MM_SHUFFLE(2,0,2,1));  //(b.y, b.z, b.x, b.z)
  return aYZXZ*bZXYZ - aZXYZ*bYZXZ;
}
//compute the dot product of two 3D vectors stored in F32vec4's (SIMD vectors of four floats)
//result is returned as a F32vec1 (only element 0 is meaningful);
//element 3 of the inputs is ignored
inline F32vec1 dotProduct(const F32vec4 &a, const F32vec4 &b) {
  F32vec4 ftemp = a*b;  //per-lane products (p0, p1, p2, p3)
  //movehl brings ftemp element 2 into lane 0; add_ss sums in lane 0 only: p2 + p0
  F32vec4 res = _mm_add_ss(_mm_movehl_ps(F32vec4Zero(),ftemp),ftemp);
  //shuffle brings ftemp element 1 into lane 0, then lane 0 += p1
  res = _mm_add_ss(res,_mm_shuffle_ps(ftemp,ftemp,_MM_SHUFFLE(0,0,0,1)));
  return static_cast<F32vec1>( res );
}
//compute the maximum value from the first 3 values in the vector and set result
//to have this maximum value in all 4 of its components
inline F32vec4 max3(const F32vec4 &a) {
  return simd_max(BroadcastFloat0(a),simd_max(BroadcastFloat1(a),BroadcastFloat2(a)));
}
//normalize the 3D vector held in the first three components of a.
//NOTE(review): uses rsqrt_nr (reciprocal-sqrt estimate, presumably refined
//with a Newton-Raphson step per fvec.h -- confirm), so the result is an
//approximation, not a fully exact normalize.  Element 3 is scaled too but
//does not contribute to the length (dotProduct ignores it).
inline F32vec4 normalize3(const F32vec4 &a) {
  F32vec1 c = dotProduct(a,a);  //compute length squared of 3d vector
  c = rsqrt_nr(c); //compute reciprocal square root
  return BroadcastFloat(c)*a;
}


//convert lower 2 fp values to 2 dp values
inline F64vec2 floatToDoubleLower(const F32vec4 &a) {
  return _mm_cvtps_pd(a);  //converts elements 0,1 of a
}
//convert upper 2 fp values (elements 2,3) to 2 dp values
inline F64vec2 floatToDoubleUpper(const F32vec4 &a) {
  //movehl(a,a) moves elements 2,3 down into elements 0,1 before converting
  return _mm_cvtps_pd(_mm_movehl_ps(a,a));
}
//recombine two double pairs into a vector of 4 floats, the inverse of
//floatToDoubleLower/Upper: result = (lower[0], lower[1], upper[0], upper[1]).
//BUG FIX: the original used _mm_movehl_ps, which combines the HIGH halves of
//its operands -- but _mm_cvtpd_ps zeros the high half of each converted
//vector, so the old code always returned (0,0,0,0).  _mm_movelh_ps combines
//the two LOW halves, which is what is wanted here.
inline F32vec4 doubleToFloat(const F64vec2 &lower, const F64vec2 &upper) {
  return _mm_movelh_ps(_mm_cvtpd_ps(lower),_mm_cvtpd_ps(upper));
}

//component-wise absolute value of 4 floats: clears the sign bit of each lane
//by ANDing with brite::FABS_MASK (expected to hold 0x7FffFFff per lane, as in
//the commented-out literal below; the constant itself lives in SIMDSSE.cpp)
inline F32vec4 fabs(const F32vec4 &a) {
  //return a&*(F32vec4 *)&Iu32vec4(0x7FffFFff,0x7FffFFff,0x7FffFFff,0x7FffFFff);
  return a & _mm_castsi128_ps(brite::FABS_MASK);
}

#endif //_H_SIMDSSE

