

.macro vmxVectDeclareCommonConsts
   // Declares the shared constant registers used by the other vmxVect* macros.
   // Expand once before using any macro that references these registers.
   .reg vmxVectZero, vmxVectOne, vmxVectMinusOne, vmxVectHalf
   ilf32 vmxVectZero,      0.f
   ilf32 vmxVectOne,       1.f
   ilf32 vmxVectMinusOne, -1.f
   ilf32 vmxVectHalf,      0.5f

   // shufb control quadwords. With both shufb data operands set to the same
   // register, byte indices {8..15, 16..23} rotate the 16-byte vector left by
   // 8 bytes and {4..19} rotate it left by 4 bytes. Used by vmxVectDot.
   .reg SHUFFLE_SHIFT_LEFT_BY_8, SHUFFLE_SHIFT_LEFT_BY_4
   il128 SHUFFLE_SHIFT_LEFT_BY_8, 0x08090a0b_0c0d0e0f_10111213_14151617
   il128 SHUFFLE_SHIFT_LEFT_BY_4, 0x04050607_08090a0b_0c0d0e0f_10111213

   // ilhu loads the immediate into the upper halfword of each word, so every
   // 32-bit lane holds 0x80000000 -- the IEEE-754 single-precision sign-bit
   // mask (see vmxVectAbs / vmxVectNegate / vmxVectRoundInt).
   .reg CONST_VEC_0x8000
   ilhu     CONST_VEC_0x8000, 0x8000

   // 0x3F800001 = 1.0f + 1 ulp; used as the "1.0" term of the Newton-Raphson
   // fnms correction (see vmxVectRecip) to bias the truncated fi result.
   .reg     CONST_VEC_DIV_ACC
   ilhu     CONST_VEC_DIV_ACC, 0x3F80
   ori      CONST_VEC_DIV_ACC, CONST_VEC_DIV_ACC, 0x0001

.endmacro


// Splat the immediate float constant 'value' into all four lanes of regName.
.macro vmxVectSetScalar (regName, value)
   ilf32 regName, value
.endmacro

//.macro vmxVectSet (regName, x, y, z, w)
//   ilf32 regName, x, y, z, w
//.endmacro

//
// Vector Load & store
//
// Aligned quadword load: v <- the 16 bytes at (adr + offset).
// NOTE(review): lqd uses a quadword-aligned effective address (the low 4 bits
// are ignored by the hardware) -- confirm callers keep adr+offset 16-byte
// aligned, as the "4A" (4-float aligned) naming suggests.
.macro vmxVectLoad4A (v, adr, offset)
   lqd      v, (offset)(adr)
.endmacro

// Aligned quadword store: the 16 bytes of v -> (adr + offset).
.macro vmxVectStore4A (v, adr, offset)
   stqd     v, (offset)(adr)
.endmacro


//
// Integer in vector
//
// Convert signed 32-bit int lanes to float, scaled by 2^-divExp (csflt).
.macro vmxVectConvertIntToFloat(r, v, divExp)
   csflt r, v, divExp
.endmacro
// Convert float lanes to signed 32-bit int: scale by 2^mulExp, then truncate
// toward zero, saturating at the int32 range limits (cflts).
.macro vmxVectConvertFloatToInt(r, v, mulExp)
   cflts r, v, mulExp
.endmacro


//
// Vector Rounding & Conversion
//
// floor(v) per lane: truncate toward zero, then subtract 1 from lanes where
// the truncated value overshoots (negative v with a fractional part).
// NOTE(review): inherits vmxVectTrunc's range limit -- cflts saturates for
// |v| beyond the signed 32-bit integer range.
.macro vmxVectFloor (r, v)
   .localreg truncV, pattern, adjust
   vmxVectTrunc(truncV, v)
   fcgt     pattern, truncV, v                       // lanes where trunc(v) > v
   selb     adjust, vmxVectZero, vmxVectMinusOne, pattern
   fa       r, truncV, adjust                        // trunc(v) + (0 or -1)
.endmacro

// ceil(v) per lane: truncate toward zero, then add 1 to lanes where the
// truncated value undershoots (positive v with a fractional part).
// NOTE(review): inherits vmxVectTrunc's range limit -- cflts saturates for
// |v| beyond the signed 32-bit integer range.
.macro vmxVectCeiling (r, v)
   .localreg truncV, pattern, adjust
   vmxVectTrunc(truncV, v)
   fcgt     pattern, v, truncV                       // lanes where v > trunc(v)
   selb     adjust, vmxVectZero, vmxVectOne, pattern
   fa       r, truncV, adjust                        // trunc(v) + (0 or +1)
.endmacro

// Round each lane to the nearest integer (ties away from zero): copy v's
// sign bit onto 0.5, add, then truncate toward zero via cflts.
.macro vmxVectRoundInt (r, v)
   .localreg sign, add, summ
   and      sign, v, CONST_VEC_0x8000      // isolate each lane's sign bit
   or       add, vmxVectHalf, sign         // +/-0.5 carrying v's sign
   fa       summ, v, add
   vmxVectConvertFloatToInt(r, summ, 0)    // cflts truncates toward zero
.endmacro
// Round to nearest (ties away from zero), converted back to float lanes.
.macro vmxVectRound (r, v)
   vmxVectRoundInt(r, v)
   vmxVectConvertIntToFloat(r,r,0)
.endmacro

// Truncate each lane toward zero via a float->int->float round trip.
// NOTE(review): cflts saturates, so |v| beyond the signed 32-bit integer
// range is clamped rather than truncated.
.macro vmxVectTrunc (r, v)
   vmxVectConvertFloatToInt(r,v,0)
   vmxVectConvertIntToFloat(r,r,0)
.endmacro


//
// Vector Math ops
//
// |a| per lane: clear the IEEE-754 sign bit.
.macro vmxVectAbs (r, a)
   andc  r, a, CONST_VEC_0x8000
.endmacro

// -a per lane: flip the IEEE-754 sign bit.
.macro vmxVectNegate (r, a)
   xor   r, a, CONST_VEC_0x8000
.endmacro

// r = a + b per lane.
.macro vmxVectAdd (r, a, b)
   fa r, a, b
.endmacro

// r = a - b per lane.
.macro vmxVectSub (r, a, b)
   fs r, a, b
.endmacro

// r = a * b per lane.
.macro vmxVectMul (r, a, b)
   fm r, a, b
.endmacro


// r = a * b + c per lane (fused multiply-add).
.macro vmxVectMadd (r, a, b, c)
   fma r, a, b, c
.endmacro

// Reciprocal estimate: frest table lookup refined once by fi (interpolate) --
// the standard SPU estimate sequence.
.macro vmxVectRecipEst (r, a)
   .localreg frest_res
	frest	   frest_res, a
	fi		   r,  a, frest_res
.endmacro

// Refined 1/a: estimate plus one Newton-Raphson step,
//   r = est + est * (K - a*est)
// where K = CONST_VEC_DIV_ACC (1.0f + 1 ulp) biases the correction to
// compensate the truncated fi result.
.macro vmxVectRecip (r, a)
   .localreg a_recip_est
   vmxVectRecipEst(a_recip_est, a)
   .localreg a_recip
   fnms     a_recip, a, a_recip_est, CONST_VEC_DIV_ACC   // K - a*est
   fma      r, a_recip, a_recip_est, a_recip_est         // est + est*(K - a*est)
.endmacro

// r = a / b per lane, via the refined reciprocal of b (vmxVectRecip).
.macro vmxVectDivide (r, a, b)
   .localreg b_recip
	vmxVectRecip(b_recip, b)
	vmxVectMul(r, a, b_recip)
.endmacro

// r = a / b per lane using only the cheap reciprocal estimate (lower
// precision, fewer instructions).
.macro vmxVectDivideEst (r, a, b)
   .localreg b_recip
	vmxVectRecipEst(b_recip, b)
	vmxVectMul(r, a, b_recip)
.endmacro


// 1/sqrt(a) estimate: frsqest table lookup refined once by fi (interpolate).
.macro vmxVectRecipSqrtEst (r, a)
   .localreg frsqest_res
	frsqest	frsqest_res, a
	fi		   r, a, frsqest_res
.endmacro

// Refined 1/sqrt(a): hardware estimate plus one Newton-Raphson step,
//   y1 = y0 + 0.5*y0*(K - a*y0^2)
// with K = CONST_VEC_DIV_ACC (1.0f + 1 ulp), matching the bias used by
// vmxVectRecip to compensate the truncated fi result.
// NOTE(review): lanes with a <= 0 are not guarded (same as the estimate).
.macro vmxVectRecipSqrt (r, a)
   .localreg y0, ay0, err, halfY0
   vmxVectRecipSqrtEst(y0, a)
   fm       ay0, a, y0
   fnms     err, ay0, y0, CONST_VEC_DIV_ACC    // K - a*y0^2
   fm       halfY0, y0, vmxVectHalf
   fma      r, err, halfY0, y0                 // y0 + 0.5*y0*err
.endmacro

// sqrt(a) ~= a * rsqrt_est(a). Lanes with a <= 0 are NOT guarded here;
// use vmxVectSqrtEst for the zero-checked version.
.macro vmxVectSqrtEst_NoZeroCheck (r, a)
   .localreg a_rsqrt_est
   vmxVectRecipSqrtEst(a_rsqrt_est, a)
   fm       r, a_rsqrt_est, a
.endmacro

// sqrt(a) estimate with lanes where a <= 0 forced to exactly zero
// (the rsqrt estimate is not valid there).
.macro vmxVectSqrtEst (r, a)
   vmxVectSqrtEst_NoZeroCheck(r, a)

   .localreg pattern
   fcgt     pattern,  a, vmxVectZero   // all-ones where a > 0
   selb     r, vmxVectZero, r, pattern // keep r where a > 0, else 0
.endmacro

// sqrt(a) = a * rsqrt(a), with one Newton-Raphson refinement of the hardware
// rsqrt estimate (inlined) and lanes with a <= 0 forced to exactly zero,
// mirroring vmxVectSqrtEst's zero guard.
.macro vmxVectSqrt (r, a)
   .localreg y0, ay0, err, halfY0, y1, sqrtA, pattern
   vmxVectRecipSqrtEst(y0, a)
   fm       ay0, a, y0
   fnms     err, ay0, y0, CONST_VEC_DIV_ACC    // K - a*y0^2, K = 1.0f + 1 ulp
   fm       halfY0, y0, vmxVectHalf
   fma      y1, err, halfY0, y0                // refined 1/sqrt(a)
   fm       sqrtA, y1, a
   fcgt     pattern, a, vmxVectZero            // all-ones where a > 0
   selb     r, vmxVectZero, sqrtA, pattern     // keep result where a > 0, else 0
.endmacro


// 4-component dot product; the scalar result is splatted into all four lanes.
.macro vmxVectDot (r, a, b)
   .localreg v0, v1
   fm    v0, a,  b                             // per-lane products {x,y,z,w}
   shufb v1, v0, v0, SHUFFLE_SHIFT_LEFT_BY_8   // rotate left 8 bytes: {z,w,x,y}
   fa    v0, v0, v1                            // {x+z, y+w, z+x, w+y}
   shufb v1, v0, v0, SHUFFLE_SHIFT_LEFT_BY_4   // rotate left 4 bytes
   fa    r,  v0, v1                            // every lane = x+y+z+w
.endmacro

// 3-component cross product (w lane of the result is 0) using two yzxw
// rotations and one final permute:
//   t = a * b.yzxw - a.yzxw * b   ->  t = {cz, cx, cy, 0}
//   r = t.yzxw                    ->  {cx, cy, cz, 0}
.macro vmxVectCross (r, a, b)
   .localreg shufYZXW, aYZX, bYZX, t
   il128    shufYZXW, 0x04050607_08090a0b_00010203_0c0d0e0f
   shufb    aYZX, a, a, shufYZXW               // a.yzxw
   shufb    bYZX, b, b, shufYZXW               // b.yzxw
   fm       t, a, bYZX                         // a * b.yzxw
   fnms     t, aYZX, b, t                      // a*b.yzxw - a.yzxw*b
   shufb    r, t, t, shufYZXW                  // rotate into {cx, cy, cz, 0}
.endmacro

// Linear interpolation per lane: r = a + t * (b - a).
// NOTE(review): r must not alias a or t -- r is overwritten by the
// subtraction before a and t are read by the madd.
.macro vmxVectLerp (r, a, b, t)
   //return a + t * (b - a);
   vmxVectSub(r, b, a)
   vmxVectMadd(r, r, t, a)
.endmacro













//
// Vector Comparison
//
// Per-lane mask: all-ones where a >= b, all-zeros otherwise.
// a >= b  ==  !(b > a); the intended spu_xor(spu_cmpgt(b, a), -1) is the
// bitwise complement, which nor of a register with itself provides.
.macro vmxVectGreaterOrEqual (r, a, b)
   fcgt  r, b, a      // r = (b > a) mask
   nor   r, r, r      // invert: r = (a >= b) mask
.endmacro

// Per-lane mask: all-ones where a > b, all-zeros otherwise.
.macro vmxVectGreater (r, a, b)
   fcgt r,  a, b
.endmacro

// Per-lane mask for a < b (== b > a).
.macro vmxVectLess (r, a, b)
   fcgt r,  b, a
.endmacro

// Bit-select: r takes bits from b where pattern is 1, from a where 0.
.macro vmxVectSelect (r, a, b, pattern)
   selb r, a, b, pattern
.endmacro

// Per-lane minimum: where a > b select b, otherwise a.
.macro vmxVectMin (r, a, b)
   .localreg pattern

   fcgt pattern,  a, b
   selb r, a, b, pattern
.endmacro

// Per-lane maximum: where a > b select a, otherwise b.
.macro vmxVectMax (r, a, b)
   .localreg pattern
   fcgt pattern,  a, b
   selb r, b, a, pattern
.endmacro

// Horizontal maximum of the four lanes of a, splatted into every lane of r.
.macro vmxVectMax4 (r, a)
   .localreg tmp
   rotqbyi  tmp, a, 0x08         // rotate left 8 bytes: {z,w,x,y}
   vmxVectMax(tmp, a, tmp)       // pairwise maxes
   rotqbyi  r, tmp, 0x04         // rotate left 4 bytes
   vmxVectMax(r, r, tmp)         // every lane = max(x,y,z,w)
.endmacro

// Clamp each lane of a to [0, 1].
.macro vmxVectSaturate (r, a)
   vmxVectMin(r, a, vmxVectOne)
   vmxVectMax(r, r, vmxVectZero)
.endmacro




