/* Gluten Library -- Vector
 *
 * A templated vector (the Euclidean kind, not the container) type. Supports
 * arbitrary dimensions, although some functionality only supports 2- and
 * 3-dimension vectors.
 *
 * $AUTHOR$    res
 * $UPDATE$    r32
 */

/* LICENSES ********************************************************************

Copyright (c) 2013- Reuben E. Smith

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

*******************************************************************************/

#ifdef DEBUG
   #include <cassert>
#endif

#include <cstring>

#ifndef VECTOR_NO_SIMD
    #ifdef _MSC_VER
        #include <intrin.h>
        #include <malloc.h>

        // res: For now, just a forced hack to assume SSE3 support if a flag
        //      is given to the compiler.
        #ifdef SSE3
            #define __SSE3__
        #endif

        #define _mm_malloc(S, A) _aligned_malloc(S, A)
        #define _mm_free(P) _aligned_free(P)
    #else
        #include <x86intrin.h>

        // Ignoring old-style-cast warning here due to how _mm_shuffle_ps is
        // implemented internally. Otherwise using that intrinsic could cause
        // GCC to raise 3 or so warnings per call with -Wold-style-cast.
        //
        // GCC 4.6+ supports `#pragma GCC diagnostic push/pop`, so the
        // warning can be suppressed locally around each intrinsic use;
        // GCC 4.2 - 4.5 only support a translation-unit-wide suppression.
        // The version test must compare (major, minor) as a pair: checking
        // only __GNUC_MINOR__ >= 6 would misclassify GCC 5.1, 7.1, etc.
        #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
            #define GCC_IGNORE_OLD_STYLE_CAST 1
        #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 2
            #pragma GCC diagnostic ignored "-Wold-style-cast"
        #endif
    #endif
#endif
#include "Utility.hpp"
#include "Vector.hpp"


namespace Gluten
{
#ifndef VECTOR_NO_SIMD
    // Shuffle masks for the horizontal-sum fallback used by normSquare(),
    // norm() and dot() when SSE3's _mm_hadd_ps is unavailable: PERM_0
    // rotates the four lanes by one position, PERM_1 by two, so two
    // rotate-and-add rounds leave the full four-way sum in every lane.
    static const unsigned int INNER_PROD_PERM_0 = _MM_SHUFFLE(0, 3, 2, 1),
                              INNER_PROD_PERM_1 = _MM_SHUFFLE(1, 0, 3, 2);

    // Shuffle masks used by cross(): they reorder a vector's lanes to
    // (y, z, x, w) and (z, x, y, w) respectively, applied before and after
    // the packed multiplies so the final subtraction lines up the positive
    // and negative cross-product terms.
    static const unsigned int OUTER_PROD_PERM_0 = _MM_SHUFFLE(3, 0, 2, 1),
                              OUTER_PROD_PERM_1 = _MM_SHUFFLE(3, 1, 0, 2);


    Vector<4, float>::Vector()
    {
        // Default constructor: zero-initializes all four components.
    #ifdef _MSC_VER
        // Point _d at the first 16-byte-aligned address inside the raw
        // backing buffer (NOTE(review): _d_raw appears to be an over-sized
        // raw array declared in Vector.hpp -- confirm there). The named
        // reinterpret_casts replace the old C-style `(intptr_t)` cast.
        _d = reinterpret_cast<float*>(
            (reinterpret_cast<intptr_t>(_d_raw) + 15) & ~15);
    #endif

        std::memset(_d, 0, 4 * sizeof(float));
    }

    
    Vector<4, float>::Vector(const Vector<4, float>& other)
    {
        // Copy constructor: copies all four components of `other`.
    #ifdef _MSC_VER
        // Point _d at the first 16-byte-aligned address inside the raw
        // backing buffer (NOTE(review): _d_raw appears to be an over-sized
        // raw array declared in Vector.hpp -- confirm there). The named
        // reinterpret_casts replace the old C-style `(intptr_t)` cast.
        _d = reinterpret_cast<float*>(
            (reinterpret_cast<intptr_t>(_d_raw) + 15) & ~15);
    #endif

        // One aligned packed load/store pair moves all four floats.
        __m128 a = _mm_load_ps(other._d);
        _mm_store_ps(_d, a);
    }

    
    Vector<4, float>::Vector(float x, float y, float z, float w)
    {
        // Component constructor: stores (x, y, z, w) into _d[0..3].
    #ifdef _MSC_VER
        // Point _d at the first 16-byte-aligned address inside the raw
        // backing buffer (NOTE(review): _d_raw appears to be an over-sized
        // raw array declared in Vector.hpp -- confirm there). The named
        // reinterpret_casts replace the old C-style `(intptr_t)` cast.
        _d = reinterpret_cast<float*>(
            (reinterpret_cast<intptr_t>(_d_raw) + 15) & ~15);
    #endif

        _d[0] = x;
        _d[1] = y;
        _d[2] = z;
        _d[3] = w;
    }

    
    Vector<4, float>& Vector<4, float>::operator=(const Vector<4, float>& other)
    {
        // Copy assignment: one aligned packed load/store pair replaces all
        // four components with other's. Self-assignment is harmless here
        // (it just stores the same bits back). Returns *this for chaining.
        _mm_store_ps(_d, _mm_load_ps(other._d));

        return *this;
    }

    
    bool Vector<4, float>::operator==(const Vector<4, float>& other) const
    {
        // Two vectors compare equal when every pair of corresponding
        // components matches under the tolerant floating-point comparison
        // fpRelativeEqual. Bails out on the first mismatching dimension,
        // matching the short-circuit order of a chained && expression.
        for (unsigned int i = 0; i < 4; ++i)
        {
            if (!fpRelativeEqual(_d[i], other._d[i]))
                return false;
        }

        return true;
    }

    
    bool Vector<4, float>::operator!=(const Vector<4, float>& other) const
    {
        // Inequality is defined as the logical negation of operator==.
        return !(*this == other);
    }

    
    // Read-only access to component `dim` (0 = x .. 3 = w, per the
    // four-float constructor). Bounds are checked only in DEBUG builds.
    float Vector<4, float>::operator()(unsigned int dim) const
    {
    #ifdef DEBUG
        assert(dim < dimensions);
    #endif

        return _d[dim];
    }

    
    // Mutable access to component `dim` (0 = x .. 3 = w, per the
    // four-float constructor). Bounds are checked only in DEBUG builds.
    float& Vector<4, float>::operator()(unsigned int dim)
    {
    #ifdef DEBUG
        assert(dim < dimensions);
    #endif

        return _d[dim];
    }

    
    // Returns the squared Euclidean norm: x^2 + y^2 + z^2 + w^2.
    float Vector<4, float>::normSquare() const
    {
        float r = 0;

        // Square every component with a single packed multiply...
        __m128 a = _mm_load_ps(_d);
        a = _mm_mul_ps(a, a);
    #ifdef GCC_IGNORE_OLD_STYLE_CAST
        #pragma GCC diagnostic push
        #pragma GCC diagnostic ignored "-Wold-style-cast"
    #endif

    #ifdef __SSE3__
        // ...then reduce with two horizontal adds (SSE3 path).
        a = _mm_hadd_ps(a, a);
        a = _mm_hadd_ps(a, a);
    #else
        // ...or with two rotate-and-add rounds (pre-SSE3 path): after the
        // first add each lane holds a pairwise sum; after the second, every
        // lane holds the full four-way sum.
        a = _mm_add_ps(a, _mm_shuffle_ps(a, a, INNER_PROD_PERM_0));
        a = _mm_add_ps(a, _mm_shuffle_ps(a, a, INNER_PROD_PERM_1));
    #endif

    #ifdef GCC_IGNORE_OLD_STYLE_CAST
        #pragma GCC diagnostic pop
    #endif
        // Lane 0 now holds the sum; extract it as a scalar.
        _mm_store_ss(&r, a);

        return r;
    }

    
    // Returns the Euclidean norm: sqrt(x^2 + y^2 + z^2 + w^2).
    float Vector<4, float>::norm() const
    {
        float r = 0;

        // Square every component with a single packed multiply...
        __m128 a = _mm_load_ps(_d);
        a = _mm_mul_ps(a, a);
    #ifdef GCC_IGNORE_OLD_STYLE_CAST
        #pragma GCC diagnostic push
        #pragma GCC diagnostic ignored "-Wold-style-cast"
    #endif

    #ifdef __SSE3__
        // ...then reduce with two horizontal adds (SSE3 path).
        a = _mm_hadd_ps(a, a);
        a = _mm_hadd_ps(a, a);
    #else
        // ...or with two rotate-and-add rounds (pre-SSE3 path); every lane
        // ends up holding the full four-way sum.
        a = _mm_add_ps(a, _mm_shuffle_ps(a, a, INNER_PROD_PERM_0));
        a = _mm_add_ps(a, _mm_shuffle_ps(a, a, INNER_PROD_PERM_1));
    #endif

    #ifdef GCC_IGNORE_OLD_STYLE_CAST
        #pragma GCC diagnostic pop
    #endif
    #ifdef VECTOR_FAST_MATH
        // rcp(rsqrt(x)) ~= sqrt(x): two fast approximate-reciprocal
        // instructions in place of the exact (slower) square root.
        a = _mm_rcp_ss(_mm_rsqrt_ss(a));
    #else
        a = _mm_sqrt_ss(a);
    #endif
        _mm_store_ss(&r, a);

        return r;
    }

    
    Vector<4, float> Vector<4, float>::normal() const
    {
        // Returns a unit-length copy of this vector: every component
        // (including w) is divided by the Euclidean norm. Dividing a zero
        // vector yields non-finite components, per IEEE division by zero.
        Vector<4, float> result;
        float length = norm();

        const __m128 self = _mm_load_ps(_d);
        const __m128 len4 = _mm_load1_ps(&length);
    #ifdef VECTOR_FAST_MATH
        // Fast path: multiply by an approximate reciprocal of the norm.
        _mm_store_ps(result._d, _mm_mul_ps(self, _mm_rcp_ps(len4)));
    #else
        _mm_store_ps(result._d, _mm_div_ps(self, len4));
    #endif

        return result;
    }

    
    Vector<4, float>& Vector<4, float>::normalize()
    {
        // Scales this vector in place to unit length by dividing every
        // component (including w) by the Euclidean norm. Returns *this so
        // calls can be chained.
        float length = norm();

        const __m128 self = _mm_load_ps(_d);
        const __m128 len4 = _mm_load1_ps(&length);
    #ifdef VECTOR_FAST_MATH
        // Fast path: multiply by an approximate reciprocal of the norm.
        _mm_store_ps(_d, _mm_mul_ps(self, _mm_rcp_ps(len4)));
    #else
        _mm_store_ps(_d, _mm_div_ps(self, len4));
    #endif

        return *this;
    }

    
    // Returns the 4-component inner (dot) product of this vector and other.
    float Vector<4, float>::dot(const Vector<4, float>& other) const
    {
        float r = 0;

        // Multiply corresponding components with one packed multiply...
        __m128 a = _mm_load_ps(_d);
        __m128 b = _mm_load_ps(other._d);
        a = _mm_mul_ps(a, b);
    #ifdef GCC_IGNORE_OLD_STYLE_CAST
        #pragma GCC diagnostic push
        #pragma GCC diagnostic ignored "-Wold-style-cast"
    #endif

    #ifdef __SSE3__
        // ...then reduce with two horizontal adds (SSE3 path).
        a = _mm_hadd_ps(a, a);
        a = _mm_hadd_ps(a, a);
    #else
        // ...or with two rotate-and-add rounds (pre-SSE3 path); every lane
        // ends up holding the full four-way sum.
        a = _mm_add_ps(a, _mm_shuffle_ps(a, a, INNER_PROD_PERM_0));
        a = _mm_add_ps(a, _mm_shuffle_ps(a, a, INNER_PROD_PERM_1));
    #endif

    #ifdef GCC_IGNORE_OLD_STYLE_CAST
        #pragma GCC diagnostic pop
    #endif
        // Lane 0 now holds the sum; extract it as a scalar.
        _mm_store_ss(&r, a);

        return r;
    }

    
    // Returns the 3D cross product of this vector and other, computed
    // entirely in SSE registers. The SIMD arithmetic leaves 0 in the w lane
    // (a.w*b.w - a.w*b.w), so this vector's w is written back at the end.
    Vector<4, float> 
    Vector<4, float>::cross(const Vector<4, float>& other) const
    {
        // res: This could probably be optimized further. It also loses w
        //      (w^2 - w^2 = 0) by the end. Should we care? For now we
        //      write w back into the temporary.

        Vector<4, float> r;

        __m128 a = _mm_load_ps(_d);
        __m128 b = _mm_load_ps(other._d);
    #ifdef GCC_IGNORE_OLD_STYLE_CAST
        #pragma GCC diagnostic push
        #pragma GCC diagnostic ignored "-Wold-style-cast"
    #endif
        // b1 = a * (b.y, b.z, b.x, b.w), re-shuffled so its lanes are
        // (a.y*b.z, a.z*b.x, a.x*b.y, a.w*b.w) -- the positive terms.
        __m128 b1 = _mm_shuffle_ps(b, b, OUTER_PROD_PERM_0);
        b1 = _mm_mul_ps(a, b1);
        b1 = _mm_shuffle_ps(b1, b1, OUTER_PROD_PERM_0);
        // b2 = a * (b.z, b.x, b.y, b.w), re-shuffled the other way to
        // (a.z*b.y, a.x*b.z, a.y*b.x, a.w*b.w) -- the negative terms.
        __m128 b2 = _mm_shuffle_ps(b, b, OUTER_PROD_PERM_1);
        b2 = _mm_mul_ps(a, b2);
        b2 = _mm_shuffle_ps(b2, b2, OUTER_PROD_PERM_1);
    #ifdef GCC_IGNORE_OLD_STYLE_CAST
        #pragma GCC diagnostic pop
    #endif
        a = _mm_sub_ps(b1, b2);
        _mm_store_ps(r._d, a);
        // Restore this vector's w; the subtraction above zeroed that lane.
        r._d[3] = _d[3];

        return r;
    }

    
    Vector<4, float> Vector<4, float>::operator+() const
    {
        // Unary plus: yields an unmodified copy of this vector. The copy
        // constructor performs the same packed load/store as before.
        return Vector<4, float>(*this);
    }

    
    Vector<4, float> 
    Vector<4, float>::operator+(const Vector<4, float>& other) const
    {
        // Component-wise sum, computed with a single packed SSE addition.
        Vector<4, float> result;

        const __m128 lhs = _mm_load_ps(_d);
        const __m128 rhs = _mm_load_ps(other._d);
        _mm_store_ps(result._d, _mm_add_ps(lhs, rhs));

        return result;
    }

    
    Vector<4, float>& 
    Vector<4, float>::operator+=(const Vector<4, float>& other)
    {
        // In-place component-wise sum; returns *this for chaining.
        const __m128 lhs = _mm_load_ps(_d);
        const __m128 rhs = _mm_load_ps(other._d);
        _mm_store_ps(_d, _mm_add_ps(lhs, rhs));

        return *this;
    }

    
    Vector<4, float> Vector<4, float>::operator-() const
    {
        // Unary minus: negates all four components by flipping each lane's
        // sign bit. -0.0f is the float whose only set bit is the sign bit,
        // so XORing against a packed -0.0f produces exactly the same bit
        // pattern as the previous 0x80000000 integer mask -- including for
        // infinities and NaNs. Building the mask with _mm_set1_ps avoids
        // both the implementation-defined unsigned-to-int conversion in
        // _mm_set1_epi32(0x80000000) and the thread-safe initialization
        // guard that the old function-local static carried on every call.
        Vector<4, float> r;

        __m128 a = _mm_load_ps(_d);
        a = _mm_xor_ps(_mm_set1_ps(-0.0f), a);
        _mm_store_ps(r._d, a);

        return r;
    }


    Vector<4, float> 
    Vector<4, float>::operator-(const Vector<4, float>& other) const
    {
        // Component-wise difference (this minus other), computed with a
        // single packed SSE subtraction.
        Vector<4, float> result;

        const __m128 lhs = _mm_load_ps(_d);
        const __m128 rhs = _mm_load_ps(other._d);
        _mm_store_ps(result._d, _mm_sub_ps(lhs, rhs));

        return result;
    }

    
    Vector<4, float>& 
    Vector<4, float>::operator-=(const Vector<4, float>& other)
    {
        // In-place component-wise difference; returns *this for chaining.
        const __m128 lhs = _mm_load_ps(_d);
        const __m128 rhs = _mm_load_ps(other._d);
        _mm_store_ps(_d, _mm_sub_ps(lhs, rhs));

        return *this;
    }

    
    Vector<4, float> 
    Vector<4, float>::operator*(const Vector<4, float>& other) const
    {
        // Component-wise (Hadamard) product, computed with a single packed
        // SSE multiplication.
        Vector<4, float> result;

        const __m128 lhs = _mm_load_ps(_d);
        const __m128 rhs = _mm_load_ps(other._d);
        _mm_store_ps(result._d, _mm_mul_ps(lhs, rhs));

        return result;
    }

    
    Vector<4, float>& 
    Vector<4, float>::operator*=(const Vector<4, float>& other)
    {
        // In-place component-wise product; returns *this for chaining.
        const __m128 lhs = _mm_load_ps(_d);
        const __m128 rhs = _mm_load_ps(other._d);
        _mm_store_ps(_d, _mm_mul_ps(lhs, rhs));

        return *this;
    }

    
    Vector<4, float> 
    Vector<4, float>::operator/(const Vector<4, float>& other) const
    {
        // Component-wise quotient of this vector by other.
        Vector<4, float> result;

        const __m128 num = _mm_load_ps(_d);
        const __m128 den = _mm_load_ps(other._d);
    #ifdef VECTOR_FAST_MATH
        // Fast path: multiply by the approximate reciprocal of each divisor.
        _mm_store_ps(result._d, _mm_mul_ps(num, _mm_rcp_ps(den)));
    #else
        _mm_store_ps(result._d, _mm_div_ps(num, den));
    #endif

        return result;
    }

    
    Vector<4, float>& 
    Vector<4, float>::operator/=(const Vector<4, float>& other)
    {
        // In-place component-wise quotient; returns *this for chaining.
        const __m128 num = _mm_load_ps(_d);
        const __m128 den = _mm_load_ps(other._d);
    #ifdef VECTOR_FAST_MATH
        // Fast path: multiply by the approximate reciprocal of each divisor.
        _mm_store_ps(_d, _mm_mul_ps(num, _mm_rcp_ps(den)));
    #else
        _mm_store_ps(_d, _mm_div_ps(num, den));
    #endif

        return *this;
    }
#endif
}