#include "site_dslash_32bit_scalar.h"

#ifdef __cplusplus
extern "C" { 
#endif

#include "sse_align.h" 
#include <xmmintrin.h>

  /* Overlay of four 32-bit integer masks on one 128-bit SSE register.
     Lets us build bit patterns (sign masks) with integer literals and
     then feed them to _mm_xor_ps() as a __m128.  Type-punning through
     a union is the conventional (GCC-supported) way to do this. */
  typedef union { 
    unsigned int a[4];
    __m128 vector;
  } SSESign;

  /* Sign-flip masks for use with _mm_xor_ps(): xor-ing a float with
     0x80000000 flips its IEEE-754 sign bit, i.e. negates it.
     Naming convention: signsNM negates the Nth and Mth lanes counted
     1-based from the low end (so signs13 negates lanes 0 and 2).
     These implement the +/-i and +/- factors of the gamma-matrix spin
     projections used by the dslash kernels below. */
  static SSESign signs13 __attribute__((unused)) ALIGN = {{ 0x80000000, 0x00000000, 0x80000000, 0x00000000 }};
  static SSESign signs12 __attribute__((unused)) ALIGN = {{ 0x80000000, 0x80000000, 0x00000000, 0x00000000 }};
  static SSESign signs14 __attribute__((unused)) ALIGN = {{ 0x80000000, 0x00000000, 0x00000000, 0x80000000 }};
  static SSESign signs24 __attribute__((unused)) ALIGN = {{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 }};
  static SSESign signs34 __attribute__((unused)) ALIGN = {{ 0x00000000, 0x00000000, 0x80000000, 0x80000000 }};
  static SSESign signs23 __attribute__((unused)) ALIGN = {{ 0x00000000, 0x80000000, 0x80000000, 0x00000000 }};
  

  /* dslash_plus_dir0_forward:
     First contribution of the "plus" dslash, direction 0, forward neighbour.
     Spin-projects spinor_in with (1 - gamma0), multiplies the resulting
     halfspinor by the gauge link u, and INITIALISES (overwrites, does not
     accumulate into) upper_sum and lower_sum with the result.

     Layout (as evidenced by the loads/stores below):
       spinor_in[spin][color][reim]  - 2 floats (re,im) per color component;
       u[row][col][reim]             - 3x3 complex matrix;
       upper_sum/lower_sum           - one color row is 4 contiguous,
                                       16-byte-aligned floats holding two
                                       spin components (re0,im0,re1,im1).
     NOTE(review): exact typedefs live in site_dslash_32bit_scalar.h --
     the shapes above are inferred from the indexing; confirm there. */
  void dslash_plus_dir0_forward(spinor_array spinor_in,
				u_mat_array u,
				halfspinor_array  upper_sum,
				halfspinor_array  lower_sum   )

  {
    /* Zero-initialise: _mm_loadl_pi()/_mm_loadh_pi() MERGE into their
       first operand, so the original code read uninitialised __m128
       values (undefined behaviour, and a -Wmaybe-uninitialized warning).
       The merged halves are all overwritten before use, so zeroing is
       behaviour-neutral and makes the reads well defined. */
    __m128 xmm0 ALIGN = _mm_setzero_ps();
    __m128 xmm1 ALIGN = _mm_setzero_ps();
    __m128 xmm2 ALIGN = _mm_setzero_ps();
    __m128 xmm3 ALIGN = _mm_setzero_ps();
    __m128 xmm4 ALIGN = _mm_setzero_ps();
    __m128 xmm5 ALIGN = _mm_setzero_ps();
    __m128 xmm6 ALIGN = _mm_setzero_ps();
    __m128 xmm7 ALIGN = _mm_setzero_ps();

    /* Spin component 0 into the low 2 floats (one complex per color) */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Spin component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Spin component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Spin component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma0 minus projection: reverse the four lanes (0x1b = 0,1,2,3 ->
       3,2,1,0) and flip signs of lanes 2,4 (signs24), then add to the
       upper spin components -- this is (1 - gamma0) psi in this basis. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x1b);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x1b);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0x1b);

    xmm3 = _mm_xor_ps(signs24.vector, xmm3);
    xmm4 = _mm_xor_ps(signs24.vector, xmm4);
    xmm5 = _mm_xor_ps(signs24.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* SU3 Multiply: complex 3x3 matrix times the two-spinor held in
       xmm0-xmm2.  Real parts of u are broadcast and multiplied first;
       then the inputs are swizzled (0xb1 swaps re<->im in each pair)
       and sign-flipped (signs13) so the imaginary parts contribute with
       the correct signs.  Result rows land in xmm3, xmm4, xmm5. */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[1][0][0]);
    xmm4 = _mm_load_ss(&u[0][1][0]);
    xmm7 = _mm_load_ss(&u[2][1][0]);
    xmm5 = _mm_load_ss(&u[0][2][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][2][0]);
    xmm7 = _mm_load_ss(&u[2][0][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* Imaginary parts of u from here on */
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs13.vector, xmm0);
    xmm1 = _mm_xor_ps(signs13.vector, xmm1);
    xmm2 = _mm_xor_ps(signs13.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[0][1][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][0][1] );
    xmm7 = _mm_load_ss(&u[0][2][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    xmm0 = _mm_load_ss( &u[2][0][1] );
    xmm6 = _mm_load_ss( &u[1][2][1] );
    xmm7 = _mm_load_ss( &u[2][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Reconstruction: Upper components just go.
       Aligned movaps stores (this function initialises the sums;
       subsequent *_add routines accumulate into them). */
    _mm_store_ps(&upper_sum[0][0][0], xmm3);
    _mm_store_ps(&upper_sum[1][0][0], xmm4);
    _mm_store_ps(&upper_sum[2][0][0], xmm5);
    
    /* Lower components: inverse of the projection swizzle (reverse
       lanes, flip signs 1,3) reconstructs the lower two spins. */
    xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0x1b);
    xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0x1b);
    xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0x1b);

    xmm3 = _mm_xor_ps(signs13.vector, xmm3);
    xmm4 = _mm_xor_ps(signs13.vector, xmm4);
    xmm5 = _mm_xor_ps(signs13.vector, xmm5);

    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm3);
    _mm_store_ps(&lower_sum[1][0][0], xmm4);
    _mm_store_ps(&lower_sum[2][0][0], xmm5);
  }

  /* dslash_plus_dir0_backward_add:
     "Plus" dslash contribution from the direction-0 backward neighbour.
     Spin-projects spinor_in with (1 + gamma0), multiplies by the ADJOINT
     (dagger) of the gauge link u, and ACCUMULATES the result into
     upper_sum / lower_sum (load-add-store).  Must run after a *_forward
     routine has initialised the sums. */
  void dslash_plus_dir0_backward_add( spinor_array spinor_in,
				      u_mat_array u,
				      halfspinor_array  upper_sum,
				      halfspinor_array  lower_sum   )

  {
    /* Zero-init: the merge loads (_mm_loadl_pi/_mm_loadh_pi) below read
       their first operand; without this the reads are undefined. */
    __m128 xmm0 ALIGN = _mm_setzero_ps();
    __m128 xmm1 ALIGN = _mm_setzero_ps();
    __m128 xmm2 ALIGN = _mm_setzero_ps();
    __m128 xmm3 ALIGN = _mm_setzero_ps();
    __m128 xmm4 ALIGN = _mm_setzero_ps();
    __m128 xmm5 ALIGN = _mm_setzero_ps();
    __m128 xmm6 ALIGN = _mm_setzero_ps();
    __m128 xmm7 ALIGN = _mm_setzero_ps();

    /* Spin component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Spin component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Spin component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Spin component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma0 plus projection: reverse lanes, flip signs 1,3
       (opposite sign mask to the forward/minus projection). */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x1b);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x1b);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0x1b);

    xmm3 = _mm_xor_ps(signs13.vector, xmm3);
    xmm4 = _mm_xor_ps(signs13.vector, xmm4);
    xmm5 = _mm_xor_ps(signs13.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* Adj SU(3) multiply: same scheme as the forward SU3 multiply but
       with u transposed (row/col indices swapped in the loads) and the
       imaginary parts entering with conjugate signs (signs24). */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[0][1][0]);
    xmm4 = _mm_load_ss(&u[1][0][0]);
    xmm7 = _mm_load_ss(&u[1][2][0]);
    xmm5 = _mm_load_ss(&u[2][0][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[2][1][0]);
    xmm7 = _mm_load_ss(&u[0][2][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* Imaginary parts of u (conjugated via signs24) from here on */
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs24.vector, xmm0);
    xmm1 = _mm_xor_ps(signs24.vector, xmm1);
    xmm2 = _mm_xor_ps(signs24.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[1][0][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[0][1][1] );
    xmm7 = _mm_load_ss(&u[2][0][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    xmm0 = _mm_load_ss( &u[0][2][1] );
    xmm6 = _mm_load_ss( &u[2][1][1] );
    xmm7 = _mm_load_ss( &u[1][2][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Result in xmm3,4,5 */
    /* END MVV */

    /* Reconstruction */

    /* Load up upper partial sum and accumulate the upper components */
    xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
    xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
    xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);
  
    _mm_store_ps(&upper_sum[0][0][0], xmm0);
    _mm_store_ps(&upper_sum[1][0][0], xmm1);
    _mm_store_ps(&upper_sum[2][0][0], xmm2);

    /* Lower components: apply the inverse projection swizzle
       (reverse lanes, signs24) and accumulate into lower_sum. */
    xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
    xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
    xmm2 = _mm_load_ps(&lower_sum[2][0][0]);

    xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0x1b);
    xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0x1b);
    xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0x1b);

    xmm3 = _mm_xor_ps(signs24.vector, xmm3);
    xmm4 = _mm_xor_ps(signs24.vector, xmm4);
    xmm5 = _mm_xor_ps(signs24.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);
  
    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm0);
    _mm_store_ps(&lower_sum[1][0][0], xmm1);
    _mm_store_ps(&lower_sum[2][0][0], xmm2);
  }


  /* dslash_plus_dir1_forward_add:
     "Plus" dslash contribution from the direction-1 forward neighbour.
     Spin-projects spinor_in with (1 - gamma1), multiplies by the gauge
     link u, and ACCUMULATES into upper_sum / lower_sum. */
  void dslash_plus_dir1_forward_add( spinor_array  spinor_in,
				     u_mat_array  u,
				     halfspinor_array  upper_sum,
				     halfspinor_array  lower_sum   )
  {
    /* Zero-init: the merge loads (_mm_loadl_pi/_mm_loadh_pi) below read
       their first operand; without this the reads are undefined. */
    __m128 xmm0 ALIGN = _mm_setzero_ps();
    __m128 xmm1 ALIGN = _mm_setzero_ps();
    __m128 xmm2 ALIGN = _mm_setzero_ps();
    __m128 xmm3 ALIGN = _mm_setzero_ps();
    __m128 xmm4 ALIGN = _mm_setzero_ps();
    __m128 xmm5 ALIGN = _mm_setzero_ps();
    __m128 xmm6 ALIGN = _mm_setzero_ps();
    __m128 xmm7 ALIGN = _mm_setzero_ps();

    /* Spin component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Spin component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Spin component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Spin component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma1 minus projection: swap complex pairs (0x4e = lanes 2,3,0,1)
       and flip signs 3,4. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x4e);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x4e);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0x4e);

    xmm3 = _mm_xor_ps(signs34.vector, xmm3);
    xmm4 = _mm_xor_ps(signs34.vector, xmm4);
    xmm5 = _mm_xor_ps(signs34.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* SU3 Multiply (identical instruction schedule to the dir-0 forward
       routine): result rows land in xmm3, xmm4, xmm5. */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[1][0][0]);
    xmm4 = _mm_load_ss(&u[0][1][0]);
    xmm7 = _mm_load_ss(&u[2][1][0]);
    xmm5 = _mm_load_ss(&u[0][2][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][2][0]);
    xmm7 = _mm_load_ss(&u[2][0][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* Imaginary parts of u from here on */
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs13.vector, xmm0);
    xmm1 = _mm_xor_ps(signs13.vector, xmm1);
    xmm2 = _mm_xor_ps(signs13.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[0][1][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][0][1] );
    xmm7 = _mm_load_ss(&u[0][2][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    xmm0 = _mm_load_ss( &u[2][0][1] );
    xmm6 = _mm_load_ss( &u[1][2][1] );
    xmm7 = _mm_load_ss( &u[2][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Reconstruction: accumulate upper components into upper_sum */
    xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
    xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
    xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    _mm_store_ps(&upper_sum[0][0][0], xmm0);
    _mm_store_ps(&upper_sum[1][0][0], xmm1);
    _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
    /* Lower components: gamma1-minus reconstruction
       (swap complex pairs, flip signs 1,2), then accumulate. */
    xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
    xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
    xmm2 = _mm_load_ps(&lower_sum[2][0][0]);

    xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0x4e);
    xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0x4e);
    xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0x4e);

    xmm3 = _mm_xor_ps(signs12.vector, xmm3);
    xmm4 = _mm_xor_ps(signs12.vector, xmm4);
    xmm5 = _mm_xor_ps(signs12.vector, xmm5);

    /* Accumulate */
    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm0);
    _mm_store_ps(&lower_sum[1][0][0], xmm1);
    _mm_store_ps(&lower_sum[2][0][0], xmm2);
  }

  /* dslash_plus_dir1_backward_add:
     "Plus" dslash contribution from the direction-1 backward neighbour.
     Spin-projects spinor_in with (1 + gamma1), multiplies by the ADJOINT
     of the gauge link u, and ACCUMULATES into upper_sum / lower_sum. */
  void dslash_plus_dir1_backward_add( spinor_array  spinor_in,
				      u_mat_array  u,
				      halfspinor_array  upper_sum,
				      halfspinor_array  lower_sum   )
  {
    /* Zero-init: the merge loads (_mm_loadl_pi/_mm_loadh_pi) below read
       their first operand; without this the reads are undefined. */
    __m128 xmm0 ALIGN = _mm_setzero_ps();
    __m128 xmm1 ALIGN = _mm_setzero_ps();
    __m128 xmm2 ALIGN = _mm_setzero_ps();
    __m128 xmm3 ALIGN = _mm_setzero_ps();
    __m128 xmm4 ALIGN = _mm_setzero_ps();
    __m128 xmm5 ALIGN = _mm_setzero_ps();
    __m128 xmm6 ALIGN = _mm_setzero_ps();
    __m128 xmm7 ALIGN = _mm_setzero_ps();

    /* Spin component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Spin component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Spin component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Spin component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma1 plus projection: swap complex pairs (0x4e) and flip
       signs 1,2 (opposite mask to the minus projection). */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x4e);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x4e);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0x4e);

    xmm3 = _mm_xor_ps(signs12.vector, xmm3);
    xmm4 = _mm_xor_ps(signs12.vector, xmm4);
    xmm5 = _mm_xor_ps(signs12.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* Adj SU(3) multiply (same schedule as the dir-0 backward routine):
       u transposed in the loads, imaginary parts conjugated (signs24). */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[0][1][0]);
    xmm4 = _mm_load_ss(&u[1][0][0]);
    xmm7 = _mm_load_ss(&u[1][2][0]);
    xmm5 = _mm_load_ss(&u[2][0][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[2][1][0]);
    xmm7 = _mm_load_ss(&u[0][2][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* Imaginary parts of u (conjugated via signs24) from here on */
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs24.vector, xmm0);
    xmm1 = _mm_xor_ps(signs24.vector, xmm1);
    xmm2 = _mm_xor_ps(signs24.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[1][0][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[0][1][1] );
    xmm7 = _mm_load_ss(&u[2][0][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    xmm0 = _mm_load_ss( &u[0][2][1] );
    xmm6 = _mm_load_ss( &u[2][1][1] );
    xmm7 = _mm_load_ss( &u[1][2][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Result in xmm3,4,5 */
    /* END MVV */

    /* Reconstruction */

    /* Load up upper partial sum and accumulate the upper components */
    xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
    xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
    xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);
  
    _mm_store_ps(&upper_sum[0][0][0], xmm0);
    _mm_store_ps(&upper_sum[1][0][0], xmm1);
    _mm_store_ps(&upper_sum[2][0][0], xmm2);

    /* Lower components: gamma1-plus reconstruction
       (swap complex pairs, flip signs 3,4), then accumulate. */
    xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
    xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
    xmm2 = _mm_load_ps(&lower_sum[2][0][0]);

    xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0x4e);
    xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0x4e);
    xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0x4e);

    xmm3 = _mm_xor_ps(signs34.vector, xmm3);
    xmm4 = _mm_xor_ps(signs34.vector, xmm4);
    xmm5 = _mm_xor_ps(signs34.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);
  
    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm0);
    _mm_store_ps(&lower_sum[1][0][0], xmm1);
    _mm_store_ps(&lower_sum[2][0][0], xmm2);
  }

  /* dslash_plus_dir2_forward_add:
     "Plus" dslash contribution from the direction-2 forward neighbour.
     Spin-projects spinor_in with (1 - gamma2), multiplies by the gauge
     link u, and ACCUMULATES into upper_sum / lower_sum.
     (The original comments here said "gamma1" -- a copy-paste leftover
     from the dir-1 routine; the masks/swizzles are the dir-2 ones.) */
  void dslash_plus_dir2_forward_add( spinor_array  spinor_in,
				     u_mat_array  u,
				     halfspinor_array  upper_sum,
				     halfspinor_array  lower_sum   )
  {
    /* Zero-init: the merge loads (_mm_loadl_pi/_mm_loadh_pi) below read
       their first operand; without this the reads are undefined. */
    __m128 xmm0 ALIGN = _mm_setzero_ps();
    __m128 xmm1 ALIGN = _mm_setzero_ps();
    __m128 xmm2 ALIGN = _mm_setzero_ps();
    __m128 xmm3 ALIGN = _mm_setzero_ps();
    __m128 xmm4 ALIGN = _mm_setzero_ps();
    __m128 xmm5 ALIGN = _mm_setzero_ps();
    __m128 xmm6 ALIGN = _mm_setzero_ps();
    __m128 xmm7 ALIGN = _mm_setzero_ps();

    /* Spin component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Spin component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Spin component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Spin component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma2 minus projection: swap re<->im within each complex pair
       (0xb1) and flip signs 2,3. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0xb1);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0xb1);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0xb1);

    xmm3 = _mm_xor_ps(signs23.vector, xmm3);
    xmm4 = _mm_xor_ps(signs23.vector, xmm4);
    xmm5 = _mm_xor_ps(signs23.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* SU3 Multiply (identical instruction schedule to the dir-0/dir-1
       forward routines): result rows land in xmm3, xmm4, xmm5. */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[1][0][0]);
    xmm4 = _mm_load_ss(&u[0][1][0]);
    xmm7 = _mm_load_ss(&u[2][1][0]);
    xmm5 = _mm_load_ss(&u[0][2][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][2][0]);
    xmm7 = _mm_load_ss(&u[2][0][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* Imaginary parts of u from here on */
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs13.vector, xmm0);
    xmm1 = _mm_xor_ps(signs13.vector, xmm1);
    xmm2 = _mm_xor_ps(signs13.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[0][1][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][0][1] );
    xmm7 = _mm_load_ss(&u[0][2][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    xmm0 = _mm_load_ss( &u[2][0][1] );
    xmm6 = _mm_load_ss( &u[1][2][1] );
    xmm7 = _mm_load_ss( &u[2][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Reconstruction: accumulate upper components into upper_sum */
    xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
    xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
    xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    _mm_store_ps(&upper_sum[0][0][0], xmm0);
    _mm_store_ps(&upper_sum[1][0][0], xmm1);
    _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
    /* Lower components: gamma2-minus reconstruction
       (swap re<->im, flip signs 1,4), then accumulate. */
    xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
    xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
    xmm2 = _mm_load_ps(&lower_sum[2][0][0]);

    xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0xb1);
    xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0xb1);
    xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0xb1);

    xmm3 = _mm_xor_ps(signs14.vector, xmm3);
    xmm4 = _mm_xor_ps(signs14.vector, xmm4);
    xmm5 = _mm_xor_ps(signs14.vector, xmm5);

    /* Accumulate */
    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm0);
    _mm_store_ps(&lower_sum[1][0][0], xmm1);
    _mm_store_ps(&lower_sum[2][0][0], xmm2);
  }

  void dslash_plus_dir2_backward_add( spinor_array  spinor_in,
				      u_mat_array  u,
				      halfspinor_array  upper_sum,
				      halfspinor_array  lower_sum   )
  {
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;



    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma1 plus projection */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0xb1);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0xb1);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0xb1);

    xmm3 = _mm_xor_ps(signs14.vector, xmm3);
    xmm4 = _mm_xor_ps(signs14.vector, xmm4);
    xmm5 = _mm_xor_ps(signs14.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* Adj SU(3) multiply */

  xmm3 = _mm_load_ss(&u[0][0][0]);
  xmm6 = _mm_load_ss(&u[0][1][0]);
  xmm4 = _mm_load_ss(&u[1][0][0]);
  xmm7 = _mm_load_ss(&u[1][2][0]);
  xmm5 = _mm_load_ss(&u[2][0][0]);
  xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
  xmm3 = _mm_mul_ps(xmm0,xmm3);
  xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
  xmm6 = _mm_mul_ps(xmm1,xmm6);
  xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
  xmm4 = _mm_mul_ps(xmm0, xmm4);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_mul_ps(xmm0, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[2][1][0]);
  xmm7 = _mm_load_ss(&u[0][2][0]);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm3 = _mm_add_ps(xmm7, xmm3);
  xmm6 = _mm_load_ss(&u[1][1][0]);
  xmm7 = _mm_load_ss(&u[2][2][0]);
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm4 = _mm_add_ps(xmm6, xmm4);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  xmm6 = _mm_load_ss( &u[0][0][1] );
  xmm7 = _mm_load_ss( &u[1][1][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
  xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
  xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
  xmm0 = _mm_xor_ps(signs24.vector, xmm0);
  xmm1 = _mm_xor_ps(signs24.vector, xmm1);
  xmm2 = _mm_xor_ps(signs24.vector, xmm2);
  xmm6 = _mm_mul_ps(xmm0,xmm6);
  xmm7 = _mm_mul_ps(xmm1,xmm7);
  xmm3 = _mm_add_ps(xmm6,xmm3);
  xmm4 = _mm_add_ps(xmm7,xmm4);
  xmm6 = _mm_load_ss( &u[2][2][1] );
  xmm7 = _mm_load_ss( &u[1][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm2, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[0][1][1] );
  xmm7 = _mm_load_ss(&u[2][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  xmm0 = _mm_load_ss( &u[0][2][1] );
  xmm6 = _mm_load_ss( &u[2][1][1] );
  xmm7 = _mm_load_ss( &u[1][2][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm0 = _mm_mul_ps(xmm2, xmm0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm3 = _mm_add_ps(xmm0, xmm3);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);

  /* Result in      xmm3,4,5 */
  /* END MVV */

  /* Reconstruction */

  /* Load up upper partial sum */
  xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
  xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
  xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

  /* Add upper component */
  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  _mm_store_ps(&upper_sum[0][0][0], xmm0);
  _mm_store_ps(&upper_sum[1][0][0], xmm1);
  _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
  

  /* Lower components - do projection */
  xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
  xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
  xmm2 = _mm_load_ps(&lower_sum[2][0][0]);
  

  xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0xb1);
  xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0xb1);
  xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0xb1);

  xmm3 = _mm_xor_ps(signs23.vector, xmm3);
  xmm4 = _mm_xor_ps(signs23.vector, xmm4);
  xmm5 = _mm_xor_ps(signs23.vector, xmm5);

  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Store */
  _mm_store_ps(&lower_sum[0][0][0], xmm0);
  _mm_store_ps(&lower_sum[1][0][0], xmm1);
  _mm_store_ps(&lower_sum[2][0][0], xmm2);


  }


  /* Accumulate the backward mu=2 neighbour's contribution and emit the
   * finished site: spin-project spinor_in onto a half spinor, multiply by
   * the adjoint (dagger) of the link matrix u, add the result to the
   * running upper/lower partial sums, and store the completed 4-spinor
   * into spinor_out (this variant stores instead of writing the sums back,
   * so it is the last direction processed for the site).
   *
   * Register layout: each __m128 holds two complex numbers as
   * (re0, im0, re1, im1).  xmm0..xmm2 carry the three colors of the
   * projected half spinor; xmm3..xmm5 accumulate the matrix product.
   *
   * NOTE(review): assumes spinor_array is indexed [spin][color][re/im],
   * u_mat_array [row][col][re/im], and halfspinor_array is 16-byte
   * aligned (required by _mm_load_ps below) -- confirm against the
   * typedefs in site_dslash_32bit_scalar.h. */
  void dslash_plus_dir2_backward_add_store( spinor_array  spinor_in,
					    u_mat_array  u,
					    halfspinor_array  upper_sum,
					    halfspinor_array  lower_sum,
					    spinor_array spinor_out)
  {
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;



    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* NOTE(review): the original comment here read "gamma1 plus
       projection", which looks like a stale copy-paste (this routine is
       dir2).  Concretely: shuffle 0xb1 swaps re<->im within each complex
       pair, and signs14 negates words 0 and 3, so spin component 2 is
       multiplied by +i and component 3 by -i before being added to
       components 0 and 1. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0xb1);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0xb1);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0xb1);

    xmm3 = _mm_xor_ps(signs14.vector, xmm3);
    xmm4 = _mm_xor_ps(signs14.vector, xmm4);
    xmm5 = _mm_xor_ps(signs14.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* Adj SU(3) multiply */

  /* Phase 1: real parts of u (index [..][..][0]), broadcast with
     shuffle 0x0 and multiplied into all four lanes.  The u indices are
     transposed relative to the forward (non-adjoint) routines. */
  xmm3 = _mm_load_ss(&u[0][0][0]);
  xmm6 = _mm_load_ss(&u[0][1][0]);
  xmm4 = _mm_load_ss(&u[1][0][0]);
  xmm7 = _mm_load_ss(&u[1][2][0]);
  xmm5 = _mm_load_ss(&u[2][0][0]);
  xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
  xmm3 = _mm_mul_ps(xmm0,xmm3);
  xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
  xmm6 = _mm_mul_ps(xmm1,xmm6);
  xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
  xmm4 = _mm_mul_ps(xmm0, xmm4);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_mul_ps(xmm0, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[2][1][0]);
  xmm7 = _mm_load_ss(&u[0][2][0]);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm3 = _mm_add_ps(xmm7, xmm3);
  xmm6 = _mm_load_ss(&u[1][1][0]);
  xmm7 = _mm_load_ss(&u[2][2][0]);
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm4 = _mm_add_ps(xmm6, xmm4);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* Phase 2: imaginary parts of u (index [..][..][1]).  The source
     vector is re/im-swapped (0xb1) and signs24 negates the swapped
     imaginary slots, so each product contributes psi * conj(u_ij):
     combined with the transposed indices above this is u^dagger * psi. */
  xmm6 = _mm_load_ss( &u[0][0][1] );
  xmm7 = _mm_load_ss( &u[1][1][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
  xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
  xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
  xmm0 = _mm_xor_ps(signs24.vector, xmm0);
  xmm1 = _mm_xor_ps(signs24.vector, xmm1);
  xmm2 = _mm_xor_ps(signs24.vector, xmm2);
  xmm6 = _mm_mul_ps(xmm0,xmm6);
  xmm7 = _mm_mul_ps(xmm1,xmm7);
  xmm3 = _mm_add_ps(xmm6,xmm3);
  xmm4 = _mm_add_ps(xmm7,xmm4);
  xmm6 = _mm_load_ss( &u[2][2][1] );
  xmm7 = _mm_load_ss( &u[1][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm2, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[0][1][1] );
  xmm7 = _mm_load_ss(&u[2][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* xmm0 is dead as a source after this point, so it is reused as a
     scratch register for the last three imaginary entries. */
  xmm0 = _mm_load_ss( &u[0][2][1] );
  xmm6 = _mm_load_ss( &u[2][1][1] );
  xmm7 = _mm_load_ss( &u[1][2][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm0 = _mm_mul_ps(xmm2, xmm0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm3 = _mm_add_ps(xmm0, xmm3);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);

  /* Result in      xmm3,4,5 */
  /* END MVV */

  /* Reconstruction */

  /* Load up upper partial sum */
  xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
  xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
  xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

  /* Add upper component */
  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Was */
  /*
  _mm_store_ps(&upper_sum[0][0][0], xmm0);
  _mm_store_ps(&upper_sum[1][0][0], xmm1);
  _mm_store_ps(&upper_sum[2][0][0], xmm2);
  */

  /* Pair store: spin components 0 and 1 of the output spinor come from
     the low/high halves of each color register. */
  _mm_storel_pi((__m64 *)&spinor_out[0][0][0], xmm0);
  _mm_storel_pi((__m64 *)&spinor_out[0][1][0], xmm1);
  _mm_storel_pi((__m64 *)&spinor_out[0][2][0], xmm2);
  _mm_storeh_pi((__m64 *)&spinor_out[1][0][0], xmm0);
  _mm_storeh_pi((__m64 *)&spinor_out[1][1][0], xmm1);
  _mm_storeh_pi((__m64 *)&spinor_out[1][2][0], xmm2);

  

  /* Lower components - do projection */
  xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
  xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
  xmm2 = _mm_load_ps(&lower_sum[2][0][0]);
  

  /* Reconstruct the lower half from the MVV result: re/im swap (0xb1)
     plus signs23 (negate words 1,2) multiplies the first complex by -i
     and the second by +i -- the inverse of the projection above. */
  xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0xb1);
  xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0xb1);
  xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0xb1);

  xmm3 = _mm_xor_ps(signs23.vector, xmm3);
  xmm4 = _mm_xor_ps(signs23.vector, xmm4);
  xmm5 = _mm_xor_ps(signs23.vector, xmm5);

  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  
  /* Store */
  /* Was */
  /*
  _mm_store_ps(&lower_sum[0][0][0], xmm0);
  _mm_store_ps(&lower_sum[1][0][0], xmm1);
  _mm_store_ps(&lower_sum[2][0][0], xmm2);
  */
  /* Store spin components 2 and 3 of the finished spinor. */
  _mm_storel_pi((__m64 *)&spinor_out[2][0][0], xmm0);
  _mm_storel_pi((__m64 *)&spinor_out[2][1][0], xmm1);
  _mm_storel_pi((__m64 *)&spinor_out[2][2][0], xmm2);
  _mm_storeh_pi((__m64 *)&spinor_out[3][0][0], xmm0);
  _mm_storeh_pi((__m64 *)&spinor_out[3][1][0], xmm1);
  _mm_storeh_pi((__m64 *)&spinor_out[3][2][0], xmm2);


  }


  /* Accumulate the forward mu=3 neighbour's contribution: spin-project
   * spinor_in onto a half spinor (here a plain subtraction of the lower
   * components from the upper ones), multiply by the link matrix u
   * (non-adjoint), and add/subtract the result into the running
   * upper_sum / lower_sum partial sums.
   *
   * Register layout: each __m128 holds two complex numbers as
   * (re0, im0, re1, im1); xmm0..xmm2 are the three colors of the
   * projected half spinor, xmm3..xmm5 accumulate the matrix product.
   *
   * NOTE(review): assumes spinor_array is [spin][color][re/im],
   * u_mat_array [row][col][re/im], and the half-spinor sums are 16-byte
   * aligned (needed by _mm_load_ps/_mm_store_ps) -- confirm against the
   * header. */
  void dslash_plus_dir3_forward_add( spinor_array  spinor_in,
				     u_mat_array  u,
				     halfspinor_array  upper_sum,
				     halfspinor_array  lower_sum)
  {
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;

   
    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* For this direction the projection needs no shuffle or sign mask:
       upper components minus lower components, lane for lane. */
    xmm0 = _mm_sub_ps(xmm0, xmm3);
    xmm1 = _mm_sub_ps(xmm1, xmm4);
    xmm2 = _mm_sub_ps(xmm2, xmm5);


    /* SU3 Multiply */
    /* Phase 1: real parts of u (index [..][..][0]), broadcast to all
       four lanes with shuffle 0x0. */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[1][0][0]);
    xmm4 = _mm_load_ss(&u[0][1][0]);
    xmm7 = _mm_load_ss(&u[2][1][0]);
    xmm5 = _mm_load_ss(&u[0][2][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][2][0]);
    xmm7 = _mm_load_ss(&u[2][0][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* Phase 2: imaginary parts of u (index [..][..][1]).  Shuffle 0xb1
       swaps re<->im in each complex and signs13 negates words 0,2, i.e.
       the projected spinor is multiplied by i before multiplying by the
       imaginary u entries -- the standard complex-multiply pairing. */
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs13.vector, xmm0);
    xmm1 = _mm_xor_ps(signs13.vector, xmm1);
    xmm2 = _mm_xor_ps(signs13.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[0][1][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][0][1] );
    xmm7 = _mm_load_ss(&u[0][2][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* xmm0 is dead as a source here, reused as scratch. */
    xmm0 = _mm_load_ss( &u[2][0][1] );
    xmm6 = _mm_load_ss( &u[1][2][1] );
    xmm7 = _mm_load_ss( &u[2][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Reconstruction: Upper components just go
       Reshuffle spin and color indices so we can use movaps
       to store - it is aligned and faster... */
    xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
    xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
    xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    _mm_store_ps(&upper_sum[0][0][0], xmm0);
    _mm_store_ps(&upper_sum[1][0][0], xmm1);
    _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
    /* Lower components - do projection */
    xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
    xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
    xmm2 = _mm_load_ps(&lower_sum[2][0][0]);

    /* Accumulate: lower components get minus the MVV result, mirroring
       the subtraction used in the projection above. */
    xmm0 = _mm_sub_ps(xmm0, xmm3);
    xmm1 = _mm_sub_ps(xmm1, xmm4);
    xmm2 = _mm_sub_ps(xmm2, xmm5);

    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm0);
    _mm_store_ps(&lower_sum[1][0][0], xmm1);
    _mm_store_ps(&lower_sum[2][0][0], xmm2);



  }

  /* Accumulate the backward mu=3 neighbour's contribution and store the
   * finished site.  Projection here is a plain addition of lower onto
   * upper components; the half spinor is then multiplied by the adjoint
   * (dagger) of the link matrix u, added to upper_sum/lower_sum, and the
   * completed 4-spinor is written to spinor_out (last direction, so the
   * sums are consumed rather than written back).
   *
   * Register layout: each __m128 is two complexes (re0, im0, re1, im1);
   * xmm0..xmm2 hold the projected half spinor, xmm3..xmm5 the product.
   *
   * NOTE(review): assumes spinor_array is [spin][color][re/im],
   * u_mat_array [row][col][re/im], half-spinor sums 16-byte aligned
   * (required by _mm_load_ps) -- confirm against the header. */
  void dslash_plus_dir3_backward_add_store( spinor_array  spinor_in,
					    u_mat_array  u,
					    halfspinor_array  upper_sum,
					    halfspinor_array  lower_sum,
					    spinor_array spinor_out)

  {
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;



    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* Backward dir3: upper plus lower, lane for lane -- no shuffle or
       sign mask needed (the conjugate of the forward subtraction). */
    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* Adj SU(3) multiply */

  /* Phase 1: real parts of u, broadcast (shuffle 0x0); indices are
     transposed relative to the forward routine (adjoint). */
  xmm3 = _mm_load_ss(&u[0][0][0]);
  xmm6 = _mm_load_ss(&u[0][1][0]);
  xmm4 = _mm_load_ss(&u[1][0][0]);
  xmm7 = _mm_load_ss(&u[1][2][0]);
  xmm5 = _mm_load_ss(&u[2][0][0]);
  xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
  xmm3 = _mm_mul_ps(xmm0,xmm3);
  xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
  xmm6 = _mm_mul_ps(xmm1,xmm6);
  xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
  xmm4 = _mm_mul_ps(xmm0, xmm4);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_mul_ps(xmm0, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[2][1][0]);
  xmm7 = _mm_load_ss(&u[0][2][0]);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm3 = _mm_add_ps(xmm7, xmm3);
  xmm6 = _mm_load_ss(&u[1][1][0]);
  xmm7 = _mm_load_ss(&u[2][2][0]);
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm4 = _mm_add_ps(xmm6, xmm4);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* Phase 2: imaginary parts.  0xb1 swaps re<->im per complex and
     signs24 negates words 1,3, so each product contributes
     psi * conj(u_ij): together with the transposed indices this is
     u^dagger * psi. */
  xmm6 = _mm_load_ss( &u[0][0][1] );
  xmm7 = _mm_load_ss( &u[1][1][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
  xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
  xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
  xmm0 = _mm_xor_ps(signs24.vector, xmm0);
  xmm1 = _mm_xor_ps(signs24.vector, xmm1);
  xmm2 = _mm_xor_ps(signs24.vector, xmm2);
  xmm6 = _mm_mul_ps(xmm0,xmm6);
  xmm7 = _mm_mul_ps(xmm1,xmm7);
  xmm3 = _mm_add_ps(xmm6,xmm3);
  xmm4 = _mm_add_ps(xmm7,xmm4);
  xmm6 = _mm_load_ss( &u[2][2][1] );
  xmm7 = _mm_load_ss( &u[1][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm2, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[0][1][1] );
  xmm7 = _mm_load_ss(&u[2][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* xmm0 dead as a source from here on; reused as scratch. */
  xmm0 = _mm_load_ss( &u[0][2][1] );
  xmm6 = _mm_load_ss( &u[2][1][1] );
  xmm7 = _mm_load_ss( &u[1][2][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm0 = _mm_mul_ps(xmm2, xmm0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm3 = _mm_add_ps(xmm0, xmm3);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);

  /* Result in      xmm3,4,5 */
  /* END MVV */

  /* Reconstruction */

  /* Load up upper partial sum */
  xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
  xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
  xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

  /* Add upper component */
  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Pair store: spin components 0 (low half) and 1 (high half). */
  _mm_storel_pi((__m64 *)&spinor_out[0][0][0], xmm0);
  _mm_storel_pi((__m64 *)&spinor_out[0][1][0], xmm1);
  _mm_storel_pi((__m64 *)&spinor_out[0][2][0], xmm2);
  _mm_storeh_pi((__m64 *)&spinor_out[1][0][0], xmm0);
  _mm_storeh_pi((__m64 *)&spinor_out[1][1][0], xmm1);
  _mm_storeh_pi((__m64 *)&spinor_out[1][2][0], xmm2);

  /* Lower components - do projection */
  xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
  xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
  xmm2 = _mm_load_ps(&lower_sum[2][0][0]);
  

  /* Lower reconstruction adds the MVV result unchanged, mirroring the
     plain-addition projection used above. */
  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Store spin components 2 (low half) and 3 (high half). */
  _mm_storel_pi((__m64 *)&spinor_out[2][0][0], xmm0);
  _mm_storel_pi((__m64 *)&spinor_out[2][1][0], xmm1);
  _mm_storel_pi((__m64 *)&spinor_out[2][2][0], xmm2);
  _mm_storeh_pi((__m64 *)&spinor_out[3][0][0], xmm0);
  _mm_storeh_pi((__m64 *)&spinor_out[3][1][0], xmm1);
  _mm_storeh_pi((__m64 *)&spinor_out[3][2][0], xmm2);

  }


  /* First-direction kernel for the minus dslash: spin-project spinor_in
   * for forward mu=0, multiply by the link matrix u (non-adjoint), and
   * INITIALIZE the upper_sum / lower_sum half-spinor accumulators with
   * the result (no "_add" in the name: this routine overwrites the sums
   * rather than accumulating into them).
   *
   * Register layout: each __m128 is two complexes (re0, im0, re1, im1);
   * xmm0..xmm2 hold the projected half spinor, xmm3..xmm5 the product.
   *
   * NOTE(review): assumes spinor_array is [spin][color][re/im],
   * u_mat_array [row][col][re/im], sums 16-byte aligned (required by
   * _mm_store_ps) -- confirm against the header. */
  void dslash_minus_dir0_forward( spinor_array spinor_in,
				  u_mat_array u,
				  halfspinor_array  upper_sum,
				  halfspinor_array  lower_sum   )
       
  {
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;



    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma0 minus projection */
    /* Shuffle 0x1b fully reverses the four floats (swaps the two
       complexes AND re<->im in each); signs13 negates words 0,2.  Net
       effect: component 3 times i is added to component 0, component 2
       times i to component 1. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x1b);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x1b);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0x1b);

    xmm3 = _mm_xor_ps(signs13.vector, xmm3);
    xmm4 = _mm_xor_ps(signs13.vector, xmm4);
    xmm5 = _mm_xor_ps(signs13.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* SU3 Multiply */
    /* Phase 1: real parts of u (index [..][..][0]), broadcast with
       shuffle 0x0. */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[1][0][0]);
    xmm4 = _mm_load_ss(&u[0][1][0]);
    xmm7 = _mm_load_ss(&u[2][1][0]);
    xmm5 = _mm_load_ss(&u[0][2][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][2][0]);
    xmm7 = _mm_load_ss(&u[2][0][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* Phase 2: imaginary parts.  0xb1 swaps re<->im per complex and
       signs13 negates words 0,2 (multiply by i) before multiplying by
       the imaginary u entries -- standard complex multiply. */
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs13.vector, xmm0);
    xmm1 = _mm_xor_ps(signs13.vector, xmm1);
    xmm2 = _mm_xor_ps(signs13.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[0][1][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][0][1] );
    xmm7 = _mm_load_ss(&u[0][2][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* xmm0 dead as a source from here on; reused as scratch. */
    xmm0 = _mm_load_ss( &u[2][0][1] );
    xmm6 = _mm_load_ss( &u[1][2][1] );
    xmm7 = _mm_load_ss( &u[2][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Reconstruction: Upper components just go
       Reshuffle spin and color indices so we can use movaps
       to store - it is aligned and faster... */

    /* Note: stores overwrite the sums -- this is the initializing
       (first-direction) routine, not an accumulate. */
    _mm_store_ps(&upper_sum[0][0][0], xmm3);
    _mm_store_ps(&upper_sum[1][0][0], xmm4);
    _mm_store_ps(&upper_sum[2][0][0], xmm5);
    
    /* Lower components - do projection */
    /* 0x1b reverses the floats, signs24 negates words 1,3: lower half
       is -i times the spin-swapped upper result -- the inverse of the
       projection used above. */
    xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0x1b);
    xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0x1b);
    xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0x1b);

    xmm3 = _mm_xor_ps(signs24.vector, xmm3);
    xmm4 = _mm_xor_ps(signs24.vector, xmm4);
    xmm5 = _mm_xor_ps(signs24.vector, xmm5);

    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm3);
    _mm_store_ps(&lower_sum[1][0][0], xmm4);
    _mm_store_ps(&lower_sum[2][0][0], xmm5);


  }

  /* Accumulate the backward mu=0 neighbour's contribution for the minus
   * dslash: spin-project spinor_in (backward gamma0 projection), multiply
   * by the adjoint (dagger) of the link matrix u, and ADD the result into
   * the running upper_sum / lower_sum half-spinor accumulators.
   *
   * Register layout: each __m128 is two complexes (re0, im0, re1, im1);
   * xmm0..xmm2 hold the projected half spinor, xmm3..xmm5 the product.
   *
   * NOTE(review): assumes spinor_array is [spin][color][re/im],
   * u_mat_array [row][col][re/im], sums 16-byte aligned (required by
   * _mm_load_ps/_mm_store_ps) -- confirm against the header. */
  void dslash_minus_dir0_backward_add( spinor_array spinor_in,
				       u_mat_array u,
				       halfspinor_array  upper_sum,
				       halfspinor_array  lower_sum   )

  {
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;

   

    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma0 minus projection */
    /* Backward variant: 0x1b fully reverses the floats (swaps the two
       complexes AND re<->im) and signs24 negates words 1,3, so the
       spin-swapped lower components are multiplied by -i before being
       added -- the sign-conjugate of the forward projection. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x1b);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x1b);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0x1b);

    xmm3 = _mm_xor_ps(signs24.vector, xmm3);
    xmm4 = _mm_xor_ps(signs24.vector, xmm4);
    xmm5 = _mm_xor_ps(signs24.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* Adj SU(3) multiply */

  /* Phase 1: real parts of u, broadcast (shuffle 0x0); indices
     transposed relative to the forward routine (adjoint). */
  xmm3 = _mm_load_ss(&u[0][0][0]);
  xmm6 = _mm_load_ss(&u[0][1][0]);
  xmm4 = _mm_load_ss(&u[1][0][0]);
  xmm7 = _mm_load_ss(&u[1][2][0]);
  xmm5 = _mm_load_ss(&u[2][0][0]);
  xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
  xmm3 = _mm_mul_ps(xmm0,xmm3);
  xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
  xmm6 = _mm_mul_ps(xmm1,xmm6);
  xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
  xmm4 = _mm_mul_ps(xmm0, xmm4);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_mul_ps(xmm0, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[2][1][0]);
  xmm7 = _mm_load_ss(&u[0][2][0]);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm3 = _mm_add_ps(xmm7, xmm3);
  xmm6 = _mm_load_ss(&u[1][1][0]);
  xmm7 = _mm_load_ss(&u[2][2][0]);
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm4 = _mm_add_ps(xmm6, xmm4);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* Phase 2: imaginary parts.  0xb1 swaps re<->im per complex and
     signs24 negates words 1,3, so each product contributes
     psi * conj(u_ij): with the transposed indices, u^dagger * psi. */
  xmm6 = _mm_load_ss( &u[0][0][1] );
  xmm7 = _mm_load_ss( &u[1][1][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
  xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
  xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
  xmm0 = _mm_xor_ps(signs24.vector, xmm0);
  xmm1 = _mm_xor_ps(signs24.vector, xmm1);
  xmm2 = _mm_xor_ps(signs24.vector, xmm2);
  xmm6 = _mm_mul_ps(xmm0,xmm6);
  xmm7 = _mm_mul_ps(xmm1,xmm7);
  xmm3 = _mm_add_ps(xmm6,xmm3);
  xmm4 = _mm_add_ps(xmm7,xmm4);
  xmm6 = _mm_load_ss( &u[2][2][1] );
  xmm7 = _mm_load_ss( &u[1][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm2, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[0][1][1] );
  xmm7 = _mm_load_ss(&u[2][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* xmm0 dead as a source from here on; reused as scratch. */
  xmm0 = _mm_load_ss( &u[0][2][1] );
  xmm6 = _mm_load_ss( &u[2][1][1] );
  xmm7 = _mm_load_ss( &u[1][2][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm0 = _mm_mul_ps(xmm2, xmm0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm3 = _mm_add_ps(xmm0, xmm3);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);

  /* Result in      xmm3,4,5 */
  /* END MVV */

  /* Reconstruction */

  /* Load up upper partial sum */
  xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
  xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
  xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

  /* Add upper component */
  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  _mm_store_ps(&upper_sum[0][0][0], xmm0);
  _mm_store_ps(&upper_sum[1][0][0], xmm1);
  _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
  

  /* Lower components - do projection */
  xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
  xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
  xmm2 = _mm_load_ps(&lower_sum[2][0][0]);
  

  /* 0x1b reverses the floats, signs13 negates words 0,2: the lower
     half gets +i times the spin-swapped upper result -- the inverse of
     the -i projection used above. */
  xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0x1b);
  xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0x1b);
  xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0x1b);

  xmm3 = _mm_xor_ps(signs13.vector, xmm3);
  xmm4 = _mm_xor_ps(signs13.vector, xmm4);
  xmm5 = _mm_xor_ps(signs13.vector, xmm5);

  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Store */
  _mm_store_ps(&lower_sum[0][0][0], xmm0);
  _mm_store_ps(&lower_sum[1][0][0], xmm1);
  _mm_store_ps(&lower_sum[2][0][0], xmm2);


  }


  /* dslash_minus_dir1_forward_add:
     One direction-1 forward-neighbour contribution of the minus-sign
     Wilson dslash.  The four-spinor spinor_in is spin-projected onto a
     half spinor, multiplied by the SU(3) link u, and the result is
     accumulated in place into upper_sum and lower_sum.

     spinor_in  : input spinor, indexed [spin][color][re/im]
     u          : gauge link matrix, indexed [.][.][re/im]
                  (NOTE(review): row/column order of the first two
                  indices is defined by u_mat_array in the header and is
                  not visible here -- confirm before relying on it)
     upper_sum  : running sum of the upper two spin components (updated in place)
     lower_sum  : running sum of the lower two spin components (updated in place) */
  void dslash_minus_dir1_forward_add( spinor_array  spinor_in,
				      u_mat_array  u,
				      halfspinor_array  upper_sum,
				      halfspinor_array  lower_sum   )
  {

    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;
  
    /* Gather: after these loads xmm0-2 hold spins 0/1 (one color per
       register, one complex pair per 64-bit half) and xmm3-5 hold
       spins 2/3.
       NOTE(review): _mm_loadl_pi/_mm_loadh_pi merge into an otherwise
       uninitialized register.  Both halves are written before use, so
       the value is well defined, but the pattern can trip
       -Wuninitialized / MemorySanitizer. */
    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma1 minus projection: shuffle 0x4e swaps the two complex
       pairs (spin-2 half <-> spin-3 half), signs12 negates the low
       pair, and the add folds the lower components onto the upper
       ones. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x4e);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x4e);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0x4e);

    xmm3 = _mm_xor_ps(signs12.vector, xmm3);
    xmm4 = _mm_xor_ps(signs12.vector, xmm4);
    xmm5 = _mm_xor_ps(signs12.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* SU3 Multiply */
    /* Real-part pass: broadcast each u[.][.][0] across a register
       (shuffle 0x0) and multiply-accumulate into xmm3-5 (one result
       color per register).  Loads and arithmetic are interleaved to
       hide latency -- statement order matters. */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[1][0][0]);
    xmm4 = _mm_load_ss(&u[0][1][0]);
    xmm7 = _mm_load_ss(&u[2][1][0]);
    xmm5 = _mm_load_ss(&u[0][2][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][2][0]);
    xmm7 = _mm_load_ss(&u[2][0][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* Imaginary-part pass: shuffle 0xb1 swaps re<->im inside each
       complex and signs13 flips alternate signs, so the broadcast
       multiply-adds below realize the cross terms of the complex
       multiplication. */
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs13.vector, xmm0);
    xmm1 = _mm_xor_ps(signs13.vector, xmm1);
    xmm2 = _mm_xor_ps(signs13.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[0][1][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][0][1] );
    xmm7 = _mm_load_ss(&u[0][2][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* xmm0 is dead as a source from here on, so it is reused as a
       scratch register for the remaining link elements. */
    xmm0 = _mm_load_ss( &u[2][0][1] );
    xmm6 = _mm_load_ss( &u[1][2][1] );
    xmm7 = _mm_load_ss( &u[2][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Reconstruction: Upper components just go
       Reshuffle spin and color indices so we can use movaps
       to store - it is aligned and faster... */
    xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
    xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
    xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    _mm_store_ps(&upper_sum[0][0][0], xmm0);
    _mm_store_ps(&upper_sum[1][0][0], xmm1);
    _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
    /* Lower components - do projection */
    xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
    xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
    xmm2 = _mm_load_ps(&lower_sum[2][0][0]);

    /* Gamma_minus 1 reconstruction: same 0x4e pair-swap, but with the
       signs34 mask (negating the high pair) before accumulating into
       the lower sum. */

    xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0x4e);
    xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0x4e);
    xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0x4e);

    xmm3 = _mm_xor_ps(signs34.vector, xmm3);
    xmm4 = _mm_xor_ps(signs34.vector, xmm4);
    xmm5 = _mm_xor_ps(signs34.vector, xmm5);

    /* Accumulate */
    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm0);
    _mm_store_ps(&lower_sum[1][0][0], xmm1);
    _mm_store_ps(&lower_sum[2][0][0], xmm2);

  }

  /* dslash_minus_dir1_backward_add:
     One direction-1 backward-neighbour contribution of the minus-sign
     Wilson dslash.  Spin-projects spinor_in to a half spinor,
     multiplies by the ADJOINT of the SU(3) link u (see the transposed
     element load order and the signs24 conjugation mask below), and
     accumulates into upper_sum / lower_sum in place.

     spinor_in  : input spinor, indexed [spin][color][re/im]
     u          : gauge link matrix, indexed [.][.][re/im]
     upper_sum  : running sum of the upper two spin components (updated in place)
     lower_sum  : running sum of the lower two spin components (updated in place) */
  void dslash_minus_dir1_backward_add( spinor_array  spinor_in,
				       u_mat_array  u,
				       halfspinor_array  upper_sum,
				       halfspinor_array  lower_sum   )
  {
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;


    /* Gather: xmm0-2 <- spins 0/1, xmm3-5 <- spins 2/3, one color per
       register, one complex pair per 64-bit half.
       NOTE(review): _mm_loadl_pi/_mm_loadh_pi merge into an otherwise
       uninitialized register; both halves are filled before use. */
    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma1 projection, backward-neighbour sign convention: 0x4e
       swaps the complex pairs; signs34 (negating the high pair) is the
       opposite mask from the forward kernel's signs12. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x4e);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x4e);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0x4e);

    xmm3 = _mm_xor_ps(signs34.vector, xmm3);
    xmm4 = _mm_xor_ps(signs34.vector, xmm4);
    xmm5 = _mm_xor_ps(signs34.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* Adj SU(3) multiply */
    /* Real-part pass: element indices are transposed relative to the
       plain SU(3) multiply (u[0][1] where the forward kernel reads
       u[1][0], etc.) -- this is the dagger. */
  xmm3 = _mm_load_ss(&u[0][0][0]);
  xmm6 = _mm_load_ss(&u[0][1][0]);
  xmm4 = _mm_load_ss(&u[1][0][0]);
  xmm7 = _mm_load_ss(&u[1][2][0]);
  xmm5 = _mm_load_ss(&u[2][0][0]);
  xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
  xmm3 = _mm_mul_ps(xmm0,xmm3);
  xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
  xmm6 = _mm_mul_ps(xmm1,xmm6);
  xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
  xmm4 = _mm_mul_ps(xmm0, xmm4);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_mul_ps(xmm0, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[2][1][0]);
  xmm7 = _mm_load_ss(&u[0][2][0]);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm3 = _mm_add_ps(xmm7, xmm3);
  xmm6 = _mm_load_ss(&u[1][1][0]);
  xmm7 = _mm_load_ss(&u[2][2][0]);
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm4 = _mm_add_ps(xmm6, xmm4);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* Imaginary-part pass: 0xb1 swaps re<->im in each complex; signs24
     (instead of the forward kernel's signs13) conjugates the link,
     completing the adjoint. */
  xmm6 = _mm_load_ss( &u[0][0][1] );
  xmm7 = _mm_load_ss( &u[1][1][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
  xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
  xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
  xmm0 = _mm_xor_ps(signs24.vector, xmm0);
  xmm1 = _mm_xor_ps(signs24.vector, xmm1);
  xmm2 = _mm_xor_ps(signs24.vector, xmm2);
  xmm6 = _mm_mul_ps(xmm0,xmm6);
  xmm7 = _mm_mul_ps(xmm1,xmm7);
  xmm3 = _mm_add_ps(xmm6,xmm3);
  xmm4 = _mm_add_ps(xmm7,xmm4);
  xmm6 = _mm_load_ss( &u[2][2][1] );
  xmm7 = _mm_load_ss( &u[1][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm2, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[0][1][1] );
  xmm7 = _mm_load_ss(&u[2][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* xmm0 no longer needed as a source -- reused for the last elements. */
  xmm0 = _mm_load_ss( &u[0][2][1] );
  xmm6 = _mm_load_ss( &u[2][1][1] );
  xmm7 = _mm_load_ss( &u[1][2][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm0 = _mm_mul_ps(xmm2, xmm0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm3 = _mm_add_ps(xmm0, xmm3);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);

  /* Result in      xmm3,4,5 */
  /* END MVV */

  /* Reconstruction */

  /* Load up upper partial sum */
  xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
  xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
  xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

  /* Add upper component */
  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  _mm_store_ps(&upper_sum[0][0][0], xmm0);
  _mm_store_ps(&upper_sum[1][0][0], xmm1);
  _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
  

  /* Lower components - do projection */
  xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
  xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
  xmm2 = _mm_load_ps(&lower_sum[2][0][0]);
  
  /* Reconstruction for the lower components: 0x4e pair-swap with the
     signs12 mask -- opposite mask from the forward kernel, matching
     the backward projection above. */

  xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0x4e);
  xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0x4e);
  xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0x4e);

  xmm3 = _mm_xor_ps(signs12.vector, xmm3);
  xmm4 = _mm_xor_ps(signs12.vector, xmm4);
  xmm5 = _mm_xor_ps(signs12.vector, xmm5);

  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Store */
  _mm_store_ps(&lower_sum[0][0][0], xmm0);
  _mm_store_ps(&lower_sum[1][0][0], xmm1);
  _mm_store_ps(&lower_sum[2][0][0], xmm2);


  }

  /* dslash_minus_dir2_forward_add:
     One direction-2 forward-neighbour contribution of the minus-sign
     Wilson dslash.  Spin-projects spinor_in (0xb1 re/im swap with the
     signs14 mask -- the dir-2 pattern, unlike dir-1's 0x4e/signs12),
     multiplies by the SU(3) link u, and accumulates into
     upper_sum / lower_sum in place.

     spinor_in  : input spinor, indexed [spin][color][re/im]
     u          : gauge link matrix, indexed [.][.][re/im]
     upper_sum  : running sum of the upper two spin components (updated in place)
     lower_sum  : running sum of the lower two spin components (updated in place) */
  void dslash_minus_dir2_forward_add( spinor_array  spinor_in,
				      u_mat_array  u,
				      halfspinor_array  upper_sum,
				      halfspinor_array  lower_sum   )
  {

    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;
  
    /* Gather: xmm0-2 <- spins 0/1, xmm3-5 <- spins 2/3, one color per
       register, one complex pair per 64-bit half. */
    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma_2 projection: 0xb1 swaps re<->im inside each complex,
       signs14 sets the signs, and the add folds the lower spins onto
       the upper ones.
       NOTE(review): the original comment here said "gamma1", an
       apparent copy-paste leftover -- the shuffle/sign pattern differs
       from the dir-1 kernels (0x4e/signs12). */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0xb1);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0xb1);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0xb1);

    xmm3 = _mm_xor_ps(signs14.vector, xmm3);
    xmm4 = _mm_xor_ps(signs14.vector, xmm4);
    xmm5 = _mm_xor_ps(signs14.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* SU3 Multiply */
    /* Real-part pass: broadcast each u[.][.][0] (shuffle 0x0) and
       multiply-accumulate into xmm3-5, one result color per register. */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[1][0][0]);
    xmm4 = _mm_load_ss(&u[0][1][0]);
    xmm7 = _mm_load_ss(&u[2][1][0]);
    xmm5 = _mm_load_ss(&u[0][2][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][2][0]);
    xmm7 = _mm_load_ss(&u[2][0][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* Imaginary-part pass: 0xb1 swaps re<->im; signs13 gives the
       cross-term signs of the complex multiplication. */
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs13.vector, xmm0);
    xmm1 = _mm_xor_ps(signs13.vector, xmm1);
    xmm2 = _mm_xor_ps(signs13.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[0][1][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][0][1] );
    xmm7 = _mm_load_ss(&u[0][2][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    /* xmm0 no longer needed as a source -- reused for the last elements. */
    xmm0 = _mm_load_ss( &u[2][0][1] );
    xmm6 = _mm_load_ss( &u[1][2][1] );
    xmm7 = _mm_load_ss( &u[2][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Reconstruction: Upper components just go
       Reshuffle spin and color indices so we can use movaps
       to store - it is aligned and faster... */
    xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
    xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
    xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    _mm_store_ps(&upper_sum[0][0][0], xmm0);
    _mm_store_ps(&upper_sum[1][0][0], xmm1);
    _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
    /* Lower components - do projection */
    xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
    xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
    xmm2 = _mm_load_ps(&lower_sum[2][0][0]);

    /* Lower-component reconstruction: same 0xb1 re/im swap, with the
       signs23 mask, before accumulating into the lower sum.
       NOTE(review): original comment said "Gamma_minus 1" -- stale
       copy-paste; this is the dir-2 kernel. */

    xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0xb1);
    xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0xb1);
    xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0xb1);

    xmm3 = _mm_xor_ps(signs23.vector, xmm3);
    xmm4 = _mm_xor_ps(signs23.vector, xmm4);
    xmm5 = _mm_xor_ps(signs23.vector, xmm5);

    /* Accumulate */
    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm0);
    _mm_store_ps(&lower_sum[1][0][0], xmm1);
    _mm_store_ps(&lower_sum[2][0][0], xmm2);


  }

  /* dslash_minus_dir2_backward_add:
     One direction-2 backward-neighbour contribution of the minus-sign
     Wilson dslash.  Spin-projects spinor_in (0xb1 re/im swap with the
     signs23 mask -- the mirror of the forward kernel's signs14),
     multiplies by the ADJOINT of the SU(3) link u, and accumulates
     into upper_sum / lower_sum in place.

     spinor_in  : input spinor, indexed [spin][color][re/im]
     u          : gauge link matrix, indexed [.][.][re/im]
     upper_sum  : running sum of the upper two spin components (updated in place)
     lower_sum  : running sum of the lower two spin components (updated in place) */
  void dslash_minus_dir2_backward_add( spinor_array  spinor_in,
				       u_mat_array  u,
				       halfspinor_array  upper_sum,
				       halfspinor_array  lower_sum   )
  {
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;


    /* Gather: xmm0-2 <- spins 0/1, xmm3-5 <- spins 2/3, one color per
       register, one complex pair per 64-bit half. */
    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma_2 projection, backward-neighbour sign convention: 0xb1
       swaps re<->im inside each complex; signs23 is the opposite mask
       from the forward kernel's signs14.
       NOTE(review): original comment said "gamma1" -- stale
       copy-paste; this is the dir-2 kernel. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0xb1);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0xb1);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0xb1);

    xmm3 = _mm_xor_ps(signs23.vector, xmm3);
    xmm4 = _mm_xor_ps(signs23.vector, xmm4);
    xmm5 = _mm_xor_ps(signs23.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* Adj SU(3) multiply */
    /* Real-part pass: element indices transposed relative to the plain
       multiply (the dagger); imaginary pass below uses signs24 to
       conjugate. */
  xmm3 = _mm_load_ss(&u[0][0][0]);
  xmm6 = _mm_load_ss(&u[0][1][0]);
  xmm4 = _mm_load_ss(&u[1][0][0]);
  xmm7 = _mm_load_ss(&u[1][2][0]);
  xmm5 = _mm_load_ss(&u[2][0][0]);
  xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
  xmm3 = _mm_mul_ps(xmm0,xmm3);
  xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
  xmm6 = _mm_mul_ps(xmm1,xmm6);
  xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
  xmm4 = _mm_mul_ps(xmm0, xmm4);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_mul_ps(xmm0, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[2][1][0]);
  xmm7 = _mm_load_ss(&u[0][2][0]);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm3 = _mm_add_ps(xmm7, xmm3);
  xmm6 = _mm_load_ss(&u[1][1][0]);
  xmm7 = _mm_load_ss(&u[2][2][0]);
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm4 = _mm_add_ps(xmm6, xmm4);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* Imaginary-part pass: 0xb1 re/im swap + signs24 conjugation. */
  xmm6 = _mm_load_ss( &u[0][0][1] );
  xmm7 = _mm_load_ss( &u[1][1][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
  xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
  xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
  xmm0 = _mm_xor_ps(signs24.vector, xmm0);
  xmm1 = _mm_xor_ps(signs24.vector, xmm1);
  xmm2 = _mm_xor_ps(signs24.vector, xmm2);
  xmm6 = _mm_mul_ps(xmm0,xmm6);
  xmm7 = _mm_mul_ps(xmm1,xmm7);
  xmm3 = _mm_add_ps(xmm6,xmm3);
  xmm4 = _mm_add_ps(xmm7,xmm4);
  xmm6 = _mm_load_ss( &u[2][2][1] );
  xmm7 = _mm_load_ss( &u[1][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm2, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[0][1][1] );
  xmm7 = _mm_load_ss(&u[2][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* xmm0 no longer needed as a source -- reused for the last elements. */
  xmm0 = _mm_load_ss( &u[0][2][1] );
  xmm6 = _mm_load_ss( &u[2][1][1] );
  xmm7 = _mm_load_ss( &u[1][2][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm0 = _mm_mul_ps(xmm2, xmm0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm3 = _mm_add_ps(xmm0, xmm3);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);

  /* Result in      xmm3,4,5 */
  /* END MVV */

  /* Reconstruction */

  /* Load up upper partial sum */
  xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
  xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
  xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

  /* Add upper component */
  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  _mm_store_ps(&upper_sum[0][0][0], xmm0);
  _mm_store_ps(&upper_sum[1][0][0], xmm1);
  _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
  

  /* Lower components - do projection */
  xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
  xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
  xmm2 = _mm_load_ps(&lower_sum[2][0][0]);
  
  /* Lower-component reconstruction: 0xb1 re/im swap with signs14 --
     the mirror of the projection mask above. */

  xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0xb1);
  xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0xb1);
  xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0xb1);

  xmm3 = _mm_xor_ps(signs14.vector, xmm3);
  xmm4 = _mm_xor_ps(signs14.vector, xmm4);
  xmm5 = _mm_xor_ps(signs14.vector, xmm5);

  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Store */
  _mm_store_ps(&lower_sum[0][0][0], xmm0);
  _mm_store_ps(&lower_sum[1][0][0], xmm1);
  _mm_store_ps(&lower_sum[2][0][0], xmm2);


  }

  /* dslash_minus_dir2_backward_add_store:
     Final-accumulation variant of dslash_minus_dir2_backward_add: it
     computes the same direction-2 backward-neighbour contribution
     (projection with signs23, adjoint SU(3) multiply), adds it to the
     partial sums, and writes the fully reconstructed four-spinor to
     spinor_out rather than storing the sums back (the replaced stores
     are kept below in the "Was" comments).

     spinor_in  : input spinor, indexed [spin][color][re/im]
     u          : gauge link matrix, indexed [.][.][re/im]
     upper_sum  : accumulated upper two spin components (read only here)
     lower_sum  : accumulated lower two spin components (read only here)
     spinor_out : destination spinor, indexed [spin][color][re/im] */
  void dslash_minus_dir2_backward_add_store( spinor_array  spinor_in,
					     u_mat_array  u,
					     halfspinor_array  upper_sum,
					     halfspinor_array  lower_sum,
					     spinor_array spinor_out)
  {
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;


    /* Gather: xmm0-2 <- spins 0/1, xmm3-5 <- spins 2/3, one color per
       register, one complex pair per 64-bit half. */
    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    /* gamma_2 projection, backward-neighbour sign convention: 0xb1
       swaps re<->im inside each complex, signs23 sets the signs.
       NOTE(review): original comment said "gamma1" -- stale
       copy-paste; this is the dir-2 kernel. */
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0xb1);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0xb1);
    xmm5 = _mm_shuffle_ps(xmm5, xmm5, 0xb1);

    xmm3 = _mm_xor_ps(signs23.vector, xmm3);
    xmm4 = _mm_xor_ps(signs23.vector, xmm4);
    xmm5 = _mm_xor_ps(signs23.vector, xmm5);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* Adj SU(3) multiply */
    /* Real-part pass with transposed element indices (the dagger);
       imaginary pass below conjugates via signs24. */
  xmm3 = _mm_load_ss(&u[0][0][0]);
  xmm6 = _mm_load_ss(&u[0][1][0]);
  xmm4 = _mm_load_ss(&u[1][0][0]);
  xmm7 = _mm_load_ss(&u[1][2][0]);
  xmm5 = _mm_load_ss(&u[2][0][0]);
  xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
  xmm3 = _mm_mul_ps(xmm0,xmm3);
  xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
  xmm6 = _mm_mul_ps(xmm1,xmm6);
  xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
  xmm4 = _mm_mul_ps(xmm0, xmm4);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_mul_ps(xmm0, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[2][1][0]);
  xmm7 = _mm_load_ss(&u[0][2][0]);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm3 = _mm_add_ps(xmm7, xmm3);
  xmm6 = _mm_load_ss(&u[1][1][0]);
  xmm7 = _mm_load_ss(&u[2][2][0]);
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm4 = _mm_add_ps(xmm6, xmm4);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* Imaginary-part pass: 0xb1 re/im swap + signs24 conjugation. */
  xmm6 = _mm_load_ss( &u[0][0][1] );
  xmm7 = _mm_load_ss( &u[1][1][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
  xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
  xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
  xmm0 = _mm_xor_ps(signs24.vector, xmm0);
  xmm1 = _mm_xor_ps(signs24.vector, xmm1);
  xmm2 = _mm_xor_ps(signs24.vector, xmm2);
  xmm6 = _mm_mul_ps(xmm0,xmm6);
  xmm7 = _mm_mul_ps(xmm1,xmm7);
  xmm3 = _mm_add_ps(xmm6,xmm3);
  xmm4 = _mm_add_ps(xmm7,xmm4);
  xmm6 = _mm_load_ss( &u[2][2][1] );
  xmm7 = _mm_load_ss( &u[1][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm2, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[0][1][1] );
  xmm7 = _mm_load_ss(&u[2][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* xmm0 no longer needed as a source -- reused for the last elements. */
  xmm0 = _mm_load_ss( &u[0][2][1] );
  xmm6 = _mm_load_ss( &u[2][1][1] );
  xmm7 = _mm_load_ss( &u[1][2][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm0 = _mm_mul_ps(xmm2, xmm0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm3 = _mm_add_ps(xmm0, xmm3);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);

  /* Result in      xmm3,4,5 */
  /* END MVV */

  /* Reconstruction */

  /* Load up upper partial sum */
  xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
  xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
  xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

  /* Add upper component */
  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Was */
  /*
  _mm_store_ps(&upper_sum[0][0][0], xmm0);
  _mm_store_ps(&upper_sum[1][0][0], xmm1);
  _mm_store_ps(&upper_sum[2][0][0], xmm2);
  */

  /* Pair store: scatter the two spins per register back to the
     [spin][color][re/im] layout of spinor_out (low half -> spin 0,
     high half -> spin 1). */
  _mm_storel_pi((__m64 *)&spinor_out[0][0][0], xmm0);
  _mm_storel_pi((__m64 *)&spinor_out[0][1][0], xmm1);
  _mm_storel_pi((__m64 *)&spinor_out[0][2][0], xmm2);
  _mm_storeh_pi((__m64 *)&spinor_out[1][0][0], xmm0);
  _mm_storeh_pi((__m64 *)&spinor_out[1][1][0], xmm1);
  _mm_storeh_pi((__m64 *)&spinor_out[1][2][0], xmm2);


  /* Lower components - do projection */
  xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
  xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
  xmm2 = _mm_load_ps(&lower_sum[2][0][0]);
  
  /* Lower-component reconstruction: 0xb1 re/im swap with signs14 --
     the mirror of the projection mask above. */
  xmm3 = _mm_shuffle_ps( xmm3, xmm3, 0xb1);
  xmm4 = _mm_shuffle_ps( xmm4, xmm4, 0xb1);
  xmm5 = _mm_shuffle_ps( xmm5, xmm5, 0xb1);

  xmm3 = _mm_xor_ps(signs14.vector, xmm3);
  xmm4 = _mm_xor_ps(signs14.vector, xmm4);
  xmm5 = _mm_xor_ps(signs14.vector, xmm5);

  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Store */
  /* Was */
  /*
  _mm_store_ps(&lower_sum[0][0][0], xmm0);
  _mm_store_ps(&lower_sum[1][0][0], xmm1);
  _mm_store_ps(&lower_sum[2][0][0], xmm2);
  */

  /* Pair store: low half -> spin 2, high half -> spin 3. */
  _mm_storel_pi((__m64 *)&spinor_out[2][0][0], xmm0);
  _mm_storel_pi((__m64 *)&spinor_out[2][1][0], xmm1);
  _mm_storel_pi((__m64 *)&spinor_out[2][2][0], xmm2);
  _mm_storeh_pi((__m64 *)&spinor_out[3][0][0], xmm0);
  _mm_storeh_pi((__m64 *)&spinor_out[3][1][0], xmm1);
  _mm_storeh_pi((__m64 *)&spinor_out[3][2][0], xmm2);

  }


  void dslash_minus_dir3_forward_add( spinor_array  spinor_in,
				      u_mat_array  u,
				      halfspinor_array  upper_sum,
				      halfspinor_array  lower_sum)
  {
   __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;
  
    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2 */
    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);


    /* SU3 Multiply */
    xmm3 = _mm_load_ss(&u[0][0][0]);
    xmm6 = _mm_load_ss(&u[1][0][0]);
    xmm4 = _mm_load_ss(&u[0][1][0]);
    xmm7 = _mm_load_ss(&u[2][1][0]);
    xmm5 = _mm_load_ss(&u[0][2][0]);
    xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
    xmm3 = _mm_mul_ps(xmm0,xmm3);
    xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
    xmm6 = _mm_mul_ps(xmm1,xmm6);
    xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
    xmm4 = _mm_mul_ps(xmm0, xmm4);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_mul_ps(xmm0, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][2][0]);
    xmm7 = _mm_load_ss(&u[2][0][0]);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm3 = _mm_add_ps(xmm7, xmm3);
    xmm6 = _mm_load_ss(&u[1][1][0]);
    xmm7 = _mm_load_ss(&u[2][2][0]);
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm4 = _mm_add_ps(xmm6, xmm4);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    xmm6 = _mm_load_ss( &u[0][0][1] );
    xmm7 = _mm_load_ss( &u[1][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
    xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
    xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
    xmm0 = _mm_xor_ps(signs13.vector, xmm0);
    xmm1 = _mm_xor_ps(signs13.vector, xmm1);
    xmm2 = _mm_xor_ps(signs13.vector, xmm2);
    xmm6 = _mm_mul_ps(xmm0,xmm6);
    xmm7 = _mm_mul_ps(xmm1,xmm7);
    xmm3 = _mm_add_ps(xmm6,xmm3);
    xmm4 = _mm_add_ps(xmm7,xmm4);
    xmm6 = _mm_load_ss( &u[2][2][1] );
    xmm7 = _mm_load_ss( &u[0][1][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm2, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);
    xmm6 = _mm_load_ss(&u[1][0][1] );
    xmm7 = _mm_load_ss(&u[0][2][1] );
    xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm0, xmm7);
    xmm3 = _mm_add_ps(xmm6, xmm3);
    xmm5 = _mm_add_ps(xmm7, xmm5);
    xmm0 = _mm_load_ss( &u[2][0][1] );
    xmm6 = _mm_load_ss( &u[1][2][1] );
    xmm7 = _mm_load_ss( &u[2][1][1] );
    xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
    xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
    xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
    xmm0 = _mm_mul_ps(xmm2, xmm0);
    xmm6 = _mm_mul_ps(xmm1, xmm6);
    xmm7 = _mm_mul_ps(xmm2, xmm7);
    xmm3 = _mm_add_ps(xmm0, xmm3);
    xmm5 = _mm_add_ps(xmm6, xmm5);
    xmm4 = _mm_add_ps(xmm7, xmm4);

    /* Reconstruction: Upper components just go
       Reshuffle spin and color indices so we can use movaps
       to store - it is aligned and faster... */
    xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
    xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
    xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    _mm_store_ps(&upper_sum[0][0][0], xmm0);
    _mm_store_ps(&upper_sum[1][0][0], xmm1);
    _mm_store_ps(&upper_sum[2][0][0], xmm2);
    
    /* Lower components - do projection */
    xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
    xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
    xmm2 = _mm_load_ps(&lower_sum[2][0][0]);

    /* Accumulate */
    xmm0 = _mm_add_ps(xmm0, xmm3);
    xmm1 = _mm_add_ps(xmm1, xmm4);
    xmm2 = _mm_add_ps(xmm2, xmm5);

    /* Store */
    _mm_store_ps(&lower_sum[0][0][0], xmm0);
    _mm_store_ps(&lower_sum[1][0][0], xmm1);
    _mm_store_ps(&lower_sum[2][0][0], xmm2);



  }

  /* Final accumulation step for the "minus" dslash, direction 3, backward
     neighbour.  Spin-projects spinor_in (upper minus lower components),
     multiplies the resulting half spinor by the adjoint of the gauge link u
     (note the transposed u[row][col] load order combined with the signs24
     imaginary-lane negation below -- together they implement u^dagger), adds
     the running partial sums, and writes the completed full spinor:

         spinor_out[0..1] = upper_sum + u^dagger * h   (upper two spins)
         spinor_out[2..3] = lower_sum - u^dagger * h   (lower two spins)

     Data layout (established by the load pattern): spinor_in[spin][color][reim],
     u[?][?][reim] with [0]=real, [1]=imag; each __m128 packs two complex
     numbers as { re, im, re, im }.
     NOTE(review): which gamma-matrix basis makes simple add/sub the correct
     dir-3 projection is defined outside this file -- confirm against the
     header / the matching "plus" kernels. */
  void dslash_minus_dir3_backward_add_store( spinor_array  spinor_in,
					     u_mat_array  u,
					     halfspinor_array  upper_sum,
					     halfspinor_array  lower_sum,
					     spinor_array spinor_out)
       
  {
    /* Scratch SSE registers, named after the hardware registers the
       original assembly version used. */
    __m128 xmm0 ALIGN;
    __m128 xmm1 ALIGN;
    __m128 xmm2 ALIGN;
    __m128 xmm3 ALIGN;
    __m128 xmm4 ALIGN;
    __m128 xmm5 ALIGN;
    __m128 xmm6 ALIGN;
    __m128 xmm7 ALIGN;



    /* Gather the four spin components, one color per register:
       xmmN = { spinA.colorN.re, spinA.colorN.im, spinB.colorN.re, spinB.colorN.im } */

    /* Component 0 into the low 2 floats */
    xmm0 = _mm_loadl_pi(xmm0, (__m64 *)&spinor_in[0][0][0]);
    xmm1 = _mm_loadl_pi(xmm1, (__m64 *)&spinor_in[0][1][0]);
    xmm2 = _mm_loadl_pi(xmm2, (__m64 *)&spinor_in[0][2][0]);

    /* Component 1 into the high 2 floats */
    xmm0 = _mm_loadh_pi(xmm0, (__m64 *)&spinor_in[1][0][0]);
    xmm1 = _mm_loadh_pi(xmm1, (__m64 *)&spinor_in[1][1][0]);
    xmm2 = _mm_loadh_pi(xmm2, (__m64 *)&spinor_in[1][2][0]);
    
    /* Component 2 into low 2 floats */
    xmm3 = _mm_loadl_pi(xmm3, (__m64 *)&spinor_in[2][0][0]);
    xmm4 = _mm_loadl_pi(xmm4, (__m64 *)&spinor_in[2][1][0]);
    xmm5 = _mm_loadl_pi(xmm5, (__m64 *)&spinor_in[2][2][0]);

    /* Component 3 into the high 2 floats */
    xmm3 = _mm_loadh_pi(xmm3, (__m64 *)&spinor_in[3][0][0]);
    xmm4 = _mm_loadh_pi(xmm4, (__m64 *)&spinor_in[3][1][0]);
    xmm5 = _mm_loadh_pi(xmm5, (__m64 *)&spinor_in[3][2][0]);

    /* Spin Projection. Results into xmm0-xmm2:
       half spinor h = (spin 0,1) - (spin 2,3), componentwise per color.
       (Contrast: the dir-0 "plus" projection above uses add_ps.) */
    xmm0 = _mm_sub_ps(xmm0, xmm3);
    xmm1 = _mm_sub_ps(xmm1, xmm4);
    xmm2 = _mm_sub_ps(xmm2, xmm5);


    /* Adj SU(3) multiply: chi = u^dagger * h.
       Phase 1 (real parts): broadcast u[row][col][0] and accumulate into
       xmm3/xmm4/xmm5 (one register per output color).  The u indices are
       the TRANSPOSE of the ones used in the plain multiply above (e.g.
       u[0][1] feeds output color 0 here), which gives u^T; conjugation is
       applied in phase 2. */

  xmm3 = _mm_load_ss(&u[0][0][0]);
  xmm6 = _mm_load_ss(&u[0][1][0]);
  xmm4 = _mm_load_ss(&u[1][0][0]);
  xmm7 = _mm_load_ss(&u[1][2][0]);
  xmm5 = _mm_load_ss(&u[2][0][0]);
  xmm3 = _mm_shuffle_ps(xmm3, xmm3, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm4 = _mm_shuffle_ps(xmm4, xmm4, 0x0);
  xmm3 = _mm_mul_ps(xmm0,xmm3);
  xmm7 = _mm_shuffle_ps(xmm7,xmm7,0x0);
  xmm6 = _mm_mul_ps(xmm1,xmm6);
  xmm5 = _mm_shuffle_ps(xmm5,xmm5,0x0);
  xmm4 = _mm_mul_ps(xmm0, xmm4);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_mul_ps(xmm0, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[2][1][0]);
  xmm7 = _mm_load_ss(&u[0][2][0]);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm3 = _mm_add_ps(xmm7, xmm3);
  xmm6 = _mm_load_ss(&u[1][1][0]);
  xmm7 = _mm_load_ss(&u[2][2][0]);
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm4 = _mm_add_ps(xmm6, xmm4);
  xmm5 = _mm_add_ps(xmm7, xmm5);

  /* Phase 2 (imaginary parts): 0xb1 swaps re<->im inside each complex
     pair; the signs24 xor negates lanes 1 and 3 (the swapped-in real
     parts), so the subsequent multiply-adds contribute the cross terms of
     a multiply by conj(u) -- completing u^dagger together with the
     transposed loads.  (The plain multiply above uses signs13 instead.) */
  xmm6 = _mm_load_ss( &u[0][0][1] );
  xmm7 = _mm_load_ss( &u[1][1][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0xb1);
  xmm1 = _mm_shuffle_ps(xmm1, xmm1, 0xb1);
  xmm2 = _mm_shuffle_ps(xmm2, xmm2, 0xb1);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0 );
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0 );
  xmm0 = _mm_xor_ps(signs24.vector, xmm0);
  xmm1 = _mm_xor_ps(signs24.vector, xmm1);
  xmm2 = _mm_xor_ps(signs24.vector, xmm2);
  xmm6 = _mm_mul_ps(xmm0,xmm6);
  xmm7 = _mm_mul_ps(xmm1,xmm7);
  xmm3 = _mm_add_ps(xmm6,xmm3);
  xmm4 = _mm_add_ps(xmm7,xmm4);
  xmm6 = _mm_load_ss( &u[2][2][1] );
  xmm7 = _mm_load_ss( &u[1][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm2, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);
  xmm6 = _mm_load_ss(&u[0][1][1] );
  xmm7 = _mm_load_ss(&u[2][0][1] );
  xmm6 = _mm_shuffle_ps( xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps( xmm7, xmm7, 0x0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm0, xmm7);
  xmm3 = _mm_add_ps(xmm6, xmm3);
  xmm5 = _mm_add_ps(xmm7, xmm5);
  /* xmm0 is dead as an input from here on, so it is recycled as a
     broadcast register for the last column of u. */
  xmm0 = _mm_load_ss( &u[0][2][1] );
  xmm6 = _mm_load_ss( &u[2][1][1] );
  xmm7 = _mm_load_ss( &u[1][2][1] );
  xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0x0);
  xmm6 = _mm_shuffle_ps(xmm6, xmm6, 0x0);
  xmm7 = _mm_shuffle_ps(xmm7, xmm7, 0x0);
  xmm0 = _mm_mul_ps(xmm2, xmm0);
  xmm6 = _mm_mul_ps(xmm1, xmm6);
  xmm7 = _mm_mul_ps(xmm2, xmm7);
  xmm3 = _mm_add_ps(xmm0, xmm3);
  xmm5 = _mm_add_ps(xmm6, xmm5);
  xmm4 = _mm_add_ps(xmm7, xmm4);

  /* Result in      xmm3,4,5 */
  /* END MVV */

  /* Reconstruction */

  /* Load up upper partial sum */
  xmm0 = _mm_load_ps(&upper_sum[0][0][0]);
  xmm1 = _mm_load_ps(&upper_sum[1][0][0]);
  xmm2 = _mm_load_ps(&upper_sum[2][0][0]);

  /* Add upper component */
  xmm0 = _mm_add_ps(xmm0, xmm3);
  xmm1 = _mm_add_ps(xmm1, xmm4);
  xmm2 = _mm_add_ps(xmm2, xmm5);
  
  /* Pair store: low half of each register -> spin 0, high half -> spin 1.
     spinor_out need not be 16-byte aligned for these 8-byte stores. */
  _mm_storel_pi((__m64 *)&spinor_out[0][0][0], xmm0);
  _mm_storel_pi((__m64 *)&spinor_out[0][1][0], xmm1);
  _mm_storel_pi((__m64 *)&spinor_out[0][2][0], xmm2);
  _mm_storeh_pi((__m64 *)&spinor_out[1][0][0], xmm0);
  _mm_storeh_pi((__m64 *)&spinor_out[1][1][0], xmm1);
  _mm_storeh_pi((__m64 *)&spinor_out[1][2][0], xmm2);

  /* Lower components - do projection: lower_sum MINUS the matrix-multiply
     result (the sign partner of the subtraction in the projection above). */
  xmm0 = _mm_load_ps(&lower_sum[0][0][0]);
  xmm1 = _mm_load_ps(&lower_sum[1][0][0]);
  xmm2 = _mm_load_ps(&lower_sum[2][0][0]);
  

  xmm0 = _mm_sub_ps(xmm0, xmm3);
  xmm1 = _mm_sub_ps(xmm1, xmm4);
  xmm2 = _mm_sub_ps(xmm2, xmm5);
  
  /* Store: low half -> spin 2, high half -> spin 3. */
  _mm_storel_pi((__m64 *)&spinor_out[2][0][0], xmm0);
  _mm_storel_pi((__m64 *)&spinor_out[2][1][0], xmm1);
  _mm_storel_pi((__m64 *)&spinor_out[2][2][0], xmm2);
  _mm_storeh_pi((__m64 *)&spinor_out[3][0][0], xmm0);
  _mm_storeh_pi((__m64 *)&spinor_out[3][1][0], xmm1);
  _mm_storeh_pi((__m64 *)&spinor_out[3][2][0], xmm2);

  }


#ifdef __cplusplus 
};
#endif
