#include <xmmintrin.h>

/*
 * SSE_BMP24ToYUV422 - convert a 24-bit BGR bitmap to planar YUV with
 * horizontally subsampled chroma (full-resolution Y, one U and one V
 * per horizontal pixel pair, i.e. 4:2:2-style planes).
 *
 * pBMP24  - source pixels, 24bpp, B-G-R byte order; addressed bottom-up
 *           (the first code line seeks to the last row and each outer
 *           iteration steps the pointer back by one row).
 * pY      - destination luma plane, one byte per pixel.
 * pU, pV  - destination chroma planes, one byte per pair of pixels.
 * width,height   - destination plane geometry (width is the Y pitch;
 *                  U/V pitch is width/2).
 * w_org,h_org    - the sub-rectangle actually converted.
 * pitch   - source bitmap row stride in bytes.
 *
 * Processes 8 pixels per inner iteration using MMX registers; the
 * luma/chroma coefficients are approximated by sums/differences of
 * right-shifts (see the table below). Results are kept in 8.8 fixed
 * point until the final >>8.
 *
 * NOTE(review) - assumptions not checkable from this file:
 *   - w_org is expected to be a multiple of 8; a remainder of 1..7
 *     columns per row is silently skipped by the j<w_org>>3 loop.
 *   - pY should be 8-byte aligned for _mm_stream_pi (movntq);
 *     pU/pV take plain 32-bit stores, fine unaligned on x86.
 *   - BYTE/DWORD come from a Windows header included elsewhere.
 */
void  SSE_BMP24ToYUV422(
        BYTE* pBMP24,
        BYTE* pY,
        BYTE* pU,
        BYTE* pV,
        int width, int height,
        int w_org, int h_org,
        int pitch)
{
  int i,j;
  DWORD U,V;
  __m64
    src0,src1,src2,
    t0,t1,t2,t3,t4,t5, rr,gg,bb, sum, avgb, avgrg, u, v,
    val10 = _mm_set1_pi8(0x10),    /* +16 luma offset, added after packing to bytes   */
    val80 = _mm_set1_pi16(0x8000), /* +128 chroma offset in 8.8 fixed point (128<<8)  */
    zero = _mm_setzero_si64();
/* Coefficient table: the ideal BT.601-style integer coefficients (x/256)
   and the shift decompositions used below. "66.+20" means coefficient 66
   with the residual error of the shift approximation noted after the dot.
        R        G        B               R             G            B
Y =   66.+20  129.-02   25.-08   +16   100 0010 (2) 1000 0001 (2)   1 1001 (3)     7
U =  -38.-11  -74.+49  112.-38  +128   -10 0110 (3) -100 1010 (3) 111 0000 (3)     9
V =  112.-38  -94.+20  -18.+17  +128   111 0000 (3) -101 1110 (5)  -1 0010 (2)    10     10+9=19 -> (3+1) + (2+1+2+1) + (2+1) = 13
                                     _1001 0000
                                        10 0000
                                       111 0000
*/
  pBMP24 += (height-1)*pitch;      /* bottom-up bitmap: start at the last row */
  for(i=0; i<h_org; i++)
  {
    for(j=0; j<w_org>>3; j++)  // 8 pixels per iteration
    {
      // read 8 pixels (even line) = 24 bytes of packed B-G-R
      src0 = *((__m64*) (pBMP24 +   0));   // src0 = g2-b2-r1-g1 b1-r0-g0-b0
      src1 = *((__m64*) (pBMP24 +   8));   // src1 = b5-r4-g4-b4 r3-g3-b3-r2
      src2 = *((__m64*) (pBMP24 +  16));   // src2 = r7-g7-b7-r6 g6-b6-r5-g5
      // extract channels and search average

      // shorthand for the MMX shuffle/shift primitives used in the
      // deinterleave network below (byte-granular shifts, unpacks, or, pavgb)
      #define _left(a,b) _mm_slli_si64(a,b*8)
      #define _right(a,b) _mm_srli_si64(a,b*8)
      #define _bw0(a,b) _mm_unpacklo_pi8(a,b)
      #define _bw1(a,b) _mm_unpackhi_pi8(a,b)
      #define _wd0(a,b) _mm_unpacklo_pi16(a,b)
      #define _wd1(a,b) _mm_unpackhi_pi16(a,b)
      #define _or(a,b) _mm_or_si64(a,b)
      #define _avg(a,b) _mm_avg_pu8(a,b)

      // Deinterleave BGR triplets into per-channel registers rr/gg/bb
      // (pixels 0..7 each), and simultaneously build pairwise averages
      // (capital letters = avg of two adjacent pixels) for chroma.
      t1 = _bw0(src0,_right(src1,4));                             //  b5-b1-r4-r0 g4-g0-b4-b0
      t2 = _bw0(_or(_right(src0,6),_left(src1,2)),_right(src2,2));//  b7-b3-r6-r2 g6-g2-b6-b2
      t3 = _bw0(t1,t2);                                           //  g6-g4-g2-g0 b6-b4-b2-b0
      t4 = _bw1(t1,t2);                                           //  b7-b5-b3-b1 r6-r4-r2-r0
      bb = _bw0(t3,_right(t4,4));                                 //  b7-b6-b5-b4 b3-b2-b1-b0
      avgb = _avg(t3,_right(t4,4));                               //  ??-??-??-?? B3-B2-B1-B0
      t5 = _or(_right(t3,4),_left(t4,4));                         //  r6-r4-r2-r0 g6-g4-g2-g0
      t1 = _bw0(_right(src0,4),src2);                             //  g6-g2-b6-b2 r5-r1-g5-g1
      t2 = _bw0(_right(src1,2),_right(src2,6));                   //  00-g4-00-b4 r7-r3-g7-g3
      t3 = _bw0(t1,t2);                                           //  r7-r5-r3-r1 g7-g5-g3-g1
      gg = _bw0(t5,t3);                                           //  g7-g6-g5-g4 g3-g2-g1-g0
      rr = _bw1(t5,t3);                                           //  r7-r6-r5-r4 r3-r2-r1-r0
      avgrg = _avg(t3,t5);                                        //  R3-R2-R1-R0 G3-G2-G1-G0


      // calculate low Y'ks [3..0]
      // Unpack bytes to the HIGH half of each 16-bit lane (value<<8),
      // so a plain >>k yields value*(256>>k) in 8.8 fixed point.
      t1 = _bw0(zero,rr);
      t2 = _bw0(zero,gg);
      t3 = _bw0(zero,bb);
      // R part: R*(1/4 + 1/128) ~ 66/256
      t0 = _mm_add_pi16( _mm_srli_pi16(t1,2) , _mm_srli_pi16(t1,7) );
      // G part: G*(1/2 + 1/256) ~ 129/256
      t0 = _mm_add_pi16( t0 , _mm_add_pi16(_mm_srli_pi16(t2,1),_mm_srli_pi16(t2,8)) );
      // B part: B*(1/16 + 1/32 + 1/256) ~ 25/256
      t0 = _mm_add_pi16( t0 , _mm_add_pi16(_mm_srli_pi16(t3,4),_mm_add_pi16(_mm_srli_pi16(t3,5),_mm_srli_pi16(t3,8))) );
      sum = _mm_srli_pi16( t0 , 8 );    // sum =  00 y3 00 y2  00 y1 00 y0
.
      // calculate high Y'ks [7..4] - same shift network on the high bytes
      t1 = _bw1(zero,rr);
      t2 = _bw1(zero,gg);
      t3 = _bw1(zero,bb);
      // R part
      t0 = _mm_add_pi16( _mm_srli_pi16(t1,2) , _mm_srli_pi16(t1,7) );
      // G part
      t0 = _mm_add_pi16( t0 , _mm_add_pi16(_mm_srli_pi16(t2,1),_mm_srli_pi16(t2,8)) );
      // B part
      t0 = _mm_add_pi16( t0 , _mm_add_pi16(_mm_srli_pi16(t3,4),_mm_add_pi16(_mm_srli_pi16(t3,5),_mm_srli_pi16(t3,8))) );
      t0 = _mm_srli_pi16( t0 , 8 );     // sum = 00 yb 00 ya  00 y9 00 y8

      t0 = _mm_packs_pu16( sum , t0 );  // t0 = y7 y6 y5 y4  y3 y2 y1 y0
      t0 = _mm_add_pi8( t0 , val10 );   // add the +16 luma offset

      // write Y (non-temporal store; NOTE(review): needs 8-byte-aligned pY)
      _mm_stream_pi( (__m64*) pY , t0 );
      pY+=8;

      // calculate U and V using averaged values (4 pixel pairs).
      // Unpack the averaged bytes into the high half of 16-bit lanes
      // (8.8 fixed point), as for Y above.
      gg = _bw0(zero,avgrg);
      rr = _bw1(zero,avgrg);
      bb = _bw0(zero,avgb);

      // calculate U and V.
      // Trick: U is accumulated with the OPPOSITE sign throughout
      // (the lines marked "wrong sign") and flipped once at the end
      // via u = val80 - u; this saves subtractions. V is accumulated
      // with the correct sign ("right sign") and just gets +128.
      // R part: V += R*112/256, U -= R*38/256 (negated into u)
      sum = _mm_add_pi16( _mm_srli_pi16(rr,1) , _mm_srli_pi16(rr,4) );
      u = _mm_srli_pi16( _mm_add_pi16(sum,_mm_srli_pi16(rr,5)) , 2 );   // wrong sign
      v = _mm_sub_pi16( sum , _mm_srli_pi16(rr,3) );                    // right sign
      // G part: V -= G*94/256, U -= G*74/256 (negated into u)
      sum = _mm_add_pi16( _mm_srli_pi16(gg,2) , _mm_srli_pi16(gg,4) );
	  sum = _mm_add_pi16( sum , _mm_srli_pi16(sum,3) );
      v = _mm_sub_pi16( v , sum );                                        // right sign
      u = _mm_add_pi16( u , _mm_sub_pi16( sum , _mm_srli_pi16(gg,4) ) );// wrong sign
      v = _mm_sub_pi16( v , _mm_srli_pi16(gg,6) );                       // right sign
      // B part: V -= B*18/256, U += B*112/256 (negated into u)
      sum = _mm_add_pi16( _mm_srli_pi16(bb,1) , _mm_srli_pi16(bb,4) );
      v = _mm_sub_pi16( v , _mm_srli_pi16(sum,3) );                      // right sign
      u = _mm_sub_pi16( u , _mm_sub_pi16(sum,_mm_srli_pi16(bb,3)) );    // wrong sign
      // add 128 (and flip u back to the correct sign in the same op)
      v = _mm_add_pi16( v , val80 );                                      // right sign
      u = _mm_sub_pi16( val80 , u);                                       // right sign
      // prepare and write U,V: drop the 8.8 fraction, pack to bytes
      u = _mm_srli_pi16( u , 8 );
      v = _mm_srli_pi16( v , 8 );
      sum = _mm_packs_pu16( u , v );   // sum = V3 V2 V1 V0  U3 U2 U1 U0
      U = _m_to_int(sum);              // low dword  = 4 U samples
      *(DWORD*) pU = U ;
      V = _m_to_int(_right(sum,4));    // high dword = 4 V samples
      *(DWORD*) pV = V ;
      pU += 4;
      pV += 4;
	  pBMP24 += 24;                    // advance source by 8 BGR pixels
    }
    // step back one source row (bottom-up): undo the 3*w_org bytes the
    // inner loop advanced, then subtract one full pitch
    pBMP24 -= pitch + w_org + (w_org<<1);
    // skip destination padding when the converted rect is narrower
    // than the plane width (U/V planes have half the pitch)
    pY += width - w_org;
    pU += (width - w_org)>>1;
    pV += (width - w_org)>>1;
  }
  _m_empty();  // EMMS: release MMX state so later x87 FP code works
}
