#include <mmintrin.h>

/*
 * the input data is transposed and each 16 bit element in the 8x8 matrix
 * is left aligned:
 * for example in 11...1110000 format
 * If the iDCT is of an I macroblock then 0.5 needs to be added to the DC
 * component (element[0][0] of the matrix)
 */

/* extrn re_matrix */

#include "m1vdec.h"
#include "video.h"

#define ALIGNED(n) __declspec(align(n))

ALIGNED(16) static const Sint16 preSC[] =
{
	16384,22725,21407,19266,16384,12873,8867,4520,
    22725,31521,29692,26722,22725,17855,12299,6270,
    21407,29692,27969,25172,21407,16819,11585,5906,
    19266,26722,25172,22654,19266,15137,10426,5315,
    16384,22725,21407,19266,16384,12873,8867,4520,
    12873,17855,16819,15137,25746,20228,13933,7103,
    17734,24598,23170,20853,17734,13933,9597,4892,
    18081,25080,23624,21261,18081,14206,9785,4988,
};

#define x0005000200010001 _mm_set_pi32(0x00010001,0x00050002)
#define x5a825a825a825a82 _mm_set_pi32(0x5a825a82,0x5a825a82)
#define x539f539f539f539f _mm_set_pi32(0x539f539f,0x539f539f)
#define x4546454645464546 _mm_set_pi32(0x45464546,0x45464546)
#define x61f861f861f861f8 _mm_set_pi32(0x61f861f8,0x61f861f8)


#define TOMM(loc) (*(reinterpret_cast<const __m64*>(loc)))
#define MOVQ(loc, mm) mm = TOMM(loc)
#define MOVQ_TOMEM(src, dest) (*(reinterpret_cast<__m64*>(dest))) = src
#define MOVD_TOMEM(src, dest) (*(reinterpret_cast<Uint32*>(dest))) = src.m64_u32[1]
#define MOVQ_REG(mm_src, mm_dest) mm_dest = mm_src
#define PMULHW(loc, mm) mm = _mm_mulhi_pi16(TOMM(loc), mm)
#define PMULHW_REG(loc, mm) mm = _mm_mulhi_pi16(loc, mm)
#define PSRAW(count, mm) mm = _mm_srai_pi16(mm, count)
#define PSUBSW(src, dest) dest = _mm_subs_pi16(dest, src)
#define PADDSW(src, dest) dest = _mm_adds_pi16(dest, src)
#define PSLLW(count, mm) mm = _mm_slli_pi16(mm, count)
#define PADDW(src, dest) dest = _mm_add_pi16(TOMM(src), dest)
#define PADDW_REG(src, dest) dest = _mm_add_pi16(src, dest)
#define PUNPCKLWD(src, dest) dest = _mm_unpacklo_pi16(dest, src)
#define PUNPCKHWD(src, dest) dest = _mm_unpackhi_pi16(dest, src)
#define PUNPCKLDQ(src, dest) dest = _mm_unpacklo_pi32(dest, src)
#define PUNPCKHDQ(src, dest) dest = _mm_unpackhi_pi32(dest, src)

#define ITOMM(i) _mm_set1_pi16(i)

#define MUNG(n) n

extern "C" void IDCT_mmx(DCTBLOCK data)
{
	__m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;
	__m64 scratch1, scratch3, scratch5, scratch7;

	// Ported assembly bullshit

	Uint8 *esip = reinterpret_cast<Uint8*>(data);
	const Uint8 *ecxp = reinterpret_cast<const Uint8*>(preSC);

/* column 0: even part
 * use V4, V12, V0, V8 to produce V22..V25
 */
	MOVQ(8*12+ecxp, mm0);	/* maybe the first mul can be done together */
				/* with the dequantization in iHuff module */
	PMULHW(8*12 + esip, mm0);		/* V12 */
	MOVQ(8*4 + ecxp, mm1);
	PMULHW(8*4 + esip, mm1);		/* V4 */
	MOVQ(ecxp, mm3);
	PSRAW(1, mm0);			/* t64=t66 */
	PMULHW(esip, mm3);		/* V0 */
	MOVQ(8*8+(ecxp), mm5);		/* duplicate V4 */
	MOVQ_REG(mm1, mm2);			/* added 11/1/96 */
	PMULHW(8*8+(esip),mm5);		/* V8 */
	PSUBSW(mm0, mm1);		/* V16 */
	PMULHW_REG(MUNG(x5a825a825a825a82), mm1);	/* 23170 ->V18 */
	PADDSW(mm0, mm2);		/* V17 */
	MOVQ_REG(mm2, mm0);			/* duplicate V17 */
	PSRAW(1, mm2);			/* t75=t82 */
	PSRAW(2, mm0);			/* t72 */
	MOVQ_REG(mm3, mm4);			/* duplicate V0 */
	PADDSW(mm5, mm3);		/* V19 */
	PSUBSW(mm5, mm4);		/* V20 ;mm5 free */
/* moved from the block below */
	MOVQ(8*10+(ecxp), mm7);
	PSRAW(1, mm3);			/* t74=t81 */
	MOVQ_REG(mm3, mm6);			/* duplicate t74=t81 */
	PSRAW(2, mm4);			/* t77=t79 */
	PSUBSW(mm0, mm1);		/* V21 ; mm0 free */
	PADDSW(mm2, mm3);		/* V22 */
	MOVQ_REG(mm1, mm5);			/* duplicate V21 */
	PADDSW(mm4, mm1);		/* V23 */
	MOVQ_TOMEM(mm3, 8*4+(esip));		/* V22 */
	PSUBSW(mm5, mm4);		/* V24; mm5 free */
	MOVQ_TOMEM(mm1, 8*12+(esip));		/* V23 */
	PSUBSW(mm2, mm6);		/* V25; mm2 free */
	MOVQ_TOMEM(mm4, (esip));		/* V24 */
/* keep mm6 alive all along the next block */
	/* MOVQ_REG(mm6, 8*8(esip) 	V25 */
/* column 0: odd part
 * use V2, V6, V10, V14 to produce V31, V39, V40, V41
 */
/* moved above: MOVQ(8*10(ecxp), mm7 */

	PMULHW(8*10+(esip), mm7);		/* V10 */
	MOVQ(8*6+(ecxp), mm0);
	PMULHW(8*6+(esip), mm0);		/* V6 */
	MOVQ(8*2+(ecxp), mm5);
	MOVQ_REG(mm7, mm3);			/* duplicate V10 */
	PMULHW(8*2+(esip), mm5);		/* V2 */
	MOVQ(8*14+(ecxp), mm4);
	PSUBSW(mm0, mm7);		/* V26 */
	PMULHW(8*14+(esip), mm4);		/* V14 */
	PADDSW(mm0, mm3);		/* V29 ; free mm0 */
	MOVQ_REG(mm7, mm1);			/* duplicate V26 */
	PSRAW(1, mm3);			/* t91=t94 */
	PMULHW_REG(MUNG(x539f539f539f539f),mm7);	/* V33 */
	PSRAW(1, mm1);			/* t96 */
	MOVQ_REG(mm5, mm0);			/* duplicate V2 */
	PSRAW(2, mm4);			/* t85=t87 */
	PADDSW(mm4,mm5);		/* V27 */
	PSUBSW(mm4, mm0);		/* V28 ; free mm4 */
	MOVQ_REG(mm0, mm2);			/* duplicate V28 */
	PSRAW(1, mm5);			/* t90=t93 */
	PMULHW_REG(MUNG(x4546454645464546),mm0);	/* V35 */
	PSRAW(1, mm2);			/* t97 */
	MOVQ_REG(mm5, mm4);			/* duplicate t90=t93 */
	PSUBSW(mm2, mm1);		/* V32 ; free mm2 */
	PMULHW_REG(MUNG(x61f861f861f861f8),mm1);	/* V36 */
	PSLLW(1, mm7);			/* t107 */
	PADDSW(mm3, mm5);		/* V31 */
	PSUBSW(mm3, mm4);		/* V30 ; free mm3 */
	PMULHW_REG(MUNG(x5a825a825a825a82),mm4);	/* V34 */
	PSUBSW(mm1, mm0);		/* V38 */
	PSUBSW(mm7, mm1);		/* V37 ; free mm7 */
	PSLLW(1, mm1);			/* t114 */
/* move from the next block */
	MOVQ_REG(mm6, mm3);			/* duplicate V25 */
/* move from the next block */
	MOVQ(8*4+(esip), mm7);		/* V22 */
	PSLLW(1, mm0);			/* t110 */
	PSUBSW(mm5, mm0);		/* V39 (mm5 needed for next block) */
	PSLLW(2, mm4);			/* t112 */
/* moved from the next block */
	MOVQ(8*12+(esip), mm2);		/* V23 */
	PSUBSW(mm0, mm4);		/* V40 */
	PADDSW(mm4, mm1);		/* V41; free mm0 */
/* moved from the next block */
	PSLLW(1, mm2);			/* t117=t125 */
/* column 0: output butterfly */
/* moved above:
 * MOVQ_REG(mm6, mm3			duplicate V25
 * MOVQ(8*4(esip), mm7			V22
 * MOVQ(8*12(esip), mm2		V23
 * PSLLW(1, mm2			t117=t125
 */
	PSUBSW(mm1, mm6);		/* tm6 */
	PADDSW(mm1, mm3);		/* tm8; free mm1 */
	MOVQ_REG(mm7, mm1);			/* duplicate V22 */
	PADDSW(mm5, mm7);		/* tm0 */
	MOVQ_TOMEM(mm3, 8*8+(esip));		/* tm8; free mm3 */
	PSUBSW(mm5, mm1);		/* tm14; free mm5 */
	MOVQ_TOMEM(mm6, 8*6+(esip));		/* tm6; free mm6 */
	MOVQ_REG(mm2, mm3);			/* duplicate t117=t125 */
	MOVQ((esip), mm6);		/* V24 */
	PADDSW(mm0, mm2);		/* tm2 */
	MOVQ_TOMEM(mm7, (esip));		/* tm0; free mm7 */
	PSUBSW(mm0, mm3);		/* tm12; free mm0 */
	MOVQ_TOMEM(mm1, 8*14+(esip));		/* tm14; free mm1 */
	PSLLW(1, mm6);			/* t119=t123 */
	MOVQ_TOMEM(mm2, 8*2+(esip));		/* tm2; free mm2 */
	MOVQ_REG(mm6, mm0);			/* duplicate t119=t123 */
	MOVQ_TOMEM(mm3, 8*12+(esip));		/* tm12; free mm3 */
	PADDSW(mm4, mm6);		/* tm4 */
/* moved from next block */
	MOVQ(8*5+(ecxp), mm1);
	PSUBSW(mm4, mm0);		/* tm10; free mm4 */
/* moved from next block */
	PMULHW(8*5+(esip), mm1);		/* V5 */
	MOVQ_TOMEM(mm6, 8*4+(esip));		/* tm4; free mm6 */
	MOVQ_TOMEM(mm0, 8*10+(esip));		/* tm10; free mm0 */
/* column 1: even part
 * use V5, V13, V1, V9 to produce V56..V59
 */
/* moved to prev block:
 *	MOVQ(8*5(ecxp), mm1
 *	PMULHW(8*5(esip), mm1		 V5
 */
	MOVQ(8*13+(ecxp), mm7);
	PSLLW(1, mm1);			/* t128=t130 */
	PMULHW(8*13+(esip), mm7);		/* V13 */
	MOVQ_REG(mm1, mm2);			/* duplicate t128=t130 */
	MOVQ(8+(ecxp), mm3);
	PMULHW(8+(esip), mm3);		/* V1 */
	MOVQ(8*9+(ecxp), mm5);
	PSUBSW(mm7, mm1);		/* V50 */
	PMULHW(8*9+(esip), mm5);		/* V9 */
	PADDSW(mm7, mm2);		/* V51 */
	PMULHW_REG(MUNG(x5a825a825a825a82), mm1);	/* 23170 ->V52 */
	MOVQ_REG(mm2, mm6);			/* duplicate V51 */
	PSRAW(1, mm2);			/* t138=t144 */
	MOVQ_REG(mm3, mm4);			/* duplicate V1 */
	PSRAW(2, mm6);			/* t136 */
	PADDSW(mm5, mm3);		/* V53 */
	PSUBSW(mm5, mm4);		/* V54 ;mm5 free */
	MOVQ_REG(mm3, mm7);			/* duplicate V53 */
/* moved from next block */
	MOVQ(8*11+(ecxp), mm0);
	PSRAW(1, mm4);			/* t140=t142 */
	PSUBSW(mm6, mm1);		/* V55 ; mm6 free */
	PADDSW(mm2, mm3);		/* V56 */
	MOVQ_REG(mm4, mm5);			/* duplicate t140=t142 */
	PADDSW(mm1, mm4);		/* V57 */
	MOVQ_TOMEM(mm3, 8*5+(esip));		/* V56 */
	PSUBSW(mm1, mm5);		/* V58; mm1 free */
	MOVQ_TOMEM(mm4, 8*13+(esip));		/* V57 */
	PSUBSW(mm2, mm7);		/* V59; mm2 free */
	MOVQ_TOMEM(mm5, 8*9+(esip));		/* V58 */
/* keep mm7 alive all along the next block
 *	MOVQ_REG(mm7, 8(esip)		V59
 * moved above
 *	MOVQ(8*11(ecxp), mm0
 */
	PMULHW(8*11+(esip), mm0);		/* V11 */
	MOVQ(8*7+(ecxp), mm6);
	PMULHW(8*7+(esip), mm6);		/* V7 */
	MOVQ(8*15+(ecxp), mm4);
	MOVQ_REG(mm0, mm3);			/* duplicate V11 */
	PMULHW(8*15+(esip), mm4);		/* V15 */
	MOVQ(8*3+(ecxp), mm5);
	PSLLW(1, mm6);			/* t146=t152 */
	PMULHW(8*3+(esip), mm5);		/* V3 */
	PADDSW(mm6, mm0);		/* V63 */
/* note that V15 computation has a correction step: 
 * this is a 'magic' constant that rebiases the results to be closer to the
 * expected result.  this magic constant can be refined to reduce the error
 * even more by doing the correction step in a later stage when the number
 * is actually multiplied by 16
 */
	PADDW_REG(MUNG(x0005000200010001), mm4);
	PSUBSW(mm6, mm3);		/* V60 ; free mm6 */
	PSRAW(1, mm0);			/* t154=t156 */
	MOVQ_REG(mm3, mm1);			/* duplicate V60 */
	PMULHW_REG(MUNG(x539f539f539f539f), mm1);	/* V67 */
	MOVQ_REG(mm5, mm6);			/* duplicate V3 */
	PSRAW(2, mm4);			/* t148=t150 */
	PADDSW(mm4, mm5);		/* V61 */
	PSUBSW(mm4, mm6);		/* V62 ; free mm4 */
	MOVQ_REG(mm5, mm4);			/* duplicate V61 */
	PSLLW(1, mm1);			/* t169 */
	PADDSW(mm0, mm5);		/* V65 -> result */
	PSUBSW(mm0, mm4);		/* V64 ; free mm0 */
	PMULHW_REG(MUNG(x5a825a825a825a82), mm4);	/* V68 */
	PSRAW(1, mm3);			/* t158 */
	PSUBSW(mm6, mm3);		/* V66 */
	MOVQ_REG(mm5, mm2);			/* duplicate V65 */
	PMULHW_REG(MUNG(x61f861f861f861f8), mm3);	/* V70 */
	PSLLW(1, mm6);			/* t165 */
	PMULHW_REG(MUNG(x4546454645464546), mm6);	/* V69 */
	PSRAW(1, mm2);			/* t172 */
/* moved from next block */
	MOVQ(8*5+(esip), mm0);		/* V56 */
	PSLLW(1, mm4);			/* t174 */
/* moved from next block */
	PSRAW(1, mm0);			/* t177=t188 */
	PSUBSW(mm3, mm6);		/* V72 */
	PSUBSW(mm1, mm3);		/* V71 ; free mm1 */
	PSUBSW(mm2, mm6);		/* V73 ; free mm2 */
/* moved from next block */
	PSRAW(1, mm5);			/* t178=t189 */
	PSUBSW(mm6, mm4);		/* V74 */
/* moved from next block */
	MOVQ_REG(mm0, mm1);			/* duplicate t177=t188 */
	PADDSW(mm4, mm3);		/* V75 */
/* moved from next block */
	PADDSW(mm5, mm0);		/* tm1 */
/* location
 *  5 - V56
 * 13 - V57
 *  9 - V58
 *  X - V59, mm7
 *  X - V65, mm5
 *  X - V73, mm6
 *  X - V74, mm4
 *  X - V75, mm3
 * free mm0, mm1 & mm2
 * moved above
 *	MOVQ(8*5(esip), mm0		V56
 *	PSLLW(1, mm0			t177=t188 ! new !!
 *	PSLLW(1, mm5			t178=t189 ! new !!
 *	MOVQ_REG(mm0, mm1			duplicate t177=t188
 *	PADDSW(mm5, mm0		tm1
 */
	MOVQ(8*13+(esip), mm2);		/* V57 */
	PSUBSW(mm5, mm1);		/* tm15; free mm5 */
	MOVQ_TOMEM(mm0, 8+(esip));		/* tm1; free mm0 */
	PSRAW(1, mm7);			/* t182=t184 ! new !! */
/* save the store as used directly in the transpose
 *	MOVQ_REG(mm1, 120(esip)		tm15; free mm1
 */
	MOVQ_REG(mm7, mm5);			/* duplicate t182=t184 */
	PSUBSW(mm3, mm7);		/* tm7 */
	PADDSW(mm3, mm5);		/* tm9; free mm3 */
	MOVQ(8*9+(esip), mm0);		/* V58 */
	MOVQ_REG(mm2, mm3);			/* duplicate V57 */
	MOVQ_TOMEM(mm7, 8*7+(esip));		/* tm7; free mm7 */
	PSUBSW(mm6, mm3);		/* tm13 */
	PADDSW(mm6, mm2);		/* tm3 ; free mm6 */
/* moved up from the transpose */
	MOVQ_REG(mm3, mm7);
/* moved up from the transpose */
	PUNPCKLWD(mm1, mm3);
	MOVQ_REG(mm0, mm6);			/* duplicate V58 */
	MOVQ_TOMEM(mm2, 8*3+(esip));		/* tm3; free mm2 */
	PADDSW(mm4, mm0);		/* tm5 */
	PSUBSW(mm4, mm6);		/* tm11; free mm4 */
/* moved up from the transpose */
	PUNPCKHWD(mm1, mm7);
	MOVQ_TOMEM(mm0, 8*5+(esip));		/* tm5; free mm0 */
/* moved up from the transpose */
	MOVQ_REG(mm5, mm2);
/* transpose - M4 part
 *  ---------       ---------
 * | M1 | M2 |     | M1'| M3'|
 *  ---------  -->  ---------
 * | M3 | M4 |     | M2'| M4'|
 *  ---------       ---------
 * Two alternatives: use full mmword approach so the following code can be
 * scheduled before the transpose is done without stores, or use the faster
 * half mmword stores (when possible)
 */
	MOVD_TOMEM(mm3, 8*9+4+(esip));		/* MS part of tmt9 */
	PUNPCKLWD(mm6, mm5);
	MOVD_TOMEM(mm7, 8*13+4+(esip));		/* MS part of tmt13 */
	PUNPCKHWD(mm6, mm2);
	MOVD_TOMEM(mm5, 8*9+(esip));		/* LS part of tmt9 */
	PUNPCKHDQ(mm3, mm5);		/* free mm3 */
	MOVD_TOMEM(mm2, 8*13+(esip));		/* LS part of tmt13 */
	PUNPCKHDQ(mm7, mm2);		/* free mm7 */
/* moved up from the M3 transpose */
	MOVQ(8*8+(esip), mm0);
/* moved up from the M3 transpose */
	MOVQ(8*10+(esip), mm1);
/* moved up from the M3 transpose */
	MOVQ_REG(mm0, mm3);
/* shuffle the rest of the data, and write it with 2 mmword writes */
	MOVQ_TOMEM(mm5, 8*11+(esip));		/* tmt11 */
/* moved up from the M3 transpose */
	PUNPCKLWD(mm1, mm0);
	MOVQ_TOMEM(mm2, 8*15+(esip));		/* tmt15 */
/* moved up from the M3 transpose */
	PUNPCKHWD(mm1, mm3);
/* transpose - M3 part
 * moved up to previous code section
 *	MOVQ(8*8(esip), mm0
 *	MOVQ(8*10(esip), mm1
 *	MOVQ_REG(mm0, mm3
 *	PUNPCKLWD(mm1, mm0
 *	PUNPCKHWD(mm1, mm3
 */
	MOVQ(8*12+(esip), mm6);
	MOVQ(8*14+(esip), mm4);
	MOVQ_REG(mm6, mm2);
/* shuffle the data and write the lower parts of the transposed in 4 dwords */
	PUNPCKLWD(mm4, mm6);
	MOVQ_REG(mm0, mm1);
	PUNPCKHDQ(mm6, mm1);
	MOVQ_REG(mm3, mm7);
	PUNPCKHWD(mm4, mm2);		/* free mm4 */
	PUNPCKLDQ(mm6, mm0);		/* free mm6 */
/* moved from next block */
	MOVQ(8*13+(esip), mm4);		/* tmt13 */
	PUNPCKLDQ(mm2, mm3);
	PUNPCKHDQ(mm2, mm7);		/* free mm2 */
/* moved from next block */
	MOVQ_REG(mm3, mm5);			/* duplicate tmt5 */
/* column 1: even part (after transpose)
* moved above
*	MOVQ_REG(mm3, mm5			duplicate tmt5
*	MOVQ(8*13(esip), mm4		tmt13
*/
	PSUBSW(mm4, mm3);		/* V134 */
	PMULHW_REG(MUNG(x5a825a825a825a82), mm3);	/* 23170 ->V136 */
	MOVQ(8*9+(esip), mm6);		/* tmt9 */
	PADDSW(mm4, mm5);		/* V135 ; mm4 free */
	MOVQ_REG(mm0, mm4);			/* duplicate tmt1 */
	PADDSW(mm6, mm0);		/* V137 */
	PSUBSW(mm6, mm4);		/* V138 ; mm6 free */
	PSLLW(2, mm3);			/* t290 */
	PSUBSW(mm5, mm3);		/* V139 */
	MOVQ_REG(mm0, mm6);			/* duplicate V137 */
	PADDSW(mm5, mm0);		/* V140 */
	MOVQ_REG(mm4, mm2);			/* duplicate V138 */
	PADDSW(mm3, mm2);		/* V141 */
	PSUBSW(mm3, mm4);		/* V142 ; mm3 free */
	MOVQ_TOMEM(mm0, 8*9+(esip));		/* V140 */
	PSUBSW(mm5, mm6);		/* V143 ; mm5 free */
/* moved from next block */
	MOVQ(8*11+(esip), mm0);		/* tmt11 */
	MOVQ_TOMEM(mm2, 8*13+(esip));		/* V141 */
/* moved from next block */
	MOVQ_REG(mm0, mm2);			/* duplicate tmt11 */
/* column 1: odd part (after transpose) */
/* moved up to the prev block
 *	MOVQ(8*11(esip), mm0		tmt11
 *	MOVQ_REG(mm0, mm2			duplicate tmt11
 */
	MOVQ(8*15+(esip), mm5);		/* tmt15 */
	PSUBSW(mm7, mm0);		/* V144 */
	MOVQ_REG(mm0, mm3);			/* duplicate V144 */
	PADDSW(mm7, mm2);		/* V147 ; free mm7 */
	PMULHW_REG(MUNG(x539f539f539f539f), mm0);	/* 21407-> V151 */
	MOVQ_REG(mm1, mm7);			/* duplicate tmt3 */
	PADDSW(mm5, mm7);		/* V145 */
	PSUBSW(mm5, mm1);		/* V146 ; free mm5 */
	PSUBSW(mm1, mm3);		/* V150 */
	MOVQ_REG(mm7, mm5);			/* duplicate V145 */
	PMULHW_REG(MUNG(x4546454645464546), mm1);	/* 17734-> V153 */
	PSUBSW(mm2, mm5);		/* V148 */
	PMULHW_REG(MUNG(x61f861f861f861f8), mm3);	/* 25080-> V154 */
	PSLLW(2, mm0);			/* t311 */
	PMULHW_REG(MUNG(x5a825a825a825a82), mm5);	/* 23170-> V152 */
	PADDSW(mm2, mm7);		/* V149 ; free mm2 */
	PSLLW(1, mm1);			/* t313 */
	MOVQ_REG(mm3, mm2);			/* duplicate V154 */
	PSUBSW(mm0, mm3);		/* V155 ; free mm0 */
	PSUBSW(mm2, mm1);		/* V156 ; free mm2 */
/* moved from the next block */
	MOVQ_REG(mm6, mm2);			/* duplicate V143 */
/* moved from the next block */
	MOVQ(8*13+(esip), mm0);		/* V141 */
	PSLLW(1, mm1);			/* t315 */
	PSUBSW(mm7, mm1);		/* V157 (keep V149) */
	PSLLW(2, mm5);			/* t317 */
	PSUBSW(mm1, mm5);		/* V158 */
	PSLLW(1, mm3);			/* t319 */
	PADDSW(mm5, mm3);		/* V159 */
/* column 1: output butterfly (after transform)
 * moved to the prev block
 *	MOVQ_REG(mm6, mm2			duplicate V143
 *	MOVQ(8*13(esip), mm0		V141
 */
	PSUBSW(mm3, mm2);		/* V163 */
	PADDSW(mm3, mm6);		/* V164 ; free mm3 */
	MOVQ_REG(mm4, mm3);			/* duplicate V142 */
	PSUBSW(mm5, mm4);		/* V165 ; free mm5 */
	MOVQ_REG(mm2, MUNG(scratch7));		/* out7 */
	PSRAW(4, mm6);
	PSRAW(4, mm4);
	PADDSW(mm5, mm3);		/* V162 */
	MOVQ(8*9+(esip), mm2);		/* V140 */
	MOVQ_REG(mm0, mm5);			/* duplicate V141 */
/* in order not to perculate this line up,
 * we read 72(esip) very near to this location
 */
	MOVQ_TOMEM(mm6, 8*9+(esip));		/* out9 */
	PADDSW(mm1, mm0);		/* V161 */
	MOVQ_REG(mm3, MUNG(scratch5));		/* out5 */
	PSUBSW(mm1, mm5);		/* V166 ; free mm1 */
	MOVQ_TOMEM(mm4, 8*11+(esip));		/* out11 */
	PSRAW(4, mm5);
	MOVQ_REG(mm0, MUNG(scratch3));		/* out3 */
	MOVQ_REG(mm2, mm4);			/* duplicate V140 */
	MOVQ_TOMEM(mm5, 8*13+(esip));		/* out13 */
	PADDSW(mm7, mm2);		/* V160 */
/* moved from the next block */
	MOVQ(8+(esip), mm0);
	PSUBSW(mm7, mm4);		/* V167 ; free mm7 */
/* moved from the next block */
	MOVQ(8*3+(esip), mm7);
	PSRAW(4, mm4);
	MOVQ_REG(mm2, MUNG(scratch1));		/* out1 */
/* moved from the next block */
	MOVQ_REG(mm0, mm1);
	MOVQ_TOMEM(mm4, 8*15+(esip));		/* out15 */
/* moved from the next block */
	PUNPCKLWD(mm7, mm0);
/* transpose - M2 parts
 * moved up to the prev block
 *	MOVQ(8(esip), mm0
 *	MOVQ(8*3(esip), mm7
 *	MOVQ_REG(mm0, mm1
 *	PUNPCKLWD(mm7, mm0
 */
	MOVQ(8*5+(esip), mm5);
	PUNPCKHWD(mm7, mm1);
	MOVQ(8*7+(esip), mm4);
	MOVQ_REG(mm5, mm3);
/* shuffle the data and write the lower parts of the trasposed in 4 dwords */
	MOVD_TOMEM(mm0, 8*8+(esip));		/* LS part of tmt8 */
	PUNPCKLWD(mm4, mm5);
	MOVD_TOMEM(mm1, 8*12+(esip));		/* LS part of tmt12 */
	PUNPCKHWD(mm4, mm3);
	MOVD_TOMEM(mm5, 8*8+4+(esip));		/* MS part of tmt8 */
	PUNPCKHDQ(mm5, mm0);		/* tmt10 */
	MOVD_TOMEM(mm3, 8*12+4+(esip));		/* MS part of tmt12 */
	PUNPCKHDQ(mm3, mm1);		/* tmt14 */
/* transpose - M1 parts */
	MOVQ((esip), mm7);
	MOVQ(8*2+(esip), mm2);
	MOVQ_REG(mm7, mm6);
	MOVQ(8*4+(esip), mm5);
	PUNPCKLWD(mm2, mm7);
	MOVQ(8*6+(esip), mm4);
	PUNPCKHWD(mm2, mm6);		/* free mm2 */
	MOVQ_REG(mm5, mm3);
	PUNPCKLWD(mm4, mm5);
	PUNPCKHWD(mm4, mm3);		/* free mm4 */
	MOVQ_REG(mm7, mm2);
	MOVQ_REG(mm6, mm4);
	PUNPCKLDQ(mm5, mm7);		/* tmt0 */
	PUNPCKHDQ(mm5, mm2);		/* tmt2 ; free mm5 */
/* shuffle the rest of the data, and write it with 2 mmword writes */
	PUNPCKLDQ(mm3, mm6);		/* tmt4 */
/* moved from next block */
	MOVQ_REG(mm2, mm5);			/* duplicate tmt2 */
	PUNPCKHDQ(mm3, mm4);		/* tmt6 ; free mm3 */
/* moved from next block */
	MOVQ_REG(mm0, mm3);			/* duplicate tmt10 */
/* column 0: odd part (after transpose)
 *moved up to prev block
 *	MOVQ_REG(mm0, mm3			duplicate tmt10
 *	MOVQ_REG(mm2, mm5			duplicate tmt2
 */
	PSUBSW(mm4, mm0);		/* V110 */
	PADDSW(mm4, mm3);		/* V113 ; free mm4 */
	MOVQ_REG(mm0, mm4);			/* duplicate V110 */
	PADDSW(mm1, mm2);		/* V111 */
	PMULHW_REG(MUNG(x539f539f539f539f), mm0);	/* 21407-> V117 */
	PSUBSW(mm1, mm5);		/* V112 ; free mm1 */
	PSUBSW(mm5, mm4);		/* V116 */
	MOVQ_REG(mm2, mm1);			/* duplicate V111 */
	PMULHW_REG(MUNG(x4546454645464546), mm5);	/* 17734-> V119 */
	PSUBSW(mm3, mm2);		/* V114 */
	PMULHW_REG(MUNG(x61f861f861f861f8), mm4);	/* 25080-> V120 */
	PADDSW(mm3, mm1);		/* V115 ; free mm3 */
	PMULHW_REG(MUNG(x5a825a825a825a82), mm2);	/* 23170-> V118 */
	PSLLW(2, mm0);			/* t266 */
	MOVQ_TOMEM(mm1, (esip));		/* save V115 */
	PSLLW(1, mm5);			/* t268 */
	PSUBSW(mm4, mm5);		/* V122 */
	PSUBSW(mm0, mm4);		/* V121 ; free mm0 */
	PSLLW(1, mm5);			/* t270 */
	PSUBSW(mm1, mm5);		/* V123 ; free mm1 */
	PSLLW(2, mm2);			/* t272 */
	PSUBSW(mm5, mm2);		/* V124 (keep V123) */
	PSLLW(1, mm4);			/* t274 */
	MOVQ_TOMEM(mm5, 8*2+(esip));		/* save V123 ; free mm5 */
	PADDSW(mm2, mm4);		/* V125 (keep V124) */
/* column 0: even part (after transpose) */
	MOVQ(8*12+(esip), mm0);		/* tmt12 */
	MOVQ_REG(mm6, mm3);			/* duplicate tmt4 */
	PSUBSW(mm0, mm6);		/* V100 */
	PADDSW(mm0, mm3);		/* V101 ; free mm0 */
	PMULHW_REG(MUNG(x5a825a825a825a82), mm6);	/* 23170 ->V102 */
	MOVQ_REG(mm7, mm5);			/* duplicate tmt0 */
	MOVQ(8*8+(esip), mm1);		/* tmt8 */
	PADDSW(mm1, mm7);		/* V103 */
	PSUBSW(mm1, mm5);		/* V104 ; free mm1 */
	MOVQ_REG(mm7, mm0);			/* duplicate V103 */
	PSLLW(2, mm6);			/* t245 */
	PADDSW(mm3, mm7);		/* V106 */
	MOVQ_REG(mm5, mm1);			/* duplicate V104 */
	PSUBSW(mm3, mm6);		/* V105 */
	PSUBSW(mm3, mm0);		/* V109; free mm3 */
	PADDSW(mm6, mm5);		/* V107 */
	PSUBSW(mm6, mm1);		/* V108 ; free mm6 */
/* column 0: output butterfly (after transform) */
	MOVQ_REG(mm1, mm3);			/* duplicate V108 */
	PADDSW(mm2, mm1);		/* out4 */
	PSRAW(4, mm1);
	PSUBSW(mm2, mm3);		/* out10 ; free mm2 */
	PSRAW(4, mm3);
	MOVQ_REG(mm0, mm6);			/* duplicate V109 */
	MOVQ_TOMEM(mm1, 8*4+(esip));		/* out4 ; free mm1 */
	PSUBSW(mm4, mm0);		/* out6 */
	MOVQ_TOMEM(mm3, 8*10+(esip));		/* out10 ; free mm3 */
	PSRAW(4, mm0);
	PADDSW(mm4, mm6);		/* out8 ; free mm4 */
	MOVQ_REG(mm7, mm1);			/* duplicate V106 */
	MOVQ_TOMEM(mm0, 8*6+(esip));		/* out6 ; free mm0 */
	PSRAW(4, mm6);
	MOVQ((esip), mm4);		/* V115 */
	MOVQ_TOMEM(mm6, 8*8+(esip));		/* out8 ; free mm6 */
	MOVQ_REG(mm5, mm2);			/* duplicate V107 */
	MOVQ(8*2+(esip), mm3);		/* V123 */
	PADDSW(mm4, mm7);		/* out0 */
/* moved up from next block */
	MOVQ_REG(MUNG(scratch3), mm0);
	PSRAW(4, mm7);
/* moved up from next block */
	MOVQ_REG(MUNG(scratch5), mm6); 
	PSUBSW(mm4, mm1);		/* out14 ; free mm4 */
	PADDSW(mm3, mm5);		/* out2 */
	PSRAW(4, mm1);
	MOVQ_TOMEM(mm7, (esip));		/* out0 ; free mm7 */
	PSRAW(4, mm5);
	MOVQ_TOMEM(mm1, 8*14+(esip));		/* out14 ; free mm1 */
	PSUBSW(mm3, mm2);		/* out12 ; free mm3 */
	MOVQ_TOMEM(mm5, 8*2+(esip));		/* out2 ; free mm5 */
	PSRAW(4, mm2);
/* moved up to the prev block */
	MOVQ_REG(MUNG(scratch7), mm4);
/* moved up to the prev block */
	PSRAW(4, mm0);
	MOVQ_TOMEM(mm2, 8*12+(esip));		/* out12 ; free mm2 */
/* moved up to the prev block */
	PSRAW(4, mm6);
/* move back the data to its correct place
* moved up to the prev block
 *	MOVQ(MUNG(scratch3), mm0
 *	MOVQ(MUNG(scratch5), mm6
 *	MOVQ(MUNG(scratch7), mm4
 *	PSRAW(4, mm0
 *	PSRAW(4, mm6
*/
	MOVQ_REG(MUNG(scratch1), mm1);
	PSRAW(4, mm4);
	MOVQ_TOMEM(mm0, 8*3+(esip));		/* out3 */
	PSRAW(4, mm1);
	MOVQ_TOMEM(mm6, 8*5+(esip));		/* out5 */
	MOVQ_TOMEM(mm4, 8*7+(esip));		/* out7 */
	MOVQ_TOMEM(mm1, 8+(esip));		/* out1 */
	_mm_empty();
}
