﻿////////////////////////////////////////////////////////////////////////////////////////////////////
// file:	C:\Users\Quan\Documents\Visual Studio 2010\Projects\source\cvWavelet\dwt.cu
//
// summary:	dwt class
////////////////////////////////////////////////////////////////////////////////////////////////////

#include "dwt.hpp"
#include "cutils.hpp"
#include <iostream>
using namespace std;
namespace cuda
{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Calculate logarithm base 2 of a number. </summary>
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Smallest j such that 2^j >= n, i.e. ceil(log2(n)) for n >= 1 (returns 0 for n <= 1). </summary>
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline int _log2 (int n)
{
	int exponent = 0;
	// Double a power of two until it reaches or exceeds n, counting the doublings.
	for (int pow2 = 1; pow2 < n; pow2 <<= 1)
		++exponent;
	return exponent;
}

/// <summary>	Lifting-scheme coefficients in constant memory: a (alpha, predict step applied to
/// 			odd samples) and b (beta, update step applied to even samples).  c = sqrt(2) is a
/// 			normalisation factor; it is not referenced by any kernel visible in this file. </summary>
__constant__ float a = -0.5f;
__constant__ float b = 0.25f;
__constant__ float c = 1.4142135623730950488016887242097f;

/// <summary>	Tile width in elements processed by one thread block (equals warpSize). </summary>
#define bCols 32		//warpSize
/// <summary>	Tile height in elements processed by one thread block (equals warpSize). </summary>
#define bRows 32		//warpSize

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Swaps two float2 lvalues in place (device code).
/// 			Wrapped in do { } while(0) so the expansion is a single statement and is safe in
/// 			un-braced if/else branches.  Parameters renamed x/y so they cannot be confused
/// 			with the __constant__ lifting coefficients a and b declared above.
/// 			The __threadfence_block() calls preserve the original cross-thread visibility
/// 			ordering of the three assignments. </summary>
#define swap(x,y)				\
do {							\
	float2 tmp;					\
	tmp = x;					\
	__threadfence_block();		\
	x = y;						\
	__threadfence_block();		\
	y = tmp;					\
	__threadfence_block();		\
} while(0)

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Kernel for encode shared memory. </summary>
///
/// <remarks>	Quan, 5/9/2012. </remarks>
///
/// <param name="dst">  	[in,out] Device pointer receiving the transformed coefficients. </param>
/// <param name="src">  	[in] Device pointer to the source image data. </param>
/// <param name="nRows">	Number of rows in the image (not referenced by this kernel). </param>
/// <param name="nCols">	Number of columns (row pitch, in float2 elements) of src/dst. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ 
void __encode_shared(float2* dst, float2* src, int nRows, int nCols)
{
	/// <summary>	Forward 2D lifting DWT of one 32x32 tile, computed over five dyadic levels
	/// 			entirely in shared memory.  Each level lifts along x then along y (alpha =
	/// 			predict on odd samples, beta = update on even samples) and then packs the
	/// 			even (low-pass) coefficients to the front of the active range.
	/// 			Expected launch: blockDim = (16, 32); each thread moves two elements per row.
	/// 			nRows is not referenced; nCols is the row pitch of src/dst in float2 elements.
	/// 			NOTE(review): within each level the threads synchronise only with
	/// 			__threadfence_block(), i.e. the code relies on implicit warp lock-step
	/// 			execution -- confirm this still holds on SM70+ (independent thread
	/// 			scheduling). </summary>
	__shared__ float2 sMem[32][32+1];	// +1 dummy column avoids shared-memory bank conflicts
	int head;
	int tail;
	/// <summary>	Copy a (32x32) block from global memory to shared memory. </summary>
	sMem[threadIdx.y][threadIdx.x] 		= src[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x];
	sMem[threadIdx.y][threadIdx.x+16]	= src[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x+16];
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Level 1: transform the full 32x32 tile (16 working lanes per row). </summary>
	if((threadIdx.y<32) && (threadIdx.x<16))
	{
		/// <summary>	Along x direction. </summary>
		/// <summary>	Lifting with alpha (predict on odd columns; rightmost column mirrors its left neighbour). </summary>
		if(threadIdx.x == (16-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();

		/// <summary>	Lifting with beta (update on even columns; leftmost column mirrors its right neighbour). </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
			+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
			+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();		

		/// <summary>	Rearrange the odd and even coefficients inside shared memory (triangular bubble of pairwise swaps). </summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail  = 16-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head++;
		}
		__threadfence_block();

		/// <summary>	Along y direction (same three steps with row/column indices exchanged). </summary>
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (16-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();		

		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail = 16-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head++;
		}
		__threadfence_block();
	}
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Level 2: transform the 16x16 low-pass subband. </summary>
	if((threadIdx.y<16) && (threadIdx.x<8))
	{
		/// <summary>	Along x direction. </summary>
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (8-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
			+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
			+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();		

		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail = 8-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head++;
		}
		__threadfence_block();

		/// <summary>	Along y direction. </summary>
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (8-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();	

		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail = 8-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head++;
		}
		__threadfence_block();
	}
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Level 3: transform the 8x8 low-pass subband. </summary>
	if((threadIdx.y<8) && (threadIdx.x<4))
	{
		/// <summary>	Along x direction. </summary>
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (4-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
		+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
		+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();		

		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail = 4-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head++;
		}
		__threadfence_block();	

		/// <summary>	Along y direction. </summary>
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (4-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();	

		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail = 4-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head++;
		}
		__threadfence_block();
	}
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Level 4: transform the 4x4 low-pass subband. </summary>
	if((threadIdx.y<4) && (threadIdx.x<2))
	{
		/// <summary>	Along x direction. </summary>
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (2-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
			+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
			+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();		

		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail = 2-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head++;
		}
		__threadfence_block();	

		/// <summary>	Along y direction. </summary>
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (2-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();	

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();			

		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail = 2-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head++;
		}
		__threadfence_block();		
	}
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Level 5: transform the final 2x2 low-pass subband (one working lane).
	/// 			With threadIdx.x fixed at 0 only the mirror branches and empty swap loops run. </summary>
	if((threadIdx.y<2) && (threadIdx.x<1))
	{
		/// <summary>	Along x direction. </summary>
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (1-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
			+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
			+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();	
				
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail = 1-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head++;
		}
		__threadfence_block();	

		/// <summary>	Along y direction. </summary>
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (1-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
			+= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();	

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
			+= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();			

		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
		head = 1;
		tail = 1-1;
#pragma unroll
		for(tail; tail> 0; tail--)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head++;
		}
		__threadfence_block();	
	}
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Copy the (32x32) result block from shared memory back to global memory. </summary>
	dst[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x]		= sMem[threadIdx.y][threadIdx.x];
	dst[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x+16]	= sMem[threadIdx.y][threadIdx.x+blockDim.x];
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper for the shared-memory forward DWT kernel. </summary>
///
/// <remarks>	Launches a single (bCols/2 x bRows)-thread block, so only the top-left 32x32
/// 			tile of the image is transformed.  src and dst must be device pointers with a
/// 			row pitch of nCols float2 elements.  Errors are reported to stderr rather
/// 			than silently discarded. </remarks>
///
/// <param name="dst">  	[in,out] Device pointer receiving the transformed coefficients. </param>
/// <param name="src">  	[in] Device pointer to the source tile. </param>
/// <param name="nRows">	Number of rows in the image (not referenced by the kernel). </param>
/// <param name="nCols">	Number of columns (row pitch, in float2 elements) of src/dst. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void encode_shared(float2* dst, float2* src, int nRows, int nCols)
{
	/// <summary>	Kernel Configuration. </summary>
	dim3 nBlocks(1, 1);
	dim3 nThreads(bCols/2, bRows);

	/// <summary>	Kernel Launching. </summary>
	__encode_shared<<<nBlocks, nThreads>>>(dst, src, nRows, nCols);

	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the supported
	// replacement.  Check the launch first (bad-configuration errors), then the
	// synchronization (asynchronous execution errors), and actually report failures.
	cudaError_t err = cudaGetLastError();
	if (err == cudaSuccess)
		err = cudaDeviceSynchronize();
	if (err != cudaSuccess)
		cerr << "encode_shared: " << cudaGetErrorString(err) << endl;
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Kernel for Encode static. </summary>
///
/// <remarks>	Quan, 5/9/2012. </remarks>
///
/// <param name="dst">  	[in,out] Device pointer receiving the (transposed) coefficients. </param>
/// <param name="src">  	[in] Device pointer to the source image data. </param>
/// <param name="iRows">	Number of image rows covered by the grid. </param>
/// <param name="iCols">	Number of image columns (row pitch, in float2 elements). </param>
/// <param name="nRows">	The rows (not referenced by this kernel). </param>
/// <param name="nCols">	The cols (not referenced by this kernel). </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ 
void __encode_static(float2* dst, float2* src, 
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	One horizontal lifting pass (alpha predict + beta update) over a bRows x bCols
	/// 			tile, with halo columns fetched from neighbouring blocks, writing the result
	/// 			transposed so a second launch performs the vertical pass.
	/// 			Expected launch: blockDim = (bCols/2, bRows); grid covers iRows x iCols.
	/// 			nRows/nCols are not referenced.
	/// 			NOTE(review): synchronisation after the halo loads is __threadfence_block()
	/// 			only -- correctness relies on warp-synchronous execution; confirm on SM70+. </summary>
	/// <summary>	Declare shared memory with dummy columns: bCols data columns plus three halo
	/// 			slots ([2*blockDim.x .. 2*blockDim.x+2]); padding also avoids bank conflicts. </summary>
	__shared__ float2 sMem[bRows][bCols+3];
	/**********************************************************************************************************/
	/// <summary>	Copy a (32x32) block from global memory to shared memory. </summary>
	sMem[threadIdx.y][threadIdx.x] 
	= src[(blockIdx.y*blockDim.y+threadIdx.y)*iCols + blockIdx.x*(2*blockDim.x)+threadIdx.x];
	sMem[threadIdx.y][threadIdx.x+blockDim.x]
	= src[(blockIdx.y*blockDim.y+threadIdx.y)*iCols + blockIdx.x*(2*blockDim.x)+threadIdx.x+blockDim.x];
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Copy the leftmost column of the right-neighbour block into halo slot [2*blockDim.x]. </summary>
	/// <summary>	In the rightmost block there is no right neighbour: mirror the column before the last one. </summary>
	if(blockIdx.x == (gridDim.x-1))
	{
		sMem[threadIdx.y][2*blockDim.x]
		= sMem[threadIdx.y][2*blockDim.x-1-1];
	}
	else
	{
		sMem[threadIdx.y][2*blockDim.x]
		= src[(blockIdx.y*blockDim.y+threadIdx.y)*iCols + (blockIdx.x+1)*(2*blockDim.x)+0];
	}
	__threadfence_block();	
	/**********************************************************************************************************/
	/// <summary>	Copy the two rightmost columns of the left-neighbour block into halo slots +1 and +2. </summary>
	/// <summary>	In the leftmost block there is no left neighbour: leave the slots untouched
	/// 			(they are overwritten below before being read). </summary>
	if(blockIdx.x == 0)
	{
		//It's not important, they can be anything
	}
	else
	{
		sMem[threadIdx.y][2*blockDim.x+1]
		= src[(blockIdx.y*blockDim.y+threadIdx.y)*iCols + (blockIdx.x-1)*(2*blockDim.x) + 2*blockDim.x-2];
		sMem[threadIdx.y][2*blockDim.x+2]
		= src[(blockIdx.y*blockDim.y+threadIdx.y)*iCols + (blockIdx.x-1)*(2*blockDim.x) + 2*blockDim.x-1];
	}
	__threadfence_block();	
	/**********************************************************************************************************/
	/// <summary>	Lifting with alpha (predict on odd columns). </summary>
	/// <summary>	The rightmost thread lifts against the right halo column instead of a local neighbour. </summary>
	if(threadIdx.x == (blockDim.x-1))
	{
		sMem[threadIdx.y][2*threadIdx.x+1]
		+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1] + sMem[threadIdx.y][2*blockDim.x]);
	}
	else
	{
		sMem[threadIdx.y][2*threadIdx.x+1]
		+= a*(sMem[threadIdx.y][2*threadIdx.x+1-1] + sMem[threadIdx.y][2*threadIdx.x+1+1]);
	}
	__threadfence_block();	
	/**********************************************************************************************************/
	/// <summary>	Apply the alpha lift to halo slot +2 (the left neighbour's last odd column),
	/// 			using this block's first column and halo slot +1 as its neighbours. </summary>
	/// 0 ... 2*blockDim, 2*blockDim+1, 2*blockDim+2;
	sMem[threadIdx.y][2*blockDim.x+2]
		+= a*(sMem[threadIdx.y][0] + sMem[threadIdx.y][2*blockDim.x+1]);
	__threadfence_block();	
	/**********************************************************************************************************/
	/// <summary>	The leftmost block has no left neighbour: replace halo slot +2 with the
	/// 			mirrored column 1 so the beta step below sees a boundary reflection. </summary>
	if(blockIdx.x == 0)
		sMem[threadIdx.y][2*blockDim.x+2]
		= sMem[threadIdx.y][0+1];
	__threadfence_block();	
	/**********************************************************************************************************/
	/// <summary>	Lifting with beta (update on even columns). </summary>
	/// <summary>	The leftmost thread lifts against halo slot +2 instead of a local left neighbour. </summary>
	if(threadIdx.x == 0)
		sMem[threadIdx.y][2*threadIdx.x+0]
		+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1] + sMem[threadIdx.y][2*blockDim.x+2]);
	else
		sMem[threadIdx.y][2*threadIdx.x+0]
		+= b*(sMem[threadIdx.y][2*threadIdx.x+0+1] + sMem[threadIdx.y][2*threadIdx.x+0-1]);
	__threadfence_block();	
	/**********************************************************************************************************/
	/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
	/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>

	int head = 1;
	int tail  = blockDim.x-1;
#pragma unroll
	for(tail; tail> 0; tail--)
	{
		if(threadIdx.x < tail)
			swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]); 
		head++;
	}
	__threadfence_block();	
	/**********************************************************************************************************/
	/// <summary>	Copy to global memory, transposing: low-pass halves go to the top rows of dst,
	/// 			high-pass halves to the rows offset by gridDim.x*blockDim.x. </summary>
	dst[((blockIdx.x+0)*blockDim.x + threadIdx.x)*iCols + blockIdx.y*blockDim.y + threadIdx.y]
	= sMem[threadIdx.y][threadIdx.x];
	dst[((blockIdx.x+gridDim.x)*blockDim.x + threadIdx.x)*iCols + blockIdx.y*blockDim.y + threadIdx.y]
	= sMem[threadIdx.y][threadIdx.x + blockDim.x];
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper for the halo-based single-pass DWT kernel. </summary>
///
/// <remarks>	Covers the whole iRows x iCols image with (bCols/2 x bRows)-thread blocks;
/// 			iRows and iCols are assumed to be multiples of bRows and bCols respectively
/// 			(integer division truncates otherwise).  Errors are reported to stderr
/// 			rather than silently discarded. </remarks>
///
/// <param name="dst">  	[in,out] Device pointer receiving the (transposed) coefficients. </param>
/// <param name="src">  	[in] Device pointer to the source image data. </param>
/// <param name="iRows">	Number of image rows to process. </param>
/// <param name="iCols">	Number of image columns (row pitch, in float2 elements). </param>
/// <param name="nRows">	Forwarded to the kernel (not referenced by it). </param>
/// <param name="nCols">	Forwarded to the kernel (not referenced by it). </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void encode_static(float2* dst, float2* src,
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Kernel Configuration. </summary>
	dim3 nBlocks(iCols/bCols, iRows/bRows);
	dim3 nThreads(bCols/2, bRows);

	/// <summary>	Kernel Launching. </summary>
	__encode_static<<<nBlocks, nThreads>>>
		(dst, src, iRows, iCols, nRows, nCols);

	// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().  Check the
	// launch first (configuration errors), then the synchronization (execution errors).
	cudaError_t err = cudaGetLastError();
	if (err == cudaSuccess)
		err = cudaDeviceSynchronize();
	if (err != cudaSuccess)
		cerr << "encode_static: " << cudaGetErrorString(err) << endl;
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Kernel Decode shared. </summary>
///
/// <remarks>	Quan, 5/9/2012. </remarks>
///
/// <param name="dst">  	[in,out] If non-null, destination for the. </param>
/// <param name="src">  	[in,out] If non-null, source for the. </param>
/// <param name="nRows">	The rows. </param>
/// <param name="nCols">	The cols. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

__global__
	void __decode_shared(float2* dst, float2* src, int nRows, int nCols)
{
	__shared__ float2 sMem[32][32+1];
	int head;
	int tail;
	/// <summary>	Copy a (32x32) block from global memory to shared memory. </summary>
	sMem[threadIdx.y][threadIdx.x] 		= src[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x];	
	sMem[threadIdx.y][threadIdx.x+16]	= src[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x+16];
	__syncthreads();

	/**********************************************************************************************************/
	if((threadIdx.y<2) && (threadIdx.x<1))
	{
		/// <summary>	Along y direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 1-1;
		tail = 1;
#pragma unroll
		for(tail; tail< 1; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head--;
		}
		__threadfence_block();
		
		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
		-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);		
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
		-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();	
		
		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (1-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
			-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
			-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();	

		/// <summary>	Along x direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 1-1;
		tail  = 1;
#pragma unroll
		for(tail; tail<1; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head--;
		}
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
			-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
			-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();	

		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (1-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();	
	}
	__syncthreads();
	/**********************************************************************************************************/
	if((threadIdx.y<4) && (threadIdx.x<2))
	{
		/// <summary>	Along y direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 2-1;
		tail = 1;
#pragma unroll
		for(tail; tail< 2; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head--;
		}
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
			-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
			-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (2-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
		-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
		-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Along x direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 2-1;
		tail = 1;
#pragma unroll
		for(tail; tail<2; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head--;
		}
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
		-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
		-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();

		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (2-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();
	}
	__syncthreads();
	/**********************************************************************************************************/
	if((threadIdx.y<8) && (threadIdx.x<4))
	{
		/// <summary>	Along y direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 4-1;
		tail = 1;
#pragma unroll
		for(tail; tail< 4; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head--;
		}
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
			-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
			-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (4-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
			-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
			-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Along x direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 4-1;
		tail = 1;
#pragma unroll
		for(tail; tail<4; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head--;
		}
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
			-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
			-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();

		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (4-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();
	}
	__syncthreads();
	/**********************************************************************************************************/
	if((threadIdx.y<16) && (threadIdx.x<8))
	{
		/// <summary>	Along y direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 8-1;
		tail = 1;
#pragma unroll
		for(tail; tail< 8; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head--;
		}
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
			-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
			-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (8-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
			-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
			-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Along x direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 8-1;
		tail = 1;
#pragma unroll
		for(tail; tail<8; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head--;
		}
		__threadfence_block();
		
		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
			-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
			-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();

		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (8-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();
	}
	__syncthreads();
	/**********************************************************************************************************/
	if((threadIdx.y<32) && (threadIdx.x<16))
	{
		/// <summary>	Along y direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 16-1;
		tail = 1;
#pragma unroll
		for(tail; tail< 16; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[2*threadIdx.x+0+head][threadIdx.y], sMem[2*threadIdx.x+1+head][threadIdx.y]);
			head--;
		}
		__threadfence_block();

		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[2*threadIdx.x+0][threadIdx.y]
			-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+0][threadIdx.y]
			-= b*(sMem[2*threadIdx.x+0+1][threadIdx.y]+sMem[2*threadIdx.x+0-1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (16-1))
			sMem[2*threadIdx.x+1][threadIdx.y]
			-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]*2);
		else
			sMem[2*threadIdx.x+1][threadIdx.y]
			-= a*(sMem[2*threadIdx.x+1-1][threadIdx.y]+sMem[2*threadIdx.x+1+1][threadIdx.y]);
		__threadfence_block();

		/// <summary>	Along x direction. </summary>
		/// <summary>	Rearrange the odd and even coefficients inside shared memory	</summary>
		/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
		head = 16-1;
		tail = 1;
#pragma unroll
		for(tail; tail<16; tail++)
		{
			if(threadIdx.x < tail)
				swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
			head--;
		}
		__threadfence_block();
		
		/// <summary>	Lifting with beta. </summary>
		if(threadIdx.x == 0)
			sMem[threadIdx.y][2*threadIdx.x+0]
			-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+0]
			-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1]+sMem[threadIdx.y][2*threadIdx.x+0-1]);
		__threadfence_block();

		/// <summary>	Lifting with alpha. </summary>
		if(threadIdx.x == (16-1))
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]*2);
		else
			sMem[threadIdx.y][2*threadIdx.x+1]
			-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1]+sMem[threadIdx.y][2*threadIdx.x+1+1]);
		__threadfence_block();
	}
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Copy a (32x32) block from shared memory to global memory. </summary>
	dst[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x]		= sMem[threadIdx.y][threadIdx.x];	
	dst[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x+16]	= sMem[threadIdx.y][threadIdx.x+16];
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Wrapper for Decode shared. </summary>
///
/// <remarks>	Quan, 5/9/2012. </remarks>
///
/// <param name="dst">  	[out] Device buffer that receives the reconstructed 32x32 tile. </param>
/// <param name="src">  	[in]  Device buffer holding the wavelet coefficients. </param>
/// <param name="nRows">	Number of rows of the full image. </param>
/// <param name="nCols">	Number of columns of the full image. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

void decode_shared(float2* dst, float2* src, int nRows, int nCols)
{
	/// <summary>	Kernel Configuration: a single thread block covering one
	///				bRows x bCols (32x32) tile; each thread owns two columns,
	///				hence bCols/2 threads in x. </summary>
	dim3 nBlocks(1, 1);
	dim3 nThreads(bCols/2, bRows);

	/// <summary>	Kernel Launching</summary>
	__decode_shared<<<nBlocks, nThreads>>>
		(dst, src, nRows, nCols);
	/// <summary>	Surface launch-configuration errors instead of discarding them,
	///				then block until the kernel finishes so execution errors are
	///				reported here. cudaThreadSynchronize() is deprecated;
	///				cudaDeviceSynchronize() is its direct replacement. </summary>
	cudaSafeCall(cudaGetLastError());
	cudaSafeCall(cudaDeviceSynchronize());
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Kernel for Decode static. </summary>
///
/// <remarks>	Quan, 5/9/2012. </remarks>
///
/// <param name="dst">  	[in,out] If non-null, destination for the. </param>
/// <param name="src">  	[in,out] If non-null, source for the. </param>
/// <param name="iRows">	Zero-based index of the rows. </param>
/// <param name="iCols">	Zero-based index of the cols. </param>
/// <param name="nRows">	The rows. </param>
/// <param name="nCols">	The cols. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void __decode_static(float2* dst, float2* src, 
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	One inverse-lifting pass along the x direction, transposing on input.
	///				Expects blockDim == (bCols/2, bRows) as launched by decode_static;
	///				each thread owns one even/odd column pair of the tile. </summary>
	/// <summary>	Shared tile with 3 dummy columns: they receive neighbor-block samples
	///				for the boundary lifting steps and also skew the row stride,
	///				which avoids bank conflicts on column accesses. </summary>
	__shared__ float2 sMem[bRows][bCols+3];
	/**********************************************************************************************************/
	/// <summary>	Gather-and-transpose load: the first half of the columns comes from the
	///				low-pass region (block-column blockIdx.x), the second half from the
	///				high-pass region offset by gridDim.x block-columns. </summary>
	sMem[threadIdx.y][threadIdx.x]
	= src[((blockIdx.x+0)*blockDim.x + threadIdx.x)*iCols + blockIdx.y*blockDim.y + threadIdx.y];
	sMem[threadIdx.y][threadIdx.x + blockDim.x]
	= src[((blockIdx.x+gridDim.x)*blockDim.x + threadIdx.x)*iCols + blockIdx.y*blockDim.y + threadIdx.y];
	__syncthreads();

	/**********************************************************************************************************/
	/// <summary>	In-place swap network that interleaves the two halves row by row:	</summary>
	/// <example>	///		0 2 4 6 1 3 5 7		becomes		0 1 2 3 4 5 6 7			 	</example>
	int head = blockDim.x-1;
	int tail = 1;
#pragma unroll
	for(tail; tail< blockDim.x; tail++)
	{
		if(threadIdx.x < tail)
			swap(sMem[threadIdx.y][2*threadIdx.x+0+head], sMem[threadIdx.y][2*threadIdx.x+1+head]);
		head--;
	}
	__threadfence_block();
	/**********************************************************************************************************/
	/// <summary>	Fill dummy column 2*blockDim.x+2 with the sample left of this tile:
	///				the last column of the left neighbor's high-pass block-column.
	///				The leftmost block has no left neighbor, so it mirrors column 1
	///				(symmetric boundary extension). </summary>
	if(blockIdx.x == 0)
		sMem[threadIdx.y][2*blockDim.x+2]
		= sMem[threadIdx.y][1];
	else
		sMem[threadIdx.y][2*blockDim.x+2]
		= src[((blockIdx.x-1+gridDim.x)*blockDim.x+blockDim.x-1)*iCols + blockIdx.y*blockDim.y+threadIdx.y];
	__threadfence_block();
	/**********************************************************************************************************/
	/// <summary>	Fill dummy columns 2*blockDim.x and +1 with the first low-pass and
	///				high-pass samples of the right neighbor block. </summary>
	if(blockIdx.x == (gridDim.x-1))
	{
		// Rightmost block: leave the dummy columns uninitialized — the value computed
		// from them below is overwritten by the mirror step before it is ever consumed.
	}
	else
	{
		sMem[threadIdx.y][2*blockDim.x+0]
		= src[((blockIdx.x+1+0)*blockDim.x)*iCols + blockIdx.y*blockDim.y+threadIdx.y];
		sMem[threadIdx.y][2*blockDim.x+1]
		= src[((blockIdx.x+1+gridDim.x)*blockDim.x)*iCols + blockIdx.y*blockDim.y+threadIdx.y];
	}
	__threadfence_block();
	/**********************************************************************************************************/
	/// <summary>	Inverse lifting with beta on the even samples. The leftmost thread
	///				uses the left-neighbor sample stored in dummy column 2*blockDim.x+2. </summary>
	if(threadIdx.x == 0)
		sMem[threadIdx.y][2*threadIdx.x+0]
		-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1] + sMem[threadIdx.y][2*blockDim.x+2]);
	else
		sMem[threadIdx.y][2*threadIdx.x+0]
		-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1] + sMem[threadIdx.y][2*threadIdx.x+0-1]);
	__threadfence_block();
	/**********************************************************************************************************/
	/// <summary>	Lift the right-neighbor even sample (dummy column 2*blockDim.x) with beta
	///				as well, so the alpha step below sees a consistently-updated neighbor. </summary>
	sMem[threadIdx.y][2*blockDim.x]
	-= b*(sMem[threadIdx.y][2*blockDim.x-1] + sMem[threadIdx.y][2*blockDim.x+1]);
	__threadfence_block();
	/**********************************************************************************************************/
	/// <summary>	Rightmost block has no right neighbor: replace the dummy column with a
	///				mirror of the last in-tile even sample (symmetric extension). </summary>
	if(blockIdx.x == (gridDim.x-1))
		sMem[threadIdx.y][2*blockDim.x]
		= sMem[threadIdx.y][2*blockDim.x-2];
	__threadfence_block();
	/**********************************************************************************************************/
	/// <summary>	Inverse lifting with alpha on the odd samples. The rightmost thread
	///				uses the (lifted or mirrored) dummy column as its right neighbor. </summary>
	if(threadIdx.x == (blockDim.x-1))
		sMem[threadIdx.y][2*threadIdx.x+1]
		-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1] + sMem[threadIdx.y][2*blockDim.x]);
	else
		sMem[threadIdx.y][2*threadIdx.x+1]
		-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1] + sMem[threadIdx.y][2*threadIdx.x+1+1]);
	__threadfence_block();
	/**********************************************************************************************************/
	/// <summary>	Write the tile back, already transposed relative to src; consecutive
	///				threads write consecutive columns of dst (coalesced). </summary>
	dst[(blockIdx.y*blockDim.y+threadIdx.y)*iCols + blockIdx.x*(2*blockDim.x)+threadIdx.x]	
	= sMem[threadIdx.y][threadIdx.x];
	dst[(blockIdx.y*blockDim.y+threadIdx.y)*iCols + blockIdx.x*(2*blockDim.x)+threadIdx.x+blockDim.x]
	= sMem[threadIdx.y][threadIdx.x+blockDim.x];
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Wrapper for Decode static. </summary>
///
/// <remarks>	Quan, 5/9/2012. </remarks>
///
/// <param name="dst">  	[in,out] If non-null, destination for the. </param>
/// <param name="src">  	[in,out] If non-null, source for the. </param>
/// <param name="iRows">	Zero-based index of the rows. </param>
/// <param name="iCols">	Zero-based index of the cols. </param>
/// <param name="nRows">	The rows. </param>
/// <param name="nCols">	The cols. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

void decode_static(float2* dst, float2* src,
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Kernel Configuration: one thread block per bRows x bCols tile of the
	///				iRows x iCols sub-band; each thread owns two columns (bCols/2). </summary>
	dim3 nBlocks(iCols/bCols, iRows/bRows);
	dim3 nThreads(bCols/2, bRows);

	/// <summary>	Kernel Launching</summary>
	__decode_static<<<nBlocks, nThreads>>>
		(dst, src, iRows, iCols, nRows, nCols);

	/// <summary>	Surface launch-configuration errors instead of discarding them,
	///				then block until the kernel finishes so execution errors are
	///				reported here. cudaThreadSynchronize() is deprecated;
	///				cudaDeviceSynchronize() is its direct replacement. </summary>
	cudaSafeCall(cudaGetLastError());
	cudaSafeCall(cudaDeviceSynchronize());
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Forward Wavelet Transform. </summary>
///
/// <remarks>	Quan, 5/9/2012. </remarks>
///
/// <param name="image">  	[in,out] If non-null, the image. </param>
/// <param name="nRows">  	The rows. </param>
/// <param name="nCols">  	The cols. </param>
/// <param name="wavelet">	[in,out] If non-null, the wavelet. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void fwt(float2 * image,		// original signal
	int nRows, int nCols,		// number of points, should be multiple of 2
	float2 * wavelet)			// decomposed data
{
	/// <summary>	Number of decomposition levels: log2 of the larger dimension minus 4,
	///				so the coarsest level is one bRows x bCols (32x32) tile. </summary>
	int nLevels = (_log2(nRows) > _log2(nCols)) ?  _log2(nRows):_log2(nCols);
	nLevels -= 4;
	/// <summary>	Active sub-band size at the current level. </summary>
	int iRows, iCols;

	/// <summary>	Round dimensions down to multiples of 2^nLevels so every level halves cleanly. </summary>
	iRows = (nRows >> nLevels) << nLevels;
	iCols = (nCols >> nLevels) << nLevels;

	/// <summary>	Scratch device buffers, allocated once and reused across all levels. </summary>
	float2 *tmp_image;
	float2 *tmp_transpose;
	float2 *tmp_wavelet;

	cudaSafeCall(cudaMalloc((void**)&tmp_image,		sizeof(float2)*nRows*nCols));
	cudaSafeCall(cudaMalloc((void**)&tmp_transpose,	sizeof(float2)*nRows*nCols));
	cudaSafeCall(cudaMalloc((void**)&tmp_wavelet,	sizeof(float2)*nRows*nCols));

	/// <summary>	Seed the output with the input image; each level then rewrites its
	///				iRows x iCols top-left corner in place. </summary>
	cudaSafeCall(cudaMemcpy2D(wavelet, sizeof(float2)*nCols, 
		image, sizeof(float2)*nCols, 
		sizeof(float2)*nCols, nRows, 
		cudaMemcpyDeviceToDevice));

	for(int k=0; k<nLevels; k++)
	{
		/// <summary>	Bitwise-OR size test: selects the static kernel while the current
		///				sub-band is larger than one 32x32 tile. NOTE(review): equivalent to
		///				(iRows > bRows || iCols > bCols) only for the sizes produced above —
		///				preserved as-is to keep behavior identical. </summary>
		if((iRows|iCols) > (bRows|bCols))
		{
			/// <summary>	Extract the active corner into a tightly-pitched buffer. </summary>
			cudaSafeCall(cudaMemcpy2D(tmp_image, sizeof(float2)*iCols,
				wavelet, sizeof(float2)*nCols,
				sizeof(float2)*iCols, iRows, 
				cudaMemcpyDeviceToDevice));
			/// <summary>	Two separable passes; each pass transposes its output, so the
			///				second call restores the original orientation. </summary>
			encode_static(tmp_transpose, tmp_image, iRows, iCols, nRows, nCols);
			encode_static(tmp_wavelet, tmp_transpose, iRows, iCols, nRows, nCols);
			cudaSafeCall(cudaMemcpy2D(wavelet, sizeof(float2)*nCols,
				tmp_wavelet, sizeof(float2)*iCols,
				sizeof(float2)*iCols, iRows, 
				cudaMemcpyDeviceToDevice));
			iRows /= 2;
			iCols /= 2;
		}
		else
		{
			/// <summary>	Coarsest level: a single shared-memory kernel handles the
			///				bRows x bCols tile. NOTE(review): iRows/iCols are intentionally
			///				not halved here; the level count is expected to end the loop
			///				after this branch runs once. </summary>
			cudaSafeCall(cudaMemcpy2D(tmp_image, sizeof(float2)*nCols,
				wavelet, sizeof(float2)*nCols,
				sizeof(float2)*bCols, bRows, 
				cudaMemcpyDeviceToDevice));
			encode_shared(tmp_wavelet, tmp_image, nRows, nCols);
			cudaSafeCall(cudaMemcpy2D(wavelet, sizeof(float2)*nCols,
				tmp_wavelet, sizeof(float2)*nCols,
				sizeof(float2)*bCols, bRows, 
				cudaMemcpyDeviceToDevice));
		}
	}
	cudaSafeCall(cudaFree(tmp_image));
	cudaSafeCall(cudaFree(tmp_transpose));
	cudaSafeCall(cudaFree(tmp_wavelet));
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Inverse Wavelet Transform. </summary>
///
/// <remarks>	Quan, 5/9/2012. </remarks>
///
/// <param name="wavelet">	[in,out] If non-null, the wavelet. </param>
/// <param name="nRows">  	The rows. </param>
/// <param name="nCols">  	The cols. </param>
/// <param name="image">  	[in,out] If non-null, the image. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

void iwt(float2 * wavelet,		// wavelet coefficients
	int nRows, int nCols,	// number of points, should be multiple of 2
	float2 * image)		// reconstructed data

{
	/// <summary>	Number of reconstruction levels; mirrors the decomposition count in fwt:
	///				log2 of the larger dimension minus 4 (coarsest level is 32x32). </summary>
	int nLevels = (_log2(nRows) > _log2(nCols)) ?  _log2(nRows):_log2(nCols);
	nLevels -= 4;
	/// <summary>	Active sub-band size at the current level. </summary>
	int iRows, iCols;

	/// <summary>	Reconstruction starts from the coarsest bRows x bCols (32x32) tile. </summary>
	//TODO: Wavelet level is here
	iRows = 32;
	iCols = 32;

	/// <summary>	Scratch device buffers, allocated once and reused across all levels. </summary>
	float2 *tmp_image;
	float2 *tmp_transpose;
	float2 *tmp_wavelet;

	cudaSafeCall(cudaMalloc((void**)&tmp_image,		sizeof(float2)*nRows*nCols));
	cudaSafeCall(cudaMalloc((void**)&tmp_transpose,	sizeof(float2)*nRows*nCols));
	cudaSafeCall(cudaMalloc((void**)&tmp_wavelet,	sizeof(float2)*nRows*nCols));

	/// <summary>	Seed the output with the full coefficient image; each level then
	///				rewrites its iRows x iCols top-left corner in place. </summary>
	cudaSafeCall(cudaMemcpy2D(image, sizeof(float2)*nCols, 
		wavelet, sizeof(float2)*nCols, 
		sizeof(float2)*nCols, nRows, 
		cudaMemcpyDeviceToDevice));

	//TODO: Wavelet level is here
	for(int k=0; k<nLevels; k++)
	{
		/// <summary>	Bitwise-OR size test: selects the static kernel while the current
		///				sub-band is larger than one 32x32 tile. NOTE(review): equivalent to
		///				(iRows > bRows || iCols > bCols) only for the sizes produced here —
		///				preserved as-is to keep behavior identical. </summary>
		if((iRows|iCols) > (bRows|bCols))
		{
			/// <summary>	Extract the active corner into a tightly-pitched buffer. </summary>
			cudaSafeCall(
				cudaMemcpy2D(tmp_wavelet, sizeof(float2)*iCols,
					image, sizeof(float2)*nCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			/// <summary>	Two separable passes; each pass transposes its output, so the
			///				second call restores the original orientation. </summary>
			decode_static(tmp_transpose, tmp_wavelet, iRows, iCols, nRows, nCols);
			decode_static(tmp_image, tmp_transpose, iRows, iCols, nRows, nCols);
			cudaSafeCall(
				cudaMemcpy2D(image, sizeof(float2)*nCols,
					tmp_image, sizeof(float2)*iCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			iRows *= 2;
			iCols *= 2;
		}
		else
		{
			/// <summary>	Coarsest (first) level: one shared-memory kernel handles the
			///				32x32 tile in place within the full-pitch buffers. </summary>
			cudaSafeCall(
				cudaMemcpy2D(tmp_wavelet, sizeof(float2)*nCols,
					image, sizeof(float2)*nCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			decode_shared(tmp_image, tmp_wavelet, nRows, nCols);
			cudaSafeCall(
				cudaMemcpy2D(image, sizeof(float2)*nCols,
					tmp_image, sizeof(float2)*nCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			iRows *= 2;
			iCols *= 2;
		}
	}
	cudaSafeCall(cudaFree(tmp_image));
	cudaSafeCall(cudaFree(tmp_transpose));
	cudaSafeCall(cudaFree(tmp_wavelet));
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Discrete Wavelet Transform. </summary>
///
/// <remarks>	Quan, 5/9/2012. </remarks>
///
/// <param name="src">  	[in]  Input device buffer (image for forward, coefficients for inverse). </param>
/// <param name="dst">  	[out] Output device buffer (coefficients for forward, image for inverse). </param>
/// <param name="nRows">	Number of rows of the data. </param>
/// <param name="nCols">	Number of columns of the data. </param>
/// <param name="flag"> 	DWT_FORWARD or DWT_INVERSE; any other value is a no-op. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

void dwt(float2* src, float2* dst, int nRows, int nCols, int flag)
{
	/// <summary>	Dispatch to the requested transform direction. </summary>
	if(flag == DWT_FORWARD)
	{
		/// <summary>	Forward Wavelet Transform </summary>
		fwt(src, nRows, nCols, dst);
	}
	else if(flag == DWT_INVERSE)
	{
		/// <summary>	Inverse Wavelet Transform </summary>
		iwt(src, nRows, nCols, dst);
	}
	/// <summary>	Any other flag value is silently ignored. </summary>
}
}
