﻿////////////////////////////////////////////////////////////////////////////////////////////////////
// file:	C:\Users\Quan\Documents\Visual Studio 2010\Projects\source\cvWavelet\dwt.cu
//
// summary:	2D discrete wavelet transform (Haar) — CUDA kernels and host wrappers
////////////////////////////////////////////////////////////////////////////////////////////////////

#include "dwt.hpp"
#include "cutils.hpp"
#include <iostream>
using namespace std;
namespace cuda
{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Ceiling of logarithm base 2: smallest j such that 2^j >= n.
/// 			Returns 0 for n <= 1. </summary>
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline int _log2 (int n)
{
	int bits = 0;
	for (int value = 1; value < n; value *= 2)
	{
		++bits;
	}
	return bits;
}

/// <summary>	Alpha, Beta and Gamma for lifting scheme.
/// 			NOTE(review): none of these constants are referenced by the kernels
/// 			visible in this file (they implement plain Haar average/difference);
/// 			confirm whether other translation units read these symbols before
/// 			removing them. </summary>
__constant__ float a = -0.5f;
__constant__ float b = 0.25f;
/// <summary>	c = sqrt(2), the Haar normalization factor. </summary>
__constant__ float c = 1.4142135623730950488016887242097f;

/// <summary>	A macro that defines block cols. </summary>
#define bCols 32		//warpSize
/// <summary>	A macro that defines block rows. </summary>
#define bRows 32		//warpSize

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	In-place swap of two float2 lvalues.
/// 			NOTE(review): __threadfence_block() is a memory FENCE, not a barrier —
/// 			it orders this thread's memory accesses but does not synchronize
/// 			threads. Neither swap macro is used in this file's visible code. </summary>
#define swap(a,b)				\
{								\
	float2 tmp;					\
	tmp = a;					\
	__threadfence_block();		\
	a = b;						\
	__threadfence_block();		\
	b = tmp;					\
	__threadfence_block();		\
}
/// <summary>	Swap variant using full block barriers; __syncthreads() here must be
/// 			reached by every thread of the block or behavior is undefined. </summary>
#define swap_encode(a,b)				\
{								\
	float2 tmp;					\
	tmp = a;					\
	__syncthreads();			\
	a = b;						\
	__syncthreads();			\
	b = tmp;					\
	__syncthreads();			\
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Kernel: forward Haar decomposition of one 32x32 tile, 5 levels in a
/// 			single launch. Expects a (bCols x bRows) = (32 x 32) thread block.
///
/// 			Fix: the original placed __syncthreads() inside a branch that is
/// 			genuinely divergent for levels > 0 (only the shrinking top-left
/// 			sub-tile was active), which is undefined behaviour per the CUDA
/// 			programming guide. Barriers are now hoisted to block scope. </summary>
///
/// <param name="dst">  	[out] Device pointer receiving the decomposed tile. </param>
/// <param name="src">  	[in]  Device pointer to the source tile. </param>
/// <param name="nRows">	Row count of the full image (unused for indexing rows here). </param>
/// <param name="nCols">	Row pitch (in elements) of src/dst. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ 
void __encode_shared(float2* dst, float2* src, int nRows, int nCols)
{
	/// <summary>	Padded shared tiles; the +1 column avoids bank conflicts. </summary>
	__shared__ float2 sMem[32][32+1];
	__shared__ float2 tMem[32][32+1];
	int iRows = bRows;
	int iCols = bCols;

	/// <summary>	Copy a (32x32) tile from global memory to shared memory. </summary>
	sMem[threadIdx.y][threadIdx.x] 		= src[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x];	
	__syncthreads();
	tMem[threadIdx.y][threadIdx.x]		= sMem[threadIdx.y][threadIdx.x];
	__syncthreads();
	/**********************************************************************************************************/
	for(int iLevels=0; iLevels<5; iLevels++)
	{
		/// <summary>	Halve the active sub-band for this level. </summary>
		iCols /= 2;
		iRows /= 2;

		/// <summary>	Threads covering the active top-left (2*iCols x 2*iRows) region. </summary>
		bool active = (threadIdx.x < (2*iCols)) && (threadIdx.y < (2*iRows));

		/// <summary>	Horizontal pass: averages in the left half, differences in the right. </summary>
		if(active)
		{
			if(threadIdx.x < iCols)
			{
				tMem[threadIdx.y][threadIdx.x]
				= (sMem[threadIdx.y][2*threadIdx.x+0]+sMem[threadIdx.y][2*threadIdx.x+1])/2.0f;
			}
			else
			{
				tMem[threadIdx.y][threadIdx.x]
				= (sMem[threadIdx.y][2*(threadIdx.x-iCols)+0]-sMem[threadIdx.y][2*(threadIdx.x-iCols)+1])/2.0f;
			}
		}
		/// <summary>	Barriers at block scope: every thread must reach them. </summary>
		__syncthreads();

		if(active)
		{
			sMem[threadIdx.y][threadIdx.x]		= tMem[threadIdx.y][threadIdx.x];
		}
		__syncthreads();

		/// <summary>	Vertical pass: averages in the top half, differences in the bottom. </summary>
		if(active)
		{
			if(threadIdx.y < iRows)
			{
				tMem[threadIdx.y][threadIdx.x]
				= (sMem[2*threadIdx.y+0][threadIdx.x]+sMem[2*threadIdx.y+1][threadIdx.x])/2.0f;
			}
			else
			{
				tMem[threadIdx.y][threadIdx.x]
				= (sMem[2*(threadIdx.y-iRows)+0][threadIdx.x]-sMem[2*(threadIdx.y-iRows)+1][threadIdx.x])/2.0f;
			}
		}
		__syncthreads();

		if(active)
		{
			sMem[threadIdx.y][threadIdx.x]		= tMem[threadIdx.y][threadIdx.x];
		}
		__syncthreads();
	}
	/**********************************************************************************************************/
	/// <summary>	Copy the decomposed (32x32) tile back to global memory. </summary>
	dst[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x]		= sMem[threadIdx.y][threadIdx.x];	
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper: launch __encode_shared on a single 32x32 block
/// 			(the kernel performs the remaining 5 levels in one launch).
/// 			Exits the process on any CUDA error. </summary>
///
/// <param name="dst">  	[out] Device pointer receiving the decomposed tile. </param>
/// <param name="src">  	[in]  Device pointer to the source tile. </param>
/// <param name="nRows">	The rows. </param>
/// <param name="nCols">	Row pitch (in elements) of src/dst. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void encode_shared(float2* dst, float2* src, int nRows, int nCols)
{
	/// <summary>	Kernel configuration: one (bCols x bRows) thread block. </summary>
	dim3 nBlocks(1, 1);
	dim3 nThreads(bCols, bRows);

	/// <summary>	Kernel launching. </summary>
	__encode_shared<<<nBlocks, nThreads>>>(dst, src, nRows, nCols);
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// supported equivalent and also surfaces asynchronous kernel errors.
	cudaDeviceSynchronize();
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("CUDA error encode_shared: %s\n", cudaGetErrorString(error));
		exit(-1);
	}
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Kernel: one horizontal Haar step over an iRows x iCols region, writing
/// 			the result transposed so the next launch handles the other direction.
/// 			Expects a (bCols x bRows) thread block (as launched by encode_static).
///
/// 			Fix: the original kept a __syncthreads() inside a conditional; the
/// 			guard was tautological under the visible launch configuration, but
/// 			a barrier in conditional code is fragile/UB-prone, so the barrier is
/// 			hoisted and the tautological guards removed. The redundant
/// 			"-bCols/2+bCols/2" index arithmetic is also dropped. </summary>
///
/// <param name="dst">  	[out] Device pointer receiving the transposed half-bands. </param>
/// <param name="src">  	[in]  Device pointer to the source region. </param>
/// <param name="iRows">	Rows of the active region at this level. </param>
/// <param name="iCols">	Cols (and row pitch in elements) of the active region. </param>
/// <param name="nRows">	Unused; kept for interface compatibility. </param>
/// <param name="nCols">	Unused; kept for interface compatibility. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

__global__ 
void __encode_static(float2* dst, float2* src, 
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Shared tiles with a dummy column to avoid bank conflicts. </summary>
	__shared__ float2 sMem[bRows][bCols+1];
	__shared__ float2 tMem[bRows][bCols+1];
	/**********************************************************************************************************/
	/// <summary>	Copy a (32x32) tile from global memory to shared memory. </summary>
	sMem[threadIdx.y][threadIdx.x]	= src[(blockIdx.y*blockDim.y+threadIdx.y)*iCols + blockIdx.x*blockDim.x+threadIdx.x];	
	__syncthreads();
	tMem[threadIdx.y][threadIdx.x]	= sMem[threadIdx.y][threadIdx.x];
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Horizontal Haar step: averages in the left half, differences in the right. </summary>
	if(threadIdx.x < bCols/2)
	{
		sMem[threadIdx.y][threadIdx.x]
		= (tMem[threadIdx.y][2*threadIdx.x+0]+tMem[threadIdx.y][2*threadIdx.x+1])/2.0f;
	}
	else
	{
		sMem[threadIdx.y][threadIdx.x]
		= (tMem[threadIdx.y][2*(threadIdx.x-bCols/2)+0]-tMem[threadIdx.y][2*(threadIdx.x-bCols/2)+1])/2.0f;
	}
	/// <summary>	Barrier at block scope before reading sMem below. </summary>
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Copy to global memory, arranged (approximation half first) and transposed. </summary>
	if(threadIdx.x < bCols/2)
	{
		dst[(blockIdx.x*blockDim.x/2 + threadIdx.x)*iCols + blockIdx.y*blockDim.y + threadIdx.y]
		= sMem[threadIdx.y][threadIdx.x];
	}
	else
	{
		dst[((blockIdx.x+(gridDim.x-1))*blockDim.x/2 + threadIdx.x)*iCols + blockIdx.y*blockDim.y + threadIdx.y]
		= sMem[threadIdx.y][threadIdx.x];
	}
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper: launch __encode_static over an iRows x iCols region,
/// 			one (bCols x bRows) thread block per 32x32 tile.
/// 			Exits the process on any CUDA error. </summary>
///
/// <param name="dst">  	[out] Device pointer receiving the transposed half-bands. </param>
/// <param name="src">  	[in]  Device pointer to the source region. </param>
/// <param name="iRows">	Rows of the active region (should be a multiple of bRows). </param>
/// <param name="iCols">	Cols of the active region (should be a multiple of bCols). </param>
/// <param name="nRows">	Forwarded; unused by the kernel. </param>
/// <param name="nCols">	Forwarded; unused by the kernel. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void encode_static(float2* dst, float2* src,
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Kernel configuration. </summary>
	dim3 nBlocks(iCols/bCols, iRows/bRows);
	dim3 nThreads(bCols, bRows);

	/// <summary>	Kernel launching. </summary>
	__encode_static<<<nBlocks, nThreads>>>
		(dst, src, iRows, iCols, nRows, nCols);
	// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("CUDA error encode_static: %s\n", cudaGetErrorString(error));
		exit(-1);
	}
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Kernel: inverse Haar reconstruction of one 32x32 tile, undoing 5
/// 			levels in a single launch, smallest sub-band first.
/// 			Expects a (bCols x bRows) = (32 x 32) thread block. </summary>
///
/// <param name="dst">  	[out] Device pointer receiving the reconstructed tile. </param>
/// <param name="src">  	[in]  Device pointer to the wavelet-coefficient tile. </param>
/// <param name="nRows">	The rows. </param>
/// <param name="nCols">	Row pitch (in elements) of src/dst. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

__global__
void __decode_shared(float2* dst, float2* src, int nRows, int nCols)
{
	/// <summary>	Padded shared tiles; the +1 column avoids bank conflicts. </summary>
	__shared__ float2 sMem[32][32+1];
	__shared__ float2 tMem[32][32+1];
	int iRows = 1;
	int iCols = 1;

	/// <summary>	Stage the (32x32) tile from global into shared memory. </summary>
	sMem[threadIdx.y][threadIdx.x] 		= src[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x];
	__syncthreads();
	tMem[threadIdx.y][threadIdx.x]		= sMem[threadIdx.y][threadIdx.x];
	__syncthreads();

	for(int level=0; level<5; level++)
	{
		/// <summary>	Inverse step along x: even = lo+hi, odd = lo-hi.
		/// 			(the /1.0f is kept from the original; presumably a no-op —
		/// 			confirm float2 operator/ is component-wise). </summary>
		if(threadIdx.x < iCols && threadIdx.y < (2*iRows))
		{
			float2 lo = sMem[threadIdx.y][threadIdx.x];
			float2 hi = sMem[threadIdx.y][threadIdx.x+iCols];
			tMem[threadIdx.y][2*threadIdx.x+0] = (lo+hi)/1.0f;
			tMem[threadIdx.y][2*threadIdx.x+1] = (lo-hi)/1.0f;
		}
		__syncthreads();
		if(threadIdx.x < (2*iCols) && threadIdx.y < (2*iRows))
		{
			sMem[threadIdx.y][threadIdx.x]		= tMem[threadIdx.y][threadIdx.x];
		}
		__syncthreads();

		/// <summary>	Inverse step along y. </summary>
		if(threadIdx.x < (2*iCols) && threadIdx.y < iRows)
		{
			float2 lo = sMem[threadIdx.y][threadIdx.x];
			float2 hi = sMem[threadIdx.y+iRows][threadIdx.x];
			tMem[2*threadIdx.y+0][threadIdx.x] = (lo+hi)/1.0f;
			tMem[2*threadIdx.y+1][threadIdx.x] = (lo-hi)/1.0f;
		}
		__syncthreads();
		if(threadIdx.x < (2*iCols) && threadIdx.y < (2*iRows))
		{
			sMem[threadIdx.y][threadIdx.x]		= tMem[threadIdx.y][threadIdx.x];
		}
		__syncthreads();

		/// <summary>	Double the active sub-band for the next level. </summary>
		iCols *= 2;
		iRows *= 2;
	}

	/// <summary>	Write the reconstructed (32x32) tile back to global memory. </summary>
	dst[(blockIdx.y*32+threadIdx.y)*nCols + blockIdx.x*32+threadIdx.x]		= sMem[threadIdx.y][threadIdx.x];
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper: launch __decode_shared on a single 32x32 block
/// 			(the kernel undoes the final 5 levels in one launch).
/// 			Exits the process on any CUDA error. </summary>
///
/// <param name="dst">  	[out] Device pointer receiving the reconstructed tile. </param>
/// <param name="src">  	[in]  Device pointer to the wavelet-coefficient tile. </param>
/// <param name="nRows">	The rows. </param>
/// <param name="nCols">	Row pitch (in elements) of src/dst. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

void decode_shared(float2* dst, float2* src, int nRows, int nCols)
{
	/// <summary>	Kernel configuration: one (bCols x bRows) thread block. </summary>
	dim3 nBlocks(1, 1);
	dim3 nThreads(bCols, bRows);

	/// <summary>	Kernel launching. </summary>
	__decode_shared<<<nBlocks, nThreads>>>
		(dst, src, nRows, nCols);
	// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("CUDA error decode_shared: %s\n", cudaGetErrorString(error));
		exit(-1);
	}
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Kernel: one inverse horizontal Haar step; reads the transposed
/// 			half-band layout written by __encode_static and writes the
/// 			reconstructed region. Expects a (bCols x bRows) thread block.
///
/// 			Fix: the original placed a __syncthreads() inside a conditional;
/// 			the guard was tautological under the visible launch configuration,
/// 			but a barrier in conditional code is fragile/UB-prone, so the
/// 			barrier is hoisted and the tautological guard removed. </summary>
///
/// <param name="dst">  	[out] Device pointer receiving the reconstructed region. </param>
/// <param name="src">  	[in]  Device pointer to the transposed half-bands. </param>
/// <param name="iRows">	Rows of the active region at this level. </param>
/// <param name="iCols">	Cols (and row pitch in elements) of the active region. </param>
/// <param name="nRows">	Unused; kept for interface compatibility. </param>
/// <param name="nCols">	Unused; kept for interface compatibility. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void __decode_static(float2* dst, float2* src, 
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Shared tiles with a dummy column to avoid bank conflicts. </summary>
	__shared__ float2 sMem[bRows][bCols+1];
	__shared__ float2 tMem[bRows][bCols+1];
	/**********************************************************************************************************/
	/// <summary>	Gather the tile from global memory (transposed half-band layout):
	/// 			approximation half from the first gridDim.x/... rows, detail half
	/// 			from the offset written by the forward pass. </summary>
	if(threadIdx.x < bCols/2)
	{
		sMem[threadIdx.y][threadIdx.x] 
		= src[(blockIdx.x*blockDim.x/2 + threadIdx.x)*iCols + blockIdx.y*blockDim.y + threadIdx.y];
	}
	else
	{
		sMem[threadIdx.y][threadIdx.x] 
		= src[((blockIdx.x+(gridDim.x-1))*blockDim.x/2 + threadIdx.x)*iCols + blockIdx.y*blockDim.y + threadIdx.y];
	}
	/// <summary>	Barrier hoisted to block scope: a __syncthreads() inside a
	/// 			divergent branch is undefined behaviour. </summary>
	__syncthreads();

	tMem[threadIdx.y][threadIdx.x]	= sMem[threadIdx.y][threadIdx.x];
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Inverse Haar step along x: even = lo+hi, odd = lo-hi
	/// 			(the /1.0f is kept from the original; presumably a no-op —
	/// 			confirm float2 operator/ is component-wise). </summary>
	if(threadIdx.x < bCols/2 && threadIdx.y<bRows)
	{
		sMem[threadIdx.y][2*threadIdx.x+0]
		= (tMem[threadIdx.y][threadIdx.x]+tMem[threadIdx.y][threadIdx.x+bCols/2])/1.0f;
		sMem[threadIdx.y][2*threadIdx.x+1]
		= (tMem[threadIdx.y][threadIdx.x]-tMem[threadIdx.y][threadIdx.x+bCols/2])/1.0f;
	}
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Write the reconstructed tile back to global memory. </summary>
	dst[(blockIdx.y*blockDim.y+threadIdx.y)*iCols + blockIdx.x*blockDim.x+threadIdx.x] 
	= sMem[threadIdx.y][threadIdx.x];	
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper: launch __decode_static over an iRows x iCols region,
/// 			one (bCols x bRows) thread block per 32x32 tile.
/// 			Exits the process on any CUDA error. </summary>
///
/// <param name="dst">  	[out] Device pointer receiving the reconstructed region. </param>
/// <param name="src">  	[in]  Device pointer to the transposed half-bands. </param>
/// <param name="iRows">	Rows of the active region (should be a multiple of bRows). </param>
/// <param name="iCols">	Cols of the active region (should be a multiple of bCols). </param>
/// <param name="nRows">	Forwarded; unused by the kernel. </param>
/// <param name="nCols">	Forwarded; unused by the kernel. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

void decode_static(float2* dst, float2* src,
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Kernel configuration. </summary>
	dim3 nBlocks(iCols/bCols, iRows/bRows);
	dim3 nThreads(bCols, bRows);

	/// <summary>	Kernel launching. </summary>
	__decode_static<<<nBlocks, nThreads>>>
		(dst, src, iRows, iCols, nRows, nCols);

	// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("CUDA error decode_static: %s\n", cudaGetErrorString(error));
		exit(-1);
	}
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Forward Wavelet Transform: per-level static-kernel passes until the
/// 			remaining band is 32x32, then one shared-memory launch for the last
/// 			five levels. CUDA calls are now wrapped in cudaSafeCall for
/// 			consistency with iwt(). </summary>
///
/// <param name="image">  	[in]  Device pointer to the original nRows x nCols image. </param>
/// <param name="nRows">  	The rows; should be a power of 2. </param>
/// <param name="nCols">  	The cols; should be a power of 2. </param>
/// <param name="wavelet">	[out] Device pointer receiving the decomposed coefficients. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void fwt(float2 * image,		// original signal
	int nRows, int nCols,		// number of pointers, should be multiple of 2
	float2 * wavelet)			// decomposited data
{
	/// <summary>	Number of 2D decomposition levels: largest log2 of the two
	/// 			dimensions, minus 4 because the final 32x32 band is handled by
	/// 			a single shared-memory launch (5 levels at once). </summary>
	int nLevels = (_log2(nRows) > _log2(nCols)) ?  _log2(nRows):_log2(nCols);
	nLevels -= 4;

	/// <summary>	Size of rows and cols for each level. </summary>
	int iRows, iCols;

	/// <summary>	Truncate dimensions to multiples of 2^nLevels. </summary>
	iRows = (nRows >> nLevels) << nLevels;
	iCols = (nCols >> nLevels) << nLevels;

	/// <summary>	Scratch device buffers (freed at the end of this call). </summary>
	float2 *tmp_image;
	float2 *tmp_transpose;
	float2 *tmp_wavelet;

	cudaSafeCall(cudaMalloc((void**)&tmp_image,		sizeof(float2)*nRows*nCols));
	cudaSafeCall(cudaMalloc((void**)&tmp_transpose,	sizeof(float2)*nRows*nCols));
	cudaSafeCall(cudaMalloc((void**)&tmp_wavelet,	sizeof(float2)*nRows*nCols));

	/// <summary>	Start from a copy of the input image. </summary>
	cudaSafeCall(
		cudaMemcpy2D(wavelet, sizeof(float2)*nCols, 
			image, sizeof(float2)*nCols, 
			sizeof(float2)*nCols, nRows, 
			cudaMemcpyDeviceToDevice)
		);

	for(int k=0; k<nLevels; k++)
	{
		// NOTE(review): bitwise OR, not logical OR — for power-of-two sizes this
		// keeps the static path while either dimension exceeds 32, but it is NOT
		// equivalent to (iRows > bRows || iCols > bCols) in general. Preserved
		// as-is; confirm intent before simplifying.
		if((iRows|iCols) > (bRows|bCols))
		{
			cudaSafeCall(
				cudaMemcpy2D(tmp_image, sizeof(float2)*iCols,
					wavelet, sizeof(float2)*nCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			/// <summary>	Two 1D passes (each writes transposed) = one 2D level. </summary>
			encode_static(tmp_transpose, tmp_image, iRows, iCols, nRows, nCols);
			encode_static(tmp_wavelet, tmp_transpose, iRows, iCols, nRows, nCols);
			cudaSafeCall(
				cudaMemcpy2D(wavelet, sizeof(float2)*nCols,
					tmp_wavelet, sizeof(float2)*iCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			iRows /= 2;
			iCols /= 2;
		}
		else
		{
			/// <summary>	Remaining 32x32 band: all 5 levels in one launch. </summary>
			cudaSafeCall(
				cudaMemcpy2D(tmp_image, sizeof(float2)*nCols,
					wavelet, sizeof(float2)*nCols,
					sizeof(float2)*bCols, bRows, 
					cudaMemcpyDeviceToDevice)
				);
			encode_shared(tmp_wavelet, tmp_image, nRows, nCols);
			cudaSafeCall(
				cudaMemcpy2D(wavelet, sizeof(float2)*nCols,
					tmp_wavelet, sizeof(float2)*nCols,
					sizeof(float2)*bCols, bRows, 
					cudaMemcpyDeviceToDevice)
				);
		}
	}
	cudaFree(tmp_image);
	cudaFree(tmp_transpose);
	cudaFree(tmp_wavelet);
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Inverse Wavelet Transform: one shared-memory launch rebuilds the
/// 			innermost 32x32 band, then per-level static-kernel passes grow the
/// 			band back to full size. The previously unchecked cudaMalloc calls
/// 			and the initial cudaMemcpy2D are now wrapped in cudaSafeCall for
/// 			consistency with the rest of this function. </summary>
///
/// <param name="wavelet">	[in]  Device pointer to the wavelet coefficients. </param>
/// <param name="nRows">  	The rows; should be a power of 2. </param>
/// <param name="nCols">  	The cols; should be a power of 2. </param>
/// <param name="image">  	[out] Device pointer receiving the reconstructed image. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

void iwt(float2 * wavelet,		// wavelet coefficients
	int nRows, int nCols,		// number of pointers, should be multiple of 2
	float2 * image)				// reconstructed data

{
	/// <summary>	Number of 2D levels; mirrors fwt() (final 5 levels were done in
	/// 			one shared-memory launch, hence the -4). </summary>
	int nLevels = (_log2(nRows) > _log2(nCols)) ?  _log2(nRows):_log2(nCols);
	nLevels -= 4;

	/// <summary>	Size of rows and cols for each level. </summary>
	int iRows, iCols;

	/// <summary>	Reconstruction starts from the innermost 32x32 band. </summary>
	iRows = 32;
	iCols = 32;

	/// <summary>	Scratch device buffers (freed at the end of this call). </summary>
	float2 *tmp_image;
	float2 *tmp_transpose;
	float2 *tmp_wavelet;

	cudaSafeCall(cudaMalloc((void**)&tmp_image,		sizeof(float2)*nRows*nCols));
	cudaSafeCall(cudaMalloc((void**)&tmp_transpose,	sizeof(float2)*nRows*nCols));
	cudaSafeCall(cudaMalloc((void**)&tmp_wavelet,	sizeof(float2)*nRows*nCols));

	/// <summary>	Start from a copy of the coefficient image. </summary>
	cudaSafeCall(
		cudaMemcpy2D(image, sizeof(float2)*nCols, 
			wavelet, sizeof(float2)*nCols, 
			sizeof(float2)*nCols, nRows, 
			cudaMemcpyDeviceToDevice)
		);

	for(int k=0; k<nLevels; k++)
	{
		// NOTE(review): bitwise OR, not logical — see matching note in fwt().
		if((iRows|iCols) > (bRows|bCols))
		{
			cudaSafeCall(
				cudaMemcpy2D(tmp_wavelet, sizeof(float2)*iCols,
					image, sizeof(float2)*nCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			/// <summary>	Two 1D inverse passes (each transposes) = one 2D level. </summary>
  			decode_static(tmp_transpose, tmp_wavelet, iRows, iCols, nRows, nCols);
  			decode_static(tmp_image, tmp_transpose, iRows, iCols, nRows, nCols);
			cudaSafeCall(
				cudaMemcpy2D(image, sizeof(float2)*nCols,
					tmp_image, sizeof(float2)*iCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			iRows *= 2;
			iCols *= 2;
		}
		else
		{
			/// <summary>	Innermost 32x32 band: undo 5 levels in one launch. </summary>
			cudaSafeCall(
				cudaMemcpy2D(tmp_wavelet, sizeof(float2)*nCols,
					image, sizeof(float2)*nCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			decode_shared(tmp_image, tmp_wavelet, nRows, nCols);
			cudaSafeCall(
				cudaMemcpy2D(image, sizeof(float2)*nCols,
					tmp_image, sizeof(float2)*nCols,
					sizeof(float2)*iCols, iRows, 
					cudaMemcpyDeviceToDevice)
				);
			iRows *= 2;
			iCols *= 2;
		}
	}
	cudaFree(tmp_image);
	cudaFree(tmp_transpose);
	cudaFree(tmp_wavelet);
}

////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Discrete Wavelet Transform entry point: dispatches to the forward
/// 			or inverse transform according to the flag. Unknown flags are a
/// 			no-op, matching the original switch's default case. </summary>
///
/// <param name="src">  	[in]  Device pointer to the input data. </param>
/// <param name="dst">  	[out] Device pointer to the output data. </param>
/// <param name="nRows">	The rows. </param>
/// <param name="nCols">	The cols. </param>
/// <param name="flag"> 	DWT_FORWARD or DWT_INVERSE. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////

void dwt(float2* src, float2* dst, int nRows, int nCols, int flag)
{
	if(flag == DWT_FORWARD)
	{
		/// <summary>	Forward Wavelet Transform. </summary>
		fwt(src, nRows, nCols,	dst);
	}
	else if(flag == DWT_INVERSE)
	{
		/// <summary>	Inverse Wavelet Transform. </summary>
		iwt(src, nRows, nCols,	dst);
	}
	/// <summary>	Any other flag: do nothing. </summary>
}
}
