﻿////////////////////////////////////////////////////////////////////////////////////////////////////
// file:	C:\Users\Quan\Documents\Visual Studio 2010\Projects\source\cvWavelet\dwt.cu
//
// summary:	dwt class
////////////////////////////////////////////////////////////////////////////////////////////////////

#include "dwt.hpp"
#include "cutils.hpp"
#include <iostream>
using namespace std;
namespace cuda
{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Calculate logarithm base 2 of a number. </summary>
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Smallest j such that 2^j >= n (ceiling of log2 for n >= 1).
/// 			Returns 0 for n <= 1. </summary>
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline int _log2 (int n)
{
	int bits = 0;
	for (int reach = 1; reach < n; reach <<= 1)
	{
		++bits;
	}
	return bits;
}

/// <summary>	Lifting-scheme constants: a (alpha, predict step) and b (beta, update step)
/// 			are read by the __encode_static/__decode_static kernels; c equals sqrt(2)
/// 			(the usual normalization factor) but is not referenced anywhere in this
/// 			file -- presumably kept for a normalized variant. </summary>
__constant__ float a = -0.5f;
__constant__ float b = 0.25f;
__constant__ float c = 1.4142135623730950488016887242097f;

/// <summary>	Tile width in pixels, one warp wide. </summary>
#define bCols 32		//warpSize
/// <summary>	Tile height in pixels, one warp tall. </summary>
#define bRows 32		//warpSize



////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Forward wavelet analysis of one 32x32 tile held entirely in shared memory.
/// 			Runs 5 dyadic levels; each level replaces the active sub-tile with scaled
/// 			pairwise averages (low band) and scaled pairwise differences (high band),
/// 			first along x, then along y.  Expects a bCols x bRows thread block; the
/// 			host wrapper launches a 1x1 grid, so only the top-left tile is processed.
/// 			nRows is unused here (row pitch is nCols elements).
/// 			NOTE(review): float2 +,- and *float rely on operator overloads presumably
/// 			provided by cutils.hpp -- confirm. </summary>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ 
void __encode_shared(float2* src, float2* dst, int nRows, int nCols)
{
	/// <summary>	Double-buffered tile; the +1 column pad avoids shared-memory bank conflicts. </summary>
	__shared__ float2 sMem[bRows][bCols+1];
	__shared__ float2 tMem[bRows][bCols+1];
	/// <summary>	Extent of the active (not yet fully decomposed) sub-tile. </summary>
	int iRows = bRows;
	int iCols = bCols;
	/// <summary>	Copy a (32x32) block from global memory to shared memory. </summary>
	float2 pixel = src[(blockIdx.y*bRows+threadIdx.y)*nCols + blockIdx.x*bCols+threadIdx.x];	
	__syncthreads();
	sMem[threadIdx.y][threadIdx.x] 		= pixel;
	tMem[threadIdx.y][threadIdx.x]		= pixel;
	__syncthreads();
	/**********************************************************************************************************/
	for(int iLevels=0; iLevels<5; iLevels++)
	{
		/// <summary>	Halve the active region for this level. </summary>
		iCols >>= 1;	//iCols /= 2;
		iRows >>= 1;	//iRows /= 2;

		/// <summary>	Along x: left half of the active region gets pairwise averages (low band)... </summary>	
		if(threadIdx.x < iCols && threadIdx.y < (2*iRows))
			tMem[threadIdx.y][threadIdx.x]
			= (sMem[threadIdx.y][2*threadIdx.x+0]+sMem[threadIdx.y][2*threadIdx.x+1])*0.5f;

		/// <summary>	...right half gets pairwise differences (high band). </summary>
		if(threadIdx.x >= iCols && threadIdx.y < (2*iRows) && threadIdx.x < (2*iCols))
			tMem[threadIdx.y][threadIdx.x]
			= (sMem[threadIdx.y][2*(threadIdx.x-iCols)+0]-sMem[threadIdx.y][2*(threadIdx.x-iCols)+1])*0.5f;
		__syncthreads();
			
		/// <summary>	Publish the x-pass result so the y pass reads consistent data. </summary>
		sMem[threadIdx.y][threadIdx.x]		= tMem[threadIdx.y][threadIdx.x];
		__syncthreads();

		/// <summary>	Along y: top half gets pairwise averages... </summary>	
		if(threadIdx.y < iRows && threadIdx.x < (2*iCols))
			tMem[threadIdx.y][threadIdx.x]
			= (sMem[2*threadIdx.y+0][threadIdx.x]+sMem[2*threadIdx.y+1][threadIdx.x])*0.5f;

		/// <summary>	...bottom half gets pairwise differences. </summary>
		if(threadIdx.y >= iRows && threadIdx.x < (2*iCols) && threadIdx.y < (2*iRows))
			tMem[threadIdx.y][threadIdx.x]
			= (sMem[2*(threadIdx.y-iRows)+0][threadIdx.x]-sMem[2*(threadIdx.y-iRows)+1][threadIdx.x])*0.5f;
		__syncthreads();
			
		/// <summary>	Publish the y-pass result for the next level. </summary>
		sMem[threadIdx.y][threadIdx.x]		= tMem[threadIdx.y][threadIdx.x];
		__syncthreads();
	
	}
	/**********************************************************************************************************/
	/// <summary>	Copy a (32x32) block from shared memory to global memory. </summary>
	dst[(blockIdx.y*bRows+threadIdx.y)*nCols + blockIdx.x*bCols+threadIdx.x]		= sMem[threadIdx.y][threadIdx.x];	
}


////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper: launch the single-block shared-memory forward DWT kernel.
/// 			Launches one bCols x bRows thread block with a 1x1 grid, so only the
/// 			top-left 32x32 tile is transformed (used for the final, coarsest
/// 			decomposition levels in fwt). </summary>
/// <param name="src">		Device pointer to the input coefficients (float2 per pixel). </param>
/// <param name="dst">		Device pointer to the output coefficients (may equal src;
/// 							the kernel reads once up front and writes once at the end). </param>
/// <param name="nRows">	Image height in pixels. </param>
/// <param name="nCols">	Image width in pixels (also the row pitch, in elements). </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void encode_shared(float2* src, float2* dst, int nRows, int nCols)
{
	/// <summary>	Kernel Configuration: a single 32x32 thread block. </summary>
	dim3 nBlocks(1, 1);
	dim3 nThreads(bCols, bRows);

	/// <summary>	Kernel Launching. </summary>
	__encode_shared<<<nBlocks, nThreads>>>(src, dst, nRows, nCols);
	//cudaThreadSynchronize();
	/// <summary>	Name the failing wrapper in the message, matching encode_static's style. </summary>
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("CUDA error encode_shared: %s\n", cudaGetErrorString(error));
		exit(-1);
	}
}



////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	One forward lifting level along x for a 32x32 tile, writing the result
/// 			transposed (so two invocations cover x then y).  Each thread owns two
/// 			horizontally adjacent pixels: blockDim is (bCols/2, bRows).  The predict
/// 			step uses alpha (a), the update step uses beta (b); even (low) and odd
/// 			(high) coefficients are then de-interleaved and the low/high halves are
/// 			stored to the left/right halves of the transposed output.
/// 			iRows/iCols are accepted but unused inside this kernel.
/// 			NOTE(review): all blockDim.x threads of a row perform the same non-atomic
/// 			"+=" on the dummy column (see the alpha-lifting of column 2*blockDim.x+2
/// 			below).  That applies exactly once only under pre-Volta warp-lockstep
/// 			execution; with independent thread scheduling (SM70+) it is a data race.
/// 			Verify on target hardware or guard with "if(threadIdx.x==0)". </summary>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ 
void __encode_static(float2* src, float2* dst, 
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Declare shared memory with dummy columns. </summary>
	/// <summary>	The 3 extra columns hold halo data from neighbor blocks and also avoid bank conflict. </summary>
	__shared__ float2 sMem[bRows][bCols+3];
	__shared__ float2 tMem[bRows][bCols+3];
	/**********************************************************************************************************/
	/// <summary>	Copy a (32x32) block from global memory to shared memory: two pixels per thread. </summary>
	float2 pixel1 = src[(blockIdx.y*blockDim.y+threadIdx.y)*nCols + blockIdx.x*(2*blockDim.x)+threadIdx.x];	
	float2 pixel2 = src[(blockIdx.y*blockDim.y+threadIdx.y)*nCols + blockIdx.x*(2*blockDim.x)+threadIdx.x+blockDim.x];

	
	__syncthreads();
	sMem[threadIdx.y][threadIdx.x] = pixel1;
	sMem[threadIdx.y][threadIdx.x+blockDim.x] = pixel2;
	tMem[threadIdx.y][threadIdx.x] = pixel1;
	tMem[threadIdx.y][threadIdx.x+blockDim.x] = pixel2;
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Copy the leftmost column of the right neighbor block into dummy column 2*blockDim.x. </summary>
	/// <summary>	In the rightmost block, mirror: duplicate the column before the last column. </summary>
	if(blockIdx.x == (gridDim.x-1))
	{
		tMem[threadIdx.y][2*blockDim.x]
		= tMem[threadIdx.y][2*blockDim.x-1-1];
	}
	else
	{
		tMem[threadIdx.y][2*blockDim.x]
		= src[(blockIdx.y*blockDim.y+threadIdx.y)*nCols + (blockIdx.x+1)*(2*blockDim.x)+0];
	}

	/**********************************************************************************************************/
	/// <summary>	Copy the two rightmost columns of the left neighbor block into dummy columns +1 and +2. </summary>
	/// <summary>	In the leftmost block, skip: the values are patched after alpha lifting below. </summary>
	if(blockIdx.x == 0)
	{
		//It's not important, they can be anything
	}
	else
	{
		tMem[threadIdx.y][2*blockDim.x+1]
		= src[(blockIdx.y*blockDim.y+threadIdx.y)*nCols + (blockIdx.x-1)*(2*blockDim.x) + 2*blockDim.x-2];
		tMem[threadIdx.y][2*blockDim.x+2]
		= src[(blockIdx.y*blockDim.y+threadIdx.y)*nCols + (blockIdx.x-1)*(2*blockDim.x) + 2*blockDim.x-1];
	}
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Predict step (alpha): each thread lifts its odd sample with its even neighbors. </summary>
	/// <summary>	The rightmost thread uses the halo in dummy column 2*blockDim.x instead. </summary>
	if(threadIdx.x == (blockDim.x-1))
	{
		tMem[threadIdx.y][2*threadIdx.x+1]
		+= a*(tMem[threadIdx.y][2*threadIdx.x+1-1] + tMem[threadIdx.y][2*blockDim.x]);
	}
	else
	{
		tMem[threadIdx.y][2*threadIdx.x+1]
		+= a*(tMem[threadIdx.y][2*threadIdx.x+1-1] + tMem[threadIdx.y][2*threadIdx.x+1+1]);
	}

	/**********************************************************************************************************/
	/// <summary>	Alpha-lift the left-halo odd sample so the beta step below can read it. </summary>
	/// NOTE(review): every thread of the row executes this same non-atomic "+=" on one
	/// address -- see the race warning in the kernel header comment.
	/// 0 ... 2*blockDim, 2*blockDim+1, 2*blockDim+2;
	tMem[threadIdx.y][2*blockDim.x+2]
		+= a*(tMem[threadIdx.y][0] + tMem[threadIdx.y][2*blockDim.x+1]);

	/**********************************************************************************************************/
	/// <summary>	Leftmost block has no left neighbor: mirror the first odd sample into the halo. </summary>
	if(blockIdx.x == 0)
		tMem[threadIdx.y][2*blockDim.x+2]
		= tMem[threadIdx.y][0+1];
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Update step (beta): each thread lifts its even sample with its odd neighbors. </summary>
	/// <summary>	The leftmost thread uses the halo in dummy column 2*blockDim.x+2 instead. </summary>
	if(threadIdx.x == 0)
		tMem[threadIdx.y][2*threadIdx.x+0]
		+= b*(tMem[threadIdx.y][2*threadIdx.x+0+1] + tMem[threadIdx.y][2*blockDim.x+2]);
	else
		tMem[threadIdx.y][2*threadIdx.x+0]
		+= b*(tMem[threadIdx.y][2*threadIdx.x+0+1] + tMem[threadIdx.y][2*threadIdx.x+0-1]);
	__syncthreads();	
	/**********************************************************************************************************/
	/// <summary>	De-interleave: evens to the left half, odds to the right half. </summary>
	/// <example>	///		0 1 2 3 4 5 6 7 		///		0 2 4 6 1 3 5 7		 	</example>
	sMem[threadIdx.y][threadIdx.x] = tMem[threadIdx.y][2*threadIdx.x+0];
	sMem[threadIdx.y][threadIdx.x+blockDim.x] = tMem[threadIdx.y][2*threadIdx.x+1];
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Copy to global memory, transposed: low band to rows [0, gridDim.x*blockDim.x),
	/// 			high band shifted down by gridDim.x blocks. </summary>
	dst[((blockIdx.x+0)*blockDim.x + threadIdx.x)*nCols + blockIdx.y*blockDim.y + threadIdx.y]
	= sMem[threadIdx.y][threadIdx.x];
	dst[((blockIdx.x+gridDim.x)*blockDim.x + threadIdx.x)*nCols + blockIdx.y*blockDim.y + threadIdx.y]
	= sMem[threadIdx.y][threadIdx.x + blockDim.x];
	// dst[((blockIdx.x+0)*blockDim.x + threadIdx.x) + (blockIdx.y*blockDim.y + threadIdx.y)*nCols]
	// = sMem[threadIdx.x][threadIdx.y];
	// dst[((blockIdx.x+gridDim.x)*blockDim.x + threadIdx.x) + (blockIdx.y*blockDim.y + threadIdx.y)*nCols]
	// = sMem[threadIdx.x + blockDim.x][threadIdx.y];
}


////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper: run one transposing forward lifting pass over the
/// 			iRows x iCols region.  One block per 32x32 tile; each thread covers
/// 			two horizontally adjacent pixels, so blocks are bCols/2 threads wide. </summary>
/// <param name="src">		Device pointer to the input coefficients. </param>
/// <param name="dst">		Device pointer to the transposed output (must differ from src). </param>
/// <param name="iRows">	Active region height (multiple of bRows expected). </param>
/// <param name="iCols">	Active region width (multiple of bCols expected). </param>
/// <param name="nRows">	Full image height. </param>
/// <param name="nCols">	Full image width (row pitch in elements). </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void encode_static(float2* src, float2* dst,
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Kernel Configuration: grid tiles the active region. </summary>
	dim3 gridDims(iCols / bCols, iRows / bRows);
	dim3 blockDims(bCols / 2, bRows);

	/// <summary>	Kernel Launching and launch-error check. </summary>
	__encode_static<<<gridDims, blockDims>>>
		(src, dst, iRows, iCols, nRows, nCols);
	//cudaThreadSynchronize();
	cudaError_t status = cudaGetLastError();
	if(status != cudaSuccess)
	{
		printf("CUDA error encode_static: %s\n", cudaGetErrorString(status));
		exit(-1);
	}
}



////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Inverse of __encode_shared: reconstructs one 32x32 tile in shared memory
/// 			by undoing 5 levels of average/difference analysis, growing the active
/// 			region from 1x1 upward (doubling each level), x pass then y pass.
/// 			Reconstruction is sum (even output) and difference (odd output) of the
/// 			low/high bands.  Expects a bCols x bRows block on a 1x1 grid; nRows is
/// 			unused (row pitch is nCols elements). </summary>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void __decode_shared(float2* src, float2* dst, int nRows, int nCols)
{
	/// <summary>	Double-buffered tile; the +1 column pad avoids shared-memory bank conflicts. </summary>
	__shared__ float2 sMem[bRows][bCols+1];
	__shared__ float2 tMem[bRows][bCols+1];
	/// <summary>	Active region grows from the 1x1 coarsest band. </summary>
	int iRows = 1;
	int iCols = 1;
	/// <summary>	Copy a (32x32) block from global memory to shared memory. </summary>	
	float2 pixel = src[(blockIdx.y*bRows+threadIdx.y)*nCols + blockIdx.x*bCols+threadIdx.x];	
	__syncthreads();
	sMem[threadIdx.y][threadIdx.x] 		= pixel;
	tMem[threadIdx.y][threadIdx.x]		= pixel;
	__syncthreads();
	/**********************************************************************************************************/
	for(int iLevels=0; iLevels<5; iLevels++)
	{
		/// <summary>	Along x: even outputs = low + high (inverse of average)... </summary>	
		if(threadIdx.x < iCols && threadIdx.y < (2*iRows))
		{
			tMem[threadIdx.y][2*threadIdx.x+0]
			= (sMem[threadIdx.y][threadIdx.x]+sMem[threadIdx.y][threadIdx.x+iCols])*1.0f;
		}
		/// <summary>	...odd outputs = low - high (inverse of difference). </summary>
		if(threadIdx.x >= iCols && threadIdx.y < (2*iRows) && threadIdx.x < (2*iCols))
		{
			tMem[threadIdx.y][2*(threadIdx.x-iCols)+1]
			= (sMem[threadIdx.y][threadIdx.x-iCols]-sMem[threadIdx.y][threadIdx.x])*1.0f;
		}
		__syncthreads();

		/// <summary>	Publish the x-pass result (only the active region). </summary>
		if(threadIdx.x < (2*iCols) && threadIdx.y < (2*iRows))
			sMem[threadIdx.y][threadIdx.x]		= tMem[threadIdx.y][threadIdx.x];
		__syncthreads();
		
		/// <summary>	Along y: even rows = low + high. </summary>	
		/// NOTE(review): this branch lacks the "threadIdx.x < (2*iCols)" guard its siblings
		/// have, so it also writes columns beyond the active region; those writes appear
		/// harmless since the guarded copy-back below ignores them -- confirm.
		if(threadIdx.y < iRows)
			tMem[2*threadIdx.y+0][threadIdx.x]
			= (sMem[threadIdx.y][threadIdx.x]+sMem[threadIdx.y+iRows][threadIdx.x])*1.0f;	

		/// <summary>	Odd rows = low - high. </summary>
		if(threadIdx.y >= iRows && threadIdx.x < (2*iCols) && threadIdx.y < (2*iRows))
			tMem[2*(threadIdx.y-iRows)+1][threadIdx.x]
			= (sMem[threadIdx.y-iRows][threadIdx.x]-sMem[threadIdx.y][threadIdx.x])*1.0f;
		__syncthreads();

		/// <summary>	Publish the y-pass result for the next level. </summary>
		if(threadIdx.x < (2*iCols) && threadIdx.y < (2*iRows))
			sMem[threadIdx.y][threadIdx.x]		= tMem[threadIdx.y][threadIdx.x];
		__syncthreads();			

		/// <summary>	Double the active region for the next level. </summary>
		iCols <<= 1;
		iRows <<= 1;		
	}


	/**********************************************************************************************************/
	/// <summary>	Copy a (32x32) block from shared memory to global memory. </summary>
	dst[(blockIdx.y*bRows+threadIdx.y)*nCols + blockIdx.x*bCols+threadIdx.x]		= sMem[threadIdx.y][threadIdx.x];
}



////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper: launch the single-block shared-memory inverse DWT kernel.
/// 			One bCols x bRows thread block on a 1x1 grid, so only the top-left
/// 			32x32 tile is reconstructed (the coarsest levels in iwt). </summary>
/// <param name="src">		Device pointer to the wavelet coefficients. </param>
/// <param name="dst">		Device pointer to the reconstructed tile (may equal src). </param>
/// <param name="nRows">	Image height in pixels. </param>
/// <param name="nCols">	Image width in pixels (row pitch in elements). </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void decode_shared(float2* src, float2* dst, int nRows, int nCols)
{
	/// <summary>	Kernel Configuration: a single 32x32 thread block. </summary>
	dim3 nBlocks(1, 1);
	dim3 nThreads(bCols, bRows);

	/// <summary>	Kernel Launching. </summary>
	__decode_shared<<<nBlocks, nThreads>>>
		(src, dst, nRows, nCols);
	/// <summary>	Name the failing wrapper in the message, matching encode_static's style. </summary>
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("CUDA error decode_shared: %s\n", cudaGetErrorString(error));
		exit(-1);
	}
}


////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	One inverse lifting level: reads the transposed low/high halves written
/// 			by __encode_static, re-interleaves even/odd coefficients, then undoes
/// 			the lifting in reverse order (beta update first, then alpha predict)
/// 			and stores the reconstructed row-major tile.  Each thread owns two
/// 			adjacent output pixels: blockDim is (bCols/2, bRows).  iRows/iCols are
/// 			accepted but unused inside this kernel.
/// 			NOTE(review): all blockDim.x threads of a row perform the same
/// 			non-atomic "-=" on the dummy column below; exact-once application
/// 			relies on pre-Volta warp-lockstep execution and is a data race under
/// 			independent thread scheduling (SM70+) -- verify or guard with
/// 			"if(threadIdx.x==0)". </summary>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void __decode_static(float2* src, float2* dst, 
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Declare shared memory with dummy columns. </summary>
	/// <summary>	The 3 extra columns hold halo data from neighbor blocks and also avoid bank conflict. </summary>
	__shared__ float2 sMem[bRows][bCols+3];
	__shared__ float2 tMem[bRows][bCols+3];
	/**********************************************************************************************************/
	/// <summary>	Load the low half (pixel1) and high half (pixel2) from the transposed layout. </summary>
	float2 pixel1 = src[((blockIdx.x+0)*blockDim.x + threadIdx.x)*nCols + blockIdx.y*blockDim.y + threadIdx.y];
	float2 pixel2 = src[((blockIdx.x+gridDim.x)*blockDim.x + threadIdx.x)*nCols + blockIdx.y*blockDim.y + threadIdx.y];
	__syncthreads();
	sMem[threadIdx.y][threadIdx.x] = pixel1;
	sMem[threadIdx.y][threadIdx.x + blockDim.x] = pixel2;
	tMem[threadIdx.y][threadIdx.x] = pixel1;
	tMem[threadIdx.y][threadIdx.x + blockDim.x] = pixel2;
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Re-interleave: evens from the left half, odds from the right half. </summary>
	/// <example>	///		0 2 4 6 1 3 5 7		///		0 1 2 3 4 5 6 7			 	</example>
	sMem[threadIdx.y][2*threadIdx.x+0] = tMem[threadIdx.y][threadIdx.x] ;
	sMem[threadIdx.y][2*threadIdx.x+1] = tMem[threadIdx.y][threadIdx.x+blockDim.x];
	//__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Fetch the rightmost odd column of the left neighbor block into dummy column +2. </summary>
	/// <summary>	In the leftmost block, mirror the column after the first column instead. </summary>
	/// <example>	-1	0	1	2	.	.	2*bdim-1	2*bim		2*bdim+1		2*bdim+2	. </example>
	/// <example>		0	1	2	.	.	2*bdim-1	(bid+1)+0	(bid+1)+1		-1			. </example>
	if(blockIdx.x == 0)
		sMem[threadIdx.y][2*blockDim.x+2]
		= sMem[threadIdx.y][1];
	else
		sMem[threadIdx.y][2*blockDim.x+2]
		= src[((blockIdx.x-1+gridDim.x)*blockDim.x+blockDim.x-1)*nCols + blockIdx.y*blockDim.y+threadIdx.y];
	//__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Fetch the leftmost low/high columns of the right neighbor block into dummy columns +0/+1. </summary>
	/// <summary>	In the rightmost block, skip: the values are patched after beta lifting below. </summary>
	if(blockIdx.x == (gridDim.x-1))
	{
		//It's not important, they can be anything
	}
	else
	{
		sMem[threadIdx.y][2*blockDim.x+0]
		= src[((blockIdx.x+1+0)*blockDim.x)*nCols + blockIdx.y*blockDim.y+threadIdx.y];
		sMem[threadIdx.y][2*blockDim.x+1]
		= src[((blockIdx.x+1+gridDim.x)*blockDim.x)*nCols + blockIdx.y*blockDim.y+threadIdx.y];
	}
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Undo the update step (beta): subtract what encode added to each even sample. </summary>
	/// <summary>	The leftmost thread uses the halo in dummy column 2*blockDim.x+2 instead. </summary>
	if(threadIdx.x == 0)
		sMem[threadIdx.y][2*threadIdx.x+0]
		-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1] + sMem[threadIdx.y][2*blockDim.x+2]);
	else
		sMem[threadIdx.y][2*threadIdx.x+0]
		-= b*(sMem[threadIdx.y][2*threadIdx.x+0+1] + sMem[threadIdx.y][2*threadIdx.x+0-1]);
	//__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Beta-unlift the right-halo even sample so the alpha step below can read it. </summary>
	/// NOTE(review): every thread of the row executes this same non-atomic "-=" on one
	/// address -- see the race warning in the kernel header comment.
	/// 0 ... 2*blockDim, 2*blockDim+1, 2*blockDim+2;
	sMem[threadIdx.y][2*blockDim.x]
	-= b*(sMem[threadIdx.y][2*blockDim.x-1] + sMem[threadIdx.y][2*blockDim.x+1]);
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Rightmost block has no right neighbor: mirror the last even sample into the halo. </summary>
	if(blockIdx.x == (gridDim.x-1))
		sMem[threadIdx.y][2*blockDim.x]
		= sMem[threadIdx.y][2*blockDim.x-2];
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Undo the predict step (alpha): subtract what encode added to each odd sample. </summary>
	/// <summary>	The rightmost thread uses the halo in dummy column 2*blockDim.x instead. </summary>
	if(threadIdx.x == (blockDim.x-1))
		sMem[threadIdx.y][2*threadIdx.x+1]
		-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1] + sMem[threadIdx.y][2*blockDim.x]);
	else
		sMem[threadIdx.y][2*threadIdx.x+1]
		-= a*(sMem[threadIdx.y][2*threadIdx.x+1-1] + sMem[threadIdx.y][2*threadIdx.x+1+1]);
	__syncthreads();
	/**********************************************************************************************************/
	/// <summary>	Copy a (32x32) block to global memory, row-major (un-transposed). </summary>
	dst[(blockIdx.y*blockDim.y+threadIdx.y)*nCols + blockIdx.x*(2*blockDim.x)+threadIdx.x]	
	= sMem[threadIdx.y][threadIdx.x];
	dst[(blockIdx.y*blockDim.y+threadIdx.y)*nCols + blockIdx.x*(2*blockDim.x)+threadIdx.x+blockDim.x]
	= sMem[threadIdx.y][threadIdx.x+blockDim.x];
}



////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Host wrapper: run one inverse (transposing) lifting pass over the
/// 			iRows x iCols region.  One block per 32x32 tile; each thread covers
/// 			two adjacent output pixels, so blocks are bCols/2 threads wide. </summary>
/// <param name="src">		Device pointer to the transposed coefficients. </param>
/// <param name="dst">		Device pointer to the reconstructed output (must differ from src). </param>
/// <param name="iRows">	Active region height (multiple of bRows expected). </param>
/// <param name="iCols">	Active region width (multiple of bCols expected). </param>
/// <param name="nRows">	Full image height. </param>
/// <param name="nCols">	Full image width (row pitch in elements). </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void decode_static(float2* src, float2* dst,
	int iRows, int iCols, int nRows, int nCols)
{
	/// <summary>	Kernel Configuration: grid tiles the active region. </summary>
	dim3 nBlocks(iCols/bCols, iRows/bRows);
	dim3 nThreads(bCols/2, bRows);

	/// <summary>	Kernel Launching. </summary>
	__decode_static<<<nBlocks, nThreads>>>
		(src, dst, iRows, iCols, nRows, nCols);

	/// <summary>	Name the failing wrapper in the message, matching encode_static's style. </summary>
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("CUDA error decode_static: %s\n", cudaGetErrorString(error));
		exit(-1);
	}
}


////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Forward multi-level 2D wavelet transform driver. </summary>
/// <param name="image">			Device pointer: original signal. </param>
/// <param name="wavelet">			Device pointer: receives the decomposed coefficients. </param>
/// <param name="nRows">			Image height in pixels. </param>
/// <param name="nCols">			Image width in pixels (row pitch in elements). </param>
/// <param name="tmp_transpose">	Device scratch buffer for the transposing encode passes. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void fwt(float2 * image,		// original signal
	float2 * wavelet,			// decomposited data
	int nRows, int nCols,
	float2* tmp_transpose)		
	
{
	/// <summary>	Levels = log2 of the larger dimension minus 4, so the coarsest
	/// 			band lands near the 32x32 (bRows x bCols) kernel tile. </summary>
	int nLevels = (_log2(nRows) > _log2(nCols)) ?  _log2(nRows):_log2(nCols);
	nLevels -= 4;
	/// <summary>	Guard tiny images: a negative count would make the shifts below undefined. </summary>
	if(nLevels < 0)
		nLevels = 0;

	/// <summary>	Size of rows and cols for each level. </summary>
	int iRows, iCols;

	/// <summary>	Round each dimension down to a multiple of 2^nLevels. </summary>
	iRows = (nRows >> nLevels) << nLevels;
	iCols = (nCols >> nLevels) << nLevels;

	/// <summary>	Seed the output with the input so each level transforms in place. </summary>
	cudaError_t error = cudaMemcpy2D(wavelet, sizeof(float2)*nCols, 
		image, sizeof(float2)*nCols, 
		sizeof(float2)*nCols, nRows, 
		cudaMemcpyDeviceToDevice);
	if(error != cudaSuccess)
	{
		printf("CUDA error fwt: %s\n", cudaGetErrorString(error));
		exit(-1);
	}

	for(int k=0; k<nLevels; k++)
	{
		/// NOTE(review): bitwise OR, not logical -- e.g. 16|32 = 48 > 32 would still pick
		/// the static path; presumably iRows and iCols shrink in lockstep so that case
		/// never occurs -- confirm before changing.
		if((iRows|iCols) > (bRows|bCols))
		{
			/// <summary>	Two transposing passes: rows, then (transposed) columns. </summary>
			encode_static(wavelet, tmp_transpose, iRows, iCols, nRows, nCols);
			encode_static(tmp_transpose, wavelet, iRows, iCols, nRows, nCols);
		}
		else
		{
			/// <summary>	Remaining levels fit in a single 32x32 block. </summary>
			encode_shared(wavelet, wavelet, nRows, nCols);
		}
		iRows >>= 1;
		iCols >>= 1;
	}
}



////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Inverse multi-level 2D wavelet transform driver. </summary>
/// <param name="wavelet">			Device pointer: wavelet coefficients. </param>
/// <param name="image">			Device pointer: receives the reconstructed data. </param>
/// <param name="nRows">			Image height in pixels. </param>
/// <param name="nCols">			Image width in pixels (row pitch in elements). </param>
/// <param name="tmp_transpose">	Device scratch buffer for the transposing decode passes. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void iwt(float2 * wavelet,		// wavelet coefficients
	float2 * image,				// reconstructed data
	int nRows, int nCols,
	float2* tmp_transpose)

{
	/// <summary>	Same level count as fwt so decomposition and reconstruction match. </summary>
	int nLevels = (_log2(nRows) > _log2(nCols)) ?  _log2(nRows):_log2(nCols);
	nLevels -= 4;
	/// <summary>	Guard tiny images: keep the loop count non-negative, mirroring fwt. </summary>
	if(nLevels < 0)
		nLevels = 0;
	
	/// <summary>	Size of rows and cols for each level. </summary>
	int iRows, iCols;

	/// <summary>	Start from the coarsest band (one kernel tile) and grow each level. </summary>
	iRows = bRows;
	iCols = bCols;


	/// <summary>	Seed the output with the coefficients so each level reconstructs in place. </summary>
	cudaError_t error = cudaMemcpy2D(image, sizeof(float2)*nCols, 
		wavelet, sizeof(float2)*nCols, 
		sizeof(float2)*nCols, nRows, 
		cudaMemcpyDeviceToDevice);
	if(error != cudaSuccess)
	{
		printf("CUDA error iwt: %s\n", cudaGetErrorString(error));
		exit(-1);
	}

	for(int k=0; k<nLevels; k++)
	{
		/// NOTE(review): bitwise OR, not logical -- see the matching note in fwt;
		/// confirm iRows/iCols always grow in lockstep before changing.
		if((iRows|iCols) > (bRows|bCols))
		{
			/// <summary>	Two transposing passes: (transposed) columns, then rows. </summary>
			decode_static(image, tmp_transpose, iRows, iCols, nRows, nCols);
			decode_static(tmp_transpose, image, iRows, iCols, nRows, nCols);
		}
		else
		{
			/// <summary>	Coarsest levels fit in a single 32x32 block. </summary>
			decode_shared(image, image, nRows, nCols);
		}
		iRows <<= 1;
		iCols <<= 1;
	}
}


////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>	Public entry point: dispatch to the forward or inverse wavelet transform. </summary>
/// <param name="src">				Device pointer to the input data. </param>
/// <param name="dst">				Device pointer to the output data. </param>
/// <param name="nRows">			Image height in pixels. </param>
/// <param name="nCols">			Image width in pixels. </param>
/// <param name="flag">				DWT_FORWARD or DWT_INVERSE; any other value is a no-op. </param>
/// <param name="tmp_transpose">	Device scratch buffer passed through to the drivers. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
void dwt(
		float2* src,	
		float2* dst,	
		int nRows,
		int nCols,
		int flag,
		float2* tmp_transpose)
{
	/// <summary>	Forward Wavelet Transform. </summary>
	if(flag == DWT_FORWARD)
	{
		fwt(src, dst, nRows, nCols, tmp_transpose);
	}
	/// <summary>	Inverse Wavelet Transform. </summary>
	else if(flag == DWT_INVERSE)
	{
		iwt(src, dst, nRows, nCols, tmp_transpose);
	}
	/// <summary>	Unrecognized flag: do nothing. </summary>
}
}