
//default Kernel 
//Default analysis kernel: runs the mining -> filtering -> analysis [-> operations]
//pipeline over one packet buffer. Launched as a 1D grid (one block per chunk of
//ANALYSIS_TPB packets; see the launch wrapper below). T = mined-data element type,
//R = result element type.
//NOTE(review): `state` is passed by value, so any modification a stage makes to
//its copy is not visible on the host after the kernel returns.
template<typename T,typename R>
__global__ void COMPOUND_NAME(ANALYSIS_NAME,KernelAnalysis)(packet_t* GPU_buffer, T* GPU_data, R* GPU_results, analysisState_t state){
	//Point this block at its slice of the buffer before the first stage.
	state.blockIterator = blockIdx.x;
	COMPOUND_NAME(ANALYSIS_NAME,mining)(GPU_buffer, GPU_data, GPU_results, state);
	__syncthreads();

	//Reset the iterator in case the mining stage advanced its local copy.
	state.blockIterator = blockIdx.x;
	COMPOUND_NAME(ANALYSIS_NAME,filtering)(GPU_buffer, GPU_data, GPU_results, state);
	__syncthreads();

	/* Analysis implementation*/
	COMPOUND_NAME(ANALYSIS_NAME,analysis)(GPU_buffer, GPU_data, GPU_results, state);
	__syncthreads();

	/* If there are SYNCBLOCKS barriers do not put Operations function call here */
	//When inter-block (SYNCBLOCKS) barriers are present, operations() is instead
	//issued by the extra-kernel calls in the launch wrapper, which act as
	//grid-wide synchronization points between kernel launches.
	#if __SYNCBLOCKS_COUNTER == 0 && __SYNCBLOCKS_MODULE_COUNTER == 0
		COMPOUND_NAME(ANALYSIS_NAME,operations)(GPU_buffer, GPU_data, GPU_results, state);
	#endif

}


/**** Launch wrapper ****/
//default Launch Wrapper for Analysis not using Windows 

//Default launch wrapper for analyses that do NOT use windows.
//Allocates host (pinned) and device buffers, zeroes the device arrays, launches
//the default analysis kernel plus any module/user extra kernels (expanded from
//the *.def X-macro files), copies results back, frees device memory, and finally
//invokes the host-side hooks with the results.
//Does nothing when packetBuffer is NULL.
//FIX: cudaThreadSynchronize() is deprecated (CUDA 4.0) and removed in CUDA 12;
//replaced with its drop-in equivalent cudaDeviceSynchronize().
template<typename T,typename R>
void COMPOUND_NAME(ANALYSIS_NAME,launchAnalysis_wrapper)(PacketBuffer* packetBuffer, packet_t* GPU_buffer){

	analysisState_t state;
	T *GPU_data;
	R *GPU_results, *results;
	int64_t *auxBlocks;


	if(packetBuffer != NULL){

		memset(&state,0,sizeof(state));
		//TODO: CONDITIONAL PINNED MEMORY && STREAMS

		/*** Host memory allocation***/
		//Pinned host memory (cudaHostAlloc) for faster device->host copies.
		cudaAssert(cudaHostAlloc((void**)&results,sizeof(R)*MAX_BUFFER_PACKETS,0));

		cudaAssert(cudaHostAlloc((void**)&auxBlocks,sizeof(int64_t)*MAX_BUFFER_PACKETS/ANALYSIS_TPB,0));


		/*** GPU memory allocation***/
		BMMS::mallocBMMS((void**)&GPU_data,ARRAY_SIZE(T));
		BMMS::mallocBMMS((void**)&GPU_results,ARRAY_SIZE(R));
		BMMS::mallocBMMS((void**)&state.GPU_aux,ARRAY_SIZE(T));  //Auxiliary array
		//2x size: double-buffered per-block auxiliary values — TODO confirm against kernels.
		BMMS::mallocBMMS((void**)&state.GPU_auxBlocks,2*sizeof(int64_t)*MAX_BUFFER_PACKETS/ANALYSIS_TPB);
		BMMS::mallocBMMS((void**)&state.inputs.GPU_extendedParameters,sizeof(int64_t)*MAX_INPUT_EXTENDED_PARAMETERS);
		BMMS::mallocBMMS((void**)&state.GPU_codeRequiresWLR,ARRAY_SIZE(uint32_t)/ANALYSIS_TPB); //Op Code Exec Flags


		/*** MEMSET 0 GPU arrays ***/
		cudaAssert(cudaMemset(GPU_data,0,ARRAY_SIZE(T)));
		cudaAssert(cudaMemset(GPU_results,0,ARRAY_SIZE(R)));
		cudaAssert(cudaMemset(state.GPU_aux,0,ARRAY_SIZE(T)));
		cudaAssert(cudaMemset(state.GPU_auxBlocks,0,2*sizeof(int64_t)*MAX_BUFFER_PACKETS/ANALYSIS_TPB));
		cudaAssert(cudaMemset(state.GPU_codeRequiresWLR,0,ARRAY_SIZE(uint32_t)/ANALYSIS_TPB));
		cudaAssert(cudaDeviceSynchronize());

		/*** KERNEL DIMS ***/
		dim3 block(ANALYSIS_TPB);		 			//Threads Per Block (1D)
		dim3 grid(MAX_BUFFER_PACKETS/ANALYSIS_TPB);		 	//Grid size (1D)

		//Set state number of blocks and last Packet position
		state.windowState.totalNumberOfBlocks = MAX_BUFFER_PACKETS/ANALYSIS_TPB;
		state.windowState.hasReachedWindowLimit = true;
		state.lastPacket = packetBuffer->getNumOfPackets();
		state.windowState.windowStartTime= packetBuffer->getPacket(0)->timestamp;
		state.windowState.windowEndTime= packetBuffer->getPacket(packetBuffer->getNumOfPackets()-1)->timestamp;

		DEBUG(STR(ANALYSIS_NAME)"> Throwing Kernel with default implementation.");
		DEBUG(STR(ANALYSIS_NAME)"> Parameters -> gridDim:%d",grid.x);

		/*** KERNEL CALLS ***/
		COMPOUND_NAME(ANALYSIS_NAME,KernelAnalysis)<<<grid,block>>>(GPU_buffer,GPU_data,GPU_results,state);
		//Synchronize so launch/execution errors surface here via cudaAssert.
		cudaAssert(cudaDeviceSynchronize());

		/*EXTRA KERNEL CALLS */

		/*Module Analysis Extra Kernels calls*/
		//X-macro expansion: each include emits the kernel call for ITERATOR__ if
		//that module slot is defined (the .def file consumes/undefs ITERATOR__).
		#define ITERATOR__ 0
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 1
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 2
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 3
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 4
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 5
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 6
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 7
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 8
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 9
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 10
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 11
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 12
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 13
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 14
		#include "ModuleExtraKernelCall.def"

		#define ITERATOR__ 15
		#include "ModuleExtraKernelCall.def"


		/*Userdefined Extra Kernels calls*/
		#define ITERATOR__ 0
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 1
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 2
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 3
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 4
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 5
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 6
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 7
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 8
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 9
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 10
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 11
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 12
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 13
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 14
		#include "UserExtraKernelCall.def"

		#define ITERATOR__ 15
		#include "UserExtraKernelCall.def"


		/*** END OF EXTRA KERNEL CALLS ***/

		/*** Copy results & auxBlocks arrays ***/
		//Only the first half of GPU_auxBlocks is copied back (matches auxBlocks size).
		cudaAssert(cudaMemcpy(results,GPU_results,MAX_BUFFER_PACKETS*sizeof(R),cudaMemcpyDeviceToHost));
		cudaAssert(cudaMemcpy(auxBlocks,state.GPU_auxBlocks,MAX_BUFFER_PACKETS/ANALYSIS_TPB*sizeof(int64_t),cudaMemcpyDeviceToHost));
		cudaAssert(cudaDeviceSynchronize());


		/*** FREE GPU DYNAMIC MEMORY ***/
		BMMS::freeBMMS(GPU_data);
		BMMS::freeBMMS(GPU_results);
		BMMS::freeBMMS(state.GPU_aux);
		BMMS::freeBMMS(state.GPU_auxBlocks);
		BMMS::freeBMMS(state.inputs.GPU_extendedParameters);
		BMMS::freeBMMS(state.GPU_codeRequiresWLR);

		/*** LAUNCH HOOK (Host function) ***/

		//Launch hook (or preHook if window is set)
		COMPOUND_NAME(ANALYSIS_NAME,hooks)(packetBuffer, results, state,auxBlocks);

		//Frees results (pinned memory must be released with cudaFreeHost)
		cudaAssert(cudaFreeHost(results));
		cudaAssert(cudaFreeHost(auxBlocks));
	}
}
