
#pragma once

/*
	 Total intensity [NOT YET IMPLEMENTED]
	    R0 R1 R2 R3 L0 L1 L2 L3
	  R0 *  *  *  *
	  R1    *  *  *
	  R2       *  *
	  R3          *
	  L0             *  *  *  *
	  L1                *  *  *
	  L2                   *  *
	  L3                      *
*/

/*
	  Full polar [As implemented below]
	    R0 L0 R1 L1 R2 L2 R3 L3
	  R0 *  *  *  *  *  *  *  *
	  L0 *  *  *  *  *  *  *  *
	  R1       *  *  *  *  *  *
	  L1       *  *  *  *  *  *
	  R2             *  *  *  *
	  L2             *  *  *  *
	  R3                   *  *
	  L3                   *  *
*/

// Note: This kernel computes the full-polar correlation where
//         polarisations are adjacent in memory.
// Multiply-accumulate (cross-correlation) kernel.
//
// For every antenna pair (ai, aj) with ai <= aj (upper triangle), adds
//     sum over accum_count samples of  in[t, ai, f] * conj(in[t, aj, f])
// into d_out[f, ai, aj] (results are accumulated onto existing output).
//
// Layout expectations:
//   - blockDim == (BD_X, BD_Y); gridDim.x tiles frequency in steps of BD_X;
//   - gridDim.y flattens a 2D (ai-block, aj-block) decomposition of size
//     ant_block_count^2 (see host-side launcher);
//   - T is a 2-component complex type with float members .x/.y (e.g. float2);
//   - static shared memory: 2 * BD_Y * BD_X floats.
//
// Note: This kernel computes the full-polar correlation where
//         polarisations are adjacent in memory.
template<bool DO_FULL_POLAR, gpu_size_t BD_X, gpu_size_t BD_Y, class T>
__global__
void mac_device_kernel(const T*   d_in,
                       gpu_size_t in_stride1,
                       gpu_size_t in_stride2,
                       gpu_size_t ant_block_count, // Needed for manual grid decomp
                       gpu_size_t accum_count,
                       T*         d_out,
                       gpu_size_t out_stride1,
                       gpu_size_t out_stride2) {

	// Tile of 'j'-antenna samples staged in shared memory, split into
	// separate real/imag planes.
	//                         ant   freq
	__shared__ float s_aj_real[BD_Y][BD_X];
	__shared__ float s_aj_imag[BD_Y][BD_X];
	
	// Block decomposition
	// Note: We manually decompose the 2nd grid dim into two dims
	gpu_size_t blk_ai = blockIdx.y % ant_block_count;
	gpu_size_t blk_aj = blockIdx.y / ant_block_count;
	
	// Apply diagonal reordering to avoid partition camping
	gpu_size_t old_blk_ai = blk_ai;
	blk_ai = blk_aj;
	blk_aj = (blk_aj + old_blk_ai) % ant_block_count;
	
	// Calculate the corresponding sample indices
	gpu_size_t blk_ai_idx = blk_ai * BD_Y;
	gpu_size_t blk_aj_idx = blk_aj * BD_Y;
	// -------------------
	
	// Skip symmetric antenna terms at the block level
	// Note: blk_*_idx are uniform across the block, so all threads take
	//       this return together and no later __syncthreads is orphaned.
	if( blk_ai_idx > blk_aj_idx ) {
		return;
	}
	
	// Thread decomposition
	gpu_size_t f_idx  = blockIdx.x * BD_X + threadIdx.x;
	gpu_size_t ai_idx = blk_ai_idx        + threadIdx.y;
	gpu_size_t aj_idx = blk_aj_idx        + threadIdx.y;
	
	// Initialise the accumulation results to zero
	// TODO: Try putting this in shared mem instead and seeing if can
	//       boost occupancy. Answer: no. 2x slower.
	T cij[BD_Y];
#pragma unroll
	for( gpu_size_t j=0; j<BD_Y; ++j ) {
		cij[j].x = 0;
		cij[j].y = 0;
	}
	
	// Iterate over each accumulation
	for( gpu_size_t accum_idx=0; accum_idx<accum_count; ++accum_idx ) {
		// Fix: this offset was previously accumulated in a plain 'int',
		// which truncates/overflows for large in_stride1*accum_idx; keep
		// it in gpu_size_t like every other index.
		gpu_size_t t_off = in_stride1 * accum_idx;
		// Make sure all threads have finished reading the shared tile
		// from the previous iteration before overwriting it
		__syncthreads();
		// Load this thread's 'i' sample into a register and stage its
		// 'j' sample in shared memory for the whole block
		T ai = __ldg(&d_in[f_idx + t_off + in_stride2*ai_idx]);
		T aj = __ldg(&d_in[f_idx + t_off + in_stride2*aj_idx]);
		s_aj_real[threadIdx.y][threadIdx.x] = aj.x;
		s_aj_imag[threadIdx.y][threadIdx.x] = aj.y;
		__syncthreads(); // Wait for writes to shared mem to complete
		// Iterate over the block of j antennas
#pragma unroll
		for( gpu_size_t j=0; j<BD_Y; ++j ) {
			// Conjugate-multiply and accumulate: cij[j] += ai * conj(aj)
			// Note: This form produces MAD MAD MAD MUL SUB
			cij[j].x += ai.x*s_aj_real[j][threadIdx.x];
			cij[j].x += ai.y*s_aj_imag[j][threadIdx.x];
			cij[j].y += s_aj_real[j][threadIdx.x]*ai.y;
			cij[j].y -= ai.x*s_aj_imag[j][threadIdx.x];
		}
	}
	
	// Write the result to global mem
#pragma unroll
	for( gpu_size_t j=0; j<BD_Y; ++j ) {
		aj_idx = blk_aj_idx + j;
		// Avoid symmetric antenna terms
		// Note: This is a little trick to divide by either 1 (single-pol)
		// or 2 (full-polar) when skipping terms near the diagonal.
		if( ai_idx/(DO_FULL_POLAR+1) <= aj_idx/(DO_FULL_POLAR+1) ) {
			// Note: This is standard lexicographical indexing, which includes
			//       unused space for the symmetric terms.
			// TODO: The alignment of this indexing may kill coalescence.
			// Note: We add the result to the existing output data
			gpu_size_t out_idx = f_idx + ai_idx*out_stride1 + aj_idx*out_stride2;
			d_out[out_idx].x += cij[j].x;
			d_out[out_idx].y += cij[j].y;
		}
	}
}


// Host-side launcher for mac_device_kernel.
//
// Configures the thread-block/grid decomposition and instantiates the
// kernel for either full-polar or single-pol operation. Launch is
// asynchronous on 'stream'; in _DEBUG builds the stream is synchronised
// and any CUDA error is rethrown as std::runtime_error.
template<class T>
void mac_device(const T* d_in, size_t in_stride1, size_t in_stride2,
                size_t ant_count,
                size_t freq_count,
                size_t accum_count,
                T*     d_out, size_t out_stride1, size_t out_stride2,
                bool   do_full_polar = true,
                cudaStream_t stream=0) {
	// Block dimensions (BD_X spans frequency, BD_Y spans antennas).
	// Note: These can be tuned, but BD_X should be a multiple of 16.
	// For best performance tune BD_X and BD_Y and recompile the code:
	//   2-antennas           BD_X=128, BD_Y=2
	//   4-antennas           BD_X=64,  BD_Y=4
	//   8-antennas           BD_X=32,  BD_Y=8
	//   16 antennas or more  BD_X=16,  BD_Y=16
	enum { BD_X = 16 ,     // frequency
	       BD_Y = 16 };    // antennas

	// TODO: This assumes even divisions!
	if( ant_count % BD_Y != 0 ) {
		cerr << "*** WARNING: antenna count must be a multiple of " << BD_Y << endl;
	}
	if( freq_count % BD_X != 0 ) {
		cerr << "*** WARNING: frequency count must be a multiple of " << BD_X << endl;
	}

	gpu_size_t ant_block_count = ant_count / BD_Y;
	dim3       block(BD_X, BD_Y);
	// Note: We flatten a 3D grid decomposition into 2D to fit CUDA
	dim3       grid(freq_count/BD_X, ant_block_count * ant_block_count);

	// The polarisation mode is a compile-time template argument, hence
	// the runtime branch over two separate instantiations.
	if( do_full_polar ) {
		mac_device_kernel<true,BD_X,BD_Y><<<grid, block, 0, stream>>>
			(d_in, in_stride1, in_stride2, ant_block_count, accum_count,
			 d_out, out_stride1, out_stride2);
	}
	else {
		mac_device_kernel<false,BD_X,BD_Y><<<grid, block, 0, stream>>>
			(d_in, in_stride1, in_stride2, ant_block_count, accum_count,
			 d_out, out_stride1, out_stride2);
	}

#ifdef _DEBUG
	// Block until the kernel finishes so execution errors surface here
	cudaStreamSynchronize(stream);
	cudaError_t cuda_error = cudaGetLastError();
	if( cuda_error != cudaSuccess ) {
		throw std::runtime_error(
			std::string("In mac_device kernel: ") +
			cudaGetErrorString(cuda_error));
	}
#endif
}
