#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "calc_conv_1d.hpp"

#include "stdio.h"
#include "stdlib.h"
#include "string.h"


// Per-block partial convolution: each 32x32 thread block loads a 32-element
// slice of X (global x index) and a 32-element slice of Y (global y index),
// forms all 32*32 products, and accumulates them along anti-diagonals
// (threadIdx.x + threadIdx.y constant), producing one partial sum for each
// of the block's 64 possible output lags.
//
// Launch contract (from the host caller in this file): blockDim = (32, 32),
// dynamic shared memory = 64 * 32 * sizeof(int) (64 diagonal rows x 32
// columns).  devBuff is not used by this kernel.  Results are written to
// globalOut[blockNum * 64 .. blockNum * 64 + 63].
__global__ void calc_conv_kernel_32x32(int * dev_arrayInX, unsigned int countX, int * dev_arrayInY, unsigned int countY, uchar * devBuff, int * globalOut)
{
	int indexX = blockDim.x*blockIdx.x + threadIdx.x;
	int indexY = blockDim.y*blockIdx.y + threadIdx.y;

	// Dynamically sized: 2 * blockDim.x * blockDim.y ints (64 rows of 32).
	extern __shared__ int shareBlock[];

	// Out-of-range elements contribute 0 so edge blocks need no special case.
	int tempX = indexX < countX ? dev_arrayInX[indexX] : 0;
	int tempY = indexY < countY ? dev_arrayInY[indexY] : 0;

	int totolThreadsNum = blockDim.y*blockDim.x;

	// This thread's "home" slot in row threadIdx.y, reused for both the
	// zeroing pass and the row-wise reduction below.
	int offsetNlines = threadIdx.y*blockDim.x;
	int tempIndex = offsetNlines + threadIdx.x;

	// Clear both halves of the 64-row buffer (each thread zeroes two slots):
	// shared memory is uninitialized and not every slot receives a product.
	shareBlock[tempIndex] = 0;
	shareBlock[tempIndex + totolThreadsNum] = 0;


	// Anti-diagonal destination: row (tx + ty) in [0, 63], column tx.
	// The mapping (tx, ty) -> slot is injective, so no write conflicts.
	int dstShareIndex = (threadIdx.x + threadIdx.y)*blockDim.x + threadIdx.x;

	// The zeroing must be visible to all threads before the scattered
	// writes: a thread's destination row differs from the rows it cleared.
	__syncthreads();

	shareBlock[dstShareIndex] = tempX*tempY;

	__syncthreads();

	// Tree reduction along each 32-wide row; the halving loop assumes
	// blockDim.x is a power of two (true for the 32x32 launch).
	int i = blockDim.x >> 1;

	// Second row handled by this thread (rows 32..63 of the buffer).
	int tempIndex1 = tempIndex + totolThreadsNum;

	for (; i>0; i = i >> 1)
	{
		if (threadIdx.x < i)
		{

			shareBlock[tempIndex] += shareBlock[tempIndex + i];
			shareBlock[tempIndex1] += shareBlock[tempIndex1 + i];
		}
		// Barrier sits outside the divergent branch so all threads reach it.
		__syncthreads();
	}

	// Column 0 of each row now holds that diagonal's sum; thread (0, y)
	// writes partials y and y + blockDim.y of this block's output group.
	// NOTE(review): "<< 6" hard-codes 64 outputs per block, i.e. it assumes
	// blockDim.x == blockDim.y == 32 -- confirm against all launch sites.
	if (threadIdx.x == 0)
	{
		int blockNum = blockIdx.x + gridDim.x * blockIdx.y;
		globalOut[threadIdx.y + (blockNum << 6)] = shareBlock[offsetNlines];
		globalOut[threadIdx.y + blockDim.y + (blockNum << 6)] = shareBlock[offsetNlines + totolThreadsNum];	
	}

}

// Final merge: each launch block reads blockSizeX consecutive 32-wide rows
// of devPart, transposes them into shared memory (one output lane per
// threadIdx.x), sums the blockSizeX contributions per lane with a halving
// reduction over threadIdx.y, and writes 32 merged values to devConvOut.
//
// Launch contract (from the host caller in this file): blockDim =
// (32, blockSizeX), dynamic shared memory = 32 * blockSizeX * sizeof(int).
// blockSizeY is not used by this kernel.
// NOTE(review): the halving loop only accumulates all blockDim.y rows when
// blockSizeX is a power of two; the caller passes gridSize.x, which is not
// guaranteed to be one -- confirm, otherwise partial sums are dropped.
__global__ void merge_conv_v1(int * devPart, int * devConvOut, unsigned int blockSizeX, unsigned int blockSizeY)
{
	// Row of devPart this (block, threadIdx.y) pair is responsible for.
	int indexBlockRead = blockIdx.x * blockSizeX + threadIdx.y;

	int indexRead = indexBlockRead * 32 + threadIdx.x;

	// Transposed layout: lane tx owns the contiguous run [tx*blockDim.y ..].
	extern __shared__ int shareOneLine[];

	shareOneLine[threadIdx.x * blockDim.y + threadIdx.y] = devPart[indexRead];

	__syncthreads();


	// Halving reduction over the blockDim.y contributions of each lane.
	for (int i = blockSizeX / 2; i > 0; i = i >> 1)
	{
		if (threadIdx.y < i)
		{
			int tempToAdd = shareOneLine[threadIdx.x * blockDim.y + threadIdx.y + i];
			int tempToAdd1 = shareOneLine[threadIdx.x * blockDim.y + threadIdx.y];
			shareOneLine[threadIdx.x * blockDim.y + threadIdx.y] = tempToAdd + tempToAdd1;
		}
		// Barrier outside the divergent branch: every thread reaches it.
		__syncthreads();

	}

	// Slot 0 of each lane holds the merged sum; 32 outputs per block.
	if (threadIdx.y == 0)
		devConvOut[threadIdx.x + blockIdx.x * blockDim.x] = shareOneLine[threadIdx.x*blockDim.y];

}

// Zeroes one int per thread across the whole launch: the 2D grid/block
// coordinates are flattened row-major into a linear offset into devMem.
// No bounds guard -- the caller must size the grid to cover exactly the
// region it wants cleared.
__global__ void setToZeros(int * devMem)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	const int rowWidth = gridDim.x * blockDim.x;

	devMem[row * rowWidth + col] = 0;
}

// First merge step over the per-block partials: launched (see host caller)
// with blockDim.x = 32 and gridDim.y = 2 * blockSizeY (twice the original
// grid height).  For grid block (bx, by) it adds the FIRST 32 entries of
// source group (bx, by) to the LAST 32 entries of the group one grid row
// above, (bx, by - 1); either term is zeroed when its source row does not
// exist.  blockSizeX is not used by this kernel.
//
// The sum is written to the diagonally shifted group slot
// ((by + bx) * gridDim.x + bx) -- presumably aligning partials of the same
// output lag before merge_conv_v1's reduction; TODO confirm against that
// kernel's read layout.
__global__ void merge_conv_step1(int * devPart, int * devConvOut, unsigned int blockSizeX, unsigned int blockSizeY)
{
	//blockIdx.y = 1 && blockIdx.x=0
	int indexBlockRead = gridDim.x * blockIdx.y + blockIdx.x;
	// First half (32 entries) of this block's 64-entry partial group.
	int indexRead = indexBlockRead * 64 + threadIdx.x;

	// Second half of the corresponding group one grid row above (offset +32).
	int indexReadLast = indexRead - gridDim.x * 64 + 32;

	// Rows at or beyond the original grid height have no direct partials.
	int tempRead = (blockIdx.y >= blockSizeY) ? 0 : devPart[indexRead];
	// Row 0 has no predecessor row above it.
	int tempReadLast = (blockIdx.y == 0) ? 0 : devPart[indexReadLast];

	// Rows strictly past the last original row get no carried half either.
	tempReadLast = (blockIdx.y > blockSizeY) ? 0 : tempReadLast;

	// Diagonal write: destination group row shifted down by blockIdx.x.
	// NOTE(review): for large blockIdx.x + blockIdx.y this index exceeds
	// gridDim.y * gridDim.x groups -- confirm the scratch buffer is sized
	// for the shifted writes.
	int indexBlockWrite = (blockIdx.y + blockIdx.x)*gridDim.x + blockIdx.x;
	int indexWrite = blockDim.x*indexBlockWrite + threadIdx.x;
	devConvOut[indexWrite] = tempRead + tempReadLast;
}

// Host driver: computes the full 1-D convolution of dev_arrayInX (countX
// ints) with dev_arrayInY (countY ints) on the GPU and copies the
// countX + countY - 1 results into hostConvOut.
//
// devBuff is device scratch memory, reused as consecutive int regions of
// 64 * gridSize.x * gridSize.y elements (raw partials, merged partials,
// final output).  NOTE(review): setToZeros below clears 2x that amount
// starting at devOutTemp1, and merge_conv_step1's diagonal writes can land
// past the nominal region -- confirm devBuff's allocation covers this.
// NOTE(review): merge_conv_v1's reduction assumes gridSize.x is a power of
// two; not guaranteed by this driver -- verify for non-power-of-two sizes.
//
// Returns 0 on success, -1 if any launch or the final copy fails
// (previously errors were silently ignored and 0 was always returned).
int calc_conv_cuda(int * dev_arrayInX, unsigned int countX, int * dev_arrayInY, unsigned int countY, uchar * devBuff, int * hostConvOut)
{
	int * devOutTemp = (int *)devBuff;
	dim3 blockSize(32, 32);
	int maxCount = (countY > countX) ? countY : countX;
	// One 32x32 block per 32x32 tile of the (x index, y index) product plane.
	dim3 gridSize((maxCount + 31) / 32, (maxCount + 31) / 32);

	// 64 diagonal rows x 32 columns of shared ints per block.
	calc_conv_kernel_32x32 << <gridSize, blockSize, 64 * 32 * sizeof(int) >> >(dev_arrayInX, countX, dev_arrayInY, countY, devBuff, devOutTemp);
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "calc_conv_kernel_32x32 launch failed: %s\n", cudaGetErrorString(err));
		return -1;
	}

	int * devOut = NULL;
	if ((gridSize.x == 1) && (gridSize.y == 1))
	{
		// Single block: its 64 partials already are the final convolution.
		devOut = devOutTemp;
	}
	else
	{
		// Second scratch region, directly after the raw partials.
		int * devOutTemp1 = devOutTemp + 64 * gridSize.y * gridSize.x;

		// Twice the grid height: each source group contributes two halves.
		dim3 gridSize1(gridSize.x, gridSize.y * 2);

		// Clear the merge target first; merge_conv_step1 writes sparsely.
		setToZeros << <gridSize1, 64 >> >(devOutTemp1);
		err = cudaGetLastError();
		if (err != cudaSuccess)
		{
			fprintf(stderr, "setToZeros launch failed: %s\n", cudaGetErrorString(err));
			return -1;
		}

		merge_conv_step1 << <gridSize1, 32 >> >(devOutTemp, devOutTemp1, gridSize.x, gridSize.y);
		err = cudaGetLastError();
		if (err != cudaSuccess)
		{
			fprintf(stderr, "merge_conv_step1 launch failed: %s\n", cudaGetErrorString(err));
			return -1;
		}

		// Third region: final merged output.
		devOut = devOutTemp1 + 64 * gridSize.y * gridSize.x;

		dim3 blockSize1(32, gridSize.x);

		merge_conv_v1 << <gridSize.y * 2, blockSize1, 32 * gridSize.x*sizeof(int) >> >(devOutTemp1, devOut, gridSize.x, gridSize.y);
		err = cudaGetLastError();
		if (err != cudaSuccess)
		{
			fprintf(stderr, "merge_conv_v1 launch failed: %s\n", cudaGetErrorString(err));
			return -1;
		}

	}
	// Blocking copy: also surfaces any asynchronous kernel execution error.
	err = cudaMemcpy(hostConvOut, devOut, (countX + countY - 1)*sizeof(int), cudaMemcpyDeviceToHost);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "calc_conv_cuda result copy failed: %s\n", cudaGetErrorString(err));
		return -1;
	}

	return 0;
}