
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <cuda.h>
#include <device_functions.h>

#include <stdio.h>
#include "kernel.h"

#include <iostream>
using namespace std;



cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

// Element-wise vector addition: c[i] = a[i] + b[i].
// Expects a one-block 1-D launch with exactly one thread per element;
// there is no bounds check, so blockDim.x must equal the element count.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}

// Legacy 2D texture reference over 8-bit texels, read back as raw element
// values (no float normalization). NOTE(review): texture references are
// deprecated since CUDA 11 and removed in CUDA 12 — porting to texture
// objects (cudaCreateTextureObject) is the long-term fix.
texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex;

// CUDA array backing `tex` when bound via setDataToTexure(); released in releaseTexture().
cudaArray_t g_arr1;

// Nearest-neighbour image resize: each thread samples the global 2D texture
// `tex` at the scaled source coordinate and writes one destination pixel.
// Expects a 2D launch whose grid covers at least dstWidth x dstHeight threads.
__global__ void reshape_kernel(uchar * desDevImgBuff, uint32 dstWidth,uint32 dstHeight,float coeffX,float coeffY)
{
	const int dstX = blockDim.x * blockIdx.x + threadIdx.x;
	const int dstY = blockDim.y * blockIdx.y + threadIdx.y;

	// Guard the grid tail: drop threads outside the destination image.
	if ((dstX >= dstWidth) || (dstY >= dstHeight))
	{
		return;
	}

	// Map the destination pixel back into source texture coordinates.
	const float srcX = float(dstX) * coeffX;
	const float srcY = float(dstY) * coeffY;

	desDevImgBuff[dstY * dstWidth + dstX] = tex2D(tex, srcX, srcY);
}

/*
 * Uploads a srcWidth x srcHeight 8-bit image into a CUDA array and binds it
 * to the global 2D texture reference `tex` (clamped addressing, point
 * sampling, unnormalized coordinates), ready for reshape_kernel().
 *
 * hostImgBuff - host pointer to srcWidth*srcHeight tightly-packed bytes.
 *
 * NOTE: this uses the legacy texture-reference API (deprecated since
 * CUDA 11); texture objects are the modern replacement.
 */
void setDataToTexure(uchar * hostImgBuff,uint32 srcWidth,uint32 srcHeight)
{
	cudaError ret;

	// One 8-bit unsigned channel per texel.
	cudaChannelFormatDesc chdesc = cudaCreateChannelDesc<uchar>();

	// Release any previously-allocated array so repeated calls do not leak.
	if (g_arr1 != NULL)
	{
		cudaFreeArray(g_arr1);
		g_arr1 = NULL;
	}

	ret = cudaMallocArray(&g_arr1, &chdesc, srcWidth, srcHeight);

	if (ret != cudaSuccess)
	{
		cout << "cudaMallocArray failed" << endl;
		return;
	}

	// cudaMemcpy2DToArray replaces the deprecated cudaMemcpyToArray.
	// Host rows are tightly packed, so the source pitch is srcWidth bytes.
	ret = cudaMemcpy2DToArray(g_arr1, 0, 0, hostImgBuff, srcWidth, srcWidth, srcHeight, cudaMemcpyHostToDevice);
	if (ret != cudaSuccess)
	{
		cout << "cudaMemcpy2DToArray failed" << endl;
		return;
	}

	tex.addressMode[0] = cudaAddressModeClamp;  // clamp out-of-range X reads
	tex.addressMode[1] = cudaAddressModeClamp;  // clamp out-of-range Y reads
	tex.normalized = false;                     // integer texel coordinates
	tex.filterMode = cudaFilterModePoint;       // nearest-neighbour fetch

	ret = cudaBindTextureToArray(&tex, g_arr1, &chdesc);

	if (ret != cudaSuccess)
	{
		cout << "cudaBindTextureToArray failed" << endl;
	}
	else
	{
		cout << "cudaBindTextureToArray SUCCESS" << endl;
	}
}

/*
 * Copies a srcWidth x srcHeight 8-bit host image into a caller-owned linear
 * device buffer and binds that buffer to the global 2D texture reference
 * `tex`.
 *
 * hostImgBuff - host pixel data, srcWidth*srcHeight tightly-packed bytes.
 * devBuffSrc  - device buffer of at least srcWidth*srcHeight bytes.
 *
 * NOTE(review): cudaBindTexture2D requires the row pitch to meet the
 * device's texture pitch alignment; a tightly-packed srcWidth may be
 * rejected for odd widths — the bind error below will report it.
 */
void setDataToTexure_g(uchar * hostImgBuff, uint32 srcWidth, uint32 srcHeight,uchar * devBuffSrc)
{
	cudaError ret;

	cudaChannelFormatDesc chdesc = cudaCreateChannelDesc<uchar>();

	ret = cudaMemcpy(devBuffSrc, hostImgBuff, srcWidth*srcHeight, cudaMemcpyHostToDevice);
	if (ret != cudaSuccess)
	{
		cout << "cudaMemcpy failed" << endl;
		return;
	}

	// NOTE(review): wrap addressing only takes effect with normalized
	// coordinates; with normalized == false these behave as clamp.
	tex.addressMode[0] = cudaAddressModeWrap;
	tex.addressMode[1] = cudaAddressModeWrap;
	tex.normalized = false;
	// Linear filtering is only legal for textures returning floats
	// (cudaReadModeNormalizedFloat); with cudaReadModeElementType the
	// previous cudaFilterModeLinear setting made the binding invalid.
	tex.filterMode = cudaFilterModePoint;

	size_t offset = 0;
	// `tex` is declared cudaTextureType2D and is read with tex2D() in the
	// kernels, so it must be bound with the 2D variant; the plain 1D
	// cudaBindTexture() call used before cannot back a 2D reference.
	// Rows are tightly packed, so the pitch is srcWidth bytes.
	ret = cudaBindTexture2D(&offset, &tex, devBuffSrc, &chdesc, srcWidth, srcHeight, srcWidth);

	if (ret != cudaSuccess)
	{
		cout << "cudaBindTexture failed" << endl;
	}
	else
	{
		cout << "cudaBindTexture SUCCESS" << endl;
	}
}

/*
 * Resizes an 8-bit image on the GPU: uploads hostImgBuffIn (widthSrc x
 * heightSrc) to the texture, runs reshape_kernel to produce a widthDst x
 * heightDst image in dev_buff, and copies the result to hostImgBuffOut.
 *
 * dev_buff must hold at least widthDst*heightDst bytes.
 */
void reshape(uchar * hostImgBuffIn, uint32 widthSrc, uint32 heightSrc, uchar*hostImgBuffOut, uint32 widthDst, uint32 heightDst,uchar * dev_buff)
{
	setDataToTexure(hostImgBuffIn, widthSrc, heightSrc);

	// 32x8 = 256 threads per block; derive the ceil-div grid from the block
	// dimensions instead of repeating the constants.
	dim3 dimBlock(32, 8);
	dim3 dimGrid((widthDst + dimBlock.x - 1) / dimBlock.x, (heightDst + dimBlock.y - 1) / dimBlock.y);

	// Destination -> source coordinate scale factors.
	float coeffX = float(widthSrc) / float(widthDst);
	float coeffY = float(heightSrc) / float(heightDst);

	cout << "coeffX:" << coeffX << " coeffY:" << coeffY<<endl;

	reshape_kernel << <dimGrid, dimBlock >> >(dev_buff, widthDst, heightDst, coeffX, coeffY);

	// Launch-configuration errors only surface via cudaGetLastError().
	cudaError ret = cudaGetLastError();
	if (ret != cudaSuccess)
	{
		cout << "reshape_kernel launch failed: " << cudaGetErrorString(ret) << endl;
		return;
	}

	// The blocking cudaMemcpy synchronizes with the kernel before copying.
	cudaMemcpy(hostImgBuffOut, dev_buff, widthDst*heightDst, cudaMemcpyDeviceToHost);
}

/*
 * Unbinds the global texture reference and releases the backing CUDA array.
 * The array handle is reset to NULL so repeated calls do not free an
 * already-freed array.
 */
void releaseTexture(void)
{
	cudaUnbindTexture(&tex);
	if (g_arr1 != NULL)
	{
		cudaFreeArray(g_arr1);
		g_arr1 = NULL;  // prevent double-free on a second call
	}
}

// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Uploads a and b to the device, launches addKernel with one thread per
// element, and downloads the elementwise sum into c. Device buffers are
// released on every path; returns the first cudaError_t encountered.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *devA = 0;
    int *devB = 0;
    int *devC = 0;
    const size_t bytes = size * sizeof(int);
    cudaError_t status;

    do {
        // Choose which GPU to run on, change this on a multi-GPU system.
        status = cudaSetDevice(0);
        if (status != cudaSuccess) {
            fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
            break;
        }

        // Allocate GPU buffers for three vectors (two input, one output).
        status = cudaMalloc((void**)&devC, bytes);
        if (status != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            break;
        }
        status = cudaMalloc((void**)&devA, bytes);
        if (status != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            break;
        }
        status = cudaMalloc((void**)&devB, bytes);
        if (status != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            break;
        }

        // Copy input vectors from host memory to GPU buffers.
        status = cudaMemcpy(devA, a, bytes, cudaMemcpyHostToDevice);
        if (status != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            break;
        }
        status = cudaMemcpy(devB, b, bytes, cudaMemcpyHostToDevice);
        if (status != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            break;
        }

        // Launch a kernel on the GPU with one thread for each element.
        addKernel<<<1, size>>>(devC, devA, devB);

        // Launch-configuration errors surface here, not from the launch line.
        status = cudaGetLastError();
        if (status != cudaSuccess) {
            fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(status));
            break;
        }

        // Wait for the kernel and pick up asynchronous execution errors.
        status = cudaDeviceSynchronize();
        if (status != cudaSuccess) {
            fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", status);
            break;
        }

        // Copy output vector from GPU buffer to host memory.
        status = cudaMemcpy(c, devC, bytes, cudaMemcpyDeviceToHost);
        if (status != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            break;
        }
    } while (0);

    // Common cleanup for success and failure alike.
    cudaFree(devC);
    cudaFree(devA);
    cudaFree(devB);

    return status;
}

#include <math.h>
#define PI 3.141592653589793
// One thread per table entry: writes the split-complex twiddle factor
// W[k] = cos(pi*k/halfLength) - i*sin(pi*k/halfLength) into the separate
// real and imaginary arrays. Expects a single-block 1D launch with one
// thread per entry.
__global__ void genW(float * devW_Re, float * devW_Im, unsigned int halfLength)
{
	int k = threadIdx.x;

	float angle = PI*k / halfLength;
	devW_Re[k] = cosf(angle);
	devW_Im[k] = -sinf(angle);
}

// Packed variant of genW: a 2D block where row y == 0 produces the cosine
// table and row y == 1 the same angles shifted by pi/2 (cos(t + pi/2) ==
// -sin(t)), written 2*halfLength entries after the first row.
__global__ void genW_v1(float * devW, unsigned int halfLength)
{
	// Row 1 starts 2*halfLength entries after row 0.
	int slot = threadIdx.x + (halfLength << 1)*threadIdx.y;
	// The pi/2 shift on the second row turns cos into -sin.
	float angle = PI*threadIdx.x / halfLength + PI / 2 * threadIdx.y;
	devW[slot] = cosf(angle);
}

// Grid-wide variant of genW_v1: each thread's flat global index selects one
// table entry; indices past 2*halfLength get the pi/2-shifted angle so that
// the second half of the buffer holds -sin values.
__global__ void genW_v2(float * devW, unsigned int halfLength)
{
	int gx = blockIdx.x*blockDim.x + threadIdx.x;
	int gy = blockIdx.y*blockDim.y + threadIdx.y;

	// Row-major flattening over the whole grid.
	int flat = gy * gridDim.x*blockDim.x + gx;

	float angle = (flat >= (halfLength << 1)) ? PI*flat / halfLength + PI / 2 : PI*flat / halfLength;
	devW[flat] = cosf(angle);
}

// Debug kernel: stamps the constant 30 over a 2D thread region using a
// hard-coded row stride of 1024 bytes (the commented-out line shows the
// launch-derived stride it deliberately replaced). halfLength is unused.
// NOTE(review): the output buffer must span at least
// (maxIndexY*1024 + maxIndexX + 1) bytes for the launch used — confirm at
// the call sites.
__global__ void kernel_test(uchar * devW, unsigned int halfLength)
{
	int indexX = blockIdx.x*blockDim.x + threadIdx.x;
	int indexY = blockIdx.y*blockDim.y + threadIdx.y;

	//int index = indexY * gridDim.x*blockDim.x + indexX;
	int index = indexY * 1024 + indexX;

	//float theta = (index >= (halfLength << 1)) ? PI*index / halfLength + PI / 2 : PI*index / halfLength;
	devW[index] = 30;// cosf(theta);
}

// Complex multiply: (*reOut + i * *imOut) = (reIn1 + i*imIn1) * (reIn2 + i*imIn2).
// The inputs are passed by value, so the stores cannot clobber them.
__device__ inline void complexMul(float reIn1,float imIn1,float reIn2,float imIn2,float* reOut,float * imOut)
{
	const float productRe = reIn1*reIn2 - imIn1*imIn2;
	const float productIm = reIn1*imIn2 + reIn2*imIn1;
	*reOut = productRe;
	*imOut = productIm;
}

/*
 * One DFT output bin per block (blockIdx.x selects the bin): every thread
 * multiplies one input sample by its twiddle factor W[n*k mod length], then
 * the block tree-reduces the products in shared memory.
 *
 * Expected launch (see cuda_fft()): <<<length, dim3(length, 2),
 * 2*length*sizeof(float)>>> — dynamic shared memory holds `length` real
 * partials followed by `length` imaginary partials.
 * NOTE(review): the reduction indexes with threadIdx.y, so row y == 0 sums
 * the real half and row y == 1 the imaginary half; both rows write the same
 * products in the complexMul step. Confirm blockDim.y is always 2.
 */
__global__ void cuda_FFT_kernel(float * devReIn, float * devImIn, unsigned int length, float * wReInDev, float * wImInDev, float * devBuff)
{
	//int indexX = blockDim.x*blockIdx.x + threadIdx.x;
	// Twiddle index n*k mod length (sample index times bin index).
	int indexW = threadIdx.x * blockIdx.x % length;
	extern __shared__ float sh_mem[];
	// Product lands at [x] (real part) and [x + length] (imaginary part).
	complexMul(devReIn[threadIdx.x], devImIn[threadIdx.x], wReInDev[indexW], wImInDev[indexW], &sh_mem[threadIdx.x], &sh_mem[threadIdx.x + length]);
	//sh_mem[threadIdx.x] = threadIdx.x;
	//sh_mem[threadIdx.x + length] = 3;
	__syncthreads();

	// Row y == 0 starts at offset 0 (real half), row y == 1 at `length`.
	int indexShare = threadIdx.y * length + threadIdx.x;

	// Halving tree reduction; the barrier stays outside the branch so every
	// thread in the block reaches it.
	for (int i = (length >> 1); i > 0; i = i >> 1)
	{
		if (threadIdx.x < i)
		{
			sh_mem[indexShare] += sh_mem[indexShare + i];
		}
		__syncthreads();
	}

	
	// Thread x == 0 of each row publishes its half's total for this bin.
	if (threadIdx.x == 0)
	{
		int indexOut = threadIdx.y * length + blockIdx.x;
		devBuff[indexOut] = sh_mem[indexShare];
	}

	//if ((blockIdx.x == 0) && (threadIdx.y == 0))
	//{
	//	devBuff[threadIdx.x] = sh_mem[threadIdx.x];
	//	devBuff[threadIdx.x + length] = sh_mem[threadIdx.x + length];
	//}
}

/*
 * Large-length variant of cuda_FFT_kernel: each output bin's sum is split
 * over gridDim.x blocks of 1024 threads (blockIdx.y selects the bin,
 * blockIdx.x the 1024-sample segment). Every block writes one partial real
 * and one partial imaginary sum; merge_fft() combines the partials when
 * gridDim.x > 1.
 *
 * Expected launch (see cuda_fft_L()): <<<dim3(length>>10, length), 1024,
 * 1024*2*sizeof(float)>>> — shared memory holds 1024 real partials followed
 * by 1024 imaginary partials.
 */
__global__ void cuda_FFT_kernel_L(float * devReIn, float * devImIn, unsigned int length, float * wReInDev, float * wImInDev, float * devBuff)
{
	int indexX = blockDim.x*blockIdx.x + threadIdx.x;
	int indexY = blockIdx.y;

	//int indexIn = (indexY*gridDim.x*blockDim.x + indexX) % length;

	//int indexOut = blockIdx.y;

	// Twiddle index n*k mod length (sample index times bin index).
	int indexW = (indexX * indexY) % length;
	extern __shared__ float sh_mem[];
	// Product lands at [x] (real) and [x + 1024] (imaginary).
	complexMul(devReIn[indexX], devImIn[indexX], wReInDev[indexW], wImInDev[indexW], &sh_mem[threadIdx.x], &sh_mem[threadIdx.x + 1024]);
	//sh_mem[threadIdx.x] = threadIdx.x;
	//sh_mem[threadIdx.x + length] = 3;
	__syncthreads();

	//int indexShare = threadIdx.y * length + threadIdx.x;

	// Reduce the real and imaginary halves in lockstep; the barrier stays
	// outside the branch so all 1024 threads reach it.
	for (int i = (1024 >> 1); i > 0; i = i >> 1)
	{
		if (threadIdx.x < i)
		{
			sh_mem[threadIdx.x] += sh_mem[threadIdx.x + i];
			sh_mem[threadIdx.x + 1024] += sh_mem[threadIdx.x + 1024 + i];
		}
		__syncthreads();
	}


	// Thread 0 publishes this block's partial pair: real at blockIndex,
	// imaginary `offset` entries later.
	if (threadIdx.x == 0)
	{
		int blockIndex = blockIdx.y* gridDim.x + blockIdx.x;//) << 1;

		int offset = length*length >>10;//the total num of complex data.
		
		devBuff[blockIndex] = sh_mem[threadIdx.x];
		devBuff[blockIndex + offset] = sh_mem[threadIdx.x + 1024];
	}

	/*if ((blockIdx.y == 0) && (blockIdx.x == 0))
	{
		devBuff[threadIdx.x] = sh_mem[threadIdx.x];
		devBuff[threadIdx.x + 1024] = sh_mem[threadIdx.x + 1024];
	}*/
}


// Rounds x up to the nearest power of two (x itself when already a power of
// two). Smears the high bit across all lower bits of x-1, then adds one.
// Note: an input of 0 wraps to 0, matching the original bit-twiddling form.
static unsigned int nextPowerOfTwo(unsigned int x)
{
	unsigned int v = x - 1u;
	for (unsigned int shift = 1u; shift < 32u; shift <<= 1)
	{
		v |= v >> shift;
	}
	return v + 1u;
}

/*
 * Small-length DFT driver: generates the packed twiddle table with genW_v1,
 * runs cuda_FFT_kernel with one block per output bin, and copies the result
 * (length real values followed by length imaginary values) to hostFftOut.
 *
 * devBuff must provide scratch for [re twiddles | im twiddles | output],
 * i.e. at least 4*length floats. The length x 2 block must fit the
 * device's threads-per-block limit (length <= 512 for the usual 1024 cap).
 */
void cuda_fft(float * devReIn, float * devImIn, unsigned int length,  float * devBuff, float * hostFftOut)
{
	// Carve the scratch buffer: twiddle tables first, kernel output after.
	float * devRe_W = devBuff;
	float * devIm_W = devBuff + length;

	// Bytes for one real + one imaginary array of `length` floats.
	int totalLen = length*sizeof(float)* 2;

	cout << "length is " << length << endl;

	// Row y == 0 fills the cosine table, row y == 1 the -sin table.
	dim3 blockSize(length, 2);

	genW_v1 << <1, blockSize >> >(devRe_W, length >> 1);
	cudaError ret = cudaGetLastError();
	if (ret != cudaSuccess)
	{
		cout << "genW_v1 launch failed: " << cudaGetErrorString(ret) << endl;
		return;
	}

	float * fftOut = devIm_W + length;

	// Shared memory holds one block's real and imaginary partials.
	int shareMemSize = totalLen;

	cuda_FFT_kernel << <length, blockSize, shareMemSize >> >(devReIn, devImIn, length, devRe_W, devIm_W, fftOut);
	ret = cudaGetLastError();
	if (ret != cudaSuccess)
	{
		cout << "cuda_FFT_kernel launch failed: " << cudaGetErrorString(ret) << endl;
		return;
	}

	// The blocking cudaMemcpy synchronizes with the kernels first.
	cudaMemcpy(hostFftOut, fftOut, totalLen, cudaMemcpyDeviceToHost);
}

/*
 * Reduces the per-block partial sums written by cuda_FFT_kernel_L: each
 * block loads blockDim.x partials into shared memory, tree-reduces them,
 * and thread 0 writes the block's total into fftResults.
 *
 * NOTE(review): requires blockDim.x * sizeof(float) bytes of dynamic shared
 * memory — the launch must size it in BYTES, not element counts.
 * NOTE(review): the output index uses a fixed 1024 (<<10) row stride while
 * the input index mixes blockDim.x and `len` strides — confirm this matches
 * the layout cuda_FFT_kernel_L produces before reusing it elsewhere.
 */
__global__ void merge_fft(float * sumLineResults,float* fftResults, int len)
{
//	int indexX = threadIdx.x;
//	int indexY = blockIdx.x;
	int indexIn = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*len;;//(threadIdx.x << 10) + blockIdx.x + blockIdx.y*len;
	extern __shared__ float sh_mem[];
	// One partial per thread.
	sh_mem[threadIdx.x] = sumLineResults[indexIn];

	__syncthreads();

	int halfLen = blockDim.x >> 1;

	// Halving tree reduction; barrier outside the branch so every thread
	// reaches it.
	for (int i = halfLen; i > 0; i = i >> 1)
	{
		if (i > threadIdx.x)
		{
			sh_mem[threadIdx.x] += sh_mem[threadIdx.x + i];
		}
		__syncthreads();
	}

	if (threadIdx.x == 0 )//&& blockIdx.x == 0 && blockIdx.y==0
	{
		//here, put the imag part all after real part
		//int indexBlockOut = blockIdx.y >> 1;
		//int offset = (blockIdx.y & 0x01) ? len : 0;
		//int indexOut = (indexBlockOut << 10) + offset;
		int indexOut = (blockIdx.y << 10);
		fftResults[indexOut + blockIdx.x] = sh_mem[threadIdx.x];
	}
}


/*
fft length should be equal to or larger than 1024 and less than 1024*1024
*/
/*
 * Large-length DFT driver (intended for 1024 <= length < 1024*1024 per the
 * note above): builds the twiddle table with genW_v2, runs cuda_FFT_kernel_L
 * with 1024-thread blocks, and — when each line is split over more than one
 * block — reduces the partials with merge_fft. Copies 2*length floats of
 * result back to hostFftOut.
 */
void cuda_fft_L(float * devReIn, float * devImIn, unsigned int length, float * devBuff, float * hostFftOut)
{
	// Scratch layout: [re twiddles | im twiddles | kernel output | merge output].
	float * devRe_W = devBuff;
	float * devIm_W = devBuff + length;

	// Bytes for one real + one imaginary array of `length` floats.
	int totalLen = length*sizeof(float)* 2;

	// Round up so the twiddle-table grid math works for any length.
	unsigned int lenghtPower = nextPowerOfTwo(length);

	dim3 blockSize(32, 32);

	// Each 32x32 block writes 1024 table entries.
	unsigned int gridLen = lenghtPower >> 9;

	int gridX = gridLen >= 32 ? 32 : gridLen;
	int gridY = (gridLen + 31) >> 5;

	dim3 gridSize(gridX, gridY);

	genW_v2 << <gridSize, blockSize >> >(devRe_W, lenghtPower >> 1);

	float * fftOut = devIm_W + length;

	// One 1024-thread block per 1024-sample segment of each output line;
	// shared memory holds 1024 real + 1024 imaginary partials (in bytes).
	dim3 gridSize1(length >> 10, length);
	cuda_FFT_kernel_L << <gridSize1, 1024, 1024 * 2 * sizeof(float) >> >(devReIn, devImIn, length, devRe_W, devIm_W, fftOut);

	if ((length >> 10) > 1)
	{
		// Each line was split over length>>10 blocks: reduce the partials.
		dim3 blockSizeMerge(length >> 10, 1);
		dim3 gridSizeMerge(1024, length);
		float * fftResults = fftOut + (length*(length >> 9));
		// Dynamic shared memory is sized in BYTES: merge_fft stores one
		// float per thread, so the previous `length >> 10` (a float COUNT)
		// was a 4x under-allocation.
		int shareMemSize = (length >> 10) * sizeof(float);
		merge_fft << <gridSizeMerge, blockSizeMerge, shareMemSize >> >(fftOut, fftResults, length);

		fftOut = fftResults;
	}

	// The blocking cudaMemcpy synchronizes with the kernels first.
	cudaMemcpy(hostFftOut, fftOut, totalLen, cudaMemcpyDeviceToHost);
}

/*
 * Sanity test for kernel_test: stamps a 32 x 64 thread region of devBuffIn
 * (row stride 1024 inside the kernel) and copies `len` bytes back to hostOut.
 *
 * The previous launch used a single 32x64 = 2048-thread block, which exceeds
 * the 1024-threads-per-block hardware limit, so the launch failed and the
 * kernel never ran. The same 32 x 64 thread coordinates are now produced by
 * two 32x32 blocks (blockIdx.y contributes the upper half of indexY).
 */
void test_kernelSize(unsigned char * devBuffIn, unsigned char * hostOut, unsigned int len)
{
	dim3 blockSize(32, 32);
	dim3 gridSize(1, 2);

	kernel_test << <gridSize, blockSize >> >(devBuffIn, len);

	cudaError ret = cudaGetLastError();
	if (ret != cudaSuccess)
	{
		cout << "kernel_test launch failed: " << cudaGetErrorString(ret) << endl;
	}

	cudaMemcpy(hostOut, devBuffIn, len, cudaMemcpyDeviceToHost);
}

