
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include "measure.hpp"

#include <stdio.h>
#include <iostream>
#include <string>
#include <conio.h>
#include <cstdlib>

using namespace std;

typedef float mat_field_t;

#define SIZE 2048

cudaError_t chooseSmartestDevice();

// Bundles all host- and device-side buffers for one matrix-multiplication
// run. Buffer sizes follow the allocations in mulMatrices: h_c/d_c hold
// m*n elements, h_a/d_a hold m*z, h_b/d_b hold n*z. initMatrices sets
// m = n = z = SIZE, so in practice every matrix is SIZE x SIZE.
struct MatMulData
{
	int m, n, z;       // matrix dimensions (all SIZE in initMatrices)
	mat_field_t *h_c;  // host result buffer (m*n), filled by mulMatrices
	mat_field_t *h_a;  // host input A (m*z)
	mat_field_t *h_b;  // host input B (n*z)
	mat_field_t *d_c;  // device result buffer (m*n)
	mat_field_t *d_a;  // device copy of A
	mat_field_t *d_b;  // device copy of B
};

// Computes one output element of c = a * b for square z x z matrices
// stored as flat column-major arrays: c(i,j) = sum_o a(i,o) * b(o,j),
// with element (r,col) at index r + col*z.
// Expected launch: 2D grid/blocks where x covers i and y covers j.
// NOTE(review): the buffers are allocated as m*n / m*z / n*z elsewhere;
// this indexing is only correct when m == n == z (true for this benchmark).
__global__ void matMulKernel(mat_field_t *c, const mat_field_t * __restrict__ a, const mat_field_t * __restrict__ b, const int z)
{
	const int i = blockIdx.x*blockDim.x + threadIdx.x; // output row
	const int j = blockIdx.y*blockDim.y + threadIdx.y; // output column

	// Guard the matrix edge: without this, a grid that over-covers the
	// z x z output would read and write out of bounds.
	if (i >= z || j >= z)
		return;

	mat_field_t res = 0.0f; // float literal avoids accidental double math
	for (int o = 0; o < z; ++o)
	{
		// a[i+o*z]: i varies with threadIdx.x across the warp -> coalesced.
		// b[o+j*z]: uniform within a warp row -> broadcast load.
		res += a[i + o*z] * b[o + j*z];
	}
	c[i + j*z] = res;
}

// Spot-checks a few random elements of the GPU result against a CPU
// reference, reports mismatches, then frees the host-side matrices.
// `size` is part of the measurement-callback signature and is unused here
// (the matrices are always SIZE x SIZE).
void cleanupMatrices(int size, void** pData)
{
	MatMulData *pMatrices = (MatMulData*)*pData;
	bool worked = true;
	for(int test=0; test<5 ; ++test)
	{
		// Recompute one randomly chosen element on the CPU using the SAME
		// operand order as the kernel: c(i,j) = sum_o a[i+o*z] * b[o+j*z].
		// (The previous check swapped a and b, i.e. it computed B*A instead
		// of A*B, so it compared against the wrong reference value.)
		int i = rand()%SIZE;
		int j = rand()%SIZE;
		mat_field_t result = 0;
		for(int o=0; o<SIZE ; ++o)
		{
			result += pMatrices->h_a[i+o*SIZE]*pMatrices->h_b[o+j*SIZE];
		}
		const mat_field_t expected = pMatrices->h_c[i+j*SIZE];
		// Use a relative tolerance: these sums reach ~1e8, where float has
		// an absolute resolution of roughly 32, so the old fixed 1e-4
		// absolute bound could never be satisfied.
		const mat_field_t magnitude = expected < 0 ? -expected : expected;
		const mat_field_t tolerance = 0.0001f*magnitude + 0.0001f;
		if(result>(expected+tolerance)||result<(expected-tolerance))
		{
			cout << "Did not calculate correctly!" << result << " != " << expected << "(i: " << i << ", " << j << ")" << endl;
			worked = false;
		}
	}
	if(worked)
	{
		cout << "worked" << endl;
	}
	// Release host buffers allocated with new[] in initMatrices.
	delete [] pMatrices->h_c;
	delete [] pMatrices->h_a;
	delete [] pMatrices->h_b;
	delete pMatrices;
}

// Reports a failed CUDA call to stderr, resets the device, and aborts the
// current measurement by throwing (matches the file's existing error style).
static void throwCudaError(const char* what, cudaError_t status)
{
	fprintf(stderr, "%s failed! (%s)\n", what, cudaGetErrorString(status));
	cudaDeviceReset();
	throw "error";
}

// Allocates `bytes` of device memory into *devPtr. On failure, prints the
// currently available/total device memory for diagnosis (with correct
// %zu conversions for size_t), resets the device, and throws.
static void allocateDevice(void** devPtr, size_t bytes)
{
	cudaError_t status = cudaMalloc(devPtr, bytes);
	if (status != cudaSuccess) {
		size_t avail = 0;
		size_t total = 0;
		cudaMemGetInfo(&avail, &total);
		fprintf(stderr, "cudaMalloc failed! avail: %zu total %zu tried: %zu (%s)\n",
		        avail, total, bytes, cudaGetErrorString(status));
		cudaDeviceReset();
		throw "error";
	}
}

// Runs one timed GPU matrix multiplication: uploads A and B, launches the
// kernel with dim1 x dim1 threads per block, waits for completion, and
// downloads the result into h_c. Device buffers are allocated and freed on
// every call, so allocation cost is part of the measurement.
// Throws "error" (after cudaDeviceReset) on any CUDA failure.
void mulMatrices(int dim1, void** pData)
{
	MatMulData *pMatrices = (MatMulData*)*pData;

	const int m = pMatrices->m;
	const int n = pMatrices->n;
	const int z = pMatrices->z;
	const size_t fieldSize = sizeof(mat_field_t);
	// Compute byte counts in size_t so the products cannot overflow int.
	const size_t bytesC = (size_t)m*n*fieldSize;
	const size_t bytesA = (size_t)m*z*fieldSize;
	const size_t bytesB = (size_t)n*z*fieldSize;

	// Allocate GPU buffers for the two inputs and the output.
	allocateDevice((void**)&pMatrices->d_c, bytesC);
	allocateDevice((void**)&pMatrices->d_a, bytesA);
	allocateDevice((void**)&pMatrices->d_b, bytesB);

	// Copy the input matrices from host memory to the GPU buffers.
	cudaError_t cudaStatus = cudaMemcpy(pMatrices->d_a, pMatrices->h_a, bytesA, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		throwCudaError("cudaMemcpy (A)", cudaStatus);
	}
	cudaStatus = cudaMemcpy(pMatrices->d_b, pMatrices->h_b, bytesB, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		throwCudaError("cudaMemcpy (B)", cudaStatus);
	}

	// One thread per output element; assumes dim1 divides SIZE evenly
	// (main only schedules power-of-two dim1 values that do).
	dim3 threads(dim1, dim1);
	dim3 blocks(SIZE/dim1, SIZE/dim1);
	matMulKernel<<<blocks, threads>>>(pMatrices->d_c, pMatrices->d_a, pMatrices->d_b, pMatrices->z);

	// Launch-configuration errors surface immediately via cudaGetLastError...
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		throwCudaError("kernel launch", cudaStatus);
	}
	// ...while in-kernel faults only surface at the next synchronization.
	// cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		throwCudaError("kernel execution", cudaStatus);
	}

	// Copy the result back; cudaMemcpy is blocking, so no further sync is
	// needed after it.
	cudaStatus = cudaMemcpy(pMatrices->h_c, pMatrices->d_c, bytesC, cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		throwCudaError("cudaMemcpy back", cudaStatus);
	}

	cudaFree(pMatrices->d_c);
	cudaFree(pMatrices->d_a);
	cudaFree(pMatrices->d_b);
}

// Allocates and fills the host-side matrices for one SIZE x SIZE
// multiplication and hands the bundle back through *pData.
// A is a simple ramp (element k = k*0.1), B is all ones, and C is left
// uninitialized for the GPU result. dim1 is only used for the log line.
__host__ void initMatrices(int dim1, void** pData)
{
	cout << "blocks: " <<  SIZE/dim1 << " threads:" << dim1 << endl;

	MatMulData* pMatrices = new MatMulData();
	pMatrices->m = SIZE;
	pMatrices->n = SIZE;
	pMatrices->z = SIZE;

	const int rows = pMatrices->m;
	const int cols = pMatrices->n;
	const int inner = pMatrices->z;

	// Input A: ramp values so individual results are easy to spot-check.
	const int countA = rows*inner;
	pMatrices->h_a = new mat_field_t[countA];
	for(int idx=0 ; idx<countA ; ++idx)
	{
		pMatrices->h_a[idx] = idx*0.1;
	}

	// Input B: all ones.
	const int countB = cols*inner;
	pMatrices->h_b = new mat_field_t[countB];
	for(int idx=0 ; idx<countB ; ++idx)
	{
		pMatrices->h_b[idx] = 1;
	}

	// Output C: filled by the GPU in mulMatrices.
	pMatrices->h_c = new mat_field_t[rows*cols];

	*pData = pMatrices;
}

// Benchmarks the CUDA matrix multiplication with the Measure framework.
// The measure argument is the block edge length dim1: each run launches
// dim1 x dim1 threads per block over a (SIZE/dim1)^2 grid.
int main()
{
	chooseSmartestDevice();

	// Query the per-block thread limit so only launch configurations the
	// hardware can actually run are scheduled.
	int currentDevice;
	int maxThreads;
	cudaGetDevice(&currentDevice);
	cudaDeviceGetAttribute( &maxThreads, cudaDevAttrMaxThreadsPerBlock, currentDevice);

	Measure measure;
	measure.setOutstream("zeitmessungMatrixMul.txt");
	measure.setDebugOut(cout);
	measure.addFunction("cudaMatMul", mulMatrices, initMatrices, cleanupMatrices);
	//measure.addNewMeasure(1);	//2048 x   1	evil!
	//measure.addNewMeasure(1<<1);//1024 x   2	evil!
	//measure.addNewMeasure(1<<2);// 512 x   4
	//measure.addNewMeasure(1<<3);// 256 x   8
	measure.addNewMeasure(1<<4);// 128 x  16
	if(maxThreads >= 32*32)
	{
		measure.addNewMeasure(1<<5);//  64 x  32
	}
	if(maxThreads >= 64*64)
	{
		// 64*64 = 4096 threads per block; this guard effectively disables
		// the configuration on devices whose limit is below that.
		measure.addNewMeasure(1<<6);//  32 x  64
	}
	//measure.addNewMeasure(1<<7);//  16 x 128
	//measure.addNewMeasure(1<<8);//   8 x 256
	//measure.addNewMeasure(1<<9);//   4 x 512
	//measure.addNewMeasure(1<<10);//  2 x1024
	//measure.addNewMeasure(2<<11);//  1 x2048
	measure.setRepetitions(10);
	measure.run();
	getch(); // keep the console window open (Windows-only, <conio.h>)
}

// Selects the CUDA device with the highest compute capability and makes it
// current via cudaSetDevice.
// Returns: the status of cudaSetDevice on success paths,
//          cudaErrorNoDevice when no CUDA device is present.
// Exits the process if the device count cannot be queried at all.
cudaError_t chooseSmartestDevice()
{
	int deviceCount = 0;
	cudaError_t error_id = cudaGetDeviceCount(&deviceCount);

	if (error_id != cudaSuccess)
	{
		printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
		exit(EXIT_FAILURE);
	}

	// This function call returns 0 if there are no CUDA capable devices.
	if (deviceCount == 0)
	{
		// Bail out early: the old code fell through here and called
		// cudaSetDevice with an uninitialized device id (undefined behavior).
		printf("There are no available device(s) that support CUDA\n");
		return cudaErrorNoDevice;
	}
	printf("Detected %d CUDA Capable device(s)\n", deviceCount);

	int deviceId = 0;   // default to device 0 (fixes use of uninitialized id)
	string deviceName;
	int major = 0;
	int minor = 0;
	for (int dev = 0; dev < deviceCount; ++dev)
	{
		cudaDeviceProp deviceProp;
		cudaGetDeviceProperties(&deviceProp, dev);

		printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
		// Keep the device with the highest compute capability seen so far.
		if(deviceProp.major > major || (deviceProp.major == major && deviceProp.minor > minor))
		{
			major = deviceProp.major;
			minor = deviceProp.minor;
			deviceId = dev;
			deviceName = deviceProp.name;
		}
	}
	printf("\nChoosing your coolest CUDA device: %d: \"%s\" (highest compute capability: %d.%d)\n", deviceId, deviceName.c_str(), major, minor);
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaError_t cudaStatus = cudaSetDevice(deviceId);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
	}
	return cudaStatus;
}