#include <iostream>
#include <cstdlib>
#include <cassert>
#include <string>
#include <cstring>
#include <fstream>
#include <vector>
#include <memory>
#include <cstdlib>
#include <cuda_runtime.h>
#include <math_functions.h>
#include <cmath>
#include <ctime>
using namespace std;

// Print a diagnostic and terminate the process when a CUDA runtime call fails.
// code: status returned by the call; fileName/lineNo: call site (filled in by CK).
void CheckCudaReturnCode(cudaError_t code, const char *fileName, int lineNo)
{
	if(code != cudaSuccess) {
		cerr << "Cuda call failed at " << fileName << ":" << lineNo 
			<< " " << cudaGetErrorString(code) << endl;
		exit(-1);
	}
}

// Wrap every CUDA runtime call so failures report their source location.
#define CK(x) CheckCudaReturnCode((x), __FILE__, __LINE__)

// Allocate `size` bytes on the device, copy `size` bytes from host memory at
// `from`, and return the device pointer through `to`.
// Note: `size` is in BYTES (callers pass `sizeof` of the whole object).
// Aborts via CK on any CUDA error. The source buffer is never written, so it
// is taken as const (backward compatible for existing non-const callers).
template <typename T>
void CUDA_ALLOC_AND_COPY(T *&to, const T *from, size_t size)
{
	CK(cudaMalloc((void**)&to, size));
	CK(cudaMemcpy(to, from, size, cudaMemcpyHostToDevice));
}

// Edge length (in threads) of one square tile / thread block.
#define TILE_WIDTH 8

// Select the first CUDA device reporting compute capability >= 1.0 and make it
// current. Returns false (after printing a diagnostic) when no usable device
// exists; aborts via CK if device selection itself fails.
bool InitCUDA()
{
	// Original left `count` uninitialized and ignored the return code; if the
	// call fails (e.g. no driver) treat it the same as "no devices".
	int count = 0;
	if(cudaGetDeviceCount(&count) != cudaSuccess || count == 0) {
		cerr << "There is no cuda device" << endl;
		return false;
	}
	cout << "Total " << count << " cuda devices" << endl;

	int i;
	for(i = 0;i < count;i++) {
		cudaDeviceProp prop;
		if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
			if(prop.major >= 1) {
				break;  // first device with CC >= 1.x wins
			}
		}
	}

	if(i == count) {
		cerr << "There is no device supporting CUDA 1.x" << endl;
		return false;
	}

	CK(cudaSetDevice(i));
	return true;
}

// http://www.cnblogs.com/yeahgis/archive/2012/07/13/2590485.html
// 高斯分布的随机数，均值为0，方差为1
// Standard normal deviate (mean 0, variance 1) via the Marsaglia polar method.
// Each rejection pass yields a pair of deviates; one is returned immediately
// and the other is cached in static state for the next call.
// Not thread-safe: relies on static state and rand().
// (ref: http://www.cnblogs.com/yeahgis/archive/2012/07/13/2590485.html)
double gaussrand()
{
    static double V1, V2, S;
    static int phase = 0;
    double result;

    if(phase == 0) {
        // Rejection-sample a point strictly inside the unit circle
        // (excluding the origin, where the transform is undefined).
        do {
            double u1 = (double)rand() / RAND_MAX;
            double u2 = (double)rand() / RAND_MAX;

            V1 = 2 * u1 - 1;
            V2 = 2 * u2 - 1;
            S = V1 * V1 + V2 * V2;
        } while(S >= 1 || S == 0);

        result = V1 * sqrt(-2 * log(S) / S);
    } else {
        // Second member of the pair generated on the previous call.
        result = V2 * sqrt(-2 * log(S) / S);
    }

    phase = 1 - phase;
    return result;
}

// Square WIDTH x WIDTH row-major float matrix, usable from host and device.
template<size_t WIDTH>
struct Matrix
{
	float data[WIDTH][WIDTH];

	// Row accessor; m[r][c] works in both host and device code.
	__device__ __host__ inline float* operator[](size_t x)
	{
		return data[x];
	}

	// Fill every element with small Gaussian noise (gaussrand() scaled by 0.01).
	void init()
	{
		for(size_t r = 0;r < WIDTH;r++) {
			for(size_t c = 0;c < WIDTH;c++) {
				data[r][c] = float(gaussrand() * 0.01f);
			}
		}
	}

	// Element-wise comparison against y: print the offending delta and
	// exit(-1) as soon as any |difference| exceeds 1e-6.
	void assert_eq(Matrix<WIDTH> &y)
	{
		for(size_t r = 0;r < WIDTH;r++) {
			for(size_t c = 0;c < WIDTH;c++) {
				float delta = data[r][c] - y[r][c];
				if(delta < 0) {
					delta = -delta;
				}
				if(delta > 1E-6) {
					cerr << "delta = " << delta << endl;
					exit(-1);
				}
			}
		}
	}
};

// Reference CPU implementation of z = x * y for square WIDTH x WIDTH matrices.
// Naive triple loop, O(WIDTH^3); used to validate the GPU kernel's output.
template<size_t WIDTH>
inline void host_mul(Matrix<WIDTH> &x, Matrix<WIDTH> &y, Matrix<WIDTH> &z)
{
	for(size_t row = 0;row < WIDTH;row++) {
		for(size_t col = 0;col < WIDTH;col++) {
			float acc = 0;
			for(size_t k = 0;k < WIDTH;k++) {
				acc += x[row][k] * y[k][col];
			}
			z[row][col] = acc;
		}
	}
}

// Tiled matrix multiply on the GPU: z = x * y, square WIDTH x WIDTH matrices.
// Expected launch layout: grid (WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH),
// block (TILE_WIDTH, TILE_WIDTH); uses 2*TILE_WIDTH^2 floats of static shared
// memory per block. Precondition: WIDTH is a multiple of TILE_WIDTH — there
// are no bounds checks below.
// NOTE(review): the reference parameters must be bound to device-resident
// objects; the host side only forms the reference, it never dereferences it.
template<size_t WIDTH>
__global__ void mul(Matrix<WIDTH> &x, Matrix<WIDTH> &y, Matrix<WIDTH> &z)
{
	// Output row (i) / column (j) this thread is responsible for.
	int i = blockIdx.x * TILE_WIDTH + threadIdx.x;
	int j = blockIdx.y * TILE_WIDTH + threadIdx.y;
	// One tile of x and one tile of y staged in shared memory per iteration.
	__shared__ float cached_x[TILE_WIDTH][TILE_WIDTH];
	__shared__ float cached_y[TILE_WIDTH][TILE_WIDTH];

	// Comment toggle: the tiled path below is active; changing the leading
	// "//*" to "/*" would switch to the naive global-memory path instead.
	//*
	float t = 0;
	// Walk the K dimension one TILE_WIDTH-wide stripe at a time.
	for(int p = 0;p < WIDTH / TILE_WIDTH;p++) {
		int offset = p * TILE_WIDTH;

		// load into cached: each thread loads one element of each tile
		cached_x[threadIdx.x][threadIdx.y] = x[i][offset + threadIdx.y];
		cached_y[threadIdx.x][threadIdx.y] = y[offset + threadIdx.x][j];
		// All loads must complete before any thread reads the tiles.
		__syncthreads();

		// Partial dot product over this stripe, entirely from shared memory.
		for(int k = 0;k < TILE_WIDTH;k++) {
			t += cached_x[threadIdx.x][k] * cached_y[k][threadIdx.y];
		}
		// Keep fast threads from overwriting tiles still being read.
		__syncthreads();
	}
	/*/
	float t = 0;
	for(int k = 0;k < WIDTH;k++) {
		t += x[i][k] * y[k][j];
	}
	//*/

	z[i][j] = t;
}

// Multiply two 1024x1024 matrices on the GPU `cc` times, validate against the
// CPU reference, and report an approximate throughput figure.
void test_matrix_mul()
{
	const size_t cc = 5;        // number of timed kernel launches
	const size_t width = 1024;  // must be a multiple of TILE_WIDTH

	dim3 dimGrid(width / TILE_WIDTH, width / TILE_WIDTH);
	dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);

	// The kernel declares its tiles statically; passing the equivalent size is
	// harmless and documents the per-block shared-memory footprint.
	const size_t shared_size = TILE_WIDTH * TILE_WIDTH * 2 * sizeof(float);

	// `static` keeps these large (4 MB each) matrices off the stack.
	static Matrix<width> x, *x2;
	x.init();
	CUDA_ALLOC_AND_COPY(x2, &x, sizeof x);

	static Matrix<width> y, *y2;
	y.init();
	CUDA_ALLOC_AND_COPY(y2, &y, sizeof y);

	// z is fully overwritten by the device result, so no init() is needed
	// (the original initialized it only to throw the values away).
	static Matrix<width> z, *z2, z3;
	CUDA_ALLOC_AND_COPY(z2, &z, sizeof z);

	// CPU reference result.
	host_mul(x, y, z3);

	clock_t start = clock();
	for(int i = 0;i < cc;i++) {
		mul<<<dimGrid, dimBlock, shared_size>>>(*x2, *y2, *z2);
		CK(cudaGetLastError());  // launches don't return errors; poll explicitly
		// Blocking copy also synchronizes, so the timing below is valid.
		CK(cudaMemcpy(&z, z2, sizeof z, cudaMemcpyDeviceToHost));
	}
	clock_t end = clock();

	// Approximate throughput: element accesses per multiply, in bytes, per second.
	float s = width * width * width + width * width;
	s *= sizeof(float) * cc;
	s /= (end - start) * 1.0 / CLOCKS_PER_SEC;
	s /= 1E9;
	s = int(s * 100) / 100.0;  // truncate to two decimals for display
	cout << "test_matrix_mul speed=" << s << " GBps" << endl;

	z3.assert_eq(z);

	// Release device buffers (the original leaked all three).
	CK(cudaFree(x2));
	CK(cudaFree(y2));
	CK(cudaFree(z2));
}

// Entry point: seed the host RNG deterministically, select a CUDA device,
// then run the matrix-multiply kernel test.
int main()
{
	srand(1000);  // fixed seed -> reproducible test matrices

	if(!InitCUDA()) {
		return -1;
	}

	test_matrix_mul();
	return 0;
}

