#include "rungekutta2.h"

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "../rungekutta2_serial/Timer.h"

#define EPS 	10e-7
extern Stopwatch *s;

/* Computes res = a * b for n-by-n row-major matrices: one thread per output
 * element, threads outside the matrix do nothing.  Expects a 2D launch whose
 * grid covers at least n x n threads.  res must not alias a or b. */
__device__ void squareMatrixMult(float *a, float *b, float *res, int n)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;

	if (col < n && row < n)
	{
		const int out = row * n + col;
		res[out] = 0.0f;

		/* dot product of row `row` of a with column `col` of b */
		for (int k = 0; k < n; k++)
			res[out] += a[row * n + k] * b[k * n + col];
	}
	/* keep the barrier outside the guard so every thread of the block
	 * reaches it (block-scoped sync only) */
	__syncthreads();
}

/* Element-wise in-place addition res += a for n-by-n row-major matrices. */
__device__ void squareMatrixAdd(float *res, float *a, int n)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;

	if (row < n && col < n)
		res[row * n + col] += a[row * n + col];

	/* barrier reached by all threads of the block, inside the matrix or not */
	__syncthreads();
}

/* Computes res = a * v, where a is an n-by-n row-major matrix and v has n
 * entries.  Only the threads of column 0 do work: thread (0, row) computes
 * res[row]; every other thread just joins the barrier. */
__device__ void squareMatrixVectorMult(float *a, float *v, float *res, int n)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;

	if (col == 0 && row < n)
	{
		float *aRow = a + row * n;
		res[row] = 0.0f;
		for (int k = 0; k < n; k++)
			res[row] += aRow[k] * v[k];
	}
	/* block-scoped barrier, outside the guard so no thread skips it */
	__syncthreads();
}

/* In-place scalar multiplication a *= alpha for an n-by-n row-major matrix. */
__device__ void squareMatrixScalarMult(float *a, int n, float alpha)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;

	if (row < n && col < n)
		a[row * n + col] *= alpha;

	__syncthreads();
}

/* In-place a += I for an n-by-n row-major matrix: only diagonal threads
 * (col == row) touch memory. */
__device__ void squareMatrixIdentityAdd(float *a, int n)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;

	if (col < n && row < n && col == row)
		a[row * n + col] += 1.0f;

	__syncthreads();
}

/*
*The midpoint method is a second-order Runge-Kutta method:
*x(0) given
*x(n+1) = x(n) + h
*y(n+1) = y(n) + h*f(  x(n) + h/2,    y(n) + h/2*f(x(n), y(n))  )
*
*Differential equation:
*u' = A*u,   where A is a real n-by-n matrix (A in R^(n x n))
*f(x,y) = A*y
*
*Algorithm:
*y(n+1) = y(n) + h*f(  x(n) + h/2,    y(n) + h/2*f(x(n), y(n))  ) =
*= y(n) + h*A*(y(n) + h/2*A*y(n)) =
*= (E + h*A + h*h/2*A*A)*y(n) =
*= Z*y(n)
*
*y(n+1) = Z*y(n)
*/

/*
 * Builds the midpoint-method propagation matrix
 *     Z = I + h*A + (h^2/2)*A*A
 * in z, then applies it repeatedly: y(i+1) = Z * y(i) for i = 0..steps-1.
 * y holds steps+1 consecutive n-vectors; y(0) must be initialized by the host.
 *
 * NOTE(review): the __syncthreads() calls in the helpers synchronize only
 * within a block, but the host launches a multi-block grid whenever
 * n > BLOCKDIM.  squareMatrixVectorMult reads entire rows of z (written by
 * threads of other blocks) and, across loop iterations, vector entries
 * written by other blocks, with no grid-wide barrier in between.  Results
 * are therefore only guaranteed correct when the whole problem fits in one
 * block (n <= BLOCKDIM) — confirm intended problem sizes or split the steps
 * into separate kernel launches.
 */
__global__ void kernel(float *a, float *z, int n, float h, int steps, float *y)
{
	/* Z construction: each element of z is read and written only by the one
	 * thread owning that (x, y) position, so these five stages need no
	 * cross-thread ordering among themselves. */
	squareMatrixMult(a, a, z, n);       /* z = A*A                        */
	squareMatrixScalarMult(z, n, h/2);  /* z = (h/2)*A*A                  */
	squareMatrixAdd(z, a, n);           /* z = A + (h/2)*A*A              */
	squareMatrixScalarMult(z, n, h);    /* z = h*A + (h^2/2)*A*A          */
	squareMatrixIdentityAdd(z, n);      /* z = I + h*A + (h^2/2)*A*A = Z  */

	/* Time stepping: y(i+1) = Z * y(i), each step's output is the next
	 * step's input (see the cross-block synchronization note above). */
	for(int i = 0; i < steps; i++)
	{
		squareMatrixVectorMult(z, y + i * n, y + (i + 1)*n, n);
	}
}

#define BLOCKDIM	16

/* Aborts with a diagnostic if a CUDA runtime call failed. */
static void checkCuda(cudaError_t err, const char *what)
{
	if (err != cudaSuccess)
	{
		fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
}

/*
 * Integrates u' = A*u with the midpoint method on the GPU.
 *
 * a     : n*n row-major matrix A (host memory)
 * n     : system dimension
 * h     : time step
 * steps : number of steps to take
 * y     : on entry, first n floats hold y(0); on exit, all steps+1
 *         n-vectors y(0)..y(steps) — y must hold (steps+1)*n floats.
 *
 * The global Stopwatch *s times kernel execution only (not the copies).
 */
void cuda_midpointMethod(float *a, int n, float h, int steps, float *y)
{
	float* gpu_a;
	float* gpu_z;
	float* gpu_y;
	checkCuda( cudaMalloc( (void**)&gpu_a, n * n * sizeof(float) ), "malloc gpu_a" );
	checkCuda( cudaMalloc( (void**)&gpu_z, n * n * sizeof(float) ), "malloc gpu_z" );
	checkCuda( cudaMalloc( (void**)&gpu_y, n * (steps+1) * sizeof(float) ), "malloc gpu_y" );

	checkCuda( cudaMemcpy( gpu_a, a, n * n * sizeof(float), cudaMemcpyHostToDevice ), "copy a" );
	checkCuda( cudaMemset( gpu_y, 0, n * (steps+1) * sizeof(float) ), "memset gpu_y" );
	/* only y(0) comes from the host; the kernel fills y(1)..y(steps) */
	checkCuda( cudaMemcpy( gpu_y, y, n * sizeof(float), cudaMemcpyHostToDevice ), "copy y0" );

	/* ceil-div so a partial tile still gets a block when n % BLOCKDIM != 0 */
	int blocks = (n + BLOCKDIM - 1) / BLOCKDIM;
	dim3 dimGrid(blocks,blocks,1);
	dim3 dimBlock(BLOCKDIM,BLOCKDIM,1);
	s->start();
	kernel<<<dimGrid,dimBlock>>>( gpu_a, gpu_z, n, h, steps, gpu_y );
	/* catch launch-configuration errors, then execution errors; the sync
	 * must precede s->stop() so the timer covers the kernel itself.
	 * cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize. */
	checkCuda( cudaGetLastError(), "kernel launch" );
	checkCuda( cudaDeviceSynchronize(), "kernel execution" );
	s->stop();
	/* copy back the full trajectory, not just the final vector */
	checkCuda( cudaMemcpy( y, gpu_y, (steps + 1) * n * sizeof(float), cudaMemcpyDeviceToHost ), "copy back" );

	cudaFree( gpu_y );
	cudaFree( gpu_z );
	cudaFree( gpu_a );
}

/* Returns true when a and b agree element-wise within the absolute
 * tolerance EPS over their first n entries. */
bool compareArrays(float *a, float *b, int n)
{
	for (int i = 0; i < n; i++)
	{
		if ( fabs(a[i] - b[i]) >= EPS )
			return false;
	}
	return true;
}