#include <stdlib.h>
#include "cublas_v2.h"
#include <stdio.h>
#include <string.h>
#include "mkl_lapack.h"
#include "cuda.h"
#include "cuda_runtime_api.h"
#include "dept150_common.h"

#define BLOCK_SIZE  16

/*
 * In-place blocked Cholesky factorization (lower-triangular, column-major) of a
 * symmetric matrix whose block columns are distributed block-cyclically across
 * gpu_matrix.gpu_n GPUs (block column j lives on GPU j % gpu_n).
 *
 * For each pivot block column:
 *   1. the diagonal block is pulled to the host and factorized with MKL dpotrf,
 *   2. the panel below it is solved on the owning GPU (cublasDtrsm),
 *   3. the panel is broadcast to every GPU's scratch buffer,
 *   4. each GPU applies the trailing update to its own block columns
 *      (cublasDgemm for off-diagonal parts, cublasDsyrk for diagonal blocks).
 * The trailing block (size <= block_size) is factorized on the host at the end.
 *
 * Parameters:
 *   gpu_matrix - device-resident matrix descriptor (order, block_size, gpu_n,
 *                per-GPU block storage in gpu_matrix_blocks).
 *   handle     - array of gpu_n cuBLAS handles, one per device, already created.
 *
 * Returns DEPT150_SUCCESS, DEPT150_CUBLAS_ERROR on any cuBLAS failure, or
 * DEPT150_MKL_ERROR when dpotrf reports a non-SPD diagonal block.
 *
 * All host and device resources are released on every exit path via the
 * cleanup label (the previous version leaked them on error returns).
 */
dept150_error_t factorize_gpu_based_matrix(gpu_based_matrix_t gpu_matrix, cublasHandle_t* handle)
{
	int pivot_row = 0;
	int mkl_info = 0;
	double alpha_1 = 1;
	double alpha_m1 = -1;
	int cur_gpu = -1;
	cublasStatus_t stat;
	dept150_error_t result = DEPT150_SUCCESS;

	// Host staging buffer for the diagonal block handed to MKL dpotrf.
	// NOTE(review): malloc results are not checked here (no dedicated
	// out-of-memory error code exists in dept150_error_t) — TODO confirm policy.
	double *host_matrix_block = (double*)malloc(gpu_matrix.block_size*gpu_matrix.block_size*sizeof(double));
	// Per-GPU device scratch buffer holding a copy of the current pivot panel.
	double** gpu_column_blocks = (double**)malloc(gpu_matrix.gpu_n * sizeof(double *));
	// Number of block columns owned by each GPU.
	int* num_gpu_blocks = (int *)malloc(gpu_matrix.gpu_n*sizeof(int));
	// One stream per GPU; panel broadcasts are enqueued on the owner's stream.
	cudaStream_t* stream = (cudaStream_t *)malloc(gpu_matrix.gpu_n * sizeof(cudaStream_t));

	// Total number of block columns; the last one may be partial.
	int num_blocks = (gpu_matrix.order + gpu_matrix.block_size -1)/gpu_matrix.block_size;
	// Minimum number of block columns present on every GPU.
	int full_blocks_num = num_blocks/gpu_matrix.gpu_n;

	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		// GPUs with index below the remainder own one extra block column.
		int blocks_on_cur_gpu = full_blocks_num;
		if(full_blocks_num*gpu_matrix.gpu_n + i < num_blocks)
		{
			blocks_on_cur_gpu++;
		}
		num_gpu_blocks[i] = blocks_on_cur_gpu;
		checkCudaErrors(cudaMalloc(&gpu_column_blocks[i], gpu_matrix.order* gpu_matrix.block_size * sizeof(double)));
		checkCudaErrors(cudaStreamCreate(&stream[i]));
	}

	while(pivot_row + gpu_matrix.block_size < gpu_matrix.order)
	{
		// Owner of the current pivot block column (block-cyclic layout).
		cur_gpu = (pivot_row/gpu_matrix.block_size)%gpu_matrix.gpu_n;

		checkCudaErrors(cudaSetDevice(cur_gpu));
		checkCudaErrors(cudaDeviceSynchronize());

		// Local column offset of the pivot block inside the owner GPU's storage.
		int id_col_cur_gpu_block = ((pivot_row/gpu_matrix.block_size)/gpu_matrix.gpu_n)*gpu_matrix.block_size;

		// Step 1: download diagonal block A11 and factorize on the host.
		stat = cublasGetMatrix(gpu_matrix.block_size, gpu_matrix.block_size, sizeof(double),
			&(gpu_matrix.gpu_matrix_blocks[cur_gpu][IDF2(pivot_row, id_col_cur_gpu_block, gpu_matrix.order)]),
			gpu_matrix.order, host_matrix_block, gpu_matrix.block_size);
		if (stat != CUBLAS_STATUS_SUCCESS)
		{
			printf("CUBLAS DOWNLOAD MATRIX FAILED");
			result = DEPT150_CUBLAS_ERROR;
			goto cleanup;
		}
		dpotrf("L", &gpu_matrix.block_size, host_matrix_block, &gpu_matrix.block_size, &mkl_info);
		if(mkl_info)
		{
			printf("BLOCK FACTORIZATION FAILED mkl_info - %d, pivot_row - %d \n", mkl_info, pivot_row);
			result = DEPT150_MKL_ERROR;
			goto cleanup;
		}

		// Upload the factorized L11 back to the owner GPU (status now checked).
		stat = cublasSetMatrix(gpu_matrix.block_size, gpu_matrix.block_size, sizeof(double), host_matrix_block, gpu_matrix.block_size,
			&(gpu_matrix.gpu_matrix_blocks[cur_gpu][IDF2(pivot_row, id_col_cur_gpu_block, gpu_matrix.order)]), gpu_matrix.order);
		if (stat != CUBLAS_STATUS_SUCCESS)
		{
			printf("CUBLAS UPLOAD MATRIX FAILED");
			result = DEPT150_CUBLAS_ERROR;
			goto cleanup;
		}

		// Step 2: panel solve, L21 = A21 * L11^{-T}.
		stat = cublasDtrsm_v2(handle[cur_gpu], CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, gpu_matrix.order - pivot_row - gpu_matrix.block_size,
			gpu_matrix.block_size, &alpha_1, &(gpu_matrix.gpu_matrix_blocks[cur_gpu][IDF2(pivot_row, id_col_cur_gpu_block, gpu_matrix.order)]), gpu_matrix.order,
			&(gpu_matrix.gpu_matrix_blocks[cur_gpu][IDF2(pivot_row + gpu_matrix.block_size,
			id_col_cur_gpu_block, gpu_matrix.order)]), gpu_matrix.order);
		if(stat != CUBLAS_STATUS_SUCCESS)
		{
			// Fixed: previous message computed order - pivot_row - order.
			printf("CUBLAS DTRSM ERROR %d %d", (int)stat, (gpu_matrix.order - pivot_row - gpu_matrix.block_size));
			result = DEPT150_CUBLAS_ERROR;
			goto cleanup;
		}

		// Step 3: broadcast the pivot panel to every GPU's scratch buffer.
		// The self-copy to cur_gpu is intentional: the trailing update below
		// reads gpu_column_blocks[cur_gpu] as well. All copies are enqueued on
		// the owner's stream, so synchronizing that single stream is enough.
		for(int i = 0; i < gpu_matrix.gpu_n; i++)
		{
			checkCudaErrors(cudaMemcpyAsync(gpu_column_blocks[i], &gpu_matrix.gpu_matrix_blocks[cur_gpu][id_col_cur_gpu_block*gpu_matrix.order],
				gpu_matrix.order*gpu_matrix.block_size*sizeof(double), cudaMemcpyDefault, stream[cur_gpu]));
		}
		checkCudaErrors(cudaStreamSynchronize(stream[cur_gpu]));

		// Step 4: trailing update A22 -= L21 * L21' on each GPU's block columns.
		for(int i = 0; i < gpu_matrix.gpu_n ; i++)
		{
			checkCudaErrors(cudaSetDevice(i));

			// First local block column of GPU i that still needs updating;
			// GPUs at or before the owner start one block column later.
			int start_block = (pivot_row/gpu_matrix.block_size)/gpu_matrix.gpu_n;
			if(i <= cur_gpu)
			{
				start_block++;
			}

			while(start_block < num_gpu_blocks[i])
			{
				// Global row of the diagonal block in this local block column.
				int start_row_index = start_block*gpu_matrix.gpu_n *gpu_matrix.block_size+ i*gpu_matrix.block_size;

				// Off-diagonal part (rows strictly below the diagonal block).
				if (gpu_matrix.order - start_row_index - gpu_matrix.block_size > 0)
				{
					stat = cublasDgemm_v2(handle[i], CUBLAS_OP_N, CUBLAS_OP_T, gpu_matrix.order - start_row_index - gpu_matrix.block_size, gpu_matrix.block_size, gpu_matrix.block_size,
						&alpha_m1, &(gpu_column_blocks[i][IDF2( start_row_index + gpu_matrix.block_size, 0 ,gpu_matrix.order )]), gpu_matrix.order,
						&(gpu_column_blocks[i][IDF2( start_row_index,0,gpu_matrix.order)]) , gpu_matrix.order,  &alpha_1,
						&(gpu_matrix.gpu_matrix_blocks[i][IDF2( start_row_index + gpu_matrix.block_size , start_block*gpu_matrix.block_size, gpu_matrix.order )]), gpu_matrix.order);
					if (stat != CUBLAS_STATUS_SUCCESS)
					{
						printf("CUBLAS DGEMM FAILED");
						result = DEPT150_CUBLAS_ERROR;
						goto cleanup;
					}
				}

				// Clip the diagonal block at the bottom edge of the matrix.
				int symmetric_subblock_num = gpu_matrix.block_size;
				if(gpu_matrix.order - start_row_index < gpu_matrix.block_size )
				{
					symmetric_subblock_num = gpu_matrix.order - start_row_index;
				}

				// Symmetric rank-k update of the diagonal block.
				stat = cublasDsyrk_v2(handle[i], CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, symmetric_subblock_num, gpu_matrix.block_size, &alpha_m1,
					&(gpu_column_blocks[i][IDF2( start_row_index , 0 ,gpu_matrix.order )]), gpu_matrix.order,  &alpha_1,
					&(gpu_matrix.gpu_matrix_blocks[i][IDF2( start_row_index  , start_block*gpu_matrix.block_size, gpu_matrix.order )]), gpu_matrix.order);
				if (stat != CUBLAS_STATUS_SUCCESS)
				{
					// Fixed: previous message said DGEMM for a DSYRK failure.
					printf("CUBLAS DSYRK FAILED");
					result = DEPT150_CUBLAS_ERROR;
					goto cleanup;
				}
				start_block++;
			}
		}
		pivot_row+= gpu_matrix.block_size;
	}

	// Factorize the trailing block (size <= block_size) on the host.
	// Braced so its initialized locals go out of scope before the cleanup
	// label (C++ forbids a goto that bypasses an in-scope initialization).
	{
		cur_gpu = (pivot_row/gpu_matrix.block_size)%gpu_matrix.gpu_n;

		checkCudaErrors(cudaSetDevice(cur_gpu));
		checkCudaErrors(cudaDeviceSynchronize());

		int last_block_size = gpu_matrix.order - pivot_row;
		int id_col_cur_gpu_block = ((pivot_row/gpu_matrix.block_size)/gpu_matrix.gpu_n)*gpu_matrix.block_size;

		stat = cublasGetMatrix(last_block_size, last_block_size, sizeof(double),
			&(gpu_matrix.gpu_matrix_blocks[cur_gpu][IDF2(pivot_row, id_col_cur_gpu_block, gpu_matrix.order)]),
			gpu_matrix.order, host_matrix_block, gpu_matrix.block_size);
		if (stat != CUBLAS_STATUS_SUCCESS)
		{
			printf("CUBLAS DOWNLOAD MATRIX FAILED");
			result = DEPT150_CUBLAS_ERROR;
			goto cleanup;
		}
		// lda is block_size: the stride of the host staging buffer, which may
		// exceed last_block_size for a partial trailing block.
		dpotrf("L", &last_block_size, host_matrix_block, &gpu_matrix.block_size, &mkl_info);
		if(mkl_info)
		{
			printf("BLOCK FACTORIZATION FAILED mkl_info - %d, pivot_row - %d \n", mkl_info, pivot_row);
			result = DEPT150_MKL_ERROR;
			goto cleanup;
		}

		stat = cublasSetMatrix(last_block_size, last_block_size, sizeof(double), host_matrix_block, gpu_matrix.block_size,
			&(gpu_matrix.gpu_matrix_blocks[cur_gpu][IDF2(pivot_row, id_col_cur_gpu_block, gpu_matrix.order)]), gpu_matrix.order);
		if (stat != CUBLAS_STATUS_SUCCESS)
		{
			printf("CUBLAS UPLOAD MATRIX FAILED");
			result = DEPT150_CUBLAS_ERROR;
			goto cleanup;
		}
	}

cleanup:
	// Single exit path: release device scratch buffers, streams, and host
	// allocations regardless of success or failure.
	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		checkCudaErrors(cudaFree(gpu_column_blocks[i]));
		checkCudaErrors(cudaStreamDestroy(stream[i]));
	}
	free(host_matrix_block);
	free(gpu_column_blocks);
	free(num_gpu_blocks);
	free(stream);

	return result;
}

/*
 * Multi-GPU LPPSADB solver: Cholesky-factorizes the order x order symmetric
 * matrix on all available GPUs, estimates its condition number, and solves
 * A*x = b by host-side forward/backward substitution (x overwrites b;
 * matrix is overwritten with its Cholesky factor).
 *
 * Parameters:
 *   order  - matrix dimension.
 *   matrix - column-major order*order input matrix; overwritten in place.
 *   b      - right-hand side; overwritten with the solution.
 *   E      - out: a-posteriori error bound cond*(ea+eb)/(1-eb).
 *   El     - out parameter that is never assigned here.
 *            NOTE(review): presumably the "E1" bound hostMatrixCopy was meant
 *            for — confirm intended semantics.
 *   cond   - out: estimated condition number.
 *   ea, eb - relative error tolerances; 0 means "use MACH_EPS".
 *
 * Returns DEPT150_SUCCESS, DEPT150_CUBLAS_ERROR, DEPT150_MKL_ERROR,
 * DEPT150_MACHINE_SINGULAR, or DEPT150_POSSIBLE_SINGULAR.
 *
 * Host allocations (handle array, hostMatrixCopy) are now freed on every
 * return path; the previous version leaked both unconditionally.
 * NOTE(review): the cuBLAS handles themselves and the device-side matrix
 * blocks are still not destroyed here — no destroy/free helper is visible in
 * this file; confirm ownership with the callers.
 */
dept150_error_t dept150_mgpu_LPPSADB(int order, double* matrix, double* b, double *E, double *El, double *cond, double ea, double eb)
{
	gpu_based_matrix_t gpu_matrix;
	cublasHandle_t* handle;
	double matrixNorm;
	cublasStatus_t stat;
	double *hostMatrixCopy;//used to calculate E1

	checkCudaErrors(cudaGetDeviceCount(&(gpu_matrix.gpu_n)));
	gpu_matrix.order = order;
	// Block-size heuristic: small systems use smaller blocks so the blocked
	// algorithm still has several block columns to distribute.
	gpu_matrix.block_size = (order <= 1000) ? 128 : 512;

	handle = (cublasHandle_t *)malloc(gpu_matrix.gpu_n * sizeof(cublasHandle_t));
	if(initCublasHandles(handle, gpu_matrix.gpu_n))
	{
		printf("CUBLAS HANDLE INITIALIZATION FAILED\n");
		free(handle);
		return DEPT150_CUBLAS_ERROR;
	}

	if(copyMatrixToDevices(matrix, &gpu_matrix,handle ))
	{
		printf("INITIALIZATION OF DEVICE MATRIX FAILED");
		free(handle);
		return DEPT150_CUBLAS_ERROR;
	}

	// ||A|| is needed below for the condition-number estimate.
	stat = getMatrixNorm(gpu_matrix, handle, &matrixNorm);
	if (stat != CUBLAS_STATUS_SUCCESS)
	{
		printf("CUBLAS DOWNLOAD MATRIX FAILED");
		free(handle);
		return DEPT150_CUBLAS_ERROR;
	}
	checkCudaErrors( cudaDeviceSynchronize());
	printf("matrix norm %e\n", matrixNorm);

	// NOTE(review): this copy is allocated "to calculate E1" but no such
	// computation exists in this function (El is never written). Kept for the
	// pending feature, and now freed on every path instead of leaking.
	hostMatrixCopy = (double *)malloc(order*order*sizeof(double));
	memcpy(hostMatrixCopy, matrix, order*order*sizeof(double));

	dept150_error_t dept150Error = factorize_gpu_based_matrix(gpu_matrix, handle);
	if(dept150Error != DEPT150_SUCCESS)
	{
		free(hostMatrixCopy);
		free(handle);
		return dept150Error;
	}

	// Bring the Cholesky factor back to the host; the remaining solve steps
	// operate on the host copy.
	copyMatrixFromDevices(matrix, gpu_matrix);
	*cond = 0;
	findMatrixCond(matrix, gpu_matrix, handle, matrixNorm, cond, true, false);
	printf("matrix cond %e\n", *cond);

	// Host helpers below no longer need these.
	free(hostMatrixCopy);
	free(handle);

	// cond so large that adding 1.0 is absorbed by rounding: the matrix is
	// singular at machine precision.
	if (*cond +1.0 == *cond)
	{
		return DEPT150_MACHINE_SINGULAR;
	}

	// Solve L*temp = b, then L'*x = temp (x overwrites b).
	solveLTempEqualsB(matrix, b, order, false);
	solveUXEqualsTemp(matrix, b, order, true);

	// Default tolerances to machine epsilon when the caller passes 0.
	if (eb==0.) eb = MACH_EPS;
	if (ea==0.) ea = MACH_EPS;

	if(*cond * ea > 1.0)
	{
		return DEPT150_POSSIBLE_SINGULAR;
	}
	// A-posteriori relative error bound.
	*E = *cond*(ea+eb)/(1.0-eb);

	return DEPT150_SUCCESS;
}
