#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cuda.h"
#include "cuda_runtime_api.h"
#include "cublas_v2.h"
#include "dept150_common.h"


cublasStatus_t swap_GPU_matrix_rows(int first_row, int second_row,gpu_based_matrix_t gpu_matrix, int* num_gpu_blocks , cublasHandle_t* handle);

/*
 * Debug helper: download one order x block_size column panel from device
 * memory and print it on the host via print_matrix.
 *
 * gpu_column_block - device pointer to the panel (leading dimension = order)
 * gpu_matrix       - descriptor supplying order and block_size
 */
void printGPUColumnBlock(double * gpu_column_block, gpu_based_matrix_t gpu_matrix)
{
	double *matrix = (double*)malloc(gpu_matrix.order*gpu_matrix.block_size*sizeof(double));
	if (matrix == NULL)
	{
		printf("HOST ALLOCATION FAILED IN printGPUColumnBlock\n");
		return;
	}

	//copy the whole panel device->host; bail out rather than print garbage on failure
	cublasStatus_t stat = cublasGetMatrix(gpu_matrix.order, gpu_matrix.block_size, sizeof(double), gpu_column_block, gpu_matrix.order, matrix, gpu_matrix.order);
	if (stat == CUBLAS_STATUS_SUCCESS)
	{
		print_matrix(gpu_matrix.order, gpu_matrix.block_size, matrix, gpu_matrix.order);
	}
	else
	{
		printf("CUBLAS GET MATRIX FAILED IN printGPUColumnBlock\n");
	}

	free(matrix);//was leaked in the original version
}


/*
 * Blocked right-looking Gaussian (LU) factorization with partial pivoting of a
 * matrix whose column blocks are distributed round-robin across the available
 * GPUs (block b lives on GPU b % gpu_n).
 *
 * gpu_matrix - descriptor of the distributed matrix (order, block_size, gpu_n,
 *              per-GPU device pointers in gpu_matrix_blocks)
 * handle     - one cuBLAS handle per GPU
 * ipiv       - out: row permutation; initialized to identity, rows swapped in
 *              step with the pivoting done on the device
 *
 * Returns DEPT150_SUCCESS, DEPT150_MACHINE_SINGULAR if a pivot falls below
 * MACH_EPS, or DEPT150_CUBLAS_ERROR on a failed cuBLAS call.  All per-GPU
 * scratch buffers and streams are released on every exit path.
 */
dept150_error_t gaus_factorize_gpu_based_matrix(gpu_based_matrix_t gpu_matrix, cublasHandle_t* handle, int *ipiv)
{
	int pivot_row = 0;
	double alpha_1 = 1;
	double alpha_m1 = -1;
	int cur_gpu = -1;
	cublasStatus_t stat;
	dept150_error_t result = DEPT150_SUCCESS;

	//per-GPU scratch panel (order x block_size) that receives a broadcast copy
	//of the current pivot column block
	double** gpu_column_blocks =(double**)malloc(gpu_matrix.gpu_n * sizeof(double *));

	//array that contains number of blocks used by each GPU
	int* num_gpu_blocks = (int *)malloc(gpu_matrix.gpu_n*sizeof(int));

	cudaStream_t* stream = (cudaStream_t *)malloc(gpu_matrix.gpu_n * sizeof(cudaStream_t));

	int num_blocks = (gpu_matrix.order + gpu_matrix.block_size -1)/gpu_matrix.block_size;

	//minimum number of blocks which would be present on each GPU
	int full_blocks_num = num_blocks/gpu_matrix.gpu_n;
	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		//number of blocks that should be present on current GPU
		//(first num_blocks % gpu_n devices get one extra block)
		int blocks_on_cur_gpu = full_blocks_num;
		if(full_blocks_num*gpu_matrix.gpu_n + i < num_blocks)
		{
			blocks_on_cur_gpu++;
		}

		num_gpu_blocks[i] = blocks_on_cur_gpu;
		checkCudaErrors(cudaMalloc(&gpu_column_blocks[i], gpu_matrix.order* gpu_matrix.block_size * sizeof(double)));

		checkCudaErrors(cudaStreamCreate(&stream[i]));
	}

	//init ipiv with the identity permutation
	for(int i = 0; i < gpu_matrix.order; i++)
	{
		ipiv[i] = i;
	}

	while(pivot_row  < gpu_matrix.order)
	{
		//GPU that owns the current pivot column block
		cur_gpu = (pivot_row/gpu_matrix.block_size)%gpu_matrix.gpu_n;

		checkCudaErrors(cudaSetDevice(cur_gpu));
		checkCudaErrors(cudaDeviceSynchronize());

		int maxRow = -1;
		//unblocked factorization of the diagonal block column, one column at a time
		for (int cur_row = pivot_row; (cur_row < pivot_row + gpu_matrix.block_size)&&(cur_row <gpu_matrix.order) ; cur_row++ )
		{
			int storedGPUElementCoordinate = getGPUCoordinate(gpu_matrix, cur_row, cur_row);
			cublasIdamax(handle[cur_gpu], gpu_matrix.order - cur_row, &(gpu_matrix.gpu_matrix_blocks[cur_gpu][storedGPUElementCoordinate]), 1, &maxRow );
			checkCudaErrors(cudaDeviceSynchronize());
			maxRow = cur_row + maxRow -1;//decremented because cublasIdamax returns a 1-based index
			if(maxRow != cur_row)
			{
				swap_GPU_matrix_rows(cur_row, maxRow, gpu_matrix, num_gpu_blocks, handle );
				checkCudaErrors(cudaSetDevice(cur_gpu));
				int temp = ipiv[cur_row];
				ipiv[cur_row] =ipiv[maxRow];
				ipiv[maxRow] = temp;
			}

			double diag_value = 1;
			//fetch the pivot value to the host
			cublasGetVector(1,sizeof(double), &(gpu_matrix.gpu_matrix_blocks[cur_gpu][storedGPUElementCoordinate]), 1, &diag_value, 1);

			//fabs, not abs: integer abs() truncates the double and would
			//misclassify any pivot with |value| < 1 as singular (or not)
			if(fabs(diag_value) < MACH_EPS)
			{
				result = DEPT150_MACHINE_SINGULAR;
				goto cleanup;
			}
			diag_value = 1.0/diag_value;

			if (cur_row + 1 < gpu_matrix.order)
			{
				//divide the column below the pivot by the diag element Lkk
				int nextElementCoordinate = getGPUCoordinate(gpu_matrix, cur_row + 1, cur_row);
				stat = cublasDscal(handle[cur_gpu], gpu_matrix.order - cur_row - 1 ,&diag_value, &(gpu_matrix.gpu_matrix_blocks[cur_gpu][nextElementCoordinate ]), 1);
				if(stat != CUBLAS_STATUS_SUCCESS)
				{
					printf("CUBLAS DSCAL FAILED");
					result = DEPT150_CUBLAS_ERROR;
					goto cleanup;
				}

				if (cur_row + 1 < gpu_matrix.block_size + pivot_row)
				{
					//rank-1 update of the trailing part of the diagonal block
					//(the L21 transformation)
					int nextHorizontalElementCoordinate = getGPUCoordinate(gpu_matrix, cur_row, cur_row +1);
					int nextDiagonalElementCoordinate = getGPUCoordinate(gpu_matrix, cur_row +1, cur_row +1);

					int lastL21ColumnIndex = pivot_row + gpu_matrix.block_size -1;
					if(lastL21ColumnIndex >= gpu_matrix.order)
					{
						lastL21ColumnIndex = gpu_matrix.order - 1;
					}

					stat = cublasDger(handle[cur_gpu],  gpu_matrix.order - cur_row - 1, lastL21ColumnIndex - cur_row , &alpha_m1,
						&(gpu_matrix.gpu_matrix_blocks[cur_gpu][nextElementCoordinate]), 1,
						&(gpu_matrix.gpu_matrix_blocks[cur_gpu][nextHorizontalElementCoordinate ]), gpu_matrix.order, 
						&(gpu_matrix.gpu_matrix_blocks[cur_gpu][nextDiagonalElementCoordinate]), gpu_matrix.order);

					if(stat != CUBLAS_STATUS_SUCCESS)
					{
						printf("CUBLAS DGER FAILED");
						result = DEPT150_CUBLAS_ERROR;
						goto cleanup;
					} 
				}
			}

			
		}
		if (pivot_row + gpu_matrix.block_size < gpu_matrix.order)
		{
			checkCudaErrors(cudaDeviceSynchronize());
			//broadcast the factorized pivot column block (L11 and L21) from the
			//owning GPU into every GPU's gpu_column_blocks scratch panel
			int id_col_cur_gpu_block = ((pivot_row/gpu_matrix.block_size)/gpu_matrix.gpu_n)*gpu_matrix.block_size;
			for(int i = 0; (i < gpu_matrix.gpu_n) /*&& (i!=cur_gpu)*/; i++)
			{	
				checkCudaErrors(cudaMemcpyAsync(gpu_column_blocks[i], &gpu_matrix.gpu_matrix_blocks[cur_gpu][id_col_cur_gpu_block*gpu_matrix.order],
					gpu_matrix.order*gpu_matrix.block_size*sizeof(double), cudaMemcpyDefault, stream[cur_gpu]));

			}

			checkCudaErrors(cudaStreamSynchronize(stream[cur_gpu]));

			//L21 was found, now looking for U12, using cublasDtrsm for this in cycle
			int globalStartBlockNumber = pivot_row/gpu_matrix.block_size + 1;
			int globalGPUSectionNumber = globalStartBlockNumber/ gpu_matrix.gpu_n;
			int startBlockSectionNumber = globalStartBlockNumber%gpu_matrix.gpu_n;
			for(int i = 0; i < gpu_matrix.gpu_n; i++)
			{
				checkCudaErrors(cudaSetDevice(i));
				//for each gpu factorize blocks of U12 (which belong to this gpu) independently
				int startBlockNumber = globalGPUSectionNumber;
				if((i%gpu_matrix.gpu_n)< startBlockSectionNumber )
				{
					startBlockNumber++;
				}
				while(startBlockNumber < num_gpu_blocks[i])
				{
					//the last block may be narrower than block_size
					int U12SubBlockColumns = gpu_matrix.block_size;
					int globalSubBlockFirstElementIndex = gpu_matrix.gpu_n*gpu_matrix.block_size*startBlockNumber + i*gpu_matrix.block_size;
					if(globalSubBlockFirstElementIndex + gpu_matrix.block_size > gpu_matrix.order )
					{
						U12SubBlockColumns = gpu_matrix.order - globalSubBlockFirstElementIndex;
					}

					int firstElementInBlockIndex = IDF2(pivot_row, startBlockNumber*gpu_matrix.block_size, gpu_matrix.order);
					cublasDtrsm(handle[i], CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
						gpu_matrix.block_size, U12SubBlockColumns, &alpha_1, &gpu_column_blocks[i][ pivot_row ], gpu_matrix.order, 
						&gpu_matrix.gpu_matrix_blocks[i][firstElementInBlockIndex], gpu_matrix.order);
					startBlockNumber++;
				}
			}

			//update the trailing submatrix: A22 -= L21 * U12
			int rowsInA22 = gpu_matrix.order - pivot_row - gpu_matrix.block_size;
			if (rowsInA22 > 0)
			{
				for(int i = 0; i < gpu_matrix.gpu_n; i++)
				{
					checkCudaErrors(cudaSetDevice(i));
					//each gpu updates its own column blocks independently
					int startBlockNumber = globalGPUSectionNumber;
					if((i%gpu_matrix.gpu_n)< startBlockSectionNumber )
					{
						startBlockNumber++;
					}

					while (startBlockNumber < num_gpu_blocks[i])
					{
						int U22SubBlockColumns = gpu_matrix.block_size;
						int globalSubBlockFirstElementIndex = gpu_matrix.gpu_n*gpu_matrix.block_size*startBlockNumber + i*gpu_matrix.block_size;
						if(globalSubBlockFirstElementIndex + gpu_matrix.block_size > gpu_matrix.order )
						{
							U22SubBlockColumns = gpu_matrix.order - globalSubBlockFirstElementIndex;
						}

						int U12BlockFirstElementIndex = IDF2(pivot_row, startBlockNumber*gpu_matrix.block_size, gpu_matrix.order);
						int U22BlockFirstElementIndex = IDF2(pivot_row + gpu_matrix.block_size, startBlockNumber*gpu_matrix.block_size, gpu_matrix.order);
						cublasDgemm(handle[i], CUBLAS_OP_N, CUBLAS_OP_N, rowsInA22, U22SubBlockColumns, gpu_matrix.block_size, &alpha_m1,
							&gpu_column_blocks[i][pivot_row+gpu_matrix.block_size], gpu_matrix.order,
							&gpu_matrix.gpu_matrix_blocks[i][U12BlockFirstElementIndex], gpu_matrix.order,  &alpha_1,
							&gpu_matrix.gpu_matrix_blocks[i][U22BlockFirstElementIndex], gpu_matrix.order);
						startBlockNumber++;
					}
				} 
			} 
		}
		pivot_row+= gpu_matrix.block_size;
	}

cleanup:
	//release per-GPU scratch panels and streams; this also runs on the error
	//paths, which previously leaked every allocation made above
	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		checkCudaErrors(cudaFree(gpu_column_blocks[i]));
		checkCudaErrors(cudaStreamDestroy(stream[i]));
	}

	free(gpu_column_blocks);
	free(stream);
	free(num_gpu_blocks);
	return result;
}

/*
 * Driver: factorize a dense order x order system A*x = b across all visible
 * GPUs, estimate the condition number, solve the system in place in b, and
 * compute a forward error bound.
 *
 * order  - matrix dimension
 * matrix - in: column-major A; out: its LU factors (copied back from devices)
 * b      - in: right-hand side; out: solution x
 * ipiv   - out: row permutation produced by pivoting
 * E      - out: forward error bound cond*(ea+eb)/(1-eb)
 * El     - accepted for interface compatibility; not used in the visible code
 * cond   - out: estimated condition number
 * ea, eb - relative errors of A and b (0 means machine epsilon)
 */
dept150_error_t dept150_mgpu_LGESADB(int order, double* matrix, double* b, int *ipiv, double *E, double *El, double *cond, double ea, double eb)
{
	gpu_based_matrix_t gpu_matrix;
	cublasHandle_t* handle;
	double matrixNorm;
	cublasStatus_t stat;

	checkCudaErrors(cudaGetDeviceCount(&(gpu_matrix.gpu_n)));
	gpu_matrix.order = order;
	//block-size heuristic: small problems use narrow panels, large ones wide panels
	if(order <= 1000)
	{
		gpu_matrix.block_size = min(32, order);
	}
	else
	{
		gpu_matrix.block_size = 512;
	}

	handle = (cublasHandle_t *)malloc(gpu_matrix.gpu_n * sizeof(cublasHandle_t));
	if(initCublasHandles(handle, gpu_matrix.gpu_n))
	{
		printf("CUBLAS HANDLE INITIALIZATION FAILED\n");
		free(handle);//was leaked on this path
		return DEPT150_CUBLAS_ERROR;
	}

	if(copyMatrixToDevices(matrix , &gpu_matrix,handle ))
	{
		printf("INITIALIZATION OF DEVICE MATRIX FAILED");
		free(handle);
		return DEPT150_CUBLAS_ERROR;
	}

	//find matrix norm (needed by the condition-number estimate below)
	stat = getMatrixNorm(gpu_matrix, handle, &matrixNorm);
	if (stat != CUBLAS_STATUS_SUCCESS)
	{
			printf("CUBLAS DOWNLOAD MATRIX FAILED");
			free(handle);
			return DEPT150_CUBLAS_ERROR;
	}
	checkCudaErrors( cudaDeviceSynchronize());
	printf("matrix norm %e\n", matrixNorm);

	//NOTE(review): the original code also malloc'd and memcpy'd a full host
	//copy of the matrix here ("used to calculate E1") that was never read or
	//freed; the dead order*order*8-byte allocation has been removed.

	dept150_error_t dept150Error =  gaus_factorize_gpu_based_matrix(gpu_matrix, handle, ipiv);
	if(dept150Error != DEPT150_SUCCESS)
	{
		free(handle);
		return dept150Error;
	}

	copyMatrixFromDevices(matrix, gpu_matrix);

	*cond = 0;
	findMatrixCond(matrix, gpu_matrix, handle, matrixNorm, cond, false, true);
	printf("matrix cond %e\n", *cond);

	//cond so large that cond + 1 == cond (overflow/inf) -> treat as singular
	if (*cond +1.0 == *cond)
	{
		free(handle);
		return DEPT150_MACHINE_SINGULAR;
	}

	//solve the system: permuted forward substitution, then back substitution
	cudaStream_t* stream = (cudaStream_t *)malloc(gpu_matrix.gpu_n * sizeof(cudaStream_t));
	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		checkCudaErrors(cudaStreamCreate(&stream[i]));
	}
	solveLowerTriangularSystem(gpu_matrix, handle, stream, b, ipiv, true);
	solveUpperTriangularSystem(gpu_matrix, handle, stream, b, false);

	//release streams and cublas handles (previously both were leaked)
	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		checkCudaErrors(cudaStreamDestroy(stream[i]));
		cublasDestroy(handle[i]);
	}
	free(stream);
	free(handle);
	//NOTE(review): the device-side matrix blocks allocated by
	//copyMatrixToDevices are still not released here — confirm ownership
	//(a matching freeing helper may exist elsewhere) and add the free.

	if (eb==0.) eb = MACH_EPS;
	if (ea==0.) ea = MACH_EPS;

	if(*cond * ea > 1.0)
	{
		return DEPT150_POSSIBLE_SINGULAR;
	}
	//forward error bound from the condition number and input perturbations
	*E = *cond*(ea+eb)/(1.0-eb);

	return DEPT150_SUCCESS;	
}

/*
 * Exchange two full matrix rows across the distributed storage.  Each GPU owns
 * a set of column blocks, so the swap is issued once per device over all of
 * that device's columns (num_gpu_blocks[dev] * block_size entries, strided by
 * the leading dimension).  All devices are synchronized before returning.
 *
 * Returns the first failing cublasDswap status, or CUBLAS_STATUS_SUCCESS.
 */
cublasStatus_t swap_GPU_matrix_rows(int first_row, int second_row,gpu_based_matrix_t gpu_matrix, int* num_gpu_blocks , cublasHandle_t* handle)
{
	const int gpu_count = gpu_matrix.gpu_n;
	const int lead_dim = gpu_matrix.order;

	//phase 1: launch the row exchange on every device
	for (int dev = 0; dev < gpu_count; dev++)
	{
		checkCudaErrors(cudaSetDevice(dev));

		double* dev_blocks = gpu_matrix.gpu_matrix_blocks[dev];
		int columns_on_dev = num_gpu_blocks[dev] * gpu_matrix.block_size;

		cublasStatus_t swap_status = cublasDswap(handle[dev], columns_on_dev,
			dev_blocks + first_row, lead_dim,
			dev_blocks + second_row, lead_dim);

		if (swap_status != CUBLAS_STATUS_SUCCESS)
		{
			printf("CUBLAS SWAP FAILED");
			return swap_status;
		}
	}

	//phase 2: wait for every device to finish before the caller proceeds
	for (int dev = 0; dev < gpu_count; dev++)
	{
		checkCudaErrors(cudaSetDevice(dev));
		checkCudaErrors(cudaDeviceSynchronize());
	}

	return CUBLAS_STATUS_SUCCESS;
}
