#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>

#include "dept150_common.h"


using namespace std;

/*
 * Selects and initializes the CUDA device devID.
 *
 * A negative devID is clamped to 0.  If devID exceeds the number of
 * available devices, an error is printed and -devID is returned.  If no
 * CUDA device exists, or the chosen device reports compute capability
 * below 1.0, the process exits with -1.
 *
 * Returns the device id actually selected on success.
 *
 * Fixed: removed a stray line-continuation backslash after exit(-1)
 * (a leftover from a macro copy-paste that spliced the following line).
 */
int gpuDeviceInit(int devID)
{
	int deviceCount;
	checkCudaErrors(cudaGetDeviceCount(&deviceCount));
	if (deviceCount == 0) {
		fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
		exit(-1);
	}
	if (devID < 0)
		devID = 0;
	if (devID > deviceCount-1) {
		fprintf(stderr, "\n");
		fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
		fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
		fprintf(stderr, "\n");
		return -devID;
	}

	cudaDeviceProp deviceProp;
	checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
	if (deviceProp.major < 1) {
		fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
		exit(-1);
	}

	checkCudaErrors( cudaSetDevice(devID) );
	printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
	return devID;
}

// Prints a rows-by-cols matrix with leading dimension ld to stdout,
// two spaces between entries and one matrix row per output line.
void print_matrix(int rows, int cols, double * matrix, int ld)
{
	for (int row = 0; row < rows; ++row)
	{
		for (int col = 0; col < cols; ++col)
			cout << matrix[IDF2(row, col, ld)] << "  ";
		cout << endl;
	}
}

/*
 * Debug helper: gathers the block-cyclic distributed matrix from all GPUs
 * into a temporary host buffer and prints it to stdout.
 *
 * Fixed: the temporary host buffer was leaked.
 */
void printGPUMatrix(gpu_based_matrix_t gpu_matrix)
{
	double *matrix = (double*)malloc(gpu_matrix.order*gpu_matrix.order*sizeof(double));
	copyMatrixFromDevices(matrix, gpu_matrix);

	print_matrix(gpu_matrix.order, gpu_matrix.order, matrix, gpu_matrix.order);

	free(matrix);	// was leaked
}

/*
 * Gathers a column-block-cyclic distributed matrix from all GPUs into the
 * host array `matrix` (order x order, leading dimension = order, column-major
 * per cuBLAS convention).  Block j stored on GPU i covers the global columns
 * starting at (j*gpu_n + i)*block_size.  The caller's current CUDA device is
 * restored before returning.
 *
 * Returns EXIT_SUCCESS, or EXIT_FAILURE if any cuBLAS copy fails.
 */
int copyMatrixFromDevices(double *matrix, gpu_based_matrix_t gpu_matrix )
{
	// Remember the caller's current device so it can be restored on exit.
	int device = 0;
	checkCudaErrors(cudaGetDevice(&device));
	//number of column blocks in matrix: ceil(order / block_size)
	int num_blocks;
	if(gpu_matrix.order%gpu_matrix.block_size)
	{
		num_blocks = gpu_matrix.order/gpu_matrix.block_size +1;
	} else
	{
		num_blocks = gpu_matrix.order/gpu_matrix.block_size;
	}

	//minimum number of blocks which would be present on each GPU
	int full_blocks_num = num_blocks/gpu_matrix.gpu_n;

	cublasStatus_t stat;
	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		int curr_block_num = full_blocks_num;
		// The first (num_blocks mod gpu_n) GPUs hold one extra block.
		if(full_blocks_num*gpu_matrix.gpu_n + i < num_blocks)
		{
			curr_block_num++;
		}

		for(int j =0; j < curr_block_num; j++)
		{
			// First global column held by block j of GPU i.
			int first_column_number = j*gpu_matrix.gpu_n*gpu_matrix.block_size + i*gpu_matrix.block_size;
			int band_dim = gpu_matrix.block_size;
			// The last block may be narrower than block_size.
			if(first_column_number + band_dim > gpu_matrix.order)
			{
				band_dim = gpu_matrix.order - first_column_number;
			}

			// Device storage is contiguous per GPU: block j starts at column
			// offset j*block_size within gpu_matrix_blocks[i].
			stat = cublasGetMatrix(gpu_matrix.order, band_dim, sizeof(double), &(gpu_matrix.gpu_matrix_blocks[i][j*gpu_matrix.order*gpu_matrix.block_size]), 
				gpu_matrix.order, &matrix[IDF2(0, j*gpu_matrix.gpu_n*gpu_matrix.block_size + i*gpu_matrix.block_size, gpu_matrix.order)], gpu_matrix.order);
			if(stat != CUBLAS_STATUS_SUCCESS)
			{
				printf("CAN NOT COPY BLOCK MATRIX, cublas status -  %d\n", stat);
				return EXIT_FAILURE;
			}
		}
	}

	checkCudaErrors(cudaSetDevice(device));

	return EXIT_SUCCESS;
}

/*
 * Distributes the host matrix `matrix` (order x order, column-major,
 * leading dimension = order) across gpu_n GPUs in a column-block-cyclic
 * layout: the column block with global index b goes to GPU (b mod gpu_n)
 * as that GPU's block number (b / gpu_n).  Allocates one contiguous device
 * buffer per GPU into gpu_matrix->gpu_matrix_blocks.  The caller's current
 * CUDA device is restored before returning.
 *
 * NOTE(review): cublasHandles is currently unused here — presumably kept
 * for interface symmetry with the other routines; confirm with callers.
 *
 * Returns EXIT_SUCCESS, or EXIT_FAILURE if any cuBLAS copy fails.
 */
int copyMatrixToDevices(double *matrix, gpu_based_matrix_t *gpu_matrix, cublasHandle_t* cublasHandles )
{
	// Remember the caller's current device so it can be restored on exit.
	int device = 0;
	checkCudaErrors(cudaGetDevice(&device));
	cublasStatus_t stat;

	// One device base pointer per GPU; sizeof(matrix) == sizeof(double*) here.
	gpu_matrix->gpu_matrix_blocks = (double**)malloc(gpu_matrix->gpu_n * sizeof(matrix));

	//number of column blocks in matrix: ceil(order / block_size)
	int num_blocks;
	if(gpu_matrix->order%gpu_matrix->block_size)
	{
		num_blocks = gpu_matrix->order/gpu_matrix->block_size +1;
	} else
	{
		num_blocks = gpu_matrix->order/gpu_matrix->block_size;
	}

	//minimum number of blocks which would be present on each GPU
	int full_blocks_num = num_blocks/gpu_matrix->gpu_n;


	for(int i = 0; i < gpu_matrix->gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		//number of blocks that should be present on current GPU
		int blocks_on_cur_gpu = full_blocks_num;
		if(full_blocks_num*gpu_matrix->gpu_n + i < num_blocks)
		{
			blocks_on_cur_gpu++;
		}

		// Allocate room for full-width blocks even if the last block is narrower.
		checkCudaErrors(cudaMalloc(&(gpu_matrix->gpu_matrix_blocks[i]), gpu_matrix->order* gpu_matrix->block_size * blocks_on_cur_gpu * sizeof(double)));
		

		for(int j =0; j < blocks_on_cur_gpu; j++)
		{
			// First global column held by block j of GPU i.
			int first_column_number = j*gpu_matrix->gpu_n*gpu_matrix->block_size + i*gpu_matrix->block_size;
			int band_dim = gpu_matrix->block_size;
			// The last block may be narrower than block_size.
			if(first_column_number + band_dim > gpu_matrix->order)
			{
				band_dim = gpu_matrix->order - first_column_number;
			}

			// Copy the global columns into device block j, stored contiguously
			// at column offset j*block_size within this GPU's allocation.
			stat = cublasSetMatrix(gpu_matrix->order, band_dim, sizeof(double),
				&matrix[IDF2(0, j*gpu_matrix->gpu_n*gpu_matrix->block_size + i*gpu_matrix->block_size, gpu_matrix->order)], gpu_matrix->order,
				&(gpu_matrix->gpu_matrix_blocks[i][j*gpu_matrix->order*gpu_matrix->block_size]), gpu_matrix->order);
			if(stat != CUBLAS_STATUS_SUCCESS)
			{
				printf("%i %i\n", IDF2(0, j*gpu_matrix->gpu_n*gpu_matrix->block_size + i*gpu_matrix->block_size, gpu_matrix->order), j*gpu_matrix->order*gpu_matrix->block_size);
				printf("CAN NOT COPY BLOCK MATRIX, gpu %i and block %i, status %i ", i, j, stat);
				return EXIT_FAILURE;
			}
		}
	}
	checkCudaErrors(cudaSetDevice(device));
	return EXIT_SUCCESS;
}

// Builds test matrix A15 with element (i,j) = 1 + max(i, j);
// its condition number is about 4*n*n.  Caller owns the returned buffer.
double* getA15(int order)
{
	double *result = (double*)malloc(order*order*sizeof(double));
	for (int row = 0; row < order; ++row)
	{
		for (int col = 0; col < order; ++col)
		{
			int larger = (row > col) ? row : col;
			result[IDF2(row, col, order)] = 1 + larger;
		}
	}

	return result;
}

// Builds test matrix A16 with element (i,j) = order - max(i, j);
// its condition number is about 2*n*(n+1).  Caller owns the returned buffer.
double* getA16(int order)
{
	double *result = (double*)malloc(order*order*sizeof(double));
	for (int row = 0; row < order; ++row)
	{
		for (int col = 0; col < order; ++col)
		{
			int larger = (row > col) ? row : col;
			result[IDF2(row, col, order)] = order - larger;
		}
	}

	return result;
}


// Maps a global (row, col) coordinate to the linear index of that element
// inside the owning GPU's contiguous block storage.  Column blocks are
// distributed round-robin, so global block b is the (b / gpu_n)-th block
// stored on its GPU.
int getGPUCoordinate(gpu_based_matrix_t gpu_matrix, int row, int col)
{
	int bs = gpu_matrix.block_size;
	int localBlock = (col / bs) / gpu_matrix.gpu_n;   // block index within the owning GPU
	int colInBlock = col % bs;                        // column offset inside the block

	return IDF2(row, localBlock * bs + colInBlock, gpu_matrix.order);
}

/*
 * Computes the matrix 1-norm (maximum absolute column sum) of a
 * column-block-cyclic distributed matrix, writing the result to *norm.
 * Each column's absolute sum is computed by cublasDasum on the GPU that
 * stores it.  The caller's current CUDA device is restored before
 * returning, including on the error path.
 *
 * Fixed: the cublasDasum failure message wrongly claimed an initialization
 * failure, and the early return skipped restoring the caller's device.
 */
cublasStatus_t getMatrixNorm( gpu_based_matrix_t gpu_matrix, cublasHandle_t* cublasHandles, double *norm ) 
{
	int device = 0;
	checkCudaErrors(cudaGetDevice(&device));

	double tmp = 0;
	*norm = 0.;

	cublasStatus_t stat;
	int cur_gpu = -1;
	for(int j = 0; j < gpu_matrix.order; j++)
	{
		// Column blocks are distributed round-robin: advance to the next GPU
		// at every block boundary.
		if( (j % gpu_matrix.block_size) == 0 )
		{
			cur_gpu = (cur_gpu +1)%gpu_matrix.gpu_n;
			checkCudaErrors(cudaSetDevice(cur_gpu));
		}
		int gpuCoordinate = getGPUCoordinate(gpu_matrix, 0, j);
		stat = cublasDasum( cublasHandles[cur_gpu], gpu_matrix.order, &(gpu_matrix.gpu_matrix_blocks[cur_gpu][gpuCoordinate]), 1, &tmp); 
		if(stat != CUBLAS_STATUS_SUCCESS)
		{
			printf("cublasDasum FAILED, cublas status - %d\n", stat);
			checkCudaErrors(cudaSetDevice(device));	// restore caller's device on error too
			return stat;
		}

		if ( tmp > (*norm))
		{
			*norm = tmp;
		}
	}

	checkCudaErrors(cudaSetDevice(device));
	return CUBLAS_STATUS_SUCCESS;
}

// LINPACK-style start of condition estimation: builds, in place in
// `solution`, a right-hand side of +/-1 entries chosen to maximize growth
// while forward-substituting through the triangular factor.  For symmetric
// storage the factor is read as rows (IDF2(i,j)); otherwise transposed
// (IDF2(j,i)).
void findMaximizedFirstSolution(double *host_matrix, double *solution,  int order, bool isSymmetric)
{
	for (int row = 0; row < order; ++row)
	{
		double acc = 0;
		for (int col = 0; col < row; ++col)
		{
			int idx = isSymmetric ? IDF2(row, col, order) : IDF2(col, row, order);
			acc += solution[col] * host_matrix[idx];
		}
		// Choose the unit entry's sign to match the accumulated sum.
		double sign = (acc < 0) ? -1.0 : 1.0;

		//what is this for????
		//if(a[ID(i,i,n)] == 0.0f) return 1e+9;
		solution[row] = -(sign + acc) / host_matrix[IDF2(row, row, order)];
	}
}

// Solves (L^T) * y = maximized in place in `maximized` by back
// substitution; L is the lower triangle of host_matrix.  When
// isUnitDiagonal is set the diagonal is taken as 1 and the division is
// skipped.
void solveLTransYEqualsMaximized(double *host_matrix, double *maximized,  int order, bool isUnitDiagonal)
{
	for (int row = order - 1; row >= 0; --row)
	{
		double acc = 0;
		for (int col = row + 1; col < order; ++col)
			acc += maximized[col] * host_matrix[IDF2(col, row, order)];

		maximized[row] -= acc;
		if (!isUnitDiagonal)
			maximized[row] /= host_matrix[IDF2(row, row, order)];
	}
}

// Solves L * temp = y in place in `solution` by forward substitution;
// L is the lower triangle of host_matrix.  When isUnitDiagonal is set the
// diagonal is taken as 1 and the division is skipped.
void solveLTempEqualsY(double *host_matrix, double *solution,  int order, bool isUnitDiagonal)
{
	for (int row = 0; row < order; ++row)
	{
		double acc = 0;
		for (int col = 0; col < row; ++col)
			acc += solution[col] * host_matrix[IDF2(row, col, order)];

		solution[row] -= acc;
		if (!isUnitDiagonal)
			solution[row] /= host_matrix[IDF2(row, row, order)];
	}
}

// Solves U * z = temp in place in `solution` by back substitution.
// For symmetric storage U is read as the transpose of the lower triangle
// (IDF2(j,i)); otherwise the upper triangle is read directly (IDF2(i,j)).
void solveUZEqualsTemp(double *host_matrix, double *solution,  int order, bool isSymmetric)
{
	for (int row = order - 1; row >= 0; --row)
	{
		double acc = 0;
		for (int col = row + 1; col < order; ++col)
		{
			int idx = isSymmetric ? IDF2(col, row, order) : IDF2(row, col, order);
			acc += solution[col] * host_matrix[idx];
		}
		solution[row] = (solution[row] - acc) / host_matrix[IDF2(row, row, order)];
	}
}


//TODO try to use gpu device for triangular systems solving
/*
 * LINPACK-style condition-number estimate: solves (A^T)y = e with a
 * growth-maximizing sign vector e, then A z = y, and reports
 * cond ~= ||A|| * ||z||_1 / ||y||_1 into *cond.
 *
 * Fixed: the function is declared cublasStatus_t but previously fell off
 * the end without a return statement (undefined behavior); `work` was
 * leaked; and `abs` on a double risked selecting the int overload and
 * truncating — replaced with fabs.
 */
cublasStatus_t findMatrixCond(double *host_matrix, gpu_based_matrix_t gpu_matrix, cublasHandle_t* cublasHandles, double matrixNorm, double * cond, bool isSymmetric, bool isUnitDiagonal)
{
	double *work;//used to solve L*temp = e with maximizing of temp
	double yNorm;
	double zNorm;	

	work = (double *)malloc(gpu_matrix.order*sizeof(double));
	
	findMaximizedFirstSolution(host_matrix, work, gpu_matrix.order, isSymmetric);

	solveLTransYEqualsMaximized(host_matrix, work, gpu_matrix.order, isUnitDiagonal);
	//now (A^T) y = e is solved

	//calculate y 1-norm
	yNorm = 0;
	for(int i=0; i<gpu_matrix.order;i++)
	{
		yNorm += fabs(work[i]);
	}

	//solve A*z=y
	solveLTempEqualsY(host_matrix, work, gpu_matrix.order, isUnitDiagonal);
	solveUZEqualsTemp(host_matrix, work, gpu_matrix.order, isSymmetric);

	//calculate z 1-norm
	zNorm = 0;
	for(int i=0; i<gpu_matrix.order;i++)
	{
		zNorm += fabs(work[i]);
	}

	*cond = matrixNorm*zNorm/yNorm;

	free(work);	// was leaked
	return CUBLAS_STATUS_SUCCESS;	// was missing entirely
}

// Creates one cuBLAS handle per GPU: handle[i] is created with device i
// current.  Returns EXIT_SUCCESS, or EXIT_FAILURE on the first creation
// failure (earlier handles are left created).
int initCublasHandles(cublasHandle_t* handle, int gpu_n)
{
	for (int gpu = 0; gpu < gpu_n; ++gpu)
	{
		checkCudaErrors(cudaSetDevice(gpu));
		cublasStatus_t status = cublasCreate(&handle[gpu]);
		if (status != CUBLAS_STATUS_SUCCESS)
		{
			printf("CUBLAS INITIALIZATION FAILED");
			return EXIT_FAILURE;
		}
	}

	return EXIT_SUCCESS;
}

// Destroys the per-GPU cuBLAS handles created by initCublasHandles, with
// the matching device current for each.  Returns EXIT_SUCCESS, or
// EXIT_FAILURE on the first destroy failure.
int destroyCublasHandles(cublasHandle_t* handle, int gpu_n)
{
	for (int gpu = 0; gpu < gpu_n; ++gpu)
	{
		checkCudaErrors(cudaSetDevice(gpu));
		cublasStatus_t status = cublasDestroy(handle[gpu]);
		if (status != CUBLAS_STATUS_SUCCESS)
		{
			printf("CUBLAS DESTROY FAILED");
			return EXIT_FAILURE;
		}
	}

	return EXIT_SUCCESS;
}

//int initCudaStreams(cudaStream_t* stream, int gpu_n)
//{
//	cublasStatus_t stat;
//	for(int i = 0; i < gpu_n; i++)
//	{
//		checkCudaErrors(cudaSetDevice(i));
//		checkCudaErrors(cudaStreamCreate(&stream[i]));
//		checkCudaErrors(cudaDeviceSynchronize());
//	}
//
//	return EXIT_SUCCESS;
//}

/*
 * Forward substitution with row pivoting: solves L * temp = P*b in place,
 * where ipiv[i] is the row of `solution` holding the i-th pivoted entry.
 * On exit solution[0..order) holds the result in pivot order.  When
 * isUnitDiagonal is set the diagonal is taken as 1.
 *
 * Fixed: b_copy[i] was assigned only in the unit-diagonal branch, so for
 * non-unit-diagonal systems the final copy-back read uninitialized memory.
 */
void solveLTempEqualsB(double *host_matrix, double *solution,  int order, int *ipiv, bool isUnitDiagonal)
{
	double * b_copy = (double *)malloc(order * sizeof(double));
	for(int i=0; i < order ;i++)
	{
		double temp=0;
		for(int j=0; j<i;j++)
		{
			temp+=solution[ipiv[j]] * host_matrix[IDF2(i, j, order)];
		}

		if(isUnitDiagonal)
		{
			solution[ipiv[i]] = (solution[ipiv[i]] - temp);
		}
		else
		{
			solution[ipiv[i]] = (solution[ipiv[i]] - temp)/host_matrix[IDF2(i,i,order)];
		}
		// Record entry i in pivot order so it can be written back densely
		// (previously only done in the unit-diagonal branch).
		b_copy[i] = solution[ipiv[i]];
	}

	// Write the result back without the permutation.
	for(int i =0; i < order ; i++)
	{
		solution[i] = b_copy[i];
	}

	free(b_copy);
}

// Unpivoted overload: solves L * temp = b in place in `solution` by
// forward substitution; L is the lower triangle of host_matrix.  When
// isUnitDiagonal is set the diagonal is taken as 1 and the division is
// skipped.
void solveLTempEqualsB(double *host_matrix, double *solution,  int order, bool isUnitDiagonal)
{
	for (int row = 0; row < order; ++row)
	{
		double acc = 0;
		for (int col = 0; col < row; ++col)
			acc += solution[col] * host_matrix[IDF2(row, col, order)];

		solution[row] -= acc;
		if (!isUnitDiagonal)
			solution[row] /= host_matrix[IDF2(row, row, order)];
	}
}

// Solves U * x = temp in place in `solution` by back substitution
// (same contract as solveUZEqualsTemp).  For symmetric storage U is read
// as the transpose of the lower triangle; otherwise read directly.
void solveUXEqualsTemp(double *host_matrix, double *solution,  int order, bool isSymmetric)
{
	for (int row = order - 1; row >= 0; --row)
	{
		double acc = 0;
		for (int col = row + 1; col < order; ++col)
		{
			int idx = isSymmetric ? IDF2(col, row, order) : IDF2(row, col, order);
			acc += solution[col] * host_matrix[idx];
		}
		solution[row] = (solution[row] - acc) / host_matrix[IDF2(row, row, order)];
	}
}

//used for finding part of solution of length block_size. Assume that first elements are minused from solution
// Forward substitution on the diagonal block covering rows
// [start, min(order, start + blockSize)).  hostDiagonalBlocks holds the
// diagonal blocks gathered from the GPUs with leading dimension `order`;
// within a block, columns are block-local (0..blockSize-1) while rows keep
// their global index.
void solvePartOfLowerTriangular(double * hostDiagonalBlocks, double *solution, int start, int blockSize, int order,  bool isUnitDiagonal)
{
	for(int i=start; (i < order) &&(i < start + blockSize) ;i++)
	{
		double temp=0;
		for(int j=start; j<i;j++)
		{
			temp+=solution[j] * hostDiagonalBlocks[IDF2(i, j - start, order)];
		}

		if(isUnitDiagonal)
		{
			solution[i] = (solution[i] - temp);
		}
		else
		{
			// NOTE(review): the diagonal element would be expected at
			// IDF2(i, i - start, order) to match the column indexing above;
			// IDF2(i, 0, order) only agrees when i == start — verify against
			// the block layout produced by solveLowerTriangularSystem.
			solution[i] = (solution[i] - temp)/hostDiagonalBlocks[IDF2(i,0,order)];
		}
	}
}

/*
 * Solves a lower-triangular system L * x = P * b where L is distributed
 * column-block-cyclically over gpu_n GPUs.  `solution` comes in as b and
 * leaves as x; ipiv applies the row permutation first.  The diagonal
 * blocks are gathered to the host and solved on the CPU
 * (solvePartOfLowerTriangular), while the rectangular sub-diagonal panel
 * updates run on the owning GPU via cublasDgemv.  The caller's current
 * device is restored before returning.
 */
cublasStatus_t solveLowerTriangularSystem(gpu_based_matrix_t gpu_matrix, cublasHandle_t *handle, cudaStream_t* stream, double *solution, int *ipiv, bool isUnitDiagonal)
{
	int device = 0;
	checkCudaErrors(cudaGetDevice(&device));

	// Apply the row permutation: solution <- P * solution.
	double * solCopy = (double * )malloc(gpu_matrix.order * sizeof(double));
	for(int i = 0; i < gpu_matrix.order; i++)
	{
		solCopy[i] = solution[ipiv[i]];
	}

	for(int i = 0; i < gpu_matrix.order; i++)
	{
		solution[i] = solCopy[i];
	}

	cublasStatus_t stat;

	// hostDiagonalBlocks receives all diagonal blocks (leading dim = order);
	// deviceSolutionVectors[i] is a per-GPU device copy of the solution vector.
	double * hostDiagonalBlocks = (double *)malloc(gpu_matrix.block_size * gpu_matrix.order * sizeof(double));
	double ** deviceSolutionVectors = (double **) malloc(gpu_matrix.gpu_n * sizeof(double *));
	//get diagonal blocks to host
	for(int i =0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));

		// NOTE(review): cudaMalloc/cublasSetVectorAsync results are unchecked here.
		cudaMalloc( &(deviceSolutionVectors[i]), gpu_matrix.order * sizeof(double) );
		cublasSetVectorAsync(gpu_matrix.order, sizeof(double), solution, 1, deviceSolutionVectors[i], 1, stream[i] );

		int gpuBlockIndex = 0;
		// GPU i owns the diagonal blocks whose first row is
		// i*block_size, (i + gpu_n)*block_size, ...
		for(int startBlockIndex = i*gpu_matrix.block_size; startBlockIndex < gpu_matrix.order; startBlockIndex+= gpu_matrix.gpu_n*gpu_matrix.block_size)
		{
			int curBlockSize = gpu_matrix.block_size;
			if(startBlockIndex + gpu_matrix.block_size > gpu_matrix.order)
			{
				curBlockSize = gpu_matrix.order - startBlockIndex;
			}
			double *deviceA =  &(gpu_matrix.gpu_matrix_blocks[i][IDF2(startBlockIndex, gpuBlockIndex, gpu_matrix.order)]);
			cublasGetMatrixAsync(curBlockSize, curBlockSize, sizeof(double), deviceA, gpu_matrix.order, 
				&hostDiagonalBlocks[IDF2(startBlockIndex, 0, gpu_matrix.order)], gpu_matrix.order, stream[i]);

			gpuBlockIndex+=gpu_matrix.block_size;
		}
	}

	// Wait for all asynchronous copies before touching the host-side blocks.
	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		checkCudaErrors(cudaStreamSynchronize(stream[i]));
	}

	int curDevice = -1;
	double alpha_1 = 1.;
	double alpha_m1 = -1.;
	// Block forward substitution: CPU solves the diagonal block, then the GPU
	// owning that block column subtracts panel * solved_block from the
	// trailing right-hand side.
	for(int currentBlockIndex = 0; currentBlockIndex < gpu_matrix.order; currentBlockIndex+= gpu_matrix.block_size)
	{
		curDevice = (curDevice+1)%gpu_matrix.gpu_n;
		checkCudaErrors(cudaSetDevice(curDevice));

		solvePartOfLowerTriangular(hostDiagonalBlocks, solution, currentBlockIndex, gpu_matrix.block_size, gpu_matrix.order, isUnitDiagonal);
		if(gpu_matrix.block_size + currentBlockIndex < gpu_matrix.order)
		{
			// Push the freshly solved entries (and trailing RHS) to the device.
			cublasSetVector(gpu_matrix.order - currentBlockIndex , sizeof(double), &solution[currentBlockIndex], 1, &deviceSolutionVectors[curDevice][currentBlockIndex], 1);

			int deviceSubTileStartCoordinate = getGPUCoordinate(gpu_matrix, currentBlockIndex + gpu_matrix.block_size, currentBlockIndex); 
			// trailing_rhs -= panel * solved_block
			stat = cublasDgemv(handle[curDevice], CUBLAS_OP_N, gpu_matrix.order - currentBlockIndex - gpu_matrix.block_size, gpu_matrix.block_size, &alpha_m1 , 
				&gpu_matrix.gpu_matrix_blocks[curDevice][deviceSubTileStartCoordinate], gpu_matrix.order,
				&deviceSolutionVectors[curDevice][currentBlockIndex], 1, &alpha_1,
				&deviceSolutionVectors[curDevice][currentBlockIndex + gpu_matrix.block_size], 1);
			
			if(stat != CUBLAS_STATUS_SUCCESS)
			{
				printf("CUBLAS Dgemv FAILED\n");
				return stat;
			}
			
			checkCudaErrors(cudaDeviceSynchronize());

			// Pull the updated trailing RHS back to the host for the next block solve.
			cublasGetVector(gpu_matrix.order - currentBlockIndex - gpu_matrix.block_size, sizeof(double), 
				&deviceSolutionVectors[curDevice][currentBlockIndex + gpu_matrix.block_size], 1,
				&solution[currentBlockIndex + gpu_matrix.block_size], 1);
		}

	}

	
	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		checkCudaErrors(cudaFree(deviceSolutionVectors[i]));
	}
	

	checkCudaErrors(cudaSetDevice(device));

	free(hostDiagonalBlocks);
	free(deviceSolutionVectors);
	free(solCopy);
	return CUBLAS_STATUS_SUCCESS;
}

//used for finding part of solution of length block_size. Assume that first elements are minused from solution
/*
 * Back substitution on the diagonal block covering rows
 * [start, min(order, start + blockSize)).  hostDiagonalBlocks holds the
 * diagonal blocks with leading dimension `order`; columns are block-local.
 *
 * Fixed: the update previously subtracted the bare matrix element instead
 * of matrixElement * solution[j] (compare solveUZEqualsTemp /
 * solveUXEqualsTemp), which is only correct when solution[j] == 1.
 */
void solvePartOfUpperTriangular(double * hostDiagonalBlocks, double *solution, int start, int blockSize, int order,  bool isSymmetric)
{
	int coordinate;
	for (int i = min(order , start + blockSize) -1; i>=start; --i)
	{
		double temp=0;
		for(int j = i+1; j < min(order, start + blockSize); j++)
		{
			if(isSymmetric)
			{
				coordinate = IDF2(j -start, i, order);
			}
			else
			{
				coordinate = IDF2(i, j -start, order);
			}

			// Weight by the already-known solution component (was missing).
			temp += solution[j] * hostDiagonalBlocks[coordinate];
		}

		// NOTE(review): diagonal addressed as IDF2(i, 0, order); expected
		// IDF2(i, i - start, order) to match the column indexing above —
		// confirm against the block layout produced by the caller.
		solution[i] = (solution[i] - temp) / hostDiagonalBlocks[IDF2(i,0,order)];
	}
}

// Returns the GPU that owns global column `col` under the round-robin
// column-block distribution.  `row` is unused but kept so the interface
// mirrors getGPUCoordinate.
int getDeviceByCoordinate(gpu_based_matrix_t gpu_matrix, int row, int col)
{
	int globalBlock = col / gpu_matrix.block_size;
	return globalBlock % gpu_matrix.gpu_n;
}

/*
 * Solves an upper-triangular system U * x = b where the matrix is
 * distributed column-block-cyclically over gpu_n GPUs.  `solution` comes
 * in as b and leaves as x.  For symmetric storage the upper triangle is
 * read as the transpose of the stored lower triangle (CUBLAS_OP_T).
 * Diagonal blocks are gathered to the host and solved on the CPU
 * (solvePartOfUpperTriangular); the panel updates above the diagonal run
 * on the owning GPU via cublasDgemv.  The caller's current device is
 * restored before returning.
 */
cublasStatus_t solveUpperTriangularSystem(gpu_based_matrix_t gpu_matrix, cublasHandle_t *handle, cudaStream_t* stream, double *solution, bool isSymmetric)
{
	int device = 0;
	checkCudaErrors(cudaGetDevice(&device));

	cublasStatus_t stat;

	// hostDiagonalBlocks receives all diagonal blocks (leading dim = order);
	// deviceSolutionVectors[i] is a per-GPU device copy of the solution vector.
	double * hostDiagonalBlocks = (double *)malloc(gpu_matrix.block_size * gpu_matrix.order * sizeof(double));
	double ** deviceSolutionVectors = (double **) malloc(gpu_matrix.gpu_n * sizeof(double *));
	//int * gpuMatrixBlocksRowLength = (double *)malloc( gpu_matrix.gpu_n * sizeof(int));//row length of blocks in i-th gpu

	//get diagonal blocks to host
	for(int i =0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));

		// NOTE(review): cudaMalloc/cublasSetVectorAsync results are unchecked here.
		cudaMalloc( &(deviceSolutionVectors[i]), gpu_matrix.order * sizeof(double) );
		cublasSetVectorAsync(gpu_matrix.order, sizeof(double), solution, 1, deviceSolutionVectors[i], 1, stream[i] );

		//gpuMatrixBlocksRowLength[i] = 0;
		int gpuBlockIndex = 0;
		// GPU i owns the diagonal blocks whose first row is
		// i*block_size, (i + gpu_n)*block_size, ...
		for(int startBlockIndex = i*gpu_matrix.block_size; startBlockIndex < gpu_matrix.order; startBlockIndex+= gpu_matrix.gpu_n*gpu_matrix.block_size)
		{
			int curBlockSize = gpu_matrix.block_size;
			if(startBlockIndex + gpu_matrix.block_size > gpu_matrix.order)
			{
				curBlockSize = gpu_matrix.order - startBlockIndex;
			}
			//gpuMatrixBlocksRowLength[i] += curBlockSize;

			double *deviceA =  &(gpu_matrix.gpu_matrix_blocks[i][IDF2(startBlockIndex, gpuBlockIndex, gpu_matrix.order)]);
			cublasGetMatrixAsync(curBlockSize, curBlockSize, sizeof(double), deviceA, gpu_matrix.order, 
				&hostDiagonalBlocks[IDF2(startBlockIndex, 0, gpu_matrix.order)], gpu_matrix.order, stream[i]);

			gpuBlockIndex+=gpu_matrix.block_size;
		}
	}

	// Wait for all asynchronous copies before touching the host-side blocks.
	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		checkCudaErrors(cudaStreamSynchronize(stream[i]));
	}

	int curDevice = -1;
	double alpha_1 = 1.;
	double alpha_m1 = -1.;

	// Start from the last (possibly partial) block; if order divides evenly,
	// step back one full block.
	int startIndex = gpu_matrix.order - gpu_matrix.order % gpu_matrix.block_size;
	if(startIndex == gpu_matrix.order)
	{
		startIndex = startIndex - gpu_matrix.block_size;
	}

	// Symmetric storage holds the lower triangle, so read it transposed.
	cublasOperation_t operation = isSymmetric ? CUBLAS_OP_T: CUBLAS_OP_N;
	// Block back substitution: CPU solves the diagonal block, then the GPU
	// owning it subtracts panel * solved_block from rows above.
	while (startIndex >= 0)
	{
		curDevice = getDeviceByCoordinate(gpu_matrix, startIndex, startIndex);
		checkCudaErrors(cudaSetDevice(curDevice));

		solvePartOfUpperTriangular(hostDiagonalBlocks, solution, startIndex, gpu_matrix.block_size, gpu_matrix.order, isSymmetric);
		if(startIndex - gpu_matrix.block_size >=0)
		{
			// Push the prefix of the RHS (through the solved block) to the device.
			int rightIndex = min(gpu_matrix.order, startIndex+ gpu_matrix.block_size);
			cublasSetVector(rightIndex , sizeof(double), &solution[0], 1, 
				&deviceSolutionVectors[curDevice][0], 1);

			// The final block may be narrower than block_size.
			int colNum = gpu_matrix.block_size;
			if(colNum + startIndex > gpu_matrix.order)
			{
				colNum = gpu_matrix.order - startIndex;
			}
			// The panel lives in different places depending on which triangle
			// is physically stored.
			int deviceSubTileStartCoordinate = 0;
			if(isSymmetric)
			{
				deviceSubTileStartCoordinate = getGPUCoordinate(gpu_matrix, startIndex, 0); 
			}
			else 
			{
				deviceSubTileStartCoordinate = getGPUCoordinate(gpu_matrix, 0, startIndex); 
			}
			// leading_rhs -= panel(^T) * solved_block
			stat = cublasDgemv(handle[curDevice], operation, startIndex, colNum, &alpha_m1,
				&gpu_matrix.gpu_matrix_blocks[curDevice][deviceSubTileStartCoordinate], gpu_matrix.order,
				&deviceSolutionVectors[curDevice][startIndex], 1, &alpha_1,
				&deviceSolutionVectors[curDevice][0], 1);

			if(stat != CUBLAS_STATUS_SUCCESS)
			{
				printf("CUBLAS Dgemv FAILED\n");
				return stat;
			}
			checkCudaErrors(cudaDeviceSynchronize());

			// Pull the updated leading RHS back for the next block solve.
			cublasGetVector(startIndex, sizeof(double), &deviceSolutionVectors[curDevice][0], 1, &solution[0], 1);
		}

		startIndex-= gpu_matrix.block_size;
	}

	for(int i = 0; i < gpu_matrix.gpu_n; i++)
	{
		checkCudaErrors(cudaSetDevice(i));
		checkCudaErrors(cudaFree(deviceSolutionVectors[i]));
	}

	checkCudaErrors(cudaSetDevice(device));

	free(hostDiagonalBlocks);
	free(deviceSolutionVectors);
	return CUBLAS_STATUS_SUCCESS;
}

