/*
 * GPU_Backend.cu
 *
 *  Created on: 2010-06-13
 *      Author: Daniel
 */

#include "../inc/GPU_Backend.h"

#include <string.h>

#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>

#include <cublas.h>

#include <stdlib.h>
#include <string.h>

/*
 * Initializes the cuBLAS (v1) runtime. Must be called before any other
 * _gpu_* routine that relies on cuBLAS.
 * Returns 0 on success, -1 if cuBLAS failed to initialize.
 * (Previously the cublasInit() status was silently discarded.)
 */
int _gpu_init() {

	if (cublasInit() != CUBLAS_STATUS_SUCCESS)
		return -1;

	return 0;
}

/*
 * Releases the cuBLAS (v1) runtime resources acquired by _gpu_init().
 * Returns 0 on success, -1 if shutdown reported an error.
 * (Previously the cublasShutdown() status was silently discarded.)
 */
int _gpu_shutdown() {

	if (cublasShutdown() != CUBLAS_STATUS_SUCCESS)
		return -1;

	return 0;
}

/* Element-wise sum: result[i] = first[i] + second[i] for i in [0, N).
 * One thread per element; threads past N return immediately. */
__global__ void _gpu_plus_kernel(float* result, float* first, float* second, int N) {
	const int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= N)
		return;
	result[i] = first[i] + second[i];
}

/* Element-wise difference: result[i] = first[i] - second[i] for i in [0, N).
 * One thread per element; threads past N return immediately. */
__global__ void _gpu_minus_kernel(float* result, float* first, float* second, int N) {
	const int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= N)
		return;
	result[i] = first[i] - second[i];
}

/* Scales every element: result[i] = first[i] * scalar for i in [0, N).
 * One thread per element; threads past N return immediately. */
__global__ void _gpu_times_scalar_kernel(float* result, float* first, float scalar, int N) {
	const int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= N)
		return;
	result[i] = first[i] * scalar;
}

/* Divides every element: result[i] = first[i] / scalar for i in [0, N).
 * One thread per element; threads past N return immediately.
 * NOTE(review): scalar == 0 yields IEEE inf/NaN per element — the host
 * wrappers do not reject it; confirm that is intended. */
__global__ void _gpu_div_scalar_kernel(float* result, float* first, float scalar, int N) {
	const int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= N)
		return;
	result[i] = first[i] / scalar;
}

/*
 * Copies one vector's contents into another of identical size.
 * Despite the _gpu_ prefix this is a pure host-side memcpy of the
 * backing arrays; no device memory is touched.
 * Returns 0 on success, -1 when either vector is empty or sizes differ.
 */
int _gpu_vector_copy(const Vector& source, Vector& dest) {
	const int n = source.getSize();

	/* Empty vectors and size mismatches are both rejected; a positive
	 * source size plus equal sizes implies a positive dest size. */
	if (n <= 0 || dest.getSize() != n)
		return -1;

	memcpy(dest.tab, source.tab, (size_t) n * sizeof(float));

	return 0;
}

/* Fills dest with the arithmetic sequence offset, offset+step, ... so that
 * dest[i] = step * i + offset for i in [0, N). One thread per element. */
__global__ void _gpu_vector_literal_kernel(float *dest, float offset, float step, int N) {
	const int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= N)
		return;
	dest[i] = step * i + offset;
}

/*
 * Fills `dest` with the arithmetic sequence begin, begin+step, ... on the
 * GPU, then copies the result back to host memory (dest.tab).
 * The sign of `step` must point from `begin` towards `end`, and dest's
 * size must match the sequence length.
 * Returns 0 on success, -1 on invalid arguments or a CUDA failure.
 */
int _gpu_vector_literal(Vector& dest, float begin, float end, float step) {

	if (dest.getSize() <= 0)
		return -1;

	/* A zero step can never reach `end`. */
	if (step == 0) {
		return -1;
	}

	if (step > 0 && end <= begin)
		return -1;

	if (step < 0 && begin <= end)
		return -1;

	/* NOTE(review): int size compared against a float expression —
	 * fragile when (end - begin) / step is inexact; kept as-is to
	 * preserve the existing accept/reject behavior. */
	if (dest.getSize() != 1 + (end - begin) / step)
		return -1;

	float *dest_d = NULL;
	/* Was unchecked: a failed cudaMalloc would hand a garbage pointer
	 * to the kernel launch below. */
	if (cudaMalloc((void **) &dest_d, dest.getSize()*sizeof(float)) != cudaSuccess)
		return -1;

	int n_threads = 32;
	int n_blocks = (dest.getSize() + n_threads - 1) / n_threads;	/* ceil-div */
	_gpu_vector_literal_kernel <<< n_blocks, n_threads >>> (dest_d, begin, step, dest.getSize());

	cudaError_t err = cudaGetLastError();	/* catches a bad launch config */
	if (err == cudaSuccess)
		err = cudaMemcpy(dest.tab, dest_d, dest.getSize()*sizeof(float), cudaMemcpyDeviceToHost);

	cudaFree(dest_d);

	return (err == cudaSuccess) ? 0 : -1;
}

/*
 * Exact element-wise equality test of two vectors (host-side loop).
 * Returns 1 when every element matches, 0 on the first mismatch,
 * -1 when either vector is empty or their sizes differ.
 */
int _gpu_vector_equal(const Vector& first, const Vector& second) {

	const int n = first.getSize();

	if (n <= 0 || second.getSize() <= 0)
		return -1;
	if (n != second.getSize())
		return -1;

	/* Walk until the first differing element (exact float comparison). */
	int i = 0;
	while (i < n && first.get(i) == second.get(i))
		++i;

	return (i == n) ? 1 : 0;
}

/*
 * Exact element-wise inequality test of two vectors (host-side loop).
 * Returns 1 when any element differs, 0 when all elements match,
 * -1 when either vector is empty or their sizes differ.
 */
int _gpu_vector_nequal(const Vector& first, const Vector& second) {

	const int n = first.getSize();

	if (n <= 0 || second.getSize() <= 0)
		return -1;
	if (n != second.getSize())
		return -1;

	/* Walk until the first differing element (exact float comparison). */
	int i = 0;
	while (i < n && first.get(i) == second.get(i))
		++i;

	return (i < n) ? 1 : 0;
}

/*
 * Vector addition via cuBLAS: result = first + second.
 * Copies both operands to the device, accumulates in place with SAXPY
 * (first_d = 1.0f * second_d + first_d), and copies the sum back.
 * All three vectors must share the same positive size.
 * Returns 0 on success, -1 on size mismatch or device allocation failure.
 */
int _gpu_vector_plus_vector(Vector& result, const Vector& first, const Vector& second) {

	if (result.getSize() <= 0 || first.getSize() <= 0 || second.getSize() <= 0)
		return -1;
	if (result.getSize() != first.getSize() || result.getSize() != second.getSize())
		return -1;

	const int n = first.getSize();

	float *first_d = NULL, *second_d = NULL;
	/* Device allocations were previously unchecked; clean up partial
	 * allocations on failure instead of launching with bad pointers. */
	if (cublasAlloc(n, sizeof(float), (void **) &first_d) != CUBLAS_STATUS_SUCCESS ||
			cublasAlloc(n, sizeof(float), (void **) &second_d) != CUBLAS_STATUS_SUCCESS) {
		if (first_d) cublasFree(first_d);
		if (second_d) cublasFree(second_d);
		return -1;
	}

	cublasSetVector (n, sizeof(float), (void *)first.tab, 1, (void *)first_d, 1);
	cublasSetVector (n, sizeof(float), (void *)second.tab, 1, (void *)second_d, 1);

	/* alpha as a float literal (was an int implicitly converted). */
	cublasSaxpy (n, 1.0f, second_d, 1, first_d, 1);

	cublasGetVector (n, sizeof(float), (void *)first_d, 1, (void *)result.tab, 1);

	cublasFree(first_d);
	cublasFree(second_d);

	return 0;
}

/*
 * Vector subtraction via cuBLAS: result = first - second.
 * Copies both operands to the device, accumulates in place with SAXPY
 * (first_d = -1.0f * second_d + first_d), and copies the difference back.
 * All three vectors must share the same positive size.
 * Returns 0 on success, -1 on size mismatch or device allocation failure.
 */
int _gpu_vector_minus_vector(Vector& result, const Vector& first, const Vector& second) {

	if (result.getSize() <= 0 || first.getSize() <= 0 || second.getSize() <= 0)
		return -1;
	if (result.getSize() != first.getSize() || result.getSize() != second.getSize())
		return -1;

	const int n = first.getSize();

	float *first_d = NULL, *second_d = NULL;
	/* Device allocations were previously unchecked; clean up partial
	 * allocations on failure instead of launching with bad pointers. */
	if (cublasAlloc(n, sizeof(float), (void **) &first_d) != CUBLAS_STATUS_SUCCESS ||
			cublasAlloc(n, sizeof(float), (void **) &second_d) != CUBLAS_STATUS_SUCCESS) {
		if (first_d) cublasFree(first_d);
		if (second_d) cublasFree(second_d);
		return -1;
	}

	cublasSetVector (n, sizeof(float), (void *)first.tab, 1, (void *)first_d, 1);
	cublasSetVector (n, sizeof(float), (void *)second.tab, 1, (void *)second_d, 1);

	/* alpha as a float literal (was an int implicitly converted). */
	cublasSaxpy (n, -1.0f, second_d, 1, first_d, 1);

	cublasGetVector (n, sizeof(float), (void *)first_d, 1, (void *)result.tab, 1);

	cublasFree(first_d);
	cublasFree(second_d);

	return 0;
}

/*
 * result = matrix * vector via cuBLAS SGEMV ('n').
 * matrix is rows x cols, vector holds cols entries, result holds rows.
 * Each dimension is rounded up to a multiple of 32 and the device buffers
 * are zero-padded, so the padding cannot change the visible product.
 * Returns 0 on success, -1 on dimension mismatch or allocation failure.
 * NOTE(review): cuBLAS is column-major and matrix.tab is passed with
 * lda = rows, so Matrix is assumed to store column-major data — confirm.
 */
int _gpu_matrix_times_vector(Vector& result, const Matrix& matrix, const Vector& vector) {

	if (result.getSize() <= 0 || matrix.getRowsNumber() <= 0 || matrix.getColumnsNumber() <= 0 || vector.getSize()
			<= 0)
		return -1;
	if (matrix.getColumnsNumber() != vector.getSize() || matrix.getRowsNumber() != result.getSize())
		return -1;

	/* NOTE(review): an exact multiple of 32 is rounded up by a full extra
	 * 32 — harmless (padding is zeroed) but slightly wasteful. */
	int matrix_dim1_d = matrix.getRowsNumber()+32-matrix.getRowsNumber()%32;
	int matrix_dim2_d = matrix.getColumnsNumber()+32-matrix.getColumnsNumber()%32;
	int vector_size_d = vector.getSize()+32-vector.getSize()%32;
	int result_size_d = result.getSize()+32-result.getSize()%32;

	float *matrix_d = NULL, *vector_d = NULL, *result_d = NULL;
	/* Device allocations were previously unchecked; clean up partial
	 * allocations on failure. */
	if (cublasAlloc(matrix_dim1_d*matrix_dim2_d, sizeof(float), (void**)&matrix_d) != CUBLAS_STATUS_SUCCESS ||
			cublasAlloc(vector_size_d, sizeof(float), (void**)&vector_d) != CUBLAS_STATUS_SUCCESS ||
			cublasAlloc(result_size_d, sizeof(float), (void**)&result_d) != CUBLAS_STATUS_SUCCESS) {
		if (matrix_d) cublasFree(matrix_d);
		if (vector_d) cublasFree(vector_d);
		if (result_d) cublasFree(result_d);
		return -1;
	}

	/* sizeof(float) instead of the magic "*4". */
	cudaMemset(matrix_d, 0, (size_t)matrix_dim1_d*matrix_dim2_d*sizeof(float));
	cublasSetMatrix (matrix.getRowsNumber(), matrix.getColumnsNumber(), sizeof(float), matrix.tab, matrix.getRowsNumber(), (void*)matrix_d, matrix_dim1_d);

	cudaMemset(vector_d, 0, (size_t)vector_size_d*sizeof(float));
	cublasSetVector(vector.getSize(), sizeof(float), (void*)vector.tab, 1, (void*)vector_d, 1);

	cudaMemset(result_d, 0, (size_t)result_size_d*sizeof(float));

	/* y = 1.0f * A * x + 0.0f * y on the padded dimensions. */
	cublasSgemv ('n', matrix_dim1_d, matrix_dim2_d, 1.0f, matrix_d, matrix_dim1_d, vector_d, 1, 0.0f, result_d, 1);

	cublasGetVector(result.getSize(), sizeof(float), (void*)result_d, 1, (void*)result.tab, 1);

	cublasFree(matrix_d);
	cublasFree(vector_d);
	cublasFree(result_d);

	return 0;
}

/*
 * result = vector^T * matrix via cuBLAS SGEMV with 't' (transpose).
 * matrix is rows x cols, vector holds rows entries, result holds cols.
 * Each dimension is rounded up to a multiple of 32 and the device buffers
 * are zero-padded, so the padding cannot change the visible product.
 * Returns 0 on success, -1 on dimension mismatch or allocation failure.
 * NOTE(review): cuBLAS is column-major and matrix.tab is passed with
 * lda = rows, so Matrix is assumed to store column-major data — confirm.
 */
int _gpu_vector_times_matrix(Vector& result, const Matrix& matrix, const Vector& vector) {

	/* Bug fix: the original checked getRowsNumber() twice and never
	 * validated getColumnsNumber(). */
	if (result.getSize() <= 0 || matrix.getRowsNumber() <= 0 || matrix.getColumnsNumber() <= 0 || vector.getSize()
			<= 0)
		return -1;
	if (matrix.getRowsNumber() != vector.getSize() || matrix.getColumnsNumber() != result.getSize())
		return -1;

	int matrix_dim1_d = matrix.getRowsNumber()+32-matrix.getRowsNumber()%32;
	int matrix_dim2_d = matrix.getColumnsNumber()+32-matrix.getColumnsNumber()%32;
	int vector_size_d = vector.getSize()+32-vector.getSize()%32;
	int result_size_d = result.getSize()+32-result.getSize()%32;

	float *matrix_d = NULL, *vector_d = NULL, *result_d = NULL;
	/* Device allocations were previously unchecked; clean up partial
	 * allocations on failure. */
	if (cublasAlloc(matrix_dim1_d*matrix_dim2_d, sizeof(float), (void**)&matrix_d) != CUBLAS_STATUS_SUCCESS ||
			cublasAlloc(vector_size_d, sizeof(float), (void**)&vector_d) != CUBLAS_STATUS_SUCCESS ||
			cublasAlloc(result_size_d, sizeof(float), (void**)&result_d) != CUBLAS_STATUS_SUCCESS) {
		if (matrix_d) cublasFree(matrix_d);
		if (vector_d) cublasFree(vector_d);
		if (result_d) cublasFree(result_d);
		return -1;
	}

	/* sizeof(float) instead of the magic "*4". */
	cudaMemset(matrix_d, 0, (size_t)matrix_dim1_d*matrix_dim2_d*sizeof(float));
	cublasSetMatrix (matrix.getRowsNumber(), matrix.getColumnsNumber(), sizeof(float), matrix.tab, matrix.getRowsNumber(), (void*)matrix_d, matrix_dim1_d);

	cudaMemset(vector_d, 0, (size_t)vector_size_d*sizeof(float));
	cublasSetVector(vector.getSize(), sizeof(float), (void*)vector.tab, 1, (void*)vector_d, 1);

	cudaMemset(result_d, 0, (size_t)result_size_d*sizeof(float));

	/* y = 1.0f * A^T * x + 0.0f * y on the padded dimensions. */
	cublasSgemv ('t', matrix_dim1_d, matrix_dim2_d, 1.0f, matrix_d, matrix_dim1_d, vector_d, 1, 0.0f, result_d, 1);

	cublasGetVector(result.getSize(), sizeof(float), (void*)result_d, 1, (void*)result.tab, 1);

	cublasFree(matrix_d);
	cublasFree(vector_d);
	cublasFree(result_d);

	return 0;
}

/*
 * Copies one matrix's contents into another of identical dimensions.
 * Despite the _gpu_ prefix this is a pure host-side memcpy of the
 * backing arrays; no device memory is touched.
 * Returns 0 on success, -1 when either matrix is empty or shapes differ.
 */
int _gpu_matrix_copy(const Matrix& source, Matrix& dest) {

	const int rows = source.getRowsNumber();
	const int cols = source.getColumnsNumber();

	/* Empty matrices and shape mismatches are both rejected; equal,
	 * positive source dims imply positive dest dims. */
	if (rows <= 0 || cols <= 0)
		return -1;
	if (dest.getRowsNumber() != rows || dest.getColumnsNumber() != cols)
		return -1;

	memcpy(dest.tab, source.tab, (size_t) rows * cols * sizeof(float));

	return 0;
}

/*
 * Exact element-wise equality test of two matrices (host-side loops).
 * Returns 1 when every element matches, 0 on the first mismatch,
 * -1 when either matrix is empty or the shapes differ.
 */
int _gpu_matrix_equal(const Matrix& first, const Matrix& second) {

	const int rows = first.getRowsNumber();
	const int cols = first.getColumnsNumber();

	if (rows <= 0 || cols <= 0 || second.getRowsNumber() <= 0 || second.getColumnsNumber() <= 0)
		return -1;
	if (second.getRowsNumber() != rows || second.getColumnsNumber() != cols)
		return -1;

	/* Exact float comparison, element by element. */
	for (int r = 0; r < rows; ++r)
		for (int c = 0; c < cols; ++c)
			if (first.get(r, c) != second.get(r, c))
				return 0;

	return 1;
}

/*
 * Exact element-wise inequality test of two matrices (host-side loops).
 * Returns 1 when any element differs, 0 when all elements match,
 * -1 when either matrix is empty or the shapes differ.
 */
int _gpu_matrix_nequal(const Matrix& first, const Matrix& second) {

	const int rows = first.getRowsNumber();
	const int cols = first.getColumnsNumber();

	if (rows <= 0 || cols <= 0 || second.getRowsNumber() <= 0 || second.getColumnsNumber() <= 0)
		return -1;
	if (second.getRowsNumber() != rows || second.getColumnsNumber() != cols)
		return -1;

	/* Exact float comparison, element by element. */
	for (int r = 0; r < rows; ++r)
		for (int c = 0; c < cols; ++c)
			if (first.get(r, c) != second.get(r, c))
				return 1;

	return 0;
}

/*
 * Element-wise matrix addition on the GPU: result = first + second.
 * All three matrices must share the same positive dimensions; the data
 * is treated as a flat array of rows*cols floats.
 * Returns 0 on success, -1 on dimension mismatch or CUDA failure.
 */
int _gpu_matrix_plus_matrix(Matrix& result, const Matrix& first, const Matrix& second) {

	if (result.getRowsNumber() <= 0 || result.getColumnsNumber() <= 0 || first.getRowsNumber() <= 0 || first.getColumnsNumber()
			<= 0 || second.getRowsNumber() <= 0 || second.getColumnsNumber() <= 0)
		return -1;
	if (result.getRowsNumber() != first.getRowsNumber() || result.getColumnsNumber() != first.getColumnsNumber() || result.getRowsNumber()
			!= second.getRowsNumber() || result.getColumnsNumber() != second.getColumnsNumber())
		return -1;

	const int N = result.getRowsNumber()*result.getColumnsNumber();
	const size_t bytes = (size_t)N * sizeof(float);

	/* Allocations were previously unchecked: a failed cudaMalloc would
	 * pass a garbage pointer to the kernel. cudaFree(NULL) is a no-op,
	 * so the cleanup path is safe for partial allocations. */
	float *first_d = NULL, *second_d = NULL, *result_d = NULL;
	if (cudaMalloc((void **) &first_d, bytes) != cudaSuccess ||
			cudaMalloc((void **) &second_d, bytes) != cudaSuccess ||
			cudaMalloc((void **) &result_d, bytes) != cudaSuccess) {
		cudaFree(first_d);
		cudaFree(second_d);
		cudaFree(result_d);
		return -1;
	}

	cudaMemcpy(first_d, first.tab, bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(second_d, second.tab, bytes, cudaMemcpyHostToDevice);

	const int n_threads = 32;
	const int n_blocks = (N + n_threads - 1) / n_threads;	/* ceil-div */
	_gpu_plus_kernel <<< n_blocks, n_threads >>> (result_d, first_d, second_d, N);

	cudaError_t err = cudaGetLastError();	/* catches a bad launch config */
	if (err == cudaSuccess)
		err = cudaMemcpy(result.tab, result_d, bytes, cudaMemcpyDeviceToHost);

	cudaFree(first_d);
	cudaFree(second_d);
	cudaFree(result_d);

	return (err == cudaSuccess) ? 0 : -1;
}

/*
 * Element-wise matrix subtraction on the GPU: result = first - second.
 * All three matrices must share the same positive dimensions; the data
 * is treated as a flat array of rows*cols floats.
 * Returns 0 on success, -1 on dimension mismatch or CUDA failure.
 */
int _gpu_matrix_minus_matrix(Matrix& result, const Matrix& first, const Matrix& second) {

	if (result.getRowsNumber() <= 0 || result.getColumnsNumber() <= 0 || first.getRowsNumber() <= 0 || first.getColumnsNumber()
			<= 0 || second.getRowsNumber() <= 0 || second.getColumnsNumber() <= 0)
		return -1;
	if (result.getRowsNumber() != first.getRowsNumber() || result.getColumnsNumber() != first.getColumnsNumber() || result.getRowsNumber()
			!= second.getRowsNumber() || result.getColumnsNumber() != second.getColumnsNumber())
		return -1;

	const int N = result.getRowsNumber()*result.getColumnsNumber();
	const size_t bytes = (size_t)N * sizeof(float);

	/* Allocations were previously unchecked: a failed cudaMalloc would
	 * pass a garbage pointer to the kernel. cudaFree(NULL) is a no-op,
	 * so the cleanup path is safe for partial allocations. */
	float *first_d = NULL, *second_d = NULL, *result_d = NULL;
	if (cudaMalloc((void **) &first_d, bytes) != cudaSuccess ||
			cudaMalloc((void **) &second_d, bytes) != cudaSuccess ||
			cudaMalloc((void **) &result_d, bytes) != cudaSuccess) {
		cudaFree(first_d);
		cudaFree(second_d);
		cudaFree(result_d);
		return -1;
	}

	cudaMemcpy(first_d, first.tab, bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(second_d, second.tab, bytes, cudaMemcpyHostToDevice);

	const int n_threads = 32;
	const int n_blocks = (N + n_threads - 1) / n_threads;	/* ceil-div */
	_gpu_minus_kernel <<< n_blocks, n_threads >>> (result_d, first_d, second_d, N);

	cudaError_t err = cudaGetLastError();	/* catches a bad launch config */
	if (err == cudaSuccess)
		err = cudaMemcpy(result.tab, result_d, bytes, cudaMemcpyDeviceToHost);

	cudaFree(first_d);
	cudaFree(second_d);
	cudaFree(result_d);

	return (err == cudaSuccess) ? 0 : -1;
}

/*
 * Matrix product via cuBLAS SGEMM: result = first * second.
 * Shapes: result is (first.rows x second.cols) and first.cols must equal
 * second.rows. Each dimension is rounded up to a multiple of 32 and the
 * operand buffers zero-padded, so the padding cannot change the visible
 * part of the product (beta = 0 overwrites the whole result buffer).
 * Returns 0 on success, -1 on shape mismatch or allocation failure.
 * NOTE(review): cuBLAS is column-major and each .tab is passed with
 * lda = rows, so Matrix is assumed to store column-major data — confirm.
 */
int _gpu_matrix_times_matrix(Matrix& result, const Matrix& first, const Matrix& second) {
	if (result.getRowsNumber() <= 0 || result.getColumnsNumber() <= 0 || first.getRowsNumber() <= 0 || first.getColumnsNumber()
			<= 0 || second.getRowsNumber() <= 0 || second.getColumnsNumber() <= 0)
		return -1;

	if (result.getRowsNumber() != first.getRowsNumber() || result.getColumnsNumber() != second.getColumnsNumber() || first.getColumnsNumber()
			!= second.getRowsNumber())
		return -1;

	int first_dim1_d = first.getRowsNumber()+32-first.getRowsNumber()%32;
	int first_dim2_d = first.getColumnsNumber()+32-first.getColumnsNumber()%32;
	int second_dim1_d = second.getRowsNumber()+32-second.getRowsNumber()%32;
	int second_dim2_d = second.getColumnsNumber()+32-second.getColumnsNumber()%32;
	int result_dim1_d = result.getRowsNumber()+32-result.getRowsNumber()%32;
	int result_dim2_d = result.getColumnsNumber()+32-result.getColumnsNumber()%32;

	float *first_d = NULL, *second_d = NULL, *result_d = NULL;
	/* Device allocations were previously unchecked; clean up partial
	 * allocations on failure. */
	if (cublasAlloc(first_dim1_d*first_dim2_d, sizeof(float), (void**)&first_d) != CUBLAS_STATUS_SUCCESS ||
			cublasAlloc(second_dim1_d*second_dim2_d, sizeof(float), (void**)&second_d) != CUBLAS_STATUS_SUCCESS ||
			cublasAlloc(result_dim1_d*result_dim2_d, sizeof(float), (void**)&result_d) != CUBLAS_STATUS_SUCCESS) {
		if (first_d) cublasFree(first_d);
		if (second_d) cublasFree(second_d);
		if (result_d) cublasFree(result_d);
		return -1;
	}

	/* sizeof(float) instead of the magic "*4". */
	cudaMemset(first_d, 0, (size_t)first_dim1_d*first_dim2_d*sizeof(float));
	cublasSetMatrix (first.getRowsNumber(), first.getColumnsNumber(), sizeof(float), first.tab, first.getRowsNumber(), (void*)first_d, first_dim1_d);

	cudaMemset(second_d, 0, (size_t)second_dim1_d*second_dim2_d*sizeof(float));
	cublasSetMatrix (second.getRowsNumber(), second.getColumnsNumber(), sizeof(float), second.tab, second.getRowsNumber(), (void*)second_d, second_dim1_d);

	/* C = 1.0f * A * B + 0.0f * C on the padded dimensions. */
	cublasSgemm ('n', 'n', first_dim1_d, second_dim2_d, first_dim2_d, 1.0f, first_d, first_dim1_d, second_d, second_dim1_d, 0.0f, result_d, result_dim1_d);

	cublasGetMatrix (result.getRowsNumber(), result.getColumnsNumber(), sizeof(float), result_d, result_dim1_d, result.tab, result.getRowsNumber());

	cublasFree (first_d);
	cublasFree (second_d);
	cublasFree (result_d);

	return 0;
}

/*
 * Element-wise scaling on the GPU: result = vector * scalar.
 * result and vector must share the same positive size.
 * Returns 0 on success, -1 on size mismatch or CUDA failure.
 */
int _gpu_scalar_times_vector(Vector& result, const Vector& vector, float scalar) {

	if (result.getSize() <= 0 || vector.getSize() <= 0)
		return -1;
	if (result.getSize() != vector.getSize())
		return -1;

	const int N = result.getSize();
	const size_t bytes = (size_t)N * sizeof(float);

	/* Allocations were previously unchecked; cudaFree(NULL) is a no-op,
	 * so the cleanup path is safe for partial allocations. */
	float *first_d = NULL, *result_d = NULL;
	if (cudaMalloc((void **) &first_d, bytes) != cudaSuccess ||
			cudaMalloc((void **) &result_d, bytes) != cudaSuccess) {
		cudaFree(first_d);
		cudaFree(result_d);
		return -1;
	}

	cudaMemcpy(first_d, vector.tab, bytes, cudaMemcpyHostToDevice);

	const int n_threads = 32;
	const int n_blocks = (N + n_threads - 1) / n_threads;	/* ceil-div */
	_gpu_times_scalar_kernel <<< n_blocks, n_threads >>> (result_d, first_d, scalar, N);

	cudaError_t err = cudaGetLastError();	/* catches a bad launch config */
	if (err == cudaSuccess)
		err = cudaMemcpy(result.tab, result_d, bytes, cudaMemcpyDeviceToHost);

	cudaFree(first_d);
	cudaFree(result_d);

	return (err == cudaSuccess) ? 0 : -1;
}

/*
 * Element-wise scaling on the GPU: result = matrix * scalar.
 * result and matrix must share the same positive dimensions; the data
 * is treated as a flat array of rows*cols floats.
 * Returns 0 on success, -1 on dimension mismatch or CUDA failure.
 */
int _gpu_scalar_times_matrix(Matrix& result, const Matrix& matrix, float scalar) {

	if (result.getRowsNumber() <= 0 || result.getColumnsNumber() <= 0 || matrix.getRowsNumber() <= 0 || matrix.getColumnsNumber()
			<= 0)
		return -1;
	if (result.getRowsNumber() != matrix.getRowsNumber() || result.getColumnsNumber() != matrix.getColumnsNumber())
		return -1;

	const int N = result.getRowsNumber() * result.getColumnsNumber();
	const size_t bytes = (size_t)N * sizeof(float);

	/* Allocations were previously unchecked; cudaFree(NULL) is a no-op,
	 * so the cleanup path is safe for partial allocations. */
	float *first_d = NULL, *result_d = NULL;
	if (cudaMalloc((void **) &first_d, bytes) != cudaSuccess ||
			cudaMalloc((void **) &result_d, bytes) != cudaSuccess) {
		cudaFree(first_d);
		cudaFree(result_d);
		return -1;
	}

	cudaMemcpy(first_d, matrix.tab, bytes, cudaMemcpyHostToDevice);

	const int n_threads = 32;
	const int n_blocks = (N + n_threads - 1) / n_threads;	/* ceil-div */
	_gpu_times_scalar_kernel <<< n_blocks, n_threads >>> (result_d, first_d, scalar, N);

	cudaError_t err = cudaGetLastError();	/* catches a bad launch config */
	if (err == cudaSuccess)
		err = cudaMemcpy(result.tab, result_d, bytes, cudaMemcpyDeviceToHost);

	cudaFree(first_d);
	cudaFree(result_d);

	return (err == cudaSuccess) ? 0 : -1;
}

/*
 * Element-wise division on the GPU: result = vector / scalar.
 * result and vector must share the same positive size.
 * NOTE(review): scalar == 0 is not rejected; each element becomes IEEE
 * inf/NaN — confirm that is the intended contract.
 * Returns 0 on success, -1 on size mismatch or CUDA failure.
 */
int _gpu_vector_div_scalar(Vector& result, const Vector& vector, float scalar) {

	if (result.getSize() <= 0 || vector.getSize() <= 0)
		return -1;
	if (result.getSize() != vector.getSize())
		return -1;

	const int N = result.getSize();
	const size_t bytes = (size_t)N * sizeof(float);

	/* Allocations were previously unchecked; cudaFree(NULL) is a no-op,
	 * so the cleanup path is safe for partial allocations. */
	float *first_d = NULL, *result_d = NULL;
	if (cudaMalloc((void **) &first_d, bytes) != cudaSuccess ||
			cudaMalloc((void **) &result_d, bytes) != cudaSuccess) {
		cudaFree(first_d);
		cudaFree(result_d);
		return -1;
	}

	cudaMemcpy(first_d, vector.tab, bytes, cudaMemcpyHostToDevice);

	const int n_threads = 32;
	const int n_blocks = (N + n_threads - 1) / n_threads;	/* ceil-div */
	_gpu_div_scalar_kernel <<< n_blocks, n_threads >>> (result_d, first_d, scalar, N);

	cudaError_t err = cudaGetLastError();	/* catches a bad launch config */
	if (err == cudaSuccess)
		err = cudaMemcpy(result.tab, result_d, bytes, cudaMemcpyDeviceToHost);

	cudaFree(first_d);
	cudaFree(result_d);

	return (err == cudaSuccess) ? 0 : -1;
}

/*
 * Element-wise division on the GPU: result = matrix / scalar.
 * result and matrix must share the same positive dimensions; the data
 * is treated as a flat array of rows*cols floats.
 * NOTE(review): scalar == 0 is not rejected; each element becomes IEEE
 * inf/NaN — confirm that is the intended contract.
 * Returns 0 on success, -1 on dimension mismatch or CUDA failure.
 */
int _gpu_matrix_div_scalar(Matrix& result, const Matrix& matrix, float scalar) {

	if (result.getRowsNumber() <= 0 || result.getColumnsNumber() <= 0 || matrix.getRowsNumber() <= 0 || matrix.getColumnsNumber()
			<= 0)
		return -1;
	if (result.getRowsNumber() != matrix.getRowsNumber() || result.getColumnsNumber() != matrix.getColumnsNumber())
		return -1;

	const int N = result.getRowsNumber() * result.getColumnsNumber();
	const size_t bytes = (size_t)N * sizeof(float);

	/* Allocations were previously unchecked; cudaFree(NULL) is a no-op,
	 * so the cleanup path is safe for partial allocations. */
	float *first_d = NULL, *result_d = NULL;
	if (cudaMalloc((void **) &first_d, bytes) != cudaSuccess ||
			cudaMalloc((void **) &result_d, bytes) != cudaSuccess) {
		cudaFree(first_d);
		cudaFree(result_d);
		return -1;
	}

	cudaMemcpy(first_d, matrix.tab, bytes, cudaMemcpyHostToDevice);

	const int n_threads = 32;
	const int n_blocks = (N + n_threads - 1) / n_threads;	/* ceil-div */
	_gpu_div_scalar_kernel <<< n_blocks, n_threads >>> (result_d, first_d, scalar, N);

	cudaError_t err = cudaGetLastError();	/* catches a bad launch config */
	if (err == cudaSuccess)
		err = cudaMemcpy(result.tab, result_d, bytes, cudaMemcpyDeviceToHost);

	cudaFree(first_d);
	cudaFree(result_d);

	return (err == cudaSuccess) ? 0 : -1;
}
