
#pragma once

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cusp/array1d.h>
#include <cusp/blas.h>
#include <cusp/multiply.h>
#include <cusp/monitor.h>
#include <cusp/linear_operator.h>

extern "C" {
	#include "cublas.h"
}


#define MIN(X,Y) ( (X) >= (Y) ? (Y) : (X) )
#define NBLOCKS	 MIN( (N + B-1)/B, 65535 )

#define B 128
#define SWARP 32
#define MAX_BLOCKS 65535


namespace blas = cusp::blas;

namespace cusp
{
namespace krylov
{

template <typename ValueType, unsigned int blockSize>
__device__ ValueType binary_reduction(ValueType value) {
	// Block-wide sum reduction: every thread contributes `value`, every
	// thread receives the block total.
	//
	// Preconditions: blockDim.x == blockSize and blockSize is a power of
	// two (all call sites in this file launch with the matching size).
	// Must be called by ALL threads of the block -- it contains barriers.
	//
	// Fix: the original used the classic warp-synchronous tail (unguarded,
	// unsynchronized adds for tid < 32 without `volatile`), which is only
	// safe on pre-Volta hardware with lockstep warps, and it sized the
	// shared buffer with the file-wide B instead of the template's
	// blockSize.  The barriered tree below performs the identical pairwise
	// sums and is correct on all architectures.
	__shared__ ValueType shmem[blockSize];

	unsigned int tid = threadIdx.x;

	shmem[tid] = value;

	__syncthreads();

	// Pairwise tree reduction; halve the active-thread count each step.
	for (unsigned int stride = blockSize / 2; stride > 0; stride >>= 1) {
		if (tid < stride) {
			shmem[tid] += shmem[tid + stride];
		}
		__syncthreads();
	}

	// The loop's final barrier makes shmem[0] visible to every thread.
	return shmem[0];
}
	
////////////////
// Host Paths //
////////////////
template <typename LinearOperator,
          typename Vector1,
          typename Vector2>
void multiply(const LinearOperator&  A,
              const Vector1& x,
                    Vector2& y,
              cusp::host_memory,
              cusp::host_memory,
              cusp::host_memory)
{
    // All operands live in host memory: defer to the generic cusp
    // matrix-vector multiply, y = A * x.
    cusp::multiply(A, x, y);
}

//////////////////
// Device Paths //
//////////////////
template <typename LinearOperator,
		typename Vector1,
		typename Vector2>
// Device path for a DENSE operator: y = A * x via one cuBLAS GEMV call.
//
// NOTE(review): cublasDgemv is hard-coded, so this silently assumes
// ValueType == double; instantiating with float would reinterpret the
// device buffers -- TODO confirm all dense callers use double.
// It also uses the legacy (handle-less) cuBLAS API and the return status
// is not checked.  cuBLAS is column-major: 'n' with lda == N treats
// A.values as an N x N column-major matrix; A.num_cols is ignored, so the
// operator is presumed square -- verify for non-square use.
void multiply(const LinearOperator& A,
              const Vector1& x,
                    Vector2& y,
              cusp::device_memory,
              cusp::device_memory,
              cusp::device_memory,
			  cusp::dense_format)
{
	const size_t N = A.num_rows;
	// y <- 1.0 * A * x + 0.0 * y
	cublasDgemv('n', N, N, ((double)1.0), thrust::raw_pointer_cast(&A.values[0]), N, thrust::raw_pointer_cast(&x[0]), 1, ((double)0.0), thrust::raw_pointer_cast(&y[0]), 1);
}

template <typename LinearOperator,
          typename Vector1,
          typename Vector2>
void multiply(const LinearOperator&  A,
              const Vector1& x,
                    Vector2& y,
              cusp::device_memory,
              cusp::device_memory,
              cusp::device_memory,
			  cusp::sparse_format)
{
	// Sparse operator on the device: the generic cusp SpMV already has a
	// tuned device implementation, so simply forward to it.
	cusp::multiply(A, x, y);
}

template <typename LinearOperator,
          typename Vector1,
          typename Vector2>
void multiply(const LinearOperator& A,
              const Vector1& x,
                    Vector2& y,
              cusp::device_memory,
              cusp::device_memory,
              cusp::device_memory)
{
	// All-device case: re-dispatch on the operator's storage format
	// (dense -> cuBLAS GEMV path, sparse -> generic cusp SpMV).
	typedef typename LinearOperator::memory_space OpSpace;
	typedef typename Vector1::memory_space        XSpace;
	typedef typename Vector2::memory_space        YSpace;
	typedef typename LinearOperator::format       OpFormat;

	cusp::krylov::multiply(A, x, y, OpSpace(), XSpace(), YSpace(), OpFormat());
}

template <typename LinearOperator,
          typename Vector1,
          typename Vector2>
void multiply(LinearOperator&  A,
              Vector1& x,
              Vector2& y,
              cusp::known_format)
{
	// Operator is a built-in cusp format: dispatch on the memory spaces
	// of the three operands (host vs. device paths).
	typedef typename LinearOperator::memory_space OpSpace;
	typedef typename Vector1::memory_space        XSpace;
	typedef typename Vector2::memory_space        YSpace;

	cusp::krylov::multiply(A, x, y, OpSpace(), XSpace(), YSpace());
}

template <typename LinearOperator,
          typename Vector1,
          typename Vector2>
void multiply(LinearOperator&  A,
              Vector1& x,
              Vector2& y,
              cusp::unknown_format)
{
	// User-supplied linear operator: treat it as a callable, y = A(x).
	A(x, y);
}

template <typename LinearOperator,
          typename Vector1,
          typename Vector2>
void multiply(const LinearOperator&  A,
              const Vector1& x,
                    Vector2& y)
{
	// Top-level entry: dispatch on whether A is a known cusp storage
	// format or an opaque user-defined operator.
	typedef typename LinearOperator::format OpFormat;
	cusp::krylov::multiply(A, x, y, OpFormat());
}

// Final reduction stage: sums the N per-block partial sums in
// result[0..N) and writes the total into R[index].
//
// Launch contract (see compute_basis): a single block of blockSize
// threads (<<<1, B>>>), so gridDim.x == 1 and the grid-stride loop makes
// at most a few passes.  Each thread consumes pairs (ind, ind+blockSize)
// of a 2*blockSize tile per stride.
template <typename ValueType, unsigned int blockSize>
__global__ void binary_reduction(ValueType* result, ValueType* R, size_t index, size_t N) {

	ValueType sum = ValueType(0);
	
	unsigned int tid = threadIdx.x;
	unsigned int ind = blockIdx.x*(blockSize*2) + tid;
	unsigned int gridSize = blockSize*2*gridDim.x;
	// Main loop: both halves of the tile are in range, add the pair.
	while(ind + blockSize < N)
	{
		sum += result[ind] + result[ind + blockSize];
		ind += gridSize;
	}
	
	// Tail: the partner element ind+blockSize is past the end, so only
	// the first element (if still in range) remains for this thread.
	if(ind < N) {
		sum += result[ind];
	}
	
	// Redundant barrier (the device-side binary_reduction synchronizes
	// internally before reading shared memory), but harmless.
	__syncthreads();
	
	// Fold the per-thread partials into a single block-wide sum.
	sum = binary_reduction<ValueType, blockSize>(sum);
	
	if(threadIdx.x == 0)
	{
		R[index] = sum;
	}
}

// One orthogonalization dot product: accumulates partial sums of
// <w, V[:,k]> and writes one partial per block into result[blockIdx.x].
// A second kernel (binary_reduction) folds the per-block partials into H.
//
// Launch contract (see compute_basis): blockDim.x == blockSize; each
// block walks tiles of 2*blockSize elements in a grid-stride fashion.
// V is column-major with leading dimension v_pitch, so column k starts at
// V + k * v_pitch.  The parameter i is unused here; it is retained so the
// call sites' argument lists stay unchanged.
template <typename ValueType, unsigned int blockSize>
__global__ void dot_product(ValueType* V, size_t v_pitch, ValueType* w, size_t i, size_t k, size_t N, ValueType* result)
{
	ValueType value_h = ValueType(0);

	unsigned int tid = threadIdx.x;
	unsigned int ind = blockIdx.x*(blockSize*2) + tid;
	unsigned int gridSize = blockSize*2*gridDim.x;

	// Main loop: both halves of the 2*blockSize tile are in range.
	while(ind + blockSize < N)
	{
		value_h += w[ind] * V[k * v_pitch + ind] + w[ind + blockSize] * V[k * v_pitch + ind + blockSize];
		ind += gridSize;
	}

	// Tail element (its partner ind + blockSize is past the end).
	if(ind < N) {
		value_h += w[ind] * V[k * v_pitch + ind];
	}

	__syncthreads();

	// Fold the per-thread partials within this block.  Fix: use the
	// template's own blockSize here -- the original hard-coded B, which
	// only worked because every instantiation uses blockSize == B.
	value_h = binary_reduction<ValueType, blockSize>(value_h);

	// All threads hold the same total; one store per block suffices
	// (the original redundantly stored from every thread).
	if(tid == 0) {
		result[blockIdx.x] = value_h;
	}
}

// w <- w - h * V[:,k], where h = R[index] is the Hessenberg entry just
// produced by the reduction kernels.  Plain grid-stride loop over the N
// vector elements; i and k beyond the column offset are unused but kept
// for a uniform kernel signature.
template <typename ValueType, unsigned int blockSize>
__global__ void axpy_operation(ValueType* V, size_t v_pitch, ValueType* w, ValueType* R, size_t index, size_t i, size_t k, size_t N)
{
	const ValueType coeff = R[index];
	const ValueType* column = V + k * v_pitch;
	const unsigned int stride = gridDim.x * blockDim.x;

	for (unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; idx < N; idx += stride)
	{
		w[idx] = w[idx] - coeff * column[idx];
	}
}

template <typename LinearOperator1,
		  typename LinearOperator2,
		  typename Vector>
// Device path of one Arnoldi orthogonalization sweep (modified
// Gram-Schmidt step i): for k = 0..i, H(k,i) = <w, V[:,k]> and then
// w <- w - H(k,i) * V[:,k].  H and V are dense, column-major with pitch.
// The parameter m is unused on this path (kept for a uniform signature).
void compute_basis(LinearOperator1& H, LinearOperator2& V, Vector& w, size_t i, size_t m, size_t N,
				      cusp::device_memory, cusp::device_memory, cusp::device_memory)
{
	typedef typename LinearOperator1::value_type   ValueType;
	typedef typename LinearOperator1::memory_space MemorySpace;

	// One partial sum per block, capped at the hardware grid limit.
	const int G = MIN( (N + B-1)/B, 65535 );

	// Zero-initialized so reducing over all G entries stays correct even
	// though dot_product (launched with (G+1)/2 blocks) writes only the
	// first (G+1)/2 of them.
	cusp::array1d<ValueType, MemorySpace> result(G,ValueType(0));

	for(size_t k = 0; k < i+1; ++k) {
		// Per-block partials of <w, V[:,k]>.
		dot_product<ValueType, B><<<(G+1)/2,B>>>(thrust::raw_pointer_cast(&V.values[0]), V.pitch, thrust::raw_pointer_cast(&w[0]), i, k, N, thrust::raw_pointer_cast(&result[0]));
		// cudaDeviceSynchronize replaces the deprecated
		// cudaThreadSynchronize and its return value is the actual error,
		// so report it directly instead of re-querying cudaGetLastError.
		cudaError_t err = cudaDeviceSynchronize();
		if (cudaSuccess != err) {
			fprintf(stderr, " cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(err));
			exit(128);
		}

		// Fold the per-block partials into H(k,i).
		binary_reduction<ValueType, B><<<1,B>>>(thrust::raw_pointer_cast(&result[0]), thrust::raw_pointer_cast(&H.values[0]), (i * H.pitch + k), G);
		err = cudaDeviceSynchronize();
		if (cudaSuccess != err) {
			fprintf(stderr, " cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(err));
			exit(128);
		}

		// w <- w - H(k,i) * V[:,k]
		axpy_operation<ValueType, B><<<G,B>>>(thrust::raw_pointer_cast(&V.values[0]), V.pitch, thrust::raw_pointer_cast(&w[0]), thrust::raw_pointer_cast(&H.values[0]), (i * H.pitch + k), i, k, N);
		err = cudaDeviceSynchronize();
		if (cudaSuccess != err) {
			fprintf(stderr, " cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(err));
			exit(128);
		}
	}
}

template <typename LinearOperator1,
		  typename LinearOperator2,
		  typename Vector>
void compute_basis(LinearOperator1& H, LinearOperator2& V, Vector& w, size_t i, size_t m, size_t N,
				   cusp::host_memory, cusp::host_memory, cusp::host_memory)
{
	// Host path of the Arnoldi orthogonalization sweep (modified
	// Gram-Schmidt): project w against basis columns V[:,0..i], recording
	// the coefficients, then store them into column i of H in one copy.
	typedef typename LinearOperator1::value_type   ValueType;
	typedef typename LinearOperator1::memory_space MemorySpace;
	typedef typename cusp::array1d<ValueType, MemorySpace>::iterator Iterator;
	typedef cusp::array1d_view<Iterator> ArrayView;

	// Coefficient column for H(:,i); entries past index i stay zero.
	cusp::array1d<ValueType, MemorySpace> coeffs(m+1,ValueType(0));

	for(size_t k = 0; k <= i; ++k)
	{
		// View of basis column V[:,k] (column-major storage with pitch).
		Iterator colBegin = V.values.begin() + k * V.pitch;
		ArrayView basisColumn(colBegin, colBegin + V.num_rows);

		// h_{k,i} = <w, v_k>, then w <- w - h_{k,i} * v_k
		ValueType hki = blas::dot(w, basisColumn);
		blas::axpy(basisColumn, w, -hki);

		coeffs[k] = hki;
	}

	// H(:,i) <- coeffs
	Iterator hBegin = H.values.begin() + i * H.pitch;
	ArrayView columnH(hBegin, hBegin + H.num_rows);
	blas::copy(coeffs, columnH);
}

template <typename LinearOperator1,
		  typename LinearOperator2,
		  typename Vector>
void compute_basis(LinearOperator1& H, LinearOperator2& V, Vector& w, size_t i, size_t m, size_t N)
{
	// Dispatch the orthogonalization sweep on the operands' memory spaces.
	typedef typename LinearOperator1::memory_space HSpace;
	typedef typename LinearOperator2::memory_space VSpace;
	typedef typename Vector::memory_space          WSpace;

	compute_basis(H, V, w, i, m, N, HSpace(), VSpace(), WSpace());
}

// Applies the i previously-computed Givens rotations to the new column i
// of H.  Launched with exactly one block of 2 threads; thread t handles
// row k+t of each rotation pair.
//
// Fix: the original had no barrier between the shared-memory stores and
// the cross-thread reads below (thread 0 reads shmem[1] written by
// thread 1, and vice versa), relying on pre-Volta warp lockstep.
// Explicit __syncthreads() calls are inserted (a) after the stores and
// (b) at the end of each iteration, before shmem is overwritten for the
// next k.  Both threads execute the same trip count, so the barriers are
// non-divergent.
template <typename ValueType>
__global__ void apply_old_givens_rotations(ValueType* H, ValueType* sn, ValueType* cs, size_t i, size_t pitch)
{
	__shared__ ValueType shmem[6];

	for(size_t k = 0; k < i; ++k)
	{
		// shmem[0..1]: the pair H(k,i), H(k+1,i);
		// shmem[2+2t], shmem[3+2t]: thread t's copies of cs[k+1], sn[k+1].
		shmem[threadIdx.x] = H[i * pitch + k + threadIdx.x];
		shmem[2 + 2*threadIdx.x] = cs[k+1];
		shmem[3 + 2*threadIdx.x] = sn[k+1];
		__syncthreads();

		// Thread 0: H(k,i)   =  cs*H(k,i) + sn*H(k+1,i)
		// Thread 1: H(k+1,i) = -sn*H(k,i) + cs*H(k+1,i)
		int c = (1 - 2*threadIdx.x);
		H[i * pitch + k + threadIdx.x] = c*shmem[2 + threadIdx.x]*shmem[0] + shmem[3 - threadIdx.x]*shmem[1];
		__syncthreads();
	}
}

template <typename LinearOperator,
		  typename Vector1,
		  typename Vector2>
// Device wrapper: launch the 2-thread rotation kernel on column i of H
// and abort on any launch or execution error.
void apply_old_givens_rotations(LinearOperator& H,
        Vector1& sn,
		Vector2& cs, size_t i,
		cusp::device_memory,
        cusp::device_memory,
        cusp::device_memory)
{
	apply_old_givens_rotations<<<1,2>>>(thrust::raw_pointer_cast(&H.values[0]), thrust::raw_pointer_cast(&sn[0]), thrust::raw_pointer_cast(&cs[0]), i, H.pitch);

	// cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize;
	// its return value is reported directly.
	cudaError_t err = cudaDeviceSynchronize();
	if (cudaSuccess != err) {
		fprintf(stderr, " cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(err));
		exit(128);
	}
}

template <typename LinearOperator,
		  typename Vector1,
		  typename Vector2>
void apply_old_givens_rotations(LinearOperator& H,
        Vector1& sn,
		Vector2& cs, size_t i,
		cusp::host_memory,
        cusp::host_memory,
        cusp::host_memory)
{
	// Host path: apply rotations 1..i in order to column i of H.
	typedef typename LinearOperator::value_type ValueType;

	for(size_t k = 0; k < i; ++k)
	{
		const ValueType c   = cs[k+1];
		const ValueType sv  = sn[k+1];
		const ValueType hk  = H(k,i);
		const ValueType hk1 = H(k+1,i);

		// [ c  sv ] [ hk  ]
		// [-sv  c ] [ hk1 ]
		H(k,i)   =  c*hk + sv*hk1;
		H(k+1,i) = -sv*hk + c*hk1;
	}
}

template <typename LinearOperator,
		  typename Vector1,
		  typename Vector2>
void apply_old_givens_rotations(LinearOperator& H,
		Vector1& sn,
        Vector2& cs, size_t i)
{
	// Dispatch on the memory spaces of H, sn and cs.
	typedef typename LinearOperator::memory_space HSpace;
	typedef typename Vector1::memory_space        SnSpace;
	typedef typename Vector2::memory_space        CsSpace;

	apply_old_givens_rotations(H, sn, cs, i, HSpace(), SnSpace(), CsSpace());
}


// Forms the new Givens rotation that annihilates H(i+1,i) and applies it
// to the diagonal pair and to the residual vector s.  Launched with one
// block of exactly 2 threads.
//
// NOTE(review): unlike the host path, this kernel never stores new_h into
// H(i+1,i); it reads H[i*pitch + i + 1] directly, so the caller must have
// already written the subdiagonal entry there, and the new_h parameter is
// unused -- TODO confirm against the GMRES driver.
//
// Fix: explicit __syncthreads() barriers replace the implicit
// warp-lockstep assumption (not guaranteed on Volta+): one after the
// shared stores so hypot sees both h entries, and one after both threads
// have read s[i] but before thread 0 overwrites it.
template <typename ValueType>
__global__ void compute_new_givens_rotation(ValueType* H, size_t pitch, ValueType* s, ValueType* sn, ValueType* cs, ValueType new_h, size_t i)
{
	__shared__ ValueType h[2];
	__shared__ ValueType tmp[2];

	// h[0] = H(i,i), h[1] = H(i+1,i)
	h[threadIdx.x] = H[i * pitch + i + threadIdx.x];
	__syncthreads();

	ValueType beta = hypot(h[0], h[1]);
	ValueType value = h[threadIdx.x] / beta;   // thread 0: cos, thread 1: sin
	ValueType old_s = s[i];
	__syncthreads();

	// Diagonal becomes beta; subdiagonal becomes 0.
	H[i * pitch + i + threadIdx.x] = (1 - threadIdx.x) * beta;
	// s[i] <- cos*s[i]  (thread 0),  s[i+1] <- -sin*s[i]  (thread 1)
	int c = 1 - 2*threadIdx.x;
	tmp[threadIdx.x] = c*value;
	s[i + threadIdx.x] = tmp[threadIdx.x]*old_s;

	if(threadIdx.x == 0) {
		cs[i+1] = value;
	}
	else {
		sn[i+1] = value;
	}
}

template <typename LinearOperator,
		  typename ValueType,
		  typename Vector1,
		  typename Vector2,
		  typename Vector3>
// Device wrapper: launch the 2-thread rotation-construction kernel and
// abort on any launch or execution error.
void compute_new_givens_rotation(LinearOperator& H,
		Vector1& s,
        Vector2& sn,
		Vector3& cs, ValueType new_h, size_t i,
		cusp::device_memory,
        cusp::device_memory,
        cusp::device_memory,
		cusp::device_memory)
{
	compute_new_givens_rotation<<<1,2>>>(thrust::raw_pointer_cast(&H.values[0]), H.pitch, thrust::raw_pointer_cast(&s[0]), thrust::raw_pointer_cast(&sn[0]), thrust::raw_pointer_cast(&cs[0]), new_h, i);

	// cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize;
	// its return value is reported directly.
	cudaError_t err = cudaDeviceSynchronize();
	if (cudaSuccess != err) {
		fprintf(stderr, " cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(err));
		exit(128);
	}
}

template <typename LinearOperator,
		  typename ValueType,
		  typename Vector1,
		  typename Vector2,
		  typename Vector3>
void compute_new_givens_rotation(LinearOperator& H,
		Vector1& s,
        Vector2& sn,
		Vector3& cs, ValueType new_h, size_t i,
		cusp::host_memory,
        cusp::host_memory,
        cusp::host_memory,
		cusp::host_memory)
{
	// Host path: store the new subdiagonal entry, build the rotation that
	// zeroes it, and update the diagonal pair and the residual vector s.
	H(i+1,i) = new_h;

	ValueType beta = hypot(H(i,i), H(i+1,i));

	sn[i+1] = H(i+1,i) / beta;
	cs[i+1] = H(i,i) / beta;
	H(i,i) = beta;

	// Rotate the residual; s[i+1] must use the OLD s[i], so it is
	// written first.
	s[i+1] = -sn[i+1]*s[i];
	s[i]   =  cs[i+1]*s[i];

	H(i+1,i) = ValueType(0);
}

template <typename LinearOperator,
		  typename ValueType,
		  typename Vector1,
		  typename Vector2,
		  typename Vector3>
void compute_new_givens_rotation(LinearOperator& H,
		Vector1& s,
        Vector2& sn,
		Vector3& cs, ValueType new_h, size_t i)
{
	// Dispatch on the memory spaces of all four containers.
	typedef typename LinearOperator::memory_space HSpace;
	typedef typename Vector1::memory_space        SSpace;
	typedef typename Vector2::memory_space        SnSpace;
	typedef typename Vector3::memory_space        CsSpace;

	compute_new_givens_rotation(H, s, sn, cs, new_h, i,
	                            HSpace(), SSpace(), SnSpace(), CsSpace());
}


// Back substitution for the least-squares solve: for j down to 1,
// y[j-1] = (s[j-1] - sum_{k=j..i} H(j-1,k)*y[k]) / H(j-1,j-1).
// Launched with one block of SWARP (32) threads; every outer iteration is
// executed by the whole block, with the inner dot product strided across
// the threads and folded by binary_reduction.  H is column-major with
// leading dimension pitch, hence H(j-1,k) == H[k*pitch + j-1].
//
// Fix: a __syncthreads() is added after thread 0 stores y[j-1], because
// the next outer iteration immediately reads y[j-1] from every thread;
// the original relied on warp-lockstep execution, which is not guaranteed
// on Volta and later.  j is uniform across the block, so the barrier is
// non-divergent.
template <typename ValueType>
__global__ void compute_minimization(ValueType* H, size_t pitch, ValueType* s, ValueType* y, size_t i, size_t j)
{
	ValueType sum;

	while(j > 0)
	{
		sum = ValueType(0);

		// Strided partial dot product of row j-1 of H with y[j..i].
		size_t k = j + threadIdx.x;
		while(k <= i)
		{
			sum += H[k * pitch + j - 1]*y[k];
			k += blockDim.x;
		}

		__syncthreads();

		sum = binary_reduction<ValueType, SWARP>(sum);

		if(threadIdx.x == 0)
		{
			y[j-1] = (1.0 / H[(j - 1) * pitch + j - 1])*(s[j-1] - sum);
		}

		// Make y[j-1] visible to all threads before the next iteration.
		__syncthreads();

		--j;
	}
}

template <typename LinearOperator,
		typename Vector1,
		typename Vector2>
// Device wrapper: run the back-substitution kernel on one warp-sized
// block and abort on any launch or execution error.
void compute_minimization(LinearOperator&  H,
              Vector1& s,
              Vector2& y, size_t i, size_t j,
              cusp::device_memory,
              cusp::device_memory,
              cusp::device_memory)
{
	compute_minimization<<<1,SWARP>>>(thrust::raw_pointer_cast(&H.values[0]), H.pitch, thrust::raw_pointer_cast(&s[0]), thrust::raw_pointer_cast(&y[0]), i, j);

	// cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize;
	// its return value is reported directly.
	cudaError_t err = cudaDeviceSynchronize();
	if (cudaSuccess != err) {
		fprintf(stderr, " cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(err));
		exit(128);
	}
}

template <typename LinearOperator,
		typename Vector1,
		typename Vector2>
void compute_minimization(LinearOperator&  H,
              Vector1& s,
              Vector2& y, size_t i, size_t j,
              cusp::host_memory,
              cusp::host_memory,
              cusp::host_memory)
{
	// Host back substitution: for j down to 1,
	// y[j-1] = (1/H(j-1,j-1)) * (s[j-1] - sum_{k=j..i} H(j-1,k)*y[k]).
	typedef typename LinearOperator::value_type ValueType;

	for( ; j > 0; --j)
	{
		ValueType dot = ValueType(0);

		for(size_t k = j; k <= i; ++k)
			dot += H(j-1,k)*y[k];

		y[j-1] = (ValueType(1) / H(j-1,j-1))*(s[j-1] - dot);
	}
}

template <typename LinearOperator,
		  typename Vector1,
		  typename Vector2>
void compute_minimization(LinearOperator& H,
		Vector1& s,
        Vector2& y, size_t i, size_t j)
{
	// Dispatch on the memory spaces of H, s and y.
	typedef typename LinearOperator::memory_space HSpace;
	typedef typename Vector1::memory_space        SSpace;
	typedef typename Vector2::memory_space        YSpace;

	compute_minimization(H, s, y, i, j, HSpace(), SSpace(), YSpace());
}

template <class LinearOperator,
		  class Vector>
void update_approximation(LinearOperator& V, 
		Vector& x, 
		Vector& y, size_t N, size_t i)
{
	// x <- x + V(:,0..i) * y(0..i): accumulate the Krylov correction in a
	// temporary vector, then fold it into x with a single axpy.
	typedef typename LinearOperator::value_type   ValueType;
	typedef typename LinearOperator::memory_space MemorySpace;
	typedef typename cusp::array1d<ValueType, MemorySpace>::iterator Iterator;
	typedef cusp::array1d_view<Iterator> ArrayView;

	cusp::array1d<ValueType, MemorySpace> correction(N,ValueType(0));

	for(size_t j = 0; j <= i; ++j)
	{
		// correction <- correction + y[j] * V(:,j)
		Iterator colBegin = V.values.begin() + j * V.pitch;
		ArrayView columnVj(colBegin, colBegin + V.num_rows);

		ValueType yj = y[j];
		blas::axpy(columnVj, correction, yj);
	}

	// x <- x + correction
	blas::axpy(correction, x, ValueType(1));
}

} // end namespace krylov
} // end namespace cusp

