#include <cusp/detail/device/arch.h>
#include <cusp/detail/device/common.h>
#include <cusp/transpose.h>
#include <cusp/blas.h>
#include <cusp/device/block/qr.h>
#include <thrust/detail/backend/dereference.h>
#include <thrust/pair.h>

#include <cusp/detail/profiler.h>

#include <cusp/print.h>
#include <cusp/io/dense_matrix.h>
#include <cusp/io/matrix_market.h>
#include <iostream>
#include <fstream>

namespace cusp
{
namespace precond
{
namespace detail
{
namespace device
{

/** \brief Compute the sizes of the Ahat matrices.
	The number of columns is exact, but the number of rows is
	an upper bound.

	This must be run as one block with multiple threads.

	\param AFirstI iterator over row_offsets of A transposed
	\param ALastI
	\param MFirstI iterator over row_offsets of M transposed
	\param MLastI
	\param MFirstJ iterator over column_indices of M transposed
	\param temp vector of IndexType for temporary storage with the same dimension as A.row_offsets
	\param [out] sizesRowsFirst beginning of array where row sizes will be stored
	\param [out] sizesColumnsFirst beginning of array where column sizes will be stored
*/
template<typename RandomAccessIterator1,
	 typename RandomAccessIterator2,
	 typename RandomAccessIterator3,
	 typename RandomAccessIterator4,
	 typename RandomAccessIterator5>
__global__ void
get_sizes(RandomAccessIterator1 AFirstI, RandomAccessIterator1 ALastI,
	  RandomAccessIterator2 MFirstI, RandomAccessIterator2 MLastI,
	  RandomAccessIterator3 MFirstJ,
	  RandomAccessIterator4 temp,
	  RandomAccessIterator5 sizesRowsFirst,
	  RandomAccessIterator5 sizesColumnsFirst)
{
	typedef typename RandomAccessIterator5::value_type SizeType;
	using thrust::detail::backend::dereference;

	/* for every column in M get the number of nnz */
	/* MFirstI and MLastI are the beginning and, respectively, the end
	   of the row_offsets of M transposed */
	RandomAccessIterator2 MRow = MFirstI + threadIdx.x;
	RandomAccessIterator5 sizes = sizesColumnsFirst + threadIdx.x;
	--MLastI; /* row_offsets has one entry more than the number of rows */
	for(; MRow < MLastI; MRow += blockDim.x, sizes += blockDim.x)
	{
		/* nnz in a CSR row = difference of consecutive row offsets */
		dereference(sizes) = dereference(MRow+1) - dereference(MRow);
	}

	/* for every column of A get the number of nnz, stored in temp */
	RandomAccessIterator1 ARow = AFirstI + threadIdx.x;
	RandomAccessIterator4 tI = temp + threadIdx.x;
	--ALastI;
	for(; ARow < ALastI; ARow += blockDim.x, tI += blockDim.x)
	{
		dereference(tI) = dereference(ARow+1) - dereference(ARow);
	}

	/* temp must be completely written by all threads before the loop
	   below reads it (kernel runs as a single block, so a block barrier
	   suffices) */
	__syncthreads();

	/* for every column i of M */
	/*   for each nnz row j in i (j is column of A) */
	/*     sum the number of nnz rows in column j of A (temp[j]) */
	/* NOTE(review): temp is indexed by A's column indices, so this assumes
	   temp spans A.num_rows entries (A square) -- confirm at call site */
	MRow = MFirstI + threadIdx.x;
	sizes = sizesRowsFirst + threadIdx.x;
	tI = temp; /* reset to the start of temp; indexed absolutely below */
	for(; MRow < MLastI; MRow += blockDim.x, sizes += blockDim.x)
	{
		SizeType v = 0;
		/* upper bound: duplicate rows between columns of A counted once each */
		for(RandomAccessIterator3 j = MFirstJ + dereference(MRow); j < MFirstJ + dereference(MRow+1); ++j)
			v += dereference(tI + dereference(j));
		dereference(sizes) = v;
	}
}

/** \brief Get indices of rows needed for Ahats.
	For every Ahat copy the indices of the rows of A which have nonzero items to
	the array starting at \a RowsIdxFirst.

	RowsOffsets[i] is the offset from RowsIdxFirst where the indices for i-th Ahat matrix start.
*/
template<typename RandomAccessIterator1,
	 typename RandomAccessIterator2,
	 typename RandomAccessIterator3,
	 typename RandomAccessIterator4,
	 typename RandomAccessIterator5,
	 typename RandomAccessIterator6,
	 typename RandomAccessIterator7>
__global__
void get_row_indices(RandomAccessIterator1 AFirstI, RandomAccessIterator1 ALastI,
		     RandomAccessIterator2 AFirstJ,
		     RandomAccessIterator3 MFirstI, RandomAccessIterator3 MLastI,
		     RandomAccessIterator4 MFirstJ,
		     RandomAccessIterator5 RowsOffsets,
		     RandomAccessIterator6 RowsIdxFirst,
		     RandomAccessIterator7 keys)
{
	using thrust::detail::backend::dereference;

	typedef typename thrust::iterator_difference<RandomAccessIterator1>::type DiffType;

	DiffType MNumRows = MLastI - MFirstI;
	// Each row of M^t (column of M) is assigned to a thread block
	if (blockIdx.x < MNumRows)
	{
		// Move this block's pointers to the location for this
		// particular row (note: RowsOffsets holds local offsets)
		RowsIdxFirst += dereference(RowsOffsets + blockIdx.x);
		keys += dereference(RowsOffsets + blockIdx.x);

		RandomAccessIterator3 MRow = MFirstI + blockIdx.x;
		RandomAccessIterator4 MCol = MFirstJ + dereference(MRow);
		RandomAccessIterator4 MColEnd = MFirstJ + dereference(MRow + 1);
		// Go through all column indices in this particular row
		for(; MCol < MColEnd; ++MCol)
		{
			// dereference(MCol) names a column of M, i.e. a row of A^t,
			// i.e. a column of A; copy all row indices of that column
			RandomAccessIterator1 ARow = AFirstI + dereference(MCol);
			RandomAccessIterator2 ACol = AFirstJ + dereference(ARow);
			RandomAccessIterator2 AColEnd = AFirstJ + dereference(ARow + 1);
			DiffType ACol_numRows = AColEnd - ACol;
			// Block-stride loop over the entries of this column.
			// The previous guard `if (threadIdx.x < ACol_numRows)`
			// silently dropped entries (leaving the destination
			// uninitialized) whenever a column held more nonzeros
			// than blockDim.x.
			for (DiffType t = threadIdx.x; t < ACol_numRows; t += blockDim.x)
			{
				dereference(RowsIdxFirst + t) = dereference(ACol + t);
				dereference(keys + t) = blockIdx.x;
			}
			RowsIdxFirst += ACol_numRows;
			keys += ACol_numRows;
		}
	}
}

/** \brief Build Ahat matrices.
	Copy values from A to Ahat matrices based on sparsity pattern M and the rowIndices array.
*/
template<typename RandomAccessIterator1, typename RandomAccessIterator2,
	 typename RandomAccessIterator3, typename RandomAccessIterator4,
	 typename RandomAccessIterator5, typename RandomAccessIterator6,
	 typename RandomAccessIterator7, typename RandomAccessIterator8,
	 typename RandomAccessIterator9, typename RandomAccessIterator10>
__global__
void populate_Ahats(RandomAccessIterator1 AFirstI, RandomAccessIterator2 AFirstJ, RandomAccessIterator3 AValues,
		    RandomAccessIterator4 MFirstI, RandomAccessIterator5 MFirstJ,
		    RandomAccessIterator6 sizesRows, RandomAccessIterator6 sizesCols,
		    RandomAccessIterator7 rowIndicesFirst, RandomAccessIterator8 rowIndicesOffsets,
		    RandomAccessIterator9 offsets, RandomAccessIterator9 offsetsLast,
		    RandomAccessIterator10 AhatFirst)
{
	using thrust::detail::backend::dereference;
	typedef typename RandomAccessIterator3::value_type ValueType;

	/* one Ahat per block: advance the per-matrix iterators to this
	   block's first matrix (they stride by gridDim.x below, in lockstep
	   with the offsets loop) */
	sizesRows += blockIdx.x;
	sizesCols += blockIdx.x;
	MFirstI  += blockIdx.x;
	rowIndicesOffsets += blockIdx.x;

	/* in case we have more matrices than blocks */
	for (offsets += blockIdx.x; offsets < offsetsLast; offsets += gridDim.x)
	{
		RandomAccessIterator10 Ahat = AhatFirst + dereference(offsets);
		RandomAccessIterator7 rowIndices = rowIndicesFirst + dereference(rowIndicesOffsets);
		size_t m = dereference(sizesRows); /* rows of this Ahat */
		size_t n = dereference(sizesCols); /* columns of this Ahat */
		size_t size = m * n;
		/* the block's threads cooperatively fill the m*n entries,
		   stored row-major */
		for (size_t el = threadIdx.x; el < size ; el += blockDim.x)
		{
			size_t i = el / n; /* row index in Ahat */
			size_t k = dereference(rowIndices + i); /* row in A */
			size_t j = el % n; /* col index in Ahat */
			size_t l = dereference(MFirstJ + (dereference(MFirstI) + j)); /* col in A */

			/* scan row k of A for column l; entries not present in
			   A stay zero.  The early break assumes the column
			   indices within a row are sorted ascending. */
			size_t c = dereference(AFirstI + k);
			size_t cStop = dereference(AFirstI + (k+1));
			ValueType value(0);
			for (; c < cStop; ++c)
			{
				size_t ci = dereference(AFirstJ + c);
				if (ci >= l)
				{
					if (ci == l) value = dereference(AValues + c);
					break;
				}
			}
			dereference(Ahat + el) = value;
		}

		/* prepare for next loop */
		sizesRows += gridDim.x;
		sizesCols += gridDim.x;
		MFirstI  += gridDim.x;
		rowIndicesOffsets += gridDim.x;
	}
}

/** \brief Perform QR decomposition of the matrices in an array.
*/
template<typename RandomAccessIterator1, typename RandomAccessIterator2,
	 typename RandomAccessIterator3,
	 typename RandomAccessIterator4, typename RandomAccessIterator5>
__global__
void qr(RandomAccessIterator1 Ahats, RandomAccessIterator2 AhatOffsets,
		size_t num_Ahats,
                RandomAccessIterator5 khats,
		RandomAccessIterator3 sizesRows, RandomAccessIterator3 sizesColumns,
		RandomAccessIterator4 QValues, RandomAccessIterator5 QOffsets)
{
	using thrust::detail::backend::dereference;

	/* one matrix per block; stride by gridDim.x in case there are more
	   matrices than blocks */
	for (size_t i = blockIdx.x; i < num_Ahats; i += gridDim.x)
	{
		/* decompose the i-th (sizesRows[i] x sizesColumns[i]) matrix;
		   presumably a block-cooperative routine -- see
		   cusp/device/block/qr.h.  When FULL_Q is not defined only
		   the khats[i]-th row of Q is produced. */
		cusp::device::block::qr(
			Ahats + dereference(AhatOffsets + i),
			dereference(sizesRows + i),
			dereference(sizesColumns + i),
#if !defined(FULL_Q)
                        dereference(khats + i),
#endif
			QValues + dereference(QOffsets + i));
	}
}

/** \brief Backward substitution.
	Do backward substitution to solve mhat = R \ chat.
*/
template<typename RandomAccessIterator1, typename RandomAccessIterator2,
	 typename RandomAccessIterator3, typename RandomAccessIterator4,
	 typename RandomAccessIterator5, typename RandomAccessIterator6,
	 typename RandomAccessIterator7, typename RandomAccessIterator8,
	 typename RandomAccessIterator9, typename RandomAccessIterator10>
__global__
void make_mhats(RandomAccessIterator1  RValues,    RandomAccessIterator2  ROffsets,
		size_t num_matrices,
                RandomAccessIterator6 khats,
		RandomAccessIterator3  sizesRows,  RandomAccessIterator4  sizesColumns,
		RandomAccessIterator5  QValues,    RandomAccessIterator6  QOffsets,
		RandomAccessIterator7  rowIndices, RandomAccessIterator8  rowIndicesOffsets,
		RandomAccessIterator9  mhatsFirst, RandomAccessIterator10 mhatOffsets )
{
	using thrust::detail::backend::dereference;
	typedef typename RandomAccessIterator1::value_type ValueType;

	/* use only one thread per block (back substitution is sequential) */
	if (threadIdx.x != 0) return;

	/* one matrix per block iteration, striding by gridDim.x */
	for (size_t nMat = blockIdx.x; nMat < num_matrices; nMat += gridDim.x)
	{
		size_t num_rows = dereference(sizesRows + nMat);
#if defined(FULL_Q)
		/* find out where the k-th column of M appears (if it does) in Ahat */
		size_t k = 0;
		for (RandomAccessIterator7 AhatRow = rowIndices + dereference(rowIndicesOffsets + nMat);
				k < num_rows; ++AhatRow)
		{
			if (dereference(AhatRow) == nMat) break;
			++k;
		}
                if ( k != dereference(khats + nMat) ) {
		  //  ASSERTION -- Flag an error here!!!
                }
#else
		/* k was already located on the host */
                size_t k = dereference(khats + nMat);
#endif

		/* do back substitution: solve mhat = R \ chat, where chat is the
		   k-th column of Q (FULL_Q) or the single stored row of Q */
		size_t num_cols = dereference(sizesColumns + nMat);
		RandomAccessIterator9 mhat = mhatsFirst + dereference(mhatOffsets + nMat);
		if (k < num_rows)
		{
			RandomAccessIterator1 R = RValues + dereference(ROffsets + nMat);
			RandomAccessIterator5 Q = QValues + dereference(QOffsets + nMat);

			/* for every row in R, starting from last one */
			for (size_t r = num_cols; r > 0; --r)
			{
				size_t i = r - 1;
#if defined(FULL_Q)
				/* Q stored column-major: element (k, i) */
				ValueType sum = dereference(Q + (i * num_rows + k));
#else
                                ValueType sum = dereference(Q + i);
#endif

				/* for every element on the row above the diagonal
				   (R stored row-major, num_cols per row) */
				for (size_t j = r; j < num_cols; ++j)
				{
					sum -= dereference(R + (i * num_cols + j)) * dereference(mhat + j);
				}

				dereference(mhat + i) = sum / dereference(R + (i * num_cols + i));
			}
		}
		else
		{
			/* column k of M does not intersect Ahat's rows: the
			   right-hand side is zero, so the solution is zero */
			for (size_t i = 0; i < num_cols; ++i)
			{
				dereference(mhat + i) = ValueType(0);
			}
		}
	}
}

/** \brief Operator multiplying the elements of a tuple.
	This unary operator takes a tuple with two elements as argument and
	returns the product of these two elements.
*/
template<typename ValueType>
struct multiply_pair : public thrust::unary_function<thrust::tuple<ValueType,ValueType>, ValueType>
{
	/** Return the product of the two elements of \a pair.
	    operator() is const so the functor can be invoked through a
	    const reference (e.g. inside thrust iterator adaptors). */
	__host__ __device__
	ValueType operator()(thrust::tuple<ValueType,ValueType> const& pair) const
	{
		return thrust::get<0>(pair) * thrust::get<1>(pair);
	}
};

/** \brief SPAI preconditioner implementation.

	\tparam MatrixType  a cusp::csr_format matrix
	\tparam MatrixType2 a cusp::csr_format matrix

	\param A input matrix
	\param [inout] M sparsity pattern and output preconditioner matrix
*/
template<typename MatrixType,
	 typename MatrixType2>
  void spai_csr(const MatrixType& A, MatrixType2 &M)
{
	PROFILE_SCOPED();

	typedef typename MatrixType::index_type IndexType;
	typedef typename MatrixType::value_type ValueType;

	/* assume that A is square */
	assert(A.num_rows == A.num_cols);

	/* the sparsity pattern must have the same size as A, for the time being */
	assert(A.num_rows == M.num_rows);
	assert(A.num_cols == M.num_cols);

	const unsigned int num_rows = A.num_rows;   // No distinction for different matrices, or columns

	/* Create the transpose of A.  The algorithm needs easy access to both
	   the rows and the columns of A: the CSR format provides easy access
	   to rows, At provides easy access to columns. */
	MatrixType At;
	cusp::transpose(A, At);

	typedef typename MatrixType::row_offsets_array_type::iterator RowIteratorType;
	typedef typename MatrixType::column_indices_array_type::iterator ColIteratorType;
	typedef typename MatrixType::values_array_type::iterator ValIteratorType;

	typedef cusp::array1d<size_t,cusp::device_memory> SizesType;

	RowIteratorType MFirstI   = M.row_offsets.begin();
	RowIteratorType MLastI    = M.row_offsets.end();
	ColIteratorType MFirstJ   = M.column_indices.begin();
	ValIteratorType MFirstVal = M.values.begin();

	/*
	 * 2012-02-21 WS NOTE: the chunk size should be replaced by a fixed
	 * buffer for the Ahats: only as many Ahats are added to the buffer as
	 * there is space available.
	 *
	 * Conceptually it should be easy to do the following calculations in
	 * blocks, with the final block being the remainder operation.  Carlo's
	 * research shows that performance plateaus at roughly 64 matrices.
	 */
	const unsigned int BLOCK_SIZE = 150;   // This seems to be a sensible asymptotic limit

	/* fill the preconditioner with zeros */
	thrust::fill(M.values.begin(), M.values.end(), ValueType(0));

	SizesType tempSize(num_rows);
	SizesType sizesRowsGlobal(num_rows);
	SizesType sizesColumnsGlobal(num_rows);

	/* For the time being, get the global sizes within one thread block.
	   Row sizes are upper bounds, column sizes are exact (see get_sizes). */
	get_sizes<<<1, 512>>>(A.row_offsets.begin(), A.row_offsets.end(),
			      MFirstI, MLastI,
			      MFirstJ, tempSize.begin(),
			      sizesRowsGlobal.begin(), sizesColumnsGlobal.begin());

	/* mhatOffsets must be global (not per chunk): make_mhats writes the
	   mhats directly into M.values.  This code needs to be refactored. */
	cusp::array1d<IndexType,cusp::device_memory> mhatOffsets(num_rows+1);
	thrust::inclusive_scan(sizesColumnsGlobal.begin(), sizesColumnsGlobal.end(), mhatOffsets.begin() + 1);
	mhatOffsets.front() = 0;

	/* process the columns of M in chunks of at most BLOCK_SIZE */
	for (size_t offset = 0; offset < num_rows; offset += BLOCK_SIZE, MFirstI += BLOCK_SIZE)
	{
	  size_t blocks = min( BLOCK_SIZE, (unsigned int)(num_rows-offset) );

	  /* MLastI refers to an offsets array, hence the extra entry */
	  MLastI = MFirstI + blocks+1;

	  {
	    /* khats[i]: position of column (offset+i) of M within the sorted,
	       deduplicated row indices of the i-th Ahat of this chunk */
	    cusp::array1d<IndexType,cusp::host_memory> khatsH(blocks);
	    cusp::array1d<IndexType,cusp::device_memory> khats(blocks);
	    cusp::array1d<IndexType,cusp::host_memory> sizesRowsH(blocks);

	    /* sizes of the Ahats of this chunk */
	    SizesType sizesRows(blocks);
	    SizesType sizesColumns(blocks);
	    SizesType rowIndicesOffsets(blocks + 1);  // This vector contains local offsets

	    thrust::copy(sizesRowsGlobal.begin()+offset, sizesRowsGlobal.begin()+offset+blocks, sizesRowsH.begin());
	    thrust::copy(sizesColumnsGlobal.begin()+offset, sizesColumnsGlobal.begin()+offset+blocks, sizesColumns.begin());
	    thrust::inclusive_scan(sizesRowsGlobal.begin()+offset, sizesRowsGlobal.begin()+offset+blocks, rowIndicesOffsets.begin() + 1);
	    rowIndicesOffsets.front() = 0;

	    /* gather the indices of the rows of A needed by each Ahat */
	    cusp::array1d<IndexType,cusp::device_memory> rowIndices(rowIndicesOffsets.back());
	    cusp::array1d<size_t,cusp::device_memory> keys(rowIndices.size());

	    get_row_indices<<<blocks, 512>>>(A.row_offsets.begin(), A.row_offsets.end(),
					     A.column_indices.begin(),
					     MFirstI, MLastI, MFirstJ,
					     rowIndicesOffsets.begin(),
					     rowIndices.begin(), keys.begin());

	    /* remove duplicate row indices, one segment per Ahat */
	    typename cusp::array1d<IndexType,cusp::device_memory>::iterator start, end;
	    start = rowIndices.begin();
	    int k=0;
	    for (typename cusp::array1d<IndexType,cusp::host_memory>::iterator r = sizesRowsH.begin();
		 r != sizesRowsH.end(); ++r, ++k)
	      {
		end = start + *r;
		thrust::sort(start, end); /* required by unique */
		*r = thrust::distance(start, thrust::unique(start, end));
		/* find where the k-th column of M appears in Ahat; taking offset into account */
		khatsH[k] = thrust::find(start,start+*r,k+offset)-start;
		start = end;
	      }
	    /* copy the deduplicated sizes (and khats) to the device */
	    thrust::copy(sizesRowsH.begin(), sizesRowsH.end(), sizesRows.begin());
	    thrust::copy(khatsH.begin(), khatsH.end(), khats.begin());

	    /* Allocate space for the Ahat matrices of this chunk.  The m*n
	       products are accumulated in size_t: scanning them through the
	       floating-point ValueType (as the code previously did) rounds
	       the offsets once the running total exceeds the mantissa,
	       corrupting the Ahat layout for large problems. */
	    SizesType AhatOffsets(blocks + 1);
	    thrust::inclusive_scan(
	      thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(sizesRows.begin(), sizesColumns.begin())),
	      multiply_pair<size_t>()),
	      thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(sizesRows.end(), sizesColumns.end())),
	      multiply_pair<size_t>()),
	      AhatOffsets.begin() + 1);

	    AhatOffsets.front() = 0;

	    cusp::array1d<ValueType,cusp::device_memory> Ahats(AhatOffsets.back());

	    /* Populate the Ahats for this chunk.  Note that At must point to
	       the beginning -- offsets are added inside the kernel; MFirstI
	       includes the chunk offset, but MFirstJ points to the beginning. */
	    populate_Ahats<<<blocks, 128>>>(At.row_offsets.begin(),
					    At.column_indices.begin(),
					    At.values.begin(),
					    MFirstI, MFirstJ,
					    sizesRows.begin(), sizesColumns.begin(),
					    rowIndices.begin(), rowIndicesOffsets.begin(),
					    AhatOffsets.begin(), AhatOffsets.end() - 1,
					    Ahats.begin());

	    /* allocate Q matrices (integer accumulation, as for AhatOffsets) */
	    cusp::array1d<IndexType,cusp::device_memory> QOffsets(AhatOffsets.size());
#if defined(FULL_Q)
	    thrust::inclusive_scan(
		thrust::make_transform_iterator(sizesRows.begin(), cusp::blas::detail::square<size_t>()),
		thrust::make_transform_iterator(sizesRows.end(), cusp::blas::detail::square<size_t>()),
		QOffsets.begin() + 1);
#else
	    /* only one row of Q is needed per matrix */
	    thrust::inclusive_scan( sizesRows.begin(), sizesRows.end(), QOffsets.begin() + 1);
#endif
	    QOffsets.front() = 0;

	    cusp::array1d<ValueType,cusp::device_memory> QValues(QOffsets.back(), ValueType(0));

	    /* QR decomposition; the subsequent make_mhats call reads R from Ahats */
	    qr<<<blocks,128>>>(Ahats.begin(), AhatOffsets.begin(),
			       AhatOffsets.size() - 1,
			       khats.begin(),
			       sizesRows.begin(), sizesColumns.begin(),
			       QValues.begin(), QOffsets.begin());

	    /* back substitution; results land directly in M.values */
	    make_mhats<<<blocks,1>>>(Ahats.begin(), AhatOffsets.begin(),
				     blocks, khats.begin(),
				     sizesRows.begin(),  sizesColumns.begin(),
				     QValues.begin(),    QOffsets.begin(),
				     rowIndices.begin(), rowIndicesOffsets.begin(),
				     MFirstVal,  mhatOffsets.begin()+offset );
	  }
	}
}


} // end namespace device
} // end namespace detail
} // end namespace precond
} // end namespace cusp

