#include <cusp/detail/device/arch.h>
#include <cusp/detail/device/common.h>
#include <cusp/transpose.h>
#include <cusp/blas.h>
#include <cusp/device/block/qr.h>
#include <thrust/detail/backend/dereference.h>
#include <thrust/pair.h>

//#include <cusp/detail/timer.h> // DEBUGGING/PROFILLING DANA
//#include <cusp/detail/profiler.h>
#include <cusp/print.h>
#include <cusp/io/dense_matrix.h>
#include <cusp/io/matrix_market.h>
#include <iostream>
#include <fstream>

#include <algorithm>
#include <set>

//#include <boost/unordered_set.hpp>

namespace cusp
{
namespace precond
{
namespace detail
{
namespace device
{

/** \brief HostSet is a wrapper around a generic container used for the set operations.
 It has the following methods which must always be called :
 - HostSet(size_t estimate) constructor called with an estimate of the number
                            of elements; useful if hash-based containers are used
                            since it allows to allocate a large enough hash space
                            in order to avoid frequent re-hashing right from the start
 - insert(Type item) inserts an element by calling the underlying method of the
                     container
 - size() returns the size of the set by calling the underlying method of the
                     container
 - begin() returns a const_iterator to the first element by calling the underlying
           method of the container
 - end() returns a const_iterator past the last element by calling the underlying
           method of the container
 - cleanup() must be called after the last element has been inserted; its purpose is to
             sort the set and remove duplicates in case a non set-type container is used
 - const_iterator is a typedef to the constant iterator of the container
*/
template<typename Type>
struct HostSet
{
    private:
        typedef std::set<Type> setType;
        setType container;
    public:
        typedef typename setType::const_iterator const_iterator;

        /// Construct with a size estimate. Ignored for std::set; kept so that
        /// hash-based containers can pre-size their bucket table.
        HostSet(size_t estimate)
        {}

        HostSet()
        {}

        /// Read-only access to the underlying container.
        const setType* getSet() const
        {
            return &container;
        }

        /// Insert one element (duplicates are silently absorbed by std::set).
        void insert(Type item)
        {
            container.insert(item);
        }

        const_iterator begin() const
        {
            return container.begin();
        }

        const_iterator end() const
        {
            return container.end();
        }

        /// Locate an element; returns end() when absent.
        const_iterator find(const Type& item) const
        {
            return container.find(item);
        }

        /// Post-insertion hook: a std::set is always sorted and duplicate-free,
        /// so there is nothing to do for this backend.
        void cleanup()
        {}

        size_t size() const
        {
            return container.size();
        }

        /// Print all elements separated by spaces (debugging aid).
        void dump() const
        {
            for(const_iterator it = container.begin(); it != container.end(); ++it)
                std::cout << *it << " ";
        }

        ~HostSet()
        {}
};


/* \brief Creates the set of indices J for one column k of matrix M (i.e one line of Mt).
 This set contains the indices of the non-zero entries in that line and is 
 obtained from the range of the column_indices array corresponding
 to the relevant line, given by the start and end iterators.
*/
template <typename Type, typename Set>
Set* createSetJk(const Type* const __restrict__ MtColumnIndices, size_t start, size_t end, Type k)
{
    // Size the set with the number of candidate entries so hash-based
    // backends can pre-allocate; std::set-based backends ignore the hint.
    Set* result = new Set(end - start);

    // Collect the column indices of the non-zeros of row k of Mt
    // (i.e. column k of M), given by the [start, end) slice.
    for(size_t pos = start; pos < end; ++pos)
        result->insert(MtColumnIndices[pos]);

    // Mandatory post-insertion hook (sorts/deduplicates for non-set backends).
    result->cleanup();

    // Caller takes ownership of the heap-allocated set.
    return result;
}


/* \brief Creates the set of indices I in matrix A based on one set of indices J.
 This set contains the row indices of the non-zero entries in the columns of A
 given by the indices contained in set J. It is obtained by looping over the 
 relevant columns of A (through the row_offset array of At) and adding the indices
 of the non-zero rows (i.e corresponding entries of the column_indices array of At)
 into a set structure.
 */
template<typename Type, typename Set>
Set* createSetIk(const Type* const __restrict__ AtRowOffsets,
                const Type* const __restrict__ AtColumnIndices,
                const Set* const setJ,
                Type k)
{
    // Fixed size estimate inherited from the original implementation.
    Set* result = new Set(100);// !!!!!!! XXX REVIEW THIS XXX !!!!!!

    // For every column index j in J, walk row j of At (= column j of A)
    // and record the row indices of its non-zeros.
    // NOTE: accesses into the At arrays follow the (unordered) iteration of
    // setJ, so they are effectively random.
    for(typename Set::const_iterator col = setJ->begin(); col != setJ->end(); ++col)
    {
        const size_t rowEnd = AtRowOffsets[*col + 1];
        for(size_t pos = AtRowOffsets[*col]; pos != rowEnd; ++pos)
            result->insert(AtColumnIndices[pos]);
    }

    // k itself is always a member of I (needed so ehat_k has a non-zero row).
    result->insert(k);

    // Mandatory post-insertion hook (sorts/deduplicates for non-set backends).
    result->cleanup();

    // Caller takes ownership of the heap-allocated set.
    return result;
}


/* \brief Returns the index of the non-zero element in the ehat_k of the minimization.
   Since rows are taken away when deriving the dense problem, the position of that
   non-zero is no longer k, but the index of k in the I set.
*/
template <typename Type, typename Set >
size_t findKhat(Type k, const Set* const __restrict__ setI)
{
    // Position of k within the (sorted) set I: the number of elements that
    // precede it in iteration order.
    return static_cast<size_t>( std::distance(setI->begin(), setI->find(k)) );
}


/** \brief Build Ahat matrices.
	Copy values from A to Ahat matrices based on sparsity pattern M and the rowIndices array.
*/
template<typename RandomAccessIterator1, typename RandomAccessIterator2,
     typename RandomAccessIterator3, typename RandomAccessIterator4,
	 typename RandomAccessIterator5, typename RandomAccessIterator6,
	 typename RandomAccessIterator7, typename RandomAccessIterator8,
     typename RandomAccessIterator9, typename RandomAccessIterator10,
     typename RandomAccessIterator11>
__global__
void populate_Ahats(RandomAccessIterator1 AFirstI, RandomAccessIterator2 AFirstJ,
                    RandomAccessIterator3 AValuesFirst,	RandomAccessIterator4 sizesRowsFirst, 
                    RandomAccessIterator5 sizesColsFirst, RandomAccessIterator6 rowIndicesFirst, 
                    RandomAccessIterator7 columnIndicesFirst, RandomAccessIterator8 AhatFirst,
                    RandomAccessIterator9 AhatOffsetsFirst, RandomAccessIterator10 MhatOffsetsFirst,
                    size_t MhatCumulativeBlockOffset,
                    RandomAccessIterator11 rowIndicesOffsetsFirst,
                    size_t AhatsSize, size_t blockSize)
{
	// One thread block handles one Ahat matrix at a time.
	// NOTE(review): the parameter AhatsSize is never used in this kernel.
	using thrust::detail::backend::dereference;
	typedef typename RandomAccessIterator3::value_type ValueType;

    
	// in case we have more matrices than blocks
	for (size_t matrixToDo(blockIdx.x); matrixToDo < blockSize; matrixToDo += gridDim.x)
	{            
	    size_t AhatOffset(0);
	    size_t rowIndicesOffset(0);
	    size_t columnIndicesOffset(0);
        // Compute the offset of the thread block in the Ahat array
        // by a serial prefix sum over all preceding matrices in the batch.
        // Every thread of the block recomputes the same three offsets.
        // NOTE(review): O(matrixToDo) redundant work per block — could be
        // precomputed on the host or with a device scan; confirm it matters.
        for(size_t b(0); (b < matrixToDo); b++)
        {
            size_t cols( dereference(sizesColsFirst + b) );
            size_t rows( dereference(sizesRowsFirst + b) );
            rowIndicesOffset+= rows;
            columnIndicesOffset += cols;
            AhatOffset += rows*cols;
        }
        
        // Three designated threads persist the computed offsets so that the
        // later kernels (qr, make_mhats) can index into the packed arrays.
        if(threadIdx.x == 0)
            dereference(AhatOffsetsFirst+matrixToDo) = AhatOffset;
            
        // Mhat offsets are global across batches, hence the cumulative term.
        if(threadIdx.x == 1)
            dereference(MhatOffsetsFirst+matrixToDo) = columnIndicesOffset+MhatCumulativeBlockOffset;
        
        if(threadIdx.x == 2)
            dereference(rowIndicesOffsetsFirst+matrixToDo) = rowIndicesOffset;
            
        // Start of this matrix's slice in the packed row-major Ahat storage.
    	RandomAccessIterator8 Ahat = AhatFirst + AhatOffset;
		    
		size_t m = dereference(sizesRowsFirst + matrixToDo); // rows = |I_k|
		size_t n = dereference(sizesColsFirst + matrixToDo); // cols = |J_k|
		
		// Slices of the packed I and J index sets for this matrix.
		RandomAccessIterator6 rowIndices = rowIndicesFirst + rowIndicesOffset;
		RandomAccessIterator7 columnIndices = columnIndicesFirst + columnIndicesOffset;
		size_t size = m * n;
		// Threads of the block cooperatively fill the m*n dense entries,
		// each thread taking elements blockDim.x apart.
		for (size_t el(threadIdx.x); el < size ; el += blockDim.x)
		{
			size_t i = el / n; /* row index in Ahat */
			size_t k = dereference(rowIndices + i); /* row in A */
			size_t j = el % n; /* col index in Ahat */
			size_t l = dereference(columnIndices + j); /* col in A */

			// Scan CSR row k of A for column l; defaults to zero when absent.
			size_t c = dereference(AFirstI + k);
			size_t cStop = dereference(AFirstI + (k+1));
			ValueType value(0);
			for (; c < cStop; ++c)
			{
				size_t ci = dereference(AFirstJ + c);
				// Early exit relies on the column indices of each CSR row
				// being sorted in ascending order — TODO confirm A is sorted.
				if (ci >= l)
				{
					if (ci == l) value = dereference(AValuesFirst + c);
					break;
				}
			}
			dereference(Ahat + el) = value;
		}

	}

}




/** \brief Perform QR decomposition of the matrices in an array.
*/
template<typename RandomAccessIterator1, typename RandomAccessIterator2,
         typename RandomAccessIterator3, typename RandomAccessIterator4,
         typename RandomAccessIterator5>
__global__
void qr(RandomAccessIterator1 Ahats,
        RandomAccessIterator2 AhatOffsets,
        size_t num_Ahats,
        RandomAccessIterator5 khats,
        RandomAccessIterator3 sizesRows,
        RandomAccessIterator3 sizesColumns,
        RandomAccessIterator4 QValues,
        RandomAccessIterator5 QOffsets)
{
    using thrust::detail::backend::dereference;

    // One thread block per Ahat matrix; when the grid has fewer blocks than
    // matrices, each block strides through the remainder.
    for (size_t i = blockIdx.x; i < num_Ahats; i += gridDim.x)
    {
        // Delegate the per-matrix factorization to the block-level QR routine,
        // locating each matrix via the offset arrays (AhatOffsets written by
        // populate_Ahats, QOffsets by the inclusive scan in spai_csr).
        // NOTE(review): block::qr presumably factors Ahat in place (leaving R)
        // and writes the transformed RHS into QValues — confirm against
        // cusp/device/block/qr.h; make_mhats reads the data that way.
        cusp::device::block::qr(
            Ahats + dereference(AhatOffsets + i),
            dereference(sizesRows + i),
            dereference(sizesColumns + i),
            dereference(khats + i),
            QValues + dereference(QOffsets + i)
        );
    }
}

/** \brief Backward substitution.
    Do backward substitution to solve mhat = R \ chat.
*/
template<typename RandomAccessIterator1, typename RandomAccessIterator2,
         typename RandomAccessIterator3, typename RandomAccessIterator4,
         typename RandomAccessIterator5, typename RandomAccessIterator6,
         typename RandomAccessIterator7, typename RandomAccessIterator8,
         typename RandomAccessIterator9, typename RandomAccessIterator10>
__global__
void make_mhats(RandomAccessIterator1 RValues,
                RandomAccessIterator2 ROffsets,
                size_t num_matrices,
                RandomAccessIterator6 khats,
                RandomAccessIterator3 sizesRows,
                RandomAccessIterator4 sizesColumns,
                RandomAccessIterator5 QValues,
                RandomAccessIterator6 QOffsets,
                RandomAccessIterator7 rowIndices,
                RandomAccessIterator8 rowIndicesOffsets,
                RandomAccessIterator9 mhatsFirst,
                RandomAccessIterator10 mhatOffsets)
{
    // NOTE(review): the parameters rowIndices and rowIndicesOffsets are never
    // used in this kernel body.
    using thrust::detail::backend::dereference;
    typedef typename RandomAccessIterator1::value_type ValueType;

    /* use only one thread per block */
    // The triangular solve below is inherently sequential per matrix,
    // so each matrix is handled by a single thread.
    if (threadIdx.x != 0)
        return;

    // One matrix per block, striding when there are more matrices than blocks.
    for (size_t nMat = blockIdx.x; nMat < num_matrices; nMat += gridDim.x)
    {
        size_t num_rows = dereference(sizesRows + nMat);
        size_t k = dereference(khats + nMat); // position of k within the I set
        /* do back substitution */
        size_t num_cols = dereference(sizesColumns + nMat);
        // mhat is written straight into the packed Mt values array at the
        // per-matrix offset computed by populate_Ahats.
        RandomAccessIterator9 mhat = mhatsFirst + dereference(mhatOffsets + nMat);
        if (k < num_rows)
        {
            // R is stored row-major with leading dimension num_cols
            // (see the i * num_cols + j indexing below); only its upper
            // triangle is read. Q here holds the transformed right-hand side
            // written by the qr kernel.
            RandomAccessIterator1 R = RValues + dereference(ROffsets + nMat);
            RandomAccessIterator5 Q = QValues + dereference(QOffsets + nMat);

            /* for every row in R, starting from last one */
            // r runs num_cols..1 so that i = r-1 avoids underflow of an
            // unsigned index at zero.
            for (size_t r = num_cols; r > 0; --r)
            {
                size_t i = r - 1;
                ValueType sum = dereference(Q + i);
                /* for every element on the row above the diagonal */
                for (size_t j = r; j < num_cols; ++j)
                {
                    sum -= dereference(R + (i * num_cols + j)) * dereference(mhat + j);
                }

                // No guard against a zero diagonal entry here — a singular R
                // produces inf/nan in mhat.
                dereference(mhat + i) = sum / dereference(R + (i * num_cols + i));
            }
        }
        else
        {
            // Defensive fallback: khat outside the row range means ehat_k has
            // no representable non-zero, so the column is zeroed.
            // NOTE(review): createSetIk always inserts k into I, so this
            // branch looks unreachable in the current pipeline — confirm.
            for (size_t i = 0; i < num_cols; ++i)
            {
                dereference(mhat + i) = ValueType(0);
            }
        }
    }
}

/** \brief SPAI preconditioner implementation.
    The arguments are all matrices of type cusp::csr_format.
    Input matrix M holds the sparsity pattern.

    \tparam MatrixType A_D        a matrix on the device holding A
    \tparam MatrixType M_D [inout] a matrix on the device holding the sparsity
                                   pattern on entry and the approximate inverse
                                   on exit
*/
template<typename MatrixType>
void spai_csr(const MatrixType& A_D, MatrixType& M_D)
{
    //PROFILE_SCOPED();
    std::cout << "SPAI - NEW VERSION STARTED!" << std::endl;
    // Some typedefs and constants to start with...
    typedef typename MatrixType::index_type                 IndexType;
    typedef typename MatrixType::value_type                 ValueType;    
    typedef cusp::array1d<IndexType,cusp::host_memory>      HostIndexCuspArray;
    typedef cusp::array1d<IndexType,cusp::device_memory>    DeviceIndexCuspArray;
    typedef cusp::array1d<size_t,cusp::host_memory>         HostIntegerCuspArray;
    typedef cusp::array1d<size_t,cusp::device_memory>       DeviceIntegerCuspArray;
    typedef cusp::array1d<ValueType,cusp::device_memory>    DeviceValueCuspArray;
    typedef cusp::array1d<ValueType,cusp::host_memory>      HostValueCuspArray;
    typedef HostSet<IndexType>                              HostIndexSet;
    typedef typename HostIndexSet::const_iterator           setIter;
    
    // Number of columns of M processed per batch; one Ahat least-squares
    // problem is built per column.
    const size_t BLOCK_SIZE = 150; // XXX CHECK THIS !!!
        
    // Assume that A is square
    // NOTE(review): assert requires <cassert>, which is not included
    // directly — presumably pulled in transitively; confirm.
    assert(A_D.num_rows == A_D.num_cols);

    // The sparsity pattern must have the same size as A, for the time being
    //assert(A_D.num_rows == M_D.num_rows);

    // No distinction for different matrices, or columns    
    const size_t numRows = A_D.num_rows; 
    
    // We're going to be working with M transpose...
    // (rows of Mt correspond to columns of M, so the J sets can be read
    // directly from Mt's CSR rows)
    MatrixType Mt_D;
    cusp::transpose(M_D, Mt_D);     
    
    // We're going to need A transpose on the host...    
    MatrixType At_D;
    cusp::transpose(A_D, At_D);     

    // First step: get the sparsity patterns and the values of A onto the host
    // by copying the row_offsets and column_indices arrays of the CSR structures 
    // containing At and Mt on the device.
    HostIndexCuspArray AtRowOffsetsCusp_H(At_D.row_offsets);
    HostIndexCuspArray AtColumnIndicesCusp_H(At_D.column_indices);
    HostIndexCuspArray MtRowOffsetsCusp_H(Mt_D.row_offsets);
    HostIndexCuspArray MtColumnIndicesCusp_H(Mt_D.column_indices);
    
    // Second step: place the sparsity patterns into plain C arrays
    // We first get the sizes...
    size_t AtRowOffsetsSize(AtRowOffsetsCusp_H.size());
    size_t AtColumnIndicesSize(AtColumnIndicesCusp_H.size());
    size_t MtRowOffsetsSize(MtRowOffsetsCusp_H.size());
    size_t MtColumnIndicesSize(MtColumnIndicesCusp_H.size());
    
    // ...then allocate arrays...
    // NOTE(review): raw new[] with matching delete[] at the end of the
    // function — leaks if anything in between throws.
    IndexType* AtRowOffsets_H = new IndexType[AtRowOffsetsSize];
    IndexType* AtColumnIndices_H = new IndexType[AtColumnIndicesSize];
    IndexType* MtRowOffsets_H = new IndexType[MtRowOffsetsSize];
    IndexType* MtColumnIndices_H = new IndexType[MtColumnIndicesSize];
    
    // ...and we can finally copy the data from the CUSP arrays.
    for(size_t i(0); i < AtColumnIndicesSize; ++i)
    {
        AtColumnIndices_H[i] = AtColumnIndicesCusp_H[i];
    }
    for(size_t i(0); i < AtRowOffsetsSize; ++i)
    {
        AtRowOffsets_H[i] = AtRowOffsetsCusp_H[i];
    }    
    for(size_t i(0); i < MtRowOffsetsSize; ++i)
    {
        MtRowOffsets_H[i] = MtRowOffsetsCusp_H[i];
    }
    for(size_t i(0); i < MtColumnIndicesSize; ++i)
    {
        MtColumnIndices_H[i] = MtColumnIndicesCusp_H[i];
    } 
    
     // This variable keeps track of the total number of Ahat matrices processed
     // so far in the previous blocks. It is needed to construct the Mhat vectors.
    size_t MhatCumulativeBlockOffset(0);                    
    
    // MAIN LOOP OVER THE BLOCKS
    // Each iteration builds, factorizes and solves one batch of up to
    // BLOCK_SIZE dense least-squares problems (one per column of M).
    for(size_t blockOffset(0); blockOffset < numRows; blockOffset += BLOCK_SIZE)
    {
        
        // Take the minimum of the block size and the number of remaining matrices
        size_t blocks = std::min( BLOCK_SIZE, (numRows-blockOffset) );
        
        // We start by building the sets I and J.
        // We first allocate arrays of pointers to sets
        // NOTE(review): variable-length arrays are a compiler extension,
        // not standard C++ — std::vector would be portable.
        HostIndexSet* setsJ[blocks];
        HostIndexSet* setsI[blocks];
        
         // ...as well as some CUSP arrays which we will eventually send to the device
	    HostIndexCuspArray khats_H(blocks); // Holds the index of the non-zero element of ehat_k (see func. def.)
  	    HostIntegerCuspArray sizesRows_H(blocks); // Holds the number of rows of the Ahat matrices in the block
  	    HostIntegerCuspArray sizesColumns_H(blocks); // Holds the number of columns of the Ahat matrices in the block

        // We now compute the sets and save them into the array of pointers.
        // We also keep track of the sizes of the various sets and populate the
        // size arrays with them. The row-wise, column-wise and total number of
        // elements are also saved in order to allocate the other required arrays.
        size_t AhatsSize(0); // Holds the total number of elements of all Ahats combined
        size_t rowIndicesSize(0); // Holds the number of elements in all the sets Ik combined
        size_t columnIndicesSize(0); // Holds the number of elements in all the sets Jk combined  
        size_t MhatBlockOffset(0);
        //#pragma omp parallel for reduction(+:AhatsSize,rowIndicesSize,columnIndicesSize,MhatBlockOffset)
        for(size_t k = 0; k < blocks; k++) // So we loop over the matrices in the current block...
        {
            setsJ[k] = createSetJk<IndexType, HostIndexSet>(
                            MtColumnIndices_H,                          
                            (size_t)MtRowOffsets_H[k+blockOffset],
                            (size_t)MtRowOffsets_H[k+1+blockOffset],
                            (IndexType)(k+blockOffset)
                       ); // This returns a pointer to the (manually) allocated set Jk
            
            setsI[k] = createSetIk<IndexType, HostIndexSet>(
                            AtRowOffsets_H,
                            AtColumnIndices_H,
                            setsJ[k],
                            (IndexType)(k+blockOffset)
                       ); // This returns a pointer to the (manually) allocated set Ik
                             
            // We find the index of ehat_k for the current matrix         
            // NOTE(review): dereference(...) is used here on host iterators
            // with no visible using-declaration — presumably found via ADL
            // on the thrust-based iterator types; confirm it compiles as
            // intended (plain *(it) would be equivalent on the host).
            dereference(khats_H.begin()+k) = findKhat((IndexType)(k+blockOffset), setsI[k]);
            
            // Updates the size arrays with the sizes of the sets
            size_t rows( setsI[k]->size() );
            size_t columns( setsJ[k]->size() );
            dereference(sizesRows_H.begin()+k) = rows;
            dereference(sizesColumns_H.begin()+k) = columns;
            
            // Updates the total numbers
            MhatBlockOffset += columns;    
            AhatsSize+=rows*columns;
            rowIndicesSize += rows;
            columnIndicesSize += columns;

        }
        
        // Next, we allocate two more CUSP arrays for the indices
        // (i.e. the sets, because we can't send our set structure to the GPU, it needs
        // a CUSP array)
        HostIndexCuspArray rowIndices_H(rowIndicesSize); // another one for the I sets...
        HostIndexCuspArray columnIndices_H(columnIndicesSize); // and yet another one for the J sets!

        // We now loop again over the matrices in the block to populate these new arrays
        // XXX IS THIS ABSOLUTELY NECESSARY ??
        size_t columnIter(0); // This counts the number of elements processed from the J sets
        size_t rowIter(0); //This counts the number of elements processed from the I sets
        // CAUTION: The sets have to be *ordered* at that point for the current kernels to work!
        for(size_t k(0); k < blocks; k++)
        {
            //Defines iterators over the sets
            setIter idxJ( setsJ[k]->begin() );
            setIter endJ( setsJ[k]->end() );

            setIter idxI( setsI[k]->begin() );
            setIter endI( setsI[k]->end() );

            // For each J set, we copy the elements into the CUSP array...
            for(; idxJ != endJ; idxJ++)
            {
                dereference(columnIndices_H.begin()+columnIter) = *idxJ;
                columnIter++;  
            }

            // ...and we do the same for the I sets.
            for(; idxI != endI; idxI++)
            {
                dereference(rowIndices_H.begin()+rowIter) = *idxI;
                rowIter++;
            }
            
            // The sets were manually allocated in the functions !!
            delete setsJ[k];
            delete setsI[k];   
        }

        // The next step is to allocate and populate arrays on the device

        // An array for the Ahat matrices...
        DeviceValueCuspArray Ahats_D(AhatsSize);
        // One for the row sizes...
        DeviceIndexCuspArray sizesRows_D(blocks);
        // One for the column sizes...
        DeviceIndexCuspArray sizesColumns_D(blocks);
        // One for the row sizes...
        DeviceIndexCuspArray khats_D(blocks);
        // another one for the I sets...
        DeviceIndexCuspArray rowIndices_D(rowIndicesSize); 
        // and yet another one for the J sets!
        DeviceIndexCuspArray columnIndices_D(columnIndicesSize); 
        // This one will have the starting indices of the Ahat matrices in the
        // long CUSP array which contains them
        DeviceIndexCuspArray AhatOffsets_D(blocks); 
        // Similarly, this one contains the indices at which the various J sets
        // start in the corresponding CUSP array.
        // NOTE(review): the populate_Ahats kernel writes only `blocks` entries
        // into this array, so sizing it rowIndicesSize looks like an
        // over-allocation — confirm and shrink to `blocks` if so.
        DeviceIndexCuspArray rowIndicesOffsets_D(rowIndicesSize);
        // Here, we will store the cumulative length of all Mhats in the block
        // That's needed for the kernel that builds the Mhats. 
        DeviceIntegerCuspArray MhatOffsets_D(blocks, 0);        
        
        
        // We now copy all the results gathered so far...
        thrust::copy(sizesRows_H.begin(), sizesRows_H.end(), sizesRows_D.begin());
        thrust::copy(sizesColumns_H.begin(), sizesColumns_H.end(), sizesColumns_D.begin());
        thrust::copy(khats_H.begin(), khats_H.end(), khats_D.begin());
        thrust::copy(rowIndices_H.begin(), rowIndices_H.end(), rowIndices_D.begin());
        thrust::copy(columnIndices_H.begin(), columnIndices_H.end(), columnIndices_D.begin());
        
        // At that point everything's ready to assemble the Ahats of the current block on the device

        // One thread block of 128 threads per Ahat matrix.
        // NOTE(review): no cudaGetLastError / synchronization after any of the
        // three launches below — errors would surface only at the next
        // blocking call. The launches themselves are ordered by the stream.
        populate_Ahats<<<blocks, 128>>>(
                A_D.row_offsets.begin(), 
	            A_D.column_indices.begin(),
	            A_D.values.begin(),  
	            sizesRows_D.begin(),
	            sizesColumns_D.begin(),
	            rowIndices_D.begin(),
	            columnIndices_D.begin(),
	            Ahats_D.begin(),
	            AhatOffsets_D.begin(),
	            MhatOffsets_D.begin(),
	            MhatCumulativeBlockOffset,
	            rowIndicesOffsets_D.begin(),
	            AhatsSize,
	            blocks); 
        
        MhatCumulativeBlockOffset += MhatBlockOffset;
        	                       
	    // allocate Q matrices
        DeviceIndexCuspArray QOffsets_D(blocks+1);

	    // Here only one row is allocated
	    // QOffsets_D[i] = total number of rows of matrices 0..i-1, so
	    // QValues_D holds one value per Ahat row (the transformed RHS),
	    // not a full Q matrix per problem.
	    thrust::inclusive_scan( sizesRows_D.begin(), sizesRows_D.end(), QOffsets_D.begin() + 1);
	    QOffsets_D.front() = 0;
	    DeviceValueCuspArray QValues_D(QOffsets_D.back(), ValueType(0));

	    // QR decomposition
	    qr<<<blocks,128>>>(
	            Ahats_D.begin(),
	            AhatOffsets_D.begin(),
			    blocks,
			    khats_D.begin(),
			    sizesRows_D.begin(),
			    sizesColumns_D.begin(),
			    QValues_D.begin(),
			    QOffsets_D.begin());
        
        // Backward solve to build the Mhats
        // The solved columns are written directly into Mt_D.values at the
        // MhatOffsets positions; this relies on Mt's CSR column indices per
        // row being sorted in the same ascending order as the J sets.
	    make_mhats<<<blocks,1>>>(
	            Ahats_D.begin(),
	            AhatOffsets_D.begin(),
			    blocks,
			    khats_D.begin(),
                sizesRows_D.begin(),
                sizesColumns_D.begin(),
                QValues_D.begin(),
                QOffsets_D.begin(),
                rowIndices_D.begin(),
                rowIndicesOffsets_D.begin(),
                Mt_D.values.begin(),  
                MhatOffsets_D.begin());

    } // End of MAIN LOOP
    
    // De-allocation of the C arrays
    delete[] AtRowOffsets_H;
    delete[] AtColumnIndices_H;
    delete[] MtRowOffsets_H;
    delete[] MtColumnIndices_H;

    // We've been working on M transpose so far...
    cusp::transpose(Mt_D, M_D);
        std::cout << "SPAI - NEW VERSION ENDING!" << std::endl;
    // Output for debugging purposes
    // NOTE(review): unconditional file write left enabled in what looks like
    // production code — consider removing or guarding with a debug flag.
     cusp::io::write_matrix_market_file(M_D, std::string("OUTPUT.mtx"));
}

} // end namespace device
} // end namespace detail
} // end namespace precond
} // end namespace cusp





