#include <cusp/detail/device/arch.h>
#include <cusp/detail/device/common.h>
#include <cusp/transpose.h>
#include <cusp/blas.h>
#include <thrust/detail/backend/dereference.h>
#include <thrust/pair.h>

#include <cusp/detail/profiler.h>

#include <cusp/print.h>
#include <cusp/io/dense_matrix.h>
#include <cusp/io/matrix_market.h>

#include <algorithm>
#include <cassert>
#include <ctime>
#include <fstream>
#include <iostream>
#include <set>
#include <sstream>
#include <string>
#include <vector>

#include "magma.h"
#if defined(CUDA_3_2)
#include "cublas.h"
#else
#include "cublas_v2.h"
#endif

namespace cusp
{
namespace precond
{
namespace detail
{
namespace device
{

// Returns a raw pointer (device or host, depending on MemorySpace) to the
// CSR row-offsets array of `matrix`.
template <typename IndexType, typename ValueType, typename MemorySpace>
inline const IndexType* getCsrRowOffsetsPtr(const cusp::csr_matrix<IndexType, ValueType, MemorySpace>& matrix)
{
    const IndexType* raw = thrust::raw_pointer_cast(&matrix.row_offsets[0]);
    return raw;
}

// Returns a raw pointer (device or host, depending on MemorySpace) to the
// CSR column-indices array of `matrix`.
template <typename IndexType, typename ValueType, typename MemorySpace>
inline const IndexType* getCsrColIndicesPtr(const cusp::csr_matrix<IndexType, ValueType, MemorySpace>& matrix)
{
    const IndexType* raw = thrust::raw_pointer_cast(&matrix.column_indices[0]);
    return raw;
}

// Returns a raw pointer (device or host, depending on MemorySpace) to the
// CSR values array of `matrix`.
template <typename IndexType, typename ValueType, typename MemorySpace>
inline const ValueType* getCsrValuesPtr(const cusp::csr_matrix<IndexType, ValueType, MemorySpace>& matrix)
{
    const ValueType* raw = thrust::raw_pointer_cast(&matrix.values[0]);
    return raw;
}

// Returns a mutable raw pointer to the COO row-indices array of `matrix`.
template <typename IndexType, typename ValueType, typename MemorySpace>
inline IndexType* setCooRowsPtr(cusp::coo_matrix<IndexType, ValueType, MemorySpace>& matrix)
{
    IndexType* raw = thrust::raw_pointer_cast(&matrix.row_indices[0]);
    return raw;
}

// Returns a mutable raw pointer to the COO column-indices array of `matrix`.
template <typename IndexType, typename ValueType, typename MemorySpace>
inline IndexType* setCooColsPtr(cusp::coo_matrix<IndexType, ValueType, MemorySpace>& matrix)
{
    IndexType* raw = thrust::raw_pointer_cast(&matrix.column_indices[0]);
    return raw;
}

// Returns a mutable raw pointer to the COO values array of `matrix`.
template <typename IndexType, typename ValueType, typename MemorySpace>
inline ValueType* setCooValuesPtr(cusp::coo_matrix<IndexType, ValueType, MemorySpace>& matrix)
{
    ValueType* raw = thrust::raw_pointer_cast(&matrix.values[0]);
    return raw;
}


//XXX DEBUG OUTPUT
/** Debugging helper: streams `size` elements of `array` to `out`, writing
 *  `eol` after each element and finishing with std::endl. */
template <typename Type>
void dumpArray(const Type* __restrict__ const array, size_t size, std::ostream& out, const char* eol=" ")
{
    for(size_t idx = 0; idx < size; ++idx)
        out << array[idx] << eol;
    out << std::endl;
}

/** Enumerates the visible CUDA devices on stderr and selects (via
 *  cudaSetDevice) the one with the largest multiprocessor count; ties keep
 *  the lowest device index.
 *  Fix: the previous version never checked the cudaGetDeviceCount result and
 *  would still call cudaSetDevice(0) when no device was present. */
void scanCudaDevices()
{
    int devCount = 0;
    cudaError_t err = cudaGetDeviceCount(&devCount);
    if (err != cudaSuccess || devCount == 0)
    {
        std::cerr << "No usable CUDA device detected!" << std::endl;
        return;
    }

    std::cerr << devCount << " CUDA device(s) detected:" << std::endl;

    int maxProcessors(0), maxDevice(0);
    for (int i(0); i < devCount; ++i)
    {
        cudaDeviceProp devProp;
        if (cudaGetDeviceProperties(&devProp, i) != cudaSuccess)
            continue; // skip devices we cannot query
        std::cerr << " - Device #" << i << ": " << devProp.name << std::endl;
        if(maxProcessors < devProp.multiProcessorCount)
        {
            maxProcessors = devProp.multiProcessorCount;
            maxDevice = i;
        }
    }
    cudaSetDevice(maxDevice);
    std::cerr << "Selected device #" << maxDevice << " (" << maxProcessors
              << " processors)." << std::endl;
}

/** Base class for BSPAI exceptions. Derived constructors append details to
 *  the heap-allocated `errMsg` stream; getMsg() returns the accumulated text.
 *
 *  Fix: these exceptions are thrown by value (see the bspaiMagmaExcept throw
 *  in makeSpai), so they get copied. The implicitly-generated copy
 *  constructor shared the raw errMsg pointer between copies, causing a
 *  double delete when both were destroyed. A deep-copying copy constructor
 *  and assignment operator are provided (rule of three). */
class bspaiExcept
{
    protected:
        std::stringstream* errMsg; // owned; released in the destructor
        bspaiExcept()
        {
            errMsg = new std::stringstream();
            *errMsg << "EXCEPTION CAUGHT:" << std::endl;
        }

    public:        

        // Deep copy: each instance owns its own stream (prevents double delete).
        bspaiExcept(const bspaiExcept& other)
        {
            errMsg = new std::stringstream();
            *errMsg << other.errMsg->str();
        }

        // Deep assignment, for completeness (rule of three).
        bspaiExcept& operator=(const bspaiExcept& other)
        {
            if (this != &other)
                errMsg->str(other.errMsg->str());
            return *this;
        }

        virtual ~bspaiExcept()
        {
            delete errMsg;
        }
        
        // Returns the accumulated human-readable error message.
        std::string getMsg() const
        {
            return errMsg->str();
        }
};

/** Exception thrown when a cudaMalloc call fails on the device
 *  (see the allocation phase of spai_core). */
class bspaiCudaMallocExcept : public bspaiExcept
{
    public:
        bspaiCudaMallocExcept()
        : bspaiExcept()
        {
            *errMsg << "Memory allocation failed on the GPU!" << std::endl;
        }
};


/** Exception thrown when MAGMA reports a failure inside the core BSPAI loop.
 *  @param iter      iteration (block column index) at which MAGMA failed
 *  @param magmaCode raw MAGMA status/info code */
class bspaiMagmaExcept : public bspaiExcept
{
    public:
        bspaiMagmaExcept(const size_t iter, const int magmaCode)
        : bspaiExcept()
        {
            *errMsg << "MAGMA returned an error in the core BSPAI loop, at iteration ";
            *errMsg << iter << " " << " (error code: " << magmaCode << ")." << std::endl;
        }
};
/** \brief IndexSet is a wrapper around a generic container used for the set operations.
 It has the following methods which must always be called :
 - IndexSet(IndexType estimate) constructor called with an estimate of the number
                           of elements; useful if hash-based containers are used
                           since it allows to allocate a large enough hash space
                           in order to avoid frequent re-hashing right from the start
 - insert(Type item) inserts an element by calling the underlying method of the
                     container
 - size() returns the size of the set by calling the underlying method of the
                     container
 - begin() returns a const_iterator to the first element by calling the underlying
           method of the container
 - end() returns a const_iterator one past the last element by calling the underlying
           method of the container
 - cleanup() must be called after the last element has been inserted; its purpose is to
             sort the set and remove duplicates in case a non set-type container is used
 - const_iterator is a typedef to the constant iterator of the container
*/             
template <typename Type>
struct IndexSet
{    
    private:
        typedef std::set<Type> setType;
        setType theSet; // underlying sorted, duplicate-free container
    public:
        typedef typename setType::const_iterator const_iterator;

        // `estimate` is only meaningful for hash-based containers that
        // benefit from pre-sizing; std::set ignores it.
        IndexSet(Type estimate)
        {}
        IndexSet()
        {}
        ~IndexSet()
        {}

        // Read-only access to the wrapped container.
        const setType* getSet() const
        {
            return &theSet;
        }

        // Inserts `item`; duplicates are silently ignored by std::set.
        void insert(Type item)
        {
            theSet.insert(item);
        }

        const_iterator begin() const
        {
            return theSet.begin();
        }

        const_iterator end() const
        {
            return theSet.end();
        }

        const_iterator find(const Type& item) const
        {
            return theSet.find(item);
        }

        // No-op for std::set: the contents are already sorted and unique.
        void cleanup()
        {}

        // Number of stored indices (narrowed to Type).
        Type size() const
        {
            return theSet.size();
        }

        // Debugging helper: prints the contents on one line to std::cout.
        void dump() const
        {
            for(const_iterator it = theSet.begin(); it != theSet.end(); ++it)
                std::cout << *it << " ";
            std::cout << std::endl;
        }
};


/** Inserts into *setJ every column index appearing in row `k` of the CSR
 *  pattern given by MtRowOffsets / MtColumnIndices (i.e. the set J_k of
 *  nonzero block rows of column k of Mt). */
template <typename Type, typename Set>
void fillSetJk(const Type* __restrict__ const MtRowOffsets,
               const Type* __restrict__ const MtColumnIndices,
               Type k,
               Set* setJ)
{
    const Type rowEnd = MtRowOffsets[k+1];
    for(Type pos = MtRowOffsets[k]; pos < rowEnd; ++pos)
        setJ->insert(MtColumnIndices[pos]);
}


/** For every index j in *setJ, inserts into *setI all column indices of row j
 *  of the CSR pattern (AtRowOffsets / AtColumnIndices), then adds k itself so
 *  the diagonal entry is always present.
 *  NOTE: rows of At are visited in set order, so the array accesses are
 *  effectively random (possible NUMA effects, as the original flagged). */
template<typename Type, typename Set>
void fillSetIk(const Type* const __restrict__ AtRowOffsets,
               const Type* const __restrict__ AtColumnIndices,
               Type k,
               const Set* const setJ,
               Set* setI)
{
    for(typename Set::const_iterator it = setJ->begin(); it != setJ->end(); ++it)
    {
        const Type rowBegin = AtRowOffsets[*it];
        const Type rowEnd   = AtRowOffsets[*it + 1];
        for(Type pos = rowBegin; pos < rowEnd; ++pos)
            setI->insert(AtColumnIndices[pos]);
    }
    setI->insert(k);
}



/** For each of the `blocksToDo` block columns k, computes the index sets
 *  J_k (nonzero block rows of column k of MtBSP) and I_k (block rows coupled
 *  to J_k through AtBSP, plus k itself), writes their cardinalities into
 *  sizesSetsJ[k]/sizesSetsI[k], and copies their sorted contents into freshly
 *  allocated arrays indicesJ[k]/indicesI[k]. Ownership of those arrays passes
 *  to the caller, who must delete[] them. Parallelized over k with OpenMP. */
template <typename IndexType,typename SparsityPatternType>
void makeIndices(const SparsityPatternType& AtBSP,
                 const SparsityPatternType& MtBSP,
                 IndexType* __restrict__ * __restrict__ const indicesI,
                 IndexType* __restrict__ * __restrict__ const indicesJ,
                 IndexType* __restrict__ const sizesSetsI,
                 IndexType* __restrict__ const sizesSetsJ,
                 IndexType blocksToDo)
{
    typedef IndexSet<IndexType> SetType;
    typedef typename SetType::const_iterator SetIter;

    #pragma omp parallel for
    for(IndexType k=0; k<blocksToDo; ++k)
    {
        // NOTE: with a hash-based IndexSet an element-count estimate passed
        // to the constructor would avoid rehashing; std::set ignores it.
        SetType setJ;
        fillSetJk<IndexType, SetType>(getCsrRowOffsetsPtr(MtBSP), getCsrColIndicesPtr(MtBSP), k, &setJ);

        SetType setI;
        fillSetIk<IndexType, SetType>(getCsrRowOffsetsPtr(AtBSP), getCsrColIndicesPtr(AtBSP), k, &setJ, &setI);

        const IndexType numI = setI.size();
        const IndexType numJ = setJ.size();
        sizesSetsI[k] = numI;
        sizesSetsJ[k] = numJ;
        indicesI[k] = new IndexType[numI];
        indicesJ[k] = new IndexType[numJ];

        SetIter itJ( setJ.begin() );
        for(IndexType pos(0); pos < numJ; ++pos, ++itJ)
            indicesJ[k][pos] = *itJ;

        SetIter itI( setI.begin() );
        for(IndexType pos(0); pos < numI; ++pos, ++itI)
            indicesI[k][pos] = *itI;
    }
}

/** Device helper: records the global COO coordinates of every entry of the
 *  k-th block column of the preconditioner M.
 *  MRows/MCols point at the k-th slice of the coordinate arrays (the caller
 *  already applied the MhatOffsetInMValues shift, see makeSpai). Only the
 *  blockIdx.y == 0 slice of the 2D grid participates, so each
 *  (blockIdx.x, threadIdx.x, threadIdx.y) triple emits exactly one entry.
 *  offsetInM is the column-major index (column threadIdx.x, row
 *  blockIdx.x*blockDim.y + threadIdx.y, leading dimension
 *  gridDim.x*blockDim.x) matching the layout in which the second SGEMM and
 *  the MAGMA solve in makeSpai write the corresponding values, so the
 *  coordinates and values line up element for element.
 *  Assumes square thread blocks (blockDim.x == blockDim.y == blockSize). */
template <typename IndexType>
__device__ void MCoord(IndexType* const MRows,
                       IndexType* const MCols,
                       const IndexType* __restrict__ const indicesJ,
                       const IndexType k)
{

    if(blockIdx.y == 0)
    {
        // Global column of this entry: block column k, sub-column threadIdx.x.
        IndexType col = k*blockDim.x + threadIdx.x;
        // Global row: block row indicesJ[blockIdx.x], sub-row threadIdx.y.
        IndexType row = indicesJ[blockIdx.x]*blockDim.y + threadIdx.y;
    
        IndexType offsetInM = threadIdx.x*gridDim.x*blockDim.x +
                              blockIdx.x*blockDim.y +
                              threadIdx.y;

        MRows[offsetInM] = row;
        MCols[offsetInM] = col;
    
    }

}


/** Device helper: cooperatively scans row *j_src of the block sparsity
 *  pattern of At (CSR arrays AtBSP_rowOffsets / AtBSP_columnIndices) and sets
 *  *flag to 1 if block column *i_src occurs in it — i.e. if block
 *  (*i_src, *j_src) of the pattern is non-empty.
 *  *flag must reside in shared memory and be pre-initialized to 0 by the
 *  caller (see spaiGPUpreProcess). It is only ever written with the value 1,
 *  so the unsynchronized stores from multiple threads are benign.
 *  All threads of the block participate, striding the row's entries by
 *  blockDim.x*blockDim.y. */
template <typename IndexType>             
__device__ void isBlockNotEmpty (const IndexType* const i_src,
                              const IndexType* const j_src,
                              const IndexType* __restrict__ const AtBSP_rowOffsets,
                              const IndexType* __restrict__ const AtBSP_columnIndices,
                              char* const flag) // This one is in SHARED MEMORY
{
    //printf("Call to isBlockNotEmpty: %d %d by thread (%d,%d) in threadBlock (%d, %d)\n", *i_src, *j_src, threadIdx.y, threadIdx.x, blockIdx.y, blockIdx.x);
    /*
    __shared__ IndexType blockOffset;
    __shared__ IndexType nextBlockOffset;
    
    if(threadIdx.x == 0 && threadIdx.y == 0)
    {
        blockOffset = AtBSP_rowOffsets[*j_src];
    }
    if(threadIdx.x == 1 && threadIdx.y == 0)
    {
        nextBlockOffset = AtBSP_rowOffsets[*j_src+1];
    }
 
    __syncthreads();*/
    

    // Extent of row *j_src in the CSR pattern; every thread reads these
    // directly from global memory (the staged shared-memory variant above
    // was abandoned, see the commented-out code).
    IndexType blockOffset = AtBSP_rowOffsets[*j_src];
    IndexType nextBlockOffset = AtBSP_rowOffsets[*j_src+1];
    // Flat thread id within the block, used as the starting stride offset.
    IndexType threadID = threadIdx.y*blockDim.x+threadIdx.x;

    //printf(" block %d %d is NOT empty! blockOffset=%d, nextBlockOffset=%d, threadID=%d\n", *i_src, *j_src, blockOffset, nextBlockOffset, threadID );
    for(IndexType i(blockOffset+threadID); i < nextBlockOffset; i+=blockDim.x*blockDim.y)
    {
        //printf("%d %d\n", AtBSP_columnIndices[i], *i_src);
        if(AtBSP_columnIndices[i] == *i_src)
        {
            *(flag) = 1;
        }
            
    }
}

/** Device helper: copies the scalar entries of block (*i_src, *j_src) of At
 *  into the dense buffer AhatBegin.
 *  Only the threadIdx.y == 0 row of the thread block works: each such thread
 *  owns one row of the block in At (row *j_src*blockDim.x + threadIdx.x) and
 *  walks that row's CSR entries, keeping those whose column falls inside
 *  block column *i_src.
 *  AhatIdx is the column-major index into the numRows x numCols Ahat buffer
 *  (numRows = gridDim.y*blockDim.y, numCols = gridDim.x*blockDim.x, leading
 *  dimension numRows): column blockIdx.x*blockDim.x + threadIdx.x, row
 *  blockIdx.y*blockDim.y + (colInAt - startColInAt). Because the source is
 *  the transpose At, this stores the block transposed back.
 *  The caller must have zero-filled Ahat beforehand — entries absent from At
 *  are never written here (see spaiGPUpreProcess). */
template <typename IndexType, typename ValueType>
__device__ void extractBlock(const IndexType* const i_src,
                             const IndexType* const j_src,
                             const IndexType* __restrict__ const At_rowOffsets,
                             const IndexType* __restrict__ const At_columnIndices,
                             const ValueType* __restrict__ const At_values,
                             ValueType* const AhatBegin)
{   
    //printf("Call to extractBlock: %d %d by thread (%d,%d) in threadBlock (%d, %d)\n", *i_src, *j_src, threadIdx.y, threadIdx.x, blockIdx.y, blockIdx.x);      
  
    IndexType AhatIdx;
    if(threadIdx.y == 0)
    {                                         
        // First scalar row of block row *j_src in At; threadIdx.x selects
        // this thread's row within the block.
        IndexType startRowInAt = *j_src*blockDim.x;
        //IndexType endRowInAt = (*j_src+1)*blockDim.x;
        
        IndexType offsetBegin = At_rowOffsets[startRowInAt+threadIdx.x];
        IndexType offsetEnd = At_rowOffsets[startRowInAt+threadIdx.x+1];
        
        // Scalar column range [startColInAt, endColInAt) covered by block
        // column *i_src.
        IndexType startColInAt = *i_src*blockDim.y;
        IndexType endColInAt = (*i_src+1)*blockDim.y;
        
        for(IndexType k = offsetBegin; k < offsetEnd; k++)
        {
            IndexType colInAt = At_columnIndices[k];
            
            if(colInAt >= startColInAt && colInAt < endColInAt)
            {
                // Column-major destination index (see header comment).
                AhatIdx = blockIdx.x*gridDim.y*blockDim.x*blockDim.y +
                threadIdx.x*gridDim.y*blockDim.y +
                blockIdx.y*blockDim.y +
                colInAt-startColInAt;

                AhatBegin[AhatIdx] = At_values[k];
            }
            
        }
    }
    
}    

/** Device helper: builds the dense right-hand side Ehat (numRows x blockSize,
 *  column-major, numRows = gridDim.y*blockDim.y). Ehat is the k-th block
 *  column of the identity restricted to the row set I_k: the block row whose
 *  global index indicesI[blockIdx.y] equals k receives an identity block,
 *  every other entry is set to 0.
 *  Only the blockIdx.x == 0 slice of the grid participates. blockIdx.x is
 *  uniform across a thread block, so the __syncthreads() inside the branch is
 *  reached by all threads of each participating block and is safe. */
template <typename IndexType, typename ValueType>
__device__ void makeEhat(ValueType* const Ehat,
                         const IndexType* __restrict__ const indicesI,
                         const IndexType k)
{

    if(blockIdx.x == 0)
    {
        __shared__ char isItMe;
        
        // One thread decides whether this block row is the diagonal one
        // (indicesI[blockIdx.y] == k); the flag is shared with the others.
        if(threadIdx.x == 0 && threadIdx.y == 0)
        {            
            if(indicesI[blockIdx.y] == k)
                isItMe = 1;
            else
                isItMe = 0;
        }
        __syncthreads();
        
        // Column-major index: column threadIdx.x, row
        // blockIdx.y*blockDim.y + threadIdx.y.
        IndexType offsetInEhat = threadIdx.x*gridDim.y*blockDim.y +
                                 blockIdx.y*blockDim.y +
                                 threadIdx.y;
                                 
        // The modulo extracts the row index; requiring it to equal
        // blockIdx.y*blockDim.y + threadIdx.x selects threadIdx.y ==
        // threadIdx.x, i.e. the diagonal of the matching block.
        if(isItMe == 1 &&
           offsetInEhat % (gridDim.y*blockDim.y) == blockIdx.y*blockDim.y + threadIdx.x)
        {
            Ehat[offsetInEhat] = 1;
        
        }
        else
        {
            Ehat[offsetInEhat] = 0;
        }
        
    }
}

/** Kernel: per-iteration preprocessing for block column k of the SPAI solve.
 *  Launch configuration (see makeSpai): grid = (|J_k|, |I_k|),
 *  block = (blockSize, blockSize) — one thread block per (block column,
 *  block row) pair of the local dense problem.
 *  Steps:
 *   1. threads (0,0), (1,0), (2,0) stage i_src = indicesI[blockIdx.y],
 *      j_src = indicesJ[blockIdx.x] and the isItThere flag in shared memory.
 *      NOTE(review): this assumes blockDim.x >= 3 (blockSize >= 3); for
 *      smaller blocks the shared variables are never initialized — confirm
 *      against the block sizes actually used;
 *   2. every thread zeroes its entry of the dense Ahat buffer (column-major
 *      index, same formula as in extractBlock);
 *   3. isBlockNotEmpty checks whether block (i_src, j_src) exists in the
 *      pattern of At; if so, extractBlock copies it into Ahat;
 *   4. makeEhat builds the identity right-hand side and MCoord emits the
 *      COO coordinates for this block column of M. */
template <typename IndexType, typename ValueType>
__global__ void spaiGPUpreProcess(const IndexType* __restrict__ const At_rowOffsets,
                         const IndexType* __restrict__ const At_columnIndices,
                         const ValueType* __restrict__ const At_values,
                         const IndexType* __restrict__ const AtBSP_rowOffsets,
                         const IndexType* __restrict__ const AtBSP_columnIndices,
                         ValueType* const Ahat,
                         ValueType* const Ehat,
                         IndexType* const MRows,
                         IndexType* const MCols,    
                         const IndexType* __restrict__ const indicesI,
                         const IndexType* __restrict__ const indicesJ,
                         const IndexType k)
{
    __shared__ char isItThere;
    __shared__ IndexType i_src;
    __shared__ IndexType j_src;
    
    // Stage the per-block indices and the presence flag in shared memory,
    // one writer thread each (made visible by the __syncthreads() below).
    if(threadIdx.x==0 && threadIdx.y==0)
        i_src = indicesI[blockIdx.y];
    if(threadIdx.x==1 && threadIdx.y==0)
        j_src = indicesJ[blockIdx.x];
    if(threadIdx.x==2 && threadIdx.y==0)
        isItThere = 0;
        
    // Column-major index of this thread's entry in Ahat (see extractBlock).
    IndexType AhatIdx = blockIdx.x*gridDim.y*blockDim.x*blockDim.y +
                threadIdx.x*gridDim.y*blockDim.y +
                blockIdx.y*blockDim.y +
                threadIdx.y;
    
    // Zero-fill: extractBlock only writes entries present in At.
    Ahat[AhatIdx]=0.0;  

    __syncthreads();

    isBlockNotEmpty(&i_src, &j_src, AtBSP_rowOffsets, AtBSP_columnIndices, &isItThere);
     
    __syncthreads();
    
    if(isItThere==1)        
        extractBlock(&i_src, &j_src, At_rowOffsets, At_columnIndices, At_values, Ahat);
        
    makeEhat(Ehat, indicesI, k);
    MCoord(MRows, MCols, indicesJ, k);
    
}


/** Core BSPAI loop. For each block column k in [0, blocksToDo):
 *    1. launches spaiGPUpreProcess to assemble the dense local problem
 *       Ahat (|I_k|*blockSize x |J_k|*blockSize) and the identity RHS Ehat,
 *       and to emit the COO coordinates of the k-th block column of M;
 *    2. forms the normal equations AtAhat = Ahat^T * Ahat with cuBLAS;
 *    3. forms Ahat^T * Ehat directly into the k-th slice of MValues;
 *    4. solves the SPD system with MAGMA's Cholesky solver (sposv), which
 *       overwrites the RHS, leaving the solution in MValues_GPU.
 *  All *_GPU arguments are device pointers; sizesSetsI/J and
 *  inclusiveSizesSetsI/J are host arrays holding the per-k set sizes and
 *  their exclusive prefix sums.
 *  Throws bspaiMagmaExcept if MAGMA reports a failure (info != 0).
 *  WARNING: uses cublasSgemm and magma_sposv_gpu, which are float-only, so
 *  this template is only correct for ValueType == float.
 *  Fixes vs. the previous version: the cuBLAS handle and the two timing
 *  events are now created once outside the loop (they were recreated every
 *  iteration, and the events were never destroyed — a per-iteration GPU
 *  resource leak); the second SGEMM's status is checked; resources are
 *  released before throwing. */
template <typename IndexType, typename ValueType>
void makeSpai(const IndexType* __restrict__ const At_rowOffsets_GPU,
              const IndexType* __restrict__ const At_columnIndices_GPU,
              const ValueType* __restrict__ const At_values_GPU,
              const IndexType* __restrict__ const AtBSP_rowOffsets_GPU,
              const IndexType* __restrict__ const AtBSP_columnIndices_GPU,
              ValueType* const Ahat_GPU,
              ValueType* const AtAhat_GPU,
              ValueType* const Ehat_GPU,
              IndexType* const MRows_GPU,
              IndexType* const MCols_GPU,
              ValueType* const MValues_GPU,
              const IndexType* __restrict__ const indicesI_GPU,
              const IndexType* __restrict__ const indicesJ_GPU,
              const IndexType* __restrict__ const sizesSetsI,
              const IndexType* __restrict__ const sizesSetsJ,
              const IndexType* __restrict__ const inclusiveSizesSetsI,
              const IndexType* __restrict__ const inclusiveSizesSetsJ,
              IndexType blockSize,
              IndexType blocksToDo)
{
    cublasStatus_t stat;
    cublasHandle_t handle;

    stat = cublasCreate(&handle);
    if(stat != CUBLAS_STATUS_SUCCESS)
        std::cerr << "ERROR: cublasCreate failed!" << std::endl;

    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    const ValueType nullCoeff(0.0);
    const ValueType unityCoeff(1.0);

    for(IndexType k(0); k < blocksToDo; k++)
    {
        // Dimensions of the k-th local least-squares problem.
        IndexType numBlockRows = sizesSetsI[k];
        IndexType numBlockCols = sizesSetsJ[k];

        IndexType numRows = numBlockRows * blockSize;
        IndexType numCols = numBlockCols * blockSize;

        // Start of the k-th solution block inside MValues/MRows/MCols.
        IndexType MhatOffsetInMValues = inclusiveSizesSetsJ[k]*blockSize*blockSize;

        dim3 grid, block;
        grid.x = numBlockCols; grid.y = numBlockRows;
        block.x = blockSize; block.y = blockSize;

        cudaEventRecord(start, 0);

        // Assemble Ahat/Ehat and emit the COO coordinates of block column k.
        spaiGPUpreProcess<IndexType, ValueType> <<<grid, block>>> (At_rowOffsets_GPU,
                                                                   At_columnIndices_GPU,
                                                                   At_values_GPU,
                                                                   AtBSP_rowOffsets_GPU,
                                                                   AtBSP_columnIndices_GPU,
                                                                   Ahat_GPU,
                                                                   Ehat_GPU,
                                                                   MRows_GPU + MhatOffsetInMValues,
                                                                   MCols_GPU + MhatOffsetInMValues,
                                                                   indicesI_GPU+inclusiveSizesSetsI[k],
                                                                   indicesJ_GPU+inclusiveSizesSetsJ[k],
                                                                   k);
        cudaEventRecord(stop, 0);

        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);

        std::cerr <<  "------ spaiGPUpreProcess: "<< time << " ms" << std::endl;

        cudaEventRecord(start, 0);

        // Normal equations: AtAhat = Ahat^T * Ahat (numCols x numCols,
        // column-major). NOTE: Sgemm — float only.
        stat = cublasSgemm(handle,
                           CUBLAS_OP_T,
                           CUBLAS_OP_N,
                           numCols,
                           numCols,
                           numRows,
                           &unityCoeff,
                           Ahat_GPU,
                           numRows,
                           Ahat_GPU,
                           numRows,
                           &nullCoeff,
                           AtAhat_GPU,
                           numCols);

        if(stat != CUBLAS_STATUS_SUCCESS)
            std::cerr << "ERROR returned by cublasSgemm (when doing AhatT*Ahat on GPU)!" << std::endl;

        cudaEventRecord(stop, 0);

        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        std::cerr <<  "------ SGEMM1: "<< time << " ms" << std::endl;

        cudaEventRecord(start, 0);

        // Right-hand side: Mhat = Ahat^T * Ehat, written directly into the
        // k-th slice of MValues (numCols x blockSize, column-major).
        stat = cublasSgemm(handle,
                           CUBLAS_OP_T,
                           CUBLAS_OP_N,
                           numCols,
                           blockSize,
                           numRows,
                           &unityCoeff,
                           Ahat_GPU,
                           numRows,
                           Ehat_GPU,
                           numRows,
                           &nullCoeff,
                           MValues_GPU + MhatOffsetInMValues,
                           numCols);

        if(stat != CUBLAS_STATUS_SUCCESS)
            std::cerr << "ERROR returned by cublasSgemm (when doing AhatT*Ehat on GPU)!" << std::endl;

        cudaEventRecord(stop, 0);

        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        std::cerr <<  "------ SGEMM2: "<< time << " ms" << std::endl;

        cudaEventRecord(start, 0);

        // Cholesky solve of (Ahat^T Ahat) X = Ahat^T Ehat ('U' = upper
        // factor); the solve overwrites the RHS, so the solution lands in
        // MValues. NOTE: sposv — float only.
        magma_int_t info=0;
        magma_sposv_gpu('U', numCols, blockSize, AtAhat_GPU, numCols, MValues_GPU + MhatOffsetInMValues, numCols, &info );

        cudaEventRecord(stop, 0);

        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        std::cerr <<  "------ SPOSV: "<< time << " ms" << std::endl;

        if(info != 0)
        {
            // Release GPU resources before propagating the failure.
            cudaEventDestroy(start);
            cudaEventDestroy(stop);
            cublasDestroy(handle);
            throw(bspaiMagmaExcept(k, info));
        }
    }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    stat = cublasDestroy(handle);
    if(stat != CUBLAS_STATUS_SUCCESS)
        std::cerr << "ERROR: cublasDestroy failed!" << std::endl;
}






/** Builds a block sparse approximate inverse (BSPAI) preconditioner M_GPU
 *  for A_GPU, given the block sparsity pattern of A (A_BSP_GPU) and the
 *  desired pattern of the preconditioner (M_BSP_GPU).
 *  Phases:
 *   1. host-side set operations: for every block column k, compute the index
 *      sets I_k and J_k (makeIndices) and flatten them with prefix sums;
 *   2. GPU allocation and upload of the flattened index sets;
 *   3. the per-block least-squares solves in makeSpai, which fill M_GPU
 *      (a COO matrix, resized here) directly on the device;
 *   4. de-allocation of the GPU workspaces.
 *  Preconditions (asserted): A is square, the block pattern is square, and
 *  the block size divides the matrix dimension evenly.
 *  Throws bspaiCudaMallocExcept on device allocation failure and
 *  bspaiMagmaExcept if the MAGMA solver fails.
 *  Fixes vs. the previous version: the per-k size/index workspaces were C99
 *  variable-length arrays (non-standard C++, stack-overflow risk for large
 *  problems) and the flattened host index arrays leaked when an exception
 *  was thrown during allocation; both now use std::vector. */
template <typename MatrixType, typename SparsityPatternType, typename MatrixType2>
void spai_core(const MatrixType& A_GPU, const SparsityPatternType& A_BSP_GPU, const SparsityPatternType& M_BSP_GPU, MatrixType2& M_GPU)
{
    PROFILE_SCOPED();

    typedef typename MatrixType::index_type IndexType;
    typedef typename SparsityPatternType::value_type PatternValueType;
    typedef typename MatrixType::value_type ValueType;

    assert(A_GPU.num_rows==A_GPU.num_cols);
    assert(A_BSP_GPU.num_rows == A_BSP_GPU.num_cols);
    assert(A_GPU.num_rows % A_BSP_GPU.num_rows == 0);

    IndexType N                   = A_GPU.num_rows;
    IndexType blocksPerDim        = A_BSP_GPU.num_cols;
    IndexType totalNumberOfBlocks = A_BSP_GPU.num_entries;

    IndexType blocksToDo = blocksPerDim;
    IndexType blockSize = N / blocksToDo;
    assert(blocksToDo > 0);

    // The algorithm works on the transposes: CSR rows of At correspond to
    // columns of A, which is what the per-column set operations need.
    MatrixType At_GPU;
    SparsityPatternType AtBSP_GPU;
    SparsityPatternType MtBSP_GPU;

    cusp::transpose<MatrixType, MatrixType>(A_GPU, At_GPU);
    cusp::transpose<SparsityPatternType, SparsityPatternType>(A_BSP_GPU, AtBSP_GPU);
    cusp::transpose<SparsityPatternType, SparsityPatternType>(M_BSP_GPU, MtBSP_GPU);

    // Host copies of the block patterns, used by the set operations.
    cusp::csr_matrix<IndexType, PatternValueType, cusp::host_memory> AtBSP(AtBSP_GPU);
    cusp::csr_matrix<IndexType, PatternValueType, cusp::host_memory> MtBSP(MtBSP_GPU);

    size_t ticks(0);

    // Heap-allocated per-k workspaces (previously stack VLAs).
    std::vector<IndexType> sizesSetsJ(blocksToDo);
    std::vector<IndexType> sizesSetsI(blocksToDo);
    std::vector<IndexType> inclusiveSizesSetsJ(blocksToDo);
    std::vector<IndexType> inclusiveSizesSetsI(blocksToDo);

    // Per-k arrays allocated by makeIndices; released after flattening.
    std::vector<IndexType*> indicesI_array(blocksToDo);
    std::vector<IndexType*> indicesJ_array(blocksToDo);

    ticks = clock();
    double totalTime(0.0);
    double currentTime(0.0);
    std::cerr << "*******************************" << std::endl;
    std::cerr << "*** SET OPERATIONS STARTED! ***" << std::endl;
    std::cerr << "*******************************" << std::endl;

    makeIndices<IndexType>(AtBSP, MtBSP, &indicesI_array[0], &indicesJ_array[0],
                           &sizesSetsI[0], &sizesSetsJ[0], blocksToDo);
    IndexType maxNumBlockCols = *std::max_element(sizesSetsJ.begin(), sizesSetsJ.end());
    IndexType maxNumBlockRows = *std::max_element(sizesSetsI.begin(), sizesSetsI.end());

    // Exclusive prefix sums: offsets of each k's indices in the flat arrays.
    inclusiveSizesSetsJ[0]=0;
    inclusiveSizesSetsI[0]=0;

    for(IndexType i=1; i<blocksToDo; i++)
    {
        inclusiveSizesSetsJ[i] = inclusiveSizesSetsJ[i-1] + sizesSetsJ[i-1];
        inclusiveSizesSetsI[i] = inclusiveSizesSetsI[i-1] + sizesSetsI[i-1];
    }

    IndexType totalNbrIndicesI = inclusiveSizesSetsI[blocksToDo-1]+sizesSetsI[blocksToDo-1];
    IndexType totalNbrIndicesJ = inclusiveSizesSetsJ[blocksToDo-1]+sizesSetsJ[blocksToDo-1];
    std::vector<IndexType> indicesI(totalNbrIndicesI);
    std::vector<IndexType> indicesJ(totalNbrIndicesJ);

    // Flatten the per-k index arrays into the two contiguous host arrays
    // that will be uploaded to the GPU; free the per-k arrays as we go.
    #pragma omp parallel for
    for(IndexType i=0; i<blocksToDo; ++i)
    {
        for(IndexType j(0); j<sizesSetsI[i]; j++)
            indicesI[inclusiveSizesSetsI[i]+j] = indicesI_array[i][j];

        for(IndexType k(0); k<sizesSetsJ[i]; k++)
            indicesJ[inclusiveSizesSetsJ[i]+k] = indicesJ_array[i][k];

        delete[] indicesI_array[i];
        delete[] indicesJ_array[i];
    }

    std::cerr << "The total number of blocks in the preconditioner will be: " << totalNumberOfBlocks << std::endl;
    std::cerr << "Hence the number of non-zeros in the preconditionner will be: " << totalNumberOfBlocks*blockSize*blockSize << std::endl;
    std::cerr << "Memory requirements on CPU *and* GPU to store the preconditionner in COO:" << std::endl;
    std::cerr << "  - 2x " << totalNumberOfBlocks*blockSize*blockSize*sizeof(IndexType) << " bytes for the coordinates" << std::endl;
    std::cerr << "  - 1x " << totalNumberOfBlocks*blockSize*blockSize*sizeof(ValueType) << " bytes for the values" << std::endl;

    currentTime = (clock()-ticks)/float(CLOCKS_PER_SEC);
    totalTime +=  currentTime;
    std::cerr << "Completed in " << currentTime << " seconds." << std::endl;
    std::cerr << "*********************************" << std::endl;
    std::cerr << "*** SET OPERATIONS COMPLETED! ***" << std::endl;
    std::cerr << "*********************************" << std::endl;
    std::cerr << std::endl;


    ticks = clock();
    std::cerr << "***************************" << std::endl;
    std::cerr << "*** ALLOCATION STARTED! ***" << std::endl;
    std::cerr << "***************************" << std::endl;

    // Workspace extents: the dense buffers are sized for the worst-case k.
    IndexType MValuesElements = totalNumberOfBlocks*blockSize*blockSize;
    IndexType maxNumBlocks = maxNumBlockRows*maxNumBlockCols;
    IndexType maxAhatNumElements = maxNumBlocks*blockSize*blockSize;
    IndexType maxAtAhatNumElements = maxNumBlockCols*maxNumBlockCols*blockSize*blockSize;
    IndexType maxEhatNumElements = maxNumBlockRows*blockSize*blockSize;

    M_GPU.resize(N, N, MValuesElements);

    const IndexType* At_rowOffsets_GPU        = getCsrRowOffsetsPtr(At_GPU);
    const IndexType* At_columnIndices_GPU     = getCsrColIndicesPtr(At_GPU);
    const ValueType* At_values_GPU            = getCsrValuesPtr(At_GPU);
    const IndexType* AtBSP_rowOffsets_GPU     = getCsrRowOffsetsPtr(AtBSP_GPU);
    const IndexType* AtBSP_columnIndices_GPU  = getCsrColIndicesPtr(AtBSP_GPU);
    IndexType* MRows_GPU                      = setCooRowsPtr(M_GPU);
    IndexType* MCols_GPU                      = setCooColsPtr(M_GPU);
    ValueType* MValues_GPU                    = setCooValuesPtr(M_GPU);
    IndexType* indicesI_GPU;
    IndexType* indicesJ_GPU;
    ValueType* Ahat_GPU;
    ValueType* Ehat_GPU;
    ValueType* AtAhat_GPU;

    cudaError_t gpuErr;

    gpuErr = cudaMalloc((void**)&indicesI_GPU, totalNbrIndicesI*sizeof(IndexType));
    if(gpuErr != cudaSuccess) throw(bspaiCudaMallocExcept());

    cudaMemcpy(indicesI_GPU, &indicesI[0], totalNbrIndicesI*sizeof(IndexType), cudaMemcpyHostToDevice);

    gpuErr = cudaMalloc((void**)&indicesJ_GPU, totalNbrIndicesJ*sizeof(IndexType));
    if(gpuErr != cudaSuccess) throw(bspaiCudaMallocExcept());

    cudaMemcpy(indicesJ_GPU, &indicesJ[0], totalNbrIndicesJ*sizeof(IndexType), cudaMemcpyHostToDevice);

    gpuErr = cudaMalloc((void**)&Ahat_GPU, maxAhatNumElements*sizeof(ValueType));
    if(gpuErr != cudaSuccess) throw(bspaiCudaMallocExcept());

    gpuErr = cudaMalloc((void**)&AtAhat_GPU, maxAtAhatNumElements*sizeof(ValueType));
    if(gpuErr != cudaSuccess) throw(bspaiCudaMallocExcept());

    gpuErr = cudaMalloc((void**)&Ehat_GPU, maxEhatNumElements*sizeof(ValueType));
    if(gpuErr != cudaSuccess) throw(bspaiCudaMallocExcept());

    currentTime = (clock()-ticks)/float(CLOCKS_PER_SEC);
    totalTime +=  currentTime;
    std::cerr << "Completed in " << currentTime << " seconds." << std::endl;
    std::cerr << "*****************************" << std::endl;
    std::cerr << "*** ALLOCATION COMPLETED! ***" << std::endl;
    std::cerr << "*****************************" << std::endl;
    std::cerr << std::endl;


    ticks = clock();
    std::cerr << "********************************************" << std::endl;
    std::cerr << "*** PRECONDITIONER CONSTRUCTION STARTED! ***" << std::endl;
    std::cerr << "********************************************" << std::endl;

    makeSpai<IndexType, ValueType>(At_rowOffsets_GPU,
                                   At_columnIndices_GPU,
                                   At_values_GPU,
                                   AtBSP_rowOffsets_GPU,
                                   AtBSP_columnIndices_GPU,
                                   Ahat_GPU,
                                   AtAhat_GPU,
                                   Ehat_GPU,
                                   MRows_GPU,
                                   MCols_GPU,
                                   MValues_GPU,
                                   indicesI_GPU,
                                   indicesJ_GPU,
                                   &sizesSetsI[0],
                                   &sizesSetsJ[0],
                                   &inclusiveSizesSetsI[0],
                                   &inclusiveSizesSetsJ[0],
                                   blockSize,
                                   blocksToDo);

    currentTime = (clock()-ticks)/float(CLOCKS_PER_SEC);
    totalTime +=  currentTime;
    std::cerr << "Completed in " << currentTime << " seconds." << std::endl;
    std::cerr << "**********************************************" << std::endl;
    std::cerr << "*** PRECONDITIONER CONSTRUCTION COMPLETED! ***" << std::endl;
    std::cerr << "**********************************************" << std::endl;
    std::cerr << std::endl;


    ticks = clock();
    std::cerr << "**********************************************************" << std::endl;
    std::cerr << "*** PRECONDITIONNER RETRIEVAL + DE-ALLOCATION STARTED! ***" << std::endl;
    std::cerr << "**********************************************************" << std::endl;

    cudaFree(indicesI_GPU);
    cudaFree(indicesJ_GPU);
    cudaFree(Ahat_GPU);
    cudaFree(AtAhat_GPU);
    cudaFree(Ehat_GPU);

    currentTime = (clock()-ticks)/float(CLOCKS_PER_SEC);
    totalTime +=  currentTime;
    std::cerr << "Completed in " << currentTime << " seconds." << std::endl;
    std::cerr << "************************************************************" << std::endl;
    std::cerr << "*** PRECONDITIONNER RETRIEVAL + DE-ALLOCATION COMPLETED! ***" << std::endl;
    std::cerr << "************************************************************" << std::endl;
    std::cerr << std::endl;
    std::cerr << "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv" << std::endl;
    std::cerr << "OVERALL TIME: " << totalTime << " seconds." << std::endl;
    std::cerr << "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" << std::endl;


  CUSP_PROFILE_DUMP();
}




} // end namespace device
} // end namespace detail
} // end namespace precond
} // end namespace cusp

