/*
 *  Copyright 2008-2009 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <cusp/csr_matrix.h>
#include <cusp/linear_operator.h>
#include <cusp/print.h>

#include <cusp/detail/device/common.h>

#include <algorithm>
#include <cmath>

namespace cusp
{

// Tag types selecting the row-traversal direction for level-scheduling
// setup (see detail::Level::DirectionalSetup): lu_forward visits rows
// 0..n-1 (lower triangular), lu_backward visits rows n-1..0 (upper
// triangular).
struct lu_forward {};
struct lu_backward {};

namespace detail
{

// Lower triangular solve for one batch of level-scheduled rows:
// computes x[jj] = b[jj] - L(jj,:)*x for every row jj listed in
// jlevL[l1..l2), one half-warp (16 threads) per row.
//
// Preconditions:
//  - x already holds final values for every column referenced by these
//    rows (the host launch loop processes levels in order to guarantee
//    this);
//  - assumes (review) L is CSR with an implicit unit diagonal, since the
//    entire stored row is accumulated — matches the host path in
//    sparse_forwardsolve(..., cusp::host_memory); confirm upstream.
//
// NOTE(review): the reduction relies on implicit lock-step execution of a
// half-warp through a volatile shared array (no __syncwarp) — the
// pre-Volta warp-synchronous idiom, unsafe under independent thread
// scheduling (SM70+); verify target architectures.
template< typename IndexType, typename ValueType, unsigned int BLOCK_SIZE >
__global__
void L_SOL_LEV(const ValueType* b, ValueType* x, const ValueType* la,
               const IndexType* lja, const IndexType* lia,
               const IndexType* jlevL, IndexType l1, IndexType l2) {

    // total number of half-warps in the grid (HALF_WARP_SIZE comes from
    // cusp/detail/device/common.h); used as the row stride below
    IndexType nhw = gridDim.x*BLOCK_SIZE/HALF_WARP_SIZE;
    // global half-warp id
    IndexType hwid = (blockIdx.x*BLOCK_SIZE+threadIdx.x)/HALF_WARP_SIZE;
    // thread lane within its half warp
    IndexType lane = threadIdx.x & (HALF_WARP_SIZE-1);
    // shared memory for partial results; padded by 8 so the "+8" reduction
    // step of the last half-warp stays in bounds (the padding is never
    // initialized, but only lane 0's slot is consumed below)
    volatile __shared__ ValueType r[BLOCK_SIZE+8];

    // stride over the rows of this level set, one half-warp per row
    for (size_t i=l1+hwid; i<l2; i+=nhw) {
        size_t jj = jlevL[i];
        IndexType p1 = lia[jj];       // CSR row begin
        IndexType q1 = lia[jj+1];     // CSR row end

        // each lane accumulates a strided slice of the dot product L(jj,:)*x
        ValueType sum = 0.0;
        for (size_t k=p1+lane; k<q1; k+=HALF_WARP_SIZE)
            sum += la[k]*x[lja[k]];

        // parallel reduction within the half-warp (16 -> 1)
        r[threadIdx.x] = sum;
        r[threadIdx.x] = sum = sum + r[threadIdx.x+8];
        r[threadIdx.x] = sum = sum + r[threadIdx.x+4];
        r[threadIdx.x] = sum = sum + r[threadIdx.x+2];
        r[threadIdx.x] = sum = sum + r[threadIdx.x+1];

        // lane 0 now holds the full row sum
        if (lane == 0)
            x[jj] = b[jj] - r[threadIdx.x];
    }
}

// Upper triangular solve for one batch of level-scheduled rows:
// for every row jj in jlevU[l1..l2), computes
//   x[jj] = ua[uia[jj]] * ( x[jj] - U(jj, off-diagonal)*x )
// one half-warp (16 threads) per row.  The first stored entry of each CSR
// row, ua[uia[jj]], is the diagonal, applied multiplicatively (i.e.
// presumably stored already inverted — confirm with the code producing U),
// exactly as in the host path sparse_backsolve(..., cusp::host_memory).
//
// Fix: the accumulation loop previously started at p1+lane, so lane 0
// wrongly included the diagonal term ua[p1]*x[jj] in the sum, while the
// host version skips it (its inner loop starts at row_offsets[i]+1).  The
// loop now starts at p1+1+lane to match.
//
// Preconditions: x seeded with the rhs, and final for all columns these
// rows reference (levels are launched in order by the host loop).
//
// NOTE(review): the reduction relies on implicit lock-step execution of a
// half-warp through a volatile shared array (no __syncwarp) — the
// pre-Volta warp-synchronous idiom, unsafe under independent thread
// scheduling (SM70+); verify target architectures.
template< typename IndexType, typename ValueType, unsigned int BLOCK_SIZE>
__global__
void U_SOL_LEV(ValueType *x, const ValueType *ua, const IndexType *uja, const IndexType *uia,
               const IndexType *jlevU, IndexType l1, IndexType l2) {
    // total number of half-warps in the grid; used as the row stride
    IndexType nhw = gridDim.x*BLOCK_SIZE/HALF_WARP_SIZE;
    // global half-warp id
    IndexType hwid = (blockIdx.x*BLOCK_SIZE+threadIdx.x)/HALF_WARP_SIZE;
    // thread lane within its half warp
    IndexType lane = threadIdx.x & (HALF_WARP_SIZE-1);
    // shared memory for partial results; padded by 8 so the "+8" reduction
    // step of the last half-warp stays in bounds (padding is uninitialized
    // but only lane 0's slot is consumed)
    volatile __shared__ ValueType r[BLOCK_SIZE+8];

    for (size_t i=l1+hwid; i<l2; i+=nhw) {
        size_t jj = jlevU[i];
        IndexType p1 = uia[jj];       // CSR row begin (diagonal entry)
        IndexType q1 = uia[jj+1];     // CSR row end

        // accumulate only the off-diagonal part of U(jj,:)*x; the
        // diagonal at p1 is applied separately below
        ValueType sum = 0.0;
        for (size_t k=p1+1+lane; k<q1; k+=HALF_WARP_SIZE)
            sum += ua[k]*x[uja[k]];

        // parallel reduction within the half-warp (16 -> 1)
        r[threadIdx.x] = sum;
        r[threadIdx.x] = sum = sum + r[threadIdx.x+8];
        r[threadIdx.x] = sum = sum + r[threadIdx.x+4];
        r[threadIdx.x] = sum = sum + r[threadIdx.x+2];
        r[threadIdx.x] = sum = sum + r[threadIdx.x+1];

        if (lane == 0) {
            ValueType t = ua[p1];   // (inverted) diagonal factor
            x[jj] = t*(x[jj]-r[threadIdx.x]);
        }
    }
}

// Level-scheduling data for a triangular CSR matrix: rows are grouped into
// "levels" such that every row in level k depends only on rows in earlier
// levels, so all rows of one level can be solved in parallel.
//
//   nlev : number of levels
//   jlev : row indices permuted so level k occupies jlev[ilev[k]..ilev[k+1])
//          (stored in MemorySpace so device kernels can read it)
//   ilev : level start offsets into jlev (nlev+1 used entries), kept on the
//          host because the kernel-launch loops read it there
template< typename IndexType, typename MemorySpace, typename Direction >
struct Level
{
    IndexType nlev;
    cusp::array1d<IndexType,MemorySpace> jlev;
    cusp::array1d<IndexType,cusp::host_memory> ilev;

    // Empty schedule.  (nlev was previously left uninitialized.)
    Level() : nlev(0) {}

    // Build the schedule from triangular CSR matrix L; Direction selects
    // the row-traversal order in DirectionalSetup.
    template< typename MatrixType >
    Level(const MatrixType& L) : nlev(0)
    {
        IndexType n = L.num_rows;

        // nlev can reach n (a fully sequential triangle) and ilev needs
        // nlev+1 entries, so allocate n+1; the original size n overflowed
        // by one element in that worst case (ilev[l+1]++ with l+1 == n).
        ilev.resize(n+1,0);

        cusp::array1d<IndexType,cusp::host_memory> level(n,0);
        cusp::array1d<IndexType,cusp::host_memory> h_jlev(n,0);

        // pull the matrix structure to the host for the sequential setup
        cusp::array1d<IndexType,cusp::host_memory> row_offsets(L.row_offsets);
        cusp::array1d<IndexType,cusp::host_memory> column_indices(L.column_indices);

        ilev[0] = 1;

        // level[i] = 1-based level of row i; ilev[k] = count of rows in level k
        DirectionalSetup( n, row_offsets, column_indices, level, Direction() );

        // prefix-sum the counts into 1-based level start pointers
        for (IndexType i=1; i<=nlev; i++)
            ilev[i] += ilev[i-1];

        // bucket the rows: ilev[level-1] serves as a running write cursor
        // (generic IndexType reference; was a hard-coded `int*`)
        for (IndexType i=0; i<n; i++)
        {
            IndexType& k = ilev[level[i]-1];
            h_jlev[k-1] = i+1;
            k++;
        }

        // each cursor has advanced to the next level's start; shift back
        for (IndexType i=nlev-1; i>0; i--)
            ilev[i] = ilev[i-1];

        ilev[0] = 1;

        // convert 1-based values to 0-based; include ilev[nlev], the end
        // pointer, which the original `i<n` loop missed whenever nlev == n
        for (IndexType i=0; i<=nlev; i++)
            ilev[i] -= 1;
        for (IndexType i=0; i<n; i++)
            h_jlev[i] -= 1;

        jlev = h_jlev;
    }

    // Forward (lower-triangular) level assignment: rows visited 0..n-1;
    // each row's level is 1 + the max level of its referenced columns.
    template< typename Array > 
    void DirectionalSetup( const IndexType n, const Array& row_offsets, const Array& column_indices, Array& level, cusp::lu_forward )
    {
        for (IndexType i=0; i<n; i++)
        {
            IndexType l = 0;
            for (IndexType j = row_offsets[i]; j < row_offsets[i+1]; j++)
                l = std::max(l, level[column_indices[j]]);

            level[i] = l+1;
            ilev[l+1]++;
            // was an unqualified `max`; use std::max consistently with above
            nlev = std::max(nlev, IndexType(l+1));
        }
    }

    // Backward (upper-triangular) level assignment: rows visited n-1..0;
    // otherwise identical to the forward variant.
    template< typename Array > 
    void DirectionalSetup( const IndexType n, const Array& row_offsets, const Array& column_indices, Array& level, cusp::lu_backward )
    {
        for (IndexType i=n-1; i>=0; i--)
        {
            IndexType l = 0;
            for (IndexType j = row_offsets[i]; j < row_offsets[i+1]; j++)
                l = std::max(l, level[column_indices[j]]);

            level[i] = l+1;
            ilev[l+1]++;
            nlev = std::max(nlev, IndexType(l+1));
        }
    }
};

// Host-path forward solve: sequential top-down sweep computing
// x = L^{-1} b row by row.  Assumes (review) the diagonal is implicit
// (not stored), since every stored entry of the row is subtracted —
// consistent with the device kernel L_SOL_LEV.  The level structure
// `lev` is unused by the sequential sweep.
template <typename MatrixType, typename LevelType, typename VectorType1, typename VectorType2>
void sparse_forwardsolve(const MatrixType& L, const LevelType& lev, const VectorType1& b, VectorType2& x,
                        cusp::host_memory)
{
    typedef typename MatrixType::index_type IndexType;
    typedef typename MatrixType::value_type ValueType;

    const IndexType n = L.num_rows;

    for (IndexType row = 0; row < n; ++row)
    {
        // start from the rhs, then peel off the lower-triangular terms
        x[row] = b[row];
        const IndexType row_begin = L.row_offsets[row];
        const IndexType row_end   = L.row_offsets[row+1];
        for (IndexType idx = row_begin; idx < row_end; ++idx)
            x[row] -= L.values[idx] * x[L.column_indices[idx]];
    }
}

// Host-path backward solve: sequential bottom-up sweep computing
// x = U^{-1} b.  The first stored entry of each CSR row is the diagonal,
// applied multiplicatively (i.e. presumably stored already inverted —
// confirm with the factorization producing U).  `lev` is unused here.
template <typename MatrixType, typename LevelType, typename VectorType1, typename VectorType2>
void sparse_backsolve(const MatrixType& U, const LevelType& lev, const VectorType1& b, VectorType2& x,
                     cusp::host_memory)
{
    typedef typename MatrixType::index_type IndexType;
    typedef typename MatrixType::value_type ValueType;

    const IndexType n = U.num_rows;

    // seed x with the rhs; updated in place below
    x = b;

    for (IndexType row = n-1; row >= 0; --row)
    {
        const IndexType diag_pos = U.row_offsets[row];     // diagonal entry
        const ValueType diag     = U.values[diag_pos];
        // subtract the strictly-upper part (entries after the diagonal)
        for (IndexType idx = diag_pos+1; idx < U.row_offsets[row+1]; ++idx)
            x[row] -= U.values[idx] * x[U.column_indices[idx]];

        x[row] = diag * x[row];
    }
}

// Device-path forward solve: one L_SOL_LEV launch per level.  Rows within
// a level are independent; successive launches on the same (default)
// stream serialize, which provides the inter-level dependency barrier.
template <typename MatrixType, typename LevelType, typename VectorType1, typename VectorType2>
void sparse_forwardsolve(const MatrixType& L, const LevelType& lev, const VectorType1& b, VectorType2& x,
                        cusp::device_memory)
{
    typedef typename MatrixType::index_type IndexType;
    typedef typename MatrixType::value_type ValueType;

    const unsigned int BLOCK_SIZE = 256;

    // launch one kernel per level over rows jlev[ilev[i]..ilev[i+1])
    // (no rhs copy is needed: the kernel reads b directly)
    for (IndexType i=0; i<lev.nlev; i++) {
        IndexType l1 = lev.ilev[i];
        IndexType l2 = lev.ilev[i+1];
        IndexType l_size = l2 - l1;
        // one half-warp per row, capped at MAX_THREADS total threads
        IndexType nthreads = std::min(l_size*HALF_WARP_SIZE, MAX_THREADS);
        IndexType gDim = (nthreads+BLOCK_SIZE-1)/BLOCK_SIZE;
        // NOTE(review): no cudaGetLastError() after the launch -- launch
        // failures are silently dropped; consider adding a check.
        L_SOL_LEV<IndexType,ValueType,BLOCK_SIZE><<<gDim, BLOCK_SIZE>>>( 	
		thrust::raw_pointer_cast(&b[0]),
                thrust::raw_pointer_cast(&x[0]),
                thrust::raw_pointer_cast(&L.values[0]),
                thrust::raw_pointer_cast(&L.column_indices[0]),
                thrust::raw_pointer_cast(&L.row_offsets[0]),
                thrust::raw_pointer_cast(&lev.jlev[0]),
                l1, l2);
    }
}

// Device-path backward solve: one U_SOL_LEV launch per level.  Rows within
// a level are independent; successive launches on the same (default)
// stream serialize, which provides the inter-level dependency barrier.
template <typename MatrixType, typename LevelType, typename VectorType1, typename VectorType2>
void sparse_backsolve(const MatrixType& U, const LevelType& lev, const VectorType1& b, VectorType2& x,
                     cusp::device_memory)
{
    typedef typename MatrixType::index_type IndexType;
    typedef typename MatrixType::value_type ValueType;

    const unsigned int BLOCK_SIZE = 256;

    // seed x with the rhs; the kernel updates x in place
    x = b;

    // launch one kernel per level over rows jlev[ilev[i]..ilev[i+1])
    for (IndexType i=0; i<lev.nlev; i++) {
        IndexType l1 = lev.ilev[i];
        IndexType l2 = lev.ilev[i+1];
        IndexType l_size = l2 - l1;
        // one half-warp per row, capped at MAX_THREADS total threads
        IndexType nthreads = std::min(l_size*HALF_WARP_SIZE, MAX_THREADS);
        IndexType gDim = (nthreads+BLOCK_SIZE-1)/BLOCK_SIZE;
        // NOTE(review): no cudaGetLastError() after the launch -- launch
        // failures are silently dropped; consider adding a check.
        U_SOL_LEV<IndexType,ValueType,BLOCK_SIZE><<<gDim, BLOCK_SIZE>>>( 	
                thrust::raw_pointer_cast(&x[0]),
                thrust::raw_pointer_cast(&U.values[0]),
                thrust::raw_pointer_cast(&U.column_indices[0]),
                thrust::raw_pointer_cast(&U.row_offsets[0]),
                thrust::raw_pointer_cast(&lev.jlev[0]),
                l1, l2);
    }
}

} // end namespace detail

// Public entry point: dispatch the backward solve to the host or device
// implementation based on the matrix's memory space.
template <typename MatrixType, typename LevelType, typename VectorType1, typename VectorType2>
void sparse_backsolve(const MatrixType& U, const LevelType& lev, const VectorType1& b, VectorType2& x)
{
    typedef typename MatrixType::memory_space Space;
    detail::sparse_backsolve(U, lev, b, x, Space());
}

// Public entry point: dispatch the forward solve to the host or device
// implementation based on the matrix's memory space.
template <typename MatrixType, typename LevelType, typename VectorType1, typename VectorType2>
void sparse_forwardsolve(const MatrixType& L, const LevelType& lev, const VectorType1& b, VectorType2& x)
{
    typedef typename MatrixType::memory_space Space;
    detail::sparse_forwardsolve(L, lev, b, x, Space());
}

} // end namespace cusp

