/*
 *  Copyright 2008-2009 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

/*! \file multi_array1d.h
 *  \brief One-dimensional array partitioned on multiple devices
 */

#pragma once

#include <cusp/detail/config.h>
#include <cusp/verify.h>

#include <cusp/array1d.h>
#include <cusp/exception.h>
#include <cusp/format.h>
#include <cusp/memory.h>
#include <cusp/permutation_matrix.h>
#include <cusp/print.h>
#include <cusp/graph/partition.h>

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/gather.h>
#include <thrust/scatter.h>
#include <thrust/sort.h>
#include <thrust/unique.h>

#include <thrust/detail/vector_base.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/permutation_iterator.h>

#include <vector>

namespace cusp
{

/*! \addtogroup arrays Arrays
 */

/*! \addtogroup array_containers Array Containers
 *  \ingroup arrays
 *  \{
 */

/*! \p multi_array1d : One-dimensional array container partitioned across
 *  multiple CUDA devices, with one \p array1d slice per device
 *
 * \tparam T value_type of the array
 * \tparam MemorySpace memory space of the per-device slices (cusp::host_memory or cusp::device_memory)
 *
 * \TODO example
 */
template <typename T, typename MemorySpace>
class multi_array1d
{
private:
    /*! Initialize one per-device slice per distinct partition id, each
     *  filled with the constant \p value.  Also builds the inverse
     *  permutation \p Pt of the partition's permutation matrix and
     *  replicates it onto every participating device (\p Pt_dev).
     *  \throws cusp::invalid_input_exception when no partition is attached.
     *  \throws cusp::runtime_exception when the device count query fails.
     */
    void ValueInit( T value )
    {
        if( partition == NULL )
            throw cusp::invalid_input_exception("partition is set to NULL");

        if ( cusp::is_valid_partition(*partition) )
        {
            int num_devices;
            if( cudaGetDeviceCount(&num_devices) != cudaSuccess )
                throw cusp::runtime_exception("cudaGetDeviceCount failed");

            cusp::permutation_matrix<int,cusp::host_memory> P;
            cusp::graph::partitionToPermMatrix(*partition, P);
            // Pt is the inverse permutation of P: scatter i to position P[i],
            // so that Pt.values[P.values[i]] == i.
            Pt.resize(P.num_rows);
            thrust::scatter(thrust::counting_iterator<int>(0),
                            thrust::counting_iterator<int>(0)+P.num_rows,
                            P.values.begin(),
                            Pt.values.begin());

            // Collect the distinct partition (device) ids in sorted order.
            cusp::array1d<int,cusp::host_memory> unique_part(partition->begin(), partition->end());
            thrust::sort(unique_part.begin(), unique_part.end());
            int num_unique = thrust::unique(unique_part.begin(), unique_part.end()) - unique_part.begin();
            Pt_dev.resize(num_unique);
            unique_part.resize(num_unique);
            vector_array.resize(num_unique);
            deviceNum = std::vector<size_t>(unique_part.begin(), unique_part.end());

            for ( int index = 0; index < num_unique; index++ )
            {
                // Size each slice by the number of entries mapped to its device.
                int num_entries = thrust::count( partition->begin(), partition->end(), unique_part[index] );
                cudaSetDevice(deviceNum[index]);
                vector_array[index].resize(num_entries,value);
                vector_array[index].setDeviceNum( deviceNum[index] );

                // Keep a device-resident copy of the inverse permutation.
                Pt_dev[index].resize(Pt.num_rows);
                Pt_dev[index].values = Pt.values;
                Pt_dev[index].values.setDeviceNum(deviceNum[index]);
            }
        }
    }

    /*! Initialize the per-device slices from an existing array: permute
     *  \p value with the inverse permutation \p Pt so entries bound for the
     *  same device become contiguous, then hand each device its contiguous
     *  range.  Mirrors ValueInit's setup of \p Pt, \p Pt_dev and \p deviceNum.
     *  \throws cusp::invalid_input_exception when no partition is attached.
     *  \throws cusp::runtime_exception when the device count query fails.
     */
    template< typename ArrayType >
    void ArrayInit( const ArrayType& value  )
    {
        if( partition == NULL )
            throw cusp::invalid_input_exception("partition is set to NULL");

        if ( cusp::is_valid_partition(*partition) )
        {
            int num_devices;
            if( cudaGetDeviceCount(&num_devices) != cudaSuccess )
                throw cusp::runtime_exception("cudaGetDeviceCount failed");

            ArrayType Pvalue(value.size());
            cusp::permutation_matrix<int,cusp::host_memory> P;
            cusp::graph::partitionToPermMatrix(*partition, P);
            // Pt is the inverse permutation of P (see ValueInit).
            Pt.resize(P.num_rows);
            thrust::scatter(thrust::counting_iterator<int>(0),
                            thrust::counting_iterator<int>(0)+P.num_rows,
                            P.values.begin(),
                            Pt.values.begin());
            // Reorder the input so each device's entries are contiguous.
            thrust::gather( Pt.values.begin(), Pt.values.end(), value.begin(), Pvalue.begin() );

            // Collect the distinct partition (device) ids in sorted order.
            cusp::array1d<int,cusp::host_memory> unique( *partition );
            thrust::sort(unique.begin(), unique.end());
            int num_slices = thrust::unique( unique.begin(), unique.end() ) - unique.begin();

            Pt_dev.resize(num_slices);
            unique.resize(num_slices);
            vector_array.resize(num_slices);
            deviceNum = std::vector<size_t>(unique.begin(), unique.end());

            int start = 0;
            int end   = 0;
            for ( int index = 0; index < num_slices; index++ )
            {
                // Each slice takes the next contiguous run of permuted entries.
                int num_entries = thrust::count( partition->begin(), partition->end(), (int)deviceNum[index] );
                end = start + num_entries;
                cudaSetDevice(deviceNum[index]);
                vector_array[index] = ArrayType(Pvalue.begin()+start, Pvalue.begin()+end);
                vector_array[index].setDeviceNum(deviceNum[index]);
                start += num_entries;

                // Keep a device-resident copy of the inverse permutation.
                Pt_dev[index].resize(Pt.num_rows);
                Pt_dev[index].values = Pt.values;
                Pt_dev[index].values.setDeviceNum(deviceNum[index]);
            }
        }
    }

    /*! Assign from a flat array: permute with Pt and copy slice \p index's
     *  contiguous range [start, start+size) into it, where start is the sum
     *  of the sizes of all preceding slices (same layout ArrayInit produced).
     */
    template< typename ArrayType >
    void equate( const ArrayType& array, cusp::array1d_format )
    {
        #pragma omp parallel for
        for ( size_t index = 0; index < vector_array.size(); index++ )
        {
            size_t num_entries = vector_array[index].size();
            // BUG FIX: the offset of slice `index` is the cumulative size of
            // all preceding slices (matching ArrayInit's `start += num_entries`
            // accumulation).  The previous `index == 0 ? 0 : vector_array[0].size()`
            // was wrong for any index >= 2 and for unequal slice sizes.
            size_t start = 0;
            for ( size_t prev = 0; prev < index; prev++ )
                start += vector_array[prev].size();
            size_t end = start + num_entries;

            cudaSetDevice(deviceNum[index]);
            thrust::copy( thrust::make_permutation_iterator(array.begin(),Pt.values.begin())+start,
                          thrust::make_permutation_iterator(array.begin(),Pt.values.begin())+end,
                          vector_array[index].begin() );
            vector_array[index].setDeviceNum(deviceNum[index]);
        }
    }

    /*! Assign from another multi_array1d: adopt its partition pointer and
     *  device map, then copy each slice on its own device.
     */
    template< typename ArrayType >
    void equate( const ArrayType& array, cusp::multi_array1d_format )
    {
        partition = array.partition;
        int vec_size = array.vector_array.size();
        vector_array.resize( vec_size );
        deviceNum = array.deviceNum;

        #pragma omp parallel for
        for( int index = 0; index < vec_size; index++ )
        {
            cudaSetDevice(deviceNum[index]);
            vector_array[index] = array(index);
            vector_array[index].setDeviceNum( deviceNum[index] );
        }
    }

public:
    typedef MemorySpace memory_space;
    typedef cusp::multi_array1d_format format;
    typedef T value_type;

    /*! equivalent container type
     */
    typedef typename cusp::multi_array1d<T,MemorySpace> container;

    // CUDA device id owning each slice (parallel to vector_array).
    std::vector< size_t > 	   		deviceNum;
    // One array1d slice per participating device.
    std::vector< cusp::array1d<T,MemorySpace> > vector_array;

    // Non-owning pointer to the partition vector; NULL when unpartitioned.
    const cusp::array1d<int,cusp::host_memory> * partition;
    // Host-side inverse permutation derived from the partition.
    cusp::permutation_matrix<int,cusp::host_memory> Pt;
    // Per-device replicas of Pt.
    std::vector< cusp::permutation_matrix<int,cusp::device_memory> > Pt_dev;

    /*! Construct an empty, unpartitioned container. */
    multi_array1d(void) : partition(NULL) {}

    /*! Construct from a partition pointer, filling every entry with \p value. */
    template< typename ArrayType >
    multi_array1d(const ArrayType * part, const T value) :  partition(part) {
        ValueInit(value);
    }

    /*! Construct from a partition reference, filling every entry with \p value.
     *  NOTE(review): stores the address of \p part — the caller must keep the
     *  partition alive for this object's lifetime.
     */
    template< typename ArrayType >
    multi_array1d(const ArrayType & part, const T value) :  partition(&part) {
        ValueInit(value);
    }

    /*! Construct with the same partition/slice layout as \p other_array but
     *  with every entry set to \p value.
     */
    multi_array1d(const multi_array1d<T,MemorySpace> & other_array, const T value) {
        partition = other_array.partition;
        int vec_size = other_array.getNumSlices();
        vector_array.resize( vec_size );
        deviceNum = other_array.deviceNum;

        #pragma omp parallel for
        for( int index = 0; index < vec_size; index++ )
        {
            cudaSetDevice(deviceNum[index]);
            vector_array[index].resize(other_array(index).size(), value);
            vector_array[index].setDeviceNum( deviceNum[index] );
        }
    }

    /*! Construct one slice of \p num_entries copies of \p value on EVERY
     *  visible device (replicated, not partitioned; partition stays NULL,
     *  so size() must not be called on the result).
     *  \throws cusp::runtime_exception when the device count query fails.
     */
    multi_array1d(const size_t num_entries, const T value) :  partition(NULL)
    {
        int num_devices;
        if( cudaGetDeviceCount(&num_devices) != cudaSuccess )
            throw cusp::runtime_exception("cudaGetDeviceCount failed");

        deviceNum.resize(num_devices);
        vector_array.resize(num_devices);

        for ( int index = 0; index < num_devices; index++ )
        {
            deviceNum[index] = index;
            cudaSetDevice(deviceNum[index]);
            // Size the slice directly in its own memory space (as ValueInit
            // does) instead of building a host_memory temporary and copying.
            vector_array[index].resize(num_entries, value);
            vector_array[index].setDeviceNum(deviceNum[index]);
        }
    }

    /*! Construct by replicating \p array onto EVERY visible device
     *  (replicated, not partitioned; partition stays NULL).
     *  \throws cusp::runtime_exception when the device count query fails.
     */
    template< typename ArrayType >
    multi_array1d(const ArrayType & array) :  partition(NULL)
    {
        int num_devices;
        if( cudaGetDeviceCount(&num_devices) != cudaSuccess )
            throw cusp::runtime_exception("cudaGetDeviceCount failed");

        deviceNum.resize(num_devices);
        vector_array.resize(num_devices);

        for ( int index = 0; index < num_devices; index++ )
        {
            deviceNum[index] = index;
            cudaSetDevice(deviceNum[index]);
            vector_array[index] = array;
            vector_array[index].setDeviceNum(deviceNum[index]);
        }
    }

    /*! Construct from a partition reference and an initial data array
     *  (enabled only when \p value is not convertible to T, so this does not
     *  shadow the constant-fill constructor).
     */
    template< typename ArrayType1, typename ArrayType2 >
    multi_array1d(const ArrayType1 & part, const ArrayType2& value,
                  typename thrust::detail::enable_if<!thrust::detail::is_convertible<ArrayType2,T>::value>::type * = 0)
        : partition(&part) {
        ArrayInit(value);
    }

    /*! Construct from a partition pointer and an initial data array. */
    template< typename ArrayType1, typename ArrayType2 >
    multi_array1d(const ArrayType1 * part, const ArrayType2& value,
                  typename thrust::detail::enable_if<!thrust::detail::is_convertible<ArrayType2,T>::value>::type * = 0)
        : partition(part) {
        ArrayInit(value);
    }

    /*! Copy constructor: shares the partition pointer and deep-copies every
     *  slice on its owning device.
     */
    multi_array1d(const multi_array1d<T,MemorySpace> & other_array)
    {
        partition = other_array.partition;
        int vec_size = other_array.getNumSlices();
        vector_array.resize( vec_size );
        deviceNum = other_array.deviceNum;

        for( int index = 0; index < vec_size; index++ )
        {
            cudaSetDevice(deviceNum[index]);
            vector_array[index] = other_array(index);
            vector_array[index].setDeviceNum( deviceNum[index] );
        }
    }

    /*! Assign from a flat host array, scattering entries to their slices. */
    multi_array1d &operator=(const cusp::array1d<T,cusp::host_memory>& marray)
    {
        equate( marray, cusp::array1d_format() );
        return *this;
    }

    /*! Assign from another multi_array1d (guards against self-assignment). */
    multi_array1d &operator=(const multi_array1d& marray)
    {
        if( this != &marray )
            equate( marray, cusp::multi_array1d_format() );
        return *this;
    }

    /*! Total logical length (the partition's length). */
    size_t size(void) const {
        return partition->size();
    }

    /*! Distributed arrays have a fixed size; only a no-op "resize" to the
     *  current size is accepted.
     *  \throws cusp::invalid_input_exception for any other \p n.
     */
    void resize(const size_t n) {
        if( n != partition->size() )
            throw cusp::invalid_input_exception("Distributed arrays cannot be resized.");
    }

    /*! Mutable access to slice \p index. */
    cusp::array1d<T,MemorySpace> &operator()(const size_t index) {
        return vector_array[index];
    }

    /*! Read-only access to slice \p index. */
    const cusp::array1d<T,MemorySpace> &operator()(const size_t index) const {
        return vector_array[index];
    }

    /*! Number of per-device slices. */
    size_t getNumSlices( void ) const {
        return vector_array.size();
    }

}; // end class multi_array1d
/*! \}
 */

/*! \}
 */

} // end namespace cusp

