
#include <cassert>
#include <climits>
#include <numeric>
#include <vector>

// True iff arg is an integral power of two strictly greater than 1.
// NOTE: function-like macro — arg is evaluated three times, so only pass
// side-effect-free expressions (here it is only used on a template constant).
#define IS_POWER_OF_2(arg) (arg > 1 && (arg & (arg - 1)) == 0)


//from http://blogs.msdn.com/b/nativeconcurrency/archive/2012/03/08/parallel-reduction-using-c-amp.aspx
//adjusted to calculate sum and sum(sqr()) of data
//from http://blogs.msdn.com/b/nativeconcurrency/archive/2012/03/08/parallel-reduction-using-c-amp.aspx
//adjusted to calculate sum and sum(sqr()) of data
//
// Cascading GPU reduction over `source`, producing both the sum and the sum of
// squares of all elements (accumulated in double precision).
//
// @tparam _tile_size  GPU tile width; must be a power of two > 1.
// @param source       input data (C++ AMP array_view, at least one element).
// @param sumOut       receives sum(source[i]).
// @param sumSqrOut    receives sum(source[i] * source[i]).
template <unsigned _tile_size>
void reduction_cascade(const array_view<float,1>& source, double& sumOut, double& sumSqrOut)
{
    static_assert(IS_POWER_OF_2(_tile_size), "Tile size must be a positive integer power of two!");

    assert(source.extent.size() <= UINT_MAX);
    unsigned element_count = static_cast<unsigned>(source.extent.size());
    assert(element_count != 0); // Cannot reduce an empty sequence.

    // Each tile consumes 2 * _tile_size elements per pass; `stride` is the
    // total number of elements one full sweep of all tiles covers.
    unsigned _tile_count = element_count / _tile_size / 2;
    unsigned stride = _tile_size * _tile_count * 2;

    // Reduce tail elements (those beyond the last whole stride) on the CPU.
    // When the input is smaller than one stride, _tile_count == 0 and so
    // stride == 0; the whole input is then "tail". (The original computed
    // element_count % stride unconditionally — modulo by zero in that case.)
    double tail_sum = 0.0;
    double tail_sqr = 0.0;
    unsigned tail_length = (stride != 0) ? (element_count % stride) : element_count;
    if (tail_length != 0)
    {
        std::vector<float> tail(tail_length);
        copy(source.section(static_cast<int>(element_count - tail_length),
                            static_cast<int>(tail_length)), &tail[0]);
        for (unsigned i = 0; i < tail_length; ++i)
        {
            tail_sum += tail[i];
            tail_sqr += static_cast<double>(tail[i]) * tail[i];
        }
        element_count -= tail_length;
        if (element_count == 0)
        {
            // Everything was tail — no GPU work to do.
            sumOut = tail_sum;
            sumSqrOut = tail_sqr;
            return;
        }
    }

    // One partial result per tile, written by thread 0 of each tile.
    array<double, 1> partial_sum(_tile_count);
    array<double, 1> partial_sqr(_tile_count);

    parallel_for_each(
        extent<1>(_tile_count * _tile_size).tile<_tile_size>(),
        [=, &partial_sum, &partial_sqr] (tiled_index<_tile_size> tidx) restrict(amp)
    {
        // Use tile_static as a scratchpad memory.
        tile_static double tile_sum[_tile_size];
        tile_static double tile_sqr[_tile_size];

        unsigned local_idx = tidx.local[0];

        // Reduce data strides of twice the tile size into tile_static memory.
        unsigned input_idx = (tidx.tile[0] * 2 * _tile_size) + local_idx;
        tile_sum[local_idx] = 0;
        tile_sqr[local_idx] = 0;
        do
        {
            tile_sum[local_idx] += source[input_idx] + source[input_idx + _tile_size];
            tile_sqr[local_idx] += source[input_idx]*source[input_idx] +
                source[input_idx + _tile_size] * source[input_idx + _tile_size];
            input_idx += stride;
        } while (input_idx < element_count);

        tidx.barrier.wait();

        // Reduce to the tile result using multiple threads.
        // (Loop variable renamed from `stride` — it shadowed the outer stride.)
        for (unsigned step = _tile_size / 2; step > 0; step /= 2)
        {
            if (local_idx < step)
            {
                tile_sum[local_idx] += tile_sum[local_idx + step];
                tile_sqr[local_idx] += tile_sqr[local_idx + step];
            }

            tidx.barrier.wait();
        }

        // Store the tile result in the global memory.
        if (local_idx == 0)
        {
            partial_sum[tidx.tile[0]] = tile_sum[0];
            partial_sqr[tidx.tile[0]] = tile_sqr[0];
        }
    });

    // Reduce results from all tiles on the CPU, seeding each accumulation
    // with the matching tail contribution.
    std::vector<double> v_partial(_tile_count);
    copy(partial_sum, v_partial.begin());
    sumOut = std::accumulate(v_partial.begin(), v_partial.end(), tail_sum);
    copy(partial_sqr, v_partial.begin());
    // BUG FIX: the original seeded this accumulation with tail_sum, so the
    // tail's squared contribution was lost (and the plain tail sum added in).
    sumSqrOut = std::accumulate(v_partial.begin(), v_partial.end(), tail_sqr);
}
