/*
 * @file originally mtgp32-cuda.cu
 *
 * MTGP32-11213
 * This program generates 32-bit unsigned integers.
 * The period of generated integers is 2<sup>11213</sup>-1.
 *
 * This also generates single precision floating point numbers
 * uniformly distributed in the range [1, 2). (float r; 1.0 <= r < 2.0)
 */

#include <stdexcept>
#include <vector>
using std::vector;

#include <amp_math.h>

#include "mtgp32-amp.h"
using namespace concurrency;



#define MTGPDC_MEXP 11213
#define MTGPDC_N 351
#define MTGPDC_FLOOR_2P 256
#define MTGPDC_CEIL_2P 512
#define MTGPDC_PARAM_TABLE mtgp32dc_params_fast_11213
#define MEXP 11213
#define THREAD_NUM MTGPDC_FLOOR_2P
#define LARGE_SIZE (THREAD_NUM * 3)
#define BLOCKS_MAX 200
#define TBL_SIZE 16
#define N MTGPDC_N

extern mtgp32_params_fast_t mtgp32dc_params_fast_11213[];

/*
 * Generator Parameters.
 *
 * Copies the per-block MTGP32 parameter set into the host-side tables.
 * Each 2-D table is mirrored into a flattened blocks*TBL_SIZE array so it
 * can later be viewed as a 2-D array_view / texture on the accelerator.
 */
mtgp_amp::Params::Params(const mtgp32_params_fast_t params[], int blocks)
    : pos_tbl(blocks),
    sh1_tbl(blocks),
    sh2_tbl(blocks),
    param_tbl(blocks, vector<uint32_t>(TBL_SIZE)),
    param_tbl_linear(blocks * TBL_SIZE),
    temper_tbl(blocks, vector<uint32_t>(TBL_SIZE)),
    temper_tbl_linear(blocks * TBL_SIZE),
    single_temper_tbl(blocks, vector<uint32_t>(TBL_SIZE)),
    single_temper_tbl_linear(blocks * TBL_SIZE),
    mask(1),
    inner(*this, blocks)
{
    // All parameter sets share the same mask word; take it from the first.
    mask[0] = params[0].mask;
    for (int b = 0; b < blocks; ++b) {
        const mtgp32_params_fast_t& p = params[b];
        pos_tbl[b] = p.pos;
        sh1_tbl[b] = p.sh1;
        sh2_tbl[b] = p.sh2;
        const int base = b * TBL_SIZE;  // row offset into the flattened tables
        for (int k = 0; k < TBL_SIZE; ++k)
        {
            param_tbl[b][k] = p.tbl[k];
            param_tbl_linear[base + k] = p.tbl[k];
            temper_tbl[b][k] = p.tmp_tbl[k];
            temper_tbl_linear[base + k] = p.tmp_tbl[k];
            single_temper_tbl[b][k] = p.flt_tmp_tbl[k];
            single_temper_tbl_linear[base + k] = p.flt_tmp_tbl[k];
        }
    }
}
    
/**
 * Accelerator-visible view of the generator parameters.
 *
 * Wraps each host vector of Params in a concurrency::array_view so the
 * tables can be captured by value inside a parallel_for_each lambda.
 * The 2-D views (param_tbl, temper_tbl, single_temper_tbl) reinterpret the
 * flattened blocks*TBL_SIZE *_linear arrays as blocks x TBL_SIZE.
 *
 * @param[in] params  host-side parameter tables this view aliases
 * @param[in] blocks  number of MTGP32 blocks (first extent of each table)
 */
mtgp_amp::Params::ParamsView::ParamsView(const Params& params, int blocks)
    :
    pos_tbl(blocks, params.pos_tbl), 
    sh1_tbl(blocks, params.sh1_tbl), 
    sh2_tbl(blocks, params.sh2_tbl), 
    param_tbl(blocks, TBL_SIZE, params.param_tbl_linear), 
    temper_tbl(blocks, TBL_SIZE, params.temper_tbl_linear), 
    single_temper_tbl(blocks, TBL_SIZE, params.single_temper_tbl_linear), 
    mask(1, params.mask)
{}

/**
 * Texture-backed copy of the generator parameters.
 *
 * Each member is constructed by copying the corresponding host vector's
 * iterator range; the 2-D tables are built from the flattened *_linear
 * arrays.  NOTE(review): the texture path in make_float_random_amp is
 * commented out, so this object appears to be constructed but unused.
 *
 * @param[in] params  host-side parameter tables to copy from
 * @param[in] blocks  number of MTGP32 blocks (first extent of each table)
 */
mtgp_amp::Params::ParamsTexture::ParamsTexture(const Params& params, int blocks)
    :
    pos_tbl(blocks, params.pos_tbl.begin(), params.pos_tbl.end()), 
    sh1_tbl(blocks, params.sh1_tbl.begin(), params.sh1_tbl.end()), 
    sh2_tbl(blocks, params.sh2_tbl.begin(), params.sh2_tbl.end()), 
    param_tbl(blocks, TBL_SIZE, params.param_tbl_linear.begin(), params.param_tbl_linear.end()), 
    temper_tbl(blocks, TBL_SIZE, params.temper_tbl_linear.begin(), params.temper_tbl_linear.end()), 
    single_temper_tbl(blocks, TBL_SIZE, params.single_temper_tbl_linear.begin(), params.single_temper_tbl_linear.end()), 
    mask(1, params.mask.begin(), params.mask.end())
{}

// Transform a pair of uniform deviates from [1,2) into a pair of
// independent standard-normal deviates (Box-Muller, polar form in angle).
//
// The generator's raw output lies in [1,2) (see file header), so u1-1.0f
// lies in [0,1) and can be exactly 0, which makes log() return -inf and
// poisons the result with inf/NaN.  Using 2.0f-u1 instead maps the input
// onto (0,1] -- the same uniform distribution -- and keeps log() finite.
//
// @param[in,out] u1 uniform in [1,2) on entry; first normal deviate on exit
// @param[in,out] u2 uniform in [1,2) on entry; second normal deviate on exit
void box_muller_transform(float& u1, float& u2) restrict(cpu, amp)
{
    float r = fast_math::sqrt(-2.0f * fast_math::log(2.0f - u1));
    float phi = 2.0f * 3.14159265358979f * (u2 - 1.0f);
    u1 = r * fast_math::cos(phi);
    u2 = r * fast_math::sin(phi);
}



/**
    * The function of the recursion formula calculation.
    *
    * Combines two distant state words with the masked-linear recursion,
    * then applies the block's 16-entry lookup matrix.
    *
    * @param[in] X1 the farthest part of state array.
    * @param[in] X2 the second farthest part of state array.
    * @param[in] Y a part of state array.
    * @param[in] bid block id.
    * @param[in] t parameter tables (view or texture).
    * @return output
    */
template<class PARAMS>
uint32_t para_rec(uint32_t X1, uint32_t X2, uint32_t Y, int bid, const PARAMS& t) 
    restrict(amp,cpu)
{
    uint32_t x = (X1 & t.mask[0]) ^ X2;
    x ^= x << t.sh1_tbl[bid];
    uint32_t y = x ^ (Y >> t.sh2_tbl[bid]);
    // Low 4 bits of y select one of 16 recursion matrices for this block.
    return y ^ t.param_tbl(bid, y & 0x0f);
}

/**
    * The tempering function.
    *
    * Folds the helper value down to 4 bits and XORs the matching
    * tempering-table entry into the raw output.
    *
    * @param[in] V the output value should be tempered.
    * @param[in] T the tempering helper value.
    * @param[in] bid block id.
    * @param[in] t parameter tables (view or texture).
    * @return the tempered value.
    */
template<class PARAMS>
uint32_t temper(uint32_t V, uint32_t T, int bid, const PARAMS& t) 
    restrict(amp,cpu)
{
    T ^= T >> 16;
    T ^= T >> 8;
    return V ^ t.temper_tbl(bid, T & 0x0f);
}

/**
    * The tempering and converting function.
    * By using the preset-ted table, converting to IEEE format
    * and tempering are done simultaneously.
    *
    * The top 23 bits of V become the mantissa; the table entry supplies
    * sign/exponent bits, yielding a float in [1,2) when the table encodes
    * exponent 127.
    *
    * @param[in] V the output value should be tempered.
    * @param[in] T the tempering helper value.
    * @param[in] bid block id.
    * @param[in] t parameter tables (view or texture).
    * @return the tempered and converted value.
    */
template<class PARAMS>
float temper_single(uint32_t V, uint32_t T, int bid, const PARAMS& t) 
    restrict(amp,cpu)
{
    // Union type-punning: reinterpret the assembled bit pattern as a float.
    union {
        uint32_t u;
        float f;
    } bits;

    T ^= T >> 16;
    T ^= T >> 8;
    bits.u = (V >> 9) ^ t.single_temper_tbl(bid, T & 0x0f);
    return bits.f;
}

/**
    * Read the internal state vector from kernel I/O data, and
    * put them into shared memory.
    *
    * @param[out] status shared memory.
    * @param[in] d_status kernel I/O data
    * @param[in] bid block id
    * @param[in] tid thread id
    */
/**
    * Read the internal state vector from kernel I/O data, and
    * put them into shared memory.
    *
    * The N (=351) saved state words are placed at the TOP of the
    * LARGE_SIZE (=768) circular buffer, i.e. at indices
    * [LARGE_SIZE-N, LARGE_SIZE).  Every thread copies one word; the
    * remaining N-THREAD_NUM words are copied by the low-numbered threads.
    * Indices below LARGE_SIZE-N are left uninitialized and are filled by
    * the first generation pass.
    *
    * @param[out] status shared memory.
    * @param[in] d_status kernel I/O data
    * @param[in] bid block id
    * @param[in] tid thread id
    */
void status_read(
    uint32_t status[LARGE_SIZE],
    const array_view<mtgp32_kernel_status_t>& d_status,
    int bid, int tid) 
    restrict(amp,cpu)
{
    status[LARGE_SIZE - N + tid] = d_status[bid].status[tid];
    if (tid < N - THREAD_NUM) {
            status[LARGE_SIZE - N + THREAD_NUM + tid] = d_status[bid].status[THREAD_NUM + tid];
    }
}

/**
    * Read the internal state vector from shared memory, and
    * write them into kernel I/O data.
    *
    * @param[out] d_status kernel I/O data
    * @param[in] status shared memory.
    * @param[in] bid block id
    * @param[in] tid thread id
    */
/**
    * Read the internal state vector from shared memory, and
    * write them into kernel I/O data.
    *
    * Mirror of status_read: saves the newest N words of the circular
    * buffer.  With THREAD_NUM=256, N=351, LARGE_SIZE=768 the second index,
    * 4*THREAD_NUM - N, equals LARGE_SIZE - N + THREAD_NUM (=673), so the
    * two stores together cover exactly indices [LARGE_SIZE-N, LARGE_SIZE).
    *
    * @param[out] d_status kernel I/O data
    * @param[in] status shared memory.
    * @param[in] bid block id
    * @param[in] tid thread id
    */
void status_write(
    const array_view<mtgp32_kernel_status_t>& d_status,
    const uint32_t status[LARGE_SIZE],
    int bid, int tid) 
    restrict(amp,cpu)
{
    d_status[bid].status[tid] = status[LARGE_SIZE - N + tid];
    if (tid < N - THREAD_NUM) {
            d_status[bid].status[THREAD_NUM + tid] = status[4 * THREAD_NUM - N + tid];
    }
}

/**
    * Core of the float-generating kernel: runs the MTGP32 recursion over a
    * LARGE_SIZE (=3*THREAD_NUM) circular buffer held in tile_static memory.
    *
    * Each loop iteration produces LARGE_SIZE outputs in three batches of
    * THREAD_NUM, with a tile barrier after each batch so every thread sees
    * the freshly written state words before the next batch reads them.
    * The statement order and barriers are load-bearing; do not reorder.
    *
    * NOTE(review): the output indexing assumes n_per_block is a multiple of
    * LARGE_SIZE (no tail guard on dataOut writes) — confirm against callers.
    *
    * @param[in,out] status  tile_static circular state buffer, pre-loaded
    *                        by status_read.
    * @param[in] idx     tiled index (tile = block id, local = thread id).
    * @param[in] params  parameter tables (view or texture).
    * @param[out] dataOut uniform floats in [1,2), n_per_block per block.
    */
template<class PARAMS>
void mtgp32_float_kernel_core( 
    uint32_t status[], 
    tiled_index<THREAD_NUM> idx,
    const PARAMS& params,
    const array_view<float>& dataOut
    ) 
    restrict(amp)
{
    uint32_t r;
    float o;
    int bid = idx.tile[0];
    int tid = idx.local[0];
    size_t n_per_block = dataOut.extent.size()/params.pos_tbl.extent.size();
    int pos = params.pos_tbl[bid];

    // main loop per thread
    for (size_t i = 0; i < n_per_block; i += LARGE_SIZE) 
    {
        // Batch 1: read old state at the top of the buffer, write new
        // words to [0, THREAD_NUM).
        r = para_rec(
            status[LARGE_SIZE - N + tid],
            status[LARGE_SIZE - N + tid + 1],
            status[LARGE_SIZE - N + tid + pos],
            bid, params);
        status[tid] = r;
        o = temper_single(r, status[LARGE_SIZE - N + tid + pos - 1], bid, params);
        dataOut[n_per_block * bid + i + tid] = o;
        idx.barrier.wait();

        // Batch 2: reads may wrap past the end of the buffer (indices can
        // reach batch-1 output), hence the % LARGE_SIZE.
        r = para_rec(
            status[(4 * THREAD_NUM - N + tid) % LARGE_SIZE],
            status[(4 * THREAD_NUM - N + tid + 1) % LARGE_SIZE],
            status[(4 * THREAD_NUM - N + tid + pos) % LARGE_SIZE],
            bid, params);
        status[tid + THREAD_NUM] = r;
        o = temper_single(r, status[(4 * THREAD_NUM - N + tid + pos - 1) % LARGE_SIZE], bid, params);
        dataOut[n_per_block * bid + THREAD_NUM + i + tid] = o;
        idx.barrier.wait();

        // Batch 3: indices stay in range without wrapping.
        r = para_rec(
            status[2 * THREAD_NUM - N + tid],
            status[2 * THREAD_NUM - N + tid + 1],
            status[2 * THREAD_NUM - N + tid + pos],
            bid, params);
        status[tid + 2 * THREAD_NUM] = r;
        o = temper_single(r, status[tid + pos - 1 + 2 * THREAD_NUM - N], bid, params);
        dataOut[n_per_block * bid + 2 * THREAD_NUM + i + tid] = o;
        idx.barrier.wait();
    }
}

/**
    * host function.
    * This function calls corresponding kernel function.
    */
/**
    * host function.
    * This function calls corresponding kernel function.
    *
    * Launches blocks*THREAD_NUM threads tiled by THREAD_NUM; each tile runs
    * an independent MTGP32 stream, then applies the Box-Muller transform in
    * place so the output is normally distributed.  The generator state in
    * v_status is updated so successive calls continue the streams.
    *
    * NOTE(review): n_per_block = size/blocks truncates; any remainder
    * elements of d_data are never written.  The Box-Muller loop also
    * appears to assume n_per_block/2 is a multiple of THREAD_NUM — confirm
    * against how d_data is sized by callers.
    *
    * @param[in,out] d_data device array filled with normal deviates.
    */
void mtgp_amp::make_float_random_amp(array<float>& d_data) 
{
    int size = d_data.extent.size();
    int blocks = min(BLOCKS_MAX, (size-1) / LARGE_SIZE +1); //round up size/LARGE_size
    size_t n_per_block = size/blocks;
    array_view<float> v_data(d_data);

    const mtgp_amp::Params::ParamsView& d_params = params.inner;
    //const mtgp_amp::Params::ParamsTexture& d_params = paramsTexture;

    array_view<mtgp32_kernel_status_t>& d_status(v_status);

    parallel_for_each(
        extent<1>(blocks*THREAD_NUM).tile<THREAD_NUM>(), 
        [=](tiled_index<THREAD_NUM> idx) restrict(amp)
    {
        int bid = idx.tile[0];
        int tid = idx.local[0];

        //Shared memory: The generator's internal status vector.
        tile_static uint32_t status[LARGE_SIZE];

        // copy status data from global memory to shared memory.
        status_read(status, d_status, bid, tid);
        idx.barrier.wait();

        mtgp32_float_kernel_core(status, idx, d_params, v_data);

        // write back status for next call
        status_write(d_status, status, bid, tid);
        idx.barrier.wait();

        // Pair element i of the block's first half with element i of its
        // second half and transform both to normal deviates.
        for (size_t i = 0; i < n_per_block/2; i += THREAD_NUM) 
            box_muller_transform(v_data[n_per_block*bid + tid + i], v_data[n_per_block*bid+n_per_block/2 + tid + i]);
    });
}

/**
* host function to calculate uniform randoms in parallel 
*/
/**
* host function to calculate uniform randoms in parallel.
*
* CPU mirror of the GPU kernel: blocks run in parallel (one parallel_for
* task per block), and within a block a sequential tid loop performs the
* same circular-buffer recursion the GPU does in three barrier-separated
* batches.  Generator state in v_status is updated for subsequent calls,
* and the block's output is Box-Muller-transformed in place.
*
* @param[in,out] h_data filled with normal deviates; its size must imply
*                       the same block count chosen at construction.
* @throws std::runtime_error if h_data's size implies a different block
*                            count than the constructor fixed.
*/
void mtgp_amp::make_float_random_cpu_parallel(vector<float>& h_data) 
{
    int data_size = h_data.size();
    // The block count is baked into the stored generator state; a mismatch
    // would desynchronize the per-block streams.
    if(min(BLOCKS_MAX, (data_size-1) / LARGE_SIZE +1)!= blocks)
        // std::runtime_error is standard C++; std::exception(const char*)
        // is an MSVC-only extension and does not compile elsewhere.
        throw std::runtime_error("Block size does not match constructor");

    parallel_for(0U, blocks, [&](size_t bid)
    {
        size_t n_per_block = data_size/blocks;
        int pos = params.inner.pos_tbl[bid];

        // Local copy of this block's circular state buffer.
        uint32_t status[LARGE_SIZE];
        for(int tid = 0; tid<THREAD_NUM; ++tid)
            status_read(status, v_status, bid, tid);

        //The GPU implementation is tiled on block and thread (0-255), and the kernel
        //runs the MTGP recursion three times for each thread (taking advantage of sync after each so all threads 
        //have executed.  This repeats until all randoms generated.
        //The CPU in contrast runs blocks in parallel then loops (tid) over the 3x256 recursion per block, then repeats.
        for (size_t i = 0; i < n_per_block; i += LARGE_SIZE) 
        {
            for(int tid = 0; tid<LARGE_SIZE; ++tid)
            {
                // Sequential tid order guarantees each read of a wrapped
                // index sees the value already produced this pass.
                uint32_t start = LARGE_SIZE - N + tid;
                uint32_t r = para_rec(
                    status[(start) % LARGE_SIZE],
                    status[(start + 1) % LARGE_SIZE],
                    status[(start + pos) % LARGE_SIZE],
                    bid, 
                    params.inner);
                status[tid] = r;
                float f =  temper_single(r, status[(start + pos - 1) % LARGE_SIZE], bid, params.inner);
                // Guard the tail: data_size need not be a multiple of LARGE_SIZE.
                if(i + n_per_block * bid + tid < (size_t)data_size)
                    h_data[i + n_per_block * bid + tid] = f;
            }
        }

        // Persist the newest N state words so the next call continues the stream.
        for(int tid = 0; tid<THREAD_NUM; ++tid)
            status_write(v_status, status, bid, tid);

        // Transform pairs (first half, second half) of this block's output
        // from uniform [1,2) to standard normal.
        for(size_t i=n_per_block*bid; i<n_per_block*bid+n_per_block/2; ++i)
            box_muller_transform(h_data[i], h_data[i+n_per_block/2]);
    });
}

/**
* host function to calculate uniform randoms on one cpu 
*/
/**
* host function to calculate uniform randoms on one cpu.
*
* Single-threaded twin of make_float_random_cpu_parallel: iterates the
* blocks sequentially but performs the identical circular-buffer recursion
* per block.  Generator state in v_status is updated for subsequent calls,
* and each block's output is Box-Muller-transformed in place.
*
* @param[in,out] h_data filled with normal deviates; its size must imply
*                       the same block count chosen at construction.
* @throws std::runtime_error if h_data's size implies a different block
*                            count than the constructor fixed.
*/
void mtgp_amp::make_float_random_cpu_singlethread(vector<float>& h_data) 
{
    int data_size = h_data.size();
    // The block count is baked into the stored generator state; a mismatch
    // would desynchronize the per-block streams.
    if(min(BLOCKS_MAX, (data_size-1) / LARGE_SIZE +1)!= blocks)
        // std::runtime_error is standard C++; std::exception(const char*)
        // is an MSVC-only extension and does not compile elsewhere.
        throw std::runtime_error("Block size does not match constructor");

    for(size_t bid = 0U; bid<blocks; ++bid)
    {
        size_t n_per_block = data_size/blocks;
        int pos = params.inner.pos_tbl[bid];

        // Local copy of this block's circular state buffer.
        uint32_t status[LARGE_SIZE];
        for(int tid = 0; tid<THREAD_NUM; ++tid)
            status_read(status, v_status, bid, tid);

        //The GPU implementation is tiled on block and thread (0-255), and the kernel
        //runs the MTGP recursion three times for each thread (taking advantage of sync after each so all threads 
        //have executed.  This repeats until all randoms generated.
        //The CPU in contrast runs blocks in parallel then loops (tid) over the 3x256 recursion per block, then repeats.
        for (size_t i = 0; i < n_per_block; i += LARGE_SIZE) 
        {
            for(int tid = 0; tid<LARGE_SIZE; ++tid)
            {
                // Sequential tid order guarantees each read of a wrapped
                // index sees the value already produced this pass.
                uint32_t start = LARGE_SIZE - N + tid;
                uint32_t r = para_rec(
                    status[(start) % LARGE_SIZE],
                    status[(start + 1) % LARGE_SIZE],
                    status[(start + pos) % LARGE_SIZE],
                    bid, 
                    params.inner);
                status[tid] = r;
                float f =  temper_single(r, status[(start + pos - 1) % LARGE_SIZE], bid, params.inner);
                // Guard the tail: data_size need not be a multiple of LARGE_SIZE.
                if(i + n_per_block * bid + tid < (size_t)data_size)
                    h_data[i + n_per_block * bid + tid] = f;
            }
        }

        // Persist the newest N state words so the next call continues the stream.
        for(int tid = 0; tid<THREAD_NUM; ++tid)
            status_write(v_status, status, bid, tid);

        // Transform pairs (first half, second half) of this block's output
        // from uniform [1,2) to standard normal.
        for(size_t i=n_per_block*bid; i<n_per_block*bid+n_per_block/2; ++i)
            box_muller_transform(h_data[i], h_data[i+n_per_block/2]);
    }
}

/**
 * Construct a generator sized for data_size outputs.
 *
 * Fixes the block count (capped at BLOCKS_MAX), builds the parameter
 * tables/views, and seeds each block's state with a distinct seed (i+1)
 * via mtgp32_init_state.
 *
 * NOTE(review): (data_size-1) underflows for data_size == 0 since
 * data_size is size_t — assumes data_size >= 1; confirm with callers.
 * Member init order here must match the declaration order in the class
 * (v_status is built from h_status, params before paramsTexture).
 *
 * @param[in] data_size number of random values a single generate call
 *                      is expected to produce.
 */
mtgp_amp::mtgp_amp(size_t data_size)
    :
    blocks_max(BLOCKS_MAX),
    blocks( min(blocks_max, (data_size-1) / LARGE_SIZE +1)), //round up size/LARGE_size
    h_status(blocks),
    v_status(blocks, h_status),
    params(MTGPDC_PARAM_TABLE, blocks),
    paramsTexture(params, blocks)
{
    // Seed i+1 (never 0) gives each block an independent starting state.
    for (size_t i = 0; i < blocks; i++) 
    {
        mtgp32_init_state(&(h_status[i].status[0]), &MTGPDC_PARAM_TABLE[i], i + 1);
    }
}


//int main(int argc, char** argv)
//{
//    // LARGE_SIZE is a multiple of 16
//    int size = 10000000;
//    int blocks;
//    int num_unit;
//    int r;
//    int device = 0;
//    mtgp32_kernel_status_t *d_status;
//    int mb, mp;
//
//    ccudaSetDevice(device);
//
//    if (argc >= 2) {
//         errno = 0;
//         blocks = strtol(argv[1], NULL, 10);
//         if (errno) {
//             printf("%s number_of_block number_of_output\n", argv[0]);
//             return 1;
//         }
//         if (blocks < 1 || blocks > BLOCKS_MAX) {
//             printf("%s blocks should be between 1 and %d\n",
//                     argv[0], BLOCKS_MAX);
//             return 1;
//         }
//         errno = 0;
//         size = strtol(argv[2], NULL, 10);
//         if (errno) {
//             printf("%s number_of_block number_of_output\n", argv[0]);
//             return 1;
//         }
//         argc -= 2;
//         argv += 2;
//    } else {
//         printf("%s number_of_block number_of_output\n", argv[0]);
//         blocks = get_suitable_blocks(device,
//                                                &mb,
//                                                &mp,
//                                                sizeof(uint32_t),
//                                                THREAD_NUM,
//                                                LARGE_SIZE);
//         if (blocks <= 0) {
//             printf("can't calculate sutable number of blocks.\n");
//             return 1;
//         }
//         printf("the suitable number of blocks for device 0 "
//                "will be multiple of %d, or multiple of %d\n", blocks,
//                (mb - 1) * mp);
//         return 1;
//    }
//    num_unit = LARGE_SIZE * blocks;
//    ccudaMalloc((void**)&d_status, sizeof(mtgp32_kernel_status_t) * blocks);
//    r = size % num_unit;
//    if (r != 0) {
//         size = size + num_unit - r;
//    }
//    make_constant(MTGPDC_PARAM_TABLE, blocks);
//    make_kernel_data32(d_status, MTGPDC_PARAM_TABLE, blocks);
//    make_uint32_random(d_status, size, blocks);
//    make_float_random(d_status, size, blocks);
//
//    ccudaFree(d_status);
//}
