/*
  Copyright (c) 2010 Davide Rossetti (davide.rossetti@roma1.infn.it)

  This file is part of CuHB (CUDA Heisenberg) package.
  
  CuHB is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  CuHB is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.
  
  You should have received a copy of the GNU General Public License
  along with CuHB.  If not, see <http://www.gnu.org/licenses/>.

*/


#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <iostream>
using namespace std;

#include <cuda_runtime.h>

#include "hb.h"
#include "rng.hpp"
#include "rng.cuh"
#include "dualbuf.hpp"

//-----------------------------------------------------------------------------

// Construct an RNG manager for n_blocks thread blocks of block_size
// threads each.  No memory is allocated until init() is called.
rng_uniform::rng_uniform(size_t block_size, size_t n_blocks) : 
    m_gpu_states(NULL), m_cpu_states(NULL), m_block_size(block_size), m_n_blocks(n_blocks)
{
    // %zu matches size_t; the old %d is undefined behavior where
    // size_t is wider than int (any LP64 platform)
    trace("block_size=%zu n_blocks=%zu\n", block_size, n_blocks);
}
  
//-----------------------------------------------------------------------------

// Release both copies of the RNG state tables (device first, then host)
// and reset the geometry so a stale object is visibly empty.
rng_uniform::~rng_uniform()
{
    if(m_gpu_states != NULL) {
        trace("freeing gpu_states buffer\n");
        cudaFree(m_gpu_states);
        m_gpu_states = NULL;
    }
    if(m_cpu_states != NULL) {
        trace("freeing cpu_states buffer\n");
        free(m_cpu_states);
        m_cpu_states = NULL;
    }
    m_block_size = 0;
    m_n_blocks = 0;
}

//-----------------------------------------------------------------------------
// same random numbers on all blocks

// Fill one per-block RNG state: set the four ring-buffer indices (their
// offsets from 255 are the generator lags 24/55/61 used in
// gen_uniform_int) and fill the lag table with 32-bit words built from
// two successive rand() calls.
static void rng_fill_state(rng_state *state)
{
    state->ip.x = 255;
    state->ip.y = 255-24;
    state->ip.z = 255-55;
    state->ip.w = 255-61;
    for(size_t i=0; i<RNG_N; ++i) {
        // NOTE(review): on platforms where RAND_MAX > 0x7fff the two
        // halves overlap after the <<16 shift -- confirm this is intended
        rng_int_t n = (rand() << 16) | rand();
        //assert(n >= 0 && n < RNG_INT_MAX);
        state->ira[i] = n;
    }
}

// Allocate and seed the per-block RNG states on host and device.
// equal_blocks=true re-seeds srand before every block so all blocks get
// identical state; otherwise each block gets a different stretch of the
// rand() sequence.  Calls error() (fatal) on CUDA failures.
void rng_uniform::init(uint seed, bool equal_blocks)
{
    size_t states_bytes = sizeof(rng_state)*m_n_blocks;

    // guard against re-init: the previous version leaked both buffers
    // when init() was called twice
    if(m_cpu_states) {
        free(m_cpu_states);
        m_cpu_states = NULL;
    }
    if(m_gpu_states) {
        cudaFree(m_gpu_states);
        m_gpu_states = NULL;
    }

    m_cpu_states = (rng_state*)malloc(states_bytes);
    assert(m_cpu_states);

    trace("filling %zu states structs, equal_blocks=%s\n", m_n_blocks, equal_blocks?"true":"false");
    if(equal_blocks) {
        // identical states everywhere: restart rand() before each block
        for(size_t b=0; b<m_n_blocks; ++b) {
            srand(seed);
            rng_fill_state(m_cpu_states+b);
        }
    } else {
        // seed once; consecutive blocks consume consecutive rand() draws
        // (the old code also used a plain int here instead of rng_int_t)
        srand(seed);
        for(size_t b=0; b<m_n_blocks; ++b) {
            rng_fill_state(m_cpu_states+b);
        }
    }

    trace("allocating %zu bytes on GPU\n", states_bytes);
    if(cudaMalloc((void**)&m_gpu_states, states_bytes) != cudaSuccess) {
        error("%s: error allocating GPU memory\n", __FUNCTION__);
    }

    trace("copying states struct to GPU\n");
    if(cudaMemcpy(m_gpu_states, m_cpu_states, states_bytes, cudaMemcpyHostToDevice) != cudaSuccess) {
        error("%s: error copying RNG state\n", __FUNCTION__);
    }
}

//-----------------------------------------------------------------------------

// Return the device-side state array.  The buffer only exists after a
// successful init(); calling before that is a fatal error.
rng_state *rng_uniform::gpu_states()
{
    if(m_gpu_states == NULL)
        error("%s: rng not initialized\n", __FUNCTION__);
    return m_gpu_states;
}

//-----------------------------------------------------------------------------

// Draw one raw random integer from the CPU-side copy of block
// `using_block`'s state.  The generator reads three lagged entries of
// the ira[] ring buffer -- with the indices initialized in init(), the
// lags are 24, 55 and 61 behind the head -- adds two, xors the third,
// stores the sum back at the head, then advances all four indices.
// NOTE(review): the indices are uchar, so ++ wraps at 256; this assumes
// RNG_N == 256 so the wraparound matches the table size -- confirm
// against rng.cuh.
rng_int_t rng_uniform::gen_uniform_int(size_t using_block)
{
    assert(m_n_blocks > 0);
    assert(m_cpu_states);
    assert(using_block < m_n_blocks);

    rng_state *state = m_cpu_states + using_block;
    
    uchar4 ip = state->ip;
    // tmp = x[i-24] + x[i-55]; output = tmp ^ x[i-61]
    rng_int_t tmp = state->ira[ip.y]+state->ira[ip.z];
    rng_int_t rng = tmp ^ state->ira[ip.w];
    // feed the sum (not the xor-ed output) back into the table
    state->ira[ip.x] = tmp;
    // advance the ring: uchar arithmetic wraps mod 256 automatically
    ip.x++; ip.y++; ip.z++; ip.w++;
    state->ip = ip;
    return rng;
}

//-----------------------------------------------------------------------------

// One uniform float in [0,1]: a single integer draw scaled by the
// maximum representable value.
float rng_uniform::gen_uniform_float(size_t using_block)
{
    rng_int_t draw = gen_uniform_int(using_block);
    float result = (float)draw/(float)RNG_INT_MAX;
    return result;
}

//-----------------------------------------------------------------------------

// Two uniform floats in [0,1]; the first draw lands in .x, the second
// in .y (draw order matters for reproducibility).
float2 rng_uniform::gen_uniform_float2(size_t using_block)
{
    float2 out;
    rng_int_t first  = gen_uniform_int(using_block);
    rng_int_t second = gen_uniform_int(using_block);
    out.x = (float)first/(float)RNG_INT_MAX;
    out.y = (float)second/(float)RNG_INT_MAX;
    return out;
}

//-----------------------------------------------------------------------------

// One N(0,1) sample via the Box-Muller transform on two uniform draws.
float rng_uniform::gen_gaussian_float(size_t using_block)
{
    float t1 = gen_uniform_float(using_block);
    float t2 = gen_uniform_float(using_block);
    // guard: t1 == 0 would make logf(t1) = -inf and the result NaN/inf;
    // substitute the smallest nonzero draw instead
    if(t1 <= 0.0f)
        t1 = 1.0f/(float)RNG_INT_MAX;
    // float-precision math (sqrtf/logf/cosf, 2.0f) avoids the silent
    // promotion to double caused by the old 2.0/log/cos spelling
    return sqrtf(-2.0f*logf(t1)) * cosf(2.0f * CUDART_PI_F * t2);
}

//-----------------------------------------------------------------------------

// Two independent N(0,1) samples via Box-Muller: the pair
// (r*cos(theta), r*sin(theta)) from two uniform draws.
float2 rng_uniform::gen_gaussian_float2(size_t using_block)
{
    float t1 = gen_uniform_float(using_block);
    float t2 = gen_uniform_float(using_block);
    // guard: t1 == 0 would make logf(t1) = -inf and both results NaN/inf
    if(t1 <= 0.0f)
        t1 = 1.0f/(float)RNG_INT_MAX;
    // float-precision math (sqrtf/logf/cosf/sinf, 2.0f) avoids the
    // silent promotion to double caused by the old double literals
    float r = sqrtf(-2.0f*logf(t1));
    float theta = 2.0f * CUDART_PI_F * t2;
    return make_float2(r * cosf(theta),
                       r * sinf(theta));
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------

// relative tolerance (10E-2 == 10%) for the statistical checks below
const float RNG_EPS = 10E-2;

// Compute the sample mean and (biased, population) standard deviation
// of buf[0..size-1] in a single pass, via E((x-E(x))^2) = E(x^2)-E(x)^2.
// size == 0 yields mean = sigma = 0 instead of dividing by zero.
template <typename T>
void rng_calc_estimators(const T *buf, const size_t size, T &mean, T &sigma)
{
    if(size == 0) {
        mean = 0;
        sigma = 0;
        return;
    }

    T ex = 0;  // running sum of x
    T x2 = 0;  // running sum of x^2

    // size_t index: the old int counter mixed signed/unsigned and would
    // overflow for sizes above INT_MAX
    for(size_t i=0; i<size; ++i) {
        ex += buf[i];
        x2 += buf[i]*buf[i];
    }

    mean = ex / size;
    sigma = sqrt(x2/size - mean*mean);
}

//-----------------------------------------------------------------------------

// Verify every element of buf[0..size-1] lies in [min, max].
// Each offending element is printed (in hex); returns true iff all
// elements are in range.
template <typename T>
bool check_values_in_range(const T *buf, const size_t size, const T min, const T max)
{
    bool ret = true;

    // size_t index: the old int counter mixed signed/unsigned and would
    // overflow for sizes above INT_MAX
    for(size_t i=0; i<size; ++i) {
        bool bmax = (buf[i] <= max);
        bool bmin = (buf[i] >= min);
        if(!bmax || !bmin)
            std::cout << "BAD value in buf[" << i << "]=" << std::hex << buf[i] << std::dec << std::endl;
        ret = ret && (bmax && bmin);
    }

    return ret;
}

//-----------------------------------------------------------------------------

// Statistical sanity check for a buffer of U(0,1) samples: all values
// must be in [0,1], the mean close to 0.5 and the std deviation close
// to 1/sqrt(12), within RNG_EPS relative tolerance.
bool test_uniform(float *buf, size_t size)
{
    // bug fix: the range-check result used to be computed and dropped,
    // so out-of-range values never failed the test
    bool in_range = check_values_in_range(buf, size, 0.0f, 1.0f);

    float mean, sigma;
    rng_calc_estimators(buf, size, mean, sigma);
    trace("mean=%f dev=%f\n", mean, sigma);

    // E[U(0,1)] = 0.5
    bool c1 = (fabs(mean - 0.5)/fabs(mean) < RNG_EPS);
    // stddev[U(0,1)] = 1/sqrtf(12.0f) = 0.28867
    bool c2 = (fabs(sigma - 1/sqrtf(12.0f))/fabs(sigma) < RNG_EPS);

    return in_range && c1 && c2;
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// a benchmark kernel
//
// calculate n random numbers per thread and copy them to res[]
// n: how many rng numbers per thread
// size: is the full size of res[]

// Benchmark kernel: each thread draws n uniform floats with the plain
// generator and stores them contiguously at res[tid*n .. tid*n+n-1].
// size is the capacity of res[]; out-of-range stores are skipped (the
// draw still happens so the RNG state advances identically).
__global__ void rng_plain_float_test(float *res, rng_state* states, int n, int size)
{
    const int tid  = blockIdx.x*blockDim.x + threadIdx.x;
    const int base = tid*n;
    for(int k=0; k<n; ++k) {
        float value = rng_uniform_float(states);
        const int slot = base + k;
        if(slot < size)
            res[slot] = value;
    }
}

//-----------------------------------------------------------------------------

// Benchmark kernel: each thread draws n raw uniform integers and stores
// them at res[tid*n .. tid*n+n-1]; stores beyond `size` are dropped
// while the draws still advance the RNG state.
__global__ void rng_plain_int_test(uint *res, rng_state* states, int n, int size)
{
    const int tid  = blockIdx.x*blockDim.x + threadIdx.x;
    const int base = tid*n;
    for(int k=0; k<n; ++k) {
        uint draw = rng_uniform_int(states);
        const int slot = base + k;
        if(slot < size)
            res[slot] = draw;
    }
}

//-----------------------------------------------------------------------------

// Benchmark kernel: like rng_plain_float_test but drawing through the
// shared-memory variant of the generator.
__global__ void rng_shm_float_test(float *res, rng_state* states, int n, int size)
{
    const int tid  = blockIdx.x*blockDim.x + threadIdx.x;
    const int base = tid*n;
    for(int k=0; k<n; ++k) {
        float value = rng_uniform_shm_float(states);
        const int slot = base + k;
        if(slot < size)
            res[slot] = value;
    }
}

//-----------------------------------------------------------------------------

// Benchmark kernel: integer draws through the shm16 generator variant,
// stored contiguously per thread with a bounds check on every store.
__global__ void rng_shm16_int_test(int *res, rng_state* states, int n, int size)
{
    const int tid  = blockIdx.x*blockDim.x + threadIdx.x;
    const int base = tid*n;
    for(int k=0; k<n; ++k) {
        int draw = rng_uniform_shm16_int(states);
        const int slot = base + k;
        if(slot < size)
            res[slot] = draw;
    }
}

//-----------------------------------------------------------------------------

// Benchmark kernel: float2 draws (two rng numbers each) through the
// shm16 generator; here `size` counts float2 elements, not floats.
__global__ void rng_shm16_float2_test(float2 *res, rng_state* states, int n, int size)
{
    const int tid  = blockIdx.x*blockDim.x + threadIdx.x;
    const int base = tid*n;
    for(int k=0; k<n; ++k) {
        float2 pair = rng_uniform_shm16_float2(states);
        const int slot = base + k;
        if(slot < size)
            res[slot] = pair;
    }
}

//-----------------------------------------------------------------------------

// Benchmark kernel: float4 draws (four rng numbers each) through the
// shm16 generator; `size` counts float4 elements.
__global__ void rng_shm16_float4_test(float4 *res, rng_state* states, int n, int size)
{
    const int tid  = blockIdx.x*blockDim.x + threadIdx.x;
    const int base = tid*n;
    for(int k=0; k<n; ++k) {
        float4 quad = rng_uniform_shm16_float4(states);
        const int slot = base + k;
        if(slot < size)
            res[slot] = quad;
    }
}

//-----------------------------------------------------------------------------

// Benchmark kernel: each iteration produces two float4 vectors (eight
// rng numbers) at once and stores them in consecutive slots; `size`
// counts float4 elements.  Assumes n is even (asserted by the caller).
__global__ void rng_shm16_2xfloat4_test(float4 *res, rng_state* states, int n, int size)
{
    int tid = blockIdx.x*blockDim.x+threadIdx.x;
    float4 tmp1, tmp2;
    int j;
    for(j=0; j<n; j+=2) {
        rng_uniform_shm16_2xfloat4(states, tmp1, tmp2);
        int idx = tid*n+j;
        // bug fix: the original guarded both stores with only idx < size,
        // so res[idx+1] could be written one element past the buffer end
        if(idx < size)
            res[idx] = tmp1;
        if(idx+1 < size)
            res[idx+1] = tmp2;
    }
}

//-----------------------------------------------------------------------------

// Deliberately empty kernel: measures pure launch overhead so the
// benchmark loop can subtract it from the timings of the real kernels.
__global__ void rng_void_test(float *res, rng_state* states, int n, int size)
{
}

//-----------------------------------------------------------------------------

// Identifiers for the benchmark kernels dispatched by gpu_rng_kernel().
enum RngKernel {
    rng_void,            // empty kernel: launch-overhead baseline
    rng_plain_float,
    rng_shm_float,
    rng_shm16_int,
    rng_shm16_float2,
    rng_shm16_float4,
    rng_shm16_2xfloat4,  // two float4 per call; requires even n per thread
    rng_n_kernels        // count sentinel, not a launchable kernel
};

// Launch one benchmark kernel over the rng's full grid configuration.
// nn is the total float capacity of gpu_buf; each thread produces
// nn/(block_size*n_blocks) numbers.  The vector variants reinterpret
// gpu_buf and shrink `size` to count float2/float4 elements.
// Launch-configuration errors are caught via cudaGetLastError() (kernel
// launches never return an error directly) and reported fatally.
void gpu_rng_kernel(RngKernel kernel, rng_uniform &rng, float *gpu_buf, size_t nn)
{
    dim3 gridDim(rng.n_blocks(), 1, 1);
    dim3 blockDim(rng.block_size(), 1, 1);
    rng_state *gpu_states = rng.gpu_states();
    int n =  nn/(rng.block_size()*rng.n_blocks());
    int size = nn;
    switch(kernel) {
    case rng_void:
        rng_void_test <<<gridDim, blockDim>>> ((float*)gpu_buf, gpu_states, n, size);
        break;
    case rng_plain_float:
        // uses half space
        rng_plain_float_test <<<gridDim, blockDim>>> ((float*)gpu_buf, gpu_states, n, size);
        break;
    case rng_shm_float:
        // uses half space
        rng_shm_float_test <<<gridDim, blockDim>>> ((float*)gpu_buf, gpu_states, n, size);
        break;
    case rng_shm16_int:
        // uses half space
        rng_shm16_int_test <<<gridDim, blockDim>>> ((int*)gpu_buf, gpu_states, n, size);
        break;
    case rng_shm16_float2:
        rng_shm16_float2_test <<<gridDim, blockDim>>> ((float2*)gpu_buf, gpu_states, n, size/2);
        break;
    case rng_shm16_float4:
        rng_shm16_float4_test <<<gridDim, blockDim>>> ((float4*)gpu_buf, gpu_states, n, size/4);
        break;
    case rng_shm16_2xfloat4:
        // this kernel emits two float4 per iteration
        assert(n % 2 == 0);
        rng_shm16_2xfloat4_test <<<gridDim, blockDim>>> ((float4*)gpu_buf, gpu_states, n, size/4);
        break;
    default:
        error("invalid kernel\n");
    }
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess) {
        // %zu matches the size_t getters (the old %d was varargs UB on
        // LP64), and the second field was mislabeled "block_dim" while
        // actually printing n_blocks
        error("CUDA error %d(%s) in spawning of kernel %d block_size=%zu n_blocks=%zu gpu_buf=%p gpu_states=%p n=%d size=%d\n", (int)err, cudaGetErrorString(err), (int)kernel, rng.block_size(), rng.n_blocks(), gpu_buf, gpu_states, n, size);
    }
}

//-----------------------------------------------------------------------------

// Quick correctness test: fill a device buffer with raw integer draws,
// copy it back, and verify every value lies in [RNG_INT_MIN, RNG_INT_MAX].
// The disabled branch runs the float-uniform statistical test instead.
void rng_test(size_t volume, rng_uniform& rng)
{
    size_t size = 4*volume;
    dim3 gridDim(rng.n_blocks(), 1, 1);
    dim3 blockDim(rng.block_size(), 1, 1);
    int n =  size/(rng.block_size()*rng.n_blocks());
#if 0
    dualbuf<float> buf(size);
    rng_plain_float_test <<<gridDim, blockDim>>> (buf.gpu(), rng.gpu_states(), n, size);
    buf.gpu_to_cpu();
    bool r = test_uniform(buf.cpu(), size);
    trace("uniform test passed:%s\n", r?"true":"false");
#else
    dualbuf<rng_int_t> buf(size);
    // %zu matches size_t (the old %d was varargs UB on LP64)
    trace("n=%d size=%zu\n", n, size);
    rng_plain_int_test <<<gridDim, blockDim>>> (buf.gpu(), rng.gpu_states(), n, size);
    buf.gpu_to_cpu();
    // bug fix: the range-check result used to be silently discarded
    bool ok = check_values_in_range(buf.cpu(), size, RNG_INT_MIN, RNG_INT_MAX);
    trace("range test passed:%s\n", ok?"true":"false");
#endif
}

//-----------------------------------------------------------------------------

// Breadcrumb macro for debugging benchmark_gpu_rng: swap the comment to
// the first definition to trace line-number progress; the active
// definition compiles to nothing.
//#define HERE()     trace("%d\n", __LINE__)
#define HERE()     do { } while(0)

// Benchmark every RNG kernel variant: run `niters` launches each, time
// them with CUDA events, subtract the empty-kernel launch overhead
// (measured first, as kernels[0]), and sanity-check the produced
// numbers against a uniform distribution after each variant.
void benchmark_gpu_rng()
{
    size_t volume = LX * LY * LZ;
    size_t niters = 1000;
    size_t nn = 4*volume;

    //float2 *gpu_buf = NULL;
    size_t block_size = BLOCK_SIZE;
    size_t n_blocks = volume/BLOCK_SIZE;
    // make space for float2 test
    size_t buf_bytes = nn*sizeof(float);
    cudaEvent_t startEvent, stopEvent;

    HERE();
    // %zu matches the size_t arguments; the old %d was varargs UB on
    // any LP64 platform
    printf("Random Number Generator benchmark:\n");
    printf(" lattice:                %dx%dx%d\n", LX, LY, LZ);
    printf(" tot produced numbers:   %zu\n", volume);
    printf(" num iterations:         %zu\n", niters);
    printf(" thr block size:         %zu\n", block_size);
    printf(" num thr blocks:         %zu\n", n_blocks);
    printf(" rng numbers per thread: %zu\n", nn/(block_size*n_blocks));
    printf(" output buf size:        %zubytes\n", buf_bytes);

    HERE();
    rng_uniform rng(block_size, n_blocks);
    HERE();
    rng.init(0xf4543243, true);
    HERE();
    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);

    //rng_test(volume, rng);

    trace("allocating buf...\n");
    dualbuf<float> buf(nn);

    // benchmark table: kernel id, printable name, measured elapsed time
    struct {
        RngKernel krn;
        const char* name;
        float ms;
    } kernels[] = {
        { rng_void,           "rng_void",           0.0f },
        { rng_plain_float,    "rng_plain_float",    0.0f },
        { rng_shm_float,      "rng_shm_float",      0.0f },
        //{ rng_shm16_int,     "rng_shm16_int",     0.0f },
        { rng_shm16_float2,   "rng_shm16_float2",   0.0f },
        { rng_shm16_float4,   "rng_shm16_float4",   0.0f },
        { rng_shm16_2xfloat4, "rng_shm16_2xfloat4", 0.0f }
    };

    // clear any sticky error left by earlier CUDA calls
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess) {
        error("error %d(%s) before kernel benchmarking\n", (int)err, cudaGetErrorString(err));
    }
    
    // size_t counters: sizeof()/niters are size_t, the old int counters
    // mixed signed/unsigned in the loop conditions
    for(size_t k=0; k<sizeof(kernels)/sizeof(kernels[0]); ++k) {
        printf("exec %s...\t", kernels[k].name); fflush(stdout);
        // start timer
        cudaEventRecord(startEvent, 0);
        // run kernel
        for(size_t i=0; i<niters; ++i) {
            //printf("iter:%zu\n", i);
            gpu_rng_kernel(kernels[k].krn, rng, buf.gpu(), nn);
        }
        // stop and sync timer (cudaEventSynchronize blocks until all
        // queued launches have completed)
        cudaEventRecord(stopEvent, 0);
        cudaEventSynchronize(stopEvent);
        // calc elapsed time
        {
            float ms = 0.0f;
            cudaEventElapsedTime(&ms, startEvent, stopEvent);
            float uspi = ms / niters * 1000.0f;      // microseconds per iteration
            float nsprng = uspi / nn * 1000.0f;      // nanoseconds per rng number
            float krngps = nn * niters / ms;         // thousands of rng numbers per second

            kernels[k].ms = ms;
            if(kernels[k].krn == rng_void) {
                printf("%.2fms / %.2fus/iter / %.2fns/rng / %.2fKrng/s\n", 
                       ms, uspi, nsprng, krngps);
            } else {
                // values in parentheses have the launch overhead
                // (measured by the rng_void entry) subtracted
                assert(kernels[0].krn == rng_void);
                float ms2 = ms - kernels[0].ms;
                float uspi2 = ms2 / niters * 1000.0f;
                float nsprng2 = uspi2 / nn * 1000.0f;
                float krngps2 = nn * niters / ms2;
                printf("%.2f(%.2f)ms / %.2f(%.2f)us/iter / %.2f(%.2f)ns/rng / %.2f(%.2f)Krng/s\n",
                       ms, ms2, uspi, uspi2, nsprng, nsprng2, krngps, krngps2);
            }
        }

        buf.gpu_to_cpu();
        bool r = test_uniform(buf.cpu(), nn);
        //trace("uniform test passed:%s\n", r?"true":"false");
        if(!r)
            warn("uniform test failed\n");
    }
    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);

}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
