// Random number generation using Mersenne Twister
#include "MersenneTwister.h"

__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];

// Load pre-computed twister configurations (one per generator) from a binary
// file into the host-side table h_MT. On any failure the process exits with
// a non-zero status after reporting the error on stderr.
void loadMTGPU(const char *fname){
    FILE *fd = fopen(fname, "rb");
    if(!fd){
        // Fixed: message previously named the wrong function and the process
        // exited with status 0 (success) despite failing.
        fprintf(stderr, "loadMTGPU(): failed to open %s\n", fname);
        printf("FAILED\n");
        exit(EXIT_FAILURE);
    }
    // Read the whole table in one shot; fread returns the number of complete
    // items read, so anything other than 1 is a short/failed read.
    if( !fread(h_MT, sizeof(h_MT), 1, fd) ){
        fprintf(stderr, "loadMTGPU(): failed to load %s\n", fname);
        printf("FAILED\n");
        fclose(fd);
        exit(EXIT_FAILURE);
    }
    fclose(fd);
}

// Seed all twisters for the current GPU context: copy the host configuration
// table, stamp every entry with the given seed, and upload the result to the
// device-side constant table ds_MT.
void seedMTGPU(unsigned int seed){
    int i;
    // Work on a heap copy so the shared host table h_MT is never mutated
    // (the original comment said this "needs to be thread-safe").
    mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT *
            sizeof(mt_struct_stripped));
    // Fixed: malloc result was previously used without a NULL check.
    if(!MT){
        fprintf(stderr, "seedMTGPU(): malloc failed\n");
        exit(EXIT_FAILURE);
    }

    for(i = 0; i < MT_RNG_COUNT; i++){
        MT[i]      = h_MT[i];
        MT[i].seed = seed;
    }
    CUDA_SAFE_CALL(cudaMemcpyToSymbol(ds_MT, MT, sizeof(h_MT)));

    free(MT);
}

__device__ void applyIFSVariation(float *x, float *y, int index);

////////////////////////////////////////////////////////////////////////////////
// Write MT_RNG_COUNT vertical lanes of nPerRng random numbers to *d_Random.
// For coalesced global writes MT_RNG_COUNT should be a multiple of warp size.
// Initial states for each generator are the same, since the states are
// initialized from the global seed. In order to improve distribution properties
// on small NPerRng supply dedicated (local) seed to each twister.
// The local seeds, in their turn, can be extracted from global seed
// by means of any simple random number generator, like LCG.
//
// NOTE(review): tid is not bounds-checked, so the launch must supply exactly
// MT_RNG_COUNT threads in total — confirm at the call site.
// Output layout: d_Random[tid + iOut * MT_RNG_COUNT], i.e. generator tid
// contributes one value per row of MT_RNG_COUNT floats.
////////////////////////////////////////////////////////////////////////////////
__global__ void RandomGPU
(
    float *d_Random,    // out: nPerRng * MT_RNG_COUNT floats in (0, 1]
    int nPerRng         // number of random values produced per generator
)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;

    int iState, iState1, iStateM, iOut;
    unsigned int mti, mti1, mtiM, x;
    unsigned int mt[MT_NN], matrix_a, mask_b, mask_c; 

    //Load bit-vector Mersenne Twister parameters (per-generator configuration)
    matrix_a = ds_MT[tid].matrix_a;
    mask_b = ds_MT[tid].mask_b;
    mask_c = ds_MT[tid].mask_c;

    //Initialize current state from the per-generator seed using the standard
    //MT initialization recurrence (multiplier 1812433253)
    mt[0] = ds_MT[tid].seed;
    for (iState = 1; iState < MT_NN; iState++)
        mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;

    iState = 0;
    mti1 = mt[0];
    for (iOut = 0; iOut < nPerRng; iOut++) {
        // Indices of the next and M-th-ahead state words, with wrap-around
        // over the circular state buffer of MT_NN words.
        iState1 = iState + 1;
        iStateM = iState + MT_MM;
        if(iState1 >= MT_NN) iState1 -= MT_NN;
        if(iStateM >= MT_NN) iStateM -= MT_NN;
        mti  = mti1;
        mti1 = mt[iState1];
        mtiM = mt[iStateM];

        // MT recurrence: join the upper bits of mt[i] with the lower bits of
        // mt[i+1], shift, and conditionally XOR matrix_a on the low bit.
        x    = (mti & MT_UMASK) | (mti1 & MT_LMASK);
        x    =  mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);

        mt[iState] = x;
        iState = iState1;

        //Tempering transformation (improves equidistribution of the raw state)
        x ^= (x >> MT_SHIFT0);
        x ^= (x << MT_SHIFTB) & mask_b;
        x ^= (x << MT_SHIFTC) & mask_c;
        x ^= (x >> MT_SHIFT1);

        //Convert to (0, 1] float and write to global memory
        float result = ((float)x + 1.0f) / 4294967296.0f;
 
        d_Random[tid + iOut * MT_RNG_COUNT] = result;
    }
}

// When this kernel is invoked, d_Random contains random numbers in (0, 1].
// Each of the NUM_COORDS threads owns one point: its x value lives in
// d_Random[tid], its y value in d_Random[tid + NUM_COORDS], and one fresh
// random number per iteration is consumed from the slots that follow to
// select which variation to apply. On exit the first 2 * NUM_COORDS entries
// of d_Random hold the iterated (x, y) points.
// Number of threads = NUM_COORDS. See the README.
__global__ void iterateIFS
(
    float *d_Random,
    float *d_Probability,
    float d_Coefficients[][6],
    int numFunctions,
    float xScale,
    float xMin,
    float yScale,
    float yMin
)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;

    // Map the raw (0, 1] randoms into the requested coordinate window.
    // Consecutive threads read consecutive addresses (coalesced).
    float px = d_Random[tid] * xScale + xMin;
    float py = d_Random[tid + NUM_COORDS] * yScale + yMin;

    for (int iter = 0; iter < AUTO_ITERATIONS; iter++)
    {
        // Choose a variation according to the probability weights, burning
        // one random number per iteration (coalesced read).
        float pick = d_Random[2 * NUM_COORDS + tid + iter * NUM_COORDS];
        int fn = weightedRandom(d_Probability, numFunctions, pick);

        // Affine step for the chosen function:
        //   x' = ax + by + c
        //   y' = dx + ey + f
        // XXX Reading the coefficient table from global memory per iteration
        // is slow; shared memory would help here.
        float nx = d_Coefficients[fn][0] * px
                 + d_Coefficients[fn][1] * py
                 + d_Coefficients[fn][2];
        float ny = d_Coefficients[fn][3] * px
                 + d_Coefficients[fn][4] * py
                 + d_Coefficients[fn][5];

        // Push the affine result through the non-linear IFS variation.
        applyIFSVariation(&nx, &ny, fn);

        px = nx;
        py = ny;
    }

    // Sync here for coalesced writes further on.
    __syncthreads();

    // Coalesced writes of the final point back into d_Random.
    d_Random[tid] = px;
    d_Random[tid + NUM_COORDS] = py;

    // At the end of this kernel, d_Random[] contains points which are more
    // or less sure to be on the IFS attractor.
}

// Device-global scratch array (2 * NUM_COORDS floats: X values then Y values)
// shared by the reduceArrayToMean and calculateStdd reduction kernels.
__device__ float d_Scratch[2 * NUM_COORDS];

// Calculates standard deviation.
// The contents of d_Array are copied into d_Scratch, which is overwritten.
// numElements must be a power of 2.
// d_Array[0..numElements-1] contains X coordinates,
// d_Array[numElements..2*numElements - 1] contains Y coordinates.
// d_meanX / d_meanY must already hold the means (see reduceArrayToMean).
//
// NOTE(review): threads exchange partial sums through d_Scratch, and
// __syncthreads() only synchronizes within a block — correctness requires
// that all 2*numElements cooperating threads fit in a single block. Confirm
// the launch configuration at the call site.
// Can be made faster by having separate threads for X and Y coordinates.
__global__ void
calculateStdd
(
    float *d_Array,
    int numElements,
    float *stddX,
    float *stddY,
    float *d_meanX,
    float *d_meanY
)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;

    // Stage the input into scratch. Guard with `if` instead of an early
    // return so that every thread still reaches the barriers below —
    // __syncthreads() must not sit behind divergent exits.
    if (tid < 2 * numElements)
        d_Scratch[tid] = d_Array[tid];

    float *d_Array_x = d_Scratch;
    float *d_Array_y = d_Scratch + numElements;

    // Fixed: the staged copy must be visible to all threads before the
    // reduction starts reading it (was missing, a data race).
    __syncthreads();

    // Tree reduction: each level folds the upper half into the lower half.
    for (int i = numElements / 2; i > 0; i /= 2)
    {
        if (tid < i)
        {
            // Coalesced reads of this level's two operands per axis.
            float operand1x = d_Array_x[tid];
            float operand2x = d_Array_x[tid + i];
            float operand1y = d_Array_y[tid];
            float operand2y = d_Array_y[tid + i];

            // On the first level only, convert values to squared deviations:
            // stdd = sqrt(sum((Xi - mean)^2) / numElements)
            if (i == numElements / 2)
            {
                operand1x -= *d_meanX;
                operand1x *= operand1x;
                operand2x -= *d_meanX;
                operand2x *= operand2x;

                operand1y -= *d_meanY;
                operand1y *= operand1y;
                operand2y -= *d_meanY;
                operand2y *= operand2y;
            }

            // Coalesced writes of the partial sums.
            d_Array_x[tid] = operand1x + operand2x;
            d_Array_y[tid] = operand1y + operand2y;
        }
        // Fixed: barrier between reduction levels (was missing — threads
        // read partial sums written by other threads in the previous level).
        // Placed outside the divergent guard so all threads reach it.
        __syncthreads();
    }

    // Single writer publishes the results (previously enforced by early
    // returns; now by an explicit guard). sqrtf keeps the math in float.
    if (tid == 0)
    {
        *stddX = sqrtf(d_Array_x[0] / numElements);
        *stddY = sqrtf(d_Array_y[0] / numElements);
    }
}

// Sums up d_Array using reduction and divides by numElements, placing the
// per-axis means in *d_meanX / *d_meanY. d_Scratch is overwritten.
// numElements must be a power of 2.
// d_Array[0..numElements-1] contains X coordinates,
// d_Array[numElements..2*numElements - 1] contains Y coordinates.
//
// NOTE(review): threads exchange partial sums through d_Scratch, and
// __syncthreads() only synchronizes within a block — correctness requires
// that all 2*numElements cooperating threads fit in a single block. Confirm
// the launch configuration at the call site.
// Can be made faster by having separate threads for X and Y coordinates.
__global__ void
reduceArrayToMean
(
    float *d_Array,
    int numElements,
    float *d_meanX,
    float *d_meanY
)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;

    // Stage the input into scratch. Guard with `if` instead of an early
    // return so that every thread still reaches the barriers below.
    if (tid < 2 * numElements)
        d_Scratch[tid] = d_Array[tid];

    float *d_Array_x = d_Scratch;
    float *d_Array_y = d_Scratch + numElements;

    // Fixed: the staged copy must be visible to all threads before the
    // reduction starts reading it (was missing, a data race).
    __syncthreads();

    // Tree reduction: each level folds the upper half into the lower half.
    for (int i = numElements / 2; i > 0; i /= 2)
    {
        if (tid < i)
        {
            // Coalesced reads of this level's two operands per axis.
            float operand1x = d_Array_x[tid];
            float operand2x = d_Array_x[tid + i];

            float operand1y = d_Array_y[tid];
            float operand2y = d_Array_y[tid + i];

            // Coalesced writes of the partial sums.
            d_Array_x[tid] = operand1x + operand2x;
            d_Array_y[tid] = operand1y + operand2y;
        }
        // Fixed: barrier between reduction levels (was missing — threads
        // read partial sums written by other threads in the previous level).
        __syncthreads();
    }

    // Single writer publishes the results (previously enforced by early
    // returns; now by an explicit guard).
    if (tid == 0)
    {
        *d_meanX = (d_Array_x[0] /= numElements);
        *d_meanY = (d_Array_y[0] /= numElements);
    }
}

// Applies the IFS (fractal flame) variation selected by 'index' to the point
// (*x, *y) in place. Index 0 and any unrecognized index leave the point
// unchanged. All math is kept in single precision (sinf/cosf/atan2f/sqrtf);
// the previous code called the double-precision overloads.
__device__ void
applyIFSVariation(float *x, float *y, int index)
{
    // Snapshot the inputs first: several variations read both coordinates,
    // and the old code overwrote *x before computing *y (see cases 3 and 6).
    float xi = *x;
    float yi = *y;
    float r;
    float r2;
    float theta;

    switch(index)
    {
        case 1:	// Sinusodial
            (*x) = sinf(xi);
            (*y) = sinf(yi);
            break;
        case 2:	// Spherical
            // NOTE(review): divides by r2 with no zero guard — the origin
            // maps to inf/nan. Confirm callers never feed (0, 0).
            r2 = xi * xi + yi * yi;
            (*x) = xi / r2;
            (*y) = yi / r2;
            break;
        case 3:	// Swirl
            // Fixed: the y update previously read the freshly overwritten *x
            // instead of the original x coordinate.
            r2 = xi * xi + yi * yi;
            (*x) = xi * sinf(r2) - yi * cosf(r2);
            (*y) = xi * cosf(r2) + yi * sinf(r2);
            break;
        case 4:	// Heart
            r = sqrtf(xi * xi + yi * yi);
            theta = atan2f(xi, yi);
            (*x) = r * sinf(r * theta);
            (*y) = -r * cosf(r * theta);
            break;
        case 5:	// Handkerchief
            r = sqrtf(xi * xi + yi * yi);
            theta = atan2f(xi, yi);
            (*x) = r * sinf(r + theta);
            (*y) = r * cosf(r - theta);
            break;
        case 6:	// Fisheye
            // Fixed: the y update previously read the freshly overwritten *x,
            // applying the 2/(r+1) scale twice to the original y.
            r = sqrtf(xi * xi + yi * yi);
            (*x) = 2.0f / (r + 1.0f) * yi;
            (*y) = 2.0f / (r + 1.0f) * xi;
            break;
        case 7:	// NoiseSquare — disabled: needs a per-thread RNG source
            // r = sqrtf((*x) * (*x) + (*y) * (*y));
            // (*x) = (*x) + r * (myrand.nextDouble() - 0.5);
            // (*y) = (*y) + r * (myrand.nextDouble() - 0.5);
            break;
        case 8:	// NoiseCircle — disabled: needs a per-thread RNG source
            // r = sqrtf((*x) * (*x) + (*y) * (*y) ) * myrand.nextDouble();
            // theta = 2 * 3.1415926535 * myrand.nextDouble();

            // (*x) = (*x) + r * cos(theta);
            // (*y) = (*y) + r * sin(theta);
            break;
    }
}

// Iterate over the entire coordinate array and create a histogram.
// One thread for each pixel of the image: thread (tidx, tidy) counts how many
// iterated points land on its pixel and writes that count to img[tidx][tidy][0].
// Each thread block has a BLOCKSZx1 dimension.
// numElements is the number of coordinate pairs.
//
// NOTE(review): the loop runs numElements / blockDim.x iterations, so any
// remainder when numElements is not a multiple of blockDim.x is silently
// ignored — confirm numElements is a multiple of BLOCKSZ at the call site.
// NOTE(review): the final write is unguarded; the grid must cover exactly
// the image dimensions (img is declared [WIDTH-ish][HEIGHT][NDATA]) or
// out-of-range threads would write out of bounds — confirm launch config.
__global__ void
generateImageData
(
    float *coordArray,
    int (*img)[HEIGHT][NDATA],
    int imgWidth,
    int imgHeight,
    int numElements,
    float xScale,
    float xMin,
    float yScale,
    float yMin
)
{
    // One block-sized tile of coordinates, staged in shared memory so every
    // thread in the block can scan it without re-reading global memory.
    __shared__ float xcoords[BLOCKSZ];
    __shared__ float ycoords[BLOCKSZ];

    int tidx = blockIdx.x * blockDim.x + threadIdx.x;
    int tidy = blockIdx.y * blockDim.y + threadIdx.y;

    // Block-local index of this thread.
    const int tid = threadIdx.x;

    // Running count of points that hit this thread's pixel.
    int freq = 0;

    // Iterate over the whole coordArray one block sized chunk at a time.
    // The trip count is uniform across the block, so the __syncthreads()
    // calls inside the loop are reached by every thread.
    for (int i = 0; i < numElements / blockDim.x; i++)
    {
        // First, each thread does its part in initing shared memory
        // 1) broadcast smem read, no BC
        // 2) smem read with stride 1, no BC
        // X values come from the first numElements floats, Y values from the
        // next numElements (see iterateIFS output layout).
        xcoords[tid] = coordArray[tid + i * blockDim.x];
        ycoords[tid] = coordArray[numElements + tid + i * blockDim.x];

        // Sync to make sure the xcoords and ycoords arrays are ready.
        __syncthreads();

        // At this point, the shared memory array contains one chunk of the
        // coord array. Read each coordinate and compare it with the thread's
        // coordinates.
        for (int j = 0; j < BLOCKSZ; j++)
        {
            float x = xcoords[j]; 
            float y = ycoords[j];

            // Next, translate it to the image's plane.
            // Out-of-window points simply never match any pixel.
            int imgX = (imgWidth  - 1) * (x - xMin) / (xScale);
            int imgY = (imgHeight - 1) * (y - yMin) / (yScale);

            // Compare the coordinate with the thread's own.
            // In case it matches, increment the pixel's values.
            if (imgX == tidx && imgY == tidy)
            {
                freq++;
            }
        }

        // Sync to make sure the xcoords and ycoords array are ready for reuse.
        __syncthreads();
    }

    img[tidx][tidy][0] = freq;
}

// Atomic-histogram variant of generateImageData: one thread per coordinate
// pair. Each thread maps its point into pixel space and atomically bumps the
// corresponding histogram bin in img. Launch with at least numElements
// threads in x.
__global__ void
generateImageDataAtomic
(
    float *coordArray,
    int (*img)[HEIGHT][NDATA],
    int imgWidth,
    int imgHeight,
    int numElements,
    float xScale,
    float xMin,
    float yScale,
    float yMin
)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;

    // Guard added: threads beyond the coordinate count (grid padding) would
    // otherwise read past the end of coordArray.
    if (tid >= numElements)
    {
        return;
    }

    // X values occupy the first numElements floats, Y values the next block
    // (same layout iterateIFS writes).
    float x = coordArray[tid];
    float y = coordArray[numElements + tid];

    int imgX = (imgWidth - 1) * (x - xMin) / (xScale);
    int imgY = (imgHeight - 1) * (y - yMin) / (yScale);

    // Discard points that fall outside the image.
    // Fixed: the Y bound was previously compared against imgWidth, letting
    // out-of-range rows through (and rejecting valid ones) whenever the
    // image is not square.
    if ((imgX < 0 || imgX >= imgWidth) || (imgY < 0 || imgY >= imgHeight))
    {
        return;
    }

    // atomicInc with limit 0xFFFFFFFF behaves as an unsigned increment with
    // wrap-around; atomics serialize under contention on hot pixels.
    atomicInc((unsigned int*)&img[imgX][imgY][0], 0xFFFFFFFF);
}

