/*
  Copyright (c) 2010 Davide Rossetti (davide.rossetti@roma1.infn.it),
  Andrea Maiorano (andrea.maiorano@roma1.infn.it)

  This file is part of CuHB (CUDA Heisenberg) package.
  
  CuHB is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  CuHB is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.
  
  You should have received a copy of the GNU General Public License
  along with CuHB.  If not, see <http://www.gnu.org/licenses/>.

*/

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>

#include <cuda_runtime.h>
#include <math.h>
#include <math_constants.h>

#include "hb.h"
#include "rng.cuh"
#include "jmat.cuh"
#include "rng.hpp"
#include "rng.cuh"

#undef BLOCK_SIZE

#undef MOD
#undef XYZ2IDX

//#define MOD(I, L) (((I)+(L))%(L))
// Periodic wrap-around of coordinate I on a lattice of extent L.
// Only valid for L == power of 2 (bitwise AND replaces the modulo);
// the +L offset makes it correct for I == -1 as well.
// BUGFIX: the previous definition used the caller-scope variable `i`
// instead of the macro parameter `I`, so MOD(X+1, LX) silently ignored
// its first argument and every neighbor index collapsed onto `i`.
#define MOD(I, L) (((I)+(L))&((L)-1))


// #define ZL_LEN 4
// #define ZL_BIT 0
// #define ZL_FLD ((0x1<<ZL_LEN)-1)
// #define ZL_MSK (ZL_FLD<<ZL_BIT)
// #define ZH_BIT 4
// #define ZH_FLD ((0x1<<ZH_LEN)-1)

// #define YL_LEN 4
// #define YL_BIT (ZL_BIT+ZL_LEN)
// #define YL_FLD ((0x1<<YL_LEN)-1)
// #define YL_MSK (YL_FLD<<YL_BIT)

// #define XL_LEN 4
// #define XL_BIT (YL_BIT+YL_LEN)
// #define XL_FLD ((0x1<<XL_LEN)-1)
// #define XL_MSK (XL_FLD<<XL_BIT)

// // combine components into index
// #define IDXLO(x, y, z) (((x&XL_FLD)<<XL_BIT)|((y&YL_FLD)<<YL_BIT)|((z&ZL_FLD)<<ZL_BIT))
// #define IDXHI(x, y, z) (((x&XH_FLD)<<XH_BIT)|((y&YH_FLD)<<YH_BIT)|((z&ZH_FLD)<<ZH_BIT))
// #define XYZ2IDX(x,y,z) (IDXLO(x, y, z)|IDXhi(x, y, z))

// // expand index into components
// #define ZLO(i) (i&0x000007)
// #define YLO(i) ((i&0x0000f8)>>3)
// #define XLO(i) ((i&0x000f00)>>6)

// #define ZHI(i) (i&0x)
// #define YHI(i) (i&YH_MSK)
// #define XHI(i) (i&XH_MSK)

// Thread-block geometry: each CUDA block covers a 1 x 2 x LZ brick of the
// lattice (one full column along z).
const int block_x = 1;
const int block_y = 2;
const int block_z = LZ;
const int block_size = block_x*block_y*block_z;

// Number of thread blocks along each lattice direction.
const int LBZ = LZ/block_z;  // == 1 with the current block_z
const int LBY = LY/block_y;
const int LBX = LX/block_x;

// thread coord -> absolute coord

// absolute coord -> memory layout
// NOTE(review): the `&0x2` masks keep only bit 1 of each coordinate and
// discard bit 0, so e.g. x and x+1 map to the same index — the map is not
// a bijection and distinct sites collide.  Combined with the `>>2` in
// XYZ2IDX_HI this looks like it was meant to pack the two low bits of each
// coordinate (`&0x3`) into a 4x4x4 sub-block — TODO confirm intended
// sub-block size and fix before trusting results from these kernels.
#define XYZ2IDX_LO(x,y,z) (((x&0x2)<<4)|((y&0x2)<<2)|(z&0x2))
// NOTE(review): the radix factors look suspicious too — a mixed-radix
// linearization would normally be of the form ((x>>2)*NY + (y>>2))*NZ +
// (z>>2); as written the x and y contributions can overlap.  Verify.
#define XYZ2IDX_HI(x,y,z) ( (x>>2)*LBY + (y>>2)*LBZ + (z>>2) )
// Full site index: low 6 bits from the sub-block, rest from the block.
#define XYZ2IDX(x,y,z)    (XYZ2IDX_LO(x,y,z)|(XYZ2IDX_HI(x,y,z)<<6))

//-----------------------------------------------------------------------------
// spins
//-----------------------------------------------------------------------------

// Stride between spin replicas in the packed GPU buffer.  Set from the host
// (Svec::m_stride) before each launch; units unclear from this file —
// TODO confirm whether it counts float4 elements or bytes.
__constant__ int svec_stride;

// 1D texture over the spin buffer: one float4 per site.
texture<float4, 1, cudaReadModeElementType> svecTexSingle;

#define texSpin svecTexSingle

// A single spin: 3-vector in .x/.y/.z; the .w component is padding.
typedef float4 svec;

// Fetch the spin at linear site index `idx` through the texture cache.
#define READ_S(tmp, idx)                         \
    {                                            \
        tmp = tex1Dfetch((texSpin), idx);        \
    }

// Build an svec from three components; the unused .w is zeroed.
#define make_svec(X,Y,Z) make_float4(X,Y,Z,0)


//-----------------------------------------------------------------------------
// operations
//-----------------------------------------------------------------------------

// RES += N * V, where N is a 3x3 coupling matrix (jmat, elements j00..j22)
// and V a spin 3-vector.  Accumulates into RES component-wise.
#define VEC_ACC_MUL(RES, N, V)                                      \
    {                                                               \
        (RES).x += (N).j00*(V).x + (N).j01*(V).y + (N).j02*(V).z;   \
        (RES).y += (N).j10*(V).x + (N).j11*(V).y + (N).j12*(V).z;   \
        (RES).z += (N).j20*(V).x + (N).j21*(V).y + (N).j22*(V).z;   \
    }

// 3-vector dot product; component .w is discarded
#define SCALAR_PROD(V1, V2)                                 \
    ( (V1).x * (V2).x + (V1).y * (V2).y + (V1).z * (V2).z )
    
// Accumulate into RES the contribution of the three "+" direction
// neighbors of site I = (X,Y,Z), with periodic wrap-around via MOD.
// The coupling for a "+" link is read at site I itself.
#define LOCAL_FIELD_PLUS(RES,I,X,Y,Z)  {        \
        {   /* X+ */                            \
            int xp = MOD(X+1, LX);              \
            int ixp = XYZ2IDX(xp,Y,Z);          \
            svec sxp;                           \
            jmat jxp;                           \
            READ_S(sxp, ixp);                   \
            READ_J(jxp, texJ, I, DIR_X);        \
            VEC_ACC_MUL(RES, jxp, sxp);         \
        }{  /* Y+ */                            \
            int yp = MOD(Y+1, LY);              \
            int iyp = XYZ2IDX(X,yp,Z);          \
            svec syp;                           \
            jmat jyp;                           \
            READ_S(syp, iyp);                   \
            READ_J(jyp, texJ, I, DIR_Y);        \
            VEC_ACC_MUL(RES, jyp, syp);         \
        }{  /* Z+ */                            \
            int zp = MOD(Z+1, LZ);              \
            int izp = XYZ2IDX(X,Y,zp);          \
            svec szp;                           \
            jmat jzp;                           \
            READ_S(szp, izp);                   \
            READ_J(jzp, texJ, I, DIR_Z);        \
            VEC_ACC_MUL(RES, jzp, szp);         \
        }                                       \
    }

// Accumulate into RES the contribution of the three "-" direction
// neighbors.  NOTE(review): here the coupling is read at the NEIGHBOR
// index (ixm/iym/izm), not at I — presumably the convention is that each
// link's matrix is stored on its "+" endpoint, which makes this the same
// link the neighbor sees as its "+X" link.  Confirm against jmat layout.
#define LOCAL_FIELD_MINUS(RES,I,X,Y,Z)  {       \
        { /* X- */                              \
            int xm = MOD(X-1, LX);              \
            int ixm = XYZ2IDX(xm,Y,Z);          \
            svec sxm;                           \
            jmat jxm;                           \
            READ_S(sxm, ixm);          \
            READ_J(jxm, texJ, ixm, DIR_X);      \
            VEC_ACC_MUL(RES, jxm, sxm);         \
        }{ /* Y- */                             \
            int ym = MOD(Y-1, LY);              \
            int iym = XYZ2IDX(X,ym,Z);          \
            svec sym;                           \
            jmat jym;                           \
            READ_S(sym, iym);          \
            READ_J(jym, texJ, iym, DIR_Y);      \
            VEC_ACC_MUL(RES, jym, sym);         \
        }{ /* Z- */                             \
            int zm = MOD(Z-1, LZ);              \
            int izm = XYZ2IDX(X,Y,zm);          \
            svec szm;                           \
            jmat jzm;                           \
            READ_S(szm, izm);          \
            READ_J(jzm, texJ, izm, DIR_Z);      \
            VEC_ACC_MUL(RES, jzm, szm);         \
        }                                       \
    }

// Full 6-neighbor local field at site I = XYZ2IDX(X,Y,Z).
#define LOCAL_FIELD(RES,I,X,Y,Z)                \
    {                                           \
        LOCAL_FIELD_PLUS(RES,I,X,Y,Z);          \
        LOCAL_FIELD_MINUS(RES,I,X,Y,Z);         \
    }

//-----------------------------------------------------------------------------
// kernels
//-----------------------------------------------------------------------------

#define PI CUDART_PI_F

// Inverse temperature; uploaded from the host before each launch.
__constant__ float beta_f;

// One heatbath update of every lattice site (single precision, 1 replica,
// sub-lattice thread layout).
//   res    - output spin array, indexed by XYZ2IDX
//   states - per-thread RNG states (shared-memory RNG, 2 uniforms/thread)
//   size   - number of valid sites; threads whose index falls outside
//            still draw their RNG numbers so streams stay aligned
// Expected launch: blockDim = (block_x, block_y, block_z),
// gridDim.x = volume/block_size, spins/J bound to their textures.
__global__ void
hb3d_heatbath_sp_r1_sublat(float4* res, rng_state* states, int size)
{
    // Decompose the linear block index into block coordinates on the
    // (LBX, LBY, LBZ) grid of bricks.
    int bid = blockIdx.x;
    int xhi = bid / (LBZ*LBY);
    int yhi = (bid - xhi*LBZ*LBY)/LBZ;
    int zhi = bid - (bid/LBZ)*LBZ;   // == bid % LBZ
    
    // Absolute lattice coordinates of this thread's site.
    int x = threadIdx.x + xhi*block_x;
    int y = threadIdx.y + yhi*block_y;
    int z = threadIdx.z + zhi*block_z;

    int i = XYZ2IDX(x,y,z);

    // Draw both uniforms unconditionally so every thread advances its RNG
    // stream, even ones that skip the update below.
    float2 rng = rng_uniform_shm16_float2(states);

    // warning if there are threads which do not calculate but waste rng nums
    if(i < size) {
        // Local field h = beta * sum over the 6 neighbors of J*s.
        svec h = make_svec(0,0,0);
        LOCAL_FIELD(h,i,x,y,z);
        h = make_svec(beta_f * h.x, beta_f * h.y, beta_f * h.z);
        float h2 = SCALAR_PROD(h, h);
        float mh = sqrtf(h2);
        // Heatbath draw: phi uniform in [0, 2pi), costheta sampled from
        // p(costheta) ~ exp(mh*costheta) by inverse-CDF.
        // NOTE(review): mh == 0 (vanishing local field) gives 0/0 -> NaN
        // below — confirm it cannot occur with the J/beta values used.
        float phi = 2.0f * PI * rng.y;
        float costheta = logf(1.0f+rng.x*(expf(2.0f*mh)-1.0f))/mh-1.0f;
        float sintheta = sqrtf(1.0f - costheta*costheta);
        // Rotate the sampled direction from the frame aligned with h back
        // into the lattice frame.
        float3 hnorm = make_float3(h.x/mh, h.y/mh, h.z/mh);
        // was `1.0` (double literal): keep the computation single precision
        float mhxy = sqrtf(1.0f-hnorm.z*hnorm.z);
        // NOTE(review): mhxy == 0 (h exactly along z) also divides by zero.
        float3 c = make_float3(hnorm.x/mhxy, hnorm.y/mhxy, hnorm.z);
        float cosphi, sinphi;
        sincosf(phi, &sinphi, &cosphi);  // sincosf stores sin first, cos second
        res[i] = make_svec(c.x * (cosphi * sintheta * c.z + costheta * mhxy) - sintheta * sinphi * c.y,
                           c.x * sintheta * sinphi + c.y * (cosphi * c.z * sintheta + costheta * mhxy),
                           costheta * c.z - cosphi * sintheta * mhxy);
    }
}

//-----------------------------------------------------------------------------

// Empty kernel with the same signature as the real ones: measures pure
// kernel-launch overhead so later timings can be corrected (see the
// overhead handling in test_kernel()).  All parameters are ignored.
__global__ void
hb3d_void_kernel_sp_r1_sublat(float4* res, rng_state* states, int size)
{
}

//-----------------------------------------------------------------------------

// Benchmark kernel: reproduces the index arithmetic and the RNG draw of the
// heatbath kernel but performs no spin update.  Used to isolate the cost of
// random-number generation.  `res` and `size` are unused.
__global__ void
hb3d_rng_f2_sp_r1_sublat(float4* res, rng_state* states, int size)
{
    // Block coordinates on the (LBX, LBY, LBZ) grid of bricks.
    const int bid = blockIdx.x;
    const int bx  = bid / (LBY*LBZ);
    const int by  = (bid - bx*LBY*LBZ) / LBZ;
    const int bz  = bid % LBZ;

    // Absolute lattice coordinates of this thread's site.
    const int sx = bx*block_x + threadIdx.x;
    const int sy = by*block_y + threadIdx.y;
    const int sz = bz*block_z + threadIdx.z;

    // Linear memory index — computed but unused, matching the index work
    // the heatbath kernel performs.
    const int site = XYZ2IDX(sx,sy,sz);

    // Advance the RNG stream exactly as the heatbath kernel does.
    const float2 draw = rng_uniform_shm16_float2(states);

    // Silence unused-value warnings without changing behavior.
    (void)site; (void)draw; (void)res; (void)size;
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------

// Benchmark baseline: performs all the host-side per-iteration setup
// (texture binds, constant uploads) but launches NO kernel.  Measures the
// host overhead that every other launcher pays.  `res` and `rng` are set
// up but unused; s_src feeds the spin texture, j the coupling texture.
void gpu_no_kernel_r1_sublat(rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j)
{
    dim3 gridDim(s_res.m_volume/block_size, 1, 1);
    dim3 blockDim(block_x, block_y, block_z);

    // TODO: check equal cfg on s_res, s_src and J
    assert(s_src.m_replicas == 1);
    assert(s_res.m_replicas == 1);

    // Geometry checks, kept consistent with the other launchers (they were
    // missing here).
    assert(LZ % block_z == 0);
    assert(LY % block_y == 0);
    assert(LX % block_x == 0);

    // Bind couplings and spins to their textures and upload the strides.
    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    // Inverse temperature, fixed to 1 for the benchmark.
    float beta_f = 1.0f;
    cuda_verify(cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float)));

    // Intentionally no kernel launch:
    //hb3d_void_kernel_sp_r1_sublat <<<gridDim, blockDim>>> ((float4*)s_res.m_gpu_buf, rng.gpu_states(), s_res.m_volume);
}

//-----------------------------------------------------------------------------

// Launch the empty kernel: measures setup + launch overhead per iteration
// (recorded as the baseline subtracted from the real kernels' timings).
// s_src feeds the spin texture, j the coupling texture; `res` is unused.
void gpu_void_kernel_r1_sublat(rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j)
{
    dim3 gridDim(s_res.m_volume/block_size, 1, 1);
    dim3 blockDim(block_x, block_y, block_z);

    // TODO: check equal cfg on s_res, s_src and J
    assert(s_src.m_replicas == 1);
    assert(s_res.m_replicas == 1);

    assert(LZ % block_z == 0);
    assert(LY % block_y == 0);
    assert(LX % block_x == 0);

    // Bind couplings and spins to their textures and upload the strides.
    // cuda_verify added for consistency with gpu_no_kernel_r1_sublat —
    // these copies were previously unchecked.
    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    // Inverse temperature, fixed to 1 for the benchmark.
    float beta_f = 1.0f;
    cuda_verify(cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float)));

    hb3d_void_kernel_sp_r1_sublat <<<gridDim, blockDim>>> ((float4*)s_res.m_gpu_buf, rng.gpu_states(), s_res.m_volume);
}

//-----------------------------------------------------------------------------

// Launch the RNG-only kernel: times index arithmetic + random-number
// generation without the spin update, to isolate the RNG cost.
// s_src feeds the spin texture, j the coupling texture; `res` is unused.
void gpu_rng_r1_sublat(rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j)
{
    dim3 gridDim(s_res.m_volume/block_size, 1, 1);
    dim3 blockDim(block_x, block_y, block_z);

    // TODO: check equal cfg on s_res, s_src and J
    assert(s_src.m_replicas == 1);
    assert(s_res.m_replicas == 1);

    assert(LZ % block_z == 0);
    assert(LY % block_y == 0);
    assert(LX % block_x == 0);

    // Bind couplings and spins to their textures and upload the strides.
    // cuda_verify added for consistency with gpu_no_kernel_r1_sublat —
    // these copies were previously unchecked.
    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    // Inverse temperature, fixed to 1 for the benchmark.
    float beta_f = 1.0f;
    cuda_verify(cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float)));

    hb3d_rng_f2_sp_r1_sublat <<<gridDim, blockDim>>> ((float4*)s_res.m_gpu_buf, rng.gpu_states(), s_res.m_volume);
}

//-----------------------------------------------------------------------------

// Launch the full heatbath kernel: one sweep over s_res using s_src spins
// and j couplings, with beta fixed to 1 for the benchmark.
void gpu_heatbath_r1_sublat(rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j)
{
    dim3 gridDim(s_res.m_volume/block_size, 1, 1);
    dim3 blockDim(block_x, block_y, block_z);

    // TODO: check equal cfg on s_res, s_src and J
    assert(s_src.m_replicas == 1);
    assert(s_res.m_replicas == 1);

    assert(LZ % block_z == 0);
    assert(LY % block_y == 0);
    assert(LX % block_x == 0);

    // Bind couplings and spins to their textures and upload the strides.
    // cuda_verify added for consistency with gpu_no_kernel_r1_sublat —
    // these copies were previously unchecked.
    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    // Inverse temperature, fixed to 1 for the benchmark.
    float beta_f = 1.0f;
    cuda_verify(cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float)));

    hb3d_heatbath_sp_r1_sublat <<<gridDim, blockDim>>> ((float4*)s_res.m_gpu_buf, rng.gpu_states(), s_res.m_volume);
}

//-----------------------------------------------------------------------------

// Name/function pair for one entry of the benchmark table below.
struct kernel_def {
    const char* name;
    void (*fun)(rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j);
};

// Launchers to benchmark, run in table order; the NULL-name entry
// terminates the list.  Order matters: entry index 1 (the void kernel)
// provides the launch-overhead baseline subtracted from later entries
// (see test_kernel()).
static struct kernel_def kernels[] = {
    {"gpu_no_kernel_r1_sublat",        gpu_no_kernel_r1_sublat},
    {"gpu_void_kernel_r1_sublat",      gpu_void_kernel_r1_sublat},
    {"gpu_rng_r1_sublat",              gpu_rng_r1_sublat},
    {"gpu_heatbath_r1_sublat",         gpu_heatbath_r1_sublat},
    {NULL, NULL}
};

// Launch overhead in us/iteration, captured while timing kernel_id == 1
// and subtracted from the timings of subsequent kernels.
static float overhead_r1_uspi = 0.0f;

// Time one launcher from the `kernels` table: run it `niters` times between
// two CUDA events and report total ms, us/iteration and ns/spin.
// kernel_id == 1 (the void kernel) is remembered as the launch-overhead
// baseline; for kernel_id > 1 the overhead-corrected figures are printed
// in parentheses.
static void test_kernel(size_t niters, Config &cfg, rng_uniform &rng, int kernel_id)
{
    cudaEvent_t startEvent, stopEvent;

    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);

    // Fresh working buffers for this run.
    size_t pad = 0;
    Svec spins(cfg, pad);
    Svec res(cfg, pad);
    Jmat j(cfg, pad);

    size_t nspins = cfg.lat[DIR_X]*cfg.lat[DIR_Y]*cfg.lat[DIR_Z];

    // Deterministic input: uniform spins and identity couplings.
    spins.init(1.0, 1.0, 1.0);
    j.init_ident();

    spins.cpu_to_gpu();
    j.cpu_to_gpu();

    printf("kernel: %s... ", kernels[kernel_id].name); fflush(stdout);
    // start timer
    cudaEventRecord(startEvent, 0);
    // run kernel
    float e;
    // Clear any sticky error left by earlier work so failures below are
    // attributed to this kernel.
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess) {
        error("CUDA error %d(%s) before kernel %d looping\n", (int)err, cudaGetErrorString(err), (int)kernel_id);
    }

    // Launch niters iterations back-to-back; only launch errors are
    // checked here (execution errors surface at the event sync below).
    // NOTE(review): loop index is int while niters is size_t — fine for
    // benchmark-sized counts, would wrap for niters > INT_MAX.
    for(int i=0; i<niters; ++i) {
        (*kernels[kernel_id].fun)(rng, &e, res, spins, j);
        err = cudaGetLastError();
        if(err != cudaSuccess) {
            error("CUDA error %d(%s) in spawning of kernel %d iter=%d\n", (int)err, cudaGetErrorString(err), (int)kernel_id, i);
        }
    }
    // stop and sync timer
    cudaEventRecord(stopEvent, 0);
    cudaEventSynchronize(stopEvent);
    // calc elapsed time
    {
        float milliseconds = 0.0f;
        cudaEventElapsedTime(&milliseconds, startEvent, stopEvent);
        // us per iteration
        float uspi = milliseconds / niters * 1000.0f;
        //float nsps = milliseconds / (float)(niters * nspins) * 1000000.0f;
        // ns per spin update
        float nsps =  uspi / (float)nspins * 1000.0f;
        if(kernel_id == 1)
            overhead_r1_uspi = uspi;
        if(kernel_id <= 1)
            printf("elapsed time: %fms / %fus/iter / %f ns/spin\n", milliseconds, uspi, nsps);
        else {
            // Overhead-corrected values printed in parentheses.
            float nsps_fixed = (uspi - overhead_r1_uspi) / (float)nspins * 1000.0f;
            printf("elapsed time: %fms / %f(%f)us/iter / %f(%f) ns/spin\n", 
                   milliseconds, uspi, uspi-overhead_r1_uspi, nsps, nsps_fixed);
        }
    }
    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);

    res.gpu_to_cpu();
    // check results    
}

//-----------------------------------------------------------------------------

// Top-level benchmark driver: prints the run configuration, then times
// every launcher in the `kernels` table on an LX x LY x LZ
// single-precision lattice with NITERS iterations each.
void benchmark_gpu_hb3d_r1_sublat()
{
    Config cfg;
    cfg.dims = D;
    cfg.lat[DIR_X] = LX;
    cfg.lat[DIR_Y] = LY;
    cfg.lat[DIR_Z] = LZ;
    cfg.cpu_precision = SinglePrecision;
    cfg.gpu_precision = SinglePrecision;

    const int niters = NITERS;

    // Allocated here only to report buffer sizes / replica count below;
    // test_kernel() allocates its own working buffers.
    size_t pad = 0;
    Svec spins(cfg, pad);
    Svec res(cfg, pad);
    Jmat j(cfg, pad);

    size_t nspins = cfg.lat[DIR_X]*cfg.lat[DIR_Y]*cfg.lat[DIR_Z];
    size_t n_blocks = (nspins*spins.replicas())/block_size;
    printf("Heisenberg %dD:\n", cfg.dims);
    printf(" lattice:          %dx%dx%d\n", cfg.lat[DIR_X], cfg.lat[DIR_Y], cfg.lat[DIR_Z]);
    printf(" replicas:         %u\n", spins.replicas());
    printf(" tot spins:        %zu\n", nspins);   // was %d: nspins is size_t
    printf(" precision:        %s\n", "single");
    printf(" num iterations:   %d\n", niters);
    printf(" thr block size:   %d\n", block_size);
    printf(" thr block:        %dx%dx%d\n", block_x, block_y, block_z);
    printf(" num thr blocks:   %zu\n", n_blocks); // was %d: n_blocks is size_t
    printf(" spin vector size: %.3fKB\n", spins.m_gpu_bytes/(1000.0));
    printf(" J matrix size:    %.3fKB\n", j.m_gpu_bytes/(1000.0));

    // The sub-lattice thread layout requires each block to tile the
    // lattice exactly.
    assert(LZ >= block_z);
    assert(LY >= block_y);
    assert(LX >= block_x);

    assert(LZ % block_z == 0);
    assert(LY % block_y == 0);
    assert(LX % block_x == 0);

    // One RNG state per thread; fixed seed for reproducible runs.
    rng_uniform rng(block_size, n_blocks);
    rng.init(0xf4543243, true);

    // Run every table entry in order (the void kernel establishes the
    // overhead baseline used by later entries).
    int i=0;
    while(kernels[i].name) {
        test_kernel(niters, cfg, rng, i);
        ++i;
    }
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------

/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
