/*
  Copyright (c) 2010 Davide Rossetti (davide.rossetti@roma1.infn.it),
  Andrea Maiorano (andrea.maiorano@roma1.infn.it)

  This file is part of CuHB (CUDA Heisenberg) package.
  
  CuHB is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  CuHB is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.
  
  You should have received a copy of the GNU General Public License
  along with CuHB.  If not, see <http://www.gnu.org/licenses/>.

*/

/*
  HB on 2x4 replicas, even-odd trick

  input: 2 buffers with mixed even-odd sites
         buf0 has replica 0-3 even sites, 4-7 odd sites
         buf1 has         4-7 even sites, 0-3 odd sites

         layout is:
         [0,0,0 rep0 X][0,0,0 rep1 X][0,0,0 rep2 X][0,0,0 rep3 X]
         [0,0,1 rep4 X][0,0,1 rep5 X][0,0,1 rep6 X][0,0,1 rep7 X]
         ....
         [0,0,0 rep0 Y][0,0,0 rep1 Y][0,0,0 rep2 Y][0,0,0 rep3 Y]
         [0,0,1 rep4 Y][0,0,1 rep5 Y][0,0,1 rep6 Y][0,0,1 rep7 Y]
         ...

  output: same 2 mixed buffers
 */

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>

#include <cuda_runtime.h>
#include <math.h>
#include <math_constants.h>

#include "hb.h"
#include "rng.cuh"
#include "jmat.cuh"
#include "rng.hpp"
#include "rng.cuh"

#define NREPS 4

//-----------------------------------------------------------------------------
// spins
//-----------------------------------------------------------------------------

// One lattice site's 3-component spin vector for 4 replicas at once:
// each float4 member holds one spatial component (x/y/z), with replica k
// living in lane .x/.y/.z/.w across all three members (SoA over replicas).
typedef struct svec4 {
    float4 dimx;
    float4 dimy;
    float4 dimz;
} svec4;

// Distance (in float4 elements) between the x-, y- and z-component planes
// of a spin buffer; set from the host via cudaMemcpyToSymbol before any
// kernel launch (see gpu_heatbath_r4x2).
__constant__ int svec_r4x2_stride; // == volume

// Texture through which kernels read the opposite-parity spin buffer;
// rebound by the host wrapper before each launch.
texture<float4, 1, cudaReadModeElementType> svec4TexSingle;

#define texSpin svec4TexSingle

// Load the three component planes of site `idx` from texture `texref`
// into svec4 `tmp`; the planes lie svec_r4x2_stride elements apart.
#define READ_SVEC4(tmp, texref, idx)                            \
    {                                                           \
        tmp.dimx = tex1Dfetch((texref), idx + 0*svec_r4x2_stride);	\
        tmp.dimy = tex1Dfetch((texref), idx + 1*svec_r4x2_stride);	\
        tmp.dimz = tex1Dfetch((texref), idx + 2*svec_r4x2_stride);	\
    }

// Build an svec4 whose four replica lanes all carry the same spin (x, y, z).
static __inline__ __host__ __device__ svec4 make_svec4(float x, float y, float z) {
    svec4 s;
    s.dimx = make_float4(x, x, x, x);
    s.dimy = make_float4(y, y, y, y);
    s.dimz = make_float4(z, z, z, z);
    return s;
}

//-----------------------------------------------------------------------------
// operations on replica4
//-----------------------------------------------------------------------------

// RES += N * V : apply the 3x3 coupling matrix N (jmat, entries j00..j22)
// to the spin vector V, independently for each of the 4 replica lanes
// (.x/.y/.z/.w), accumulating into RES.
#define VEC_ACC_MUL_R4(RES, N, V)                                       \
    {                                                                   \
        (RES).dimx.x += (N).j00*(V).dimx.x + (N).j01*(V).dimy.x + (N).j02*(V).dimz.x; \
        (RES).dimx.y += (N).j00*(V).dimx.y + (N).j01*(V).dimy.y + (N).j02*(V).dimz.y; \
        (RES).dimx.z += (N).j00*(V).dimx.z + (N).j01*(V).dimy.z + (N).j02*(V).dimz.z; /* fixed: was accumulated into (RES).dimx.x */ \
        (RES).dimx.w += (N).j00*(V).dimx.w + (N).j01*(V).dimy.w + (N).j02*(V).dimz.w; \
                                                                        \
        (RES).dimy.x += (N).j10*(V).dimx.x + (N).j11*(V).dimy.x + (N).j12*(V).dimz.x; \
        (RES).dimy.y += (N).j10*(V).dimx.y + (N).j11*(V).dimy.y + (N).j12*(V).dimz.y; \
        (RES).dimy.z += (N).j10*(V).dimx.z + (N).j11*(V).dimy.z + (N).j12*(V).dimz.z; \
        (RES).dimy.w += (N).j10*(V).dimx.w + (N).j11*(V).dimy.w + (N).j12*(V).dimz.w; \
                                                                        \
        (RES).dimz.x += (N).j20*(V).dimx.x + (N).j21*(V).dimy.x + (N).j22*(V).dimz.x; \
        (RES).dimz.y += (N).j20*(V).dimx.y + (N).j21*(V).dimy.y + (N).j22*(V).dimz.y; \
        (RES).dimz.z += (N).j20*(V).dimx.z + (N).j21*(V).dimy.z + (N).j22*(V).dimz.z; \
        (RES).dimz.w += (N).j20*(V).dimx.w + (N).j21*(V).dimy.w + (N).j22*(V).dimz.w; \
    }

// Per-replica scalar product of two svec4: returns a float4 holding
// <V1,V2> = V1.x*V2.x + V1.y*V2.y + V1.z*V2.z for each replica lane.
#define SCALAR_PROD_R4(V1, V2)                                          \
    make_float4(                                                        \
                ( (V1).dimx.x * (V2).dimx.x + (V1).dimy.x * (V2).dimy.x + (V1).dimz.x * (V2).dimz.x ), \
                ( (V1).dimx.y * (V2).dimx.y + (V1).dimy.y * (V2).dimy.y + (V1).dimz.y * (V2).dimz.y ), \
                ( (V1).dimx.z * (V2).dimx.z + (V1).dimy.z * (V2).dimy.z + (V1).dimz.z * (V2).dimz.z ), \
                ( (V1).dimx.w * (V2).dimx.w + (V1).dimy.w * (V2).dimy.w + (V1).dimz.w * (V2).dimz.w ))


// Accumulate into RES the J*s contributions of the +X/+Y/+Z neighbours of
// site I at coordinates (X,Y,Z), with periodic boundary wrap (MOD).
// Couplings for the forward links are read at site I itself
// (READ_J(..., I, DIR_*)) — presumably each link is stored at its
// lower-coordinate endpoint; confirm against the READ_J definition.
#define LOCAL_FIELD_PLUS_R4(RES,I,X,Y,Z)  {     \
        {   /* X+ */                            \
            int xp = MOD(X+1, LX);              \
            int ixp = XYZ2IDX(xp,Y,Z);          \
            svec4 sxp;                          \
            jmat jxp;                           \
            READ_SVEC4(sxp, texSpin, ixp);		\
            READ_J(jxp, texJ, I, DIR_X);              \
            VEC_ACC_MUL_R4(RES, jxp, sxp);		\
        }{  /* Y+ */                            \
            int yp = MOD(Y+1, LY);              \
            int iyp = XYZ2IDX(X,yp,Z);          \
            svec4 syp;                          \
            jmat jyp;                           \
            READ_SVEC4(syp, texSpin, iyp);		\
            READ_J(jyp, texJ, I, DIR_Y);              \
            VEC_ACC_MUL_R4(RES, jyp, syp);		\
        }{  /* Z+ */                            \
            int zp = MOD(Z+1, LZ);              \
            int izp = XYZ2IDX(X,Y,zp);          \
            svec4 szp;                          \
            jmat jzp;                           \
            READ_SVEC4(szp, texSpin, izp);		\
            VEC_ACC_MUL_R4(RES, jzp, szp);		\
        }                                       \
    }

// Accumulate into RES the J*s contributions of the -X/-Y/-Z neighbours of
// site I, with periodic wrap. Unlike the PLUS macro, couplings here are
// read at the *neighbour* index (ixm/iym/izm) — consistent with links
// being stored at their lower-coordinate endpoint; TODO confirm vs READ_J.
#define LOCAL_FIELD_MINUS_R4(RES,I,X,Y,Z)  {    \
        { /* X- */                              \
            int xm = MOD(X-1, LX);              \
            int ixm = XYZ2IDX(xm,Y,Z);          \
            svec4 sxm;                          \
            jmat jxm;                           \
            READ_SVEC4(sxm, texSpin, ixm);		\
            READ_J(jxm, texJ, ixm, DIR_X);			\
            VEC_ACC_MUL_R4(RES, jxm, sxm);		\
        }{ /* Y- */                             \
            int ym = MOD(Y-1, LY);              \
            int iym = XYZ2IDX(X,ym,Z);          \
            svec4 sym;                          \
            jmat jym;                           \
            READ_SVEC4(sym, texSpin, iym);		\
            READ_J(jym, texJ, iym, DIR_Y);       \
            VEC_ACC_MUL_R4(RES, jym, sym);		\
        }{ /* Z- */                             \
            int zm = MOD(Z-1, LZ);              \
            int izm = XYZ2IDX(X,Y,zm);          \
            svec4 szm;                          \
            jmat jzm;                           \
            READ_SVEC4(szm, texSpin, izm);		\
            READ_J(jzm, texJ, izm, DIR_Z);			\
            VEC_ACC_MUL_R4(RES, jzm, szm);		\
        }                                       \
    }

// Full 6-neighbour local field of site I: sum of J*s over the three
// forward and three backward links.
#define LOCAL_FIELD_R4(RES,I,X,Y,Z)             \
    {                                           \
        LOCAL_FIELD_PLUS_R4(RES,I,X,Y,Z);		\
        LOCAL_FIELD_MINUS_R4(RES,I,X,Y,Z);		\
    }

//-----------------------------------------------------------------------------
// kernels
//-----------------------------------------------------------------------------

// Single-precision pi from math_constants.h.
#define PI CUDART_PI_F

// Inverse temperature; set from the host via cudaMemcpyToSymbol.
__constant__ float beta_f;

// Heatbath update of 4 replicas per lattice site, single precision.
// One thread per site; replica k lives in lane .x/.y/.z/.w of every float4.
// Neighbour spins are read through texSpin (the opposite-parity buffer bound
// by the host wrapper), couplings through texJ; beta_f and svec_r4x2_stride
// must be set before launch. Results go to the three component planes of res.
__global__ void
hb3d_heatbath_sp_r4(float4* res, rng_state* states, int size)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    // decompose flat index into lattice coordinates
    int x = i/(LY*LZ);         // i & XMASK
    int y = (i-x*(LY*LZ))/LZ; // i & YMASK = ((LZ*LY-1)^(LZ-1)
    int z = i-x*(LY*LZ)-y*LZ; // i & ZMASL = (LZ-1)

    // The shm16 generator cooperates across the thread block, so every
    // thread must draw its numbers — even threads masked out by the
    // i < size guard below (they waste rng numbers by design).
    float2 rng0, rng1, rng2, rng3;
    rng_uniform_shm16_4xfloat2(states, rng0, rng1, rng2, rng3);

    if(i < size) {
        // local field h = beta * sum over 6 neighbours of J*s
        svec4 h = make_svec4(0,0,0);
        LOCAL_FIELD_R4(h,i,x,y,z);
        h.dimx = make_float4(beta_f * h.dimx.x, beta_f * h.dimx.y, beta_f * h.dimx.z, beta_f * h.dimx.w);
        h.dimy = make_float4(beta_f * h.dimy.x, beta_f * h.dimy.y, beta_f * h.dimy.z, beta_f * h.dimy.w);
        h.dimz = make_float4(beta_f * h.dimz.x, beta_f * h.dimz.y, beta_f * h.dimz.z, beta_f * h.dimz.w);
        // |h| per replica. NOTE(review): mh == 0 would produce NaNs in the
        // divisions below — presumably never happens in practice; confirm.
        float4 h2 = SCALAR_PROD_R4(h, h);
        float4 mh = make_float4(sqrtf(h2.x),sqrtf(h2.y),sqrtf(h2.z),sqrtf(h2.w));
        // azimuth uniform in [0, 2*pi); cos(theta) sampled from the
        // heatbath distribution via inverse-CDF of exp(mh*t)
        float4 phi = make_float4(2.0f*PI*rng0.y, 2.0f*PI*rng1.y, 2.0f*PI*rng2.y, 2.0f*PI*rng3.y);
        float4 costheta = make_float4(logf(1.0f+rng0.x*(expf(2*mh.x)-1))/mh.x-1.0f,
                                      logf(1.0f+rng1.x*(expf(2*mh.y)-1))/mh.y-1.0f,
                                      logf(1.0f+rng2.x*(expf(2*mh.z)-1))/mh.z-1.0f,
                                      logf(1.0f+rng3.x*(expf(2*mh.w)-1))/mh.w-1.0f);
        float4 sintheta = make_float4(sqrtf(1.0f - costheta.x*costheta.x),
                                      sqrtf(1.0f - costheta.y*costheta.y),
                                      sqrtf(1.0f - costheta.z*costheta.z),
                                      sqrtf(1.0f - costheta.w*costheta.w));
        // unit field direction and the norm of its xy projection
        // hnorm = make_float3(h.x/mh, h.y/mh, h.z/mh);
        float4 hnorm_z = make_float4(h.dimz.x/mh.x, h.dimz.y/mh.y, h.dimz.z/mh.z, h.dimz.w/mh.w);
        float4 mhxy = make_float4(sqrtf(1.0f-hnorm_z.x*hnorm_z.x), 
                                  sqrtf(1.0f-hnorm_z.y*hnorm_z.y), 
                                  sqrtf(1.0f-hnorm_z.z*hnorm_z.z), 
                                  sqrtf(1.0f-hnorm_z.w*hnorm_z.w));
        float4 hnorm_x = make_float4(h.dimx.x/mh.x, h.dimx.y/mh.y, h.dimx.z/mh.z, h.dimx.w/mh.w);
        float4 hnorm_y = make_float4(h.dimy.x/mh.x, h.dimy.y/mh.y, h.dimy.z/mh.z, h.dimy.w/mh.w);
        //float3 c = make_float3(hnorm_x/mhxy, hnorm_y/mhxy, hnorm_z);
        float4 c_x = make_float4(hnorm_x.x/mhxy.x, hnorm_x.y/mhxy.y, hnorm_x.z/mhxy.z, hnorm_x.w/mhxy.w);
        float4 c_y = make_float4(hnorm_y.x/mhxy.x, hnorm_y.y/mhxy.y, hnorm_y.z/mhxy.z, hnorm_y.w/mhxy.w);
        // NOTE(review): the reference comment above uses c.z = hnorm_z, but
        // this line also divides by mhxy — confirm which form is intended.
        float4 c_z = make_float4(hnorm_z.x/mhxy.x, hnorm_z.y/mhxy.y, hnorm_z.z/mhxy.z, hnorm_z.w/mhxy.w);
        float4 cosphi, sinphi;
        sincosf(phi.x, &sinphi.x, &cosphi.x);
        sincosf(phi.y, &sinphi.y, &cosphi.y);
        sincosf(phi.z, &sinphi.z, &cosphi.z);
        sincosf(phi.w, &sinphi.w, &cosphi.w);
        // Rotate the sampled spin into the frame of h and store it,
        // one component plane at a time. Scalar reference:
        // si.x = c.x * (cosphi * sintheta * c.z + costheta * mhxy) - sintheta * sinphi * c.y;
        // si.y = c.x * sintheta * sinphi + c.y * (cosphi * c.z * sintheta + costheta * mhxy);
        // si.z = costheta * c.z - cosphi * sintheta * mhxy;
        // (lanes .z/.w fixed: they previously used mhxy.w/mhxy.z, mixing replicas)
        res[i+0*svec_r4x2_stride] = make_float4(c_x.x * (cosphi.x * sintheta.x * c_z.x + costheta.x * mhxy.x) - sintheta.x * sinphi.x * c_y.x,
                                                c_x.y * (cosphi.y * sintheta.y * c_z.y + costheta.y * mhxy.y) - sintheta.y * sinphi.y * c_y.y,
                                                c_x.z * (cosphi.z * sintheta.z * c_z.z + costheta.z * mhxy.z) - sintheta.z * sinphi.z * c_y.z,
                                                c_x.w * (cosphi.w * sintheta.w * c_z.w + costheta.w * mhxy.w) - sintheta.w * sinphi.w * c_y.w);

        res[i+1*svec_r4x2_stride] = make_float4(c_x.x * sintheta.x * sinphi.x + c_y.x * (cosphi.x * c_z.x * sintheta.x + costheta.x * mhxy.x),
                                                c_x.y * sintheta.y * sinphi.y + c_y.y * (cosphi.y * c_z.y * sintheta.y + costheta.y * mhxy.y),
                                                c_x.z * sintheta.z * sinphi.z + c_y.z * (cosphi.z * c_z.z * sintheta.z + costheta.z * mhxy.z),
                                                c_x.w * sintheta.w * sinphi.w + c_y.w * (cosphi.w * c_z.w * sintheta.w + costheta.w * mhxy.w));

        res[i+2*svec_r4x2_stride] = make_float4(costheta.x * c_z.x - cosphi.x * sintheta.x * mhxy.x,
                                                costheta.y * c_z.y - cosphi.y * sintheta.y * mhxy.y,
                                                costheta.z * c_z.z - cosphi.z * sintheta.z * mhxy.z,
                                                costheta.w * c_z.w - cosphi.w * sintheta.w * mhxy.w);
    }
}

//-----------------------------------------------------------------------------

// Empty kernel: measures bare launch overhead for the benchmark
// (kernel_id == 1 in test_kernel defines the overhead baseline).
// The commented lines mirror the index/RNG setup of the real kernels.
__global__ void
hb3d_void_kernel_sp_r4(float4* res, rng_state* states, int size)
{
    // int i = blockIdx.x*blockDim.x+threadIdx.x;
    // int x = i/(LY*LZ);         // i & XMASK
    // int y = (i-x*(LY*LZ))/LZ; // i & YMASK = ((LZ*LY-1)^(LZ-1)
    // int z = i-x*(LY*LZ)-y*LZ; // i & ZMASL = (LZ-1)

    // float2 rng0, rng1, rng2, rng3;
}

//-----------------------------------------------------------------------------

// Benchmark kernel: draws one float2 per thread and broadcasts it to all
// four replica lanes of the two output planes (cost of a single RNG draw).
__global__ void
hb3d_rng_2f_sp_r4(float4* res, rng_state* states, int size)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    // (dead x/y/z index decomposition removed: values were never used)

    // Block-cooperative generator: every thread must draw, so do it
    // before the bounds guard.
    float2 rng0, rng1, rng2, rng3;
    rng0 = rng_uniform_shm16_float2(states);
    rng1 = rng0;
    rng2 = rng0;
    rng3 = rng0;

    // guard added: prevents out-of-range global writes when size is not a
    // multiple of the launch width (matches hb3d_heatbath_sp_r4)
    if(i < size) {
        res[i + 0*svec_r4x2_stride] = make_float4(rng0.x, rng1.x, rng2.x, rng3.x);
        res[i + 1*svec_r4x2_stride] = make_float4(rng0.y, rng1.y, rng2.y, rng3.y);
    }
}

//-----------------------------------------------------------------------------

// Benchmark kernel: draws four independent float2 per thread via four
// separate generator calls (cost of 4x sequential RNG draws).
__global__ void
hb3d_rng_4x2f_sp_r4(float4* res, rng_state* states, int size)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    // (dead x/y/z index decomposition removed: values were never used)

    // Block-cooperative generator: every thread must draw, so do it
    // before the bounds guard.
    float2 rng0, rng1, rng2, rng3;
    rng0 = rng_uniform_shm16_float2(states);
    rng1 = rng_uniform_shm16_float2(states);
    rng2 = rng_uniform_shm16_float2(states);
    rng3 = rng_uniform_shm16_float2(states);

    // guard added: prevents out-of-range global writes when size is not a
    // multiple of the launch width (matches hb3d_heatbath_sp_r4)
    if(i < size) {
        res[i + 0*svec_r4x2_stride] = make_float4(rng0.x, rng1.x, rng2.x, rng3.x);
        res[i + 1*svec_r4x2_stride] = make_float4(rng0.y, rng1.y, rng2.y, rng3.y);
    }
}

//-----------------------------------------------------------------------------

// Benchmark kernel: draws four float2 per thread with one batched
// generator call (cost of the 4x2-float fused draw used by the heatbath).
__global__ void
hb3d_rng_1x8f_sp_r4(float4* res, rng_state* states, int size)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    // (dead x/y/z index decomposition removed: values were never used)

    // Block-cooperative generator: every thread must draw, so do it
    // before the bounds guard.
    float2 rng0, rng1, rng2, rng3;
    rng_uniform_shm16_4xfloat2(states, rng0, rng1, rng2, rng3);

    // guard added: prevents out-of-range global writes when size is not a
    // multiple of the launch width (matches hb3d_heatbath_sp_r4)
    if(i < size) {
        res[i + 0*svec_r4x2_stride] = make_float4(rng0.x, rng1.x, rng2.x, rng3.x);
        res[i + 1*svec_r4x2_stride] = make_float4(rng0.y, rng1.y, rng2.y, rng3.y);
    }
}

//-----------------------------------------------------------------------------

// Disabled scaffolding for a future 8-replica spin container; compiled out
// (#if 0), kept for reference only. All methods are empty stubs.
#if 0
struct SvecR8 {
    Config m_cfg;

    size_t m_volume;
    size_t m_pad;
    size_t m_stride;
    size_t m_replicas;

    size_t m_gpu_bytes;
    void*  m_gpu_buf0;
    void*  m_gpu_buf1;

    size_t m_cpu_bytes;
    void*  m_cpu_buf;

    SvecR8(Config &conf, int pad, int packed = 1) {}
    ~SvecR8() {}
    void init(float x, float y, float z) {}
    void cpu_to_gpu() {}
    void gpu_to_cpu() {}
};
#endif

//-----------------------------------------------------------------------------

// Host driver for one even/odd heatbath sweep over both mixed buffers:
// the texture is first bound to s0 while s1 is refreshed, then vice versa.
// e_res is accepted for interface uniformity with the other drivers.
void gpu_heatbath_r4x2(rng_uniform &rng, float *e_res, Svec &s0, Svec &s1, Jmat &j)
{
    // both buffers must describe identically shaped 4-replica lattices
    assert(s0.m_volume   == s1.m_volume);
    assert(s0.m_stride   == s1.m_stride);
    assert(s0.m_gpu_bytes == s1.m_gpu_bytes);
    assert(s0.replicas() == s1.replicas());
    assert(s0.replicas() == NREPS);

    // TODO: check equal cfg on s_res, s_src and J

    dim3 grid(s1.m_volume/BLOCK_SIZE, 1, 1);
    dim3 threads(BLOCK_SIZE, 1, 1);

    // couplings and device-side constants
    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int));
    cudaMemcpyToSymbol("svec_r4x2_stride", &s0.m_stride, sizeof(int));

    float beta_f = 1.0f;
    cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float));

    const size_t volume = s0.m_volume;
    const size_t bytes  = s0.m_gpu_bytes;

    // pass 1: read s0 through the texture, refresh s1
    cudaBindTexture(0, texSpin, s0.m_gpu_buf, bytes);
    hb3d_heatbath_sp_r4 <<<grid, threads>>> ((float4*)s1.m_gpu_buf, rng.gpu_states(), volume);
    // pass 2: read s1, refresh s0
    cudaBindTexture(0, texSpin, s1.m_gpu_buf, bytes);
    hb3d_heatbath_sp_r4 <<<grid, threads>>> ((float4*)s0.m_gpu_buf, rng.gpu_states(), volume);
}

//-----------------------------------------------------------------------------

// Host driver for the launch-overhead benchmark: same setup as the real
// heatbath driver, launching the empty kernel twice.
// e_res is accepted for interface uniformity with the other drivers.
void gpu_void_kernel_r4x2(rng_uniform &rng, float *e_res, Svec &s0, Svec &s1, Jmat &j)
{
    dim3 gridDim(s1.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    assert(s0.m_volume   == s1.m_volume);
    assert(s0.m_stride   == s1.m_stride);
    assert(s0.m_gpu_bytes == s1.m_gpu_bytes);
    assert(s0.replicas() == s1.replicas());
    assert(s0.replicas() == NREPS);

    // TODO: check equal cfg on s_res, s_src and J

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int));

    // fixed: was "svec4_stride", a symbol that does not exist in this
    // translation unit — the kernels here read svec_r4x2_stride
    cudaMemcpyToSymbol("svec_r4x2_stride", &s0.m_stride, sizeof(int));

    float beta_f = 1.0f;
    cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float));

    size_t volume = s0.m_volume;
    size_t gpu_bytes = s0.m_gpu_bytes;
    cudaBindTexture(0, texSpin, s0.m_gpu_buf, gpu_bytes);
    hb3d_void_kernel_sp_r4 <<<gridDim, blockDim>>> ((float4*)s1.m_gpu_buf, rng.gpu_states(), volume);
    cudaBindTexture(0, texSpin, s1.m_gpu_buf, gpu_bytes);
    hb3d_void_kernel_sp_r4 <<<gridDim, blockDim>>> ((float4*)s0.m_gpu_buf, rng.gpu_states(), volume);
}

//-----------------------------------------------------------------------------

// Host driver that performs all setup but launches nothing: baseline for
// the host-side (non-launch) cost of one driver invocation.
// e_res is accepted for interface uniformity with the other drivers.
void gpu_no_kernel_r4x2(rng_uniform &rng, float *e_res, Svec &s0, Svec &s1, Jmat &j)
{
    dim3 gridDim(s1.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    assert(s0.m_volume   == s1.m_volume);
    assert(s0.m_stride   == s1.m_stride);
    assert(s0.m_gpu_bytes == s1.m_gpu_bytes);
    assert(s0.replicas() == s1.replicas());
    assert(s0.replicas() == NREPS);

    // TODO: check equal cfg on s_res, s_src and J

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int));

    // fixed: was "svec4_stride", a symbol that does not exist in this
    // translation unit — the kernels here read svec_r4x2_stride
    cudaMemcpyToSymbol("svec_r4x2_stride", &s0.m_stride, sizeof(int));

    float beta_f = 1.0f;
    cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float));

    size_t volume = s0.m_volume;
    size_t gpu_bytes = s0.m_gpu_bytes;
    cudaBindTexture(0, texSpin, s0.m_gpu_buf, gpu_bytes);
    //hb3d_heatbath_sp_r4 <<<gridDim, blockDim>>> ((float4*)s1.m_gpu_buf, rng.gpu_states(), volume);
    cudaBindTexture(0, texSpin, s1.m_gpu_buf, gpu_bytes);
    //hb3d_heatbath_sp_r4 <<<gridDim, blockDim>>> ((float4*)s0.m_gpu_buf, rng.gpu_states(), volume);
}

//-----------------------------------------------------------------------------

// Host driver for the single-draw RNG benchmark kernel (two passes,
// mirroring the heatbath driver's texture ping-pong).
// e_res is accepted for interface uniformity with the other drivers.
void gpu_rng_2f_r4x2(rng_uniform &rng, float *e_res, Svec &s0, Svec &s1, Jmat &j)
{
    dim3 gridDim(s1.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    assert(s0.m_volume   == s1.m_volume);
    assert(s0.m_stride   == s1.m_stride);
    assert(s0.m_gpu_bytes == s1.m_gpu_bytes);
    assert(s0.replicas() == s1.replicas());
    assert(s0.replicas() == NREPS);

    // TODO: check equal cfg on s_res, s_src and J

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int));

    // fixed: was "svec4_stride", a symbol that does not exist in this
    // translation unit — the kernel writes at svec_r4x2_stride offsets,
    // which would otherwise stay unset (overlapping writes at stride 0)
    cudaMemcpyToSymbol("svec_r4x2_stride", &s0.m_stride, sizeof(int));

    float beta_f = 1.0f;
    cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float));

    size_t volume = s0.m_volume;
    size_t gpu_bytes = s0.m_gpu_bytes;
    cudaBindTexture(0, texSpin, s0.m_gpu_buf, gpu_bytes);
    hb3d_rng_2f_sp_r4 <<<gridDim, blockDim>>> ((float4*)s1.m_gpu_buf, rng.gpu_states(), volume);
    cudaBindTexture(0, texSpin, s1.m_gpu_buf, gpu_bytes);
    hb3d_rng_2f_sp_r4 <<<gridDim, blockDim>>> ((float4*)s0.m_gpu_buf, rng.gpu_states(), volume);
}

//-----------------------------------------------------------------------------

// Host driver for the four-sequential-draws RNG benchmark kernel.
// e_res is accepted for interface uniformity with the other drivers.
void gpu_rng_4x2f_r4x2(rng_uniform &rng, float *e_res, Svec &s0, Svec &s1, Jmat &j)
{
    dim3 gridDim(s1.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    assert(s0.m_volume   == s1.m_volume);
    assert(s0.m_stride   == s1.m_stride);
    assert(s0.m_gpu_bytes == s1.m_gpu_bytes);
    assert(s0.replicas() == s1.replicas());
    assert(s0.replicas() == NREPS);

    // TODO: check equal cfg on s_res, s_src and J

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int));

    // fixed: was "svec4_stride", a symbol that does not exist in this
    // translation unit — the kernel writes at svec_r4x2_stride offsets
    cudaMemcpyToSymbol("svec_r4x2_stride", &s0.m_stride, sizeof(int));

    float beta_f = 1.0f;
    cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float));

    size_t volume = s0.m_volume;
    size_t gpu_bytes = s0.m_gpu_bytes;
    cudaBindTexture(0, texSpin, s0.m_gpu_buf, gpu_bytes);
    hb3d_rng_4x2f_sp_r4 <<<gridDim, blockDim>>> ((float4*)s1.m_gpu_buf, rng.gpu_states(), volume);
    cudaBindTexture(0, texSpin, s1.m_gpu_buf, gpu_bytes);
    hb3d_rng_4x2f_sp_r4 <<<gridDim, blockDim>>> ((float4*)s0.m_gpu_buf, rng.gpu_states(), volume);
}

//-----------------------------------------------------------------------------

// Host driver for the batched (1x8 floats) RNG benchmark kernel.
// e_res is accepted for interface uniformity with the other drivers.
void gpu_rng_1x8f_r4x2(rng_uniform &rng, float *e_res, Svec &s0, Svec &s1, Jmat &j)
{
    dim3 gridDim(s1.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    assert(s0.m_volume   == s1.m_volume);
    assert(s0.m_stride   == s1.m_stride);
    assert(s0.m_gpu_bytes == s1.m_gpu_bytes);
    assert(s0.replicas() == s1.replicas());
    assert(s0.replicas() == NREPS);

    // TODO: check equal cfg on s_res, s_src and J

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int));

    // fixed: was "svec4_stride", a symbol that does not exist in this
    // translation unit — the kernel writes at svec_r4x2_stride offsets
    cudaMemcpyToSymbol("svec_r4x2_stride", &s0.m_stride, sizeof(int));

    float beta_f = 1.0f;
    cudaMemcpyToSymbol("beta_f", &beta_f, sizeof(float));

    size_t volume = s0.m_volume;
    size_t gpu_bytes = s0.m_gpu_bytes;
    cudaBindTexture(0, texSpin, s0.m_gpu_buf, gpu_bytes);
    hb3d_rng_1x8f_sp_r4 <<<gridDim, blockDim>>> ((float4*)s1.m_gpu_buf, rng.gpu_states(), volume);
    cudaBindTexture(0, texSpin, s1.m_gpu_buf, gpu_bytes);
    hb3d_rng_1x8f_sp_r4 <<<gridDim, blockDim>>> ((float4*)s0.m_gpu_buf, rng.gpu_states(), volume);
}

//-----------------------------------------------------------------------------

// One benchmark entry: display name plus the host driver to run
// (all drivers share the same signature).
struct kernel_def {
    const char* name;
    void (*fun)(rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j);
};

// Benchmark registry, NULL-terminated. Order matters to test_kernel:
// entry 0 also prints the setup summary, entry 1 (void kernel) defines
// the launch-overhead baseline subtracted from later timings.
// (first two display names fixed to match their functions' _r4x2 suffix)
static struct kernel_def kernels[] = {
    {"gpu_no_kernel_r4x2",   gpu_no_kernel_r4x2},
    {"gpu_void_kernel_r4x2", gpu_void_kernel_r4x2},
    {"gpu_rng_2f_r4x2",      gpu_rng_2f_r4x2},
    {"gpu_rng_4x2f_r4x2",    gpu_rng_4x2f_r4x2},
    {"gpu_rng_1x8f_r4x2",    gpu_rng_1x8f_r4x2},
    {"gpu_heatbath_r4x2",    gpu_heatbath_r4x2},
    {NULL, NULL}
};

// Launch overhead (us/iteration) measured by the void kernel
// (kernel_id == 1); subtracted from later kernels' timings.
static float overhead_uspi = 0.0f;

// Allocate spin/coupling buffers for cfg, run driver `kernel_id` for
// niters iterations and print elapsed-time statistics; with dump_info
// the lattice/launch configuration is printed first.
static void test_kernel(bool dump_info, size_t niters, Config &cfg, rng_uniform &rng, int kernel_id)
{
    cudaEvent_t startEvent, stopEvent;

    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);

    size_t pad = 0;
    Svec s0(cfg, pad, NREPS);
    Svec s1(cfg, pad, NREPS);
    Jmat j(cfg, pad);

    size_t volume = s0.m_volume;
    size_t totspins = volume*NREPS*2; // 2x4 replicas
    size_t n_blocks = (volume)/BLOCK_SIZE;

    if(dump_info) {
        printf("Heisenberg %dD:\n", cfg.dims);
        printf(" lattice:          %dx%dx%d\n", cfg.lat[DIR_X], cfg.lat[DIR_Y], cfg.lat[DIR_Z]);
        printf(" # replicas:       2x%d\n", NREPS);
        // %zu fixes: volume/totspins/niters/n_blocks are size_t — printing
        // them with %d is undefined behavior on LP64 platforms
        printf(" volume:           %zu\n", volume);
        printf(" tot spins:        %zu\n", totspins);
        printf(" precision:        %s\n", "single");
        printf(" num iterations:   %zu\n", niters);
        printf(" thr block size:   %d\n", BLOCK_SIZE);
        printf(" num thr blocks:   %zu\n", n_blocks);
        printf(" spin vector size: 2x%.3fKB\n", s0.m_gpu_bytes/(1000.0));
        printf(" J matrix size:    %.3fKB\n", j.m_gpu_bytes/(1000.0));
    }

    s0.init(1.0, 1.0, 1.0);
    s0.cpu_to_gpu();
    s1.init(1.0, 1.0, 1.0);
    s1.cpu_to_gpu();
    j.init_ident();
    j.cpu_to_gpu();

    printf("kernel: %s... ", kernels[kernel_id].name); fflush(stdout);
    // start timer
    cudaEventRecord(startEvent, 0);
    // run kernel niters times; check for launch errors after each call
    float e;
    for(size_t it=0; it<niters; ++it) {   // size_t: matches niters' type
        (*kernels[kernel_id].fun)(rng, &e, s0, s1, j);
        cudaError_t err = cudaGetLastError();
        if(err != cudaSuccess) {
            error("CUDA error %d(%s) in spawning of kernel %d\n", 
                  (int)err, cudaGetErrorString(err), (int)it);
        }

    }
    // stop and sync timer (event sync also flushes the async launches)
    cudaEventRecord(stopEvent, 0);
    cudaEventSynchronize(stopEvent);
    // calc elapsed time
    {
        float milliseconds = 0.0f;
        cudaEventElapsedTime(&milliseconds, startEvent, stopEvent);
        float uspi = milliseconds / niters * 1000.0f;        // us per iteration
        float nsps =  uspi / (float)totspins * 1000.0f;      // ns per spin
        if(kernel_id == 1)
            overhead_uspi = uspi;   // void kernel defines the overhead baseline
        if(kernel_id <= 1)
            printf("elapsed time: %fms / %fus/iter / %f ns/spin\n", milliseconds, uspi, nsps);
        else {
            // report both raw and overhead-corrected figures
            float nsps_fixed = (uspi - overhead_uspi) / (float)totspins * 1000.0f;
            printf("elapsed time: %fms / %f(%f)us/iter / %f(%f) ns/spin\n", 
                   milliseconds, uspi, uspi-overhead_uspi, nsps, nsps_fixed);
        }
    }
    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);
    //res.gpu_to_cpu();
    // check results    
}

//-----------------------------------------------------------------------------

// Configure the lattice, seed the GPU RNG and run every registered
// benchmark driver in turn; the first run also prints the setup summary.
void benchmark_gpu_hb3d_r4x2()
{
    Config cfg;
    cfg.dims = D;
    cfg.lat[DIR_X] = LX;
    cfg.lat[DIR_Y] = LY;
    cfg.lat[DIR_Z] = LZ;
    cfg.cpu_precision = SinglePrecision;
    cfg.gpu_precision = SinglePrecision;

    //const int niters = NITERS;
    const int niters = 500;

    const size_t nspins   = LX*LY*LZ;
    const size_t n_blocks = nspins/BLOCK_SIZE;

    rng_uniform rng(BLOCK_SIZE, n_blocks);
    rng.init(0xf4543243, true);

    // walk the NULL-terminated registry
    for(int k = 0; kernels[k].name != NULL; ++k)
        test_kernel(k == 0, niters, cfg, rng, k);
}

//-----------------------------------------------------------------------------
/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
