/*
  Copyright (c) 2010 Davide Rossetti (davide.rossetti@roma1.infn.it),
  Andrea Maiorano (andrea.maiorano@roma1.infn.it)

  This file is part of CuHB (CUDA Heisenberg) package.
  
  CuHB is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  CuHB is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.
  
  You should have received a copy of the GNU General Public License
  along with CuHB.  If not, see <http://www.gnu.org/licenses/>.

*/

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* memset() in energy_buffer::do_alloc() */

#include <cmath>      /* std::abs(float) in check_spins() */

#include <cuda_runtime.h>
#include <math_constants.h>

#include "hb.h"
#include "jmat.cuh"
#include "rng.cuh"
#include "rng.cuh"
#include "rng.hpp"

//-----------------------------------------------------------------------------
// spins
//-----------------------------------------------------------------------------

// Stride of the spin array in constant memory; units unclear from this file
// (set from Svec::m_stride on the host) — TODO confirm units.
__constant__ int svec_stride;

// 1D texture over the single-precision spin field: one float4 per site.
texture<float4, 1, cudaReadModeElementType> svecTexSingle;

#define texSpin svecTexSingle

// A spin is a 3-vector packed in a float4; the .w component is padding
// (set to 0 by make_svec, occasionally reused for debug output).
typedef float4 svec;

// Fetch the spin at linear site index idx through the bound spin texture.
#define READ_S(tmp, idx)                         \
    {                                            \
        tmp = tex1Dfetch((texSpin), idx);        \
    }

// Build an svec from three components, zeroing the padding .w.
#define make_svec(X,Y,Z) make_float4(X,Y,Z,0)


//-----------------------------------------------------------------------------
// operations
//-----------------------------------------------------------------------------

// RES += N * V: accumulate the product of the 3x3 coupling matrix N (jmat,
// components j00..j22) with the 3-vector V into RES, component-wise.
#define VEC_ACC_MUL(RES, N, V)                                      \
    {                                                               \
        (RES).x += (N).j00*(V).x + (N).j01*(V).y + (N).j02*(V).z;   \
        (RES).y += (N).j10*(V).x + (N).j11*(V).y + (N).j12*(V).z;   \
        (RES).z += (N).j20*(V).x + (N).j21*(V).y + (N).j22*(V).z;   \
    }

// 3-component dot product; component .w is discarded
#define SCALAR_PROD(V1, V2)                                 \
    ( (V1).x * (V2).x + (V1).y * (V2).y + (V1).z * (V2).z )

#if 1
// Accumulate into RES the "plus" half of the local field at site I=(X,Y,Z):
// sum of J * s(neighbour) over the +x, +y, +z neighbours, with periodic
// boundaries (MOD).  The coupling for a plus link is read at index I itself.
#define LOCAL_FIELD_PLUS(RES,I,X,Y,Z)  {        \
        {   /* X+ */                            \
            int xp = MOD(X+1, LX);              \
            int ixp = XYZ2IDX(xp,Y,Z);          \
            svec sxp;                           \
            jmat jxp;                           \
            READ_S(sxp, ixp);                   \
            READ_J(jxp, texJ, I, DIR_X);        \
            VEC_ACC_MUL(RES, jxp, sxp);         \
        }{  /* Y+ */                            \
            int yp = MOD(Y+1, LY);              \
            int iyp = XYZ2IDX(X,yp,Z);          \
            svec syp;                           \
            jmat jyp;                           \
            READ_S(syp, iyp);                   \
            READ_J(jyp, texJ, I, DIR_Y);        \
            VEC_ACC_MUL(RES, jyp, syp);         \
        }{  /* Z+ */                            \
            int zp = MOD(Z+1, LZ);              \
            int izp = XYZ2IDX(X,Y,zp);          \
            svec szp;                           \
            jmat jzp;                           \
            READ_S(szp, izp);                   \
            READ_J(jzp, texJ, I, DIR_Z);        \
            VEC_ACC_MUL(RES, jzp, szp);         \
        }                                       \
    }

// Accumulate into RES the "minus" half of the local field (-x, -y, -z
// neighbours).  Here the coupling is read at the *neighbour's* index
// (ixm/iym/izm), i.e. the same link matrix the neighbour uses for its plus
// direction — presumably J is shared across the link without transposition;
// TODO confirm against the Jmat storage convention.
#define LOCAL_FIELD_MINUS(RES,I,X,Y,Z)  {       \
        { /* X- */                              \
            int xm = MOD(X-1, LX);              \
            int ixm = XYZ2IDX(xm,Y,Z);          \
            svec sxm;                           \
            jmat jxm;                           \
            READ_S(sxm, ixm);          \
            READ_J(jxm, texJ, ixm, DIR_X);      \
            VEC_ACC_MUL(RES, jxm, sxm);         \
        }{ /* Y- */                             \
            int ym = MOD(Y-1, LY);              \
            int iym = XYZ2IDX(X,ym,Z);          \
            svec sym;                           \
            jmat jym;                           \
            READ_S(sym, iym);          \
            READ_J(jym, texJ, iym, DIR_Y);      \
            VEC_ACC_MUL(RES, jym, sym);         \
        }{ /* Z- */                             \
            int zm = MOD(Z-1, LZ);              \
            int izm = XYZ2IDX(X,Y,zm);          \
            svec szm;                           \
            jmat jzm;                           \
            READ_S(szm, izm);          \
            READ_J(jzm, texJ, izm, DIR_Z);      \
            VEC_ACC_MUL(RES, jzm, szm);         \
        }                                       \
    }

// Full 6-neighbour local field at site I=(X,Y,Z), accumulated into RES.
#define LOCAL_FIELD(RES,I,X,Y,Z)                \
    {                                           \
        LOCAL_FIELD_PLUS(RES,I,X,Y,Z);          \
        LOCAL_FIELD_MINUS(RES,I,X,Y,Z);         \
    }

#else
#warning "using fake local field calculation"
// Debug stand-in: a single self-term in the +x direction (xp = X, i.e. the
// site itself), no real neighbour sum.
#define LOCAL_FIELD_PLUS(RES,I,X,Y,Z)           \
    /* X+ */                                    \
    int xp = X;                                 \
    int ixp = XYZ2IDX(xp,Y,Z);                  \
    svec sxp;                                   \
    jmat jxp;                                   \
    READ_S(sxp, ixp);                           \
    READ_J(jxp, texJ, I, DIR_X);                \
    VEC_ACC_MUL(RES, jxp, sxp);                 \
                                                \
    

#define LOCAL_FIELD(RES,I,X,Y,Z)                \
    LOCAL_FIELD_PLUS(RES,I,X,Y,Z);              \
    

#endif

//-----------------------------------------------------------------------------
// kernels
//-----------------------------------------------------------------------------

// Single-precision pi from CUDA's math_constants.h.
#define PI CUDART_PI_F

// Inverse temperature; uploaded from the host before each heatbath launch
// (see gpu_heatbath_r1).
__constant__ float beta_f;

// Single-replica 3D heatbath sweep, single precision.
//
// One thread per lattice site: linear index i decomposes as (x,y,z) with x
// the slowest and z the fastest coordinate.  Each thread draws two uniform
// deviates, builds the molecular field h = beta * sum_nn J*s from the bound
// spin/J textures, and resamples its spin from p(s) ~ exp(s.h), writing the
// result to res[i].  The .w component of the output carries inv_mhxy for
// debugging.
//
// NOTE: the RNG is advanced by EVERY thread, including those with
// i >= size, so the block-wide shared-memory RNG stays consistent.
__global__ void
hb3d_heatbath_sp_r1(float4* res, rng_state* states, int size)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    int x = i/(LY*LZ);        // slowest coordinate
    int y = (i-x*(LY*LZ))/LZ;
    int z = i-x*(LY*LZ)-y*LZ; // fastest coordinate

    // Drawn unconditionally (see NOTE above).
    float2 rng = rng_uniform_shm16_float2(states);

    if(i < size) {
        // h = beta * (6-neighbour local field)
        svec h = make_svec(0,0,0);
        LOCAL_FIELD(h,i,x,y,z);
        h = make_svec(beta_f * h.x, beta_f * h.y, beta_f * h.z);
        float h2 = SCALAR_PROD(h, h);
        float mh = sqrtf(h2);
        float inv_mh = 1.0f / mh;
        float phi = 2.0f * PI * rng.y;

        // Sample cos(theta) from p(c) ~ exp(mh*c) on [-1,1].  exp/log are
        // deliberately done in double: the single-precision form
        // logf(1+rng.x*(expf(2*mh)-1)) produced NaNs (see original note).
        // Locals renamed from 'expf'/'logf', which shadowed the CUDA math
        // functions of the same names.
        double e2mh = exp((double)2*mh);
        double lg = log(1 + (double)rng.x*(e2mh-1));
        float costheta = ((float)lg)*inv_mh-1;

// Floor for sqrt arguments so sintheta/mhxy never become NaN/inf when
// costheta or hnorm.z rounds to +/-1.
#define SQRT_CUTOFF 1.0e-12f

        float sqarg = 1.0f - costheta*costheta;
        float sintheta = sqrtf((sqarg<SQRT_CUTOFF)?SQRT_CUTOFF:sqarg);

        float3 hnorm = make_float3(h.x*inv_mh, h.y*inv_mh, h.z*inv_mh);

        // |h_xy| with the same clamp; all-float arithmetic (the original
        // '1.0-...' silently promoted this line to double).
        float mharg = 1.0f - hnorm.z*hnorm.z;
        float mhxy = sqrtf((mharg<SQRT_CUTOFF)?SQRT_CUTOFF:mharg);
        float inv_mhxy = 1.0f / mhxy;

        float3 c = make_float3(hnorm.x*inv_mhxy, hnorm.y*inv_mhxy, hnorm.z);
        float cosphi, sinphi;
        sincosf(phi, &sinphi, &cosphi);
        // Rotate (sintheta*cosphi, sintheta*sinphi, costheta) into the frame
        // whose z axis is hnorm.
        res[i] = make_float4(c.x * (cosphi * sintheta * c.z + costheta * mhxy) - sintheta * sinphi * c.y,
                             c.x * sintheta * sinphi + c.y * (cosphi * c.z * sintheta + costheta * mhxy),
                             costheta * c.z - cosphi * sintheta * mhxy,
                             inv_mhxy); // for debugging
    }
}

//-----------------------------------------------------------------------------

// Launch one heatbath sweep on a single replica: bind the J and source-spin
// textures, upload the strides and beta to constant memory, then run
// hb3d_heatbath_sp_r1 writing into s_res.
// NOTE(review): string-name cudaMemcpyToSymbol was removed in CUDA 5.0;
// newer toolkits require passing the symbol itself.
static void gpu_heatbath_r1(float beta, rng_uniform &rng, Svec &s_res, Svec &s_src, Jmat &j)
{
    dim3 gridDim(s_res.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    // TODO: check equal cfg on s_res, s_src and J
    assert(s_src.m_replicas == 1);
    assert(s_res.m_replicas == 1);

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    cuda_verify(cudaMemcpyToSymbol("beta_f", &beta, sizeof(float)));

    size_t dims = s_res.m_cfg.dims;
    switch(dims) {
    case 3:
        hb3d_heatbath_sp_r1 <<<gridDim, blockDim>>> ((float4*)s_res.m_gpu_buf, rng.gpu_states(), s_res.m_volume);
        break;
    default:
        // cast: dims is size_t, %d expects int
        error("unsupported dim %d\n", (int)dims);
    }
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------

// Compute the full (6-neighbour) local field at every site of a single
// replica and store it, one float4 per site, into res.  One thread per
// site; requires the spin and J textures to be bound by the caller.
__global__ void
hb3d_local_field_sp_r1(float4* res, int size)
{
    svec* lfield_out = (svec*)res;
    int site = blockIdx.x*blockDim.x + threadIdx.x;
    int sx   = site / (LY*LZ);
    int rem  = site - sx*(LY*LZ);
    int sy   = rem / LZ;
    int sz   = rem - sy*LZ;
    if(site < size) {
        svec field = make_svec(0,0,0);
        LOCAL_FIELD(field, site, sx, sy, sz);
        lfield_out[site] = field;
    }
}

//-----------------------------------------------------------------------------

// Compute the local field of s_src into s_res on the GPU (single replica):
// bind the textures, upload strides, launch hb3d_local_field_sp_r1.
static void gpu_calc_local_field_r1(Svec &s_res, Svec &s_src, Jmat &j)
{
    dim3 gridDim(s_res.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    // TODO: check equal cfg on s_res, s_src and J
    assert(s_src.m_replicas == 1);
    assert(s_res.m_replicas == 1);

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    size_t dims = s_res.m_cfg.dims;
    switch(dims) {
    case 3:
        hb3d_local_field_sp_r1 <<<gridDim, blockDim>>> ((float4*)s_res.m_gpu_buf, s_res.m_volume);
        break;
    default:
        // cast: dims is size_t, %d expects int
        error("unsupported dim %d\n", (int)dims);
    }
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------

// Per-site energy term s_i . h_i^(+): only the three "plus" links are used,
// so each lattice bond contributes exactly once.
// lacks the -1 factor (the caller negates when averaging)
__global__ void
hb3d_partial_energy_sp_r1(float* res, int size)
{
    int site = blockIdx.x*blockDim.x + threadIdx.x;
    int sx   = site / (LY*LZ);
    int rem  = site - sx*(LY*LZ);
    int sy   = rem / LZ;
    int sz   = rem - sy*LZ;
    if(site < size) {
        svec field = make_svec(0,0,0);
        LOCAL_FIELD_PLUS(field, site, sx, sy, sz);
        svec spin;
        READ_S(spin, site);
        res[site] = SCALAR_PROD(spin, field);
    }
}

//-----------------------------------------------------------------------------

// Scratch buffers for the two-stage energy reduction in gpu_calc_energy_r1:
// per-site partials on the device, per-block partials on device and host.
// A single static instance (ebuf, below) is reused across calls and only
// reallocated when the lattice volume changes.
struct energy_buffer {
    size_t m_volume;           // volume the buffers are currently sized for
    // before reduction
    float *m_d_partial_en;     // device: one partial energy per site
    size_t m_partial_en_bytes;
    // after reduction
    float *m_d_en;             // device: one partial energy per thread block
    float *m_h_en;             // host copy of m_d_en
    size_t m_en_bytes;

    // Release everything; safe to call when nothing is allocated
    // (the byte counters double as "is allocated" flags).
    void do_free()
    {
        if(m_partial_en_bytes) {
            cudaFree(m_d_partial_en);
            m_d_partial_en = NULL;
            m_partial_en_bytes = 0;
        }

        if(m_en_bytes) {
            cudaFree(m_d_en);
            m_d_en = NULL;
            free(m_h_en);
            m_h_en = NULL;
            m_en_bytes = 0;
        }

        m_volume = 0;
    }

    // Allocate and zero all buffers for the given site count / block count.
    // On failure, error() is called (presumably fatal — it is never
    // recovered from here).
    void do_alloc(size_t volume, size_t n_blocks)
    {
        m_volume = volume;

        m_partial_en_bytes = m_volume*sizeof(float);
        if(cudaMalloc((void**)&m_d_partial_en, m_partial_en_bytes) != cudaSuccess) {
            error("%s: error allocating GPU memory\n", __FUNCTION__);
        }
        if(cudaMemset(m_d_partial_en, 0, m_partial_en_bytes) != cudaSuccess) {
            error("%s: error setting GPU memory\n", __FUNCTION__);
        }

        m_en_bytes = n_blocks*sizeof(float);
        if(cudaMalloc((void**)&m_d_en, m_en_bytes) != cudaSuccess) {
            error("%s: error allocating GPU memory\n", __FUNCTION__);
        }
        if(cudaMemset(m_d_en, 0, m_en_bytes) != cudaSuccess) {
            error("%s: error setting GPU memory\n", __FUNCTION__);
        }

        m_h_en = (float*)malloc(m_en_bytes);
        if(m_h_en == NULL) {
            error("%s: error allocating CPU memory\n", __FUNCTION__);
        }
        memset(m_h_en, 0, m_en_bytes);        
    }

    // Lazy (re)allocation keyed on volume alone: assumes n_blocks is a pure
    // function of volume (true for gpu_calc_energy_r1, which passes
    // volume/BLOCK_SIZE) — TODO confirm if reused with other block sizes.
    void alloc(size_t volume, size_t n_blocks) {
        if(volume != m_volume) {
            do_free();
            if(volume != 0)
                do_alloc(volume, n_blocks);
        }
    }
};

// Single shared instance; zero-initialized as a static, so the first
// alloc() call sees m_volume == 0 and allocates.
static energy_buffer ebuf;

// Average energy per spin of a single replica:
//   1. hb3d_partial_energy_sp_r1 writes one bond-energy partial per site,
//   2. reduce_float_array reduces them to one partial per thread block,
//   3. the block partials are summed on the host.
// Returns -sum/volume (the -1 factor the kernel omits).
static float gpu_calc_energy_r1(Svec &s_src, Jmat &j)
{
    size_t volume = s_src.m_volume;
    size_t dims = s_src.m_cfg.dims;
    size_t n_blocks = volume/BLOCK_SIZE;
    dim3 gridDim(n_blocks, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    // TODO: check equal cfg on s_src and J
    assert(s_src.m_replicas == 1);

    // Lazy (re)allocation of the reduction scratch buffers.
    ebuf.alloc(volume, n_blocks);

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    // Print the host-side values: the __constant__ device symbols j_stride /
    // svec_stride cannot be read from host code (cudaMemcpyToSymbol does not
    // update their host shadows, so the old trace printed garbage).
    trace("j_stride=%d svec_stride=%d volume=%d\n", j.m_stride, s_src.m_stride, (int)volume);

    switch(dims) {
    case 3:
        hb3d_partial_energy_sp_r1 <<<gridDim, blockDim>>> (ebuf.m_d_partial_en, volume);
        break;
    default:
        // cast: dims is size_t, %d expects int
        error("gpu_calc_energy: unsupported dim %d\n", (int)dims);
    }

    float en = 0.0f;

    // Device-side tree reduction: volume partials -> n_blocks partials.
    reduce_float_array(volume, blockDim.x, gridDim.x, ebuf.m_d_partial_en, ebuf.m_d_en);

    // Blocking copy: also synchronizes with the kernels queued above.
    cuda_verify(cudaMemcpy(ebuf.m_h_en, ebuf.m_d_en, ebuf.m_en_bytes, cudaMemcpyDeviceToHost));

    for(size_t i=0; i<n_blocks; ++i) {
        if(is_nan(ebuf.m_h_en[i]))
            printf("block_en[%d]=%f\n", (int)i, ebuf.m_h_en[i]);
        en += ebuf.m_h_en[i];
    }

    float avg_en = -en/volume;

    if(is_nan(avg_en))
        printf("sum_en=%f avg_en=%f\n", en, avg_en);
    return avg_en;
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// e_res is dummy param

// kernel_def-compatible wrapper around gpu_calc_local_field_r1.
// beta, rng and e_res are unused; they exist only to match kernel_def::fun.
static void gpu_calc_local_field(float beta, rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j)
{
    assert(s_res.m_replicas == s_src.m_replicas);
    assert(s_res.m_replicas == 1);
    gpu_calc_local_field_r1(s_res, s_src, j);
}

//-----------------------------------------------------------------------------
// s_res is dummy param

// kernel_def-compatible wrapper: stores the average energy of s_src in
// *e_res.  beta, rng and s_res are unused; they match kernel_def::fun.
static void gpu_calc_energy(float beta, rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j)
{
    assert(s_src.m_replicas == 1);
    *e_res = gpu_calc_energy_r1(s_src, j);
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// e_res is dummy param

// kernel_def-compatible wrapper around gpu_heatbath_r1 (one sweep,
// s_src -> s_res).  e_res is unused; it matches kernel_def::fun.
static void gpu_heatbath(float beta, rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j)
{
    assert(s_src.m_replicas == 1);
    gpu_heatbath_r1(beta, rng, s_res, s_src, j);
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------

// Debug/benchmark kernel: write the two raw uniform deviates for each site,
// res[i] = (r1, r2, 0, 1), so the RNG stream can be inspected from the host.
// (The unused x/y/z index decomposition of the original was removed.)
__global__ void
hb3d_rng_kernel_sp(float4* res, rng_state* states, int size)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;

    // Advanced by every thread, same convention as the heatbath kernel.
    float2 rng = rng_uniform_shm16_float2(states);

    if(i < size) {
        res[i] = make_float4(rng.x, rng.y, 0, 1);
    }
}

//-----------------------------------------------------------------------------
// e_res is dummy param

// kernel_def-compatible wrapper around hb3d_rng_kernel_sp: fills s_res with
// raw RNG output.  beta, e_res and j are unused by the kernel; the texture/
// stride setup is kept identical to the other wrappers so benchmark timings
// stay comparable.
static void gpu_rng_kernel(float beta, rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j)
{
    dim3 gridDim(s_res.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    // TODO: check equal cfg on s_res, s_src and J
    assert(s_src.m_replicas == 1);
    assert(s_res.m_replicas == 1);

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    assert(3 == s_res.m_cfg.dims);
    hb3d_rng_kernel_sp <<<gridDim, blockDim>>> ((float4*)s_res.m_gpu_buf, rng.gpu_states(), s_res.m_volume);
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------

// Benchmark baseline kernel: computes only the thread-index decomposition
// and touches no memory.  Used (via gpu_void_kernel, kernel id 1) to measure
// pure launch overhead.  x/y/z are intentionally left unused — the compiler
// is expected to eliminate them, leaving an empty kernel body.
__global__ void
hb3d_void_kernel_sp(float* res, int size)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    int x = i/(LY*LZ);
    int y = (i-x*(LY*LZ))/LZ;
    int z = i-x*(LY*LZ)-y*LZ;
    // if(i < size) {
    //     svec lfield = make_svec(0,0,0);
    //     LOCAL_FIELD_PLUS(lfield,i,x,y,z);
    //     svec si;
    //     READ_S(si, i);
    //     float en = SCALAR_PROD(si, lfield);
    //     res[i] = en;
    // }
}

//-----------------------------------------------------------------------------
// e_res is dummy param

// kernel_def-compatible wrapper around the empty hb3d_void_kernel_sp:
// full texture/stride setup plus an empty kernel launch, used as the
// launch-overhead baseline in test_kernel (id 1).  beta, rng and e_res
// are unused.
static void gpu_void_kernel(float beta, rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j)
{
    dim3 gridDim(s_res.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    // TODO: check equal cfg on s_res, s_src and J
    assert(s_src.m_replicas == 1);
    assert(s_res.m_replicas == 1);

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    size_t dims = s_res.m_cfg.dims;
    switch(dims) {
    case 3:
        hb3d_void_kernel_sp <<<gridDim, blockDim>>> ((float*)s_res.m_gpu_buf, s_res.m_volume);
        break;
    default:
        // cast: dims is size_t, %d expects int
        error("unsupported dim %d\n", (int)dims);
    }
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// e_res is dummy param

// kernel_def-compatible wrapper that performs all the host-side setup of
// the other wrappers but launches NO kernel: baseline for the pure CPU-side
// cost (texture binds + symbol copies) in test_kernel (id 0).  beta, rng
// and e_res are unused.
static void gpu_no_kernel(float beta, rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j)
{
    dim3 gridDim(s_res.m_volume/BLOCK_SIZE, 1, 1);
    dim3 blockDim(BLOCK_SIZE, 1, 1);

    // TODO: check equal cfg on s_res, s_src and J
    assert(s_src.m_replicas == 1);
    assert(s_res.m_replicas == 1);

    cudaBindTexture(0, jTexSingle, j.m_gpu_buf, j.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("j_stride", &j.m_stride, sizeof(int)));

    cudaBindTexture(0, texSpin, s_src.m_gpu_buf, s_src.m_gpu_bytes);
    cuda_verify(cudaMemcpyToSymbol("svec_stride", &s_src.m_stride, sizeof(int)));

    size_t dims = s_res.m_cfg.dims;
    switch(dims) {
    case 3:
        // deliberately no launch
        break;
    default:
        // cast: dims is size_t, %d expects int
        error("unsupported dim %d\n", (int)dims);
    }
}

//-----------------------------------------------------------------------------

// A benchmark entry: human-readable name plus a launcher with the uniform
// wrapper signature used by all gpu_* functions above.
struct kernel_def {
    const char* name;
    void (*fun)(float beta, rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j);
};

// Benchmarked kernels, cheapest first.  Ids 0 and 1 are the overhead
// baselines used by test_kernel; the NULL-name entry terminates the list.
static struct kernel_def kernels[] = {
    {"gpu_no_kernel",        gpu_no_kernel},
    {"gpu_void_kernel",      gpu_void_kernel},
    {"gpu_rng_kernel",       gpu_rng_kernel},
    {"gpu_calc_local_field", gpu_calc_local_field},
    {"gpu_calc_energy",      gpu_calc_energy},
    {"gpu_heatbath",         gpu_heatbath},
    {NULL, NULL}
};

// us/iteration measured for the void kernel (kernel_id 1), subtracted from
// later kernels' timings as a launch-overhead correction.
static float overhead_r1_uspi = 0.0f;

// Time `niters` launches of kernels[kernel_id] with CUDA events and print
// total ms, us/iteration and ns/spin.  Ids <= 1 establish/print the raw
// overhead baseline; higher ids also print overhead-corrected figures.
static void test_kernel(float beta, size_t niters, Config &cfg, rng_uniform &rng, int kernel_id)
{
    cudaEvent_t startEvent, stopEvent;

    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);

    size_t pad = 0;
    Svec spins(cfg, pad);
    Svec res(cfg, pad);
    Jmat j(cfg, pad);

    size_t nspins = cfg.lat[DIR_X]*cfg.lat[DIR_Y]*cfg.lat[DIR_Z];

    // Deterministic starting point: aligned spins, identity couplings.
    spins.init(1.0, 0.0, 0.0);
    j.init_ident();

    spins.cpu_to_gpu();
    j.cpu_to_gpu();

    printf("kernel: %s... ", kernels[kernel_id].name); fflush(stdout);
    // start timer
    cudaEventRecord(startEvent, 0);

    float e;
    // Clear any sticky error left by earlier work before timing starts.
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess) {
        error("CUDA error %d(%s) before kernel %d looping\n", (int)err, cudaGetErrorString(err), (int)kernel_id);
    }

    // size_t loop counter: niters is size_t (the old int counter mixed
    // signedness in the comparison).
    for(size_t i=0; i<niters; ++i) {
        (*kernels[kernel_id].fun)(beta, rng, &e, res, spins, j);
        err = cudaGetLastError();
        if(err != cudaSuccess) {
            error("CUDA error %d(%s) in spawning of kernel %d iter=%d\n", (int)err, cudaGetErrorString(err), (int)kernel_id, (int)i);
        }
    }
    // stop and sync timer: waits for all queued kernels to finish
    cudaEventRecord(stopEvent, 0);
    cudaEventSynchronize(stopEvent);
    // calc elapsed time
    {
        float milliseconds = 0.0f;
        cudaEventElapsedTime(&milliseconds, startEvent, stopEvent);
        float uspi = milliseconds / niters * 1000.0f;
        float nsps = uspi / (float)nspins * 1000.0f;
        if(kernel_id == 1) // "gpu_void_kernel": launch-overhead baseline
            overhead_r1_uspi = uspi;
        if(kernel_id <= 1)
            printf("elapsed time: %fms / %fus/iter / %f ns/spin\n", milliseconds, uspi, nsps);
        else {
            // Report raw and (overhead-corrected) figures side by side.
            float nsps_fixed = (uspi - overhead_r1_uspi) / (float)nspins * 1000.0f;
            printf("elapsed time: %fms / %f(%f)us/iter / %f(%f) ns/spin\n", 
                   milliseconds, uspi, uspi-overhead_r1_uspi, nsps, nsps_fixed);
        }
    }
    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);

    res.gpu_to_cpu();
    // check results    
}

//-----------------------------------------------------------------------------

// Initial spin direction used by do_basic_test and checked by check_spins.
static const vec init_dir = make_vec(1.0, 0.0, 0.0);

// Sanity checks on a spin configuration: copy it back from the GPU, warn if
// the alignment with init_dir has drifted from 1 or if the unit-norm check
// fails.
static void check_spins(Svec &s)
{
    s.gpu_to_cpu();

    float align = s.calc_alignment(init_dir);
    if(std::abs(align-1) > 1.0e-5)
        warn("alignment=%f\n", align);

    if(!s.check_norm())
        warn("1-norm spin check failed\n");
}

// Debug helper: fill `res` with raw RNG output via gpu_rng_kernel and dump
// it to stdout for inspection.  spins/j are only forwarded for the wrapper's
// texture setup.
static void check_rng(rng_uniform &rng, Svec &res, Svec &spins, Jmat &j)
{
    printf("# calling rng_kernel\n");
    float dummy_beta = 0;
    float dummy_en = 0;
    gpu_rng_kernel(dummy_beta, rng, &dummy_en, res, spins, j);
    res.gpu_to_cpu();
    res.dump();
}

// End-to-end smoke test on one replica: ordered start along init_dir,
// identity couplings, NITERS heatbath half-step pairs at beta = 1, with
// periodic alignment/norm/energy checks (every nsteps/10 steps).
static void do_basic_test(Config &cfg, rng_uniform &rng)
{
    size_t pad = 0;
    Svec spins(cfg, pad);
    Svec res(cfg, pad);
    Jmat j(cfg, pad);

    res.init(0.0, 0.0, 0.0);
    res.cpu_to_gpu();

    printf("# init spins to (%f,%f,%f)\n", init_dir.x, init_dir.y, init_dir.z);
    spins.init(init_dir);
    spins.check_norm();
    spins.cpu_to_gpu();

    printf("# init J to ident=diagonal(1,1,1)\n");
    j.init_ident();
    //printf("init J to Gaussian\n");
    //j.init_gaussian(rng);
    j.cpu_to_gpu();

    float energy = 0.0;
    energy = gpu_calc_energy_r1(spins, j);
    printf("# initial energy=%f\n", energy);

    float temp = 1.0f;
    float beta = 1/temp;

    //check_rng(rng, res, spins, j);

    size_t nsteps = NITERS;
    size_t nprints = nsteps/10;
    if(nprints == 0)    // avoid (k+2) % 0 below for runs shorter than 10 steps
        nprints = 1;
    // cast: nsteps is size_t, %d expects int
    printf("# executing %d HB steps at beta=%f\n", (int)nsteps, beta);
    for(size_t k=0; k<nsteps; k+=2) {
        // Ping-pong update: spins -> res, then res -> spins, so 'spins'
        // holds the current configuration again after each pair.
        gpu_heatbath_r1(beta, rng, res, spins, j);
        gpu_heatbath_r1(beta, rng, spins, res, j);

        if((k+2) % nprints == 0) {
            check_spins(spins);
            energy = gpu_calc_energy_r1(spins, j);
            printf("%d %f\n", (int)(k+2), energy);
            if(is_nan(energy))
                break;
        }
    }
    
    energy = gpu_calc_energy_r1(spins, j);
    printf("# final energy=%f\n", energy);
}

//-----------------------------------------------------------------------------

// Benchmark driver: build an LX*LY*LZ single-replica system (single
// precision), print the configuration, then time every entry of kernels[]
// for NITERS iterations each via test_kernel.
void benchmark_gpu_hb3d_r1()
{
    Config cfg;
    cfg.dims = D;
    cfg.lat[DIR_X] = LX;
    cfg.lat[DIR_Y] = LY;
    cfg.lat[DIR_Z] = LZ;
    cfg.cpu_precision = SinglePrecision;
    cfg.gpu_precision = SinglePrecision;

    const int niters = NITERS;

    size_t pad = 0;
    Svec spins(cfg, pad);
    Svec res(cfg, pad);
    Jmat j(cfg, pad);
    float beta = 5.0f;

    size_t nspins = cfg.lat[DIR_X]*cfg.lat[DIR_Y]*cfg.lat[DIR_Z];
    size_t n_blocks = (nspins*spins.replicas())/BLOCK_SIZE;
    printf("# %s\n", __FUNCTION__);
    // casts below: sizes/counts are size_t, printf %d/%u expect int/unsigned
    printf("# Heisenberg %dD:\n", (int)cfg.dims);
    printf("#  lattice:          %dx%dx%d\n", (int)cfg.lat[DIR_X], (int)cfg.lat[DIR_Y], (int)cfg.lat[DIR_Z]);
    printf("#  replicas:         %u\n", (unsigned)spins.replicas());
    printf("#  tot spins:        %d\n", (int)nspins);
    printf("#  precision:        %s\n", "single");
    printf("#  num iterations:   %d\n", niters);
    printf("#  thr block size:   %d\n", BLOCK_SIZE);
    printf("#  num thr blocks:   %d\n", (int)n_blocks);
    printf("#  spin vector size: %.3fKB\n", spins.m_gpu_bytes/(1000.0));
    printf("#  J matrix size:    %.3fKB\n", j.m_gpu_bytes/(1000.0));

    rng_uniform rng(BLOCK_SIZE, n_blocks);
    rng.init(0xf4543243, false); // fixed seed: reproducible benchmark runs

    //do_basic_test(cfg, rng);

    for(int i=0; kernels[i].name; ++i)
        test_kernel(beta, niters, cfg, rng, i);
}

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------

/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
