#ifndef __CUHB_HB_H__
#define __CUHB_HB_H__

/*
  Copyright (c) 2010 Davide Rossetti (davide.rossetti@roma1.infn.it)

  This file is part of CuHB (CUDA Heisenberg) package.
  
  CuHB is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  CuHB is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.
  
  You should have received a copy of the GNU General Public License
  along with CuHB.  If not, see <http://www.gnu.org/licenses/>.

*/


// CUDA thread block size (threads per block); pick exactly one.
//#define BLOCK_SIZE   32
#define BLOCK_SIZE   64
//#define BLOCK_SIZE   128
//#define BLOCK_SIZE   256

// SM 1.3 register budget note: 16K registers per SM;
// with 256 threads per block that allows 64 registers per thread.


// Compile-time selection of the cubic lattice size (LX*LY*LZ sites) and the
// iteration count; exactly one branch below should be 1. Smaller lattices get
// larger NITERS — presumably to keep total benchmark work comparable.
#if 0
#define LX           128
#define LY           128
#define LZ           128
#define NITERS       1000
#elif 0
#define LX           64
#define LY           64
#define LZ           64
#define NITERS       10000
#elif 1
#define LX           32
#define LY           32
#define LZ           32
#define NITERS       10000
#elif 0
#define LX           16
#define LY           16
#define LZ           16
#define NITERS       10000
#elif 0
#define LX           8
#define LY           8
#define LZ           8
#define NITERS       100000
#elif 0
#define LX           4
#define LY           4
#define LZ           4
#define NITERS       100000
#endif

// Bit masks isolating the z / y / x coordinate fields of a linear site index
// (index layout as in XYZ2IDX: z fastest-varying).
// Valid only when LX, LY, LZ are powers of two.
#define LZ_MSK  (LZ-1)
#define LY_MSK  ((LZ*LY-1)^LZ_MSK)
#define LX_MSK  ((LX*LY*LZ-1)^(LY_MSK|LZ_MSK))

// spatial dimensionality of the lattice
#define D            3

// direction (axis) indices, usable as array subscripts 0..D-1
#define DIR_X        0
#define DIR_Y        1
#define DIR_Z        2

// #define REP_BITS     1
// #define REPS         (1<<REP_BITS)
// #define REP_MSK      (REPS-1)

//#define BLOCK_SIZE   4
//#define NUM_BLOCKS   8
//#define L            (NUM_BLOCKS*BLOCK_SIZE)
//#define L3           (L*L*L)
//#define SUB_LAT_SIZE (BLOCK_SIZE*BLOCK_SIZE*BLOCK_SIZE)
// #define LOCAL_IDX(TX,TY,TZ) (TZ+TY*BLOCK_SIZE+TX*BLOCK_SIZE*BLOCK_SIZE)
// #define BLOCK_IDX(BX,BY,BZ) (SUB_LAT_SIZE*(bz+by*NUM_BLOCKS+bx*NUM_BLOCKS*NUM_BLOCKS))
// #define GLOBAL_IDX(TX,TY,TZ,BX,BY,BZ) ( LOCAL_IDX(TX,TY,TZ)+BLOCK_IDX(BX,BY,BZ))
// #define IDX()               (LOCAL_IDX(tx,ty,tz) + BLOCK_IDX(bx,by,bz))

// Periodic (wrap-around) index arithmetic.
// complete form: correct for any I, even I < -L
//#define THR_MOD(I, BS) ((((I)+(BS))%(BS)+(BS))%(BS))
// simplified form: correct for I >= -L
//#define MOD(I, L) (((I)+(L))%(L))
// bitmask form: correct for I >= -L, and only when L is a power of 2
// NOTE(fix): the body previously referenced lowercase 'i', silently capturing
// a call-site variable and ignoring the macro parameter I; it now uses (I),
// fully parenthesized so expressions can be passed safely.
#define MOD(I, L) (((I)+(L))&((L)-1))

// flatten (x,y,z) coordinates into a linear site index, z fastest-varying
#define XYZ2IDX(X,Y,Z) ((Z) + (Y)*LZ + (X)*(LZ*LY))

// linear thread index within its block (x fastest-varying) — device code only
#define glb_thr_id    (threadIdx.x + (threadIdx.y * blockDim.x) + (threadIdx.z * blockDim.x * blockDim.y))
// linear block index within the grid — device code only.
// NOTE(fix): the third term previously multiplied blockIdx.y again instead of
// blockIdx.z, so distinct (y,z) block pairs in a 3D grid collapsed onto the
// same id; corrected to blockIdx.z.
#define glb_block_id  (blockIdx.x + (blockIdx.y * gridDim.x) + (blockIdx.z * gridDim.x * gridDim.y))

#include "hb_utils.h"

// Floating-point precision selector, applied independently to the CPU and
// GPU sides of the computation (see struct Config).
enum Precision {
    DoublePrecision,  // 64-bit floating point
    SinglePrecision,  // 32-bit floating point
    HalfPrecision,    // 16-bit floating point
};

// Run-time configuration of the lattice and the arithmetic precision used.
struct Config {
    int dims; // number of lattice dimensions actually used, up to 4
    int lat[4]; // lattice extent along each dimension (entries past dims unused)
    Precision cpu_precision; // precision for the CPU code path
    Precision gpu_precision; // precision for the GPU kernels
};

// opaque uniform random-number-generator state; defined elsewhere
struct rng_uniform;

#include "jmat.hpp"
#include "svec.hpp"

// Benchmark drivers (implementations elsewhere; naming suggests r1/r4x2 are
// different spin-storage/update layouts — verify against the kernel sources).
void benchmark_gpu_rng();            // GPU random-number generation
void benchmark_cpu_hb3d_r1();        // CPU 3D heatbath, r1 variant
void benchmark_gpu_hb3d_r1();        // GPU 3D heatbath, r1 variant
void benchmark_gpu_hb3d_r4x2();      // GPU 3D heatbath, r4x2 variant
void benchmark_gpu_hb3d_r1_sublat(); // GPU 3D heatbath, r1 sub-lattice variant

//struct rng_uniform;
// fake kernel, no GPU kernel invocation
//void gpu_no_kernel(rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j);
// fake kernel, just to gauge the overhead
//void gpu_void_kernel(rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j);

//void gpu_calc_local_field(rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j);
//void gpu_calc_energy(rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j);
//void gpu_heatbath(rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j);
//void gpu_heatbath_r2_simple(rng_uniform &rng, float *e_res, Svec &s_res, Svec &s_src, Jmat &j);

//void gpu_heatbath_r1_sublat(rng_uniform &rng, float *res, Svec &s_res, Svec &s_src, Jmat &j);

//void gpu_heatbath_r4x2(rng_uniform &rng, float *e_res, Svec &s0, Svec &s1, Jmat &j);

// support functions
//float gpu_calc_energy_r1(Svec &s_src, Jmat &j);
//float4 gpu_calc_energy_r4(Svec &s_src, Jmat &j);
//void gpu_calc_local_field_r1(Svec &s_res, Svec &s_src, Jmat &j);
//void gpu_calc_local_field_r4(Svec &s_res, Svec &s_src, Jmat &j);

// Parallel reduction of `size` elements with the given launch configuration;
// d_idata/d_odata are presumably device pointers (d_ prefix) — verify against
// the kernel implementation.
void reduce_float_array(int size, int threads, int blocks, float *d_idata, float *d_odata);
void reduce_float4_array(int size, int threads, int blocks, float4 *d_idata, float4 *d_odata);
  
/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */

#endif // __CUHB_HB_H__

