/*
 * part_2D.cu
 */

/* TODO explain the method

 * Copyright (C) 2012 Atala Jorge Eloy Alan
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */


/*
 * Order of colors:
 * ODD Slice:         downRed,   downBlue,   downRed,   downBlue, .....
 *                    downGreen, downYellow, downGreen, downYellow, .....
 *                    downRed,   downBlue,   downRed,   downBlue, .....
 *                    downGreen, downYellow, downGreen, downYellow, .....
 *                       .     ,     .     ,    .     ,     .     , ....    
 *                       .     ,     .     ,    .     ,     .     , ....    
 *                       .     ,     .     ,    .     ,     .     , ....    
 *
 *
 * EVEN Slice:        upRed,   upBlue,   upRed,   upBlue, .....
 *                    upGreen, upYellow, upGreen, upYellow, .....
 *                    upRed,   upBlue,   upRed,   upBlue, .....
 *                    upGreen, upYellow, upGreen, upYellow, .....
 *                       .   ,     .   ,    .   ,     .   , ....    
 *                       .   ,     .   ,    .   ,     .   , ....    
 *                       .   ,     .   ,    .   ,     .   , ....    
 */

#include <iostream>
#include <fstream>
#include <sstream>
#include <cstdlib>
#include <vector>
#include <pthread.h>
#include <stddef.h>  // NULL, size_t
#include <math.h> // expf
#include <stdio.h> // printf
#include <time.h> // time
#include <sys/time.h> // gettimeofday
#include <assert.h>
#include "cutil.h" // CUDA_SAFE_CALL, CUT_CHECK_ERROR


// Default parameters

#ifndef L
#define L 1024 // linear system size
#endif

#ifndef DEPTH
#define DEPTH 8 // linear system size volume
#endif

#ifndef SAMPLES
#define SAMPLES 1 // number of samples
#endif

#ifndef TEMP
#define TEMP 0.70f // minimum temperature
#endif

#ifndef TRAN
#define TRAN 20000 // equilibration time
#endif

#ifndef TMAX
#define TMAX 10000 // measurement time
#endif

#ifndef DELTA_T
#define DELTA_T 500 // sampling period for energy and magnetization
#endif

// Functions

// maximum
#define MAX(a,b) (((a)<(b))?(b):(a))
// minimum
#define MIN(a,b) (((a)<(b))?(a):(b))
// integer ceiling division
#define DIV_CEIL(a,b) (((a)+(b)-1)/(b))
// highest power of two less than or equal to x (returns x itself when x is a power of two)
// Thanks to Pablo Dal Lago for pointing this out, a minor variation of
// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
#define ROUND_DOWN_POWER_OF_2(x) (( ( (x)>>0 | (x)>>1 | (x)>>2 | (x)>>3 | (x)>>4 | (x)>>5 | (x)>>6 | (x)>>7 | (x)>>8 | (x)>>9 | (x)>>10 | (x)>>11 | (x)>>12 | (x)>>13 | (x)>>14 | (x)>>15 | (x)>>16 | (x)>>17 | (x)>>18 | (x)>>19 | (x)>>20 ) +1 ) >> 1)

// here we cannot use __CUDA_ARCH__, since it is only available on kernels,
// simply uncomment the desired option

// Hardware parameters for GTX 280 (GT200)
//#define SHARED_PER_BLOCK 16384
//#define WARP_SIZE 32
//#define THREADS_PER_BLOCK 512
//#define BLOCKS_PER_GRID 65535
// Hardware parameters for GTX 470/480 (GF100)
//#define SHARED_PER_BLOCK 49152
//#define WARP_SIZE 32
//#define THREADS_PER_BLOCK 1024
//#define BLOCKS_PER_GRID 65535

// Hardware parameters for GTX 660 (GK104 Kepler)
#define SHARED_PER_BLOCK 49152
#define WARP_SIZE 32
#define THREADS_PER_BLOCK 1024
#define BLOCKS_PER_GRID 65535


// Auto adjusting parameters
// Block size for sumupECUDA, autoadjust to fill up max allowable threads per block
// Block size should be less than THREAD_PER_BLOCK and
// less than the SHARED_PER_BLOCK bytes of shared per block
//#define BLOCK_E_TMP (MIN(THREADS_PER_BLOCK, SHARED_PER_BLOCK/sizeof(unsigned int)))
//#define BLOCK_E (ROUND_DOWN_POWER_OF_2(BLOCK_E_TMP))
// BLOCKS_PER_GRID limits the amount of linear blocks, we divide in GRID_E
//#define GRID_E 64
// block size for sumupMCUDA, autoadjust to fill up shared memory per block
// 16 is a little slack because shared is not purely for __shared__
//#define BLOCK_M_TMP (MIN(THREADS_PER_BLOCK, (SHARED_PER_BLOCK-16)/(Q*sizeof(unsigned int))))
//#define BLOCK_M (ROUND_DOWN_POWER_OF_2(BLOCK_M_TMP))

#define BLOCK_GRAINS (L)/2

// Tweakeable parameters
#define CUDA_DEVICE 0	// card number
#define FRAME 64 //128 //256 	// the whole thing is framed for the RNG
#define TILE 16		// each block of threads is a tile
#undef DETERMINISTIC_WRITE // spin write is deterministic or probabilistic

// Internal definitions and functions
// out vector size: one point per sampling period (the commented-out +1 would also store the final point)
#define NPOINTS (TMAX/DELTA_T) //+ 1
#define N (L*L)	// system size
#define SAFE_PRIMES_FILENAME "safeprimes_base32.txt"
#define SEED (time(NULL)) // random seed
#define MICROSEC (1E-6)
#define ODD 0
#define EVEN 1


// cells are bytes
typedef unsigned int pixel;
typedef char byte;

// One measurement record taken every DELTA_T Monte-Carlo steps (see cycle()).
struct Statpoint {
	unsigned int step;   // Monte-Carlo step at which the snapshot was taken
	double volume;       // mean grain volume = total lattice sites / grain count
	unsigned int m;      // number of distinct grain labels still present
};


// Argument for one markGrains() worker: a host copy of one color sub-lattice
// and the presence mask (indexed by grain label) shared by all 8 workers.
// NOTE(review): all 8 marker threads write the SAME bit-packed
// std::vector<bool> concurrently; writes to nearby labels hit the same word
// and can lose marks (data race) — consider std::vector<char> or per-thread
// masks. TODO confirm.
struct MarkerThread
{
    const pixel* slice;
    std::vector<bool> *sliceMask;
};

// Argument for the countGrains() coordinator thread: host copies of all 8
// color sub-lattices (Down = odd slices, Up = even slices; Red/Green/Blue/
// Yellow = the four row/column parity combinations, see the header diagram),
// plus output/input slots owned by the caller's stack frame in cycle().
struct GlobalMarkerThread
{
    pixel* sliceDownRed;
    pixel* sliceDownGreen;
    pixel* sliceDownBlue;
    pixel* sliceDownYellow;
    pixel* sliceUpRed;
    pixel* sliceUpGreen;
    pixel* sliceUpBlue;
    pixel* sliceUpYellow;
    unsigned int *grains;     // out: number of distinct grain labels found
    unsigned int *fileIndex;  // in: current measurement index (PPM dump trigger)
};



static int timeval_subtract (struct timeval *result, struct timeval *x, struct timeval *y);

// we frame the grid in FRAME*FRAME
#define NUM_THREADS (FRAME*FRAME)
// state of the random number generator, last number (x_n), last carry (c_n) packed in 64 bits
__device__ static unsigned long long d_x[NUM_THREADS];
// multipliers (constants)
__device__ static unsigned int d_a[NUM_THREADS];


/***
 * Device functions
 ***/

// RNG: multiply with carry
#include "CUDAMCMLrng.cu"

#define SLICE_SIZE ((L/2) * (L/2))
#define MATRIX_SIZE ((SLICE_SIZE) * (DEPTH/2))



/*
 * One Monte-Carlo sweep over a single color sub-lattice (27-point stencil:
 * the full 26-cell 3D Moore neighbourhood plus the cell itself).
 *
 * The lattice is interleaved into 8 sub-lattices by slice (z), row (i) and
 * column (j) parity; this kernel updates `write` in place while reading the
 * other seven sub-lattices, so no cell ever reads a concurrently-written one.
 *   slicity / rowParity / columnParity : parity (ODD=0 / EVEN=1) of the
 *     written sub-lattice along z / i / j — they select the -1 or 0 offset
 *     used when mapping a neighbour into the companion sub-lattice.
 *   sameSliceRead2/3/4 : same slice parity; row-, column- and diagonal-
 *     neighbour sub-lattices respectively (see call sites in update()).
 *   otherSliceRead1..4 : the corresponding four sub-lattices of the opposite
 *     slice parity (down/up neighbours).
 *
 * Launch layout: FRAME/TILE x FRAME/TILE blocks of TILE x TILE threads; each
 * thread strides over the (L/2) x (L/2) x (DEPTH/2) sub-lattice in steps of
 * FRAME, carrying its per-thread MWC RNG state in registers.
 * All the %SLICE_SIZE / %(L/2) / %MATRIX_SIZE arithmetic implements periodic
 * boundary conditions on the halved coordinates.
 */
__global__ void updateCUDA_27points(const int slicity,
                                    const int columnParity,
                                    const int rowParity,
                                    pixel* __restrict__ const write,
                                    const pixel* __restrict__ const sameSliceRead2,//same columnParity that write
                                    const pixel* __restrict__ const sameSliceRead3,
                                    const pixel* __restrict__ const sameSliceRead4,
                                    const pixel* __restrict__ const otherSliceRead1,
                                    const pixel* __restrict__ const otherSliceRead2,
                                    const pixel* __restrict__ const otherSliceRead3,
                                    const pixel* __restrict__ const otherSliceRead4)
{
	// base coordinates of this thread inside the FRAME x FRAME thread grid
	const unsigned int jOriginal = blockIdx.x*TILE + threadIdx.x;
    const unsigned int iOriginal = blockIdx.y*TILE + threadIdx.y;
	const unsigned int tid = iOriginal * FRAME + jOriginal;
	byte h_before;           // (negated) count of equal neighbours before the move
	byte h_after;            // same, after the proposed move
	byte delta_E;            // energy change of the proposed move
	pixel spin_neigh[26];    // the 26 Moore neighbours' grain labels
	pixel spin_old;
	pixel spin_new;

	// move thread RNG state to registers. Thanks to Carlos Bederián for pointing this out.
	unsigned long long rng_state = d_x[tid];
	const unsigned int rng_const = d_a[tid];

	for (unsigned short z = 0; z<(DEPTH/2);++z)
	{
    	for (unsigned short i = iOriginal; i<(L/2); i += FRAME)
    	{
	    	for (unsigned short j = jOriginal; j<(L/2); j += FRAME) 
	    	{
	    		spin_old = write[z * SLICE_SIZE + i*(L/2) + j];

	    		// same slice, row neighbours (above / below, wrapped via %SLICE_SIZE)
	    		spin_neigh[0]  = sameSliceRead2[z * SLICE_SIZE + ((i + rowParity * (-1)) * (L/2) + SLICE_SIZE)%SLICE_SIZE + j];
	    		spin_neigh[1]  = sameSliceRead2[z * SLICE_SIZE + ((i + (0^rowParity)) * (L/2))%SLICE_SIZE + j];
    
	    		// same slice, column neighbours (left / right, wrapped via %(L/2))
  	    		spin_neigh[2]  = sameSliceRead3[z * SLICE_SIZE + i*(L/2) + ((j + columnParity * (-1)) + L/2)%(L/2)];
	    		spin_neigh[3]  = sameSliceRead3[z * SLICE_SIZE + i*(L/2) + (j + (0^columnParity) + L/2)%(L/2)];
	    		
	    		// same slice, the four diagonal neighbours
	    		spin_neigh[4]  = sameSliceRead4[z * SLICE_SIZE + 
	    		                 ((i+rowParity*(-1))*(L/2)+SLICE_SIZE)%SLICE_SIZE +
	    		                 ((j+columnParity*(-1)+L/2)%(L/2))];
	    		spin_neigh[5]  = sameSliceRead4[z * SLICE_SIZE +
	    		                 ((i+rowParity*(-1))*(L/2)+SLICE_SIZE)%SLICE_SIZE +
	    		                 (j+(0^columnParity))%(L/2)];
	    		spin_neigh[6]  = sameSliceRead4[z * SLICE_SIZE + 
	    		                 ((i+(0^rowParity))*(L/2))%SLICE_SIZE + 
	    		                 ((j + columnParity * (-1) + L/2)%(L/2))];
	    		spin_neigh[7]  = sameSliceRead4[z * SLICE_SIZE + 
	    		                 ((i+(0^rowParity))*(L/2))%SLICE_SIZE + 
	    		                 (j + (0^columnParity))%(L/2)];
	    					
	    		//Slice down and up (z wraps via %MATRIX_SIZE)
	    		
	    		//down
	    		spin_neigh[8]  = otherSliceRead1[(((z + slicity * (-1)) * SLICE_SIZE + MATRIX_SIZE)%MATRIX_SIZE) + i*(L/2) + j];
	    		//up
	    		spin_neigh[9]  = otherSliceRead1[(((z +  (0^slicity))   * SLICE_SIZE + MATRIX_SIZE)%MATRIX_SIZE) + i*(L/2) + j];
	    		
	    		//down: row neighbours in the adjacent slices
	    		spin_neigh[10] = otherSliceRead2[(((z+slicity * (-1))*SLICE_SIZE + MATRIX_SIZE)%MATRIX_SIZE)+ 
	    		                 ((i+rowParity*(-1))*(L/2)+SLICE_SIZE)%SLICE_SIZE + 
	    		                 j];
	    		spin_neigh[11] = otherSliceRead2[(((z+slicity * (-1))*SLICE_SIZE + MATRIX_SIZE)%MATRIX_SIZE) + 
	    		                 ((i+(0^rowParity))*(L/2))%SLICE_SIZE + 
	    		                 j];
	    		//up
	    		spin_neigh[12] = otherSliceRead2[(((z+(0^slicity)) * SLICE_SIZE + MATRIX_SIZE)%MATRIX_SIZE) + 
	    		                 ((i+rowParity*(-1))*(L/2)+SLICE_SIZE)%SLICE_SIZE + 
	    		                 j];
	    		spin_neigh[13] = otherSliceRead2[(((z+(0^slicity)) * SLICE_SIZE + MATRIX_SIZE)%MATRIX_SIZE) + 
	    		                 ((i+(0^rowParity))*(L/2))%SLICE_SIZE + 
	    		                 j];
    
                //down: column neighbours in the adjacent slices
  	    		spin_neigh[14] = otherSliceRead3[(((z+slicity*(-1))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE)+
  	    		                 i*(L/2) +
  	    		                 (j+columnParity*(-1)+L/2)%(L/2)];
	    		spin_neigh[15] = otherSliceRead3[(((z+slicity*(-1))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE) +
	    		                 i*(L/2) +
	    		                 (j+(0^columnParity) + L/2)%(L/2)];
                //up
  	    		spin_neigh[16] = otherSliceRead3[(((z+(0^slicity))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE) +
  	    		                 i*(L/2) +
  	    		                 (j+columnParity*(-1)+L/2)%(L/2)];
	    		spin_neigh[17] = otherSliceRead3[(((z+(0^slicity))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE) +
	    		                 i*(L/2) +
	    		                 (j+(0^columnParity) + L/2)%(L/2)];
    
	    		//down: diagonal neighbours in the adjacent slices
	    		spin_neigh[18] = otherSliceRead4[(((z+slicity*(-1))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE) +
	    		                 ((i+rowParity*(-1))*(L/2)+SLICE_SIZE)%SLICE_SIZE +
	    		                 (j+columnParity*(-1)+L/2)%(L/2)];
	    		spin_neigh[19] = otherSliceRead4[(((z+slicity*(-1))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE) +
	    		                 ((i+rowParity*(-1))*(L/2)+SLICE_SIZE)%SLICE_SIZE +
	    		                 (j+(0^columnParity)+L/2)%(L/2)];
	    		spin_neigh[20] = otherSliceRead4[(((z+slicity*(-1))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE) +
	    		                 ((i+(0^rowParity))*(L/2))%SLICE_SIZE +
	    		                 (j+ columnParity*(-1)+L/2)%(L/2)];
	    		spin_neigh[21] = otherSliceRead4[(((z+slicity*(-1))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE) +
	    		                 ((i+(0^rowParity))*(L/2))%SLICE_SIZE +
	    		                 (j+ (0^columnParity)+L/2)%(L/2)];
                //up
	    		spin_neigh[22] = otherSliceRead4[(((z+(0^slicity))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE)+
	    		                 ((i+rowParity*(-1))*(L/2)+SLICE_SIZE)%SLICE_SIZE +
	    		                 (j+columnParity*(-1)+L/2)%(L/2)];
	    		spin_neigh[23] = otherSliceRead4[(((z+(0^slicity))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE)+
	    		                 ((i+rowParity*(-1))*(L/2)+SLICE_SIZE)%SLICE_SIZE +
	    		                 (j+(0^columnParity)+L/2)%(L/2)];
	    		spin_neigh[24] = otherSliceRead4[(((z+(0^slicity))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE)+
	    		                 ((i+(0^rowParity))*(L/2))%SLICE_SIZE +
	    		                 (j+ columnParity*(-1)+L/2)%(L/2)];
	    		spin_neigh[25] = otherSliceRead4[(((z+(0^slicity))*SLICE_SIZE+MATRIX_SIZE)%MATRIX_SIZE)+
	    		                 ((i+(0^rowParity))*(L/2))%SLICE_SIZE +
	    		                 (j+ (0^columnParity)+L/2)%(L/2)];
	    		
	    		// Potts-style energy: each neighbour with the same label
	    		// contributes -1, so h is minus the number of matches.
	    		// using !(spin_old^mag_neigh.x) + ... is slightly slower
	    		h_before = -(spin_old==spin_neigh[0 ]) - (spin_old==spin_neigh[1 ]) - (spin_old==spin_neigh[2 ]) -
	    		            (spin_old==spin_neigh[3 ]) - (spin_old==spin_neigh[4 ]) - (spin_old==spin_neigh[5 ]) -
	    		            (spin_old==spin_neigh[6 ]) - (spin_old==spin_neigh[7 ]) - (spin_old==spin_neigh[8 ]) -
	    		            (spin_old==spin_neigh[9 ]) - (spin_old==spin_neigh[10]) - (spin_old==spin_neigh[11]) -
	    		            (spin_old==spin_neigh[12]) - (spin_old==spin_neigh[13]) - (spin_old==spin_neigh[14]) -
	    		            (spin_old==spin_neigh[15]) - (spin_old==spin_neigh[16]) - (spin_old==spin_neigh[17]) -
	    		            (spin_old==spin_neigh[18]) - (spin_old==spin_neigh[19]) - (spin_old==spin_neigh[20]) -
	    		            (spin_old==spin_neigh[21]) - (spin_old==spin_neigh[22]) - (spin_old==spin_neigh[23]) -
	    		            (spin_old==spin_neigh[24]) - (spin_old==spin_neigh[25]);
    
	    		// new spin: propose the label of a uniformly-chosen neighbour
	    		// (rand_MWC_co is in [0,1), so the index is in 0..25)
	    		
	    		spin_new = spin_neigh[(pixel)(/*1 + */rand_MWC_co(&rng_state, &rng_const)*26)];
	    		
                // h after taking new spin
	    		h_after  = -(spin_new==spin_neigh[0 ]) - (spin_new==spin_neigh[1 ]) - (spin_new==spin_neigh[2 ]) -
	    		            (spin_new==spin_neigh[3 ]) - (spin_new==spin_neigh[4 ]) - (spin_new==spin_neigh[5 ]) -
	    		            (spin_new==spin_neigh[6 ]) - (spin_new==spin_neigh[7 ]) - (spin_new==spin_neigh[8 ]) -
	    		            (spin_new==spin_neigh[9 ]) - (spin_new==spin_neigh[10]) - (spin_new==spin_neigh[11]) -
	    		            (spin_new==spin_neigh[12]) - (spin_new==spin_neigh[13]) - (spin_new==spin_neigh[14]) -
	    		            (spin_new==spin_neigh[15]) - (spin_new==spin_neigh[16]) - (spin_new==spin_neigh[17]) -
	    		            (spin_new==spin_neigh[18]) - (spin_new==spin_neigh[19]) - (spin_new==spin_neigh[20]) -
	    		            (spin_new==spin_neigh[21]) - (spin_new==spin_neigh[22]) - (spin_new==spin_neigh[23]) -
	    		            (spin_new==spin_neigh[24]) - (spin_new==spin_neigh[25]);

	    		//calcular j para multiplicar en el delta_E a partir del angulo del spin
	    		// (compute j to multiply into delta_E from the spin angle)
	    		// Metropolis acceptance: always accept downhill moves, accept
	    		// uphill moves with probability exp(-delta_E/TEMP).
	    		delta_E = h_after - h_before;
#ifdef DETERMINISTIC_WRITE
	    		// branch-free write: always store, selecting old or new spin
	    		byte change = delta_E<=0 || (rand_MWC_co(&rng_state, &rng_const))<=expf(-delta_E/TEMP);
	    		write[z * SLICE_SIZE + i*(L/2) + j] = (change)*spin_new + (1-change)*spin_old;
#else
	    	        if (delta_E<=0 || (rand_MWC_co(&rng_state, &rng_const))<=expf(-delta_E/TEMP)) 
	    			write[z * SLICE_SIZE + i*(L/2) + j] = spin_new;
#endif
	    	}
	    }
	}    
	d_x[tid] = rng_state; // store RNG state into global again
}

//Fill the matrix with random spins
// Seed one color sub-lattice so that every cell starts as its own grain:
// cell value = slice * MATRIX_SIZE + linear cell offset, which is unique
// across all 8 sub-lattices (slice is the sub-lattice index, 0..7).
// Launch layout: FRAME/TILE x FRAME/TILE blocks of TILE x TILE threads;
// each thread strides over the (L/2) x (L/2) x (DEPTH/2) volume by FRAME.
__global__ void fillMatrix(pixel* __restrict__ const matrix,
                           const unsigned int slice)
{
	const unsigned int col0 = blockIdx.x * TILE + threadIdx.x;
	const unsigned int row0 = blockIdx.y * TILE + threadIdx.y;
	const unsigned int base = slice * MATRIX_SIZE; // label offset of this sub-lattice

	for (unsigned int z = 0; z < (DEPTH/2); ++z) {
		for (unsigned int i = row0; i < (L/2); i += FRAME) {
			for (unsigned int j = col0; j < (L/2); j += FRAME) {
				const unsigned int cell = z * SLICE_SIZE + i * (L/2) + j;
				matrix[cell] = base + cell;
			}
		}
	}
}

/***
 * Host functions
 ***/

/*
 * One full Monte-Carlo sweep of the whole lattice: launch
 * updateCUDA_27points once per color sub-lattice (2 slice parities x 2 row
 * parities x 2 column parities = 8 launches). For each launch the other
 * seven sub-lattices are passed in a fixed geometric order (row-neighbour,
 * column-neighbour, diagonal, then the four opposite-slice counterparts),
 * so the argument permutations below encode the lattice interleaving —
 * do not reorder them.
 * All launches go to the default stream, so they execute in sequence and
 * each kernel only reads sub-lattices it does not write.
 * NOTE(review): `temp` is only asserted here; the kernel uses the
 * compile-time TEMP constant — confirm the unused parameter is intentional.
 */
static void update(const float temp, 
                   pixel* const sliceDownRed,
			       pixel* const sliceDownGreen,
			       pixel* const sliceDownBlue,
			       pixel* const sliceDownYellow,
			       pixel* const sliceUpRed,
			       pixel* const sliceUpGreen,
			       pixel* const sliceUpBlue,
			       pixel* const sliceUpYellow)
{
	dim3 dimBlock(TILE, TILE);
	dim3 dimGrid(FRAME/TILE, FRAME/TILE);
	assert(0.0f<=temp);
	assert(sliceDownRed!=NULL && sliceUpRed!=NULL &&
	       sliceDownGreen!=NULL && sliceUpGreen!=NULL &&
	       sliceDownBlue!=NULL && sliceUpBlue!=NULL &&
	       sliceDownYellow!=NULL && sliceUpYellow!=NULL);
	assert(dimBlock.x*dimBlock.y<=THREADS_PER_BLOCK);
	assert(dimGrid.x<=BLOCKS_PER_GRID && dimGrid.y<=BLOCKS_PER_GRID);

    //Slice ODD
    // rowParity ODD
    // columnParity ODD
   	updateCUDA_27points<<<dimGrid, dimBlock>>>(ODD, ODD, ODD,
                                      sliceDownRed, 
                                      sliceDownGreen,
	                                  sliceDownBlue,
                                      sliceDownYellow,
                                      sliceUpRed,
		                              sliceUpGreen,
		                              sliceUpBlue,
		                              sliceUpYellow);
   	CUT_CHECK_ERROR("Kernel updateCUDA(sliceDownRed) execution failed");

    //Slice ODD
    // rowParity ODD
    // columnParity EVEN
   	updateCUDA_27points<<<dimGrid, dimBlock>>>(ODD, ODD, EVEN,
                                      sliceDownBlue,
                                      sliceDownYellow,
                                      sliceDownRed, 
	                                  sliceDownGreen,
		                              sliceUpBlue,
                                      sliceUpYellow,
		                              sliceUpRed,
		                              sliceUpGreen);
   	CUT_CHECK_ERROR("Kernel updateCUDA(sliceDownBlue) execution failed");

    //Slice ODD
    // rowParity EVEN
    // columnParity ODD
    updateCUDA_27points<<<dimGrid, dimBlock>>>(ODD, EVEN, ODD,
                                      sliceDownGreen, 
                                      sliceDownRed,
	                                  sliceDownYellow,
                                      sliceDownBlue,
                                      sliceUpGreen,
		                              sliceUpRed,
		                              sliceUpYellow,
		                              sliceUpBlue);
   	CUT_CHECK_ERROR("Kernel updateCUDA(sliceDownGreen) execution failed");

    //Slice ODD
    // rowParity EVEN
    // columnParity EVEN
   	updateCUDA_27points<<<dimGrid, dimBlock>>>(ODD, EVEN, EVEN,
                                      sliceDownYellow, 
                                      sliceDownBlue,
	                                  sliceDownGreen,
                                      sliceDownRed,
                                      sliceUpYellow,
		                              sliceUpBlue,
		                              sliceUpGreen,
		                              sliceUpRed);
   	CUT_CHECK_ERROR("Kernel updateCUDA(sliceDownYellow) execution failed");

    //Slice EVEN
    // rowParity ODD
    // columnParity ODD    
   	updateCUDA_27points<<<dimGrid, dimBlock>>>(EVEN, ODD, ODD,
                                      sliceUpRed, 
                                      sliceUpGreen,
	                                  sliceUpBlue,
                                      sliceUpYellow,
                                      sliceDownRed,
		                              sliceDownGreen,
		                              sliceDownBlue,
		                              sliceDownYellow);
   	CUT_CHECK_ERROR("Kernel updateCUDA(sliceUpRed) execution failed");

    //Slice EVEN
    // rowParity ODD
    // columnParity EVEN
   	updateCUDA_27points<<<dimGrid, dimBlock>>>(EVEN, ODD, EVEN,
                                      sliceUpBlue,
                                      sliceUpYellow,
                                      sliceUpRed, 
	                                  sliceUpGreen,
		                              sliceDownBlue,
                                      sliceDownYellow,
		                              sliceDownRed,
		                              sliceDownGreen);
   	CUT_CHECK_ERROR("Kernel updateCUDA(sliceUpBlue) execution failed");

    //Slice EVEN
    // rowParity EVEN
    // columnParity ODD
   	updateCUDA_27points<<<dimGrid, dimBlock>>>(EVEN, EVEN, ODD,
                                      sliceUpGreen, 
                                      sliceUpRed,
	                                  sliceUpYellow,
                                      sliceUpBlue,
                                      sliceDownGreen,
		                              sliceDownRed,
		                              sliceDownYellow,
		                              sliceDownBlue);
   	CUT_CHECK_ERROR("Kernel updateCUDA(sliceUpGreen) execution failed");

    //Slice EVEN
    // rowParity EVEN
    // columnParity EVEN
   	updateCUDA_27points<<<dimGrid, dimBlock>>>(EVEN, EVEN, EVEN,
                                      sliceUpYellow, 
                                      sliceUpBlue,
	                                  sliceUpGreen,
                                      sliceUpRed,
                                      sliceDownYellow,
		                              sliceDownBlue,
		                              sliceDownGreen,
		                              sliceDownRed);
   	CUT_CHECK_ERROR("Kernel updateCUDA(sliceUpYellow) execution failed");

}

/*
 * pthread worker: mark every grain label occurring in one host-side
 * sub-lattice copy as present in the shared label mask.
 * `data` points at a MarkerThread; always returns NULL.
 * NOTE(review): all 8 workers write the same bit-packed std::vector<bool>
 * concurrently; the read-modify-write on shared words is a data race and
 * can drop marks — consider std::vector<char> or per-thread masks.
 */
void *markGrains(void* data)
{
    struct MarkerThread *myData = (struct MarkerThread*) data; 
    const pixel* const slice = myData->slice;

    // each cell holds a grain label; set that label's presence bit
    for(unsigned int i = 0;i<MATRIX_SIZE;++i)
        (*(myData->sliceMask))[slice[i]] = true;

    return NULL;
}            

/*
 * pthread coordinator: count the distinct grain labels present across all 8
 * host-side sub-lattice copies.
 *  - spawns 8 markGrains() workers, one per sub-lattice, that set presence
 *    bits in a shared label mask (see the race NOTE on MarkerThread);
 *  - while they run, on the final measurement (*fileIndex == NPOINTS-1)
 *    dumps the odd-slice z=0 plane as an ASCII PPM ("matrix.ppm"), mapping
 *    each label to a pseudo-colour via three multiplicative hashes;
 *  - joins the workers and stores the popcount of the mask in *grains.
 * `data` points at a GlobalMarkerThread; always returns NULL.
 */
void *countGrains(void * data)
{
    struct GlobalMarkerThread *myData = (struct GlobalMarkerThread*) data; 
    // one presence bit per possible grain label (labels are initial cell ids)
    std::vector<bool> sliceMask(L * L * DEPTH,false);

    pthread_t t[8];
    struct MarkerThread slices[8];
    //slices = (MarkerThread*) malloc(8 * sizeof(MarkerThread));

    slices[0].slice = myData->sliceDownRed;
    slices[1].slice = myData->sliceDownGreen;
    slices[2].slice = myData->sliceDownBlue;
    slices[3].slice = myData->sliceDownYellow;
    slices[4].slice = myData->sliceUpRed;
    slices[5].slice = myData->sliceUpGreen;
    slices[6].slice = myData->sliceUpBlue;
    slices[7].slice = myData->sliceUpYellow;

     
    for(unsigned int i = 0; i<8; ++i)
    {
        slices[i].sliceMask = &sliceMask;
        pthread_create(&t[i], NULL, markGrains,(void*) &slices[i]);
    }

    //While grains count is calculated, save the matrix in a file.
    //std::stringstream fileName;
    //fileName << *(myData->fileIndex);
    //(char*)(&(fileName.str()))

    // only dump the image on the last measurement point
    if((*(myData->fileIndex))== (NPOINTS - 1)){
    std::ofstream outdata; // outdata is like cin
    outdata.open("matrix.ppm");//  + (char*)(&(fileName.str())) + ".ppm"); // opens the file
    if( !outdata ) 
    { // file couldn't be opened
        std::cerr << "Error: file could not be opened" << std::endl;
        exit(1);
    }
    
    // ASCII PPM header: L x L pixels, 8-bit channels
    outdata << "P3" << std::endl;
    outdata << L << " " << L << std::endl;
    outdata << "255" << std::endl;
    // label -> RGB hash multipliers
    // NOTE(review): %255 never produces value 255; %256 would use the full
    // channel range — confirm whether this is intentional.
    unsigned int factor1 = 13;
    unsigned int factor2 = 17;
    unsigned int factor3 = 11;
        
    // interleave Red/Blue columns on even rows and Green/Yellow on odd rows,
    // reconstructing the odd-slice checkerboard layout from the file header
    for (unsigned int i=0; i<(L/2); ++i)
    {
        for (unsigned int j=0; j<(L/2); ++j)
        {    
            outdata << (myData->sliceDownRed[i*(L/2)+j] * factor1)%255
            << " "  << (myData->sliceDownRed[i*(L/2)+j] * factor2)%255
            << " "  << (myData->sliceDownRed[i*(L/2)+j] * factor3)%255  << std::endl;
            outdata << (myData->sliceDownBlue[i*(L/2)+j] * factor1)%255
            << " "  << (myData->sliceDownBlue[i*(L/2)+j] * factor2)%255
            << " "  << (myData->sliceDownBlue[i*(L/2)+j] * factor3)%255<< std::endl;
        }
        for (unsigned int j=0; j<(L/2); ++j)
        {    
            outdata << (myData->sliceDownGreen[i*(L/2)+j] * factor1)%255
            << " "  << (myData->sliceDownGreen[i*(L/2)+j] * factor2)%255
            << " "  << (myData->sliceDownGreen[i*(L/2)+j] * factor3)%255<< std::endl;
            outdata << (myData->sliceDownYellow[i*(L/2)+j] * factor1)%255
            << " "  << (myData->sliceDownYellow[i*(L/2)+j] * factor2)%255
            << " "  << (myData->sliceDownYellow[i*(L/2)+j] * factor3)%255<< std::endl;
        }
    }

/*    for (unsigned int i=0; i<(L/2); ++i)
    {
        for (unsigned int j=0; j<(L/2); ++j)
        {    
            outdata << (myData->sliceUpRed[i*(L/2)+j] * factor1)%255
            << " "  << (myData->sliceUpRed[i*(L/2)+j] * factor2)%255
            << " "  << (myData->sliceUpRed[i*(L/2)+j] * factor3)%255<< std::endl;
            outdata << (myData->sliceUpBlue[i*(L/2)+j] * factor1)%255
            << " "  << (myData->sliceUpBlue[i*(L/2)+j] * factor2)%255
            << " "  << (myData->sliceUpBlue[i*(L/2)+j] * factor3)%255<< std::endl;
        }
        for (unsigned int j=0; j<(L/2); ++j)
        {    
            outdata << (myData->sliceUpGreen[i*(L/2)+j] * factor1)%255
            << " "  << (myData->sliceUpGreen[i*(L/2)+j] * factor2)%255
            << " "  << (myData->sliceUpGreen[i*(L/2)+j] * factor3)%255<< std::endl;
            outdata << (myData->sliceUpYellow[i*(L/2)+j] * factor1)%255
            << " "  << (myData->sliceUpYellow[i*(L/2)+j] * factor2)%255
            << " "  << (myData->sliceUpYellow[i*(L/2)+j] * factor3)%255 << std::endl;
        }
    }
*/
	outdata.close();    
    }
    //Merge the counts of the 8 threads
    for(unsigned int i = 0; i<8; ++i)
        pthread_join(t[i], NULL);

    
    // popcount of the mask = number of surviving grains
    unsigned int tempCounter = 0;

    for(unsigned int i = 0;i<(L * L * DEPTH);++i)
        tempCounter+= sliceMask[i];

    *(myData->grains) = tempCounter;    
    
    return NULL;
}

/*
 * Run the full simulation at fixed temperature `temp`:
 *  - equilibrate for TRAN sweeps;
 *  - then, for TMAX sweeps, every `calc_step` sweeps snapshot the 8 device
 *    sub-lattices into host staging buffers and count grains on a separate
 *    pthread while the GPU keeps sweeping (copy/count overlap);
 *  - each completed count is recorded into stats[] (at most NPOINTS entries).
 * The device pointers must be valid MATRIX_SIZE-element buffers; `stats`
 * must hold NPOINTS entries. The counter thread writes through pointers into
 * this function's stack frame, so it is always joined before returning.
 */
static void cycle(pixel* const sliceDownRed,
			      pixel* const sliceDownGreen,
			      pixel* const sliceDownBlue,
			      pixel* const sliceDownYellow,
			      pixel* const sliceUpRed,
			      pixel* const sliceUpGreen,
			      pixel* const sliceUpBlue,
			      pixel* const sliceUpYellow,
		          const double temp, const unsigned int calc_step,
		          struct Statpoint stats[]) 
{
	unsigned int index = 0;              // next free slot in stats[]
	struct GlobalMarkerThread slices;    // arguments shared with the counter thread
	pthread_t t;                         // the in-flight countGrains() thread
	unsigned int matrixGrains = 0;       // result slot written by countGrains()
	size_t size = MATRIX_SIZE * sizeof(pixel);

	slices.grains = &matrixGrains;
	slices.fileIndex = &index;

	// host staging buffers, one per color sub-lattice
	slices.sliceDownRed = (pixel *) malloc(size);
	slices.sliceDownGreen = (pixel *) malloc(size);
	slices.sliceDownBlue = (pixel *) malloc(size);
	slices.sliceDownYellow = (pixel *) malloc(size);
	slices.sliceUpRed = (pixel *) malloc(size);
	slices.sliceUpGreen = (pixel *) malloc(size);
	slices.sliceUpBlue = (pixel *) malloc(size);
	slices.sliceUpYellow = (pixel *) malloc(size);

	assert(sliceDownRed!=NULL && sliceUpRed!=NULL &&
	       sliceDownGreen!=NULL && sliceUpGreen!=NULL &&
	       sliceDownBlue!=NULL && sliceUpBlue!=NULL &&
	       sliceDownYellow!=NULL && sliceUpYellow!=NULL);
	// also verify the host allocations succeeded (original only checked the
	// device pointers)
	assert(slices.sliceDownRed!=NULL && slices.sliceUpRed!=NULL &&
	       slices.sliceDownGreen!=NULL && slices.sliceUpGreen!=NULL &&
	       slices.sliceDownBlue!=NULL && slices.sliceUpBlue!=NULL &&
	       slices.sliceDownYellow!=NULL && slices.sliceUpYellow!=NULL);

	// equilibrium phase
	for (unsigned int step=0; step<TRAN; ++step) 
	{
		update(temp,
               sliceDownRed, 
               sliceDownGreen,
	           sliceDownBlue,
               sliceDownYellow,
               sliceUpRed,
	           sliceUpGreen,
	           sliceUpBlue,
	           sliceUpYellow);
	}

	// measurement phase: take the initial snapshot and start counting it
	CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceDownRed, sliceDownRed, size, cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceDownGreen, sliceDownGreen, size, cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceDownBlue, sliceDownBlue, size, cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceDownYellow, sliceDownYellow, size, cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceUpRed, sliceUpRed, size, cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceUpGreen, sliceUpGreen, size, cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceUpBlue, sliceUpBlue, size, cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceUpYellow, sliceUpYellow, size, cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaThreadSynchronize());

	pthread_create(&t, NULL, countGrains, (void*) &slices);

	for (unsigned int step=0; step<=TMAX; ++step) 
	{
		update(temp,
               sliceDownRed, 
               sliceDownGreen,
	           sliceDownBlue,
               sliceDownYellow,
               sliceUpRed,
	           sliceUpGreen,
	           sliceUpBlue,
	           sliceUpYellow);

		if ((step+1)%calc_step==0) 
		{
			// collect the count of the PREVIOUS snapshot
			pthread_join(t, NULL);
			assert(index<NPOINTS);
			// NOTE(review): the -99 offset is kept from the original code;
			// confirm its intent before relying on stats[].step.
			stats[index].step = step - 99;
			stats[index].volume = (float)(L*L*DEPTH)/(matrixGrains);
			stats[index].m = (matrixGrains);
			++index;

			// snapshot the current state and count it in the background
			CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceDownRed, sliceDownRed, size, cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceDownGreen, sliceDownGreen, size, cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceDownBlue, sliceDownBlue, size, cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceDownYellow, sliceDownYellow, size, cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceUpRed, sliceUpRed, size, cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceUpGreen, sliceUpGreen, size, cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceUpBlue, sliceUpBlue, size, cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL(cudaMemcpyAsync(slices.sliceUpYellow, sliceUpYellow, size, cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL(cudaThreadSynchronize());

			pthread_create(&t, NULL, countGrains, (void*) &slices);
		}
	}

	// BUGFIX: the thread created on the last in-loop trigger was never joined,
	// so the function could return while it still dereferenced pointers into
	// this stack frame (use-after-return). Its result is intentionally
	// discarded: stats[] is already full (index == NPOINTS).
	pthread_join(t, NULL);

	// BUGFIX: the staging buffers were leaked
	free(slices.sliceDownRed);
	free(slices.sliceDownGreen);
	free(slices.sliceDownBlue);
	free(slices.sliceDownYellow);
	free(slices.sliceUpRed);
	free(slices.sliceUpGreen);
	free(slices.sliceUpBlue);
	free(slices.sliceUpYellow);
}


/*
 * Run one complete sample: seed each of the 8 color sub-lattices with
 * globally-unique grain labels (fillMatrix, one stream per sub-lattice so
 * the fills can overlap), then run the measurement cycle at TEMP, writing
 * NPOINTS records into stat[].
 */
static void sample(pixel* const sliceDownRed,
			       pixel* const sliceDownGreen,
			       pixel* const sliceDownBlue,
			       pixel* const sliceDownYellow,
			       pixel* const sliceUpRed,
			       pixel* const sliceUpGreen,
			       pixel* const sliceUpBlue,
			       pixel* const sliceUpYellow,
			       struct Statpoint stat[])
{
	assert(sliceDownRed!=NULL && sliceUpRed!=NULL &&
	       sliceDownGreen!=NULL && sliceUpGreen!=NULL &&
	       sliceDownBlue!=NULL && sliceUpBlue!=NULL &&
	       sliceDownYellow!=NULL && sliceUpYellow!=NULL);

	// initialize the device matrices: every cell is its own grain
	dim3 dimBlock(TILE, TILE);
	dim3 dimGrid(FRAME/TILE, FRAME/TILE);
	cudaStream_t stream[8];
	// check the CUDA calls, consistent with the error handling in update()
	for (unsigned int it = 0; it < 8; ++it)
		CUDA_SAFE_CALL(cudaStreamCreate(&stream[it]));

	fillMatrix<<<dimGrid, dimBlock, 0, stream[0]>>>(sliceDownRed, 0);
	fillMatrix<<<dimGrid, dimBlock, 0, stream[1]>>>(sliceDownGreen, 1);
	fillMatrix<<<dimGrid, dimBlock, 0, stream[2]>>>(sliceDownBlue, 2);
	fillMatrix<<<dimGrid, dimBlock, 0, stream[3]>>>(sliceDownYellow, 3);
	fillMatrix<<<dimGrid, dimBlock, 0, stream[4]>>>(sliceUpRed, 4);
	fillMatrix<<<dimGrid, dimBlock, 0, stream[5]>>>(sliceUpGreen, 5);
	fillMatrix<<<dimGrid, dimBlock, 0, stream[6]>>>(sliceUpBlue, 6);
	fillMatrix<<<dimGrid, dimBlock, 0, stream[7]>>>(sliceUpYellow, 7);
	CUT_CHECK_ERROR("Kernel fillMatrix execution failed");

	// cudaStreamDestroy() returns immediately but pending work still runs to
	// completion; the legacy default-stream launches in cycle() serialize
	// behind it, so no explicit synchronization is required here.
	for (unsigned int it = 0; it < 8; ++it)
		CUDA_SAFE_CALL(cudaStreamDestroy(stream[it]));

	// measurement cycle at fixed temperature
	cycle(sliceDownRed, 
	      sliceDownGreen,
	      sliceDownBlue,
		  sliceDownYellow,
		  sliceUpRed,
		  sliceUpGreen,
		  sliceUpBlue,
		  sliceUpYellow,
	      TEMP, DELTA_T,
	      stat);
}


/*
 * Initialize one RNG state per GPU thread on the host and upload the
 * state arrays to the device constant symbols d_x and d_a.
 *
 * Returns init_RNG's status: 0 on success, non-zero on failure (in which
 * case nothing is copied to the device).
 */
static int ConfigureRandomNumbers(void) {
	/* Host-side staging buffers for the per-thread RNG state. */
	unsigned long long xInit[NUM_THREADS];
	unsigned int aInit[NUM_THREADS];

	/* Seed and initialize the generators from the safe-primes file. */
	const int status = init_RNG(xInit, aInit, NUM_THREADS,
	                            SAFE_PRIMES_FILENAME,
	                            (unsigned long long) SEED);
	if (status)
		return status;

	const size_t bytesX = NUM_THREADS * sizeof(unsigned long long);
	const size_t bytesA = NUM_THREADS * sizeof(unsigned int);
	assert(bytesA < bytesX);
	CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_x, xInit, bytesX));
	CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_a, aInit, bytesA));

	return status;
}

/*
 * Entry point: allocate the device lattice, configure the device RNGs,
 * run SAMPLES independent samples, and print the collected statistics.
 */
int main(int argc, char ** argv)
{
	(void) argc; /* no command-line options: all parameters are compile-time */
	(void) argv;

	/* The lattice: one device buffer per checkerboard color
	 * (down/up x red/green/blue/yellow -- see the layout comment at the
	 * top of the file).  Kept in an array so allocation and cleanup can
	 * be looped; order matches sample()'s parameter order. */
	pixel* slice[8] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

	// the stats
	struct Statpoint stat[NPOINTS] = { {0.0, 0.0, 0} };

	double secs = 0.0;
	struct timeval start = {0L,0L}, end = {0L,0L}, elapsed = {0L,0L};

	// parameters checking
	assert(0<DELTA_T && DELTA_T<=TMAX); // at least one calculate()
	assert(TMAX%DELTA_T==0); // take equidistant calculate()
	assert(L%2==0); // we can halve height
	//assert(512<=FRAME); // TODO: get rid of this
	assert(__builtin_popcount(FRAME)==1); // FRAME=2^k
	assert(L%FRAME==0); // we can frame the grid
	assert(FRAME%2==0); // frames could be halved
	assert((FRAME/2)%TILE==0); // half-frames could be tiled
	//assert(MATRIX_SIZE*4L<UINT_MAX); // max energy, that is all spins are the same, fits into a ulong
	assert(L<USHRT_MAX); // do not overflow the representation
	assert(DEPTH<USHRT_MAX); // do not overflow the representation

	// set the GPGPU computing device
	//CUDA_SAFE_CALL(cudaSetDevice(CUDA_DEVICE));

	const size_t size = MATRIX_SIZE * sizeof(pixel);
	for (unsigned int i = 0; i < 8; ++i)
		CUDA_SAFE_CALL(cudaMalloc((void**) &slice[i], size));

	// print header
	printf("# L: %i\n", L);
	printf("# Depth: %i\n", DEPTH);
	printf("# Number of Samples: %i\n", SAMPLES);
	printf("# Temperature: %f\n", TEMP);
	printf("# Equilibration Time: %i\n", TRAN);
	printf("# Measurement Time: %i\n", TMAX);
	printf("# Data Acquiring Step: %i\n", DELTA_T);
	printf("# Number of Points: %i\n", NPOINTS);

	// start timer
	assert(gettimeofday(&start, NULL)==0);

	if (ConfigureRandomNumbers()) {
		/* Fix: release device memory on the error path too
		 * (previously leaked). */
		for (unsigned int i = 0; i < 8; ++i)
			CUDA_SAFE_CALL(cudaFree(slice[i]));
		return 1;
	}

	// stop timer
	assert(gettimeofday(&end, NULL)==0);
	assert(timeval_subtract(&elapsed, &end, &start)==0);
	secs = (double)elapsed.tv_sec + ((double)elapsed.tv_usec*MICROSEC);
	printf("# Configure RNG Time (sec): %lf\n", secs);

	// start timer
	assert(gettimeofday(&start, NULL)==0);

	for (unsigned int i=0; i<SAMPLES; ++i) {
		sample(slice[0], slice[1], slice[2], slice[3],
		       slice[4], slice[5], slice[6], slice[7],
		       stat);
	}

	// stop timer
	/* Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
	 * is the documented drop-in replacement. */
	CUDA_SAFE_CALL(cudaDeviceSynchronize()); // ensure all device work is done
	assert(gettimeofday(&end, NULL)==0);
	assert(timeval_subtract(&elapsed, &end, &start)==0);
	secs = (double)elapsed.tv_sec + ((double)elapsed.tv_usec*MICROSEC);
	printf("# Total Simulation Time (sec): %lf\n", secs);

	printf("# Step\tVolume\t\t\tmatrixGrains\n");
	for (unsigned int i=0; i<NPOINTS; i++) {
		printf ("%u\t%.10lf\t\t\t%u\t\n",/*%.10lf\t%.10lf\t%.10lf\t%.10lf\n",*/
			stat[i].step,
			stat[i].volume,
			stat[i].m);
	}

	/* Fix: the original leaked all eight device buffers at exit. */
	for (unsigned int i = 0; i < 8; ++i)
		CUDA_SAFE_CALL(cudaFree(slice[i]));

	return 0;
}


/*
 * Compute *result = *x - *y for `struct timeval` values.
 * Based on the GNU libc manual's "Elapsed Time" example:
 * http://www.gnu.org/software/libtool/manual/libc/Elapsed-Time.html
 *
 * NOTE: *y is normalized in place as a side effect of the computation.
 * Returns 1 if the difference is negative, 0 otherwise.
 */
static int timeval_subtract (struct timeval *result, struct timeval *x, struct timeval *y) {
	/* Borrow from y's seconds so that x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec) {
		const int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_sec  += borrow;
		y->tv_usec -= 1000000 * borrow;
	}

	/* Carry any excess microseconds into y's seconds. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		const int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_sec  -= carry;
		y->tv_usec += 1000000 * carry;
	}

	/* After normalization the microsecond difference is non-negative. */
	result->tv_sec  = x->tv_sec  - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	/* Negative iff the normalized seconds still compare smaller. */
	return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
