#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<curand_kernel.h>
#include <time.h>
#include <malloc.h>


/* Checks a CUDA API call's result inline; on failure prints the source
   location and makes the *enclosing* function return EXIT_FAILURE.
   NOTE(review): unused in this translation unit — initRandomWalk uses
   HANDLE_ERROR instead. */
#define CUDA_CALL(lin) do { if((lin) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)

/* Wraps a CUDA call so HandleError reports the caller's file/line. */
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

/* Reports a CUDA error (message + source location) and aborts the process.
   Does nothing when err == cudaSuccess. Invoked through HANDLE_ERROR so the
   call site's __FILE__/__LINE__ are reported. */
static void HandleError( cudaError_t err,
                         const char *file,
                         int line ) {
    if (err == cudaSuccess)
        return;

    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}


/*
void print_malha( int* matriz, int size )
{
    int i, j;
    for ( i = 0; i < size; i++ ){
        for ( j = 0; j < size; j++ ){
                printf("%5d", matriz[j + i * size] );

        }
        printf("\n");
    }
}
*/


/*
 * Initializes one curandState per thread.
 *
 * Launch: 1-D grid, one thread per particle; state must have room for
 * gridDim.x * blockDim.x entries. Every thread uses the same seed with its
 * own global thread id as the sequence number (offset 0), so each thread
 * draws an independent random stream.
 */
__global__ void setup_kernel ( curandState* state, unsigned long long seed )
{
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;

    curand_init( seed, tid, 0, &state[tid] );
}


/*
 * Random-walk kernel: one particle per thread.
 *
 * Each particle starts at cell (0,0) of a size x size grid and performs up
 * to size*size accepted random steps (up/right/down/left; moves that would
 * leave the grid are rejected and retried). Every visited cell — including
 * the starting cell — has its counter in matriz incremented atomically.
 * A particle stops early once it reaches the bottom-right cell
 * (index size*size - 1).
 *
 * Launch: 1-D grid, one thread per particle. state must hold one
 * curandState per thread, initialized by setup_kernel.
 */
__global__ void randomWalkGpu( int* matriz, const int size, curandState* state )
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int ok;
    int passo = 0;
    int maxSteps = size*size;
    int matrix_id;

    /* Keep the particle's position in registers so every thread has its own
       private, fast copy. */
    int lin = 0;
    int col = 0;

    unsigned int random_number;

    /* Copy RNG state to local memory for efficiency. */
    curandState localState = state [ id ];

    /* Count the particle at its starting cell (0,0). */
    atomicAdd ( &matriz[0], 1 );

    /* Fix: guard the degenerate 1x1 grid. There the start cell is already
       the target and no move is ever legal, so passo would never advance and
       the original loop never terminated. */
    if ( maxSteps > 1 )
    {
        while( passo < maxSteps )
        {
            random_number = curand(&localState) % 4;

            ok = 0;

            switch ( random_number ){
                case 0:
                    if ( ( lin - 1 ) >= 0 ){
                        --lin;
                        ok = 1;
                    }
                    break;
                case 1:
                    if ( ( col + 1 ) < size ){
                        ++col;
                        ok = 1;
                    }
                    break;
                case 2:
                    if ( ( lin + 1) < size ){
                        ++lin;
                        ok = 1;
                    }
                    break;
                case 3:
                    if ( ( col - 1 ) >= 0 ){
                        --col;
                        ok = 1;
                    }
                    break;
            }

            if ( ok ){
                passo++;
                matrix_id = (lin * size) + col;

                /* Concurrent particles may land on the same cell, so the
                   counter update must be atomic. */
                atomicAdd ( &matriz[matrix_id], 1) ;

                /* Reached the bottom-right target cell: stop this particle. */
                if ( matrix_id == (maxSteps - 1) )
                    break;
            }

        }
    }

    /* Fix: persist the advanced RNG state (standard cuRAND pattern) so a
       later launch reusing `state` does not replay the same sequence. */
    state [ id ] = localState;
}


/*
 * Runs the random-walk simulation on the GPU and returns the resulting
 * size x size occupancy grid.
 *
 * size       - grid edge length (the grid has size*size cells)
 * blockNum   - number of CUDA blocks to launch
 * threadsNum - threads per block; blockNum * threadsNum particles in total
 * myrank     - MPI rank; used for logging and to decorrelate RNG seeds
 *              across ranks started within the same second
 *
 * Returns a heap-allocated array of size*size ints (row-major) that the
 * caller owns and must free(). Exits the process on any allocation or
 * CUDA error.
 */
extern "C" int * initRandomWalk( int size, int blockNum, int threadsNum, int myrank )
{
    printf("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n");
    int totalParticulas = blockNum * threadsNum;

    printf("Processo %d\nRW-MPI-CUDA\nSize: %d x %d\nParticulas: %d\n\n", myrank, size, size, totalParticulas );

    /* Host buffer that receives the grid computed on the device.
       Fix: the calloc result was previously unchecked. */
    int* buffer_h = (int*) calloc ( size * size, sizeof (int) );
    if ( buffer_h == NULL ) {
        printf( "calloc failed in %s at line %d\n", __FILE__, __LINE__ );
        exit( EXIT_FAILURE );
    }

    /* Device grid, zero-initialized (cudaMemset is byte-wise; fine for 0). */
    int *devMatrix;
    HANDLE_ERROR( cudaMalloc((void**)&devMatrix, size * size * sizeof(int) ) );
    HANDLE_ERROR( cudaMemset( (void*)devMatrix, 0, size * size * sizeof(int) ) );

    /* One RNG state per particle; the seed is offset by the rank so
       different ranks draw different sequences. */
    curandState * devStates;
    HANDLE_ERROR( cudaMalloc((void **)&devStates, totalParticulas * sizeof(curandState)) );

    setup_kernel<<<blockNum, threadsNum>>>
        ( devStates, time(NULL) + myrank );
    /* Fix: kernel launches do not return errors directly — check for a bad
       launch configuration explicitly. */
    HANDLE_ERROR( cudaGetLastError() );

    randomWalkGpu <<<blockNum, threadsNum>>>
          (devMatrix, size, devStates);
    HANDLE_ERROR( cudaGetLastError() );

    /* The blocking cudaMemcpy synchronizes with the kernel, so buffer_h is
       complete on return and any in-kernel fault surfaces here. */
    HANDLE_ERROR( cudaMemcpy ( buffer_h, devMatrix, size * size * sizeof(int), cudaMemcpyDeviceToHost ) );

    printf("Processo: %d - Chegaram %d Particulas - Probabilidade: %lf\n",
        myrank, buffer_h[size * size - 1], (buffer_h[size*size-1] / (double)totalParticulas) );

    // print_malha( buffer_h, size );

    /* Fix: devStates was previously leaked. */
    cudaFree( devStates );
    cudaFree( devMatrix );

    return buffer_h;
}
