#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cutil.h"
#include "ftocmacros.h"
#define TRUE 1
#define FALSE 0

__global__ void kernel_loop1(double *umaru, float *umc, float *ump, float *aru, float chic, float chip, int bx, int by){
  // Computes umaru(i,j,k) = (chic*umc(i,j,k) + chip*ump(i,j,k)) * aru(i,j,k)
  // for the full k-slabs 1..gridDim.x of a (bx, by, nslab) Fortran-ordered
  // view of the arrays (FTNREF3D indexing, 1-based).
  //
  // Launch contract: blockDim = (bx, by), gridDim.x = number of full slabs.
  // Any partial trailing slab is handled by kernel_loop1_remain.
  //
  // Fix over the previous version: each block now handles exactly one slab
  // (k = blockIdx.x + 1).  Before, every block redundantly wrote the k = 1
  // slab — a cross-block data race between the unconditional 0.0 store and
  // another block's computed store — and the last block wrote slab
  // k = gridDim.x + 1, which lies outside the region owned by this launch
  // (it overlaps the remainder slab, or runs past the end of the arrays).

  int i = threadIdx.x + 1;   // 1-based index in the leading dimension
  int j = threadIdx.y + 1;   // 1-based index within the slab
  int k = blockIdx.x + 1;    // one k-slab per block

  umaru[FTNREF3D(i,j,k,bx,by,1,1,1)] = 0.0;

  // The first slab additionally excludes the j = 1 boundary row
  // (matching the original k = 1 special case).
  if (i < bx && (k > 1 || j > 1)) {
     umaru[FTNREF3D(i,j,k,bx,by,1,1,1)] = (chic * umc[FTNREF3D(i,j,k,bx,by,1,1,1)] + chip * ump[FTNREF3D(i,j,k,bx,by,1,1,1)]) * aru[FTNREF3D(i,j,k,bx,by,1,1,1)];
  }
  return;
}


__global__ void kernel_loop1_remain(double *umaru, float *umc, float *ump, float *aru, float chic, float chip, int bx, int by, int gx){
  // Processes the final, partial k-slab (k = gx + 1) left over when the
  // second array dimension is not a multiple of by.  Expected launch:
  // a single block with blockDim = (bx, remainder rows).
  const int i = threadIdx.x + 1;   // 1-based index in the leading dimension
  const int j = threadIdx.y + 1;   // 1-based index within the slab
  const int k = gx + 1;            // the one remainder slab
  const int idx = FTNREF3D(i,j,k,bx,by,1,1,1);

  // Boundary column i == bx stays zeroed; interior columns get the
  // weighted combination scaled by aru.
  double result = 0.0;
  if (i < bx) {
     result = (chic * umc[idx] + chip * ump[idx]) * aru[idx];
  }
  umaru[idx] = result;
}

extern "C" void loop1_(double *umaru, float *umc, float *ump, float *aru, float *chic, float *chip, int *mza, int *mua){
    // Fortran-callable driver for loop1: computes
    //   umaru = (chic*umc + chip*ump) * aru
    // over (mza x mua) arrays on the GPU using zero-copy mapped host memory.
    //
    // Preconditions (NOTE(review): confirm against the allocating code):
    //  - umaru/umc/ump/aru point to mapped pinned host memory (allocated
    //    with cudaHostAlloc(..., cudaHostAllocMapped) or registered with
    //    cudaHostRegisterMapped) — otherwise cudaHostGetDevicePointer
    //    fails and the device aliases are invalid;
    //  - cudaSetDeviceFlags(cudaDeviceMapHost) was called at startup;
    //  - *mza <= 512, so by below is nonzero (avoids division by zero).
    //
    // Fixes over the previous version:
    //  - removed the four cudaHostAlloc calls whose results were
    //    immediately overwritten by cudaHostGetDevicePointer into the same
    //    pointer variable (pure per-call memory leaks);
    //  - removed the matching cudaFreeHost calls, which were applied to
    //    the device aliases of the *caller's* arrays, not to the leaked
    //    allocations — under unified addressing that frees memory still
    //    in use by the caller;
    //  - added a synchronize before returning: kernel launches are
    //    asynchronous and the caller reads umaru right after this returns.

    float chic_gpu = *chic;
    float chip_gpu = *chip;

    int bx = *mza;           // leading (fastest-varying) dimension
    int dim2 = *mua;         // second dimension
    int by;
    int gx;
    by = 512 / bx;           // rows of dim2 per block (keeps <= 512 threads/block)
    gx = dim2 / by;          // number of full (bx x by) slabs

    dim3 threadsPerBlock(bx, by);
    dim3 blocksPerGrid(gx);

    // Translate the caller's mapped host pointers into their device aliases.
    double *umaru_gpu;
    cudaHostGetDevicePointer( (void **)&umaru_gpu, umaru, 0 );

    float *umc_gpu;
    cudaHostGetDevicePointer( (void **)&umc_gpu, umc, 0 );

    float *ump_gpu;
    cudaHostGetDevicePointer( (void **)&ump_gpu, ump, 0 );

    float *aru_gpu;
    cudaHostGetDevicePointer( (void **)&aru_gpu, aru, 0 );

    kernel_loop1<<< blocksPerGrid, threadsPerBlock >>>(umaru_gpu, umc_gpu, ump_gpu, aru_gpu, chic_gpu, chip_gpu, bx, by);
    // check if kernel execution generated an error
    CUT_CHECK_ERROR("Kernel execution failed");

    // Rows left over when by does not divide dim2 evenly: one extra
    // single-block launch covering the partial slab k = gx + 1.
    int remain_by = dim2 - gx*by;

    if ( remain_by != 0 ) {
       dim3 threadsPerBlock_remain(bx,remain_by);
       dim3 blocksPerGrid_remain(1);

       kernel_loop1_remain<<< blocksPerGrid_remain, threadsPerBlock_remain >>>(umaru_gpu, umc_gpu, ump_gpu, aru_gpu, chic_gpu, chip_gpu, bx, by, gx);
       // check if kernel execution generated an error
       CUT_CHECK_ERROR("Kernel execution failed");
    }

    // Zero-copy writes only become visible to the host once the kernels
    // have finished; block here so the Fortran caller sees valid umaru.
    cudaThreadSynchronize();
}


