/* 
 * Solves the Aliev-Panfilov model using an explicit numerical scheme.
 * Based on code originally provided by Xing Cai, Simula Research Laboratory
 * 
 * Modified and  restructured by Scott B. Baden, UCSD
 * Modified by Didem Unat, UCSD
 */

#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string>
#include <math.h>
#include "time.h"
#include "utils.h"
#include "apf.h"
#include "types.h"
#include <stdio.h>
using namespace std;


// Collect the boundary data.
// Mirrors the outermost computed rows/columns of Eprev into the ghost
// cells two positions away (the 2-cell ghost layout this kernel assumes —
// note the sibling `sweep` kernel uses a 1-cell mirror instead).
// Expects a 2D launch covering the mesh; `width` is the padded row pitch.
__global__ void boundariesY(_DOUBLE_ *Eprev, const int n, const int m, int width)
{
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;

  // Guard: the grid is launched with ceil-division and may overshoot the
  // mesh. Without this, a thread with gidx == 2 but gidy past the last
  // padded row would write outside the allocation below.
  if (gidx > n + 1 || gidy > m + 1)
    return;

  int index = gidy * width + gidx;

  // Mirror the left/right edge columns into the ghost columns.
  if (gidx == 2)
    Eprev[index - 2] = Eprev[index];
  if (gidx == n)
    Eprev[index + 2] = Eprev[index];

  // Mirror the top/bottom edge rows into the ghost rows.
  if (gidy == 2)
    Eprev[index - 2 * width] = Eprev[index];
  if (gidy == m)
    Eprev[index + 2 * width] = Eprev[index];
}
// Mirror the left/right mesh edges of Eprev into the ghost columns,
// two cells outward. Assumes a row pitch of n + 3 (the padded layout
// used by the host arrays). Expects a 2D launch covering the mesh.
__global__ void boundariesX(_DOUBLE_ *Eprev, const int n, const int m)
{
  const int width = n + 3;

  // Global coordinates, offset by one so thread (0,0) maps to mesh (1,1).
  int gidx = (threadIdx.x + 1) + blockDim.x * blockIdx.x;
  int gidy = (threadIdx.y + 1) + blockDim.y * blockIdx.y;

  // Discard threads that fall outside the computed mesh (ceil-div launch).
  if (gidx > n + 1 || gidy > m + 1)
    return;

  int index = gidy * width + gidx;

  // Copy the outermost computed columns into the ghost columns.
  if (gidx == 2)
    Eprev[index - 2] = Eprev[index];
  if (gidx == n)
    Eprev[index + 2] = Eprev[index];
}

// One explicit time step of the Aliev-Panfilov PDE + ODE system.
//
// Expects a 2D launch covering at least (n+2) x (m+2) threads; `width`
// is the padded row pitch (one ghost cell on each side of the computed
// region [1, n+1] x [1, m+1]).
//
// Boundary handling: each edge thread first mirrors its interior
// neighbor into the adjacent ghost cell, then reads that same ghost
// back in its own stencil. The write and the read are performed by the
// same thread, so no block or grid synchronization is required.
//
// kk, a, b, epsilon, M1, M2 are model constants supplied by the build
// (see apf.h); BLOCKDIM_X/BLOCKDIM_Y fix the block shape.
__global__ void sweep(_DOUBLE_ *E, _DOUBLE_ *Eprev, _DOUBLE_ *R,
                      const _DOUBLE_ alpha, const int n, const int m,
                      int width, const _DOUBLE_ dt)
{
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;

  // Guard: the grid is launched with ceil-division and may overshoot the
  // mesh. Without this, a thread with gidx == 1 but gidy beyond m+1
  // would mirror into an out-of-bounds row of Eprev.
  if (gidx > n + 1 || gidy > m + 1)
    return;

  int index = gidy * width + gidx;

  // Mirror (no-flux) boundary conditions into the ghost cells.
  if (gidx == 1)
    Eprev[index - 1] = Eprev[index + 1];
  if (gidx == n + 1)
    Eprev[index + 1] = Eprev[index - 1];
  if (gidy == 1)
    Eprev[index - width] = Eprev[index + width];
  if (gidy == m + 1)
    Eprev[index + width] = Eprev[index - width];

  // Interior update. The upper bounds are already enforced by the early
  // return above, so only the lower bounds need checking here.
  if (gidx >= 1 && gidy >= 1) {
    // Diffusion term: 5-point Laplacian of the previous excitation field.
    // A plain register temporary replaces the earlier per-thread
    // __shared__ scratch slot: no thread ever read another thread's
    // element, so shared memory bought nothing here.
    _DOUBLE_ e = Eprev[index] +
                 alpha * (Eprev[index + 1] + Eprev[index - 1] -
                          4 * Eprev[index] +
                          Eprev[index + width] + Eprev[index - width]);

    // Reaction terms of the Aliev-Panfilov model: excitation ODE first
    // (note R is read once, before E is advanced), then recovery ODE
    // using the freshly advanced excitation value.
    _DOUBLE_ r = R[index];
    e += -dt * (kk * e * (e - a) * (e - 1) + e * r);

    E[index] = e;
    R[index] += dt * (epsilon + M1 * r / (e + M2)) *
                (-r - kk * e * (e - b - 1));
  }
}

/*
 * solve: advance the Aliev-Panfilov system from t = 0 to t = T on the
 * GPU, launching one `sweep` kernel per explicit time step.
 *
 * _E, _E_prev : host 2-D arrays (row-pointer tables); on return they are
 *               refreshed from the device (see the NOTE on swap order).
 * R           : host 2-D recovery-variable array.
 * m, n        : mesh dimensions; width is the padded row pitch
 *               (presumably n + 3, matching boundariesX — TODO confirm
 *               against the caller).
 * T, alpha, dt: simulation end time, diffusion coefficient, time step.
 * do_stats    : when nonzero, copy E back to the host every iteration.
 * plot_freq   : when nonzero, plot period in simulated time units.
 * prefer_l1   : when nonzero, bias kernel cache config toward L1.
 *
 * Returns the number of time steps taken.
 */
int solve(_DOUBLE_ ***_E, _DOUBLE_ ***_E_prev, _DOUBLE_ **R, int m, int n, int width,
	  _DOUBLE_ T, _DOUBLE_ alpha, _DOUBLE_ dt, int do_stats, int plot_freq,
          int prefer_l1)
{

  /******************************GPU device Arrays ***********************************/

  _DOUBLE_ *E_device, *E_prev_device, *R_device ;

// Bytes for one padded mesh: width columns x (m+3) rows.
int memsize_E = sizeof(_DOUBLE_)* width * (m+3);
  //int memsize_E = sizeof(_DOUBLE_)* (n+3) * (m+3) ;

  _DOUBLE_ **E = *_E, **E_prev = *_E_prev;


  cudaMalloc((void**) &E_device,      memsize_E );
  checkCUDAError("Can't allocate E_device");
  cudaMalloc((void**) &E_prev_device, memsize_E );
  checkCUDAError("Can't allocate E_prev_device");
  cudaMalloc((void**) &R_device,      memsize_E);;
  checkCUDAError("Can't allocate R_device");

  // Copy host data to the device.
  // NOTE(review): &E[0][0] assumes the row-pointer tables front a single
  // contiguous allocation — verify against the allocator in utils.
  (cudaMemcpy(E_device, &E[0][0], memsize_E, cudaMemcpyHostToDevice ));
  checkCUDAError("Can't copy E_device to device");
  (cudaMemcpy(E_prev_device, &E_prev[0][0], memsize_E, cudaMemcpyHostToDevice ));
  checkCUDAError("Can't copy E_prev_device to device");
  (cudaMemcpy(R_device,  &R[0][0], memsize_E, cudaMemcpyHostToDevice ));
  checkCUDAError("Can't copy R_device to device");
				 

 // Simulated time is different from the integer timestep number
 _DOUBLE_ t = 0.0;
 // Integer timestep number
 int niter=0;

 dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);

 // Ceil-division launch: one thread per mesh point plus one layer of
 // boundary threads ((n+2) x (m+2) total), rounded up to whole blocks.
 int numblocksX = (n+2)/BLOCKDIM_X;
 int numblocksY = (m+2)/BLOCKDIM_Y;

 if( (n+2) % BLOCKDIM_X != 0  )
     numblocksX++;
 if( (m+2) % BLOCKDIM_Y != 0  )
     numblocksY++;
 
 dim3 grid(numblocksX, numblocksY, 1);
/*
 int Eprev_s_w;
 int ss = (BLOCKDIM_X + 2)%32;
 if(ss == 0)
   Eprev_s_w = BLOCKDIM_X + 2;
 else
   Eprev_s_w = BLOCKDIM_X + 34 - ss;
 int Eprev_s_h = BLOCKDIM_Y + 2;
 int Eprev_s_size = Eprev_s_w * Eprev_s_h;
*/
// If we set the preference for L1 cache, rather than
// shared memory, we may run slightly faster on devices that have the capability
 cudaFuncCache Preference;
 if (prefer_l1){ 
 Preference = cudaFuncCachePreferL1;
 }
 else{
 Preference = cudaFuncCachePreferShared;
 } 
//cudaFuncSetCacheConfig(boundariesX,Preference);
cudaFuncSetCacheConfig(boundariesY,Preference);
cudaFuncSetCacheConfig(sweep,Preference);

 // We continue to sweep over the mesh until the simulation has reached
 // the desired simulation Time
 // This is different from the number of iterations
  while (t<T) {
  
   t += dt;
   niter++;

//   boundariesX<<<grid, threads>>>(E_prev_device, n, m);
//   boundariesY<<<grid, threads>>>(E_prev_device, n, m, width);
   // sweep applies the mirror boundary conditions itself, then writes
   // the advanced field into E_device and updates R_device in place.
   sweep<<<grid, threads>>>(E_device, E_prev_device, R_device, alpha, n, m, width, dt);
   checkCUDAError("Can't run the kernel");
    
//   cudaThreadSynchronize();
     
   // E_device holds the newest step here (the swap below has not yet
   // happened), so this copy and the plotting copy see current data.
   if (do_stats){
     (cudaMemcpy(&E[0][0], E_device, memsize_E, cudaMemcpyDeviceToHost ));  
     checkCUDAError("Can't get data from device to compute stats");
   }

   if (plot_freq){
        int k = (int)(t/plot_freq);
        if ((t-k*plot_freq)<dt){
	  (cudaMemcpy(&E[0][0], E_device, memsize_E, cudaMemcpyDeviceToHost )); 
          checkCUDAError("Can't get data from device for plotting");
	  	  splot(E,t,niter,m+1,n+1);
//	  	  splot(E,t,niter,m+1,n+1,WAIT);
        }
    }
   //swap pointers so the freshly computed field becomes next step's input
   _DOUBLE_ *tmp = E_device; E_device = E_prev_device; E_prev_device = tmp;
 
 }

  // NOTE(review): because the swap runs after the final sweep, the newest
  // field now lives in E_prev_device and the second-to-last in E_device,
  // so host E receives step T-dt and host E_prev receives step T.
  // Confirm the caller reads the arrays with that expectation.
  (cudaMemcpy(&E[0][0], E_device, memsize_E, cudaMemcpyDeviceToHost ));  
  checkCUDAError("Can't get E from device");
  (cudaMemcpy(&E_prev[0][0], E_prev_device, memsize_E, cudaMemcpyDeviceToHost ));  
  checkCUDAError("Can't get E_prev from device");


  cudaFree(E_device);
  checkCUDAError("Can't free E_device on device");
  cudaFree(E_prev_device);
  checkCUDAError("Can't free E_prev_device on device");
  cudaFree(R_device);
  checkCUDAError("Can't free R on device");


  // Store them into the pointers passed in
  *_E = E;
  *_E_prev = E_prev;

  return niter;
}

