/*copyright 2009 Kevin Daley
This file is a part of nostredamus
nostredamus is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

 This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.*/

// Edge length of a square thread block / shared-memory tile.
#define BLOCK_SIZE 16
// Angular-frequency scale (2*pi). Parenthesized so the expansion is safe
// inside any surrounding expression; current uses are purely multiplicative,
// so the value is unchanged.
#define kk (2.0f*3.1415927f)
// Coupling constant lambda. NOTE(review): this is an int literal, so
// expressions like 1/lmb are integer division — harmless while lmb == 1,
// but fragile if the value is ever changed. Confirm intended type.
#define lmb 1
// Simulation parameters passed by value to the node kernel.
struct params{
	// d: divisor applied when computing the displacement operator (etc.d).
	// dt: cast to int and used as a modulus on thread indices in node();
	//     NOTE(review): distinct from the kernel's separate float `dt`
	//     argument — confirm the overlap in naming is intentional.
	float d, dt;
};
 // Row-major matrix view over a flat float buffer.
 // Element (row, col) lives at elements[row * stride + col] (see GetElement),
 // so `stride` is the allocated row pitch in elements; it may exceed `width`
 // when the view is a sub-matrix of a larger allocation (see GetSubMatrix).
 typedef struct {
                   int width;
                   int height;
                   int stride;
                   float* elements;
               } Matrix;
// Evolve the machine state `n` toward the precomputed spectral Julia set `J`,
// one 16x16 thread block per tile, iterating over `bands` descriptor bands and
// `N` inner relaxation steps per band.
//
// NOTE(review): several hazards are visible in this kernel and are flagged
// rather than changed, pending author intent:
//  - __syncthreads() is called inside data-dependent branches (the big
//    if/else bodies below). If any thread of the block takes a different
//    path, the barrier is divergent -> deadlock/undefined behavior.
//  - n[...] is zeroed and then immediately re-read on the following line, so
//    the "total energy" slice of sdata is always written with 0.
//  - index.y is built from blockIdx.x*blockDim.x, not blockIdx.y*blockDim.y —
//    presumably a typo that only cancels out for a 1-block launch.
//  - sdata offsets are inconsistent: 767+48 is written but 767+32 is read.
//  - the final sdata update uses `x + (cond) ? a : 0`, which parses as
//    `(x + cond) ? a : 0` because `+` binds tighter than `?:`.
//  - parameter `j` (goal-state modifier) is never used in the body.
//  - `#pragma unroll` has no effect on a loop with runtime bound `bands`.
__global__ void node //kernel to evolve machine state and goal state.  we like to use the c stack instead of registers, because it's allegedly faster in CUDA.
( float* n  /*the number of instances of each descriptor, in row-major order*/,  float* spectrum /*the unique numeric identifier of each descriptor*/,  float* J /*the precomputed spectral Julia set, in row-major order*/,  float* j/*the goal-state modifier*/, int copy_point[2], float dt, struct params etc, int N, int bands){
	// Global coordinates of this thread, offset by copy_point.
	// NOTE(review): the y component also uses blockIdx.x*blockDim.x — see header note.
	int2 index=make_int2(threadIdx.x+blockIdx.x*blockDim.x+copy_point[0], threadIdx.y+blockIdx.x*blockDim.x+copy_point[1]);//only one 16x16 thread block for now.
	// Four 256-float slices (plus padding): old instances, corrections,
	// energy, Julia set. Uninitialized — slices are written before use below.
	__shared__ float sdata[1024+80]; //256 vectors each for: old instance array, changes made to that, total energy, julia set
	#pragma unroll 
	for (int i=0; i<bands; i++){
		// k: spectral identifier (frequency) of band i.
		float k=spectrum[i];
		// Stage this thread's instance count, then zero it in global memory.
		sdata[(threadIdx.x+threadIdx.y*16)]=n[i*256+index.x+index.y*16*gridDim.x];
		n[i*256+index.x+index.y*16*gridDim.x]=0;
		// Stage the Julia-set value at offset 271 (= 255+16).
		sdata[255+16+(threadIdx.x+threadIdx.y*16)]=J[i*256+index.x+index.y*16*gridDim.x];
		// NOTE(review): n[...] was just zeroed above, so this slice is always 0.
		sdata[767+48+(threadIdx.x+threadIdx.y*16)]=n[i*256+index.x+index.y*16*gridDim.x]*spectrum[i];
		for(int lp=0; lp<N; lp++){
			// Skip threads at multiples of int(etc.dt) unless the staged
			// Julia value is non-negative.
			if((index.x%int((etc.dt)) && index.y%int((etc.dt))) || (sdata[255+16+(threadIdx.x+threadIdx.y*16)]>=0)){
				// Displacement operator: rotation by angle dt*k*2*pi, scaled
				// by k*lmb * staged value / etc.d.
				float2 delta=make_float2(-k*lmb*cos(dt*k*kk)*sdata[(threadIdx.x+threadIdx.y*16)]/etc.d, lmb*k*sin(dt*k*kk)*sdata[(threadIdx.x+threadIdx.y*16)]/etc.d);//find the displacement operator
				// NOTE(review): yes — this barrier is inside a divergent
				// branch; see header note.
				__syncthreads();//is this dangerous?
				// Displacement stays inside the 16x16 tile only if each
				// component is a non-negative integer < 16 (x%16==x test).
				if(int(delta.x)%16==delta.x && int(delta.y)%16 == delta.y){
					// In-tile: accumulate correction into the sdata slice at
					// 543 (=511+32). NOTE(review): reads offset 767+32, but the
					// energy slice was written at 767+48 — confirm which is meant.
					sdata[511+32+int(threadIdx.x+delta.x)%16+int(threadIdx.y+delta.y)%16*16]+=-1/lmb*pow(sdata[767+32+int(threadIdx.x+delta.x)%16+int(threadIdx.y+delta.y)%16*16]/sdata[767+32+int(threadIdx.x+delta.x)%16+int(threadIdx.y+delta.y)%16*16],2)*dt/spectrum[i]; //correct our guess
				}
				else{
					// Out-of-tile: apply the (negative) correction directly to
					// global memory. Unsynchronized read-modify-write on n[] —
					// racy if another thread targets the same element.
					n[int(i*256+index.x+index.y*16*gridDim.x+delta.x+delta.y*16)]+=-1/lmb*pow(k*n[int(i*256+index.x+index.y*16*gridDim.x+delta.x+delta.y*16)]/k*n[i*256+index.x+index.y*16*gridDim.x],2.0f)*dt/spectrum[i];
			}
	__syncthreads();
			// Second pass: add back the positive counterpart of the correction.
			if(int(delta.x)%16==delta.x && int(delta.y)%16 == delta.y){
					sdata[511+32+int(threadIdx.x+delta.x)%16+int(threadIdx.y+delta.y)%16*16]+=1/lmb*pow(sdata[767+32+int(threadIdx.x+delta.x)%16+int(threadIdx.y+delta.y)%16*16]/sdata[767+32+int(threadIdx.x+delta.x)%16+int(threadIdx.y+delta.y)%16*16],2)*dt/spectrum[i]; //correct our guess
				}
				else{
					n[int(i*256+index.x+index.y*16*gridDim.x+delta.x+delta.y*16)]+=1/lmb*pow(k*n[int(i*256+index.x+index.y*16*gridDim.x+delta.x+delta.y*16)]/k*n[int(i*256+index.x+index.y*16*gridDim.x)],1.0f)*dt*spectrum[i];
				__syncthreads();
			
		// Fold the accumulated correction into the staged value and publish it.
		// NOTE(review): `a + (cond) ? b : 0` parses as `(a + cond) ? b : 0`
		// — the addition is almost certainly not applied as intended.
		sdata[int(threadIdx.x)+int(threadIdx.y)*16]=n[i*256+index.x+index.y*16*gridDim.x]+(sdata[int(threadIdx.x)+int(threadIdx.y)*16]!=0)?sdata[511+32+int(threadIdx.x)+int(threadIdx.y)*16]:0;
		n[i*256+index.x+index.y*16*gridDim.x]=sdata[int(threadIdx.x)+int(threadIdx.y)*16];
		}
		}
	
}}
}


               

               // Read the element at (row, col) from a row-major matrix view,
               // honoring the view's stride (row pitch in elements).
               __device__ float GetElement(const Matrix A, int row, int col)
               {
                   const int offset = row * A.stride + col;
                   return A.elements[offset];
               }
               // Write `value` into the element at (row, col) of a row-major
               // matrix view, honoring the view's stride.
               __device__ void SetElement(Matrix A, int row, int col,
                                            float value)
               {
                   const int offset = row * A.stride + col;
                   A.elements[offset] = value;
               }
               // Build a BLOCK_SIZE x BLOCK_SIZE view into A, `col` tiles to
               // the right and `row` tiles down from A's upper-left corner.
               // The view aliases A's storage (same stride, offset pointer);
               // no data is copied.
               __device__ Matrix GetSubMatrix(Matrix A, int row, int col)
               {
                   Matrix tile;
                   tile.width  = BLOCK_SIZE;
                   tile.height = BLOCK_SIZE;
                   tile.stride = A.stride;
                   const int origin = A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col;
                   tile.elements = &A.elements[origin];
                   return tile;
               }

   // Tiled matrix multiplication C = A * B, called by MatrixMul().
   // Launch layout: one BLOCK_SIZE x BLOCK_SIZE thread block per output tile;
   // each thread produces one element of C. Assumes A.width is a multiple of
   // BLOCK_SIZE. Shared memory: two BLOCK_SIZE^2 float tiles (static).
   __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
   {
       // Tile of C owned by this block, and this thread's slot within it.
       const int tileRow = blockIdx.y;
       const int tileCol = blockIdx.x;
       Matrix Csub = GetSubMatrix(C, tileRow, tileCol);
       const int r = threadIdx.y;
       const int c = threadIdx.x;
       // Running dot product for C[tileRow*BS + r][tileCol*BS + c].
       float acc = 0;
       // March across A's tiles (and down B's), accumulating one tile-product
       // per iteration.
       const int numTiles = A.width / BLOCK_SIZE;
       for (int t = 0; t < numTiles; ++t) {
           __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
           __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
           // Each thread stages one element of each input tile.
           As[r][c] = GetElement(GetSubMatrix(A, tileRow, t), r, c);
           Bs[r][c] = GetElement(GetSubMatrix(B, t, tileCol), r, c);
           // Whole tile must be resident before anyone reads it.
           __syncthreads();
           for (int e = 0; e < BLOCK_SIZE; ++e)
               acc += As[r][e] * Bs[e][c];
           // All reads must finish before the tiles are overwritten next pass.
           __syncthreads();
       }
       // Publish this thread's element of C.
       SetElement(Csub, r, c, acc);
   }

// Element-wise equality mask: C[r][c] = 1.0f where A[r][c] == B[r][c], else 0.0f.
// (Despite the name, this is exact float equality, not a bitwise AND —
// flagged rather than changed; exact comparison may be intended for masks.)
// Generalized from the original single-block form: indices now include the
// block offset and are bounds-checked against C, so any grid/block shape is
// safe. A one-block launch that covered the matrix behaves identically.
__global__ void MatAnd(Matrix A, Matrix B, Matrix C)
   {
       int row = blockIdx.y * blockDim.y + threadIdx.y;
       int col = blockIdx.x * blockDim.x + threadIdx.x;
       // Guard the grid tail: threads outside the matrix write nothing.
       if (row < C.height && col < C.width)
           SetElement(C, row, col, GetElement(A, row, col) == GetElement(B, row, col));
   }