/*copyright 2009 Kevin Daley
#This file is a part of Cerebra.
#Cerebra is free software: you can redistribute it and/or modify
 #   it under the terms of the GNU  General Public License as published by
  #  the Free Software Foundation, either version 3 of the License, or
   # (at your option) any later version.

   # This program is distributed in the hope that it will be useful,
   # but WITHOUT ANY WARRANTY; without even the implied warranty of
   # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   # GNU  General Public License for more details.

   # You should have received a copy of the GNU  General Public License
   # along with this program.  If not, see <http://www.gnu.org/licenses/>.

#Kevin Daley, Lead Developer.  still.horse@gmail.com.*/

#include <cstdio>

#include "node.cu"
#include "node.h"

 void network::toggle_wakeup(){
	// Invert the network's wakeup flag (asleep <-> awake).
	wakeup = wakeup ? false : true;
}

  void network::setup(long gridsize, long sz, long bands){
	// One-time device initialisation: select device 0, size the FFT scratch
	// buffer, allocate the device-side state arrays (n, j, J, spectrum),
	// upload the host mirrors, and launch calc_Julia to seed J.
	cudaSetDevice(0);
	gridsz=gridsize;
	N=gridsz*BLOCK_SIZE;
	cudaMalloc((void**) &buff, N*sizeof(cufftComplex));
	// sz*sz floats for each of (1+bands) planes, plus one extra row of sz floats.
	long size=sz*sz*(1+bands)*sizeof(float)+sizeof(float)*sz;
	siz=size;
	Jm=(float*) malloc(size);
	if(Jm==NULL){
		fprintf(stderr, "network::setup: host allocation of %ld bytes failed\n", size);
		return;
	}
	bnds=bands;
	cudaMalloc((void**)&n, siz);
	// j is deliberately smaller than the other state arrays.
	long jsize = siz - sz*bands*sizeof(float);
	cudaMalloc((void**)&j, jsize);
	cudaMalloc((void**)&J, siz);
	cudaMalloc((void**)&spectrum, bands*sizeof(float));
	dim3 dimGrid(gridsz*8, gridsz*8);
	dim3 dimBlock(16, 16);
	// NOTE(review): Jm is freshly malloc'd above and not filled before this
	// upload, so indeterminate bytes are copied to J here — presumably
	// calc_Julia overwrites J entirely; confirm.
	cudaMemcpy((void*)J, (void*) (Jm), siz, cudaMemcpyHostToDevice);
	cudaMemcpy((void*) n, (void*) nu, siz, cudaMemcpyHostToDevice);
	// Bug fix: the original copied the full siz bytes into j, overrunning
	// j's device allocation by sz*bands*sizeof(float) bytes. Copy only what
	// was allocated.
	cudaMemcpy((void*) j, (void*) ju, jsize, cudaMemcpyHostToDevice);
	cudaMemcpy((void*) spectrum, (void*) uspect, bnds*sizeof(float), cudaMemcpyHostToDevice);
	calc_Julia<<<(dimGrid), dimBlock>>>((float*)J, spectrum, d);
	// Launch-configuration failures only surface via cudaGetLastError().
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
		fprintf(stderr, "network::setup: calc_Julia launch failed: %s\n", cudaGetErrorString(err));
}

  void network::evolve(float deltat, long N){
	// Advance the network one time step: launch the node kernel once per
	// device-memory chunk of size `alloc` bytes, then wait for completion.
	// NOTE: the parameter N shadows the member N used by setup()/sustantiv().
	long alloc=gridsz*BLOCK_SIZE*gridsz*BLOCK_SIZE*(1+bnds)*sizeof(float)+sizeof(float)*gridsz*BLOCK_SIZE;
	dim3 dimBlock(16, 16);
	struct params etc;
	etc.d=d;
	etc.dt=dt;
	dim3 dimGrid (gridsz*8, gridsz*8);
	int copy_point[2]={0,0};
	for(long ix=0; ix<long(64/(BLOCK_SIZE*gridsz)); ix+=1){
		// Portability fix: the original computed these offsets by casting
		// device pointers through long (32-bit on LLP64 platforms, which
		// truncates pointers). char* arithmetic yields the identical byte
		// offsets without the lossy round-trip.
		float* n_ix = (float*)((char*)n + ix*alloc);
		float* J_ix = (float*)((char*)J + ix*alloc);
		// NOTE(review): this offset has no sizeof(float) factor even though
		// the base is byte-addressed — it looks like it may mix element and
		// byte units. Preserved exactly as the original computed it; confirm.
		float* j_ix = (float*)((char*)j + ix*gridsz*gridsz*BLOCK_SIZE*BLOCK_SIZE + ix*gridsz*BLOCK_SIZE);
		node<<<dimGrid,dimBlock>>>(n_ix, (float*)spectrum, J_ix, j_ix, copy_point, deltat, etc, N, bnds);
	}
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// direct replacement.
	cudaDeviceSynchronize();
	return;

}

 Matrix mat(float* elements){
	// Wrap a raw float pointer in a fixed 16x16 Matrix view.
	// NOTE(review): stride is left at 0 here, while MatAndH sets
	// stride == width — confirm the MatAnd kernel tolerates a zero stride.
	Matrix view;
	view.width    = 16;
	view.height   = 16;
	view.stride   = 0;
	view.elements = elements;
	return view;
}

  void MatAndH(const Matrix A, const Matrix B, Matrix C)
               {
                   // Host-side wrapper for the MatAnd kernel: upload A and B,
                   // launch over C's shape, download the result into C.elements,
                   // then release the device buffers.
                   // Precondition (unchanged from the original): B.width and
                   // A.height are multiples of BLOCK_SIZE — dimGrid below
                   // truncates otherwise and trailing elements go unprocessed.
                   Matrix d_A;
                   d_A.width = d_A.stride = A.width; d_A.height = A.height;
                   size_t sizeA = A.width * A.height * sizeof(float);
                   cudaMalloc((void**)&d_A.elements, sizeA);
                   cudaMemcpy(d_A.elements, A.elements, sizeA,
                               cudaMemcpyHostToDevice);
                   Matrix d_B;
                   d_B.width = d_B.stride = B.width; d_B.height = B.height;
                   size_t sizeB = B.width * B.height * sizeof(float);
                   cudaMalloc((void**)&d_B.elements, sizeB);
                   cudaMemcpy(d_B.elements, B.elements, sizeB,
                               cudaMemcpyHostToDevice);
                   // Allocate C in device memory (no upload: kernel writes it).
                   Matrix d_C;
                   d_C.width = d_C.stride = C.width; d_C.height = C.height;
                   size_t sizeC = C.width * C.height * sizeof(float);
                   cudaMalloc((void**)&d_C.elements, sizeC);
                   // Invoke kernel
                   dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
                   dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
                   MatAnd<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
                   // Fix: launch failures were silently ignored. A kernel launch
                   // returns no status directly; it must be read back here. The
                   // blocking D2H copy below surfaces execution-time errors.
                   cudaError_t err = cudaGetLastError();
                   if (err != cudaSuccess)
                       fprintf(stderr, "MatAndH: kernel launch failed: %s\n",
                               cudaGetErrorString(err));
                   // Read C from device memory
                   cudaMemcpy(C.elements, d_C.elements, sizeC, cudaMemcpyDeviceToHost);
                   // Free device memory
                   cudaFree(d_A.elements);
                   cudaFree(d_B.elements);
                   cudaFree(d_C.elements);
               }
		 
	  long network::sustantiv(float* x){
		// Forward FFT: real device signal x (length N) -> complex spectrum
		// written into buff. Returns buff cast to long.
		// NOTE(review): a fresh plan is written into *plan on every call and
		// is never cufftDestroy()ed — this leaks cuFFT plan resources. The
		// fix belongs at the owner of *plan (class level), not here.
		cufftPlan1d(plan, N, CUFFT_R2C,1);
		// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
		cudaDeviceSynchronize();
		cufftExecR2C( *plan, (cufftReal *) x, (cufftComplex *) buff);
		return (long) buff;

	}
	   long  network::descriptiv(float* y){
		// Inverse FFT: complex device spectrum y -> real signal written into
		// buff. Returns buff cast to long.
		// Bug fix: the original created a CUFFT_R2C plan and then executed
		// cufftExecC2R with it; a complex-to-real execution requires a
		// CUFFT_C2R plan.
		cufftPlan1d(plan, N, CUFFT_C2R,1);
		// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
		cudaDeviceSynchronize();
		cufftExecC2R( *plan,  (cufftComplex *) y, (cufftReal *) buff);
		// NOTE(review): as in sustantiv(), *plan is re-created each call and
		// never destroyed — plan leak; fix at the ownership site.
		return (long) buff;
	}
	  long  network::instantiv(long i)
	{
		// Point buff at the i-th stored state inside the device buffer n and
		// return it cast to long.
		// NOTE(review): if n is declared float*, the explicit *sizeof(float)
		// factor scales the offset twice (pointer arithmetic already advances
		// by the element size) — compare assign(), which treats one state as
		// (N+N*N) floats. Confirm n's declared type before relying on this.
		// NOTE(review): returning a pointer as long truncates on LLP64
		// platforms (64-bit Windows) — consider intptr_t at the call sites.
		buff=n+i*(N*N+N)*sizeof(float);
		return (long) buff;
	}
	  long  network::equiv(float* x, float* y){
		// Combine device arrays x and y via the MatAnd kernel, writing the
		// result into buff, and return buff cast to long.
		// NOTE(review): this launches a (gridsz, gridsz) grid while setup()
		// uses (gridsz*8, gridsz*8), and mat() fixes each operand view at
		// 16x16 with stride 0 regardless of grid size — confirm the intended
		// coverage matches what the MatAnd kernel indexes.
		dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
		dim3 dimGrid(gridsz, gridsz);
		
		MatAnd<<<dimGrid, dimBlock>>>(mat(x), mat(y), mat(buff));
		return (long) buff;
	}
	  long network::set(long j){
		// Convert a flat offset j into a state-slot index, where one slot
		// spans (N*N + N) units (integer division discards the remainder).
		const long slot_span = N*N + N;
		return j / slot_span;
	}
	  long network::assign(float* x, float* y){
		// Device-to-device copy of one full state — (N + N*N) floats — from
		// y into x; returns x cast to long.
		const long state_bytes = (N + N*N) * sizeof(float);
		cudaMemcpy(x, y, state_bytes, cudaMemcpyDeviceToDevice);
		return (long) x;
	}
