/*copyright 2009 Kevin Daley
This file is a part of nostredamus
nostredamus is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

 This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.*/

#include "node.cu"
#include "node.h"
inline void network::setup(int gridsize, int sz, int bands){
	// Initialize the network: select GPU 0, build the host-side coupling
	// matrix Jm (one sz*sz Julia-set mask per spectral band), and allocate
	// the device buffers consumed by evolve().
	cudaSetDevice(0);
	gridsz=gridsize;

	// Host coupling matrix: (1+bands) planes of sz*sz floats plus one row of sz floats.
	int size=sz*sz*(1+bands)*sizeof(float)+sizeof(float)*sz;
	siz=size;
	// Device buffers are sized for the full launch grid (gridsz*BLOCK_SIZE per side).
	int alloc=gridsz*BLOCK_SIZE*gridsz*BLOCK_SIZE*(1+bands)*sizeof(float)+sizeof(float)*gridsz*BLOCK_SIZE;
	Jm=(float*) malloc(size);
	// NOTE(review): malloc result is not checked; a failed allocation will
	// fault in the fill loop below.

	// Julia-set fill, adapted from
	// http://www.student.kuleuven.be/~m0216922/CG/juliamandelbrot.html#Julia_Set
	double newRe, newIm, oldRe, oldIm;   // real and imaginary parts of current/previous z
	int maxIterations = 300;             // escape-test iteration cap

	for(int m=0; m<bands; m++){
		// The constant c determines the shape of this band's Julia set;
		// its real part comes from the input spectrum, imaginary part is 0.
		// (Removed the unused shadowed double cRe/cIm declarations.)
		double cRe=uspect[m];
		double cIm=0;
		// NOTE(review): bounds are hard-coded to 16 but the array is indexed
		// with stride sz -- assumes sz >= 16; confirm against callers.
		for(int x = 0; x < 16; x++){
			for(int y = 0; y < 16; y++)
			{
				// Initial z from the pixel location, scaled by member d.
				newRe = x/d;
				newIm = y/d;
				int i; // iteration count; inspected after the loop
				for(i = 0; i < maxIterations; i++)
				{
					// remember value of previous iteration
					oldRe = newRe;
					oldIm = newIm;
					// z = z^2 + c
					newRe = oldRe * oldRe - oldIm * oldIm + cRe;
					newIm = 2 * oldRe * oldIm + cIm;
					// Outside the radius-2 circle: the orbit diverges, stop.
					if((newRe * newRe + newIm * newIm) > 4) break;
				}
				// 1.0f if the point escaped (broke out early), 0.0f if it
				// stayed bounded for all maxIterations steps.
				Jm[x+y*sz+m*sz*sz]=(i < maxIterations);
			}
		}
	}
	bnds=bands;
	cudaMalloc((void**)&n, alloc);
	cudaMalloc((void**)&J, alloc);
	// BUG FIX: previously allocated only `bands` BYTES; the spectrum holds
	// `bands` floats (see the bnds*sizeof(float) upload in evolve()).
	cudaMalloc((void**)&spectrum, bands*sizeof(float));
}

inline void network::evolve(float deltat, int N){
	// Advance the network by N steps of size deltat, streaming the host
	// coupling matrix Jm through the fixed-size device buffer J one
	// alloc-sized chunk at a time.
	int alloc=gridsz*BLOCK_SIZE*gridsz*BLOCK_SIZE*(1+bnds)*sizeof(float)+sizeof(float)*gridsz*BLOCK_SIZE;
	dim3 dimBlock(16, 16);
	dim3 dimGrid(gridsz, gridsz);
	struct params etc;
	etc.d=d;
	etc.dt=dt;
	// The spectrum is loop-invariant: upload it once instead of per chunk.
	cudaMemcpy((void*) spectrum, (void*) uspect, bnds*sizeof(float), cudaMemcpyHostToDevice);
	// BUG FIX: copy_point was a host stack array passed straight to the
	// kernel; dereferencing a host pointer on the device is illegal. Stage
	// it in device memory instead.
	int h_copy_point[2]={0,0};
	int* d_copy_point;
	cudaMalloc((void**)&d_copy_point, 2*sizeof(int));
	cudaMemcpy((void*) d_copy_point, (void*) h_copy_point, 2*sizeof(int), cudaMemcpyHostToDevice);
	// NOTE(review): integer division -- if siz < alloc no chunk is processed;
	// confirm siz is intended to be a multiple of alloc.
	for(int ix=0; ix<siz/alloc; ix++){
		// BUG FIX: offsets previously went through int(ptr), which truncates
		// 64-bit pointers; use char* byte arithmetic instead.
		cudaMemcpy((void*) J, (void*)((char*)Jm + (size_t)ix*alloc), alloc, cudaMemcpyHostToDevice);
		// BUG FIX: the input state is now read from the same chunk offset the
		// result is written back to (previously chunk 0 was re-read each time).
		cudaMemcpy((void*) n, (void*)((char*)nu + (size_t)ix*alloc), alloc, cudaMemcpyHostToDevice);
		node<<<dimGrid,dimBlock>>>((float*) n, (float*) spectrum, (float*) J, (float*) j, d_copy_point, deltat, etc, N, bnds);
		// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
		// the supported equivalent.
		cudaDeviceSynchronize();
		cudaMemcpy((void*)((char*)nu + (size_t)ix*alloc), (void*) n, alloc, cudaMemcpyDeviceToHost);
	}
	cudaFree(d_copy_point);
}
// Wrap a raw element buffer in a fixed 16x16 Matrix descriptor.
// NOTE(review): stride is set to 0, which conflicts with the row-major
// M.elements + row * M.stride + col convention documented below --
// confirm consumers expect a zero stride.
Matrix mat(float* elements){
	Matrix result;
	result.height=16;
	result.width=16;
	result.stride=0;
	result.elements=elements;
	return result;
}
// Matrices are stored in row-major order:
               // M(row, col) = *(M.elements + row * M.stride + col)
              
               // Thread block size
               
               
               // Matrix multiplication - Host code
               // Matrix dimensions are assumed to be multiples of BLOCK_SIZE
               // Matrix multiplication - Host code: C = A * B.
               // Matrix dimensions are assumed to be multiples of BLOCK_SIZE
               // (the grid below does not round up). Every CUDA call is now
               // checked so failures are reported instead of silently
               // producing garbage in C.
               void MatMul(const Matrix A, const Matrix B, Matrix C)
               {
                   cudaError_t err;
                   // Load A to device memory.
                   Matrix d_A;
                   d_A.width = d_A.stride = A.width; d_A.height = A.height;
                   size_t size = A.width * A.height * sizeof(float);
                   err = cudaMalloc((void**)&d_A.elements, size);
                   if (err != cudaSuccess)
                       fprintf(stderr, "MatMul: cudaMalloc(A): %s\n", cudaGetErrorString(err));
                   err = cudaMemcpy(d_A.elements, A.elements, size,
                                    cudaMemcpyHostToDevice);
                   if (err != cudaSuccess)
                       fprintf(stderr, "MatMul: cudaMemcpy(A): %s\n", cudaGetErrorString(err));
                   // Load B to device memory.
                   Matrix d_B;
                   d_B.width = d_B.stride = B.width; d_B.height = B.height;
                   size = B.width * B.height * sizeof(float);
                   err = cudaMalloc((void**)&d_B.elements, size);
                   if (err != cudaSuccess)
                       fprintf(stderr, "MatMul: cudaMalloc(B): %s\n", cudaGetErrorString(err));
                   err = cudaMemcpy(d_B.elements, B.elements, size,
                                    cudaMemcpyHostToDevice);
                   if (err != cudaSuccess)
                       fprintf(stderr, "MatMul: cudaMemcpy(B): %s\n", cudaGetErrorString(err));
                   // Allocate C in device memory.
                   Matrix d_C;
                   d_C.width = d_C.stride = C.width; d_C.height = C.height;
                   size = C.width * C.height * sizeof(float);
                   err = cudaMalloc((void**)&d_C.elements, size);
                   if (err != cudaSuccess)
                       fprintf(stderr, "MatMul: cudaMalloc(C): %s\n", cudaGetErrorString(err));
                   // Invoke kernel.
                   dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
                   dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
                   MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
                   // Launch-configuration errors only surface via cudaGetLastError().
                   err = cudaGetLastError();
                   if (err != cudaSuccess)
                       fprintf(stderr, "MatMul: kernel launch: %s\n", cudaGetErrorString(err));
                   // Read C from device memory (blocking copy also synchronizes
                   // with the kernel).
                   err = cudaMemcpy(C.elements, d_C.elements, size,
                                    cudaMemcpyDeviceToHost);
                   if (err != cudaSuccess)
                       fprintf(stderr, "MatMul: cudaMemcpy(C): %s\n", cudaGetErrorString(err));
                   // Free device memory.
                   cudaFree(d_A.elements);
                   cudaFree(d_B.elements);
                   cudaFree(d_C.elements);
               }
// Element-wise host wrapper: C = MatAnd(A, B) on the device.
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE (the grid
// below does not round up). Every CUDA call is now checked so failures
// are reported instead of silently producing garbage in C.
void MatAndH(const Matrix A, const Matrix B, Matrix C)
{
    cudaError_t err;
    // Load A to device memory.
    Matrix d_A;
    d_A.width = d_A.stride = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    err = cudaMalloc((void**)&d_A.elements, size);
    if (err != cudaSuccess)
        fprintf(stderr, "MatAndH: cudaMalloc(A): %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(d_A.elements, A.elements, size,
                     cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        fprintf(stderr, "MatAndH: cudaMemcpy(A): %s\n", cudaGetErrorString(err));
    // Load B to device memory.
    Matrix d_B;
    d_B.width = d_B.stride = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    err = cudaMalloc((void**)&d_B.elements, size);
    if (err != cudaSuccess)
        fprintf(stderr, "MatAndH: cudaMalloc(B): %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(d_B.elements, B.elements, size,
                     cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        fprintf(stderr, "MatAndH: cudaMemcpy(B): %s\n", cudaGetErrorString(err));
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = d_C.stride = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    err = cudaMalloc((void**)&d_C.elements, size);
    if (err != cudaSuccess)
        fprintf(stderr, "MatAndH: cudaMalloc(C): %s\n", cudaGetErrorString(err));
    // Invoke kernel.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatAnd<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Launch-configuration errors only surface via cudaGetLastError().
    err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "MatAndH: kernel launch: %s\n", cudaGetErrorString(err));
    // Read C from device memory (blocking copy also synchronizes with the kernel).
    err = cudaMemcpy(C.elements, d_C.elements, size,
                     cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
        fprintf(stderr, "MatAndH: cudaMemcpy(C): %s\n", cudaGetErrorString(err));
    // Free device memory.
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
		 
	