

// includes, system

#ifndef __CPP_INTEGRATION_CU__
#define __CPP_INTEGRATION_CU__

#include <math.h>
#include "randomlib.h"
#include <ctime>


// includes, kernels
#include "GPU_kernels.cu"
#include "Cuda_Config.h"
#include "cutil.h"

#include "RandomNumberGenerator.cu"
//debug
#include <iostream>
#include <fstream>

typedef float* Chromosome; // TODO: already defined in the genetic config header — remove this duplicate from here

//
////////////////////////////////////////////////////////////////////////////////
// declaration, forward


using namespace std;

// Trains the ANN with a GPU genetic algorithm: creates a random population of
// weight chromosomes on the host, uploads population + training data to the
// device, then evolves generations on the GPU until either MAX_GENERATION
// generations have run or the best (minimum) fitness reaches DESIRED_FITNESS.
//   trainData     - host array of training samples
//   trainDataSize - number of elements in trainData
// Writes per-generation timing to "performance.dat" and a debug header to
// "debug.txt". All host and device buffers are released before returning.
extern "C" void train( ANNDATA* trainData, int trainDataSize){

	unsigned int timer = 0;

	/************************************************************************/
	/* GENETIC PART IMPLEMENTATION                                          */
	/************************************************************************/

	// Seed the host RNG, then build the initial population on the host.
	RandomInt (1802,19373);
	// NOTE: new[] takes an ELEMENT count, not a byte count — the original
	// multiplied by sizeof(float) and over-allocated 4x.
	Chromosome individuals = new float[CONNECTION_NUM * MAX_POPULATION];
	for(int i = 0 ; i  < MAX_POPULATION * CONNECTION_NUM ; i++){
		individuals[i] = RandomDouble (-10.0f, 10.0f);
	}

	// Device copy of the population.
	Chromosome d_individuals;
	cudaMalloc ((void**)&d_individuals, CONNECTION_NUM * MAX_POPULATION * sizeof(float));
	cudaMemcpy (d_individuals,individuals,CONNECTION_NUM * MAX_POPULATION * sizeof(float),cudaMemcpyHostToDevice);

	// Per-thread rand48 state for the device-side RNG.
	Rand48 rng;
	rng.init(MAX_POPULATION , (int)time(0));

	Rand48* d_random;
	cudaMalloc ((void**)&d_random,sizeof(Rand48));
	cudaMemcpy (d_random,&rng,sizeof(Rand48), cudaMemcpyHostToDevice);

	// Copy training data to device global memory.
	ANNDATA* d_training_data;
	cudaMalloc ((void**)&d_training_data, sizeof(ANNDATA)* trainDataSize);
	cudaMemcpy (d_training_data,trainData, sizeof(ANNDATA)* trainDataSize, cudaMemcpyHostToDevice);

	// NOTE(review): resultWeights is allocated but never filled from d_result
	// in this function — confirm whether the best chromosome should be copied
	// back after the loop.
	Chromosome resultWeights = new float[CONNECTION_NUM];

	Chromosome d_result;
	cudaMalloc ((void**)&d_result, sizeof(float) * CONNECTION_NUM);

	float* d_fitnessArry;
	cudaMalloc ((void**)&d_fitnessArry,MAX_POPULATION * sizeof(float));

	// Initial fitness of every individual; the minimum is reduced on the host.
	float* h_fitnessAr = new float[MAX_POPULATION];
	calculateFirstFitness<<<MAX_POPULATION / BLOCK_SIZE, BLOCK_SIZE>>>(d_individuals,d_training_data,trainDataSize,d_fitnessArry);
	cudaError_t launchErr = cudaGetLastError();   // catch bad launch config
	if (launchErr != cudaSuccess)
		std::cerr<<"calculateFirstFitness launch failed: "<<cudaGetErrorString(launchErr)<<std::endl;
	// Blocking copy also synchronizes with the kernel above.
	cudaMemcpy (h_fitnessAr,d_fitnessArry,MAX_POPULATION* sizeof(float),cudaMemcpyDeviceToHost);

	// Lower fitness is better: track the minimum.
	float h_best = h_fitnessAr[0];
	for(int i = 1 ; i < MAX_POPULATION ; i++){
		if(h_fitnessAr[i] < h_best)
			h_best = h_fitnessAr[i];
	}

	ofstream out;
	out.open("debug.txt");
	if(!out.is_open () || !out.good ()){
		// Report the failure instead of silently ignoring it.
		std::cerr<<"warning: could not open debug.txt"<<std::endl;
	}
	out<<"Fitness values"<<std::endl;

	float* d_best;
	cudaMalloc ((void**)&d_best, sizeof(float));

	int genNumber = 0;

	ofstream perform;
	perform.open ("performance.dat");
	if (!perform.is_open () || !perform.good ())
	{
		std::cerr<<"warning: could not open performance.dat"<<std::endl;
	}

	cutCreateTimer (&timer);

	float total_time = 0;
	float elapsed_time = 0;
	while(genNumber < MAX_GENERATION && h_best > DESIRED_FITNESS){

		cutStartTimer (timer);
		evolvePopulation<<<MAX_POPULATION / BLOCK_SIZE, BLOCK_SIZE, trainDataSize * sizeof(ANNDATA) + BLOCK_SIZE * sizeof(float)>>>(d_individuals,d_training_data,trainDataSize,d_random,d_fitnessArry, d_result,d_best);
		// Kernel launches are asynchronous: wait for completion BEFORE
		// stopping the timer, otherwise we only measure launch overhead.
		cudaThreadSynchronize ();
		cutStopTimer (timer);
		elapsed_time = cutGetTimerValue (timer);
		total_time +=elapsed_time;

		cudaMemcpy (&h_best,d_best,sizeof(float), cudaMemcpyDeviceToHost);

		perform<<" Generation : "<<genNumber<<" elapsed time : "<<elapsed_time<<" Total time : "<<total_time<<endl;
		cout<<"Generation : "<<genNumber<<" Min Fitness: "<<h_best<<"elapsed time: "<<cutGetTimerValue(timer)<<endl;
		++genNumber;

	}
	cutDeleteTimer (timer);
	rng.destroy();
	out.close();
	perform.close ();
	// Release device buffers (d_best previously leaked) and host buffers
	// (individuals / resultWeights / h_fitnessAr previously leaked). The
	// original also called cudaEventDestroy on events that were never created
	// with cudaEventCreate — that was an error and has been removed.
	cudaFree (d_fitnessArry);
	cudaFree (d_individuals);
	cudaFree (d_random);
	cudaFree (d_result);
	cudaFree (d_training_data);
	cudaFree (d_best);
	delete [] individuals;
	delete [] resultWeights;
	delete [] h_fitnessAr;
}



#include "randomlib.c"

#endif