
#include <mpi.h>
#include "nnengine.h"
// frand(): uniform pseudo-random double in [0, 1], built on rand()
#define  frand() ((double) rand() / (double)RAND_MAX)

#define  allocateDoubleArray(x, y) (x) = new double [(y)]; if ((x) == NULL) has_errors = 1;
#define  deleteAndNullArray(x)     if ((x)!=NULL) { delete [] (x); (x) = NULL; }
#define  deleteAndNullElement(x)   if ((x)!=NULL) { delete (x); (x) = NULL; }
#define  randomizeDoubleArray(x,y) for(i=0; i<(y); i++)	(x)[i]=(2*frand()-1)/5;
#define  zeroDoubleArray(x,y)      for(i=0; i<(y); i++)	(x)[i]=0;

#define  MAX_OSCILATIONS 4
#define  MIN_ETA 1.e-6

/*
 * Constructor. A single instance can be reused: every public conversion
 * entry point re-initialises the engine on its own (via freeAll()).
 */
NeuralNetworkEngine::NeuralNetworkEngine(Parameters* params, Message* msg) {
	parameters = params;
	message    = msg;

	/*
	 * Every owned pointer starts out as NULL so that freeAll() (and the
	 * delete-and-null macros it relies on) can safely tell "never
	 * allocated" apart from "allocated and in use".
	 */
	bmp = NULL;
	ngf = NULL;

	hidden_weights = hidden_weights_feedback = hidden_data = NULL;
	output_weights = output_weights_feedback = output_data = NULL;
	current_sample = NULL;

	// put all size fields and the error flag into a known clean state
	freeAll();
}



/*
 * Destructor: release every owned buffer and helper object. freeAll()
 * NULL-checks everything it touches, so this is safe regardless of which
 * conversion (if any) ran before destruction.
 */
NeuralNetworkEngine::~NeuralNetworkEngine() {
	freeAll();
}



/*
 * Trains the neural network on the input bitmap and writes the trained
 * weights plus the per-sample hidden-layer outputs to an NGF file.
 *
 * MPI work split: rank `root` performs all file I/O (BMP in, NGF out) and
 * owns the master copy of the weights. The image sample tiles are
 * distributed over all ranks; each rank accumulates weight feedback
 * (gradients) for its local samples, and the partial sums are combined on
 * root with MPI_Reduce once per training iteration, after which root
 * applies the feedback and re-broadcasts the weights.
 *
 * Returns has_errors (0 = success, non-zero = failure).
 */
int NeuralNetworkEngine::transformBMPintoNGF() {
	
	int rank, root=0;
	MPI_Status stat;
	int size;	// MPI group size
	
	// identify this process inside the (already initialised) MPI world
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);
	MPI_Comm_size(MPI_COMM_WORLD,&size);
	
	/*
	 * Phase 1: Prologue
	 */

	/*
	 * 1.1 make this object reusable: drop anything left over from a
	 * previous conversion
	 */ 
	freeAll();
	
	/*
	 * 1.2 open the bmp file (root only - the other ranks never touch disk)
	 */
	if (rank==root) {
	
		bmp = new BitmapInterface(message, parameters);
		bmp->openForRead(parameters->getInputFilename());
		if (bmp->hasErrors()) {
			has_errors = 1;
			return has_errors;
		}
	}
		
	/*
	 * 1.3 set all the size variables
	 * 
	 * note: some parameters are hard-coded for the current implementation of
	 * the neural network engine. It is likely that these will change with 
	 * future implementations and therefore have not been copied thru-out the 
	 * program
	 */
	
	// <fixed_for_current_implementation>
	input_height 	= 8;
	input_width 	= 8;
	hidden_size		= 16;
	output_height	= 8;
	output_width	= 8;
	// </fixed_for_current_implementation>	
	
	if (rank==root) {
		image_height = bmp->getRealHeight();
		image_width  = bmp->getRealWidth(); 
	}
	
	// every rank needs the image dimensions to size its local buffers
	MPI_Bcast(&image_width, 1, MPI_INT, root, MPI_COMM_WORLD);
	MPI_Bcast(&image_height, 1, MPI_INT, root, MPI_COMM_WORLD);
	
	// round the image up to a whole number of sample tiles
	// rule: always expand, never crop!
	if (image_height%input_height!=0)
		image_height = ((int)(image_height/input_height)+1)*input_height;	
	
	if (image_width%input_width!=0)
		image_width = ((int)(image_width/input_width)+1)*input_width;
	
	if (rank==root)
		bmp->setLocalSize(image_height, image_width);
		
	samples_count_h	= image_height/input_height;
	samples_count_w	= image_width/input_width;
	
	/*
	 * 1.4 allocate the memory we need. allocateDoubleArray records failure
	 * in has_errors, so one check after the whole batch suffices.
	 */
	srand((unsigned)time(0)); 
	allocateCurrentSampleSpace();
	allocateHiddenData();
	allocateHiddenWeights();
	allocateHiddenWeightsFeedback();
	allocateOutputData();
	allocateOutputWeights();
	allocateOutputWeightsFeedback();	
	
	if (hasErrors())
		return has_errors;
	
	/* 
	 * MPI: distribute the number of samples. Every rank receives
	 * total/size samples except the last, which absorbs the remainder.
	 */ 
	int local_sample_no;
	
	if (rank==root) {

		int total_samples = samples_count_h*samples_count_w;
		int tosend_samples;
		
		// samples for root
		local_sample_no = (int)(total_samples/size);

		// samples for the other ranks
		for (int rk=1; rk<size; rk++) {
			if (rk != size-1)
				tosend_samples = (int)(total_samples/size);
			else
				tosend_samples = (int)(total_samples-(size-1)*local_sample_no);
			
			MPI_Send(&tosend_samples, 1, MPI_INT, rk, 0, MPI_COMM_WORLD);
		}
	}
	else
		MPI_Recv(&local_sample_no, 1, MPI_INT, root, 0, MPI_COMM_WORLD, &stat);
	
	/*
	 * MPI: ship the actual sample pixels. Root keeps its own share in
	 * local_samples and streams every remaining tile to the rank that
	 * owns it, using current_sample as a staging buffer.
	 */
	
	int local_x,  local_y;
	double *local_samples = new double[local_sample_no*input_height*input_width];

	if (rank == root) {
		int total_samples = samples_count_h*samples_count_w;
		int coresponding_rank = 0;
		
		// 1. fill root's local_samples first:
		for (int sp=0; sp<local_sample_no; sp++) {
			
			for (local_x=0; local_x<input_width; local_x++)
				for (local_y=0; local_y<input_height; local_y++)
					local_samples[sp*input_width*input_height+local_x+local_y*input_width] = 
						bmp->getPixel((sp%samples_count_w)*input_width+local_x,
								((int)(sp/samples_count_w))*input_height+local_y);
		}
		
		// 2. send the rest of the samples:
		for (int sp=local_sample_no; sp<total_samples; sp++) {
			
			for (local_x=0; local_x<input_width; local_x++)
				for (local_y=0; local_y<input_height; local_y++)
					current_sample[local_x+local_y*input_width] =
						bmp->getPixel((sp%samples_count_w)*input_width+local_x,
								((int)(sp/samples_count_w))*input_height+local_y);			

			// the trailing remainder always goes to the last rank
			if (sp>=local_sample_no*size)
				coresponding_rank = size-1;
			else
				coresponding_rank = sp/local_sample_no;
			
			MPI_Send(current_sample, input_height*input_width, MPI_DOUBLE, coresponding_rank, 0, MPI_COMM_WORLD);
		}
	}
	else {
		for (int sp=0; sp<local_sample_no; sp++) {
			MPI_Recv(&(local_samples[sp*input_height*input_width]),input_height*input_width, MPI_DOUBLE, root, MPI_ANY_TAG, MPI_COMM_WORLD, &stat);
		}
	}		

	
	/*
	 * Phase 2: Training
	 */
	
	// element counts of the two weight/gradient matrices. They happen to
	// be equal in the current fixed 8x8/16 configuration, but they are
	// kept separate so each MPI call names the size it actually means.
	int output_fb_count = output_width*output_height*hidden_size;
	int hidden_fb_count = hidden_size*input_height*input_width;
	
	// MPI: root-side scratch buffer for the reductions, sized for the
	// larger of the two accumulators so it can serve both
	double *recvbuffer = NULL;
	if (rank==root) {
		recvbuffer = new double [(output_fb_count>hidden_fb_count)?output_fb_count:hidden_fb_count];
	}
	
	int sample_x, sample_y;
	int i, j;
	double sum;
	int first_snr=1;
	// snr-related
	double snr;
	double snr_up = 0.;
	double snr_down = 0.;
	double old_snr = 0.;
	double temp;
	// learning rate
	double eta = 5;
	// eta oscilation detection and adaption
	int oscilations=0;
	
	/*
	 * Serialization convention:
	 * 
	 * current_sample[local_x][local_y] ->
	 * 		current_sample[local_x+local_y*input_width]
	 * 
	 * hidden_weights[i(hidden neuron no)][j(input neuron number)] ->
	 * 		hidden_weights[i*input_width*input_height+j]
	 * 
	 * output_data[local_x][local_y] ->
	 * 		output_data[local_x+local_y*output_width]
	 * 
	 * output_weights[local_x][local_y][i(hidden neuron number)] ->
	 * 		output_weights[(local_x+local_y*output_width)*hidden_size+i]
	 */

	while(1) {
		// 1. announce the start of a new iteration
		if (rank==root)
			parameters->getStopCondition()->startIteration();
		
		// 2. initializations: clear the gradient accumulators and push
		// root's current weights to every rank.
		// NOTE: the hidden-layer broadcast/reduce sizes below use
		// hidden_fb_count (input-based); the original code used the
		// output-based count, which was only correct because the input
		// and output tiles are both 8x8.
		snr = 0;
		zeroDoubleArray(output_weights_feedback, output_fb_count);
		zeroDoubleArray(hidden_weights_feedback, hidden_fb_count);
		MPI_Bcast(output_weights, output_fb_count, MPI_DOUBLE, root, MPI_COMM_WORLD);
		MPI_Bcast(hidden_weights, hidden_fb_count, MPI_DOUBLE, root, MPI_COMM_WORLD);
		
		snr_down=0.;

		// 3. for each and every local sample
		for (int sp = 0; sp<local_sample_no; sp++) {
				// 3.1 bring the current sample into current_sample				
				for (local_x=0; local_x<input_width; local_x++)
					for (local_y=0; local_y<input_height; local_y++)
						current_sample[local_x+local_y*input_width] = 
							local_samples[sp*input_width*input_height+
							             local_x+local_y*input_width];
				
				
				// 3.2 forward pass for the current sample
				
				// 3.2.1 hidden neuron layer (logistic activation)
				for (i=0; i<hidden_size; i++) {
					hidden_data[i] = 0;
					
					for (j=0; j<input_width*input_height; j++)
						hidden_data[i] += hidden_weights[i*input_width*input_height+j] * current_sample [j];

					hidden_data[i] = 1./(1.+exp(-hidden_data[i]));
				}
								
				// 3.2.2 output neuron layer (logistic activation)
				for (local_x=0; local_x<output_width; local_x++) 
					for (local_y=0; local_y<output_height; local_y++) {
						output_data[local_x+local_y*output_width] = 0;

						for (i=0; i<hidden_size; i++)
							output_data[local_x+local_y*output_width] += 
								hidden_data[i] * output_weights[(local_x+local_y*output_width)*hidden_size+i];
						
						output_data[local_x+local_y*output_width] = 1./(1.+exp(-output_data[local_x+local_y*output_width]));
					
				}

				// 3.2.3 backpropagation: accumulate gradients
				// 3.2.3.1 output neurons: delta = out*(1-out)*(out-target)
				for (local_x=0; local_x<output_width; local_x++) 
					for (local_y=0; local_y<output_height; local_y++)
						for (j=0; j<hidden_size; j++)
							output_weights_feedback[(local_x+local_y*output_width)*hidden_size+j] +=
								output_data[local_x+local_y*output_width] *
								(1 - output_data[local_x+local_y*output_width]) *
								(output_data[local_x+local_y*output_width] -
								current_sample[local_x+local_y*output_width]) *
								hidden_data[j];
				
				// 3.2.3.2 hidden neurons: back-propagate the summed
				// output deltas through the output weights
				for (j=0; j<hidden_size; j++) {
					sum = 0;
					
					for (local_x=0; local_x<output_width; local_x++)
						for (local_y=0; local_y<output_height; local_y++)
							sum += output_weights[(local_x+local_y*output_width)*hidden_size+j] *
							(output_data[local_x+local_y*output_width]-current_sample[local_x+local_y*output_width])*
							output_data[local_x+local_y*output_width]*(1-output_data[local_x+local_y*output_width]);
				
					for (local_x=0; local_x<input_width; local_x++)
						for (local_y=0; local_y<input_height; local_y++)
							hidden_weights_feedback[j*input_width*input_height+(local_x+local_y*input_width)] += sum *
							current_sample[local_x+local_y*input_width] * hidden_data[j] * (1-hidden_data[j]);					
				}
				
				// 3.3 calculate the Signal-to-Noise Ratio for current sample
				// NOTE(review): both SNR loops iterate over the output
				// dimensions but index with input_width - valid only while
				// input and output tiles have identical dimensions.
				// 3.3.1 total signal energy, computed only once
				if (first_snr)
					for (local_x=0; local_x<output_width; local_x++) 
						for (local_y=0; local_y<output_height; local_y++) {
							temp = current_sample[local_x+local_y*input_width];
							snr_up += temp * temp;
						}


				// 3.3.2 error energy, computed every iteration
				for (local_x=0; local_x<output_width; local_x++) 
					for (local_y=0; local_y<output_height; local_y++) {
						temp = 	current_sample[local_x+local_y*input_width] - 
								output_data[local_x+local_y*input_width];
						snr_down += temp * temp;  
					}
				
			}
		
		
		// MPI: sum the per-rank gradients on root
				
		MPI_Reduce(output_weights_feedback, recvbuffer, output_fb_count, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);
		if (rank == root)
			memcpy(output_weights_feedback, recvbuffer, output_fb_count*sizeof(double));
		
		// BUGFIX: the reduced hidden-layer gradient must be copied back
		// into hidden_weights_feedback; the original code overwrote
		// output_weights_feedback a second time, discarding the summed
		// hidden-layer gradient on root.
		MPI_Reduce(hidden_weights_feedback, recvbuffer, hidden_fb_count, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);
		if (rank == root)
			memcpy(hidden_weights_feedback, recvbuffer, hidden_fb_count*sizeof(double));
		
		double snr_mpi=0;
				
		MPI_Reduce(&snr_down, &snr_mpi, 1, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);
		snr_down = snr_mpi;
				
		if (first_snr) {
			MPI_Reduce(&snr_up, &snr_mpi,1, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);
			snr_up = snr_mpi;
		}
		
		// do not calculate the snr picture "value" again, for optimization
		first_snr=0;		

		int will_break=0;
		
		if (rank==root) {
		
			// 4. Calculate SNR (in dB) and adapt eta accordingly: after
			// MAX_OSCILATIONS iterations where the SNR got worse, shrink
			// the learning rate.
			snr = (10*log10(snr_up/snr_down));
			
			if(snr < old_snr)
				oscilations++;		
			
			if (oscilations>MAX_OSCILATIONS) {
				eta/=1.1;
				oscilations=0;
			}
			
			old_snr = snr;
			
			// 5. apply the averaged feedback to the master weights
			// NOTE(review): the hidden-weight indexing below mixes
			// input- and output-based strides; it covers the full matrix
			// only while the input and output tiles are the same size.
			for (j=0; j<hidden_size; j++)
				for (local_x=0; local_x<output_width; local_x++)
					for (local_y=0; local_y<output_height; local_y++)
						hidden_weights[j*input_width*input_height+(local_x+local_y*output_width)] -= eta *
						hidden_weights_feedback[j*input_width*input_height+(local_x+local_y*output_width)]/(samples_count_h*samples_count_w);					
			
			for (local_x=0; local_x<output_width; local_x++)
				for (local_y=0; local_y<output_height; local_y++)
					for (j=0; j<hidden_size; j++)
						output_weights[(local_x+local_y*output_width)*hidden_size+j] -= eta *
						output_weights_feedback[(local_x+local_y*output_width)*hidden_size+j]/(samples_count_h*samples_count_w);
			
			// 6. announce the finish of an iteration and check if a new one can
			// be started
			if (parameters->getVerbose()) {
				message->printMessage("Iteration finalized: snr=");
				message->printMessage(snr);
				message->printMessage(" eta=");
				message->printMessage(eta);
				message->printMessage(" err=");
				message->printMessage(snr_down);
				message->printMessage(". \n");
			}
			
			parameters->getStopCondition()->finishedIteration(snr);
			if (!(parameters->getStopCondition()->canBeginNewIteration()))
				will_break=1;
			if (eta<MIN_ETA) {
				message->printMessage("Stopping because eta<");
				message->printMessage(MIN_ETA);
				message->printMessage(". For a value of eta so low the differences are not perceivable.\n");
				will_break=1;
			}
		}
		
		// root decides; every rank must leave the loop together
		MPI_Bcast(&will_break, 1, MPI_INT, root, MPI_COMM_WORLD);
		if (will_break)
			break;
	}

	/*
	 * Phase 3: Write to NGF file (root only)
	 */

	if (rank == root) {
	
	ngf = new NeuralGraphicsFileInterface(message, parameters);
	ngf->setHiddenNeuronCount(hidden_size);
	ngf->setOutputMatrixSize(output_height, output_width);
	ngf->setRealSize(bmp->getRealHeight(), bmp->getRealWidth());
	ngf->setSampleMatrixSize(samples_count_h, samples_count_w);
	ngf->allocateWeightsOutputs();
	if (ngf->hasErrors()) {
		has_errors = 1;
		// BUGFIX: do not leak the MPI buffers on this error path
		delete [] recvbuffer;
		delete [] local_samples;
		return has_errors;
	}
		
	for (local_x=0; local_x<output_width; local_x++)
		for (local_y=0; local_y<output_height; local_y++)
			ngf->setOutputNeuronWeight((local_x+local_y*output_width),&(output_weights[(local_x+local_y*output_width)*hidden_size])); 
	
	// for each and every sample
	for (sample_x=0; sample_x<samples_count_w; sample_x++) 
		for (sample_y=0; sample_y<samples_count_h; sample_y++) {
							
			// 3.1 bring the current sample into current_sample				
			for (local_x=0; local_x<input_width; local_x++)
				for (local_y=0; local_y<input_height; local_y++)
					current_sample[local_x+local_y*input_width] = 
						bmp->getPixel(sample_x*input_width+local_x,
								sample_y*input_height+local_y);
			
			
			// 3.2 run the input->hidden half of the network
			for (i=0; i<hidden_size; i++) {
				hidden_data[i] = 0;
				
				for (j=0; j<input_width*input_height; j++)
					hidden_data[i] += hidden_weights[i*input_width*input_height+j] * current_sample [j];

				hidden_data[i] = 1./(1.+exp(-hidden_data[i]));
			}
			// store the complete hidden vector once per sample (calling
			// this inside the neuron loop would repeatedly store a
			// partially computed vector)
			ngf->setPreOutputSample(sample_x+sample_y*samples_count_w, hidden_data);
		}
		
	ngf->writeNGFFile();
	}
	
	// delete [] on a NULL pointer is a no-op, so no guards are needed
	delete [] recvbuffer;
	delete [] local_samples;
	
	return has_errors;	
}

int NeuralNetworkEngine::transformNDFintoBMP() {
	
	int local_x, local_y, i;
	int sample_x, sample_y;
	
	ngf = new NeuralGraphicsFileInterface(message, parameters);
	
	/*
	 * Phase 1: Read ngf file and get parameters
	 */
	ngf->readNGFFile();
	if (ngf->hasErrors()) {
		has_errors=1;
		return has_errors;
	}
	
	image_height = ngf->getRealHeight();
	image_width = ngf->getRealWidth();	
	hidden_size = ngf->getHiddenNeuronCount();
	output_height = input_height = ngf->getOutputNeuronMatrixHeight();
	output_width = input_width = ngf->getOutputNeuronMatrixWidth();
	
	samples_count_h = ngf->getOutputSampleHeight();
	samples_count_w = ngf->getOutputSampleWidth();
	
	color = ngf->getColor();
	if (color) {
		message->printMessage("Color neural network compressed images are not available in this release.\n");
		has_errors=1;
		return has_errors;
	}
	
	// IMPORTANT TODO: Check sanity of the parameters here...
	
	/*
	 * Phase 2: Prepare the BMP file
	 */
	bmp = new BitmapInterface(message, parameters);
	bmp->setRealSize(image_height, image_width);
	bmp->setLocalSize(output_height*samples_count_h, output_width*samples_count_w);
	
	/*
	 * Phase 3: Prepare the partial neural network
	 */
	
	allocateOutputWeights();
	allocateOutputData();
	allocateHiddenData();
	// copy Output weights
	for (local_x=0; local_x<output_width; local_x++)
		for (local_y=0; local_y<output_height; local_y++)
			memcpy(&(output_weights[(local_x+local_y*output_width)*hidden_size]), ngf->getOutputNeuronWeight(local_x+local_y*output_width), hidden_size*sizeof(double));
			

	/*
	 * Phase 4: Compute the neural network result
	 */
	for (sample_x=0; sample_x<samples_count_w; sample_x++) 
		for (sample_y=0; sample_y<samples_count_h; sample_y++) {
			
			// 1. bring the hidden output data to structure
			memcpy(hidden_data, ngf->getPreOutputSample(sample_x+sample_y*samples_count_w), hidden_size*sizeof(double));
			
			// 2. calculate the output neurons' output values
			for (local_x=0; local_x<output_width; local_x++) 
				for (local_y=0; local_y<output_height; local_y++) {
					output_data[local_x+local_y*output_width] = 0;

					for (i=0; i<hidden_size; i++)
						output_data[local_x+local_y*output_width] += 
							hidden_data[i] * output_weights[(local_x+local_y*output_width)*hidden_size+i];
					
					output_data[local_x+local_y*output_width] = 1./(1.+exp(-output_data[local_x+local_y*output_width]));
					
					bmp->setPixel(sample_x*input_width+local_x, sample_y*input_height+local_y, output_data[local_x+local_y*output_width]); 
			}
		}

	/*
	 * Phase 5: Write result to the bitmap file
	 */
	bmp->writeFile();

	return has_errors;
}

/*
 * Allocates the input->hidden weight matrix (hidden_size rows of
 * input_height*input_width columns) and seeds every entry with a small
 * random value in (-0.2, 0.2). Records failure in has_errors.
 */
void NeuralNetworkEngine::allocateHiddenWeights() {
	int i; // loop counter consumed by the randomize macro
	const int count = hidden_size*input_height*input_width;
	allocateDoubleArray(hidden_weights, count);
	randomizeDoubleArray(hidden_weights, count);
}

/*
 * Allocates the gradient accumulator matching hidden_weights. Left
 * uninitialised here: the training loop zeroes it (zeroDoubleArray) at
 * the start of every iteration. Records allocation failure in has_errors.
 */
void NeuralNetworkEngine::allocateHiddenWeightsFeedback() {
	// size = hidden_size*input_height*input_width*sizeof(double)
	allocateDoubleArray(hidden_weights_feedback, hidden_size*input_height*input_width);
}

/*
 * Allocates the hidden layer's output buffer (one double per hidden
 * neuron). Contents are fully overwritten by every forward pass, so no
 * initialisation is done here. Records allocation failure in has_errors.
 */
void NeuralNetworkEngine::allocateHiddenData() {
	// size = hidden_size*sizeof(double)
	allocateDoubleArray(hidden_data, hidden_size);
}

/*
 * Allocates the hidden->output weight matrix (one hidden_size-long weight
 * vector per output neuron, i.e. hidden_size*output_height*output_width
 * doubles) and seeds every entry with a small random value in
 * (-0.2, 0.2). Records failure in has_errors.
 */
void NeuralNetworkEngine::allocateOutputWeights() {
	int i; // loop counter consumed by the randomize macro
	const int count = hidden_size*output_height*output_width;
	allocateDoubleArray(output_weights, count);
	randomizeDoubleArray(output_weights, count);
}

/*
 * Allocates the gradient accumulator matching output_weights. Left
 * uninitialised here: the training loop zeroes it (zeroDoubleArray) at
 * the start of every iteration. Records allocation failure in has_errors.
 */
void NeuralNetworkEngine::allocateOutputWeightsFeedback() {
	// size = hidden_size*output_height*output_width*sizeof(double)
	allocateDoubleArray(output_weights_feedback, hidden_size*output_height*output_width);
}

/*
 * Allocates the output layer's output buffer (one double per output
 * neuron, row-major). Contents are fully overwritten by every forward
 * pass. Records allocation failure in has_errors.
 */
void NeuralNetworkEngine::allocateOutputData() {
	// size = output_height*output_width*sizeof(double)
	allocateDoubleArray(output_data, output_height*output_width);
}

/*
 * Allocates the staging buffer for one input sample tile
 * (input_width x input_height pixels, row-major). Records allocation
 * failure in has_errors.
 */
void NeuralNetworkEngine::allocateCurrentSampleSpace() {
	// size = input_width*input_height*sizeof(double)
	allocateDoubleArray(current_sample, input_width*input_height);
}

void NeuralNetworkEngine::freeAll() {

	// free allocated memory
	deleteAndNullArray(hidden_weights);
	deleteAndNullArray(hidden_weights_feedback);
	deleteAndNullArray(hidden_data);
	deleteAndNullArray(output_weights);
	deleteAndNullArray(output_weights_feedback);
	deleteAndNullArray(output_data);
	deleteAndNullArray(current_sample);

	deleteAndNullElement(bmp);
	deleteAndNullElement(ngf);
	
	image_height 	= 1; 
	image_width 	= 1;
	input_height 	= 1;
	input_width 	= 1;
	hidden_size		= 1;
	output_height	= 1;
	output_width	= 1;
	
	samples_count_h	= 1;
	samples_count_w	= 1;

	// clear the errors that were encountered
	has_errors=0;
}

/*
 * Returns non-zero if any previous operation recorded an error
 * (allocation failure or a file-interface error), zero otherwise.
 * The flag is cleared by freeAll().
 */
int NeuralNetworkEngine::hasErrors() {
	return has_errors;
}

