/*
 * training.c
 *
 *  Created on: Sep 4, 2013
 *      Author: Alan
 */
#define DEBUG

#include "training.h"
#include "constants.h"

#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

//#include <unistd.h>

#define DELTAMAX 50
#define DELTAMIN 10e-6
#define MAX_LINE_LENGTH 1024
#define COMMENT_CHAR '#'
//helpers functions
void shuffle(trainingPattern_t *patterns, uint32 patternSize);
float32 sgn(float32 x);
//Simple Backpropagation
void bpOutputLayer(netdata_t* net, float32* output, float32* target, float32* deltaO);
void bpHiddenLayer(netdata_t* net, uint16 layer);

//general functions for the skeleton of the training
void updateAllNeurons(netdata_t* net);
void computeGradientOnLayer(netdata_t* net, uint16 layer);
void updateWeigths(netdata_tp net, neuron_tp self);
float32 computeOutputAndBackpropagateError(netdata_t* net, trainingPattern_t pattern);

//	Vectorized version of computeOutputAndBackpropagateError
float32 computeAndBackpropagateErrorsV(vectorizedNet_tp vnet, matrix_tp outputs, matrix_tp targets);
void updateAllNeuronsV(vectorizedNet_tp vnet);

/*
 * Backpropagation step for the output layer: stores the raw output error
 * (output - target) in deltaO and sets each output neuron's delta term
 * (error times the derivative of its activation function).
 */
void bpOutputLayer(netdata_t* net, float32* output, float32* target, float32* deltaO) {
	uint16 outLayer = net->layersize - 1;
	uint32 outSize = net->neurons[outLayer];
	uint32 i;
	neuron_tp cur;

	for (i = 0; i < outSize; i++) {
		cur = getNeuron(net, outLayer, i);
		/* error derivative for the output layer: y - t */
		deltaO[i] = output[i] - target[i];
		cur->delta = deltaO[i] * cur->diffActivationFunction(net, cur->a);
	}
}

/*
 * Backpropagation step for one hidden layer: each neuron's delta is the
 * weighted sum of the next layer's deltas, scaled by the derivative of the
 * neuron's activation function. The layer's bias unit is included.
 */
void bpHiddenLayer(netdata_t* net, uint16 layer) {
	uint32 neuronIdx, outIdx;
	uint32 count = net->neurons[layer] + 1; /* +1 for the bias unit */
	neuron_tp cur;
	float32 accumulated;

	for (neuronIdx = 0; neuronIdx < count; neuronIdx++) {
		cur = getNeuron(net, layer, neuronIdx);
		accumulated = .0f;
		for (outIdx = 0; outIdx < cur->n_output; outIdx++) {
			accumulated += getNeuron(net, layer + 1, outIdx)->delta * cur->weights[outIdx];
		}
		cur->delta = cur->diffActivationFunction(net, cur->a) * accumulated;
	}
}

/*
 * Plain backpropagation weight-delta rule with momentum for one neuron:
 *   dw = -(eta * gradient + alpha * previous_dw)
 * The previous delta is preserved in deltaWeightsP and the accumulated
 * gradient is reset to 0 after being consumed.
 */
void bpComputeDeltaWeights(netdata_tp net, neuron_tp self) {
	uint16 w;
	float32 eta = net->settings.eta;
	float32 alpha = net->settings.alpha;

	for (w = 0; w < self->n_output; w++) {
		self->deltaWeightsP[w] = self->deltaWeights[w];
		self->deltaWeights[w] = -1.0f * (eta * self->gradient[w] + alpha * self->deltaWeightsP[w]);
		self->gradient[w] = 0;
	}
}

//backprop
void bpComputeDeltaWeightsV(vectorizedNet_tp vnet) {
	uint32 i;
	matrix_tp dwp;
	matrix_tp dwbp;
	matrix_tp tmp1, tmp2, tmp3;

	for (i = 0; i < vnet->numLayers - 1; i++) {
		dwp = copyMatrix(vnet->dw[i]);

		tmp1 = mulScalar(vnet->g[i], vnet->eta);
		tmp2 = mulScalar(dwp, vnet->alpha);
		tmp3 = add(tmp1, tmp2);
		freeData(vnet->dw[i]);
		free(vnet->dw[i]);
		vnet->dw[i] = mulScalar(tmp3, -1.0f);
		zeros(vnet->g[i]);
		freeData(tmp1);
		free(tmp1);
		freeData(tmp2);
		free(tmp2);
		freeData(tmp3);
		free(tmp3);

		dwbp = copyMatrix(vnet->db[i]);
		tmp1 = mulScalar(vnet->gb[i], vnet->eta);
		tmp2 = mulScalar(dwbp, vnet->alpha);
		tmp3 = add(tmp1, tmp2);

		freeData(vnet->db[i]);
		free(vnet->db[i]);

		vnet->db[i] = mulScalar(tmp3, -1.0f);
		zeros(vnet->gb[i]);

		freeData(tmp1);
		free(tmp1);
		freeData(tmp2);
		free(tmp2);
		freeData(tmp3);
		free(tmp3);

		freeData(dwp);
		free(dwp);
		freeData(dwbp);
		free(dwbp);
	}

}

/*
 * iRprop+ weight-delta rule (Igel & Hüsken). The per-weight step size
 * ipropDelta grows by etaPlus while the gradient keeps its sign and shrinks
 * by etaMinus when the sign flips; step sizes are clamped to
 * [DELTAMIN, DELTAMAX]. On a sign flip the previous step is reverted only
 * when the overall error increased ("+" variant).
 *
 * Fixes vs. original: removed the dead assignment to deltaWeights[j] that
 * was immediately overwritten by both branches of the following if/else,
 * and the duplicated gradient reset.
 */
void irpComputeDeltaWeights(netdata_tp net, neuron_tp self) {
	uint16 j;
	float32 signChange;
	float32 etaPlus = net->settings.etaPlus;
	float32 etaMinus = net->settings.etaMinus;

	for (j = 0; j < self->n_output; j++) {
		self->ipropDeltaP[j] = self->ipropDelta[j];
		/* positive product <=> gradient kept its sign since last step */
		signChange = self->gradientP[j] * self->gradient[j];
		if (signChange > 0) {
			/* same direction: accelerate */
			self->ipropDelta[j] = fminf(self->ipropDeltaP[j] * etaPlus, DELTAMAX);
			self->deltaWeights[j] = -1.0f * sgn(self->gradient[j]) * self->ipropDelta[j];
		} else if (signChange < 0) {
			/* overshoot: slow down, possibly backtrack the last step */
			self->ipropDelta[j] = fmaxf(self->ipropDeltaP[j] * etaMinus, DELTAMIN);
			if (net->error > net->lastError) {
				/* iRprop+: revert only when the error actually grew */
				self->deltaWeights[j] = -1.0f * self->deltaWeightsP[j];
			} else {
				self->deltaWeights[j] = .0f;
			}
			/* suppress a weight change in the next iteration */
			self->gradient[j] = .0f;
		} else {
			/* gradient (or its predecessor) is zero: plain step */
			self->deltaWeights[j] = -1.0f * sgn(self->gradient[j]) * self->ipropDelta[j];
		}
	}
}

//iRprop+
void irpComputeDeltaWeightsV(vectorizedNet_tp vnet);

/*
 * Applies the configured delta-weight rule to one neuron, then adds the
 * resulting deltas to its weights and remembers the current gradient for the
 * next sign comparison. (Function name kept misspelled — "Weigths" — because
 * it is part of the public interface.)
 */
void updateWeigths(netdata_tp net, neuron_tp self) {
	uint16 idx;

	net->computeDeltaWeights(net, self);
	for (idx = 0; idx < self->n_output; idx++) {
		self->gradientP[idx] = self->gradient[idx];
		self->weights[idx] += self->deltaWeights[idx];
	}
}

/*
 * Updates the weights of every neuron in the network, including each
 * non-output layer's bias neuron.
 */
void updateAllNeurons(netdata_t* net) {
	int i, j;

	for (i = 0; i < net->layersize; i++) {
		for (j = 0; j < net->neurons[i]; j++) {
			updateWeigths(net, getNeuron(net, i, j));
		}
		/* Every layer except the output layer has a bias neuron stored at
		 * index net->neurons[i]. The original relied on j's value after the
		 * loop above; using the explicit index is equivalent but not fragile
		 * under future edits to the loop. */
		if (i < net->layersize - 1) {
			updateWeigths(net, getNeuron(net, i, net->neurons[i]));
		}
	}
}

/*
 * Vectorized weight update: computes the per-layer delta matrices via the
 * configured rule, then adds them to the weight and bias matrices.
 */
void updateAllNeuronsV(vectorizedNet_tp vnet) {
	uint32 i;
	matrix_tp updated;

	vnet->computeDeltaWeights(vnet);
	for (i = 0; i < vnet->numLayers - 1; i++) {
		/* w[i] = w[i] + dw[i]. Keep the freshly allocated sum directly
		 * instead of the original's copyMatrix + free round trip, which
		 * performed a redundant allocation and copy per layer. */
		updated = add(vnet->w[i], vnet->dw[i]);
		freeData(vnet->w[i]);
		free(vnet->w[i]);
		vnet->w[i] = updated;
		/* b[i] = b[i] + db[i], same pattern */
		updated = add(vnet->b[i], vnet->db[i]);
		freeData(vnet->b[i]);
		free(vnet->b[i]);
		vnet->b[i] = updated;
	}
}
/*
 * Accumulates the error-function gradient for every connection leaving the
 * neurons of `layer` (bias neuron included, hence the <= loop bound):
 *   gradient[k] += delta(downstream neuron k) * z(this neuron)   (Bishop 5.53)
 * The gradient is ACCUMULATED rather than assigned so the same routine serves
 * both online and batch training; the weight-update step is responsible for
 * resetting gradients to 0 after consuming them.
 */
void computeGradientOnLayer(netdata_t* net, uint16 layer) {

	uint16 j, k = 0;
	neuron_tp self;

	/* j <= neurons[layer]: the extra index is the layer's bias neuron */
	for (j = 0; j <= net->neurons[layer]; j++) {
		self = getNeuron(net, layer, j);

		for (k = 0; k < self->n_output; k++) {
			/* self->outputs[k] is the downstream neuron this weight feeds;
			 * its delta times our activation output z is the gradient term */
			self->gradient[k] += self->outputs[k]->delta * self->z; //formula 5.53 by Bishop
			/*
			 * In pure online training this could be a plain assignment
			 * (gradient[k] = delta * z, reset every step), but accumulation
			 * also covers batch training, where the derivative is summed
			 * over the whole epoch before the update.
			 *
			 * ATTENTION: WHEN WEIGHT UPGRADE RESET GRADIENT TO 0!!!!!!
			 */
		}
	}
}

/*
 * Runs one forward pass for a single training pattern, backpropagates the
 * error through all layers, accumulates the error-function gradients, and
 * returns the pattern's error value.
 *
 * Fixes vs. original: removed the unused loop variable `i` and added a
 * check on the two temporary-buffer allocations.
 */
float32 computeOutputAndBackpropagateError(netdata_t* net, trainingPattern_t pattern) {
	float32 *output, *deltaOutput;
	float32 error;
	uint32 lastLayerSize;
	int32 j;

	lastLayerSize = net->neurons[net->layersize - 1];
	output = (float32*) malloc(sizeof(float32) * lastLayerSize);
	deltaOutput = (float32*) malloc(sizeof(float32) * lastLayerSize);
	assert(output != NULL && deltaOutput != NULL); /* original left mallocs unchecked */

	feedForward(net, pattern.input, output);
	bpOutputLayer(net, output, pattern.target, deltaOutput);
	/* hidden layers, last to first (layer 0 is the input layer) */
	for (j = net->layersize - 2; j > 0; j--) {
		bpHiddenLayer(net, j);
	}
	/* accumulate the gradients on every layer */
	for (j = net->layersize - 1; j >= 0; j--) {
		computeGradientOnLayer(net, j);
	}

	error = net->errorComputation(output, pattern.target, lastLayerSize);
	free(output);
	free(deltaOutput);
	return error;
}

/*
 * Vectorized backpropagation for one sample: given the forward-pass outputs
 * and the targets, computes the per-layer delta vectors d[], accumulates the
 * weight gradients g[] (g += d[i+1] * z[i]^T) and bias gradients gb[]
 * (gb += d[i+1]), and returns the sample's error value.
 *
 * NOTE(review): `tmpSize` is declared but never used, and the first
 * `errorSum = .0f` is overwritten before being read.
 */
float32 computeAndBackpropagateErrorsV(vectorizedNet_tp vnet, matrix_tp outputs, matrix_tp targets) {
	int32 i, olID;
	float32 errorSum;
	float32* dfVector;
	uint32 tmpSize;

	matrix_tp Wt, Wtd;
	matrix_tp errors, df;
	matrix_tp dfa;
	matrix_tp zt;

	errorSum = .0f;

	olID = vnet->numLayers - 1; // layer id of the output layer

	/* output-layer deltas: (y - t) elementwise-times activation derivative */
	errors = sub(outputs, targets); //Bishop 5.54 vectorized
	df = newMatrix(errors->rows, 1);
	dfVector = vnet->daf[olID](vnet, vnet->a[olID]->data, vnet->a[olID]->rows);
	setColumn(df, 0, dfVector, vnet->a[olID]->rows);
	free(dfVector);
	freeData(vnet->d[olID]);
	free(vnet->d[olID]);
	vnet->d[olID] = hadamard(errors, df);
	freeData(df);
	free(df);
	/* hidden-layer deltas, propagated backwards: d[i] = (W^T d[i+1]) ∘ f'(a) */
	for (i = olID - 1; i > 0; i--) {
		Wt = t(vnet->w[i]);
		Wtd = multiply(Wt, vnet->d[i + 1]);
		dfa = newMatrix(vnet->a[i]->rows, 1);
		dfVector = vnet->daf[i](vnet, vnet->a[i]->data, vnet->a[i]->rows);
		/* NOTE(review): the last argument is 0 here but vnet->a[olID]->rows
		 * in the output-layer call above — one of the two looks wrong; check
		 * setColumn's signature and confirm which is intended. */
		setColumn(dfa, 0, dfVector, 0);
		freeData(vnet->d[i]);
		free(vnet->d[i]);
		vnet->d[i] = hadamard(Wtd, dfa);
		freeData(Wt);
		free(Wt);
		freeData(Wtd);
		free(Wtd);
		freeData(dfa);
		free(dfa);
		free(dfVector);
	}

	/* accumulate gradients: g[i] += d[i+1] * z[i]^T, gb[i] += d[i+1] */
	for (i = 0; i < vnet->numLayers - 1; i++) {
		zt = t(vnet->z[i]);
		df = multiply(vnet->d[i + 1], zt);
		dfa = copyMatrix(vnet->g[i]);
		freeData(vnet->g[i]);
		free(vnet->g[i]);
		vnet->g[i] = add(df, dfa);
		freeData(df);
		free(df);
		freeData(zt);
		free(zt);
		freeData(dfa);
		free(dfa);
		dfa = copyMatrix(vnet->gb[i]);
		freeData(vnet->gb[i]);
		free(vnet->gb[i]);
		vnet->gb[i] = add(vnet->d[i + 1], dfa);
		freeData(dfa);
		free(dfa);
	}
	freeData(errors);
	free(errors);
	errorSum = vnet->errorComputation(outputs->data, targets->data, targets->rows);
	return errorSum;
}

/*
 * Performs one training epoch over all patterns. In online mode the pattern
 * order is shuffled and weights are updated after every single pattern; in
 * batch mode the accumulated gradients are applied once at the end of the
 * pass. Returns the summed error over the epoch.
 */
float32 trainOneEpoch(netdata_t* net, trainingPattern_t *patterns, uint32 patternSize) {
	float32 accumulatedError = .0f;
	uint32 p;

	if (net->settings.onlineTraining) {
		shuffle(patterns, patternSize);
	}
	for (p = 0; p < patternSize; p++) {
		accumulatedError += computeOutputAndBackpropagateError(net, patterns[p]);
		if (net->settings.onlineTraining) {
			/* online: apply the weight update immediately */
			net->lastError = net->error;
			net->error = accumulatedError;
			updateAllNeurons(net);
		}
	}
	if (!net->settings.onlineTraining) {
		/* batch: apply the accumulated update once per epoch */
		net->lastError = net->error;
		net->error = accumulatedError;
		updateAllNeurons(net);
	}
	return accumulatedError;
}

/*
 * Vectorized single training epoch: feeds every sample forward, backpropagates
 * its error, and updates the weights either per sample (online) or once per
 * epoch (batch). Returns the summed error over the epoch.
 *
 * Fix vs. original: the outputs/targets work matrices were allocated and
 * freed once per sample; since memcpy and feedForwardV overwrite every
 * element on each iteration, the matrices are now allocated once.
 */
float32 trainOneEpochV(vectorizedNet_tp vnet, trainingData_t* td) {
	uint32 i;
	uint32 numOutputs;
	matrix_tp outputs;
	matrix_tp targets;
	float32 totError;

	totError = .0f;
	numOutputs = neuronsInLayer(vnet, vnet->numLayers - 1);

	if (vnet->onlineTraining) {
		shuffle(td->samples, td->sampleSize);
	}
	/* work buffers reused across all samples */
	outputs = newMatrix(numOutputs, 1);
	targets = newMatrix(numOutputs, 1);
	for (i = 0; i < td->sampleSize; i++) {
		memcpy(targets->data, td->samples[i].target, numOutputs * sizeof(float32));
		feedForwardV(vnet, td->samples[i].input, outputs->data);
		totError += computeAndBackpropagateErrorsV(vnet, outputs, targets);
		if (vnet->onlineTraining) {
			/* online: apply the weight update immediately */
			vnet->lastError = vnet->error;
			vnet->error = totError;
			updateAllNeuronsV(vnet);
		}
	}
	freeData(targets);
	free(targets);
	freeData(outputs);
	free(outputs);
	if (!vnet->onlineTraining) {
		/* batch: apply the accumulated update once per epoch */
		vnet->lastError = vnet->error;
		vnet->error = totError;
		updateAllNeuronsV(vnet);
	}
	return totError;
}

/*
 * Trains with early stopping: after each epoch the net is validated and the
 * best net seen so far is kept. Training stops after maxEpochs total, or
 * after maxEpochsFromMinimum consecutive epochs without a new validation
 * minimum. On return the net holds the best weights found; the best
 * validation error is returned.
 *
 * Fixes vs. original: `trainingError` was assigned but never read (removed),
 * and the bestNet allocation is now checked.
 */
float32 trainUntilConvergence(netdata_t* net, trainingPattern_t *trainingSet, uint32 trainingSetSize, trainingPattern_t *validationSet, uint32 validationSetSize, uint32 maxEpochsFromMinimum) {
	uint32 i;
	float32 validationError, bestValidationError;
	uint32 epochsFromMinimum = 0;
	netdata_tp bestNet;

	bestNet = (netdata_tp) malloc(sizeof(netdata_t));
	assert(bestNet != NULL); /* original left this malloc unchecked */
	reserveNetSpace(net, bestNet);
	copy(net, bestNet);
	bestValidationError = validate(net, validationSet, validationSetSize);

	for (i = 0; i < net->settings.maxEpochs; i++) {
		/* the per-epoch training error is not used by the stopping rule */
		(void) trainOneEpoch(net, trainingSet, trainingSetSize);
		validationError = validate(net, validationSet, validationSetSize);
		if (validationError < bestValidationError) {
			epochsFromMinimum = 0;
			bestValidationError = validationError;
			copy(net, bestNet); /* remember the best net so far */
		} else if (epochsFromMinimum < maxEpochsFromMinimum) {
			epochsFromMinimum++;
		} else {
			break; /* patience exhausted */
		}
	}
	/* restore the best net and report its validation error */
	copy(bestNet, net);
	destroyNetwork(bestNet);
	return bestValidationError;
}

/*
 * Vectorized early-stopping trainer: validates after every epoch, keeps the
 * best net seen, and stops after maxEpochs or after maxEpochsFromMinimum
 * epochs without improvement. On return vnet holds the best weights found;
 * the best validation error is returned.
 *
 * Fixes vs. original: `trainingError` was assigned but never read (removed),
 * and the bestNet allocation is now checked.
 */
float32 trainUntilConvergenceV(vectorizedNet_tp vnet, trainingData_t *trainingSet, trainingData_t *validationSet, uint32 maxEpochsFromMinimum) {
	uint32 i;
	float32 validationError, bestValidationError;
	uint32 epochsFromMinimum = 0;
	vectorizedNet_tp bestNet;

	bestNet = (vectorizedNet_tp) malloc(sizeof(vectorizedNet_t));
	assert(bestNet != NULL); /* original left this malloc unchecked */
	reserveNetSpaceV(vnet, bestNet);
	copyNetV(vnet, bestNet);
	bestValidationError = validateV(vnet, validationSet->samples, validationSet->sampleSize);

	for (i = 0; i < vnet->maxEpochs; i++) {
		/* the per-epoch training error is not used by the stopping rule */
		(void) trainOneEpochV(vnet, trainingSet);
		validationError = validateV(vnet, validationSet->samples, validationSet->sampleSize);
		if (validationError < bestValidationError) {
			epochsFromMinimum = 0;
			bestValidationError = validationError;
			copyNetV(vnet, bestNet); /* remember the best net so far */
		} else if (epochsFromMinimum < maxEpochsFromMinimum) {
			epochsFromMinimum++;
		} else {
			break; /* patience exhausted */
		}
	}
	/* restore the best net and report its validation error */
	copyNetV(bestNet, vnet);
	destroyNetworkV(bestNet);
	return bestValidationError;
}

/* Reads the next non-'#'-comment line into `line` (MAX_LINE_LENGTH buffer).
 * Returns false on EOF or read error. */
static bool readDataLine(FILE* infile, int8* line) {
	do {
		if (fgets(line, MAX_LINE_LENGTH, infile) == NULL) {
			return false; /* original looped on a stale buffer at EOF */
		}
	} while (line[0] == COMMENT_CHAR);
	return true;
}

/*
 * Loads a sample file into td. Expected format: '#'-prefixed comment lines
 * anywhere, three "<key> <count>" header lines (inputs, outputs, samples),
 * a START_SAMPLES marker, then one whitespace-separated line per sample with
 * the inputs followed by the targets.
 * Returns EXIT_SUCCESS or EXIT_FAILURE.
 *
 * Fixes vs. original: fgets results are checked (no infinite loop on EOF),
 * sscanf uses %u for the uint32 counts, the FILE handle is closed on every
 * error path, the newline is stripped only when present, and a truncated
 * sample line no longer passes NULL to atof.
 */
uint8 loadSamples(trainingData_t *td, int8* filename) {
	FILE* infile;
	uint32 inputs, outputs, numSamples;
	uint16 len, i, j;
	int8 line[MAX_LINE_LENGTH], *token;

	infile = fopen(filename, "r");
	if (!infile) {
		fprintf(stderr, "Unable to open file %s\n", filename);
		return EXIT_FAILURE;
	}

	/* header: input count */
	if (!readDataLine(infile, line) || sscanf(line, "%*s %u", &inputs) != 1) {
		goto bad_format;
	}
	fprintf(stderr, "Number of inputs: %d\n", inputs);
	td->inputs = inputs;

	/* header: output count */
	if (!readDataLine(infile, line) || sscanf(line, "%*s %u", &outputs) != 1) {
		goto bad_format;
	}
	td->outputs = outputs;

	/* header: sample count */
	if (!readDataLine(infile, line) || sscanf(line, "%*s %u", &numSamples) != 1) {
		goto bad_format;
	}
	td->sampleSize = numSamples;

	/* START_SAMPLES marker */
	if (!readDataLine(infile, line)) {
		goto bad_format;
	}
	len = strlen(line);
	if (len > 0 && line[len - 1] == '\n') {
		line[len - 1] = '\0'; /* strip the newline only when it is there */
	}
	if (strcmp(line, "START_SAMPLES") != 0) {
		fprintf(stderr, "Wrong file format in %s, expected START_SAMPLES but found %s \n", filename, line);
		fclose(infile); /* original leaked the handle here */
		return EXIT_FAILURE;
	}

	td->samples = (trainingPattern_t*) malloc(numSamples * sizeof(trainingPattern_t));
	assert(td->samples != NULL);
	for (i = 0; i < numSamples; i++) {
		td->samples[i].input = (float32*) malloc(inputs * sizeof(float32));
		td->samples[i].target = (float32*) malloc(outputs * sizeof(float32));

		if (!readDataLine(infile, line)) {
			goto bad_format;
		}
		token = strtok(line, "\t\n ");
		/* inputs first, then targets, all on one line */
		for (j = 0; j < inputs + outputs; j++) {
			if (token == NULL) {
				goto bad_format; /* fewer values than declared */
			}
			if (j < inputs) {
				td->samples[i].input[j] = atof(token);
			} else {
				td->samples[i].target[j - inputs] = atof(token);
			}
			token = strtok(NULL, "\t\n ");
		}
	}
	fclose(infile);
	return EXIT_SUCCESS;

bad_format:
	fprintf(stderr, "Wrong file format in %s\n", filename);
	fclose(infile);
	return EXIT_FAILURE;
}

/*
 * In-place Fisher–Yates shuffle of the pattern array, driven by rand().
 */
void shuffle(trainingPattern_t *patterns, uint32 patternSize) {
	uint32 idx, pick;
	trainingPattern_t swap;

	if (patternSize <= 1) {
		return;
	}
	for (idx = 0; idx < patternSize; idx++) {
		/* random index in [idx, patternSize); same (slightly biased)
		 * rand() scaling as the original */
		pick = idx + rand() / (RAND_MAX / (patternSize - idx) + 1);
		swap = patterns[pick];
		patterns[pick] = patterns[idx];
		patterns[idx] = swap;
	}
}

/*
 * k-fold cross validation: the data set is shuffled once and split into
 * `folds` folds; each fold serves once as the validation set while the
 * remaining patterns train the net. Returns the mean validation error.
 *
 * Fixes vs. original: crossValidationError was read uninitialized (UB on
 * the first +=), and four no-op out-of-bounds index expressions
 * (e.g. trainingSet[t].input[inputSize];) were removed.
 *
 * NOTE(review): the net is trained across folds without being re-initialized
 * between them — confirm that carry-over is intended.
 */
float32 crossValidation(netdata_t* net, trainingPattern_t *pattern, uint32 patternSize, uint32 folds) {
	uint32 i, j; /* loop indexes */
	uint32 t, v; /* training / validation write positions */
	int32 inputSize, outputSize;
	int32 foldSize;
	float32 crossValidationError, foldError;
	trainingPattern_t *validationSet, *trainingSet;

	crossValidationError = .0f;
	foldSize = patternSize / folds; /* integer division: remainder is unused */
	validationSet = (trainingPattern_t *) malloc(sizeof(trainingPattern_t) * foldSize);
	trainingSet = (trainingPattern_t *) malloc(sizeof(trainingPattern_t) * (patternSize - foldSize));

	inputSize = net->neurons[0];
	outputSize = net->neurons[net->layersize - 1];

	/* pre-allocate the per-pattern buffers for both working sets */
	for (i = 0; i < foldSize; i++) {
		validationSet[i].input = (float32*) malloc(sizeof(float32) * inputSize);
		validationSet[i].target = (float32*) malloc(sizeof(float32) * outputSize);
	}
	for (i = 0; i < (patternSize - foldSize); i++) {
		trainingSet[i].input = (float32*) malloc(sizeof(float32) * inputSize);
		trainingSet[i].target = (float32*) malloc(sizeof(float32) * outputSize);
	}

	shuffle(pattern, patternSize);
	for (i = 0; i < folds; i++) {
		fprintf(stderr, "Fold %d ", i);
		v = 0;
		t = 0;
		/* partition: fold i becomes the validation set */
		for (j = 0; j < patternSize; j++) {
			if (j >= i * foldSize && j < (i + 1) * foldSize) {
				memcpy(validationSet[v].input, pattern[j].input, inputSize * sizeof(float32));
				memcpy(validationSet[v].target, pattern[j].target, outputSize * sizeof(float32));
				v++;
			} else {
				memcpy(trainingSet[t].input, pattern[j].input, inputSize * sizeof(float32));
				memcpy(trainingSet[t].target, pattern[j].target, outputSize * sizeof(float32));
				t++;
			}
		}
		/* train with early stopping, then accumulate the fold's error */
		foldError = trainUntilConvergence(net, trainingSet, (patternSize - foldSize), validationSet, foldSize, net->settings.maxEpochsFromMinimum);
		crossValidationError += foldError;
		fprintf(stderr, " error %f\n", foldError);
	}

	for (i = 0; i < foldSize; i++) {
		free(validationSet[i].input);
		free(validationSet[i].target);
	}
	for (i = 0; i < (patternSize - foldSize); i++) {
		free(trainingSet[i].input);
		free(trainingSet[i].target);
	}
	free(trainingSet);
	free(validationSet);
	return crossValidationError / (float32) folds;
}

/*
 * Sums the error over a validation set: each pattern is fed forward and
 * scored with the net's configured error function. Returns the total
 * (not averaged) error.
 */
float32 validate(netdata_t* net, trainingPattern_t *validationSet, uint32 validationSetSize) {
	uint32 p;
	uint32 outSize = net->neurons[net->layersize - 1];
	float32 *buffer = (float32*) malloc(sizeof(float32) * outSize);
	float32 errorSum = .0f;

	for (p = 0; p < validationSetSize; p++) {
		feedForward(net, validationSet[p].input, buffer);
		errorSum += net->errorComputation(buffer, validationSet[p].target, outSize);
	}
	free(buffer);
	return errorSum;
}

/*
 * Vectorized validation: feeds every pattern through the net and sums the
 * configured error function over the whole set. Returns the total error.
 */
float32 validateV(vectorizedNet_tp vnet, trainingPattern_t *validationSet, uint32 validationSetSize) {
	uint32 p;
	uint32 outSize = neuronsInLayer(vnet, vnet->numLayers - 1);
	float32 *buffer = (float32*) malloc(sizeof(float32) * outSize);
	float32 errorSum = .0f;

	for (p = 0; p < validationSetSize; p++) {
		feedForwardV(vnet, validationSet[p].input, buffer);
		errorSum += vnet->errorComputation(buffer, validationSet[p].target, outSize);
	}
	free(buffer);
	return errorSum;
}

/* Sign function: 1 for positive x, -1 for negative x, 0 for zero. */
float32 sgn(float32 x) {
	return (x > 0) ? 1.0f : ((x < 0) ? -1.0f : .0f);
}

/*
 * Releases every sample's input and target buffers, then the sample array
 * itself. td->sampleSize is left untouched.
 */
void freeSamples(trainingData_t* td) {
	size_t s;

	for (s = 0; s < td->sampleSize; s++) {
		free(td->samples[s].input);
		free(td->samples[s].target);
	}
	free(td->samples);
}

/*
 * Constructive topology search: trains fully connected [inputs, h, outputs]
 * nets for h = 1, 2, ... until one reaches a validation error below epsilon
 * or h exceeds maxHiddenNeurons. The winning net is copied into bestNet.
 * Returns EXIT_SUCCESS when a topology was found, EXIT_FAILURE otherwise.
 *
 * NOTE(review): maxTrainingCycles is accepted but never used — the inner
 * loop is driven by net->settings.maxEpochs instead; confirm intent.
 * Fix vs. original: "Incrmental" typo in the final status message.
 */
uint32 incrementalPruning(/*result*/netdata_tp bestNet, float32 epsilon, uint32 maxTrainingCycles, uint32 maxHiddenNeurons, trainingData_t* trainingData, trainingData_t* validationData, float32 (*huaf)(netdata_tp, float32 x), float32 (*dhuaf)(netdata_tp, float32 x), float32 (*ouaf)(netdata_tp, float32 x), float32 (*douaf)(netdata_tp, float32 x), float32 (*errorComputation)(float32 *output, float32 *target, size_t size), settings_tp settings) {
	uint32 numberOfInputs, numberOfHiddenNeurons, numberOfOutputs;
	uint32 i;
	float32 bestError, error;
	bool netFound;
	uint32 status;
	netdata_tp net;

	numberOfInputs = trainingData->inputs;
	numberOfOutputs = trainingData->outputs;
	numberOfHiddenNeurons = 1;

	bestError = FLT_MAX;
	netFound = false;
	status = EXIT_SUCCESS;

	do {
		net = initFullyConnectedNetwork(numberOfInputs, numberOfHiddenNeurons, numberOfOutputs, huaf, dhuaf, ouaf, douaf, errorComputation, settings);

		for (i = 0; i < net->settings.maxEpochs; i++) {
			trainOneEpoch(net, trainingData->samples, trainingData->sampleSize);
			error = validate(net, validationData->samples, validationData->sampleSize);
			net->error = error;
			if (error < bestError) {
				bestError = error;
				if (bestError < epsilon) {
					fprintf(stderr, "Topology found\n");
					/* replace whatever net bestNet held before */
					if (bestNet->neuron_matrix != NULL) {
						destroyNetwork(bestNet);
						fprintf(stderr, "Network destroyed\n");
					} else {
						fprintf(stderr, "No network to destroy\n");
					}
					reserveNetSpace(net, bestNet);
					copy(net, bestNet);

					netFound = true;
					break;
				}
			}
		}
		fprintf(stderr, "Network of topology [%d %d %d] had an error of %f\n", net->neurons[0], net->neurons[1], net->neurons[2], bestError);
		destroyNetwork(net);
		bestError = FLT_MAX; /* reset for the next candidate topology */
		numberOfHiddenNeurons++;
		if (numberOfHiddenNeurons > maxHiddenNeurons) {
			break; /* search space exhausted */
		}
	} while (!netFound);

	if (!netFound) {
		fprintf(stderr, "No network found\n");
		status = EXIT_FAILURE;
	}
	fprintf(stderr, "Incremental Pruning terminated\n");
	return status;
}

/*
 * Dumps every sample to stderr as: "index [inputs ...] - [targets ...]".
 */
void printSamples(trainingData_t *td) {
	uint32 i, j;

	for (i = 0; i < td->sampleSize; i++) {
		fprintf(stderr, "%d [", i);
		for (j = 0; j < td->inputs; j++) {
			fprintf(stderr, "%f ", td->samples[i].input[j]);
		}
		fprintf(stderr, "] - ");
		/* fix: original passed a stray `i` argument with no conversion
		 * specifier in the format string */
		fprintf(stderr, "[");
		for (j = 0; j < td->outputs; j++) {
			fprintf(stderr, "%f ", td->samples[i].target[j]);
		}
		fprintf(stderr, "]\n");
	}
}
