/*
 * net.c
 *
 *  Created on: Nov 21, 2013
 *      Author: cinus
 */

#include "net.h"

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <math.h>

#include "jjann.h"
#include "constants.h"
#include "activation.h"
#include "topology.h"
#include "training.h"

/* Uniform pseudo-random value in [0, 1]. */
float32 rand01(void);
/* Dumps the whole neuron matrix to stderr (active only when compiled with DEBUG). */
void debugMatrix(netdata_t* net);
/* Allocates newNeuron's per-neuron buffers, sized after originalNeuron. */
void reserveNeuronSpace(neuron_tp originalNeuron, neuron_tp newNeuron);
/* Wires the neuron at (layerId, pos) from an explicit topology; ctr is its global index. */
void createNeuron(uint32 layerId, uint32 pos, topology_tp topology, uint32 numNeurons, netdata_t* net, uint32 ctr);
/* Wires the neuron at (layerId, pos) for a fully connected layered network. */
void createSimpleNeuron(uint32 layerId, uint32 pos, netdata_tp net, float32 (*af)(netdata_tp, float32 x), float32 (*daf)(netdata_tp, float32 x));
/* Frees the buffers owned by the neuron at (layerId, pos). */
void destroyNeuron(netdata_t* net, uint32 layerId, uint32 pos);

/**
 * Initializes the neuron at (layerId, pos) for a fully connected layered
 * network: the neuron receives input from every neuron of the previous
 * layer (plus that layer's bias) and feeds every neuron of the next layer.
 *
 * @param layerId layer index; 0 is the input layer
 * @param pos     column of the neuron inside its layer
 * @param net     network owning the neuron matrix (must be allocated already)
 * @param af      activation function
 * @param daf     derivative of the activation function
 */
void createSimpleNeuron(uint32 layerId, uint32 pos, netdata_tp net, float32 (*af)(netdata_tp, float32 x), float32 (*daf)(netdata_tp, float32 x)) {
	neuron_tp tmp;
	uint32 k;

	tmp = getNeuron(net, layerId, pos);
	tmp->layer = layerId;
	tmp->pos = pos;
	tmp->z = 0;
	tmp->a = 0;
	tmp->delta = 0;
	tmp->activationFunction = af;
	tmp->diffActivationFunction = daf;
	/* fan-in is the previous layer size (0 for the input layer),
	 * fan-out the next layer size (0 for the output layer) */
	tmp->n_input = (layerId == 0) ? 0 : net->neurons[layerId - 1];
	tmp->n_output = (layerId == (net->layersize - 1)) ? 0 : net->neurons[layerId + 1];
	tmp->outputs = (layerId == net->layersize - 1) ? NULL : (neuron_tp*) malloc(sizeof(neuron_tp) * tmp->n_output);
	tmp->weights = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->gradient = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->gradientP = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->deltaWeights = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->deltaWeightsP = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->ipropDelta = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->ipropDeltaP = (float32*) calloc(tmp->n_output, sizeof(float32));
	if (tmp->n_output > 0) {
		/* same allocation sanity checks createNeuron performs */
		assert(tmp->weights != NULL);
		assert(tmp->deltaWeightsP != NULL);
		assert(tmp->gradient != NULL);
	}
	//RPROP step sizes start at 0.1
	for (k = 0; k < tmp->n_output; k++) {
		tmp->ipropDelta[k] = 0.1;
	}
	//initialize the weights and the array of neurons connected in output
	if (layerId < net->layersize - 1) {
		for (k = 0; k < net->neurons[layerId + 1]; k++) {
			tmp->weights[k] = rand01() - .5f;
			tmp->outputs[k] = getNeuron(net, layerId + 1, k);
		}
	}
	/* one extra slot for the bias neuron of the previous layer */
	tmp->inputs = (layerId == 0) ? NULL : (neuron_tp*) malloc(sizeof(neuron_tp) * (tmp->n_input + 1));
	if (layerId > 0) {
		for (k = 0; k < net->neurons[layerId - 1] + 1; k++) { //adding 1 for the bias
			tmp->inputs[k] = getNeuron(net, layerId - 1, k);
		}
	}

}
/**
 * Initializes the neuron at (layerId, pos) according to an explicit topology
 * adjacency matrix.
 *
 * @param layerId    layer index of the neuron
 * @param pos        column of the neuron inside its layer
 * @param topology   topology description; connections[ctr][k] == 1 means an
 *                   edge from this neuron to global neuron k, and
 *                   connections[k][ctr] == 1 an edge from neuron k into this one
 * @param numNeurons total number of neurons (rows/columns of connections)
 * @param net        network owning the neuron matrix
 * @param ctr        global index of this neuron inside topology (its row and
 *                   column in the connections matrix)
 */
void createNeuron(uint32 layerId, uint32 pos, topology_tp topology, uint32 numNeurons, netdata_t* net, uint32 ctr) {
	//fprintf(stderr, "i -> %d pos -> %d \n", layerId, pos);
	neuron_tp tmp;
	uint32 k, neuronId;

	tmp = getNeuron(net, layerId, pos);
	tmp->layer = layerId;
	tmp->pos = pos;
	//fprintf(stderr, "Got tmp\n");
	tmp->z = 0;
	tmp->a = 0;
	tmp->delta = 0;
	tmp->activationFunction = topology->activationFunctions[ctr];
	tmp->diffActivationFunction = topology->diffActivationFunctions[ctr];
	// fan-out: count the 1s in this neuron's row of the adjacency matrix
	tmp->n_output = 0;
	for (k = 0; k < numNeurons; k++) {
		tmp->n_output += topology->connections[ctr][k];
		//fprintf(stderr, "\ttopology->connections[%d][%d] = %d\n", ctr, k, topology->connections[ctr][k]);
		//fprintf(stderr, "\tOutputs = %d\n", tmp->n_output);
	}
	//fprintf(stderr, "N(%d,%d) has %d outputs\n", layerId, pos, tmp->n_output);
	tmp->outputs = (layerId == net->layersize - 1) ? NULL : (neuron_tp*) malloc(sizeof(neuron_tp) * tmp->n_output);
	//OUTPUTS + WEIGHTS
	tmp->weights = (float32*) calloc(sizeof(float32), tmp->n_output);
	tmp->gradient = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->gradientP = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->deltaWeights = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->deltaWeightsP = (float32*) calloc(tmp->n_output, sizeof(float32));
	tmp->ipropDelta = (float32*) calloc(tmp->n_output, sizeof(float32));
	// RPROP step sizes start at 0.1
	for (k = 0; k < tmp->n_output; k++) {
		tmp->ipropDelta[k] = 0.1;
	}
	tmp->ipropDeltaP = (float32*) calloc(tmp->n_output, sizeof(float32));
	assert(tmp->weights != NULL);
	assert(tmp->deltaWeightsP != NULL);
	assert(tmp->gradient != NULL);
	k = 0;
	neuronId = 0;
	// wire the outgoing edges: walk this neuron's row, mapping each connected
	// global index back to its (layer, pos) slot in the matrix
	for (k = 0; k < numNeurons; k++) {
		//tmp->n_output
		if (topology->connections[ctr][k] == 1) {
			tmp->weights[neuronId] = rand01() - .5f;
			tmp->outputs[neuronId] = getNeuron(net, topology->indexes[k].layer, topology->indexes[k].pos);
			neuronId++;
		}
	}
	//fprintf(stderr, "\n");
	// fan-in: count the 1s in this neuron's column
	tmp->n_input = 0;
	for (k = 0; k < numNeurons; k++) {
		tmp->n_input += topology->connections[k][ctr];
	}
	//fprintf(stderr, "N(%d,%d) has %d inputs\n", i, j, tmp->n_input);
	// +1 slot reserved for the bias input of the previous layer
	tmp->inputs = (layerId == 0) ? NULL : (neuron_tp*) malloc(sizeof(neuron_tp) * (tmp->n_input + 1));
	neuronId = 0;
	for (k = 0; k < numNeurons; k++) {
		//tmp->n_input
		if (topology->connections[k][ctr] == 1) {
			//fprintf(stderr, "topology->indexes[%d] -> {%d,%d}\n", k, topology->indexes[k].layer, topology->indexes[k].pos);
			tmp->inputs[neuronId] = getNeuron(net, topology->indexes[k].layer, topology->indexes[k].pos);
			neuronId++;
		}
	}
	//adding the bias to the inputs: the previous layer's bias neuron lives at
	//column net->neurons[layerId - 1] of that layer
	if (tmp->n_input > 0)
		tmp->inputs[tmp->n_input] = getNeuron(net, layerId - 1, net->neurons[layerId - 1]);
}

/*
 * Releases all per-neuron buffers of the neuron at (layerId, pos).
 * The neuron_t itself lives inside net->neuron_matrix and is freed
 * together with the matrix, not here.
 */
void destroyNeuron(netdata_t* net, uint32 layerId, uint32 pos) {
	neuron_tp n = getNeuron(net, layerId, pos);

	free(n->weights);
	free(n->outputs);
	free(n->gradient);
	free(n->gradientP);
	free(n->deltaWeights);
	free(n->deltaWeightsP);
	free(n->ipropDelta);
	free(n->ipropDeltaP);
	/* inputs is only allocated for neurons with incoming connections */
	if (n->n_input > 0) {
		free(n->inputs);
	}
	//free(n);
}

netdata_t* initNetwork(const int8* settingsFileName) {
//netdata_t* initNetwork(topology_t* topology, const int8* settingsFileName) {
	uint32 layerId, i, pos, k, ctr, ctr2, neuronId;
	neuron_tp bias;
	int32 status;

	uint32 numNeurons;
	topology_tp topology;
	topology = (topology_tp) malloc(sizeof(topology_t));
	//create the network structure
	netdata_t *net = (netdata_t*) malloc(sizeof(netdata_t));

	loadSettings(&(net->settings), settingsFileName);
	srand(net->settings.seed);
	status = readTopology(topology, net->settings.topologyFileName);
	if (status != EXIT_SUCCESS) {
		fprintf(stderr, "Error reading topology, aborting program\n");
		exit(status);
	}
	net->layersize = topology->layers;
	if (net->layersize < MIN_LAYERS) {
		fprintf(stderr, "Too few layers");
		return NULL;
	}
	net->neurons = (uint32*) malloc(net->layersize * sizeof(uint32));
	memcpy(net->neurons, topology->neurons, net->layersize * sizeof(uint32));

	/*for (i = 0; i < net->layersize-1; i++) {//add an extra bias node to each layer
	 net->neurons[i]++;
	 }*/

	//find maximum network.neurons size
	net->colums = net->neurons[0];
	numNeurons = net->neurons[0];
	for (i = 1; i < net->layersize; i++) {
		numNeurons += net->neurons[i];
		if (net->neurons[i] > net->colums)
			net->colums = net->neurons[i];

	}

	net->colums += 1; //add one more for the bias
	//DEFINE NETWORK STRUCTURE

	//alloc the matrix
	net->neuron_matrix = (neuron_t*) calloc(sizeof(neuron_t), net->colums * net->layersize);
	ctr = 0;
	for (layerId = 0; layerId < net->layersize; layerId++) {
		for (pos = 0; pos < net->neurons[layerId]; pos++) {
			//fprintf(stderr, "i -> %d pos -> %d \n", layerId, pos);
			createNeuron(layerId, pos, topology, numNeurons, net, ctr);
			//fprintf(stderr, "N(%d,%d) created\n", layerId, pos);
			ctr++;
		}
		if (layerId != net->layersize - 1) {
			//bias neuron
			//pos++;
			//fprintf(stderr, "Bias:  ");
			bias = getNeuron(net, layerId, pos);
			bias->layer = layerId;
			bias->pos = pos;
			//fprintf(stderr, "Bias(%d,%d)\n", layerId,pos);
			bias->z = 1.0f; //bias has a fixed value of 1 in the output
			bias->a = 1.0;
			bias->n_input = 0;
			bias->delta = 0;
			bias->activationFunction = activationLinear;
			bias->diffActivationFunction = diffActivationLinear;

			bias->n_output = net->neurons[layerId + 1];
			//fprintf(stderr, "N(%d,%d) has %d outputs\n", layerId, pos, tmp->n_output);
			bias->outputs = (neuron_tp*) malloc(sizeof(neuron_tp) * bias->n_output);
			//OUTPUTS + WEIGHTS
			bias->weights = (float32*) malloc(sizeof(float32) * bias->n_output);
			bias->gradient = (float32*) calloc(bias->n_output, sizeof(float32));
			bias->gradientP = (float32*) calloc(bias->n_output, sizeof(float32));
			bias->deltaWeights = (float32*) calloc(bias->n_output, sizeof(float32));
			bias->deltaWeightsP = (float32*) calloc(bias->n_output, sizeof(float32));
			bias->ipropDelta = (float32*) calloc(bias->n_output, sizeof(float32));
			bias->ipropDeltaP = (float32*) calloc(bias->n_output, sizeof(float32));
			for (k = 0; k < bias->n_output; k++) {
				bias->ipropDelta[k] = 0.1;
			}
			assert(bias->weights != NULL);
			assert(bias->deltaWeightsP != NULL);
			assert(bias->gradient != NULL);

			for (k = 0; k < bias->n_output; k++) {
				bias->weights[k] = rand01() - .5f;
				bias->outputs[k] = getNeuron(net, layerId + 1, k);
			}

		}

	}
	//set the errorFuntion
	net->errorComputation = topology->errorFunction;

	//set the algorithm for backpropogations
	fprintf(stderr, "%s\n", net->settings.algorithm);
	if (strcmp(net->settings.algorithm, "BACKPROP") == 0) {
		net->computeDeltaWeights = bpComputeDeltaWeights;
	} else if (strcmp(net->settings.algorithm, "RPROP") == 0) {
		net->computeDeltaWeights = irpComputeDeltaWeights;
	} else {
		fprintf(stderr, "Invalid learning algorithm selected, \"training.algorithm\" : \"[BACKPROP | RPROP]\" \n");
		exit(status);
	}
	//fprintf(stderr, "Start print matrix\n");
	//debugMatrix(net);
	freeTopology(topology);
	free(topology);
	return net;
}

/*
 * Tears down the whole network: frees every neuron's buffers (including the
 * bias neuron each non-output layer carries), then the neuron matrix, the
 * layer-size array, the settings strings and finally the net itself.
 */
void destroyNetwork(netdata_tp net) {
	uint32 layer, col, count;

	for (layer = 0; layer < net->layersize; layer++) {
		/* every layer except the last has one extra bias neuron */
		count = net->neurons[layer];
		if (layer != net->layersize - 1) {
			count++;
		}
		for (col = 0; col < count; col++) {
			destroyNeuron(net, layer, col);
		}
	}
	free(net->neuron_matrix);
	free(net->neurons);
	//destroy the settings
	free(net->settings.algorithm);
	free(net->settings.topologyFileName);
	free(net);
}

/**
 * HELPER FUNCTIONS
 */

#ifdef DEBUG
/* Prints one neuron's state (z, a, delta) and its per-output
 * (weight, gradient, delta-weight) triplets to stderr. */
static void printNeuronDebug(netdata_t* net, uint32 i, uint32 j) {
	uint32 k;
	neuron_t* tmp = getNeuron(net, i, j);
	fprintf(stderr, "Neuron nr: (%d, %d) \n", i, j);
	fprintf(stderr, "Z: %f \n", tmp->z);
	fprintf(stderr, "A: %f \n", tmp->a);
	fprintf(stderr, "delta: %f \n", tmp->delta);
	for (k = 0; k < tmp->n_output; k++) {
		fprintf(stderr, "(w: %f grad: %f d: %f) ", tmp->weights[k], tmp->gradient[k], tmp->deltaWeights[k]);
	}
	fprintf(stderr, "\n");
}
#endif

/**
 * Dumps every neuron of the network to stderr, layer by layer, including the
 * bias neuron of each non-output layer. Compiled to a no-op unless DEBUG is
 * defined.
 */
void debugMatrix(netdata_t* net) {
#ifdef DEBUG
	uint32 i, j;
	for (i = 0; i < net->layersize; i++) {
		for (j = 0; j < net->neurons[i]; j++) {
			printNeuronDebug(net, i, j);
		}
		if (i != net->layersize - 1) {
			/* bias neuron sits right after the layer's regular neurons */
			printNeuronDebug(net, i, j);
		}
		fprintf(stderr, "\n\n");
	}
	fprintf(stderr, "All neurons printed\n");
#endif
}

/**
 * Returns a pointer to the neuron at layer i, column j of the flat
 * neuron matrix (row-major, net->colums entries per layer).
 *
 * Note: an out-of-range index only produces a warning on stderr and the
 * (invalid) address is still returned; callers must pass valid coordinates.
 */
neuron_t* getNeuron(netdata_t* net, uint32 i, uint32 j) {
	uint32 index;
	index = i * net->colums + j;
	if (!(index < net->layersize * net->colums)) {
		/* fix: uint32 arguments need %u, not %d */
		fprintf(stderr, "Array index out of bound looking for neuron n(%u,%u) at index %u, but size is %u\n", i, j, index, net->layersize * net->colums);
	}
	return &net->neuron_matrix[index];
}

/**
 * Returns a uniformly distributed pseudo-random value in [0, 1].
 * Uses rand(); seed with srand() beforehand for reproducible runs.
 */
float32 rand01(void) {
	return (float32) rand() / (float32) RAND_MAX;
}

/**
 * Serializes all connection weights to a binary file, layer by layer,
 * including the bias weights of each non-output layer. The layout mirrors
 * the iteration order of load(), which reads the file back.
 *
 * @param net      network whose weights are written
 * @param fileName destination path (opened in binary write mode)
 */
void save(netdata_t* net, int8* fileName) {
	FILE* of;
	neuron_tp current;
	uint32 layer, pos;

	of = fopen(fileName, "wb");
	if (of == NULL) {
		/* fix: the original dereferenced a NULL stream on open failure */
		fprintf(stderr, "Cannot open file for writing: %s\n", fileName);
		return;
	}
	for (layer = 0; layer < net->layersize; layer++) {
		for (pos = 0; pos < net->neurons[layer]; pos++) {
			current = getNeuron(net, layer, pos);
			fwrite(current->weights, sizeof(float32), current->n_output, of);
		}
		if (layer != net->layersize - 1) { //adding also the bias weights
			current = getNeuron(net, layer, pos);
			fwrite(current->weights, sizeof(float32), current->n_output, of);
		}
	}
	/* fclose flushes the buffered writes; a failure here means data loss */
	if (fclose(of) != 0) {
		fprintf(stderr, "Error closing file: %s\n", fileName);
	}
}
/**
 * Restores all connection weights from a binary file produced by save();
 * the iteration order (regular neurons, then the bias of each non-output
 * layer) must match save() exactly.
 *
 * @param net      network whose weights are overwritten in place
 * @param fileName source path (opened in binary read mode)
 */
void load(netdata_t* net, int8* fileName) {
	FILE* of;
	neuron_tp current;
	uint32 layer, pos;

	of = fopen(fileName, "rb");
	if (of == NULL) {
		/* fix: the original dereferenced a NULL stream on open failure */
		fprintf(stderr, "Cannot open file for reading: %s\n", fileName);
		return;
	}
	for (layer = 0; layer < net->layersize; layer++) {
		for (pos = 0; pos < net->neurons[layer]; pos++) {
			current = getNeuron(net, layer, pos);
			/* fix: a short read previously went unnoticed, leaving part of
			 * the network with stale weights */
			if (fread(current->weights, sizeof(float32), current->n_output, of) != current->n_output) {
				fprintf(stderr, "Truncated or corrupt weight file: %s\n", fileName);
				fclose(of);
				return;
			}
		}
		if (layer != net->layersize - 1) { //adding also the bias weights
			current = getNeuron(net, layer, pos);
			if (fread(current->weights, sizeof(float32), current->n_output, of) != current->n_output) {
				fprintf(stderr, "Truncated or corrupt weight file: %s\n", fileName);
				fclose(of);
				return;
			}
		}
	}
	fclose(of);
}

/**
 * Allocates newNet's storage (layer-size array, neuron matrix, settings and
 * every neuron's buffers) sized after originalNet. Field values are NOT
 * copied here; call copy() afterwards to fill them in.
 */
void reserveNetSpace(netdata_tp originalNet, netdata_tp newNet) {
	uint32 layer, pos;
	neuron_tp currentSrc, currentDst;

	newNet->neurons = (uint32*) malloc(originalNet->layersize * sizeof(uint32));
	/* fix: use calloc like initNetwork does, so every neuron field that
	 * reserveNeuronSpace does not touch (n_input, n_output, inputs of bias
	 * neurons, ...) starts zeroed/NULL instead of holding garbage */
	newNet->neuron_matrix = (neuron_t*) calloc(originalNet->colums * originalNet->layersize, sizeof(neuron_t));

	newNet->colums = originalNet->colums;
	newNet->layersize = originalNet->layersize;

	for (layer = 0; layer < originalNet->layersize; layer++) {
		for (pos = 0; pos < originalNet->neurons[layer]; pos++) {
			currentSrc = getNeuron(originalNet, layer, pos);
			currentDst = getNeuron(newNet, layer, pos);
			reserveNeuronSpace(currentSrc, currentDst);
		}
		if (layer != originalNet->layersize - 1) { //adding also the bias weights
			currentSrc = getNeuron(originalNet, layer, pos);
			currentDst = getNeuron(newNet, layer, pos);
			reserveNeuronSpace(currentSrc, currentDst);
		}
	}
	reserveSpaceForSettings(&(originalNet->settings), &(newNet->settings));
}

/*
 * Deep-copies src into dst. dst must already have its storage reserved
 * (see reserveNetSpace); this routine only fills values in, it does not
 * allocate. Neuron cross-references are remapped into dst's own matrix.
 */
void copy(netdata_tp src, netdata_tp dst) {
	uint32 layer, col, count;

	dst->colums = src->colums;
	dst->layersize = src->layersize;
	dst->error = src->error;
	dst->errorComputation = src->errorComputation;
	dst->computeDeltaWeights = src->computeDeltaWeights;
	copySettings(&(src->settings), &(dst->settings));
	memcpy(dst->neurons, src->neurons, dst->layersize * sizeof(uint32));

	for (layer = 0; layer < src->layersize; layer++) {
		/* each non-output layer carries one extra bias neuron */
		count = src->neurons[layer];
		if (layer != src->layersize - 1) {
			count++;
		}
		for (col = 0; col < count; col++) {
			copyNeuron(getNeuron(src, layer, col), getNeuron(dst, layer, col), dst);
		}
	}
}

/**
 * Allocates newNeuron's buffers with the same capacities as originalNeuron.
 * All buffers are zero-initialized; values are filled in later by copyNeuron.
 */
void reserveNeuronSpace(neuron_tp originalNeuron, neuron_tp newNeuron) {
	uint32 n = originalNeuron->n_output;

	newNeuron->weights = (float32*) calloc(n, sizeof(float32));
	newNeuron->deltaWeights = (float32*) calloc(n, sizeof(float32));
	newNeuron->deltaWeightsP = (float32*) calloc(n, sizeof(float32));
	newNeuron->gradient = (float32*) calloc(n, sizeof(float32));
	newNeuron->gradientP = (float32*) calloc(n, sizeof(float32));
	newNeuron->ipropDelta = (float32*) calloc(n, sizeof(float32));
	newNeuron->ipropDeltaP = (float32*) calloc(n, sizeof(float32));
	newNeuron->outputs = (neuron_tp*) calloc(n, sizeof(neuron_tp));
	if (originalNeuron->n_input > 0) { //only if it has inputs
		/* +1 slot for the bias input */
		newNeuron->inputs = (neuron_tp*) calloc(originalNeuron->n_input + 1, sizeof(neuron_tp));
	} else {
		/* fix: was left uninitialized for input-less neurons, leaving a
		 * dangling garbage pointer until copyNeuron ran */
		newNeuron->inputs = NULL;
	}
}
/**
 * Copies src's state into dst. dst's buffers must already be allocated
 * with the same capacities (see reserveNeuronSpace).
 *
 * Scalar fields and weight/gradient arrays are copied by value; the
 * inputs/outputs pointer arrays are NOT copied verbatim — each entry is
 * remapped to the neuron at the same (layer, pos) inside dstNet's own
 * matrix, so the copied network references its own neurons.
 */
void copyNeuron(neuron_tp src, neuron_tp dst, netdata_tp dstNet) {
	size_t i;

	dst->a = src->a;
	dst->activationFunction = src->activationFunction;
	dst->delta = src->delta;
	dst->diffActivationFunction = src->diffActivationFunction;
	dst->n_input = src->n_input;
	dst->n_output = src->n_output;
	dst->z = src->z;
	dst->layer = src->layer;
	dst->pos = src->pos;

	// per-output training state: plain value copies into pre-allocated buffers
	memcpy(dst->deltaWeights, src->deltaWeights, dst->n_output * sizeof(float32));
	memcpy(dst->deltaWeightsP, src->deltaWeightsP, dst->n_output * sizeof(float32));
	memcpy(dst->gradient, src->gradient, dst->n_output * sizeof(float32));
	memcpy(dst->gradientP, src->gradientP, dst->n_output * sizeof(float32));
	memcpy(dst->ipropDelta, src->ipropDelta, dst->n_output * sizeof(float32));
	memcpy(dst->ipropDeltaP, src->ipropDeltaP, dst->n_output * sizeof(float32));
	memcpy(dst->weights, src->weights, dst->n_output * sizeof(float32));
	//pointers to other neurons... cannot be the on the same address...
	if (dst->n_input > 0) { //only if it has inputs
		for (i = 0; i < dst->n_input + 1; i++) { //+1 for bias
			// remap each input reference into dstNet's matrix
			dst->inputs[i] = getNeuron(dstNet, src->inputs[i]->layer, src->inputs[i]->pos);
		}
	} else {
		dst->inputs = NULL;
	}
	for (i = 0; i < dst->n_output; i++) {
		// remap each output reference into dstNet's matrix
		dst->outputs[i] = getNeuron(dstNet, src->outputs[i]->layer, src->outputs[i]->pos);
	}
}

/**
 * Builds a three-layer (input / hidden / output) fully connected network.
 *
 * @param inputs           number of input neurons
 * @param hidden           number of hidden neurons
 * @param output           number of output neurons
 * @param huaf, dhuaf      hidden-layer activation function and its derivative
 * @param ouaf, douaf      output-layer activation function and its derivative
 * @param errorComputation error function applied to (output, target) vectors
 * @param settings         training settings, deep-copied into the network
 * @return the newly allocated network; exits the process if
 *         settings->algorithm names an unknown learning algorithm
 */
netdata_tp initFullyConnectedNetwork(uint32 inputs, uint32 hidden, uint32 output, float32 (*huaf)(netdata_tp, float32 x), float32 (*dhuaf)(netdata_tp, float32 x), float32 (*ouaf)(netdata_tp, float32 x), float32 (*douaf)(netdata_tp, float32 x), float32 (*errorComputation)(float32 *output, float32 *target, size_t size), settings_tp settings) {
	netdata_tp net;
	uint32 pos;
	neuron_tp bias;

	net = (netdata_tp) malloc(sizeof(netdata_t));

	// deep-copy the caller's settings so the network owns its own copy
	reserveSpaceForSettings(settings, &(net->settings));
	copySettings(settings, &(net->settings));
	net->layersize = 3;

	net->neurons = (uint32*) malloc(net->layersize * sizeof(uint32));
	net->neurons[0] = inputs;
	net->neurons[1] = hidden;
	net->neurons[2] = output;

	// widest layer determines the matrix column count; +1 for the bias column
	net->colums = fmaxf(inputs, hidden);
	net->colums = fmaxf(net->colums, output);
	net->colums += 1; //add one more for the bias
	net->neuron_matrix = (neuron_t*) calloc(sizeof(neuron_t), net->colums * net->layersize);

	//INPUT LAYER
	for (pos = 0; pos < net->neurons[0]; pos++) {
		createSimpleNeuron(0, pos, net, activationLinear, diffActivationLinear);
	}
	// bias neuron of the input layer: built as a regular neuron, then forced
	// to emit a constant 1
	createSimpleNeuron(0, pos, net, activationLinear, diffActivationLinear);
	bias = getNeuron(net, 0, pos);
	bias->a = 1;
	bias->z = 1;
	//HIDDEN LAYER
	for (pos = 0; pos < net->neurons[1]; pos++) {
		createSimpleNeuron(1, pos, net, huaf, dhuaf);
	}
	// NOTE(review): unlike initNetwork's biases (n_input == 0), this bias is
	// built by createSimpleNeuron and so keeps n_input = neurons[0] with an
	// allocated inputs array; if the forward pass recomputes z for any neuron
	// that has inputs, the constant bias output could be overwritten — verify
	// against the feedforward code.
	createSimpleNeuron(1, pos, net, activationLinear, diffActivationLinear);
	bias = getNeuron(net, 1, pos);
	bias->a = 1;
	bias->z = 1;
	//OUTPUT LAYER (no bias neuron after the last layer)
	for (pos = 0; pos < net->neurons[2]; pos++) {
		createSimpleNeuron(2, pos, net, ouaf, douaf);
	}
	net->errorComputation = errorComputation;

	// select the weight-update algorithm from the settings
	if (strcmp(net->settings.algorithm, "BACKPROP") == 0) {
		net->computeDeltaWeights = bpComputeDeltaWeights;
	} else if (strcmp(net->settings.algorithm, "RPROP") == 0) {
		net->computeDeltaWeights = irpComputeDeltaWeights;
	} else {
		fprintf(stderr, "Invalid learning algorithm selected, \"training.algorithm\" : \"[BACKPROP | RPROP]\" \n");
		exit(EXIT_FAILURE);
	}

	return net;
}
