/*
 * mlp.cpp
 *
 *  Created on: Jan 25, 2014
 *      Author: filipe
 */
#include <math.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include "mlp.h"

// ---- Training configuration (populated by mlp_initialize_config) ----
bool config_use_batch;          // true: batch updates once per iteration; false: online (per-example) updates
double config_acceptable_error; // convergence threshold compared against the per-iteration net error
double config_learning_factor;  // learning rate applied when accumulating weight/bias deltas
int config_max_iteractions;     // upper bound on outer training iterations
int config_num_epochs = 10;     // passes over the training set per outer iteration

// ---- Network topology ----
int config_data_dim;            // dimension of each input example
int config_num_layers;          // number of neuron layers (input vector itself is not a layer)
int *config_num_neurons; // [num_layers]
int *config_num_synapsis;  // [num_layers] — inputs per neuron of each layer (see mlp_initialize_num_synapsis)

// ---- Network state (allocated by mlp_alloc_net, released by mlp_free) ----
double ***mlp_weights; // [num_layers] [num_neurons] [num_synapsis]
double ***mlp_deltas; // [num_layers] [num_neurons] [num_synapsis]
double ***mlp_batch_deltas; // [num_layers] [num_neurons] [num_synapsis]
double **mlp_batch_bias; // [num_layers] [num_neurons]
double **mlp_neurons; // [num_layers] [num_neurons]
double **mlp_nets; // [num_layers] [num_neurons]
double **mlp_bias; // [num_layers] [num_neurons]

void
mlp_print()
{
	// Dump the whole network to stdout: for every layer, each neuron's
	// current output, bias and incoming weights.
	for (int layer = 0; layer < config_num_layers; layer++)
	{
		printf("Layer %d:\n", layer);

		for (int neuron = 0; neuron < config_num_neurons[layer]; neuron++)
		{
			printf("\tNeuron %d [Output: %.2lf] [bias: %.2lf]\n", neuron, mlp_neurons[layer][neuron], mlp_bias[layer][neuron]);
			printf("\t\tWeights: ");

			for (int synapse = 0; synapse < config_num_synapsis[layer]; synapse++)
				printf("%.2lf, ", mlp_weights[layer][neuron][synapse]);

			printf("\n");
		}
	}
}


void
mlp_initialize_num_synapsis()
{
	// Each neuron in layer i has one synapse per output of the previous
	// layer; layer 0 connects directly to the input vector.
	config_num_synapsis = (int *) calloc (config_num_layers, sizeof(int));

	for (int layer = 0; layer < config_num_layers; layer++)
		config_num_synapsis[layer] = (layer == 0) ? config_data_dim : config_num_neurons[layer - 1];
}


void
mlp_initialize_config(int num_layers, int *num_neurons_per_layer, int data_dim, double learning_factor, int max_iteractions, double acceptable_error, bool use_batch)
{
	// Copy the caller's configuration into the module globals and derive
	// the per-layer synapse counts. The num_neurons_per_layer array is
	// borrowed, not copied — the caller keeps ownership of it.
	config_num_layers = num_layers;
	config_num_neurons = num_neurons_per_layer;
	config_data_dim = data_dim;

	config_learning_factor = learning_factor;
	config_max_iteractions = max_iteractions;
	config_acceptable_error = acceptable_error;
	config_use_batch = use_batch;

	mlp_initialize_num_synapsis();
}


void
mlp_alloc_net()
{
	// Allocate every per-layer, per-neuron and per-synapse array.
	// calloc zero-initializes, so all weights, deltas and accumulators
	// start at 0 until mlp_randomize_net overwrites weights and biases.
	mlp_batch_deltas = (double ***) calloc (config_num_layers, sizeof(double**));
	mlp_weights = (double ***) calloc (config_num_layers, sizeof(double**));
	mlp_deltas = (double ***) calloc (config_num_layers, sizeof(double**));
	mlp_neurons = (double **) calloc (config_num_layers, sizeof(double*));
	mlp_nets = (double **) calloc (config_num_layers, sizeof(double*));
	mlp_bias = (double **) calloc (config_num_layers, sizeof(double*));
	mlp_batch_bias = (double **) calloc (config_num_layers, sizeof(double*));

	for (int layer = 0; layer < config_num_layers; layer++)
	{
		int num_neurons = config_num_neurons[layer];

		mlp_batch_deltas[layer] = (double **) calloc (num_neurons, sizeof(double*));
		mlp_weights[layer] = (double **) calloc (num_neurons, sizeof(double*));
		mlp_deltas[layer] = (double **) calloc (num_neurons, sizeof(double*));
		mlp_neurons[layer] = (double *) calloc (num_neurons, sizeof(double));
		mlp_nets[layer] = (double *) calloc (num_neurons, sizeof(double));
		mlp_bias[layer] = (double *) calloc (num_neurons, sizeof(double));
		mlp_batch_bias[layer] = (double *) calloc (num_neurons, sizeof(double));

		for (int neuron = 0; neuron < num_neurons; neuron++)
		{
			mlp_batch_deltas[layer][neuron] = (double *) calloc (config_num_synapsis[layer], sizeof(double));
			mlp_weights[layer][neuron] = (double *) calloc (config_num_synapsis[layer], sizeof(double));
			mlp_deltas[layer][neuron] = (double *) calloc (config_num_synapsis[layer], sizeof(double));
		}
	}
}


// Returns a uniform pseudo-random value in [-1.0, 1.0].
static double
mlp_random_unit()
{
	return (((double) rand() / (double) RAND_MAX) * 2.0 - 1.0);
}


void
mlp_randomize_net()
{
	// Seed every bias and weight with a random value in [-1, 1].
	// The bias is drawn before the weights of each neuron, preserving
	// the original rand() call order.
	for (int layer = 0; layer < config_num_layers; layer++)
	{
		for (int neuron = 0; neuron < config_num_neurons[layer]; neuron++)
		{
			mlp_bias[layer][neuron] = mlp_random_unit();

			for (int synapse = 0; synapse < config_num_synapsis[layer]; synapse++)
				mlp_weights[layer][neuron][synapse] = mlp_random_unit();
		}
	}
}


void
mlp_build_netowrk()
{
	// Seed the PRNG, allocate all network storage, then randomize
	// weights and biases. (Name typo kept: it is the public interface.)
	srand((unsigned) time(NULL));

	mlp_alloc_net();
	mlp_randomize_net();
}


double
logistic(double value)
{
	// Standard sigmoid: maps any real value into (0, 1).
	return 1.0 / (1.0 + exp(-value));
}


double
logistic_derivative(double value)
{
	// d/dx logistic(x) = logistic(x) * (1 - logistic(x)).
	// Evaluate the sigmoid once instead of twice (the original called
	// logistic() two times per invocation); same result, half the exp() cost.
	double s = logistic(value);

	return s * (1.0 - s);
}


// Activation function applied to every neuron's net input; currently the
// logistic sigmoid. Kept as a wrapper so the activation can be swapped in
// one place (together with mlp_activation_derivative).
double
mlp_activation(double value)
{
	return logistic(value);
}


// Derivative of the activation function, used by backpropagation
// (mlp_update_deltas). Must stay paired with mlp_activation.
double
mlp_activation_derivative(double value)
{
	return logistic_derivative(value);
}


void
mlp_run_net(double *input)
{
	// Forward pass: feed `input` (length config_data_dim) through every
	// layer, storing the pre-activation sums in mlp_nets and the activated
	// outputs in mlp_neurons. Each layer consumes the previous layer's
	// outputs; layer 0 consumes the raw input vector.
	double *layer_input = input;

	for (int layer = 0; layer < config_num_layers; layer++)
	{
		for (int neuron = 0; neuron < config_num_neurons[layer]; neuron++)
		{
			double net = 0.0;

			for (int synapse = 0; synapse < config_num_synapsis[layer]; synapse++)
				net += mlp_weights[layer][neuron][synapse] * layer_input[synapse];

			// Bias is added last, matching the original accumulation order.
			net += mlp_bias[layer][neuron];

			mlp_nets[layer][neuron] = net;
			mlp_neurons[layer][neuron] = mlp_activation(net);
		}

		layer_input = mlp_neurons[layer];
	}
}


void
mlp_clean_deltas()
{
	// Reset every per-synapse backpropagation delta to zero.
	for (int layer = 0; layer < config_num_layers; layer++)
		for (int neuron = 0; neuron < config_num_neurons[layer]; neuron++)
			for (int synapse = 0; synapse < config_num_synapsis[layer]; synapse++)
				mlp_deltas[layer][neuron][synapse] = 0.0;
}


void
mlp_clean_batch_deltas()
{
	// Reset the accumulated weight and bias updates before a new batch
	// (or, in online mode, before each example).
	for (int layer = 0; layer < config_num_layers; layer++)
	{
		for (int neuron = 0; neuron < config_num_neurons[layer]; neuron++)
		{
			mlp_batch_bias[layer][neuron] = 0.0;

			for (int synapse = 0; synapse < config_num_synapsis[layer]; synapse++)
				mlp_batch_deltas[layer][neuron][synapse] = 0.0;
		}
	}
}


void
mlp_update_deltas(double *expected_output, double *net_error)
{
	// Backpropagation. Computes the error delta of every neuron (stored
	// redundantly once per synapse in mlp_deltas, as the rest of the code
	// expects) and accumulates the output layer's absolute error into
	// *net_error. Assumes mlp_run_net was just called for this example.
	int i, j, k, l;
	int output_layer = config_num_layers - 1;

	// Output layer: delta = f'(net) * (target - output).
	for (j = 0; j < config_num_neurons[output_layer]; j++)
	{
		double neuron_error = expected_output[j] - mlp_neurons[output_layer][j];

		// sqrt(e * e) is simply |e| — use fabs directly.
		(*net_error) += fabs(neuron_error);

		// Hoisted out of the synapse loop: the derivative does not depend on k.
		double derivative = mlp_activation_derivative(mlp_nets[output_layer][j]);

		for (k = 0; k < config_num_synapsis[output_layer]; k++)
			mlp_deltas[output_layer][j][k] = derivative * neuron_error;
	}

	// Hidden layers, back to front.
	// BUG FIX: the loop previously stopped at i > 0, so layer 0 (the first
	// layer) never received deltas and its weights never trained; iterate
	// down to i >= 0.
	for (i = (config_num_layers - 2); i >= 0; i--)
	{
		for (j = 0; j < config_num_neurons[i]; j++)
		{
			// Invariant across both k and l — compute once per neuron.
			double derivative = mlp_activation_derivative(mlp_nets[i][j]);

			for (k = 0; k < config_num_synapsis[i]; k++)
			{
				mlp_deltas[i][j][k] = 0.0;

				// Delta is the derivative times the error signal propagated
				// back from every neuron of the next layer ([l][0] holds the
				// neuron's delta, replicated per synapse).
				for (l = 0; l < config_num_neurons[i + 1]; l++)
				{
					mlp_deltas[i][j][k] +=
						derivative *
						mlp_deltas[i + 1][l][0] *
						mlp_weights[i + 1][l][j];
				}
			}
		}
	}
}


void
mlp_update_weights()
{
	// Apply the accumulated updates: each weight gains its batch delta,
	// each neuron's bias gains its batch bias delta.
	for (int layer = 0; layer < config_num_layers; layer++)
	{
		for (int neuron = 0; neuron < config_num_neurons[layer]; neuron++)
		{
			for (int synapse = 0; synapse < config_num_synapsis[layer]; synapse++)
				mlp_weights[layer][neuron][synapse] += mlp_batch_deltas[layer][neuron][synapse];

			mlp_bias[layer][neuron] += mlp_batch_bias[layer][neuron];
		}
	}
}


int
convergence_test(double net_error)
{
	// 1 once the iteration's error drops below the configured threshold,
	// 0 otherwise.
	return (net_error < config_acceptable_error) ? 1 : 0;
}


void
mlp_update_batch_deltas(double *input)
{
	// Accumulate this example's gradient contribution: for each synapse,
	// learning_factor * delta * (input that fed the synapse); for each
	// bias, learning_factor * the neuron's delta (stored at index 0).
	// Assumes mlp_run_net and mlp_update_deltas ran for this example.
	double *layer_input = input;

	for (int layer = 0; layer < config_num_layers; layer++)
	{
		for (int neuron = 0; neuron < config_num_neurons[layer]; neuron++)
		{
			for (int synapse = 0; synapse < config_num_synapsis[layer]; synapse++)
			{
				mlp_batch_deltas[layer][neuron][synapse] +=
					config_learning_factor *
					mlp_deltas[layer][neuron][synapse] *
					layer_input[synapse];
			}

			mlp_batch_bias[layer][neuron] +=
				config_learning_factor *
				mlp_deltas[layer][neuron][0];
		}

		layer_input = mlp_neurons[layer];
	}
}


// Trains the network on `num_examples` examples until the per-iteration
// error falls below config_acceptable_error or config_max_iteractions is
// reached. Mode is chosen by config_use_batch: batch applies accumulated
// weight updates once per outer iteration; online applies them after every
// single example. Prints the running error to stderr each iteration.
void
mlp_train(double **input, double **expected_output, int num_examples)
{
	int i, epoch;
	int converged = 0;
	int num_iteractions = 0;

	mlp_clean_deltas();

	while ((!converged) && (num_iteractions < config_max_iteractions))
	{
		double net_error = 0.0;

		if (config_use_batch)
		{
			// Batch mode: accumulate gradient contributions over all epochs
			// and examples, then update the weights exactly once.
			mlp_clean_batch_deltas();

			for (epoch = 0; epoch < config_num_epochs; epoch++)
			{
				for (i = 0; i < num_examples; i++)
				{
					mlp_run_net(input[i]);
					mlp_update_deltas(expected_output[i], &net_error);
					mlp_update_batch_deltas(input[i]);
				}
			}

			mlp_update_weights();
			// NOTE(review): error is averaged over epochs only, not over
			// num_examples — confirm this is the intended scale for
			// comparison against config_acceptable_error.
			net_error /= ((double) config_num_epochs);
		}
		else
		{
			// Online (stochastic) mode: clear accumulators, backpropagate
			// and apply the weight update after each individual example.
			for (epoch = 0; epoch < config_num_epochs; epoch++)
			{
				for (i = 0; i < num_examples; i++)
				{
					mlp_clean_batch_deltas();
					mlp_run_net(input[i]);
					mlp_update_deltas(expected_output[i], &net_error);
					mlp_update_batch_deltas(input[i]);
					mlp_update_weights();
				}
			}

			net_error /= ((double) config_num_epochs);
		}

		converged = convergence_test(net_error);
		fprintf(stderr, "%lf\n", net_error);
		num_iteractions++;
	}
}


void
mlp_upate_output_net(double *output)
{
	int i;

	for (i = 0; i < config_data_dim; i++)
	{
		output[i] = mlp_neurons[config_num_layers - 1][i];
	}
}


void
mlp_test(double **input, double **net_output, int num_examples)
{
	// Run every test example through the trained network and store the
	// output layer's activations into the corresponding net_output row.
	for (int example = 0; example < num_examples; example++)
	{
		mlp_run_net(input[example]);
		mlp_upate_output_net(net_output[example]);
	}
}


void
mlp_free()
{
	// Release all storage allocated by mlp_alloc_net and
	// mlp_initialize_num_synapsis, innermost arrays first.
	// (config_num_neurons is caller-owned and is not freed here.)
	for (int layer = 0; layer < config_num_layers; layer++)
	{
		for (int neuron = 0; neuron < config_num_neurons[layer]; neuron++)
		{
			free(mlp_batch_deltas[layer][neuron]);
			free(mlp_weights[layer][neuron]);
			free(mlp_deltas[layer][neuron]);
		}

		free(mlp_batch_deltas[layer]);
		free(mlp_weights[layer]);
		free(mlp_deltas[layer]);
		free(mlp_neurons[layer]);
		free(mlp_nets[layer]);
		free(mlp_bias[layer]);
		free(mlp_batch_bias[layer]);
	}

	free(config_num_synapsis);
	free(mlp_batch_deltas);
	free(mlp_weights);
	free(mlp_deltas);
	free(mlp_neurons);
	free(mlp_nets);
	free(mlp_bias);
	free(mlp_batch_bias);
}

