#include "perceptron.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

double
sigmoid(double x)
{
	/* Logistic function: squashes any real x into the open interval (0, 1). */
	const double e = exp(-x);
	return (1.0 / (1.0 + e));
}


// Build a perceptron for `data_dim` inputs. Training stops after
// `max_iteractions` epochs (see Train); `learning_rate` scales each
// weight update. Weights and bias start at random values in [0, 1].
Perceptron::Perceptron(int data_dim, double max_iteractions, double learning_rate)
	: _data_dim(data_dim),
	  _learning_rate(learning_rate),
	  _max_iteractions(max_iteractions)
{
	_alloc_stuff();
	_randomize_weights();
}


// Releases the heap-allocated weight vector.
Perceptron::~Perceptron()
{
	_dealloc_stuff();
}


// Forward pass: weighted sum of the _data_dim inputs plus the bias,
// squashed through the logistic function. `data` must hold at least
// _data_dim values. Returns the activation in (0, 1).
double 
Perceptron::RunNetwork(double *data)
{
	double activation = _bias;

	for (int j = 0; j < _data_dim; j++)
		activation += _weights[j] * data[j];

	return sigmoid(activation);
}


void 
Perceptron::Train(double **input, double *output, int num_samples)
{
	double error;
	int converged = 0;
	int num_iteractions = 0;

	// TODO: train batch
	// TODO: train using inverse

	while (!converged && (num_iteractions < _max_iteractions))
	{
		error = _train_step(input, output, num_samples);
		converged = _convergence_test();
		num_iteractions++;

		fprintf(stderr, "error: %lf\n", error);
	}
}


// Allocates the zero-initialized weight vector (_data_dim entries).
// Aborts with a message if the allocation fails — previously the NULL
// return went unchecked and would crash later in _randomize_weights.
void 
Perceptron::_alloc_stuff()
{
	_weights = (double *) calloc (_data_dim, sizeof(double));

	if (_weights == NULL && _data_dim > 0)
	{
		fprintf(stderr, "Perceptron: failed to allocate %d weights\n", _data_dim);
		exit(EXIT_FAILURE);
	}
}


// Frees the weight vector. free(NULL) is a no-op, so this is safe even
// if allocation never happened.
void 
Perceptron::_dealloc_stuff()
{
	free(_weights);
	_weights = NULL;	// don't leave a dangling pointer behind
}


// Initializes every weight and the bias with a uniform draw from [0, 1].
// Uses rand() as-is; seeding (srand) is left to the caller.
void 
Perceptron::_randomize_weights()
{
	for (int j = 0; j < _data_dim; j++)
		_weights[j] = rand() / (double) RAND_MAX;

	_bias = rand() / (double) RAND_MAX;
}


// One epoch: for each sample, run the network, compute the target
// minus prediction delta, and apply the delta rule immediately
// (online/stochastic update). Returns the summed absolute error.
double
Perceptron::_train_step(double **input, double *output, int num_samples)
{
	double epoch_error = 0.0;

	for (int s = 0; s < num_samples; s++)
	{
		double prediction = RunNetwork(input[s]);
		double delta = output[s] - prediction;

		_update_weights(delta, input[s]);
		epoch_error += fabs(delta);
	}

	return epoch_error;
}


// Stub: always reports "not converged" (0), so Train() only stops when
// it exhausts _max_iteractions epochs.
// TODO: implement a real criterion, e.g. stop once the epoch error
// returned by _train_step drops below a threshold (needs new state in
// the class declaration to carry the last error / threshold).
int
Perceptron::_convergence_test()
{
	return 0;
}


// Delta rule: shift each weight along its input, scaled by the
// learning rate and the prediction error `delta`; the bias moves by
// learning_rate * delta. (Multiplication order kept so the floating-
// point results match the previous implementation exactly.)
void
Perceptron::_update_weights(double delta, double *data)
{
	for (int j = 0; j < _data_dim; j++)
		_weights[j] += _learning_rate * data[j] * delta;

	_bias += _learning_rate * delta;
}
