/*
 * CNeuralNet.cpp
 *
 *  Created on: 26 Dec 2013
 *      Author: benjamin
 */

#include "CNeuralNet.h"

// Logistic function: squashes any real value into the open interval (0, 1).
inline double sigmoid(double t) {
	const double e = exp(-t);
	return 1.0 / (1.0 + e);
}
// Derivative of the logistic function at t, expressed via its own value:
// sigma'(t) = sigma(t) * (1 - sigma(t)).
inline double sigmoid_d(double t) {
	const double v = sigmoid(t);
	return v - v * v;
}

// Constructs a 3-layer (input / hidden / output) network and allocates all
// weight matrices and per-layer activation buffers. Weights are NOT
// initialized here — call initWeights() (train() does so automatically).
CNeuralNet::CNeuralNet(uint inputLayerSize, uint hiddenLayerSize, uint outputLayerSize, double lRate):
		_inputLayerSize(inputLayerSize),
		_hiddenLayerSize(hiddenLayerSize),
		_outputLayerSize(outputLayerSize),
		_lRate(lRate)
{
	// Input -> hidden weight matrix: one row per hidden node.
	_weights_h_i = new double *[hiddenLayerSize];
	for (uint row = 0; row < hiddenLayerSize; ++row)
		_weights_h_i[row] = new double[inputLayerSize];

	// Hidden -> output weight matrix: one row per output node.
	_weights_o_h = new double *[outputLayerSize];
	for (uint row = 0; row < outputLayerSize; ++row)
		_weights_o_h[row] = new double[hiddenLayerSize];

	// Scratch buffers holding the most recent activations of each layer,
	// reused by feedForward() and read back during backpropagation.
	_inputs = new double[inputLayerSize];
	_hidden = new double[hiddenLayerSize];
	_outputs = new double[outputLayerSize];
}

// Frees every allocation made in the constructor: each matrix row first,
// then the row-pointer arrays, then the activation scratch buffers.
CNeuralNet::~CNeuralNet() {
	for (uint h = 0; h < _hiddenLayerSize; ++h)
		delete[] _weights_h_i[h];
	delete[] _weights_h_i;
	for (uint o = 0; o < _outputLayerSize; ++o)
		delete[] _weights_o_h[o];
	delete[] _weights_o_h;
	delete[] _inputs;
	delete[] _hidden;
	delete[] _outputs;
}
// Seeds every connection weight (both layers) with a uniform random value
// in [-0.5, 0.5]. Uses rand(); seed with srand() beforehand for varied runs.
void CNeuralNet::initWeights(){
	for (uint h = 0; h < _hiddenLayerSize; ++h)
		for (uint i = 0; i < _inputLayerSize; ++i)
			_weights_h_i[h][i] = ((double)rand() / (double)RAND_MAX) - 0.5;

	for (uint o = 0; o < _outputLayerSize; ++o)
		for (uint h = 0; h < _hiddenLayerSize; ++h)
			_weights_o_h[o][h] = ((double)rand() / (double)RAND_MAX) - 0.5;
}
void CNeuralNet::feedForward(const double * const inputs) {
	  memcpy(_inputs,inputs,sizeof(double)*_inputLayerSize);
	  uint i, j;
	  // Calculate outputs of the hidden layer
	  for (i = 0; i < _hiddenLayerSize ; i++) {
	    _hidden[i] = 0.0;
	    //sum weighted input to this hidden layer node
	    for (j = 0 ; j < _inputLayerSize ; j++)
	    	_hidden[i] += (_weights_h_i[i][j] * _inputs[j]);
	    _hidden[i] = sigmoid( _hidden[i] );
	  }

	  // Calculate outputs for the output layer
	  for (i = 0 ; i < _outputLayerSize ; i++) {
	    _outputs[i] = 0.0;
	    //sum weighted input to this output layer node
	    for (j = 0 ; j < _hiddenLayerSize; j++) {
	      _outputs[i] += (_weights_o_h[i][j] * _hidden[j] );
	    }
	    _outputs[i] = sigmoid( _outputs[i] );
	  }
}
// One backpropagation step for the pattern most recently run through
// feedForward(). Computes the delta terms for the output and hidden layers
// and applies the delta-rule weight updates scaled by the learning rate.
//
// BUG FIX: _outputs[] and _hidden[] hold POST-activation values (sigmoid was
// already applied in feedForward). The activation derivative is therefore
// y * (1 - y) evaluated directly on the stored activation. The previous code
// called sigmoid_d(_outputs[o]) / sigmoid_d(_hidden[h]), which runs the
// value through the sigmoid a second time and yields an incorrect gradient.
void CNeuralNet::propagateErrorBackward(const double * const desiredOutput){
	//keep error arrays for the output and hidden layers:
	double * errOutput = new double[_outputLayerSize];
	double * errHidden = new double[_hiddenLayerSize];
	//compute the delta at the output layer: y*(1-y)*(target-y)
	for (uint o = 0; o < _outputLayerSize; ++o)
		errOutput[o] = _outputs[o] * (1.0 - _outputs[o]) * (desiredOutput[o] - _outputs[o]);
	//compute the hidden-layer delta: each node's share of the output error,
	//weighted by the (not yet updated) hidden->output weights:
	for (uint h = 0; h < _hiddenLayerSize; ++h){
		double weightedErr = 0;
		for (uint o = 0; o < _outputLayerSize; ++o)
			weightedErr += _weights_o_h[o][h] * errOutput[o];
		errHidden[h] = _hidden[h] * (1.0 - _hidden[h]) * weightedErr;
	}
	//adjust weights between the hidden and output layers (delta w formula);
	//this must happen AFTER errHidden is computed from the old weights:
	for (uint o = 0 ; o < _outputLayerSize ; o++) {
		for (uint h = 0 ; h < _hiddenLayerSize ; h++) {
			_weights_o_h[o][h] += _lRate * errOutput[o] * _hidden[h];
		}
	}
	//adjust weights between the input and hidden layers (delta w formula)
	for (uint h = 0 ; h < _hiddenLayerSize ; h++) {
		for (uint i = 0 ; i < _inputLayerSize ; i++) {
			_weights_h_i[h][i] += _lRate * errHidden[h] * _inputs[i];
		}
	}
	delete[] errOutput;
	delete[] errHidden;
}
// Mean squared error between the desired output vector and the activations
// produced by the most recent feedForward() call.
double CNeuralNet::meanSquaredError(const double * const desiredOutput){
	double total = 0;
	for (uint o = 0; o < _outputLayerSize; ++o){
		const double diff = desiredOutput[o] - _outputs[o];
		total += diff * diff;
	}
	return total / _outputLayerSize;
}
// Trains on the whole pattern set, repeating full passes until the WORST
// per-pattern MSE of a pass drops to EPSILON or below. Tracking the maximum
// (rather than the average) ensures every training sample is learned.
// NOTE: loops forever if the net cannot reach EPSILON on every sample.
void CNeuralNet::train(const double** const inputs,
		const double** const outputs, uint trainingSetSize) {
	printf("Initializing weights\n");
	initWeights();
	double worstMse;
	int round = 1;
	do {
		printf("Training round %d starting", round++);
		worstMse = 0;
		for (uint32_t p = 0; p < trainingSetSize; ++p){
			feedForward(inputs[p]);
			propagateErrorBackward(outputs[p]);
			//mse is measured on the activations from this pattern's forward pass
			worstMse = std::max(worstMse, meanSquaredError(outputs[p]));
		}
		printf(" <MSE = %f>\n", worstMse);
	} while (worstMse > EPSILON);
	printf("Finished training\n");
}
// Runs the network on the given input and returns the index of the output
// node with the highest activation (argmax) as the predicted class.
uint CNeuralNet::classify(const double * const input){
	feedForward(input);
	uint best = 0;
	for (uint o = 1; o < _outputLayerSize; ++o) {
		if (_outputs[o] > _outputs[best])
			best = o;
	}
	return best;
}
