/**
 * @file	FastNN.cpp
 * @author  Thomas Fu <thomas.ks.fu@gmail.com>
 * @version 0.0.1
 * @date	06/02/2011	
 *
 * @section License
 * This program is property of Jyeah Jyeah Jyeah Jyeah Jyeah and may not be used 
 * by any individual for any purpose at any time without express permission from 
 * Mike Casey, Thomas Fu, Chris Hairfield, Kyle Lamson, Ben Treweek, or Cliff 
 * Warren.
 *
 * @brief 
 * This file contains the implementation of the functions required for a fast 
 * evaluator for a neural network. This struct cannot be used for training the 
 * network, but provides an improvement in speed and memory usage for network 
 * evaluation.
 */

#include <iostream>
#include "FastNN.h"

using namespace std;

/*
 * Faster version of the built-in srand function taken from Intel's site.
 * Seeds the linear congruential generator used by fastrand() by storing the
 * seed directly into g_seed (declared elsewhere, presumably in FastNN.h --
 * confirm). Not thread-safe: g_seed is shared global state.
 */
inline void fast_srand(int seed) 
{ 
	g_seed = seed;
} 

/*
 * Faster version of the built-in rand function taken from Intel's site.
 * Advances the shared LCG state in g_seed and returns a pseudo-random value
 * in the range [0, 32767].
 */
inline int fastrand() 
{
	// Linear congruential step: state = state * 214013 + 2531011.
	g_seed = 214013 * g_seed + 2531011;
	// The low-order bits of an LCG are low quality; keep 15 bits from the
	// upper half of the state instead.
	return (g_seed >> 16) & 0x7FFF;
} 

/*
 * Allocates all memory associated with the FastNN struct based on the values 
 * of numInputs, numOutputs, and numHidden specified in the FastNN struct.
 *
 * Layout of the single allocation, in order:
 *   inputValues  -- max(numInputs, numHidden) + 1 doubles: holds the inputs
 *                   plus bias, and is reused as the hidden-layer-2 scratch
 *                   buffer (numHidden + 1 doubles) during evaluation.
 *   outputValues -- max(numHidden + 1, numOutputs) doubles: holds the
 *                   hidden-layer-1 activations plus bias, then the final
 *                   network outputs.
 *   weights      -- the three weight matrices, row major, one extra bias
 *                   weight per row.
 *
 * On allocation failure every buffer pointer is set to NULL.
 */
void initializeFastNN(FastNN *nn)
{
	// Each activation buffer is shared between two roles (see layout comment),
	// so it must be sized for the larger of the two. The previous code sized
	// both buffers at numHidden + 1, which overflowed whenever
	// numInputs > numHidden (setInputs writes numInputs + 1 doubles) or
	// numOutputs > numHidden + 1.
	int inBufSize = (nn->numInputs > nn->numHidden ? nn->numInputs : nn->numHidden) + 1;
	int outBufSize = nn->numHidden + 1 > nn->numOutputs ? nn->numHidden + 1 
	: nn->numOutputs;

	// Element counts of the three weight matrices; "+ 1" per row is the bias.
	int weightCount = nn->numHidden * (nn->numInputs + 1)
	                + nn->numHidden * (nn->numHidden + 1)
	                + nn->numOutputs * (nn->numHidden + 1);

	// To maximize efficiency with regards to memory overhead, all memory for a
	// FastNN is allocated all together and then divided manually.
	int totalMemRequirement = inBufSize + outBufSize + weightCount;
	nn->inputValues = (double*) malloc(sizeof(double) * totalMemRequirement);
	if(nn->inputValues == NULL)
	{
		// Leave the struct in a recognizably-unusable state instead of
		// handing out pointers into unallocated memory.
		nn->outputValues = NULL;
		nn->inputToHidden1Weights = NULL;
		nn->hidden1ToHidden2Weights = NULL;
		nn->hidden2ToOutputWeights = NULL;
		return;
	}
	nn->outputValues = nn->inputValues + inBufSize;
	nn->inputToHidden1Weights = nn->outputValues + outBufSize;
	nn->hidden1ToHidden2Weights = nn->inputToHidden1Weights 
	+ nn->numHidden * (nn->numInputs + 1);
	nn->hidden2ToOutputWeights = nn->hidden1ToHidden2Weights 
	+ nn->numHidden * (nn->numHidden + 1);
}

/*
 * Maps each value stored in values to a real number between 0 and 1 via the
 * logistic sigmoid function 1 / (1 + e^-v), applied in place.
 */
void sigmoid(double *values, int num)
{
	for(int i = 0; i < num; i++)
	{
		values[i] = 1.0 / (1.0 + exp(-values[i]));
	}
}

/*
 * Sets the input values to the neural network prior to evaluation. Copies
 * numInputs values from inputVals into the network's input buffer and
 * appends the constant 1.0 bias term.
 */
void setInputs(FastNN  *nn, double *inputVals)
{
	double *dst = nn->inputValues;
	for(int i = 0; i < nn->numInputs; i++)
	{
		dst[i] = inputVals[i];
	}
	// Fixed bias input consumed by the "+ 1" column of the weight matrix.
	dst[nn->numInputs] = 1.0;
}

/*
 * Propagates the input values through the network and places the results of
 * the network evaluation at nn->outputValues.
 *
 * The two activation buffers are ping-ponged between layers: the inputs
 * (no longer needed after layer 1) are overwritten with the hidden-2
 * activations, so no extra scratch memory is required.
 */
void generateOutput(FastNN *nn)
{
	double *bufA = nn->inputValues;   // inputs now; hidden-2 scratch later
	double *bufB = nn->outputValues;  // hidden-1 scratch; final outputs later
	const int h = nn->numHidden;

	// Layer 1: inputs (+ bias) -> hidden 1, written into bufB.
	matrixVectorProduct(nn->inputToHidden1Weights, bufA, bufB, 
					h, nn->numInputs + 1);
	sigmoid(bufB, h);
	bufB[h] = 1.0; // bias for the next layer

	// Layer 2: hidden 1 (+ bias) -> hidden 2, written back into bufA.
	matrixVectorProduct(nn->hidden1ToHidden2Weights, bufB, bufA,
					h, h + 1);
	sigmoid(bufA, h);
	bufA[h] = 1.0; // bias for the next layer

	// Layer 3: hidden 2 (+ bias) -> outputs, landing in nn->outputValues.
	matrixVectorProduct(nn->hidden2ToOutputWeights, bufA, bufB, 
					nn->numOutputs, h + 1);
	sigmoid(bufB, nn->numOutputs);
}

/** 
 * Computes the matrix-vector product b = A * x, where A is a
 * numRows-by-numCols matrix represented as a one dimensional array with
 * elements in row major order, x has numCols elements, and the result is
 * stored in b (numRows elements).
 *
 * Rows are processed two at a time so that a single pass over x feeds two
 * running sums, halving the number of traversals of the vector.
 */
void matrixVectorProduct(double *A, double *x, double *b, int numRows, 
												 int numCols)
{
	double *Apos1 = A;           // walks the even-indexed rows
	double *Apos2 = A + numCols; // walks the odd-indexed rows
	double *bpos = &b[0];
	
	int i;
	for(i = 0; i < numRows / 2; i++)
	{
		double btemp1 = 0;
		double btemp2 = 0;
		double *xpos = &x[0];
		
		int j;
		for(j = 0; j < numCols; j++)
		{
			btemp1 += (*Apos1++) * (*xpos);
			btemp2 += (*Apos2++) * (*xpos);
			xpos++;
		}
		
		*bpos = btemp1;
		bpos++;
		
		*bpos = btemp2;
		bpos++;
		
		// Each pointer advanced one row in the inner loop; skip the row the
		// other pointer just consumed so both land two rows ahead.
		Apos1 += numCols;
		Apos2 += numCols;
	}	
	
	// When numRows is odd the pairwise loop leaves the last row untouched;
	// Apos1 already points at it.
	if(numRows % 2 == 1)
	{
		double btemp1 = 0;
		double *xpos = &x[0];
		int j;
		for(j = 0; j < numCols; j++)
		{
			// BUG FIX: this loop previously advanced x instead of xpos, so
			// the final row was dotted against a constant x[0].
			btemp1 += (*Apos1++) * (*xpos++);
		}
		*bpos = btemp1;
	}	
}