#ifndef NNET_LAYER_H
#define NNET_LAYER_H

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

template<int NUM_INPUTS, int NUM_OUTPUTS, int BOOL_SIGMOID_OUTS, int BOOL_ADD_CONST_INPUT>
class NnetLayer
{
	double m_arrLfWeights[NUM_OUTPUTS][NUM_INPUTS+BOOL_ADD_CONST_INPUT]; // bias weight, if present, lives at index NUM_INPUTS
	// stored [output][input] for eval efficiency; training uses the matrix both ways, so that's a wash

	double m_arrLfOutputs[NUM_OUTPUTS];

	double m_arrLfDerrorDactivation[NUM_OUTPUTS];

	double m_arrLfDErrDInput[NUM_INPUTS]; // no need to worry about const input here
	
public:
	NnetLayer();
	~NnetLayer();

	inline int GetNumOutputs() const { return NUM_OUTPUTS; }

	bool Eval(const double * const arrLfInputs);

	bool CalcErrorPartials(const double * const arrLfDErrDInputPrev);	// computes dErr/dAct and dErr/dInput from the downstream layer's dErr/dInput

	const double * GetOutputVec() const { return m_arrLfOutputs; }

	const double * GetDErrDAct() const { return m_arrLfDerrorDactivation; } // the per-output deltas ("sigmas")

	const double * GetDerrDinput() const { return m_arrLfDErrDInput; }

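	// Plain gradient-descent step: w[i][j] -= rate * dErr/dAct_i * x_j, where x is
	// the previous layer's output vector (and an implicit 1.0 for the const/bias input).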
	bool DoBackpropStep(double lfLearningRate, const double * const arrLfPrevOutput)
	{
		if(BOOL_ADD_CONST_INPUT)
		{
			for(int i = 0; i < NUM_OUTPUTS; ++i)
			{
				m_arrLfWeights[i][NUM_INPUTS] -= lfLearningRate*m_arrLfDerrorDactivation[i]; // * 1.0 for extra input
				for(int j = 0; j < NUM_INPUTS; ++j)
				{
					m_arrLfWeights[i][j] -= lfLearningRate*m_arrLfDerrorDactivation[i]*arrLfPrevOutput[j];
				}
			}
		}
		else
		{
			for(int i = 0; i < NUM_OUTPUTS; ++i)
			{
				for(int j = 0; j < NUM_INPUTS; ++j)
				{
					m_arrLfWeights[i][j] -= lfLearningRate*m_arrLfDerrorDactivation[i]*arrLfPrevOutput[j];
				}
			}
		}
		return true;
	}

	void Serialize(FILE * fiOut);

	void DeSerialize(FILE * fiIn);

	void Clone(const NnetLayer<NUM_INPUTS, NUM_OUTPUTS, BOOL_SIGMOID_OUTS, BOOL_ADD_CONST_INPUT> & cloneMe);
	
};
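
// Illustrative usage sketch (not part of this header): a two-layer net with a
// hypothetical 4-8-2 topology, sigmoid outputs, and const/bias inputs. The sizes,
// inputs, learning rate, and error seed below are made-up values for demonstration.
//
//	NnetLayer<4, 8, 1, 1> layerHidden;
//	NnetLayer<8, 2, 1, 1> layerOut;
//
//	double arrLfIn[4] = {0.1, -0.2, 0.3, 0.4};
//	layerHidden.Eval(arrLfIn);								// forward pass
//	layerOut.Eval(layerHidden.GetOutputVec());
//
//	double arrLfDErr[2] = {0.05, -0.1};						// dErr/dOutput of the last layer,
//	layerOut.CalcErrorPartials(arrLfDErr);					// e.g. (output - target) for squared error
//	layerHidden.CalcErrorPartials(layerOut.GetDerrDinput());
//
//	layerOut.DoBackpropStep(0.1, layerHidden.GetOutputVec());	// update weights last,
//	layerHidden.DoBackpropStep(0.1, arrLfIn);					// after all partials are computed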



template<int NUM_INPUTS, int NUM_OUTPUTS, int BOOL_SIGMOID_OUTS, int BOOL_ADD_CONST_INPUT>
NnetLayer<NUM_INPUTS, NUM_OUTPUTS, BOOL_SIGMOID_OUTS, BOOL_ADD_CONST_INPUT>::NnetLayer()
{
	double lfScale = 1.0/((double)RAND_MAX); // note: rand() is never seeded here; call srand() first for different nets per run
	for(int i = 0; i < NUM_OUTPUTS; ++i)
	{
		for(int j = 0; j < NUM_INPUTS+BOOL_ADD_CONST_INPUT; ++j)
		{
			m_arrLfWeights[i][j] = 1.0 - 2.0*rand()*lfScale; // uniform in [-1, 1]
		}
	}
}


template<int NUM_INPUTS, int NUM_OUTPUTS, int BOOL_SIGMOID_OUTS, int BOOL_ADD_CONST_INPUT>
NnetLayer<NUM_INPUTS, NUM_OUTPUTS, BOOL_SIGMOID_OUTS, BOOL_ADD_CONST_INPUT>::~NnetLayer()
{
	// do nothing yet
}

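// Forward pass: out_i = f( sum_j w[i][j]*in_j + bias_i ), where f is the logistic
// sigmoid 1/(1+exp(-a)) when BOOL_SIGMOID_OUTS is set and the identity otherwise.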
template<int NUM_INPUTS, int NUM_OUTPUTS, int BOOL_SIGMOID_OUTS, int BOOL_ADD_CONST_INPUT>
bool NnetLayer<NUM_INPUTS, NUM_OUTPUTS, BOOL_SIGMOID_OUTS, BOOL_ADD_CONST_INPUT>::Eval(const double * const arrLfInputs)
{
	if(BOOL_ADD_CONST_INPUT) // use template specialization later
	{
		for(int i = 0; i < NUM_OUTPUTS; ++i)
		{
			m_arrLfOutputs[i] = m_arrLfWeights[i][NUM_INPUTS]; // start from the bias weight
			for(int j = 0; j < NUM_INPUTS; ++j)
			{
				m_arrLfOutputs[i] += arrLfInputs[j]*m_arrLfWeights[i][j];
			}
		}
	}
	else
	{
		for(int i = 0; i < NUM_OUTPUTS; ++i)
		{
			m_arrLfOutputs[i] = 0.0;
			for(int j = 0; j < NUM_INPUTS; ++j)
			{
				m_arrLfOutputs[i] += arrLfInputs[j]*m_arrLfWeights[i][j];
			}
		}
	}

	if(BOOL_SIGMOID_OUTS) // template specialize this as well later
	{
		for(int i = 0; i < NUM_OUTPUTS; ++i)
		{
			m_arrLfOutputs[i] = 1.0/(1.0+exp(-m_arrLfOutputs[i]));
		}
	}
	
	
	return true; // blithely assume we succeeded
	// for debug builds, might consider adding size parameters and asserting that they match;
	// not sure how to do that, though, without adding extra unnecessary parameters in release builds
}

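// Backward pass, by the chain rule:
//   dErr/dAct_i = dErr/dOut_i * out_i*(1 - out_i)	(sigmoid case; identity otherwise)
//   dErr/dIn_j  = sum_i dErr/dAct_i * w[i][j]
// arrLfDErrDInputPrev is the downstream layer's dErr/dInput (or, for the last
// layer, dErr/dOutput of the whole net).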
template<int NUM_INPUTS, int NUM_OUTPUTS, int BOOL_SIGMOID_OUTS, int BOOL_ADD_CONST_INPUT>
bool NnetLayer<NUM_INPUTS, NUM_OUTPUTS, BOOL_SIGMOID_OUTS, BOOL_ADD_CONST_INPUT>::CalcErrorPartials(const double * const arrLfDErrDInputPrev)
{
	if(BOOL_SIGMOID_OUTS) // template specialize this as well later
	{
		for(int i = 0; i < NUM_OUTPUTS; ++i)
		{
			m_arrLfDerrorDactivation[i] = arrLfDErrDInputPrev[i]*m_arrLfOutputs[i]*(1.0-m_arrLfOutputs[i]); // sigmoid'(a) = out*(1-out)
		}
	}
	else
	{
		for(int i = 0; i < NUM_OUTPUTS; ++i)
		{
			m_arrLfDerrorDactivation[i] = arrLfDErrDInputPrev[i];
		}
	}

	
	for(int i = 0; i < NUM_INPUTS; ++i)
	{
		m_arrLfDErrDInput[i] = 0.0;
	}
	for(int i = 0; i < NUM_OUTPUTS; ++i)
	{
		for(int j = 0; j < NUM_INPUTS; ++j)
		{
			m_arrLfDErrDInput[j] += m_arrLfDerrorDactivation[i]*m_arrLfWeights[i][j];
		}
	}


	return true;
}


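// On-disk text format, per output row: the bias weight (if any) followed by ", ",
// then each input weight followed by "| ". DeSerialize relies on every separator
// ending in a space.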
template<int NUM_INPUTS, int NUM_OUTPUTS, int BOOL_SIGMOID_OUTS, int BOOL_ADD_CONST_INPUT>
void NnetLayer<NUM_INPUTS, NUM_OUTPUTS, BOOL_SIGMOID_OUTS, BOOL_ADD_CONST_INPUT>::Serialize(FILE * fiOut)
{
	if(BOOL_ADD_CONST_INPUT)
	{
		for(int i = 0; i < NUM_OUTPUTS; ++i)
		{
			fprintf(fiOut, "%lf, ", m_arrLfWeights[i][NUM_INPUTS]); // bias first, set off with ", "
			for(int j = 0; j < NUM_INPUTS; ++j)
			{
				fprintf(fiOut, "%lf| ", m_arrLfWeights[i][j]);				
			}
		}
	}
	else
	{
		for(int i = 0; i < NUM_OUTPUTS; ++i)
		{
			for(int j = 0; j < NUM_INPUTS; ++j)
			{
				fprintf(fiOut, "%lf| ", m_arrLfWeights[i][j]);				
			}
		}
	}
}

template<int NUM_INPUTS, int NUM_OUTPUTS, int BOOL_SIGMOID_OUTS, int BOOL_ADD_CONST_INPUT>
void NnetLayer<NUM_INPUTS, NUM_OUTPUTS, BOOL_SIGMOID_OUTS, BOOL_ADD_CONST_INPUT>::DeSerialize(FILE * fiIn)
{
	char cTmp;
	if(BOOL_ADD_CONST_INPUT)
	{
		for(int i = 0; i < NUM_OUTPUTS; ++i)
		{
			fscanf(fiIn, "%lf", &(m_arrLfWeights[i][NUM_INPUTS]));
			// skip the separator up to and including its trailing space,
			// bailing out on EOF instead of looping forever
			while(fscanf(fiIn, "%c", &cTmp) == 1 && cTmp != ' ')
			{
			}
			for(int j = 0; j < NUM_INPUTS; ++j)
			{
				fscanf(fiIn, "%lf", &(m_arrLfWeights[i][j]));
				while(fscanf(fiIn, "%c", &cTmp) == 1 && cTmp != ' ')
				{
				}
			}
			
		}
	}
	else
	{
		for(int i = 0; i < NUM_OUTPUTS; ++i)
		{
			for(int j = 0; j < NUM_INPUTS; ++j)
			{
				fscanf(fiIn, "%lf| ", &(m_arrLfWeights[i][j])); // %lf, not %f, when scanning into a double
			}
		}
	}
}

template<int NUM_INPUTS, int NUM_OUTPUTS, int BOOL_SIGMOID_OUTS, int BOOL_ADD_CONST_INPUT>
void 
NnetLayer<NUM_INPUTS, NUM_OUTPUTS, BOOL_SIGMOID_OUTS, BOOL_ADD_CONST_INPUT>::Clone(
	const NnetLayer<NUM_INPUTS, NUM_OUTPUTS, BOOL_SIGMOID_OUTS, BOOL_ADD_CONST_INPUT> & cloneMe)
{
	for(int i = 0; i < NUM_OUTPUTS; ++i)
	{
		for(int j = 0; j < NUM_INPUTS+BOOL_ADD_CONST_INPUT; ++j)
		{
			m_arrLfWeights[i][j] = cloneMe.m_arrLfWeights[i][j];
		}
	}
}


#endif // NNET_LAYER_H