#include "NeuralNet.h"

Neuron::Neuron(int iNumInputs):
m_iNumInputs(iNumInputs + 1)
{
	// One extra slot beyond the real inputs holds the bias weight,
	// so the neuron stores iNumInputs + 1 weights in total.
	const int iTotalWeights = iNumInputs + 1;

	for (int w = 0; w < iTotalWeights; ++w)
	{
		// every weight (bias included) starts at a random clamped value
		m_vecWeight.push_back(RandomClamped());
	}
}

/*-------------------------------------------------------------------------------------------------------------*/
NeuronLayer::NeuronLayer(int iNumNeurons, 
						   int iNumInputsPerNeuron):	m_iNumNeurons(iNumNeurons)
{
	// Build the layer one neuron at a time; each neuron constructs its
	// own independently randomised weight set for iNumInputsPerNeuron
	// inputs (plus bias).
	for (int n = 0; n < iNumNeurons; ++n)
	{
		m_vecNeurons.push_back(Neuron(iNumInputsPerNeuron));
	}
}

/*-------------------------------------------------------------------------------------------------------------*/

NeuralNet::NeuralNet() :
	// topology comes from the project-wide tuning constants
	m_iNumInputs(I_NUM_INPUTS),
	m_iNumOutputs(I_NUM_OUTPUTS),
	m_iNumHiddenLayers(I_NUM_HIDDEN),
	m_iNeuronsPerHiddenLyr(I_NEURONS_PER_HIDDEN_LAYER)
{
	// build the layers immediately so the network is usable on construction
	CreateNet();
}

/*-------------------------------------------------------------------------------------------------------------*/
void NeuralNet::CreateNet()
{
	// Assemble the layer chain.  With no hidden layers the output layer
	// connects straight to the raw inputs; otherwise the first hidden
	// layer reads the inputs, each further hidden layer reads the
	// previous one, and the output layer reads the last hidden layer.
	if (m_iNumHiddenLayers <= 0)
	{
		// degenerate topology: single output layer fed by the inputs
		m_vecLayers.push_back(NeuronLayer(m_iNumOutputs, m_iNumInputs));
		return;
	}

	// first hidden layer is fed by the raw inputs
	m_vecLayers.push_back(NeuronLayer(m_iNeuronsPerHiddenLyr, m_iNumInputs));

	// remaining hidden layers are fed by the previous hidden layer
	for (int layer = 1; layer < m_iNumHiddenLayers; ++layer)
	{
		m_vecLayers.push_back(NeuronLayer(m_iNeuronsPerHiddenLyr,
			m_iNeuronsPerHiddenLyr));
	}

	// output layer is fed by the last hidden layer
	m_vecLayers.push_back(NeuronLayer(m_iNumOutputs, m_iNeuronsPerHiddenLyr));
}

/*-------------------------------------------------------------------------------------------------------------*/
vector<double> NeuralNet::GetWeights() const
{
	// Flatten every weight in the network (bias weights included) into a
	// single vector, ordered layer -> neuron -> weight.  This ordering is
	// the contract shared with PutWeights(), which reloads values in
	// exactly the same sequence.
	vector<double> weights;

	// for each layer (hidden layers + the output layer)
	for (int i = 0; i < m_iNumHiddenLayers + 1; ++i)
	{
		const NeuronLayer &layer = m_vecLayers[i];

		// for each neuron, append its full weight vector in one range
		// insert rather than pushing elements one at a time
		for (int j = 0; j < layer.m_iNumNeurons; ++j)
		{
			const Neuron &neuron = layer.m_vecNeurons[j];

			weights.insert(weights.end(),
						   neuron.m_vecWeight.begin(),
						   neuron.m_vecWeight.end());
		}
	}

	return weights;
}

/*-------------------------------------------------------------------------------------------------------------*/
void NeuralNet::PutWeights(vector<double> &weights)
{
	// Replace every weight in the network with values drawn sequentially
	// from 'weights'.  The traversal order (layer -> neuron -> weight)
	// mirrors GetWeights(), so a vector produced there maps back exactly.
	// NOTE(review): assumes 'weights' holds at least GetNumberOfWeights()
	// entries — there is no bounds check, so a short vector means
	// out-of-range indexing.
	int cWeight = 0;

	for (int lyr = 0; lyr < m_iNumHiddenLayers + 1; ++lyr)
	{
		NeuronLayer &layer = m_vecLayers[lyr];

		for (int n = 0; n < layer.m_iNumNeurons; ++n)
		{
			Neuron &neuron = layer.m_vecNeurons[n];

			// overwrite each stored weight (bias slot included)
			for (int w = 0; w < neuron.m_iNumInputs; ++w)
			{
				neuron.m_vecWeight[w] = weights[cWeight++];
			}
		}
	}
}

/*-------------------------------------------------------------------------------------------------------------*/
int NeuralNet::GetNumberOfWeights() const
{

	int weights = 0;

	//for each layer
	for (int i=0; i<m_iNumHiddenLayers + 1; ++i)
	{

		//for each neuron
		for (int j=0; j<m_vecLayers[i].m_iNumNeurons; ++j)
		{
			//for each weight
			for (int k=0; k<m_vecLayers[i].m_vecNeurons[j].m_iNumInputs; ++k)

				weights++;

		}
	}

	return weights;
}

/*-------------------------------------------------------------------------------------------------------------*/
double NeuralNet::Sigmoid(double netinput, double response)
{
	// Logistic activation: squashes any net input into the open
	// interval (0, 1).  'response' scales the curve's steepness.
	// NOTE(review): response must be non-zero — no guard here; verify
	// the constant passed by callers.
	const double dExponent = -netinput / response;

	return 1.0 / (1.0 + exp(dExponent));
}

/*-------------------------------------------------------------------------------------------------------------*/
vector<double> NeuralNet::Update(vector<double> &inputs)
{
    // Feed 'inputs' forward through every layer and return the final
    // layer's outputs.  Returns an empty vector if the number of inputs
    // does not match what the network was built for.
    //
    // Fix: the previous version assigned the intermediate layer outputs
    // back into 'inputs' (a non-const reference), silently clobbering the
    // caller's vector.  We now propagate activations through a local copy
    // so the caller's data is left untouched.
    
    // stores the resultant outputs from each layer
    vector<double> outputs;
    
    // first check that we have the correct amount of inputs
    // (cast avoids the signed/unsigned comparison warning)
    if (static_cast<int>(inputs.size()) != m_iNumInputs)
    {
        // just return an empty vector if incorrect
        return outputs;
    }
    
    // local working copy: holds the activations feeding the current layer
    vector<double> layerInputs = inputs;
    
    for (int i = 0; i < m_iNumHiddenLayers + 1; ++i)
    {
        // from the second layer on, the previous layer's outputs become
        // this layer's inputs
        if (i > 0)
        {
            layerInputs = outputs;
        }
        
        outputs.clear();
        
        // for each neuron sum the (inputs * corresponding weights)
        // and throw the total at our sigmoid function to get the output
        for (int j = 0; j < m_vecLayers[i].m_iNumNeurons; ++j)
        {
            const Neuron &neuron = m_vecLayers[i].m_vecNeurons[j];
            
            const int iNumInputs = neuron.m_iNumInputs;
            
            double dNetInput = 0;
            
            // weighted sum of the inputs; the final weight slot is the
            // bias, so it is excluded here
            for (int k = 0; k < iNumInputs - 1; ++k)
            {
                dNetInput += neuron.m_vecWeight[k] * layerInputs[k];
            }
            
            // add in the bias contribution
            dNetInput += neuron.m_vecWeight[iNumInputs - 1] * D_BIAS;
            
            // the combined activation is filtered through the sigmoid
            // function to produce this neuron's output
            outputs.push_back(Sigmoid(dNetInput, D_ACTIVATION_RESPONSE));
        }
    }
    
    return outputs;
}