#include <algorithm>
#include <assert.h>
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <map>
#include <stdexcept>
#include <vector>

/*
	Logistic sigmoid activation: sigma(x) = 1 / (1 + e^-x).
	If firstDerivative=true, returns the first derivative instead,
	expressed in terms of sigma: sigma(x) * (1 - sigma(x)).
	Accepts any real x. (The old comment claimed an exception was thrown
	for inputs outside [0,1]; no such check ever existed.)
*/
double SigmoidFunction(double x,bool firstDerivative=false)
{
	const double sigma=1.0/(1.0+std::exp(-x));
	if(!firstDerivative)
		return sigma;
	else
		return sigma*(1.0-sigma);
}

/*
	Output error gradient (Backpropagate, step 1):
	deltaK = yK * (1 - yK) * (dK - yK)
	where:
	deltaK = error gradient of output neuron K
	yK = actual output of neuron K
	dK = desired output of neuron K
*/
namespace ICProject{
	/*
		A fully connected 3-layer (input / hidden / output) feed-forward
		neural network trained with the classic backpropagation rule plus a
		momentum term. Weights are either updated after every pattern
		(online mode) or once per pass over the set (batch mode, default).
	*/
	class NeuralNetwork
	{
		//Input layer activations; the extra last element is the bias input
		std::vector<double> inputLayerNeurons;
		int nInputNeurons;

		//Hidden layer activations; the extra last element is the bias input
		std::vector<double> hiddenLayerNeurons;
		int nHiddenNeurons;

		//Output layer activations
		std::vector<double> outputLayerNeurons;
		int nOutputNeurons;

		//Desired accuracy (stored but not yet used as a stopping criterion)
		double desiredAccuracy;

		//Upper bound on training iterations (see RunEpoch)
		unsigned int maxEpochs;

		//Weights input->hidden, indexed [inputNeuron][hiddenNeuron]
		std::vector<std::vector<double>> inputLayerWeights;

		//Weights hidden->output, indexed [hiddenNeuron][outputNeuron]
		std::vector<std::vector<double>> hiddenLayerWeights;

		//Unused; kept only so the class layout stays backward compatible
		std::vector<std::vector<double>> outputLayerWeights;

		//Error gradients of the output neurons
		std::vector<double> deltaOutput;

		//Error gradients of the hidden neurons (including the bias slot)
		std::vector<double> deltaHidden;

		//Pending weight changes hidden->output
		std::vector<std::vector<double>> deltaHiddenWeights;

		//Pending weight changes input->hidden
		std::vector<std::vector<double>> deltaInputWeights;

		unsigned int Epochs;		//training iterations performed so far
		double learningRate;
		double momentum;
		bool USE_BATCH;				//true: apply accumulated deltas once per pass

	public:

		/*
			Builds the network, sizes every layer (non-output layers get one
			extra bias slot) and randomises the weights.
			NOTE(review): momentum defaults to 1, i.e. previous deltas never
			decay -- callers normally pass a value < 1; confirm intended.
		*/
		NeuralNetwork(int nInputLayerNeurons,int nHiddenLayerNeurons,int nOutputLayerNeurons,double desiredAccuracy,int maxEpochs,double learningRate,double momentum=1,bool USE_BATCH=true)
		{
			//FIX: the old code wrote "*new std::vector<...>", which copies the
			//heap vector into the member and leaks the original allocation.
			this->nInputNeurons=nInputLayerNeurons;
			this->inputLayerNeurons.assign(nInputLayerNeurons+1,0.0);
			this->nHiddenNeurons=nHiddenLayerNeurons;
			this->hiddenLayerNeurons.assign(nHiddenLayerNeurons+1,0.0);
			this->nOutputNeurons=nOutputLayerNeurons;
			this->outputLayerNeurons.assign(nOutputLayerNeurons,0.0);
			this->momentum=momentum;
			this->desiredAccuracy=desiredAccuracy;
			this->maxEpochs=maxEpochs;
			this->Epochs=0;
			this->learningRate=learningRate;
			this->USE_BATCH=USE_BATCH;
			RandomiseNetwork();	//also sizes the weight / delta-weight matrices
		}
		/*
		Forward-feeds one input pattern through the network.
		Input - inputVector: one value per input neuron (bias appended here)

		Output: the vector of output layer activations
		*/
		std::vector<double> ForwardFeed(const std::vector<double>& inputVector)
		{
			//Append the bias input. FIX: it must be a non-zero constant (1);
			//the old code pushed 0, which silenced every bias weight AND froze
			//its gradient at 0 (delta = lr * input * delta).
			std::vector<double> copyInputVector(inputVector);
			copyInputVector.push_back(1.0);

			//hidden layer activations
			for(int i=0;i<nHiddenNeurons;i++)
			{
				hiddenLayerNeurons[i]=0;	//FIX: reset accumulator; it used to keep summing across calls
				for(int j=0;j<nInputNeurons+1;j++)
				{
					hiddenLayerNeurons[i]+=copyInputVector[j]*inputLayerWeights[j][i];
				}
				hiddenLayerNeurons[i]=SigmoidFunction(hiddenLayerNeurons[i]);
			}
			//hidden layer bias input (FIX: 1, not 0 -- see above)
			hiddenLayerNeurons[nHiddenNeurons]=1.0;

			//output layer activations
			for(int i=0;i<nOutputNeurons;i++)
			{
				outputLayerNeurons[i]=0;	//FIX: reset accumulator
				for(int j=0;j<nHiddenNeurons+1;j++)
				{
					outputLayerNeurons[i]+=hiddenLayerNeurons[j]*hiddenLayerWeights[j][i];
				}
				outputLayerNeurons[i]=SigmoidFunction(outputLayerNeurons[i]);
			}
			inputLayerNeurons=copyInputVector;	//remembered for Backpropagate step 4
			return outputLayerNeurons;
		}

		/*
		Backpropagation: computes the error gradients and the pending weight
		changes for the pattern last fed through ForwardFeed.
		Input - desiredOutput: target value per output neuron
		Output - N/A (deltas are applied later by UpdateWeights)
		*/
		void Backpropagate(const std::vector<double>& desiredOutput)
		{
			//FIX: assign() instead of the leaking "*new" idiom; assign also
			//zero-fills, so the (broken, no-op) VectorFill calls were dropped.
			deltaOutput.assign(nOutputNeurons,0.0);
			deltaHidden.assign(nHiddenNeurons+1,0.0);

			//step 1: output error gradients  deltaK = yK(1-yK)(dK-yK)
			for(int k=0;k<nOutputNeurons;k++)
			{
				deltaOutput[k]=outputLayerNeurons[k]*(1-outputLayerNeurons[k])*(desiredOutput[k]-outputLayerNeurons[k]);
			}
			//step 2: hidden->output weight deltas, momentum on the previous delta
			for(int i=0;i<nHiddenNeurons+1;i++)
			{
				for(int j=0;j<nOutputNeurons;j++)
				{
					deltaHiddenWeights[i][j]=learningRate*hiddenLayerNeurons[i]*deltaOutput[j]+momentum*deltaHiddenWeights[i][j];
				}
			}
			//step 3: hidden error gradients  deltaJ = yJ(1-yJ) * sum_k(wJK * deltaK)
			for(int j=0;j<nHiddenNeurons+1;j++)
			{
				double tempSum=0;
				for(int k=0;k<nOutputNeurons;k++)
				{
					tempSum+=hiddenLayerWeights[j][k]*deltaOutput[k];
				}
				deltaHidden[j]=hiddenLayerNeurons[j]*(1-hiddenLayerNeurons[j])*tempSum;
			}
			//step 4: input->hidden weight deltas, momentum on the previous delta
			for(int i=0;i<nInputNeurons+1;i++)
			{
				for(int j=0;j<nHiddenNeurons+1;j++)
				{
					deltaInputWeights[i][j]=learningRate*inputLayerNeurons[i]*deltaHidden[j]+momentum*deltaInputWeights[i][j];
				}
			}
		}
		/*
		UpdateWeights
		Input/Output -- void
		Applies the pending weight deltas computed by Backpropagate.
		*/
		void UpdateWeights()
		{
			for(int i=0;i<nInputNeurons+1;i++)
			{
				for(int j=0;j<nHiddenNeurons+1;j++)
				{
					inputLayerWeights[i][j]+=deltaInputWeights[i][j];
				}
			}
			for(int i=0;i<nHiddenNeurons+1;i++)
			{
				for(int j=0;j<nOutputNeurons;j++)
				{
					hiddenLayerWeights[i][j]+=deltaHiddenWeights[i][j];
				}
			}
		}
		/*
		Sets every element of toFill to value.
		FIX: the vector is now taken by reference and filled with std::fill;
		the old version copied the vector AND assigned to a by-value lambda
		parameter, making the whole call a silent no-op.
		*/
		template<typename T>
		void VectorFill(std::vector<T>& toFill,T value)
		{
			std::fill(toFill.begin(),toFill.end(),value);
		}
		/*
		RunEpoch
		Feeds one pattern forward, backpropagates its error, and (in online
		mode) applies the weight deltas immediately.
		Input: one input vector and its desired output vector.
		NOTE(review): Epochs advances once per *pattern*, so maxEpochs bounds
		pattern presentations rather than full passes -- confirm intended.
		*/
		void RunEpoch(std::vector<double> input,std::vector<double> output)
		{
			ForwardFeed(input);	//FIX: removed unused debug local reading output[0]
			Backpropagate(output);
			if(!USE_BATCH)
				UpdateWeights();
			Epochs++;
		}
		/*
			Trains the network on the input -> desired-output map until
			maxEpochs iterations have run. In batch mode the accumulated
			weight deltas are applied once per pass over the collection.
			NOTE(review): desiredAccuracy is never checked here -- the only
			stop condition is the iteration budget.
		*/
		void StartTraining(std::map<std::vector<double>,std::vector<double>> patternCollection)
		{
			while(Epochs<maxEpochs)
			{
				std::for_each(patternCollection.begin(),patternCollection.end(),[this](const std::pair<std::vector<double>,std::vector<double>>& input)
				{
					RunEpoch(input.first,input.second);
				});
				if(USE_BATCH)
					UpdateWeights();
			}
		}
		//Randomise the network weights uniformly in [-0.5, 0.5)
		void RandomiseNetwork()
		{
			//size the delta matrices and zero them (FIX: no "*new" leaks)
			deltaInputWeights.assign(nInputNeurons+1,std::vector<double>(nHiddenNeurons+1,0.0));
			deltaHiddenWeights.assign(nHiddenNeurons+1,std::vector<double>(nOutputNeurons,0.0));

			//FIX: clear first so re-randomising does not append extra rows
			inputLayerWeights.clear();
			hiddenLayerWeights.clear();

			for(int i=0;i<nInputNeurons+1;i++)
			{
				std::vector<double> values;
				for(int j=0;j<nHiddenNeurons+1;j++)
				{
					//FIX: rand()/(RAND_MAX+1) was integer division (always 0,
					//and RAND_MAX+1 overflows int); use a floating divide
					values.push_back(rand()/(RAND_MAX+1.0)-0.5);
				}
				inputLayerWeights.push_back(values);
			}
			for(int i=0;i<nHiddenNeurons+1;i++)
			{
				std::vector<double> values;
				for(int j=0;j<nOutputNeurons;j++)
				{
					//FIX: rand()%2-0.5 only ever produced -0.5 or +0.5
					values.push_back(rand()/(RAND_MAX+1.0)-0.5);
				}
				hiddenLayerWeights.push_back(values);
			}
		}
	};

}
using namespace ICProject;
int unmain()
	{
		
		NeuralNetwork* obj=new NeuralNetwork(2,2,1,100,1000,0.6,0.8);
		std::map<std::vector<double>,std::vector<double>> patternCollection;
		double i1[]={0,0};
		double o1[]={0};
		double i2[]={0,1};
		double o2[]={1};
		double i3[]={1,0};
		double o3[]={1};
		double i4[]={1,1};
		double o4[]={1};
		double i5[]={0.2,0.3};
		double o5[]={0.5};
		double i6[]={0.2,0.3};
		double o6[]={0.5};
		double i7[]={0.2,0.3};
		double o7[]={0.5};
		double i8[]={0.2,0.3};
		double o8[]={0.5};
		double i9[]={0.2,0.3};
		double o9[]={0.5};
		double i10[]={0.2,0.3};
		double o10[]={0.5};
		double i11[]={0.2,0.3};
		double o11[]={0.5};
		patternCollection.insert(std::pair<std::vector<double>,std::vector<double>> (std::vector<double>(i1,(i1+2)),std::vector<double>(o1,(o1+1))));
		patternCollection.insert(std::pair<std::vector<double>,std::vector<double>>(std::vector<double>(i2,(i2+2)),std::vector<double>(o2,(o2+1))));
		patternCollection.insert(std::pair<std::vector<double>,std::vector<double>>(std::vector<double>(i3,(i3+2)),std::vector<double>(o3,(o3+1))));
		patternCollection.insert(std::pair<std::vector<double>,std::vector<double>>(std::vector<double>(i4,(i4+2)),std::vector<double>(o4,(o4+1))));
		obj->StartTraining(patternCollection);
		auto output=obj->ForwardFeed(std::vector<double>(i2,i2+2));
		return 0;
	}