#include "ArtificialNeuralNetwork.h"
//#include <ctime>
#include <cstdlib>
#include <cmath>
#include <fstream>

using namespace std;

ArtificialNeuralNetwork::ArtificialNeuralNetwork(int numOfLay, int *numOfNeorunsInEachLayer){

	// Build a fully-connected feed-forward network with numOfLay layers.
	// numOfNeorunsInEachLayer[i] is the neuron count of layer i.
	m_numberOfLayers	= numOfLay;

	// Copy the per-layer neuron counts so the caller's array may be freed.
	m_numOfNeurons = new int[m_numberOfLayers];
	for(int i = 0 ; i < m_numberOfLayers ; i++)
		m_numOfNeurons[i] = numOfNeorunsInEachLayer[i];

	// Neuron activation values: one array per layer.
	m_neuronValues = new double*[m_numberOfLayers];
	for(int i = 0 ; i < m_numberOfLayers ; i++)
		m_neuronValues[i] = new double[m_numOfNeurons[i]];

	// Weights exist only for layers 1..N-1 (the input layer has none).
	// Slot 0 was previously left uninitialized; null it explicitly so
	// teardown code never dereferences garbage.
	m_neuronWeights = new double **[m_numberOfLayers];
	m_neuronWeights[0] = 0;
	for(int i = 1 ; i < m_numberOfLayers ; i++){
		m_neuronWeights[i] = new double*[m_numOfNeurons[i]];
		for(int j = 0 ; j < m_numOfNeurons[i] ; j++){
			// +1 extra slot at the end holds the neuron's bias weight.
			m_neuronWeights[i][j] = new double[m_numOfNeurons[i-1]+1];
		}
	}

	// Weights start at zero; real values are expected to come from
	// loadWights() (random initialization was commented out upstream).
	for(int i = 1 ; i < m_numberOfLayers ; i++){
		for(int j = 0 ; j < m_numOfNeurons[i] ; j++){
			for(int k = 0 ; k < m_numOfNeurons[i-1]+1 ; k++){
				m_neuronWeights[i][j][k] = 0.0;
			}
		}
	}

}
ArtificialNeuralNetwork::~ArtificialNeuralNetwork(){}
void ArtificialNeuralNetwork::release (){
	// Free everything the constructor allocated.
	// Fixes two defects in the previous version:
	//  - the per-neuron weight loop started at layer 0, but the constructor
	//    never allocates m_neuronWeights[0] (the input layer has no weights),
	//    so delete[] m_neuronWeights[0][j] went through an uninitialized
	//    pointer (undefined behavior);
	//  - m_numOfNeurons was deleted first yet read again while walking the
	//    weight arrays (use-after-free). It is now deleted last.

	for(int i = 0 ; i < m_numberOfLayers ; i++)
		delete[] m_neuronValues[i];
	delete[] m_neuronValues;

	// Weight layers exist only for i >= 1.
	for(int i = 1 ; i < m_numberOfLayers ; i++){
		for(int j = 0 ; j < m_numOfNeurons[i] ; j++){
			delete[] m_neuronWeights[i][j];
		}
		delete[] m_neuronWeights[i];
	}
	delete[] m_neuronWeights;

	// Deleted last: the loops above still read m_numOfNeurons.
	delete[] m_numOfNeurons;
}


void ArtificialNeuralNetwork::feedForward(double *input){

	// Copy the input pattern into layer 0, then propagate activations
	// forward: each neuron computes sigmoid(weights . previousLayer + bias).

	for(int n = 0 ; n < m_numOfNeurons[0] ; n++)
		m_neuronValues[0][n] = input[n];

	for(int layer = 1 ; layer < m_numberOfLayers ; layer++){

		const int prevCount = m_numOfNeurons[layer-1];

		for(int neuron = 0 ; neuron < m_numOfNeurons[layer] ; neuron++){

			// Weighted sum over the previous layer's activations.
			double activation = 0.0;
			for(int prev = 0 ; prev < prevCount ; prev++)
				activation += m_neuronValues[layer-1][prev] * m_neuronWeights[layer][neuron][prev];

			// The final weight slot holds the bias term.
			activation += m_neuronWeights[layer][neuron][prevCount];

			m_neuronValues[layer][neuron] = sigmoid(activation);
		}
	}

}


// Return one activation value from the last (output) layer.
double ArtificialNeuralNetwork::getOutput(int index){
	const int outputLayer = m_numberOfLayers - 1;
	return m_neuronValues[outputLayer][index];
}

// Half the sum of squared errors over the output layer (SSE / 2), the
// conventional training objective whose gradient drops the factor of two.
double ArtificialNeuralNetwork::getMeanSquareError(double* target){

	const int outLayer = m_numberOfLayers - 1;
	double sumSquares = 0.0;

	for(int i = 0 ; i < m_numOfNeurons[outLayer] ; i++){
		const double diff = target[i] - m_neuronValues[outLayer][i];
		sumSquares += diff * diff;
	}

	return sumSquares / 2;
}

// Logistic activation: maps any real input into the open interval (0, 1).
double ArtificialNeuralNetwork::sigmoid(double inValue){
	const double negExp = exp(-inValue);
	return 1.0 / (1.0 + negExp);
}

// Load a flat weight vector into the network.
// Layout: for each layer (starting at 1), for each neuron, the weights to
// every neuron of the previous layer, followed by that neuron's bias weight.
// (The historical "loadWights" spelling is kept for caller compatibility.)
void ArtificialNeuralNetwork::loadWights(double *weightVector){

	int pos = 0;

	for(int layer = 1; layer < m_numberOfLayers ; ++layer){
		const int prevCount = m_numOfNeurons[layer-1];

		for(int neuron = 0 ; neuron < m_numOfNeurons[layer] ; neuron++){
			// pre == prevCount addresses the bias slot.
			for(int pre = 0 ; pre <= prevCount ; pre++){
				m_neuronWeights[layer][neuron][pre] = weightVector[pos++];
			}
		}
	}

}

// Dump the network topology and every weight (including bias weights)
// to "ANN.txt" in a human-readable report.
void ArtificialNeuralNetwork::printANN (){

	std::ofstream out("ANN.txt");
	out<<"Artificial neural network information"<<endl;
	out<<"ANN has ["<<m_numberOfLayers<<"] layers"<<endl;
	for(int i = 0 ; i < m_numberOfLayers ; i++)
		out<<"Layer "<<i<<" has "<<m_numOfNeurons[i]<<" neurons"<<endl;

	// Fixed typo in the section header ("WEIHTS" -> "WEIGHTS").
	out<<endl<<"-WEIGHTS-"<<endl;
	for(int layer = 1 ; layer < m_numberOfLayers ; layer++){
		out<<"In layer "<<layer<<" : "<<endl;
		for(int neuron = 0 ; neuron < m_numOfNeurons[layer] ; neuron++){
			out<<"  For "<<neuron<<" th neuron"<<endl;

			for(int weight = 0 ; weight < m_numOfNeurons[layer-1] ; weight++){
				out<<"     Weight between the neuron "<<neuron<<" and previous neuron "<< weight<< " is "<<m_neuronWeights[layer][neuron][weight]<<endl;
			}
			// The extra slot past the previous-layer count is the bias weight.
			out<<"          Bias weight in layer "<<layer<< " and for neuron "<<neuron<< " is "<<m_neuronWeights[layer][neuron][m_numOfNeurons[layer-1]]<<endl;
		}
	}
	out.close ();
}