#pragma once

#include <cmath>    // exp
#include <cstdlib>  // srand, rand
#include <cstring>  // memcpy
#include <ctime>    // time
#include <memory.h>
#include <omp.h>

#include "math_base.h"

class KuniMLP
{
public:
	// One neuron: a non-owning pointer into the network's flat weight
	// buffer (nInputSize weights followed by one bias term), the output
	// of the most recent activation, and the fan-in size.
	typedef struct
	{
		double* pWeights;
		double rOut;
		int nInputSize;
	}Perceptron;

	// One layer: a non-owning view into the network's flat perceptron array.
	typedef struct
	{
		Perceptron* pPerceptrons;
		int nPerceptronCount;
	}Layer;


	double m_rActiveRate;	// sigmoid steepness (multiplies the weighted sum)
	int m_nActiveMode;		// 0 = logistic sigmoid in (0,1); nonzero = bipolar sigmoid in (-1,1)
	double* m_pX;			// input buffer, m_nXSize entries (owned)
	int m_nXSize;
	double* m_pO;			// output buffer, m_nOSize entries (owned)
	int m_nOSize;
	Layer* m_pLayers;		// owned array of m_nLayerCount layers
	int m_nLayerCount;
	bool m_bIsCopy;			// true => m_pWeights is borrowed from another KuniMLP and must not be freed

	Perceptron* m_pPerceptrons;	// owned flat array of every perceptron in the network
	double* m_pWeights;			// owned (unless m_bIsCopy) flat weight storage
	int m_nWeightCount;

	// Default constructor: zero every member so the destructor is safe.
	// (Previously all pointers and m_bIsCopy were left uninitialized, making
	// destruction of a default-constructed object undefined behavior.)
	KuniMLP(void)
		:m_rActiveRate(0),m_nActiveMode(0),m_pX(0),m_nXSize(0),m_pO(0),m_nOSize(0)
		,m_pLayers(0),m_nLayerCount(0),m_bIsCopy(false)
		,m_pPerceptrons(0),m_pWeights(0),m_nWeightCount(0)
	{};
	// NOTE(review): the implicitly-generated copy-assignment operator still
	// performs a shallow pointer copy (leak + double free); avoid assigning
	// KuniMLP objects — use the copy constructor instead.
	~KuniMLP(void)
	{
		delete[] m_pX;
		delete[] m_pO;
		delete[] m_pLayers;
		delete[] m_pPerceptrons;
		if(!m_bIsCopy)		// weight storage is shared when this object is a copy
			delete[] m_pWeights;
	}

	// Builds a fully-connected MLP.
	//   active_rate       - sigmoid steepness
	//   active_mode       - 0: logistic, nonzero: bipolar (see Activate)
	//   x_size            - input vector length
	//   o_size            - output vector length (callers should pass the last
	//                       layer's perceptron count — TODO confirm, not checked here)
	//   layer_count       - number of layers
	//   perceptron_counts - perceptron count per layer, layer_count entries
	//   weights           - optional flat weight block (m_nWeightCount doubles)
	//                       to copy; when 0, weights are seeded randomly in
	//                       [-0.5,0.5) and biases in [-0.05,0.05)
	KuniMLP(double active_rate, int active_mode,int x_size, int o_size, int layer_count, int* perceptron_counts, double* weights=0)
		:m_rActiveRate(active_rate), m_nActiveMode(active_mode), m_nXSize(x_size), m_nOSize(o_size), m_nLayerCount(layer_count)
	{
		m_bIsCopy=false;
		srand((unsigned)time(0));	// seeds rand() used by randR (math_base.h)

		BuildTopology(perceptron_counts);

		m_pWeights=new double[m_nWeightCount];
		WireWeights();

		if(weights)
		{
			memcpy(m_pWeights,weights,sizeof(double)*m_nWeightCount);
		}
		else
		{
			for(int i=0;i<m_nLayerCount;i++)
			{
				for(int j=0;j<m_pLayers[i].nPerceptronCount;j++)
				{
					Perceptron& pctr=m_pLayers[i].pPerceptrons[j];
					for(int k=0;k<pctr.nInputSize;k++)
						pctr.pWeights[k]=randR(0.5,-0.5);
					// last slot is the bias; seed it with a smaller magnitude
					pctr.pWeights[pctr.nInputSize]=randR(0.05,-0.05);
				}
			}
		}
	}

	// Copy constructor: duplicates the topology and the scratch buffers but
	// SHARES the weight storage of the source (m_bIsCopy=true). The source
	// must outlive the copy. Now takes const& — the source is only read.
	KuniMLP(const KuniMLP& mlp)
		:m_rActiveRate(mlp.m_rActiveRate),m_nActiveMode(mlp.m_nActiveMode),m_nXSize(mlp.m_nXSize),m_nOSize(mlp.m_nOSize)
		,m_nLayerCount(mlp.m_nLayerCount),m_pWeights(mlp.m_pWeights),m_nWeightCount(mlp.m_nWeightCount)
	{
		m_bIsCopy=true;

		// Mirror the source's layer sizes, then build an identical topology.
		int* counts=new int[m_nLayerCount];
		for(int i=0;i<m_nLayerCount;i++)
			counts[i]=mlp.m_pLayers[i].nPerceptronCount;
		BuildTopology(counts);
		delete[] counts;

		WireWeights();	// point our perceptrons into the shared weight buffer
	}

	// Applies the activation function to `sum` and stores it in pctr->rOut.
	void Activate(Perceptron* pctr,double sum)
	{
		if(m_nActiveMode)
			pctr->rOut=((double)2)/(1+exp((-m_rActiveRate)*sum))-1;	// bipolar: (-1,1)
		else
			pctr->rOut=((double)1)/(1+exp((-m_rActiveRate)*sum));	// logistic: (0,1)
	}

	// Forward pass: copies x (m_nXSize doubles) into m_pX, propagates layer
	// by layer, and leaves the last layer's activations in m_pO.
	void Stimulate(double* x)
	{
		memcpy(m_pX,x,sizeof(double)*m_nXSize);

		for(int i=0;i<m_nLayerCount;i++)
		{
			// Perceptrons within one layer are independent, so the j-loop may
			// be parallelized; layers must still run strictly in order.
#ifdef USE_OPENMP_FOR_STIMULATE_PERCEPTRONS
#pragma omp parallel for
#endif
			for(int j=0;j<m_pLayers[i].nPerceptronCount;j++)
			{
				double sum=0;
				for(int k=0;k<m_pLayers[i].pPerceptrons[j].nInputSize;k++)
				{
					if(i==0)
						sum+=( m_pX[k]*m_pLayers[i].pPerceptrons[j].pWeights[k] );
					else
						sum+=( m_pLayers[i-1].pPerceptrons[k].rOut*m_pLayers[i].pPerceptrons[j].pWeights[k] );
				}
				sum+=m_pLayers[i].pPerceptrons[j].pWeights[m_pLayers[i].pPerceptrons[j].nInputSize];	// bias term
				Activate(m_pLayers[i].pPerceptrons+j,sum);

				if(i==m_nLayerCount-1)
					m_pO[j]=m_pLayers[i].pPerceptrons[j].rOut;
			}
		}
	}

private:
	// Shared constructor helper: allocates the input/output buffers, the layer
	// array, and the flat perceptron array; wires each perceptron's fan-in and
	// computes m_nWeightCount. Does NOT allocate or assign weight storage.
	void BuildTopology(const int* perceptron_counts)
	{
		m_pX=new double[m_nXSize];
		m_pO=new double[m_nOSize];
		m_pLayers=new Layer[m_nLayerCount];

		int perceptron_count=0;
		for(int i=0;i<m_nLayerCount;i++)
			perceptron_count+=(m_pLayers[i].nPerceptronCount=perceptron_counts[i]);
		m_pPerceptrons=new Perceptron[perceptron_count];

		Perceptron* p_pctr=m_pPerceptrons;
		m_nWeightCount=0;
		for(int i=0;i<m_nLayerCount;i++)
		{
			m_pLayers[i].pPerceptrons=p_pctr;
			p_pctr+=m_pLayers[i].nPerceptronCount;

			// fan-in: network input for the first layer, previous layer otherwise
			int fan_in=(i==0)?m_nXSize:m_pLayers[i-1].nPerceptronCount;
			for(int j=0;j<m_pLayers[i].nPerceptronCount;j++)
				m_pLayers[i].pPerceptrons[j].nInputSize=fan_in;
			m_nWeightCount+=( (fan_in+1)*m_pLayers[i].nPerceptronCount );	// +1 for the bias
		}
	}

	// Points each perceptron's pWeights at its slice of the flat m_pWeights
	// buffer (nInputSize weights + 1 bias per perceptron, in layer order).
	void WireWeights(void)
	{
		double* p_weights=m_pWeights;
		for(int i=0;i<m_nLayerCount;i++)
		{
			for(int j=0;j<m_pLayers[i].nPerceptronCount;j++)
			{
				m_pLayers[i].pPerceptrons[j].pWeights=p_weights;
				p_weights+=(m_pLayers[i].pPerceptrons[j].nInputSize+1);
			}
		}
	}
};

