#pragma once

//#define USE_OPENMP_FOR_STIMULATE_PERCEPTRONS
#define USE_OPENMP_FOR_BACK_PROPAGATION
//#define USE_OPENMP_FOR_MULTI_TRAIN
#define USE_OPENMP_FOR_CALC_TOTAL_ERROR

#include "KuniMLP.h"

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <memory>
#include <new>

class KuniMLPTrainer
{
public:
	typedef struct
	{
		double r_max_err;
		double r_err_sum;
	}STDERR;
	KuniMLP* m_pMLPs;
	KuniMLP* m_pMLPs_self;
	int m_nBestMLPIndex;
	int m_nMLPCount;

	double* m_pBestMaxErrors;
	double** m_pBestWeights;
	double* m_pTrainingRates;
	double m_rEscapeMaxError;
	int m_nMaxTrainingCount;
	double* m_pXs;
	int m_nXSize;
	int* m_pTs;
	int m_nTSize;
	int m_nDataCount;

	double** m_pErrors;
	double* (*m_pDEDSs)[2];

	int m_nKillCount;
	double m_rKillMaxError;

	double m_rMomentumRate;
	double** m_pMomentums;

	KuniMLPTrainer(void){};
	~KuniMLPTrainer(void)
	{
		delete[] m_pBestMaxErrors;

		for(int i=0;i<m_nMLPCount;i++)
			delete[] m_pBestWeights[i];
		delete[] m_pBestWeights;

		for(int i=0;i<m_nMLPCount;i++)
			delete[] m_pErrors[i];
		delete[] m_pErrors;

		for(int i=0;i<m_nMLPCount;i++)
		{
			delete[] m_pDEDSs[i][0];
			delete[] m_pDEDSs[i][1];
		}
		delete[] m_pDEDSs;
		if(m_pMLPs_self)
		{
			delete[] m_pTrainingRates;
			delete[] m_pMLPs_self;
		}

		for(int i=0;i<m_nMLPCount;i++)
			delete[] m_pMomentums[i];
		delete[] m_pMomentums;
	}
	KuniMLPTrainer(int mlp_count, int active_mode, int layer_count, int min_perceptron, int max_perceptron, int data_count, double* x_datas, int* t_datas, int x_size, int t_size
		, double escape_error, int max_training_count, double kill_error, int kill_count)
		:m_nMLPCount(mlp_count), m_rEscapeMaxError(escape_error), m_nMaxTrainingCount(max_training_count)
		, m_pXs(x_datas), m_nXSize(x_size), m_pTs(t_datas), m_nTSize(t_size), m_nDataCount(data_count), m_rKillMaxError(kill_error)
		,m_nKillCount(kill_count)
	{
		int *pctr_counts=new int[layer_count];

		m_pMLPs_self=new KuniMLP[m_nMLPCount];
		pctr_counts[layer_count-1]=m_nTSize;
		for(int i=0;i<m_nMLPCount;i++)
		{
			for(int j=0;j<layer_count-1;j++)
				pctr_counts[j]=randN(max_perceptron,min_perceptron);
			double active_rate=randR(2.3,0.8);
			new (m_pMLPs_self+i)KuniMLP(active_rate,active_mode,x_size,t_size,layer_count,pctr_counts);
		}
		m_pMLPs=m_pMLPs_self;
		delete[] pctr_counts;

		m_pTrainingRates=new double[m_nMLPCount];
		for(int i=0;i<m_nMLPCount;i++)
			m_pTrainingRates[i]=0.001;

		m_pBestMaxErrors=new double[m_nMLPCount];
		m_pBestWeights=new double*[m_nMLPCount];
		for(int i=0;i<m_nMLPCount;i++)
			m_pBestWeights[i]=new double[m_pMLPs[i].m_nWeightCount];
		m_pErrors=new double*[m_nMLPCount];
		for(int i=0;i<m_nMLPCount;i++)
			m_pErrors[i]=new double[m_nTSize];
		m_pDEDSs=(double*(*)[2])new double*[m_nMLPCount*2];

		for(int i=0;i<m_nMLPCount;i++)
		{
			int max=0;
			for(int j=0;j<m_pMLPs[i].m_nLayerCount;j++)
			{
				if(max<m_pMLPs[i].m_pLayers[j].nPerceptronCount)
					max=m_pMLPs[i].m_pLayers[j].nPerceptronCount;
			}
			m_pDEDSs[i][0]=new double[max];
			m_pDEDSs[i][1]=new double[max];
		}

		m_rMomentumRate=0.3;
		m_pMomentums=new double*[m_nMLPCount];
		for(int i=0;i<m_nMLPCount;i++)
			m_pMomentums[i]=new double[m_pMLPs[i].m_nWeightCount];
	}

	KuniMLPTrainer(KuniMLP* mlps, int mlp_count, int data_count, double* x_datas, int* t_datas, int x_size, int t_size,double* training_rates
		, double escape_error, int max_training_count, double kill_error, int kill_count)
		:m_pMLPs_self(0), m_pMLPs(mlps), m_nMLPCount(mlp_count), m_pTrainingRates(training_rates), m_rEscapeMaxError(escape_error), m_nMaxTrainingCount(max_training_count)
		, m_pXs(x_datas), m_nXSize(x_size), m_pTs(t_datas), m_nTSize(t_size), m_nDataCount(data_count), m_rKillMaxError(kill_error)
		,m_nKillCount(kill_count)
	{
		m_pBestMaxErrors=new double[m_nMLPCount];
		m_pBestWeights=new double*[m_nMLPCount];
		for(int i=0;i<m_nMLPCount;i++)
			m_pBestWeights[i]=new double[m_pMLPs[i].m_nWeightCount];
		m_pErrors=new double*[m_nMLPCount];
		for(int i=0;i<m_nMLPCount;i++)
			m_pErrors[i]=new double[m_nTSize];
		m_pDEDSs=(double*(*)[2])new double*[m_nMLPCount*2];

		for(int i=0;i<m_nMLPCount;i++)
		{
			int max=0;
			for(int j=0;j<m_pMLPs[i].m_nLayerCount;j++)
			{
				if(max<m_pMLPs[i].m_pLayers[j].nPerceptronCount)
					max=m_pMLPs[i].m_pLayers[j].nPerceptronCount;
			}
			m_pDEDSs[i][0]=new double[max];
			m_pDEDSs[i][1]=new double[max];
		}

		m_rMomentumRate=0.3;
		m_pMomentums=new double*[m_nMLPCount];
		for(int i=0;i<m_nMLPCount;i++)
			m_pMomentums[i]=new double[m_pMLPs[i].m_nWeightCount];
	}

	void CalcError(int mlp_index, int t_index)
	{
		int offset=m_nTSize*t_index;
		for(int i=0;i<m_nTSize;i++)
			m_pErrors[mlp_index][i]=m_pMLPs[mlp_index].m_pO[i]-m_pTs[offset+i];
	}
	double CalcMaxError(KuniMLP* mlp,int t_index)
	{
		int offset=m_nTSize*t_index;
		double max_err=0;
		for(int i=0;i<m_nTSize;i++)
		{
			double err=abs(mlp->m_pO[i]-m_pTs[offset+i]);
			if(max_err<err)
				max_err=err;
		}
		return max_err;
	}
	double CalcDODS(int mlp_index,int layer_index, int perceptron_index)
	{
		KuniMLP* mlp=m_pMLPs+mlp_index;
		double out=mlp->m_pLayers[layer_index].pPerceptrons[perceptron_index].rOut;
		if(mlp->m_nActiveMode)
			return 0.5*mlp->m_rActiveRate*(1+out)*(1-out);
		else
			return mlp->m_rActiveRate*out*(1-out);
	}
	void ErrorBackPropagationWithMomentum(int mlp_index)
	{
		KuniMLP* mlp=m_pMLPs+mlp_index;
		KuniMLP::Layer* L=mlp->m_pLayers+mlp->m_nLayerCount-1;
		double* p_momentums=m_pMomentums[mlp_index];
		double* p_copy_w=new double[mlp->m_nWeightCount];
		memcpy(p_copy_w,mlp->m_pWeights,sizeof(double)*mlp->m_nWeightCount);
		for(int i=0;i<L->nPerceptronCount;i++)
		{
			m_pDEDSs[mlp_index][0][i]=m_pErrors[mlp_index][i]*CalcDODS(mlp_index,mlp->m_nLayerCount-1,i);

			double ratedDEDS=m_pTrainingRates[mlp_index]*m_pDEDSs[mlp_index][0][i];
			int len_offset=L->pPerceptrons[i].pWeights-mlp->m_pWeights;
			int len;
			for(int j=0;j<L->pPerceptrons[i].nInputSize;j++)
			{
				len=len_offset+j;
				p_momentums[len]=ratedDEDS*(L-1)->pPerceptrons[j].rOut+p_momentums[len]*m_rMomentumRate;
			//	mlp->m_pWeights[len]-=p_momentums[len];
				p_copy_w[len]-=p_momentums[len];
			}
			len=len_offset+L->pPerceptrons[i].nInputSize;
			p_momentums[len]=ratedDEDS+p_momentums[len]*m_rMomentumRate;
		//	mlp->m_pWeights[len]-=p_momentums[len];
			p_copy_w[len]-=p_momentums[len];
		}

		for(int i=mlp->m_nLayerCount-2;i>=0;i--)
		{
			L=mlp->m_pLayers+i;
#ifdef USE_OPENMP_FOR_BACK_PROPAGATION
#pragma omp parallel for
#endif
			for(int j=0;j<L->nPerceptronCount;j++)
			{
				double sum=0;
				for(int k=0;k<(L+1)->nPerceptronCount;k++)
					sum+=m_pDEDSs[mlp_index][0][k]*(L+1)->pPerceptrons[k].pWeights[j];

				m_pDEDSs[mlp_index][1][j]=sum*CalcDODS(mlp_index,i,j);

				double ratedDEDS=m_pTrainingRates[mlp_index]*m_pDEDSs[mlp_index][1][j];
				int len_offset=L->pPerceptrons[j].pWeights-mlp->m_pWeights;
				int len;
				for(int k=0;k<L->pPerceptrons[j].nInputSize;k++)
				{
					len=len_offset+k;
					if(i==0)
						p_momentums[len]=ratedDEDS*mlp->m_pX[k]+p_momentums[len]*m_rMomentumRate;
					else
						p_momentums[len]=ratedDEDS*(L-1)->pPerceptrons[k].rOut+p_momentums[len]*m_rMomentumRate;
			//		mlp->m_pWeights[len]-=p_momentums[len];
					p_copy_w[len]-=p_momentums[len];
				}
				len=len_offset+L->pPerceptrons[j].nInputSize;
				p_momentums[len]=ratedDEDS+p_momentums[len]*m_rMomentumRate;
			//	mlp->m_pWeights[len]-=p_momentums[len];
				p_copy_w[len]-=p_momentums[len];
			}
			double* temp=m_pDEDSs[mlp_index][0];
			m_pDEDSs[mlp_index][0]=m_pDEDSs[mlp_index][1];
			m_pDEDSs[mlp_index][1]=temp;
		}
		memcpy(mlp->m_pWeights,p_copy_w,sizeof(double)*mlp->m_nWeightCount);
		delete[] p_copy_w;
	}
	void Train()
	{
		m_nBestMLPIndex=-1;
#ifdef DEBUG_PRINT
		int *stop_count=new int[m_nMLPCount];
#endif

#ifdef USE_OPENMP_FOR_MULTI_TRAIN
#pragma omp parallel for
#endif
		for(int i=0;i<m_nMLPCount;i++)
		{
			m_pBestMaxErrors[i]=-1;
			int count=0;
			for(int j=0;j<m_pMLPs[i].m_nWeightCount;j++)
				m_pMomentums[i][j]=0;
			double *max_errs=new double[m_nDataCount];
			double max_err_threshold=0;
			do
			{
				for(int index=0;index<m_nDataCount;index++)
				{
					if(count>m_nDataCount*3 && max_errs[index]<max_err_threshold)
						continue;
					m_pMLPs[i].Stimulate(m_pXs+index*m_nXSize);
					CalcError(i,index);
					ErrorBackPropagationWithMomentum(i);
					count++;
				}
				double max__max_err=0;
				int max__max_err__index;
#ifdef USE_OPENMP_FOR_CALC_TOTAL_ERROR
#pragma omp parallel for
#endif
				for(int j=0;j<m_nDataCount;j++)
				{
					KuniMLP mlp(m_pMLPs[i]);
					mlp.Stimulate(m_pXs+j*m_nXSize);
					max_errs[j]=CalcMaxError(&mlp,j);
					if(max__max_err<max_errs[j])
					{
						max__max_err=max_errs[j];
						max__max_err__index=j;
					}
				}
				if(max__max_err<m_pBestMaxErrors[i] || m_pBestMaxErrors[i]==-1)
				{
					m_pBestMaxErrors[i]=max__max_err;
					memcpy(m_pBestWeights[i],m_pMLPs[i].m_pWeights,m_pMLPs[i].m_nWeightCount*sizeof(double));
				}
				max_err_threshold=max__max_err*0.85;

#ifdef DEBUG_PRINT
				printf("-%d MLP %d: max err:%lf best err sum %lf\n",i,count,max__max_err,m_pBestMaxErrors[i]);
#endif
			}while((count<m_nKillCount || m_pBestMaxErrors[i]<m_rKillMaxError) && count<m_nMaxTrainingCount && m_pBestMaxErrors[i]>m_rEscapeMaxError);

			memcpy(m_pMLPs[i].m_pWeights,m_pBestWeights[i],m_pMLPs[i].m_nWeightCount*sizeof(double));
			if(m_nBestMLPIndex==-1 || m_pBestMaxErrors[m_nBestMLPIndex]>m_pBestMaxErrors[i])
				m_nBestMLPIndex=i;

#ifdef DEBUG_PRINT
			stop_count[i]=count;
#endif

			delete[] max_errs;
		}

#ifdef DEBUG_PRINT
		for(int i=0;i<m_nMLPCount;i++)
		{
			printf("\n\n-%d MLP  %dLayers\nPerceptronCounts:",i,m_pMLPs[i].m_nLayerCount);
			for(int j=0;j<m_pMLPs[i].m_nLayerCount;j++)
				printf("%d ",m_pMLPs[i].m_pLayers[j].nPerceptronCount);
			printf("\nTrainingRate: %lf",m_pTrainingRates[i]);
			printf("\nActiveRate: %lf",m_pMLPs[i].m_rActiveRate);
			printf("\nStopCount: %d",stop_count[i]);
			printf("\nMaxErr: %.3lf\n",m_pBestMaxErrors[i]);
		}
		printf("\nBest MLP is: index %d\n",m_nBestMLPIndex);
		delete[] stop_count;
#endif

	}
};

