#include "GHA.h"

#include <math.h>

#include <cstdio>
#include <fstream>

#include "../Utils/Image/ImageDisplay.h"

GHA::GHA(const shared_ptr<CGHANetowork> & pNN, double LearningRate, double eps) :
	m_NN(pNN),
	m_LeaningRate(LearningRate),
	m_EPS(eps)
{
}

/// Trains the network's weights on a single input pattern using Sanger's rule
/// (Generalized Hebbian Algorithm):
///
///     dw_j = eta * y_j * ( x - sum_{k <= j} y_k * w_k )
///
/// Epochs repeat until every neuron's output changes by no more than m_EPS
/// between consecutive epochs.
///
/// @param inputData    input pattern x presented to the network each epoch
/// @param imageDisplay currently unused; kept for interface compatibility
///
/// NOTE(review): there is no iteration cap, so this loops forever if the
/// outputs never settle within m_EPS (the original code had a commented-out
/// 100000-iteration guard).
void GHA::Training(const Vector2 & inputData, CImageDisplay * imageDisplay)
{
	(void)imageDisplay; // not used yet; silences unused-parameter warnings

	int NeuronCount = this->m_NN->m_Neurons.size();

	Vector2  YJ(this->m_NN->m_InputDimension);     // network response y for the current epoch
	Vector2  WY(this->m_NN->m_InputDimension);     // Sanger feedback term: sum_{k<=j} y_k * w_k
	Vector2  WK(this->m_NN->m_InputDimension);     // scratch: w_k scaled by y_k
	Vector2  W(this->m_NN->m_InputDimension);      // scratch: neuron j's weight vector
	Vector2  YX(this->m_NN->m_InputDimension);     // Hebbian term: eta * y_j * x
	Vector2  DeltaW(this->m_NN->m_InputDimension); // full weight update for neuron j
	Vector2  LastYJ(this->m_NN->m_InputDimension); // response of the previous epoch (convergence test)

	LastYJ = this->m_NN->Execute(inputData);
	// Weights are unchanged since the Execute above, so the first epoch's
	// response is the same value — reuse it instead of running Execute again.
	// (Assumes Execute is a pure forward pass with no side effects.)
	YJ = LastYJ;

	while (true)
	{
		// One epoch: apply Sanger's rule to every neuron in order.
		for (int j = 0; j < NeuronCount; j++)
		{
			// Accumulate the lateral-inhibition term sum_{k<=j} y_k * w_k.
			WY.Clear();
			for (int k = 0; k <= j; k++)
			{
				WK = this->m_NN->m_Neurons[k]->GetWeightVector();
				WK = WK * YJ[k];
				WY = WY + WK;
			}
			WY = WY * (YJ[j] * this->m_LeaningRate);

			// Hebbian term eta * y_j * x.
			YX = inputData;
			YX = YX * (YJ[j] * this->m_LeaningRate);

			// dw_j = eta * y_j * x  -  eta * y_j * sum_{k<=j} y_k * w_k
			DeltaW = YX;
			DeltaW = DeltaW - WY;

			W = this->m_NN->m_Neurons[j]->GetWeightVector();
			W = W + DeltaW;
			this->m_NN->m_Neurons[j]->SetWeightVector(W);
		}

		// Re-evaluate the network with the updated weights.
		YJ = this->m_NN->Execute(inputData);

		// Converged when no neuron's output moved by more than m_EPS.
		bool converged = true;
		for (int n = 0; n < NeuronCount; n++)
		{
			if (::fabs(LastYJ[n] - YJ[n]) > this->m_EPS)
			{
				converged = false;
				break;
			}
		}

		if (converged)
		{
			printf("YJ = %s\n", YJ.ToString().c_str());
			break;
		}

		LastYJ = YJ;
	}
}