#pragma once
/***********************************************************************************************
COPYRIGHT 2011 Mafahir Fairoze

This file is part of Neural++.
(Project Website : http://mafahir.wordpress.com/projects/neuralplusplus)

Neural++ is free software. You can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software Foundation, either version 3
of the License, or (at your option) any later version.

Neural++ is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License <http://www.gnu.org/licenses/> for more details.

***********************************************************************************************/

#include "ActivationLayer.h"
#include "../Network.h"

namespace NeuralPlusPlus
	{
	namespace Core
		{
		namespace Backpropagation        
			{
			/// <summary>
			/// This class extends a <see cref="Network"/> and represents a Backpropagation neural network.
			/// </summary>
			class BackpropagationNetwork : public Network
				{
				// Accumulated mean-squared error for the most recent training pass.
				// NOTE(review): exact accumulation/reset points live in the .cpp — confirm there.
				private: double meanSquaredError;
				// Whether meanSquaredError currently holds a valid value (presumably
				// invalidated when training restarts — verify against the .cpp).
				private: bool isValidMSE;

						 /// <summary>
						 /// Creates a new Back Propagation Network, with the specified input and output layers. (You
						 /// are required to connect all layers using appropriate synapses, before using the constructor.
						 /// Any changes made to the structure of the network after its creation may lead to complete
						 /// malfunctioning)
						 /// </summary>
						 /// <param name="inputLayer">Layer that receives the network's input vector (non-owning pointer).</param>
						 /// <param name="outputLayer">Layer that produces the network's output vector (non-owning pointer).</param>
				public: BackpropagationNetwork(ActivationLayer *inputLayer, ActivationLayer *outputLayer);

						/// <summary>
						/// <para>
						/// Trains the network for the given training sample (Online training mode). Note that this
						/// method trains the sample only once irrespective of the values of <c>currentIteration</c>
						/// and <c>trainingEpochs</c>. Those arguments are just used to adjust training parameters
						/// which are dependent on training progress.
						/// </para>
						/// </summary>
						/// <param name="trainingSample">The sample to learn (non-owning pointer).</param>
						/// <param name="currentIteration">Index of the current training iteration, used only to scale progress-dependent parameters.</param>
						/// <param name="trainingEpochs">Total number of training epochs, used only to scale progress-dependent parameters.</param>
				public: void Learn(TrainingSample *trainingSample, int currentIteration, int trainingEpochs) override;
				
						/// <summary>
						/// Trains the neural network for the given training set (Batch Training).
						/// Overloads — does not override — the single-sample <c>Learn</c> above.
						/// </summary>
						/// <param name="trainingSet">The set of samples to train on (non-owning pointer).</param>
						/// <param name="trainingEpochs">Number of epochs to train for.</param>
				public: void Learn(TrainingSet *trainingSet, int trainingEpochs);

						/// <summary>
						/// Invokes BeginEpochEvent. Called once before each training epoch begins.
						/// </summary>
				protected: void OnBeginEpoch(int currentIteration, TrainingSet *trainingSet) override;

						   /// <summary>
						   /// Invokes EndEpochEvent. Called once after each training epoch completes.
						   /// </summary>
				protected: void OnEndEpoch(int currentIteration, TrainingSet *trainingSet) override;

						   /// <summary>
						   /// Protected helper used to train a single learning sample; invoked by the
						   /// <c>Learn</c> overloads for each sample processed.
						   /// </summary>
				protected: void LearnSample(TrainingSample *trainingSample, int currentIteration, int trainingEpochs) override;
				};
			}
		}
	}