package pl.edu.agh.neural.core;

import pl.edu.agh.bp.momentum.IMomentumFunction;
import pl.edu.agh.som.learning.ILearningFunction;

import java.util.ArrayList;
import java.util.List;

/**
 * A feed-forward network trained with a teacher (supervised learning):
 * each input vector is paired with an expected output vector, and the
 * layers are trained by backpropagation from the output layer down.
 */
public class BasicTrainableNetworkWithTeacher extends BasicNetwork implements ITrainableNetworkWithTeacher {

    /** Trainable layers in forward order; backpropagation walks them in reverse. */
    protected List<ITrainableLayerWithTeacher> trainableLayers;
    /** Convenience reference to the last trainable layer, i.e. the network output. */
    protected ITrainableLayerWithTeacher outputLayer;

    /**
     * Creates a trainable network.
     *
     * @param inputLayer the input layer feeding the network
     * @param layers     trainable layers in forward order; must contain at least one layer
     * @throws IllegalArgumentException if {@code layers} is empty
     */
    public BasicTrainableNetworkWithTeacher(InputLayer inputLayer, List<? extends ITrainableLayerWithTeacher> layers) {
        super(inputLayer, layers);
        if (layers.isEmpty()) {
            // Fail fast with a clear message instead of an IndexOutOfBoundsException below.
            throw new IllegalArgumentException("At least one trainable layer is required");
        }
        this.trainableLayers = new ArrayList<>(layers);
        this.outputLayer = this.trainableLayers.get(this.trainableLayers.size() - 1);
    }

    /**
     * Trains the network for a fixed number of steps (epochs). In each step every
     * data vector is evaluated, the per-sample RMS output error is tracked, and the
     * layers are trained output-first with the step-dependent learning speed and momentum.
     *
     * @param dataVectors      input samples, one per row
     * @param expectedVectors  expected outputs, row-aligned with {@code dataVectors}
     * @param steps            number of training epochs to run
     * @param learningFunction yields the learning speed for a given step
     * @param momentumFunction yields the momentum for a given step
     * @throws IllegalArgumentException if the two vector arrays differ in length
     */
    @Override
    public void train(
            double[][] dataVectors,
            double[][] expectedVectors,
            int steps,
            ILearningFunction learningFunction,
            IMomentumFunction momentumFunction) {
        int dataVectorsCount = dataVectors.length;
        if (dataVectorsCount != expectedVectors.length) {
            throw new IllegalArgumentException(
                    "Data and expected vector counts differ: "
                            + dataVectorsCount + " vs " + expectedVectors.length);
        }

        for (int step = 0; step < steps; step++) {
            // Learning speed and momentum depend only on the step — compute once per epoch.
            double learningSpeed = learningFunction.evaluate(step);
            double momentum = momentumFunction.evaluate(step);

            double maxerr = 0.0;
            for (int i = 0; i < dataVectorsCount; i++) {
                evaluate(dataVectors[i]);

                // NOTE(review): getError() is read after evaluate() but before this
                // sample's train() call; it presumably reflects errors set by the
                // previous training pass — confirm against the layer implementation.
                double errsum = 0.0;
                for (INeuron neuron : outputLayer.getNeurons()) {
                    errsum += neuron.getError() * neuron.getError();
                }

                // Track the worst per-sample RMS error seen this epoch.
                double err = Math.sqrt(errsum / outputLayer.getNeurons().size());
                if (err > maxerr) {
                    maxerr = err;
                }

                // Backpropagate: output layer trains against the expected vector,
                // then each hidden layer trains against its successor, in reverse order.
                outputLayer.train(learningSpeed, momentum, expectedVectors[i]);
                for (int j = trainableLayers.size() - 2; j >= 0; j--) {
                    trainableLayers.get(j).train(learningSpeed, momentum, trainableLayers.get(j + 1));
                }
            }
            System.out.println(step + " " + maxerr);
        }
    }
}
