/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package com.rultax.neural.feedforward.train.backpropagation;

import com.rultax.exception.NeuralNetworkError;
import com.rultax.neural.feedforward.FeedForwardLayer;
import com.rultax.neural.feedforward.FeedForwardNetwork;
import com.rultax.neural.feedforward.train.Train;
import java.util.HashMap;
import java.util.Map;

/**
 *
 * @author Scott
 */
/**
 * Trains a {@link FeedForwardNetwork} using the classic backpropagation
 * algorithm with a configurable learning rate and momentum term.
 *
 * <p>Each call to {@link #iteration()} performs one full pass over the
 * training set: outputs are computed for every input row, per-layer error
 * terms are accumulated, and a single weight update is applied (batch
 * learning). The overall network error is then recomputed and made
 * available via {@link #getError()}.
 *
 * <p>NOTE(review): this class is not thread-safe; the per-layer state in
 * {@code layerMap} is mutated during training.
 *
 * @author Scott
 */
public class Backpropagation implements Train {

    /** Network error from the most recent call to {@link #iteration()}. */
    private double error;

    /** Scales how strongly accumulated deltas adjust the weights each learning pass. */
    private final double learnRate;

    /** Fraction of the previous weight change carried into the next change. */
    private final double momentum;

    /** The network being trained. */
    private final FeedForwardNetwork network;

    /** Per-layer backpropagation state, one entry for every layer of the network. */
    private final Map<FeedForwardLayer, BackpropagationLayer> layerMap =
            new HashMap<FeedForwardLayer, BackpropagationLayer>();

    /** Training input rows; row {@code i} pairs with {@code ideal[i]}. */
    private final double[][] input;

    /** Expected output rows, parallel to {@code input}. */
    private final double[][] ideal;

    /**
     * Constructs a backpropagation trainer for the given network and
     * training set, and builds the per-layer training state.
     *
     * @param network   the network to train
     * @param input     training input rows
     * @param ideal     expected output rows, parallel to {@code input}
     * @param learnRate the learning rate applied during weight updates
     * @param momentum  the momentum term applied during weight updates
     */
    public Backpropagation(final FeedForwardNetwork network,
            final double[][] input, final double[][] ideal,
            final double learnRate, final double momentum) {
        this.network = network;
        this.learnRate = learnRate;
        this.momentum = momentum;
        this.input = input;
        this.ideal = ideal;

        // One BackpropagationLayer per network layer; layers added to the
        // network after construction are not trainable (see getBackpropagationLayer).
        for (final FeedForwardLayer layer : network.getLayers()) {
            final BackpropagationLayer bpl = new BackpropagationLayer(this, layer);
            this.layerMap.put(layer, bpl);
        }
    }

    /**
     * Accumulates the error terms for every layer against one ideal output
     * vector. Errors are cleared first, then propagated from the output
     * layer backwards through the hidden layers.
     *
     * @param ideal the expected output vector; its length must match the
     *              output layer's neuron count
     * @throws NeuralNetworkError if {@code ideal} does not match the size
     *                            of the network's output layer
     */
    public void calcError(final double[] ideal) {
        if (ideal.length != this.network.getOutputLayer().getNeuronCount()) {
            throw new NeuralNetworkError("Size mismatch: Can't calcError for ideal input size="
                    + ideal.length + " for output layer size="
                    + this.network.getOutputLayer().getNeuronCount());
        }

        // Reset accumulated error on every layer before this pass.
        for (final FeedForwardLayer layer : this.network.getLayers()) {
            getBackpropagationLayer(layer).clearError();
        }

        // Propagate error terms backwards: output layer first, then hidden layers.
        for (int i = this.network.getLayers().size() - 1; i >= 0; i--) {
            final FeedForwardLayer layer = this.network.getLayers().get(i);
            if (layer.isOutput()) {
                getBackpropagationLayer(layer).calcError(ideal);
            } else {
                getBackpropagationLayer(layer).calcError();
            }
        }
    }

    /**
     * Returns the training state associated with the given network layer.
     *
     * @param layer the network layer to look up
     * @return the backpropagation state for {@code layer}
     * @throws NeuralNetworkError if the layer was not part of the network
     *                            when this trainer was constructed
     */
    public BackpropagationLayer getBackpropagationLayer(final FeedForwardLayer layer) {
        final BackpropagationLayer result = this.layerMap.get(layer);

        if (result == null) {
            throw new NeuralNetworkError(
                    "Layer unknown to backpropagation trainer, was a layer added after training began?");
        }

        return result;
    }

    /**
     * Returns the network error computed at the end of the most recent
     * {@link #iteration()}; zero before the first iteration.
     *
     * @return the current training error
     */
    @Override
    public double getError() {
        return this.error;
    }

    /**
     * Returns the network being trained.
     *
     * @return the network under training
     */
    @Override
    public FeedForwardNetwork getNetwork() {
        return this.network;
    }

    /**
     * Performs one training iteration: accumulates error over the entire
     * training set, applies a single batch weight update, and recomputes
     * the overall network error.
     */
    @Override
    public void iteration() {
        for (int i = 0; i < this.input.length; i++) {
            this.network.computeOutputs(this.input[i]);
            calcError(this.ideal[i]);
        }
        learn();

        this.error = this.network.calculateError(this.input, this.ideal);
    }

    /**
     * Applies the accumulated error deltas to every layer's weights using
     * the configured learning rate and momentum.
     */
    public void learn() {
        for (final FeedForwardLayer feedForwardLayer : this.network.getLayers()) {
            getBackpropagationLayer(feedForwardLayer).learn(learnRate, momentum);
        }
    }
}
