/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */

package poker.neuralnetwork;

import java.util.Iterator;
import java.util.List;
import java.util.Objects;

/**
 * Decorator pattern. The BackPropNet has an internal network and implements the
 * backpropagation algorithm onto the network.
 *
 *
 * @author Benjamin L. Brodie <blbrodie@gmail.com>
 */
public class BackPropagationNetwork implements NeuralNetwork, BackPropagation{
    NeuralNetwork network;


    public BackPropagationNetwork(NeuralNetwork network){
        this.network = network;
    }

    /**
     * A backpropagation network takes inputs and ideal outputs and propagates
     * the error based on the difference between the actual outputs and the ideal
     * outputs.
     *
     * 
     * @param inputs the inputs the the network
     * @param idealOutputs the ideal outputs, what the actual outputs will
     *                     be measured against
     */
    public void update(List<Double> inputs, List<Double> idealOutputs){
        //get the actual outputs
        List<Double> actualOutputs = process(inputs);
        
        //foreach loop
        //because NeuralNetwork has an Iterator implemented, you can do this
        //starting from the output layer, it gets each layer until the last layer
        //A layer is a List of Edge. Layer itself is not an object, its just the name
        //implicitly defined in the foreach loop.
        for (List<Edge> layer : network){
            //algorithm goes in here.
            for (Edge edge : layer){
                //edge.getWeight();
                //edge.addToWeight(some value);
               

            }
        }

    }

    public List<Double> process(List<Double> inputs) {
        return network.process(inputs);
    }

    public Iterator<List<Edge>> iterator() {
        return network.iterator();
    }





}
