package neuralnet;

/**
 * A single layer of a feed-forward neural network with sigmoid activation,
 * trained by backpropagation with a momentum term.
 *
 * <p>A layer constructed with a {@code null} input layer acts as the network's
 * input layer: its "weighted input sums" array holds the raw input values and
 * no weights are allocated for it.
 */
public class NodeLayer {

    /** Post-sigmoid activation value of each node in this layer. */
    private double[] outputs;
    /** inputWeights[i][j]: weight from input-layer node j into node i. Null for an input layer. */
    private double[][] inputWeights;
    /** Pre-activation sums; for an input layer this array holds the raw inputs instead. */
    private double[] weightedInputSums;
    /** Most recent weight deltas, kept for the momentum term. Null for an input layer. */
    private double[][] weightChanges;
    /** One bias weight per node. */
    private double[] biasWeights;
    /** Back-propagated error term (delta) for each node, filled by calcErrors(). */
    private double[] errors;

    private NodeLayer inputLayer, outputLayer;

    private int numOutputs;

    private int numInputs;

    private double bias;

    private double momentum;

    private double learningRate;

    /**
     * Constructs a layer with {@code numOutputs} nodes.
     *
     * @param inputLayer   the preceding layer, or {@code null} if this is the input layer
     * @param outputLayer  the following layer, or {@code null} if this is the output layer
     *                     (may be wired later via {@link #setOutputLayer})
     * @param numOutputs   number of nodes in this layer
     * @param bias         bias input value fed to every node
     * @param learningRate step size for weight updates
     * @param momentum     fraction of the previous weight change added to each update
     */
    public NodeLayer(NodeLayer inputLayer, NodeLayer outputLayer, int numOutputs, double bias,
            double learningRate, double momentum) {

        this.learningRate = learningRate;
        this.momentum = momentum;
        this.bias = bias;
        this.numOutputs = numOutputs;
        this.inputLayer = inputLayer;
        this.outputLayer = outputLayer;

        // One output, pre-activation sum, bias weight, and error slot per node.
        outputs = new double[numOutputs];
        weightedInputSums = new double[numOutputs];
        biasWeights = new double[numOutputs];
        errors = new double[numOutputs];

        // Weight matrices exist only when there is a preceding layer to connect to.
        if(inputLayer != null) {
            numInputs = inputLayer.getNumOutputs();
            inputWeights = new double[numOutputs][numInputs];
            weightChanges = new double[numOutputs][numInputs];

        } else {
            numInputs = 0;
        }
    }

    /**
     * Randomizes every weight in this layer.
     *
     * <p>Weights are drawn uniformly from [-0.5, 0.5): a zero-centered range
     * breaks symmetry and keeps sigmoid nodes out of their flat saturation
     * regions, unlike the all-positive [0, 1) range.
     */
    public void initialize() {
        for(int i = 0; i < numOutputs; i++) {
            biasWeights[i] = Math.random() - 0.5;

            for(int j = 0; j < numInputs; j++) {
                inputWeights[i][j] = Math.random() - 0.5;
            }
        }
    }

    /**
     * Sets the layer that follows this one (needed for hidden-layer error
     * propagation in {@link #calcErrors}).
     *
     * @param o the next layer in the network
     */
    public void setOutputLayer(NodeLayer o) {
        outputLayer = o;
    }

    /**
     * Gets the number of outputs of this layer.
     *
     * @return The number of outputs of this layer.
     */
    public int getNumOutputs() {
        return numOutputs;
    }

    /**
     * Sets the inputs of an input layer. Has no effect if the layer is not an
     * input layer or if {@code newInputs} is null or wrongly sized.
     *
     * @param newInputs the raw input values, one per node of this layer
     * @return true iff the operation was successful.
     */
    public boolean setInputs(double[] newInputs) {

        // BUG FIX: the original condition was inverted — it rejected exactly
        // the valid case (input layer with a correctly-sized array) and
        // accepted only a wrongly-sized one.
        if(inputLayer != null || newInputs == null
                || newInputs.length != weightedInputSums.length) {
            return false;
        }

        weightedInputSums = newInputs;
        return true;
    }

    /**
     * Returns the outputs of this layer.
     *
     * <p>For an input layer the raw input values are the outputs; otherwise
     * the sigmoid activations computed by {@link #feedForward} are returned.
     *
     * @return The output values of this layer.
     */
    public double[] getOutputs() {

        if(inputLayer == null) {
            return weightedInputSums;
        } else {
            return outputs;
        }
    }

    /**
     * Feeds signals from the input layer forward through this layer, computing
     * each node's weighted input sum and its sigmoid activation.
     *
     * @return The output values of this layer.
     */
    public double[] feedForward() {

        // An input layer just passes its raw values through.
        if(inputLayer == null) return weightedInputSums;

        double[] inputValues = inputLayer.getOutputs();

        for(int i = 0; i < numOutputs; i++) {

            weightedInputSums[i] = 0.0;

            for(int j = 0; j < numInputs; j++) {
                weightedInputSums[i] += inputWeights[i][j] * inputValues[j];
            }
            weightedInputSums[i] += biasWeights[i] * bias;

            // Logistic sigmoid activation.
            outputs[i] = 1.0 / (1.0 + Math.exp(-weightedInputSums[i]));
        }

        return outputs;
    }

    /**
     * Calculates the error terms (deltas) for this layer.
     *
     * <p>For the output layer the error is driven by {@code desiredVals}; for
     * a hidden layer it is back-propagated from the following layer and
     * {@code desiredVals} is ignored (and may be {@code null}).
     *
     * @param desiredVals The desired output values of this layer; only
     *                    consulted when this is the output layer.
     *
     * @return The array of errors from the desired values, or {@code null} if
     *         this is the output layer and {@code desiredVals} is missing or
     *         wrongly sized.
     */
    public double[] calcErrors(double[] desiredVals) {

        if(outputLayer == null) {
            // Output layer: compare directly against the teaching signal.
            // BUG FIX: the length check now applies only here — hidden layers
            // never read desiredVals, so they no longer require one.
            if(desiredVals == null || desiredVals.length != outputs.length) return null;

            for(int i = 0; i < numOutputs; i++) {
                errors[i] = (desiredVals[i] - outputs[i]) * outputs[i] * (1 - outputs[i]);
            }
        } else {
            // Hidden layer: propagate each downstream node's error back
            // through the weight that connects it to node i.
            for(int i = 0; i < numOutputs; i++) {

                double errorSum = 0.0;

                for(int j = 0; j < outputLayer.getNumOutputs(); j++) {
                    errorSum += outputLayer.inputWeights[j][i] * outputLayer.errors[j];
                }

                errors[i] = errorSum * outputs[i] * (1 - outputs[i]);
            }
        }

        return errors;
    }

    /**
     * Adjusts the weights of the network based on previous errors. Has no effect
     * if the layer is an input layer.
     *
     * <p>Each weight moves by {@code learningRate * error * input} plus
     * {@code momentum} times its previous change; the delta is then stored for
     * the next momentum step.
     *
     * @return true iff the operation was successful.
     */
    public boolean adjustWeights() {

        if(inputLayer == null) return false;

        double[] inputOutputs = inputLayer.getOutputs();

        for(int i = 0; i < numOutputs; i++) {
            for(int j = 0; j < numInputs; j++) {
                double delta = learningRate * errors[i] * inputOutputs[j];

                // Reuse the delta just computed instead of recomputing it.
                inputWeights[i][j] += delta + momentum * weightChanges[i][j];

                weightChanges[i][j] = delta;
            }
            // BUG FIX: the bias weight was being OVERWRITTEN with the raw
            // error each pass, discarding all accumulated learning; apply a
            // learning-rate-scaled increment like the ordinary weights.
            biasWeights[i] += learningRate * errors[i] * bias;
        }

        return true;
    }
}
