package archive;

import java.io.FileWriter;
import java.io.IOException;
import java.util.Scanner;

import neuralnetworks.OutputFunction;
import neuralnetworks.OutputFunction.Type;

public class NeuronLayer
{
    // Note: this layer implementation differs from the usual one. The weights
    // stored here act on the INCOMING signals rather than the outgoing ones;
    // this was done to avoid needing three different layer types.

    double []       d_augmentedinputs; // previous layer's outputs plus a trailing bias input of -1
    double [] []    d_weights;         // d_weights[j][i] = weight from neuron i in the previous layer to neuron j in this layer; last column is the bias weight
    double []       d_activations;     // weighted input sums of this layer's neurons
    double []       d_outputs;         // activations passed through the transfer function
    double []       d_errors;          // error terms of THIS layer's neurons, used to adapt this layer's weights
    OutputFunction  d_outputfunction;  // transfer function applied to the activations

    /**
     * Creates a layer with weights initialized uniformly at random in [-1, 1).
     *
     * @param thislayersize number of neurons in this layer
     * @param lastlayersize number of neurons in the previous layer
     *                      (0 is accepted and leaves only the bias input)
     */
    public NeuronLayer(int thislayersize, int lastlayersize)
    {
        d_outputfunction  = new OutputFunction(OutputFunction.Type.SIGMOID);
        d_augmentedinputs = new double [lastlayersize + 1];                 // + 1 for the bias input
        d_weights         = new double [thislayersize] [lastlayersize + 1]; // + 1 for the bias weight, which is always the last one
        d_activations     = new double [thislayersize];
        d_outputs         = new double [thislayersize];
        d_errors          = new double [thislayersize];
        for(int thislayerindex = 0; thislayerindex < thislayersize; ++thislayerindex)
            for(int lastlayerindex = 0; lastlayerindex < lastlayersize + 1; ++lastlayerindex)
                d_weights[thislayerindex][lastlayerindex] = (Math.random() * 2) - 1; // random weight in [-1, 1)
    }

    /**
     * Restores a layer from data previously produced by {@link #writeToFile}.
     * The sizes read from the stream already include the bias column, so no
     * "+ 1" adjustments are applied here.
     *
     * @param scanner scanner positioned at this layer's serialized data
     */
    public NeuronLayer(Scanner scanner)
    {
        int thislayersize = scanner.nextInt();
        int lastlayersize = scanner.nextInt(); // already includes the bias column
        d_outputfunction  = new OutputFunction(OutputFunction.Type.SIGMOID);
        d_augmentedinputs = new double [lastlayersize];
        d_weights         = new double [thislayersize] [lastlayersize];
        d_activations     = new double [thislayersize];
        d_outputs         = new double [thislayersize];
        d_errors          = new double [thislayersize];
        for(int thislayerindex = 0; thislayerindex < thislayersize; ++thislayerindex)
            for(int lastlayerindex = 0; lastlayerindex < lastlayersize; ++lastlayerindex)
                d_weights[thislayerindex][lastlayerindex] = scanner.nextDouble(); // weights come straight from the file
    }


    /**
     * Propagates an input vector through this layer.
     *
     * @param input the previous layer's outputs (without the bias input)
     * @return a copy of this layer's outputs
     */
    public double[] forwardPass(double[] input)
    {
        augmentinputs(input);
        computeActivations();
        computeOutputs();
        return d_outputs.clone(); // clone so callers cannot mutate internal state
    }

    // Computes each neuron's weighted sum of the augmented inputs.
    private void computeActivations ()
    {
        for(int activationindex = 0; activationindex < d_activations.length; ++activationindex)
        {
            d_activations[activationindex] = 0;
            for(int inputindex = 0; inputindex < d_augmentedinputs.length; ++inputindex)
                d_activations[activationindex] += d_weights[activationindex][inputindex] * d_augmentedinputs[inputindex];
        }
    }

    // Applies the transfer function to every activation.
    private void computeOutputs()
    {
        for(int index = 0; index < d_activations.length; ++index)
            d_outputs[index] = outputFunction(d_activations[index]);
    }



    /**
     * Serializes this layer (sizes followed by all weights) in the format
     * expected by {@link #NeuronLayer(Scanner)}. Assumes the layer has at
     * least one neuron (d_weights[0] is read for the column count).
     *
     * @param writer destination writer
     * @throws IOException if writing fails
     */
    public void writeToFile(FileWriter writer) throws IOException
    {
        writer.write(Integer.toString(d_outputs.length)    + " ");
        writer.write(Integer.toString(d_weights[0].length) + " "); // column count includes the bias weight
        for(int thislayerindex = 0; thislayerindex < d_outputs.length; ++thislayerindex)
            for(int lastlayerindex = 0; lastlayerindex < d_weights[0].length; ++lastlayerindex)
                writer.write(Double.toString(d_weights[thislayerindex][lastlayerindex]) + " ");
    }

    /** Applies the layer's transfer function to a single activation. */
    public double outputFunction(double input)
    {
        return d_outputfunction.normal(input);
    }

    /** Derivative of the transfer function at the given activation. */
    public double outputFunctionDerivative(double input)
    {
        return d_outputfunction.derivative(input);
    }

    /**
     * Element-wise transfer function.
     *
     * @param activation activations to transform; may differ in length from
     *                   this layer's own activation vector
     * @return new array with the transfer function applied to each element
     */
    public double [] outputFunction(double [] activation)
    {
        double [] result = new double [activation.length];
        // BUGFIX: iterate over the argument's length, not d_activations.length,
        // so arrays of a different size than the layer are handled correctly.
        for(int index = 0; index < activation.length; ++index)
            result[index] = outputFunction(activation[index]);
        return result;
    }

    /**
     * Element-wise transfer-function derivative.
     *
     * @param activation activations to transform; may differ in length from
     *                   this layer's own activation vector
     * @return new array with the derivative applied to each element
     */
    public double [] outputFunctionDerivative(double [] activation)
    {
        double [] result = new double [activation.length];
        // BUGFIX: iterate over the argument's length, not d_activations.length,
        // so arrays of a different size than the layer are handled correctly.
        for(int index = 0; index < activation.length; ++index)
            result[index] = outputFunctionDerivative(activation[index]);
        return result;
    }

    /**
     * Stores this layer's error terms and back-propagates the error to the
     * previous layer.
     *
     * @param error output error of this layer, not yet corrected for the
     *              transfer function
     * @return the error attributed to each previous-layer neuron (bias excluded)
     */
    public double [] assignError(double [] error)
    {
        for(int index = 0; index < error.length; ++index)
            d_errors[index] = outputFunctionDerivative(d_activations[index]) * error[index]; // correct for this layer's transfer function

        // minus one because there is no use assigning error to the bias input
        double [] lastlayererror = new double [d_weights[0].length - 1];
        for(int lastlayerindex = 0; lastlayerindex < d_weights[0].length - 1; ++lastlayerindex)
            for(int thislayerindex = 0; thislayerindex < d_outputs.length;   ++thislayerindex)
                lastlayererror[lastlayerindex] += d_errors[thislayerindex] * d_weights[thislayerindex] [lastlayerindex];

        return lastlayererror;
    }

    /**
     * Gradient-descent weight update using the errors stored by
     * {@link #assignError} and the inputs of the last forward pass.
     * NOTE(review): method name is misspelled ("Weigths") but kept for
     * compatibility with existing callers.
     *
     * @param learningrate step size of the update
     */
    public void changeWeigths(double learningrate)
    {
        for(int lastlayerindex = 0; lastlayerindex < d_weights[0].length; ++lastlayerindex) // bias weight included: its input is the constant -1
            for(int thislayerindex = 0; thislayerindex < d_outputs.length;   ++thislayerindex)
                d_weights[thislayerindex] [lastlayerindex] += learningrate * d_errors[thislayerindex] * d_augmentedinputs[lastlayerindex];
    }

    // Copies the input vector and appends the constant bias input of -1.
    private void augmentinputs(double [] inputvector)
    {
        for(int index = 0; index < inputvector.length;   ++index)
            d_augmentedinputs[index] = inputvector[index];
        d_augmentedinputs[inputvector.length] = -1; // bias input
    }
}
