/**
 * Copyright (C) 2010 - 2013 Harry Glasgow
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package com.googlecode.jaden.engine;

import com.googlecode.jaden.common.enums.LayerType;

import java.util.Collections;
import java.util.Map;
import java.util.TreeMap;
import java.util.HashMap;

public class Network {

    /**
     * Layers keyed by position: key 0 is the input-side layer, key size()-1
     * is the output layer. Stored in a TreeMap so iteration is guaranteed to
     * be in ascending layer order — getInputWidth() and the forward/backward
     * passes rely on that ordering.
     */
    private final Map<Integer, Layer> layers; // <0 to n-1, Layer>
    private final double learningRate;
    private final double momentum;

    // Set by forward(), cleared by backward(): guards against calling
    // backward() without a matching fresh forward pass.
    private boolean trainable;
    // Per-layer activations captured during forward(), keyed by layer number.
    private final Map<Integer, double[]> intermediateInputs;
    private final Map<Integer, double[]> intermediateOutputs;
    // Per-layer errors captured during backward(), keyed by layer number.
    private final Map<Integer, double[]> intermediateErrors;

    // Momentum state: the delta applied on the previous backward() call,
    // lazily allocated per layer on first use.
    private final Map<Integer, double[][]> previousWeightDeltas; // [currentLayerWidth][precedingLayerWidth]
    private final Map<Integer, double[]> previousBiasDeltas; // [currentLayerWidth]

    /**
     * Creates a network over a defensive copy of the supplied layer map.
     *
     * @param layers       layers keyed 0..n-1, input side first
     * @param learningRate scale applied to each weight/bias delta during training
     * @param momentum     scale applied to the previous delta when updating
     */
    public Network(Map<Integer, Layer> layers, double learningRate, double momentum) {
        // TreeMap, not HashMap: getInputWidth()/getOutputWidth() and layer
        // iteration depend on ascending key order, which HashMap does not
        // guarantee. Also consistent with every other map field below.
        this.layers = new TreeMap<Integer, Layer>();
        this.layers.putAll(layers);
        this.learningRate = learningRate;
        this.momentum = momentum;

        intermediateInputs = new TreeMap<Integer, double[]>();
        intermediateOutputs = new TreeMap<Integer, double[]>();
        intermediateErrors = new TreeMap<Integer, double[]>();

        previousBiasDeltas = new TreeMap<Integer, double[]>();
        previousWeightDeltas = new TreeMap<Integer, double[][]>();
    }

    /**
     * Runs a forward pass, recording each layer's inputs and outputs so a
     * subsequent {@link #backward(double[])} call can train on them.
     *
     * @param inputs activation vector for layer 0; must match {@link #getInputWidth()}
     * @return the output layer's activations, each in the open interval (-1, 1)
     */
    public double[] forward(double[] inputs) {
        intermediateInputs.clear();
        intermediateOutputs.clear();
        double[] workingDoubles = inputs;
        for (int i = 0; i < layers.size(); i++) {
            intermediateInputs.put(i, workingDoubles);
            workingDoubles = processLayerForward(i, workingDoubles);
            intermediateOutputs.put(i, workingDoubles);
        }

        trainable = true;
        return workingDoubles;
    }

    /** Resets every layer's weights and biases. */
    public void reset() {
        for (Layer layer : layers.values()) {
            layer.reset();
        }
    }

    /**
     * Computes one layer's activations from the previous layer's outputs.
     * The activation is a sigmoid-family function with range (-1, 1); the
     * three LayerType cases differ only in steepness.
     *
     * @throws IllegalStateException if the input width does not match the
     *         layer's weight matrix, or the layer type is unhandled
     */
    private double[] processLayerForward(int layerNumber, double[] workingDoubles) {
        Layer layer = layers.get(layerNumber);
        LayerType type = layer.getLayerType();
        int width = layer.getLayerWidth();
        double[][] ws = layer.getWeights();
        double[] bs = layer.getBiases();
        double[] result = new double[width];
        if (workingDoubles.length != ws[0].length) {
            throw new IllegalStateException("Inputs mismatch " + layerNumber + ' ' + workingDoubles.length + ' ' +
                    ws[0].length);
        }
        for (int layerIndex = 0; layerIndex < width; layerIndex++) {
            // Weighted sum of inputs plus the neuron's bias.
            double inputTotal = bs[layerIndex];
            for (int inputIndex = 0; inputIndex < ws[0].length; inputIndex++) {
                inputTotal += ws[layerIndex][inputIndex] * workingDoubles[inputIndex];
            }
            // 1 - 2/(1 + e^x) is algebraically tanh(x/2); Soft/Normal/Sharp
            // scale the argument to soften or sharpen the transition.
            switch (type) {
                case Soft:
                    result[layerIndex] = 1.0 - 2.0 / (1.0 + StrictMath.exp(inputTotal / 2));
                    break;
                case Normal:
                    result[layerIndex] = 1.0 - 2.0 / (1.0 + StrictMath.exp(inputTotal));
                    break;
                case Sharp:
                    result[layerIndex] = 1.0 - 2.0 / (1.0 + StrictMath.exp(2 * inputTotal));
                    break;
                default:
                    // Without this, an unhandled type would silently produce
                    // all-zero activations for the layer.
                    throw new IllegalStateException("Unhandled layer type " + type);
            }
        }
        return result;
    }

    /**
     * Back-propagates the output-layer errors recorded against the most
     * recent {@link #forward(double[])} pass and updates every layer's
     * weights and biases using the learning rate and momentum.
     *
     * @param errors error vector for the output layer (target - actual)
     * @throws IllegalStateException if no forward pass has occurred since
     *         the last call to this method
     */
    public void backward(double[] errors) {

        if (!trainable) {
            throw new IllegalStateException("Trying to train before forward pass");
        }

        // Propagate errors from the output layer back to layer 0.
        intermediateErrors.clear();
        double[] workingDoubles = errors;
        for (int i = layers.size() - 1; i >= 0; i--) {
            intermediateErrors.put(i, workingDoubles);
            workingDoubles = processLayerBackward(i, workingDoubles);
        }

        // Train: apply gradient-descent deltas with a momentum term.
        for (int i = 0; i < layers.size(); i++) {
            double[][] weights = layers.get(i).getWeights();
            double[] biases = layers.get(i).getBiases();
            double[] ies = intermediateErrors.get(i);
            double[] iis = intermediateInputs.get(i);
            double[] ios = intermediateOutputs.get(i);

            // Lazily allocate momentum state (all zeros) on first training pass.
            double[][] previousWeightDelta = previousWeightDeltas.get(i);
            if (previousWeightDelta == null) {
                previousWeightDelta = new double[weights.length][weights[0].length];
                previousWeightDeltas.put(i, previousWeightDelta);
            }

            double[] previousBiasDelta = previousBiasDeltas.get(i);
            if (previousBiasDelta == null) {
                previousBiasDelta = new double[weights.length];
                previousBiasDeltas.put(i, previousBiasDelta);
            }

            for (int j = 0; j < weights.length; j++) {
                // Skip saturated neurons: (1 - o)(1 + o) is the activation
                // derivative factor and vanishes at o = ±1.
                if (ios[j] > -1 && ios[j] < 1) {
                    for (int k = 0; k < weights[0].length; k++) {
                        double weightDelta = learningRate * ies[j] * (1 - ios[j]) * (1 + ios[j]) * iis[k];
                        weights[j][k] += weightDelta + momentum * previousWeightDelta[j][k];
                        previousWeightDelta[j][k] = weightDelta;
                    }
                    double biasDelta = learningRate * ies[j] * (1 - ios[j]) * (1 + ios[j]);
                    biases[j] += biasDelta + momentum * previousBiasDelta[j];
                    previousBiasDelta[j] = biasDelta;
                }
            }
        }

        trainable = false;
    }

    /**
     * Propagates a layer's error vector back through its weight matrix to
     * produce the preceding layer's error vector.
     *
     * NOTE(review): the propagated error omits the upstream layer's
     * activation-derivative factor; the derivative is applied only at
     * weight-update time in backward(). Confirm this is the intended
     * variant of backpropagation before changing it.
     */
    private double[] processLayerBackward(int layerNumber, double[] workingDoubles) {
        Layer layer = layers.get(layerNumber);
        double[][] weights = layer.getWeights();
        double[] result = new double[weights[0].length];
        for (int i = 0; i < weights.length; i++) {
            for (int j = 0; j < weights[0].length; j++) {
                result[j] += weights[i][j] * workingDoubles[i];
            }
        }
        return result;
    }

    /** @return an unmodifiable view of the layer map, keyed 0..n-1 */
    public Map<Integer, Layer> getLayers() {
        return Collections.unmodifiableMap(layers);
    }

    /** @return the number of inputs accepted by layer 0 (TreeMap iteration starts at the lowest key) */
    public int getInputWidth() {
        return layers.values().iterator().next().getWeights()[0].length;
    }

    /** @return the width of the last layer, or 0 if the network has no layers */
    public int getOutputWidth() {
        // Keys are 0..n-1, so the output layer is at size()-1; no need to
        // iterate the whole map (the old loop also depended on iteration
        // order, which only a sorted map guarantees).
        return layers.isEmpty() ? 0 : layers.get(layers.size() - 1).getLayerWidth();
    }

    /** Randomly perturbs every layer's weights, e.g. to escape local minima. */
    public void jog() {
        for (Layer layer : layers.values()) {
            layer.jog();
        }
    }
}
