package ferp.core.ai.nn.bp.training;

import ferp.core.ai.nn.bp.BPNN;

import java.util.Random;

/**
 * On-line (per-sample) backpropagation trainer with momentum for a {@link BPNN}
 * feed-forward network.
 *
 * User: igorgok
 * Date: 1/19/14 11:50 AM
 */
public class Backprop
{
  /** When true, verbose per-sample and per-weight diagnostics are printed to stdout. */
  public static final boolean DEBUG = false;

  /**
   * Creates a trainer for the given network and re-initializes all of its weights
   * with small random values.
   *
   * @param nn the network to train; its weight matrices are randomized in place
   */
  public Backprop(BPNN nn)
  {
    int layers = nn.getActiveLayersCount();

    this.nn = nn;
    this.gradients = new double[layers][];
    this.deltas = new double[layers][][];

    initGradientsAndDeltas(layers);

    // small random starting weights keep the units away from activation saturation
    for (int l = 0; l < layers; ++l)
      randomize(nn.weights[l]);
  }

  /**
   * Trains the network on-line (weights are updated after every sample) until the
   * mean per-epoch error drops below {@code error}.
   *
   * <p>Each row of {@code set} holds the sample inputs immediately followed by the
   * expected outputs.
   *
   * @param set      training samples: inputs concatenated with expected outputs
   * @param rate     learning rate applied to every gradient step
   * @param momentum fraction of the previous weight delta added to each update
   * @param error    target mean absolute error for an epoch
   * @return the epoch count on success; the negated epoch count when training is
   *         force-stopped after {@link #MAX_BAD_EPOCHS} consecutive epochs without
   *         improvement over the best error seen so far
   */
  public long train(double[][] set, double rate, double momentum, double error)
  {
    int is = nn.getInputSize();
    // best (lowest) epoch error seen so far
    double pe = Double.MAX_VALUE;
    // consecutive epochs without improvement
    int ei = 0;

    System.out.println("# inputs: " + is + ", hidden layer size: " + nn.getLayerSize(0));

    for (long epoch = 0; true; ++epoch)
    {
      // accumulated error for this epoch
      double me = 0;
      // sample counter, advanced for DEBUG output only
      int tcn = 0;

      // randomize the training set order (currently disabled)
      //shuffle(set);

      for (double[] sample : set)
      {
        if (DEBUG)
          System.out.println("case #" + tcn++);

        // forward propagation
        double[] out = nn.process(sample);

        // accumulate the sample's mean absolute error
        me += error(sample, is, out);
        // back-propagate and update the weights
        learn(sample, rate, momentum);
      }

      // mean absolute error for the whole set
      me /= set.length;

      if (me < error)
      {
        System.out.println("trained after " + epoch + " epochs, mean error: " + me);

        return epoch;
      }

      if (me >= pe)
      {
        // no improvement over the best epoch so far
        if (++ei > MAX_BAD_EPOCHS)
        {
          System.out.println("force stop after " + epoch + " epochs, mean error: " + me + ", prev. error " + pe);

          return -epoch;
        }
      }
      else
      {
        // reset the bad epoch counter
        ei = 0;
        // save previous error value
        pe = me;
      }

      if (epoch % REPORT_EPOCHS == 0)
        System.out.println("#" + epoch + " - mean error: " + me);
    }
  }

  // consecutive non-improving epochs tolerated before training is aborted
  private static final int MAX_BAD_EPOCHS = 10;
  // progress is reported every this many epochs
  private static final int REPORT_EPOCHS = 1000;

  private static final Random random = new Random();

  /**
   * Mean absolute error between the network outputs and the expected outputs
   * stored at the tail of the sample row.
   *
   * @param expected sample row: inputs followed by expected outputs
   * @param inputs   number of inputs, i.e. the offset of the first expected output
   * @param actual   outputs produced by the network
   * @return mean absolute error over all outputs
   */
  private static double error(double[] expected, int inputs, double[] actual)
  {
    int j = 0;
    double error = 0;

    // absolute mean error for this sample
    for (double a : actual)
      error += Math.abs(a - expected[inputs + j++]);

    error /= actual.length;

    if (Backprop.DEBUG)
      System.out.println("  error: " + error);

    return error;
  }

  /**
   * Performs one backpropagation step for a single sample: computes the error
   * gradient of every unit (output layer first, then hidden layers in reverse),
   * then applies momentum-smoothed weight deltas layer by layer.
   *
   * @param sample   inputs followed by expected outputs
   * @param rate     learning rate
   * @param momentum momentum coefficient applied to the previous delta
   */
  private void learn(double[] sample, double rate, double momentum)
  {
    int is = nn.getInputSize();
    // index of the output layer; assumes active layers = hidden layers + 1 output layer
    int ol = nn.getHiddenLayersCount();

    // back propagation
    // gradients for output layer: (target - output) * f'(output)
    for (int u = 0, size = nn.getOutputSize(); u < size; ++u)
    {
      gradients[ol][u] = (sample[is + u] - nn.outputs[ol][u]) * nn.activations[ol].derivative(nn.outputs[ol][u]);

      if (DEBUG)
        System.out.println("output gradient " + u + ": " + gradients[ol][u]);
    }

    // gradients for the hidden layers, propagated backwards from the layer above
    if (ol > 0)
      for (int h = ol - 1; h >= 0; --h)
      {
        for (int n = 0, next = h + 1, hls = nn.getLayerSize(h); n < hls; ++n)
        {
          double sum = 0;

          // bias does not participate in the gradient calculation process,
          // hence the n + 1 offset into the next layer's weight vectors
          for (int u = 0, nls = nn.getLayerSize(next); u < nls; ++u)
            sum += gradients[next][u] * nn.weights[next][u][n + 1];

          gradients[h][n] = sum * nn.activations[h].derivative(nn.outputs[h][n]);

          if (DEBUG)
            System.out.println("HL " + h + " gradient " + n + ": " + gradients[h][n] + " based on output " + nn.outputs[h][n]);
        }
      }

    // current learning rate as a function of the epoch
    //double lr = time == 0 ? rate : rate / (1 + epoch / time);

    // deltas and weight update; 'sample'/'is' are rebound after each layer so that
    // a layer's inputs are the previous layer's outputs
    for (int l = 0, alc = nn.getActiveLayersCount(); l < alc; ++l)
    {
      for (int u = 0, ls = nn.getLayerSize(l); u < ls; ++u)
      {
        // bias is at position 0; its "input" is implicitly 1
        // store the previous delta for momentum
        double pdb = deltas[l][u][0];
        // new delta
        double ndb = deltas[l][u][0] = rate * gradients[l][u];

        // new weight
        nn.weights[l][u][0] += ndb + momentum * pdb;

        if (DEBUG)
          System.out.println("layer " + l + " unit " + u + " dw[bias]=" + ndb);

        for (int i = 1; i <= is; ++i)
        {
          // store the previous delta for momentum
          double pdw = deltas[l][u][i];
          // new delta
          double ndw = deltas[l][u][i] = rate * gradients[l][u] * sample[i - 1];

          // new weight
          nn.weights[l][u][i] += ndw + momentum * pdw;

          if (DEBUG)
            System.out.println("layer " + l + " unit " + u + " dw[" + i + "]=" + ndw);
        }
      }

      // this layer's outputs feed the next layer
      sample = nn.outputs[l];
      is = sample.length;
    }
  }

  /**
   * Grows the given hidden layer by one unit, preserving the existing weights and
   * randomizing the new ones, then rebuilds the gradient/delta buffers.
   *
   * @param layer index of the hidden layer to grow; must have a successor layer
   */
  private void grow(int layer)
  {
    // old size
    int os = nn.getLayerSize(layer), arity = nn.getLayerArity(layer);
    // old weights
    double[][] ow = nn.weights[layer];

    // new outputs
    nn.outputs[layer] = new double[os + 1];
    // new weights
    // NOTE(review): assumes BPNN.matrix(units, arity) allocates room for the bias
    // in addition to 'arity' input weights — confirm against BPNN.matrix, since the
    // copy below moves arity + 1 values per unit
    nn.weights[layer] = BPNN.matrix(os + 1, arity);

    // copy old weights (including bias)
    for (int u = 0; u < os; ++u)
      System.arraycopy(ow[u], 0, nn.weights[layer][u], 0, arity + 1);

    // initialize new unit's weights
    randomize(nn.weights[layer][os]);

    // the next layer's arity has changed: add one weight to each of its units.
    // Its vectors are sized from their own current length (bias + one weight per
    // unit of the grown layer) rather than from this layer's input arity, which
    // only coincides when consecutive layers have equal sizes.
    for (int u = 0, nl = layer + 1, ls = nn.getLayerSize(nl); u < ls; ++u)
    {
      double[] pw = nn.weights[nl][u];

      // recreate weights vector for the unit (including bias) with one extra slot
      nn.weights[nl][u] = new double[pw.length + 1];
      // copy old weights
      System.arraycopy(pw, 0, nn.weights[nl][u], 0, pw.length);
      // initialize the weight fed by the newly added unit
      nn.weights[nl][u][pw.length] = getRandomWeight();
    }

    initGradientsAndDeltas(nn.getActiveLayersCount());
  }

  /** Returns a small random weight, uniform in [-0.05, 0.05). */
  private static double getRandomWeight()
  {
    return (random.nextDouble() - 0.5) / 10;
  }

  /** Fills the vector with small random weights. */
  private static void randomize(double[] vector)
  {
    for (int i = 0; i < vector.length; ++i)
      vector[i] = getRandomWeight();
  }

  /** Fills every row of the matrix with small random weights. */
  private static void randomize(double[][] matrix)
  {
    for (double[] row : matrix)
      randomize(row);
  }

  /**
   * In-place Fisher-Yates shuffle of the sample rows, repeated 7 times.
   * Currently unused: the call in {@link #train} is commented out.
   */
  private static void shuffle(double[][] array)
  {
    for (int p = 0; p < 7; ++p)
      for (int i = array.length - 1; i > 0; --i)
        swap(array, i, random.nextInt(i + 1));
  }

  /** Swaps two rows of the array. */
  private static void swap(double[][] array, int i1, int i2)
  {
    double[] t = array[i1];

    array[i1] = array[i2];
    array[i2] = t;
  }

  /**
   * (Re)allocates the gradient and delta buffers to match the current layer
   * sizes; deltas get one extra slot per unit for the bias weight.
   */
  private void initGradientsAndDeltas(int layers)
  {
    for (int l = 0; l < layers; ++l)
      gradients[l] = new double[nn.getLayerSize(l)];

    for (int l = 0; l < layers; ++l)
      deltas[l] = BPNN.matrix(nn.getLayerSize(l), nn.getLayerArity(l) + 1);
  }

  // the network being trained
  private final BPNN nn;
  // per-layer, per-unit error gradients of the last learn() step
  private final double[][] gradients;
  // per-layer, per-unit, per-weight deltas of the previous step (for momentum)
  private final double[][][] deltas;
}
