#ifndef _LOBE_
#define _LOBE_

#include <iostream>
#include <assert.h>
#include <myTypes.h>
#include <myMacros.h>
#include <sigmoidNeuron.h>
#include <linearNeuron.h>
#include <dbg.h>

using namespace std;

// A small fully-connected feed-forward network ("lobe"):
// sigmoid hidden layers followed by one linear output layer.
class lobe {
  private:
    uiVec_t arch;   // Layer sizes; arch[0] is also the network's input width
    net_t net;      // net[i][j] -> neuron j of layer i (raw pointers, new'ed in ctor)
    dVec2d_t op;    // Per-layer outputs recorded by the most recent feedFwd()
    // Gradient-descent step size used by train().
    // BUGFIX: in-class initialization of a static floating-point member is
    // ill-formed without constexpr ("const static double x = 0.001;" only
    // compiles as a vendor extension for integral types).
    constexpr static double trainingRate = 0.001;
    // NOTE(review): neurons allocated in the constructor are never deleted —
    // a destructor (and copy suppression) is still missing; confirm whether
    // the neuron base type has a virtual destructor before adding one.

  public:
    lobe (uiVec_t a);                       // Build network from layer sizes
    void show ();                           // Dump all neurons to stdout
    void feedFwd (dVec_t ip);               // Forward pass; fills `op`
    void train (dVec_t ip, dVec_t realOp);  // One online backprop step
    dVec_t getOutput (dVec_t ip);           // Forward pass, return last layer
};

// Construct the network from `a`, the per-layer neuron counts.
// Layers 0 .. size-2 are sigmoidNeuron layers; the final layer is
// linearNeuron. All weights and biases start uniformly random in [-1, 1).
// Requires at least two layers (asserted).
lobe::lobe (uiVec_t a) : arch(a) {
  assert (arch.size() > 1);

  // nPrev  - no. of neurons in previous layer (no. of ip to current layer).
  // The first layer is fed the raw input, so it starts at arch[0].
  unsigned int nPrev = arch[0];
  dVec_t iWeights;  // Initial Weights for each neuron

  // Hidden (sigmoid) layers: every layer except the last.
  forEach (i, 0, arch.size() - 1) {
    net.push_back(layer_t());

    forEach (j, 0, arch[i]) {
      iWeights.clear();
      // rand()/(RAND_MAX>>1) is in [0, 2), so subtracting 1 gives [-1, 1)
      forEach (k, 0, nPrev) iWeights.push_back((double)(rand())/(RAND_MAX>>1) - 1);
      sigmoidNeuron* cell = new sigmoidNeuron ((double)(rand())/(RAND_MAX>>1) - 1, iWeights);
      net[i].push_back(cell);
    }

    nPrev = arch[i];
  }

  // Last layer with linearNeuron
  forEach (i, arch.size() - 1, arch.size()) {
    net.push_back(layer_t());

    forEach (j, 0, arch[i]) {
      iWeights.clear();
      // BUGFIX: this layer previously divided by RAND_MAX (not RAND_MAX>>1),
      // initializing every output-layer weight and bias in [-1, 0] instead of
      // [-1, 1) like the sigmoid layers — now consistent with them.
      forEach (k, 0, nPrev) iWeights.push_back((double)(rand())/(RAND_MAX>>1) - 1);
      linearNeuron* cell = new linearNeuron  ((double)(rand())/(RAND_MAX>>1) - 1, iWeights);
      net[i].push_back(cell);
    }
  }
}

// Print the whole network: one line per layer, neurons tab-separated
// (per-neuron formatting is delegated to the neuron's own show()),
// followed by a dashed separator line.
void lobe::show () {
  for (unsigned int lyr = 0; lyr < net.size(); ++lyr) {
    for (unsigned int n = 0; n < net[lyr].size(); ++n) {
      net[lyr][n]->show();
      cout << "\t";
    }
    cout << "\n";
  }
  cout << string(150, '-') << "\n";
}

// Forward pass: push `ip` through each layer in order, recording every
// layer's output vector in `op` (so op[i] holds layer i's outputs and
// op.back() is the network output). `ip` must have arch[0] elements.
void lobe::feedFwd (dVec_t ip) {
  assert (ip.size() == arch[0]);
  op.clear();

  dVec_t layerOut;
  for (unsigned int lyr = 0; lyr < net.size(); ++lyr) {
    layerOut.clear();

    for (unsigned int n = 0; n < net[lyr].size(); ++n)
      layerOut.push_back(net[lyr][n]->getOutput(ip));

    op.push_back(layerOut);
    ip = layerOut;  // this layer's outputs feed the next layer
  }
}

// One online (single-sample) gradient-descent step: forward pass, then
// backpropagate deltas, then adjust every weight and bias in place.
// ip     - input vector (must be arch[0] wide, asserted inside feedFwd)
// realOp - target output vector (must match the last layer's width)
// `error` is built by inserting each layer's delta vector at the FRONT,
// so when backprop finishes, error[i] lines up with layer i.
void lobe::train (dVec_t ip, dVec_t realOp) {
  feedFwd (ip);
  assert (op[op.size() - 1].size() == realOp.size());
  
  double e;
  dVec2d_t error;
  error.insert(error.begin(), dVec_t());
  
  // Back propagate error

  // Last layer: delta = (target - actual). No activation derivative factor
  // here — the output layer is linearNeuron (derivative 1).
  dVec_t curOp = op[op.size() - 1];
  forEach (i, 0, realOp.size())
    error[0].push_back(realOp[i] - curOp[i]);

  // For each other layer in reverse.
  // NOTE(review): forHcae is presumably the reverse-iteration counterpart of
  // forEach from myMacros.h, visiting hidden layers net.size()-2 .. 0 so
  // that net[i+1] below is always valid — confirm against the macro.
  forHcae (i, net.size() - 1, 0) {
    error.insert(error.begin(), dVec_t());
    forEach (j, 0, net[i].size()) {   // For each neuron in current layer
      e = 0;
      forEach (k, 0, net[i+1].size()) // Back propagate errors from nxt layer
        e += error[1][k] * net[i+1][k]->weights[j];
      // Hidden delta = weighted downstream error * sigmoid derivative,
      // using the identity s'(x) = s(x) * (1 - s(x)) with s(x) = op[i][j].
      error[0].push_back(e * op[i][j] * (1 - op[i][j]));
    }
  }
  //showdVec2d ("op", op);
  //showdVec2d ("error", error);

  // Forward pass Weight adjustments
  // First layer: its inputs are the raw sample `ip`, not a previous layer.
  forEach (j, 0, net[0].size()) {
    forEach (k, 0, net[0][j]->weights.size())
      net[0][j]->weights[k] += trainingRate * ip[k] * error[0][j];
    net[0][j]->bias += trainingRate * error[0][j];
  }

  // For each other layer: inputs are the previous layer's outputs op[i-1].
  forEach (i, 1, net.size()) {
    forEach (j, 0, net[i].size()) {
      forEach (k, 0, net[i][j]->weights.size())
        net[i][j]->weights[k] += trainingRate * op[i-1][k] * error[i][j];
      net[i][j]->bias += trainingRate * error[i][j];
    }
  }
}

// Convenience wrapper: run a forward pass on `ip` and return the final
// layer's output vector (`op` is guaranteed non-empty since arch.size() > 1).
dVec_t lobe::getOutput (dVec_t ip) {
  feedFwd (ip);
  return op.back();
}

#endif
