#include "MLP.h"

/*
 *--------------------------------------------------------------------------------------
 *       Class:  MLP
 *      Method:  MLP
 * Description:  Loads an MLP previously saved to the given file
 *--------------------------------------------------------------------------------------
 */
MLP::MLP(const char *file)
{
    // Open the saved network for reading
    CSVFilePtr input(new CSVFile(file, CSV_IN));

    // Restore the topology header: dimensions, layer count, neurons per layer
    input->read(nInVars, nOutVars);
    input->read(nLayers);
    input->readV(nNeurons, nLayers);

    // Rebuild the layers; each perceptron reads its own weights from the file
    build(input);
}

/*
 *--------------------------------------------------------------------------------------
 *       Class:  MLP
 *      Method:  MLP
 * Description:  Builds an MLP from the number of input variables and the neuron count
 *               of each layer (the last layer defines the number of outputs)
 *--------------------------------------------------------------------------------------
 */
MLP::MLP(uint nInVars, vuint& nNeurons)
{
    assert(nInVars > 0);
    assert(!nNeurons.empty());

    this->nInVars = nInVars;
    this->nLayers = nNeurons.size();
    this->nOutVars = nNeurons[nLayers-1];

    // Copy the whole topology vector in one assignment instead of pushing
    // element by element (same result, single allocation)
    this->nNeurons = nNeurons;

    build();
}

/*
 *--------------------------------------------------------------------------------------
 *       Class:  MLP
 *      Method:  build
 * Description:  Builds the MLP layers; when a CSV file is supplied, each perceptron
 *               loads its weights from it
 *--------------------------------------------------------------------------------------
 */
void MLP::build(CSVFilePtr csv)
{
    // All layers share one activation function and one learning strategy
    ActivationPtr activation(new Activation(LOGISTIC));
    learning = LearningPtr(new Learning(SUPERVISIONED, activation));

    for(uint layer = 0; layer < nLayers; layer++)
    {
        // A layer's fan-in is the previous layer's size; the first layer
        // is fed directly by the network inputs
        const uint fanIn  = (layer > 0) ? nNeurons[layer-1] : nInVars;
        const uint fanOut = nNeurons[layer];

        layers.push_back(PerceptronPtr(new Perceptron(fanIn, fanOut, activation, learning, csv)));
    }

    print();
}

/*
 *--------------------------------------------------------------------------------------
 *       Class:  MLP
 *      Method:  saveToFile
 * Description:  Salva o MLP em um arquivo
 *--------------------------------------------------------------------------------------
 */
void MLP::saveToFile(const char *file)
{
    CSVFilePtr csv(new CSVFile(file, CSV_OUT));

    csv->write(nInVars, nOutVars);
    csv->write(nLayers);
    csv->writeV(nNeurons, true);

    for(uint i = 0; i < nLayers; i++)
        for(uint j = 0; j < nNeurons[i]; j++)
            csv->writeV(layers[i]->getNeuron(j)->getWeights(), true);
}

/*
 *--------------------------------------------------------------------------------------
 *       Class:  MLP
 *      Method:  train
 * Description:  Trains the MLP on a set of input/output pairs until every sample is
 *               within the given tolerance or the iteration limit is reached
 *--------------------------------------------------------------------------------------
 */
void MLP::train(DataSet& ds, double tolerance, uint maxIt)
{
    // The dataset must match the network's input/output dimensions
    assert(nInVars == ds.getNInVars());
    assert(nOutVars == ds.getNOutVars());

    bool end = false;
    uint cycle = 0;

    Chronometer c;

    // One cycle == one full (shuffled) pass over the dataset
    for(cycle = 0; !end && cycle < maxIt; cycle++)
    {
        // The learning rate may vary with the cycle number
        learning->calcLearningRate(cycle);

        // Present the samples in a fresh random order each cycle
        ds.randomize();
        for(uint i = 0; i < ds.size(); i++)
        {
            uint j = ds.getRandom(i);
            DataPtr data = ds.get(j);

            // Forward pass: compute and record the network's answer
            vdoublePtr output = feedforward(data->getInput());
            data->setNeuralOutput(output);

            // Backward pass: propagate the error and update the weights
            feedbackward(data->getOutput());
        }

        end = ds.hasSucceeded(tolerance);
    }

    ds.setTime(c.getMiliseconds());
    ds.setNIterations(cycle);
    ds.calcSuccessRate(tolerance);

    cout << endl;

    cout << "#=| Multi-Layer Perceptron Training |==================#" << endl << endl;
    ds.print(true);

    cout << "#=| Training Results |=================================#" << endl << endl;
    print();

    // BUG FIX: report success from the convergence flag itself. The previous test
    // (cycle != maxIt) mis-reported failure when convergence happened exactly on the
    // last allowed iteration, because the for-loop increment still pushes 'cycle'
    // up to maxIt in that case.
    if(end)
        cout << "TRAINING WAS SUCCESSFUL!" << endl << endl;
    else
        cout << "TRAINING FAILED!" << endl << endl;
}

/*
 *--------------------------------------------------------------------------------------
 *       Class:  MLP
 *      Method:  test
 * Description:  Tests the MLP against a set of inputs
 *--------------------------------------------------------------------------------------
 */
void MLP::test(DataSet& ds, double tolerance)
{
    // The dataset must match the network's input/output dimensions
    assert(nInVars == ds.getNInVars());
    assert(nOutVars == ds.getNOutVars());

    // Time the whole evaluation pass
    Chronometer timer;

    // Single pass: feed every sample forward and record the network's answer
    for(uint i = 0; i < ds.size(); i++)
    {
        DataPtr sample = ds.get(i);
        vdoublePtr answer = feedforward(sample->getInput());
        sample->setNeuralOutput(answer);
    }

    ds.setTime(timer.getMiliseconds());
    ds.setNIterations(1);
    ds.calcSuccessRate(tolerance);

    cout << "#=| Multi-Layer Perceptron Test |======================#" << endl << endl;
    ds.print(true);
}

/*
 *--------------------------------------------------------------------------------------
 *       Class:  MLP
 *      Method:  feedforward
 * Description:  Performs the feedforward pass
 *--------------------------------------------------------------------------------------
 */
vdoublePtr MLP::feedforward(vdoublePtr input)
{
    vdoublePtr output;

    // Chain the layers: each layer's activation becomes the next layer's input
    for(uint l = 0; l < layers.size(); ++l)
        input = output = layers[l]->feedforward(input);

    return output;
}

/*
 *--------------------------------------------------------------------------------------
 *       Class:  MLP
 *      Method:  feedbackward
 * Description:  Performs the backward (error-propagation) pass
 *--------------------------------------------------------------------------------------
 */
void MLP::feedbackward(vdoublePtr expected)
{
    const uint last = nLayers - 1;

    // Output layer: error computed against the expected values
    vdoublePtr error = layers[last]->feedbackward(expected);
    layers[last]->updateNeurons();

    // Hidden layers, walked from back to front; the reverse-iteration idiom
    // (i-- > 0) keeps the index unsigned without any cast
    for(uint i = last; i-- > 0; )
    {
        error = layers[i]->feedbackward(error, layers[i+1]->getNeurons());
        layers[i]->updateNeurons();
    }
}

/*
 *--------------------------------------------------------------------------------------
 *       Class:  MLP
 *      Method:  print
 * Description:  Imprime o MLP
 *--------------------------------------------------------------------------------------
 */
void MLP::print()
{
    cout << "#=| Multi-Layer Perceptron |===========================#" << endl << endl;

    cout << "Number of input variables: " << nInVars << endl;
    cout << "Number of output variables: " << nOutVars << endl;
    cout << "Number of layers: " << nLayers << endl;

    for(uint i = 0; i < nLayers; i++)
    {
        cout << endl;

        cout << "Layer " << (i + 1) << endl;
        cout << " |--> Number of neurons: " << nNeurons[i] << endl;
        layers[i]->print();
    }
    cout << endl << "#======================================================#" << endl << endl;
}

