#include "statsxx/machine_learning/NeuralNet.hpp"

// STL
#include <vector>    // std::vector


//
// DESC: Assigns weights w and theta to the NN
//
//
// DESC: Computes the error gradient dE/dw over the training set TS via
//       backpropagation (through time, for recurrent links), plus an
//       optional L2 regularization term.
//
//       On return, dEdw[j] holds the accumulated gradient for m_links[j].
//
//       NOTE(review): TS is taken by value, so the entire training set is
//       copied on every call. Passing by const reference would avoid that,
//       but requires changing the declaration in NeuralNet.hpp in lockstep.
//       (Same for the "calulate" typo in the name — it is part of the
//       declared interface, so it is left untouched here.)
//
inline void NEURAL_NET::calulate_Ederiv(DataSet TS, std::vector<double> &dEdw)
{
    // L2 regularization coefficient. Currently disabled (0.0); made
    // constexpr so the regularization pass below can be skipped/elided.
    constexpr double LAMBDA = 0.0;

    //*********************************************************

    // One gradient accumulator per link, zeroed up front.
    dEdw.assign(m_links.size(), 0.0);

    // PERFORM ``BACKPROPAGATION'' FOR EACH POINT IN THE TRAINING SET
    for( const auto &pt : TS.pt )
    {
        // Forward pass: computes the output and activates all neurons
        // (with memory), recording their per-step outputs.
        std::vector<double> output;
        this->evaluate( pt.in, output );

        this->clear_errors();

        // Backward pass, one recurrent step at a time, in the order
        // t, t-1, t-2 — the reverse of evaluate().
        for( decltype(pt.in.size()) i = 0; i < pt.in.size(); ++i )
        {
            this->deactivate_network();

            // Seed a fresh error slot for this time step. Note: unlike
            // evaluate(), errors are appended at the END, not the front.
            for( auto &n : m_neurons )
            {
                n.m_errors.push_back(0.0);
            }

            this->backward_prop(i, pt.out, output);

            // Accumulate the gradient for every link:
            //          DeltaW_ij = delta_j * F(x_i)
            //               DeltaW_ij == change in weight from node i to node j
            //               F(x_i)    == source neuron's output, shifted by the
            //                            link's time delay
            //               delta_j   == target neuron's error for this step
            for( decltype(m_links.size()) j = 0; j < m_links.size(); ++j )
            {
                const auto &link = m_links[j];
                dEdw[j] += m_neurons[link.source].get_output(i + link.tdelay)
                         * m_neurons[link.target].m_errors.back();
            }
        }
    }

    // L2 REGULARIZATION TERM: dE/dw_i += LAMBDA * w_i.
    // Skipped entirely while LAMBDA == 0.0 (adding zero is a no-op).
    if( LAMBDA != 0.0 )
    {
        for( decltype(m_links.size()) i = 0; i < m_links.size(); ++i )
        {
            dEdw[i] += LAMBDA*m_links[i].weight;
        }
    }
}
