#include "statsxx/machine_learning/NeuralNet.hpp"

// STL
#include <iostream>


//========================================================================
//========================================================================
//
// NAME: void NEURAL_NET::load_output_errors(const std::vector<double> &actual_out, const std::vector<double> &calc_out)
//
// DESC: Calculates the errors at the output neurons at time t, given actual (correct) output (actual_out) and calculated output (calc_out).
//
// NOTES:
//     ! for continuous output (squared-error loss, which corresponds to assuming Gaussian-distributed errors) the error at the output nodes is the difference between the provided ideal output and the calculated actual output multiplied with the activation function derivative at that output point, so:
//          delta_k = (F(x_k) - t_k)*F'(x_k)
//               x_k  = neuron value at node k
//               F(x) = value of x passed through activation function
//               t_k  = desired output at node k
//     ! for binomial or multinomial output, the appropriate error function at the output nodes is the cross-entropy error:
//          delta_k = -t_k*ln(F(x_k)) - (1 - t_k)*ln(1 - F(x_k)) ...
//     ! ... fortunately, when logistic or softmax functions are used dE/dnet becomes simply:
//          dE/dnet = O_k - t_k ...
//     ! ... which is the same as delta_k above, but NOT multiplied by F'(x_k)
//
//========================================================================
//========================================================================
// NOTE(review): `inline` on a definition in a .cpp file makes this function
// invisible to other translation units unless the declaration in the header is
// also inline — confirm this is only called from within this TU (or move the
// definition to the header).
inline void NEURAL_NET::load_output_errors(const std::vector<double> &actual_out, const std::vector<double> &calc_out)
{
    // For every output neuron, store the raw output error (calc - actual) as
    // the most recent entry of its error history, then apply the correct
    // derivative treatment for the loss in use (see header comment above):
    //   - classification (cross-entropy + logistic/softmax): dE/dnet is
    //     already (O_k - t_k), so the F'(x) factor must be SKIPPED.
    //   - regression (squared error): delta_k = (F(x_k) - t_k) * F'(x_k),
    //     so the error must be multiplied by F'(x).
    int i = 0;
    for( auto n : m_out_neurons )
    {
        m_neurons[n].m_errors.back() = (calc_out[i] - actual_out[i]);

        if( m_isClassif )
            m_neurons[n].override_activation(true); // skip multiplication by dFx
        else
            m_neurons[n].activate_error();          // multiply by dFx

        ++i;
    }
}
