/*
 Copyright 2013--Present JMM_PROGNAME
 
 This file is distributed under the terms of the JMM_PROGNAME License.
 
 You should have received a copy of the JMM_PROGNAME License.
 If not, see <JMM_PROGNAME WEBSITE>.
*/
// CREATED    : 10/12/2015
// LAST UPDATE: 10/12/2015

// note: this file should be updated in tandem with DBN_prop_back.cpp ...
//
// ... and additional notes to those below can be found therein

#include "statsxx/machine_learning/neural_network/deep_belief_network/DBN.hpp"

// STL
#include <stdexcept> // std::runtime_error()
#include <vector>    // std::vector<>

// jScience
#include "jScience/linalg/Vector.hpp" // Vector<>

// stats++
#include "statsxx/distribution.hpp" // distribution::binomial()
#include "statsxx/machine_learning/activation_functions.hpp" // activation_function::Logistic


// Propagates a signal forward through the deep belief network, from layer
// layer0 up to layer layern, treating x as the activations of layer0 and
// returning the resulting activations of layern.
//
// layer0    : index of the layer the signal starts from
// layern    : index of the layer the signal is propagated to
// prop_type : how the signal is propagated through each intermediate layer:
//               stochastic        -- sample binary units at every layer
//               pseudo_stochastic -- propagate real-valued probabilities, then
//                                    sample once at the end
//               deterministic     -- propagate real-valued probabilities only
// x         : input activations at layer0 (taken by value; reused as the
//             working buffer and returned)
//
// throws std::runtime_error on an invalid layer range or an unhandled prop_type
//
// NOTE(review): `inline` on a member function defined in a .cpp file gives the
// definition vague linkage; if prop_forward() is called from another
// translation unit this risks an undefined reference -- confirm the function
// is also visible as inline to its callers (e.g. declared inline in DBN.hpp).
inline Vector<double> neural_network::DBN::prop_forward(
                                                        const int layer0,                  // layer to propagate signal from
                                                        const int layern,                  // "                       " to
                                                        // -----
                                                        const PropagationType prop_type,   // type of propagation of signal
                                                        // -----
                                                        Vector<double> x                   // data
                                                        ) const
{
    //=========================================================
    // ERROR CHECKS
    //=========================================================
    
    if(layern < layer0)
    {
        throw std::runtime_error("error in neural_network::DBN::prop_forward(): (layern < layer0)");
    }
    
    if(layer0 < 0)
    {
        throw std::runtime_error("error in neural_network::DBN::prop_forward(): (layer0 < 0)");
    }

    // the network has this->RBM.size()+1 layers, indexed 0 .. this->RBM.size();
    // the loops below access this->RBM[i] for i in [layer0, layern), so layern
    // may be at most this->RBM.size().  (the previous bound of
    // this->RBM.size()+1 was an off-by-one that allowed indexing one past the
    // end of this->RBM; the cast also avoids a signed/unsigned comparison)
    if(layern > static_cast<int>(this->RBM.size()))
    {
        throw std::runtime_error("error in neural_network::DBN::prop_forward(): (layern > this->RBM.size())");
    }
    
    
    //=========================================================
    // PROPAGATION
    //=========================================================
    
    activation_function::Logistic logistic;
    
    //---------------------------------------------------------
    // STOCHASTIC SAMPLING
    //---------------------------------------------------------
    if(prop_type == neural_network::DBN::PropagationType::stochastic)
    {
        // note: see the notes for deterministic propagation below
        
        for(auto i = layer0; i < layern; ++i)
        {
            x = logistic.f(this->RBM[i].c + this->RBM[i].W*x);
            
            // sample binary unit states from the activation probabilities at
            // every layer
            x = distribution::binomial<double>(1, x);
        }
    }
    //---------------------------------------------------------
    // PSEUDO-STOCHASTIC SAMPLING
    //---------------------------------------------------------
    else if(prop_type == neural_network::DBN::PropagationType::pseudo_stochastic)
    {
        // note: see the notes for deterministic propagation below
        
        // first compute the deterministic propagation ...
        for(auto i = layer0; i < layern; ++i)
        {
            x = logistic.f(this->RBM[i].c + this->RBM[i].W*x);
        }
        
        // ... and then sample from it (only if the signal actually moved --
        // for layern == layer0 the input is returned untouched)
        if(layern != layer0)
        {
            x = distribution::binomial<double>(1, x);
        }
    }
    //---------------------------------------------------------
    // DETERMINISTIC SAMPLING
    //---------------------------------------------------------
    else if(prop_type == neural_network::DBN::PropagationType::deterministic)
    {
        // note: we start by assuming that x is the visible activations of the RBM of layer0
        
        for(auto i = layer0; i < layern; ++i)
        {
            // note: [nh x nv] * [nv x 1] = [nh x 1]
            x = logistic.f(this->RBM[i].c + this->RBM[i].W*x);
        }
    }
    else
    {
        // previously an unrecognized prop_type fell through all branches and
        // silently returned x unchanged; fail loudly instead, consistent with
        // the error handling above
        throw std::runtime_error("error in neural_network::DBN::prop_forward(): unhandled PropagationType");
    }
 
    return x;
}
