/*
 Copyright 2013--Present JMM_PROGNAME
 
 This file is distributed under the terms of the JMM_PROGNAME License.
 
 You should have received a copy of the JMM_PROGNAME License.
 If not, see <JMM_PROGNAME WEBSITE>.
*/
// CREATED    : 9/18/2015
// LAST UPDATE: 9/30/2015

#include "statsxx/machine_learning/neural_network/deep_belief_network/DBN.hpp"

// STL
#include <cstddef>   // std::size_t
#include <stdexcept> // std::runtime_error
#include <vector>    // std::vector<>

// jScience
#include "jScience/linalg/Vector.hpp" // Vector<>

// stats++
#include "statsxx/distribution.hpp" // distribution::binomial()
#include "statsxx/machine_learning/activation_functions.hpp" // activation_function::Logistic


// Returns the activations of EVERY layer of the network for input x:
// H[0] is x itself, H[i] (i >= 1) is the activation of hidden layer i
// (one entry per stacked RBM, so H.size() == RBM.size() + 1).
//
// note: one should NEVER repeatedly call this for approximate stochastic sampling; instead, in the calling routine, one should first perform a deterministic propagation of the signal, followed by local sampling
inline std::vector<Vector<double>> neural_network::DBN::activations(
                                                                    const PropagationType prop_type,  // type of propagation of signal to use in greedy training
                                                                    // -----
                                                                    const Vector<double> &x           // data
                                                                    ) const
{
    std::vector<Vector<double>> H;
    
    activation_function::Logistic logistic;
    
    // layer 0 activations are the data itself
    H.push_back(x);
    
    //---------------------------------------------------------
    // STOCHASTIC SAMPLING
    //---------------------------------------------------------
    if(prop_type == neural_network::DBN::PropagationType::stochastic)
    {
        // propagate through every RBM, binarizing (Bernoulli-sampling) the
        // logistic activation at each layer before feeding it upward
        // (std::size_t index: RBM.size() is unsigned, so avoid a
        // signed/unsigned comparison)
        for(std::size_t i = 0; i < this->RBM.size(); ++i)
        {
            Vector<double> h = logistic.f(this->RBM[i].c + this->RBM[i].W*H.back());
            
            h = distribution::binomial<double>(1, h);
            
            H.push_back(h);
        }
    }
    //---------------------------------------------------------
    // PSEUDO-STOCHASTIC SAMPLING
    //---------------------------------------------------------
    else if(prop_type == neural_network::DBN::PropagationType::pseudo_stochastic)
    {
        // pseudo-stochastic = deterministic propagation + sampling at the
        // FINAL layer only; that is ill-defined when all layers are requested
        throw std::runtime_error("error in neural_network::DBN::activations(): pseudo-stochastic propagation does not make sense for getting the activations of the entire network");
    }
    //---------------------------------------------------------
    // DETERMINISTIC SAMPLING
    //---------------------------------------------------------
    else if(prop_type == neural_network::DBN::PropagationType::deterministic)
    {
        // mean-field propagation: use the real-valued logistic activations
        for(std::size_t i = 0; i < this->RBM.size(); ++i)
        {
            Vector<double> h = logistic.f(this->RBM[i].c + this->RBM[i].W*H.back());
            
            H.push_back(h);
        }
    }
    
    return H;
}


// Returns the activation of hidden layer `layer` for input x, propagating the
// signal through RBMs 0 .. layer-1.
//
// note: one should NEVER repeatedly call this for approximate stochastic sampling; instead, in the calling routine, one should first perform a deterministic propagation of the signal, followed by local sampling
inline Vector<double> neural_network::DBN::activations(
                                                       const int layer,                  // layer to propagate signal to
                                                       // -----
                                                       const PropagationType prop_type,  // type of propagation of signal to use in greedy training
                                                       // -----
                                                       const Vector<double> &x           // data
                                                       ) const
{
    Vector<double> h;
    
    activation_function::Logistic logistic;
    
    switch(prop_type)
    {
        //---------------------------------------------------------
        // STOCHASTIC SAMPLING
        //---------------------------------------------------------
        case neural_network::DBN::PropagationType::stochastic:
        {
            // binarize (Bernoulli-sample) the logistic activation at every layer
            h = distribution::binomial<double>(1, logistic.f(this->RBM[0].c + this->RBM[0].W*x));
            
            // note: we want to propagate all the way to (layer-1)
            for(int l = 1; l < layer; ++l)
                h = distribution::binomial<double>(1, logistic.f(this->RBM[l].c + this->RBM[l].W*h));
            
            break;
        }
        //---------------------------------------------------------
        // PSEUDO-STOCHASTIC SAMPLING
        //---------------------------------------------------------
        case neural_network::DBN::PropagationType::pseudo_stochastic:
        {
            // deterministic propagation first, then a single sampling step at the end
            h = distribution::binomial<double>(1, this->RBM_propagate(neural_network::DBN::PropagationType::deterministic, layer, x));
            
            break;
        }
        //---------------------------------------------------------
        // DETERMINISTIC SAMPLING
        //---------------------------------------------------------
        case neural_network::DBN::PropagationType::deterministic:
        {
            // mean-field propagation: real-valued logistic activations throughout
            h = logistic.f(this->RBM[0].c + this->RBM[0].W*x);
            
            for(int l = 1; l < layer; ++l)
                h = logistic.f(this->RBM[l].c + this->RBM[l].W*h);
            
            break;
        }
    }
    
    return h;
}
