// (c) Yongjin Park, 2013
//     ypark28@jhu.edu

#ifndef FIT_HSBLOCK_MODEL_HH_
#define FIT_HSBLOCK_MODEL_HH_

#include "btree.hh"
#include "btree_io.hh"
#include "distrib.hh"
#include "sparse_data.hh"
#include "sparse_io.hh"
#include "cnetwork.hh"

#include <vector>
#include <iostream>
#include <algorithm>

#include <boost/ptr_container/ptr_vector.hpp>
#include <boost/function.hpp>
#include <boost/lexical_cast.hpp>
#include <unistd.h>

#include "random.hh"

using namespace std;

#ifdef DEGREE_CORRECT
typedef btree_t<poisson_t> model_tree_t;
#else
typedef btree_t<bernoulli_t> model_tree_t;
#endif


struct
param_t
{
  // Runtime options for the fitting executable; the default constructor
  // fills in every default value.
  param_t()
    : depth(1),
      net_file(),
      size_cutoff(-1.),
      eta(1.),
      num_iter(100),
      num_repeat(1),
      latent_file(),
      output("./out"),
      deterministic(false),
      full_computation(false)
  { }

  int depth;                  // tree depth (default 1)
  std::string net_file;       // input network file name
  double size_cutoff;         // default -1 (presumably "no cutoff" -- confirm at use site)
  double eta;                 // learning/smoothing weight (default 1)
  int num_iter;               // number of iterations (default 100)
  int num_repeat;             // number of repeats (default 1)

  std::string latent_file;    // optional initial latent-assignment file
  std::string output;         // output prefix (default "./out")
  bool deterministic;         // deterministic (argmax) updates if true
  bool full_computation;      // exact score computation if true
};


// Print usage information for executable `exe`, including the
// current/default values held in `param`.
void print_help( const char* exe, const param_t& param );

// Parse command-line arguments into `param`; returns a status code
// (presumably non-zero on failure -- confirm in the .cc file).
int
parse_cmd_args( param_t& param, const int argc, const char* argv[] );

// Apply `func(row, col, value)` to each stored entry of row `r`
// of the sparse matrix G.
void
row_for_each( const sp_stat_mat_t& G, const dim_t& r,
	      boost::function< void (const dim_t&, const dim_t&, double) > func );



////////////////////////////////////////////////////////////////
// visit column neighbors in G
// accumulate vector of d(i->k)
// G and Z must be sparse, otherwise slow
class func_degree_t
{
public:
  explicit func_degree_t( const sp_stat_mat_t& _mu, sp_stat_mat_t& _D ): Z(_mu), D(_D) {}

  ~func_degree_t(){ }

  // column name and edge weight
  void operator() (const dim_t& r, const dim_t& c, double edge)
  {
    if( r != c && Z.has_row(c) )
      {
        if( ! D.has_row(r) ) D.add_row(r);
        sp_stat_vec_t& ret = D.pull_row(r);
        for( sp_stat_mat_t::col_iterator_t ki=Z.begin_col(c);
	     ki != Z.end_col(c); ++ki )
          {
            int k = ki.dim();
            double z = ki.value();
            ret.increase( dim_t(k), z*edge );
          }
      }
  }

private:
  const sp_stat_mat_t& Z;
  sp_stat_mat_t& D;
};

////////////////////////////////////////////////////////////////
// initialize tree model using full graph
void
init_model( model_tree_t& T, const collapsible_network_t& G );

// Recursive helper for init_model: initializes the subtree rooted at
// `r`; `h` and `n` are accumulated across the recursion by reference
// (exact semantics in the .cc file).
void
init_model_sub( model_tree_t::node_ptr_t r, double Ntot,
		double K, double scale, double& h, double& n );

////////////////////////////////////////////////////////////////
// Bundles the optimization state into one object so the update
// routines below can take a single argument instead of a long
// parameter list.  Holds references to caller-owned data plus a few
// scratch members owned here.
struct
optim_state_t
{
  explicit optim_state_t( const collapsible_network_t& g,
			  model_tree_t& tree,
			  sp_stat_mat_t& z,
			  sp_stat_vec_t& bsz,
			  sp_stat_vec_t& volume )
    : G(g), Tree(tree), Z(z), Bsz(bsz), vol(volume)
  { }

  const collapsible_network_t& G;     // collapsed network
  model_tree_t& Tree;                 // network tree model
  sp_stat_mat_t& Z;                   // latent assignment matrix
  sp_stat_vec_t& Bsz;                 // cluster (bottom) size
  sp_stat_vec_t& vol;                 // cluster volume
  sp_stat_mat_t clust_deg_mat;        // cluster-specific degree matrix (scratch)
  sp_stat_vec_t left_score_memo;      // partial score memoizer from left
  sp_stat_vec_t right_score_memo;     // partial score memoizer from right
  int ii;                             // current index (uninitialized until set)
  int oiter;                          // current outer-loop iteration
  int iiter;                          // current inner-loop iteration
};

////////////////////////////////////////////////////////////////
// variational update routines

// Strong-typedef wrapper for an iteration count, so a bare int cannot
// be passed by accident.
struct iterations_t
{
  explicit iterations_t( const int t ) : val(t) {}
  const int val;  // immutable iteration count
};

// Run variational inference on network G, updating the tree model and
// the latent assignment matrix in place; returns the final score
// (presumably a variational bound / log-marginal -- confirm in .cc).
double
variational_inference( const collapsible_network_t& G,
		       boost::shared_ptr< model_tree_t>& TreePtr,
		       boost::shared_ptr< sp_stat_mat_t>& ZPtr,
		       const param_t& param );

// Update the bottom-level (leaf) assignments of the current state.
void
update_bottom( optim_state_t& ostate );

// generate random latent assignment matrix
boost::shared_ptr< sp_stat_mat_t >
build_random_latent( const collapsible_network_t& G, const random_index_t& rIdx );

// maximum score calculation: descends from `r`, accumulating degree and
// size sums by reference; `argmax` receives the best index found.
double
max_score_path( model_tree_t::node_ptr_t r, double& deg_sum, double& sz_sum, int& argmax, optim_state_t& ostate );


// exact score calculation
void
calc_partial_score( model_tree_t::node_ptr_t r, double& deg_sum, double& sz_sum, optim_state_t& ostate );

// Accumulate partial scores over the subtree rooted at `r` into
// `scores`, carrying the running total `accum` down the recursion.
void
sum_partial_score( model_tree_t::node_ptr_t r, sp_stat_vec_t& scores, optim_state_t& ostate, double accum );

// tree model update
// Column indices used in the sufficient-statistic matrices collected
// below (index 0 presumably reserved -- confirm in the .cc file).
const int edge_idx = 1;
const int tot_idx = 2;

// update tree parameters
// Refit parameters of the subtree rooted at `r` from the collected
// statistics `is`, with weight `eta`; returns a score.
double
update_tree(model_tree_t::node_ptr_t r, sp_stat_mat_t& is, double eta);


// Collect per-node sufficient statistics from the current state.
boost::shared_ptr< sp_stat_mat_t >
collect_tree_stat( optim_state_t& ostate );


// Recursive worker for collect_tree_stat; accumulates degree, size and
// volume sums by reference (details in the .cc file).
void
_collect_tree_stat( model_tree_t::node_ptr_t r, sp_stat_mat_t& is,
		    double& degsum, double& szsum, double& volsum,
		    const dim_t& i, optim_state_t& ostate );

// argmax assignment, deterministic case -- more efficient
boost::shared_ptr< sp_stat_mat_t >
collect_tree_stat_deterministic( optim_state_t& ostate );


// Collect statistics for the bottom (leaf) level only.
void
_collect_bottom_stat( model_tree_t::node_ptr_t r, sp_stat_mat_t& is,
		      const dim_t& i, optim_state_t& ostate );


// Collect internal-edge statistics for the subtree rooted at `r`.
void
_collect_intern_edge_stat( model_tree_t::node_ptr_t r, sp_stat_mat_t& is,
			   double& degsum, double& szsum,
			   const dim_t& i, optim_state_t& ostate );


////////////////////////////////////////////////////////////////
// Score of the null model on network G0 (presumably the log marginal
// likelihood, per the name -- confirm in the .cc file).
double
log_marg_null( const collapsible_network_t& G0 );

// Score of a fitted tree model (same caveat as log_marg_null).
template<typename D>
double
log_marg_tree( btree_t<D>& Tree );

////////////////////////////////////////////////////////////////
// collapsing tree
// Collapse the tree model and remap Z accordingly; returns a score.
double
collapse_tree( model_tree_t& Tree, sp_stat_mat_t& Z );

// Recursive helper for collapse_tree; fills the cluster-relabeling
// table `cTab` while tracking the cluster counter `k`.
double
collapse_tree_sub( model_tree_t::node_ptr_t r, sp_stat_vec_t& children, int& k,
		   boost::unordered_map<int,int>& cTab );

// merge latent variables if they were collapsed
double
merge_collapsed( model_tree_t::node_ptr_t r, sp_stat_vec_t& cTab, sp_stat_vec_t& zvec );

// Recursive worker for merge_collapsed (details in the .cc file).
double
_merge_collapsed( model_tree_t::node_ptr_t r, sp_stat_vec_t& cTab, sp_stat_vec_t& zvec,
		  int& k, bool upCollapsed );

////////////////////////////////////////////////////////////////
// housekeeping stuff
// Write the argmax assignment derived from Z to the stream `fout`.
void
write_argmax( const sp_stat_mat_t& Z, ostream& fout );

// Read a latent assignment matrix from `latent_file` for the vertices
// of G0 (the random index is presumably used to fill missing entries
// -- confirm in the .cc file).
boost::shared_ptr< sp_stat_mat_t >
read_latent( const char* latent_file, const collapsible_network_t& G0, const random_index_t& );


#endif /* FIT_HSBLOCK_MODEL_HH_ */
