// (c) Yongjin Park, 2013
// multi-layered latent variable inference

#include "fit_hsblock_dyn.hh"

void repeated_fit(const collapsible_network_t& G, const param_t& param)
{

  const int depth = param.depth;
  const int num_repeat = param.num_repeat;

  string latent_file = param.latent_file;

  // number of bottom-level clusters
  int K = 1 << depth;

  // read neighboring trees and weights
  const vector<double>& weights = param.adj_weight;
  const vector<string>& adj_tree_names = param.adj_tree;
#ifdef DEBUG
  assert(weights.size() == adj_tree_names.size());
#endif
  vector<boost::shared_ptr<model_tree_t> > adj_trees;
  try
  {
    for (int t = 0; t < weights.size(); ++t)
    {
      ifstream ifs(adj_tree_names.at(t).c_str(), ios::in);
      adj_trees.push_back(read_btree(ifs));
      ifs.close();
    }
  } catch (ofstream::failure& e)
  {
    cerr << " -- cannot read the tree files" << endl;
  }

  for (int t = 0; t < adj_trees.size(); ++t)
    assert(adj_trees.at(t).get()->get_depth() == depth);

  random_index_t rIdx(K);

  // repeat multiple times if necessary
  boost::shared_ptr<sp_stat_mat_t> ZBestPtr;
  boost::shared_ptr<model_tree_t> CurrTreeBestPtr;
  double scoreBest;

  for (int nr = 1; nr <= num_repeat; ++nr)
  {
    boost::shared_ptr<sp_stat_mat_t> Zptr(new sp_stat_mat_t);
    boost::shared_ptr<model_tree_t> CurrTreePtr(new model_tree_t(depth));
    model_tree_t& Tree = *(CurrTreePtr.get());

    if (num_repeat > 1)
      cerr << " -- repeat = " << nr << " / " << num_repeat << endl;

    double log_marg;

    // initialize model and latent assignment
    init_model(Tree, G);

    if (latent_file.size() > 0)
    {
      Zptr = read_latent(latent_file.c_str(), G, rIdx);
    }
    else
    {
      Zptr = build_random_latent(G, rIdx);
    }
    sp_stat_mat_t& Z = *(Zptr.get());

    log_marg = variational_inference(G, CurrTreePtr, Zptr, adj_trees, weights,
        param);

    if (nr == 1 || scoreBest < log_marg)
    {
      ZBestPtr = Zptr;
      CurrTreeBestPtr = CurrTreePtr;
      scoreBest = log_marg;
    }
  }

  sp_stat_mat_t& Z = *(ZBestPtr.get());
  model_tree_t& CurrTree = *(CurrTreeBestPtr.get());

  // to calculate Bayes factor
  double lm_null = log_marg_null(G);

  // output before collapsing
  try
  {
    { // argmax membership
      ofstream ofs((param.output + "-nocollapse.argmax").c_str(), ios::out);
      write_argmax(Z, ofs);
      ofs.close();
    }

    { // latent variable matrix
      write_pairs(Z, (param.output + "-nocollapse.latent").c_str());
    }

    { // output tree structure
      ofstream tree_ofs((param.output + "-nocollapse.tree").c_str(), ios::out);
      write_btree(CurrTree, tree_ofs);
      tree_ofs.close();
    }

    // output info
    ofstream info_out((param.output + "-nocollapse.info").c_str());
    info_out << "tree_depth\t" << param.depth << endl;
    info_out << "logP(model)\t" << scoreBest << endl;
    info_out << "logP(null)\t" << lm_null << endl;
    info_out << "bayes_factor\t" << (scoreBest - lm_null) << endl;
    info_out.close();

  } catch (ofstream::failure& e)
  {
    cerr << " -- cannot write output files" << endl;
  }

  ////////////////////////////////////////////////////////////////
  // collapse the tree and resolve the final model
  cerr << " -- collapsing tree" << endl;
  double lm_tree = collapse_tree(CurrTree, Z);

  cerr << " -- writing output files" << endl;
  // output after collapsing
  try
  {
    { // argmax membership
      ofstream ofs((param.output + ".argmax").c_str(), ios::out);
      write_argmax(Z, ofs);
      ofs.close();
    }

    { // latent variable matrix
      write_pairs(Z, (param.output + ".latent").c_str());
    }

    { // output tree structure
      ofstream tree_ofs((param.output + ".tree").c_str(), ios::out);
      write_btree(CurrTree, tree_ofs);
      tree_ofs.close();
    }

    // output info
    ofstream info_out((param.output + ".info").c_str());
    info_out << "tree_depth\t" << param.depth << endl;
    info_out << "logP(model)\t" << lm_tree << endl;
    info_out << "logP(null)\t" << lm_null << endl;
    info_out << "bayes_factor\t" << (lm_tree - lm_null) << endl;
    info_out.close();

  } catch (ofstream::failure& e)
  {
    cerr << " -- cannot write output files" << endl;
  }
}

int main(const int argc, const char* argv[])
{
  param_t param;
  if (parse_cmd_args(param, argc, argv) < 0)
  {
    print_help(argv[0], param);
    return -1;
  }

  string net_file = param.net_file;
  boost::shared_ptr<sp_stat_mat_t> edgesPtr = read_pairs(net_file.c_str(),
      true);
  boost::shared_ptr<collapsible_network_t> G0ptr(
      new collapsible_network_t(*(edgesPtr.get())));
  const collapsible_network_t& G = *(G0ptr.get());

  repeated_fit(G, param);

  return 0;
}

////////////////////////////////////////////////////////////////
// Print one formatted option line to stderr:
//   right-aligned short flag (width 10), long flag (width 15), then message.
// Empty `opt`/`lopt` suppress the corresponding flag column.
inline
void print_opt(std::string opt, std::string lopt, std::string msg)
{
  const std::string shortFlag = (opt.size() > 0) ? ("-" + opt) : std::string();
  const std::string longFlag = (lopt.size() > 0) ? ("--" + lopt) : std::string();
  std::cerr << std::setw(10) << shortFlag;
  std::cerr << std::setw(15) << longFlag;
  std::cerr << " :    " << msg << std::endl;
}

// Print usage, the option summary (with current defaults from `param`),
// and an example invocation to stderr.
void print_help(const char* exe, const param_t& param)
{
  cerr << "\nDynamic Stochastic Block Model: ";
  // compile-time choice of edge likelihood model
#ifdef DEGREE_CORRECT
  cerr << " with degree-corrected Poisson" << endl;
#else
  cerr << " with Bernoulli" << endl;
#endif
  cerr << "Inference algorithm: ";
  // compile-time choice of inference scheme
#ifdef LCVI
  cerr << " locally collapsed variational inference" << endl;
#else
  cerr << " mean-field variational inference" << endl;
#endif
  cerr << "\n<usage>\n" << endl;
  cerr << exe << " [options] [network_file]" << endl;
  cerr << endl;
  cerr << " network_file is a list of weighted pairs" << endl;
  cerr << " each line contains: u <\\t> v <\\t> weight <\\n> " << endl;
  cerr << " saves files [out].argmax, [out].info" << endl;
  cerr << endl;
  cerr << " [options]" << endl;
  cerr << endl;
  // one line per option; defaults are echoed from the current `param`
  print_opt("o", "out", "Output file header (default: " + param.output + ")");
  print_opt("l", "latent",
      "Latent membership file with each line: u <\\t> k <\\t> p");
  print_opt("d", "depth",
      "Depth of a tree model (default: "
          + boost::lexical_cast<string>(param.depth) + ")");
  print_opt("e", "eta",
      "learning rate Eta in (0,1] (default: "
          + boost::lexical_cast<string>(param.eta) + ")");
  print_opt("i", "iter",
      "number of update Iterations (default: "
          + boost::lexical_cast<string>(param.num_iter) + ")");
  print_opt("r", "repeat",
      "number of Repeated fitting (default: "
          + boost::lexical_cast<string>(param.num_repeat) + ")");
  print_opt("a", "adj", "adjacent tree model and weight (needs two arguments)");
  print_opt("t", "determ", "deterministic update (just take argmax)");
  print_opt("u", "full", "use full computation (more accurate? but slow)");
  print_opt("h", "help", "print Help");
  cerr << endl;
  cerr << " example:" << endl;
  cerr << " " << exe << " -a adj.tree 0.1" << endl;
  cerr << endl;
}

// Parse command-line arguments into `param`.
// Returns 0 on success, -1 on error (caller prints the help text).
// Any token that is not a recognized option is taken as the network file.
int parse_cmd_args(param_t& param, const int argc, const char* argv[])
{
  for (int pos = 1; pos < argc; ++pos)
  {
    string curr = argv[pos];
    try
    {
      if ((curr == "-o" || curr == "--out") && (++pos < argc))
      {
        param.output = argv[pos];
      }
      else if ((curr == "-e" || curr == "--eta") && (++pos < argc))
      {
        param.eta = boost::lexical_cast<double>(argv[pos]);
      }
      else if ((curr == "-a" || curr == "--adj") && ((pos + 2) < argc))
      {
        // takes two arguments: tree file and mixing weight in [0,1]
        param.adj_tree.push_back(argv[++pos]);
        double w = boost::lexical_cast<double>(argv[++pos]);
        if (w < 0 || w > 1)
        {
          cerr << " -- must have w in [0,1]" << endl;
          return -1;
        }
        param.adj_weight.push_back(w);
      }
      else if ((curr == "-d" || curr == "--depth") && (++pos < argc))
      {
        param.depth = boost::lexical_cast<int>(argv[pos]);
      }
      else if ((curr == "-l" || curr == "--latent") && (++pos < argc))
      {
        param.latent_file = argv[pos];
      }
      else if ((curr == "-i" || curr == "--iter") && (++pos < argc))
      {
        param.num_iter = boost::lexical_cast<int>(argv[pos]);
      }
      else if ((curr == "-r" || curr == "--repeat") && (++pos < argc))
      {
        param.num_repeat = boost::lexical_cast<int>(argv[pos]);
      }
      else if (curr == "-t" || curr == "--determ")
      {
        param.deterministic = true;
      }
      else if (curr == "-u" || curr == "--full")
      {
        param.full_computation = true;
      }
      else if (curr == "-h" || curr == "--help")
      {
        return -1;
      }
      else if (curr == "-o" || curr == "--out" || curr == "-e"
          || curr == "--eta" || curr == "-a" || curr == "--adj" || curr == "-d"
          || curr == "--depth" || curr == "-l" || curr == "--latent"
          || curr == "-i" || curr == "--iter" || curr == "-r"
          || curr == "--repeat")
      {
        // a recognized option reached here only because its required
        // argument(s) were missing; previously it silently became the
        // network file name
        cerr << " -- missing argument for option: " << curr << endl;
        return -1;
      }
      else
      {
        param.net_file = curr;
      }
    } catch (bad_lexical_cast&)
    {
      cerr << " -- check out your input: " << curr << endl;
      return -1;
    }
  }

  if (param.net_file.size() == 0)
  {
    cerr << " -- empty network data!" << endl;
    return -1;
  }

  // fail early if the network file cannot be opened for reading
  if (access(param.net_file.c_str(), F_OK) == -1)
  {
    cerr << " -- cannot access file: " << param.net_file << endl;
    return -1;
  }

  return 0;
}

////////////////////////////////////////////////////////////////
void row_for_each(const sp_stat_mat_t& G, const dim_t& r,
    boost::function<void(const dim_t&, const dim_t&, double)> func)
{
  if (G.has_row(r))
  {
    const sp_stat_vec_t& row = G.pull_const_row(r);
    for (sp_stat_vec_t::iterator_t it = row.begin_nonzero();
        it != row.end_nonzero(); ++it)
      func(r, dim_t(it.dim()), it.value());
  }
}

////////////////////////////////////////////////////////////////
// initialize tree model using full graph
// Recursively initialize the edge/total sufficient statistics for every
// node in the subtree rooted at `r`, bottom-up.
//
// Ntot  : total number of vertices in the network
// K     : number of leaf clusters
// scale : global edge-rate scaling (degree-corrected builds use Dtot/nnz)
// h     : (out) height of this node above the leaves
// n     : (out) expected number of vertices under this node
void init_model_sub(model_tree_t::node_ptr_t r, double Ntot, double K,
    double scale, double& h, double& n)
{
  network_distrib_t& distrib = r->data;
  // nearly wipe any existing statistics before re-initializing
  distrib.discount_stat(rate_t(1e-5));

  if (r->is_leaf())
  {
    h = 0.;
    // assume within-cluster edges are almost certain at the bottom
    double p = 0.9999;

    // evenly split vertices across leaves, but at least 2 per cluster
    n = max(2., Ntot / K);

    // # edges = s * 2^{-h} * n*(n-1)/2.
    // # degree prod = n*(n-1)/2.
    // where n grows bottom up
    // most of 'degree' explained by the bottom
    // i.e., n-1 ~ max, n-1 ~ bottom
    distrib.add_edge(network_distrib_t::edge_t(scale * p * n * (n - 1.) / 2.));
    distrib.add_tot(network_distrib_t::tot_t(n * (n - 1.) / 2.));
  }
  else
  {
    double nL, nR, hL, hR;
    init_model_sub(r->get_left(), Ntot, K, scale, hL, nL);
    init_model_sub(r->get_right(), Ntot, K, scale, hR, nR);

    h = max(hL, hR) + 1.;
    n = nL + nR;
    // cross-cluster edge probability decays geometrically with height
    double p = 0.9999 * pow(2., -h);

    p = max(1e-10, p);
    distrib.add_edge(network_distrib_t::edge_t(scale * p * nL * nR));
    distrib.add_tot(network_distrib_t::tot_t(nL * nR));
  }

  // refresh the node's parameters from the statistics just installed
  distrib.update_param();
  return;
}

// Initialize the whole tree model T from network G by delegating to
// init_model_sub at the root.
void init_model(model_tree_t& T, const collapsible_network_t& G)
{

#ifdef DEGREE_CORRECT
  // degree-corrected build: scale edge rates by Dtot / (total edge weight)
  double nnz = 0.;
  for( sp_stat_mat_t::row_iterator_t ri=G.edges.begin_row(); ri!=G.edges.end_row(); ++ri)
  nnz += sum( G.edges.pull_const_row(dim_t(*ri)), 0 );

  double scale = G.Dtot / nnz;
#else
  double scale = 1.;
#endif
  // h and n receive the root height / vertex count; unused here
  double h, n;
  init_model_sub(T.root_node_obj(), G.edges.size(), T.num_leaves(), scale, h,
      n);
}

////////////////////////////////////////////////////////////////

// Recompute the per-cluster bottom-level statistics from the current soft
// assignment Z: Bsz[k] = sum_i size_i * z_ik and vol[k] = sum_i d_i * z_ik.
// At most ~ O(V) work, so this is cheap to call every sweep.
void update_bottom(optim_state_t& ostate)
{
  sp_stat_vec_t& clustSize = ostate.Bsz;
  sp_stat_vec_t& clustVol = ostate.vol;
  const sp_stat_vec_t& degree_sum = ostate.G.degree_sum;
  const sp_stat_vec_t& vertex_size = ostate.G.size_vec;

  clustSize.clear();
  clustVol.clear();

  const sp_stat_mat_t& Z = ostate.Z;
  sp_stat_mat_t::row_iterator_t ri = Z.begin_row();
  for (; ri != Z.end_row(); ++ri)
  {
    const dim_t vertex(*ri);
    const sp_stat_vec_t& zvec = Z.pull_const_row(vertex);
    const double deg = degree_sum(vertex);
    const double vsz = vertex_size(vertex);
    clustSize.increase(zvec, vsz); // sum_i z_ik
    clustVol.increase(zvec, deg);  // sum_i d_i z_ik
  }
}

////////////////////////////////////////////////////////////////
// generate random latent assignment matrix
// Build a random hard assignment matrix: every vertex that has a row in
// the edge matrix gets a single unit entry in a uniformly random cluster.
boost::shared_ptr<sp_stat_mat_t> build_random_latent(
    const collapsible_network_t& G, const random_index_t& rIdx)
{
  boost::shared_ptr<sp_stat_mat_t> ret(new sp_stat_mat_t);
  sp_stat_mat_t& assign = *(ret.get());

  sp_stat_mat_t::row_iterator_t ri = G.edges.begin_row();
  for (; ri != G.edges.end_row(); ++ri)
  {
    // z[vertex][random cluster] = 1
    assign(dim_t(*ri), dim_t(rIdx()), 1.);
  }

  return ret;
}

// normalize z vector
// Normalize zvec in place so that its entries sum to one.
// Guard against an empty/zero vector, which previously produced
// a division by zero (inf/nan entries).
// NOTE(review): assumes sum(zvec, 1) returns the (nonnegative) total mass;
// verify against the sp_stat_vec_t implementation.
void normalize(sp_stat_vec_t& zvec)
{
  double denom = sum(zvec, 1);
  if (denom <= 0.) // nothing to normalize
    return;
  zvec *= (1. / denom);
  // sparse vector might need be normalized twice
  // since some small value can be eliminated
  denom = sum(zvec, 1);
  if (denom <= 0.)
    return;
  zvec *= (1. / denom);
#ifdef DEBUG
  assert(abs(sum(zvec, 1) - 1.) < 1e-5);
#endif
}

////////////////////////////////////////////////////////////////
// Find the bottom-level cluster with the maximum score for the current
// vertex (ostate.ii) by a post-order sweep over the tree.
//
// deg_sum : (out) cluster-degree mass under this node for vertex ii
// sz_sum  : (out) size/volume mass under this node
// argmax  : (out) leaf index achieving the best score
// Returns the best accumulated score in the subtree rooted at r.
double max_score_path(model_tree_t::node_ptr_t r, double& deg_sum,
    double& sz_sum, int& argmax, optim_state_t& ostate)
{
  network_distrib_t& F = r->data;

  if (r->is_leaf())
  {
    dim_t ii(ostate.ii);
    double d_i = ostate.G.degree_sum(ii);
    double Dtot = ostate.G.Dtot;

    int k = r->leaf_idx();
    dim_t kk(r->leaf_idx());
    deg_sum = ostate.clust_deg_mat(ii, kk);

#ifdef DEGREE_CORRECT
    sz_sum = d_i * ostate.vol(kk) / Dtot;
#else
    sz_sum = ostate.Bsz(kk);
#endif

#ifdef LCVI
    // calculate log-predictive
    double ret = F.log_pred( network_distrib_t::edge_t(deg_sum),
        network_distrib_t::tot_t(sz_sum) );
#else
    // calculate partial gradient
    double ret = F.gradient(network_distrib_t::edge_t(deg_sum),
        network_distrib_t::tot_t(sz_sum));
#endif

    // contribution from the neighbors (temporally adjacent trees)
    for (size_t t = 0; t < ostate.adj_weights.size(); ++t)
    {
      double w = ostate.adj_weights.at(t);
      const model_tree_t& T = *ostate.adj_trees.at(t).get();
      const network_distrib_t& Fadj = T.get_nth_node_const(r->hash());
#ifdef LCVI
      // calculate log-predictive
      ret += w * Fadj.log_pred( network_distrib_t::edge_t(deg_sum),
          network_distrib_t::tot_t(sz_sum) );
#else
      // calculate partial gradient
      ret += w
          * Fadj.gradient(network_distrib_t::edge_t(deg_sum),
              network_distrib_t::tot_t(sz_sum));
#endif
    }

    argmax = k;
    return ret;
  }

  int arg_left, arg_right;
  double deg2left, deg2right, n2left, n2right;

  double Gleft = max_score_path(r->get_left(), deg2left, n2left, arg_left,
      ostate);
  double Gright = max_score_path(r->get_right(), deg2right, n2right, arg_right,
      ostate);

  // going left pays this node's cost against the right-side mass,
  // and vice versa
#ifdef LCVI
  Gleft += F.log_pred( network_distrib_t::edge_t(deg2right),
      network_distrib_t::tot_t(n2right) );

  Gright += F.log_pred( network_distrib_t::edge_t(deg2left),
      network_distrib_t::tot_t(n2left) );
#else
  Gleft += F.gradient(network_distrib_t::edge_t(deg2right),
      network_distrib_t::tot_t(n2right));

  Gright += F.gradient(network_distrib_t::edge_t(deg2left),
      network_distrib_t::tot_t(n2left));
#endif

  // Contribution from the neighbors
  for (size_t t = 0; t < ostate.adj_weights.size(); ++t)
  {
    double w = ostate.adj_weights.at(t);
    const model_tree_t& T = *ostate.adj_trees.at(t).get();
    const network_distrib_t& Fadj = T.get_nth_node_const(r->hash());
#ifdef LCVI
    Gleft += w * Fadj.log_pred( network_distrib_t::edge_t(deg2right),
        network_distrib_t::tot_t(n2right) );

    Gright += w * Fadj.log_pred( network_distrib_t::edge_t(deg2left),
        network_distrib_t::tot_t(n2left) );
#else
    Gleft += w
        * Fadj.gradient(network_distrib_t::edge_t(deg2right),
            network_distrib_t::tot_t(n2right));

    // BUGFIX: was `Gright += 2 * ...`; the neighbor weight `w` must be
    // used here, mirroring the Gleft update and calc_partial_score()
    Gright += w
        * Fadj.gradient(network_distrib_t::edge_t(deg2left),
            network_distrib_t::tot_t(n2left));
#endif
  }

  deg_sum = deg2left + deg2right;
  sz_sum = n2left + n2right;

  argmax = (Gleft > Gright) ? arg_left : arg_right;
  return (Gleft > Gright) ? Gleft : Gright;
}

////////////////////////////////////////////////////////////////

// Compute and memoize, at every node, the partial score of sending the
// current vertex (ostate.ii) left vs right.  Results are written into
// ostate.left_score_memo / right_score_memo keyed by node hash; leaves
// store their full score in left_score_memo.
//
// deg_sum : (out) cluster-degree mass under this node for vertex ii
// sz_sum  : (out) size/volume mass under this node
void calc_partial_score(model_tree_t::node_ptr_t r, double& deg_sum,
    double& sz_sum, optim_state_t& ostate)
{
  network_distrib_t& F = r->data;

  if (r->is_leaf())
  {
    dim_t ii(ostate.ii);
    double d_i = ostate.G.degree_sum(ii);
    double Dtot = ostate.G.Dtot;

    dim_t k(r->leaf_idx());
    deg_sum = ostate.clust_deg_mat(ii, k);

#ifdef DEGREE_CORRECT
    sz_sum = d_i * ostate.vol(k) / Dtot;
#else
    sz_sum = ostate.Bsz(k);
#endif
    // cerr << deg_sum << ", " << sz_sum << ", " << w_e << ", " << w_d << endl;

#ifdef LCVI
    double score = F.log_pred( network_distrib_t::edge_t(deg_sum),
        network_distrib_t::tot_t(sz_sum) );
#else
    // calculate partial gradient
    double score = F.gradient(network_distrib_t::edge_t(deg_sum),
        network_distrib_t::tot_t(sz_sum));
#endif

    // contribution from the neighbors (temporally adjacent trees)
    for (int t = 0; t < ostate.adj_weights.size(); ++t)
    {
      double w = ostate.adj_weights.at(t);
      const model_tree_t& T = *ostate.adj_trees.at(t).get();
      const network_distrib_t& Fadj = T.get_nth_node_const(r->hash());
#ifdef LCVI
      // calculate log-predictive
      score += w * Fadj.log_pred( network_distrib_t::edge_t(deg_sum),
          network_distrib_t::tot_t(sz_sum) );
#else
      // calculate partial gradient
      score += w
          * Fadj.gradient(network_distrib_t::edge_t(deg_sum),
              network_distrib_t::tot_t(sz_sum));
#endif
    }

    // leaves memoize their full score under the "left" slot
    ostate.left_score_memo(dim_t(r->hash()), score);
    return;
  }

  double deg2left, deg2right, n2left, n2right;

  calc_partial_score(r->get_left(), deg2left, n2left, ostate);
  calc_partial_score(r->get_right(), deg2right, n2right, ostate);

  // going left pays this node's cost against right-side mass, and vice versa
#ifdef LCVI
  double Gleft = F.log_pred( network_distrib_t::edge_t(deg2right),
      network_distrib_t::tot_t(n2right) );

  double Gright = F.log_pred( network_distrib_t::edge_t(deg2left),
      network_distrib_t::tot_t(n2left) );
#else
  double Gleft = F.gradient(network_distrib_t::edge_t(deg2right),
      network_distrib_t::tot_t(n2right));

  double Gright = F.gradient(network_distrib_t::edge_t(deg2left),
      network_distrib_t::tot_t(n2left));
#endif

  // Contribution from the neighbors
  for (int t = 0; t < ostate.adj_weights.size(); ++t)
  {
    double w = ostate.adj_weights.at(t);
    const model_tree_t& T = *ostate.adj_trees.at(t).get();
    const network_distrib_t& Fadj = T.get_nth_node_const(r->hash());
#ifdef LCVI
    Gleft += w* Fadj.log_pred( network_distrib_t::edge_t(deg2right),
        network_distrib_t::tot_t(n2right) );

    Gright += w* Fadj.log_pred( network_distrib_t::edge_t(deg2left),
        network_distrib_t::tot_t(n2left) );
#else
    Gleft += w
        * Fadj.gradient(network_distrib_t::edge_t(deg2right),
            network_distrib_t::tot_t(n2right));

    Gright += w
        * Fadj.gradient(network_distrib_t::edge_t(deg2left),
            network_distrib_t::tot_t(n2left));
#endif
  }

  deg_sum = deg2left + deg2right;
  sz_sum = n2left + n2right;

  // memoize both branch scores for sum_partial_score()
  ostate.left_score_memo(dim_t(r->hash()), Gleft);
  ostate.right_score_memo(dim_t(r->hash()), Gright);
  return;
}

// Accumulate the memoized branch scores from the root down to each leaf,
// adding the running total (plus the leaf's own memo) into scores[k].
// Must be called after calc_partial_score() has filled the memos.
void sum_partial_score(model_tree_t::node_ptr_t r, sp_stat_vec_t& scores,
    optim_state_t& ostate, double accum)
{
  const dim_t node(r->hash());

  if (r->is_leaf())
  {
    const double leafScore = ostate.left_score_memo(node);
    scores.increase(dim_t(r->leaf_idx()), accum + leafScore);
    return;
  }

  sum_partial_score(r->get_left(), scores, ostate,
      accum + ostate.left_score_memo(node));
  sum_partial_score(r->get_right(), scores, ostate,
      accum + ostate.right_score_memo(node));
}

////////////////////////////////////////////////////////////////

// Accumulate, for vertex i, exact sufficient statistics (edge mass E and
// total-pair mass T) for every node of the tree into matrix `is`, keyed
// by node hash.  Used by collect_tree_stat() for the full (slow) update.
//
// degsum/szsum/totsum : (out) per-subtree aggregates propagated upward
void _collect_tree_stat(model_tree_t::node_ptr_t r, sp_stat_mat_t& is,
    double& degsum, double& szsum, double& totsum, const dim_t& i,
    optim_state_t& ostate)
{

  if (!is.has_row(dim_t(r->hash())))
    is.add_row(dim_t(r->hash()));

  sp_stat_vec_t& stat = is.pull_row(dim_t(r->hash()));

  if (r->is_leaf())
  {
    dim_t k(r->leaf_idx());
    double z_ik = ostate.Z(i, k);

    degsum = ostate.clust_deg_mat(i, k);
    szsum = z_ik;

    // E = sum_ij A_ij z_ik z_jk / 2 + sum_i z_ik e_i
    //     sum_i [ d_ik * z_ik / 2 + e_i * z_ik]
    double E = (degsum / 2. + ostate.G.within_edges(i)) * z_ik;

#ifdef DEGREE_CORRECT
    totsum = ostate.vol(k);
    double d_i = ostate.G.degree_sum(i);
    double voltot = ostate.G.Dtot;

    // T = sum_ij I[i!=j] d_i z_ik d_j z_jk / 2 / 2E + sum_i z_ik*dp_i / 2E
    //   = 1/2 * sum_i d_i z_ik sum_{j!=i} d_j z_jk / 2E + sum_i z_ik*dp_i / 2E
    //   = 1/2 * sum_i d_i z_ik ( vol(k) - d_i*z_ik ) / 2E + sum_i z_ik*dp_i / 2E
    //   = sum_i [ d_i*z_ik * (vol(k)-d_i*z_ik) /2/2E + z_ik * dp_i / 2E ]
    double T = d_i*z_ik*(totsum-d_i*z_ik) / 2. / voltot;
    T += z_ik * ostate.G.within_degrees(i) / voltot;
#else
    // T = sum_ij z_ik z_jk I[i!=j] / 2
    //   = sum_i [ z_ik * (n_k-z_ik) /2 ]
    double n_k = ostate.Bsz(k);
    totsum = n_k;
    double T = z_ik * (n_k - z_ik) * 0.5;
#endif

    stat.increase(dim_t(edge_idx), E);
    stat.increase(dim_t(tot_idx), T);

    return;
  }

  double deg2left, deg2right, szleft, szright, totleft, totright;
  _collect_tree_stat(r->get_left(), is, deg2left, szleft, totleft, i, ostate);
  _collect_tree_stat(r->get_right(), is, deg2right, szright, totright, i,
      ostate);

  degsum = deg2left + deg2right;
  szsum = szleft + szright;
  totsum = totleft + totright;

  // internal level

  // E = sum_ij A_ij sum_{l in L} sum_{r in R} z_il z_jr
  //     sum_i sum_{l in L} z_il sum_{r in R} sum_j A_ij z_jr
  //     sum_i ( n_iL * d_iR )
  double E = szleft * deg2right;

#ifdef DEGREE_CORRECT
  // only the degree-corrected branch needs these; previously they were
  // computed unconditionally and left unused in the Bernoulli build
  double d_i = ostate.G.degree_sum(i);
  double voltot = ostate.G.Dtot;

  // T = sum_ij sum_{l in L} sum_{r in R} d_i z_il d_j z_jr /2Etot
  //   = sum_i d_i sum_{l in L} z_il * sum_{r in R} sum_{j!=i} d_j z_jr /2Etot
  //   = sum_i d_i n_iL * sum_{r in R} [ vol_r - d_i z_ir ] /2Etot
  //   = sum_i [ d_i n_iL * (vol_R - d_i n_iR) /2Etot ]
  double T = d_i*szleft*(totright-d_i*szright) / voltot;
#else
  // T = sum_{i!=j} sum_{l in L} sum_{r in R} z_il z_jr
  //   = sum_i sum_{l in L} z_il sum_{r in R} sum_{j!=i} z_jr
  //   = sum_i n_iL sum_{r in R} [ n_r - z_ir ]
  //   = sum_i [ n_iL (n_R - n_iR) ]
  double T = szleft * (totright - szright);
#endif

  stat.increase(dim_t(edge_idx), E);
  stat.increase(dim_t(tot_idx), T);

  return;
}

// Collect exact sufficient statistics over all vertices by running the
// full recursive sweep (_collect_tree_stat) once per row of the edge
// matrix.  Returns the node-hash-keyed statistics matrix.
boost::shared_ptr<sp_stat_mat_t> collect_tree_stat(optim_state_t& ostate)
{
  const sp_stat_mat_t& G = ostate.G.edges;
  model_tree_t& T = ostate.Tree;

  boost::shared_ptr<sp_stat_mat_t> statPtr(new sp_stat_mat_t);

  sp_stat_mat_t::row_iterator_t it = G.begin_row();
  for (; it != G.end_row(); ++it)
  {
    // per-vertex sweep over the whole tree; outputs are aggregates
    // we do not need at this level
    double degsum, szsum, totsum;
    model_tree_t::node_ptr_t root = T.root_node_obj();
    _collect_tree_stat(root, *(statPtr.get()), degsum, szsum, totsum,
        dim_t(*it), ostate);
  }
  return statPtr;
}

////////////////////////////////////////////////////////////////
// for faster update
// bottom level statistics

// Accumulate vertex i's contribution to the edge (E) and total-pair (T)
// statistics of a single leaf node `r` into matrix `is`.  Fast-path
// counterpart of the leaf case in _collect_tree_stat().
void _collect_bottom_stat(model_tree_t::node_ptr_t r, sp_stat_mat_t& is,
    const dim_t& i, optim_state_t& ostate)
{
#ifdef DEBUG
  assert(r->is_leaf());
#endif

  // ensure this leaf has a statistics row keyed by its hash
  if (!is.has_row(dim_t(r->hash())))
    is.add_row(dim_t(r->hash()));

  sp_stat_vec_t& stat = is.pull_row(dim_t(r->hash()));

  dim_t k(r->leaf_idx());
  double z_ik = ostate.Z(i, k);

  // cerr << "z[" << i.val << "][" << k.val << "] = " << z_ik << endl;

  // E = sum_ij A_ij z_ik z_jk / 2 + sum_i z_ik e_i
  //     sum_i [ d_ik * z_ik / 2 + z_ik e_i ]
  double E = ostate.clust_deg_mat(i, k) * z_ik * 0.5
      + z_ik * ostate.G.within_edges(i);

#ifdef DEGREE_CORRECT
  double d_i = ostate.G.degree_sum(i);
  double vol_k = ostate.vol(k);
  double voltot = ostate.G.Dtot;
  // T = sum_ij I[i!=j] d_i z_ik d_j z_jk / 2 / 2E + sum_i z_ik*dp_i / 2E
  //   = 1/2 * sum_i d_i z_ik sum_{j!=i} d_j z_jk / 2E + sum_i z_ik*dp_i / 2E
  //   = 1/2 * sum_i d_i z_ik ( vol(k) - d_i*z_ik ) / 2E + sum_i z_ik*dp_i / 2E
  //   = sum_i [ d_i*z_ik * (vol(k)-d_i*z_ik) /2/2E + z_ik * dp_i/2E ]
  double T = d_i*z_ik*(vol_k-d_i*z_ik) / 2. / voltot;
  T += z_ik * ostate.G.within_degrees(i) / voltot;
#else
  double n_k = ostate.Bsz(k);
  // T = sum_ij z_ik z_jk I[i!=j] / 2
  //   = sum_i [ z_ik * (n_k-z_ik) /2 ]
  double T = z_ik * (n_k - z_ik) * 0.5;
#endif

  stat.increase(dim_t(edge_idx), E);
  stat.increase(dim_t(tot_idx), T);
  return;
}

// just collect internal nodes edge stats
// Accumulate vertex i's cross-subtree edge statistic (E only) for every
// internal node of the subtree rooted at r into `is`.  Leaves contribute
// nothing here beyond ensuring their row exists; total-pair statistics
// are handled separately by _collect_intern_total_stat().
void _collect_intern_edge_stat(model_tree_t::node_ptr_t r, sp_stat_mat_t& is,
    double& degsum, double& szsum, const dim_t& i, optim_state_t& ostate)
{
  if (!is.has_row(dim_t(r->hash())))
    is.add_row(dim_t(r->hash()));

  sp_stat_vec_t& stat = is.pull_row(dim_t(r->hash()));

  if (r->is_leaf())
  {
    const dim_t k(r->leaf_idx());
    degsum = ostate.clust_deg_mat(i, k);
    szsum = ostate.Z(i, k);
    return;
  }

  double degLeft, degRight, szLeft, szRight;
  _collect_intern_edge_stat(r->get_left(), is, degLeft, szLeft, i, ostate);
  _collect_intern_edge_stat(r->get_right(), is, degRight, szRight, i, ostate);

  degsum = degLeft + degRight;
  szsum = szLeft + szRight;

  // E = sum_ij A_ij sum_{l in L} sum_{r in R} z_il z_jr
  //     sum_i sum_{l in L} z_il sum_{r in R} sum_j A_ij z_jr
  //     sum_i ( n_iL * d_iR )
  const double crossEdge = szLeft * degRight;
  stat.increase(dim_t(edge_idx), crossEdge);

  return;
}

// just collect tot stat
// update is valid for approximate update
// Accumulate the total-pair statistic (T only) for every internal node
// from the already-updated bottom-level aggregates (Bsz / vol); this is
// the approximate fast path that skips per-vertex terms.
//
// sumval : (out) total size/volume mass under this node
void _collect_intern_total_stat(model_tree_t::node_ptr_t r, sp_stat_mat_t& is,
    double& sumval, optim_state_t& ostate)
{
  // ensure this node has a statistics row keyed by its hash
  if (!is.has_row(dim_t(r->hash())))
    is.add_row(dim_t(r->hash()));

  sp_stat_vec_t& stat = is.pull_row(dim_t(r->hash()));

  if (r->is_leaf())
  {
    // leaves only report their bottom-level aggregate upward
#ifdef DEGREE_CORRECT
    sumval = ostate.vol( dim_t(r->leaf_idx()) );
#else
    sumval = ostate.Bsz(dim_t(r->leaf_idx()));
#endif
    return;
  }

  double sum2left, sum2right;
  _collect_intern_total_stat(r->get_left(), is, sum2left, ostate);
  _collect_intern_total_stat(r->get_right(), is, sum2right, ostate);

  sumval = sum2left + sum2right;

#ifdef DEGREE_CORRECT
  // T = sum_i d_i sum_{l in L} z_il sum_{j!=i} d_j sum_{r in R} z_jr / 2E
  //   = sum_i d_i z_iL sum_{j!=i} d_j z_jR / 2E
  //   = (sum_i d_i z_iL) (sum_j d_j z_jR) - sum_i d_i*d_i z_iL*z_iR / 2E
  //   ~ (sum_i d_i z_iL) (sum_j d_j z_jR) / 2E
  //   = vol_L * vol_R / 2E
  double voltot = ostate.G.Dtot;
  double T = sum2left * sum2right / voltot;
#else
  // T = n_L * n_R
  double T = sum2left * sum2right;
#endif

  stat.increase(dim_t(tot_idx), T);

  return;
}

// Fast statistics collection exploiting sparsity of Z: bottom-level
// statistics only for clusters a vertex actually touches, internal edge
// statistics only below the lowest common ancestor of the touched range,
// and total-pair statistics in one global pass over the tree.
// In DEBUG builds the result is cross-checked against the exact
// collect_tree_stat().
boost::shared_ptr<sp_stat_mat_t> collect_tree_stat_deterministic(optim_state_t& ostate)
{
  const sp_stat_mat_t& Z = ostate.Z;
  const sp_stat_mat_t& G = ostate.G.edges;
  model_tree_t& T = ostate.Tree;

  boost::shared_ptr<sp_stat_mat_t> ret(new sp_stat_mat_t);
  sp_stat_mat_t& intern_stat = *(ret.get());

  // cluster-index range touched by the current vertex
  int kMin, kMax;

  // collect statistics of edges
  for (sp_stat_mat_t::row_iterator_t it = G.begin_row(); it != G.end_row();
      ++it)
  {
    dim_t i(*it);
    const sp_stat_vec_t& zvec = Z.pull_const_row(i);
    const sp_stat_vec_t& clust_deg = ostate.clust_deg_mat.pull_const_row(i);

    if (clust_deg.size() > 0)
    {
      kMin = clust_deg.get_min_dim();
      kMax = clust_deg.get_max_dim();
    }
    else
    {
      // no cluster degrees recorded: fall back to the full leaf range
      kMin = 0;
      kMax = T.num_leaves() - 1;
    }

    // make sure to cover all valid range in [K]
    kMin = min(kMin, zvec.get_min_dim());
    kMax = max(kMax, zvec.get_max_dim());

    // bottom level edges & holes
    // ~ nearly constant time if Z is sparse
    for (sp_stat_vec_t::iterator_t ki = zvec.begin_nonzero();
        ki != zvec.end_nonzero(); ++ki)
      _collect_bottom_stat(T.get_leaf_node_obj(ki.dim()), intern_stat, i,
          ostate);

    // update internal node edge stat
    // (skipped when the vertex touches a single cluster: no cross terms)
    if (kMax >= 0 && kMin != kMax)
    {
      double degsum, szsum;
      model_tree_t::node_ptr_t lca = T.get_lca_node_obj(kMin, kMax);
      _collect_intern_edge_stat(lca, intern_stat, degsum, szsum, i, ostate);
    }
  }

  // total count stat (single pass over the whole tree)
  model_tree_t::node_ptr_t root = T.root_node_obj();
  double sumval;
  _collect_intern_total_stat(root, intern_stat, sumval, ostate);

#ifdef DEBUG

  // sanity check: the fast statistics must match the exact sweep
  boost::shared_ptr<sp_stat_mat_t> ret_debug = collect_tree_stat(ostate);

  sp_stat_mat_t& intern_stat_debug = *(ret_debug.get());

  // check two statistics
  for (sp_stat_mat_t::row_iterator_t it = intern_stat.begin_row();
      it != intern_stat.end_row(); ++it)
  {
    const sp_stat_vec_t& stat = intern_stat.pull_const_row(dim_t(*it));
    const sp_stat_vec_t& stat_debug = intern_stat_debug.pull_const_row(
        (dim_t(*it)));

    if (!approx_equal(stat, stat_debug))
    {
      cerr << "\n r=" << *it << endl;
      stat.dump();
      stat_debug.dump();
      cerr << "values do not match!" << endl;
    }
  }
#endif

  return ret;
}

// update tree parameters
// Apply one eta-weighted stochastic update to every node in the subtree
// rooted at r, reading the sufficient statistics collected in `is`.
// Returns the accumulated parameter change over the subtree.
double update_tree(model_tree_t::node_ptr_t r, sp_stat_mat_t& is, double eta)
{
  const sp_stat_vec_t& stat = is.pull_const_row(dim_t(r->hash()));

  const double E = stat(dim_t(edge_idx));
  const double T = stat(dim_t(tot_idx));

#ifdef DEBUG
  // statistics should be nonnegative up to numerical error
  assert(E > -1e-10);
  assert(T > -1e-10);
#endif

  // blend new statistics into the node's distribution at rate eta
  network_distrib_t& F = r->data;
  F.discount_stat(rate_t(1. - eta));
  F.add_edge(network_distrib_t::edge_t(E), rate_t(eta));
  F.add_tot(network_distrib_t::tot_t(T), rate_t(eta));

  double delt = F.update_param();

#ifdef DEBUG
  if (boost::math::isnan(delt))
    cerr << F.num_edge() << ", " << F.num_tot() << ", " << delt << endl;
  assert(!boost::math::isnan(delt));
#endif

  if (r->is_leaf())
    return delt;

  double total = delt;
  total += update_tree(r->get_left(), is, eta);
  total += update_tree(r->get_right(), is, eta);
  return total;
}

// ================ model comparison ================
// model score (not collapsed)

////////////////////////////////////////////////////////////////
// collapsing tree
// *caution* this will mess up distributions
// Collapse the tree where merging sibling blocks improves the log
// marginal likelihood, then remap the columns of Z accordingly.
// Returns the log marginal likelihood of the collapsed model.
double collapse_tree(model_tree_t& Tree, sp_stat_mat_t& Z)
{
  // figure out where to collapse
  sp_stat_vec_t children;

  // map: k to collapsed k'
  int k;
  boost::unordered_map<int, int> cTab;
  double ret = collapse_tree_sub(Tree.root_node_obj(), children, k, cTab);

  // change Z matrix: move mass from each collapsed column into its
  // representative column cTab[k]
  for (sp_stat_mat_t::row_iterator_t ri = Z.begin_row(); ri != Z.end_row();
      ++ri)
  {
    sp_stat_vec_t& zvec = Z.pull_row(dim_t(*ri));
    for (sp_stat_vec_t::iterator_t zi = zvec.begin_nonzero();
        zi != zvec.end_nonzero();)
    {
      int k = zi.dim();
      double z_ik = zi.value();
      // advance before mutating: the current entry may be zeroed below
      // NOTE(review): assumes increase()/assignment do not invalidate
      // other nonzero iterators of zvec -- verify sp_stat_vec_t semantics
      ++zi;
      if (cTab[k] != k)
      {
        zvec.increase(dim_t(cTab.at(k)), z_ik);
        zvec(dim_t(k), 0.);
      }
    }
  }

  return ret;
}

// Post-order pass deciding at every internal node whether merging the two
// child blocks improves the log marginal likelihood.
//
// children : (out) indicator entries for all leaf clusters under r
// k        : (out) smallest leaf index under r (the representative cluster)
// cTab     : leaf cluster -> collapsed representative mapping
// Returns the best log marginal for the subtree rooted at r.
double collapse_tree_sub(model_tree_t::node_ptr_t r, sp_stat_vec_t& children,
    int& k, boost::unordered_map<int, int>& cTab)
{

  // ================ leaf node ================
  if (r->is_leaf())
  {
    k = r->leaf_idx();
    network_distrib_t& F = r->data;
    cTab[k] = k; // each leaf starts as its own representative
    children(dim_t(k), 1);
    return F.log_marg();
  }

  network_distrib_t& F = r->data;
  double marg = F.log_marg();
  int kLeft, kRight;

  sp_stat_vec_t left;
  sp_stat_vec_t right;
  // BUGFIX: the left recursion previously received `children` instead of
  // `left`, so its leaf set was discarded by `children = left` below and
  // only right-subtree clusters were ever remapped in cTab.
  marg += collapse_tree_sub(r->get_left(), left, kLeft, cTab);
  marg += collapse_tree_sub(r->get_right(), right, kRight, cTab);

  children = left;
  children += right;

  k = min(kLeft, kRight);

  // accumulate distributions from left and right subtrees into this node
  // (F already provided `marg` above, so mutating it now is safe)
  network_distrib_t& Fl = r->get_left()->data;
  network_distrib_t& Fr = r->get_right()->data;
  F += Fl;
  F += Fr;
  double margCollapsed = F.log_marg();

  // test if this should be collapsed or not
  // consider some numerical error
  if ((margCollapsed - marg) >= 0.)
  {
    // merge: point every leaf under this node at the representative k
    for (sp_stat_vec_t::iterator_t ci = children.begin_nonzero();
        ci != children.end_nonzero(); ++ci)
      cTab[ci.dim()] = k;

    // cerr << " -- collapsed to " << k << endl;
    return margCollapsed;
  }

  return marg;
}

////////////////////////////////////////////////////////////////
// variational inference on collapsed network
// given initial latent matrix Z and Tree model

// Variational inference driver: alternately (1) sweeps over vertices
// updating the latent membership matrix Z with delta-updates of the
// cluster sizes / volumes / cluster-specific degrees, and (2) updates
// the tree model parameters, until the log-marginal score stabilizes
// or num_iter outer iterations are exhausted.
//
//   G           : collapsed network (edges, degree_sum, size_vec)
//   TreePtr     : model tree of network_distrib_t nodes (updated in place)
//   ZPtr        : |V| x K latent membership matrix (updated in place)
//   adj_trees   : neighboring trees, weighted by adj_weights
//   param       : num_iter, deterministic, full_computation, eta
//
// Returns one slot of the running log-marginal buffer (see NOTE at the
// bottom of the function).
double variational_inference(const collapsible_network_t& G,
    boost::shared_ptr<model_tree_t>& TreePtr,
    boost::shared_ptr<sp_stat_mat_t>& ZPtr,
    const vector<boost::shared_ptr<model_tree_t> >& adj_trees,
    const vector<double>& adj_weights, const param_t& param)
{

  const int num_iter = param.num_iter;
  const bool deterministic = param.deterministic;
  const bool full_computation = param.full_computation;
  const double eta = param.eta;

  model_tree_t& Tree = *(TreePtr.get());
  sp_stat_mat_t& Z = *(ZPtr.get());

  const double Ktot = Tree.num_leaves();
  const int K = Tree.num_leaves();

  sp_stat_vec_t Bsz; // per-cluster total vertex size (maintained by delta-updates)
  sp_stat_vec_t Vol; // per-cluster total degree (volume)

  optim_state_t ostate(G, Tree, Z, Bsz, Vol, adj_trees, adj_weights);

  update_bottom(ostate);

  const sp_stat_vec_t& degree_sum = G.degree_sum;
  const sp_stat_vec_t& vertex_size = G.size_vec;

  // circular buffer of the last lmvsz log-marginal values, used for a
  // running-average convergence test on the outer loop
  sp_stat_vec_t sz_old;
  int lmvsz = 4;
  vector<double> log_marg_vec(lmvsz, 0.);

  // pre-calculated cluster-specific degree matrix
  //  |V| x K sparse matrix
  sp_stat_mat_t& clust_deg_mat = ostate.clust_deg_mat;
  func_degree_t clust_deg_func(Z, clust_deg_mat);

  // need full cluster-specific degree matrix
  for (sp_stat_mat_t::row_iterator_t ri = G.edges.begin_row();
      ri != G.edges.end_row(); ++ri)
    row_for_each(G.edges, dim_t(*ri), clust_deg_func);

  int kMin, kMax;

  sp_stat_vec_t latent_score;

  // update iteration
  for (int oiter = 1; oiter <= num_iter; ++oiter)
  {

    bool conv = false;
    double diff = Tree.num_leaves();
    cerr.precision(4);

    ostate.oiter = oiter;

    // will change latent assignment until convergence
    for (int iter = 1; iter <= num_iter; ++iter)
    {

      ostate.iiter = iter;

      cerr << "\r" << " -- latent update [" << setw(15) << iter << "] ";

      for (sp_stat_mat_t::row_iterator_t ri = Z.begin_row(); ri != Z.end_row();
          ++ri)
      {
        ostate.ii = *ri;
        dim_t ii(*ri);

        // isolated vertices keep their current assignment
        if (!G.edges.has_row(ii))
          continue;

        const sp_stat_vec_t& clust_deg = clust_deg_mat.pull_const_row(ii);
        sp_stat_vec_t& zvec = Z.pull_row(ii);

        if (!full_computation)
        {
          // not doing full computation
          // determine the range of latent assignments
          if (clust_deg.size() > 0)
          {
            kMin = clust_deg.get_min_dim();
            kMax = clust_deg.get_max_dim();
          }
          else
          {
            kMin = 0;
            kMax = Tree.num_leaves() - 1;
          }

          // already a hard assignment to the only candidate cluster:
          // nothing can change, skip the expensive scoring
          if (kMin == kMax && kMin == zvec.get_min_dim() && zvec.size() == 1)
          {
            continue;
          }
        }
        else
        {
          kMin = 0;
          kMax = Tree.num_leaves() - 1;
        }
        double d_i = degree_sum(ii);
        double vsz = vertex_size(ii);

        // remove current assignment
        Bsz.decrease(zvec, vsz);
        Vol.decrease(zvec, d_i);

        // delta-update of cluster-specific degree
        // (subtract vertex ii's contribution from each neighbor's row)
        {
          const sp_stat_vec_t& neigh = G.edges.pull_const_row(ii);
          for (sp_stat_vec_t::iterator_t ni = neigh.begin_nonzero();
              ni != neigh.end_nonzero(); ++ni)
          {
            int j = ni.dim();
            double e_ij = ni.value();
            sp_stat_vec_t& clust_deg_j = clust_deg_mat.pull_row(dim_t(j));
            clust_deg_j.decrease(zvec, e_ij);
          }
        }

        zvec.clear();

        int kMaxBound = kMax + 1;

        // calculate latent scores
        // restrict scoring to the subtree spanning [kMin, kMax]
        model_tree_t::node_ptr_t root = Tree.get_lca_node_obj(kMin, kMax);

        // ================================================================
        // update latent membership
        // just update by maximum value
        // this generally gives more robust results

        if (deterministic)
        {
          // hard (MAP) assignment to the best-scoring cluster
          double e, t;
          int argmax;
          max_score_path(root, e, t, argmax, ostate);
          zvec(dim_t(argmax), 1.);
        }
        else
        {
          // soft assignment: normalized posterior over clusters,
          // computed in log space for numerical stability
          double e, t;
          latent_score.clear();
          calc_partial_score(root, e, t, ostate);
          sum_partial_score(root, latent_score, ostate, 0.);

          double denom = log_sum(latent_score);
          for (sp_stat_vec_t::iterator_t li = latent_score.begin_nonzero();
              li != latent_score.end_nonzero(); ++li)
          {
            int k = li.dim();
            double lp = li.value();
            double p = exp(lp - denom);
            zvec(dim_t(k), p);
          }
        }

        // update bottom size and volume
        Bsz.increase(zvec, vsz);
        Vol.increase(zvec, d_i);

        // delta-update of cluster-specific degree
        // (add back vertex ii's contribution under the new assignment)
        {
          const sp_stat_vec_t& neigh = G.edges.pull_const_row(ii);
          for (sp_stat_vec_t::iterator_t ni = neigh.begin_nonzero();
              ni != neigh.end_nonzero(); ++ni)
          {
            int j = ni.dim();
            double e_ij = ni.value();
            sp_stat_vec_t& clust_deg_j = clust_deg_mat.pull_row(dim_t(j));
            clust_deg_j.increase(zvec, e_ij);
          }
        }

      } // end of node loop

      // check convergence
      // NOTE(review): on the first pass sz_old is empty, so diff is
      // |Bsz|/Ktot -- effectively never below threshold; confirm intended.
      {
        sz_old -= Bsz;
        diff = norm(sz_old, 2.) / Ktot;
        cerr << " diff [" << setw(15) << diff << "]";
        if (diff < 1e-2)
        {
          conv = true;
          break;
        }
        sz_old = Bsz;
      }
    } // end of latent update

#ifdef DEBUG
    // check delta-update
    // (recompute the cluster-specific degree matrix from scratch and
    //  compare against the incrementally-maintained one)
    {
      sp_stat_mat_t clust_deg_debug;
      func_degree_t debug_func(Z, clust_deg_debug);
      for (sp_stat_mat_t::row_iterator_t ri = Z.begin_row(); ri != Z.end_row();
          ++ri)
        row_for_each(G.edges, dim_t(*ri), debug_func);

      cerr << " -- check cluster-specific degree matrix";

      for (sp_stat_mat_t::row_iterator_t ri = Z.begin_row(); ri != Z.end_row();
          ++ri)
      {
        dim_t ii(*ri);
        const sp_stat_vec_t& debug_vec = clust_deg_debug.pull_const_row(ii);
        const sp_stat_vec_t& org_vec = clust_deg_mat.pull_const_row(ii);

        if (!approx_equal(debug_vec, org_vec))
        {
          org_vec.dump();
          debug_vec.dump();
        }
      }
      cerr << " done" << endl;
    }
#endif // end of debugging
    if (conv)
      cerr << " -> converged  "; // << endl;
    else
      cerr << " -> max reached"; //  << endl;

    // update tree model
    double delt;
    boost::shared_ptr<sp_stat_mat_t> isPtr;

    if (deterministic)
      isPtr = collect_tree_stat_deterministic(ostate);
    else
      isPtr = collect_tree_stat(ostate);

    // average per-node parameter change, used as a convergence measure
    delt = update_tree(Tree.root_node_obj(), *(isPtr.get()), eta);
    delt /= ((double) Tree.num_nodes());

    // check convergence
    double log_marg = log_marg_tree(Tree);

    // take running average of last 3 log-marginal
    double log_marg_prev = accumulate(log_marg_vec.begin(), log_marg_vec.end(),
        0.) / ((double) lmvsz);
    log_marg_vec[oiter % lmvsz] = log_marg;
    double log_marg_curr = accumulate(log_marg_vec.begin(), log_marg_vec.end(),
        0.) / ((double) lmvsz);
    double log_marg_diff = log_marg_prev - log_marg_curr;

    cerr << " [" << setw(15) << log_marg << "]" << endl;

    if (delt < 1e-2 || (oiter > 3 && log_marg_diff * log_marg_diff < 1e-2))
    {
      cerr << " -- converged " << endl;
      break;
    }
  }

  // NOTE(review): this returns a fixed slot of the circular buffer; the
  // most recent value lives at index (oiter % lmvsz), which equals
  // lmvsz-1 only when the last outer iteration satisfied
  // oiter % lmvsz == lmvsz-1 -- confirm this is the intended score.
  double ret = log_marg_vec[lmvsz - 1];
  return ret;
} // end of latent inference

////////////////////////////////////////////////////////////////
// log marginal prob under the null model
// Log marginal probability of the network under the single-block null
// model: all edges pooled into one Poisson distribution.
//   E = number of edges (Dtot counts each edge twice, hence * 0.5)
//   T = total exposure (expected-edge denominator)
double log_marg_null(const collapsible_network_t& G0)
{
  double E = G0.Dtot * 0.5;

#ifdef DEGREE_CORRECT
  // degree-corrected exposure:
  //   sum_{i<j} d_i d_j = .5 * ( [sum_i d_i]^2 - sum_i d_i^2 )
  // BUG FIX: the .5 previously applied only to the squared-sum term
  // (T = .5*S^2 - Q instead of .5*(S^2 - Q)), contradicting the
  // identity stated in the original comment.
  double T = 0.5 * (G0.Dtot * G0.Dtot - sum(G0.degree_sum, 2));
  T /= G0.Dtot;

  poisson_t F;
  F.add_edge(network_distrib_t::edge_t(E));
  F.add_tot(network_distrib_t::tot_t(T));
#else
  // uniform exposure: number of possible dyads among n vertices
  double n = G0.edges.size(); // number of rows = number of nodes
  double T = n * (n - 1.) * 0.5;
  poisson_t F;
  F.add_edge(network_distrib_t::edge_t(E));
  F.add_tot(network_distrib_t::tot_t(T));
#endif
  return F.log_marg();
}

// log marginal prob under the tree model
// Total log marginal probability of the tree model: the sum of each
// node's own log-marginal contribution.
template<typename D>
double log_marg_tree(btree_t<D>& Tree)
{
  double total = 0.;
  const int num_nodes = Tree.num_nodes();
  for (int j = 0; j < num_nodes; ++j)
    total += Tree.get_nth_node(j).log_marg();
  return total;
}

////////////////////////////////////////////////////////////////
// housekeeping stuff
void write_argmax(const sp_stat_mat_t& Z, ostream& fout)
{
  for (sp_stat_mat_t::row_iterator_t it = Z.begin_row(); it != Z.end_row();
      ++it)
  {
    double max_val = 0.;
    int argmax = -1;
    for (sp_stat_mat_t::col_iterator_t ci = Z.begin_col(dim_t(*it));
        ci != Z.end_col(dim_t(*it)); ++ci)
      if (ci.value() > max_val)
      {
        max_val = ci.value();
        argmax = ci.dim();
      }
    fout << *it << "\t" << argmax << endl;
  }
}

// Load a latent membership matrix from a whitespace-separated file of
// (vertex, cluster, probability) triples.  Entries with an out-of-range
// cluster index or a vertex missing from G0 are ignored.  Rows present
// in G0 but absent from the file receive a random one-hot assignment;
// loaded rows are normalized.
boost::shared_ptr<sp_stat_mat_t> read_latent(const char* latent_file,
    const collapsible_network_t& G0, const random_index_t& rIdx)
{
  boost::shared_ptr<sp_stat_mat_t> zout(new sp_stat_mat_t);
  sp_stat_mat_t& Z = *(zout.get());

  {
    ifstream ifs(latent_file, ios::in);
    int vertex, clust;
    double prob;
    while (ifs >> vertex >> clust >> prob)
    {
      if (clust < 0 || clust >= rIdx.kmax)
        continue;
      if (!G0.edges.has_row(dim_t(vertex)))
        continue;
      Z(dim_t(vertex), dim_t(clust), prob);
    }
    ifs.close();
  }

  for (sp_stat_mat_t::row_iterator_t ri = G0.edges.begin_row();
      ri != G0.edges.end_row(); ++ri)
  {
    dim_t row(*ri);
    if (Z.has_row(row))
    {
      normalize(Z.pull_row(row));
    }
    else
    {
      // fill in missing vertices with a random hard assignment
      Z.add_row(row);
      Z(row, dim_t(rIdx()), 1.);
    }
  }
  return zout;
}

