#ifndef NET_BLOCK_HH_
#define NET_BLOCK_HH_

#include "btree.hh"
#include "util.hh"
#include "random.hh"
#include "network.hh"

#include <cmath>
#include <cstddef>
#include <ostream>
#include <string>

#include <boost/lexical_cast.hpp>
#include <boost/math/special_functions.hpp>
#include <boost/shared_ptr.hpp>

////////////////////////////////////////////////////////////////
// network statistics
// Strongly-typed wrapper for an (expected) edge count, so that the two
// arguments of network_stat_t cannot be swapped by accident.
struct edge_t
{
  explicit edge_t(double v) :
      value(v)
  {
  }
  double value;
};

// Strongly-typed wrapper for the (expected) total/exposure term.
struct tot_t
{
  explicit tot_t(double v) :
      value(v)
  {
  }
  double value;
};

// Sufficient statistics of a network block: an edge count and a total
// (exposure) term. Supports accumulation via operator+=.
struct network_stat_t
{
  // zero-initialized statistics
  network_stat_t() :
      edge(0.), tot(0.)
  {
  }

  // build from the two tagged wrappers
  network_stat_t(const edge_t& e, const tot_t& t) :
      edge(e.value), tot(t.value)
  {
  }

  // element-wise accumulation
  network_stat_t&
  operator +=(const network_stat_t& other)
  {
    edge += other.edge;
    tot += other.tot;
    return *this;
  }

  // element-wise copy
  network_stat_t&
  operator =(const network_stat_t& other)
  {
    edge = other.edge;
    tot = other.tot;
    return *this;
  }

  double edge;
  double tot;
};

// Stream a network_stat_t as "(edge, tot)".
// Marked inline: this is a non-template function defined in a header, so a
// non-inline definition violates the ODR (multiple-definition link errors)
// as soon as the header is included from more than one translation unit.
inline std::ostream& operator <<(std::ostream& os, const network_stat_t& net_stat)
{
  os << "(" << net_stat.edge << ", " << net_stat.tot << ")";
  return os;
}

// Stream the pointed-to network_stat_t as "(edge, tot)".
// Marked inline for the same ODR reason as the value overload above, and the
// shared_ptr is now taken by const reference to avoid an atomic refcount
// increment/decrement on every call.
inline std::ostream& operator <<(std::ostream& os,
    const boost::shared_ptr<network_stat_t>& net_stat_pt)
{
  os << "(" << net_stat_pt->edge << ", " << net_stat_pt->tot << ")";
  return os;
}

// Combine two statistics into `out` using T's copy-assign and operator+=:
// out = lhs + rhs, without requiring T to define operator+.
template<typename T>
void merge_two(const T& lhs, const T& rhs, T& out)
{
  out = lhs;
  out += rhs;
}

// Pointer overload: dereference all three shared pointers and delegate to
// the by-reference merge above.
template<typename T>
void merge_two(boost::shared_ptr<T> src1_pt, boost::shared_ptr<T> src2_pt,
    boost::shared_ptr<T> tgt_pt)
{
  merge_two(*src1_pt, *src2_pt, *tgt_pt);
}

////////////////////////////////////////////////////////////////
// a single component of the degree corrected stochastic block model
// (1) store necessary sufficient stats
// - variational parameter
// - hyper-parameter
// (2) evaluate log probability score
template<typename T>
struct func_poisson_t
{
  // Gamma(a0, b0) prior over the Poisson rate of one block; (alpha, beta)
  // accumulate the variational posterior statistics (edge count / exposure).
  explicit func_poisson_t(double a0, double b0) :
      a0(a0), b0(b0), alpha(0.), beta(0.)
  {
  }

  // Locally collapsed variational score of `stat` under the current
  // Gamma(a0 + alpha, b0 + beta) posterior.
  // Works better than plain mean-field.
  double score(const T& stat) const
  {
    double s1 = boost::math::lgamma(a0 + alpha + stat.edge)
        - boost::math::lgamma(a0 + alpha);
    double s2 = (a0 + alpha) * std::log(b0 + beta)
        - (a0 + alpha + stat.edge) * std::log(b0 + beta + stat.tot);
    return s1 + s2;
  }

  // marginal log-likelihood score of the accumulated statistics
  double score() const
  {
    double s1 = boost::math::lgamma(a0 + alpha) - boost::math::lgamma(a0)
        - boost::math::lgamma(1. + alpha);
    double s2 = a0 * std::log(b0) - (a0 + alpha) * std::log(b0 + beta);
    return s1 + s2;
  }

  // Blend `stat` into (alpha, beta) with step size `rate` in (0, 1];
  // rate == 1 replaces the statistics outright. Returns the summed relative
  // change of alpha and beta, used by callers as a convergence signal.
  double update(const T& stat, double rate)
  {
#ifdef DEBUG
    // logical && (was bitwise &): short-circuits and states intent
    assert_msg((rate > 0) && (rate <= 1.), "rate out of bound");
#endif
    double old_alpha = alpha;
    double old_beta = beta;

    alpha = (1. - rate) * alpha + rate * stat.edge;
    beta = (1. - rate) * beta + rate * stat.tot;

    // |delta| / |old| for each parameter; std::fabs replaces the original
    // sqrt(x*x) round-trip, and the 1e-10 guards division by zero
    double ret = std::fabs(old_alpha - alpha)
        / std::sqrt(old_alpha * old_alpha + 1e-10)
        + std::fabs(old_beta - beta)
            / std::sqrt(old_beta * old_beta + 1e-10);

    return ret;
  }

  double a0;    // prior shape
  double b0;    // prior rate
  double alpha; // accumulated edge statistic
  double beta;  // accumulated total statistic
};

// Overwrite the Gamma prior (shape a0, rate b0) of a component in place.
template<typename T>
void update_hyper(func_poisson_t<T>& F, double a0, double b0)
{
  F.b0 = b0;
  F.a0 = a0;
}

// Pointer overload: forward to the by-reference version.
template<typename T>
void update_hyper(boost::shared_ptr<func_poisson_t<T> > F_pt, double a0,
    double b0)
{
  update_hyper(*F_pt, a0, b0);
}

// Expose the component's Gamma prior as a (shape, rate) tuple.
template<typename T>
boost::tuple<double, double> get_hyper(func_poisson_t<T>& F)
{
  return boost::make_tuple(F.a0, F.b0);
}

// Pointer overload: forward to the by-reference version.
template<typename T>
boost::tuple<double, double> get_hyper(
    boost::shared_ptr<func_poisson_t<T> > F_pt)
{
  return get_hyper(*F_pt);
}

// Blend `stat` into F with step size `rate`; returns F's relative change.
template<typename T>
double update_stat(func_poisson_t<T>& F, const T& stat, double rate)
{
  return F.update(stat, rate);
}

// Pointer-to-score overload: dereference and delegate.
template<typename T>
double update_stat(typename boost::shared_ptr<func_poisson_t<T> >& f_pt,
    const T& stat, double rate)
{
  return f_pt->update(stat, rate);
}

// Pointer-to-score, pointer-to-stat overload: dereference both and delegate.
template<typename T>
double update_stat(typename boost::shared_ptr<func_poisson_t<T> >& f_pt,
    const typename boost::shared_ptr<T>& stat_pt, double rate)
{
  return f_pt->update(*stat_pt, rate);
}

// evaluate score along the tree
template<typename F, typename T>
double eval(const F& f_pt, const typename boost::shared_ptr<T>& stat_pt)
{
  return f_pt->score(*(stat_pt.get()));
}

// Score a plain statistic with a plain scoring object (scorer.score()).
template<typename F, typename T>
double eval(const F& scorer, const T& stat)
{
  return scorer.score(stat);
}

// insert and remove from a pointer to map
// Insert a shared object into a shared map-like container.
template<typename Map, typename T>
void insert(typename boost::shared_ptr<Map>& map_pt,
    typename boost::shared_ptr<T>& obj)
{
  Map& m = *map_pt;
  m.insert(obj);
}

// Remove a shared object from a shared map-like container.
template<typename Map, typename T>
void remove(typename boost::shared_ptr<Map>& map_pt,
    typename boost::shared_ptr<T>& obj)
{
  Map& m = *map_pt;
  m.remove(obj);
}

// Blend the pointed-to statistic into the pointed-to score object.
template<typename score, typename T>
double update_score(typename boost::shared_ptr<score>& score_pt,
    typename boost::shared_ptr<T>& obj, double rate)
{
  return score_pt->update(*obj, rate);
}

////////////////////////////////////////////////////////////////
// Initialize tree model
// Seeds every node of a model tree with prior edge/total statistics derived
// from the network's size and degree distribution, before inference starts.
template<typename F, typename T>
struct tree_initializer_t
{
  // Derive the initialization scales from network G and the target number
  // of leaf communities K.
  explicit tree_initializer_t(const network_t& G, const size_t K)
  {
    boost::property_map<network_t, boost::vertex_degree_t>::const_type degree =
        boost::get(boost::vertex_degree, G);
    boost::graph_traits<network_t>::vertex_iterator vi, vEnd;

    // total degree over all vertices
    double deg_tot = 0.;
    for (boost::tie(vi, vEnd) = boost::vertices(G); vi != vEnd; ++vi)
    {
      deg_tot += boost::get(degree, *vi);
    }
    double m = boost::num_edges(G);
    double n = boost::num_vertices(G);
    // half the average degree mass per edge (deg_tot counts each edge twice)
    scale = deg_tot / m * 0.5;
    // expected community size n/K, clamped to at least 2 vertices
    nk = std::max(2., n / std::min(n * 0.5, (double) K));
  }

  // Fill every tree node's statistics, starting the recursion at the root.
  void operator()(btree_t<F>& Tree)
  {
    double h, n; // height / subtree size out-params (unused at the top level)
    initializer(Tree.root_node_iterator(), h, n);
  }

private:
  typedef typename btree_t<F>::node_iterator_t node_iterator_t;

  // Post-order recursion: seed node r with prior (edge, total) counts and
  // report its height and effective vertex count through the out-params.
  void initializer(node_iterator_t& r, double& height, double& n)
  {
    if (r.is_leaf())
    {
      // a leaf covers ~nk*(nk-1)/2 within-community pairs; a fraction p of
      // them is expected to carry edges, scaled by the network density
      double p = 0.99;
      double e = scale * p * nk * (nk - 1.) * .5;
      double t = nk * (nk - 1.) * .5;
      height = 0.;
      n = nk;

      // std::cerr << height << ": " << e << ", " << t << std::endl;

      // rate 1. overwrites the node's statistics outright
      update_stat(r.data, T(edge_t(e), tot_t(t)), 1.);
    }
    else
    {
      // initialize both subtrees first, then combine
      double hL, hR, nL, nR;
      initializer(r.get_left(), hL, nL);
      initializer(r.get_right(), hR, nR);
      height = std::max(hL, hR) + 1.;
      n = nL + nR;
      // cross-community edge probability shrinks geometrically (2^-height)
      // as we move toward the root; floored to stay strictly positive
      double p = 0.99 * std::pow(2., -height);
      p = std::max(p, 1e-10);
      double e = scale * p * nL * nR;
      double t = nL * nR;

      // std::cerr << height << ": " << e << ", " << t << std::endl;

      update_stat(r.data, T(edge_t(e), tot_t(t)), 1.);
    }
  }
  double scale; // sum A_ij / m
  double nk;    // n / K
};

////////////////////////////////////////////////////////////////
// find argmax path for each vertex
// assign argmax color to vertices
// Assigns each vertex the leaf color that maximizes the scored root-to-leaf
// path in the model tree, iterating sweeps over the graph until the coloring
// stabilizes.
template<typename NetScore, typename NetStat>
struct tree_path_finder_t
{
  typedef std::vector<double> vec_t;
  typedef typename boost::shared_ptr<NetStat> net_stat_ptr;
  typedef typename btree_t<NetScore>::node_iterator_t node_iterator_t;

  // Bind to the model tree and allocate per-color and per-node scratch space.
  explicit tree_path_finder_t(btree_t<NetScore>& tree) :
      model_tree(tree),         // model tree
      CDeg(tree.num_leaves()),  // color-specific degree
      CVol(tree.num_leaves()),  // color-specific volume
      d(0), VolTot(0), i(-1)    // network degree and vol tot, and index
  {
#ifdef DEBUG
    assert_msg(CVol.size() == CDeg.size(), "must have the same size K");
#endif
    // prepare tree statistics map: one scratch stat per tree node,
    // indexed by the node's hash
    net_stat_map.clear();
    for (int j = 0; j < tree.num_nodes(); ++j)
      net_stat_map.push_back(net_stat_ptr(new NetStat));
  }

  // 1. calibrate color-volume   O(n)
  // 2. for each i in G and      O(n)
  //  i. calculate color-degree  O(d*k)
  //  ii. discount color-volume
  //  iii. run max_path algorithm
  //  iv. readjust membership
  //      color-volume
  // 3. repeat until convergence
  void operator()(network_t& G)
  {
    make_color_volume(G, CVol);
    VolTot = std::accumulate(CVol.begin(), CVol.end(), 0.);

    boost::property_map<network_t, boost::vertex_degree_t>::type degree =
        boost::get(boost::vertex_degree, G);

    boost::property_map<network_t, boost::vertex_color_t>::type color =
        boost::get(boost::vertex_color, G);

    // NOTE(review): K is computed but not referenced below — confirm
    const int K = model_tree.num_leaves();
    const int max_iter = 1000;
    int kMin, kMax;
    int argmax_old, argmax;

    boost::graph_traits<network_t>::vertex_iterator vi, vEnd;

    // sliding window of recent sweep scores for the convergence test
    const int num_scr = 4;
    const double tol = 1e-3;
    vec_t score_vec(num_scr, 0.);

    for (int iter = 1; iter <= max_iter; ++iter)
    {
      double scr_sum = 0.;
      int num_change = 0;
      for (boost::tie(vi, vEnd) = boost::vertices(G); vi != vEnd; ++vi)
      {
        make_color_degree(*vi, G, CDeg, kMin, kMax); // i. calculate color-degree
        if (!has_neighbor(kMin, kMax))               // skip the isolated one
          continue;

        i = *vi;
        d = boost::get(degree, *vi);                 // ii. discount volume
        argmax_old = boost::get(color, *vi);         // previous assignment
        CVol[argmax_old] -= d;                       // by d_i

        // iii. find max path; only the subtree spanning the touched colors
        // [kMin, kMax] needs to be visited
        node_iterator_t& r = model_tree.get_lca_node_iterator(kMin, kMax);
        scr_sum += max_path(r, argmax);

        CVol[argmax] += d;                           // re-adjust volume
        erase_color_degree(*vi, G, CDeg);            // clean up data structure

        if (argmax != argmax_old)
        {
          boost::put(color, *vi, argmax);              // change vertex color
          num_change++;
        }
      }

      // no vertex moved: the coloring is a fixed point
      if (num_change == 0)
        break;

      // relative change of the windowed average score
      double old_avg = accumulate(score_vec.begin(), score_vec.end(), 0.)
          / ((double) num_scr);
      score_vec[iter % num_scr] = scr_sum;
      double new_avg = accumulate(score_vec.begin(), score_vec.end(), 0.)
          / ((double) num_scr);
      double delta = std::sqrt((old_avg - new_avg) * (old_avg - new_avg));
      delta /= std::max(1., std::sqrt(old_avg * old_avg));

      if (delta < tol)
        break;

      std::cerr << "\r" << "iter=" << std::setw(20) << iter << ", score="
          << std::setw(20) << scr_sum;
    }
    std::cerr << std::endl;
  }

private:
  btree_t<NetScore> & model_tree;
  vec_t CDeg;                          // per-color degree of the current vertex
  vec_t CVol;                          // per-color volume of the whole graph
  std::vector<net_stat_ptr> net_stat_map; // per-node scratch statistics

  double d;      // degree of the vertex currently being processed
  double VolTot; // total volume of the graph
  int i;         // index of the vertex currently being processed

  // we only do argmax path
  // Bottom-up pass over the subtree rooted at r: fills net_stat_map for each
  // node, returns the best path score and sets `argmax` to the best leaf.
  double max_path(node_iterator_t& r, int& argmax)
  {
    if (r.is_leaf()) // base case
    {
      int k = r.leaf_idx();
      argmax = k;
      net_stat_ptr net_ptr = net_stat_map.at(r.hash());
      NetStat& net = *(net_ptr.get());
      // within-color statistic: degree into color k, volume-weighted exposure
      net.edge = CDeg[k];
      net.tot = d * CVol[k] / VolTot;
      return eval(r.data, net_ptr);
    }

    // to the left and right
    node_iterator_t& left = r.get_left();
    node_iterator_t& right = r.get_right();
    int argmax_left, argmax_right;

    double score_left = max_path(left, argmax_left);
    double score_right = max_path(right, argmax_right);

    // score assigning current to the left
    // relation with the right
    score_left += eval(r.data, net_stat_map[right.hash()]);

    // score assigning current to the right
    // relation with the left
    score_right += eval(r.data, net_stat_map[left.hash()]);

    // merge two stats from the left and right for the parent's evaluation
    merge_two(net_stat_map[left.hash()], net_stat_map[right.hash()],
        net_stat_map[r.hash()]);

    // keep the better of the two branches
    if (score_left > score_right)
    {
      argmax = argmax_left;
      return score_left;
    }
    else
    {
      argmax = argmax_right;
      return score_right;
    }
  }
};

////////////////////////////////////////////////////////////////
// update tree model
// Recomputes the sufficient statistics of every tree node from the current
// vertex coloring of the network and blends them into the model tree with a
// given learning rate.
template<typename F, typename T>
struct tree_updater_t
{
  typedef std::vector<double> vec_t;
  typedef typename boost::shared_ptr<T> net_stat_ptr;
  typedef typename btree_t<F>::node_iterator_t node_iterator_t;

  // Bind to the model tree and allocate per-color vectors plus two per-node
  // statistic maps (accumulator + scratch pad).
  explicit tree_updater_t(btree_t<F>& tree) :
      model_tree(tree), K(tree.num_leaves()), CDeg(K, 0), CVol(K, 0), CSz(K, 0)
  {
    d = 0;
    VolTot = 0;
    current_k = -1;

    // tree statistics map: accumulates the new statistics per node
    net_stat_map.clear();
    for (int j = 0; j < tree.num_nodes(); ++j)
      net_stat_map.push_back(net_stat_ptr(new T));

    // just temporary scratch pad
    tmp_stat_map.clear();
    for (int j = 0; j < tree.num_nodes(); ++j)
      tmp_stat_map.push_back(net_stat_ptr(new T));
  }

  // One full update: gather edge/total statistics from G under the current
  // coloring, then blend them into the tree with step size `rate`.
  // Returns the average relative change per tree node.
  double operator()(network_t& G, double rate = 1.)
  {
    boost::property_map<network_t, boost::vertex_color_t>::type color =
        boost::get(boost::vertex_color, G);

    boost::property_map<network_t, boost::vertex_degree_t>::type degree =
        boost::get(boost::vertex_degree, G);

    // zero both statistic maps over the whole tree
    clear(model_tree.root_node_iterator());

    boost::graph_traits<network_t>::vertex_iterator vi, vEnd;

    make_color_size(G, CSz);
    make_color_volume(G, CVol);
    VolTot = std::accumulate(CVol.begin(), CVol.end(), 0.);

    // std::cerr << std::accumulate(CSz.begin(), CSz.end(), 0.) << std::endl;
    // std::cerr << VolTot << std::endl;

    // collect statistics
    int kMin, kMax;
    for (boost::tie(vi, vEnd) = boost::vertices(G); vi != vEnd; ++vi)
    {
      make_color_degree(*vi, G, CDeg, kMin, kMax); // i. calculate color-degree
      if (!has_neighbor(kMin, kMax))               // skip the isolated one
        continue;

      d = boost::get(degree, *vi);

      // update bottom (the leaf for this vertex's own color)
      current_k = boost::get(color, *vi);
      {
        int hash = model_tree.get_leaf_node_iterator(current_k).hash();
        T& stat = *(net_stat_map[hash].get());

        // E_k = sum_ij A_ij z_ik z_jk /2
        //     = sum_i d_ik z_ik /2
        stat.edge += CDeg[current_k] * 0.5;

        // T_k = sum_ij I[i!=j] d_i z_ik d_j z_jk / 2 / 2m
        //     = 1/2 sum_i d_i z_ik sum_{j!=i} d_j z_jk / 2m
        //     = 1/2 sum_i d_i z_ik ( vol_k - d_i z_ik ) / 2m
        stat.tot += 0.5 * d * (CVol[current_k] - d) / VolTot;
      }

      // cross-community edges only live under the LCA of the touched colors
      collect_internal_edge(model_tree.get_lca_node_iterator(kMin, kMax));
      // collect_internal_edge(model_tree.root_node_iterator());

      erase_color_degree(*vi, G, CDeg); // clean up data structure
    }

    // collect total stat
    collect_internal_total(model_tree.root_node_iterator());

    // update new stat
    double ret = update_to_tree(model_tree.root_node_iterator(), rate);
    ret /= ((double) model_tree.num_nodes());
    return ret;
  }

private:
  btree_t<F> & model_tree;
  const int K;                            // number of leaf communities
  vec_t CDeg;                             // per-color degree of current vertex
  vec_t CVol;                             // per-color volume
  vec_t CSz;                              // per-color vertex count
  std::vector<net_stat_ptr> net_stat_map; // accumulated per-node statistics
  std::vector<net_stat_ptr> tmp_stat_map; // per-node scratch pad
  int current_k;                          // color of the vertex in progress

  // altered leaf-level
  // std::vector<bool> altered;

  // update to the tree
  // Pre-order pass: blend each node's accumulated statistic into its score
  // object; returns the summed relative change over the subtree.
  double update_to_tree(node_iterator_t& r, const double rate)
  {
    T& stat = *(net_stat_map[r.hash()].get());
    double ret = update_stat(r.data, stat, rate);

    if (r.is_leaf())
    {
      return ret;
    }
    ret += update_to_tree(r.get_left(), rate);
    ret += update_to_tree(r.get_right(), rate);
    return ret;
  }

  // for each i in [n]
  // - assume CDeg and CSz are ready
  // Bottom-up pass accumulating the cross-subtree edge count contributed by
  // the current vertex i into each internal node.
  void collect_internal_edge(node_iterator_t& r)
  {
    if (r.is_leaf())
    {
      // scratch per-leaf: edge = degree of i into this color,
      // tot = indicator that i itself carries this color
      T& stat_tmp = *(tmp_stat_map[r.hash()].get());
      stat_tmp.edge = CDeg[r.leaf_idx()];
      stat_tmp.tot = r.leaf_idx() == current_k ? 1. : 0.;
      return;
    }

    node_iterator_t& left = r.get_left();
    node_iterator_t& right = r.get_right();
    collect_internal_edge(left);
    collect_internal_edge(right);

    merge_two(tmp_stat_map[left.hash()], tmp_stat_map[right.hash()],
        tmp_stat_map[r.hash()]);

    // increase edge count
    // E = sum_ij A_ij sum_{l in L} sum_{r in R} z_il z_jr
    //   = sum_i sum_{l in L} z_il sum_{r in R} sum_j A_ij z_jr
    //   = sum_i n_iL d_iR
    double n_iL = tmp_stat_map[left.hash()]->tot;
    double d_iR = tmp_stat_map[right.hash()]->edge;
    T& stat = *(net_stat_map[r.hash()].get());
    stat.edge += d_iR * n_iL;
  }

  // VolTot is calculated
  // Bottom-up pass setting each internal node's total term from the volumes
  // of its two subtrees: vol_L * vol_R / VolTot.
  void collect_internal_total(node_iterator_t& r)
  {
    T& stat_tmp = *(tmp_stat_map[r.hash()].get());
    if (r.is_leaf())
    {
      stat_tmp.edge = 0;
      stat_tmp.tot = CVol[r.leaf_idx()];
      return;
    }
    node_iterator_t& left = r.get_left();
    node_iterator_t& right = r.get_right();
    collect_internal_total(left);
    collect_internal_total(right);

    merge_two(tmp_stat_map[left.hash()], tmp_stat_map[right.hash()],
        tmp_stat_map[r.hash()]);

    double vL = tmp_stat_map[left.hash()]->tot;
    double vR = tmp_stat_map[right.hash()]->tot;

    T& stat = *(net_stat_map[r.hash()].get());
    stat.tot = vL * vR / VolTot;
  }

  // Zero both statistic maps for the whole subtree rooted at r.
  void clear(node_iterator_t& r)
  {
    T& stat = *(net_stat_map[r.hash()].get());
    T& tmp_stat = *(tmp_stat_map[r.hash()].get());
    stat.edge = 0.;
    stat.tot = 0.;
    tmp_stat.edge = 0.;
    tmp_stat.tot = 0.;

    if (!r.is_leaf())
    {
      clear(r.get_left());
      clear(r.get_right());
    }
  }

  double d;      // degree of the vertex currently being processed
  double VolTot; // total volume of the graph

};

////////////////////////////////////////////////////////////////
// hyper-parameter tuning of poisson-gamma
// use Clayton & Kaldor, Biometrics (1987)
// basically use moment-matching
// Empirical-Bayes tuning of the shared Gamma(a, b) prior over all Poisson
// components in `func_vec`, via the Clayton & Kaldor (1987) moment-matching
// iteration. Empty components are excluded from the moments. On return every
// component's hyper-parameters are overwritten with the fitted (a, b), which
// is also returned as a (shape, rate) tuple.
template<typename T>
boost::tuple<double, double> empirical_bayes_gamma(
    typename std::vector<T>& func_vec)
{
  assert_msg(func_vec.size() > 0, "must have at least one component");

  // start from the hyper-parameters currently shared by the components
  double a_old, b_old;
  boost::tie(a_old, b_old) = get_hyper(func_vec[0]);

  // count informative (non-empty) components
  double n = 0.;
  for (std::size_t i = 0; i < func_vec.size(); ++i)
  {
    if (is_empty_gamma(func_vec[i]))
      continue;
    ++n;
  }

  double a = a_old, b = b_old;

  // moment matching needs at least two components for a variance estimate
  if (n > 1)
  {
    for (int t = 0; t < 1000; ++t)
    {
      // lmd_i = (a+alpha_i)/(b+beta_i)
      // a/b   = 1/n * sum_i lmd_i
      double mean_lmd = 0.;
      for (std::size_t i = 0; i < func_vec.size(); ++i)
      {
        if (is_empty_gamma(func_vec[i]))
          continue;
        mean_lmd += get_lambda_map(func_vec[i]);
      }
      mean_lmd /= n;

      // a/b^2 = 1/(n-1) * sum_i (1+b/beta_i)(lmd_i - a/b)^2
      double var_lmd = 0.;
      for (std::size_t i = 0; i < func_vec.size(); ++i)
      {
        if (is_empty_gamma(func_vec[i]))
          continue;
        double tot = get_tot_count(func_vec[i]);
        double lmd = get_lambda_map(func_vec[i]);
        double diff = lmd - mean_lmd; // diff*diff avoids std::pow(x, 2.)
        var_lmd += (1. + b / tot) * diff * diff;
      }
      var_lmd /= (n - 1.);

      // guard against a degenerate (zero) variance estimate
      if (var_lmd <= 0.)
        var_lmd += 1e-10;

      // match moments of Gamma(a, b): mean = a/b, variance = a/b^2
      b = mean_lmd / var_lmd;
      a = mean_lmd * b;

      // converged when both hyper-parameters stop moving
      // (std::fabs replaces the original sqrt(x*x) round-trip)
      if (std::fabs(a - a_old) + std::fabs(b - b_old) < 1e-4)
        break;
      a_old = a;
      b_old = b;
    }
  }

  // push the tuned prior back into every component
  for (std::size_t i = 0; i < func_vec.size(); ++i)
    update_hyper(func_vec[i], a, b);

  return boost::make_tuple(a, b);
} // end of empirical bayes

// A component with (numerically) zero accumulated edge mass is "empty" and
// carries no information for hyper-parameter tuning.
template<typename T>
bool is_empty_gamma(func_poisson_t<T>& F)
{
  const double eps = 1e-5;
  return F.alpha < eps;
}

// Pointer overload: forward to the by-reference version.
template<typename T>
bool is_empty_gamma(boost::shared_ptr<func_poisson_t<T> >& F_pt)
{
  return is_empty_gamma(*F_pt);
}

// Accumulated exposure of the component; the tiny epsilon keeps downstream
// divisions finite when beta is exactly zero.
template<typename T>
double get_tot_count(func_poisson_t<T>& F)
{
  const double eps = 1e-10;
  return F.beta + eps;
}

// Pointer overload: forward to the by-reference version.
template<typename T>
double get_tot_count(boost::shared_ptr<func_poisson_t<T> > F_pt)
{
  return get_tot_count(*F_pt);
}

// Posterior mean of the Poisson rate: shape / rate of Gamma(a0+alpha, b0+beta).
template<typename T>
double get_lambda_map(func_poisson_t<T>& F)
{
  const double shape = F.alpha + F.a0;
  const double rate = F.beta + F.b0;
  return shape / rate;
}

// Pointer overload: forward to the by-reference version.
template<typename T>
double get_lambda_map(boost::shared_ptr<func_poisson_t<T> > F_pt)
{
  return get_lambda_map(*F_pt);
}

// E[log lambda] under Gamma(a0+alpha, b0+beta): digamma(shape) - log(rate).
template<typename T>
double get_log_lambda_map(func_poisson_t<T>& F)
{
  const double shape = F.alpha + F.a0;
  const double rate = F.beta + F.b0;
  return boost::math::digamma(shape) - std::log(rate);
}

// Pointer overload: forward to the by-reference version.
// BUG FIX: this previously called the undefined get_log_lambda_mle(), which
// fails to compile as soon as the overload is instantiated.
template<typename T>
double get_log_lambda_map(boost::shared_ptr<func_poisson_t<T> > F_pt)
{
  return get_log_lambda_map(*(F_pt.get()));
}

#endif
