// truncated dirichlet process mixture
// (c) Yongjin Park, 2013
#ifndef DPM_HH_
#define DPM_HH_

#include "util.hh"
#include "random.hh"
#include "latent.hh"
#include "sampler.hh"

#include <iostream>
#include <algorithm>
#include <boost/ptr_container/ptr_map.hpp>
#include <boost/ptr_container/ptr_vector.hpp>
#include <boost/assign/ptr_map_inserter.hpp>
#include <vector>
#include <boost/unordered_map.hpp>

// Hard-assign a handful of observations to each of the K components,
// uniformly at random.  At most (pernum * K) rows of Z are touched;
// each touched row becomes a weight-1 assignment to component (i mod K).
void random_seeding(latent_matrix_t& Z, size_t K, size_t pernum = 5)
{
  const size_t n = Z.size();

  // build a random permutation of the row indices 0 .. n-1
  std::vector<int> order;
  order.reserve(n);
  for (size_t pos = 0; pos < n; ++pos)
    order.push_back(pos);

  std::random_shuffle(order.begin(), order.end());

  const size_t num_seed = std::min(pernum * K, n);
  for (size_t s = 0; s < num_seed; ++s)
  {
    latent_matrix_t::vec_t& row = Z(order[s]);
    row.clear();      // drop any previous soft assignment
    row[s % K] = 1.;  // hard round-robin assignment over components
  }
}

// Dirichlet Process Mixture
// prior distribution (truncated version)
//
// Stick-breaking representation truncated at T components.  Each stick
// t carries a variational Beta(u_t, v_t) posterior; operator() converts
// the current (u, v) into expected log mixture weights, and update()
// refits (u, v) from a soft assignment matrix Z.
struct tdpm_t
{
  typedef std::vector<double> vec_t;

  // T  : truncation level (number of mixture components), must be > 0
  // a0 : DP concentration hyper-parameter
  explicit tdpm_t(int T, double a0) :
      T(T), a0(a0), u_vec(T, 0.), v_vec(T, 0.), stat(T, 0.), prior(T, 0.)
  {
    assert_msg(T > 0, "at least one component");
  }

  // locally collapsed variational inference
  //
  // Returns log prior weights
  //   prob[t] = u_t / (u_t + v_t) * prod_{s<t} v_s / (u_s + v_s)
  // NOTE: call update() at least once before this; with the initial
  // u = v = 0 the expression evaluates log(0).
  const vec_t&
  operator()()
  {
    // todo: can include t > T
    double cum = 0.;  // running sum of log(v_s / (u_s + v_s)), s < t
    const size_t Tn = static_cast<size_t>(T);  // avoid signed/unsigned mix
    for (size_t t = 0; t < Tn; ++t)
    {
      double log_denom = std::log(u_vec[t] + v_vec[t]);
      prior[t] = std::log(u_vec[t]) - log_denom + cum;
      cum += std::log(v_vec[t]) - log_denom;
    }
    return prior;
  }

  // update variational parameters from soft assignments Z
  //
  // rate        : step size in [0, 1] (1 = full replacement)
  // size_factor : scales the sufficient statistics (used by minibatch
  //               updates to extrapolate to the full data set)
  // Returns the summed absolute change in (u, v): a convergence signal.
  double update(const latent_matrix_t& Z, double rate = 1., double size_factor =
      1.)
  {
    assert_msg(rate <= 1. && rate >= 0., "rate in [0, 1]");

    const size_t Tn = static_cast<size_t>(T);

    // stat[k] = sum_i z_ik; components beyond the truncation are ignored
    std::fill(stat.begin(), stat.end(), 0.);
    for (size_t i = 0; i < Z.size(); ++i)
    {
      const latent_matrix_t::vec_t& z_i = Z(i);
      for (latent_matrix_t::vec_t::const_iterator it = z_i.cbegin();
          it != z_i.cend(); ++it)
      {
        size_t k = it->first;
        double z_ik = it->second;
        if (k >= Tn)
          continue;
        stat[k] += z_ik;
      }
    }

    double delt = 0.;

    // u[t] = 1 + sum_i z_it
    // v[t] = a + sum_{l > t} sum_i z_il
    double Ntot = std::accumulate(stat.begin(), stat.end(), 0.);
    double cumsum = 0.;
    for (size_t t = 0; t < Tn; ++t)
    {
      cumsum += stat[t];
      double u_old = u_vec[t];
      double v_old = v_vec[t];
      u_vec[t] = (1. - rate) * u_old + rate * (1. + stat[t]) * size_factor;
      v_vec[t] = (1. - rate) * v_old
          + rate * (a0 + Ntot - cumsum) * size_factor;

      // was sqrt(x*x): fabs states the intent and skips a needless sqrt
      delt += std::fabs(u_old - u_vec[t]);
      delt += std::fabs(v_old - v_vec[t]);
    }
    return delt;
  }

  int T;       // current truncation level
  double a0;   // hyper-parameter

private:
  vec_t u_vec; // variational parameters (w/o 1)
  vec_t v_vec; // variational parameters (w/o a0)
  vec_t stat;  // number of observations (temporary)
  vec_t prior; // resulting prior probability
};

// =============== initialization ===============
//
// Seed the K components before the main fitting loop:
//  1. random seeding: the first (pernum * K) points of a random
//     permutation are hard-assigned round-robin, recorded both in Z
//     and in the per-component pair_sets; each component is then fit
//     once on its seeds.
//  2. greedy seeding: each remaining point is added to the component
//     with the highest score() under those seed fits.
//
// data_vec  : n observations (shared pointers, not modified)
// pair_sets : per-component weighted observation sets (filled here)
// components: K component models (refit on the random seeds)
// Z         : latent assignment matrix (only random seeds are written)
//
// NOTE(review): greedily seeded points go into pair_sets but their Z
// rows are left untouched and no explicit weight is passed to insert()
// -- presumably the caller re-estimates Z immediately afterwards;
// confirm this asymmetry is intended.
template<typename T, typename F>
void initialize(const std::vector<boost::shared_ptr<T> >& data_vec,
    boost::ptr_vector<expr_ptr_set_t<T> >& pair_sets,
    boost::ptr_vector<F>& components, latent_matrix_t& Z, const size_t n,
    const size_t K, const size_t pernum = 5)
{

  std::vector<int> idx;

  for (size_t i = 0; i < n; ++i)
    idx.push_back(i);

  std::random_shuffle(idx.begin(), idx.end());

  // random seeding
  size_t num_rand = std::min(pernum * K, n);
  for (size_t i = 0; i < num_rand; ++i)
  {
    latent_matrix_t::vec_t& z_i = Z(idx[i]);
    z_i.clear();
    int k = i % K; // round-robin component id
    z_i[k] = 1.;

    pair_sets[k].insert(data_vec[idx[i]], 1.);
  }

  // fit each component on its randomly assigned seeds
  for (size_t k = 0; k < K; ++k)
    components[k].update(pair_sets[k]);

  // greedy seeding
  for (size_t i = num_rand; i < n; ++i)
  {
    const T& expr = *data_vec.at(idx[i]).get();
    // argmax over component scores for this point
    size_t argmax = 0;
    double maxval = components[argmax].score(expr);
    for (size_t k = 1; k < K; ++k)
    {
      double curr = components[k].score(expr);
      if (curr > maxval)
      {
        maxval = curr;
        argmax = k;
      }
    }
    pair_sets[argmax].insert(data_vec.at(idx[i]));
  }
}

////////////////////////////////////////////////////////////////
// Element access helper: a boost::ptr_vector stores owned pointers but
// already indexes as plain object references.
template<typename T>
T& get_obj(boost::ptr_vector<T>& obj_vec, size_t k)
{
  T& obj = obj_vec[k];
  return obj;
}

// Element access helper: dereference the shared_ptr held at slot k so
// callers see a plain reference, matching the ptr_vector overload.
template<typename T>
T& get_obj(std::vector<boost::shared_ptr<T> >& obj_vec, size_t k)
{
  boost::shared_ptr<T>& sp = obj_vec[k];
  return *sp;
}

// Run the element-wise empirical_bayes() on every object held in the
// container and return the mean change (a convergence signal).
//
// Container : boost::ptr_vector<T> or std::vector<boost::shared_ptr<T> >
//             (dispatched through the get_obj overloads above)
// rate      : step size forwarded to each element's update
// Returns 0 for an empty container; dividing by size() unguarded would
// produce 0/0 = NaN and defeat any "< tol" convergence test.
template<typename Container>
double empirical_bayes( Container& obj_vec, double rate = 1. )
{
  if (obj_vec.size() == 0)
    return 0.;

  double diff = 0.;
  for(size_t j=0; j<obj_vec.size(); ++j)
    diff += empirical_bayes( get_obj( obj_vec, j ), rate );

  return diff / (double) obj_vec.size();
}

////////////////////////////////////////////////////////////////
// training
//
// Full-batch variational fit of a truncated Dirichlet process mixture.
// Alternates (a) sampling-based re-estimation of the soft assignments
// z_i, (b) component/DPM parameter updates with a decaying step size,
// and (c) an outer empirical-Bayes pass over component hyper-parameters.
//
// data_vec  : observations (shared pointers; not modified)
// components: K mixture components, refit in place
// max_iter  : cap on both the outer (empirical Bayes) and the inner
//             (assignment/parameter) iterations
// sample    : number of Monte Carlo draws per point when estimating z_i
// tol       : convergence tolerance on the mean parameter change
// Returns the (approximate) data log-likelihood under the final fit.
template<typename T, typename F>
double fit_dpm(const std::vector<boost::shared_ptr<T> >& data_vec,
    boost::ptr_vector<F>& components, const int max_iter, const int sample,
    const double tol)
{
  const int n = data_vec.size();
  const int K = components.size();

  double a0 = 1.;                // hyper-parameter for DPM

  sampler_t sampler;             // sampler with specified probabilities
  latent_matrix_t Z(n);          // latent assignment matrix
  tdpm_t dpm(K, a0);             // dirichlet process mixture

  boost::ptr_vector<expr_ptr_set_t<T> > train_sets;
  for (int k = 0; k < K; ++k)
    train_sets.push_back(new expr_ptr_set_t<T>);

  const size_t pernum = 5;
  initialize<T, F>(data_vec, train_sets, components, Z, n, K, pernum);

  double rate = 1.;
  double eb_rate = 1.;
  const double rate_discount = 0.55;  // Robbins-Monro decay exponent
  const double t0 = 1.;               // decay offset

  for (int k = 0; k < K; ++k)                   //
    components[k].update(train_sets[k], rate);   // initial update of components
  dpm.update(Z, rate);                          // & DPM probability

  typedef tdpm_t::vec_t vec_t;
  vec_t log_prob(K, 0.);
  vec_t count(K);

  // int loop counters throughout: max_iter / n / K are ints, and the
  // previous size_t counters triggered signed/unsigned comparisons
  for (int eb_iter = 1; eb_iter <= max_iter; ++eb_iter)
  {

    eb_rate = std::pow((double) eb_iter + t0, -rate_discount);

    for (int opt_iter = 1; opt_iter <= max_iter; ++opt_iter)
    {

      rate = std::pow(((double) opt_iter) + t0, -rate_discount);

      const vec_t& prior = dpm();

      // re-estimate z_i by sampling
      for (int i = 0; i < n; ++i)
      {
        // retract point i's current soft membership before re-sampling
        latent_matrix_t::vec_t& z_i = Z(i);
        for (latent_matrix_t::vec_t::iterator kt = z_i.begin(); kt != z_i.end();
            ++kt)
          train_sets[kt->first].remove(data_vec[i]);
        z_i.clear();

        const T& data_pt = *data_vec.at(i).get();
        std::fill(count.begin(), count.end(), 0.);

        // unnormalized log posterior over components for this point
        for (int k = 0; k < K; ++k)
          log_prob[k] = prior.at(k) + components[k].score(data_pt);

        // Monte Carlo estimate of the posterior via S draws
        double S = (double) sample;
        for (int s = 1; s <= sample; ++s)
        {
          int k = sampler.log_sample(log_prob);
          count[k]++;
        }

        // re-insert point i with its new soft weights
        for (int k = 0; k < K; ++k)
        {
          if (count[k] > 0)
          {
            z_i[k] = count[k] / S;
            train_sets[k].insert(data_vec[i], z_i[k]);
          }
        }
      }

      // refit components and stick-breaking weights; delt tracks the
      // mean absolute parameter change for convergence
      double delt = 0.;
      for (int k = 0; k < K; ++k)
        delt += components[k].update(train_sets[k], rate);

      delt += dpm.update(Z, rate);
      delt /= (double) K;

      TLOG(std::setw(15) << rate << std::setw(15) << delt);

      if (delt < tol)
        break;
    }

    // outer loop: empirical-Bayes refit of component hyper-parameters
    double eb_delt = empirical_bayes(components, eb_rate);

    if (eb_delt < tol)
      break;

    TLOG("empirical Bayes " << eb_delt << " > " << tol)
  }

  // compute log-likelihood by re-sampling every point once more with
  // the final parameters
  double llik = 0.;
  const vec_t& prior = dpm();

  for (size_t k = 0; k < train_sets.size(); ++k)
    train_sets[k].clear();

  // re-estimate z_i by sampling
  for (int i = 0; i < n; ++i)
  {
    latent_matrix_t::vec_t& z_i = Z(i);
    z_i.clear();

    const T& data_pt = *data_vec.at(i).get();
    std::fill(count.begin(), count.end(), 0.);

    double S = (double) sample;
    for (int k = 0; k < K; ++k)
      log_prob[k] = prior.at(k) + components[k].score(data_pt);

    for (int s = 1; s <= sample; ++s)
    {
      int k = sampler.log_sample(log_prob);
      count[k]++;
    }

    for (int k = 0; k < K; ++k)
    {
      if (count[k] > 0)
      {
        z_i[k] = count[k] / S;
        llik += z_i[k] * log_prob[k];
        train_sets[k].insert(data_vec[i], z_i[k]);
      }
    }
  }

  // re-estimate parameters on the full soft assignments
  for (int k = 0; k < K; ++k)
    components[k].update(train_sets[k]);

  return llik;
}

////////////////////////////////////////////////////////////////
// online training
//
// Stochastic (minibatch) variant of fit_dpm().  Each epoch sweeps the
// data in chunks of `minibatch` points; component and DPM updates are
// scaled by D/S (full data size over minibatch size) so the minibatch
// statistics extrapolate to the whole data set.
//
// data_vec  : observations (shared pointers; not modified)
// components: K mixture components, refit in place
// max_iter  : maximum number of empirical-Bayes epochs
// sample    : number of Monte Carlo draws per point when estimating z_i
// tol       : convergence tolerance on the empirical-Bayes change
// minibatch : minibatch size; values < 1 fall back to full batch
//             (previously s += minibatch with minibatch == 0 looped
//             forever)
// Returns the (approximate) data log-likelihood under the final fit.
template<typename T, typename F>
double fit_dpm_online(const std::vector<boost::shared_ptr<T> >& data_vec,
    boost::ptr_vector<F>& components, const int max_iter, const int sample,
    const double tol, const int minibatch)
{
  size_t n = data_vec.size();
  const int K = components.size();

  double a0 = 1.;                // hyper-parameter for DPM

  sampler_t sampler;             // sampler with specified probabilities
  latent_matrix_t Z(n);          // latent assignment matrix
  tdpm_t dpm(K, a0);             // dirichlet process mixture

  boost::ptr_vector<expr_ptr_set_t<T> > train_sets;
  for (int k = 0; k < K; ++k)
    train_sets.push_back(new expr_ptr_set_t<T>);

  const size_t pernum = 5;
  initialize<T, F>(data_vec, train_sets, components, Z, n, K, pernum);

  double rate = 1.;
  double eb_rate = 1.;
  const double rate_discount = 0.55;  // Robbins-Monro decay exponent
  const double t0 = 1.;               // decay offset

  for (int k = 0; k < K; ++k)                   //
    components[k].update(train_sets[k], rate);  // initial update of components
  dpm.update(Z, rate);                          // & DPM probability

  typedef tdpm_t::vec_t vec_t;
  vec_t log_prob(K, 0.);
  vec_t count(K);

  // visitation order of the data points
  // TODO(review): this order is never re-shuffled between epochs, so
  // minibatches are deterministic sequential chunks; confirm whether a
  // per-epoch shuffle was intended for truly stochastic updates.
  std::vector<int> data_order;
  for (size_t j = 0; j < n; ++j)
    data_order.push_back(j);

  // guard against minibatch < 1 (see header comment)
  const size_t m = (minibatch > 0) ? static_cast<size_t>(minibatch) : n;

  for (int eb_iter = 1; eb_iter <= max_iter; ++eb_iter)
  {

    eb_rate = std::pow((double) eb_iter + t0, -rate_discount);

    for (size_t s = 0; s < n; s += m)
    {
      // learning rate decays with the minibatch position in the epoch
      double t = 1. + ((double) s);
      rate = std::pow(t + t0, -rate_discount);
      const vec_t& prior = dpm();

      // re-estimate some of observations
      // online learning: minibatch statistics start from scratch
      for (size_t k = 0; k < train_sets.size(); ++k)
        train_sets[k].clear();

      double S = 0.;   // actual minibatch size
      double D = n;    // full data size

      // estimate each point by sampling
      // construct train_set
      for (size_t j = s; j < std::min(s + m, n); ++j)
      {
        S++;
        size_t i = data_order[j];
        latent_matrix_t::vec_t& z_i = Z(i);
        z_i.clear();

        const T& data_pt = *data_vec.at(i).get();
        std::fill(count.begin(), count.end(), 0.);

        for (int k = 0; k < K; ++k)
          log_prob[k] = prior.at(k) + components[k].score(data_pt);

        // Monte Carlo draws; the counter was a `double s` that
        // shadowed the minibatch offset `s` above
        for (int draw = 0; draw < sample; ++draw)
        {
          int k = sampler.log_sample(log_prob);
          count[k]++;
        }
        for (int k = 0; k < K; ++k)
        {
          if (count[k] > 0)
          {
            z_i[k] = count[k] / sample;
            train_sets[k].insert(data_vec[i], z_i[k]);
          }
        }
      }

      // actually update components, extrapolating the minibatch
      // statistics to the full data set via the D/S size factor
      if (S > 0)
      {
        for (int k = 0; k < K; ++k)
          components[k].update(train_sets[k], rate, D / S);
        dpm.update(Z, rate, D / S);
      }
    }

    // outer loop: empirical-Bayes refit of component hyper-parameters
    double eb_delt = empirical_bayes(components, eb_rate);

    if (eb_delt < tol)
      break;

    TLOG("empirical Bayes " << eb_delt << " > " << tol)
  }

  // compute log-likelihood by re-sampling every point once more with
  // the final parameters
  double llik = 0.;
  const vec_t& prior = dpm();

  for (size_t k = 0; k < train_sets.size(); ++k)
    train_sets[k].clear();

  // re-estimate z_i by sampling
  for (size_t i = 0; i < n; ++i)
  {
    latent_matrix_t::vec_t& z_i = Z(i);
    z_i.clear();

    const T& data_pt = *data_vec.at(i).get();
    std::fill(count.begin(), count.end(), 0.);

    double S = (double) sample;
    for (int k = 0; k < K; ++k)
      log_prob[k] = prior.at(k) + components[k].score(data_pt);

    for (int draw = 0; draw < sample; ++draw)
    {
      int k = sampler.log_sample(log_prob);
      count[k]++;
    }

    for (int k = 0; k < K; ++k)
    {
      if (count[k] > 0)
      {
        z_i[k] = count[k] / S;
        llik += z_i[k] * log_prob[k];
        train_sets[k].insert(data_vec[i], z_i[k]);
      }
    }
  }

  // re-estimate parameters on the full soft assignments
  for (int k = 0; k < K; ++k)
    components[k].update(train_sets[k]);

  return llik;
}


#endif /* DPM_HH_ */
