// Latent Dirichlet Allocation

#ifndef LDA_HH_
#define LDA_HH_

#include "latent.hh"
#include "sampler.hh"
#include "expression.hh"
#include "util.hh"

#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <numeric>
#include <sstream>
#include <vector>

#include <boost/ptr_container/ptr_vector.hpp>
#include <boost/shared_ptr.hpp>

// topic proportion
struct topic_proportion_t
{
  typedef std::vector<double> vec_t;

  explicit topic_proportion_t(const size_t T, double a0) :
      T(T), a0(a0), stat(T, 0.), stat_tmp(T, 0.), prior(T, 0.)
  {
    assert_msg(T > 0, "at least one component");
  }

  // locally collapsed variational inference
  const vec_t&
  operator()()
  {
    double tot = a0 * ((double) T)
        + std::accumulate(stat.begin(), stat.end(), 0.);
    double log_denom = std::log(tot);

    for (size_t t = 0; t < T; ++t)
      prior[t] = std::log(stat[t] + a0) - log_denom;

    return prior;
  }

  const vec_t&
  mle()
  {
    return stat;
  }

  // update variational parameters
  double update(const latent_matrix_t& Z, double rate = 1.)
  {
    assert_msg(rate <= 1. && rate >= 0., "rate in [0, 1]");

    std::fill(stat_tmp.begin(), stat_tmp.end(), 0.);
    for (size_t i = 0; i < Z.size(); ++i)
    {
      const latent_matrix_t::vec_t& z_i = Z(i);
      for (latent_matrix_t::vec_t::const_iterator it = z_i.cbegin();
          it != z_i.cend(); ++it)
      {
        size_t k = it->first;
        double z_ik = it->second;
        if (k >= T)
          continue;
        stat_tmp[k] += z_ik;
      }
    }

    double delt = 0.;
    for (size_t t = 0; t < T; ++t)
    {
      double old = stat[t];
      stat[t] = stat[t] * (1. - rate) + stat_tmp[t] * rate;
      delt += (stat[t] - old) * (stat[t] - old);
    }

    return delt / ((double) T);
  }

  const size_t T; // # topics
  double a0;      // hyper-parameter

private:
  vec_t stat;     // number of observations
  vec_t stat_tmp; // number of observations (temporary)
  vec_t prior;    // resulting prior probability
};

// a geneset plays the role of a document in the LDA model
template<typename T>
struct geneset_t
{
  // n: number of genes, K: number of topics, a0: Dirichlet hyper-parameter
  explicit geneset_t(const size_t n, const size_t K, double a0) :
      n(n), K(K), pi(K, a0), Z(n), llik(0.)
  {
  }

  const size_t n;                          // number of genes
  const size_t K;                          // number of possible topics
  topic_proportion_t pi;                   // topic proportion
  latent_matrix_t Z;                       // topic assignment of genes
  std::vector<boost::shared_ptr<T> > data; // expression pair data
  double llik;                             // log-likelihood of last update
};

// read genesets and assign datapoints to appropriate genesets
//
// file: one geneset per line; whitespace-separated integer indices into data
// data: pool of expression pairs shared across genesets
// a0:   Dirichlet hyper-parameter forwarded to each geneset's proportion
// K:    number of topics
// out:  receives one geneset_t per non-empty line with in-range indices
//
// returns false if the file could not be opened, true otherwise
template<typename T>
bool read_genesets(const char* file,
    typename std::vector<boost::shared_ptr<T> >& data, const double a0,
    const size_t K, typename boost::ptr_vector<geneset_t<T> >& out)
{
  std::ifstream ifs(file, std::ios::in);

  // previously this reported success even when the file was missing
  if (!ifs.is_open())
    return false;

  const int ntot = data.size();

  std::string line;
  std::vector<int> idxset;

  while (std::getline(ifs, line, '\n'))
  {
    std::stringstream ss(line, std::ios::in);
    idxset.clear();

    int x;
    while (ss >> x)
    {
      // silently drop indices outside [0, ntot)
      if (x >= ntot || x < 0)
        continue;
      idxset.push_back(x);
    }
    if (idxset.empty())
      continue;

    // create a geneset with one slot per valid index
    out.push_back(new geneset_t<T>(idxset.size(), K, a0));
    geneset_t<T>& geneset = out.back();

    // assign the expression pairs referenced on this line
    for (std::vector<int>::iterator g = idxset.begin(); g != idxset.end(); ++g)
      geneset.data.push_back(data[*g]);
  }

  // ifs closed by RAII
  return true;
}

// update latent assignment by gibbs sampling
template<typename T, typename F>
struct gibbs_latent_update_t
{
  typedef std::vector<double> vec_t;
  typedef typename boost::ptr_vector<F> topic_vec_t;

  explicit gibbs_latent_update_t(topic_vec_t& topics, const size_t num_sample) :
      topics(topics), sampler(), num_sample(num_sample), //
      count(topics.size()), log_prob(topics.size())
  {
  }

  void operator()(geneset_t<T>& geneset)
  {
    latent_matrix_t& Z = geneset.Z;
    const topic_proportion_t::vec_t& prior = geneset.pi();
    const double S = (double) num_sample;

    double sum_score = 0.;
    for (size_t i = 0; i < geneset.n; ++i)
    {
      const T& expr = *geneset.data.at(i).get();
      latent_matrix_t::vec_t& z_i = Z(i);
      z_i.clear();
      std::fill(count.begin(), count.end(), 0.);

      for (size_t k = 0; k < geneset.K; ++k)
      {
        log_prob[k] = prior.at(k) + topics.at(k).score(expr);
      }

      for (size_t s = 0; s < num_sample; ++s)
      {
        int k = sampler.log_sample(log_prob);
        count[k]++;
      }

      for (size_t k = 0; k < geneset.K; ++k)
      {
        z_i[k] = count[k] / S;
        sum_score += z_i[k] * log_prob[k];
      }
    }

    geneset.pi.update(Z);
    geneset.llik = sum_score;
  }

  topic_vec_t& topics;
  sampler_t sampler;
  const size_t num_sample;
  vec_t count;
  vec_t log_prob;
};

// seed the K topics: scatter a random subset of datapoints round-robin
// over the K topic-specific training sets, then fit each topic once
template<typename T, typename F>
void random_seeding(typename std::vector<boost::shared_ptr<T> >& data_vec,
    typename boost::ptr_vector<expr_ptr_set_t<T> >& data_sets,
    typename boost::ptr_vector<F>& topics, const size_t K)
{
  const size_t pernum = 5; // target seed points per topic
  const size_t n = data_vec.size();

  // random permutation of datapoint indices
  std::vector<int> order;
  order.reserve(n);
  for (size_t i = 0; i < n; ++i)
    order.push_back(i);
  std::random_shuffle(order.begin(), order.end());

  const size_t num_rand = std::min(pernum * K, n);
  for (size_t i = 0; i < num_rand; ++i)
    data_sets[i % K].insert(data_vec[order[i]], 1.);

  for (size_t k = 0; k < K; ++k)
    topics[k].update(data_sets[k]);
}

// output results: write a tab-delimited vector on a single line
// (no trailing newline); generalized from std::ofstream& to std::ostream&
// so any output stream works — existing file-stream callers are unaffected
template<typename T>
void vec_out(std::ostream& ofs, const std::vector<T>& vec)
{
  if (vec.empty())
    return;
  ofs << vec.at(0);
  for (size_t j = 1; j < vec.size(); ++j)
    ofs << "\t" << vec.at(j);
}

// unary functor: element-wise square root (for use with vec_out)
struct sqrt_op_t
{
  double operator()(const double x) const { return std::sqrt(x); }
};

// unary functor: element-wise exponential (for use with vec_out)
struct exp_op_t
{
  double operator()(const double x) const { return std::exp(x); }
};

// unary functor: divide by a fixed denominator (for use with vec_out)
struct div_op_t
{
  explicit div_op_t(double _d) : denom(_d) {}

  double operator()(const double x) const { return x / denom; }

  double denom; // fixed denominator
};

// write a tab-delimited vector on a single line, applying f to every
// element (no trailing newline); generalized from std::ofstream& to
// std::ostream& so any output stream works — file-stream callers unaffected
template<typename T, typename F>
void vec_out(std::ostream& ofs, const std::vector<T>& vec, F f)
{
  if (vec.empty())
    return;
  ofs << f(vec.at(0));
  for (size_t j = 1; j < vec.size(); ++j)
    ofs << "\t" << f(vec.at(j));
}

template<typename T>
void output_genesets(std::string& output,
    boost::ptr_vector<geneset_t<T> >& genesets)
{
  std::ofstream ofs((output + ".genesets").c_str(), std::ios::out);

  for (size_t d = 0; d < genesets.size(); ++d)
  {
    geneset_t<T>& geneset = genesets.at(d);
    const topic_proportion_t::vec_t& mle = geneset.pi.mle();
    ofs << d << "\t";
    double n = geneset.n;
    vec_out(ofs, mle, div_op_t(n));
    ofs << std::endl;
  }

  ofs.close();
}

// total log-likelihood: sum of per-geneset scores from the last gibbs pass
template<typename T>
double likelihood(const boost::ptr_vector<geneset_t<T> >& genesets)
{
  double total = 0.;
  typename boost::ptr_vector<geneset_t<T> >::const_iterator it;
  for (it = genesets.begin(); it != genesets.end(); ++it)
    total += it->llik;
  return total;
}

// rebuild the topic-specific training sets: every (gene, topic) pair with a
// positive soft assignment contributes its datapoint, weighted by z
template<typename T>
void construct_training_sets(boost::ptr_vector<geneset_t<T> >& genesets,
    boost::ptr_vector<expr_ptr_set_t<T> >& train_sets)
{
  // start each set from scratch
  for (size_t k = 0; k < train_sets.size(); ++k)
    train_sets[k].clear();

  for (size_t d = 0; d < genesets.size(); ++d)
  {
    const geneset_t<T>& gs = genesets[d];
    for (size_t g = 0; g < gs.n; ++g)
    {
      const latent_matrix_t::vec_t& z_g = gs.Z(g);
      for (size_t k = 0; k < gs.K; ++k)
      {
        const double weight = z_g.at(k);
        if (weight > 0)
          train_sets[k].insert(gs.data.at(g), weight);
      }
    }
  }
}

// for every datapoint write "<index>\t<best topic>" to "<output>.argmax",
// where the best topic maximizes topics[k].score(expr)
template<typename T, typename F>
void output_argmax(std::string& output,
    std::vector<boost::shared_ptr<T> >& pair_data, boost::ptr_vector<F>& topics)
{
  if (topics.empty())
    return;

  std::ofstream ofs((output + ".argmax").c_str(), std::ios::out);
  for (size_t i = 0; i < pair_data.size(); ++i)
  {
    const T& expr = *pair_data[i];

    // linear scan for the highest-scoring topic
    size_t best_k = 0;
    double best_score = topics[0].score(expr);
    for (size_t k = 1; k < topics.size(); ++k)
    {
      const double curr = topics[k].score(expr);
      if (curr > best_score)
      {
        best_score = curr;
        best_k = k;
      }
    }
    ofs << i << "\t" << best_k << std::endl;
  }
  ofs.close();
}

// output results for the fitted topics
// NOTE(review): intentionally a no-op here — topic-specific output is
// presumably provided by overloads/specializations for concrete F elsewhere;
// confirm before relying on this generic version to write anything
template<typename F>
void output_topics(std::string& output, boost::ptr_vector<F>& topics)
{
}

////////////////////////////////////////////////////////////////
// uniform element access for empirical_bayes: boost::ptr_vector owns its
// elements and operator[] dereferences to T& directly
template<typename T>
T& get_obj(boost::ptr_vector<T>& obj_vec, size_t k)
{
  return obj_vec[k];
}

// uniform element access for empirical_bayes: unwrap the shared_ptr
// to a plain reference
template<typename T>
T& get_obj(std::vector<boost::shared_ptr<T> >& obj_vec, size_t k)
{
  return *obj_vec[k];
}

// run one empirical-bayes update on every object in the container and
// return the average reported change
//
// obj_vec: container of objects (or shared_ptrs) whose elements support
//          an empirical_bayes(obj, rate) overload declared elsewhere
// rate:    learning rate forwarded to each per-object update
template<typename Container>
double empirical_bayes( Container& obj_vec, double rate = 1. )
{
  // guard: the original divided by size() unconditionally, yielding
  // 0/0 -> NaN for an empty container
  if (obj_vec.size() == 0)
    return 0.;

  double diff = 0.;
  for(size_t j=0; j<obj_vec.size(); ++j)
    diff += empirical_bayes( get_obj( obj_vec, j ), rate );

  return diff / (double) obj_vec.size();
}

////////////////////////////////////////////////////////////////
// fit LDA by batch EM with gibbs-sampled latent assignments
//
// data_vec: pool of expression pairs shared by all genesets
// genesets: documents; each holds soft topic assignments and proportions
// p:        dimensionality passed to each topic model F's constructor
// args:     program options providing K, repeat, sample, max_iter, tol,
//           and output (presumably parsed elsewhere — confirm fields)
//
// runs args.repeat random restarts and writes the best-scoring model
template<typename T, typename F, typename prog_args_t>
void fit_lda(std::vector<boost::shared_ptr<T> >& data_vec,
    boost::ptr_vector<geneset_t<T> >& genesets, const size_t p,
    const prog_args_t& args)
{
  double llik_max = 0.;

  for (size_t r = 1; r <= args.repeat; ++r)
  {
    // initialize K topics
    boost::ptr_vector<F> topics;
    for (size_t k = 0; k < args.K; ++k)
      topics.push_back(new F(p));

    // one training set per topic, rebuilt every iteration
    boost::ptr_vector<expr_ptr_set_t<T> > train_sets;
    for (size_t k = 0; k < args.K; ++k)
      train_sets.push_back(new expr_ptr_set_t<T>);

    // initialization by random seeding of genes to K topics
    random_seeding(data_vec, train_sets, topics, args.K);

    // update function by Gibbs sampling
    typedef gibbs_latent_update_t<T, F> gibbs_type;
    gibbs_type gibbs_update(topics, args.sample);

    // decaying step-size schedule: rate = (t + t0)^(-rate_discount)
    const double t0 = 1.;
    const double rate_discount = 0.75;

    // outer loop: empirical-bayes hyper-parameter updates
    for (size_t eb_iter = 1; eb_iter <= args.max_iter; ++eb_iter)
    {
      // inner loop: alternate latent resampling (E) and topic updates (M)
      for (double t = 1; t <= args.max_iter; ++t)
      {
        double rate = std::pow(t + t0, -rate_discount);

        // E-step: resample latent assignments for every geneset
        std::for_each(genesets.begin(), genesets.end(), gibbs_update);

        construct_training_sets(genesets, train_sets);

        // M-step: update each topic from its training set;
        // delta is the average parameter change, used for convergence
        double delta = 0.;
        for (size_t k = 0; k < args.K; ++k)
          delta += topics[k].update(train_sets[k], rate);
        delta /= ((double) args.K);

        TLOG("Rate: " << rate << ", delta: " << delta);

        if (delta < args.tol)
          break;
      }

      double eb_rate = std::pow((double) eb_iter + t0, -rate_discount);
      double eb_delta = empirical_bayes(topics, eb_rate);
      TLOG("EB delta: " << eb_delta);
      std::cerr << std::endl << std::endl;

      if (eb_delta < args.tol)
        break;
    }

    // final latent update
    std::for_each(genesets.begin(), genesets.end(), gibbs_update);
    double llik = likelihood(genesets);

    TLOG("log-likelihood = " << llik);

    // keep the best restart: write the model and its log-likelihood
    if (r == 1 || llik > llik_max)
    {
      llik_max = llik;
      write(data_vec, genesets, topics, args.output);
      std::ofstream ofs((args.output + ".llik").c_str(), std::ios::out);
      ofs << llik << std::endl;
      ofs.close();
    }
  }
}

// fit LDA by stochastic online (mini-batch) EM with gibbs-sampled
// latent assignments
//
// data_vec: pool of expression pairs shared by all genesets
// genesets: documents; each holds soft topic assignments and proportions
// p:        dimensionality passed to each topic model F's constructor
// args:     program options providing K, repeat, sample, max_iter,
//           minibatch, tol, and output
//
// runs args.repeat random restarts and writes the best-scoring model
template<typename T, typename F, typename prog_args_t>
void fit_lda_online(std::vector<boost::shared_ptr<T> >& data_vec,
    boost::ptr_vector<geneset_t<T> >& genesets, const size_t p,
    const prog_args_t& args)
{
  double llik_max = 0.;

  // required for stochastic online update
  std::vector<int> geneset_order;
  for (size_t j = 0; j < genesets.size(); ++j)
    geneset_order.push_back(j);

  for (size_t r = 1; r <= args.repeat; ++r)
  {
    // initialize K topics
    boost::ptr_vector<F> topics;
    for (size_t k = 0; k < args.K; ++k)
      topics.push_back(new F(p));

    // one training set per topic, rebuilt every mini-batch
    boost::ptr_vector<expr_ptr_set_t<T> > train_sets;
    for (size_t k = 0; k < args.K; ++k)
      train_sets.push_back(new expr_ptr_set_t<T>);

    // initialization by random seeding of genes to K topics
    // NOTE(review): calls random_seeding_lasso (declared elsewhere) while
    // fit_lda calls random_seeding — confirm the difference is intentional
    random_seeding_lasso(data_vec, train_sets, topics, args.K);

    // update function by Gibbs sampling
    typedef gibbs_latent_update_t<T, F> gibbs_type;
    gibbs_type gibbs_update(topics, args.sample);

    // stochastic online
    // decaying step-size schedule: rate = (t + t0)^(-rate_discount)
    const double t0 = 1.;
    const double rate_discount = 0.75;

    for (size_t eb_iter = 1; eb_iter <= args.max_iter; ++eb_iter)
    {

      // visit genesets in a fresh random order each epoch
      std::random_shuffle(geneset_order.begin(), geneset_order.end());

      const size_t n = genesets.size();
      const size_t m = std::min(n, args.minibatch);
      const double D = genesets.size();

      // one pass over the shuffled genesets in mini-batches of size m
      for (size_t s = 0; s < n; s += m)
      {
        // NOTE(review): t restarts at 1 every epoch, so the learning rate
        // does not decay across eb_iter epochs — confirm this is intended
        double t = 1. + ((double) s);
        double rate = std::pow(t + t0, -rate_discount);

        for (size_t k = 0; k < train_sets.size(); ++k)
          train_sets[k].clear();

        // E-step on the mini-batch; S counts its actual size
        double S = 0.;
        for (size_t j = s; j < std::min(s + m, n); ++j)
        {
          S++;
          geneset_t<T>& geneset = genesets[geneset_order[j]];
          gibbs_update(geneset);
          // construct training set
          for (size_t i = 0; i < geneset.n; ++i)
          {
            const latent_matrix_t::vec_t& z_i = geneset.Z(i);
            for (size_t k = 0; k < geneset.K; ++k)
              if (z_i.at(k) > 0)
                train_sets[k].insert(geneset.data.at(i), z_i.at(k));
          }
        }

        // actually update topics; D / S rescales the mini-batch
        // statistics to the full corpus size
        for (size_t k = 0; k < args.K; ++k)
          topics[k].update(train_sets[k], rate, D / S);
      }

      double eb_rate = std::pow((double) eb_iter + t0, -rate_discount);
      double eb_delta = empirical_bayes(topics, eb_rate);
      TLOG("EB delta: " << eb_delta);
      std::cerr << std::endl << std::endl;

      if (eb_delta < args.tol)
        break;
    }

    // final latent update
    std::for_each(genesets.begin(), genesets.end(), gibbs_update);
    double llik = likelihood(genesets);

    TLOG("log-likelihood = " << llik);

    // keep the best restart: write the model and its log-likelihood
    if (r == 1 || llik > llik_max)
    {
      llik_max = llik;
      write(data_vec, genesets, topics, args.output);
      std::ofstream ofs((args.output + ".llik").c_str(), std::ios::out);
      ofs << llik << std::endl;
      ofs.close();
    }
  }
}


#endif /* LDA_HH_ */
