// time-series expression divergence
// (c) Yongjin Park, 2013
#ifndef EXPR_DIFF_HH_
#define EXPR_DIFF_HH_

#include <algorithm>
#include <cmath>
#include <fstream>
#include <iomanip>
#include <numeric>
#include <vector>

#include <boost/lexical_cast.hpp>
#include <boost/ptr_container/ptr_vector.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/unordered_map.hpp>
#include <boost/unordered_set.hpp>

#include "util.hh"
#include "expression.hh"
#include "expression_pair.hh"

// expression divergence score
// functor
// expression divergence score
// functor
//
// Variational model for time-course divergence: each of the p time points
// carries a coefficient beta[j] with Gaussian posterior N(beta[j], 1/prec[j]).
// A static lasso penalty (via auxiliary inv_tau_sq) shrinks each beta[j]
// toward zero, and a kinetic fused-lasso penalty (via inv_kappa_sq) smooths
// adjacent differences beta[j] - beta[j+1].  The natural parameters
// (eta_stoch, prec_stoch) hold a stochastically-averaged posterior.
struct func_expr_div_t
{
  typedef std::vector<double> vec_t;

  // p        : dimensionality (number of time points); assumed >= 1
  // beta_min : lower clamp for the optimized beta
  // beta_max : upper clamp for the optimized beta
  explicit func_expr_div_t(const size_t p, const double beta_min,
      const double beta_max) :
      p(p), n(0),                                            // dimensionality
      beta(p, 0), prec(p, 0), var(p, 0), cov_pair(p - 1, 0), // d/s of interest
      beta_sq(p, 0), beta_pair_sq(p - 1, 0),                 // expected squares
      inv_tau_sq(p, 0), inv_kappa_sq(p - 1, 0),              // auxiliary
      penalty_static(0.0), penalty_kinetic(0.0),             // penalty
      D(p), E(p),                                            // temporary stuff
      beta_min(beta_min), beta_max(beta_max),                // range of beta
      eta_stoch(p, 0.), prec_stoch(p, 1.)                    // stochastic update
  {
  }

  // Warm-start constructor: seed the stochastic natural parameters from an
  // existing mean vector (beta_in) and precision vector (prec_in).
  explicit func_expr_div_t(const size_t p, const double beta_min,
      const double beta_max, vec_t& beta_in, vec_t& prec_in) :
      p(p), n(0),                                            // dimensionality
      beta(p, 0), prec(p, 0), var(p, 0), cov_pair(p - 1, 0), // d/s of interest
      beta_sq(p, 0), beta_pair_sq(p - 1, 0),                 // expected squares
      inv_tau_sq(p, 0), inv_kappa_sq(p - 1, 0),              // auxiliary
      penalty_static(0.0), penalty_kinetic(0.0),             // penalty
      D(p), E(p),                                            // temporary stuff
      beta_min(beta_min), beta_max(beta_max),                // range of beta
      eta_stoch(p, 0.), prec_stoch(p, 1.)                    // stochastic update
  {
    assert_msg(p == beta_in.size(), "size should match");
    assert_msg(p == prec_in.size(), "size should match");

    for (size_t j = 0; j < p; ++j)
    {
      double b = beta_in.at(j);
      double r = prec_in.at(j);
      beta[j] = b;
      eta_stoch[j] = b * r; // natural parameter: mean * precision
      prec[j] = r;
      prec_stoch[j] = r;
    }
    n = 1; // pretend we have seen one (pseudo) observation
  }

  // get score for mixture assignment
  // we use locally collapsed variational inference score
  // Wang & Blei (2012)
  double score(const expr_pair_t& expr_pair) const
  {
    if (n == 0)
      return score_prior();

    const vec_t& xv = expr_pair.x;
    const vec_t& yv = expr_pair.y;
    double ret = 0.;
    // f(beta) = log_sigmoid(beta*x) + log_sigmoid(-beta*y)
    // f'(beta) = x*sigmoid(-beta*x) - y*sigmoid(beta*y)
    // f''(beta) = - x^2 * sigvar(beta*x) - y^2 * sigvar(beta*y)
    // 1. contribution from the 1st order term:
    //    f(beta)
    // 2. 2nd term:
    //    0.5 * f'(beta)^2 / (lambda - f''(beta))
    // 3. 3rd term:
    //    0.5 * ( log(lambda) - log(lambda-f''(beta)) )
    for (size_t j = 0; j < p; ++j)
    {
      double x = xv[j];
      double y = yv[j];
      double lmd = std::max(1e-10, prec_stoch[j]); // guard zero precision
      double bx = eta_stoch[j] / lmd * x;          // beta * x at current mean
      double by = eta_stoch[j] / lmd * y;          // beta * y at current mean
      double f = log_sigmoid(bx) + log_sigmoid(-by);
      double f1 = x * sigmoid(-bx) - y * sigmoid(by);
      double f2 = -x * x * sigvar(bx) - y * y * sigvar(by);

      // f2 <= 0, so (lmd - f2) > 0 and both terms below are well-defined
      ret += f + 0.5 * f1 * f1 / (lmd - f2);
      ret += 0.5 * std::log(lmd) - 0.5 * std::log(lmd - f2);
    }

    return ret;
  }

  // score without observed data point
  // we treat beta ~ N(0,tau^2)
  // where 1/tau^2 = penalty / |beta| = infinity
  // because |beta| tends to 0
  // then,
  // score = exp( sum f(0) )
  double score_prior() const
  {
    return ((double) p) * 2. * log_sigmoid(0);
  }

  // update distributions
  // by non-conjugate variational update
  // Wang & Blei (2013)
  //
  // rate        : stochastic step size in (0, 1]
  // size_factor : scaling for mini-batch corrections
  // returns the mean absolute change of the natural mean parameter
  double update(const expr_pair_set_t& data_set, double rate = 1., double size_factor = 1.)
  {
    n = data_set.size();
    if (n < 1)
      return 0.;

    // initialize the local optimization from the stochastic estimate
    for (size_t j = 0; j < p; ++j)
    {
      beta[j] = eta_stoch[j] / prec_stoch[j];
      prec[j] = prec_stoch[j];
    }

    // coordinate ascent until the auxiliary precisions stabilize
    const size_t max_iter = 100;
    const double tol = 1e-3;
    double diff;
    for (size_t iter = 1; iter <= max_iter; ++iter)
    {
      optimize_beta(data_set);
      calc_full_cov();
      optimize_beta_sq();
      diff = optimize_tau() + optimize_kappa();
      if (diff < tol)
        break;
    }

    // stochastic update of beta and precision
    double delta = 0.;
#ifdef DEBUG
    assert_msg((rate > 0) && (rate <= 1.), "rate out of bound");
#endif
    for (size_t j = 0; j < p; ++j)
    {
      double p_old = prec_stoch[j];
      double e_old = eta_stoch[j];

      eta_stoch[j] = e_old * (1. - rate) + beta[j] * prec[j] * rate * size_factor;
      prec_stoch[j] = p_old * (1. - rate) + prec[j] * rate * size_factor;

      delta += std::fabs(e_old - eta_stoch[j]);
    }

    delta /= ((double) p);

    return delta;
  }

  // posterior mean; refreshes beta from the natural parameters
  const vec_t&
  mean_vec()
  {
    for (size_t j = 0; j < p; ++j)
      beta[j] = eta_stoch[j] / prec_stoch[j];
    return beta;
  }

  // posterior precision (stochastic estimate)
  const vec_t&
  prec_vec() const
  {
    return prec_stoch;
  }

  // diagonal variance from the last calc_full_cov()
  const vec_t&
  var_vec() const
  {
    return var;
  }

  double get_penalty_static() const
  {
    return penalty_static;
  }

  double get_penalty_kinetic() const
  {
    return penalty_kinetic;
  }

  void set_penalty_static(double p)
  {
#ifdef DEBUG
    assert_msg( p >= 0, "must be non-negative static penalty");
#endif
    penalty_static = p;
  }

  void set_penalty_kinetic(double p)
  {
#ifdef DEBUG
    assert_msg( p >= 0, "must be non-negative kinetic penalty");
#endif
    penalty_kinetic = p;
  }

  // E[beta^2]
  const vec_t&
  beta_sq_vec() const
  {
    return beta_sq;
  }

  // E[(beta[j]-beta[j+1])^2]
  const vec_t&
  beta_pair_sq_vec() const
  {
    return beta_pair_sq;
  }

  // number of assigned data points (as double for downstream arithmetic)
  double size() const
  {
    return n;
  }

private:

  const size_t p;
  size_t n;

  vec_t beta;             // optimized beta, also mean vector
  vec_t prec;             // diagonal of the precision matrix
  vec_t var;              // diagonal variance
  vec_t cov_pair;         // cov_pair[j] = Cov[j,j+1], j in [0, p-1)
  vec_t beta_sq;          // E[beta^2]
  vec_t beta_pair_sq;     // E[(beta[j]-beta[j+1])^2]
  vec_t inv_tau_sq;       // E[1/tau^2][j]
  vec_t inv_kappa_sq;     // E[1/kappa^2][j] for (j,j+1), j in [0, p-1)

  // stochastic update

  double penalty_static;  // lasso penalty for static term
  double penalty_kinetic; // lasso penalty for kinetic term

  // temporary stuff ~ O(p) space
  vec_t D;
  vec_t E;

  const double beta_min;
  const double beta_max;

  vec_t eta_stoch;        // for stochastic update (mean * precision)
  vec_t prec_stoch;       // for stochastic update

  // coefficients and precision
  // coordinate-wise Newton-style update of beta[j] with the chain prior,
  // clamped to [beta_min, beta_max]; also refreshes prec[j]
  void optimize_beta(const expr_pair_set_t& data_set)
  {
    const int max_iter = 1000;
    const double tol = 1e-3;

    double x, y, z;

    for (int iter = 1; iter <= max_iter; ++iter)
    {
      double delta = 0.;

      for (size_t j = 0; j < p; ++j)
      {
        double b = beta.at(j);
        double b_old = b;

        // construct local quadratic problem &
        // then set to a stationary point
        double num = 0.;
        double denom = 0.;
        double w_sum = 0.;
        for (eps_iterator it = begin(data_set, j); it != end(data_set, j); ++it)
        {
          it.get_xy(x, y);
          it.get_prob(z);

          // w ~ -f''(b): local curvature; v: Newton target b - f'/f''
          double w = x * x * sigvar(b * x) + y * y * sigvar(b * y);
          double v = x * sigmoid(-b * x) - y * sigmoid(b * y);
          w = std::max(1e-10, w); // to prevent zero denominator

          v = b + v / w;

          w_sum += z * w;
          num += z * w * v;
          denom += z * w;
        }

        denom += inv_tau_sq.at(j);
        // j-th pt has future
        if ((j + 1) < p)
        {
          num += inv_kappa_sq.at(j) * beta.at(j + 1);
          denom += inv_kappa_sq.at(j);
        }
        // j-th pt has past
        if (j > 0)
        {
          num += inv_kappa_sq.at(j - 1) * beta.at(j - 1);
          denom += inv_kappa_sq.at(j - 1);
        }

        double opt_val = num / denom;
        if (opt_val > beta_max)
          opt_val = beta_max;
        if (opt_val < beta_min)
          opt_val = beta_min;

        beta[j] = opt_val;

        // update precision & avoid zero precision
        prec[j] = std::max(1e-10, w_sum + inv_tau_sq.at(j));

        // dependency with the future
        if ((j + 1) < p)
        {
          prec[j] += inv_kappa_sq.at(j);
        }
        // dependency with the past
        if (j > 0)
        {
          prec[j] += inv_kappa_sq.at(j - 1);
        }

        // relative change of beta[j], guarded against b_old == 0
        delta += std::sqrt(
            (b_old - beta[j]) * (b_old - beta[j]) / (b_old * b_old + 1e-10));
      }
      if (delta < tol)
        break;
    }

  }

  // compute covariance
  // just get diagonal and off-diagonal terms
  // of the covariance matrix in O(n) operations
  // we use Rybicky and Hummer (1991)
  void calc_full_cov()
  {
    if (p < 2)
    {
      // degenerate case: a single time point has no pair terms and the
      // covariance matrix is a scalar 1/prec[0]
      if (p == 1)
        var[0] = 1. / prec[0];
      cov_pair.resize(0);
      return;
    }

    // precision matrix
    // diagonal = prec vector
    // off-diagonal[j,j+1] = -1/kappa^2[j]

    // implicitly we assume tri-bands
    // A = [0,               inv_kappa_sq[0], ...,                    inv_kappa_sq[p-2]]
    // B = [prec[0],         prec[1],         ..., prec[p-2],         prec[p-1]]
    // C = [inv_kappa_sq[0], inv_kappa_sq[1], ..., inv_kappa_sq[p-2], 0]

    // fill in D stuff
    D[0] = inv_kappa_sq[0] / prec[0];

    // D[j] <- C[j] / (B[j] - A[j]*D[j-1])
    for (size_t j = 1; j < (p - 1); ++j)
      D[j] = inv_kappa_sq[j] / (prec[j] - inv_kappa_sq[j - 1] * D[j - 1]);

    D[p - 1] = 0.; // C[p-1] = 0

    // fill in E stuff
    E[p - 1] = inv_kappa_sq[p - 2] / prec[p - 1];

    // E[j] <- A[j] / (B[j] - C[j]*E[j+1])
    for (size_t j = p - 2; j > 0; --j)
      E[j] = inv_kappa_sq[j - 1] / (prec[j] - inv_kappa_sq[j] * E[j + 1]);

    E[0] = 0; // A[0] = 0

    // estimate diagonal terms

    // V[0] <- 1 / (1 - D[0]*E[1]) * 1 / B[0]
    var[0] = 1. / (1. - D[0] * E[1]) / prec[0];
    // V[p-1] <- 1 / (B[p-1] - A[p-1]*D[p-2])
    var[p - 1] = 1. / (prec[p - 1] - inv_kappa_sq[p - 2] * D[p - 2]);

    // V[j] <- 1/(1-D[j]*E[j+1]) * 1/(B[j]-A[j]*D[j-1])
    for (size_t j = 1; j < (p - 1); ++j)
      var[j] = 1. / (1. - D[j] * E[j + 1])
          / (prec[j] - inv_kappa_sq[j - 1] * D[j - 1]);

    // estimate covariance matrix
    // only (j,j+1) relations
    // V[j,j+1] <- D[j] * V[j+1]
    for (size_t j = 0; j < (p - 1); ++j)
    {
      cov_pair[j] = D[j] * var[j + 1];
    }

#ifdef DEBUG
    // confirm (j+1, j)
    for( size_t j = 0; j < (p-1); ++j )
    {
      double cov_check = E[j+1] * var[j];
      double diff = cov_pair[j] - cov_check;
      if( diff*diff >= 1e-10 )
      {
        std::cerr << "\n\n" << cov_pair[j] << " vs " << cov_check << std::endl;
        dump_vec( cov_pair);
      }
      assert_msg( diff*diff < 1e-10, "symmetric");
    }
#endif
  }

  // E[beta^2] and E[delta_beta^2]
  // assume we have var and cov_pair
  void optimize_beta_sq()
  {
    // E[beta[j]^2] = beta[j]^2 + var[j]
    for (size_t j = 0; j < p; ++j)
    {
      beta_sq[j] = beta[j] * beta[j] + var[j];
#ifdef DEBUG
      assert_msg(beta_sq[j]>=0,"beta must be non-negative");
#endif
    }
    // E[ (beta[j] - beta[j+1])^2 ]
    //  = beta^2[j] + beta^2[j+1] - 2 E[beta[j]*beta[j+1]]
    for (size_t j = 0; j < (p - 1); ++j)
    {
      double e1 = beta_sq[j];
      double e2 = beta_sq[j + 1];
      beta_pair_sq[j] = e1 + e2 - 2. * (beta[j] * beta[j + 1] + cov_pair[j]);
#ifdef DEBUG
      assert_msg(beta_pair_sq[j]>=0, "beta_pair must be non-negative");
#endif
    }
  }

  // static prior precision (auxiliary variable)
  // assumes beta_sq ready
  // returns difference from the previous
  double optimize_tau()
  {
    double ret = 0.;
    // inv_tau_sq[j] = sqrt( penalty^2 / E[beta[j]^2] )
    for (size_t j = 0; j < p; ++j)
    {
      double old = inv_tau_sq[j];
      inv_tau_sq[j] = penalty_static / std::sqrt(beta_sq[j]);
      ret += (old - inv_tau_sq[j]) * (old - inv_tau_sq[j]);
    }
    return ret / ((double) p);
  }

  // kinetic prior precision (auxiliary variable)
  // assumes we have beta_pair_sq
  double optimize_kappa()
  {
    double ret = 0.;
    // E[ (beta[j] - beta[j+1])^2 ]
    // = E[ beta[j]^2 ] + E[ beta[j+1]^2 ] - 2*E[ beta[j]*beta[j+1] ]
    // = E[ beta[j]^2 ] + E[ beta[j+1]^2 ] - 2*(beta[j]*beta[j+1] + Cov[j,j+1])
    for (size_t j = 0; j < (p - 1); ++j)
    {
      double old = inv_kappa_sq[j];
      inv_kappa_sq[j] = penalty_kinetic / std::sqrt(beta_pair_sq[j]);
      ret += (old - inv_kappa_sq[j]) * (old - inv_kappa_sq[j]);
    }
    return ret / std::max(1., (double) p - 1.);
  }

  // logistic function clamped away from exact 0 (range ~ [1e-10, 1])
  double sigmoid(const double z) const
  {
    return 1. / std::min(1e10, (1. + std::exp(-z))); // [1e-10, 1]
  }

  double log_sigmoid(const double z) const
  {
    return -std::log(1. + std::exp(-z));
  }

  // variance of the Bernoulli with success probability sigmoid(z)
  double sigvar(const double z) const
  {
    return sigmoid(z) * sigmoid(-z); // [1e-10, 1/4]
  }

};

// Accumulator for std::accumulate: adds sqrt(x) onto the running sum.
// Used to turn a vector of expected squares E[beta^2] into sum_j |beta[j]|.
// 'inline' is required: this is a free function defined in a header, and a
// non-inline definition violates the ODR when the header is included in
// more than one translation unit.
inline double sq_sum(double acc, double x)
{
  return acc + std::sqrt(x);
}

// Squared Euclidean distance between two scalars: (x - y)^2.
// 'inline' prevents ODR violations for this header-defined free function.
inline double sq_dist(double x, double y)
{
  return (x - y) * (x - y);
}

// Absolute distance |x - y|.  std::fabs replaces the original
// sqrt((x-y)^2), which computed the same value with extra work.
// 'inline' prevents ODR violations for this header-defined free function.
inline double abs_dist(double x, double y)
{
  return std::fabs(x - y);
}

// hyper-parameter tuning of penalty_static
// and penalty_kinetic by empirical Bayes
double empirical_bayes(func_expr_div_t& obj, double rate= 1.)
{
  if (obj.size() < 1)
    return 0;

  const func_expr_div_t::vec_t & beta_sq = obj.beta_sq_vec();
  const func_expr_div_t::vec_t & beta_pair_sq = obj.beta_pair_sq_vec();
  typedef func_expr_div_t::vec_t::iterator iterator;

  double beta_abs_sum = std::accumulate(beta_sq.begin(), beta_sq.end(), 0.,
      sq_sum);

  double beta_pair_abs_sum = std::accumulate(beta_pair_sq.begin(),
      beta_pair_sq.end(), 0., sq_sum);

  double p_static = obj.get_penalty_static(), p_kinetic =
      obj.get_penalty_kinetic();
  if (p_static < 1e-10)
  {
    p_static = ((double) beta_sq.size()) / beta_abs_sum;
  }
  else
  {
    double p = beta_sq.size();
    for (int iter = 1; iter <= 1000; ++iter)
    {
      double p_old = p_static;
      double stuff = 0.5 / (p_static * p_static) + 0.5 * beta_abs_sum / p;
      p_static = 1 / std::sqrt(stuff);
      if (abs_dist(p_static, p_old) / std::max(1e-10, p_old) < 1e-3)
        break;
    }
  }

  if (p_kinetic < 1e-10)
  {
    p_kinetic = ((double) beta_pair_sq.size()) / beta_pair_abs_sum;
  }
  else
  {
    double p = beta_pair_sq.size();
    for (int iter = 1; iter <= 1000; ++iter)
    {
      double p_old = p_kinetic;
      double stuff = 0.5 / (p_kinetic * p_kinetic)
          + 0.5 * beta_pair_abs_sum / p;
      p_kinetic = 1 / std::sqrt(stuff);
      if (abs_dist(p_kinetic, p_old) / std::max(1e-10, p_old) < 1e-3)
        break;
    }
  }

  p_static = (1.-rate)* obj.get_penalty_static() + rate * p_static;
  p_kinetic = (1.-rate)* obj.get_penalty_kinetic() + rate * p_kinetic;

  double diff = abs_dist(p_static, obj.get_penalty_static())
      + abs_dist(p_kinetic, obj.get_penalty_kinetic());

  obj.set_penalty_static(p_static);
  obj.set_penalty_kinetic(p_kinetic);

  return diff;
}

////////////////////////////////////////////////////////////////
// output mean and sd
template<typename Container>
void write_file(Container& obj_vec, const char* mean_file,
    const char* sd_file)
{
  typedef func_expr_div_t::vec_t vec_t;

  std::ofstream mean_out(mean_file, std::ios::out);
  std::ofstream sd_out(sd_file, std::ios::out);

  for (size_t k = 0; k < obj_vec.size(); ++k)
  {
    func_expr_div_t& obj = get_obj(obj_vec, k);
    mean_out << k;
    sd_out << k;

    const vec_t& M = obj.mean_vec();
    const vec_t& V = obj.var_vec();

    for (size_t j = 0; j < M.size(); ++j)
      mean_out << "\t" << M.at(j);

    for (size_t j = 0; j < V.size(); ++j)
      sd_out << "\t" << std::sqrt(V.at(j));

    mean_out << std::endl;
    sd_out << std::endl;
  }

  mean_out.close();
  sd_out.close();
}

#endif /* EXPR_DIFF_HH_ */
