// multivariate Gaussian
// (c) Yongjin Park, 2013

#ifndef GAUSSIAN_HH_
#define GAUSSIAN_HH_
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <ostream>
#include <vector>

#include <boost/math/special_functions.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/unordered_map.hpp>

#include "util.hh"

// multivariate Gaussian with a single
// variance parameter
//
// Maintains decayed sufficient statistics (s1, s2, n) so the model can be
// fit with stochastic / mini-batch variational updates.  The precision r
// carries a Gamma(a0, b0) hyper-prior and the mean's prior precision is
// scale * r (single shared variance across all p dimensions).
struct multi_gaussian_t
{
  typedef std::vector<double> vec_type;
  typedef expr_ptr_set_t<expr_vector_t> mat_type;
  typedef expr_ptr_set_iterator_t<expr_vector_t> iterator;

  // _p : dimensionality of the Gaussian
  //
  // NOTE: the initializer list below is written in member DECLARATION
  // order (p, d, s1, s2, n, a0, b0, mu, r, musq, scale).  C++ always
  // initializes members in declaration order regardless of list order;
  // keeping them in sync avoids -Wreorder warnings and makes the
  // dependency of s1/mu on p explicit and safe.
  explicit multi_gaussian_t(const size_t _p) :
      p(_p), d((double) _p), //
      s1(p, 0.), s2(0.), n(0.), //
      a0(1.), b0(1.), //
      mu(p, 0.), r(0.), musq(0.), //
      scale(1.)
  {
  }

  // locally collapsed variational inference
  //
  // Log predictive score of one observation under the current variational
  // moments, with the mean and precision integrated out analytically.
  // obs.vec must have length p.  Marked const: reads mu/scale/a0/b0 only.
  double score(const expr_vector_t& obs) const
  {
    using namespace boost::math;

    const vec_type& x = obs.vec;

    // C = |x|^2 + scale*|mu|^2 - |x + scale*mu|^2 / (1 + scale)
    // i.e. the residual after absorbing the prior mean; algebraically >= 0
    double C = dot(x, x) + scale * dot(mu, mu);
    for (size_t j = 0; j < p; ++j)
    {
      double m = x.at(j) + scale * mu.at(j);
      C -= m * m / (1. + scale);
    }

#ifdef DEBUG
    if( C < 0. )   // can only happen through floating-point cancellation
    {
      std::cerr << "C = " << C << std::endl;
      dump_vec(x);
    }
    assert_msg(C >= 0., "must be non-negative");
#endif

    double ret = 0.5 * d * (std::log(scale) - std::log(1. + scale));
    ret += lgamma(a0 + 0.5 * d) - lgamma(a0);
    ret += a0 * std::log(b0) - (a0 + d * 0.5) * std::log(b0 + 0.5 * C);

    return ret;
  }

  // Stochastic update of the sufficient statistics followed by a
  // fixed-point iteration for the variational parameters (mu, r, musq).
  //
  // data        : weighted observations; iterator yields responsibility
  //               via get_prob() and the vector via (*it).vec
  // rate        : learning rate in (0, 1]; rate == 1 fully replaces the
  //               accumulated statistics
  // size_factor : per-observation re-weighting (e.g. N / batch size)
  // returns |s2_new - s2_old| as a convergence diagnostic
  //
  // NOTE(fix): data is taken by const reference (was by value), avoiding
  // a full copy of the observation set on every call; call sites compile
  // unchanged.
  double update(const mat_type& data, double rate = 1., double size_factor = 1.)
  {
    double s2_old = s2;

    // exponential forgetting of previously accumulated statistics
    discount(s1, 1. - rate);
    n *= (1. - rate);
    s2 *= (1. - rate);

    for (iterator it = begin(data); it != end(data); ++it)
    {
      double z = it.get_prob();        // responsibility of this datum
      const vec_type& x = (*it).vec;

      increase(s1, x, rate * z * size_factor);
      s2 += rate * z * dot(x, x) * size_factor;
      n += rate * z * size_factor;
    }

    double rss = 0.;

    // alternate the precision and mean updates until the residual sum of
    // squares stabilizes (tolerance 1e-4) or 1000 iterations elapse
    for (size_t t = 0; t < 1000; ++t)
    {
      double rss_old = rss;
      rss = s2 + n * musq - 2. * dot(mu, s1);
      r = (a0 + 0.5 * d + 0.5 * d * n) / (b0 + rss * 0.5); // precision

      for (size_t j = 0; j < p; ++j)              // mu = s1 / (n+scale)
        mu[j] = s1[j] / (n + scale);              //

      musq = dot(mu, mu) + 1. / (r * (n + scale)); // E[mu^T mu]

      // fabs replaces the original sqrt(pow(x, 2)) — same value, cheaper
      if (std::fabs(rss - rss_old) < 1e-4)
        break;
    }

#ifdef DEBUG
    dump_vec(mu);
#endif

    return std::fabs(s2 - s2_old);
  }

  // current variational precision estimate r
  double precision() const
  {
    return r;
  }

  // replace the Gamma hyper-parameters; returns |b0_new - b0_old|
  double update_ab0(double _a0, double _b0)
  {
    double b_old = b0;
    a0 = _a0; b0 = _b0;
    return std::sqrt(std::pow(b0 - b_old,2.));
  }

  // update the prior scale from the current moments, clamped to
  // [0.001, 1]; returns |scale_new - scale_old|
  // (the "optimze" typo is kept — external callers use this name)
  double optimze_scale()
  {
    double s_old = scale;
    scale = std::max(0.001, std::min(d / (musq * r),1.));
    return std::fabs(s_old - scale);
  }

  // write "a0<TAB>b0<TAB>scale" (no trailing newline)
  // generalized from std::ofstream& to std::ostream& — any output stream
  // works now; existing std::ofstream call sites are unaffected
  void write_hyper(std::ostream& ofs)
  const
  {
    ofs << a0 << "\t" << b0 << "\t" << scale;
  }

  // write the tab-separated variational mean (no trailing newline);
  // requires p >= 1 (mu.at(0) throws on an empty model)
  void write_mean(std::ostream& ofs)
  const
  {
    ofs << mu.at(0);
    for(size_t j=1; j<p; ++j)
      ofs << "\t" << mu.at(j);
  }

private:

  const size_t p;   // dimensionality
  const double d;   // p as a double, cached for arithmetic

  vec_type s1;      // sum_i z_i x_i
  double s2;        // sum_i z_i <x_i,x_i>
  double n;         // sum_i z_i

  double a0;        // hyper for precision
  double b0;        // hyper for precision
  vec_type mu;      // variational mu
  double r;         // variational precision
  double musq;      // E[mu^T mu]

  double scale;     // hyper-parameter (prior precision multiplier on mu)

  // lhs += factor * rhs, element-wise; sizes must match
  void increase(vec_type& lhs, const vec_type& rhs, double factor)
  {
    assert_msg(lhs.size() == rhs.size(), "size should match");
    for (size_t j = 0; j < rhs.size(); ++j)
      lhs[j] += rhs.at(j) * factor;
  }

  // vec *= factor, element-wise
  void discount(vec_type& vec, double factor)
  {
    for (size_t j = 0; j < vec.size(); ++j)
      vec[j] *= factor;
  }

  // inner product <lhs, rhs>; sizes must match
  double dot(const vec_type& lhs, const vec_type& rhs) const
  {
    assert_msg(lhs.size() == rhs.size(), "size should match");
    double ret = 0.;
    for (size_t j = 0; j < rhs.size(); ++j)
      ret += lhs.at(j) * rhs.at(j);
    return ret;
  }
};

#endif /* GAUSSIAN_HH_ */
