#ifndef LASSO_HH_
#define LASSO_HH_

#include <algorithm>
#include <cmath>      // std::log, std::sqrt, std::pow, std::fabs
#include <fstream>
#include <iomanip>    // std::setw (DEBUG logging)
#include <numeric>
#include <sstream>
#include <string>
#include <vector>

#include <boost/math/special_functions.hpp>
#include <boost/ptr_container/ptr_vector.hpp>

#include "expression.hh"
#include "util.hh"

// Inner product <lhs, rhs> of two equal-length vectors.
template<typename vec_type>
double dot(const vec_type& lhs, const vec_type& rhs)
{
  assert_msg(lhs.size() == rhs.size(), "size should match");
  return std::inner_product(lhs.begin(), lhs.end(), rhs.begin(), 0.);
}


// lasso functor for prediction only
// lasso functor for prediction only
//
// Holds a fixed (already fitted) variational posterior of a lasso
// regression and evaluates the log predictive score of new
// observations:
//   beta_hat : posterior mean of the coefficients (copied in)
//   prec     : p x p precision statistics (only the diagonal is read)
//   a0, b0   : Gamma hyper-parameters for 1/sigma^2
struct func_lasso_pred_t
{
  typedef std::vector<double> vec_type;
  typedef boost::ptr_vector<vec_type> mat_type;
  typedef expr_lm_t data_type;

  // _a0, _b0  : hyper-parameters of the Gamma prior on 1/sigma^2
  // beta_org  : fitted coefficient vector (copied)
  // prec_org  : fitted precision matrix (deep-copied row by row)
  //
  // NOTE: mem-initializers are listed in member declaration order
  // (p, prec, beta_hat, a0, b0); members are always initialized in
  // declaration order, so a mismatched list is misleading (-Wreorder).
  explicit func_lasso_pred_t(const double _a0, const double _b0,
      const vec_type& beta_org, const mat_type& prec_org) :
      p(beta_org.size()), prec(), beta_hat(beta_org), a0(_a0), b0(_b0)
  {
#ifdef DEBUG
    assert_msg(prec_org.size() == p, "must have p rows");
    dump_vec(beta_org);
#endif
    copy_mat(prec_org, prec);
#ifdef DEBUG
    for(size_t j=0; j<p; ++j)
    std::cerr << "\t" << prec.at(j).at(j);
    std::cerr << std::endl;
#endif
  }

  // convenience call operator: forwards to score()
  double operator()(const data_type& data) const
  {
    return score(data);
  }

  // Log predictive density of one observation (data.x, data.y):
  // a Student-t style score derived from the Gamma prior on 1/sigma^2.
  double score(const data_type& data) const
  {
    using namespace boost::math;

    const data_type::vec_type& x = data.x;
    const data_type::value_type y = data.y;

    double y_hat = dot(beta_hat, x);
    double D = y - y_hat;               // residual

    // S = 1 + sum_j x_j^2 / prec_jj : predictive variance inflation
    double S = 1.;
    for (size_t j = 0; j < p; ++j)
    {
      const double xj = x.at(j);
      S += xj * xj / prec.at(j).at(j);  // x^2 without std::pow
    }

    double ret = -0.5 * std::log(S) + lgamma(a0 + 0.5) - lgamma(a0);
    ret += a0 * std::log(b0) - (a0 + 0.5) * std::log(b0 + .5 * D * D / S);

    return ret;
  }

protected:

  // deep-copy src into dst (ptr_vector owns its heap-allocated rows)
  void copy_mat(const mat_type& src, mat_type& dst)
  {
    dst.clear();
    for (size_t j = 0; j < src.size(); ++j)
      dst.push_back(new vec_type(src.at(j)));
  }

  const size_t p;       // number of predictors
  mat_type prec;        // p x p precision statistics (diagonal used)
  vec_type beta_hat;    // posterior mean coefficients
  const double a0;      // Gamma hyper for 1/sigma^2
  const double b0;      // Gamma hyper for 1/sigma^2

};

// lasso functor for model-fitting
struct func_lasso_t
{
  typedef expr_lm_t data_type;
  typedef data_type::vec_type vec_type;
  typedef boost::ptr_vector<vec_type> mat_type;

protected:

  // lasso
  // y ~ x

  const size_t p;       // number of predictors
  vec_type beta_hat;    // coefficients
  vec_type betasq;      // beta square
  mat_type x2stat;      // 2nd order statistics p x p
  vec_type xystat;      // x*y statistics 1 x p
  vec_type invtausq;    // E[1/tau^2]   1 x p

  double invsigmasq;    // E[1/sigma^2]
  double n;             // E[n]
  double rss;           // residual sums of squares

  double penalty;       // L1 penalty (hyper)
  double a0;            // hyper for p(1/sigma^2|a0,b0)
  double b0;            // hyper

public:
  explicit func_lasso_t(const size_t _p) :
      p(_p), beta_hat(p, 0.), betasq(p, 0.), xystat(p, 0.), invtausq(p, 0.), //
      n(0.), invsigmasq(1.), rss(0.),   // 1. lasso regression
      penalty(0.), a0(1.), b0(1.)       //  hyper-parameters
  {
    for (size_t j = 0; j < p; ++j)
      x2stat.push_back(new vec_type(p, 0.));
  }

  ~func_lasso_t()
  {
  }

  double score(const data_type& data) const
  {
    using namespace boost::math;

    const data_type::value_type y = data.y;
    double y_hat = dot(beta_hat, data.x);
    double D = y - y_hat;

    double S = 1.;
    for (size_t j = 0; j < p; ++j)
      S += std::pow(data.x.at(j), 2.) / (x2stat.at(j).at(j) + invtausq.at(j));

    double ret = -0.5 * std::log(S) + lgamma(a0 + 0.5) - lgamma(a0);
    ret += a0 * std::log(b0) - (a0 + 0.5) * std::log(b0 + .5 * D * D / S);

    return ret;
  }

  double update(const expr_lm_set_t& data_set, double rate = 1., double size_factor = 1.)
  {
    assert_msg(rate >= 0. && rate <= 1., "rate should be [0, 1]");

    // discount xy and x2, multiplying by (1-rate)
    discount(x2stat, 1. - rate);
    discount(xystat, 1. - rate);
    n *= 1. - rate;

    // update xy[j] = sum_i x[i][j] * y[i]
    // update x2[j][k] = sum_i x[i][j] * x[i][k]
    for (expr_lm_set_iter_t it = begin(data_set); it != end(data_set); ++it)
    {
      double z = it.get_prob();
      const data_type& data = *it;
      const data_type::vec_type& x = data.x;
      const data_type::value_type y = data.y;
      n += rate * z * size_factor;

      increase(xystat, x, rate * y * z * size_factor);

      for (size_t j = 0; j < p; ++j)
        increase(x2stat[j], x, x[j] * rate * z * size_factor);

    }

    double rss_old = rss;

    // iterative update of q(beta), q(tau), q(sigma)
    // until convergence of rss
    size_t max_iter = 1000;
    double tol = 1e-3;

    for (size_t iter = 0; iter < max_iter; ++iter)
    {
      update_q_beta();
      double curr_delt = calc_rss(data_set, rate);
      calc_betasq();
      update_tau();
      update_sigma();
#ifdef DEBUG
      std::cerr << "\r" << std::setw(10) << iter << "\tRSS = " << rss << std::endl;
#endif
      if (curr_delt < tol)
        break;
    }
#ifdef DEBUG
    std::cerr << std::endl;
#endif

    update_q_beta();

    return std::sqrt(std::pow(rss_old - rss, 2.)) / std::max(1., rss_old);
  }

  double get_penalty() const
  {
    return penalty;
  }

  void set_penalty(double _pen)
  {
    penalty = _pen;
  }

  double get_a0() const
  {
    return a0;
  }

  double get_b0() const
  {
    return b0;
  }

  double get_a() const
  {
    return 0.5 * n + 0.5 * ((double) p);
  }

  double get_b() const
  {
    return 0.5 * rss + 0.5 * dot(invtausq, betasq);
  }

  void set_ab0(double _a0, double _b0)
  {
    a0 = _a0;
    b0 = _b0;
  }

  double betasq_sum() const
  {
    return sum(betasq);
  }

  double betaabs_sum() const
  {
    sqrt_op f;
    return sum_op(betasq, f);
  }

  double precision() const
  {
    return invsigmasq;
  }

  size_t num_pred() const
  {
    return p;
  }

  double size() const
  {
    return n;
  }

  void write_beta(std::ofstream& ofs) const
  {
    ofs << beta_hat.at(0);
    for (size_t j = 1; j < p; ++j)
      ofs << "\t" << beta_hat.at(j);
  }

  void write_sd(std::ofstream& ofs) const
  {
    ofs << 1. / std::sqrt(x2stat.at(0).at(0) + invtausq.at(0));
    for (size_t j = 1; j < p; ++j)
      ofs << "\t" << 1. / std::sqrt(x2stat.at(j).at(j) + invtausq.at(j));
  }

  void write_hyper(std::ofstream& ofs) const
  {
    ofs << penalty << "\t" << a0 << "\t" << b0;
  }

  void write_prec(std::ofstream& ofs) const
  {
    ofs << x2stat.at(0).at(0) + invtausq.at(0);

    for (size_t k = 1; k < p; k++)
      ofs << "\t" << x2stat.at(0).at(k);

    for (size_t j = 1; j < p; ++j)
      for (size_t k = 0; k < p; ++k)
      {
        double val = x2stat.at(j).at(k);
        if (j == k)
          val += invtausq.at(j);
        ofs << "\t" << val;
      }
  }

protected:

  // define q(beta|beta_hat, cov)
  void update_q_beta()
  {
#ifdef DEBUG
    std::cerr << "n = " << n << std::endl;
#endif

    // find by coordinate descent
    // we just use covariance udpate of Friedman et al. 2010
    for (size_t iter = 0; iter < 500; ++iter)
    {
      double diff = 0.;

      for (size_t j = 0; j < p; ++j)
      {
        double beta_old = beta_hat.at(j);

        double resid = invsigmasq * xystat[j];
        for (size_t k = 0; k < p; ++k)
          if (j != k)
            resid -= invsigmasq * x2stat[j][k] * beta_hat.at(k);

        double denom = invsigmasq * x2stat[j][j] + invtausq[j] + 0.001;

        double beta = resid / denom;
        beta_hat[j] = beta;
        diff += std::pow(beta - beta_old, 2.);
      }

      if (std::sqrt(diff / ((double) p)) < 1e-2)
      {
#ifdef DEBUG
        TLOG("Iter = " << iter << ", DIFF = " << diff);
        dump_vec(beta_hat);
#endif
        break;
      }
    }
  }

  void calc_betasq()
  {
    for (size_t j = 0; j < p; ++j)
    {
      double b = beta_hat.at(j);
      double prec = x2stat[j][j] + invtausq.at(j);
      double var = 1. / prec;
      betasq[j] = b * b + var;
    }
  }

  double calc_rss(const expr_lm_set_t& data_set, double rate)
  {
    double rss_old = rss;
    rss *= 1. - rate;
    for (expr_lm_set_iter_t it = begin(data_set); it != end(data_set); ++it)
    {
      double z = it.get_prob();
      const data_type& data = *it;
      const data_type::vec_type& x = data.x;
      const data_type::value_type y = data.y;
      double y_hat = dot(beta_hat, x);
      rss += rate * z * std::pow(y - y_hat, 2.);
    }

    return std::sqrt(std::pow(rss_old - rss, 2.)) / std::max(1., rss_old);
  }

  void update_sigma()
  {
    double a = a0 + 0.5 * n + 0.5 * ((double) p);
    double b = b0 + 0.5 * rss + 0.5 * dot(invtausq, betasq);
    invsigmasq = a / b;
  }

  void update_tau()
  {
    if (penalty == 0)
    {
      std::fill(invtausq.begin(), invtausq.end(), 0.01);
      return;
    }

    for (size_t j = 0; j < p; ++j)
      invtausq[j] = penalty * std::sqrt(invsigmasq / betasq.at(j));
  }

  void discount(vec_type& vec, double r)
  {
    for (size_t j = 0; j < vec.size(); ++j)
      vec[j] *= r;
  }

  void discount(mat_type& mat, double r)
  {
    for (size_t j = 0; j < mat.size(); ++j)
      discount(mat[j], r);
  }

  void increase(vec_type& lhs, const vec_type& rhs, double factor)
  {
    assert_msg(lhs.size() == rhs.size(), "size should match");
    for (size_t j = 0; j < rhs.size(); ++j)
      lhs[j] += rhs.at(j) * factor;
  }

  double sum(const vec_type& vec) const
  {
    return std::accumulate(vec.begin(), vec.end(), 0.);
  }

  struct sqrt_op
  {
    double operator ()(double x)
    {
      return std::sqrt(x);
    }
  };

  template<typename op>
  double sum_op(const vec_type& vec, op f) const
  {
    double ret = 0.;
    for (size_t j = 0; j < vec.size(); ++j)
      ret += f(vec.at(j));
    return ret;
  }
};

////////////////////////////////////////////////////////////////
// empirical Bayes to estimate Lasso penalty
// empirical Bayes to estimate Lasso penalty
//
// Fixed-point iteration for the penalty lambda given the current
// average |beta| (scaled by the residual precision).  The converged
// value is blended with the previous penalty by `rate`.
// Returns the relative change of the stored penalty.
double empirical_bayes_lasso(func_lasso_t& lasso_obj, double rate = 1.)
{
  double penalty_old = lasso_obj.get_penalty();

  double p = lasso_obj.num_pred();
  double r = lasso_obj.precision();
  double beta_abs_mean = std::sqrt(r) * lasso_obj.betaabs_sum() / p;

  if (beta_abs_mean < 0.0001)
  {
    TLOG("almost all coefficients are zero");
    // some big number: shrink everything toward zero
    double lambda = 5000.;
    lasso_obj.set_penalty(lambda);
    // NOTE(review): this branch returns lambda itself, while the normal
    // path returns a relative change -- confirm callers expect this.
    return lambda;
  }

  // bootstrap the penalty from the data if it was never set
  if (penalty_old < 0.001)
  {
    lasso_obj.set_penalty(1. / beta_abs_mean);
    penalty_old = lasso_obj.get_penalty();
  }

  // fixed point: lambda <- 1 / sqrt(0.5/lambda^2 + 0.5*|beta|/lambda)
  double lambda = lasso_obj.get_penalty();
  for (size_t iter = 0; iter < 1000; ++iter)
  {
    double lambda_old = lambda;

    double stuff = 0.5 / lambda / lambda;
    stuff += 0.5 / lambda * beta_abs_mean;

    lambda = 1. / std::sqrt(stuff);

    if (std::fabs(lambda_old - lambda) / (0.001 + lambda_old) < 1e-4)
      break;
  }

  // smooth update of the stored penalty
  lambda = rate * lambda + (1. - rate) * penalty_old;
  lasso_obj.set_penalty(lambda);

  double diff = std::fabs(lasso_obj.get_penalty() - penalty_old);
  diff /= std::max(1., penalty_old);
  return diff;
}

// Dispatch wrapper: plain lasso model -> lasso empirical Bayes.
double empirical_bayes(func_lasso_t& lasso_obj, double rate = 1.)
{
  const double penalty_change = empirical_bayes_lasso(lasso_obj, rate);
  return penalty_change;
}

////////////////////////////////////////////////////////////////
//
// Extension of lasso P(y|x) with P(x) modeled by isotropic gaussian
//
////////////////////////////////////////////////////////////////
struct func_lasso_gauss_pred_t: public func_lasso_pred_t
{

private:

  const double d;   // dimension of x (set to beta_org.size() == p)
  double c0;        // hyper for precision
  double d0;        // hyper for precision
  vec_type mu;      // variational mu
  double scale;     // hyper-parameter

public:

  // NOTE: mem-initializers follow member declaration order
  // (d, c0, d0, mu, scale) to match actual initialization order.
  func_lasso_gauss_pred_t(const double _a0, // hyper parameter lasso
      const double _b0,                     // hyper parameter lasso
      const vec_type& beta_org,             // lasso variational
      const mat_type& prec_org,             // lasso variational
      const double _c0, const double _d0, const double _s, // hyper gaussian
      const vec_type& mu_org) :
      func_lasso_pred_t(_a0, _b0, beta_org, prec_org), // lasso obj
      d(beta_org.size()), c0(_c0), d0(_d0), mu(mu_org), scale(_s)
  {
  }

  // Lasso predictive score of y|x plus the marginal log-evidence of x
  // under the isotropic gaussian model.
  // NOTE: hides (does not override) func_lasso_pred_t::score --
  // no virtual dispatch through base pointers.
  double score(const data_type& data) const
  {
    double ret = func_lasso_pred_t::score(data);

    using namespace boost::math;

    const vec_type& x = data.x;

    // C = |x|^2 + scale*|mu|^2 - |x + scale*mu|^2 / (1 + scale)
    double C = dot(x, x) + scale * dot(mu, mu);
    for (size_t j = 0; j < p; ++j)
    {
      double m = x.at(j) + scale * mu.at(j);
      C -= m * m / (1. + scale);
    }

    ret += 0.5 * d * (std::log(scale) - std::log(1. + scale));
    ret += lgamma(c0 + 0.5 * d) - lgamma(c0);
    ret += c0 * std::log(d0) - (c0 + d * 0.5) * std::log(d0 + 0.5 * C);

    return ret;
  }
};

struct func_lasso_gauss_t: public func_lasso_t
{
private:
  // independent gaussian vector
  // p(x)
  const double d;   // dimension of x (== p)

  vec_type s1;      // sum_i z_i x_i
  double s2;        // sum_i z_i <x_i,x_i>

  double c0;        // hyper for precision
  double d0;        // hyper for precision
  vec_type mu;      // variational mu
  double r;         // variational precision
  double musq;      // E[mu^T mu]

  double scale;     // hyper-parameter

public:

  // NOTE: mem-initializers follow member declaration order
  // (d, s1, s2, c0, d0, mu, r, musq, scale) to match actual
  // initialization order (avoids -Wreorder).
  explicit func_lasso_gauss_t(size_t _p) :
      func_lasso_t(_p),           // lasso stuff
      d(p), s1(p, 0.), s2(0.),    // isotropic gaussian vector
      c0(1.), d0(1.),             //  hyper-parameters
      mu(p, 0.), r(0.), musq(0.), //  variational parameters
      scale(1.)                   //  hyper-parameter
  {
  }

  // Lasso score of y|x plus the marginal log-evidence of x under the
  // isotropic gaussian model.
  // NOTE: hides (does not override) func_lasso_t::score.
  double score(const data_type& data) const
  {
    double ret = func_lasso_t::score(data);

    using namespace boost::math;

    const vec_type& x = data.x;

    // C = |x|^2 + scale*|mu|^2 - |x + scale*mu|^2 / (1 + scale)
    double C = dot(x, x) + scale * dot(mu, mu);
    for (size_t j = 0; j < p; ++j)
    {
      double m = x.at(j) + scale * mu.at(j);
      C -= m * m / (1. + scale);
    }

    ret += 0.5 * d * (std::log(scale) - std::log(1. + scale));
    ret += lgamma(c0 + 0.5 * d) - lgamma(c0);
    ret += c0 * std::log(d0) - (c0 + d * 0.5) * std::log(d0 + 0.5 * C);

    return ret;
  }

  // Update the lasso part, then the gaussian sufficient statistics and
  // variational parameters (mu, r, musq).  Returns the lasso change
  // plus the absolute change of s2.
  double update(const expr_lm_set_t& data_set, double rate = 1., double size_factor = 1.)
  {
    double ret = func_lasso_t::update(data_set, rate, size_factor);

    // update gaussian stuff
    double s2_old = s2;

    discount(s1, 1. - rate);
    s2 *= (1. - rate);

    for (expr_lm_set_iter_t it = begin(data_set); it != end(data_set); ++it)
    {
      double z = it.get_prob();
      const data_type& data = *it;
      const data_type::vec_type& x = data.x;
      increase(s1, x, rate * z * size_factor);
      s2 += rate * z * dot(x, x) * size_factor;
    }

    // local residual for the gaussian part (renamed so it does not
    // shadow the inherited lasso member `rss`)
    double gauss_rss = 0.;

    for (size_t t = 0; t < 1000; ++t)
    {
      double gauss_rss_old = gauss_rss;
      gauss_rss = s2 + n * musq - 2. * dot(mu, s1);
      r = (c0 + 0.5 * d + 0.5 * d * n) / (d0 + gauss_rss * 0.5); // precision

      for (size_t j = 0; j < p; ++j)              // mu = s1 / (n+scale)
        mu[j] = s1[j] / (n + scale);              //

      musq = dot(mu, mu) + 1. / (r * (n + scale));

      if (std::fabs(gauss_rss - gauss_rss_old) < 1e-4)
        break;
    }
    ret += std::fabs(s2 - s2_old);

    return ret;
  }

  double gauss_precision() const
  {
    return r;
  }

  double get_c0() const
  {
    return c0;
  }

  double get_d0() const
  {
    return d0;
  }

  // set both hypers; returns the absolute change of d0
  double set_cd0(double _c0, double _d0)
  {
    double d_old = d0;
    c0 = _c0;
    d0 = _d0;
    return std::fabs(d0 - d_old);
  }

  // scale <- clamp(d / (musq * r)) into [0.001, 1];
  // returns the absolute change
  double optimize_scale()
  {
    double s_old = scale;
    scale = std::max(0.001, std::min(d / (musq * r), 1.));
    return std::fabs(s_old - scale);
  }

  // lasso hypers followed by the gaussian hypers (tab-separated)
  void write_hyper(std::ofstream& ofs) const
  {
    func_lasso_t::write_hyper(ofs);
    ofs << "\t" << c0 << "\t" << d0 << "\t" << scale;
  }

  // tab-separated variational mean vector (no trailing newline)
  void write_mu(std::ofstream& ofs) const
  {
    ofs << mu.at(0);
    for (size_t j = 1; j < p; ++j)
      ofs << "\t" << mu.at(j);
  }

};

// empirical bayes of lasso penalty and gaussian precision
// Dispatch wrapper: lasso+gaussian model reuses the lasso-penalty
// empirical Bayes update.
double empirical_bayes(func_lasso_gauss_t& obj, double rate = 1.)
{
  const double penalty_change = empirical_bayes_lasso(obj, rate);
  return penalty_change;
}


#endif /* LASSO_HH_ */
