/*
 * multilogit.h
 *
 *  Created on: Mar 6, 2011
 *      Author: tqlong
 */

#ifndef MULTILOGIT_H_
#define MULTILOGIT_H_

#include <cassert>
#include <cmath>
#include <iostream>
#include <vector>

#include <boost/math/special_functions/log1p.hpp>

//#include "graddescent.h"
#include "lbfgs.h"

/** Train a Multi-Logit (multinomial logistic regression) classifier by
 *  maximizing the L2-regularized log-likelihood with L-BFGS.
 *
 *  Template parameters:
 *    VEC     - dense vector type with an Armadillo-like interface
 *              (zeros(), ones(), randn(), operator% element-wise product,
 *              arma::dot(a, b), ...)
 *    DATASET - dataset providing n() (sample count), dim() (feature count),
 *              operator[](i) -> feature vector, y(i) -> class label in [0, K)
 */
template<typename VEC, typename DATASET>
class MultiLogit
{
public:
  typedef VEC               vec_type;
  typedef DATASET           dataset_type;
  typedef std::vector<std::vector<double> > vec_dvec_type;
  typedef std::vector<double> dvec_type;
public:
  /** Variable for optimization, see infofunctional.h.
   *  Holds one weight vector plus one bias per class, and implements the
   *  vector-space operations (scale, subtract, dot, ...) the optimizer needs.
   */
  class Weight : public std::vector<VEC> {
    typedef std::vector<VEC> __Base;
  public:
    std::vector<double> bias_;  // per-class bias terms, parallel to the weight vectors

    /** Dump all per-class weight vectors and biases to stdout. */
    void print() const
    {
      for (unsigned int k = 0; k < this->size(); k++) {
        std::cout << "class " << k << " w = \n"
            << this->at(k) << "b = " << bias_[k] << "\n";
      }
    }

    /** K classes, D-dimensional weights; all weights and biases start at zero. */
    Weight(int K, int D) : __Base(K), bias_(K, 0.0)
    {
      for (int i = 0; i < K; i++) {
        this->at(i) = VEC(D);
        this->at(i).zeros();
      }
    }

    /** Set every weight component and every bias to 1. */
    Weight& ones()
    {
      for (unsigned int i = 0; i < this->size(); i++) {
        this->at(i).ones();
        this->bias_[i] = 1.0;
      }
      return *this;
    }

    /** In-place scalar multiplication of all weights and biases. */
    Weight& operator*=(double s)
    {
      for (unsigned int k = 0; k < this->size(); k++) {
        this->at(k) *= s;
        this->bias_[k] *= s;
      }
      return *this;
    }

    /** Component-wise difference: (*this) - x, as a new Weight. */
    Weight operator - (const Weight& x) const
    {
      Weight ret(*this);
      for (unsigned int k = 0; k < this->size(); k++) {
        ret.at(k) -= x.at(k);
        ret.bias_[k] -= x.bias_[k];
      }
      return ret;
    }

    /** AXPY update: (*this) += s * x. Returns *this for chaining. */
    Weight& add(double s, const Weight& x)
    {
      for (unsigned int k = 0; k < this->size(); k++) {
        this->at(k) += (s*x.at(k));
        this->bias_[k] += s*x.bias_[k];
      }
      return *this;
    }

    /** Inner product over all weight components and biases. */
    double dot(const Weight& x) const
    {
      double s = 0;
      for (unsigned int k = 0; k < this->size(); k++) {
        s += arma::dot(this->at(k), x.at(k));
        s += this->bias_[k] * x.bias_[k];
      }
      return s;
    }

    /** Element-wise (Hadamard) product with x, in place. */
    void mulElem(const Weight& x)
    {
      for (unsigned int k = 0; k < this->size(); k++) {
        this->at(k).mulElem(x.at(k));
        this->bias_[k] *= x.bias_[k];
      }
    }
  };

  typedef Weight            variable_type;
protected:
  const dataset_type& data_;   // training data (not owned)
  int n_class_;                // number of classes K
  int n_sample_;               // number of training samples
  int n_dim_;                  // feature dimensionality
  Weight w_;                   // current weights/biases
  paramset_type params_;       // optimizer/regularization parameters
  double lambda_;              // L2 regularization strength
public:
  /** Construct from a dataset with K classes and L2 strength lambda.
   *  Weights are initialized from a standard normal; biases stay at zero.
   */
  MultiLogit(const dataset_type& data, int K, double lambda)
    : data_(data), n_class_(K), n_sample_(data.n()), n_dim_(data.dim()),
      w_(n_class_, n_dim_), lambda_(lambda)  // BUGFIX: lambda was previously discarded,
                                             // leaving lambda_ uninitialized
  {
    for (int k = 0; k < K; k++)
      w_[k].randn();
  }

  /** Store the parameter set; a "lambda" entry overrides the constructor value. */
  void setParams(const paramset_type& p)
  {
    params_ = p;
    if (params_.contains("lambda")) lambda_ = params_.get("lambda");
    std::cerr << "logit lambda = " << lambda_ << "\n";
  }

  /** Fresh all-zero variable of the right shape for the optimizer. */
  variable_type initVariable()
  {
    return variable_type(n_class_, data_.dim()); // zeros
  }

  const variable_type& currentVariable() const
  {
    return w_;
  }

  void setCurrentVariable(const variable_type& w)
  {
    w_ = w;
  }

  /** FValue = log-likelihood - lambda * sum_k ||w_k||^2 (biases unregularized). */
  double evaluateFunction(const variable_type& w)
  {
    vec_dvec_type z(n_class_);
    dvec_type s(n_sample_);

    computeLogProbabilities(w, z, s);

    double log_likelihood = 0;
    for (int i = 0; i < n_sample_; i++) {
      log_likelihood += z[(int)data_.y(i)][i];  // log-numerator of the true class
      log_likelihood -= s[i];                   // minus log-partition
    }

    double regularization_term = 0.0;
    for (int k = 0; k < n_class_; k++)
      regularization_term += lambda_ * arma::dot(w[k], w[k]);

    double f = log_likelihood - regularization_term;
    return f;
  }

  /** grad_wk = sum_i [I(y_i == k) x_i - p_ki x_i] - 2 lambda w_k.
   *  NOTE(review): accumulates with += — callers are expected to pass a
   *  zero-initialized grad (as produced by initVariable()).
   */
  void evaluateGradient(const variable_type& w, variable_type& grad)
  {
    vec_dvec_type z(n_class_);
    dvec_type s(n_sample_);

    computeLogProbabilities(w, z, s);
    computeProbabilities(z, s);  // z now holds class probabilities p_ki

    for (int k = 0; k < n_class_; k++) {
      for (int i = 0; i < n_sample_; i++) {
        if ((int)data_.y(i) == k) {
          grad[k] += data_[i];
          grad.bias_[k] += 1.0;
        }
        grad[k] += (-z[k][i])*data_[i];
        grad.bias_[k] -= z[k][i];
      }
      grad[k] += (-2.0*lambda_)*w[k];
    }
  }

  /** Compute both function value and gradient in one pass over the data,
   *  avoiding the redundant log-probability computation of calling
   *  evaluateFunction + evaluateGradient separately.
   *  If diagHessian is non-null it is filled with 1 / diag(Hessian)
   *  (a preconditioner for the optimizer).
   *  NOTE(review): grad is accumulated with += — pass it zero-initialized.
   */
  double evaluateFuncGrad(const variable_type& w, variable_type& grad, variable_type* diagHessian = NULL)
  {
    vec_dvec_type z(n_class_);
    dvec_type s(n_sample_);

    computeLogProbabilities(w, z, s);

    double log_likelihood = 0;
    for (int i = 0; i < n_sample_; i++) {
      log_likelihood += z[(int)data_.y(i)][i];
      log_likelihood -= s[i];
    }

    double regularization_term = 0.0;
    for (int k = 0; k < n_class_; k++)
      regularization_term += lambda_ * arma::dot(w[k], w[k]);

    double f = log_likelihood - regularization_term;

    computeProbabilities(z, s);  // z now holds class probabilities p_ki

    for (int k = 0; k < n_class_; k++) {
      for (int i = 0; i < n_sample_; i++) {
        if ((int)data_.y(i) == k) {
          grad[k] += data_[i];
          grad.bias_[k] += 1.0;
        }
        grad[k] += (-z[k][i])*data_[i];
        grad.bias_[k] -= z[k][i];
      }
      grad[k] += (-2.0*lambda_)*w[k];
    }

    /** Compute 1 / diag(Hessian) */
    if (diagHessian) {
//      diagHessian->ones();   // use this if Hessian diagonal is impossible to compute
                               // otherwise write something like below
      for (int k = 0; k < n_class_; k++) {
        diagHessian->at(k).ones();
        // -1e-5 keeps the diagonal strictly negative so the reciprocal is finite
        diagHessian->at(k) *= (-1e-5)-2.0*lambda_;
        diagHessian->bias_[k] = (-1e-5);
        for (int i = 0; i < n_sample_; i++) {
          diagHessian->at(k) += (-z[k][i]*(1-z[k][i]))*(data_[i]%data_[i]);
          diagHessian->bias_[k] += (-z[k][i]*(1-z[k][i]));
        }
        diagHessian->bias_[k] = 1.0 / diagHessian->bias_[k];
        for (int i = 0; i < n_dim_; i++)
          diagHessian->at(k).ref(i) = 1.0 / diagHessian->at(k).get(i);
      }
    }

    return f;
  }

  /** Compute log(a+b) given log(a) and log(b).
   *  BUGFIX: always exponentiate (smaller - larger) <= 0, so exp cannot
   *  overflow when the two arguments are far apart (the old form overflowed
   *  to +inf when loga >> logb).
   */
  double logsum(double loga, double logb)
  {
    const double hi = (loga > logb) ? loga : logb;
    const double lo = (loga > logb) ? logb : loga;
    return hi+log1p(::exp(lo-hi));
  }

  /** Fill z[k][i] = w_k . x_i + b_k (unnormalized log-probabilities) and
   *  s[i] = log sum_k exp(z[k][i]) (log-partition per sample).
   */
  void computeLogProbabilities(const variable_type& w,
      vec_dvec_type& z, dvec_type& s)
  {
    for (int k = 0; k < n_class_; k++) {
      z[k] = dvec_type(n_sample_);
    }

    for (int i = 0; i < n_sample_; i++) {
      z[0][i] = arma::dot(w[0], data_[i])+w.bias_[0];
      s[i] = z[0][i];
      for (int k = 1; k < n_class_; k++) {
        z[k][i] = arma::dot(w[k], data_[i])+w.bias_[k];
        s[i] = logsum(s[i], z[k][i]);
      }
    }
  }

  /** Convert log-probabilities to probabilities in place: z[k][i] = exp(z - s). */
  void computeProbabilities(vec_dvec_type& z, const dvec_type& s)
  {
    for (int i = 0; i < n_sample_; i++) {
      for (int k = 0; k < n_class_; k++)
        z[k][i] = ::exp(z[k][i]-s[i]);
//      std::cerr << "sum = " << sum << "\n";
//      assert(fabs(sum-1) < 1e-10);
    }
  }

  /** Shannon entropy of the predicted class distribution for sample x. */
  double computeEntropy(const variable_type& w, const vec_type& x)
  {
    dvec_type ps(n_class_);
    double entropy = 0.0;
    ps[0] = arma::dot(w[0], x)+w.bias_[0];
    double s = ps[0];
    for (int k = 1; k < n_class_; k++) {
      ps[k] = arma::dot(w[k], x)+w.bias_[k];
      s = logsum(s, ps[k]); // s = log(sum_k ps_ki)
    }
    for (int k = 0; k < n_class_; k++) {
      ps[k] = ::exp(ps[k]-s);
      // BUGFIX: skip p == 0 (underflow) — the p*log(p) limit is 0, but the
      // literal expression evaluates 0 * -inf = NaN
      if (ps[k] > 0.0)
        entropy += -ps[k]*::log(ps[k]);
    }
    return entropy;
  }

  /** Predicted class = argmax_k (w_k . x + b_k). */
  int computeClass(const variable_type& w, const vec_type& x)
  {
    dvec_type ps(n_class_);
    ps[0] = arma::dot(w[0], x)+w.bias_[0];
    int maxk = 0;
    for (int k = 1; k < n_class_; k++) {
      ps[k] = arma::dot(w[k], x)+w.bias_[k];
      if (ps[k] > ps[maxk]) maxk = k;
    }
    return maxk;
  }

  /** Optimize weight vectors and biases with L-BFGS (maximization).
   *  Iteration count and memory size come from params_ ("iter", "mem"),
   *  defaulting to 10 and 4.
   */
  void optimize()
  {
//    typedef GradientDescent<MultiLogit> Optimization;
//    typedef GradientDescentLineSearch<MultiLogit> Optimization;
    typedef LBFGS<MultiLogit> Optimization;
    Optimization optim(*this);
    paramset_type p;
    p["iter"] = params_.contains("iter") ? params_.get("iter") : 10;
    p["mem"] = params_.contains("mem") ? params_.get("mem") : 4;
    optim.setParams(p);
    optim.optimize(Optimization::MAXIMIZE);
  }

  /** Print result */
  void print()
  {
    w_.print();
  }
};


#endif /* MULTILOGIT_H_ */
