/*
 * entropykmean.h
 *
 *  Created on: Mar 14, 2011
 *      Author: tqlong
 */

#ifndef ENTROPYKMEAN_H_
#define ENTROPYKMEAN_H_

#include <iostream>
#include <vector>
#include <cmath>
#include <cassert>

#include <boost/math/special_functions/log1p.hpp>

#include "common/lbfgs.h"
#include "kmean.h"
//#include "multilogit_log.h"
#include "datatransform.h"
#include "common/lognumber.h"

/** Functional of p(y|x; W)
 *  F(p(y|x;W)) = H{p(y|W)} - E_x H{p(y|x;W)} - lambda tr[W'W]
 *  NOTE: the lambda tr[W'W] regularization term is currently disabled
 *  (commented out) in the implementation below.
 */
template<typename VEC, typename DATASET>
class EntropyKMean
{
public:
  typedef VEC                      vec_type;
  typedef DATASET                  dataset_type;
  typedef std::vector<lognum>      dvec_type;
  typedef std::vector<dvec_type>   vec_dvec_type;
public:
  /** Optimizing variable: center vector for each class and
   *  corresponding biases. Implement
   *    operator *= (double), add(s, w), dot(w)
   *    typedef Center variable_type
   *  for use in optimization algorithms (see graddescent.h)
   */
  class Center : public std::vector<VEC> {
    typedef std::vector<VEC> __Base;
  public:
    std::vector<double> bias_;

    void print() const
    {
      for (unsigned int k = 0; k < this->size(); k++) {
        std::cout << "class " << k << " w = \n"
            << this->at(k) << "b = " << bias_[k] << "\n";
      }
    }
    Center(int K, int D) : __Base(K), bias_(K, 0.0)
    {
      for (int i = 0; i < K; i++) {
        this->at(i) = VEC(D);
        this->at(i).zeros();
      }
    }

    Center& ones()
    {
      for (unsigned int i = 0; i < this->size(); i++) {
        this->at(i).ones();
        this->bias_[i] = 1.0;
      }
      return *this;
    }

    Center& operator*=(double s)
    {
      for (unsigned int k = 0; k < this->size(); k++) {
        this->at(k) *= s;
        this->bias_[k] *= s;
      }
      return *this;
    }

    Center operator - (const Center& x) const
    {
      Center ret(*this);
      for (unsigned int k = 0; k < this->size(); k++) {
        ret.at(k) -= x.at(k);
        ret.bias_[k] -= x.bias_[k];
      }
      return ret;
    }

    Center& add(double s, const Center& x)
    {
      for (unsigned int k = 0; k < this->size(); k++) {
        this->at(k) += (s*x.at(k));
        this->bias_[k] += s*x.bias_[k];
      }
      return *this;
    }

    double dot(const Center& x) const
    {
      double s = 0;
      for (unsigned int k = 0; k < this->size(); k++) {
        s += arma::dot(this->at(k), x.at(k));
        s += this->bias_[k] * x.bias_[k];
      }
      return s;
    }

    void mulElem(const Center& x)
    {
      for (unsigned int k = 0; k < this->size(); k++) {
        this->at(k).mulElem(x.at(k));
        this->bias_[k] *= x.bias_[k];
      }
    }
  };
  typedef Center            variable_type;

  /** A KMean derived class, use to initialize first iteration
   *  Implement n(), dim(), operator[](int) similar to a Dataset
   *  in order to use MultiLogit later (see multilogit.h)
   */
  class LabeledData : public KMean<VEC, DATASET> {
  public:
    typedef KMean<VEC, DATASET> __Base;
    typedef typename __Base::vec_type vec_type;
  public:
    LabeledData(const dataset_type& data, int K) : __Base(data, K) {}
    int dim() const { return this->data_.dim(); }
    int n() const { return this->data_.n(); }
    const vec_type& operator[](int i) const { return this->data_[i]; }
    double y(int i) const { return __Base::y_[i]; }
  };

//  typedef MultiLogit<VEC, LabeledData> MultiLogitFromKMean;
protected:
  const dataset_type& data_;                   // the data
  int n_class_;                                // number of classes
  int n_sample_;                               // number of samples
  int n_dim_;                                  // dimensionality
  Center w_;                                   // weight vectors and biases
  paramset_type params_;                       // algorithm's parameters
  double lambda_;                              // regularization parameter
public:
  EntropyKMean(const dataset_type& data, int K)
    : data_(data), n_class_(K), n_sample_(data.n()), n_dim_(data.dim()),
      w_(n_class_, n_dim_)
  {
    for (int k = 0; k < K; k++)
      w_[k].randn();
  }

  void setParams(const paramset_type& p)
  {
    params_ = p;
    if (params_.contains("lambda")) lambda_ = params_.get("lambda");
  }

  /** init a variable for use in optimization algorithm */
  variable_type initVariable()
  {
    return variable_type(n_class_, data_.dim()); // zeros
  }

  const variable_type& currentVariable() const
  {
    return w_;
  }

  void setCurrentVariable(const variable_type& w)
  {
    w_ = w;
  }

  /** function value */
  double evaluateFunction(const variable_type& w)
  {
    vec_dvec_type ps(n_class_);
    dvec_type pl(n_class_);

    computeProbabilities(w, ps, pl);

    lognum label_entropy = 0.0;
    for (int k = 0; k < n_class_; k++)
      label_entropy += -pl[k]*pl[k].log();           // H{p(y|W)}


    lognum sample_entropy = 0.0;
    for (int k = 0; k < n_class_; k++)
      for (int i = 0; i < n_sample_; i++)
        sample_entropy += -ps[k][i]*ps[k][i].log();  // H{p(y|x_i;W)}
    sample_entropy /= n_sample_;

//    lognum regularization_term = 0.0;                 // lambda tr[W'W]
//    for (int k = 0; k < n_class_; k++)
//      regularization_term += lambda_ * arma::dot(w[k], w[k]);

    lognum f = label_entropy - sample_entropy;// - regularization_term;
    return f.toDouble();
  }

  void evaluateGradient(const variable_type& w, variable_type& grad)
  {
    vec_dvec_type ps(n_class_);
    dvec_type pl(n_class_);
//    dvec_type common_term(n_sample_);

    computeProbabilities(w, ps, pl);
//    computeCommon(ps, pl, common_term);

    for (int u = 0; u < n_class_; u++) {
      for (int k = 0; k < n_class_; k++) {
        for (int i = 0; i < n_sample_; i++) {
          lognum c = ((u==k?1.0:0)-ps[u][i]);
          lognum coeff = c * ps[k][i] * (1.0/n_sample_) * (ps[k][i]/pl[k]).log();
          grad[u] += coeff.toDouble() * (data_[i] - w[u]);
          grad.bias_[u] += coeff.toDouble();
        }
//        grad[k] += (-2.0*lambda_)*w[k];
      }
    }
  }

  /** Compute both function value and gradient
   *  Reduce time on redundancy computation
   */
  double evaluateFuncGrad(const variable_type& w, variable_type& grad, variable_type* diagHessian = NULL)
  {
    vec_dvec_type ps(n_class_);
    dvec_type pl(n_class_);
//    dvec_type common_term(n_sample_);

    computeProbabilities(w, ps, pl);

    lognum label_entropy = 0.0;
    for (int k = 0; k < n_class_; k++)
      label_entropy += -pl[k]*pl[k].log();           // H{p(y|W)}


    lognum sample_entropy = 0.0;
    for (int k = 0; k < n_class_; k++)
      for (int i = 0; i < n_sample_; i++)
        sample_entropy += -ps[k][i]*ps[k][i].log();  // H{p(y|x_i;W)}
    sample_entropy /= n_sample_;

//    lognum regularization_term = 0.0;                 // lambda tr[W'W]
//    for (int k = 0; k < n_class_; k++)
//      regularization_term += lambda_ * arma::dot(w[k], w[k]);

    lognum f = label_entropy - sample_entropy;// - regularization_term;

//    computeCommon(ps, pl, common_term);

    for (int u = 0; u < n_class_; u++) {
      for (int k = 0; k < n_class_; k++) {
        for (int i = 0; i < n_sample_; i++) {
          lognum c = ((u==k?1.0:0)-ps[u][i]);
          lognum coeff = c * ps[k][i] * (1.0/n_sample_) * (ps[k][i]/pl[k]).log();
          grad[u] += coeff.toDouble() * (data_[i] - w[u]);
          grad.bias_[u] += coeff.toDouble();
        }
//        grad[k] += (-2.0*lambda_)*w[k];
      }
    }

    if (diagHessian) {
      diagHessian->ones();
      (*diagHessian) *= -1.0;
    }

    return f.toDouble();
  }

  double distance(const vec_type& x, const vec_type& y)
  {
    return arma::norm(x-y,2);
  }
  double sqr(double x)
  {
    return x*x;
  }

  /** z_ki = exp{-0.5d(xi, ck)^2 + bk}  s_i = sum_k z_ki
   *  ps_ki = z_ki / s_i, pl_k = 1/N sum_i ps_ki
   */
  void computeProbabilities(const variable_type& c,
      vec_dvec_type& ps, dvec_type& pl)
  {
    for (int k = 0; k < n_class_; k++) {
      ps[k] = dvec_type(n_sample_);
      pl[k] = 0.0;
    }

    for (int i = 0; i < n_sample_; i++) {                  // work in log scale here
      ps[0][i] = lognum(-0.5*sqr(distance(c[0],data_[i]))+c.bias_[0], 1);
      lognum s = ps[0][i];
      for (int k = 1; k < n_class_; k++) {
        ps[k][i] = lognum(-0.5*sqr(distance(c[k],data_[i]))+c.bias_[k], 1);
        s += ps[k][i];
      }

//      if (::fabs(s) > 1e-16)
        for (int k = 0; k < n_class_; k++) {
          ps[k][i] /= s;
          pl[k] += ps[k][i]/n_sample_;
        }
//      else
//        for (int k = 0; k < n_class_; k++) {
//          ps[k][i] = 1.0/k;
//          pl[k] += ps[k][i]/n_sample_;
//        }
    }
  }

  /** Compute H{p(y|x;W)} given x, W */
  double computeEntropy(const variable_type& c, const vec_type& x)
  {
    dvec_type ps(n_class_);
    lognum entropy = 0.0;
    lognum s = 0;
    for (int k = 0; k < n_class_; k++) {
      ps[k] = lognum(-0.5*sqr(distance(c[k],x))+c.bias_[k], 1);
      s += ps[k]; // s = log(sum_k ps_ki)
    }
    if (s < 1e-16) {
      int maxk = 0;
      for (int k = 1; k < n_class_; k++)
        if (ps[k] > ps[maxk]) maxk = k;
      for (int k = 1; k < n_class_; k++)
        ps[k] = (k == maxk) ? 1.0 : 0.0;
    }
    else
      for (int k = 0; k < n_class_; k++) {
        ps[k] /= s;
        entropy += -ps[k]*ps[k].log();
      }
    return entropy.toDouble();
  }

  /** Compute argmax_y p(y|x, W) */
  int computeClass(const variable_type& c, const vec_type& x)
  {
    dvec_type ps(n_class_);
    ps[0] = lognum(-0.5*sqr(distance(c[0],x))+c.bias_[0], 1);
    int maxk = 0;
    for (int k = 1; k < n_class_; k++) {
      ps[k] = lognum(-0.5*sqr(distance(c[k],x))+c.bias_[k], 1);
      if (ps[k] > ps[maxk]) maxk = k;
    }
    return maxk;
  }

  void computeCommon(const vec_dvec_type& ps, const dvec_type& pl,
      dvec_type& common_term)
  {
    for (int i = 0; i < n_sample_; i++) {
      common_term[i] = 0.0;
      for (int k = 0; k < n_class_; k++)
        common_term[i] += ps[k][i]*(ps[k][i]/pl[k]).log();
    }
  }

  /** Optimize weight vectors and biases */
  void optimize()
  {
    LabeledData kmean(data_, n_class_);                              // initialize using kmean
    kmean.optimize();

//    MultiLogitFromKMean multilogit(kmean, n_class_);        // multilogit --> initial weight
//    paramset_type p;
//    p["lambda"] = lambda_;
//    p["iter"] = 50;
//    p["mem"] = params_.contains("mem") ? params_.get("mem") : 4;
//    multilogit.setParams(p);
//    multilogit.optimize();

    for (int k = 0; k < n_class_; k++) {
      this->w_[k] = kmean.center(k); //multilogit.currentVariable()[k];
      this->w_.bias_[k] = 0; //multilogit.currentVariable().bias_[k];
    }

//    typedef GradientDescent<InfoFunctional> Optimization1;
//    typedef GradientDescentLineSearch<InfoFunctional> Optimization;  // optimization of the infomation functional
    typedef LBFGS<EntropyKMean> Optimization;  // optimization of the infomation functional
    Optimization lbfgs(*this);
    std::cerr << "rim lambda = " << lambda_ << "\n";
    lbfgs.setParams(params_);
    lbfgs.optimize(Optimization::MAXIMIZE);                             // maximize it

//    Optimization1 gd(*this);
//    gd.setParams(params_);
//    gd.optimize(Optimization1::MAXIMIZE);
  }

  /** Print result */
  void print()
  {
    w_.print();
  }
};


#endif /* ENTROPYKMEAN_H_ */
