/*
 * l2regSVM_impl.h
 *
 *  Created on: Feb 28, 2011
 *      Author: tqlong
 */

#ifndef L2REGSVM_IMPL_H_
#define L2REGSVM_IMPL_H_

#include "learning_tpl.h"
#include <algorithm>
#include <limits>

#define DOUBLE_INFINITY std::numeric_limits<double>::infinity()

/**
 * Configure hyper-parameters from a flat parameter vector.
 * Entries beyond the vector's size leave the corresponding member untouched.
 *   params[0] -> alpha_ (learning-rate-like constant)
 *   params[1] -> C_     (SVM cost / box-constraint bound)
 *   params[2] -> maxiter_ (outer iteration cap, truncated to int)
 */
template <typename T>
void L1LossL2Reg<T>::setParameters(const std::vector<double>& params)
{
  switch (params.size()) {
    default:                                // 3 or more entries supplied
    case 3: maxiter_ = (int)params[2];      // fall through
    case 2: C_ = params[1];                 // fall through
    case 1: alpha_ = params[0];             // fall through
    case 0: break;
  }
}

/** Solving w \in Rd
 *      min_w 1/2 dot(w,w) + C \sum xi_i
 *      s.t. xi_i >= 0 && xi_i >= 1-y_i dot(w,x_i)
 *  in dual form a \in Rn
 *      min_a 1/2 dot(a,Qa) - dot(1,a)
 *      s.t. 0 <= a <= C
 *      where Q_ij = y_i y_j dot(x_i,x_j)
 *
 *  Runs dual coordinate-descent sweeps until maxiter_ is reached or the
 *  dual variables stop moving (infinity norm of the change ~ 0).
 *  Returns the training misclassification count from the last sweep.
 */
template <typename T>
double L1LossL2Reg<T>::update(const dataset_type& data)
{
  // Fresh start: zero dual variables and zero primal weight vector.
  a_ = vec_type(data.n());
  a_.zeros();
  w_ = vec_type(data.dim());
  w_.zeros();
  prepareQD(data);

  std::cout << "maxiter = " << maxiter_ << "\n";

  double error = 1;
  for (int iter = 1; iter <= maxiter_; ++iter) {
    //data.shuffle();                                    // shuffle data for faster convergence
    const vec_type a_prev(a_);                           // snapshot to measure progress
    iterate(data);                                       // one coordinate-descent sweep

    // Count training misclassifications under the current w_.
    error = 0;
    for (int i = 0; i < data.n(); ++i) {
      const double pred = predict(data.x(i));
      if (pred * data.y(i) <= 0) error += 1;
      //std::cerr << "err " << i << " " << data.y(i) << " " << pred << " "<< (pred * data.y(i) <= 0) << "\n";
    }

    const double a_change = arma::norm(a_prev - a_, "inf");
    std::cout << "iter = " << iter << " error = " << error << " change = " << a_change << "\n";
    std::cerr << "iter = " << iter << " error = " << error
        << " change = " << a_change << "\n";

    // Converged: the dual solution barely moved this sweep.
    if (Compare(1e-5).equal(a_change, 0.0)) {
      std::cout << "Changes too small\n";
      break;
    }
  }
  return error;
}

/**
 * One dual coordinate-descent sweep over all training examples, in a
 * freshly randomized order. Updates a_ (dual variables) and keeps
 * w_ = sum_i y_i a_i x_i in sync incrementally.
 */
template <typename T>
void L1LossL2Reg<T>::iterate(const dataset_type& data)
{
  Compare comp;
  const int n = data.n();

  // Randomize the visiting order (Fisher-Yates shuffle via ::rand()).
  std::vector<int> order(n);
  for (int i = 0; i < n; ++i) order[i] = i;
  for (int j = 0; j < n; ++j) {
    int pick = ::rand() % (n - j) + j;
    std::swap(order[pick], order[j]);
  }

  for (int k = 0; k < n; ++k) {
    const int i = order[k];

    // Partial derivative of the dual objective w.r.t. a_i.
    double G = data.y(i)*arma::dot(w_, data.x(i)) - 1 + Dii_.get(i)*a_.get(i);

    // Project the gradient onto the feasible box [0, U_]: at a boundary,
    // only the component pointing into the box counts.
    double PG;
    if (comp.equal(a_.get(i), 0.0))
      PG = std::min(G, 0.0);
    else if (comp.equal(a_.get(i), U_))
      PG = std::max(G, 0.0);
    else
      PG = G;

    // Skip coordinates that already satisfy the optimality condition.
    if (!comp.equal(PG, 0.0)) {
      const double a_old = a_.get(i);
      // Single-variable Newton step, clipped back into [0, U_].
      a_.ref(i) = std::min(std::max(a_.get(i) - G/Qii_.get(i), 0.0), U_);
      // Maintain w_ incrementally from the change in a_i.
      w_ += (data.y(i) * (a_.get(i) - a_old)) * data.x(i);
    }
  }
}

/**
 * Classify a single example by the sign of its linear score dot(w_, x).
 * Caches the +/-1 label in predict_ and returns it (a score of exactly
 * 0 maps to -1).
 */
template <typename T>
double L1LossL2Reg<T>::predict(const vec_type& x)
{
  const double score = arma::dot(w_, x);
  if (score > 0)
    this->predict_ = 1;
  else
    this->predict_ = -1;
  return this->predict_;
}

/**
 * Precompute the diagonal data used by the coordinate-descent solver for
 * the L1-loss / L2-regularization case:
 *   D_ii = 0          (no dual diagonal term for L1 loss)
 *   U_   = C          (dual box constraint 0 <= a_i <= C)
 *   Q_ii = dot(x_i, x_i) + D_ii
 */
template <typename T>
void L1LossL2Reg<T>::prepareQD(const dataset_type& data)
{
  std::cout << "prepare Q and D for L1Loss L2Reg\n";
  Qii_ = vec_type(data.n());
  Qii_.zeros();
  Dii_ = vec_type(data.n());
  Dii_.zeros();
  U_ = C_;
  std::cerr << "U = " << U_ << "\n";
  // BUGFIX: Q_ii must use the i-th sample, dot(x_i, x_i), matching the
  // per-row access data.x(i) used in iterate(). The previous code called
  // arma::dot(data.x(), data.x()), giving every coordinate the same
  // (wrong) curvature and corrupting the Newton step sizes.
  for (int i = 0; i < data.n(); i++)
    Qii_.ref(i) = arma::dot(data.x(i), data.x(i)) + Dii_.get(i);
}

/**
 * Precompute the diagonal data used by the coordinate-descent solver for
 * the L2-loss / L2-regularization case:
 *   D_ii = 1/(2C)     (dual diagonal term induced by the squared hinge loss)
 *   U_   = +infinity  (the dual variables are only lower-bounded at 0)
 *   Q_ii = dot(x_i, x_i) + D_ii
 */
template <typename T>
void L2LossL2Reg<T>::prepareQD(const dataset_type& data)
{
  std::cout << "prepare Q and D for L2Loss L2Reg\n";
  __Base::Qii_ = vec_type(data.n());
  __Base::Qii_.zeros();
  __Base::Dii_ = vec_type(data.n());
  __Base::Dii_.ones();
  __Base::Dii_ *= 0.5/__Base::C_;
  __Base::U_ = DOUBLE_INFINITY;
  // BUGFIX: Q_ii must use the i-th sample, dot(x_i, x_i), matching the
  // per-row access data.x(i) used elsewhere. The previous code called
  // arma::dot(data.x(), data.x()), giving every coordinate the same
  // (wrong) curvature and corrupting the Newton step sizes.
  for (int i = 0; i < data.n(); i++)
    __Base::Qii_.ref(i) = arma::dot(data.x(i), data.x(i)) + __Base::Dii_.get(i);
}

#endif /* L2REGSVM_IMPL_H_ */
