#pragma once
#include "Perceptron.h"

// Regularized Dual Averaging algorithm
// Dual Averaging Methods for Regularized Stochastic Learning and Online Optimization
// Lin Xiao (2010)

// Needs REGULARIZER.SetBeta(rate, t) returning beta_t/t, and
// REGULARIZER.Solve(g_w, g_b, beta_t_t, w, b)
// which solves min_w { C (<g_w,w>+<g_b,b>) + Omega(w) + beta_t_t h(w) }
// and writes the minimizer into (w, b);
// g_w, g_b are running averages of the subgradients produced by LOSS::grad()

// Regularized Dual Averaging perceptron (Xiao 2010).
template<typename LOSS, typename REGULARIZER>
class RDAPerceptron : public Perceptron
{
protected:
  double m_C;                       // loss/regularizer trade-off constant C
  arma::sp_mat m_AverageWeightGrad; // running average of weight subgradients (bar g_t)
  double m_AverageBiasGrad;         // running average of bias subgradients
protected:
  // Protected default ctor for derived classes; initialize scalar members so
  // the object is never in an indeterminate state.
  RDAPerceptron() : m_C(0), m_AverageBiasGrad(0) {}
public:
  // rate: learning-rate parameter forwarded to Perceptron (read back as
  //       m_Rate when computing beta_t); C: regularization trade-off.
  RDAPerceptron(double rate, double C)
    : Perceptron(rate), m_C(C), m_AverageBiasGrad(0)
  {
  }

  virtual ~RDAPerceptron(void)
  {
  }

  // Lazily size the weight and average-gradient matrices to match the first
  // input seen; abort on any later dimension mismatch.
  void CheckDimension(const arma::sp_mat& x)
  {
    if (m_Weight.n_elem == 0) { // first example: allocate zeroed state
      m_AverageWeightGrad = m_Weight = arma::sp_mat(x.n_rows, x.n_cols);
      m_AverageBiasGrad = m_Bias = 0;
    }
    else if (m_Weight.n_rows != x.n_rows || m_Weight.n_cols != x.n_cols) {
      std::cerr << "Dimension mismatch" << std::endl;
      exit(1);
    }
  }

  // One RDA step: predict with the current model, fold the new subgradient
  // into the running average, then let the regularizer solve the proximal
  // problem for the next (m_Weight, m_Bias).
  // Returns the prediction made BEFORE the update (progressive validation).
  virtual double Learn(const Example& example, int iter)
  {
    const arma::sp_mat& x = example.GetInput();
    CheckDimension(x);

    double predict_label = Predict(x);

    arma::sp_mat lossWeightGrad;
    double lossBiasGrad;

    LOSS loss(example);
    REGULARIZER regularizer;

    loss.grad(m_Weight, m_Bias, lossWeightGrad, lossBiasGrad);

    // Running average: bar g_{t+1} = (t/(t+1)) * bar g_t + (1/(t+1)) * g_t.
    // BUGFIX: the blending must be done in floating point -- the original
    // integer expression (iter)/(iter+1) evaluates to 0 for every iter >= 0,
    // which silently discarded the entire gradient history.
    const double t     = static_cast<double>(iter);
    const double keep  = t / (t + 1.0);
    const double blend = 1.0 / (t + 1.0);
    m_AverageBiasGrad   = m_AverageBiasGrad   * keep + lossBiasGrad   * blend;
    m_AverageWeightGrad = m_AverageWeightGrad * keep + lossWeightGrad * blend;

    double beta_t_t = regularizer.SetBeta(m_Rate, iter);
    regularizer.Solve(m_AverageWeightGrad, m_AverageBiasGrad, beta_t_t,
                      m_Weight, m_Bias);

    return predict_label;
  }
};

