#pragma once

#include <algorithm>
#include <cmath>

#include <armadillo>

// L2 (ridge) regularizer: R(w) = 0.5 * ||w||_2^2.
// The bias term is deliberately excluded from regularization: it contributes
// neither to the value nor to the gradient.
class L2Regularizer
{
public:
  /// Regularizer value: 0.5 * ||weight||_2^2 (bias intentionally ignored).
  double operator() (const arma::sp_mat& weight, double bias)
  {
    return 0.5 * arma::dot(weight, weight);
  }

  /// Gradient of 0.5*||w||^2 is w itself; bias gradient is 0 because the
  /// bias is not regularized.
  void grad(const arma::sp_mat& weight, double bias, arma::sp_mat& weightGrad, double& biasGrad )
  {
    weightGrad = weight;
    biasGrad = 0;
  }

  // for RDAPerceptron: step-size sequence beta_t = beta / sqrt(t+1).
  // Clamp the iteration count to >= 1 so we never divide by sqrt(0) or take
  // the root of a negative number (original guard preserved, but now with an
  // explicit std::sqrt on a double instead of unqualified sqrt on an int).
  double SetBeta(double beta, int iter)
  {
    const int t = std::max(iter + 1, 1);
    return beta / std::sqrt(static_cast<double>(t));
  }

  // RDA proximal step. NOTE(review): body is empty in the original —
  // presumably the plain dual-average update performed by the caller is
  // already the closed-form solution for the L2 case, so no correction is
  // needed here. Confirm against RDAPerceptron before relying on this.
  void Solve(const arma::sp_mat& g_w, double g_b, double beta_t_t, arma::sp_mat& weight, double& bias)
  {

  }
};

// Threshold below which a stored weight is treated as zero when forming the
// L1 subgradient. (Kept as a macro for compatibility with existing users.)
#define L1_GRAD_TOLERANCE 1e-16
// L1 (lasso) regularizer: R(w) = ||w||_1.
// The bias term is deliberately excluded from regularization.
class L1Regularizer
{
public:
  /// Regularizer value: ||weight||_1 (bias intentionally ignored).
  double operator() (const arma::sp_mat& weight, double bias)
  {
    return arma::norm(weight, 1);
  }

  /// Subgradient of ||w||_1: elementwise sign(w), with entries whose
  /// magnitude is within L1_GRAD_TOLERANCE mapped to 0. Bias gradient is 0
  /// because the bias is not regularized.
  ///
  /// Fix: the previous version copied weight into weightGrad and then wrote
  /// through weightGrad's own iterators while traversing it. Assigning 0
  /// through a sp_mat iterator erases that stored element, which changes the
  /// nonzero structure mid-iteration and invalidates the iterator. We now
  /// read the const input and write into a freshly zeroed output instead.
  void grad(const arma::sp_mat& weight, double bias, arma::sp_mat& weightGrad, double& biasGrad )
  {
    weightGrad.zeros(weight.n_rows, weight.n_cols);
    for (arma::sp_mat::const_iterator iter = weight.begin(); iter != weight.end(); ++iter) {
      const double val = *iter;
      if (val > L1_GRAD_TOLERANCE)       weightGrad(iter.row(), iter.col()) =  1;
      else if (val < -L1_GRAD_TOLERANCE) weightGrad(iter.row(), iter.col()) = -1;
      // |val| <= tolerance: leave the entry as an implicit zero.
    }
    biasGrad = 0;
  }
};

