#ifndef SPARSE_CODING_H_
#define SPARSE_CODING_H_
#include <iostream>
#include <vector>
#include <fstream>
#include <functional>
#include <numeric>
#include <cmath>
#include <cstdlib>
#include <time.h>
#include <string>
#include <tr1/unordered_map>
#include <Eigen/Core>
#include <random>

#include "utils.h"

#define RHO 0.95
#define EPSILON 0.000001
#define RATE 0.05

using namespace std;
using namespace Eigen;

/* Sign function: returns +1 for positive values, -1 for negative
   values, and 0 for zero. */
template <typename T>
int sgn(T val) {
  if (T(0) < val) return 1;
  if (val < T(0)) return -1;
  return 0;
}

/* General parameters of the model.
 *
 * Holds a parameter block `var` (an Eigen matrix or column vector)
 * together with the per-element accumulators needed for Adagrad-style
 * updates: plain, L1-regularized (dual-averaging style), and
 * non-negative L1-regularized. */
template <typename T>
class Param {

 public:
  T var;  // current parameter values

  /* Randomly initializes `var` with small values and zeroes all
     accumulators.  Column vectors (cols == 1) are scaled by
     0.6/sqrt(rows), matrices by 0.6/sqrt(rows + cols).

     Fix: the original fell through after the cols == 1 branch and
     unconditionally re-initialized `var` with the matrix scaling, so
     the vector-specific scaling was dead code (and that branch also
     left _grad_sum/_epsilon untouched).  Now a proper if/else, with
     every accumulator initialized on both paths. */
  void Init(const int& rows, const int& cols) {
    if (cols == 1)
      var = (0.6 / sqrt (rows)) * T::Random(rows, 1);
    else
      var = (0.6 / sqrt (rows + cols)) * T::Random(rows, cols);
    _del_var = T::Zero(rows, cols);
    _del_grad = T::Zero(rows, cols);
    _grad_sum = T::Zero(rows, cols);
    _epsilon = EPSILON * T::Ones(rows, cols);
  }

  /* Plain Adagrad step: var -= rate * grad / sqrt(sum of squared
     gradients).  Fix: `_epsilon` is added under the square root so
     elements whose gradient has always been zero do not produce
     0/0 = NaN (the original divided by sqrt(_del_grad) directly and
     never used _epsilon at all). */
  void AdagradUpdate(const double& rate, const T& grad) {
    _del_grad += grad.cwiseAbs2();
    _grad_sum += grad;
    var -= rate * grad.cwiseQuotient((_del_grad + _epsilon).cwiseSqrt());
  }

  /* Adagrad with L1 regularization (dual-averaging form): each element
     is recomputed from the running gradient sum, soft-thresholded at
     _update_num * l1_reg, which drives small coordinates exactly to
     zero and produces sparse solutions. */
  void AdagradUpdateWithL1Reg(const double& rate, const T& grad,
                              const double& l1_reg) {
    _update_num += 1;
    _del_grad += grad.cwiseAbs2();
    _grad_sum += grad;
    for (int i = 0; i < var.rows(); ++i) {
      for (int j = 0; j < var.cols(); ++j) {
        double diff = abs(_grad_sum(i, j)) - _update_num * l1_reg;
        if (diff <= 0)
          var(i, j) = 0;
        else
          // Safe sqrt: a non-zero gradient sum implies at least one
          // non-zero past gradient, so _del_grad(i, j) > 0 here.
          var(i, j) = -sgn(_grad_sum(i, j)) * rate * diff / sqrt(_del_grad(i, j));
      }
    }
  }

  /* Same as AdagradUpdateWithL1Reg, but additionally clamps each
     resulting element at zero so the parameters stay non-negative. */
  void AdagradUpdateWithL1RegNonNeg(const double& rate, const T& grad,
                                    const double& l1_reg) {
    _update_num += 1;
    _del_grad += grad.cwiseAbs2();
    _grad_sum += grad;
    for (int i = 0; i < var.rows(); ++i) {
      for (int j = 0; j < var.cols(); ++j) {
        double diff = abs(_grad_sum(i, j)) - _update_num * l1_reg;
        if (diff <= 0)
          var(i, j) = 0;
        else {
          double temp = -sgn(_grad_sum(i, j)) * rate * diff /
                        sqrt(_del_grad(i, j));
          var(i, j) = (temp >= 0) ? temp : 0;  // non-negativity clamp
        }
      }
    }
  }

  /* Writes "rows cols v00 v01 ..." (row-major) followed by a newline.
     Loop indices are signed to match Eigen's signed Index type (the
     original compared unsigned counters against them). */
  void WriteToFile(ofstream& out) {
    out << var.rows() << " " << var.cols() << " ";
    for (int i = 0; i < var.rows(); ++i) {
      for (int j = 0; j < var.cols(); ++j)
        out << var(i, j) << " ";
    }
    out << endl;
  }

  /* Reads one line in the WriteToFile format and fills `var`.
     NOTE(review): accumulators are not restored; call Init first if
     updates will continue after loading. */
  void ReadFromFile(ifstream& in) {
    string line;
    getline(in, line);
    vector<string> data = split_line(line, ' ');
    int rows = stoi(data[0]), cols = stoi(data[1]);
    var = T::Zero(rows, cols);
    // size_t index avoids the signed/unsigned comparison of the original.
    for (size_t i = 2; i < data.size(); ++i)
      var((i-2)/cols, (i-2)%cols) = stod(data[i]);
  }

 private:
  T _del_var, _del_grad, _grad_sum;  // updates/gradient memory
  T _epsilon;                        // per-element floor under the sqrt
  int _update_num = 0;               // number of L1 updates applied so far
};




/* Main class definition that learns the word vectors */
class Model {

 public:
  /* The parameters of the model */
  vector<Param<Col> > atom;
  Param<Mat> dict;
  int vec_len, factor;
      
  Model(const int& times, const int& vector_len, const int& vocab_len) {
    vec_len = vector_len;
    factor = times;
    dict.Init(vec_len, factor * vec_len);
    /* Params initialization */
    for (int i = 0; i < vocab_len; ++i) {
      Param<Col> vec;
      vec.Init(factor * vec_len, 1);
      atom.push_back(vec);
    }
  }

  template<typename T> void NonLinearity(T* vec) { ElemwiseHardTanh(vec); }

  void PredictVector(const Col& word_vec, const int& word_index,
                     Col* pred_vec) ;

  void UpdateParams(const int& word_index, const double& rate,
                    const Col& diff_vec, const double& l1_reg,
                    const double& l2_reg) ;

  void WriteVectorsToFile(const string& filename,
                          const mapUnsignedStr& vocab);

  void WriteDictToFile(const string& filename);

};

/* Trains the sparse-coding model (implemented elsewhere) on the given
   word vectors and returns the learned per-word sparse codes. */
vector<Param<Col> >  Train( const int& factor,const int& cores, const double& l1_reg, 
            const double& l2_reg,const vector<Col>& word_vecs);


#endif


