#include "../SparseCoding/sparse.h"



/* Member-function definitions for the Model class that learns the word vectors */
  void Model::PredictVector(const Col& word_vec, const int& word_index,
                     Col* pred_vec) {
    *pred_vec = dict.var * atom[word_index].var;
  }

  void Model::UpdateParams(const int& word_index, const double& rate,
                    const Col& diff_vec, const double& l1_reg,
                    const double& l2_reg) {
    Mat dict_grad = -2 * diff_vec * atom[word_index].var.transpose() +
                    2 * l2_reg * dict.var;
    dict.AdagradUpdate(rate, dict_grad);
    Col atom_elem_grad = -2 * dict.var.transpose() * diff_vec;
    atom[word_index].AdagradUpdateWithL1Reg(rate, atom_elem_grad, l1_reg);
  }

  void Model::WriteVectorsToFile(const string& filename,
                          const mapUnsignedStr& vocab) {
    ofstream outfile(filename);
    if (outfile.is_open()) {
      outfile.precision(3);
      for(unsigned i = 0; i < atom.size(); ++i) {
        auto it = vocab.find(i);
        outfile << it->second << " ";
        for (unsigned j = 0; j < atom[i].var.rows(); ++j)
          outfile << atom[i].var[j] << " ";
        outfile << endl;
      }
      outfile.close();
      cerr << "\nWritten vectors to: " << filename;
    } else {
      cerr << "\nFailed to open " << filename;
    }
  }

  /* Serializes the dictionary matrix to `filename` (3 significant digits).
   * Reports success or failure on cerr; delegates formatting to
   * dict.WriteToFile.
   */
  void Model::WriteDictToFile(const string& filename) {
    ofstream outfile(filename);
    if (!outfile.is_open()) {
      cerr << "\nFailed to open " << filename;
      return;
    }
    outfile.precision(3);
    dict.WriteToFile(outfile);
    outfile.close();
    cerr << "\nWritten atom to: " << filename;
  }


/* Trains sparse codes for every word vector with Adagrad + OpenMP.
 *
 * factor    -- overcompleteness factor: sparse dimension = factor * input dim
 * cores     -- number of OpenMP threads
 * l1_reg    -- L1 penalty on the sparse codes
 * l2_reg    -- L2 penalty on the dictionary
 * word_vecs -- dense input embeddings; assumed non-empty (word_vecs[0] is read)
 *
 * Returns the learned per-word sparse codes (model.atom).
 *
 * Runs at least 20 iterations, then continues (up to 50) while the average
 * reconstruction error is above 0.05 and still changing by more than 0.005.
 */
vector<Param<Col> > Train(const int& factor, const int& cores,
                          const double& l1_reg, const double& l2_reg,
                          const vector<Col>& word_vecs) {
  Model model(factor, word_vecs[0].size(), word_vecs.size());
  double avg_error = 1, prev_avg_err = 0;
  int iter = 0;
  // fabs (not plain abs) so the double difference is never truncated to int.
  while (iter < 20 || (avg_error > 0.05 && iter < 50 &&
         fabs(avg_error - prev_avg_err) > 0.005)) {
    iter += 1;
    cerr << "\nIteration: " << iter << endl;
    unsigned num_words = 0;
    double total_error = 0, atom_l1_norm = 0;
    // Combined parallel-for; the loop variable is private by definition,
    // and the accumulators are protected by the critical section below.
    #pragma omp parallel for num_threads(cores) shared(total_error, atom_l1_norm, num_words)
    for (int word_id = 0; word_id < static_cast<int>(word_vecs.size());
         ++word_id) {
      /* Predict the i-th word and compute error */
      Col pred_vec;
      model.PredictVector(word_vecs[word_id], word_id, &pred_vec);
      Col diff_vec = word_vecs[word_id] - pred_vec;
      double error = diff_vec.squaredNorm();
      #pragma omp critical
      {
        total_error += error;
        num_words += 1;
        atom_l1_norm += model.atom[word_id].var.lpNorm<1>();
        cerr << num_words << "\r";
      }
      model.UpdateParams(word_id, RATE, diff_vec, l1_reg, l2_reg);
    }
    prev_avg_err = avg_error;
    avg_error = total_error / num_words;
    cerr << "\nError per example: " << total_error / num_words;
    cerr << "\nDict L2 norm: " << model.dict.var.lpNorm<2>();
    cerr << "\nAvg Atom L1 norm: " << atom_l1_norm / num_words;
  }
  // BUGFIX: the original `return model.atom;` was INSIDE the while loop, so
  // training always stopped after the first iteration and the convergence
  // test above was dead code; it also left the function with no return on
  // the normal-exit path (undefined behavior). Return once, after convergence.
  return model.atom;
  //model.WriteVectorsToFile(out_file, vocab);
  // model.WriteDictToFile(out_file + "_dict");
}
