/*********************************************************************
 * Software License Agreement (Modified BSD License)
 *
 * Copyright (c) 2010, Daniel Munoz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the copyright holders' organizations nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

#include <m3n/regressors/linear_regression.h>

#include <stdexcept>

// import most common Eigen types 
USING_PART_OF_NAMESPACE_EIGEN

using namespace std;

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
LinearRegression::LinearRegression()
  : RegressorWrapper(RegressorWrapper::LINEAR_REGRESSION),
    m_regularization(0.0) // default: ordinary (unregularized) least squares
{
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
/*!
 * Constructs a linear regressor with the given Tikhonov regularization
 * coefficient.
 *
 * \param regularization Non-negative ridge coefficient added to the
 *                       diagonal of X'X during training.
 *
 * \throws std::invalid_argument if regularization is negative.
 */
LinearRegression::LinearRegression(double regularization) :
  RegressorWrapper(RegressorWrapper::LINEAR_REGRESSION)
{
  if (regularization < 0.0)
  {
    cerr << "LinearRegression regularization must be non-negative" << endl;
    // Bug fix: a bare `throw;` outside of a catch handler has no exception
    // to re-throw and calls std::terminate(); throw a real exception instead.
    throw std::invalid_argument("LinearRegression regularization must be non-negative");
  }
  m_regularization = regularization;
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
RegressorWrapper* LinearRegression::paramClone() const
{
  // Produce a fresh, untrained regressor that shares only this
  // instance's hyper-parameter (the regularization coefficient).
  LinearRegression* untrained_copy = new LinearRegression(m_regularization);
  return untrained_copy;
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
void LinearRegression::doClear()
{
  // Reset every learned weight to zero.  The regularization coefficient
  // is a training hyper-parameter, not a learned quantity, so it is
  // deliberately left untouched.
  m_weights.clear();
  m_weights.resize(m_stacked_feature_dim, 0.0);
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
int LinearRegression::saveToFile(const std::string directory,
                                 const string basename)
{
  // -------------------------------------------
  // Verify regression tree is trainedtrained
  if (!m_trained)
  {
    cerr << "LinearRegression::saveToFile Not trained yet" << endl;
    return -1;
  }

  // -------------------------------------------
  // Create filename: <basename>.linear_regression
  string out_filename = directory;
  out_filename.append("/");
  out_filename.append(basename);
  out_filename.append(".linear_regression");
  ofstream outfile(out_filename.c_str());
  if (outfile.is_open() == false)
  {
    cerr << "LinearRegression::saveToFile Could not save to: " << basename << endl;
    return -1;
  }

  // -------------------------------------------
  // File format:
  // algorithm_type_
  // stacked_feature_dim_
  //
  // m_lr_params.m_regularization
  //
  // m_weights

  // -------------------------------------------
  outfile << m_algorithm_type << endl;
  outfile << m_stacked_feature_dim << endl;
  outfile << endl;
  outfile << m_regularization << endl;
  outfile << endl;
  for (unsigned int i = 0 ; i < m_stacked_feature_dim ; i++)
  {
    outfile << m_weights[i] << " ";
  }
  outfile << endl;

  outfile.close();
  return 0;
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
/*!
 * Loads a previously saved model from
 * <directory>/<basename>.linear_regression, replacing any current state.
 *
 * \param directory Source directory (no trailing slash needed).
 * \param basename  File stem; the extension is appended automatically.
 *
 * \return 0 on success, -1 if the file cannot be opened, holds a
 *         different algorithm type, or is truncated/malformed.
 */
int LinearRegression::loadFromFile(const std::string directory,
                                   const string basename)
{
  clear();

  // -------------------------------------------
  // Create filename: <basename>.linear_regression
  string in_filename = directory;
  in_filename.append("/");
  in_filename.append(basename);
  in_filename.append(".linear_regression");
  ifstream infile(in_filename.c_str());
  if (infile.is_open() == false)
  {
    cerr << "LinearRegression::loadFromFile Could not load from: " << basename << endl;
    return -1;
  }

  // -------------------------------------------
  // File format:
  // algorithm_type_
  // stacked_feature_dim_
  //
  //
  // m_lr_params.m_regularization
  //
  // m_weights

  // -------------------------------------------
  // Verify we are loading a model with the expected algorithm type
  int tempo_algo_type;
  infile >> tempo_algo_type;
  if (tempo_algo_type != LINEAR_REGRESSION)
  {
    cerr << "LinearRegression::loadFromFile Algorithm mismatch: " << tempo_algo_type << endl;
    return -1;
  }
  infile >> m_stacked_feature_dim;

  // Fields
  infile >> m_regularization;
  m_weights.assign(m_stacked_feature_dim, 0.0);
  for (unsigned int i = 0 ; i < m_stacked_feature_dim ; i++)
  {
    infile >> m_weights[i];
  }

  // Bug fix: previously a truncated or malformed file left garbage in the
  // fields above but still set m_trained = true and returned success.
  if (infile.fail())
  {
    cerr << "LinearRegression::loadFromFile Truncated or malformed file: " << in_filename << endl;
    infile.close();
    return -1;
  }
  infile.close();

  m_trained = true;
  return 0;
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
/*!
 * Trains an independent least-squares fit for each block of the stacked
 * weight vector.  Samples are grouped by their start index into
 * m_weights; each group is solved via the normal equations
 * (X'X + lambda*I) w = X'y, where lambda is m_regularization.
 *
 * \param interm_feature_vals Per-sample feature vectors (borrowed pointers).
 * \param interm_start_idx    Per-sample offset into m_weights where that
 *                            sample's feature block begins.
 * \param interm_target       Per-sample regression targets.
 *
 * \return 0 (always; inconsistent feature dimensions abort the process).
 */
int LinearRegression::doTrain(const vector<const vector<double>*>& interm_feature_vals,
                              const vector<unsigned int>& interm_start_idx,
                              const vector<double>& interm_target)
{
  // Start from all-zero weights of the full stacked dimension.
  m_weights.assign(m_stacked_feature_dim, 0.0);
  unsigned int nbr_total_samples = interm_target.size();

  // Create mapping from start_idx -> [input indices]
  map<unsigned int, vector<unsigned int> > start2input_indices;
  for (unsigned int i = 0 ; i < nbr_total_samples ; i++)
  {
    unsigned int start_idx = interm_start_idx[i];
    if (start2input_indices.count(start_idx) == 0)
    {
      start2input_indices[start_idx] = vector<unsigned int> ();
    }
    start2input_indices.find(start_idx)->second.push_back(i);
  }

  // Vectorize the map so the blocks can be indexed by a plain int,
  // as required by the OpenMP parallel-for below.
  vector<unsigned int> unique_start_indices;
  vector<vector<unsigned int> > ordered_input_indices;
  map<unsigned int, vector<unsigned int> >::const_iterator iter;
  for (iter = start2input_indices.begin(); iter != start2input_indices.end() ; iter++)
  {
    unique_start_indices.push_back(iter->first);
    ordered_input_indices.push_back(iter->second);
  }

  // Perform least squares for each block.  Each iteration writes only to
  // the disjoint m_weights range [curr_start_idx, curr_start_idx +
  // curr_feature_dim), so iterations are safe to run in parallel
  // (assuming blocks do not overlap -- not verified here).
  int nbr_unique_start_indices = unique_start_indices.size();
#pragma omp parallel for
  for (int i = 0 ; i < nbr_unique_start_indices ; i++)
  {
    unsigned int curr_start_idx = unique_start_indices[i];
    const vector<unsigned int>& curr_input_indices = ordered_input_indices[i];

    // Determine the number of samples with curr_start_idx
    unsigned int curr_nbr_samples = curr_input_indices.size();
    // Determine the feature dimension of all these samples
    // (taken from the first sample; the rest are checked below).
    int curr_feature_dim = (interm_feature_vals[curr_input_indices.front()])->size();

    /// ----------------
    // Initialize X'*X and X'y
    MatrixXd XtX(curr_feature_dim, curr_feature_dim);
    MatrixXd Xty(curr_feature_dim, 1);
    for (int row = 0 ; row < curr_feature_dim ; row++)
    {
      for (int col = 0 ; col < curr_feature_dim ; col++)
      {
        XtX(row, col) = 0.0;
        // Tikhonov regularization: seed the diagonal with lambda so the
        // accumulated matrix becomes X'X + lambda*I.
        if (row == col)
        {
          XtX(row, row) = m_regularization;
        }
      }
      Xty(row, 0) = 0.0;
    }

    /// ----------------
    // Populate X' * X and X'y by accumulating each sample's contribution.
    for (unsigned int sample_idx = 0 ; sample_idx < curr_nbr_samples ; sample_idx++)
    {
      unsigned int curr_input_idx = curr_input_indices[sample_idx];
      double curr_target = interm_target[curr_input_idx];
      const vector<double>& curr_feature = *(interm_feature_vals[curr_input_idx]);
      if (static_cast<int> (curr_feature.size()) != curr_feature_dim)
      {
        cerr << "Inconsistent feature dimensions" << endl;
        abort();
      }

      for (int row = 0 ; row < curr_feature_dim ; row++)
      {
        // Compute outer product (feature * feature')
        for (int col = 0 ; col < curr_feature_dim ; col++)
        {
          XtX(row, col) = XtX(row, col) + (curr_feature[row] * curr_feature[col]);
        }

        // Compute inner product (feature' * target)
        Xty(row, 0) = Xty(row, 0) + (curr_feature[row] * curr_target);
      }
    }

    // Solve w = (X'X + lambda*I)^-1 * X'y.
    // NOTE(review): computeInverse is the Eigen 2 API; if (X'X + lambda*I)
    // is singular (possible when m_regularization == 0) the inverse is
    // ill-defined -- no singularity check is performed here.
    MatrixXd invXtX;
    XtX.computeInverse(&invXtX);

    MatrixXd w = invXtX * Xty;
    // Copy the block solution into its slice of the stacked weight vector.
    for (int row = 0 ; row < curr_feature_dim ; row++)
    {
      m_weights[curr_start_idx + row] = w(row, 0);
    }
  }

  m_trained = true;
  return 0;
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
int LinearRegression::doPredict(const vector<double>& feature_vals,
                                const unsigned int start_idx,
                                double& predicted_val) const
{
  // Prediction is the dot product of the feature vector with the slice
  // of the stacked weight vector that begins at start_idx.
  const unsigned int nbr_features = feature_vals.size();

  double dot_product = 0.0;
  for (unsigned int offset = 0 ; offset < nbr_features ; offset++)
  {
    dot_product += feature_vals[offset] * m_weights[start_idx + offset];
  }
  predicted_val = dot_product;

  return 0;
}
