/*********************************************************************
 * Software License Agreement (Modified BSD License)
 *
 * Copyright (c) 2009-2010, Willow Garage, Daniel Munoz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the copyright holders' organizations nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

#include <m3n/regressors/regressor_wrapper.h>

#include <algorithm>
#include <random>

// Minimum absolute target magnitude for a training sample to be kept:
// train() drops any (feature vector, start index) pair whose accumulated
// |target| does not exceed this threshold.
const double RegressorWrapper::SMALLEST_ABS_TARGET = 0.05; //0.1 - 1e-5;

using namespace std;

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
RegressorWrapper::RegressorWrapper(RegressorWrapper::algorithm_t algorithm_type)
{
  // The algorithm type and stacked feature dimension are permanent
  // for the lifetime of this object: clear() intentionally leaves
  // them untouched.
  m_trained = false;
  m_stacked_feature_dim = 0;
  m_algorithm_type = algorithm_type;
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
void RegressorWrapper::clear()
{
  // Reset all accumulated training state.  The feature dimension
  // (m_stacked_feature_dim) and algorithm type (m_algorithm_type) are
  // fixed at construction and deliberately survive a clear().
  m_seen_samples.clear();
  m_ordered_samples.clear();
  m_interm_feature_targets.clear();
  m_trained = false;

  // Let the concrete regressor subclass wipe its own state as well
  doClear();
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
int RegressorWrapper::addTrainingSample(const vector<double>& feature_vals,
                                        const unsigned int start_idx,
                                        const double target)
{
  // -------------------------------------------
  // Verify not already trained
  if (m_trained)
  {
    cerr << "Cannot add training sample because already trained" << endl;
    return -1;
  }

  // -------------------------------------------
  // Verify the feature dimension is within bounds.
  // (Rearranged so that start_idx + length cannot wrap around on
  //  unsigned overflow and silently pass the check.)
  unsigned int length = feature_vals.size();
  if (length > m_stacked_feature_dim || start_idx > m_stacked_feature_dim - length)
  {
    cerr << "Training sample location (" << start_idx << "," << length
        << ") exceeds total feature dimension " << m_stacked_feature_dim << endl;
    return -1;
  }

  // -------------------------------------------
  // Accumulate the target for this (feature vector, start index) pair.
  // map::operator[] value-initializes the inner map and the double to
  // 0.0 on first access, so one expression replaces the explicit
  // count()/insert sequence and avoids four redundant map lookups.
  m_interm_feature_targets[&feature_vals][start_idx] += target;

  // Record the sequence in which the feature was originally received
  // (This was done to ensure repeatability across different machines).
  // set::insert returns whether the element was newly inserted, so a
  // single lookup replaces the count()-then-insert pair.
  if (m_seen_samples.insert(&feature_vals).second)
  {
    m_ordered_samples.push_back(&feature_vals);
  }

  return 0;
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
int RegressorWrapper::train()
{
  // -------------------------------------------
  // Verify not already trained
  if (m_trained)
  {
    cerr << "Error: already trained, cannot re-train" << endl;
    return -1;
  }

  // -------------------------------------------
  // Vectorize the features to train on and their target values
  // Ensure the target values are non-zero
  vector<const vector<double>*> interm_feature_vals;
  vector<unsigned int> interm_start_idx;
  vector<double> interm_target;
  unsigned int nbr_added_samples = m_ordered_samples.size();
  for (unsigned int sample_idx = 0 ; sample_idx < nbr_added_samples ; sample_idx++)
  {
    const vector<double>* curr_feature_vals = m_ordered_samples[sample_idx];

    const map<unsigned int, double>& curr_start_idx_target_map =
        m_interm_feature_targets.find(curr_feature_vals)->second;

    // For current feature vector, find its different starting indices and target value to boost to
    for (map<unsigned int, double>::const_iterator iter_idx_target_map =
        curr_start_idx_target_map.begin() ; iter_idx_target_map != curr_start_idx_target_map.end() ; iter_idx_target_map++)
    {
      unsigned int curr_start_idx = iter_idx_target_map->first;
      double curr_target = iter_idx_target_map->second;

      // Ignore samples that do not meet the minimum target value requirement
      if (fabs(curr_target) > RegressorWrapper::SMALLEST_ABS_TARGET)
      {
        interm_feature_vals.push_back(curr_feature_vals);
        interm_start_idx.push_back(curr_start_idx);
        interm_target.push_back(curr_target);
      }
    }
  }
  m_interm_feature_targets.clear();
  m_ordered_samples.clear();
  m_seen_samples.clear();

  // -------------------------------------------
  // Create vector of indices to be used for shuffling order
  unsigned int nbr_samples = interm_feature_vals.size();
  vector<unsigned int> random_indices(nbr_samples);
  for (unsigned int i = 0 ; i < nbr_samples ; i++)
  {
    random_indices[i] = i;
  }
  random_shuffle(random_indices.begin(), random_indices.end());

  // -------------------------------------------
  // Randomize order of values
  vector<const vector<double>*> rnd_interm_feature_vals(nbr_samples);
  vector<unsigned int> rnd_interm_start_idx(nbr_samples);
  vector<double> rnd_interm_target(nbr_samples);
  for (unsigned int i = 0 ; i < nbr_samples ; i++)
  {
    unsigned int rnd_idx = random_indices[i];
    rnd_interm_feature_vals[i] = interm_feature_vals[rnd_idx];
    rnd_interm_start_idx[i] = interm_start_idx[rnd_idx];
    rnd_interm_target[i] = interm_target[rnd_idx];
  }
  interm_feature_vals.clear();
  interm_start_idx.clear();
  interm_target.clear();

  int ret_val = doTrain(rnd_interm_feature_vals, rnd_interm_start_idx, rnd_interm_target);
  return ret_val;
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
int RegressorWrapper::predict(const vector<double>& feature_vals,
                              const unsigned int start_idx,
                              double& predicted_val) const
{
  // Guard: must have been trained, and the sample must fit entirely
  // inside the stacked feature vector
  const unsigned int nbr_vals = feature_vals.size();
  const bool within_bounds = (start_idx + nbr_vals <= m_stacked_feature_dim);
  if (!(m_trained && within_bounds))
  {
    cerr << "Not initialized, or exceeded feature dimensions" << endl;
    return -1;
  }

  // Delegate to the concrete regressor implementation
  return doPredict(feature_vals, start_idx, predicted_val);
}

// --------------------------------------------------------------
/*! See function definition */
// --------------------------------------------------------------
int RegressorWrapper::copyTrainingSamples(const RegressorWrapper& source)
{
  map<const vector<double>*, map<unsigned int, double> >::const_iterator iter_features;
  for (iter_features = source.m_interm_feature_targets.begin(); iter_features
      != source.m_interm_feature_targets.end() ; iter_features++)
  {
    const vector<double>* features = iter_features->first;
    const map<unsigned int, double>& start_targets = iter_features->second;

    map<unsigned int, double>::const_iterator iter_start_targets;
    for (iter_start_targets = start_targets.begin(); iter_start_targets != start_targets.end() ; iter_start_targets++)
    {
      if (addTrainingSample(*features, iter_start_targets->first, iter_start_targets->second) < 0)
      {
        cerr << "Failed to copy all training samples" << endl;
        return -1;
      }
    }
  }
  return 0;
}
