#ifndef CSPACE_LEARNING_REASONING_H
#define CSPACE_LEARNING_REASONING_H

#include <iostream>

extern "C"
{
#include <svm_light/svm_common.h>
#include <svm_light/svm_learn.h>
}

#include <cstring>
#include <vector>
#include <algorithm>
#include <limits>

namespace cspace_learning
{


class LinearClassifier_SVM
{
public:
  struct Element
  {
    Element(float* d, float w)
    {
      data = d;
      weight = w;
    }

    float* data;
    float weight;
  };


  LinearClassifier_SVM(unsigned int dim)
  {
    dim_ = dim;
    strcpy(learn_parm_.predfile, "");
    strcpy(learn_parm_.alphafile, "");
    learn_parm_.biased_hyperplane = 1;
    learn_parm_.sharedslack = 0;
    learn_parm_.remove_inconsistent = 0;
    learn_parm_.skip_final_opt_check = 0;
    learn_parm_.svm_maxqpsize = 10;
    learn_parm_.svm_newvarsinqp = 0;
    learn_parm_.svm_iter_to_shrink = -9999;
    learn_parm_.maxiter = 100000;
    learn_parm_.kernel_cache_size = 40;
    learn_parm_.svm_c = 10;
    learn_parm_.eps = 0.1;
    learn_parm_.transduction_posratio = -1.0;
    learn_parm_.svm_costratio = 1.0;
    learn_parm_.svm_costratio_unlab = 1.0;
    learn_parm_.svm_unlabbound = 1E-5;
    learn_parm_.epsilon_crit = 0.001;
    learn_parm_.epsilon_a = 1E-15;
    learn_parm_.compute_loo = 0;
    learn_parm_.rho = 1.0;
    learn_parm_.xa_depth = 0;
    learn_parm_.type = CLASSIFICATION;
    kernel_parm_.kernel_type = LINEAR;
    kernel_parm_.poly_degree = 3;
    kernel_parm_.rbf_gamma = 1.0;
    kernel_parm_.coef_lin = 1;
    kernel_parm_.coef_const = 1;
    strcpy(kernel_parm_.custom, "empty");

    bool res = true;

    if(learn_parm_.svm_iter_to_shrink == -9999)
    {
      if(kernel_parm_.kernel_type == LINEAR)
        learn_parm_.svm_iter_to_shrink = 2;
      else
        learn_parm_.svm_iter_to_shrink = 100;
    }

    if((learn_parm_.skip_final_opt_check)
        && (kernel_parm_.kernel_type == LINEAR))
    {
      std::cout << "It does not make sense to skip the final optimality check for linear kernels." << std::endl;
      learn_parm_.skip_final_opt_check = 0;
    }
    if((learn_parm_.skip_final_opt_check)
        && (learn_parm_.remove_inconsistent))
    {
      std::cout << "It is necessary to do the final optimality check when removing inconsistent examples." << std::endl;
      res = false;
    }
    if((learn_parm_.svm_maxqpsize < 2))
    {
      std::cout << "Maximum size of QP-subproblems not in valid range: " << learn_parm_.svm_maxqpsize << "[2..]" << std::endl;
      res = false;
    }
    if((learn_parm_.svm_maxqpsize < learn_parm_.svm_newvarsinqp))
    {
      std::cout << "Maximum size of QP-subproblems " << learn_parm_.svm_maxqpsize << " must be larger than the number of new variables [" << learn_parm_.svm_newvarsinqp << "] entering the working set in each iteration." << std::endl;
      res = false;
    }
    if(learn_parm_.svm_iter_to_shrink < 1)
    {
      std::cout << "Maximum number of iterations for shrinking not in valid range: " << learn_parm_.svm_iter_to_shrink << " [1,..]." << std::endl;
      res = false;
    }
    if(learn_parm_.svm_c < 0)
    {
      std::cout << "The C parameter must be greater than zero!" << std::endl;
      res = false;
    }
    if(learn_parm_.transduction_posratio > 1)
    {
      std::cout << "The fraction of unlabeled examples to classify as positives must be less than 1.0." << std::endl;
      res = false;
    }
    if(learn_parm_.svm_costratio <= 0)
    {
      std::cout << "The COSTRATIO parameter must be greater than zero!" << std::endl;
      res = false;
    }
    if(learn_parm_.epsilon_crit <= 0)
    {
      std::cout << "The epsilon parameter must be greater than zero!" << std::endl;
      res = false;
    }
    if(learn_parm_.rho < 0)
    {
      std::cout << "The parameter rho for xi/alpha-estimates and leave-one-out pruning must be greater than zero (typically 1.0 or 2.0, see T. Joachims, Estimating the Generalization Performance of an SVM Efficiently, ICML, 2000.)" << std::endl;
      res = false;
    }
    if((learn_parm_.xa_depth < 0) || (learn_parm_.xa_depth > 100))
    {
      std::cout << "The parameter depth for ext. xi/alpha-estimates must be in [0..100] (zero for switching to the conventional xa/estimates described in T. Joachims, Estimating the Generalization Performance of an SVM Efficiently, ICML, 2000.)" << std::endl;
      res = false;
    }

    if(!res)
    {
      std::cout << "Solver initialization fails" << std::endl;
    }


    words = (WORD *)my_malloc(sizeof(WORD) * (dim_ + 1));
    w_ = new float[dim_];
    b_ = 0;

    dim_ = dim;
  }

  ~LinearClassifier_SVM()
  {
    free(words);
    delete [] w_;
  }

  void reset()
  {
    positive_.clear();
    negative_.clear();
  }

  void addItem(float* data, bool is_positive, float weight = 1.0)
  {
    if(is_positive)
      positive_.push_back(Element(data, weight));
    else
      negative_.push_back(Element(data, weight));
  }

  void train()
  {
    KERNEL_CACHE *kernel_cache;
    MODEL* model = (MODEL*)my_malloc(sizeof(MODEL));

    unsigned int nPositiveExamples = positive_.size();
    unsigned int nNegativeExamples = negative_.size();
    unsigned int totdoc = nPositiveExamples + nNegativeExamples;

    int queryid = 0;
    int slackid = 0;
    DOC** docs = (DOC **)my_malloc(sizeof(DOC *) * (nPositiveExamples + nNegativeExamples));
    double* label = (double *)my_malloc(sizeof(double) * (nPositiveExamples + nNegativeExamples));

    for(unsigned int i = 0; i < nPositiveExamples; ++i)
    {
      int example_id = i;
      label[example_id] = 1;

      for(unsigned int j = 0; j < dim_; ++j)
      {
        words[j].wnum = j + 1;
        words[j].weight = positive_[i].data[j];
      }

      words[dim_].wnum = 0;
      words[dim_].weight = dim_;

      docs[example_id] = create_example(example_id, queryid, slackid, positive_[i].weight, create_svector(words, (char*)"", 1.0));
    }

    for(unsigned int i = 0; i < nNegativeExamples; ++i)
    {
      int example_id = i + nPositiveExamples;
      label[example_id] = -1;

      for(unsigned int j = 0; j < dim_; ++j)
      {
        words[j].wnum = j + 1;
        words[j].weight = negative_[i].data[j];
      }

      words[dim_].wnum = 0;
      words[dim_].weight = dim_;

      docs[example_id] = create_example(example_id, queryid, slackid, negative_[i].weight, create_svector(words, (char*)"", 1.0));
    }


    if(kernel_parm_.kernel_type == LINEAR)
      kernel_cache = NULL;
    else
    {
      kernel_cache = kernel_cache_init(totdoc, learn_parm_.kernel_cache_size);
    }

    double* alpha_in = NULL;


    svm_learn_classification(docs, label, totdoc, dim_, &learn_parm_, &kernel_parm_, kernel_cache, model, alpha_in);

    add_weight_vector_to_linear_model(model);
    if(model->lin_weights == NULL)
      std::cout << "null" << std::endl;
    b_ = -model->b;
    for(unsigned int i = 0; i < dim_; ++i)
    {
      w_[i] = model->lin_weights[i];
    }


    /*
    unsigned int false_num = 0;
    float sum_weight = 0;
    float false_weight = 0;
    for(unsigned int i = 0; i < totdoc; ++i)
    {
      float dist = classify_example_linear(model, docs[i]);
      if(dist * label[i] < 0)
      {
        false_num++;
        false_weight += docs[i]->costfactor;
      }
      sum_weight += docs[i]->costfactor;
    }

    std::cout << "false ratio " << false_num / (float)totdoc << std::endl;
    std::cout << "weighted false ratio " << false_weight / sum_weight << std::endl;
    */


    if(kernel_cache)
    {
      kernel_cache_cleanup(kernel_cache);
      kernel_cache = NULL;
    }
    free(alpha_in);
    free_model(model, 0);
    for(unsigned int i = 0; i < totdoc; ++i)
      free_example(docs[i], 1);
    free(docs);
    free(label);

  }

  LEARN_PARM learn_parm_;
  KERNEL_PARM kernel_parm_;

  unsigned int dim_;

  std::vector<Element> positive_;
  std::vector<Element> negative_;

  WORD* words;

  float* w_;
  float b_;
};


/// Picks the best single threshold along a fixed direction ("bisection").
///
/// Each sample is projected onto the ray starting at origin o_ with
/// direction v_ (negative projections are clamped to zero). train() then
/// scans the sorted projections and stores in b_ the threshold with the
/// smallest weighted classification error over both split orientations.
class LinearClassifier_Bisect
{
public:

  /// One projected training sample.
  struct Element
  {
    Element(bool t, float p, float w)
    {
      type = t;
      pos = p;
      weight = w;
    }
    bool type;     ///< true = positive class
    float pos;     ///< projection length along v_
    float weight;  ///< misclassification cost of this sample
  };

  /// Orders elements by projection length (ascending).
  struct Element_cmp
  {
    bool operator () (const Element& a, const Element& b)
    {
      return a.pos < b.pos;
    }
  };

  /// @param dim dimension of the sample/direction/origin vectors
  LinearClassifier_Bisect(unsigned int dim)
  {
    v_ = NULL;
    o_ = NULL;
    dim_ = dim;
    proj_ = new float[dim_];
  }

  ~LinearClassifier_Bisect()
  {
    delete [] proj_;
  }


  /// Drops all stored samples and installs a new projection frame.
  /// @param v direction vector, length dim_ (borrowed, not owned)
  /// @param o origin vector, length dim_ (borrowed, not owned)
  void reset(float* v, float* o)
  {
    elems_.clear();
    v_ = v;
    o_ = o;
  }

  /// Projects one sample onto the v_/o_ ray and stores it for train().
  void addItem(float* data, bool is_positive, float weight = 1.0)
  {
    // proj_len = dot(v_, data - o_), clamped to 0 so samples behind the
    // origin collapse onto it.
    float proj_len = 0;
    for(unsigned int i = 0; i < dim_; ++i)
    {
      proj_[i] = data[i] - o_[i];
      proj_len += v_[i] * proj_[i];
    }

    if(proj_len < 0) proj_len = 0;

    elems_.push_back(Element(is_positive, proj_len, weight));
  }

  /// Finds the threshold (stored in b_) minimizing the weighted error.
  /// NOTE(review): the winning split orientation (`direction`) is computed
  /// but never exposed; callers apparently only consume b_.
  void train()
  {
    if(elems_.empty())
    {
      // Nothing to separate. This also guards against the unsigned
      // underflow of elems_.size() - 1 in the sweep below and the
      // out-of-bounds elems_[0] access when picking the threshold.
      b_ = 0;
      return;
    }

    std::sort(elems_.begin(), elems_.end(), Element_cmp());

    // Start with every sample on the right side of the threshold.
    float sum_f_left = 0;
    float sum_t_left = 0;
    float sum_f_right = 0;
    float sum_t_right = 0;
    for(unsigned int i = 0; i < elems_.size(); ++i)
    {
      if(elems_[i].type) sum_t_right += elems_[i].weight;
      else sum_f_right += elems_[i].weight;
    }

    const unsigned int kNoSplit = (unsigned int)-1;  // threshold before all samples

    bool direction = true;  // true: t | f; false: f | t
    float best_error = 0;
    unsigned int best_i = kNoSplit;
    // Baseline: threshold before the first sample, i.e. classify everything
    // as the majority (by weight) class.
    if(sum_f_right > sum_t_right)
    {
      direction = true;
      best_error = sum_t_right;
    }
    else
    {
      direction = false;
      best_error = sum_f_right;
    }

    // Sweep the threshold between consecutive sorted samples, keeping
    // running per-class weight sums on each side.
    for(unsigned int i = 0; i < elems_.size() - 1; ++i)
    {
      if(elems_[i].type)
      {
        sum_t_left += elems_[i].weight;
        sum_t_right -= elems_[i].weight;
      }
      else
      {
        sum_f_left += elems_[i].weight;
        sum_f_right -= elems_[i].weight;
      }

      float error_true = sum_f_left + sum_t_right;   // positives on the left
      float error_false = sum_f_right + sum_t_left;  // positives on the right

      if(error_true < error_false) // error by setting true direction is smaller
      {
        if(best_error > error_true)
        {
          best_error = error_true;
          direction = true;
          best_i = i;
        }
      }
      else
      {
        if(best_error > error_false)
        {
          best_error = error_false;
          direction = false;
          best_i = i;
        }
      }
    }

    if(best_i == kNoSplit) b_ = elems_[0].pos;
    else if(best_i + 1 == elems_.size()) b_ = elems_.back().pos;
    else
    {
      // Put the threshold halfway between the two straddling samples.
      b_ = (elems_[best_i].pos + elems_[best_i + 1].pos) * 0.5f;
    }
  }

  std::vector<Element> elems_;  ///< projected samples since last reset()
  unsigned int dim_;
  float* v_;     ///< projection direction (borrowed)
  float* o_;     ///< projection origin (borrowed)

  float* proj_;  ///< scratch buffer, length dim_

  float b_;      ///< learned threshold along v_
};




}

#endif
