/*=====================================================================*
 *                   Copyright (C) 2011 Paul Mineiro                   *
 * All rights reserved.                                                *
 *                                                                     *
 * Redistribution and use in source and binary forms, with             *
 * or without modification, are permitted provided that the            *
 * following conditions are met:                                       *
 *                                                                     *
 *     * Redistributions of source code must retain the                *
 *     above copyright notice, this list of conditions and             *
 *     the following disclaimer.                                       *
 *                                                                     *
 *     * Redistributions in binary form must reproduce the             *
 *     above copyright notice, this list of conditions and             *
 *     the following disclaimer in the documentation and/or            *
 *     other materials provided with the distribution.                 *
 *                                                                     *
 *     * Neither the name of Paul Mineiro nor the names                *
 *     of other contributors may be used to endorse or promote         *
 *     products derived from this software without specific            *
 *     prior written permission.                                       *
 *                                                                     *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND              *
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,         *
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES               *
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE             *
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER               *
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,                 *
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES            *
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE           *
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR                *
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF          *
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT           *
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY              *
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE             *
 * POSSIBILITY OF SUCH DAMAGE.                                         *
 *                                                                     *
 * Contact: Paul Mineiro <paul@mineiro.com>                            *
 *=====================================================================*/

#include "commandline.hh"
#include "learningrate.hh"
#include "loss.hh"
#include "mode.hh"
#include "parse.hh"
#include "regressor.hh"
#include "regressorworker.hh"

#include <boost/format.hpp>
#include <boost/none.hpp>
#include <boost/optional.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/positional_options.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/unordered_map.hpp>

#include <cerrno>
#include <climits>
#include <cstdlib>
#include <exception>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>

namespace
{
using namespace flassol;

// Sparse map from class index to float cost.
//
// Invariant: a value of exactly 0 is never stored -- assigning 0 through
// operator[] ERASES the entry instead, so empty() means "every cost is
// (implicitly) zero".  The tournament in input_loop zeroes out eliminated
// classes and uses empty() as its early-exit condition.
class SparseVector
  {
    private:
      boost::unordered_map<unsigned int, float> map;

      // Proxy handed out by operator[].  Reading converts to the stored
      // value (or 0 when absent); writing maintains the no-stored-zeros
      // invariant above.  Impl is the (possibly const) underlying map
      // type; only members actually used are instantiated, so the const
      // variant never instantiates the mutating operator=.
      template<typename Impl>
      class ElementTemplate
        {
          private:
            Impl&         map;
            unsigned int  x;

          public:
            ElementTemplate (Impl&        _map,
                             unsigned int _x)
              : map (_map),
                x (_x)
              {
              }

            // Store y at index x; storing 0 removes the entry entirely.
            ElementTemplate&
            operator= (float y)
              {
                if (y == 0)
                  {
                    typename Impl::iterator z = map.find (x);

                    if (z != map.end ())
                      {
                        map.erase (z);
                      }
                  }
                else
                  {
                    map[x] = y;
                  }

                return *this;
              }

            // Read access: absent entries read as 0.
            operator float () const
              {
                typename Impl::const_iterator z = map.find (x);

                return (z == map.end ()) ? 0 : z->second;
              }
        };

      typedef ElementTemplate<boost::unordered_map<unsigned int, float> >
              Element;
      typedef ElementTemplate<const boost::unordered_map<unsigned int, float> >
              ConstElement;

    public:
      // Mutable access proxy (read or write).
      Element
      operator[] (const unsigned int x)
        {
          return Element (map, x);
        }

      // Read-only access proxy.
      const ConstElement
      operator[] (const unsigned int x) const
        {
          return ConstElement (map, x);
        }

      // Iteration covers stored (nonzero) entries only.
      typedef boost::unordered_map<unsigned int, float>::const_iterator 
              const_iterator;
      
      const_iterator
      begin () const 
        {
          return map.begin ();
        }

      const_iterator
      end () const 
        {
          return map.end ();
        }

      // True when no nonzero entry is stored.  (Fix: const-qualified so
      // that const SparseVectors can be queried.)
      bool 
      empty () const
        {
          return map.empty ();
        }
  };

// A parsed cost-sensitive multiclass example.
struct CSMCExample
  {
    boost::optional<SparseVector>               cost;  // per-class costs; absent for unlabeled (test) examples
    char*                                       tag;   // optional user tag, echoed into the prediction output
    boost::shared_ptr<IndexedFeatureSet>        f;     // hashed feature set for the example
  };

// boost::none signals an example that failed to parse.
typedef boost::optional<CSMCExample> MaybeCSMCExample;

// Element-wise accumulate b into a over their common prefix; any excess
// elements of the longer vector are left untouched.
void
operator+= (std::vector<float>&         a,
            const std::vector<float>&   b)
  {
    const std::vector<float>::size_type limit =
      std::min (a.size (), b.size ());

    for (std::vector<float>::size_type i = 0; i != limit; ++i)
      {
        a[i] += b[i];
      }
  }

// Running counters for progress reporting.  The cur_* fields and
// bad_lineno are snapshots of the most recent example; everything else
// accumulates.  Sized by k, the number of classes.
struct Stats
  {
    boost::optional<uint64_t>     bad_lineno;       // offset of the latest bad line, if any
    uint64_t                      badlines;         // unparseable examples seen
    uint64_t                      goodlines;        // parseable examples seen
    unsigned int                  cur_predict;      // latest prediction
    boost::optional<unsigned int> cur_label;        // latest label, when labeled
    uint64_t                      cur_features;     // latest example's feature count
    uint64_t                      example_count;    // examples processed
    float                         loss;             // accumulated cost of predictions
    float                         zo_loss;          // accumulated 0/1 loss
    std::vector<float>            constant_loss;    // cost each constant predictor would incur
    std::vector<float>            constant_zo_loss; // 0/1 loss each constant predictor would incur

    explicit
    Stats (unsigned int k) : bad_lineno (boost::none),
                             badlines (0),
                             goodlines (0),
                             cur_predict (0),
                             cur_label (boost::none),
                             cur_features (0),
                             example_count (0),
                             loss (0),
                             zo_loss (0),
                             constant_loss (k, 0),
                             constant_zo_loss (k, 0)
      {
      }

    // Fold another window of statistics into this one: counters and
    // losses accumulate, while the per-example snapshot fields are
    // overwritten by the right-hand side (the more recent window).
    Stats&
    operator+= (const Stats& other)
      {
        bad_lineno = other.bad_lineno;
        badlines += other.badlines;
        goodlines += other.goodlines;
        cur_predict = other.cur_predict;
        cur_label = other.cur_label;
        cur_features = other.cur_features;
        example_count += other.example_count;
        loss += other.loss;
        zo_loss += other.zo_loss;
        constant_loss += other.constant_loss;
        constant_zo_loss += other.constant_zo_loss;

        return *this;
      }

    // Return every field to its freshly-constructed state; the vector
    // sizes (= k) are preserved.
    void
    reset ()
      {
        *this = Stats (static_cast<unsigned int> (constant_loss.size ()));
      }
  };

// Product of a value with itself; cheaper and clearer than std::pow (x, 2).
inline float
square (float value)
{
  return value * value;
}

// Thrown by the label/cost parsing helpers on malformed input; caught in
// from_general_example, which converts it into boost::none (the example
// is then counted as a bad line by the caller).
struct BadParse
{
};

// Parse s as an unsigned integer (base auto-detected by strtoul: decimal,
// 0x-hex, or leading-0 octal).  A NULL pointer yields 0.  Throws BadParse
// on trailing garbage or on values that do not fit in an unsigned int.
//
// Fix: the strtoul result was previously narrowed to unsigned int without
// any range check, and ERANGE was ignored -- oversized (or negative,
// which strtoul wraps) inputs silently truncated instead of failing.
unsigned int
parse_maybe_uint (const char* s)
{
  unsigned int rv = 0;

  if (s)
    {
      char* endptr;

      errno = 0;
      unsigned long wide = strtoul (s, &endptr, 0);

      if (*endptr != '\0' || errno == ERANGE || wide > UINT_MAX)
        {
          throw BadParse ();
        }

      rv = static_cast<unsigned int> (wide);
    }

  return rv;
}

// Parse a comma-separated sparse cost vector of the form
//
//   value(:index)?(,value(:index)?)*
//
// An omitted index defaults to one past the previously used index,
// starting at next_index.  Every index must lie in [0, k).  Throws
// BadParse on any syntax error or out-of-range index.
//
// NOTE(review): values are NEGATED on storage (rv[index] = -value), so a
// larger input value becomes a LOWER internal cost and downstream argmin
// favors it.  This suggests the input encodes rewards/importances rather
// than raw costs -- confirm against the input format documentation.
SparseVector
parse_sparse_vector (const char*  s,
                     unsigned int next_index,
                     unsigned int k)
{
  SparseVector rv;

  // value(:index)?,value(:index)?...
  
  while (*s != '\0')
    {
      char* endptr;
      float value = strtof (s, &endptr);
      unsigned int index = next_index;

      // an explicit ":index" overrides the running default index
      if (*endptr == ':')
        {
          index = strtoul (endptr + 1, &endptr, 0);
        }

      // a comma separates entries; anything else (except end) is an error
      if (*endptr == ',')
        {
          ++endptr;
        }
      else if (*endptr != '\0')
        {
          throw BadParse ();
        }

      if (index >= k)
        {
          throw BadParse ();
        }

      s = endptr;
      rv[index] = -value;  // negated -- see NOTE(review) above
      next_index = index + 1;
    }

  return rv;
}

// Build the cost vector for a plain (non cost-sensitive) label: class
// `index` gets cost -1 and every other class implicitly costs 0, so an
// argmin over the result recovers the label.  Throws BadParse when the
// label is out of range.
SparseVector
delta_sparse_vector (unsigned int index,
                     unsigned int k)
{
  if (index >= k)
    {
      throw BadParse ();
    }

  SparseVector unit;
  unit[index] = -1;

  return unit;
}

// Convert a generically-parsed example into a cost-sensitive multiclass
// example.  The cost vector comes from the importance field when present
// (a full cost vector, with implicit indices starting at the parsed
// label), else from the label alone (delta cost), else stays absent
// (unlabeled/test example).  Returns boost::none when the input is
// absent or when any field fails to parse.
MaybeCSMCExample
from_general_example (const boost::optional<GeneralExample>& e,
                      unsigned int                           k)
{
  if (! e)
    {
      return boost::none;
    }

  try
    {
      CSMCExample out;

      if (e->importance)
        {
          out.cost = parse_sparse_vector (e->importance,
                                          parse_maybe_uint (e->label),
                                          k);
        }
      else if (e->label)
        {
          out.cost = delta_sparse_vector (parse_maybe_uint (e->label), k);
        }

      out.tag = e->tag;
      out.f = e->f;

      return out;
    }
  catch (BadParse&)
    {
      // malformed label or cost spec: report as an unusable example
      return boost::none;
    }
}

// Write one progress row to `report`: cumulative / since-last average
// loss and 0/1 loss ("undefined" until at least one example is counted),
// the cumulative example counter, and the latest example's label
// ("unknown" when unlabeled), prediction, and feature count.  Column
// widths line up with the header printed by input_loop.  Ends with a
// bad-line summary when parse failures occurred since the last call.
void
do_report (std::ostream&                  report,
           const Stats&                   since_last,
           const Stats&                   cumulative)
{
  report << boost::format ("%-9s %-9s %-9s %-9s %9llu %9s %9f %8llu")
    % ((cumulative.example_count > 0)
         ? (boost::format ("%-9.6f")
              % (cumulative.loss / cumulative.example_count)
           ).str ()
         : "undefined"
      )
    % ((since_last.example_count > 0)
         ? (boost::format ("%-9.6f")
              % (since_last.loss / since_last.example_count)
           ).str ()
         : "undefined"
      )
    % ((cumulative.example_count > 0)
         ? (boost::format ("%-9.6f")
              % (cumulative.zo_loss / cumulative.example_count)
           ).str ()
         : "undefined"
      )
    % ((since_last.example_count > 0)
         ? (boost::format ("%-9.6f")
              % (since_last.zo_loss / since_last.example_count)
           ).str ()
         : "undefined"
      )
    % cumulative.example_count
    % (cumulative.cur_label
          ? (boost::format ("%9f") % *cumulative.cur_label).str ()
          : "unknown")
    % cumulative.cur_predict
    % cumulative.cur_features
  << std::endl;

  if (since_last.bad_lineno)
    {
      report << since_last.badlines
             << " bad lines encountered since last, latest at offset " 
             << *since_last.bad_lineno 
             << std::endl;
    }
}

// Lift argmin over boost::optional: an absent cost vector yields an
// absent label.
template<typename T>
boost::optional<unsigned int>
argmin (const boost::optional<T>& x,
        unsigned int              k)
{
  if (! x)
    {
      return boost::none;
    }

  return argmin (*x, k);
}

// Index of the smallest of x[0] .. x[k - 1]; the earliest index wins
// ties.  Requires k >= 1 (returns 0 when k is 0 or 1).
template<typename T>
unsigned int
argmin (const T&     x,
        unsigned int k)
{
  unsigned int best = 0;

  for (unsigned int i = 1; i < k; ++i)
    {
      if (x[i] < x[best])
        {
          best = i;
        }
    }

  return best;
}

// Return the class with the largest score.  Class 0 is the implicit
// baseline with score 0; classes n >= 1 carry their score in x[n - 1].p.
//
// Fix: the running maximum was previously declared unsigned int, which
// truncated each float score toward zero on assignment (e.g. 0.9 -> 0);
// any sequence of positive sub-1 scores then selected the LAST positive
// class rather than the one with the largest score.  Track the maximum
// as float.
unsigned int
argmax_zerozero (const std::vector<Regressor::EstimateResult>& x,
                 unsigned int                                  k)
{
  unsigned int rv = 0;
  float max = 0;

  for (unsigned int n = 1; n < k; ++n)
    {
      if (x[n - 1].p > max)
        {
          rv = n;
          max = x[n - 1].p;
        }
    }

  return rv;
}

// Main driving loop.
//
// Streams examples from `in`: each line is parsed, scored by the
// regressor (via worker_pool), optionally echoed to `predict`, and --
// when the example carries a cost vector and ! test_only -- used for a
// filter-tree tournament update.  Progress rows go to `report` on a
// roughly power-of-two example schedule, followed by a final summary
// (constant-predictor baselines and a confusion matrix) at end of
// input.  Always returns 0.
int
input_loop (std::istream&                       in,
            boost::optional<std::ostream&>      report,
            boost::optional<std::ofstream&>     predict,
            bool                                test_only,
            Regressor&                          r,
            std::auto_ptr<Loss>&                loss,
            std::auto_ptr<LearningRate>&        eta,
            float                               lambda,
            bool                                adaptive,
            WorkerPool&                         worker_pool)
{
  using std::endl;

  // Parser configuration and class count come from the model header.
  enum StandardParse::hash_option_value ho = 
    static_cast<StandardParse::hash_option_value> 
      (r.get_header ().hash_option);
  StandardParse parse = StandardParse ().hash_option (ho);
  unsigned int k = r.get_header ().num_classes;
  Stats cumulative (k);
  Stats since_last (k);
  std::vector<std::vector<unsigned int> > 
    confusion_matrix (k, std::vector<unsigned int> (k, 0));

  if (report)
    {
       *report <<
"cumul     since     cumul     since       example  current   current  current"
           << endl <<
"loss      last      0/1 loss  last 0/1    counter    label   predict features"
           << endl;
    }

  for (uint64_t lineno = 0, output_number = 0; in.good (); ++lineno)
    {
      // ~1MB stack buffer holding one input line.
      char buf[1024768];
      boost::optional<GeneralExample> example = parse (in, buf, sizeof (buf));

      if (in.good ())
        {
          MaybeCSMCExample csmc (from_general_example (example, k));

          ++since_last.example_count;

          if (! csmc)
            {
              // unparseable line: remember where, keep going
              since_last.bad_lineno = lineno;
              ++since_last.badlines;
            }
          else
            {
              ++since_last.goodlines;

              // Score the k - 1 non-baseline classes in one pass; class
              // 0 is the implicit baseline with score 0 (see
              // argmax_zerozero).
              std::vector<Regressor::EstimateResult> result (k - 1);
              worker_pool.estimate (r, csmc->f.get (), result);

              since_last.cur_predict = argmax_zerozero (result, k);
              since_last.cur_label = argmin (csmc->cost, k);
              since_last.cur_features = result[0].n;

              if (predict)
                {
                  // prediction output: predicted class, the k - 1 raw
                  // scores, then the example's tag when present
                  *predict << since_last.cur_predict;

                  for (unsigned int l = 0; l + 1 < k; ++l)
                    {
                      *predict << "\t" << result[l].p;
                    }

                  if (csmc->tag)
                    {
                      *predict << "\t" << csmc->tag;
                    }

                  *predict << endl;
                }

              if (csmc->cost)
                {
                  // Labeled example: account losses, then (maybe) learn.
                  SparseVector& cost (*csmc->cost);

                  since_last.loss += cost[since_last.cur_predict];
                  since_last.zo_loss += 
                    (since_last.cur_predict == *since_last.cur_label) ? 0 : 1;
                  ++confusion_matrix[since_last.cur_predict][*since_last.cur_label];

                  // What every constant predictor would have incurred.
                  for (unsigned int l = 0; l < k; ++l)
                    {
                      since_last.constant_loss[l] += cost[l];
                      since_last.constant_zo_loss[l] += 
                        (l == *since_last.cur_label) ? 0 : 1;
                    }
                  
                  // I decided to go with re-estimation 
                  // on each level of the tree.  This seems 
                  // the safest option with respect to 
                  // invariant updates, doubly so for dyadic models.

                  if (result[0].xnorm > 0 && ! test_only)
                    {
                      // NOTE(review): variable-length array -- a
                      // GCC/Clang extension, not standard C++.
                      unsigned int winner[k];
                      double t = cumulative.example_count +
                                 since_last.example_count;
                      float thiseta = eta->eta (t);

                      // Round 0: every class is its own survivor.
                      for (unsigned int l = 0; l < k; ++l)
                        {
                          winner[l] = l;
                        }

                      // Filter-tree tournament: each round pairs up the
                      // l survivors, advances one class from each pair,
                      // and leaves ceil (l / 2) survivors.  Stops early
                      // once every remaining stored cost is zero.
                      for (unsigned int l = k; 
                           l > 1 && ! cost.empty (); 
                           l = ((l >> 1) + (l % 2)))
                        {
                          std::vector<Regressor::UpdateInfo> info (k - 1);

                          // re-estimate after the first round (see the
                          // re-estimation comment above)
                          if (l != k)
                            {
                              worker_pool.estimate (r, csmc->f.get (), result);
                            }

                          for (unsigned int n = 0; 
                               2 * n + 1 < l && ! cost.empty (); 
                               ++n)
                            {
                              unsigned int left = winner[2 * n];
                              unsigned int right = winner[2 * n + 1];

                              // class 0 always scores 0 (baseline)
                              float leftp = 
                                (left == 0) ? 0 : result[left - 1].p;
                              float rightp = 
                                (right == 0) ? 0 : result[right - 1].p;

                              // |cost difference| weights how much this
                              // pairwise comparison matters
                              float thisimp = 
                                fabsf (cost[left] - cost[right]);

                              if (thisimp > 0)
                                {
                                  float leftwdotx =
                                    (left == 0) ? 0 : result[left - 1].wdotx;
                                  float rightwdotx =
                                    (right == 0) ? 0 : result[right - 1].wdotx;
                                  float leftadotb =
                                    (left == 0) ? 0 : result[left - 1].adotb;
                                  float rightadotb =
                                    (right == 0) ? 0 : result[right - 1].adotb;
                                  float leftanormplusbnorm =
                                    (left == 0) 
                                      ? 0 : result[left - 1].anormplusbnorm;
                                  float rightanormplusbnorm =
                                    (right == 0) 
                                      ? 0 : result[right - 1].anormplusbnorm;

                                  // the pairwise regression problem is
                                  // the difference of the two classes'
                                  // per-class quantities
                                  float thisp = leftp - rightp;
                                  float thiswdotx = leftwdotx - rightwdotx;
                                  float thisadotb = leftadotb - rightadotb;
                                  float thisanormplusbnorm = 
                                    leftanormplusbnorm + rightanormplusbnorm;

                                  // target: +1 when left has the lower
                                  // stored cost, else -1
                                  float thislabel = 
                                    (cost[left] < cost[right])
                                      ? 1 : -1;

                                  float gsq = 
                                    square (  loss->dldp (thisp, thislabel)
                                            * thisimp);
                                  
                                  // ugh ... this is expensive ... 
                                  // there has to be a way to compute
                                  // this in one pass

                                  if (adaptive)
                                    {
                                      std::vector<Regressor::UpdateInfo> tmp (1);

                                      tmp[0].gsq = gsq;
                                      worker_pool.adaptive_norm (r,
                                                                 csmc->f.get (),
                                                                 tmp);

                                      if (left > 0)
                                        {
                                          info[left - 1].adaptivexnorm =
                                            tmp[0].adaptivexnorm;
                                        }

                                      if (right > 0)
                                        {
                                          info[right - 1].adaptivexnorm = 
                                            tmp[0].adaptivexnorm;
                                        }
                                    }

                                  std::pair<float, float> update = 
                                    loss->invariant_update 
                                      (thisp,
                                       thiswdotx,
                                       thisadotb,
                                       thislabel,
                                       thisimp * thiseta,
                                       thisanormplusbnorm,
                                       result[0].xnorm,
                                       adaptive,
                                       info[std::max (left, right) - 1].adaptivexnorm,
                                       lambda);

                                  // symmetric update: left moves by +sh,
                                  // right by -sh
                                  if (left > 0)
                                    {
                                      info[left - 1].gsq = gsq;
                                      info[left - 1].sh = update.first;
                                      info[left - 1].decay = update.second;
                                    }

                                  if (right > 0)
                                    {
                                      info[right - 1].gsq = gsq;
                                      info[right - 1].sh = -update.first;
                                      info[right - 1].decay = update.second;
                                    }
                              }

                              // Higher score advances; the loser's cost
                              // is zeroed (erased from the sparse
                              // vector) and its estimate skipped in
                              // later rounds.
                              if (leftp > rightp)
                                {
                                  winner[n] = left;
                                  cost[right] = 0;
                                  if (right > 0)
                                    {
                                      result[right - 1].skip = true;
                                    }
                                }
                              else
                                {
                                  winner[n] = right;
                                  cost[left] = 0;
                                  if (left > 0)
                                    {
                                      result[left - 1].skip = true;
                                    }
                                }
                            }

                          // odd survivor count: last one gets a bye
                          if (l % 2)
                            {
                              winner[(l - 1) / 2] = winner[l - 1];
                            }

                          worker_pool.update (r, csmc->f.get (), info);
                        }
                    }
                }
            }

          // Report on a roughly doubling example schedule.
          if (since_last.example_count > (1ULL << output_number))
            {
              cumulative += since_last;

              if (report) 
                {
                  do_report (*report, since_last, cumulative);
                }

              since_last.reset ();

              ++output_number;
            }
        }
    }

  // Final flush and summary.
  cumulative += since_last;

  if (report)
    {
      do_report (*report, since_last, cumulative);
      loss->report (*report, Loss::WARN);

      if (cumulative.example_count > 0)
        {
          unsigned int best = argmin (cumulative.constant_loss, k);
          unsigned int best_zo = argmin (cumulative.constant_zo_loss, k);

          std::cerr << "best constant = " << best << std::endl;
          std::cerr << "best constant loss = " 
                    << static_cast<double> (cumulative.constant_loss[best]) / 
                       static_cast<double> (cumulative.example_count)
                    << std::endl;

          std::cerr << "best constant 0/1 = " << best_zo << std::endl;
          std::cerr << "best constant 0/1 loss = " 
                    << static_cast<double> (cumulative.constant_zo_loss[best_zo]) / 
                       static_cast<double> (cumulative.example_count)
                    << std::endl;

          std::cerr << "confusion matrix (rows=prediction,columns=actual)" << std::endl;

          for (unsigned int m = 0; m < k; ++m)
            {
              for (unsigned int n = 0; n < k; ++n)
                {
                  std::cerr << confusion_matrix[m][n] << "\t";
                }

              std::cerr << std::endl;
            }
        }
    }

  return 0;
}

// Shared driver: parse and validate command-line options, construct the
// regressor / loss / learning rate, then run input_loop over stdin.
// Returns 0 on success and 1 on any usage or configuration error.
int
common_main (int argc,
             char* argv[],
             const boost::program_options::options_description& desc,
             const boost::program_options::options_description& all,
             const boost::program_options::positional_options_description& pd)
{
  using namespace boost::program_options;
  using std::cerr;
  using std::endl;
  using std::exception;
  using std::string;
  using std::vector;

  variables_map vm;

  try
    { 
      command_line_parser parser (argc, argv);
      parser.options (all);
      parser.positional (pd);
      store (parser.run (), vm);
      notify (vm);
    }
  catch (exception& e)
    { 
      cerr << "ERROR: " << e.what () << endl;
      cerr << desc;
      return 1;
    }

  if (vm.count ("help"))
    { 
      cerr << "scoring filter tree based cost-sensitive multiclass-classification" << endl;
      cerr << "  estimate a class label which (hopefully) minimizes " << endl;
      cerr << "  the conditional expected cost given the input" << endl << endl;
      cerr << desc;
      return 1;
    }

  // Validate the loss specification early; the result is intentionally
  // discarded -- the real loss is built later from the model header.
  try
    {
      std::auto_ptr<Loss> tmp = get_loss (vm["loss"].as<string> ());
    }
  catch (std::invalid_argument& ia)
    {
      cerr << "ERROR: invalid loss specification: '" 
           << vm["loss"].as<string> ()
           << "': " 
           << ia.what () 
           << endl;
      return 1;
    }

  if (vm["num_classes"].as<unsigned int> () < 2)
    {
      cerr << "ERROR: num_classes underflow (= "
           << vm["num_classes"].as<unsigned int> ()
           << " )" << endl;
      return 1;
    }

  if (vm["num_threads"].as<unsigned int> () == 0)
    {
      cerr << "ERROR: num_threads underflow (= "
           << vm["num_threads"].as<unsigned int> ()
           << " )" << endl;
      return 1;
    }

  if (vm["num_weight_bits"].as<unsigned int> () > 63)
    {
      cerr << "ERROR: num_weight_bits overflow (= " 
           << vm["num_weight_bits"].as<unsigned int> ()
           << " )" << endl;
      return 1;
    }

  // Parse feature-interaction specs; each parse_* helper returns
  // boost::none on a malformed spec.
  std::vector<std::pair<uint8_t, Regressor::NGramSpec> > ngram;

  if (vm.count ("ngram"))
    {
      vector<string> ns = vm["ngram"].as<vector<string> > ();

      for (vector<string>::iterator n = ns.begin (); n != ns.end (); ++n)
        {
          boost::optional<std::pair<uint8_t, Regressor::NGramSpec> > 
            parsed_n = parse_ngram (*n);

          if (! parsed_n)
            {
              return 1;
            }

          ngram.push_back (*parsed_n);
        }
    }

  std::vector<std::pair<uint8_t, uint8_t> > quadratic;

  if (vm.count ("quadratic"))
    {
      vector<string> qs = vm["quadratic"].as<vector<string> > ();

      for (vector<string>::iterator q = qs.begin (); q != qs.end (); ++q)
        {
          boost::optional<std::pair<uint8_t, uint8_t> > 
            parsed_q = parse_quadratic (*q);

          if (! parsed_q)
            {
              return 1;
            }

          quadratic.push_back (*parsed_q);
        }
    }

  std::vector<std::pair<uint8_t, uint8_t> > dotproduct;

  if (vm.count ("dotproduct"))
    {
      vector<string> qs = vm["dotproduct"].as<vector<string> > ();

      for (vector<string>::iterator q = qs.begin (); q != qs.end (); ++q)
        {
          boost::optional<std::pair<uint8_t, uint8_t> > 
            parsed_q = parse_dotproduct (*q);

          if (! parsed_q)
            {
              return 1;
            }

          dotproduct.push_back (*parsed_q);
        }
    }

  std::auto_ptr<LearningRate> eta;
  
  try
    {
      std::auto_ptr<LearningRate> tmp = get_learning_rate (vm["eta"].as<string> ());
      eta = tmp;
    }
  catch (std::invalid_argument& ia)
    {
      cerr << "ERROR: invalid learning rate specification '" 
           << vm["eta"].as<string> ()
           << "': " 
           << ia.what () 
           << endl;
      return 1;
    }

  if (vm["lambda"].as<float> () < 0.0f)
    {
      cerr << "ERROR: invalid dyadic regularizer specification "
           << vm["lambda"].as<float> ()
           << endl;
      return 1;
    }

  // Fix: shift in 64 bits -- num_weight_bits may legitimately be up to
  // 63 (checked above), and (1 << bits) on a plain int is undefined
  // behavior for bits >= 31.
  Regressor r = Regressor::Open (vm["model"].as<string> ())
                  .num_weights (1ULL << vm["num_weight_bits"].as<unsigned int> ())
                  .loss (vm["loss"].as<string> ())
                  .read_only (vm.count ("test"))
                  .add_quadratic (quadratic)
                  .add_dotproduct (dotproduct)
                  .add_ngram (ngram)
                  .num_classes (vm["num_classes"].as<unsigned int> ())
                ;

  // The operative loss comes from the model header (which may have been
  // written by an earlier run), not the command line.
  std::auto_ptr<Loss> loss;
  
  try
    {
      std::auto_ptr<Loss> tmp = get_loss (r.get_header ().loss);
      loss = tmp;
    }
  catch (std::invalid_argument& ia)
    {
      cerr << "ERROR: invalid loss specification '" 
           << r.get_header ().loss 
           << "': " 
           << ia.what () 
           << endl;
      return 1;
    }

  std::ofstream predict;

  if (vm.count ("predict"))
    {
      predict.open (vm["predict"].as<string> ().c_str ());

      if (! predict.good ())
        {
          cerr << "ERROR: can't open prediction file " 
               << vm["predict"].as<string> ()
               << ": " << strerror (errno)
               << endl;

          return 1;
        }
    }

  // Echo the effective configuration unless --quiet.
  if (vm.count ("quiet") == 0)
    {
      cerr << "adaptive = " << (vm["adaptive"].as<bool> () ? 1 : 0) << endl;
      cerr << "eta = " << vm["eta"].as<string> () << endl;
      cerr << "lambda = " << vm["lambda"].as<float> () << endl;
      cerr << "num_threads = " << vm["num_threads"].as<unsigned int> () << endl;
      if (vm.count ("test"))
        {
          cerr << "test only" << endl;
        }

      if (vm.count ("predict"))
        {
          cerr << "write predictions to " 
               << vm["predict"].as<string> ()
               << endl;
        }

      cerr << r.get_header ();
    }

  // The calling thread also works, so the pool gets num_threads - 1.
  WorkerPool worker_pool (vm["num_threads"].as<unsigned int> () - 1);

  return input_loop (std::cin, 
                     (vm.count ("quiet") 
                        ? boost::optional<std::ostream&> (boost::none)
                        : boost::optional<std::ostream&> (cerr)),
                     (vm.count ("predict")
                        ? boost::optional<std::ofstream&> (predict)
                        : boost::optional<std::ofstream&> (boost::none)),
                     vm.count ("test"),
                     r, 
                     loss,
                     eta,
                     vm["lambda"].as<float> (),
                     vm["adaptive"].as<bool> (),
                     worker_pool);
}

}

namespace flassol
{

// Entry point for the "sft" (scoring filter tree) mode: declare the
// command-line options, then delegate all parsing, validation, and the
// actual run to common_main.
int
sft_main (int   argc,
          char* argv[])
  {
    using namespace boost::program_options;
    using std::string;
    using std::vector;

    options_description desc ("Allowed options");
    options_description all ("");
    positional_options_description pd;

    // User-visible options (shown by --help).
    desc.add_options ()
      ("help", "produce help message")
      ("adaptive", value<bool> ()->default_value (true), "use adaptive learning rate")
      ("eta", value<string> ()->default_value ("powerlaw (1, 1, 0)"), "learning rate")
      ("lambda", value<float> ()->default_value (0.1f), "dyadic l2 regularizer")
      ("loss", value<string> ()->default_value ("hinge"), get_registered_losses ().c_str ())
      ("model", value<string> ()->default_value ("model"), "model file")
      ("num_classes", value<unsigned int> ()->default_value (2), "number of classes")
      ("num_threads", value<unsigned int> ()->default_value (1), "number of threads")
      ("num_weight_bits", value<unsigned int> ()->default_value (16), "log_2 number of weights")
      ("predict", value<string> (), "(optional) file to output predictions")
      ("ngram", value<vector<string> > (), "ngram spec")
      ("quadratic", value<vector<string> > (), "quadratic interactions spec")
      ("dotproduct", value<vector<string> > (), "dotproduct interactions spec")
      ("quiet", "suppress output")
      ("test", "only test: do not learn")
      ;
    // "mode" is accepted but hidden from --help; it is consumed by the
    // mode dispatcher, not by this front end.
    all.add (desc).add_options ()
      ("mode", value<string> ()->default_value ("sft"), modes_string.c_str ());

    // Positionals: first bare argument is the mode, the rest name the model.
    pd.add ("mode", 1);
    pd.add ("model", -1);

    return common_main (argc, argv, desc, all, pd);
  }

}
