
#include "defs.h"

#include <math.h>
#include <stdlib.h>
#include <string.h>

#include <fstream>

#include <tr1/unordered_set>


store_t g_store;                  /* Global symbol store (used e.g. to render variable clusters; see g_store.toString below). */
int     g_new_variable_index = 1; /* Counter for generating fresh variable indices — presumably consumed in other translation units. */
string  g_exec_options;           /* The raw command line, echoed into the <henry-output parameter="..."> XML header. */

/* Runs abductive inference: finds the best hypothesis explaining `obs`
 * under knowledge base `kb` by compiling the candidate-hypothesis space
 * into an LP and solving it with branch-and-bound.
 *
 *   p_out_best_h : receives the best hypothesis found.
 *   p_out_fv     : receives the feature vector of that hypothesis.
 *   p_out_cache  : LP, proof graph, loss and timing info; when c.use_cache
 *                  is set, the LP built by a previous call is reused and
 *                  only its objective coefficients are rewritten.
 *   c            : inference configuration (objective function, caching,
 *                  debug-output flags, ...).
 */
void algorithm::infer( logical_function_t *p_out_best_h, sparse_vector_t *p_out_fv, lp_inference_cache_t *p_out_cache, inference_configuration_t& c, const logical_function_t &obs, const knowledge_base_t& kb ) {

  double time_start = function::getTimeofDaySec();

  /* Fresh run: enumerate potential elemental hypotheses and convert them to an LP. */
  if( !c.use_cache ) {
    function::enumeratePotentialElementalHypotheses( &p_out_cache->pg, &p_out_cache->evc, obs, kb, c );
    function::convertToLP( &p_out_cache->lp, &p_out_cache->lprel, p_out_cache, kb, p_out_cache->pg, p_out_cache->evc, c );
  }

  /* Adjust the setting according to cache. */
  if( c.use_cache ) {
    
    /* Rewrite the objective coefficients of every labeled node's variables;
       in loss-augmented mode, c.loss is added to each labeled node's coefficient. */
    for( int i=0; i<p_out_cache->node_label.size(); i++ ) {
      int n = p_out_cache->node_label[i];

      double loss = (LossAugmented == c.objfunc ? c.loss : 0.0);
      p_out_cache->lp.variables[ p_out_cache->lprel.n2v[n] ].obj_val   = p_out_cache->node_score[n] + p_out_cache->node_score_plus[n] + loss;
      p_out_cache->lp.variables[ p_out_cache->lprel.n2v[n]+1 ].obj_val = -p_out_cache->node_score[n];
      
    }

    /* When the label is given, force the solution to respect it. */
    if( LabelGiven == c.objfunc )
      p_out_cache->lp.activateConstraint( p_out_cache->con_label_aug );
    
  }

  /* Optional debug dump of the ILP problem. */
  if( c.ilp ) {
    cout << "<ilp>" << endl
         << p_out_cache->lp.toString() << endl
         << "</ilp>" << endl;
  }
  
  p_out_cache->elapsed_prepare = function::getTimeofDaySec();
  
  function::solveLP_BnB( &p_out_cache->lp, c );
  p_out_cache->elapsed_ilp = function::getTimeofDaySec();
  
  /* Read the solver's variable assignment back into a hypothesis + feature vector. */
  function::convertLPToHypothesis( p_out_best_h, p_out_fv, p_out_cache->lp, p_out_cache->lprel, p_out_cache->pg );

  /* Accumulate the loss: c.loss for each labeled node whose variable was not
     selected in the solution (0/1 ILP variable, so exact comparison is safe
     if the solver returns integral values — NOTE(review): confirm). */
  if( LossAugmented == c.objfunc ) {

    p_out_cache->loss = 0;
    
    for( int i=0; i<p_out_cache->node_label.size(); i++ ) {
      int n = p_out_cache->node_label[i];
      if( 0.0 == p_out_cache->lp.variables[ p_out_cache->lprel.n2v[n] ].optimized )
        p_out_cache->loss += c.loss;
    }
    
  }

  /* NOTE(review): both durations are measured from time_start, so
     elapsed_ilp includes the preparation phase — confirm this is intended. */
  p_out_cache->elapsed_prepare -= time_start;
  p_out_cache->elapsed_ilp     -= time_start;
  
}

/* Trains the score function on examples `t` with an online
 * passive-aggressive scheme: for each instance, run loss-augmented
 * inference; if the prediction incurs a loss, run label-given inference
 * (reusing the LP cache) and update the weights with coefficient
 * tau = min(C, numerator / ||delta-features||^2).
 *
 *   p_out_sfunc : score function whose weights are updated in place.
 *   c           : learning configuration (C, N iterations, E threshold, ci).
 *   t           : training instances (x = observation, y = gold label).
 *   kb          : knowledge base used by inference.
 *
 * Terminates early when two consecutive iterations each accumulate a total
 * update of at most c.E (or when no update was made at all). */
void algorithm::learn( score_function_t *p_out_sfunc, const learn_configuration_t &c, const vector<training_data_t>& t, const knowledge_base_t& kb ) {

  /* Start learning online. */
  double previous_updates = 0.0;
  
  for( int n=0; n<c.N; n++ ) {

    cerr << "# -- Iteration: " << 1+n << endl;

    double total_updates = 0.0;

    for( int i=0; i<t.size(); i++ ) {

      cerr << "# Instance No. " << 1+i << endl
           << "#   O:     " << t[i].x.toString() << endl
           << "#   H*:    " << t[i].y.toString() << endl;
      
      /* Perform loss-augmented inference. */
      inference_configuration_t ci = c.ci;
      logical_function_t        h_current, h_correct;
      sparse_vector_t           v_current, v_correct;
      lp_inference_cache_t      cache;
      double                    s_current, s_correct;

      ci.objfunc = LossAugmented;
      ci.label   = t[i].y;

      /* arg max_{x_i, y^, h^} */
      infer( &h_current, &v_current, &cache, ci, t[i].x, kb );
      s_current = cache.lp.optimized_obj;
      
      cerr << "# Loss-augmented inference: " << endl
           << "#   H:     " << h_current.toString() << endl
           << "#   C(H):  " << cache.lp.optimized_obj << " >= " << ci.p_sfunc->getScore( v_current ) << endl
           << "#   Loss:  " << cache.loss << endl;
      
      /* We need to update weights if it performed incorrect inference. */
      if( 0.0 == cache.loss ) continue;

      /* Calculate a feature vector that outputs a correct label. */
      cerr << "# Label-augmented inference:" << endl;
      cerr << "#   Label: " << t[i].y.toString() << endl;
      
      ci.objfunc   = LabelGiven;
      ci.use_cache = true;   /* Reuse the LP built by the first inference. */

      /* arg max_{x_i, y_i, h} */
      infer( &h_correct, &v_correct, &cache, ci, t[i].x, kb );
      s_correct = cache.lp.optimized_obj;
      
      cerr << "#   H*:    " << h_correct.toString() << endl
           << "#   C(H*): " << cache.lp.optimized_obj << " == " << ci.p_sfunc->getScore( v_correct ) << endl;
      
      /* Calculate the update coefficient. */
      double             numerator = s_correct - s_current + cache.loss, denominator = 0.0;
      unordered_set<int> feature_indices;

      function::getVectorIndices( &feature_indices, v_current );
      function::getVectorIndices( &feature_indices, v_correct );

      /* denominator = squared L2 norm of the feature difference, over the
         union of features active in either vector. */
      for( unordered_set<int>::iterator iter_fi = feature_indices.begin(); feature_indices.end() != iter_fi; ++iter_fi ) {
        int j = *iter_fi;
        denominator += pow(v_correct[j] - v_current[j], 2);
      }

      double tau;

      /* BUG FIX: fabs() instead of abs() — with only <math.h>/<stdlib.h>
         included, abs(double) may bind to the integer overload and truncate. */
      if( TauTolerance > fabs(numerator) )   numerator = numerator >= 0 ? TauTolerance : -TauTolerance;
      
      if( 0.0 == denominator ) tau = 0.0;
      else tau                     = min( c.C, numerator / denominator );

      cerr << "# Weight update: " << endl;
      cerr << "#   Update coefficient: " << tau << " = min(" << c.C << ", " << numerator << " / " << denominator << ")" << endl
           << "#   Current feature vector: " << function::toString( v_current ) << endl
           << "#   Correct feature vector: " << function::toString( v_correct ) << endl;
      
      /* Update the weights. */
      if( 0.0 == tau ) continue;

      total_updates += fabs(tau);   /* BUG FIX: fabs, see above. */
      
      for( unordered_set<int>::iterator iter_fi = feature_indices.begin(); feature_indices.end() != iter_fi; ++iter_fi ) {
        int j = *iter_fi;
        if( 0 == v_correct[j] - v_current[j] ) continue;
        
        /* BUG FIX: the logged "new weight" previously used the opposite sign
           (tau * (v_correct - v_current)) to the update actually applied
           below; the diagnostic now matches the real update. */
        cerr << "#   w_" << j << " <- " << p_out_sfunc->weights[j] + tau * (v_current[j] - v_correct[j]) << " = " << p_out_sfunc->weights[j] << " + " << tau * (v_current[j] - v_correct[j]) << endl;
        
        p_out_sfunc->weights[j] += tau * (v_current[j] - v_correct[j]);
      }
      
    }

    cerr << "# -- Total update: " << total_updates;
    
    /* Converged: two consecutive iterations with total update <= E. */
    if( 0.0 == total_updates || (previous_updates <= c.E && total_updates <= c.E) ) {
      cerr << " <= " << c.E << endl
           << "# ... Ok, that's enough. "
           << "Henry terminated the training procedure in " << 1+n << "-th iteration." << endl;
      break;

    }
    
    previous_updates = total_updates;

    cerr << " > " << c.E << endl << "# " << endl;
    
  }
  
}

/* Welcome to the world of Henry. */

/* Parses the s-expression input files in `args` ("-" = stdin; an empty list
 * defaults to stdin) and dispatches on the top-level functor of each
 * expression read:
 *   (include <file>)                         : recursively process <file>.
 *   (model (weight (<index> <value>) ...))   : load model weights.
 *   (T <label> <obs>)                        : collect a training instance.
 *   (B ... (=> ...) ...)                     : collect precompiled-KB axioms.
 *   (O ...)                                  : run inference on an observation
 *                                              and print an XML result block.
 * Every p_out_* parameter may be NULL when the caller does not need that
 * data; each branch guards accordingly (NOTE(review): the "model"/"T"/"B"
 * branches dereference p_out_sfunc/p_out_t/p_out_pckb without a NULL check —
 * callers must pass them whenever such expressions can occur in the input).
 * Writes the <henry-output> XML envelope to stdout. Returns false only when
 * a precompiled KB given via -b cannot be read. */
bool _moduleProcessInput( vector<training_data_t>   *p_out_t,
                          score_function_t          *p_out_sfunc,
                          knowledge_base_t          *p_out_kb,
                          precompiled_kb_t          *p_out_pckb,
                          learn_configuration_t     *p_out_lc,
                          inference_configuration_t *p_out_ic,
                          command_option_t          &cmd, vector<string> &args ) {

  if( 0 == args.size() ) args.push_back( "-" );

  cout << "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>" << endl
       << "<henry-output parameter=\"" << g_exec_options << "\">" << endl;

  /* Read the precompiled knowledge base. */
  if( has_key( cmd, 'b' ) && NULL != p_out_kb ) {
    if( !function::readPrecompiledKB( p_out_kb, cmd[ 'b' ] ) ) {
      cerr << "ERROR: Could not read the precomplied knowledge base." << endl;
      return false;
    }
  }
  
  for( int a=0; a<args.size(); a++ ) {
  
    /* Start interpreting the input. */
    istream                   *p_is = &cin;
    ifstream                   file;

    if( "-" != args[a] ) {
      file.open( args[a].c_str() );
      p_is = &file;
    }

    for( sexp_reader_t sr(*p_is); !sr.isEnd(); ++sr ) {
    
      /* (include <file>): process the referenced file with the same outputs. */
      if( sr.stack.isFunctor( "include" ) ) {
        vector<string> args_once( 1, sr.stack.children[1]->getString() );
        _moduleProcessInput( p_out_t, p_out_sfunc, p_out_kb, p_out_pckb, p_out_lc, p_out_ic, cmd, args_once );
      }

      if( sr.stack.isFunctor( "model" ) ) {
        for( int i=1; i<sr.stack.children.size(); i++ ) {
          
          if( sr.stack.children[i]->isFunctor( "weight" ) ) {
            
            /* Set the model weights. */
            p_out_ic->ignore_weight = false;
        
            /* Each child is an (<index> <value>) pair. */
            for( int j=1; j<sr.stack.children[i]->children.size(); j++ ) {
              int    index  = atoi( sr.stack.children[i]->children[j]->children[0]->getString().c_str() );
              double weight = atof( sr.stack.children[i]->children[j]->children[1]->getString().c_str() );
              p_out_sfunc->weights[ index ] = weight;
            }

          }

        }
      }
    
      if( sr.stack.isFunctor( "T" ) ) {

        /* Usage: (T (^ label) (^ p1 p2 p3 ...)) */
      
        /* Identify the label part. */
        p_out_t->push_back( training_data_t( *sr.stack.children[2], *sr.stack.children[1] ) );

      }
    
      if( sr.stack.isFunctor( "B" ) ) {

        /* -b means the KB was precompiled elsewhere; skip inline axioms. */
        if( has_key( cmd, 'b' ) ) continue;
          
        /* Identify the LF part. */
        for( int i=1; i<sr.stack.children.size(); i++ ) {
          if( sr.stack.children[i]->isFunctor( "=>" ) ) {

            /* Index the axiom under its consequent's predicate and arity. */
            if( NULL != p_out_pckb ) {
              logical_function_t lf( *sr.stack.children[i] );
              (*p_out_pckb)[ lf.branches[1].lit.predicate ][ lf.branches[1].lit.terms.size() ].push_back( sr.stack.children[i]->toString() );
            }
            
          }
        }
      
      }

      if( sr.stack.isFunctor( "O" ) && NULL != p_out_ic ) {

        /* Identify the LF part. */
        int
          i_lf = sr.stack.findFunctorArgument( AndString ),
          i_name = sr.stack.findFunctorArgument( "name" );

        /* -p restricts inference to the observation with the given name. */
        if( has_key( cmd, 'p' ) ) {
          if( -1 == i_name ) continue;
          if( sr.stack.children[i_name]->children[1]->getString() != cmd[ 'p' ] ) continue;
        }
        
        if( -1 == i_lf ) { cerr << "No logical forms: " << sr.stack.toString() << endl; continue; }

        /* Compile the knowledge base (lazily, on the first observation). */
        if( !has_key( cmd, 'b' ) && 0 == p_out_kb->axioms.size() )
          if( !function::compileKB( p_out_kb, *p_out_pckb ) ) {
            cerr << "ERROR: Knowledge compilation failed." << endl; continue;
          }

        logical_function_t   best_h, obs( *sr.stack.children[i_lf] );
        lp_inference_cache_t cache;
        sparse_vector_t      v_current;
        
        cout << "<result-inference target=\"" << (-1 != i_name ? sr.stack.children[i_name]->children[1]->getString() : "") << "\">" << endl;

        function::enumerateConstatns( &p_out_kb->constants, obs );
        algorithm::infer( &best_h, &v_current, &cache, *p_out_ic, obs, *p_out_kb );
        
        /* Basic output. */
        vector<const literal_t*> literals_obs;
        obs.getAllLiterals( &literals_obs );
          
        cout << "<statistics>" << endl
             << "<time prepare=\"" << cache.elapsed_prepare << "\" ilp=\"" << cache.elapsed_ilp << "\" />"<< endl
             << "<ilp variables=\"" << cache.lp.variables.size() << "\" constraints=\"" << cache.lp.constraints.size() << "\" />"<< endl
             << "</statistics>" << endl
             << "<observable size=\"" << literals_obs.size() << "\" domain_size=\"" << p_out_kb->constants.size() << "\">" << obs.toString() << "</observable>" << endl
             << "<hypothesis cost=\"" << cache.lp.optimized_obj << "\">" << best_h.toString() << "</hypothesis>" << endl;

        /* Report which variables were unified: group variables by the
           cluster whose indicator variable the solver set to 1. */
        cout << "<variable-equivalence>" << endl;

        unordered_map<int, unordered_set<store_item_t> > var_cluster;
        for( unordered_map<store_item_t, unordered_map<int, int> >::iterator iter_v2cv=cache.lprel.vc2v.begin(); cache.lprel.vc2v.end()!=iter_v2cv; ++iter_v2cv )
          for( unordered_map<int, int>::iterator iter_vc2v=iter_v2cv->second.begin(); iter_v2cv->second.end()!=iter_vc2v; ++iter_vc2v )
            if( 1.0 == cache.lp.variables[ iter_vc2v->second ].optimized ) var_cluster[ iter_vc2v->first ].insert( iter_v2cv->first );

        for( unordered_map<int, unordered_set<store_item_t> >::iterator iter_vc=var_cluster.begin(); var_cluster.end()!=iter_vc; ++iter_vc )
          cout << "<cluster id=\"" << iter_vc->first << "\">" << g_store.toString(iter_vc->second) << "</cluster>" << endl;

        cout << "</variable-equivalence>" << endl;
        
        /* Optional proof-graph dump (-O proofgraph). */
        if( p_out_ic->proofgraph ) {
          cout << "<proofgraph>" << endl;
          cache.pg.printGraph( cache.lp, cache.lprel );
          cout << "</proofgraph>" << endl;
        }
        
        cout << "</result-inference>" << endl;
        
      }
    
    }

    if( "-" != args[a] ) file.close();
    
  }

  cout << "</henry-output>" << endl;

  return true;
  
}

/* Entry point for "-m compile_kb": gathers the background axioms ("B"
 * expressions) from the input files and writes them as a precompiled
 * knowledge base to the file given with -o.
 * Returns false when the precompiled KB cannot be written. */
bool _moduleCompileKb( command_option_t &cmd, vector<string> &args ) {

  precompiled_kb_t pckb;
  _moduleProcessInput( NULL, NULL, NULL, &pckb, NULL, NULL, cmd, args );

  if( !function::writePrecompiledKB( pckb, cmd[ 'o' ] ) ) {
    cerr << "ERROR: Precompilation failed." << endl;
    /* BUG FIX: failure was previously reported as success (returned true),
       so main() exited with status 0 even though nothing was written. */
    return false;
  }
  
  return true;
  
}

/* Copies inference-related command-line options into the configuration,
 * installing defaults for any option not supplied:
 *   -d depth limit, -c max variable clusters, -T time limit,
 *   -t number of threads, -O output flags ("proofgraph", "ilp").
 * Always succeeds and returns true. */
bool _moduleProcessInferOptions( inference_configuration_t *p_out_con, command_option_t &cmd ) {
  
  if( !has_key( cmd, 'd' ) ) cmd[ 'd' ] = "9999";
  if( !has_key( cmd, 'c' ) ) cmd[ 'c' ] = "9999";
  if( !has_key( cmd, 'T' ) ) cmd[ 'T' ] = "9999";
  if( !has_key( cmd, 't' ) ) cmd[ 't' ] = "4";
  if( !has_key( cmd, 'O' ) ) cmd[ 'O' ] = "";

  p_out_con->max_variable_clusters = atoi( cmd[ 'c' ].c_str() );
  p_out_con->depthlimit            = atoi( cmd[ 'd' ].c_str() );
  p_out_con->timelimit             = atof( cmd[ 'T' ].c_str() );
  /* NOTE(review): atof for a thread count looks odd — atoi seems intended;
     harmless as long as nbthreads converts from double. */
  p_out_con->nbthreads             = atof( cmd[ 't' ].c_str() );

  if( string::npos != cmd[ 'O' ].find( "proofgraph" ) ) p_out_con->proofgraph = true;
  if( string::npos != cmd[ 'O' ].find( "ilp" ) ) p_out_con->ilp               = true;

  /* BUG FIX: the function is declared bool but flowed off the end without a
     return statement — undefined behavior; callers ignored the value, so
     returning true is fully backward-compatible. */
  return true;
  
}

/* Entry point for "-m infer": configures inference from the command line,
 * then lets _moduleProcessInput run inference on every observation ("O"
 * expression) found in the input files. Always returns true. */
bool _moduleInfer( command_option_t &cmd, vector<string> &args ) {

  score_function_t          score_func;
  inference_configuration_t infer_conf( score_func );
  knowledge_base_t          kbase;
  precompiled_kb_t          compiled_kb;

  /* Weights are ignored unless the input contains a (model (weight ...))
     block, which clears this flag. */
  infer_conf.ignore_weight = true;

  _moduleProcessInferOptions( &infer_conf, cmd );
  _moduleProcessInput( NULL, &score_func, &kbase, &compiled_kb, NULL, &infer_conf, cmd, args );

  return true;
  
}

/* Entry point for "-m learn": reads training instances and the knowledge
 * base from the input files, trains the score function with the online
 * passive-aggressive learner, and prints the learned weights as an
 * s-expression model on stdout. Always returns true. */
bool _moduleLearn( command_option_t &cmd, vector<string> &args ) {

  vector<training_data_t> examples;
  score_function_t        score_func;
  knowledge_base_t        kbase;
  precompiled_kb_t        compiled_kb;
  learn_configuration_t   conf( score_func );

  /* Shared inference settings first, then learner-specific defaults:
     -C aggressiveness, -N max iterations, -E convergence threshold. */
  _moduleProcessInferOptions( &conf.ci, cmd );

  if( !has_key( cmd, 'C' ) ) cmd[ 'C' ] = "1.0";
  if( !has_key( cmd, 'N' ) ) cmd[ 'N' ] = "10";
  if( !has_key( cmd, 'E' ) ) cmd[ 'E' ] = "10e-05";

  conf.method = OnlinePassiveAggressive;
  conf.C      = atof( cmd['C'].c_str() );
  conf.N      = atoi( cmd['N'].c_str() );
  conf.E      = atof( cmd['E'].c_str() );

  _moduleProcessInput( &examples, &score_func, &kbase, &compiled_kb, &conf, &conf.ci, cmd, args );

  algorithm::learn( &score_func, conf, examples, kbase );

  /* Dump the learned model: (model (weight (<index> <value>) ...)). */
  cout << "(model " << endl
       << " (weight " << endl;

  for( weight_vector_t::iterator it_w = score_func.weights.begin(); score_func.weights.end() != it_w; ++it_w )
    cout << "  (" << it_w->first << " " << it_w->second << ")" << endl;

  cout << " ))" << endl;
  
  return true;
  
}

int main( int argc, char **pp_args ) {

  srand( time(NULL) );

  for( int i=1; i<argc; i++ ) g_exec_options += (1 != i ? " " : "") + string( pp_args[i] );
  
  if( 1 == argc ) { cerr << str_usage << endl; return 1; }

  command_option_t cmd;
  vector<string>   args;
  function::getParsedOption( &cmd, &args, "m:i:b:C:N:t:T:w:E:O:o:p:d:c:", argc, pp_args );

  if( !has_key( cmd, 'm' ) ) { cerr << str_usage << endl; return 1; }
  
  bool ret = false;
  
  if( "compile_kb" == cmd['m'] ) {
    if( !has_key( cmd, 'o' ) ) { cerr << str_usage << endl; return 1; }
    ret = _moduleCompileKb( cmd, args );
  } else if( "infer" == cmd['m'] ) ret = _moduleInfer( cmd, args );
  else if( "learn" == cmd['m'] ) ret = _moduleLearn( cmd, args );

  if( !ret ) { cerr << str_usage << endl; return 1; }
  
  return 0;
  
}
