#pragma once

#include <vector>
#include <deque>
#include <string>
#include <list>

#include <iostream>
#include <sstream>

#include <tr1/unordered_map>
#include <tr1/unordered_set>

#include <sys/time.h>
#include <stdlib.h>

#include "darts.h"

/* True when `key` is present in the associative container `dict`. */
#define has_key( dict, key ) (dict.end() != dict.find( key ))

/* Numeric tolerance used when comparing LP/tau values; max length of one precompiled axiom record. */
#define TauTolerance           0.0001
#define PrecompiledAxiomLength 1024

/* Tokens of the s-expression syntax for logical formulas. */
#define ImplicationString "=>"
#define AndString "^"
#define OrString "v"

/* Predicate name used for variable-substitution literals. */
#define PredicateSubstitution "="

using namespace std;
using namespace std::tr1;

/* Usage! */
/* Usage message printed by the command-line interface.
 * Fixes: stray `]` after `<input_file> ...` (no matching `[`), and two
 * wording slips ("under the t seconds", "clusters to c" vs. option C). */
static const char *str_usage = 
  "Usage: \n"
  "  henry -m <command> [options...] <input_file> <input_file> ...\n"
  "\n"
  "Command: \n"
  " infer       Performs abductive inference on the specified dataset.\n"
  " learn       Learns the parameters of scoring model given the (incomplete) training instances.\n"
  " compile_kb  Builds a hash database for generating candidate hypotheses efficiently.\n"
  "\n"
  "Options:\n"
  " compile_kb:\n"
  "   -o F   Write a compiled knowledge base into F.\n"
  ""
  " infer:\n"
  "   -b B   Use B as a precompiled background knowledge.\n"
  "   -p P   Perform inference only for the observation P.\n"
  "   -d D   Stop backward chaining up to the depth D.\n"
  "   -T t   Performs abductive inference within t seconds.\n"
  "   -t T   Use T threads for inference.\n"
  "   -O o   Outputs information about o. (o=proofgraph|ilp)\n"
  "   -c C   Limit the number of variable clusters to C. \n"
  ""
  "   -A    Lists used axioms.\n"
  "   -B    Lists used axioms in the best interpretation.\n"
  "   -S    Lists substituted variables.\n"
  " learn (EXPERIMENTAL):\n"
  "   -C    Sets a margin parameter (default = 1.0).\n"
  "   -N    Iterates a learning procedure for a specified number of times (default = 10).\n"  
  "   -E    Sets a termination criterion (default = 10e-05).\n"
  "   -b    Specifies a background knowledge for learning.\n"
  "";

/* Basic double. */
/* A double initialized with a uniform random value in [0, 1).  Used as
 * the mapped type of weight_vector_t so unseen features get a small
 * random initial weight.
 * BUGFIX: operator+= and operator= are declared to return double but
 * previously fell off the end without returning (undefined behavior). */
class random_double_t {
 private:
  double m_value;
 public:
  inline random_double_t() { m_value = (rand() % 10000) / 10000.0; }
  inline operator double () const { return m_value; }
  inline double operator += ( double other ) { m_value += other; return m_value; }
  inline double operator = ( double other ) { m_value = other; return m_value; }
};

/* Data types. */
typedef int                                 store_item_t;   /* id of an interned string (see store_t) */
typedef int                                 pg_hypernode_t; /* index into proof_graph_t::hypernodes */
typedef unordered_map<int, string>          command_option_t; /* option character -> argument value */
typedef unordered_map<int, vector<pg_hypernode_t> > pg_edge_set_t; /* node -> outgoing hypernodes */
typedef unordered_map<int, random_double_t> weight_vector_t; /* feature id -> weight (randomly initialized) */
typedef unordered_map<int, double>          sparse_vector_t; /* feature id -> feature value */

/* Connective at a node of a logical_function_t tree. */
enum logical_operator_t { UnderspecifiedOperator, Literal, AndOperator, OrOperator, ImplicationOperator };
enum sampling_method_t { Random, Uniform };
/* Node kinds produced by the s-expression reader. */
enum sexp_stack_type_t { ListStack, StringStack, TupleStack };
enum inference_method_t { TreeSearch, LocalSearch, RoundLP,  };
/* What the ILP objective optimizes. */
enum objective_function_t { Cost, LossAugmented, LabelGiven };
enum learn_method_t { OnlinePassiveAggressive };
/* Origin of a proof-graph node. */
enum pg_node_type_t { UnderspecifiedNode, LogicalNetworkNode, ObservableNode, HypothesisNode };
/* Comparison operator of an ILP constraint. */
enum lp_constraint_operator_t { UnderspecifiedCopr, Equal, LessEqual, GreaterEqual, Range };
enum feature_function_t { NodeFeature, EdgeFeature };

/* */
/* Render each element in [s_begin, s_end) with the printf format fmt and
 * concatenate the results.  BUGFIX: snprintf bounds the buffer (sprintf
 * could overflow on long formatted values). */
template <class T> string toString( const T &s_begin, const T &s_end, const char *fmt ) {
  string exp;
  for( T i=s_begin; s_end!=i; ++i ) {
    char buffer[1024]; snprintf( buffer, sizeof(buffer), fmt, *i );
    exp += buffer;
  }
  return exp;
}

/* Format a single value x with the given printf-style format string.
 * BUGFIX: snprintf bounds the buffer (sprintf could overflow). */
template <class T> string toString( T x, const string &format ) { char buffer[1024]; snprintf( buffer, sizeof(buffer), format.c_str(), x ); return string( buffer ); }

/* Definition of data structures. */
struct store_t {
  unordered_map<string, store_item_t> s2s;
  vector<string>                      items;

  inline store_item_t cashier( const string &s ) {
    unordered_map<string, store_item_t>::iterator iter_i = s2s.find( s );
    if( s2s.end() != iter_i ) return iter_i->second;
    items.push_back( s );
    s2s[s] = items.size()-1;
    return items.size()-1;
  }

  inline string toString( const unordered_set<store_item_t> &var_set ) const {
    string exp;
    for( unordered_set<store_item_t>::const_iterator iter_v=var_set.begin(); var_set.end()!=iter_v; ++iter_v )
      exp += (iter_v == var_set.begin() ? "" : ", ") + claim( *iter_v );
    return exp;
  }

  inline string claim( store_item_t i ) const { return 0 <= i && i < items.size() ? items[ i ] : ""; }
  inline bool isConstant( store_item_t i ) const { char c = items[i][0]; return ('0' <= c && c <= '9') || ('A' <= c && c <= 'Z'); }
  inline bool isUnknown( store_item_t i ) const { return 'u' == items[i][0]; };
  inline bool isEqual( store_item_t i, const string &val ) { return val == items[ i ]; }
  
} extern g_store;

/* One node of a parsed s-expression: a list "(...)", a bare string, or a
 * tuple wrapper.  `children` holds non-owning pointers; the pointed-to
 * nodes live in an external list supplied by the reader. */
struct sexp_stack_t {

  sexp_stack_type_t    type;
  deque<sexp_stack_t*> children;
  string               str;
  
  sexp_stack_t() { type = ListStack; }
  sexp_stack_t( sexp_stack_type_t t ) { type = t; }
  /* For TupleStack, wrap e as a StringStack child whose storage is kept
   * alive in stack_list; otherwise store e directly. */
  sexp_stack_t( sexp_stack_type_t t, const string& e, list<sexp_stack_t> &stack_list ) {
    type = t;
    if( TupleStack == t ) {
      stack_list.push_back( sexp_stack_t( StringStack, e, stack_list ) );
      children.push_back( &(stack_list.back()) );
    } else str = e;
  }

  /* Append a textual rendering of this subtree to *p_out_str. */
  inline void _print( string *p_out_str ) const {
    switch( type ) {
    case StringStack: { (*p_out_str) += str; break; }
    case TupleStack: { for( int i=0; i<children.size(); i++ ) { children[i]->_print( p_out_str ); } break; }
    case ListStack: { (*p_out_str) += "("; for( int i=0; i<children.size(); i++ ) { children[i]->_print( p_out_str ); if( i < children.size()-1 ) (*p_out_str) += " "; } (*p_out_str) += ")"; break; }
    }
  }

  inline string toString() const { string exp; _print( &exp ); return exp; }

  /* Index of the first child that is a functor named func_name, or -1.
   * FIX: func_name passed by const reference (was copied by value). */
  inline int findFunctorArgument( const string &func_name ) const {
    for( int i=0; i<children.size(); i++ ) { if( children[i]->isFunctor( func_name ) ) return i; }
    return -1;
  }

  /* True when this node has at least two children and a head symbol; with
   * a non-empty func_name, the head must also equal func_name. */
  inline bool isFunctor( const string &func_name = "" ) const {
    if( 1 >= children.size() ) return false;
    if( 0 == children[0]->children.size() ) return false;
    return "" == func_name ? true : func_name == children[0]->children[0]->str;
  }

  /* The string stored in the first child (valid for tuple-wrapped atoms). */
  inline string getString() const { return children[0]->str; }
  
};

/* Incremental s-expression reader.  Each call to operator++ (defined
 * elsewhere) advances to the next complete expression, which is exposed
 * through the public reference `stack`. */
class sexp_reader_t {
 private:
  istream              &m_stream;     /* input stream being parsed */
  deque<sexp_stack_t*>  m_stack;      /* parser stack; non-owning, storage in m_stack_list */
  sexp_stack_t          m_damn;       /* the node that `stack` permanently refers to */
  list<sexp_stack_t>    m_stack_list; /* owns all nodes; list gives stable addresses */

  /* Copy ss into the owned list and return a stable pointer to it. */
  sexp_stack_t* new_stack( const sexp_stack_t &ss ) {
    m_stack_list.push_back(ss); return &(m_stack_list.back());
  }
  
 public:
  sexp_stack_t &stack;  /* the most recently read expression (bound to m_damn) */
  
  /* Seeds the parser stack and immediately reads the first expression
   * via ++(*this).  Note: `stack` is a reference member bound to m_damn,
   * so m_damn must be declared before it (it is). */
  inline sexp_reader_t( istream &_stream ) : m_stream( _stream ), stack( m_damn ) { m_stack.push_back( new_stack( sexp_stack_t(ListStack) ) ); ++(*this); };
  sexp_reader_t& operator++();
  /* True once the underlying stream is exhausted or in error. */
  bool   isEnd() { return !m_stream.good(); }
};

/* A literal: an interned predicate applied to a sequence of interned
 * terms, e.g. p(x, y).  All symbols are ids into g_store. */
struct literal_t {
  store_item_t         predicate;
  vector<store_item_t> terms;

  inline literal_t() {};
  inline literal_t( const literal_t& lit ) { predicate = lit.predicate; terms.insert( terms.begin(), lit.terms.begin(), lit.terms.end() ); };

  /* Parse from an s-expression node: "(p t1 t2 ...)" or a bare atom. */
  inline literal_t( const sexp_stack_t &s ) {
    if( s.isFunctor() ) {
      predicate = g_store.cashier( s.children[0]->children[0]->str );
      for( int i=1; i<s.children.size(); i++ )
        terms.push_back( g_store.cashier( s.children[i]->children[0]->str ) );
    } else
      predicate = g_store.cashier( s.children[0]->str );
  }

  inline literal_t( const string &_predicate, store_item_t term1, store_item_t term2 ) {
    predicate = g_store.cashier( _predicate );
    terms.push_back( term1 );
    terms.push_back( term2 );
  }
  
  inline literal_t( const string &_predicate, store_item_t term1, store_item_t term2, store_item_t term3 ) {
    predicate = g_store.cashier( _predicate );
    terms.push_back( term1 );
    terms.push_back( term2 );
    terms.push_back( term3 );
  }

  inline literal_t( const string &_predicate, const string &term1, const string &term2 ) {
    predicate = g_store.cashier( _predicate );
    terms.push_back( g_store.cashier( term1 ) );
    terms.push_back( g_store.cashier( term2 ) );
  }
  
  /* Structural equality: same predicate and identical term sequence. */
  inline bool operator==(const literal_t &other) const {
    if( predicate != other.predicate ) return false;
    if( terms.size() != other.terms.size() ) return false;
    for( int i=0; i<terms.size(); i++ ) if( terms[i] != other.terms[i] ) return false;
    return true;
  }

  /* Append "p(t1,t2,...)" (or just "p" for arity 0) to *p_out_str. */
  inline void _print( string *p_out_str ) const {
    (*p_out_str) += g_store.claim( predicate );
    for( int i=0; i<terms.size(); i++ ) {
      if( 0 == i ) (*p_out_str) += "(";
      (*p_out_str) += g_store.claim( terms[i] );
      if( i == terms.size()-1 ) (*p_out_str) += ")"; else (*p_out_str) += ",";
    }
  }

  inline string toString() const { string exp; _print( &exp ); return exp; }
  /* "predicate/arity", e.g. "man/1".  FIX: snprintf guards against
   * overflow on very long predicate names (sprintf did not). */
  inline string toPredicateArity() const { char buffer[1024]; snprintf( buffer, sizeof(buffer), "%s/%d", g_store.claim( predicate ).c_str(), (int)terms.size() ); return string( buffer ); }
  
};

struct unifier_t {
  vector<literal_t>                substitutions;
  unordered_map<store_item_t, int> shortcuts;

  inline unifier_t() {};
  
  inline unifier_t( store_item_t x, store_item_t y ) {
    add( x, y );
  }
  
  inline bool apply( literal_t *p_out_lit ) const {
    for( int i=0; i<p_out_lit->terms.size(); i++ ) {
      unordered_map<store_item_t, int>::const_iterator iter_sc = shortcuts.find( p_out_lit->terms[i] );
      
      if( shortcuts.end() == iter_sc ) continue;
      
      if( p_out_lit->terms[i] == substitutions[ iter_sc->second ].terms[0] )
        p_out_lit->terms[i] = substitutions[ iter_sc->second ].terms[1];
      
    }
  }

  inline bool isApplied( store_item_t x ) {
    return shortcuts.end() != shortcuts.find(x);
  }

  inline void add( store_item_t x, store_item_t y ) {
    if( shortcuts.end() != shortcuts.find(x) || shortcuts.end() != shortcuts.find(y) ) return;
    substitutions.push_back( literal_t( "/", x, y ) );
    shortcuts[x] = substitutions.size()-1;
  }

  inline void add( store_item_t x, const string &variable ) {
    store_item_t y = g_store.cashier( variable );
    add( x, y );
  }
  
  inline string toString() {
    string exp;
    for( int i=0; i<substitutions.size(); i++ ) {
      exp += g_store.claim(substitutions[i].terms[0]) + "/" + g_store.claim(substitutions[i].terms[1]);
      if( i < substitutions.size()-1 ) exp += ", ";
    }
    return "{" + exp + "}";
  }
};

/* A logical formula as a tree: a literal leaf, or an operator node
 * (and/or/implication) with sub-formulas in `branches`. */
struct logical_function_t {
  logical_operator_t         opr;
  literal_t                  lit;
  vector<logical_function_t> branches;

  inline logical_function_t() : opr( UnderspecifiedOperator ) {}
  /* Parse from an s-expression: (=> a b), (^ a b ...), (v a b ...), or a
   * literal. */
  inline logical_function_t( const sexp_stack_t &s ) : opr( UnderspecifiedOperator ) {
    if( s.isFunctor( ImplicationString ) ) {
      opr = ImplicationOperator;
      branches.push_back( logical_function_t( *s.children[1] ) ); branches.push_back( logical_function_t( *s.children[2] ) );
    } else if( s.isFunctor( AndString ) || s.isFunctor( OrString ) ) {
      opr = s.isFunctor( AndString ) ? AndOperator : OrOperator;
      for( int i=1; i<s.children.size(); i++ )
        branches.push_back( logical_function_t( *s.children[i] ) );
    } else { /* Assuming s is a literal. */
      opr = Literal;
      lit = literal_t( s );
    }
  }
  
  /* Build an n-ary and/or node over the given literals. */
  inline logical_function_t( logical_operator_t _opr, const vector<literal_t> &literals ) : opr( _opr ) {
    for( int i=0; i<literals.size(); i++ ) branches.push_back( logical_function_t( literals[i] ) );
  }

  /* Literal leaf.  FIX: initializers now follow declaration order
   * (opr is declared before lit), silencing -Wreorder. */
  inline logical_function_t( const literal_t& _lit ) : opr( Literal ), lit( _lit ) {};

  /* Append a textual rendering (infix, parenthesized sub-formulas). */
  inline void _print( string *p_out_str ) const {
    switch( opr ) {
    case Literal: { (*p_out_str) += lit.toString(); break; }
    case ImplicationOperator: { branches[0]._print( p_out_str ); (*p_out_str) += " => "; branches[1]._print( p_out_str ); break; }
    case OrOperator:
    case AndOperator: {
      for( int i=0; i<branches.size(); i++ ) {
        if( Literal != branches[i].opr ) (*p_out_str) += "(";
        branches[i]._print( p_out_str );
        if( Literal != branches[i].opr ) (*p_out_str) += ")";
        if( i < branches.size()-1 ) (*p_out_str) += AndOperator == opr ? " " AndString " " : " " OrString " ";
      }
      break; }
    }
  }

  inline string toString() const { string exp; _print( &exp ); return exp; }

  /* Collect pointers to every literal leaf, depth-first. */
  inline void getAllLiterals( vector<const literal_t*> *p_out_list ) const {
    switch( opr ) {
    case Literal: { p_out_list->push_back( &lit ); break; }
    case ImplicationOperator: { branches[0].getAllLiterals( p_out_list ); branches[1].getAllLiterals( p_out_list ); break; }
    case OrOperator:
    case AndOperator: {
      for( int i=0; i<branches.size(); i++ ) branches[i].getAllLiterals( p_out_list );
      break; }
    }
  }

  /* True when lit appears (structurally equal) anywhere in the formula. */
  inline bool includes( const literal_t& lit ) const {
    vector<const literal_t*> my_literals;
    getAllLiterals( &my_literals );
    for( int i=0; i<my_literals.size(); i++ )
      if( *my_literals[i] == lit ) return true;
    return false;
  }
  
};

/* Presumably predicate -> arity -> raw axiom strings (the KB before it
 * is compiled into a knowledge_base_t) — confirm against compileKB. */
typedef unordered_map<store_item_t, unordered_map<int, vector<string> > > precompiled_kb_t;

/* One training instance: observation x and its (gold) label formula y. */
struct training_data_t {
  logical_function_t x, y;
  inline training_data_t( sexp_stack_t &_x, sexp_stack_t &_y ) : x( _x ), y( _y ) {};
};

/* node -> node -> the unifier justifying their unification edge. */
typedef unordered_map<int, unordered_map<int, unifier_t> > pg_unifier_edges_t;
/* predicate -> arity -> node indices (lookup table into proof_graph_t::nodes). */
typedef unordered_map<store_item_t, unordered_map<int, vector<int> > > pg_node_map_t;

/* A node in the proof graph: a literal plus inference bookkeeping
 * (depth, distance, feature vector, score, provenance). */
struct pg_node_t {
  literal_t             lit;
  pg_node_type_t        type;
  double                distance;
  int                   n, depth, obs_node;
  sparse_vector_t       fv;
  double                score;
  unordered_set<string> axiom_used;
  unordered_set<int>    nodes_appeared;
  
  /* BUGFIX: obs_node was never initialized (garbage reads); -1 is used
   * as "no associated observation node" — TODO(review): confirm callers
   * treat negative as unset.  Initializers follow declaration order. */
  inline pg_node_t( const literal_t &_lit, pg_node_type_t _type, int _n ) : lit( _lit ), type( _type ), distance(1.0), n(_n), depth(0), obs_node(-1), score(0.0) {};

  /* "literal:index:type:score" for debugging.
   * BUGFIX: the buffer was static (not reentrant/thread-safe); it is now
   * a local, and snprintf bounds the write. */
  inline string toString() const {
    char buffer[1024]; snprintf( buffer, sizeof(buffer), "%s:%d:%d:%.2f", lit.toString().c_str(), n, type, score );
    return string( buffer );
  }
  
};

/* One ILP variable: its name, the value it took in the optimized
 * solution, and its objective coefficient.
 * FIX: initializers follow declaration order (silences -Wreorder). */
struct lp_variable_t {
  string name;
  double optimized; /* value in the solved problem */
  double obj_val;   /* coefficient in the objective */
  inline lp_variable_t( const string &n ) : name(n), optimized(0), obj_val(0) {};
};

struct lp_constraint_t {
  lp_constraint_operator_t opr;
  vector<int>              vars;
  vector<double>           coes;
  double                   lhs, rhs;
  bool                     is_active;

  inline lp_constraint_t() : is_active(true), opr(LessEqual), lhs(0), rhs(0) {}
  inline lp_constraint_t( lp_constraint_operator_t _opr, double val ) : is_active(true), opr(_opr), lhs(val), rhs(val) {}
  inline lp_constraint_t( lp_constraint_operator_t _opr, double coe, int var, double val ) : is_active(true), opr( _opr ), lhs( val ), rhs( val ) {
    vars.push_back( var ); coes.push_back( coe );
  }
  
  inline lp_constraint_t( lp_constraint_operator_t _opr, double coe1, int var1, double coe2, int var2 ) : is_active(true), opr( _opr ), lhs( 0 ), rhs( 0 ) {
    vars.push_back( var1 ); coes.push_back( coe1 );
    vars.push_back( var2 ); coes.push_back( -coe2 );
  }
  
  inline lp_constraint_t( lp_constraint_operator_t _opr, double n, double coe1, int var1, double coe2, int var2 ) : is_active(true), opr( _opr ), lhs( n ), rhs( n ) {
    vars.push_back( var1 ); coes.push_back( coe1 );
    vars.push_back( var2 ); coes.push_back( coe2 );
  }

  inline void push_back( int var, double coe ) {
    vars.push_back( var ); coes.push_back( coe );
  }

  inline void _print( string *p_out, const vector<lp_variable_t> &var_instances ) {
    
    for( int i=0; i<vars.size(); i++ ) {
      char buffer[1024]; sprintf( buffer, "%.2f * %s", coes[i], var_instances[ vars[i] ].name.c_str() );
      (*p_out) += buffer;
      if( i < vars.size()-1 ) (*p_out) += " + ";
    }
    
    switch( opr ) {
    case Equal: {
      char buffer[1024]; sprintf( buffer, " = %.2f", lhs );
      (*p_out) += buffer;
      break; }

    case LessEqual: {
      char buffer[1024]; sprintf( buffer, " <= %.2f", rhs );
      (*p_out) += buffer;
      break; }
      
    case GreaterEqual: {
      char buffer[1024]; sprintf( buffer, " >= %.2f", rhs );
      (*p_out) += buffer;
      break; }
      
    }
    
  }
};

/* The ILP instance: variables, constraints, and (after solving) the
 * optimized objective value. */
struct linear_programming_problem_t {
  vector<lp_variable_t>   variables;
  vector<lp_constraint_t> constraints;
  double                  optimized_obj;
  
  /* Returns the index of the added variable. */
  inline int addVariable( const lp_variable_t &var ) { variables.push_back( var ); return variables.size()-1; }
  /* Returns the constraint index, or -1 when con has no variables. */
  inline int addConstraint( const lp_constraint_t &con ) { if( 0 == con.vars.size() ) { return -1; } constraints.push_back( con ); return constraints.size()-1; }
  inline void deactivateConstraint( int con ) { if( -1 != con ) constraints[con].is_active = false; }
  inline void activateConstraint( int con ) { if( -1 != con ) constraints[con].is_active = true; }
  
  /* XML-ish dump of the whole problem for debugging.
   * BUGFIX: size() is size_t but the format was "%d" — an int/size_t
   * mismatch that is undefined behavior on LP64; the casts fix it.
   * Also snprintf instead of sprintf. */
  inline string toString() {
    string exp;
    exp += "<variables size=\"" + ::toString((int)variables.size(), "%d") + "\">";
    for( int i=0; i<variables.size(); i++ ) {
      char buffer[1024]; snprintf( buffer, sizeof(buffer), "%s: %.2f", variables[i].name.c_str(), variables[i].obj_val );
      exp += buffer; exp += "\n";
    }
    exp += "</variables>\n<constraints size=\"" + ::toString((int)constraints.size(), "%d") + "\">";
    for( int i=0; i<constraints.size(); i++ ) { constraints[i]._print( &exp, variables ); exp += "\n"; }
    exp += "</constraints>";    
    return exp;
  }
};

/* (x, y) -> index of the ILP variable representing the substitution x=y. */
typedef unordered_map<store_item_t, unordered_map<store_item_t, int> > substitution2v_t;

/* Correspondence between proof-graph entities and ILP variables. */
struct lp_problem_mapping_t {
  substitution2v_t sub2v;        /* substitution pair -> ILP variable */
  unordered_map<int, int> n2v;   /* node -> ILP variable */
  unordered_map<int, int> hn2v;  /* hypernode -> ILP variable */
  unordered_map<int, int> n2lc;  /* node -> ?; presumably a label constraint index — TODO confirm */
  unordered_map<store_item_t, unordered_map<int, int> > vc2v; /* variable cluster -> ILP variable */
  
  unordered_map<int, unordered_map<pg_hypernode_t, sparse_vector_t> > fv_edge; /* per-edge feature vectors */
};

/* The proof graph: candidate hypothesis nodes plus hyperedges and
 * unification edges that connect them. */
struct proof_graph_t {
  vector<pg_node_t>  nodes;
  vector<pair<int, int> > mutual_exclusive_nodes; /* node pairs that cannot both hold */
  vector<vector<int> > hypernodes;                /* each hypernode = a set of node indices */
  pg_node_map_t      p2n;    /* predicate -> arity -> node indices */
  pg_edge_set_t      edges;  /* node -> hypernodes it points to */
  pg_unifier_edges_t uedges; /* unification edges with their unifiers */

  /* Point *p_out_nodes at the index list of nodes with the given
   * predicate and arity; false when none exist. */
  inline bool getNode( const vector<int> **p_out_nodes, store_item_t predicate, int arity ) const {

    pg_node_map_t::const_iterator iter_nm = p2n.find( predicate );
    if( p2n.end() == iter_nm ) return false;

    unordered_map<int, vector<int> >::const_iterator iter_an = iter_nm->second.find( arity );
    if( iter_nm->second.end() == iter_an ) return false;

    (*p_out_nodes) = &iter_an->second;
      
    return true;
    
  }

  /* Append the indices of nodes whose literal equals lit; false when
   * there are none. */
  inline bool getNode( vector<int> *p_out_nodes, const literal_t &lit ) const {

    const vector<int> *pa_list;
    if( !getNode( &pa_list, lit.predicate, lit.terms.size() ) ) return false;

    for( int i=0; i<pa_list->size(); i++ ) {
      if( nodes[ (*pa_list)[i] ].lit == lit ) p_out_nodes->push_back( (*pa_list)[i] );
    }

    if( 0 == p_out_nodes->size() ) return false;
    
    return true;
    
  }
  
  bool getNodeHerbrand( vector<int> *p_out_nodes, const literal_t &lit ) const;

  /* Record that n1 and n2 are mutually exclusive. */
  inline void addMutualExclusiveness( int n1, int n2 ) {
    mutual_exclusive_nodes.push_back( make_pair( n1, n2 ) );
  }
      
  /* Create a node for lit, index it in p2n, and return its index. */
  inline int addNode( const literal_t &lit, pg_node_type_t type ) {

    nodes.push_back( pg_node_t( lit, type, nodes.size() ) );
    p2n[ lit.predicate ][ lit.terms.size() ].push_back( nodes.size()-1 );
    
    return nodes.size()-1;
    
  }

  /* Register a hypernode and return its index. */
  inline int addHyperNode( vector<int> &v ) {
    hypernodes.push_back(v);
    return hypernodes.size()-1;
  }
  
  /* Attach hypernode hv2 to node v1; returns its position in v1's edge
   * list.  BUGFIX: was declared int but returned nothing (UB). */
  inline int addEdge( int v1, pg_hypernode_t hv2 ) {
    edges[v1].push_back(hv2);
    return edges[v1].size()-1;
  }

  void printGraph( const linear_programming_problem_t& lpp, const lp_problem_mapping_t &lprel ) const;
  
};

/* Compiled background knowledge: a Darts double-array (trie) built over
 * `keys` for fast lookup, the raw axiom strings, and the set of
 * constants — used to generate candidate hypotheses (see compile_kb). */
struct knowledge_base_t {
  Darts::DoubleArray          da;
  vector<string>              keys;
  vector<string>              axioms;
  unordered_set<store_item_t> constants;
};

/* Score-function callback signatures (per node / per edge). */
typedef double (*sf_node_t)(const proof_graph_t &gp, int i );
typedef double (*sf_edge_t)(const proof_graph_t &gp, int i, const vector<int> &js );

struct score_function_t {

  weight_vector_t   weights;

  void featureFunction( sparse_vector_t *p_out_v, const proof_graph_t &gp, int i );
  void featureFunction( sparse_vector_t *p_out_v, const proof_graph_t &gp, int i, pg_hypernode_t j );

  double getScore( const sparse_vector_t &v_feature, bool f_ignore_weight = false ) {
    double s = 0;

    if( f_ignore_weight ) 
      for( sparse_vector_t::const_iterator iter_f = v_feature.begin(); v_feature.end() != iter_f; ++iter_f )
        s += iter_f->second;
    else
      for( sparse_vector_t::const_iterator iter_f = v_feature.begin(); v_feature.end() != iter_f; ++iter_f )
        s += weights[iter_f->first] * iter_f->second;
      
    return s;
  }
  
};

struct variable_cluster_t {

  typedef unordered_map<int, unordered_set<store_item_t> > cluster_t;
  typedef unordered_map<store_item_t, int> variable_mapper_t;
  
  cluster_t         clusters;
  variable_mapper_t map_v2c;

  inline void add( store_item_t t1, store_item_t t2 ) {
    
    variable_mapper_t::iterator iter_c1 = map_v2c.find( t1 ), iter_c2 = map_v2c.find( t2 );
    
    if( map_v2c.end() == iter_c1 && map_v2c.end() == iter_c2 ) {
      static int new_cluster = 0; new_cluster++;
      clusters[ new_cluster ].insert( t1 );
      clusters[ new_cluster ].insert( t2 );
      map_v2c[ t1 ] = new_cluster;
      map_v2c[ t2 ] = new_cluster;
    } else if( map_v2c.end() != iter_c1 && map_v2c.end() != iter_c2 ) {
      if( iter_c1->second != iter_c2->second ) {
        clusters[ iter_c1->second ].insert( clusters[ iter_c2->second ].begin(), clusters[ iter_c2->second ].end() );
        clusters[ iter_c2->second ].clear();
        map_v2c[ t1 ] = iter_c1->second;
        map_v2c[ t2 ] = iter_c1->second;
      }
    } else if( map_v2c.end() != iter_c1 && map_v2c.end() == iter_c2 ) {
      clusters[ iter_c1->second ].insert( t2 );
      map_v2c[ t2 ] = iter_c1->second;
    } else if( map_v2c.end() == iter_c1 && map_v2c.end() != iter_c2 ) {
      clusters[ iter_c2->second ].insert( t1 );
      map_v2c[ t1 ] = iter_c2->second;
    }
    
  }

  inline string dump() const {
    
    for( cluster_t::const_iterator iter_ec = clusters.begin(); clusters.end() != iter_ec; ++iter_ec ) {

      cerr << iter_ec->first << ": ";

      if( 0 == iter_ec->second.size() ) continue;
      
      for( unordered_set<store_item_t>::const_iterator iter_vars = iter_ec->second.begin(); iter_ec->second.end() != iter_vars; ++iter_vars )
        cerr << g_store.claim(*iter_vars) << ", ";

      cerr << endl;
      
    }
    
  }
  
};

/* Everything produced while solving one inference problem, bundled so it
 * can be inspected or reused (e.g. across learning iterations). */
struct lp_inference_cache_t {
  proof_graph_t                pg;      /* candidate hypothesis space */
  linear_programming_problem_t lp;      /* its ILP encoding */
  lp_problem_mapping_t         lprel;   /* proof graph <-> ILP variable mapping */
  double                       loss;
  double                       elapsed_prepare, elapsed_ilp; /* timings, seconds */
  variable_cluster_t           evc;     /* variable (equality) clusters */
  int                          con_label_aug; /* presumably the label-augmentation constraint index — TODO confirm */
  unordered_map<int, double>   node_score;
  unordered_map<int, double>   node_score_plus;
  unordered_map<int, unordered_map<pg_hypernode_t, double> > edge_score;
  vector<int>                  node_label;
};

/* Runtime settings for one inference call. */
struct inference_configuration_t {
  logical_function_t    label;  /* gold label, used by loss-augmented objectives */
  double                loss;
  score_function_t     *p_sfunc;
  double                timelimit, nbthreads;
  int                   depthlimit, max_variable_clusters;
  inference_method_t    method;
  objective_function_t  objfunc;
  bool                  use_cache, ignore_weight, proofgraph, ilp;
  
  /* BUGFIX: timelimit, depthlimit and max_variable_clusters were left
   * uninitialized (garbage reads).  They now default to large sentinels,
   * i.e. effectively unlimited until set from the command line
   * (-T/-d/-c) — TODO(review): confirm against the option parser.
   * Initializers follow declaration order. */
  inline inference_configuration_t( score_function_t &s ) :
    loss(1), p_sfunc( &s ),
    timelimit(9999.0), nbthreads(8),
    depthlimit(9999), max_variable_clusters(9999),
    method(LocalSearch), objfunc(Cost),
    use_cache(false), ignore_weight(false), proofgraph(false), ilp(false) {};
};

/* Settings for parameter learning (wraps an inference configuration). */
struct learn_configuration_t {
  double                    C, E; /* margin parameter; termination criterion */
  int                       N;    /* number of learning iterations */
  learn_method_t            method;
  inference_configuration_t ci;
  
  /* BUGFIX: E was left uninitialized; 10e-05 matches the default
   * documented in the usage text.  NOTE(review): the usage text says C
   * defaults to 1.0 but the code uses 0.5 — confirm which is intended. */
  inline learn_configuration_t( score_function_t &s ) : C(0.5), E(10e-05), N(10), method(OnlinePassiveAggressive), ci(s) {};
};

/* Algorithms. */
namespace algorithm {
  /* Find the best hypothesis explaining obs under kb; outputs the
   * hypothesis, its feature vector, and optionally the inference cache. */
  void infer( logical_function_t *p_out_best_h, sparse_vector_t *p_out_fv, lp_inference_cache_t *p_out_cache, inference_configuration_t& c, const logical_function_t &obs, const knowledge_base_t& kb );
  /* Estimate score-function weights from the training instances t. */
  void learn( score_function_t *p_out_sfunc, const learn_configuration_t &c, const vector<training_data_t>& t, const knowledge_base_t& kb );
}

/* Functions. */
namespace function {

  /* Expand the proof graph by applying kb axioms backward from node n_obs. */
  void instantiateBackwardChainings( proof_graph_t *p_out_gp, variable_cluster_t *p_out_evc, int n_obs, const knowledge_base_t &kb, const inference_configuration_t &c );
  /* Build the initial candidate-hypothesis graph from the observation. */
  void enumeratePotentialElementalHypotheses( proof_graph_t *p_out_gp, variable_cluster_t *p_out_evc, const logical_function_t &obs, const knowledge_base_t &kb, const inference_configuration_t &c );
  void addToLogicalNetwork( proof_graph_t *p_out_n, const sexp_stack_t &s_exp_lf );
  
  /* Encode the proof graph as an ILP. */
  void convertToLP( linear_programming_problem_t *p_out_lp, lp_problem_mapping_t *p_out_lprel, lp_inference_cache_t *p_out_cache, const knowledge_base_t &kb, const proof_graph_t &gp, const variable_cluster_t &evc, inference_configuration_t &c );
  /* Solvers: branch-and-bound, local search, and LP rounding. */
  void solveLP_BnB( linear_programming_problem_t *p_out_lp, const inference_configuration_t &c );
  void solveLP_LS( linear_programming_problem_t *p_out_lp, const inference_configuration_t &c );
  void roundUpLP( linear_programming_problem_t *p_out_lp );
  /* Decode an ILP solution back into a hypothesis and feature vector. */
  void convertLPToHypothesis( logical_function_t *p_out_h, sparse_vector_t *p_out_fv, const linear_programming_problem_t &lp, const lp_problem_mapping_t &lprel, const proof_graph_t &gp );
  
  void sample( vector<double> *p_out_array, const sampling_method_t m );

  /* Precompiled knowledge-base construction and I/O. */
  bool compileKB( knowledge_base_t *p_out_kb, const precompiled_kb_t &pckb );
  bool writePrecompiledKB( precompiled_kb_t &pckb, const string &filename );
  bool readPrecompiledKB( knowledge_base_t *p_out_kb, const string &filename );
  /* Parse argv into an option map and positional arguments. */
  void getParsedOption( command_option_t *p_out_opt, vector<string> *p_out_args, const string &acceptable, int argc, char **argv );

  /* True for characters that delimit s-expression tokens. */
  inline bool isSexpSep( char c ) { return '(' == c || ')' == c || '"' == c || '\'' == c || ' ' == c || '\t' == c || '\n' == c || '\r' == c; };

  /* Collect the constants appearing in lf (clears the output set first).
   * Note: the name keeps its original spelling ("Constatns") because
   * callers elsewhere depend on it. */
  inline void enumerateConstatns( unordered_set<store_item_t> *p_out_cons, const logical_function_t &lf ) {
    vector<const literal_t*> literals;
    lf.getAllLiterals( &literals );
    p_out_cons->clear();
    for( int i=0; i<literals.size(); i++ )
      for( int j=0; j<literals[i]->terms.size(); j++ ) {
        if( g_store.isConstant( literals[i]->terms[j] ) ) p_out_cons->insert( literals[i]->terms[j] );
      }
  }

  /* Collect every term of lf.  NOTE(review): unlike enumerateConstatns,
   * this does not clear the output set first — confirm that accumulation
   * is intended. */
  inline void enumerateTerms( unordered_set<store_item_t> *p_out_cons, const logical_function_t &lf ) {
    vector<const literal_t*> literals;
    lf.getAllLiterals( &literals );
    for( int i=0; i<literals.size(); i++ )
      for( int j=0; j<literals[i]->terms.size(); j++ )
        p_out_cons->insert( literals[i]->terms[j] );
  }
  
  /* *p_out += sv, element-wise. */
  inline void addVector( sparse_vector_t *p_out, const sparse_vector_t &sv ) {
    for( sparse_vector_t::const_iterator iter_sv = sv.begin(); sv.end() != iter_sv; ++iter_sv )
      (*p_out)[ iter_sv->first ] += iter_sv->second;
  }
  
  /* Debug-print sv to cerr as "index:value, ...". */
  inline void dumpVector( const sparse_vector_t &sv ) {
    for( sparse_vector_t::const_iterator iter_sv = sv.begin(); sv.end() != iter_sv; ++iter_sv )
      cerr << iter_sv->first << ":" << iter_sv->second << ", ";
    cerr << endl;
  }
  
  /* Render sv as "index:value " pairs.
   * BUGFIX: snprintf bounds the buffer (sprintf did not). */
  inline string toString( const sparse_vector_t &sv ) {
    string exp;
    for( sparse_vector_t::const_iterator iter_sv = sv.begin(); sv.end() != iter_sv; ++iter_sv ) {
      char buffer[ 1024 ]; snprintf( buffer, sizeof(buffer), "%d:%.2f ", iter_sv->first, iter_sv->second );
      exp += buffer;
    }
    return exp;
  }

  /* Copy the feature indices (keys) of s into *p_out_indices. */
  inline void getVectorIndices( unordered_set<int> *p_out_indices, const sparse_vector_t &s ) {
    for( sparse_vector_t::const_iterator iter_f = s.begin(); s.end() != iter_f; ++iter_f )
      p_out_indices->insert( iter_f->first );
  }

  /* Most general unifier of p1 and p2; false when the predicates or
   * arities differ, or two distinct constants would have to unify. */
  inline bool getMGU( unifier_t *p_out_u, const literal_t &p1, const literal_t &p2 ) {
    if( p1.predicate != p2.predicate ) return false;
    if( p1.terms.size() != p2.terms.size() ) return false;
    for( int i=0; i<p1.terms.size(); i++ ) {
      if( p1.terms[i] == p2.terms[i] ) { p_out_u->add( p1.terms[i], p2.terms[i] ); continue; }
      if( g_store.isConstant( p1.terms[i] ) && g_store.isConstant( p2.terms[i] ) ) return false;
      p_out_u->add( p1.terms[i], p2.terms[i] );
    }
    return true;
  }

  /* Wall-clock time in seconds (microsecond resolution). */
  inline double getTimeofDaySec() {
    timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + (double)tv.tv_usec*1e-6;
  }
 
} // namespace function

/* Variables. */
int extern g_new_variable_index;





















// void explainByUnification( proof_graph_t *p_out_gp, variable_cluster_t *p_out_evc, const inference_configuration_t &c );
