
#include "defs.h"

#include <time.h>
#include <stdlib.h>
#include <math.h>

#include <fstream>
#include <algorithm>

#include "darts.h"


using namespace function;

/* Registers every literal of the observation obs as an ObservableNode in
 * *p_out_pg, scores it with the scoring function in c, and starts
 * backward-chaining from it. */
void function::enumeratePotentialElementalHypotheses( proof_graph_t *p_out_pg, variable_cluster_t *p_out_evc, const logical_function_t &obs, const knowledge_base_t &kb, const inference_configuration_t &c ) {

  /* Collect every literal contained in the observation. */
  vector<const literal_t*> obs_literals;
  obs.getAllLiterals( &obs_literals );

  for( size_t idx = 0; idx < obs_literals.size(); ++idx ) {

    /* Each observed literal becomes its own observable node. */
    int n_node = p_out_pg->addNode( *obs_literals[idx], ObservableNode );
    p_out_pg->nodes[ n_node ].obs_node = n_node;

    /* Score the freshly created node. */
    c.p_sfunc->featureFunction( &p_out_pg->nodes[ n_node ].fv, *p_out_pg, n_node );
    p_out_pg->nodes[ n_node ].score = c.p_sfunc->getScore( p_out_pg->nodes[ n_node ].fv, c.ignore_weight );

    /* Hypothesize potential explanations of this observation. */
    instantiateBackwardChainings( p_out_pg, p_out_evc, n_node, kb, c );
  }

}

/* Hypothesizes the antecedents of every axiom whose consequent unifies
 * with the literal of node n_obs, adds the new nodes and hyper edges to
 * *p_out_pg, and recurses on each newly created node until c.depthlimit
 * is reached. */
void function::instantiateBackwardChainings( proof_graph_t *p_out_pg, variable_cluster_t *p_out_evc, int n_obs, const knowledge_base_t &kb, const inference_configuration_t &c ) {

  /* Stop the recursion at the configured depth limit. */
  if( p_out_pg->nodes[ n_obs ].depth > c.depthlimit-1 ) return;

  /* Look up the axioms indexed by this literal's predicate/arity key. */
  int unifiable_axioms;
  kb.da.exactMatchSearch( p_out_pg->nodes[ n_obs ].lit.toPredicateArity().c_str(), unifiable_axioms );

  if( -1 == unifiable_axioms ) return;

  /* For each unifiable axiom.  The axioms for one key are stored as one
   * tab-separated string; tokenize the std::string directly.  (The
   * previous version strcpy'ed it into a fixed 1024-byte stack buffer,
   * which overflows for long axiom lists.) */
  const string &axiom_list = kb.axioms[ unifiable_axioms ];

  for( string::size_type begin = 0; begin < axiom_list.size(); ) {

    string::size_type end = axiom_list.find( '\t', begin );
    if( string::npos == end ) end = axiom_list.size();

    string p_axiom = axiom_list.substr( begin, end - begin );
    begin          = end + 1;

    if( p_axiom.empty() ) continue; /* strtok-compatible: skip empty fields. */

    stringstream ss( p_axiom );

    for( sexp_reader_t sr(ss); !sr.isEnd(); ++sr ) {

      /* For each clause that has the literal n_obs in its right-hand side, */
      if( sr.stack.isFunctor( "=>" ) ) {

        logical_function_t lf( sr.stack );
        string             axiom_str = lf.toString();

        /* Never apply the same axiom twice on one derivation path. */
        if( p_out_pg->nodes[ n_obs ].axiom_used.end() != p_out_pg->nodes[ n_obs ].axiom_used.find( axiom_str ) )
          continue; /* That's loopy axiom. */

        /* Produce substitution: unify the consequent with the node's literal. */
        unifier_t theta;
        if( !getMGU( &theta, lf.branches[1].lit, p_out_pg->nodes[ n_obs ].lit ) ) continue;

        /* Collect the antecedent literals (single literal or conjunction). */
        vector<literal_t> lhs_literals;

        if( Literal == lf.branches[0].opr )
          lhs_literals.push_back( lf.branches[0].lit );
        else {
          for( int j=0; j<lf.branches[0].branches.size(); j++ )
            lhs_literals.push_back( lf.branches[0].branches[j].lit );
        }

        /* Perform backward-chaining. */
        vector<int> backchained_literals;

        for( int j=0; j<lhs_literals.size(); j++ ) {

          literal_t &lit = lhs_literals[j];

          /* Bind every term the unifier has not touched to a fresh
           * variable u<N>. */
          for( int k=0; k<lit.terms.size(); k++ ) {
            if( !theta.isApplied( lit.terms[k] ) ) {
              char var_name[1024]; sprintf( var_name, "u%d", g_new_variable_index++ );
              theta.add( lit.terms[k], var_name );
            }
          }

          theta.apply( &lit );

          int n_backchained = p_out_pg->addNode( lit, HypothesisNode );

          /* Set the node parameters: one step deeper, distance scaled by
           * the number of antecedents, provenance inherited from n_obs. */
          p_out_pg->nodes[ n_backchained ].depth      = p_out_pg->nodes[ n_obs ].depth + 1;
          p_out_pg->nodes[ n_backchained ].distance   = p_out_pg->nodes[ n_obs ].distance * 1.2 / lhs_literals.size();
          p_out_pg->nodes[ n_backchained ].obs_node   = p_out_pg->nodes[ n_obs ].obs_node;
          p_out_pg->nodes[ n_backchained ].axiom_used = p_out_pg->nodes[ n_obs ].axiom_used;
          p_out_pg->nodes[ n_backchained ].axiom_used.insert( axiom_str );
          p_out_pg->nodes[ n_backchained ].nodes_appeared.insert( n_obs );

          c.p_sfunc->featureFunction( &p_out_pg->nodes[ n_backchained ].fv, *p_out_pg, n_backchained );
          p_out_pg->nodes[ n_backchained ].score    = c.p_sfunc->getScore( p_out_pg->nodes[ n_backchained ].fv, c.ignore_weight );

          /* Perform further backward-inference on the back-chained literal. */
          instantiateBackwardChainings( p_out_pg, p_out_evc, n_backchained, kb, c );
          backchained_literals.push_back( n_backchained );

        }

        /* Tie all antecedents into one hyper node and connect it to n_obs. */
        if( backchained_literals.size() > 0 ) {
          pg_hypernode_t hn = p_out_pg->addHyperNode( backchained_literals );
          p_out_pg->addEdge( n_obs, hn );
        }

      }
    }

  }

}

/* Encodes the proof graph pg as an ILP in *p_out_lp.
 *
 * For every node i two binary variables are created: h_i ("the literal is
 * hypothesized") and w_i ("the literal is paid for"); w_i carries the node
 * score in the objective.  For every hyper node an AND variable is created,
 * for every unifiable node pair a unification variable u, and for every
 * term/cluster pair a membership variable c.  The mappings are recorded in
 * *p_out_lprel, and per-node/per-edge scores in *p_out_cache.
 *
 * NOTE(review): several constraints below rely on w_i being created
 * immediately after h_i (index v_h + 1) — keep the addVariable order. */
void function::convertToLP( linear_programming_problem_t *p_out_lp, lp_problem_mapping_t *p_out_lprel, lp_inference_cache_t *p_out_cache, const knowledge_base_t &kb, const proof_graph_t &pg, const variable_cluster_t &evc, inference_configuration_t& c ) {

  for( int i=0; i<pg.nodes.size(); i++ ) {
    /* v_w is created right after v_h; later code addresses it as n2v[i]+1. */
    int              v_h    = p_out_lp->addVariable( lp_variable_t( "h_" + pg.nodes[i].lit.toString() ) ),
                     v_w    = p_out_lp->addVariable( lp_variable_t( "w_" + pg.nodes[i].lit.toString() ) );
    
    double w_score = pg.nodes[i].score;

    /* w - h <= 0: a node cannot be paid for unless it is hypothesized. */
    lp_constraint_t con_nopay( LessEqual, 0.0 );
    con_nopay.push_back( v_w, 1.0 );
    con_nopay.push_back( v_h, -1.0 );
    p_out_lp->addConstraint( con_nopay );
    
    /* For gold labels, */
    if( c.label.includes( pg.nodes[i].lit ) ) {
      p_out_cache->node_label.push_back(i);

      if( LossAugmented == c.objfunc ) {
        /* NOTE(review): bare `throw;` with no active exception terminates
           the program — loss-augmented inference is unimplemented here. */
        throw;
        //h_score += c.loss;
      }
    }

    /* The payment variable carries the node score in the objective. */
    p_out_lp->variables[v_w].obj_val = w_score;
    p_out_cache->node_score[i]       = w_score;
    
    /* h. Observable nodes are forced into the hypothesis (h = 1). */
    if( ObservableNode == pg.nodes[i].type )
      p_out_lp->addConstraint( lp_constraint_t( Equal, 1.0, v_h, 1.0 ) );

    p_out_lprel->n2v[i] = v_h;
  }

  /* Create a label-augmented inference constraint. */
  /* At least one gold-labeled node must be hypothesized (only enforced
     when the objective is LabelGiven; deactivated otherwise). */
  lp_constraint_t con( GreaterEqual, 1.0 );

  for( int i=0; i<p_out_cache->node_label.size(); i++ ) {
    int n = p_out_cache->node_label[i];
    con.push_back( p_out_lprel->n2v[n], 1.0 );
  }
  
  p_out_cache->con_label_aug = p_out_lp->addConstraint( con );
  
  if( LabelGiven != c.objfunc )
    p_out_lp->deactivateConstraint( p_out_cache->con_label_aug );

  /* Edge variables: one AND variable per multi-member hyper node. */
  for( pg_edge_set_t::const_iterator iter_eg = pg.edges.begin(); pg.edges.end() != iter_eg; ++iter_eg ) {
    for( int i=0; i<iter_eg->second.size(); i++ ) {

      /* Assign an edge score. */
      /* NOTE(review): the third argument is the loop index i, not the head
         node iter_eg->first — confirm featureFunction's expected arguments. */
      sparse_vector_t &v_edge = p_out_lprel->fv_edge[ iter_eg->first ][ iter_eg->second[i] ];
      c.p_sfunc->featureFunction( &v_edge, pg, i, iter_eg->second[i] );
    
      double edge_score                                                      = c.p_sfunc->getScore( v_edge, c.ignore_weight );
      p_out_cache->edge_score[ iter_eg->first ][ iter_eg->second[i] ]        = edge_score;
      
       /* h_c1 ^ h_c2 ^ ... ^ h_c3. */
      if( pg.hypernodes[ iter_eg->second[i] ].size() > 1 ) {

        char            buffer[1024]; sprintf( buffer, "and_%d", iter_eg->second[i] );
        int             v_hn = p_out_lp->addVariable( lp_variable_t( string( buffer ) ) );
        lp_constraint_t con( LessEqual, 0.0 );

        /* n * v_hn - sum_j h_j <= 0: v_hn can be 1 only when every member
           of the hyper node is hypothesized. */
        con.push_back( v_hn, 1.0 * pg.hypernodes[ iter_eg->second[i] ].size() );
        
        for( int j=0;j<pg.hypernodes[ iter_eg->second[i] ].size(); j++ )
          con.push_back( p_out_lprel->n2v[ pg.hypernodes[ iter_eg->second[i] ][j] ], -1.0 );

        p_out_lprel->hn2v[ iter_eg->second[i] ] = v_hn;
        p_out_lp->addConstraint( con );
        
      } else
        p_out_lprel->hn2v[ iter_eg->second[i] ] = p_out_lprel->n2v[ pg.hypernodes[ iter_eg->second[i] ][0] ];

      /* Objective coefficient = */
      p_out_lp->variables[ p_out_lprel->hn2v[ iter_eg->second[i] ] ].obj_val += edge_score;
      
    }
  }

  /* For each pair of unification, */
  variable_cluster_t vc;
  unordered_map<int, vector<int> > hwu;
  int num_cluster = min( c.max_variable_clusters, (int)kb.constants.size() );
  
  /* Enumerate node pairs that share the same predicate and arity. */
  for( pg_node_map_t::const_iterator iter_p2an=pg.p2n.begin(); pg.p2n.end()!=iter_p2an; ++iter_p2an ) {
    for( unordered_map<int, vector<int> >::const_iterator iter_pa2n=iter_p2an->second.begin(); iter_p2an->second.end()!=iter_pa2n; ++iter_pa2n ) {
      if( iter_pa2n->second.size() <= 1 ) continue;

      const vector<int> &nodes = iter_pa2n->second;
      
      for( int i=0; i<nodes.size(); i++ ) {
        for( int j=i+1; j<nodes.size(); j++ ) {

          unifier_t uni;
          if( !getMGU( &uni, pg.nodes[ nodes[i] ].lit, pg.nodes[ nodes[j] ].lit ) ) continue;

          if( pg.nodes[ nodes[i] ].nodes_appeared.end() != pg.nodes[ nodes[i] ].nodes_appeared.find( nodes[j] ) ||
              pg.nodes[ nodes[j] ].nodes_appeared.end() != pg.nodes[ nodes[j] ].nodes_appeared.find( nodes[i] ) )
            continue; /* Circular unification. */
            
          const literal_t &l_i = pg.nodes[ nodes[i] ].lit, &l_j = pg.nodes[ nodes[j] ].lit;
          /* The node to be explained away by unification is the one with
             the larger score (nodes[i] by default). */
          int erased_node = nodes[i];

          if( p_out_cache->node_score[ nodes[i] ] < p_out_cache->node_score[ nodes[j] ] )
            erased_node = nodes[j];

          /* Variable of scoring for unification. */
          int v_u = p_out_lp->addVariable( lp_variable_t( "u_" + l_i.toString() + "~" + l_j.toString() ) );
          hwu[ erased_node ].push_back( v_u );
            
          /* We create ILP variable for each sub. */
          /* 9999.0 below acts as a big-M constant that relaxes the
             cluster-agreement constraints while u = 1. */
          int             n_subs = 0;
          lp_constraint_t con_u1( LessEqual, 0 ), con_u2( LessEqual, 0 );
          lp_constraint_t con_u_sub_upper( LessEqual, 9999.0 ), con_u_sub_lower( LessEqual, 9999.0 );
          
          for( int k=0; k<uni.substitutions.size(); k++ ) {

            if( uni.substitutions[k].terms[0] == uni.substitutions[k].terms[1] ) continue;

            store_item_t t1 = uni.substitutions[k].terms[0], t2 = uni.substitutions[k].terms[1];
            n_subs++;
            
            /* The terms are clustered into the same equivalent cluster. */
            /* Lazily create the term-to-cluster membership variables;
               non-zero clusters get a small penalty of 0.1. */
            for( int c=0; c<num_cluster; c++ ) {
              int v_t1c = p_out_lprel->vc2v[t1][c], v_t2c = p_out_lprel->vc2v[t2][c];
              if( 0 == v_t1c ) { v_t1c = p_out_lprel->vc2v[t1][c] = p_out_lp->addVariable( lp_variable_t("c_"+g_store.claim(t1)+"?") ); p_out_lp->variables[ v_t1c ].obj_val = 0==c ? 0.0 : 0.1; }
              if( 0 == v_t2c ) { v_t2c = p_out_lprel->vc2v[t2][c] = p_out_lp->addVariable( lp_variable_t("c_"+g_store.claim(t2)+"?") ); p_out_lp->variables[ v_t2c ].obj_val = 0==c ? 0.0 : 0.1; }
              if( 0 == c ) continue;
              con_u_sub_upper.push_back( v_t1c, k*100.0 + (c+1) );
              con_u_sub_upper.push_back( v_t2c, -(k*100.0 + (c+1)) );
              con_u_sub_lower.push_back( v_t1c, -(k*100.0 + (c+1)) );
              con_u_sub_lower.push_back( v_t2c, k*100.0 + (c+1) );
              con_u2.push_back( v_t1c, -1.0 );
              con_u2.push_back( v_t2c, -1.0 );
            }

            vc.add( t1, t2 );
            
          }

          /* 2u - h_i - h_j <= 0: unification requires both literals. */
          con_u1.push_back( v_u, 2.0 );
          con_u1.push_back( p_out_lprel->n2v[ nodes[i] ], -1.0 );
          con_u1.push_back( p_out_lprel->n2v[ nodes[j] ], -1.0 );

          con_u2.push_back( v_u, n_subs*2 );
          
          con_u_sub_upper.push_back( v_u, 9999.0 );
          con_u_sub_lower.push_back( v_u, 9999.0 );
          
          p_out_lp->addConstraint( con_u1 );
          p_out_lp->addConstraint( con_u2 );
          p_out_lp->addConstraint( con_u_sub_upper );
          p_out_lp->addConstraint( con_u_sub_lower );
          
        } }
      
    }
  }

  /* For each node, */
  for( int i=0; i<pg.nodes.size(); i++ ) {
    
    /* !w -> !h v h_e1 v h_e2 v ... v h_en v u_1 v u_2 v ... */
    /* h ^ !h_e1 ^ !h_e2 ^ ... ^ !h_en ^ !u_1 ^ !u_2 ^ ... -> w */
    pg_edge_set_t::const_iterator              iter_eg = pg.edges.find(i);
    unordered_map<int, vector<int> >::iterator iter_us = hwu.find(i);
    /* NOTE(review): -1.0+1.0 == 0.0 — presumably kept for readability of
       the moved-to-RHS constant; confirm intent. */
    lp_constraint_t                            con( LessEqual, -1.0+1.0 );

    /* n2v[i]+1 is the w variable (created right after h in the first loop). */
    con.push_back( p_out_lprel->n2v[i]+1, -1.0 );
    con.push_back( p_out_lprel->n2v[i], 1.0 );
    
    if( pg.edges.end() != iter_eg )
      for( int j=0; j<iter_eg->second.size(); j++ )
        con.push_back( p_out_lprel->hn2v[ iter_eg->second[j] ], -1.0 );

    if( hwu.end() != iter_us )
      for( int j=0; j<iter_us->second.size(); j++ )
        con.push_back( iter_us->second[j], -1.0 );

    p_out_lp->addConstraint( con );
    
    /* h_e1 -> h */
    if( pg.edges.end() != iter_eg ) {
      for( int j=0; j<iter_eg->second.size(); j++ ) {
        lp_constraint_t con_exp( LessEqual, 1.0, p_out_lprel->hn2v[ iter_eg->second[j] ], 1.0, p_out_lprel->n2v[i] );
        p_out_lp->addConstraint( con_exp );
      }
    }
    
  }

  /* For each variable cluster. */
  /* Each term must belong to exactly one cluster (con_m, Equal 1);
     con_c additionally collects, per cluster, the membership variables of
     constant terms. */
  unordered_map<int, lp_constraint_t> con_c;
  
  for( unordered_map<store_item_t, unordered_map<int, int> >::iterator iter_v2cv=p_out_lprel->vc2v.begin(); p_out_lprel->vc2v.end()!=iter_v2cv; ++iter_v2cv ) {
    lp_constraint_t con_m( Equal, 1.0 );
    
    for( unordered_map<int, int>::iterator iter_vc2v=iter_v2cv->second.begin(); iter_v2cv->second.end()!=iter_vc2v; ++iter_vc2v ) {
      if( g_store.isConstant(iter_v2cv->first) ) con_c[ iter_vc2v->first ].push_back( iter_vc2v->second, 1.0 );
      con_m.push_back( iter_vc2v->second, 1.0 );
    }

    p_out_lp->addConstraint( con_m );
  }

  /* Every non-zero cluster that several constants could join must contain
     exactly one constant. */
  for( unordered_map<int, lp_constraint_t>::iterator iter_c=con_c.begin(); con_c.end()!=iter_c; ++iter_c ) {
    if( 0 == iter_c->first ) continue;
    if( 1 == iter_c->second.vars.size() ) continue;
    iter_c->second.opr = Equal;
    iter_c->second.rhs = 1;
    iter_c->second.lhs = 1;
    p_out_lp->addConstraint( iter_c->second );
  }

  /* Potential equivalent cluster. */
  /* Symmetry breaking: each potential equivalence class is pinned to its
     own contiguous band of cluster ids; membership variables outside the
     band are fixed to 0. */
  int current_cluster = 1;
  for( variable_cluster_t::cluster_t::iterator iter_c2v=vc.clusters.begin(); vc.clusters.end()!=iter_c2v; ++iter_c2v ) {
    
    /* The band must hold at least one cluster, and one per constant term. */
    int num_cluster_required = 0;

    for( unordered_set<store_item_t>::iterator iter_v=iter_c2v->second.begin(); iter_c2v->second.end()!=iter_v; ++iter_v )
      if( g_store.isConstant(*iter_v) ) num_cluster_required++;

    num_cluster_required = max( 1, num_cluster_required );
    
    for( unordered_set<store_item_t>::iterator iter_v=iter_c2v->second.begin(); iter_c2v->second.end()!=iter_v; ++iter_v ) {
      for( int i=1; i<num_cluster; i++ ) {
        if( i >= current_cluster && current_cluster+num_cluster_required > i ) continue;
        
        lp_constraint_t con( Equal, 0.0 );
        con.push_back( p_out_lprel->vc2v[ *iter_v ][ i ], 1.0 );
        p_out_lp->addConstraint( con );
        
      }
    }

    current_cluster += num_cluster_required;
    
  }
  
}

/* Reads the optimized ILP solution back into a logical hypothesis:
 * *p_out_h becomes the conjunction of all hypothesized literals plus the
 * active substitutions; if p_out_fv is non-NULL, the feature vectors of
 * paid nodes and fired edges are accumulated into it. */
void function::convertLPToHypothesis( logical_function_t *p_out_h, sparse_vector_t *p_out_fv, const linear_programming_problem_t &lp, const lp_problem_mapping_t &lprel, const proof_graph_t &pg ) {

  /* Start from an empty conjunction. */
  p_out_h->opr = AndOperator;
  p_out_h->branches.clear();

  for( int i=0; i<pg.nodes.size(); i++ ) {

    int  v_h            = lprel.n2v.find(i)->second;
    bool f_hypothesized = ( 1.0 == lp.variables[ v_h ].optimized );

    /* A node with h = 1 belongs to the hypothesis. */
    if( f_hypothesized )
      p_out_h->branches.push_back( logical_function_t( pg.nodes[i].lit ) );

    /* Collect the feature vector when the payment variable (stored right
       after h) is also on. */
    if( f_hypothesized && 1.0 == lp.variables[ v_h+1 ].optimized && NULL != p_out_fv )
      addVector( p_out_fv, pg.nodes[i].fv );

  }

  /* Emit every active substitution as a substitution literal. */
  for( substitution2v_t::const_iterator it_sub=lprel.sub2v.begin(); lprel.sub2v.end()!=it_sub; ++it_sub ) {
    for( unordered_map<store_item_t, int>::const_iterator it_tv=it_sub->second.begin(); it_sub->second.end()!=it_tv; ++it_tv ) {
      if( 1.0 == lp.variables[ it_tv->second ].optimized )
        p_out_h->branches.push_back( logical_function_t( literal_t(PredicateSubstitution, it_sub->first, it_tv->first) ) );
    }
  }

  /* Add the feature vectors of the edges that fired in the hypothesis. */
  for( pg_edge_set_t::const_iterator it_eg = pg.edges.begin(); pg.edges.end() != it_eg; ++it_eg ) {
    for( int i=0; i<it_eg->second.size(); i++ ) {

      int n_first = pg.hypernodes[ it_eg->second[i] ][0];

      if( NULL != p_out_fv && 1.0 == lp.variables[ lprel.n2v.find( n_first )->second ].optimized )
        addVector( p_out_fv, lprel.fv_edge.find( it_eg->first )->second.find( it_eg->second[i] )->second );

    }
  }

}

 /* Parses the s-expression s_exp_lf as an implication axiom and adds it to
  * the logical network *p_out_n: all LHS literals are bundled into one
  * hyper node, and every RHS literal is connected to that hyper node. */
 void function::addToLogicalNetwork( proof_graph_t *p_out_n, const sexp_stack_t &s_exp_lf ) {

  logical_function_t axiom( s_exp_lf );

  /* Gather the literals on each side of the implication. */
  vector<const literal_t*> literals_lhs, literals_rhs;
  axiom.branches[0].getAllLiterals( &literals_lhs );
  axiom.branches[1].getAllLiterals( &literals_rhs );

  /* Register every LHS literal and bundle them into one hyper node. */
  vector<int> nodes_lhs;

  for( size_t j=0; j<literals_lhs.size(); j++ )
    nodes_lhs.push_back( p_out_n->addNode( *literals_lhs[j], LogicalNetworkNode ) );

  pg_hypernode_t hn_lhs = p_out_n->addHyperNode( nodes_lhs );

  /* Connect each RHS literal to the LHS hyper node. */
  for( size_t i=0; i<literals_rhs.size(); i++ )
    p_out_n->addEdge( p_out_n->addNode( *literals_rhs[i], LogicalNetworkNode ), hn_lhs );

}

/* Fills *p_out_array with samples drawn by method m:
 *   Random  — pseudo-random values in [0, 1) with 1/10000 resolution;
 *   Uniform — every element set to 1/n where n is the array size.
 * Does nothing for an empty array. */
void function::sample( vector<double> *p_out_array, const sampling_method_t m ) {

  /* Guard: 1.0/size would divide by zero below. */
  if( 0 == p_out_array->size() ) return;

  switch(m) {

  case Random: {
    /* Seed the PRNG only once per process.  The previous version called
       srand(time(NULL)) on every invocation, so repeated calls within the
       same second produced identical "random" samples. */
    static bool f_seeded = false;
    if( !f_seeded ) { srand( time(NULL) ); f_seeded = true; }

    for( size_t i=0; i<p_out_array->size(); i++ )
      (*p_out_array)[i] = (rand() % 10000) / 10000.0;
    break; }

  case Uniform: {
    double val = 1.0 / p_out_array->size();
    for( size_t i=0; i<p_out_array->size(); i++ ) (*p_out_array)[i] = val;
    break; }

  }

}

/* Compiles the precompiled knowledge base pckb into *p_out_kb: groups the
 * axioms by their "predicate/arity" key (joined with tabs), sorts the
 * keys, and builds the Darts double-array trie over them.  Returns false
 * if the trie could not be built. */
bool function::compileKB( knowledge_base_t *p_out_kb, const precompiled_kb_t &pckb ) {

  unordered_map<string, string> pa2axioms;

  /* Join all axioms sharing a predicate/arity key into one tab-separated
     record, and remember each key. */
  for( precompiled_kb_t::const_iterator iter_p = pckb.begin(); pckb.end() != iter_p; ++iter_p ) {
    for( unordered_map<int, vector<string> >::const_iterator iter_a = iter_p->second.begin(); iter_p->second.end() != iter_a; ++iter_a ) {

      char key_buf[ 1024 ];
      sprintf( key_buf, "%s/%d", g_store.claim( iter_p->first ).c_str(), iter_a->first );

      string key( key_buf );
      p_out_kb->keys.push_back( key );

      string &record = pa2axioms[ key ];

      for( size_t i=0; i<iter_a->second.size(); i++ ) {
        if( 0 != i ) record += "\t";
        record += iter_a->second[i];
      }

    }
  }

  /* Sort it! Darts requires the keys in lexicographic order. */
  sort( p_out_kb->keys.begin(), p_out_kb->keys.end() );

  /* Prepare the key-index pairs for DARTS. */
  vector<const char*> da_keys;
  vector<int>         da_vals;

  for( size_t i=0; i<p_out_kb->keys.size(); i++ ) {
    da_keys.push_back( p_out_kb->keys[i].c_str() );
    da_vals.push_back( (int)i );
    p_out_kb->axioms.push_back( pa2axioms[ p_out_kb->keys[i] ] );
  }

  /* Build the hash table; Darts returns zero on success. */
  return 0 == p_out_kb->da.build( da_keys.size(), &da_keys[0], 0, &da_vals[0] );

}

/* Serializes the precompiled knowledge base pckb to `filename`: first the
 * axiom count, then each axiom as (length, bytes), followed by the Darts
 * double-array appended to the same file.  Returns false on any failure.
 * (The previous version ignored the result of compileKB and never checked
 * that the output stream opened.) */
bool function::writePrecompiledKB( precompiled_kb_t &pckb, const string &filename ) {

  knowledge_base_t kb;
  if( !compileKB( &kb, pckb ) ) return false;

  /* Write all the axioms at once. */
  ofstream ofs( filename.c_str(), ios::binary );
  if( !ofs.is_open() ) return false;

  int size = kb.axioms.size();
  ofs.write( (char*)&size, sizeof(int) );

  for( size_t i=0; i<kb.axioms.size(); i++ ) {
    size = kb.axioms[i].size();
    ofs.write( (char *)&size, sizeof(int) );
    ofs.write( (char *)kb.axioms[i].c_str(), size );
  }

  if( !ofs.good() ) return false;

  ofs.close();

  /* Write the hash table (appended after the axiom section). */
  if( 0 != kb.da.save( filename.c_str(), "ab" ) ) return false;

  return true;

}

/* Loads a knowledge base previously written by writePrecompiledKB: reads
 * the axiom count, each (length, bytes) axiom record, and finally opens
 * the Darts double-array stored after the axiom section.  Returns false
 * on any failure.
 * (The previous version read each axiom into a fixed 1024-byte stack
 * buffer — overflowing on long axioms — and built the string from a
 * buffer that was never NUL-terminated.) */
bool function::readPrecompiledKB( knowledge_base_t *p_out_kb, const string &filename ) {

  ifstream ifs_pckb( filename.c_str(), ios::binary );
  if( !ifs_pckb.is_open() ) return false;

  /* Read the header. */
  int num_axioms, size_header = 0;
  ifs_pckb.read( (char *)&num_axioms, sizeof(int) );
  size_header += sizeof(int);

  for( int i=0; i<num_axioms; i++ ) {
    int axiom_length;
    ifs_pckb.read( (char *)&axiom_length, sizeof(int) );
    if( !ifs_pckb.good() || axiom_length < 0 ) return false;

    /* Read the axiom into a correctly sized string. */
    string axiom( axiom_length, '\0' );
    if( axiom_length > 0 ) ifs_pckb.read( &axiom[0], axiom_length );
    if( !ifs_pckb.good() ) return false;

    p_out_kb->axioms.push_back( axiom );

    size_header += sizeof(int) + axiom_length;
  }

  ifs_pckb.close();

  /* Read the hash table, stored right after the axiom section. */
  if( 0 != p_out_kb->da.open( filename.c_str(), "rb", size_header ) )
    return false;

  return true;

}

/* Parses the command line with getopt: recognized flags (per the
 * `acceptable` optstring) go into *p_out_opt — mapped to their argument,
 * or "" for flags without one — and the remaining positional arguments
 * are appended to *p_out_args. */
void function::getParsedOption( command_option_t *p_out_opt, vector<string> *p_out_args, const string &acceptable, int argc, char **argv ) {

  /* Consume every recognized option flag. */
  for( int opt; -1 != (opt = getopt( argc, argv, acceptable.c_str() )); )
    (*p_out_opt)[ opt ] = (NULL == optarg) ? "" : optarg;

  /* Whatever getopt left over is a positional argument. */
  for( int i=optind; i<argc; i++ )
    p_out_args->push_back( argv[i] );

}

/* Prints, as XML <explanation> elements on stdout, every edge of the proof
 * graph whose head node and all members of the tail hyper node were set to
 * 1 in the optimized LP solution.  The axiom attribute is left empty. */
void proof_graph_t::printGraph( const linear_programming_problem_t &lpp, const lp_problem_mapping_t &lprel ) const {

  for( pg_edge_set_t::const_iterator iter_eg = edges.begin(); edges.end() != iter_eg; ++iter_eg ) {

    /* Skip edges whose head node is not active in the solution. */
    if( lpp.variables[ lprel.n2v.find( iter_eg->first )->second ].optimized < 1.0 ) continue;
    
    for( int i=0; i<iter_eg->second.size(); i++ ) {

      /* Count how many members of this hyper node are active. */
      int n_active = 0;
      
      for( int j=0; j<hypernodes[ iter_eg->second[i] ].size(); j++ )
        if( lpp.variables[ lprel.n2v.find( hypernodes[ iter_eg->second[i] ][j] )->second ].optimized == 1.0 ) n_active++;

      /* Only fully active hyper nodes constitute an explanation. */
      if( hypernodes[ iter_eg->second[i] ].size() != n_active ) continue;
      
      cout << "<explanation axiom=\"\">";
      
      /* Print the conjunction of tail literals. */
      for( int j=0; j<hypernodes[ iter_eg->second[i] ].size(); j++ ) {
        cout << nodes[ hypernodes[ iter_eg->second[i] ][j] ].toString();
        if( j < hypernodes[ iter_eg->second[i] ].size()-1 ) cout << " ^ ";
      }

      cout << " => " << nodes[ iter_eg->first ].toString() << "</explanation>" << endl;
      
    }
        
  }
  
}

/* Collects into *p_out_nodes every node whose literal unifies with lit
 * without binding two distinct known (non-variable) terms to each other.
 * Returns false when no candidate list exists for lit's predicate/arity
 * or when no compatible node was found. */
bool proof_graph_t::getNodeHerbrand( vector<int> *p_out_nodes, const literal_t &lit ) const {

  /* Candidate nodes sharing lit's predicate and arity. */
  const vector<int> *pa_list;
  if( !getNode( &pa_list, lit.predicate, lit.terms.size() ) ) return false;

  for( size_t i=0; i<pa_list->size(); i++ ) {

    int n_cand = (*pa_list)[i];

    unifier_t uni;
    if( !function::getMGU( &uni, nodes[ n_cand ].lit, lit ) ) continue;

    /* The unifier must not equate two distinct known terms; a pair is
       fine when both terms are unknowns or when they are identical. */
    bool f_compatible = true;

    for( size_t j=0; j<uni.substitutions.size(); j++ ) {
      if( g_store.isUnknown( uni.substitutions[j].terms[0] ) && g_store.isUnknown( uni.substitutions[j].terms[1] )) continue;
      if( uni.substitutions[j].terms[0] == uni.substitutions[j].terms[1] ) continue;
      f_compatible = false;
      break;
    }

    if( f_compatible ) p_out_nodes->push_back( n_cand );

  }

  return 0 != p_out_nodes->size();

}


/* Thanks for https://gist.github.com/240957. */
/* Advances the reader past the next complete s-expression in m_stream and
 * stores it in `stack`.  A ';' starts a comment that runs to end of line.
 * The parse state lives in m_stack: ListStack frames for "(...)",
 * StringStack for double-quoted strings (with backslash escapes), and
 * TupleStack for bare atoms; a '\'' is desugared into a (quote ...) tuple
 * that swallows the single following expression. */
sexp_reader_t &sexp_reader_t::operator++() {

  bool f_comment = false;

  while( m_stream.good() ) {

    char c = m_stream.get();
    
    /* Comment handling: skip everything from ';' to the newline. */
    if( ';' == c ) { f_comment = true; continue; }
    if( f_comment ) {
      if( '\n' == c ) f_comment = false;
      continue;
    }
    
    switch( m_stack.back()->type ) {
    
    case ListStack: {
      /* Inside a list: open nested structures, or close this one. */
      if( '(' == c ) { m_stack.push_back( new_stack( sexp_stack_t(ListStack) ) ); }
      else if( ')' == c ) {
        /* Fold the finished list into its parent; if the parent is a
           (quote ...) tuple, fold that too. */
        m_stack[ m_stack.size()-2 ]->children.push_back( m_stack.back() ); m_stack.pop_back();
        if( TupleStack == m_stack.back()->children[0]->type && "quote" == m_stack.back()->children[0]->children[0]->str ) {
          m_stack[ m_stack.size()-2 ]->children.push_back( m_stack.back() ); m_stack.pop_back();
        }
        /* One full expression is complete: expose it and return. */
        stack = *m_stack.back()->children.back();
        return *this;
      } else if( '"' == c ) m_stack.push_back( new_stack( sexp_stack_t(StringStack) ) );
      else   if( '\'' == c ) m_stack.push_back( new_stack( sexp_stack_t(TupleStack, "quote", m_stack_list) ) );
      else if( isSexpSep(c) ) break;
      else m_stack.push_back( new_stack( sexp_stack_t(TupleStack, string(1, c), m_stack_list) ) );
      break; }
      
    case StringStack: {
      /* Inside a quoted string: accumulate until the closing '"'. */
      if( '"' == c ) {
        m_stack[ m_stack.size()-2 ]->children.push_back( m_stack.back() ); m_stack.pop_back();
        if( m_stack.back()->children[0]->type == TupleStack && m_stack.back()->children[0]->children[0]->str == "quote" ) {
          m_stack[ m_stack.size()-2 ]->children.push_back( m_stack.back() ); m_stack.pop_back();
        }
      } else if( '\\' == c ) m_stack.back()->str += m_stream.get();
      else m_stack.back()->str += c;
      break; }

    case TupleStack: {
      /* Inside a bare atom: a separator ends it; push the character back
         so the enclosing frame sees it. */
      if( isSexpSep(c) ) {
        sexp_stack_t *p_atom = m_stack.back(); m_stack.pop_back();
        m_stack.back()->children.push_back(p_atom);
        if( TupleStack == m_stack.back()->children[0]->type && "quote" == m_stack.back()->children[0]->children[0]->str ) {
          m_stack[ m_stack.size()-2 ]->children.push_back( m_stack.back() ); m_stack.pop_back();
        }
        m_stream.unget();
      } else m_stack.back()->children[0]->str += c;
      break; }
    }
    
  }

  return *this;
  
}
































// void function::explainByUnification( proof_graph_t *p_out_pg, variable_cluster_t *p_out_evc, const inference_configuration_t &c ) {

//   /* Explanations by unification. */
//   vector<pair<pair<int, unifier_t>, int> > edges_new;

//   for( pg_node_map_t::iterator iter_pg = p_out_pg->p2n.begin(); p_out_pg->p2n.end() != iter_pg; ++iter_pg ) {
//     for( unordered_map<int, vector<int> >::iterator iter_pa2n = iter_pg->second.begin(); iter_pg->second.end() != iter_pa2n; ++iter_pa2n ) {

//       if( 1 >= iter_pa2n->second.size() ) continue;
      
//       /* Create explanation-links. */
//       for( int i=0; i<iter_pa2n->second.size(); i++ ) {
//         for( int j=0; j<iter_pa2n->second.size(); j++ ) {
//           int n_i = iter_pa2n->second[i], n_j = iter_pa2n->second[j];
          
//           if( i == j ) continue;
//           if( (p_out_pg->nodes[ n_i ].score == p_out_pg->nodes[ n_j ].score && i < j) ||
//                p_out_pg->nodes[ n_i ].score < p_out_pg->nodes[ n_j ].score ) continue;

//           /* What's your subs? */
//           unifier_t un;
          
//           if( getMGU( &un, p_out_pg->nodes[ n_j ].lit, p_out_pg->nodes[ n_i ].lit ) ) {

//             /* Then literal i can be explained when subst ^ literal j are hypothesized. */
//             edges_new.push_back( make_pair( make_pair( n_j, un ), n_i ) );

//             /* Add these variables to the cluster. */
//             for( int s=0; s<un.substitutions.size(); s++ ) {
//               if( un.substitutions[s].terms[0] == un.substitutions[s].terms[1] ) continue;
//               p_out_evc->add( un.substitutions[s].terms[0], un.substitutions[s].terms[1] );
//             }
            
//           }
          
//         }
//       }
      
//     }
//   }

//   /* Create new explainer-explainee relationships. */  
//   for( int i=0; i<edges_new.size(); i++ ) {

//     vector<vector<pair<int, int> > > pic;
        
//     for( int s=0; s<edges_new[i].first.second.substitutions.size(); s++ ) {

//       store_item_t t1 = edges_new[i].first.second.substitutions[s].terms[0], t2 = edges_new[i].first.second.substitutions[s].terms[1];

//       if( t1 == t2 ) continue;
      
//       /* Ensure t1 <= t2. */
//       if( t1 > t2 ) { store_item_t tt = t1; t1 = t2; t2 = tt; }
      
//       vector<pair<int, int> > pic_s;

//       /* C_{x,c} ^ C_{y,c} */
//       char buffer[ 1024 ]; sprintf( buffer, "%d", p_out_evc->map_v2c[t1] );
//       store_item_t i_potential_cluster = g_store.cashier( buffer );
//       unordered_set<store_item_t> &content = p_out_evc->clusters[ p_out_evc->map_v2c[t1] ];

//       /* Decide the number of clusters. */        
//       int num_potential_clusters = 0;

//       for( unordered_set<store_item_t>::iterator iter_v=content.begin(); content.end()!=iter_v; ++iter_v )
//         if( g_store.isConstant( *iter_v ) ) num_potential_clusters++;
      
//       for( int cl=0; cl<max(1, num_potential_clusters); cl++ ) {

//         char           buffer[ 1024 ]; sprintf( buffer, "%d", cl );
//         store_item_t   i_clno    = g_store.cashier( buffer );
//         literal_t      l_put[] = { literal_t( PredicateSubstitution, t1, i_potential_cluster, i_clno ), literal_t( PredicateSubstitution, t2, i_potential_cluster, i_clno ) };
//         pair<int, int> pic_s_cl;

//         for( int j=0; j<2; j++ ) {

//           vector<int> nodes_subst;
          
//           if( !p_out_pg->getNode( &nodes_subst, l_put[j] ) ) {
//             int sn = p_out_pg->addNode( l_put[j], HypothesisNode );
//             p_out_pg->nodes[ sn ].distance = 0.01;
        
//             c.p_sfunc->featureFunction( &p_out_pg->nodes[ sn ].fv, *p_out_pg, sn );
//             p_out_pg->nodes[ sn ].score    = c.p_sfunc->getScore( p_out_pg->nodes[ sn ].fv, c.ignore_weight );
        
//             (0 == j ? pic_s_cl.first : pic_s_cl.second) = sn;
//           } else
//             (0 == j ? pic_s_cl.first : pic_s_cl.second) = nodes_subst[0];
          
//         }

//         pic_s.push_back( pic_s_cl );

//       }

//       pic.push_back( pic_s );
      
//     }

//     vector<int> counter( pic.size(), 0 );

//     while( -1 != counter[0] ) {
      
//       vector<int> explainer;
    
//       explainer.push_back( edges_new[i].first.first );

//       for( int s=0; s<pic.size(); s++ ) {
//         explainer.push_back( pic[s][ counter[s] ].first );
//         explainer.push_back( pic[s][ counter[s] ].second );
//       }
      
//       pg_hypernode_t hn_subst_plus_j = p_out_pg->addHyperNode( explainer );
//       p_out_pg->addEdge( edges_new[i].second, hn_subst_plus_j );
      
//       /* Increment the last counter and propagate it to the previous one. */
//       counter[ counter.size()-1 ]++;

//       for( int cl=counter.size()-1; cl >= 0; cl-- ) {
//         if( counter[cl] == pic[cl].size() ) {
//           if( 0 == cl ) counter[cl] = -1;
//           else { counter[cl] = 0; counter[ cl-1 ]++; }
//         } else break;
//       }

//     }
    
//   }
  
// }

//   /* Create mutual exclusivity constraints between mutual exclusivity constraints. */
//   for( unordered_map<int, unordered_set<int> >::iterator iter_evc = pevc2nodes.begin(); pevc2nodes.end() != iter_evc; ++iter_evc ) {

//     lp_constraint_t con_m( LessEqual, 1.0 );
    
//     for( unordered_set<int>::iterator iter_var = iter_evc->second.begin(); iter_evc->second.end() != iter_var; ++iter_var )
//       con_m.push_back( p_out_lprel->n2v[ *iter_var ], 1.0 );

//     if( con_m.vars.size() > 1 )
//       p_out_lp->addConstraint( con_m );
    
//   }
  
//         cout << "<variable-equivalence>" << endl;

//         unordered_map<int, vector<store_item_t> > cluster;
        
//         for( int i=0; i<best_h.branches.size(); i++ ) {
//           if( g_store.isEqual( best_h.branches[i].lit.predicate, PredicateSubstitution ) )
//             cluster[ best_h.branches[i].lit.terms[1] * 1000 + best_h.branches[i].lit.terms[2] ].push_back( best_h.branches[i].lit.terms[0] );
//         }

//         for( unordered_map<int, vector<store_item_t> >::iterator iter_c = cluster.begin(); cluster.end() != iter_c; ++iter_c ) {
//           cout << "<cluster id=\"" << iter_c->first << "\">";
          
//           for( int i=0; i<iter_c->second.size(); i++ )
//             cout << g_store.claim( iter_c->second[i] ) << (i < iter_c->second.size()-1 ? " " : "");

//           cout << "</cluster>" << endl;
//         }
        
//         cout << "</variable-equivalence>" << endl;





//           /* Search for a unifiable literals. */
//           const vector<int> *unifiable_nodes;

//           if( p_out_pg->getNode( &unifiable_nodes, lhs_literals[j].predicate, lhs_literals[j].terms.size() ) ) {
      
//             for( int k=0; k<unifiable_nodes->size(); k++ )
//               for( int s=0; s<p_out_pg->nodes[ (*unifiable_nodes)[k] ].lit.terms.size(); s++ )
//                 p_out_evc->add( lit.terms[s], p_out_pg->nodes[ (*unifiable_nodes)[k] ].lit.terms[s] );
            
//           }
          
