//==============================================================================================================================================================
// Author(s): Roman Khmelichek, Wei Jiang
//
//==============================================================================================================================================================
#include "query_processor.h"
#include <boost/lexical_cast.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/algorithm/string.hpp>

#include <algorithm>
#include <arpa/inet.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <errno.h>
#include <iomanip>
#include <iostream>
#include <limits>
#include <map>
#include <netinet/in.h>
#include <netdb.h>
#include <signal.h>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <string.h>
#include <unistd.h>

#include "cache_manager.h"
#include "config_file_properties.h"
#include "configuration.h"
#include "external_index.h"
#include "globals.h"
#include "logger.h"
#include "meta_file_properties.h"
#include "timer.h"

/**************************************************************************************************************************************************************
 * LocalQueryProcessor
 *
 **************************************************************************************************************************************************************/
// Constructs the query processor and immediately runs it.
// Order of work: validate configuration, load the optional stop word list,
// load index properties and the auxiliary data files used by the pruning /
// probability research code, optionally build an in-memory block level index,
// then dispatch on 'query_mode_' to run the requested interactive or batch
// loop. Aggregate querying statistics are printed when the run completes.
LocalQueryProcessor::LocalQueryProcessor(const IndexFiles& input_index_files, const char* stop_words_list_filename, QueryAlgorithm query_algorithm, QueryMode query_mode,
                               ResultFormat result_format) :
  query_algorithm_(query_algorithm),
  query_mode_(query_mode),
  result_format_(result_format),
  max_num_results_(Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kMaxNumberResults))),
  silent_mode_(false),
  warm_up_mode_(false),
  use_positions_(Configuration::GetResultValue(Configuration::GetConfiguration().GetBooleanValue(config_properties::kUsePositions))),
  collection_average_doc_len_(0),
  collection_total_num_docs_(0),
  external_index_reader_(GetExternalIndexReader(query_algorithm_, input_index_files.external_index_filename().c_str())),
  cache_policy_(GetCacheManager(input_index_files.index_filename().c_str())),
  index_reader_(IndexReader::kRandomQuery,
                *cache_policy_,
                input_index_files.lexicon_filename().c_str(),
                input_index_files.document_map_basic_filename().c_str(),
                input_index_files.document_map_extended_filename().c_str(),
                input_index_files.meta_info_filename().c_str(),
                use_positions_,
                external_index_reader_),
  index_included_precomputed_score_(false),
  index_use_precomputed_score_(false),
  index_layered_(false),
  index_overlapping_layers_(false),
  index_num_layers_(1),
  total_querying_time_(0),
  total_num_queries_(0),
  num_early_terminated_queries_(0),
  num_single_term_queries_(0),

  not_enough_results_definitely_(0),
  not_enough_results_possibly_(0),
  num_queries_containing_single_layered_terms_(0),
  num_queries_kth_result_meeting_threshold_(0),
  num_queries_kth_result_not_meeting_threshold_(0),

  num_postings_scored_(0),
  num_postings_skipped_(0),
  universal_threshold_socre_of_posting_(0.0),
  queryTermPredictionModelValue_(-1){

  if (max_num_results_ <= 0) {
    Configuration::ErroneousValue(config_properties::kMaxNumberResults, Configuration::GetConfiguration().GetValue(config_properties::kMaxNumberResults));
  }

  if (stop_words_list_filename != NULL) {
    LoadStopWordsList(stop_words_list_filename);
  }

  LoadIndexProperties();
  PrintQueryingParameters();

  // Updated by Wei 2013/08/31 afternoon at school
  // LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable1D();

  // Updated by Wei 2014/01/05 night at school
  LoadUpTermPieceInfoForRelRank();

  // Updated by Wei 2013/12/10 afternoon at school
  LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable2D();

  // current version
  // Updated by Wei on 2013/08/12 night at school
  // Put this operation into the constructor and do this ONLY once
  // OLD version
  // for the pruning research project. Update 2012/08/29
  // Load the aux file for the pruning project. This aux file has the length of each query term in it.
  LoadUpAuxFileForPruningProject();

  // Updated by Wei 2013/08/06 afternoon
  // LoadUpAuxFilesForSecondProbabilityFactor();

  // Updated by Wei 2013/08/02 afternoon
  LoadUpAuxFilesForFirstProbabilityFactor();

  // Updated by Wei 2013/06/19, need to load the three maps as well
  // LoadUpThreeFeatureValuesForMachineLearnedTraining();

  queryTermPredictionModelValue_ = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kQueryTermPredictionModelMethodValue));

  // Updated by Wei 2013/06/19, support some functions
  // LoadUpQueryTermsProbabilityDistribution_Advance();

  // Updated by Wei 2013/02/28 afternoon
  // load the map<string,vector<float> > term_thresholds_based_on_percentage_ for the pruning method TCP and TCP-QV(maybe);
  LoadQueryTermThresholdsKeptBasedOnPercentage();

  // Updated by Wei on 2013/03/01 afternoon
  // support function for the pruning method TCP-QV
  LoadQueryView();

  /*bool in_memory_index = IndexConfiguration::GetResultValue(Configuration::GetConfiguration().GetBooleanValue(config_properties::kMemoryResidentIndex), false);
  bool memory_mapped_index = IndexConfiguration::GetResultValue(Configuration::GetConfiguration().GetBooleanValue(config_properties::kMemoryMappedIndex), false);*/
  bool use_block_level_index = IndexConfiguration::GetResultValue(Configuration::GetConfiguration().GetBooleanValue(config_properties::kUseBlockLevelIndex), false);

  // TODO: Using an in-memory block index (for standard DAAT-AND) does not provide us any benefit. Most likely, the blocks should be smaller, or we should instead index the chunk last docIDs.
  //       Sequential block search performs better than binary block search in this case.
  //       This might be a better speed up for when the index is on disk and we are I/O bounded. Then we should also configure so we don't read ahead many blocks at a time.
  //       If the index is in main memory, the only improvement would be to avoid decoding the block header, and the overhead of that should be small.
  if (use_block_level_index) {
    cout << "Building in-memory block level index." << endl;
    BuildBlockLevelIndex();
  }
  /*if (memory_mapped_index || in_memory_index) {
    if (query_algorithm_ != kDaatOr && query_algorithm_ != kTaatOr) {
      cout << "Building in-memory block level index." << endl;
      BuildBlockLevelIndex();
    }
  }*/

  string batch_query_input = "";
  string batch_request_input = "";

  if (query_mode_ == kBatch or query_mode_ == kBatchBench) {
    // For the case where we run batch queries, input can either come from stdin or from a file.
    // Reading input directly from a file is especially useful in case you can't redirect a file to stdin,
    // like when using gdb to debug (or at least I can't figure out how to do redirection when using gdb).
    batch_query_input = IndexConfiguration::GetResultValue(Configuration::GetConfiguration().GetStringValue(config_properties::kBatchQueryInputFile), false);
  }

  if (query_mode_ == kGetPostingRankInListBatch) {
    // For the case where we run batch requests, input can either come from stdin or from a file or from a program internally
    // Reading input directly from a file is especially useful in case you can't redirect a file to stdin,
    // like when using gdb to debug (or at least I can't figure out how to do redirection when using gdb).
    batch_request_input = IndexConfiguration::GetResultValue(Configuration::GetConfiguration().GetStringValue(config_properties::kBatchRequestInputFile), false);
  }

  string query_view_query_input = "";
  if (query_mode_ == kQueryView) {
    query_view_query_input = IndexConfiguration::GetResultValue(Configuration::GetConfiguration().GetStringValue(config_properties::kQueryViewQueryInputFileName), false);
  }

  string flagString = "";

  // Dispatch to the loop/routine matching the configured query mode.
  switch (query_mode_) {
    case kInteractive:
    case kInteractiveSingle:
      silent_mode_ = false;
      AcceptQuery();
      break;
    case kQueryView:
      RunQueryViewQueries(query_view_query_input);
      break;
    case kSpecialBM25wsep:
      cout << "value 1: output individual term BM25 score only list." << endl;
      cout << "value 2: output only one file, 3 columns total, 1st column is the term, 2ed column is the docID, 3th column is the BM25 Score." << endl;
      cout << "Enter the Flag value:";
      getline(cin, flagString);
      boost::algorithm::trim(flagString);
      ComputeBM25BasedOnCentainList(flagString);
      break;
    case kSpecialPurpose1:
      outputAllDocIDWARCTRECIDPair();
      break;
    case kGetPostingRankInListInteractive:
    case kGetPostingRankInListInteractiveSingle:
      silent_mode_ = false;
      AcceptRequest();
      break;
    case kGetPostingRankInListBatch:
      RunBatchRequests(batch_request_input);
      break;

    // In this mode, the query log will only be run once, and the output of each query will be printed to the screen.
    // If running in mode for TREC results, we will suppress some output, so as not to interfere with the 'trec_eval' program.
    case kBatch:
      if (result_format_ == kTrec)
        silent_mode_ = true;
      else
        silent_mode_ = false;
      RunBatchQueries(batch_query_input, false, 1);
      break;

    // In this mode, the query log will be run once without any timing to warm up the caches. Then, it will be run a second time to generate the timed run.
    // These runs will be silent, with only the querying statistics being displayed at the end of the final run.
    case kBatchBench:
      silent_mode_ = true;
      RunBatchQueries(batch_query_input, true, 1);
      break;

    default:
      assert(false);
      break;
  }

  // Output the aggregate querying statistics. This block previously duplicated
  // GetOverallQueryProcessingStatistics() verbatim; delegate to it so the
  // reporting logic lives in exactly one place.
  GetOverallQueryProcessingStatistics();
}


// Lightweight constructor overload used when the processor is embedded as a
// library component (see 'operation_mode'): it only wires up the index reader
// and configuration, and does NOT run any query loop. Callers drive querying
// themselves and may call GetOverallQueryProcessingStatistics() at the end.
LocalQueryProcessor::LocalQueryProcessor(const IndexFiles& input_index_files, const char* stop_words_list_filename, QueryAlgorithm query_algorithm, QueryMode query_mode,
                               ResultFormat result_format, string operation_mode) :
  query_algorithm_(query_algorithm),
  query_mode_(query_mode),
  result_format_(result_format),
  max_num_results_(Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kMaxNumberResults))),
  silent_mode_(false),
  warm_up_mode_(false),
  use_positions_(Configuration::GetResultValue(Configuration::GetConfiguration().GetBooleanValue(config_properties::kUsePositions))),
  collection_average_doc_len_(0),
  collection_total_num_docs_(0),
  external_index_reader_(GetExternalIndexReader(query_algorithm_, input_index_files.external_index_filename().c_str())),
  cache_policy_(GetCacheManager(input_index_files.index_filename().c_str())),
  index_reader_(IndexReader::kRandomQuery,
                *cache_policy_,
                input_index_files.lexicon_filename().c_str(),
                input_index_files.document_map_basic_filename().c_str(),
                input_index_files.document_map_extended_filename().c_str(),
                input_index_files.meta_info_filename().c_str(),
                use_positions_,
                external_index_reader_),
  index_included_precomputed_score_(false),
  index_use_precomputed_score_(false),
  index_layered_(false),
  index_overlapping_layers_(false),
  index_num_layers_(1),
  total_querying_time_(0),
  total_num_queries_(0),
  num_early_terminated_queries_(0),
  num_single_term_queries_(0),

  not_enough_results_definitely_(0),
  not_enough_results_possibly_(0),
  num_queries_containing_single_layered_terms_(0),
  num_queries_kth_result_meeting_threshold_(0),
  num_queries_kth_result_not_meeting_threshold_(0),

  num_postings_scored_(0),
  num_postings_skipped_(0),
  universal_threshold_socre_of_posting_(0.0),
  // Fix: this overload previously left queryTermPredictionModelValue_
  // uninitialized (indeterminate); initialize it to -1 like the other ctor.
  queryTermPredictionModelValue_(-1){

  LoadIndexProperties();
  PrintQueryingParameters();
  //cout << "Query Processor Init Completed." << endl;
}

//Just for the lib purposes. May change over time.
// Prints aggregate statistics for all queries executed so far: counts, total
// and per-query timing, early-termination counters, and per-query averages of
// postings scored/skipped and bytes read from cache/disk.
// Intended for library-mode callers (the batch constructor calls the same
// logic at the end of its run).
void LocalQueryProcessor::GetOverallQueryProcessingStatistics(){
  // Output some querying statistics.
  double total_num_queries_issued = total_num_queries_;

  cout << "Number of queries executed: " << total_num_queries_ << endl;
  cout << "Number of single term queries: " << num_single_term_queries_ << endl;
  cout << "Total querying time: " << total_querying_time_ << " seconds\n";

  cout << "\n";
  cout << "Early Termination Statistics:\n";
  cout << "Number of early terminated queries: " << num_early_terminated_queries_ << endl;
  cout << "not_enough_results_definitely_: " << not_enough_results_definitely_ << endl;
  cout << "not_enough_results_possibly_: " << not_enough_results_possibly_ << endl;
  cout << "num_queries_containing_single_layered_terms_: " << num_queries_containing_single_layered_terms_ << endl;
  cout << "num_queries_kth_result_meeting_threshold_: " << num_queries_kth_result_meeting_threshold_ << endl;
  cout << "num_queries_kth_result_not_meeting_threshold_: " << num_queries_kth_result_not_meeting_threshold_ << endl;

  // Guard: with zero queries the per-query averages below would divide by
  // zero and print NaN/inf; skip them in that case.
  if (total_num_queries_ == 0) {
    cout << "No queries were executed; per-query averages are not available.\n";
    return;
  }

  cout << "Average postings scored: " << (num_postings_scored_ / total_num_queries_issued) << endl;
  cout << "Average postings skipped: " << (num_postings_skipped_ / total_num_queries_issued) << endl;

  cout << "\n";
  cout << "Per Query Statistics:\n";
  cout << "  Average data read from cache: " << (index_reader_.total_cached_bytes_read() / total_num_queries_issued / (1 << 20)) << " MiB\n";
  cout << "  Average data read from disk: " << (index_reader_.total_disk_bytes_read() / total_num_queries_issued / (1 << 20)) << " MiB\n";
  cout << "  Average number of blocks skipped: " << (index_reader_.total_num_blocks_skipped() / total_num_queries_issued) << "\n";

  cout << "  Average query running time (latency): " << (total_querying_time_ / total_num_queries_issued * (1000)) << " ms\n";
}


// Releases the heap objects created in the constructor's init list via
// GetExternalIndexReader() / GetCacheManager(). 'delete' on NULL is a no-op,
// so this is safe even if either factory returned NULL.
// NOTE(review): the member 'index_reader_' was constructed with references to
// both of these objects and is destroyed *after* this body runs — this
// assumes its destructor does not dereference them; confirm before reordering.
LocalQueryProcessor::~LocalQueryProcessor() {
  delete external_index_reader_;
  delete cache_policy_;
}

void LocalQueryProcessor::LoadUpQueryTermsProbabilityDistribution_Advance(){
	// clean up procedure for the 4 class variables:
	// (1) queryTermsTrueProbabilityDistributionMap_
	// (2) queryTerms1DProbabilityDistributionMap_
	// (3) queryTerms2DProbabilityDistributionMap_
	// (4) queryTermsGoodTuringProbabilityDistributionMap_
	if(queryTermsTrueProbabilityDistributionMap_.size() != 0 or queryTerms1DProbabilityDistributionMap_.size() != 0 or queryTerms2DProbabilityDistributionMap_.size() != 0 or queryTermsGoodTuringProbabilityDistributionMap_.size() != 0){
		queryTermsTrueProbabilityDistributionMap_.clear();
		queryTerms1DProbabilityDistributionMap_.clear();
		queryTerms2DProbabilityDistributionMap_.clear();
		queryTermsGoodTuringProbabilityDistributionMap_.clear();
	}

	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kQueryTermProbablityDistributionFileNameAdvanced));
    // for debug ONLY
    // cout << "inputFileName:" << inputFileName << endl;
    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	if ( inputfile.good() ){
		// ignore the headline
		// the headline is: queryTerm goldStandardRealProbability 1D 2D goodTuring
		getline (inputfile,currentLine);
	}

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		      boost::algorithm::trim(currentLine);

			  istringstream iss( currentLine );
		      string term;
		      string trueProbability;
		      string oneDProbability;
		      string twoDProbability;
		      string goodTuringProbability;
			  iss >> term;
			  iss >> trueProbability;
			  iss >> oneDProbability;
			  iss >> twoDProbability;
			  iss >> goodTuringProbability;

			  // for debug ONLY
			  // *

			  // Let's try this first.
			  // If not OK, change to boost or sth.
			  queryTermsTrueProbabilityDistributionMap_[term] = atof(trueProbability.c_str());
			  queryTerms1DProbabilityDistributionMap_[term] = atof(oneDProbability.c_str());
			  queryTerms2DProbabilityDistributionMap_[term] = atof(twoDProbability.c_str());
			  queryTermsGoodTuringProbabilityDistributionMap_[term] = atof(goodTuringProbability.c_str());

			  // for debug ONLY
			  // *
		}
	}

	inputfile.close();

    if(queryTermsTrueProbabilityDistributionMap_.size() == 0 or queryTerms1DProbabilityDistributionMap_.size() == 0 or queryTerms2DProbabilityDistributionMap_.size() == 0 or queryTermsGoodTuringProbabilityDistributionMap_.size() == 0){
	    GetDefaultLogger().Log("Load query Terms True Probability Distribution Map NOT done --- Take Care", false);
	    GetDefaultLogger().Log("Load query Terms 1D Probability Distribution Map NOT done --- Take Care", false);
	    GetDefaultLogger().Log("Load query Terms 2D Probability Distribution Map NOT done --- Take Care", false);
	    GetDefaultLogger().Log("Load query Terms Good Turing Probability Distribution Map NOT done --- Take Care", false);
    }
    else{
    	//cout << "The length of the queryTerms is:" << queryTerms.size() << endl;
    	//Currently, nothing has been done for this logic.
	    GetDefaultLogger().Log(Stringify(queryTermsTrueProbabilityDistributionMap_.size()) + " <term, trueProbability> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(queryTerms1DProbabilityDistributionMap_.size()) + " <term, 1DProbability> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(queryTerms2DProbabilityDistributionMap_.size()) + " <term, 2DProbability> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(queryTermsGoodTuringProbabilityDistributionMap_.size()) + " <term, GoodTuringProbability> pairs have been loaded.", false);
    }
}

void LocalQueryProcessor::LoadQueryView(){
	string inputFileName = IndexConfiguration::GetResultValue(Configuration::GetConfiguration().GetStringValue(config_properties::kQueryViewQueryInputFileName), false);
	ifstream inputfile(inputFileName.c_str());
    // for debug ONLY
	// cout << "inputFileName:" << inputFileName << endl;

    string currentLine;

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		      string term;
		      string numOfLinesForThisTermInStringFormat;
		      int NUM_OF_LINES_FOR_THIS_TERM;
			  boost::algorithm::trim(currentLine);
			  istringstream iss( currentLine );
		      iss >> term;
			  iss >> numOfLinesForThisTermInStringFormat;

			  NUM_OF_LINES_FOR_THIS_TERM = atoi( numOfLinesForThisTermInStringFormat.c_str() );
			  for(int tempCounter = 0; tempCounter < NUM_OF_LINES_FOR_THIS_TERM; tempCounter++ ){
				  getline (inputfile,currentLine);
				  if(currentLine != ""){
					  string trecIDWithDocIDKey;
					  uint32_t docIDKey;
					  string numOfTimesThePostingsBegingTouchedInStringFormat;
					  // OLD version
					  // int numOfTimesThePostingsBegingTouched;
					  // NEW version
					  float totalScoreForPosting = 0.0;
					  float currentScorePartitionForPosting = 0.0;
					  float currentRankingPartitionForPosting = 0.0;
					  boost::algorithm::trim(currentLine);
					  istringstream iss( currentLine );

					  iss >> trecIDWithDocIDKey;
					  iss >> numOfTimesThePostingsBegingTouchedInStringFormat;
					  // for debug ONLY
					  // cout << "numOfTimesThePostingsBegingTouchedInStringFormat:" << numOfTimesThePostingsBegingTouchedInStringFormat << endl;
					  for(int i = 0; i < atoi( numOfTimesThePostingsBegingTouchedInStringFormat.c_str() ); i++ ){

						  iss >> currentRankingPartitionForPosting;
						  currentScorePartitionForPosting = 1 / currentRankingPartitionForPosting;
						  totalScoreForPosting += currentScorePartitionForPosting;

						  if (trecIDWithDocIDKey == "GX270-07-8113795_24989609" and term == "of"){
							  cout << "currentRankingPartitionForPosting: " << currentRankingPartitionForPosting << endl;
							  cout << "currentScorePartitionForPosting: " << currentScorePartitionForPosting << endl;
						  }
					  }
					  // trecIDWithDocIDKey split
					  vector<string> stringElements;
		    		  boost::algorithm::split(stringElements, trecIDWithDocIDKey, boost::algorithm::is_any_of("_") );
		    		  docIDKey = strtoul(stringElements[1].c_str(), NULL, 0);

					  queryView_[term][docIDKey] = totalScoreForPosting;
				  }
			  }


			  // term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot01KeptInStringFormat.c_str()) );

		}
	}

	// check point
	// for debug ONLY
	/*
	cout << "term_thresholds_based_on_percentage_['0197'][1]:" << term_thresholds_based_on_percentage_["0197"][1] << endl;
	cout << "term_thresholds_based_on_percentage_['virginia'][10]:" << term_thresholds_based_on_percentage_["virginia"][10] << endl;
	cout << "term_thresholds_based_on_percentage_['youth'][11]:" << term_thresholds_based_on_percentage_["youth"][11] << endl;
	*/

	inputfile.close();

    if(queryView_.size() == 0){
	    GetDefaultLogger().Log("Load Query View is NOT Done --- Take Care", false);
    }
    else{
    	// The logic of creating the smart query view has been deleted and ignored.
    	// Updated on 2014/01/26 night at school by Wei
	    // Comment this out cause I want to parse the output file the same as the previous result file
    	GetDefaultLogger().Log(Stringify(queryView_.size()) + " terms with their QV have been loaded.", false);
    }
}

void LocalQueryProcessor::LoadQueryTermThresholdsKeptBasedOnPercentage(){
	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kQueryTermThresholdsKeptBasedOnPercentage));
	ifstream inputfile(inputFileName.c_str());

    // cout << "inputFileName:" << inputFileName << endl;
    string currentLine;

    // ignore the header line
    getline (inputfile,currentLine);

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		      string term;
		      string thresholdDot01KeptInStringFormat;
		      string thresholdDot05KeptInStringFormat;
		      string thresholdDot1KeptInStringFormat;
		      string thresholdDot2KeptInStringFormat;
		      string thresholdDot3KeptInStringFormat;
		      string thresholdDot4KeptInStringFormat;
		      string thresholdDot5KeptInStringFormat;
		      string thresholdDot6KeptInStringFormat;
		      string thresholdDot7KeptInStringFormat;
		      string thresholdDot8KeptInStringFormat;
		      string thresholdDot9KeptInStringFormat;
		      string threshold1DotKeptInStringFormat;

			  boost::algorithm::trim(currentLine);

			  istringstream iss( currentLine );

		      iss >> term;
			  iss >> thresholdDot01KeptInStringFormat;
			  iss >> thresholdDot05KeptInStringFormat;
			  iss >> thresholdDot1KeptInStringFormat;
			  iss >> thresholdDot2KeptInStringFormat;
			  iss >> thresholdDot3KeptInStringFormat;
			  iss >> thresholdDot4KeptInStringFormat;
			  iss >> thresholdDot5KeptInStringFormat;
			  iss >> thresholdDot6KeptInStringFormat;
			  iss >> thresholdDot7KeptInStringFormat;
			  iss >> thresholdDot8KeptInStringFormat;
			  iss >> thresholdDot9KeptInStringFormat;
			  iss >> threshold1DotKeptInStringFormat;

			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot01KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot05KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot1KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot2KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot3KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot4KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot5KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot6KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot7KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot8KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(thresholdDot9KeptInStringFormat.c_str()) );
			  term_thresholds_based_on_percentage_[term].push_back( atof(threshold1DotKeptInStringFormat.c_str()) );
		}
	}

	// check point
	// for debug ONLY
	/*
	cout << "term_thresholds_based_on_percentage_['0197'][1]:" << term_thresholds_based_on_percentage_["0197"][1] << endl;
	cout << "term_thresholds_based_on_percentage_['virginia'][10]:" << term_thresholds_based_on_percentage_["virginia"][10] << endl;
	cout << "term_thresholds_based_on_percentage_['youth'][11]:" << term_thresholds_based_on_percentage_["youth"][11] << endl;
	*/

	inputfile.close();

    if(term_thresholds_based_on_percentage_.size() == 0){
	    GetDefaultLogger().Log("Load Query Term Thresholds Kept Based On Percentage is NOT Done --- Take Care ", false);
    }
    else{
	    // Comment this out cause I want to parse the output file the same as the previous result file
    	GetDefaultLogger().Log(Stringify(term_thresholds_based_on_percentage_.size()) + " terms with their % cut have been loaded.", false);
    }
}

void LocalQueryProcessor::LoadStopWordsList(const char* stop_words_list_filename) {
  assert(stop_words_list_filename != NULL);

  std::ifstream ifs(stop_words_list_filename);
  if (!ifs) {
    GetErrorLogger().Log("Could not load stop word list file '" + string(stop_words_list_filename) + "'", true);
  }

  std::string stop_word;
  while (ifs >> stop_word) {
    stop_words_.insert(stop_word);
  }
}

// Create a block level index to speed up "random" accesses and skips.
// We iterate through the lexicon and decode all the block headers for the current inverted list.
// We then make a block level index by storing the last docID of each block for our current inverted list.
// Each inverted list layer will have it's own block level index (pointed to by the lexicon).
// Create a block level index to speed up "random" accesses and skips.
// We iterate through the lexicon and decode all the block headers for the current inverted list.
// We then make a block level index by storing the last docID of each block for our current inverted list.
// Each inverted list layer will have it's own block level index (pointed to by the lexicon).
//
// NOTE(review): 'block_level_index' is allocated with new[] and never freed
// here — each lexicon entry keeps a pointer into it via
// set_last_doc_ids_layer_ptr(), so it must live for the lifetime of the
// lexicon. Presumably the array is released (or intentionally leaked) at
// process exit; confirm ownership before adding a delete[].
void LocalQueryProcessor::BuildBlockLevelIndex() {
  /*SetDebugFlag(false);*/

  index_reader_.set_block_skipping_enabled(true);

  // We make one long array for keeping all the block level indices.
  // kTotalNumPerTermBlocks is taken from the index meta info and is the exact
  // total number of (list, block) pairs across all terms and layers.
  int num_per_term_blocks = IndexConfiguration::GetResultValue(index_reader_.meta_info().GetNumericalValue(meta_properties::kTotalNumPerTermBlocks), true);

  uint32_t* block_level_index = new uint32_t[num_per_term_blocks];
  int block_level_index_pos = 0;  // Next free slot in the shared array.

  MoveToFrontHashTable<LexiconData>* lexicon = index_reader_.lexicon().lexicon();
  for (MoveToFrontHashTable<LexiconData>::Iterator it = lexicon->begin(); it != lexicon->end(); ++it) {
    LexiconData* curr_term_entry = *it;
    if (curr_term_entry != NULL) {
      int num_layers = curr_term_entry->num_layers();
      for (int i = 0; i < num_layers; ++i) {
        ListData* list_data = index_reader_.OpenList(*curr_term_entry, i, true);

        int num_chunks_left = curr_term_entry->layer_num_chunks(i);

        // This layer's slice of the shared array starts at the current slot;
        // record that pointer in the lexicon entry before filling it in.
        assert(block_level_index_pos < num_per_term_blocks);
        curr_term_entry->set_last_doc_ids_layer_ptr(block_level_index + block_level_index_pos, i);

        while (num_chunks_left > 0) {
          const BlockDecoder& block = list_data->curr_block_decoder();

          // We index only the last chunk in each block that's related to our current term.
          // So we always use the last chunk in a block, except the last block of this list, since that last chunk might belong to another list.
          int total_num_chunks = block.num_chunks();  // The total number of chunks in our current block.
          int chunk_num = block.starting_chunk() + num_chunks_left;
          int last_list_chunk_in_block = ((total_num_chunks > chunk_num) ? chunk_num : total_num_chunks);

          uint32_t last_block_doc_id = block.chunk_last_doc_id(last_list_chunk_in_block - 1);

          assert(block_level_index_pos < num_per_term_blocks);
          block_level_index[block_level_index_pos++] = last_block_doc_id;

          num_chunks_left -= block.num_actual_chunks();

          if (num_chunks_left > 0) {
            // We're moving on to process the next block. This block is of no use to us anymore.
            list_data->AdvanceBlock();
          }
        }

        index_reader_.CloseList(list_data);
      }
    }
  }

  // If everything is correct, these should be equal at the end.
  assert(num_per_term_blocks == block_level_index_pos);

  // Reset statistics about how much we read from disk/cache and how many lists we accessed.
  index_reader_.ResetStats();

  /*SetDebugFlag(true);*/
}

// Interactive query loop: prompts on stdout, reads one query per line from
// stdin and executes it. In kInteractiveSingle mode only one query is read.
void LocalQueryProcessor::AcceptQuery() {
  while (true) {
    cout << "--->Search(2014Jan): ";
    string queryLine;
    // Fix: test the getline result directly. The old code checked cin.eof()
    // after a successful read, which (a) dropped a final query typed without
    // a trailing newline and (b) spun forever on a non-eof stream failure.
    if (!getline(cin, queryLine))
      break;

    ExecuteQuery(queryLine, 0);

    // Wei Added: If the query_mode_ is interactive-single, then break. If it is interactive, continue.
    if (query_mode_ != kInteractive)
      break;
  }
}

// Interactive posting-rank request loop: prompts on stdout, reads one request
// per line from stdin and executes it. In the interactive-single variant only
// one request is read.
void LocalQueryProcessor::AcceptRequest() {
  while (true) {
    cout << "--->GetPostingRankInList(2013Sep): ";
    string requestLine;
    // Fix: test the getline result directly. The old code checked cin.eof()
    // after a successful read, which (a) dropped a final request typed without
    // a trailing newline and (b) spun forever on a non-eof stream failure.
    if (!getline(cin, requestLine))
      break;

    ExecuteRequest(requestLine);

    if (query_mode_ != kGetPostingRankInListInteractive)
      break;
  }
}


// Appends the terms listed in the configured kTermForBM25ScoreList file (one
// term per line, trimmed; blank lines skipped) to 'BM25TermList'.
// Logs fatally if no terms were loaded.
void LocalQueryProcessor::LoadUpBM25TermList(vector<string> &BM25TermList){
	string termListFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTermForBM25ScoreList));
	string currentLine = "";
	ifstream inputfile(termListFileName.c_str());
	// Fix: test getline directly instead of 'while (inputfile.good())' so the
	// final line of a file without a trailing newline is not dropped. Also
	// trim before the emptiness check so whitespace-only lines are skipped
	// instead of being pushed as empty terms, and the unused local
	// 'priorityListElements' vector is removed.
	while (getline(inputfile, currentLine)) {
		boost::algorithm::trim(currentLine);
		if (!currentLine.empty()) {
			BM25TermList.push_back(currentLine);
		}
	}
	inputfile.close();

	if(BM25TermList.size() == 0){
		GetDefaultLogger().Log("Load terms for BM25 scores is NOT Done", true);
	}
	else{
		GetDefaultLogger().Log(Stringify(BM25TermList.size()) + " terms for BM25 scores have been loaded.", false);
	}
}

void LocalQueryProcessor::outputAllDocIDWARCTRECIDPair(){
	string outputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocIDWARCTRECID1MPairFile));
	ofstream outputFileHandler(outputFileName.c_str());
	for(uint32_t i=0; i < collection_total_num_docs_; i++){
		cout << i << endl;
		outputFileHandler << i << " " << index_reader_.document_map().GetDocumentUrl(i) << endl;
	}
	outputFileHandler.close();
}

void LocalQueryProcessor::ComputeBM25BasedOnCentainList(string flagString){
	  // Computes BM25 scores for every term loaded by LoadUpBM25TermList().
	  //   flagString == "1": write one "<term>.txt" score file per term into
	  //                      the configured output directory.
	  //   flagString == "2": reserved for query-term/docID pair output — not
	  //                      yet implemented.
	  // Any other flag value is a usage error and aborts the process.
	  vector<string> BM25TermList;
	  LoadUpBM25TermList(BM25TermList);
	  map<string, float> bm25LookUpDict;

	  if(flagString == "1"){
		  string bm25ScoreOutputFilePath = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kIndividualTermBM25ScoreListOutputPath));
		  // The loop index doubles as the progress counter (the separate
		  // 'counter' variable that used to shadow it was redundant).
		  for( unsigned int i = 0; i < BM25TermList.size(); i++ ) {
			  GetDefaultLogger().Log(Stringify(i) + " processing term:" + BM25TermList[i], false);
			  string bm25ScoreOutputFileName = bm25ScoreOutputFilePath + BM25TermList[i] + ".txt";
			  ofstream bm25ScoreOutputFileHandler(bm25ScoreOutputFileName.c_str());
			  computeBM25ScoreGivenTerm(BM25TermList[i], 0, bm25ScoreOutputFileHandler, bm25LookUpDict, flagString);
			  bm25ScoreOutputFileHandler.close();
			  cout << endl;
		  }
		  GetDefaultLogger().Log(Stringify(BM25TermList.size()) + " individual term BM25 score lists have been generated.", false);
	  }
	  else if(flagString == "2"){
		  cout << "Currently, this function is under construction." << endl;
		  /*
		  string bm25ScoreOutputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kQueryTermDocIDPairBM25ScoreOutputFile));
		  ofstream bm25ScoreOutputFileHandler(bm25ScoreOutputFileName.c_str());
		  counter = 0;
		  for( unsigned int i = 0; i < BM25TermList.size(); i++ ) {
			  GetDefaultLogger().Log(Stringify(counter) + " processing term:" + BM25TermList[i], false);
			  computeBM25ScoreGivenTerm(BM25TermList[i], 0, bm25ScoreOutputFileHandler, bm25LookUpDict, flagString);
			  counter ++;
			  cout << endl;
		  }
		  bm25ScoreOutputFileHandler.close();
		  GetDefaultLogger().Log(Stringify(counter) + " terms have been processed.", false);
		  */
	  }
	  else{
		  // Fixed garbled error message (was: "System do NOT the flag value.").
		  cout << "System does NOT recognize the flag value. Exit()" << endl;
		  exit(1);
	  }
}

// Intentionally empty: placeholder for looking up a posting's rank within its
// list given a specific posting identifier. See GetPostingRankInList() for
// the implemented (term, docID) variant.
void LocalQueryProcessor::GetPostingRankInListGivenASpecifcPostingIdentifier(){

}

// Unimplemented stub: announces itself and terminates the process. The
// trailing 'return -1' is unreachable (exit() never returns) and exists only
// to satisfy the int return type. See assignBM25ScoreToTermFeatureFile() for
// the working score-merge path.
int LocalQueryProcessor::CombineBM25ScoreIntoExistingTermFeatureFile(){
	cout << "UNDER CONSUTRCTION. Updated by Wei on 2013/09/10 night at school" << endl;
	exit(1);
	return -1;
}

int LocalQueryProcessor::assignBM25ScoreToTermFeatureFile(map<string, double> &lookUpDictForBM25Score){
	  // Reads the partial term-feature file, looks up the precomputed BM25
	  // score for each line's "<term>_<docID>" key in
	  // 'lookUpDictForBM25Score', appends it as a fifth column, and writes
	  // the completed lines to the output feature file. Prints summary
	  // counters at the end. Returns 0 on completion.

	  string partialFeatureFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTermFeaturePartialOutputFilePath));
	  string completeFeatureFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTermFeatureCompleteOutputFilePath));

	  int totalCounter = 0;     // feature lines written
	  int notZeroCounter = 0;   // lines with a known (non-zero) BM25 score
	  int zeroCounter = 0;      // lines whose key was missing or scored 0

	  ifstream inputfileForFeatures(partialFeatureFileName.c_str());
	  ofstream outputfileForFeatures(completeFeatureFileName.c_str());

	  vector<string> elementsOfFeatureLine;
	  string oldLine;

	  // Loop on getline() itself; the previous 'while (good())' form could
	  // process the last line twice.
	  while ( getline(inputfileForFeatures, oldLine) )
	  {
		  boost::algorithm::trim(oldLine);
		  if (oldLine.empty())
			  continue;
		  boost::algorithm::split(elementsOfFeatureLine, oldLine, boost::algorithm::is_any_of(" ") );
		  // A well-formed feature line has at least 4 fields (term, docID,
		  // two feature columns). Skip malformed lines instead of indexing
		  // out of bounds below.
		  if (elementsOfFeatureLine.size() < 4) {
			  cout << "skipping malformed feature line:" << oldLine << endl;
			  continue;
		  }
		  string mapKey = elementsOfFeatureLine[0] + "_" + elementsOfFeatureLine[1];
		  // Use find() rather than operator[]: operator[] silently inserted a
		  // zero-valued entry into the caller's map for every miss.
		  map<string, double>::const_iterator lookup = lookUpDictForBM25Score.find(mapKey);
		  double mapValue = (lookup != lookUpDictForBM25Score.end()) ? lookup->second : 0.0;
		  if (mapValue != 0) {
			  notZeroCounter++;
		  }
		  else {
			  zeroCounter++;
			  cout << "mapKey:" << mapKey << " " << "mapValue:" << mapValue << endl;
		  }
		  ostringstream mapValueStringStream;
		  mapValueStringStream << mapValue;
		  string newLine = elementsOfFeatureLine[0] + " " + elementsOfFeatureLine[1] + " " + elementsOfFeatureLine[2] + " " + elementsOfFeatureLine[3] + " " + mapValueStringStream.str();
		  outputfileForFeatures << newLine << endl;
		  totalCounter++;
	  }
	  inputfileForFeatures.close();
	  outputfileForFeatures.close();

	  cout << "this is for testing:" << endl;
	  cout << "lookUpDictForBM25Score['1024px_0']:" << lookUpDictForBM25Score["1024px_0"] << endl;
	  cout << "totalCounter:" << totalCounter << endl;
	  cout << "notZeroCounter:" << notZeroCounter << endl;
	  cout << "zeroCounter:" << zeroCounter << endl;
	  cout << "Job Done." << endl;

	  return 0;
}

int LocalQueryProcessor::ComputeBM25ScoreForSpecifcTerm(LexiconData** query_term_data, int num_query_terms, Result* results, int* num_results, string term){
	  // Opens the (last layer of the) inverted list for every query term,
	  // delegates scoring to ComputeBM25Score(), and closes the lists.
	  // '*num_results' is both the result-capacity input and the clamped
	  // result-count output; the unclamped total is returned.
	  const int result_capacity = *num_results;
	  ListData* open_lists[num_query_terms];  // Using a variable length array here.

	  const bool is_single_term = (num_query_terms == 1);
	  if (is_single_term && !warm_up_mode_) {
	    ++num_single_term_queries_;
	  }

	  // Always open the last layer of each term's list; this supports
	  // standard querying on layered indices (see the layering note in
	  // ProcessQuery()), at the cost of possibly loading unused layers when
	  // the whole index is kept in main memory.
	  for (int term_idx = 0; term_idx < num_query_terms; ++term_idx) {
	    open_lists[term_idx] = index_reader_.OpenList(*query_term_data[term_idx], query_term_data[term_idx]->num_layers() - 1, is_single_term);
	  }

	  // Lists must be arranged from shortest to longest before scoring.
	  sort(open_lists, open_lists + num_query_terms, ListCompare());
	  const int total_num_results = ComputeBM25Score(open_lists, num_query_terms, results, result_capacity, term);

	  *num_results = min(total_num_results, result_capacity);
	  for (int term_idx = 0; term_idx < num_query_terms; ++term_idx) {
	    index_reader_.CloseList(open_lists[term_idx]);
	  }
	  return total_num_results;
}

void LocalQueryProcessor::LoadUpAuxFileForPruningProject(){
	    // Loads "<term> <original inverted-list length>" pairs from the
	    // pruning-project aux file into 'termWithTheLengthOfListDict_'.
	    // (Only the query terms' entries are ever consulted, but the whole
	    // file is loaded here — see the original note about lexicon loading
	    // cost.) Logs how many entries were loaded.
        string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kAuxFileForPruning));
	    ifstream inputfile(inputFileName.c_str());
	    string currentLine;
	    // Loop on getline() itself; the previous 'while (inputfile.good())'
	    // form could process the final line twice.
	    while ( getline(inputfile, currentLine) )
		{
			boost::algorithm::trim(currentLine);
			// Skip blank / whitespace-only lines; previously they produced a
			// bogus empty-term entry with length 0.
			if (currentLine.empty())
				continue;

			istringstream iss( currentLine );
			string term;
			string length;
			// Require both fields before inserting; malformed lines are
			// skipped rather than recorded with a missing term or length.
			if (iss >> term >> length) {
				termWithTheLengthOfListDict_[term] = atoi(length.c_str());
			}
		}
		inputfile.close();

	    if(termWithTheLengthOfListDict_.size() == 0){
		    GetDefaultLogger().Log("Load Aux File For Pruning Project is NOT Done --- Take Care", false);
	    }
	    else{
		    GetDefaultLogger().Log(Stringify(termWithTheLengthOfListDict_.size()) + " terms with their lists length have been loaded.", false);
	    }
}

int LocalQueryProcessor::ProcessUpdateInvertedIndexesPreComputedScores(LexiconData** query_term_data, string query_term, uint32_t doc_id, float new_score) {
	  // Updates the precomputed (external-index) score of one posting:
	  // opens the single term's list — always its last layer, so standard
	  // querying works on layered indices (see the note in ProcessQuery()) —
	  // delegates the write to UpdateExternalPreComputedScores(), then
	  // closes the list. Returns 0 once the score has been updated.
	  const bool single_term_query = true;  // Exactly one term is involved.
	  ListData* open_list = index_reader_.OpenList(*query_term_data[0], query_term_data[0]->num_layers() - 1, single_term_query);

	  UpdateExternalPreComputedScores(query_term_data, &open_list, query_term, doc_id, new_score);

	  index_reader_.CloseList(open_list);

	  return 0;
}

int LocalQueryProcessor::ProcessModifyInvertedIndexesPreComputedScores(LexiconData** query_term_data, int num_query_terms, string mode) {
	  // Opens the (last layer of the) inverted list of every query term,
	  // intersects (AND) or merges (OR) them — per 'processing_semantics_' —
	  // while modifying the precomputed scores, and publishes the resulting
	  // posting array through 'posting_results_ptr_'. Returns the number of
	  // postings produced.
	  cout << "ProcessModifyInvertedIndexesPreComputedScores(...) called" << endl;
	  ListData* list_data_pointers[num_query_terms];  // Using a variable length array here.

	  bool single_term_query = (num_query_terms == 1);

	  for (int i = 0; i < num_query_terms; ++i) {
	    // Here, we always open the last layer for a term. This way, we can support standard querying on layered indices, however, if loading the entire
	    // index into main memory, we'll also be loading list layers we'll never be using.
	    // TODO: This only applies to indices with overlapping layers; need to check that first.
	    //       Also need to override that the index is not layered, so that this function will be called.
	    list_data_pointers[i] = index_reader_.OpenList(*query_term_data[i], query_term_data[i]->num_layers() - 1, single_term_query);
	  }

	  int total_num_results;

	  // For AND semantics, all query terms must exist in the lexicon for query processing to proceed.
	  // For OR semantics, any of the query terms can be in the lexicon.
	  // NOTE(review): this local enum shadows any identically named
	  // enumerators of the type of 'processing_semantics_'; the case labels
	  // below therefore rely on the enumerator values (kAnd=0, kOr=1)
	  // matching the member enum's — confirm against its declaration.
	  enum ProcessingSemantics {
	    kAnd, kOr, kUndefined
	  };

      // The query_term_data must be arranged in order from shortest list to longest list.
      sort(query_term_data, query_term_data + num_query_terms, ListCompare2());

      // The inverted lists of the query terms must be arranged the same way.
      sort(list_data_pointers, list_data_pointers + num_query_terms, ListCompare());

	  // Result buffer, capacity fixed at 200000 postings.
	  // Fixed: this used to be a plain function-local array whose address was
	  // stored in 'posting_results_ptr_', leaving the member dangling as soon
	  // as this function returned. 'static' gives the buffer program lifetime
	  // (and keeps ~200000 entries off the stack). Note this makes the
	  // function non-reentrant and not thread-safe.
	  static POSTING_RESULT posting_results[200000];

	  switch (processing_semantics_) {
	    case kAnd:
	      cout << "in kAnd semantics." << endl;
	      // For testing, uses the old overall statistics (NOT updated from each pruned inverted index).
	      total_num_results = IntersectListsForModifyingPreComputedScores(query_term_data, list_data_pointers, num_query_terms, mode, posting_results);
	      break;
	    case kOr:
	      cout << "in kOr semantics." << endl;
	      // For testing, uses the old overall statistics (NOT updated from each pruned inverted index).
	      total_num_results = MergeListsForModifyingPreComputedScores(query_term_data, list_data_pointers, num_query_terms, mode, posting_results);
	      break;
	    default:
	      total_num_results = 0;
	      assert(false);
	      break;
	  }
	  for (int i = 0; i < num_query_terms; ++i) {
	    index_reader_.CloseList(list_data_pointers[i]);
	  }

	  // Publish the (now statically allocated) result buffer to callers.
	  posting_results_ptr_ = posting_results;
	  return total_num_results;
}

// Updated by Wei 2013/09/11 afternoon at school
// Returns the rank of docID's posting within 'term''s inverted list, as read
// from the external index via NextGEQRomanRead() (the rank is stored there as
// a float and truncated to int here).
int LocalQueryProcessor::GetPostingRankInList(LexiconData** term_data,string term,uint32_t docID){
	ListData* current_list_data_pointer;
	bool single_term_ONLY = true;
	int posting_rank_in_list = -1;
	float valueStoredInExternalIndex = 0.0;
	current_list_data_pointer = index_reader_.OpenList(*term_data[0], term_data[0]->num_layers() - 1, single_term_ONLY);
	current_list_data_pointer->NextGEQRomanRead(docID,valueStoredInExternalIndex,false);
	posting_rank_in_list = (int)valueStoredInExternalIndex;
	// Fixed: the opened list was never closed, leaking list resources on
	// every call (every other OpenList() call site pairs with CloseList()).
	index_reader_.CloseList(current_list_data_pointer);
	return posting_rank_in_list;
}


//The results set is different. Result_Wei_2012
// Runs a DAAT AND/OR query (selected by 'query_algorithm_') over the given
// terms, filling 'results' (Result_Wei_2012 records) with at most
// '*num_results' entries. On return '*num_results' holds the clamped result
// count; the unclamped total is returned.
// Side effects: repopulates
// 'originalListLengthsVectorForPruningProjectForCurrentQueries_' and
// 'queryTermPostionIndexPairs_', and prints a "term:index ..." line to
// stdout that downstream feature extraction parses.
int LocalQueryProcessor::ProcessQuery2(LexiconData** query_term_data, int num_query_terms, Result_Wei_2012* results, int* num_results) {
  // cout << "--->[serverHiddenInfo]LocalQueryProcessor::ProcessQuery2 called(...)" << endl;
  const int kMaxNumResults = *num_results;
  ListData* list_data_pointers[num_query_terms];  // Using a variable length array here.

  bool single_term_query = false;
  if (num_query_terms == 1) {
    if (!warm_up_mode_)
      ++num_single_term_queries_;
    single_term_query = true;
  }

  originalListLengthsVectorForPruningProjectForCurrentQueries_.clear();

  for (int i = 0; i < num_query_terms; ++i) {
    // Here, we always open the last layer for a term. This way, we can support standard querying on layered indices, however, if loading the entire
    // index into main memory, we'll also be loading list layers we'll never be using.
    // TODO: This only applies to indices with overlapping layers; need to check that first.
    //       Also need to override that the index is not layered, so that this function will be called.

    list_data_pointers[i] = index_reader_.OpenList(*query_term_data[i], query_term_data[i]->num_layers() - 1, single_term_query);

    //wei special info storing operation for pruning project. Update 2012/07/26
    // Record the original (unpruned) list length for this term, as loaded by
    // LoadUpAuxFileForPruningProject(). NOTE(review): operator[] inserts 0
    // for terms missing from the aux file — presumably an intended default;
    // confirm.
    string curr_term = string( query_term_data[i]->term(), query_term_data[i]->term_len() );

    originalListLengthsVectorForPruningProjectForCurrentQueries_.push_back(termWithTheLengthOfListDict_[curr_term]);

  }

  // Sanity check: exactly one recorded length per query term.
  if(signed(originalListLengthsVectorForPruningProjectForCurrentQueries_.size()) != num_query_terms){
	  cout << "Vital error for the system" << endl;
	  exit(1);
  }

  int total_num_results;
  switch (query_algorithm_) {
    case kDaatAnd:
      // cout << "--->[serverHiddenInfo]kDaatAnd query alg applied." << endl;

	  // sorting operations.
      // The query_term_data must be arranged in order from shortest list to longest list.
      // NOTE(review): 'query_term_data', 'list_data_pointers', and the
      // lengths vector are each sorted independently with different
      // comparators; correctness assumes all three orderings agree
      // (ListCompare2 on lexicon entries vs. ListCompare on open lists vs.
      // numeric order of lengths) — confirm.
      sort(query_term_data, query_term_data + num_query_terms, ListCompare2());

      // Query terms must be arranged in order from shortest list to longest list.
      sort(list_data_pointers, list_data_pointers + num_query_terms, ListCompare());

      // Sort the auxInfoForInvertedIndex length from smallest to largest as well.
      sort(originalListLengthsVectorForPruningProjectForCurrentQueries_.begin(),originalListLengthsVectorForPruningProjectForCurrentQueries_.end());

	  // after the sorting, show the queryTerm and the Index to the screen with the following sample format:
      // e.g. oil:0 industry:1 history:2 u:3 s:4
      queryTermPostionIndexPairs_.clear();
	  for(int tempCounter = 0; tempCounter < num_query_terms; tempCounter++){
          string queryTermInStringFormat = "";
          string posting_result_output_string = "";
          // Rebuild the term as a string character by character (term() is
          // not NUL-terminated; term_len() gives its length).
		  for(int tempCounter2 = 0; tempCounter2 < query_term_data[tempCounter]->term_len(); tempCounter2++){
			  queryTermInStringFormat += query_term_data[tempCounter]->term()[tempCounter2];
		  }
		  // debug purpose
		  // cout << "queryTerm:" << queryTermInStringFormat << " queryTermIndex:" << tempCounter << endl;

		  queryTermPostionIndexPair pair = make_pair(queryTermInStringFormat, tempCounter);

		  queryTermPostionIndexPairs_.push_back(pair);
	  }
	  // output that very important line for feature extraction
	  for(int tempCounter = 0; tempCounter < num_query_terms; tempCounter++){
		  // output the following sample format
		  // oil:0 industry:1 history:2 u:3 s:4
		  cout << queryTermPostionIndexPairs_[tempCounter].first << ":" << queryTermPostionIndexPairs_[tempCounter].second << " ";
	  }
	  cout << endl;

      // current version
      total_num_results = IntersectListsEspecaillyForPruningProject2(list_data_pointers, num_query_terms, results, kMaxNumResults);

      // OLD version
      //total_num_results =                          IntersectLists(list_data_pointers, num_query_terms, results, kMaxNumResults);

      break;
    case kDaatOr:
      // for debug ONLY
      // cout << "--->kDaatOr query alg applied." << endl;

      // The query_term_data must be arranged in order from shortest list to longest list.
      // NOTE(review): same independent-parallel-sort assumption as the
      // kDaatAnd branch above — confirm the comparators agree.
      sort(query_term_data, query_term_data + num_query_terms, ListCompare2());

      // Query terms maybe arranged in order from shortest list to longest list.
      // Add by Wei Jiang, updated 2012/07/30
      sort(list_data_pointers, list_data_pointers + num_query_terms, ListCompare());

      // Sort the auxInfoForInvertedIndex length from smallest to largest as well.
      sort(originalListLengthsVectorForPruningProjectForCurrentQueries_.begin(),originalListLengthsVectorForPruningProjectForCurrentQueries_.end());

	  // after the sorting, show the queryTerm and the Index to the screen with the following sample format:
      // oil:0 industry:1 history:2 u:3 s:4
      queryTermPostionIndexPairs_.clear();
	  // after the sorting, show the queryTermIndex(ID) to the screen.
	  for(int tempCounter = 0; tempCounter < num_query_terms; tempCounter++){
          string queryTermInStringFormat = "";
          string posting_result_output_string = "";
		  for(int tempCounter2 = 0; tempCounter2 < query_term_data[tempCounter]->term_len(); tempCounter2++){
			  queryTermInStringFormat += query_term_data[tempCounter]->term()[tempCounter2];
		  }

		  //for debug purposes.
		  //cout << "queryTerm:" << queryTermInStringFormat << " queryTermIndex:" << tempCounter << endl;

		  queryTermPostionIndexPair pair = make_pair(queryTermInStringFormat, tempCounter);
		  queryTermPostionIndexPairs_.push_back(pair);
	  }
	  // output that very important line for feature extraction
	  for(int tempCounter = 0; tempCounter < num_query_terms; tempCounter++){
		  // output the following sample format
		  // oil:0 industry:1 history:2 u:3 s:4
		  cout << queryTermPostionIndexPairs_[tempCounter].first << ":" << queryTermPostionIndexPairs_[tempCounter].second << " ";
	  }
	  cout << endl;

      total_num_results = MergeListsEspeciallyForPruningProject2(list_data_pointers, num_query_terms, results, kMaxNumResults);

      break;
    default:
      total_num_results = 0;
      assert(false);
  }

  *num_results = min(total_num_results, kMaxNumResults);
  for (int i = 0; i < num_query_terms; ++i) {
    index_reader_.CloseList(list_data_pointers[i]);
  }
  return total_num_results;
}

// Runs a DAAT query over the given terms using the configured
// 'query_algorithm_' (AND, OR, or AND-with-top-positions), filling 'results'
// with at most '*num_results' entries. On return '*num_results' holds the
// clamped result count; the unclamped total is returned.
// Side effect: repopulates
// 'originalListLengthsVectorForPruningProjectForCurrentQueries_' from
// 'termWithTheLengthOfListDict_'.
int LocalQueryProcessor::ProcessQuery(LexiconData** query_term_data, int num_query_terms, Result* results, int* num_results) {
  cout << "LocalQueryProcessor::ProcessQuery called(...)" << endl;
  const int kMaxNumResults = *num_results;
  ListData* list_data_pointers[num_query_terms];  // Using a variable length array here.



  bool single_term_query = false;
  if (num_query_terms == 1) {
    if (!warm_up_mode_)
      ++num_single_term_queries_;
    single_term_query = true;
  }


  //wei special info storing operation for pruning project. Update 2012/07/26
  //Load the aux file for the pruning project.

  originalListLengthsVectorForPruningProjectForCurrentQueries_.clear();

  for (int i = 0; i < num_query_terms; ++i) {
    // Here, we always open the last layer for a term. This way, we can support standard querying on layered indices, however, if loading the entire
    // index into main memory, we'll also be loading list layers we'll never be using.
    // TODO: This only applies to indices with overlapping layers; need to check that first.
    //       Also need to override that the index is not layered, so that this function will be called.

    list_data_pointers[i] = index_reader_.OpenList(*query_term_data[i], query_term_data[i]->num_layers() - 1, single_term_query);

    //wei special info storing operation for pruning project. Update 2012/07/26
    // NOTE(review): operator[] inserts a 0 length for terms missing from the
    // aux file — presumably an intended default; confirm.
    string curr_term = string( query_term_data[i]->term(), query_term_data[i]->term_len() );

    originalListLengthsVectorForPruningProjectForCurrentQueries_.push_back(termWithTheLengthOfListDict_[curr_term]);

  }

  // Sanity check: exactly one recorded length per query term.
  if(signed(originalListLengthsVectorForPruningProjectForCurrentQueries_.size()) != num_query_terms){
	  cout << "Vital error for the system" << endl;
	  exit(1);
  }

  int total_num_results;
  switch (query_algorithm_) {
    case kDaatAnd:
      cout << "kDaatAnd query alg applied." << endl;

      // Query terms must be arranged in order from shortest list to longest list.
      // NOTE(review): the lists and the lengths vector are sorted
      // independently (ListCompare vs. numeric order); correctness assumes
      // the two orderings agree — confirm.
      sort(list_data_pointers, list_data_pointers + num_query_terms, ListCompare());

      // Sort the auxInfoForInvertedIndex length from smallest to largest as well.
      sort(originalListLengthsVectorForPruningProjectForCurrentQueries_.begin(),originalListLengthsVectorForPruningProjectForCurrentQueries_.end());

      //for testing using the old overall statistics.
      total_num_results = IntersectListsEspecaillyForPruningProject(list_data_pointers, num_query_terms, results, kMaxNumResults);

      //for testing using the original overall statistics.
      //total_num_results =                          IntersectLists(list_data_pointers, num_query_terms, results, kMaxNumResults);

      break;
    case kDaatOr:
      cout << "kDaatOr query alg applied." << endl;

      // Query terms maybe arranged in order from shortest list to longest list.
      // Add by Wei Jiang, updated 2012/07/30
      sort(list_data_pointers, list_data_pointers + num_query_terms, ListCompare());

      // Sort the auxInfoForInvertedIndex length from smallest to largest as well.
      sort(originalListLengthsVectorForPruningProjectForCurrentQueries_.begin(),originalListLengthsVectorForPruningProjectForCurrentQueries_.end());

      total_num_results = MergeLists(list_data_pointers, num_query_terms, results, kMaxNumResults, true);
      break;
    case kDaatAndTopPositions:
      // Query terms must be arranged in order from shortest list to longest list.
      sort(list_data_pointers, list_data_pointers + num_query_terms, ListCompare());

      total_num_results = IntersectListsTopPositions(list_data_pointers, num_query_terms, results, kMaxNumResults);
      break;
    default:
      total_num_results = 0;
      assert(false);
  }

  *num_results = min(total_num_results, kMaxNumResults);
  for (int i = 0; i < num_query_terms; ++i) {
    index_reader_.CloseList(list_data_pointers[i]);
  }
  return total_num_results;
}

// Used by the query processing methods that utilize list layers.
// Opens every available layer of every query term's list (each layer is
// treated as an independent list). Outputs: whether this is a single-term
// query, the index of the first single-layered list (-1 if none; used as a
// speed-up hint by callers), and the total number of layers opened. Slots
// for layers a term does not have are set to NULL.
void LocalQueryProcessor::OpenListLayers(LexiconData** query_term_data, int num_query_terms, int max_layers, ListData* list_data_pointers[][MAX_LIST_LAYERS],
                                    bool* single_term_query, int* single_layer_list_idx, int* total_num_layers) {
#ifdef IRTK_DEBUG
  // Build the query string.
  string query;
  for (int i = 0; i < num_query_terms; ++i) {
    query += string(query_term_data[i]->term(), query_term_data[i]->term_len()) + string(" ");
  }
  cout << "Processing layered query: " << query << endl;
#endif

  // Flag (and, outside of warm-up, count) single-term queries.
  *single_term_query = (num_query_terms == 1);
  if (*single_term_query && !warm_up_mode_) {
    ++num_single_term_queries_;
  }

  *single_layer_list_idx = -1;
  *total_num_layers = 0;
  for (int term_idx = 0; term_idx < num_query_terms; ++term_idx) {
    const int available_layers = query_term_data[term_idx]->num_layers();

    // Remember the first single-layered list we encounter.
    if (available_layers == 1 && *single_layer_list_idx == -1) {
      *single_layer_list_idx = term_idx;
    }

    for (int layer = 0; layer < max_layers; ++layer) {
      if (layer >= available_layers) {
        // We might not always have all the layers; remaining slots are NULL.
        // (Historically the last layer was re-opened here instead — only
        // 'ProcessLayeredQuery()' depended on that behavior.)
        list_data_pointers[term_idx][layer] = NULL;
        continue;
      }

      ++(*total_num_layers);
      list_data_pointers[term_idx][layer] = index_reader_.OpenList(*query_term_data[term_idx], layer, *single_term_query, term_idx);

#ifdef IRTK_DEBUG
      cout << "Score threshold for list '" << string(query_term_data[term_idx]->term(), query_term_data[term_idx]->term_len()) << "', layer #" << layer << " is: "
          << query_term_data[term_idx]->layer_score_threshold(layer) << ", num_docs: " << query_term_data[term_idx]->layer_num_docs(layer) << "\n";
#endif
    }

#ifdef IRTK_DEBUG
    cout << endl;
#endif
  }
}

void LocalQueryProcessor::CloseListLayers(int num_query_terms, int max_layers, ListData* list_data_pointers[][MAX_LIST_LAYERS]) {
  for (int i = 0; i < num_query_terms; ++i) {
    for (int j = 0; j < max_layers; ++j) {
      if (list_data_pointers[i][j] != NULL)
        index_reader_.CloseList(list_data_pointers[i][j]);
    }
  }
}

// A DAAT based approach to multi-layered, non-overlapping lists.
// This is an exhaustive algorithm. For optimization, use WAND or MaxScore.
// Fills 'results' with at most '*num_results' entries; on return
// '*num_results' holds the clamped result count and the unclamped total is
// returned.
int LocalQueryProcessor::ProcessMultiLayeredDaatOrQuery(LexiconData** query_term_data, int num_query_terms, Result* results, int* num_results) {
  const int kMaxLayers = MAX_LIST_LAYERS;  // Assume our lists can contain this many layers.
  const int kMaxNumResults = *num_results;

  ListData* list_data_pointers[num_query_terms][kMaxLayers];  // Using a variable length array here.
  bool single_term_query;
  int single_layer_list_idx;
  int total_num_layers;
  OpenListLayers(query_term_data, num_query_terms, kMaxLayers, list_data_pointers, &single_term_query, &single_layer_list_idx, &total_num_layers);

  // Flatten the per-term layer matrix into one array of open lists (each
  // layer is merged as an independent list).
  // Fixed: uses a vector instead of a raw 'new[]'/'delete[]' pair, which
  // leaked the array if MergeLists() threw.
  vector<ListData*> lists;
  lists.reserve(total_num_layers);
  for (int i = 0; i < num_query_terms; ++i) {
    for (int j = 0; j < query_term_data[i]->num_layers(); ++j) {
      lists.push_back(list_data_pointers[i][j]);
    }
  }

  int total_num_results = MergeLists(lists.empty() ? NULL : &lists[0], total_num_layers, results, kMaxNumResults, false);

  // Clean up: close every layer we opened. Delegated to the shared helper
  // instead of duplicating its loop here.
  CloseListLayers(num_query_terms, kMaxLayers, list_data_pointers);

  *num_results = min(total_num_results, kMaxNumResults);
  return total_num_results;
}

// A DAAT based approach to multi-layered, non-overlapping lists. This is using MaxScore to speed stuff up.
// Each layer of each query term is opened as an independent list and all layers are traversed
// document-at-a-time; the MaxScore optimization sums the layers' score thresholds into cumulative
// upperbounds, which lets us skip documents (and stop early) when the remaining lists can no longer
// produce a score above the current top-k threshold.
//
// 'query_term_data': lexicon entries for the 'num_query_terms' query terms.
// 'results': output array of (score, docID) pairs with room for at least '*num_results' entries.
// 'num_results': in: the requested top-k; out: the number of results actually stored in 'results'.
// Returns the total number of candidate documents scored (which may exceed the top-k).
int LocalQueryProcessor::ProcessMultiLayeredDaatOrMaxScoreQuery(LexiconData** query_term_data, int num_query_terms, Result* results, int* num_results) {
  const int kMaxLayers = MAX_LIST_LAYERS;  // Assume our lists can contain this many layers.
  const int kMaxNumResults = *num_results;

  ListData* lists[num_query_terms][kMaxLayers];  // Using a variable length array here.
  bool single_term_query;
  int single_layer_list_idx;
  int total_num_layers;
  OpenListLayers(query_term_data, num_query_terms, kMaxLayers, lists, &single_term_query, &single_layer_list_idx, &total_num_layers);

  // Flatten the per-term layer arrays into a single array of open list layers.
  ListData* list_data_pointers[total_num_layers];  // Using a variable length array here.
  int curr_layer = 0;
  for (int i = 0; i < num_query_terms; ++i) {
    for (int j = 0; j < query_term_data[i]->num_layers(); ++j) {
      list_data_pointers[curr_layer] = lists[i][j];
      ++curr_layer;
    }
  }

  // For MaxScore to work correctly, need term upperbounds on the whole list.
  float list_thresholds[total_num_layers];  // Using a variable length array here.
  for (int i = 0; i < total_num_layers; ++i) {
    list_thresholds[i] = list_data_pointers[i]->score_threshold();
#ifdef IRTK_DEBUG
    cout << "Layer for Term Num: " << list_data_pointers[i]->term_num()
        << ", Layer Num: 0, Score Threshold: " << list_data_pointers[i]->score_threshold()
        << ", Num Docs: " << list_data_pointers[i]->num_docs()
        << ", Num Blocks: " << list_data_pointers[i]->num_blocks()
        << ", Num Chunks: " << list_data_pointers[i]->num_chunks() << endl;
#endif
  }

  int total_num_results = 0;

  // The score of the minimum document in the top-k heap; only updated once the heap holds k results.
  float threshold = 0;

  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
  const float kBm25K1 = 2.0; // k1
  const float kBm25B = 0.75; // b

  // We can precompute a few of the BM25 values here.
  const float kBm25NumeratorMul = kBm25K1 + 1;
  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

  // BM25 components.
  float bm25_sum; // The BM25 sum for the current document we're processing in the intersection.
  int doc_len;
  uint32_t f_d_t;

  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
  // Note: the IDF is based on the document count of the complete list, not just this layer.
  float idf_t[total_num_layers]; // Using a variable length array here.
  int num_docs_t;
  for (int i = 0; i < total_num_layers; ++i) {
    num_docs_t = list_data_pointers[i]->num_docs_complete_list();
    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
  }

  // We use this to get the next lowest docID from all the lists.
  uint32_t lists_curr_postings[total_num_layers]; // Using a variable length array here.
  for (int i = 0; i < total_num_layers; ++i) {
    lists_curr_postings[i] = list_data_pointers[i]->NextGEQ(0);
  }

  // (upperbound, list index) pairs for every non-empty list, sorted descending by upperbound below.
  pair<float, int> list_upperbounds[total_num_layers]; // Using a variable length array here.
  int num_lists_remaining = 0; // The number of lists with postings remaining.
  for (int i = 0; i < total_num_layers; ++i) {
    if (lists_curr_postings[i] != ListData::kNoMoreDocs) {
      list_upperbounds[num_lists_remaining++] = make_pair(list_thresholds[i], i);
    }
  }

  sort(list_upperbounds, list_upperbounds + num_lists_remaining, greater<pair<float, int> > ());

  // Precalculate the upperbounds for all possibilities.
  // After this suffix sum, entry i holds the max possible score contribution of lists i..end.
  for (int i = num_lists_remaining - 2; i >= 0; --i) {
    list_upperbounds[i].first += list_upperbounds[i + 1].first;
  }

  int i, j;
  int curr_list_idx;
  pair<float, int>* top;
  uint32_t curr_doc_id; // Current docID we're processing the score for.

  while (num_lists_remaining) {
    top = &list_upperbounds[0];
    // Find the lowest docID that can still possibly make it into the top-k (while being able to make it into the top-k).
    for (i = 1; i < num_lists_remaining; ++i) {
      curr_list_idx = list_upperbounds[i].second;
      if (threshold > list_upperbounds[i].first) {
        break;
      }

      if (lists_curr_postings[curr_list_idx] < lists_curr_postings[top->second]) {
        top = &list_upperbounds[i];
      }
    }

    // Check if we can early terminate. This might happen only after we have finished traversing at least one list.
    // This is because our upperbounds don't decrease unless we are totally finished traversing one list.
    // Must check this since we initialize top to point to the first element in the list upperbounds array by default.
    if (threshold > list_upperbounds[0].first) {
      break;
    }

    // At this point, 'curr_doc_id' can either not be able to exceed the threshold score, or it can be the max possible docID sentinel value.
    curr_doc_id = lists_curr_postings[top->second];

    // We score a docID fully here, making any necessary lookups right away into other lists.
    // Disadvantage with this approach is that you'll be doing a NextGEQ() more than once for some lists on the same docID.
    bm25_sum = 0;
    for (i = 0; i < num_lists_remaining; ++i) {
      curr_list_idx = list_upperbounds[i].second;

      // Check if we can early terminate the scoring of this particular docID.
      if (threshold > bm25_sum + list_upperbounds[i].first) {
        break;
      }

      // Move to the curr docID we're scoring.
      lists_curr_postings[curr_list_idx] = list_data_pointers[curr_list_idx]->NextGEQ(curr_doc_id);

      if (lists_curr_postings[curr_list_idx] == curr_doc_id) {
        // Compute BM25 score from frequencies.
        f_d_t = list_data_pointers[curr_list_idx]->GetFreq();
        doc_len = index_reader_.document_map().GetDocumentLength(lists_curr_postings[curr_list_idx]);
        bm25_sum += idf_t[curr_list_idx] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

        ++num_postings_scored_;

        // Can now move the list pointer further.
        lists_curr_postings[curr_list_idx] = list_data_pointers[curr_list_idx]->NextGEQ(lists_curr_postings[curr_list_idx] + 1);
      }

      if (lists_curr_postings[curr_list_idx] == ListData::kNoMoreDocs) {
        // This list is exhausted; remove it from the upperbounds array.
        --num_lists_remaining;
        float curr_list_upperbound = list_thresholds[curr_list_idx];

        // Compact the list upperbounds array.
        for (j = i; j < num_lists_remaining; ++j) {
          list_upperbounds[j] = list_upperbounds[j + 1];
        }

        // Recalculate the list upperbounds. Note that we only need to recalculate those entries less than i.
        for (j = 0; j < i; ++j) {
          list_upperbounds[j].first -= curr_list_upperbound;
        }
        // Stay on this slot: the compaction just shifted the next entry into position 'i'.
        --i;
      }
    }

    // Need to keep track of the top-k documents.
    if (total_num_results < kMaxNumResults) {
      // We insert a document if we don't have k documents yet.
      results[total_num_results] = make_pair(bm25_sum, curr_doc_id);
      push_heap(results, results + total_num_results + 1, ResultCompare());
    } else {
      if (bm25_sum > results->first) {
        // We insert a document only if it's score is greater than the minimum scoring document in the heap.
        pop_heap(results, results + kMaxNumResults, ResultCompare());
        results[kMaxNumResults - 1].first = bm25_sum;
        results[kMaxNumResults - 1].second = curr_doc_id;
        push_heap(results, results + kMaxNumResults, ResultCompare());

        // Update the threshold.
        threshold = results->first;
      }
    }
    ++total_num_results;
  }

  // Sort top-k results in descending order by document score.
  sort(results, results + min(kMaxNumResults, total_num_results), ResultCompare());

  *num_results = min(total_num_results, kMaxNumResults);
  for (int i = 0; i < total_num_layers; ++i) {
    index_reader_.CloseList(list_data_pointers[i]);
  }

  return total_num_results;
}

// Implements approach described by Anh/Moffat with improvements by Strohman/Croft, but with standard BM25 scoring, instead of impacts.
// This technique is not score safe, but it is still rank safe.
//
// All layers of all query terms are sorted by their max score and processed one layer at a time,
// maintaining an accumulator per candidate document. Processing starts in OR mode (new accumulators may
// be created) and permanently switches to AND mode (only existing accumulators are updated) once no new
// document can possibly enter the top-k. In AND mode, accumulators whose upperbound falls below the
// threshold are pruned, and two early-termination conditions are checked after each layer.
//
// 'query_term_data': lexicon entries for the 'num_query_terms' query terms.
// 'results': output array with room for at least '*num_results' entries.
// 'num_results': in: the requested top-k; out: the number of results actually stored in 'results'.
// Returns the total number of accumulators created (see the note at the bottom of this function).
int LocalQueryProcessor::ProcessLayeredTaatPrunedEarlyTerminatedQuery(LexiconData** query_term_data, int num_query_terms, Result* results, int* num_results) {
  const int kMaxLayers = MAX_LIST_LAYERS;  // Assume our lists can contain this many layers.
  const int kMaxNumResults = *num_results;

  ListData* list_data_pointers[num_query_terms][kMaxLayers];  // Using a variable length array here.
  bool single_term_query;
  int single_layer_list_idx;
  int total_num_layers;
  OpenListLayers(query_term_data, num_query_terms, kMaxLayers, list_data_pointers, &single_term_query, &single_layer_list_idx, &total_num_layers);

  // TODO: We can only support queries of a certain length (32 words). Can fix this by doing unoptimized processing for the shortest lists which do not
  //       fit within the 32 word limit. This is not an issue on our current query log.
  // The limit comes from 'Accumulator::term_bitmap' being stored in a 'uint32_t' (one bit per term).
  assert(num_query_terms <= static_cast<int>((sizeof(uint32_t) * 8)));

  ListData* max_score_sorted_list_data_pointers[total_num_layers];  // Using a variable length array here.
  uint32_t max_num_accumulators = 0;
  int curr_layer = 0;
  for (int i = 0; i < num_query_terms; ++i) {
    for (int j = 0; j < query_term_data[i]->num_layers(); ++j) {
      max_score_sorted_list_data_pointers[curr_layer] = list_data_pointers[i][j];
      ++curr_layer;

      max_num_accumulators += list_data_pointers[i][j]->num_docs();
    }
  }
  assert(curr_layer == total_num_layers);

  // This is a very crude upperbound on the maximum number of accumulators we might need (the lists will have docIDs in common). This uses more memory, but
  // avoids resizing the accumulator array if we find that it's too small.
  // Note: If the accumulator array is sized to contain all docs in the collection, we can just update accumulators by finding them by their docID as the index.
  //       This is only necessary for OR mode processing. After it's safe to move into AND mode, we can just compact the array to get better locality and thus
  //       better cache performance.
  max_num_accumulators = min(max_num_accumulators, collection_total_num_docs_);

  // Sort all the layers by their max score.
  sort(max_score_sorted_list_data_pointers, max_score_sorted_list_data_pointers + total_num_layers, ListLayerMaxScoreCompare());

  enum ProcessingMode {
    kAnd, kOr
  };
  ProcessingMode curr_processing_mode = kOr;

  int accumulators_size = max_num_accumulators;
  Accumulator* accumulators = new Accumulator[accumulators_size];
  float threshold = -numeric_limits<float>::max();  // We set the initial threshold to the lowest possible score. Note that our partial BM25 scores cannot be
                                                    // negative (although the standard BM25 formula does allow negative scores).
  float total_remainder = 0;  // The upperbound for the score of any new document encountered.
  int num_accumulators = 0;

  // Necessary to keep track of the threshold. This is a min-heap. For each layer we process, we need to keep track of the top-k scores to figure out the
  // threshold. The big problem we have is that we need to reinitialize the heap before starting to process the next layer. We do this to handle updated
  // accumulators; when an accumulator is updated (and it's score is already in the heap), if we simply add it to the
  // heap again, we'll be artificially increasing the threshold score, which could lead to incorrect results. So, we have to reinitialize the heap and insert
  // every accumulator (whether updated or not) to the heap again. This method takes O(n*log(k)) where k is the number of scores we keep in the heap --- since
  // k is a constant, it's really O(n), but the constant factor is something to keep in mind.
  // -----------------------------------------------------------------------------------------------------------------------------------------------------------
  // Another solution is to keep a hash table of docIDs that are currently in the top-k heap. Then, when we update the score of an accumulator that is in the
  // heap, we just check the hash table, which is a cheap operation. If we find that the accumulator is in the top-k, to update the score of this accumulator,
  // we do have to linearly search the heap until we find the matching docID; we then can do a bubble down operation on just this accumulator (since it's score
  // can only increase and this is a min-heap). If the accumulator is not in the top-k heap we can insert it if it's new score is greater than the min
  // accumulator. When an accumulator is removed from the top-k heap, it must also be marked deleted or removed from the hash table.
  // This scheme would be good when the majority of accumulators are not updated, that is, their score won't make it into the top-k. According to a sample
  // query, there are significantly less updates than old accumulator scores. This is the solution we use; it's benchmarked faster, more so for large values of
  // top-k.
  // -----------------------------------------------------------------------------------------------------------------------------------------------------------
  // Another solution is to use a select (quick-select) algorithm to find the k-th largest score from the accumulators. This can be done after finishing
  // processing a layer. This is a O(n) operation, where n is the number of accumulators. Note: after testing, this does not work too well in practice.
#ifdef HASH_HEAP_METHOD_OR
  pair<uint32_t, float> top_k[kMaxNumResults];  // Using a variable length array here.
#endif
  TopKTable top_k_table(kMaxNumResults);  // Indicates whether a docID is present in the top-k heap.
  int num_top_k = 0;

  float term_upperbounds[num_query_terms];  // Using a variable length array here.

  int total_num_accumulators_created = 0;
  for (int i = 0; i < total_num_layers; ++i) {
#ifdef IRTK_DEBUG
    cout << "Processing layer #" << i << ", with upperbound " << max_score_sorted_list_data_pointers[i]->score_threshold() << ", for term #"
        << max_score_sorted_list_data_pointers[i]->term_num() << endl;
#endif

#ifdef IRTK_DEBUG
    switch (curr_processing_mode) {
      case kOr:
        cout << "Using OR mode (total_remainder: " << total_remainder << ", threshold: " << threshold << ")." << endl;
        break;
      case kAnd:
        cout << "Using AND mode (total_remainder: " << total_remainder << ", threshold: " << threshold << ")." << endl;
        break;
      default:
        assert(false);
    }
#endif

    // Accumulators should always be in docID sorted order before we start processing a layer.
    for (int j = 0; j < (num_accumulators - 1); ++j) {
      assert(accumulators[j+1].doc_id >= accumulators[j].doc_id);
    }

    // Process postings based on the mode we're in.
    // Note: Look into using binary search on the accumulator array to find the docID we need.
    //       Binary search is a good option here since we always start with a sorted accumulator array.
    switch (curr_processing_mode) {
      case kOr:
#ifdef HASH_HEAP_METHOD_OR
        threshold = ProcessListLayerOr(max_score_sorted_list_data_pointers[i], &accumulators, &accumulators_size, &num_accumulators, top_k, num_top_k,
                                       top_k_table, kMaxNumResults, &total_num_accumulators_created);
#else
        threshold = ProcessListLayerOr(max_score_sorted_list_data_pointers[i], &accumulators, &accumulators_size, &num_accumulators, NULL, num_top_k,
                                       top_k_table, kMaxNumResults, &total_num_accumulators_created);
#endif
        break;
      case kAnd:
#ifdef HASH_HEAP_METHOD_OR
        threshold = ProcessListLayerAnd(max_score_sorted_list_data_pointers[i], accumulators, num_accumulators, top_k, num_top_k, top_k_table, kMaxNumResults);
#else
        threshold = ProcessListLayerAnd(max_score_sorted_list_data_pointers[i], accumulators, num_accumulators, NULL, num_top_k, top_k_table, kMaxNumResults);
#endif
        break;
      default:
        assert(false);
    }

    // Figure out the new upperbounds for each of the terms based on the layer max scores.
    // A term's upperbound is the max score of its highest remaining (unprocessed) layer, or 0 if all of
    // its layers have been processed.
    for (int j = 0; j < num_query_terms; ++j) {
      term_upperbounds[j] = 0;
      // We start at 'i+1' since we just processed this layer, and all accumulator scores are updated from within the current layer.
      for (int k = i + 1; k < total_num_layers; ++k) {
        if (max_score_sorted_list_data_pointers[k]->term_num() == j) {
          term_upperbounds[j] = max_score_sorted_list_data_pointers[k]->score_threshold();
          break;
        }
      }
#ifdef IRTK_DEBUG
      cout << "Now, the upperbound for term #" << j << " is: " << term_upperbounds[j] << endl;
#endif
    }

    // Check accumulators to see whether we can switch to AND mode.
    // We calculate the remainder function here over all terms; this is the upperbound score of any newly discovered docID.
    total_remainder = 0;
    for (int j = 0; j < num_query_terms; ++j) {
      for (int k = (i + 1); k < total_num_layers; ++k) {
        if (max_score_sorted_list_data_pointers[k]->term_num() == j) {
          total_remainder += max_score_sorted_list_data_pointers[k]->score_threshold();
          break;
        }
      }
    }

    // Set processing mode to AND for the next layer if the conditions are right.
    if (curr_processing_mode == kOr && total_remainder < threshold) {
      curr_processing_mode = kAnd;
    }

    // A slight deviation from the published algorithm, we only prune the accumulators and check for the early termination conditions only if we're already in
    // AND mode; made on the observation that it's rare that we prune any accumulators before moving into AND mode processing. During benchmarking, this
    // produced lower latencies among a range of top-k.
    if (curr_processing_mode == kAnd) {
      bool early_termination_condition_one = true;  // No documents with current scores below the threshold can make it above the threshold.
      bool early_termination_condition_two = true;  // All documents with potential scores above the threshold cannot change their final order.

      // Here we calculate the upperbound for each accumulator, and remove those whose upperbound is lower than the threshold.
      // We also compact the accumulator table here too, by moving accumulators together.
      int num_invalidated_accumulators = 0;
      for (int j = 0; j < num_accumulators; ++j) {
        Accumulator& acc = accumulators[j];

        // The upperbound is the current score plus the upperbounds of terms that have not yet contributed
        // to this accumulator (bit k of 'term_bitmap' is set once term k has been scored into it).
        float acc_upperbound = acc.curr_score;
        for (int k = 0; k < num_query_terms; ++k) {
          if (((acc.term_bitmap >> k) & 1) == 0) {
            acc_upperbound += term_upperbounds[k];
          }
        }

        // Checks for the first of the early termination conditions.
        if (early_termination_condition_one && acc.curr_score < threshold && acc_upperbound > threshold) {
          early_termination_condition_one = false;
        }

        if (acc_upperbound < threshold) {
          // Remove accumulator.
          ++num_invalidated_accumulators;
        } else {
          // We move the accumulator left, to compact the array. Note that this does not affect any accumulators beyond this one.
          accumulators[j - num_invalidated_accumulators] = acc;
        }
      }
      num_accumulators -= num_invalidated_accumulators;

  #ifdef IRTK_DEBUG
      cout << "Num Invalidated Accumulators: " << num_invalidated_accumulators << endl;
      cout << "Num Accumulators Remaining: " << num_accumulators << endl;
  #endif

      // Note: A problem that prevents us from early termination is when the upperbounds on some accumulators are all the same because of some low
      // scoring layer and we can't guarantee rank safety because the current scores are too close.
      // A possible solution is to just make lookups for the remaining accumulators (say, we could do this when we narrowed down the list of top-k candidates to
      // just the k accumulators; but we don't know the exact ranks of these, so we can't terminate processing).
      // For each accumulator, just skip ahead into the lists for which we don't have a score yet.
      // For this purpose it might make sense to have an overlapping layer; this will avoid making lookups into multiple layers, but at the same time, it's hard
      // to choose at which point to make an overlapping layer, and it's also expensive in storage costs (especially if we're memory mapping the index).

      // Check the other early termination condition.
      if (early_termination_condition_one) {
        // Sort accumulators in ascending order by their scores.
        sort(accumulators, accumulators + num_accumulators, AccumulatorScoreAscendingCompare());

        for (int j = 0; j < num_accumulators - 1; ++j) {
          float acc_upperbound = 0;
          for (int k = 0; k < num_query_terms; ++k) {
            // Note that there could be accumulators that are missing a partial score from a particular term even if the score upperbound for that term is
            // already 0 (meaning we processed all the layers of that term list); this is normal, because during AND mode processing we skip docIDs that do not
            // intersect with any accumulators. This does not affect early termination.
            if (((accumulators[j].term_bitmap >> k) & 1) == 0) {
              acc_upperbound += term_upperbounds[k];
            }
          }

          // Tied scores with remaining potential could still swap ranks, so we can't terminate.
          if (accumulators[j].curr_score == accumulators[j+1].curr_score && acc_upperbound > 0) {
            early_termination_condition_two = false;
            break;
          }

          // If this accumulator can still overtake the next-higher-scoring one, ranks aren't final.
          if (acc_upperbound > (accumulators[j+1].curr_score - accumulators[j].curr_score)) {
            early_termination_condition_two = false;
            break;
          }
        }

        // Need to sort accumulator array by docID again if we couldn't terminate.
        sort(accumulators, accumulators + num_accumulators);  // Uses the internal operator<() of the Accumulator class to sort.
      }

      // We can terminate further processing.
      if (early_termination_condition_one && early_termination_condition_two) {
  #ifdef IRTK_DEBUG
        if (i < (total_num_layers - 1)) {
          cout << "Terminating at layer " << (i + 1) << " out of " << total_num_layers << " total layers." << endl;
        }
  #endif
        break;
      }
    }
  }

  // Sort accumulators by score and return the top-k.
  sort(accumulators, accumulators + num_accumulators, AccumulatorScoreDescendingCompare());
  for (int i = 0; i < min(kMaxNumResults, num_accumulators); ++i) {
    results[i].first = accumulators[i].curr_score;
    results[i].second = accumulators[i].doc_id;
  }

  delete[] accumulators;

  // Clean up.
  for (int i = 0; i < num_query_terms; ++i) {
    for (int j = 0; j < kMaxLayers; ++j) {
      if (list_data_pointers[i][j] != NULL)
        index_reader_.CloseList(list_data_pointers[i][j]);
    }
  }

  *num_results = min(num_accumulators, kMaxNumResults);

  // We use the total number of accumulators created as the total number of results;
  // there are possibly more results, but we couldn't count them because they couldn't make it into the top-k.
  return total_num_accumulators_created;
}

// Processes a single list layer in OR mode: every posting in 'list' either updates an existing accumulator
// (matched by docID against the sorted prefix of the accumulator array) or creates a new accumulator. The
// accumulator array is doubled when full and is restored to docID-sorted order before returning.
//
// 'list': the (non-NULL) list layer to traverse.
// 'accumulators_array', 'accumulators_array_size', 'num_accumulators': in/out accumulator storage; the
//     array may be reallocated (and the caller's pointer updated) if it needs to grow.
// 'top_k', 'num_top_k', 'top_k_table': the top-k min-heap, its size counter, and its docID membership
//     table; used only when HASH_HEAP_METHOD_OR is defined ('top_k' is passed as NULL otherwise).
// 'k': the number of top results the caller wants.
// 'total_num_accumulators_created': incremented once for every newly created accumulator.
// Returns the updated threshold (the k-th largest score seen), or the lowest possible float when fewer
// than 'k' scores have been observed so far.
//
// Bug fix: the IRTK_DEBUG resize message used '+' instead of '<<' on '*num_accumulators', which performed
// pointer arithmetic on the string literal (printing garbage) rather than streaming the value.
float LocalQueryProcessor::ProcessListLayerOr(ListData* list, Accumulator** accumulators_array, int* accumulators_array_size, int* num_accumulators,
                                         pair<uint32_t, float>* top_k, int& num_top_k, TopKTable& top_k_table, int k, int* total_num_accumulators_created) {
  assert(list != NULL);
  assert(accumulators_array != NULL && *accumulators_array != NULL);
  assert(accumulators_array_size != NULL && *accumulators_array_size > 0);
  assert(*num_accumulators <= *accumulators_array_size);

#ifndef HASH_HEAP_METHOD_OR
  float top_k_scores[k];  // Using a variable length array here.
  int num_top_k_scores = 0;
#endif

  Accumulator* accumulators = *accumulators_array;
  int accumulators_size = *accumulators_array_size;

  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
  const float kBm25K1 =  2.0;  // k1
  const float kBm25B = 0.75;   // b

  // We can precompute a few of the BM25 values here.
  const float kBm25NumeratorMul = kBm25K1 + 1;
  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

  // BM25 components.
  float partial_bm25_sum;  // The BM25 sum for the current document we're processing in the intersection.
  int doc_len;
  uint32_t f_d_t;

  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for this list.
  int num_docs_t = list->num_docs_complete_list();
  float idf_t = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));

  int num_sorted_accumulators = *num_accumulators;  // This marks the point at which our newly inserted, unsorted accumulators start.
  int curr_accumulator_idx = 0;  // We start the search for a docID at the start of the accumulator table.
  uint32_t curr_doc_id = 0;

  while ((curr_doc_id = list->NextGEQ(curr_doc_id)) < ListData::kNoMoreDocs) {
    // Search for an accumulator corresponding to the current docID or insert if not found.
    // Both the list and the sorted prefix of the accumulator array are in ascending docID order, so this
    // scan never backtracks across iterations of the outer loop.
    while (curr_accumulator_idx < num_sorted_accumulators && accumulators[curr_accumulator_idx].doc_id < curr_doc_id) {

#ifndef HASH_HEAP_METHOD_OR
      // Maintain the threshold score.
      // This is for all the old accumulators, whose scores we won't be updating, but still need to be accounted for.
      KthScore(accumulators[curr_accumulator_idx].curr_score, top_k_scores, num_top_k_scores++, k);
#endif

      ++curr_accumulator_idx;
    }

    // Compute partial BM25 sum.
    f_d_t = list->GetFreq();
    doc_len = index_reader_.document_map().GetDocumentLength(curr_doc_id);
    partial_bm25_sum = idf_t * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

    if (curr_accumulator_idx < num_sorted_accumulators && accumulators[curr_accumulator_idx].doc_id == curr_doc_id) {  // Found a matching accumulator.
      accumulators[curr_accumulator_idx].curr_score += partial_bm25_sum;
      accumulators[curr_accumulator_idx].term_bitmap |= (1 << list->term_num());

#ifndef HASH_HEAP_METHOD_OR
      // Maintain the threshold score.
      // This is for the updated accumulator scores.
      KthScore(accumulators[curr_accumulator_idx].curr_score, top_k_scores, num_top_k_scores++, k);
#else
      // Must rebuild the heap after we update the score, only if this accumulator is already in the heap.
#ifdef CUSTOM_HASH
      if (top_k_table.Exists(accumulators[curr_accumulator_idx].doc_id)) {
#else
      if (top_k_table.find(accumulators[curr_accumulator_idx].doc_id) != top_k_table.end()) {
#endif
        // Already in the heap, so find it's score in the heap, update it, and make the heap again, so it satisfies the heap property.
        // This is expensive, hopefully, we won't do it much.
        int heap_size = min(num_top_k, k);
        for (int i = 0; i < heap_size; ++i) {
          if (top_k[i].first == accumulators[curr_accumulator_idx].doc_id) {
            top_k[i].second = accumulators[curr_accumulator_idx].curr_score;
            BubbleDownHeap(top_k, heap_size, i);
            break;
          }
        }
      } else {
        // Insert it, because it's score has been updated, and it's not currently in the top-k heap, so it might make it there now (with the updated score).
        // Don't need to update 'num_top_k' in this case, because we must have seen this accumulator before, and since it's not in the top-k, it must have
        // been evicted, so 'num_top_k' must be >= 'k' already.
        KthAccumulator(accumulators[curr_accumulator_idx], top_k, num_top_k, top_k_table, k);
      }
#endif

      ++curr_accumulator_idx;
    } else {  // Need to insert accumulator.
      if (*num_accumulators >= accumulators_size) {
#ifdef IRTK_DEBUG
        cout << "Resizing accumulator array (curr size: " << *num_accumulators << ", new size: " << (*num_accumulators * 2) << ")." << endl;
#endif
        // Resize accumulator array.
        *accumulators_array_size *= 2;
        Accumulator* new_accumulators = new Accumulator[*accumulators_array_size];
        memcpy(new_accumulators, accumulators, (*num_accumulators) * sizeof(Accumulator));
        delete[] *accumulators_array;
        *accumulators_array = new_accumulators;

        // Refresh the local aliases after reallocating.
        accumulators = *accumulators_array;
        accumulators_size = *accumulators_array_size;
      }
      accumulators[*num_accumulators].doc_id = curr_doc_id;
      accumulators[*num_accumulators].curr_score = partial_bm25_sum;
      accumulators[*num_accumulators].term_bitmap = (1 << list->term_num());

#ifndef HASH_HEAP_METHOD_OR
      // Maintain the threshold score.
      // This is for the new accumulator scores.
      KthScore(accumulators[*num_accumulators].curr_score, top_k_scores, num_top_k_scores++, k);
#else
      KthAccumulator(accumulators[*num_accumulators], top_k, num_top_k++, top_k_table, k);
#endif

      ++(*num_accumulators);
      ++(*total_num_accumulators_created);
    }

    ++curr_doc_id;
  }

  // Sort the accumulator array by docID.
  // Note that we only really need to sort any new accumulators we inserted and merge it with the already sorted part of the array.
  sort(accumulators + num_sorted_accumulators, accumulators + *num_accumulators);

  // Note: An in-place merge would still require a buffer if you want to take O(n) time instead of O(n*log(n))...
  //       This is probably what Strohman/Croft meant, writing that they always needed to allocate a new array for each segment they process.
  //       We don't do that here right now, so the merge below is free to implement either the O(n) or O(n*log(n)) scheme, depending on how much free memory is
  //       available (according to the documentation). The 'inplace_merge' used here is slightly faster in benchmarking than allocating another buffer and doing
  //       a merge, as in the commented out code below.
  inplace_merge(accumulators, accumulators + num_sorted_accumulators, accumulators + *num_accumulators);

  // Alternative to the 'inplace_merge' used above; this is slightly slower in practice.
  /*Accumulator* merged_accumulators = new Accumulator[*accumulators_array_size];
  merge (accumulators, accumulators + num_sorted_accumulators, accumulators + num_sorted_accumulators, accumulators + *num_accumulators, merged_accumulators);
  delete[] *accumulators_array;
  *accumulators_array = merged_accumulators;*/

#ifdef HASH_HEAP_METHOD_OR
  // We return the threshold score.
  if (num_top_k < k) {
    return -numeric_limits<float>::max();
  }

  return top_k[0].second;
#else
  if (num_top_k_scores < k) {
    return -numeric_limits<float>::max();
  }

  return top_k_scores[0];
#endif
}

// Processes a single list layer in AND mode: only docIDs that already have an accumulator are scored; no
// new accumulators are created. The accumulator array is walked in docID order, skipping the list ahead
// with NextGEQ() to each accumulator's docID.
//
// 'list': the (non-NULL) list layer to traverse.
// 'accumulators', 'num_accumulators': the docID-sorted accumulator array (updated in place).
// 'top_k', 'num_top_k', 'top_k_table': the top-k min-heap, its size counter, and its docID membership
//     table; used only when HASH_HEAP_METHOD_AND is defined ('top_k' is passed as NULL otherwise).
// 'k': the number of top results the caller wants.
// Returns the updated threshold (the k-th largest score seen), or the lowest possible float when fewer
// than 'k' scores have been observed so far.
float LocalQueryProcessor::ProcessListLayerAnd(ListData* list, Accumulator* accumulators, int num_accumulators, pair<uint32_t, float>* top_k, int& num_top_k,
                                          TopKTable& top_k_table, int k) {
  assert(list != NULL);
  assert(accumulators != NULL);
  assert(num_accumulators >= 0);

#ifndef HASH_HEAP_METHOD_AND
  float top_k_scores[k];  // Using a variable length array here.
  int num_top_k_scores = 0;
#endif

  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
  const float kBm25K1 =  2.0;  // k1
  const float kBm25B = 0.75;   // b

  // We can precompute a few of the BM25 values here.
  const float kBm25NumeratorMul = kBm25K1 + 1;
  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

  // BM25 components.
  float partial_bm25_sum;  // The BM25 sum for the current document we're processing in the intersection.
  int doc_len;
  uint32_t f_d_t;

  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for this list.
  int num_docs_t = list->num_docs_complete_list();
  float idf_t = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));

  int accumulator_offset = 0;
  uint32_t curr_doc_id;

  while (accumulator_offset < num_accumulators) {
    // Skip the list ahead to the current accumulator's docID; a match means this layer contains it.
    curr_doc_id = list->NextGEQ(accumulators[accumulator_offset].doc_id);
    if (curr_doc_id == accumulators[accumulator_offset].doc_id) {
      // Compute partial BM25 sum.
      f_d_t = list->GetFreq();
      doc_len = index_reader_.document_map().GetDocumentLength(curr_doc_id);
      partial_bm25_sum = idf_t * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

      // Update accumulator with the document score.
      accumulators[accumulator_offset].curr_score += partial_bm25_sum;
      accumulators[accumulator_offset].term_bitmap |= (1 << list->term_num());

#ifndef HASH_HEAP_METHOD_AND
      // Maintain the threshold score.
      // This is for the updated accumulator scores.
      KthScore(accumulators[accumulator_offset].curr_score, top_k_scores, num_top_k_scores++, k);
#else
      // Must rebuild the heap after we update the score, only if this accumulator is already in the heap.
#ifdef CUSTOM_HASH
      if (top_k_table.Exists(accumulators[accumulator_offset].doc_id)) {
#else
      if (top_k_table.find(accumulators[accumulator_offset].doc_id) != top_k_table.end()) {
#endif
        // Already in the heap, so find it's score in the heap, update it, and make the heap again, so it satisfies the heap property.
        // This is expensive, hopefully, we won't do it much.
        int heap_size = min(num_top_k, k);
        for (int i = 0; i < heap_size; ++i) {
          if (top_k[i].first == accumulators[accumulator_offset].doc_id) {
            top_k[i].second = accumulators[accumulator_offset].curr_score;
            BubbleDownHeap(top_k, heap_size, i);
            break;
          }
        }
      } else {
        // Insert it, because it's score has been updated, and it's not currently in the top-k heap, so it might make it there now (with the updated score).
        // Don't need to update 'num_top_k' in this case, because we must have seen this accumulator before, and since it's not in the top-k, it must have
        // been evicted, so 'num_top_k' must be >= 'k' already.
        KthAccumulator(accumulators[accumulator_offset], top_k, num_top_k, top_k_table, k);
      }
#endif
    } else {
#ifndef HASH_HEAP_METHOD_AND
      // Maintain the threshold score.
      // This is for all the old accumulators, whose scores we won't be updating, but still need to be accounted for.
      KthScore(accumulators[accumulator_offset].curr_score, top_k_scores, num_top_k_scores++, k);
#endif
    }

    ++accumulator_offset;
  }

#ifdef HASH_HEAP_METHOD_AND
  // We return the threshold score.
  if (num_top_k < k) {
    return -numeric_limits<float>::max();
  }

  return top_k[0].second;
#else
  if (num_top_k_scores < k) {
    return -numeric_limits<float>::max();
  }

  return top_k_scores[0];
#endif
}

// Maintains the top-k (docID, score) heap in 'accumulators' while keeping 'top_k_table' in sync
// with the set of docIDs currently in the heap. The heap is a min heap on score (via
// DocIdScorePairScoreDescendingCompare), so accumulators[0] always holds the k-th largest score.
// 'new_accumulator' is the candidate to insert; 'num_accumulators' is the current heap size
// (passed by value — the CALLER is responsible for tracking the size across calls);
// 'kth_score' is k, the heap capacity.
void LocalQueryProcessor::KthAccumulator(const Accumulator& new_accumulator, std::pair<uint32_t, float>* accumulators, int num_accumulators, TopKTable& top_k_table, int kth_score) {
  // We use a min heap to determine the k-th largest score (the lowest score of the k scores we keep).
  // Notice that we don't have to explicitly make the heap, since it's assumed to be maintained from the start.
  if (num_accumulators < kth_score) {  // We insert a document score if we don't have k documents yet.
    // Mark that this docID has been inserted into the top-k heap.
#ifdef CUSTOM_HASH
    top_k_table.Insert(new_accumulator.doc_id);
#else
    top_k_table.insert(new_accumulator.doc_id);
#endif
    accumulators[num_accumulators++] = make_pair(new_accumulator.doc_id, new_accumulator.curr_score);
    push_heap(accumulators, accumulators + num_accumulators, DocIdScorePairScoreDescendingCompare());
  } else {
    if (new_accumulator.curr_score > accumulators[0].second) {  // We insert a score only if it is greater than the minimum score in the heap.
      // Mark that this accumulator has been inserted into the top-k heap.
#ifdef CUSTOM_HASH
      top_k_table.Insert(new_accumulator.doc_id);
#else
      top_k_table.insert(new_accumulator.doc_id);
#endif
      // pop_heap moves the current minimum to accumulators[kth_score - 1] so it can be overwritten.
      pop_heap(accumulators, accumulators + kth_score, DocIdScorePairScoreDescendingCompare());
      // Unmark this accumulator (no longer in the heap).
#ifdef CUSTOM_HASH
      top_k_table.Remove(accumulators[kth_score - 1].first);
#else
      top_k_table.erase(accumulators[kth_score - 1].first);
#endif
      // Overwrite the evicted slot with the new accumulator and restore the heap property.
      accumulators[kth_score - 1].first = new_accumulator.doc_id;
      accumulators[kth_score - 1].second = new_accumulator.curr_score;
      push_heap(accumulators, accumulators + kth_score, DocIdScorePairScoreDescendingCompare());
    }
  }
}

// Restores the min-heap property of 'top_k' (size 'top_k_size') after the score of the entry at
// 'node_idx' has increased. Since the score can only have grown, the entry only ever needs to move
// downward: it is swapped with its lower-scoring child until it is no smaller than both children.
void LocalQueryProcessor::BubbleDownHeap(pair<uint32_t, float>* top_k, int top_k_size, int node_idx) {
  for (;;) {
    const int left_idx = 2 * node_idx + 1;
    if (left_idx >= top_k_size) {
      return;  // Leaf node: nothing below to compare against.
    }

    // Choose the child with the lower score — the only valid swap target in a min heap.
    const int right_idx = left_idx + 1;
    int min_child_idx;
    if (right_idx >= top_k_size) {
      min_child_idx = left_idx;  // No right child.
    } else {
      min_child_idx = (top_k[left_idx].second < top_k[right_idx].second) ? left_idx : right_idx;
    }

    if (!(top_k[node_idx].second > top_k[min_child_idx].second)) {
      return;  // Heap property already holds at this node.
    }

    // Swap the node with its smaller child and continue sifting down from there.
    const pair<uint32_t, float> displaced = top_k[node_idx];
    top_k[node_idx] = top_k[min_child_idx];
    top_k[min_child_idx] = displaced;
    node_idx = min_child_idx;
  }
}

// Tracks the running threshold value (the score of the k-th highest scoring accumulator).
// 'scores' must have room for at least 'kth_score' entries and is maintained as a min heap
// (via greater<float>), so scores[0] is always the lowest of the retained top scores.
// 'num_scores' is the current heap size; the caller tracks it across calls.
void LocalQueryProcessor::KthScore(float new_score, float* scores, int num_scores, int kth_score) {
  // The heap property is assumed to hold on entry, so no explicit make_heap is needed.
  if (num_scores < kth_score) {
    // Fewer than k scores collected so far: add the new score unconditionally.
    scores[num_scores] = new_score;
    push_heap(scores, scores + num_scores + 1, greater<float> ());
  } else if (new_score > scores[0]) {
    // Heap is full: the new score displaces the current minimum only when it beats it.
    pop_heap(scores, scores + kth_score, greater<float> ());
    scores[kth_score - 1] = new_score;
    push_heap(scores, scores + kth_score, greater<float> ());
  }
}

// This is for querying indices with dual overlapping layers.
// We can actually process more than 2 terms at a time as follows:
// Say we have 3 lists, A, B, C (each with at most 2 layers, the higher levels having duplicated docID info):
// Process (A_1 x B x C), (B_1 x A x C), (C_1 x A x B), where A, B, C are the whole lists and A_1, B_1, C_1 are the first (or only) layers.
// Note that intersecting all 3 terms should give good skipping performance.
// We also assume that the whole index is in main memory.
// Now, we can also run all 3 intersections in parallel (this should be good given that all 3 lists are in main memory).
// Merge all the results using a heap.  Or store the results in an array (which is then sorted) and the top results determined (preferable).
// We only need k results from each of the 3 separate intersections.
//
// The drawback here (for queries that have lists with all layers) is you have to scan all the 2nd layers twice for the number of lists you have in the query.
// (But we intersect with small lists and with good skipping performance (the index is in main memory, plus a block level index to skip decoding block headers),
// so it makes the costs acceptable).
//
// Intersecting for more than 2 layers is redundant right now. That's why for 3 or more term queries, we get pretty high latencies.
// Idea: After each intersection, can already check the threshold...
//       If there are 3 lists A,B,C, then A_1 x B_2 x C_2 determines the intersection of the top A documents with everything else...including the top B and C
//       since the layers are overlapping.  This is even true for 2 lists.  After doing A_1 x B_2, we can check the kth score
//       (if we got k scores in the intersection) against the threshold m(A_2) + m(B_1).
//
// TODO: Two bugs that need fixing:
//       * k results from each intersection is not enough since some of them are duplicates. Solution is to merge all the results --- see comments within
//         function for more details.
//       * Some docIDs appear more than once in the final results --- because we heapify only by score
//         (the same docID can have slightly different scores across intersections due to rounding errors during float addition).
//         Solution: don't sort by score in IntersectLists(), sort by the docIDs for each intersection, merge docIDs
//                   (output array needs to be 'num_query_terms' * 'num_results' large to fit everything, in case all docIDs are unique).
//                   then after merging and eliminating duplicate docIDs, sort by score.
//
// TODO: Find percentage of queries that can early terminate in each category of number of query terms.
// TODO: Find out how much work is A_2 x B_2 vs A_1 x B_2 && B_1 x A_2. Do we traverse significantly less elements (when we are able to early terminate)?
//       (And also for queries with more than 2 terms).
//       If the answer is yes, we traverse less elements --- then it would be good to keep the A_2, B_2 lists decompressed in main memory, with the BM25 score
//       precomputed. Otherwise, the costs of decompression and BM25 computation are too large overheads.
//
// Idea: Traverse all (or maybe some?) intersections in an interleaved manner. Then check threshold every N documents processed (after we get k unique docs)
//       if we can early terminate on one of the intersections.  We can also use the threshold info to skip chunks/blocks
//       if we store threshold info within the index or load it into main memory).  This is because not all intersections are equal...some are gonna have lower
//       max scores, the latter intersections due to IDF.
//
// TODO: Investigate different result for the query 'cam glacier national park'.
// TODO: Implement:
//       Non overlapping index; keep an unresolved docID pool and an upperbound score so we can eliminate documents.
// Processes a query of 'num_query_terms' terms against a (dual-)layered index. Writes up to
// *num_results ranked results into 'results' and updates *num_results with the count actually
// stored. Returns the total number of results found by the intersections that were run
// (see the TODO at the bottom about its accuracy when lower layers are never traversed).
int LocalQueryProcessor::ProcessLayeredQuery(LexiconData** query_term_data, int num_query_terms, Result* results, int* num_results) {
  const int kMaxLayers = MAX_LIST_LAYERS;  // Assume our lists can contain this many layers.
  const int kMaxNumResults = *num_results;

  ListData* list_data_pointers[num_query_terms][kMaxLayers];  // Using a variable length array here.
  bool single_term_query;
  int single_layer_list_idx;
  int total_num_layers;
  OpenListLayers(query_term_data, num_query_terms, kMaxLayers, list_data_pointers, &single_term_query, &single_layer_list_idx, &total_num_layers);

  // Run the appropriate intersections.
  ListData* curr_intersection_list_data_pointers[num_query_terms];  // Using a variable length array here.
  int total_num_results = 0;
  bool run_standard_intersection = false;
  if (single_layer_list_idx == -1) { // We have 2 layers for each term in the query.
    // For only 2 query terms, the other method is better.
    if (query_algorithm_ == kDualLayeredOverlappingMergeDaat && num_query_terms > 2) {
      // Here, we merge all the first layers together (while removing duplicate docIDs) and then treat it as one virtual list and intersect with all the 2nd layers.
      // We do the merge in an interleaved manner with the intersections to improve processing speed.
      // This allows us to avoid allocating an in-memory list (previous attempt to do this resulted in significantly slower running times).
      // This method will wind up traversing and scoring more documents, but it is also a sort of way to do "bulk lookups".
      // I think this method, combined with docID reordering could provide even larger gains.

      ListData* merge_list_data_pointers[num_query_terms];  // Using a variable length array here.

      // Now do the intersection using the virtual list to drive which documents we're looking up.
      // Note that the virtual list could be larger than one of the 2nd layers.
      for (int i = 0; i < num_query_terms; ++i) {
        // Use only the first layer for each term.
        merge_list_data_pointers[i] = list_data_pointers[i][0];
        curr_intersection_list_data_pointers[i] = list_data_pointers[i][1];
      }

      sort(curr_intersection_list_data_pointers, curr_intersection_list_data_pointers + num_query_terms, ListCompare());
      total_num_results = IntersectLists(merge_list_data_pointers, num_query_terms, curr_intersection_list_data_pointers, num_query_terms, results, kMaxNumResults, false);
      *num_results = min(total_num_results, kMaxNumResults);
    } else {
      // TODO: It's not enough for each intersection to return just k results because there might be duplicate docIDs that we'll be filtering...
      //       This should probably be solved by writing a new function for intersect lists --- that will also combine the same docIDs from different list
      //       intersections right away so we do a merge of all the results in one step.

      Result all_results[num_query_terms][kMaxNumResults]; // Using a variable length array here.
      int num_intersection_results[num_query_terms]; // Using a variable length array here.
      for (int i = 0; i < num_query_terms; ++i) {
        // Build the intersection list.
        // We always intersect with the first layer of each list.
        curr_intersection_list_data_pointers[i] = list_data_pointers[i][0];

        // We also intersect with all the second layers of all the other lists.
        for (int j = 0; j < num_query_terms; ++j) {
          if (j != i) {
            curr_intersection_list_data_pointers[j] = list_data_pointers[j][1];
          }
        }

        // List intersections must be arranged in order from shortest list to longest list.
        sort(curr_intersection_list_data_pointers, curr_intersection_list_data_pointers + num_query_terms, ListCompare());
        int curr_total_num_results = IntersectLists(curr_intersection_list_data_pointers, num_query_terms, all_results[i], kMaxNumResults);
        num_intersection_results[i] = min(curr_total_num_results, kMaxNumResults);
        total_num_results += curr_total_num_results;

        for (int j = 0; j < num_query_terms; ++j) {
          // Need to reset the 2nd layers after running the query since we'll be using them again in the next iteration.
          // In our current setup of 2 layers, we really need to only reset each 2nd layer once, and the 2nd time, it doesn't particularly matter.
          // But this is pretty cheap.
          if (curr_intersection_list_data_pointers[j]->layer_num() == 1) {
            curr_intersection_list_data_pointers[j]->ResetList(single_term_query);
          }
        }

        // Print results of individual intersections for debugging.
        if (!silent_mode_) {
          for (int j = 0; j < num_intersection_results[i]; ++j) {
            cout << all_results[i][j].second << ", score: " << all_results[i][j].first << endl;
          }
          cout << endl;
        }
      }

      // Merge the results from all the previous intersection(s) using a heap.

      // The 'pair<int, int>' is for keeping track of the index of the intersection as well as the index of the current Result entry within the intersection.
      // NOTE(review): the heap ordering relies on the default comparison of pair<Result, pair<int, int> >,
      // which compares the Result component first — confirm Result orders by score for this to pop highest-scoring results first.
      pair<Result, pair<int, int> > result_heap[num_query_terms]; // Using a variable length array here.
      int result_heap_size = 0;
      for (int i = 0; i < num_query_terms; ++i) {
        if (num_intersection_results[i] > 0) {
          result_heap[i] = make_pair(all_results[i][0], make_pair(i, 1));
          ++result_heap_size;
          --num_intersection_results[i];
        }
      }

      make_heap(result_heap, result_heap + result_heap_size); // Default is max heap, which is what we want.
      int curr_result = 0;
      while (result_heap_size && curr_result < kMaxNumResults) {
        pop_heap(result_heap, result_heap + result_heap_size);

        Result& curr_top_result = result_heap[result_heap_size - 1].first;
        pair<int, int>& curr_top_result_idx = result_heap[result_heap_size - 1].second;

        // If the previous result we stored is the same as the current, we don't need to insert it.
        // We only compare the docIDs because the scores could be different when the order of the addition of the partial BM25 sums is different.
        // This is due to floating point rounding errors.
        if (curr_result == 0 || results[curr_result - 1].second != curr_top_result.second) {
          results[curr_result++] = curr_top_result;
        }

        // Refill the heap slot with the next result from the same intersection, or shrink the heap if exhausted.
        int top_intersection_index = curr_top_result_idx.first;
        if (num_intersection_results[top_intersection_index] > 0) {
          --num_intersection_results[top_intersection_index];
          result_heap[result_heap_size - 1] = make_pair(all_results[top_intersection_index][curr_top_result_idx.second], make_pair(top_intersection_index,
                                                                                                                                   curr_top_result_idx.second
                                                                                                                                       + 1));
          push_heap(result_heap, result_heap + result_heap_size);
        } else {
          --result_heap_size;
        }
      }

      *num_results = curr_result;
    }

    // Need to satisfy the early termination conditions.

    // Check if we have enough results first.
    if (*num_results >= kMaxNumResults) {
      // We have enough results to possibly early terminate.
      // Check whether we meet the early termination requirements.
      Result& min_result = results[min(kMaxNumResults - 1, *num_results - 1)];
      // Sum the per-term score upperbounds of the (remaining) last layers; if our k-th result
      // already beats this bound, no unseen document can enter the top-k.
      float remaining_document_score_upperbound = 0;
      for (int i = 0; i < num_query_terms; ++i) {
        float bm25_partial_score = query_term_data[i]->layer_score_threshold(query_term_data[i]->num_layers() - 1);
        assert(!isnan(bm25_partial_score));
        remaining_document_score_upperbound += bm25_partial_score;
      }

      if (min_result.first > remaining_document_score_upperbound) {
        ////////////TODO: print the properly early terminated query.
        //        if(num_query_terms == 1) {
        //          static int QUERY_COUNT = 0;
        //          cout << QUERY_COUNT++ << ":" << query << endl;
        //        }
        ////////////////

        ++num_queries_kth_result_meeting_threshold_;
        if (!silent_mode_)
          cout << "Early termination possible!" << endl;

        if (!warm_up_mode_)
          ++num_early_terminated_queries_;
      } else {
        ++num_queries_kth_result_not_meeting_threshold_;
        if (!silent_mode_)
          cout << "Cannot early terminate due to score thresholds." << endl;

        run_standard_intersection = true;
      }

    } else {
      // Don't have enough results from the first layers, execute query on the 2nd layer.
      if (!warm_up_mode_ && *num_results < kMaxNumResults) {
        if (total_num_results < kMaxNumResults) {
          ++not_enough_results_definitely_;
          if (!silent_mode_)
            cout << "Definitely don't have enough results." << endl;
        } else {
          ++not_enough_results_possibly_;
          if (!silent_mode_)
            cout << "Potentially don't have enough results." << endl;
        }
      }

      run_standard_intersection = true;
    }
  } else {
    // If we have at least one term in the query that has only a single layer,
    // we can get away with doing only one intersection on the last layers of each inverted list.
    ++num_queries_containing_single_layered_terms_;
    if (!silent_mode_)
      cout << "Query includes term with only a single layer." << endl;

    run_standard_intersection = true;

    // We count this as an early terminated query.
    if (!warm_up_mode_)
      ++num_early_terminated_queries_;
  }

  if (run_standard_intersection) {
    // Need to re-run the query on the last layers for each list (this is actually the standard DAAT approach).
    for (int i = 0; i < num_query_terms; ++i) {
      // Before we rerun the query, we need to reset the list information so we start from the beginning.
      list_data_pointers[i][query_term_data[i]->num_layers() - 1]->ResetList(single_term_query);
      curr_intersection_list_data_pointers[i] = list_data_pointers[i][query_term_data[i]->num_layers() - 1];
    }

    sort(curr_intersection_list_data_pointers, curr_intersection_list_data_pointers + num_query_terms, ListCompare());
    total_num_results = IntersectLists(curr_intersection_list_data_pointers, num_query_terms, results, kMaxNumResults);
    *num_results = min(total_num_results, kMaxNumResults);
  }

  CloseListLayers(num_query_terms, kMaxLayers, list_data_pointers);

  // TODO: This is incorrect for some queries where we don't actually open and traverse the lower layers (such as one word queries).
  return total_num_results;
}

// Merges the docIDs of 'num_lists' lists into 'merged_doc_ids' in ascending order, removing
// duplicate docIDs that appear in multiple lists. No documents are scored here. At most
// 'max_merged_doc_ids' entries are written; returns the number of docIDs actually stored.
// TODO: We can also potentially score documents here and keep track of which lists the score came from, then we'd have to do less work scoring
//       when we intersect with the 2nd layers --- but the logic here would be more complicated.
//       Potentially we can also set up some thresholds...since we're doing OR mode processing --- look at the Efficient Query Processing in Main Memory paper...
int LocalQueryProcessor::MergeLists(ListData** lists, int num_lists, uint32_t* merged_doc_ids, int max_merged_doc_ids) {
  pair<uint32_t, int> heap[num_lists];  // (docID, list index) pairs. Using a variable length array here.
  int heap_size = 0;

  // Prime the heap with the first posting of every non-empty list.
  for (int i = 0; i < num_lists; ++i) {
    uint32_t curr_doc_id;
    if ((curr_doc_id = lists[i]->NextGEQ(0)) < ListData::kNoMoreDocs) {
      heap[heap_size++] = make_pair(curr_doc_id, i);
    }
  }

  // We use the default comparison --- which is fine, but the comparison for a pair checks both values, and we really only need to check the docID part
  // so it could be more efficient to write your own simple comparator.
  make_heap(heap, heap + heap_size, greater<pair<uint32_t, int> >());

  int num_merged = 0;
  while (heap_size) {
    pair<uint32_t, int> top = heap[0];

    // Don't insert duplicate docIDs (the same document may appear in several lists; the heap pops
    // docIDs in ascending order, so comparing against the last stored entry suffices).
    if (num_merged == 0 || merged_doc_ids[num_merged - 1] != top.first) {
      // Guard the output buffer explicitly: the assert disappears in NDEBUG builds, and without
      // the runtime check an oversized merge would overflow 'merged_doc_ids'.
      assert(num_merged < max_merged_doc_ids);
      if (num_merged >= max_merged_doc_ids) {
        break;
      }
      merged_doc_ids[num_merged++] = top.first;
    }

    // Need to pop and push to make sure heap property is maintained.
    pop_heap(heap, heap + heap_size, greater<pair<uint32_t, int> >());

    uint32_t curr_doc_id;
    if ((curr_doc_id = lists[top.second]->NextGEQ(top.first + 1)) < ListData::kNoMoreDocs) {
      // Refill the slot vacated by pop_heap with this list's next posting.
      heap[heap_size - 1] = make_pair(curr_doc_id, top.second);
      push_heap(heap, heap + heap_size, greater<pair<uint32_t, int> >());
    } else {
      --heap_size;  // This list is exhausted.
    }
  }

  return num_merged;
}

// Entry point for the score-modification merge. Requires an index built with external
// pre-computed scores; aborts the process otherwise.
int LocalQueryProcessor::MergeListsForModifyingPreComputedScores(LexiconData** query_term_data, ListData** lists, int num_lists, string mode, POSTING_RESULT* posting_results) {
  // Without externally stored pre-computed scores this mode cannot run at all.
  if (!index_use_precomputed_score_) {
    cout << "This set of indexes do NOT have external pre-computed scores stored." << endl;
    cout << "Please select another set of indexes." << endl;
    exit(1);
    // the last argument is true for pruning project.
    // return IntersectLists(NULL, 0, lists, num_lists, results, num_results, true); //original version by Roman.
  }

  // Delegate to the full implementation; 'true' enables score modification (pre-computed scores by Wei).
  return MergeListsForModifyingPreComputedScores(query_term_data, lists, num_lists, true, mode, posting_results);
}



// Standard DAAT OR mode processing for Modifying Pre-Computed Scores purposes.
// Placeholder implementation: prints status notices and returns 0 results.
// See MergeListsForModifyingPreComputedScores for the actively developed code path.
int LocalQueryProcessor::MergeLists2ScoreExtractedFromExternal(ListData** lists, int num_lists, Result_Wei_2012* results, int num_results, bool debugFlag, int computerMode) {
  static const char* const kNotices[] = {
      "Notices:",
      "Tomorrow TODO list:",
      "Updated by Wei 2012/08/28 evening",
      "The logic is still under construction.",
      "Please refer to mergeList for modifying the scores for more recent development",
  };
  for (int i = 0; i < static_cast<int>(sizeof(kNotices) / sizeof(kNotices[0])); ++i) {
    cout << kNotices[i] << endl;
  }
  return 0;
}

// Standard DAAT OR mode processing for Modifying Pre-Computed Scores purposes.
// Walks all lists document-at-a-time in OR mode, reading each posting's score from the external
// pre-computed score index (no online BM25 computation). Behavior depends on 'mode':
//   "Interactive"  -- prompts the user on stdin for a replacement score per posting; changed
//                     scores are logged to the file named by kNewScoreIntermediateFileForPruning.
//   "Programmatic" -- records every posting (term, docID, score) into 'posting_results'.
// Returns the number of postings recorded in 'posting_results' (see NOTE at the bottom).
int LocalQueryProcessor::MergeListsForModifyingPreComputedScores(LexiconData** query_term_data, ListData** lists, int num_lists,bool scoreModificationFlag, string mode,POSTING_RESULT* posting_results){

	if(index_use_precomputed_score_)
	{
		return MergeListsForModifyingPreComputedScores(query_term_data, lists, num_lists, true, mode,posting_results); //new version using pre-computed scores by Wei.
	}
	else
	{
		cout << "This set of indexes do NOT have external pre-computed scores stored." << endl;
		cout << "Please select another set of indexes." << endl;
		exit(1);
		// the last argument is true for pruning project.
		// return IntersectLists(NULL, 0, lists, num_lists, results, num_results, true); //original version by Roman.
	}
}

// Standard DAAT OR mode processing for comparison purposes.
int LocalQueryProcessor::MergeLists2ScoreComputeOnlineForSIGIR2014(ListData** lists, int num_lists, Result_Wei_2012* results, int num_results, bool pruningProjectSwitch, int computation_method) {
  // for debug ONLY
  // cout << "LocalQueryProcessor::MergeLists2ScoreComputeOnlineForSIGIR2014(...) called.(Mostly Original from Roman)" << endl;
  // cout << "WIKI setting, option0, AND" << endl;
  // cout << "Buettcher, Clarke trec2004 setting for the parameters K1,B, option1, AND" << endl;
  // cout << "own setting option2, AND" << endl;
  cout << "own setting option3, AND" << endl;
  // cout << "own setting option4, AND" << endl;
  // cout << "own setting option5, AND" << endl;
  // cout << "own setting option6, AND" << endl;

  int total_num_results = 0;

  // Updated by Wei on 2013/01/01 night at school
  // Note: the following computation_method values are NO longer supported: 1, 3, 4, 5, 6. (Method 2 is still handled below.)
  if (computation_method == 0){
	  // Setting this option to 'true' makes a considerable difference in average query latency (> 100ms).
	  // When we score the complete doc, we first find the lowest docID in the array, and then scan the array for that docID, and completely score it.
	  // All lists from which the docID was scored have their list pointers moved forward.
	  // When we don't score the complete doc, at each turn of the while loop, we find a partial score of the lowest docID posting.
	  // We add these together for a particular docID to get the complete score -- but it requires several iterations of the main while loop.
	  // This is less efficient, since we have to do a complete linear search through the array for every posting.
	  // On the other hand, when we score the complete doc right away, we only have to do one more linear search through all postings to score all the lists.
	  // Clearly, if the majority of the docIDs are present in more than one list, we'll be getting a speedup.
	  const bool kScoreCompleteDoc = true;

	  // Use an array instead of a heap for selecting the list with the lowest docID at each step.
	  // Using a heap for picking the list with the lowest docID is only implemented for when 'kScoreCompleteDoc' is false.
	  // For compatibility with 'kScoreCompleteDoc' equal to true, you'd need to use the heap to choose the next list to score, instead of iterating through the array, which is what's done now.
	  // Array based method is faster than the heap based method for choosing the lowest docID from all the lists, so this option should be set to 'true'.
	  // TODO: Try another array based strategy: keep a sorted array of docIDs. When updating, only need to find the spot for the new docID and re-sort the array up to that spot.
	  // TODO: Can also use a linked list for this. Then can just find the right spot, and do pointer changes. The locality here wouldn't be too good though.
	  const bool kUseArrayInsteadOfHeapList = true;

	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25', option0
	  // const float kBm25K1 =  2.0;  // k1
	  // const float kBm25B = 0.75;   // b

	  // Buettcher, Clarke setting for trec2004, option1
	  // const float kBm25K1 =  1.2;  // k1
	  // const float kBm25B = 0.5;   // b

	  // own setting, option2
	  // const float kBm25K1 =  1.2;  // k1
	  // const float kBm25B = 0.75;   // b

	  // own setting, option3
	  const float kBm25K1 = 2;  // k1
	  const float kBm25B = 0.5;   // b

	  // own setting, option4
	  // const float kBm25K1 = 2;  // k1
	  // const float kBm25B = 0.3;   // b

	  // own setting, option5
	  // const float kBm25K1 = 2;  // k1
	  // const float kBm25B = 0.1;   // b

	  // own setting, option6
	  // const float kBm25K1 = 2;  // k1
	  // const float kBm25B = 0.2;   // b

	  // We can precompute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float original_bm25_sum = 0;  // The BM25 sum for the current document we're processing.
	  float actual_bm25_sum = 0;
	  SCORES_SET scores_set;
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in OR semantics.
	  int doc_len;
	  uint32_t f_d_t;

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;
	  for (int i = 0; i < num_lists; ++i) {
		// If for the pruning project, then use the same overall statistics for all the pruned index.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
		// for debug ONLY.
		// cout << "i:" << i << " num_docs_t:" << num_docs_t << endl;
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }

	  // We use this to get the next lowest docID from all the lists.
	  pair<uint32_t, int> lists_curr_postings[num_lists];  // Using a variable length array here.
	  int num_lists_remaining = 0;  // The number of lists with postings remaining.
	  for (int i = 0; i < num_lists; ++i) {
	    uint32_t curr_doc_id;
	    if ((curr_doc_id = lists[i]->NextGEQ(0)) < ListData::kNoMoreDocs) {
	      lists_curr_postings[num_lists_remaining++] = make_pair(curr_doc_id, i);
	    }
	  }

	  if (num_lists_remaining > 0) {
	    if (!kUseArrayInsteadOfHeapList) {
	      // We use our own comparator, that only checks the docID part.
	      make_heap(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListMaxDocIdCompare());
	    }
	  } else {
	    return total_num_results;
	  }

	  // For the heap based method, the lowest element will always be the first element in the array.
	  // So we can keep 'top' constant since it's just a pointer to the first element and just push/pop the heap.
	  // For the array based method, we need to initialize it to the first element in the array, and then find the lowest value at the top of the while loop.
	  // We have to find the lowest element here as well, since we need to initialize 'curr_doc_id' to the right value before we start the loop.
	  pair<uint32_t, int>* top = &lists_curr_postings[0];
	  if (kUseArrayInsteadOfHeapList) {
	    for (int i = 1; i < num_lists_remaining; ++i) {
	      if (lists_curr_postings[i].first < top->first) {
	        top = &lists_curr_postings[i];
	      }
	    }
	  }

	  int i;
	  uint32_t curr_doc_id = top->first;  // Current docID we're processing the score for.

	  while (num_lists_remaining) {
	    if (kUseArrayInsteadOfHeapList) {
	      top = &lists_curr_postings[0];
	      for (i = 1; i < num_lists_remaining; ++i) {
	        if (lists_curr_postings[i].first < top->first) {
	          top = &lists_curr_postings[i];
	        }
	      }
	    }

	    if (kScoreCompleteDoc)
	    {
	      curr_doc_id = top->first;
	      actual_bm25_sum = 0;
	      original_bm25_sum = 0;
	      // init of the variable scores_set
		  scores_set.totalScore = 0.0;
		  scores_set.actualTotalScore = 0.0;

		  scores_set.doc_length = 0;
		  scores_set.docCandidateQualifyStatus = true;
		  scores_set.posting0QualifyStatus = true;
		  scores_set.posting1QualifyStatus = true;
		  scores_set.posting2QualifyStatus = true;
		  scores_set.posting3QualifyStatus = true;
		  scores_set.posting4QualifyStatus = true;
		  scores_set.posting5QualifyStatus = true;
		  scores_set.posting6QualifyStatus = true;
		  scores_set.posting7QualifyStatus = true;
		  scores_set.posting8QualifyStatus = true;
		  scores_set.posting9QualifyStatus = true;

		  scores_set.posting0RankInList = 0;
		  scores_set.posting1RankInList = 0;
		  scores_set.posting2RankInList = 0;
		  scores_set.posting3RankInList = 0;
		  scores_set.posting4RankInList = 0;
		  scores_set.posting5RankInList = 0;
		  scores_set.posting6RankInList = 0;
		  scores_set.posting7RankInList = 0;
		  scores_set.posting8RankInList = 0;
		  scores_set.posting9RankInList = 0;

		  scores_set.postingFirstProbabilities0 = 0.0;
		  scores_set.postingFirstProbabilities1 = 0.0;
		  scores_set.postingFirstProbabilities2 = 0.0;
		  scores_set.postingFirstProbabilities3 = 0.0;
		  scores_set.postingFirstProbabilities4 = 0.0;
		  scores_set.postingFirstProbabilities5 = 0.0;
		  scores_set.postingFirstProbabilities6 = 0.0;
		  scores_set.postingFirstProbabilities7 = 0.0;
		  scores_set.postingFirstProbabilities8 = 0.0;
		  scores_set.postingFirstProbabilities9 = 0.0;

		  scores_set.postingSecondANDThirdProbabilities0 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities1 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities2 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities3 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities4 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities5 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities6 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities7 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities8 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities9 = 0.0;

		  scores_set.postingThreeFactorProbabilities0 = 0.0;
		  scores_set.postingThreeFactorProbabilities1 = 0.0;
		  scores_set.postingThreeFactorProbabilities2 = 0.0;
		  scores_set.postingThreeFactorProbabilities3 = 0.0;
		  scores_set.postingThreeFactorProbabilities4 = 0.0;
		  scores_set.postingThreeFactorProbabilities5 = 0.0;
		  scores_set.postingThreeFactorProbabilities6 = 0.0;
		  scores_set.postingThreeFactorProbabilities7 = 0.0;
		  scores_set.postingThreeFactorProbabilities8 = 0.0;
		  scores_set.postingThreeFactorProbabilities9 = 0.0;

		  scores_set.posting0ScoreComponentPart1 = 0.0;
		  scores_set.posting1ScoreComponentPart1 = 0.0;
		  scores_set.posting2ScoreComponentPart1 = 0.0;
		  scores_set.posting3ScoreComponentPart1 = 0.0;
		  scores_set.posting4ScoreComponentPart1 = 0.0;
		  scores_set.posting5ScoreComponentPart1 = 0.0;
		  scores_set.posting6ScoreComponentPart1 = 0.0;
		  scores_set.posting7ScoreComponentPart1 = 0.0;
		  scores_set.posting8ScoreComponentPart1 = 0.0;
		  scores_set.posting9ScoreComponentPart1 = 0.0;

		  scores_set.posting0ScoreComponentPart2 = 0.0;
		  scores_set.posting1ScoreComponentPart2 = 0.0;
		  scores_set.posting2ScoreComponentPart2 = 0.0;
		  scores_set.posting3ScoreComponentPart2 = 0.0;
		  scores_set.posting4ScoreComponentPart2 = 0.0;
		  scores_set.posting5ScoreComponentPart2 = 0.0;
		  scores_set.posting6ScoreComponentPart2 = 0.0;
		  scores_set.posting7ScoreComponentPart2 = 0.0;
		  scores_set.posting8ScoreComponentPart2 = 0.0;
		  scores_set.posting9ScoreComponentPart2 = 0.0;

	      scores_set.postingScore0 = 0;
	      scores_set.postingScore1 = 0;
	      scores_set.postingScore2 = 0;
	      scores_set.postingScore3 = 0;
	      scores_set.postingScore4 = 0;
	      scores_set.postingScore5 = 0;
	      scores_set.postingScore6 = 0;
	      scores_set.postingScore7 = 0;
	      scores_set.postingScore8 = 0;
	      scores_set.postingScore9 = 0;

		  scores_set.lengthOfTheInvertedList0 = 0;
		  scores_set.lengthOfTheInvertedList1 = 0;
		  scores_set.lengthOfTheInvertedList2 = 0;
		  scores_set.lengthOfTheInvertedList3 = 0;
		  scores_set.lengthOfTheInvertedList4 = 0;
		  scores_set.lengthOfTheInvertedList5 = 0;
		  scores_set.lengthOfTheInvertedList6 = 0;
		  scores_set.lengthOfTheInvertedList7 = 0;
		  scores_set.lengthOfTheInvertedList8 = 0;
		  scores_set.lengthOfTheInvertedList9 = 0;

		  scores_set.postingTermFrequency0 = 0;
		  scores_set.postingTermFrequency1 = 0;
		  scores_set.postingTermFrequency2 = 0;
		  scores_set.postingTermFrequency3 = 0;
		  scores_set.postingTermFrequency4 = 0;
		  scores_set.postingTermFrequency5 = 0;
		  scores_set.postingTermFrequency6 = 0;
		  scores_set.postingTermFrequency7 = 0;
		  scores_set.postingTermFrequency8 = 0;
		  scores_set.postingTermFrequency9 = 0;

	      // for debug purposes.
	      // cout << curr_doc_id << " ";
	      // Can start searching from the position of 'top' since it'll be the first lowest element in the array.
	      while (top != &lists_curr_postings[num_lists_remaining]) {
	        if (top->first == curr_doc_id) {

	          // Compute BM25 score from frequencies.
	          f_d_t = lists[top->second]->GetFreq();
	          doc_len = index_reader_.document_map().GetDocumentLength(top->first);
	          scores_set.doc_length = doc_len;
	          partial_bm25 = idf_t[top->second] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
	          //debug option1:
	          //cout << "f_d_t:" << f_d_t << " doc_len:" << doc_len << " idf_t[i]:" << idf_t[i] << " partial_bm25:"<< partial_bm25 << " ";
	          //debug option2:
	          // for debug purposes.
	          // cout << partial_bm25 << " ";

	          // For OR semantics, all that needs to be ensured is that unqualified scores are NOT added to the BM25 sum.
	          // Every document still gets a chance at the top-k, even though unqualified posting scores are NOT added.
	          if(partial_bm25 < universal_threshold_socre_of_posting_){
	        	  // In this case, do NOTHING, because this posting score is NOT qualified.

	          }
	          else{
	        	  actual_bm25_sum += partial_bm25;
	          }
	          original_bm25_sum += partial_bm25;

	          if (top->second == 0){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting0QualifyStatus = false;
	        	  }

				  scores_set.postingScore0 = partial_bm25;
				  scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency0 = f_d_t;

	          }
	          else if (top->second == 1){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting1QualifyStatus = false;
	        	  }

				  scores_set.postingScore1 = partial_bm25;
				  scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency1 = f_d_t;

	          }
	          else if (top->second == 2){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting2QualifyStatus = false;
	        	  }

				  scores_set.postingScore2 = partial_bm25;
				  scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency2 = f_d_t;

	          }
	          else if (top->second == 3){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting3QualifyStatus = false;
	        	  }

				  scores_set.postingScore3 = partial_bm25;
				  scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency3 = f_d_t;


	          }
	          else if (top->second == 4){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting4QualifyStatus = false;
	        	  }

				  scores_set.postingScore4 = partial_bm25;
				  scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency4 = f_d_t;

	          }
	          else if (top->second == 5){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting5QualifyStatus = false;
	        	  }

				  scores_set.postingScore5 = partial_bm25;
				  scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency5 = f_d_t;


	          }
	          else if (top->second == 6){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting6QualifyStatus = false;
	        	  }

				  scores_set.postingScore6 = partial_bm25;
				  scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency6 = f_d_t;


	          }
	          else if (top->second == 7){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting7QualifyStatus = false;
	        	  }

				  scores_set.postingScore7 = partial_bm25;
				  scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency7 = f_d_t;


	          }
	          else if (top->second == 8){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting8QualifyStatus = false;
	        	  }

				  scores_set.postingScore8 = partial_bm25;
				  scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency8 = f_d_t;


	          }
	          else if (top->second == 9){
	        	  if(partial_bm25 <= universal_threshold_socre_of_posting_){
	        		  scores_set.posting9QualifyStatus = false;
	        	  }

				  scores_set.postingScore9 = partial_bm25;
				  scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency9 = f_d_t;


	          }

	          ++num_postings_scored_;

	          if ((top->first = lists[top->second]->NextGEQ(top->first + 1)) == ListData::kNoMoreDocs) {
	            // Need to compact the array by one.
	            // Just copy over the last value in the array and overwrite the top value, since we'll be removing it.
	            // Now, we can declare our list one shorter.
	            // If top happens to already point to the last value in the array, this step is superfluous.
	            --num_lists_remaining;
	            *top = lists_curr_postings[num_lists_remaining];
	            --top;
	          }
	        }
	        ++top;
	      }
	      // for debug purposes.
	      // cout << endl;
	      scores_set.totalScore = original_bm25_sum;
	      scores_set.actualTotalScore = actual_bm25_sum;
	      // TODO: this may have problems. Updated by Wei 2013/02/25
	      // For now this is just a simple check and may be problematic:
	      // if the bm25_sum == 0, this document will NOT show up in the result list;
	      // if the bm25_sum != 0, this document will be in the result list for sorting.


	      // version1
		  // Need to keep track of the top-k documents.
		  if (total_num_results < num_results) {
			// We insert a document if we don't have k documents yet.
			results[total_num_results] = make_pair(scores_set, curr_doc_id);
			push_heap(results, results + total_num_results + 1, ResultCompare2());
		  } else {
			if (scores_set.totalScore > results->first.totalScore) {
			  // We insert a document only if it's score is greater than the minimum scoring document in the heap.
			  pop_heap(results, results + num_results, ResultCompare2());
			  results[num_results - 1].first = scores_set;
			  results[num_results - 1].second = curr_doc_id;
			  push_heap(results, results + num_results, ResultCompare2());
			}
		  }
		  ++total_num_results;


	      /*
	      // version 2
	      if (original_bm25_sum == 0.0){
	    	  // This document will just passed.
	      }
	      else{
	          // Need to keep track of the top-k documents.
	          if (total_num_results < num_results) {
	            // We insert a document if we don't have k documents yet.
	            results[total_num_results] = make_pair(scores_set, curr_doc_id);
	            push_heap(results, results + total_num_results + 1, ResultCompare2());
	          } else {
	            if (scores_set.totalScore > results->first.totalScore) {
	              // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	              pop_heap(results, results + num_results, ResultCompare2());
	              results[num_results - 1].first = scores_set;
	              results[num_results - 1].second = curr_doc_id;
	              push_heap(results, results + num_results, ResultCompare2());
	            }
	          }
	          ++total_num_results;
	      }
	      */

	      /*
	      // version 3 for the trec evaluation
	      if (original_bm25_sum == 0.0){
	    	  // This document will just passed.
	      }
	      else{
	          // Need to keep track of the top-k documents.
	          if (total_num_results < num_results) {
	            // We insert a document if we don't have k documents yet.
	            results[total_num_results] = make_pair(scores_set, curr_doc_id);
	            push_heap(results, results + total_num_results + 1, ResultCompare3());
	          } else {
	            if (scores_set.actualTotalScore > results->first.actualTotalScore) {
	              // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	              pop_heap(results, results + num_results, ResultCompare3());
	              results[num_results - 1].first = scores_set;
	              results[num_results - 1].second = curr_doc_id;
	              push_heap(results, results + num_results, ResultCompare3());
	            }
	          }
	          ++total_num_results;
	      }
	      */

	    }
	    else
	    {
	    	// The kScoreCompleteDoc = false case was deleted; this situation is not handled here.
	    }
	  }

	  // Sort top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  return total_num_results;
  }
  else if (computation_method == 2){
	  // Setting this option to 'true' makes a considerable difference in average query latency (> 100ms).
	  // When we score the complete doc, we first find the lowest docID in the array, and then scan the array for that docID, and completely score it.
	  // All lists from which the docID was scored have their list pointers moved forward.
	  // When we don't score the complete doc, at each turn of the while loop, we find a partial score of the lowest docID posting.
	  // We add these together for a particular docID to get the complete score -- but it requires several iterations of the main while loop.
	  // This is less efficient, since we have to do a complete linear search through the array for every posting.
	  // On the other hand, when we score the complete doc right away, we only have to do one more linear search through all postings to score all the lists.
	  // Clearly, if the majority of the docIDs are present in more than one list, we'll be getting a speedup.
	  const bool kScoreCompleteDoc = true;

	  // Use an array instead of a heap for selecting the list with the lowest docID at each step.
	  // Using a heap for picking the list with the lowest docID is only implemented for when 'kScoreCompleteDoc' is false.
	  // For compatibility with 'kScoreCompleteDoc' equal to true, you'd need to use the heap to choose the next list to score, instead of iterating through the array, which is what's done now.
	  // Array based method is faster than the heap based method for choosing the lowest docID from all the lists, so this option should be set to 'true'.
	  // TODO: Try another array based strategy: keep a sorted array of docIDs. When updating, only need to find the spot for the new docID and re-sort the array up to that spot.
	  // TODO: Can also use a linked list for this. Then can just find the right spot, and do pointer changes. The locality here wouldn't be too good though.
	  const bool kUseArrayInsteadOfHeapList = true;



	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // We can precompute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float original_bm25_sum = 0;  // The BM25 sum for the current document we're processing.
	  float actual_bm25_sum = 0;
	  SCORES_SET scores_set;
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in OR semantics.
	  int doc_len;
	  uint32_t f_d_t;

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;
	  for (int i = 0; i < num_lists; ++i) {
		// If for the pruning project, then use the same overall statistics for all the pruned index.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
		// for debug ONLY.
		// cout << "i:" << i << " num_docs_t:" << num_docs_t << endl;
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }

	  // We use this to get the next lowest docID from all the lists.
	  pair<uint32_t, int> lists_curr_postings[num_lists];  // Using a variable length array here.
	  int num_lists_remaining = 0;  // The number of lists with postings remaining.
	  for (int i = 0; i < num_lists; ++i) {
	    uint32_t curr_doc_id;
	    if ((curr_doc_id = lists[i]->NextGEQ(0)) < ListData::kNoMoreDocs) {
	      lists_curr_postings[num_lists_remaining++] = make_pair(curr_doc_id, i);
	    }
	  }

	  if (num_lists_remaining > 0) {
	    if (!kUseArrayInsteadOfHeapList) {
	      // We use our own comparator, that only checks the docID part.
	      make_heap(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListMaxDocIdCompare());
	    }
	  } else {
	    return total_num_results;
	  }

	  // For the heap based method, the lowest element will always be the first element in the array.
	  // So we can keep 'top' constant since it's just a pointer to the first element and just push/pop the heap.
	  // For the array based method, we need to initialize it to the first element in the array, and then find the lowest value at the top of the while loop.
	  // We have to find the lowest element here as well, since we need to initialize 'curr_doc_id' to the right value before we start the loop.
	  pair<uint32_t, int>* top = &lists_curr_postings[0];
	  if (kUseArrayInsteadOfHeapList) {
	    for (int i = 1; i < num_lists_remaining; ++i) {
	      if (lists_curr_postings[i].first < top->first) {
	        top = &lists_curr_postings[i];
	      }
	    }
	  }

	  int i;
	  uint32_t curr_doc_id = top->first;  // Current docID we're processing the score for.
	  float threshold_socre_for_current_list = 0.0;	// for the TCP method, there is a threshold cut for every list
	  while (num_lists_remaining) {
	    if (kUseArrayInsteadOfHeapList) {
	      top = &lists_curr_postings[0];
	      for (i = 1; i < num_lists_remaining; ++i) {
	        if (lists_curr_postings[i].first < top->first) {
	          top = &lists_curr_postings[i];
	        }
	      }
	    }

	    if (kScoreCompleteDoc)
	    {
	      curr_doc_id = top->first;
	      actual_bm25_sum = 0;
	      original_bm25_sum = 0;
	      // init of the variable scores_set
		  scores_set.totalScore = 0.0;
		  scores_set.actualTotalScore = 0.0;
		  scores_set.doc_length = 0;

		  scores_set.docCandidateQualifyStatus = true;
		  scores_set.posting0QualifyStatus = true;
		  scores_set.posting1QualifyStatus = true;
		  scores_set.posting2QualifyStatus = true;
		  scores_set.posting3QualifyStatus = true;
		  scores_set.posting4QualifyStatus = true;
		  scores_set.posting5QualifyStatus = true;
		  scores_set.posting6QualifyStatus = true;
		  scores_set.posting7QualifyStatus = true;
		  scores_set.posting8QualifyStatus = true;
		  scores_set.posting9QualifyStatus = true;

		  scores_set.posting0RankInList = 0;
		  scores_set.posting1RankInList = 0;
		  scores_set.posting2RankInList = 0;
		  scores_set.posting3RankInList = 0;
		  scores_set.posting4RankInList = 0;
		  scores_set.posting5RankInList = 0;
		  scores_set.posting6RankInList = 0;
		  scores_set.posting7RankInList = 0;
		  scores_set.posting8RankInList = 0;
		  scores_set.posting9RankInList = 0;

		  scores_set.postingFirstProbabilities0 = 0.0;
		  scores_set.postingFirstProbabilities1 = 0.0;
		  scores_set.postingFirstProbabilities2 = 0.0;
		  scores_set.postingFirstProbabilities3 = 0.0;
		  scores_set.postingFirstProbabilities4 = 0.0;
		  scores_set.postingFirstProbabilities5 = 0.0;
		  scores_set.postingFirstProbabilities6 = 0.0;
		  scores_set.postingFirstProbabilities7 = 0.0;
		  scores_set.postingFirstProbabilities8 = 0.0;
		  scores_set.postingFirstProbabilities9 = 0.0;

		  scores_set.postingSecondANDThirdProbabilities0 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities1 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities2 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities3 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities4 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities5 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities6 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities7 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities8 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities9 = 0.0;

		  scores_set.postingThreeFactorProbabilities0 = 0.0;
		  scores_set.postingThreeFactorProbabilities1 = 0.0;
		  scores_set.postingThreeFactorProbabilities2 = 0.0;
		  scores_set.postingThreeFactorProbabilities3 = 0.0;
		  scores_set.postingThreeFactorProbabilities4 = 0.0;
		  // Zero the remaining per-term "three factor probability" slots (terms 5-9);
		  // slots 0-4 are presumably cleared just above this excerpt -- the identical
		  // full reset sequence appears in the computation_method == 4 branch below.
		  scores_set.postingThreeFactorProbabilities5 = 0.0;
		  scores_set.postingThreeFactorProbabilities6 = 0.0;
		  scores_set.postingThreeFactorProbabilities7 = 0.0;
		  scores_set.postingThreeFactorProbabilities8 = 0.0;
		  scores_set.postingThreeFactorProbabilities9 = 0.0;

		  // Zero the per-term score component (part 1) slots; SCORES_SET exposes one
		  // numbered field per query term, so the scoring code supports up to 10 terms.
		  scores_set.posting0ScoreComponentPart1 = 0.0;
		  scores_set.posting1ScoreComponentPart1 = 0.0;
		  scores_set.posting2ScoreComponentPart1 = 0.0;
		  scores_set.posting3ScoreComponentPart1 = 0.0;
		  scores_set.posting4ScoreComponentPart1 = 0.0;
		  scores_set.posting5ScoreComponentPart1 = 0.0;
		  scores_set.posting6ScoreComponentPart1 = 0.0;
		  scores_set.posting7ScoreComponentPart1 = 0.0;
		  scores_set.posting8ScoreComponentPart1 = 0.0;
		  scores_set.posting9ScoreComponentPart1 = 0.0;

		  // Zero the per-term score component (part 2) slots.
		  scores_set.posting0ScoreComponentPart2 = 0.0;
		  scores_set.posting1ScoreComponentPart2 = 0.0;
		  scores_set.posting2ScoreComponentPart2 = 0.0;
		  scores_set.posting3ScoreComponentPart2 = 0.0;
		  scores_set.posting4ScoreComponentPart2 = 0.0;
		  scores_set.posting5ScoreComponentPart2 = 0.0;
		  scores_set.posting6ScoreComponentPart2 = 0.0;
		  scores_set.posting7ScoreComponentPart2 = 0.0;
		  scores_set.posting8ScoreComponentPart2 = 0.0;
		  scores_set.posting9ScoreComponentPart2 = 0.0;

	      // Zero the per-term partial BM25 scores; filled in by the scoring loop below.
	      scores_set.postingScore0 = 0;
	      scores_set.postingScore1 = 0;
	      scores_set.postingScore2 = 0;
	      scores_set.postingScore3 = 0;
	      scores_set.postingScore4 = 0;
	      scores_set.postingScore5 = 0;
	      scores_set.postingScore6 = 0;
	      scores_set.postingScore7 = 0;
	      scores_set.postingScore8 = 0;
	      scores_set.postingScore9 = 0;

		  // Zero the per-term inverted list lengths; copied from
		  // lengthOfTheInvertedListForThisTerm[] when the term's posting is scored.
		  scores_set.lengthOfTheInvertedList0 = 0;
		  scores_set.lengthOfTheInvertedList1 = 0;
		  scores_set.lengthOfTheInvertedList2 = 0;
		  scores_set.lengthOfTheInvertedList3 = 0;
		  scores_set.lengthOfTheInvertedList4 = 0;
		  scores_set.lengthOfTheInvertedList5 = 0;
		  scores_set.lengthOfTheInvertedList6 = 0;
		  scores_set.lengthOfTheInvertedList7 = 0;
		  scores_set.lengthOfTheInvertedList8 = 0;
		  scores_set.lengthOfTheInvertedList9 = 0;

		  // Zero the per-term within-document frequencies (f_d_t values).
		  scores_set.postingTermFrequency0 = 0;
		  scores_set.postingTermFrequency1 = 0;
		  scores_set.postingTermFrequency2 = 0;
		  scores_set.postingTermFrequency3 = 0;
		  scores_set.postingTermFrequency4 = 0;
		  scores_set.postingTermFrequency5 = 0;
		  scores_set.postingTermFrequency6 = 0;
		  scores_set.postingTermFrequency7 = 0;
		  scores_set.postingTermFrequency8 = 0;
		  scores_set.postingTermFrequency9 = 0;

	      // for debug purposes.
	      // cout << curr_doc_id << " ";
	      // Can start searching from the position of 'top' since it'll be the first lowest element in the array.
	      // Scan the remaining lists and score every posting whose docID equals curr_doc_id.
	      while (top != &lists_curr_postings[num_lists_remaining]) {
	        if (top->first == curr_doc_id) {

	          // Compute BM25 score from frequencies.
	          f_d_t = lists[top->second]->GetFreq();
	          doc_len = index_reader_.document_map().GetDocumentLength(top->first);
	          scores_set.doc_length = doc_len;
	          partial_bm25 = idf_t[top->second] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
	          //debug option1:
	          //cout << "f_d_t:" << f_d_t << " doc_len:" << doc_len << " idf_t[i]:" << idf_t[i] << " partial_bm25:"<< partial_bm25 << " ";
	          //debug option2:
	          // for debug purposes.
	          // cout << partial_bm25 << " ";

	          // for OR semantics, all I need to control is that unqualified score has NOT been added to the sum of the bm25 score.
	          // every document is still getting a chance into to the top-k even those unqualified documents are NOT added.
	          // NOTE(review): 'socre' is a misspelling of 'score' in this variable name;
	          // renaming is out of scope for a comment-only pass.
	          threshold_socre_for_current_list = term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[ top->second ].first ][indexPositionOfCorrespondingThresholdToUse_];
	          if(partial_bm25 <= threshold_socre_for_current_list){
	        	  // Posting at or below the per-list threshold: it is NOT added to the
	        	  // qualified (actual) sum, but it still counts toward original_bm25_sum
	        	  // and is still recorded per-term below.
	          }
	          else{
	        	  actual_bm25_sum += partial_bm25;
	          }
	          original_bm25_sum += partial_bm25;

	          // NOTE(review): the 10-way branch below is structurally identical per term
	          // index; it exists because SCORES_SET exposes numbered fields
	          // (postingScore0..9, etc.) rather than arrays.
	          if (top->second == 0){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting0QualifyStatus = false;
	        	  }

				  scores_set.postingScore0 = partial_bm25;
				  scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency0 = f_d_t;

	          }
	          else if (top->second == 1){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting1QualifyStatus = false;
	        	  }

				  scores_set.postingScore1 = partial_bm25;
				  scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency1 = f_d_t;

	          }
	          else if (top->second == 2){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting2QualifyStatus = false;
	        	  }

				  scores_set.postingScore2 = partial_bm25;
				  scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency2 = f_d_t;

	          }
	          else if (top->second == 3){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting3QualifyStatus = false;
	        	  }

				  scores_set.postingScore3 = partial_bm25;
				  scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency3 = f_d_t;


	          }
	          else if (top->second == 4){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting4QualifyStatus = false;
	        	  }

				  scores_set.postingScore4 = partial_bm25;
				  scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency4 = f_d_t;

	          }
	          else if (top->second == 5){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting5QualifyStatus = false;
	        	  }

				  scores_set.postingScore5 = partial_bm25;
				  scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency5 = f_d_t;


	          }
	          else if (top->second == 6){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting6QualifyStatus = false;
	        	  }

				  scores_set.postingScore6 = partial_bm25;
				  scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency6 = f_d_t;


	          }
	          else if (top->second == 7){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting7QualifyStatus = false;
	        	  }

				  scores_set.postingScore7 = partial_bm25;
				  scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency7 = f_d_t;


	          }
	          else if (top->second == 8){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting8QualifyStatus = false;
	        	  }

				  scores_set.postingScore8 = partial_bm25;
				  scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency8 = f_d_t;


	          }
	          else if (top->second == 9){
	        	  if(partial_bm25 <= threshold_socre_for_current_list){
	        		  scores_set.posting9QualifyStatus = false;
	        	  }

				  scores_set.postingScore9 = partial_bm25;
				  scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency9 = f_d_t;


	          }

	          ++num_postings_scored_;

	          // Advance this list past the current docID; NextGEQ returning
	          // kNoMoreDocs means the list is exhausted.
	          if ((top->first = lists[top->second]->NextGEQ(top->first + 1)) == ListData::kNoMoreDocs) {
	            // Need to compact the array by one.
	            // Just copy over the last value in the array and overwrite the top value, since we'll be removing it.
	            // Now, we can declare our list one shorter.
	            // If top happens to already point to the last value in the array, this step is superfluous.
	            // The '--top' below cancels the loop's '++top' so the element copied
	            // into this slot is examined on the next iteration.
	            --num_lists_remaining;
	            *top = lists_curr_postings[num_lists_remaining];
	            --top;
	          }
	        }
	        ++top;
	      }
	      // for debug purposes.
	      // cout << endl;
	      // Publish both sums for this document: totalScore includes every posting,
	      // actualTotalScore includes only postings that passed the threshold cut.
	      scores_set.totalScore = original_bm25_sum;
	      scores_set.actualTotalScore = actual_bm25_sum;
	      // TODO: may have some problems. Updated by Wei 2013/02/25
	      // Now, it is just simple checking and may have some problem
	      // If the bm25_sum == 0, then this document will NOT show up in the result list
	      // If the bm25_sum != 0, then this document will be in the result list for sorting

	      /*
	      // version1
		  // Need to keep track of the top-k documents.
		  if (total_num_results < num_results) {
			// We insert a document if we don't have k documents yet.
			results[total_num_results] = make_pair(scores_set, curr_doc_id);
			push_heap(results, results + total_num_results + 1, ResultCompare2());
		  } else {
			if (scores_set.totalScore > results->first.totalScore) {
			  // We insert a document only if it's score is greater than the minimum scoring document in the heap.
			  pop_heap(results, results + num_results, ResultCompare2());
			  results[num_results - 1].first = scores_set;
			  results[num_results - 1].second = curr_doc_id;
			  push_heap(results, results + num_results, ResultCompare2());
			}
		  }
		  ++total_num_results;
		  */

	      /*
	      // version 2
	      if (original_bm25_sum == 0.0){
	    	  // This document will just passed.
	      }
	      else{
	          // Need to keep track of the top-k documents.
	          if (total_num_results < num_results) {
	            // We insert a document if we don't have k documents yet.
	            results[total_num_results] = make_pair(scores_set, curr_doc_id);
	            push_heap(results, results + total_num_results + 1, ResultCompare2());
	          } else {
	            if (scores_set.totalScore > results->first.totalScore) {
	              // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	              pop_heap(results, results + num_results, ResultCompare2());
	              results[num_results - 1].first = scores_set;
	              results[num_results - 1].second = curr_doc_id;
	              push_heap(results, results + num_results, ResultCompare2());
	            }
	          }
	          ++total_num_results;
	      }
	      */

	      // for OR trec evaluation
	      if (original_bm25_sum == 0.0){
	    	  // Document scored zero overall -- it is skipped entirely.
	      }
	      else{
	          // Need to keep track of the top-k documents. The comparison against
	          // results->first.actualTotalScore implies ResultCompare3 (defined
	          // elsewhere) keeps the lowest-scoring retained document at the root.
	          if (total_num_results < num_results) {
	            // We insert a document if we don't have k documents yet.
	            results[total_num_results] = make_pair(scores_set, curr_doc_id);
	            push_heap(results, results + total_num_results + 1, ResultCompare3());
	          } else {
	            if (scores_set.actualTotalScore > results->first.actualTotalScore) {
	              // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	              pop_heap(results, results + num_results, ResultCompare3());
	              results[num_results - 1].first = scores_set;
	              results[num_results - 1].second = curr_doc_id;
	              push_heap(results, results + num_results, ResultCompare3());
	            }
	          }
	          ++total_num_results;
	      }

	    }
	    else
	    {
	    	// The partial-scoring (kScoreCompleteDoc == false) path is intentionally
	    	// not implemented in this branch.
	    }
	  }

	  // Sort top-k results in descending order by document score.
	  // sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  sort(results, results + min(num_results, total_num_results), ResultCompare3());

	  // Note: total_num_results counts every document considered and may exceed
	  // num_results; callers clamp with min(), as the sort above does.
	  return total_num_results;
  }
  else if (computation_method == 4){
	  // computation_method == 4: OR-mode BM25 scoring with a per-list threshold cut,
	  // like the branch above, except that a docID present in the term's queryView_
	  // entry always qualifies even when its partial score is below the threshold
	  // (see the 'queryView_[...].count(curr_doc_id)' tests in the scoring loop).
	  // Setting this option to 'true' makes a considerable difference in average query latency (> 100ms).
	  // When we score the complete doc, we first find the lowest docID in the array, and then scan the array for that docID, and completely score it.
	  // All lists from which the docID was scored have their list pointers moved forward.
	  // When we don't score the complete doc, at each turn of the while loop, we find a partial score of the lowest docID posting.
	  // We add these together for a particular docID to get the complete score -- but it requires several iterations of the main while loop.
	  // This is less efficient, since we have to do a complete linear search through the array for every posting.
	  // On the other hand, when we score the complete doc right away, we only have to do one more linear search through all postings to score all the lists.
	  // Clearly, if the majority of the docIDs are present in more than one list, we'll be getting a speedup.
	  const bool kScoreCompleteDoc = true;

	  // Use an array instead of a heap for selecting the list with the lowest docID at each step.
	  // Using a heap for picking the list with the lowest docID is only implemented for when 'kScoreCompleteDoc' is false.
	  // For compatibility with 'kScoreCompleteDoc' equal to true, you'd need to use the heap to choose the next list to score, instead of iterating through the array, which is what's done now.
	  // Array based method is faster than the heap based method for choosing the lowest docID from all the lists, so this option should be set to 'true'.
	  // TODO: Try another array based strategy: keep a sorted array of docIDs. When updating, only need to find the spot for the new docID and re-sort the array up to that spot.
	  // TODO: Can also use a linked list for this. Then can just find the right spot, and do pointer changes. The locality here wouldn't be too good though.
	  const bool kUseArrayInsteadOfHeapList = true;



	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // We can precompute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float original_bm25_sum = 0;  // The BM25 sum for the current document we're processing.
	  float actual_bm25_sum = 0;    // Sum of only the qualified (threshold-passing or query-view) postings.
	  SCORES_SET scores_set;        // Per-document score breakdown recorded for the result list.
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in OR semantics.
	  int doc_len;
	  uint32_t f_d_t;

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;
	  for (int i = 0; i < num_lists; ++i) {
		// If for the pruning project, then use the same overall statistics for all the pruned index.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
		// for debug ONLY.
		// cout << "i:" << i << " num_docs_t:" << num_docs_t << endl;
	    // The '1 +' inside the log keeps the IDF strictly positive even for very frequent terms.
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }

	  // We use this to get the next lowest docID from all the lists.
	  // Each entry pairs (current docID, list index); exhausted lists are never inserted.
	  pair<uint32_t, int> lists_curr_postings[num_lists];  // Using a variable length array here.
	  int num_lists_remaining = 0;  // The number of lists with postings remaining.
	  for (int i = 0; i < num_lists; ++i) {
	    uint32_t curr_doc_id;
	    if ((curr_doc_id = lists[i]->NextGEQ(0)) < ListData::kNoMoreDocs) {
	      lists_curr_postings[num_lists_remaining++] = make_pair(curr_doc_id, i);
	    }
	  }

	  if (num_lists_remaining > 0) {
	    if (!kUseArrayInsteadOfHeapList) {
	      // We use our own comparator, that only checks the docID part.
	      make_heap(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListMaxDocIdCompare());
	    }
	  } else {
	    // All lists are empty; nothing to score.
	    return total_num_results;
	  }

	  // For the heap based method, the lowest element will always be the first element in the array.
	  // So we can keep 'top' constant since it's just a pointer to the first element and just push/pop the heap.
	  // For the array based method, we need to initialize it to the first element in the array, and then find the lowest value at the top of the while loop.
	  // We have to find the lowest element here as well, since we need to initialize 'curr_doc_id' to the right value before we start the loop.
	  pair<uint32_t, int>* top = &lists_curr_postings[0];
	  if (kUseArrayInsteadOfHeapList) {
	    for (int i = 1; i < num_lists_remaining; ++i) {
	      if (lists_curr_postings[i].first < top->first) {
	        top = &lists_curr_postings[i];
	      }
	    }
	  }

	  int i;  // Loop index reused by the lowest-docID scans inside the while loop below.
	  uint32_t curr_doc_id = top->first;  // Current docID we're processing the score for.
	  // NOTE(review): 'socre' is a misspelling of 'score'; renaming is out of scope for a comment-only pass.
	  float threshold_socre_for_current_list = 0.0;	// for the TCP method, there is a threshold cut for every list
	  while (num_lists_remaining) {
	    if (kUseArrayInsteadOfHeapList) {
	      // Linear scan for the list whose current posting has the lowest docID.
	      top = &lists_curr_postings[0];
	      for (i = 1; i < num_lists_remaining; ++i) {
	        if (lists_curr_postings[i].first < top->first) {
	          top = &lists_curr_postings[i];
	        }
	      }
	    }

	    if (kScoreCompleteDoc)
	    {
	      curr_doc_id = top->first;
	      // Reset the per-document accumulators and every per-term slot in
	      // scores_set before scoring this candidate document.
	      actual_bm25_sum = 0;
	      original_bm25_sum = 0;
	      // init of the variable scores_set
		  scores_set.totalScore = 0.0;
		  scores_set.actualTotalScore = 0.0;
		  scores_set.doc_length = 0;

		  // Qualification flags: cleared per-term by the scoring loop when a posting
		  // fails the threshold cut (and is absent from the query view).
		  scores_set.docCandidateQualifyStatus = true;
		  scores_set.posting0QualifyStatus = true;
		  scores_set.posting1QualifyStatus = true;
		  scores_set.posting2QualifyStatus = true;
		  scores_set.posting3QualifyStatus = true;
		  scores_set.posting4QualifyStatus = true;
		  scores_set.posting5QualifyStatus = true;
		  scores_set.posting6QualifyStatus = true;
		  scores_set.posting7QualifyStatus = true;
		  scores_set.posting8QualifyStatus = true;
		  scores_set.posting9QualifyStatus = true;

		  scores_set.posting0RankInList = 0;
		  scores_set.posting1RankInList = 0;
		  scores_set.posting2RankInList = 0;
		  scores_set.posting3RankInList = 0;
		  scores_set.posting4RankInList = 0;
		  scores_set.posting5RankInList = 0;
		  scores_set.posting6RankInList = 0;
		  scores_set.posting7RankInList = 0;
		  scores_set.posting8RankInList = 0;
		  scores_set.posting9RankInList = 0;

		  scores_set.postingFirstProbabilities0 = 0.0;
		  scores_set.postingFirstProbabilities1 = 0.0;
		  scores_set.postingFirstProbabilities2 = 0.0;
		  scores_set.postingFirstProbabilities3 = 0.0;
		  scores_set.postingFirstProbabilities4 = 0.0;
		  scores_set.postingFirstProbabilities5 = 0.0;
		  scores_set.postingFirstProbabilities6 = 0.0;
		  scores_set.postingFirstProbabilities7 = 0.0;
		  scores_set.postingFirstProbabilities8 = 0.0;
		  scores_set.postingFirstProbabilities9 = 0.0;

		  scores_set.postingSecondANDThirdProbabilities0 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities1 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities2 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities3 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities4 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities5 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities6 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities7 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities8 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities9 = 0.0;

		  scores_set.postingThreeFactorProbabilities0 = 0.0;
		  scores_set.postingThreeFactorProbabilities1 = 0.0;
		  scores_set.postingThreeFactorProbabilities2 = 0.0;
		  scores_set.postingThreeFactorProbabilities3 = 0.0;
		  scores_set.postingThreeFactorProbabilities4 = 0.0;
		  scores_set.postingThreeFactorProbabilities5 = 0.0;
		  scores_set.postingThreeFactorProbabilities6 = 0.0;
		  scores_set.postingThreeFactorProbabilities7 = 0.0;
		  scores_set.postingThreeFactorProbabilities8 = 0.0;
		  scores_set.postingThreeFactorProbabilities9 = 0.0;

		  scores_set.posting0ScoreComponentPart1 = 0.0;
		  scores_set.posting1ScoreComponentPart1 = 0.0;
		  scores_set.posting2ScoreComponentPart1 = 0.0;
		  scores_set.posting3ScoreComponentPart1 = 0.0;
		  scores_set.posting4ScoreComponentPart1 = 0.0;
		  scores_set.posting5ScoreComponentPart1 = 0.0;
		  scores_set.posting6ScoreComponentPart1 = 0.0;
		  scores_set.posting7ScoreComponentPart1 = 0.0;
		  scores_set.posting8ScoreComponentPart1 = 0.0;
		  scores_set.posting9ScoreComponentPart1 = 0.0;

		  scores_set.posting0ScoreComponentPart2 = 0.0;
		  scores_set.posting1ScoreComponentPart2 = 0.0;
		  scores_set.posting2ScoreComponentPart2 = 0.0;
		  scores_set.posting3ScoreComponentPart2 = 0.0;
		  scores_set.posting4ScoreComponentPart2 = 0.0;
		  scores_set.posting5ScoreComponentPart2 = 0.0;
		  scores_set.posting6ScoreComponentPart2 = 0.0;
		  scores_set.posting7ScoreComponentPart2 = 0.0;
		  scores_set.posting8ScoreComponentPart2 = 0.0;
		  scores_set.posting9ScoreComponentPart2 = 0.0;

	      // Per-term partial BM25 scores; filled in by the scoring loop below.
	      scores_set.postingScore0 = 0;
	      scores_set.postingScore1 = 0;
	      scores_set.postingScore2 = 0;
	      scores_set.postingScore3 = 0;
	      scores_set.postingScore4 = 0;
	      scores_set.postingScore5 = 0;
	      scores_set.postingScore6 = 0;
	      scores_set.postingScore7 = 0;
	      scores_set.postingScore8 = 0;
	      scores_set.postingScore9 = 0;

		  scores_set.lengthOfTheInvertedList0 = 0;
		  scores_set.lengthOfTheInvertedList1 = 0;
		  scores_set.lengthOfTheInvertedList2 = 0;
		  scores_set.lengthOfTheInvertedList3 = 0;
		  scores_set.lengthOfTheInvertedList4 = 0;
		  scores_set.lengthOfTheInvertedList5 = 0;
		  scores_set.lengthOfTheInvertedList6 = 0;
		  scores_set.lengthOfTheInvertedList7 = 0;
		  scores_set.lengthOfTheInvertedList8 = 0;
		  scores_set.lengthOfTheInvertedList9 = 0;

		  scores_set.postingTermFrequency0 = 0;
		  scores_set.postingTermFrequency1 = 0;
		  scores_set.postingTermFrequency2 = 0;
		  scores_set.postingTermFrequency3 = 0;
		  scores_set.postingTermFrequency4 = 0;
		  scores_set.postingTermFrequency5 = 0;
		  scores_set.postingTermFrequency6 = 0;
		  scores_set.postingTermFrequency7 = 0;
		  scores_set.postingTermFrequency8 = 0;
		  scores_set.postingTermFrequency9 = 0;

	      // for debug purposes.
	      // cout << curr_doc_id << " ";
	      // Can start searching from the position of 'top' since it'll be the first lowest element in the array.
	      // Scan the remaining lists and score every posting whose docID equals curr_doc_id.
	      while (top != &lists_curr_postings[num_lists_remaining]) {
	        if (top->first == curr_doc_id) {

	          // Compute BM25 score from frequencies.
	          f_d_t = lists[top->second]->GetFreq();
	          doc_len = index_reader_.document_map().GetDocumentLength(top->first);
	          scores_set.doc_length = doc_len;
	          partial_bm25 = idf_t[top->second] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
	          //debug option1:
	          //cout << "f_d_t:" << f_d_t << " doc_len:" << doc_len << " idf_t[i]:" << idf_t[i] << " partial_bm25:"<< partial_bm25 << " ";
	          //debug option2:
	          // for debug purposes.
	          // cout << partial_bm25 << " ";

	          // for OR semantics, all I need to control is that unqualified score has NOT been added to the sum of the bm25 score.
	          // every document is still getting a chance into to the top-k even those unqualified documents are NOT added.
	          // Unlike the branch above, a docID found in this term's queryView_ entry
	          // always qualifies, even when its score is at or below the threshold.
	          // ('and' is the ISO C++ alternative token for '&&'.)
	          threshold_socre_for_current_list = term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[ top->second ].first ][indexPositionOfCorrespondingThresholdToUse_];
	          if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        	  // Unqualified posting: excluded from actual_bm25_sum, but it still
	        	  // counts toward original_bm25_sum and is still recorded per-term below.
	          }
	          else{
	        	  actual_bm25_sum += partial_bm25;
	          }
	          original_bm25_sum += partial_bm25;

	          // NOTE(review): the 10-way branch below is structurally identical per term
	          // index; it exists because SCORES_SET exposes numbered fields rather than arrays.
	          if (top->second == 0){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting0QualifyStatus = false;
	        	  }

				  scores_set.postingScore0 = partial_bm25;
				  scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency0 = f_d_t;

	          }
	          else if (top->second == 1){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting1QualifyStatus = false;
	        	  }

				  scores_set.postingScore1 = partial_bm25;
				  scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency1 = f_d_t;

	          }
	          else if (top->second == 2){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting2QualifyStatus = false;
	        	  }

				  scores_set.postingScore2 = partial_bm25;
				  scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency2 = f_d_t;

	          }
	          else if (top->second == 3){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting3QualifyStatus = false;
	        	  }

				  scores_set.postingScore3 = partial_bm25;
				  scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency3 = f_d_t;


	          }
	          else if (top->second == 4){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting4QualifyStatus = false;
	        	  }

				  scores_set.postingScore4 = partial_bm25;
				  scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency4 = f_d_t;

	          }
	          else if (top->second == 5){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting5QualifyStatus = false;
	        	  }

				  scores_set.postingScore5 = partial_bm25;
				  scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency5 = f_d_t;


	          }
	          else if (top->second == 6){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting6QualifyStatus = false;
	        	  }

				  scores_set.postingScore6 = partial_bm25;
				  scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency6 = f_d_t;


	          }
	          else if (top->second == 7){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting7QualifyStatus = false;
	        	  }

				  scores_set.postingScore7 = partial_bm25;
				  scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency7 = f_d_t;


	          }
	          else if (top->second == 8){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting8QualifyStatus = false;
	        	  }

				  scores_set.postingScore8 = partial_bm25;
				  scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency8 = f_d_t;


	          }
	          else if (top->second == 9){
	        	  if(partial_bm25 <= threshold_socre_for_current_list and queryView_[queryTermPostionIndexPairs_[top->second].first].count(curr_doc_id) <= 0 ){
	        		  scores_set.posting9QualifyStatus = false;
	        	  }

				  scores_set.postingScore9 = partial_bm25;
				  scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency9 = f_d_t;


	          }

	          ++num_postings_scored_;

	          // Advance this list past the current docID; NextGEQ returning
	          // kNoMoreDocs means the list is exhausted.
	          if ((top->first = lists[top->second]->NextGEQ(top->first + 1)) == ListData::kNoMoreDocs) {
	            // Need to compact the array by one.
	            // Just copy over the last value in the array and overwrite the top value, since we'll be removing it.
	            // Now, we can declare our list one shorter.
	            // If top happens to already point to the last value in the array, this step is superfluous.
	            // The '--top' below cancels the loop's '++top' so the element copied
	            // into this slot is examined on the next iteration.
	            --num_lists_remaining;
	            *top = lists_curr_postings[num_lists_remaining];
	            --top;
	          }
	        }
	        ++top;
	      }
	      // for debug purposes.
	      // cout << endl;
	      // Publish both sums for this document: totalScore includes every posting,
	      // actualTotalScore includes only postings that qualified (passed the
	      // threshold cut or appeared in the query view).
	      scores_set.totalScore = original_bm25_sum;
	      scores_set.actualTotalScore = actual_bm25_sum;
	      // TODO: may have some problems. Updated by Wei 2013/02/25
	      // Now, it is just simple checking and may have some problem
	      // If the bm25_sum == 0, then this document will NOT show up in the result list
	      // If the bm25_sum != 0, then this document will be in the result list for sorting

	      /*
	      // version1
		  // Need to keep track of the top-k documents.
		  if (total_num_results < num_results) {
			// We insert a document if we don't have k documents yet.
			results[total_num_results] = make_pair(scores_set, curr_doc_id);
			push_heap(results, results + total_num_results + 1, ResultCompare2());
		  } else {
			if (scores_set.totalScore > results->first.totalScore) {
			  // We insert a document only if it's score is greater than the minimum scoring document in the heap.
			  pop_heap(results, results + num_results, ResultCompare2());
			  results[num_results - 1].first = scores_set;
			  results[num_results - 1].second = curr_doc_id;
			  push_heap(results, results + num_results, ResultCompare2());
			}
		  }
		  ++total_num_results;
		  */

	      /*
	      // version 2, USAGE: Unknown
	      if (original_bm25_sum == 0.0){
	    	  // This document will just passed.
	      }
	      else{
	          // Need to keep track of the top-k documents.
	          if (total_num_results < num_results) {
	            // We insert a document if we don't have k documents yet.
	            results[total_num_results] = make_pair(scores_set, curr_doc_id);
	            push_heap(results, results + total_num_results + 1, ResultCompare2());
	          } else {
	            if (scores_set.totalScore > results->first.totalScore) {
	              // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	              pop_heap(results, results + num_results, ResultCompare2());
	              results[num_results - 1].first = scores_set;
	              results[num_results - 1].second = curr_doc_id;
	              push_heap(results, results + num_results, ResultCompare2());
	            }
	          }
	          ++total_num_results;
	      }
	      */


	      // for OR trec evaluation
	      if (original_bm25_sum == 0.0){
	    	  // Document scored zero overall -- it is skipped entirely.
	      }
	      else{
	          // Need to keep track of the top-k documents. The comparison against
	          // results->first.actualTotalScore implies ResultCompare3 (defined
	          // elsewhere) keeps the lowest-scoring retained document at the root.
	          if (total_num_results < num_results) {
	            // We insert a document if we don't have k documents yet.
	            results[total_num_results] = make_pair(scores_set, curr_doc_id);
	            push_heap(results, results + total_num_results + 1, ResultCompare3());
	          } else {
	            if (scores_set.actualTotalScore > results->first.actualTotalScore) {
	              // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	              pop_heap(results, results + num_results, ResultCompare3());
	              results[num_results - 1].first = scores_set;
	              results[num_results - 1].second = curr_doc_id;
	              push_heap(results, results + num_results, ResultCompare3());
	            }
	          }
	          ++total_num_results;
	      }


	    }
	    else
	    {
	    	// The partial-scoring (kScoreCompleteDoc == false) path is intentionally
	    	// not implemented in this branch.
	    }
	  }

	  // Sort top-k results in descending order by document score.
	  // sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  // for the trec evaluation ONLY
	  sort(results, results + min(num_results, total_num_results), ResultCompare3());

	  // Note: total_num_results counts every document considered and may exceed
	  // num_results; callers clamp with min(), as the sort above does.
	  return total_num_results;
  }
  else if (computation_method == 7){
	  // computation_method == 7: begins with the same complete-doc scoring flags and
	  // BM25 constant precomputation as the branches above.
	  // Setting this option to 'true' makes a considerable difference in average query latency (> 100ms).
	  // When we score the complete doc, we first find the lowest docID in the array, and then scan the array for that docID, and completely score it.
	  // All lists from which the docID was scored have their list pointers moved forward.
	  // When we don't score the complete doc, at each turn of the while loop, we find a partial score of the lowest docID posting.
	  // We add these together for a particular docID to get the complete score -- but it requires several iterations of the main while loop.
	  // This is less efficient, since we have to do a complete linear search through the array for every posting.
	  // On the other hand, when we score the complete doc right away, we only have to do one more linear search through all postings to score all the lists.
	  // Clearly, if the majority of the docIDs are present in more than one list, we'll be getting a speedup.
	  const bool kScoreCompleteDoc = true;

	  // Use an array instead of a heap for selecting the list with the lowest docID at each step.
	  // Using a heap for picking the list with the lowest docID is only implemented for when 'kScoreCompleteDoc' is false.
	  // For compatibility with 'kScoreCompleteDoc' equal to true, you'd need to use the heap to choose the next list to score, instead of iterating through the array, which is what's done now.
	  // Array based method is faster than the heap based method for choosing the lowest docID from all the lists, so this option should be set to 'true'.
	  // TODO: Try another array based strategy: keep a sorted array of docIDs. When updating, only need to find the spot for the new docID and re-sort the array up to that spot.
	  // TODO: Can also use a linked list for this. Then can just find the right spot, and do pointer changes. The locality here wouldn't be too good though.
	  const bool kUseArrayInsteadOfHeapList = true;



	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // We can precompute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float actural_bm25_sum = 0; // The actual BM25 sum(take pruned postings into consideration) for the current document we're processing. On 2014/01/20
	  float original_bm25_sum = 0;  // The BM25 sum for the current document we're processing.
	  SCORES_SET scores_set;
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in OR semantics.
	  int doc_len;
	  uint32_t f_d_t;

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.
	  float external_scores[num_lists];
	  for(int tempCounter = 0; tempCounter < num_lists; tempCounter++){
		  external_scores[tempCounter] = 0.0;
	  }
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;
	  for (int i = 0; i < num_lists; ++i) {
		// If for the pruning project, then use the same overall statistics for all the pruned index.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
		// for debug ONLY.
		// cout << "i:" << i << " num_docs_t:" << num_docs_t << endl;
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }

	  // We use this to get the next lowest docID from all the lists.
	  pair<uint32_t, int> lists_curr_postings[num_lists];  // Using a variable length array here.
	  int num_lists_remaining = 0;  // The number of lists with postings remaining.
	  for (int i = 0; i < num_lists; ++i) {
	    uint32_t curr_doc_id;
	    // Current version
	    if ((curr_doc_id = lists[i]->NextGEQRomanRead(0,external_scores[i],false)) < ListData::kNoMoreDocs) {
	    // OLD version
	    // if ((curr_doc_id = lists[i]->NextGEQ(0)) < ListData::kNoMoreDocs) {
	      lists_curr_postings[num_lists_remaining++] = make_pair(curr_doc_id, i);
	    }
	  }

	  if (num_lists_remaining > 0) {
	    if (!kUseArrayInsteadOfHeapList) {
	      // We use our own comparator, that only checks the docID part.
	      make_heap(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListMaxDocIdCompare());
	    }
	  } else {
	    return total_num_results;
	  }

	  // For the heap based method, the lowest element will always be the first element in the array.
	  // So we can keep 'top' constant since it's just a pointer to the first element and just push/pop the heap.
	  // For the array based method, we need to initialize it to the first element in the array, and then find the lowest value at the top of the while loop.
	  // We have to find the lowest element here as well, since we need to initialize 'curr_doc_id' to the right value before we start the loop.
	  pair<uint32_t, int>* top = &lists_curr_postings[0];
	  if (kUseArrayInsteadOfHeapList) {
	    for (int i = 1; i < num_lists_remaining; ++i) {
	      if (lists_curr_postings[i].first < top->first) {
	        top = &lists_curr_postings[i];
	      }
	    }
	  }

	  int i;
	  uint32_t curr_doc_id = top->first;  // Current docID we're processing the score for.

	  while (num_lists_remaining) {
	    if (kUseArrayInsteadOfHeapList) {
	      top = &lists_curr_postings[0];
	      for (i = 1; i < num_lists_remaining; ++i) {
	        if (lists_curr_postings[i].first < top->first) {
	          top = &lists_curr_postings[i];
	        }
	      }
	    }

	    if (kScoreCompleteDoc)
	    {
	      curr_doc_id = top->first;
	      actural_bm25_sum = 0;
	      original_bm25_sum = 0;

	      // init some variables
		  scores_set.totalScore = 0.0;
		  scores_set.actualTotalScore = 0.0;

		  scores_set.doc_length = 0;
		  scores_set.docCandidateQualifyStatus = true;
		  scores_set.posting0QualifyStatus = true;
		  scores_set.posting1QualifyStatus = true;
		  scores_set.posting2QualifyStatus = true;
		  scores_set.posting3QualifyStatus = true;
		  scores_set.posting4QualifyStatus = true;
		  scores_set.posting5QualifyStatus = true;
		  scores_set.posting6QualifyStatus = true;
		  scores_set.posting7QualifyStatus = true;
		  scores_set.posting8QualifyStatus = true;
		  scores_set.posting9QualifyStatus = true;

		  scores_set.posting0RankInList = 0;
		  scores_set.posting1RankInList = 0;
		  scores_set.posting2RankInList = 0;
		  scores_set.posting3RankInList = 0;
		  scores_set.posting4RankInList = 0;
		  scores_set.posting5RankInList = 0;
		  scores_set.posting6RankInList = 0;
		  scores_set.posting7RankInList = 0;
		  scores_set.posting8RankInList = 0;
		  scores_set.posting9RankInList = 0;

		  scores_set.postingFirstProbabilities0 = 0.0;
		  scores_set.postingFirstProbabilities1 = 0.0;
		  scores_set.postingFirstProbabilities2 = 0.0;
		  scores_set.postingFirstProbabilities3 = 0.0;
		  scores_set.postingFirstProbabilities4 = 0.0;
		  scores_set.postingFirstProbabilities5 = 0.0;
		  scores_set.postingFirstProbabilities6 = 0.0;
		  scores_set.postingFirstProbabilities7 = 0.0;
		  scores_set.postingFirstProbabilities8 = 0.0;
		  scores_set.postingFirstProbabilities9 = 0.0;

		  scores_set.postingSecondANDThirdProbabilities0 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities1 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities2 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities3 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities4 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities5 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities6 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities7 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities8 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities9 = 0.0;

		  scores_set.postingThreeFactorProbabilities0 = 0.0;
		  scores_set.postingThreeFactorProbabilities1 = 0.0;
		  scores_set.postingThreeFactorProbabilities2 = 0.0;
		  scores_set.postingThreeFactorProbabilities3 = 0.0;
		  scores_set.postingThreeFactorProbabilities4 = 0.0;
		  scores_set.postingThreeFactorProbabilities5 = 0.0;
		  scores_set.postingThreeFactorProbabilities6 = 0.0;
		  scores_set.postingThreeFactorProbabilities7 = 0.0;
		  scores_set.postingThreeFactorProbabilities8 = 0.0;
		  scores_set.postingThreeFactorProbabilities9 = 0.0;

		  scores_set.posting0ScoreComponentPart1 = 0.0;
		  scores_set.posting1ScoreComponentPart1 = 0.0;
		  scores_set.posting2ScoreComponentPart1 = 0.0;
		  scores_set.posting3ScoreComponentPart1 = 0.0;
		  scores_set.posting4ScoreComponentPart1 = 0.0;
		  scores_set.posting5ScoreComponentPart1 = 0.0;
		  scores_set.posting6ScoreComponentPart1 = 0.0;
		  scores_set.posting7ScoreComponentPart1 = 0.0;
		  scores_set.posting8ScoreComponentPart1 = 0.0;
		  scores_set.posting9ScoreComponentPart1 = 0.0;

		  scores_set.posting0ScoreComponentPart2 = 0.0;
		  scores_set.posting1ScoreComponentPart2 = 0.0;
		  scores_set.posting2ScoreComponentPart2 = 0.0;
		  scores_set.posting3ScoreComponentPart2 = 0.0;
		  scores_set.posting4ScoreComponentPart2 = 0.0;
		  scores_set.posting5ScoreComponentPart2 = 0.0;
		  scores_set.posting6ScoreComponentPart2 = 0.0;
		  scores_set.posting7ScoreComponentPart2 = 0.0;
		  scores_set.posting8ScoreComponentPart2 = 0.0;
		  scores_set.posting9ScoreComponentPart2 = 0.0;

	      scores_set.postingScore0 = 0;
	      scores_set.postingScore1 = 0;
	      scores_set.postingScore2 = 0;
	      scores_set.postingScore3 = 0;
	      scores_set.postingScore4 = 0;
	      scores_set.postingScore5 = 0;
	      scores_set.postingScore6 = 0;
	      scores_set.postingScore7 = 0;
	      scores_set.postingScore8 = 0;
	      scores_set.postingScore9 = 0;

		  scores_set.lengthOfTheInvertedList0 = 0;
		  scores_set.lengthOfTheInvertedList1 = 0;
		  scores_set.lengthOfTheInvertedList2 = 0;
		  scores_set.lengthOfTheInvertedList3 = 0;
		  scores_set.lengthOfTheInvertedList4 = 0;
		  scores_set.lengthOfTheInvertedList5 = 0;
		  scores_set.lengthOfTheInvertedList6 = 0;
		  scores_set.lengthOfTheInvertedList7 = 0;
		  scores_set.lengthOfTheInvertedList8 = 0;
		  scores_set.lengthOfTheInvertedList9 = 0;

		  scores_set.postingTermFrequency0 = 0;
		  scores_set.postingTermFrequency1 = 0;
		  scores_set.postingTermFrequency2 = 0;
		  scores_set.postingTermFrequency3 = 0;
		  scores_set.postingTermFrequency4 = 0;
		  scores_set.postingTermFrequency5 = 0;
		  scores_set.postingTermFrequency6 = 0;
		  scores_set.postingTermFrequency7 = 0;
		  scores_set.postingTermFrequency8 = 0;
		  scores_set.postingTermFrequency9 = 0;

	      // for debug purposes.
	      // cout << curr_doc_id << " ";
	      // Can start searching from the position of 'top' since it'll be the first lowest element in the array.
	      while (top != &lists_curr_postings[num_lists_remaining]) {
	        if (top->first == curr_doc_id) {

	          // Compute BM25 score from frequencies.
	          f_d_t = lists[top->second]->GetFreq();
	          doc_len = index_reader_.document_map().GetDocumentLength(top->first);
	          scores_set.doc_length = doc_len;
	          partial_bm25 = idf_t[top->second] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

	          // compute the posting probability
			  double first_factor_probability_value = 0.0;
			  double factor_2_3_combined_probability_value = 0.0;
			  double final_3_factors_probability_combined_value = 0.0;
			  double final_posting_probability_used_for_judgement = 0.0;

			  // (1) compute P(t)
			  string curr_look_up_term = queryTermPostionIndexPairs_[top->second].first;
			  if (terms_with_corresponding_species_belonging_to_map_.count(curr_look_up_term) > 0 ){
				  first_factor_probability_value = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[curr_look_up_term] ];
			  }
			  else{
				  first_factor_probability_value = freq_first_factor_probability_map_[0];
			  }

		      // (2) compute P(unknown) for the second and third factor
	          // the computation of the value of relRank here
	          int previousClassLabelOfListLengthInIntFormat = term_with_their_belonging_class_map_[ queryTermPostionIndexPairs_[top->second].first ];
	          unsigned int previousClassUpperBound = class_label_with_lower_bounds_of_list_length_map_[previousClassLabelOfListLengthInIntFormat + 1];
	          float compareRelRank = float(external_scores[top->second]) / float(lengthOfTheInvertedListForThisTerm[top->second]);
			  float currentRangeUpperBound = 1.0;
			  float previousRangeUpperBound = currentRangeUpperBound;
			  unsigned int numOfPieces = int( class_label_with_lower_bounds_of_impact_scores_map_[previousClassLabelOfListLengthInIntFormat] );

			  // This component has the stepGap to be 0.5
			  int counter = 0;
			  // debug
			  // cout << counter << " " << currentRangeUpperBound << endl;
			  counter += 1;
			  for(; counter < numOfPieces; counter++){
				  currentRangeUpperBound = currentRangeUpperBound / 2;
			  }
			  // at this point, currentRangeUpperBound has the smallest upper bound except 0

			  unsigned int currentClassLabelBasedOnRelRankInIntFormat = numOfPieces - 1;
			  // debug
			  // cout << "compareRelRank: " << compareRelRank << endl;
			  // cout << "currentRangeUpperBound: " << currentRangeUpperBound << endl;

			  while (compareRelRank > currentRangeUpperBound){
				  currentRangeUpperBound = currentRangeUpperBound * 2;
				  currentClassLabelBasedOnRelRankInIntFormat -= 1;
			  }

			  stringstream ss1;
			  ss1 << previousClassLabelOfListLengthInIntFormat;
			  string part1InStringFormat = ss1.str();
			  stringstream ss2;
			  ss2 << currentClassLabelBasedOnRelRankInIntFormat;
			  string part2InStringFormat = ss2.str();

			  string probabilityAccessKeyInStringFormat = part1InStringFormat + "_" + part2InStringFormat;
			  if (class_label_with_probability_of_2D_ranges_map_.count(probabilityAccessKeyInStringFormat) > 0){
				  // relRank version with correction factor on 2014/01/06 morning by Wei at school
				  // factor_2_3_combined_probability_value =  class_label_with_probability_of_2D_ranges_map_[ probabilityAccessKeyInStringFormat ] * previousClassUpperBound / valueOfTermWithLengthOfTheList ;
				  // original relRank version
				  factor_2_3_combined_probability_value =  class_label_with_probability_of_2D_ranges_map_[ probabilityAccessKeyInStringFormat ];
			  }
			  else{
				  cout << "probabilityAccessKeyInStringFormat: " << probabilityAccessKeyInStringFormat << " not in class_label_with_probability_of_2D_ranges_map_" << endl;
				  cout << "critical error" << endl;
				  exit(1);
			  }

			  final_3_factors_probability_combined_value = first_factor_probability_value * factor_2_3_combined_probability_value;
			  final_posting_probability_used_for_judgement = final_3_factors_probability_combined_value;

			  if (first_factor_probability_value == 0 or factor_2_3_combined_probability_value == 0){
				  // for debug for each approach
				  cout << "----->ONLY fit for relRank approach NOW<------" << endl;
				  cout << "curr_doc_id: " << curr_doc_id << endl;
				  cout << "PartialBM25Score: " << partial_bm25 << endl;
				  cout << "external_scores["<< top->second << "]: " << external_scores[top->second] << endl;
				  cout << "LengthOfTheList_: " << lengthOfTheInvertedListForThisTerm[top->second] << endl;
				  cout << "compareRelRank: " << compareRelRank << endl;
				  cout << "previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
				  cout << "currentClassLabelBasedOnRelRankInIntFormat: " << currentClassLabelBasedOnRelRankInIntFormat << endl;
				  cout << "previousClassUpperBound: " << previousClassUpperBound << endl;
				  cout << "second_AND_third_factor_combination_probability_value: " << factor_2_3_combined_probability_value << endl;
				  cout << "first_factor_probability_value: " << first_factor_probability_value << endl;
				  cout << "second_AND_third_factor_combination_probability_value: " << factor_2_3_combined_probability_value << endl;
				  cout << "final_3_factors_probability_combined_value: " << final_3_factors_probability_combined_value << endl;
				  cout << endl;
				  exit(1);
			  }


			  // For OR semantics, we only need to ensure that unqualified posting scores are NOT added to the BM25 sum.
	          // Every document still gets a chance to enter the top-k, even when its unqualified postings are not added.
	          // Updated by Wei on 2014/01/20 night at school
			  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        	  // In this case, do nothing because this posting's score is not qualified.
	          }
	          else{
	        	  actural_bm25_sum += partial_bm25;
	          }
	          original_bm25_sum += partial_bm25;

	          if (top->second == 0){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting0QualifyStatus = false;
	        	  }

				  scores_set.posting0RankInList = external_scores[top->second];
				  scores_set.postingScore0 = partial_bm25;
				  scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency0 = f_d_t;
				  scores_set.postingFirstProbabilities0 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities0 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities0 = final_3_factors_probability_combined_value;

	          }
	          else if (top->second == 1){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting1QualifyStatus = false;
	        	  }

				  scores_set.posting1RankInList = external_scores[top->second];
				  scores_set.postingScore1 = partial_bm25;
				  scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency1 = f_d_t;
				  scores_set.postingFirstProbabilities1 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities1 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities1 = final_3_factors_probability_combined_value;
	          }
	          else if (top->second == 2){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting2QualifyStatus = false;
	        	  }

				  scores_set.posting2RankInList = external_scores[top->second];
				  scores_set.postingScore2 = partial_bm25;
				  scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency2 = f_d_t;
				  scores_set.postingFirstProbabilities2 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities2 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities2 = final_3_factors_probability_combined_value;
	          }
	          else if (top->second == 3){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting3QualifyStatus = false;
	        	  }

				  scores_set.posting3RankInList = external_scores[top->second];
				  scores_set.postingScore3 = partial_bm25;
				  scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency3 = f_d_t;
				  scores_set.postingFirstProbabilities3 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities3 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities3 = final_3_factors_probability_combined_value;

	          }
	          else if (top->second == 4){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting4QualifyStatus = false;
	        	  }

				  scores_set.posting4RankInList = external_scores[top->second];
				  scores_set.postingScore4 = partial_bm25;
				  scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency4 = f_d_t;
				  scores_set.postingFirstProbabilities4 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities4 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities4 = final_3_factors_probability_combined_value;

	          }
	          else if (top->second == 5){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting5QualifyStatus = false;
	        	  }

				  scores_set.posting5RankInList = external_scores[top->second];
				  scores_set.postingScore5 = partial_bm25;
				  scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency5 = f_d_t;
				  scores_set.postingFirstProbabilities5 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities5 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities5 = final_3_factors_probability_combined_value;

	          }
	          else if (top->second == 6){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting6QualifyStatus = false;
	        	  }

				  scores_set.posting6RankInList = external_scores[top->second];
				  scores_set.postingScore6 = partial_bm25;
				  scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency6 = f_d_t;
				  scores_set.postingFirstProbabilities6 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities6 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities6 = final_3_factors_probability_combined_value;

	          }
	          else if (top->second == 7){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting7QualifyStatus = false;
	        	  }

				  scores_set.posting7RankInList = external_scores[top->second];
				  scores_set.postingScore7 = partial_bm25;
				  scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency7 = f_d_t;
				  scores_set.postingFirstProbabilities7 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities7 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities7 = final_3_factors_probability_combined_value;

	          }
	          else if (top->second == 8){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting8QualifyStatus = false;
	        	  }

				  scores_set.posting8RankInList = external_scores[top->second];
				  scores_set.postingScore8 = partial_bm25;
				  scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency8 = f_d_t;
				  scores_set.postingFirstProbabilities8 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities8 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities8 = final_3_factors_probability_combined_value;

	          }
	          else if (top->second == 9){
	        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
	        		  scores_set.posting9QualifyStatus = false;
	        	  }

				  scores_set.posting9RankInList = external_scores[top->second];
				  scores_set.postingScore9 = partial_bm25;
				  scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[top->second];
				  scores_set.postingTermFrequency9 = f_d_t;
				  scores_set.postingFirstProbabilities9 = first_factor_probability_value;
				  scores_set.postingSecondANDThirdProbabilities9 = factor_2_3_combined_probability_value;
				  scores_set.postingThreeFactorProbabilities9 = final_3_factors_probability_combined_value;

	          }

	          ++num_postings_scored_;
	          // CURRENT version
	          if ((top->first = lists[top->second]->NextGEQRomanRead(top->first + 1,external_scores[top->second],false)) == ListData::kNoMoreDocs) {
	          // OLD version
	          // if ((top->first = lists[top->second]->NextGEQ(top->first + 1)) == ListData::kNoMoreDocs) {
	            // Need to compact the array by one.
	            // Just copy over the last value in the array and overwrite the top value, since we'll be removing it.
	            // Now, we can declare our list one shorter.
	            // If top happens to already point to the last value in the array, this step is superfluous.
	            --num_lists_remaining;
	            *top = lists_curr_postings[num_lists_remaining];
	            --top;
	            /////////////////////////////////////////////////////////////////////////////////////////
	            // NOTE(review): unclear whether this duplicated update block is needed (original note: 2014/01/20).
	            // 'top' was just decremented above, so it now points at a *different* list entry while
	            // 'partial_bm25' / 'f_d_t' still hold values from the exhausted list — verify this is intended.
		          if (top->second == 0){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting0QualifyStatus = false;
		        	  }

					  scores_set.posting0RankInList = external_scores[top->second];
					  scores_set.postingScore0 = partial_bm25;
					  scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency0 = f_d_t;
					  scores_set.postingFirstProbabilities0 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities0 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities0 = final_3_factors_probability_combined_value;

		          }
		          else if (top->second == 1){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting1QualifyStatus = false;
		        	  }

					  scores_set.posting1RankInList = external_scores[top->second];
					  scores_set.postingScore1 = partial_bm25;
					  scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency1 = f_d_t;
					  scores_set.postingFirstProbabilities1 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities1 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities1 = final_3_factors_probability_combined_value;
		          }
		          else if (top->second == 2){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting2QualifyStatus = false;
		        	  }

					  scores_set.posting2RankInList = external_scores[top->second];
					  scores_set.postingScore2 = partial_bm25;
					  scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency2 = f_d_t;
					  scores_set.postingFirstProbabilities2 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities2 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities2 = final_3_factors_probability_combined_value;
		          }
		          else if (top->second == 3){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting3QualifyStatus = false;
		        	  }

					  scores_set.posting3RankInList = external_scores[top->second];
					  scores_set.postingScore3 = partial_bm25;
					  scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency3 = f_d_t;
					  scores_set.postingFirstProbabilities3 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities3 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities3 = final_3_factors_probability_combined_value;

		          }
		          else if (top->second == 4){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting4QualifyStatus = false;
		        	  }

					  scores_set.posting4RankInList = external_scores[top->second];
					  scores_set.postingScore4 = partial_bm25;
					  scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency4 = f_d_t;
					  scores_set.postingFirstProbabilities4 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities4 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities4 = final_3_factors_probability_combined_value;

		          }
		          else if (top->second == 5){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting5QualifyStatus = false;
		        	  }

					  scores_set.posting5RankInList = external_scores[top->second];
					  scores_set.postingScore5 = partial_bm25;
					  scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency5 = f_d_t;
					  scores_set.postingFirstProbabilities5 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities5 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities5 = final_3_factors_probability_combined_value;

		          }
		          else if (top->second == 6){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting6QualifyStatus = false;
		        	  }

					  scores_set.posting6RankInList = external_scores[top->second];
					  scores_set.postingScore6 = partial_bm25;
					  scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency6 = f_d_t;
					  scores_set.postingFirstProbabilities6 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities6 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities6 = final_3_factors_probability_combined_value;

		          }
		          else if (top->second == 7){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting7QualifyStatus = false;
		        	  }

					  scores_set.posting7RankInList = external_scores[top->second];
					  scores_set.postingScore7 = partial_bm25;
					  scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency7 = f_d_t;
					  scores_set.postingFirstProbabilities7 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities7 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities7 = final_3_factors_probability_combined_value;

		          }
		          else if (top->second == 8){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting8QualifyStatus = false;
		        	  }

					  scores_set.posting8RankInList = external_scores[top->second];
					  scores_set.postingScore8 = partial_bm25;
					  scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency8 = f_d_t;
					  scores_set.postingFirstProbabilities8 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities8 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities8 = final_3_factors_probability_combined_value;

		          }
		          else if (top->second == 9){
		        	  if(final_posting_probability_used_for_judgement < universal_threshold_socre_of_posting_){
		        		  scores_set.posting9QualifyStatus = false;
		        	  }

					  scores_set.posting9RankInList = external_scores[top->second];
					  scores_set.postingScore9 = partial_bm25;
					  scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[top->second];
					  scores_set.postingTermFrequency9 = f_d_t;
					  scores_set.postingFirstProbabilities9 = first_factor_probability_value;
					  scores_set.postingSecondANDThirdProbabilities9 = factor_2_3_combined_probability_value;
					  scores_set.postingThreeFactorProbabilities9 = final_3_factors_probability_combined_value;

		          }
	            /////////////////////////////////////////////////////////////////////////////////////////



	          }
	        }
	        ++top;
	      }
	      // for debug purposes.
	      // cout << endl;
	      scores_set.totalScore = original_bm25_sum;
	      scores_set.actualTotalScore = actural_bm25_sum;

	      // TODO: may have some problems. Updated by Wei 2013/02/25
	      // Now, it is just simple checking and may have some problem
	      // If the bm25_sum == 0, then this document will NOT show up in the result list
	      // If the bm25_sum != 0, then this document will be in the result list for sorting


	      /*
		  // Need to keep track of the top-k documents.
		  if (total_num_results < num_results) {
			// We insert a document if we don't have k documents yet.
			results[total_num_results] = make_pair(scores_set, curr_doc_id);
			push_heap(results, results + total_num_results + 1, ResultCompare2());
		  } else {
			if (scores_set.totalScore > results->first.totalScore) {
			  // We insert a document only if it's score is greater than the minimum scoring document in the heap.
			  pop_heap(results, results + num_results, ResultCompare2());
			  results[num_results - 1].first = scores_set;
			  results[num_results - 1].second = curr_doc_id;
			  push_heap(results, results + num_results, ResultCompare2());
			}
		  }
		  ++total_num_results;
		  */

	      /*
	      // version1:
	      if (original_bm25_sum == 0.0){
	    	  // This document will just passed.
	      }
	      else{
	          // Need to keep track of the top-k documents.
	          if (total_num_results < num_results) {
	            // We insert a document if we don't have k documents yet.
	            results[total_num_results] = make_pair(scores_set, curr_doc_id);
	            push_heap(results, results + total_num_results + 1, ResultCompare2());
	          } else {
	            if (scores_set.totalScore > results->first.totalScore) {
	              // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	              pop_heap(results, results + num_results, ResultCompare2());
	              results[num_results - 1].first = scores_set;
	              results[num_results - 1].second = curr_doc_id;
	              push_heap(results, results + num_results, ResultCompare2());
	            }
	          }
	          ++total_num_results;
	      }
	      */

	      // current version for the human judge queries
	      if (original_bm25_sum == 0.0){
	    	  // This document will just passed.
	      }
	      else{
	          // Need to keep track of the top-k documents.
	          if (total_num_results < num_results) {
	            // We insert a document if we don't have k documents yet.
	            results[total_num_results] = make_pair(scores_set, curr_doc_id);
	            push_heap(results, results + total_num_results + 1, ResultCompare2());
	          } else {
	            if (scores_set.actualTotalScore > results->first.actualTotalScore) {
	              // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	              pop_heap(results, results + num_results, ResultCompare3());
	              results[num_results - 1].first = scores_set;
	              results[num_results - 1].second = curr_doc_id;
	              push_heap(results, results + num_results, ResultCompare3());
	            }
	          }
	          ++total_num_results;
	      }

	    }
	    else
	    {
	    	//delete and do not handle this situation.
	    }
	  }

	  // OLD version
	  // Sort top-k results in descending order by document score.
	  // sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  // Sort top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare3());

	  return total_num_results;
  }
  else{
	  cout << "unsupported logic" << endl;
	  return total_num_results;
  }

}

// Standard DAAT OR mode processing for comparison purposes.
//
// Document-At-A-Time OR-semantics merge over 'num_lists' postings lists, scoring each
// candidate document with BM25 and keeping the top 'num_results' documents in a heap
// over the 'results' array.
//
// Parameters:
//   lists                - array of 'num_lists' postings list cursors; each is advanced via NextGEQ().
//   num_lists            - number of query terms / postings lists.
//   results              - caller-supplied output array of at least 'num_results' entries; on return,
//                          the first min(num_results, return value) entries are sorted in descending
//                          score order (ResultCompare2, which orders by SCORES_SET.totalScore).
//   num_results          - k, the number of top documents to retain.
//   pruningProjectSwitch - when true, IDF uses the precomputed original (unpruned) list lengths in
//                          originalListLengthsVectorForPruningProjectForCurrentQueries_ instead of
//                          the lengths of the (possibly pruned) lists themselves.
//   lowerBoundThreshold  - per-posting score floor: a partial BM25 score <= this value is dropped
//                          from the document's score sum (the document itself is not discarded).
//   computation_method   - only the value 7 is handled; any other value prints "unsupported logic"
//                          and returns 0.
//
// Returns: the total number of documents whose (threshold-filtered) BM25 sum was nonzero, i.e. the
// number of candidates considered for the top-k; may exceed 'num_results' (callers should take
// min(num_results, return value) entries from 'results').
int LocalQueryProcessor::MergeLists2ScoreComputeOnline(ListData** lists, int num_lists, Result_Wei_2012* results, int num_results, bool pruningProjectSwitch, float lowerBoundThreshold, int computation_method) {
  // for debug ONLY
  cout << "LocalQueryProcessor::MergeLists2ScoreComputeOnline(...) called.(Mostly Original from Roman)" << endl;

  int total_num_results = 0;

  // Updated by Wei on 2013/01/01 night at school
  // Note: NOT support the following computation_method any more: 0,1,2,3,4,5,6 instantly
  if (computation_method == 7){
	  // Setting this option to 'true' makes a considerable difference in average query latency (> 100ms).
	  // When we score the complete doc, we first find the lowest docID in the array, and then scan the array for that docID, and completely score it.
	  // All lists from which the docID was scored have their list pointers moved forward.
	  // When we don't score the complete doc, at each turn of the while loop, we find a partial score of the lowest docID posting.
	  // We add these together for a particular docID to get the complete score -- but it requires several iterations of the main while loop.
	  // This is less efficient, since we have to do a complete linear search through the array for every posting.
	  // On the other hand, when we score the complete doc right away, we only have to do one more linear search through all postings to score all the lists.
	  // Clearly, if the majority of the docIDs are present in more than one list, we'll be getting a speedup.
	  // NOTE(review): only the kScoreCompleteDoc == true path is implemented below; the else-branch
	  // of the main loop is an intentional no-op.
	  const bool kScoreCompleteDoc = true;

	  // Use an array instead of a heap for selecting the list with the lowest docID at each step.
	  // Using a heap for picking the list with the lowest docID is only implemented for when 'kScoreCompleteDoc' is false.
	  // For compatibility with 'kScoreCompleteDoc' equal to true, you'd need to use the heap to choose the next list to score, instead of iterating through the array, which is what's done now.
	  // Array based method is faster than the heap based method for choosing the lowest docID from all the lists, so this option should be set to 'true'.
	  // TODO: Try another array based strategy: keep a sorted array of docIDs. When updating, only need to find the spot for the new docID and re-sort the array up to that spot.
	  // TODO: Can also use a linked list for this. Then can just find the right spot, and do pointer changes. The locality here wouldn't be too good though.
	  const bool kUseArrayInsteadOfHeapList = true;



	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // We can precompute a few of the BM25 values here.
	  // BM25 term score = idf * f*(k1+1) / (f + k1*(1-b) + k1*b*doc_len/avg_doc_len);
	  // the denominator constants below fold the k1/b terms so the per-posting formula is cheap.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float bm25_sum = 0;  // The BM25 sum for the current document we're processing.
	  SCORES_SET scores_set;  // Per-document record of total score plus per-term (slot 0..9) score details.
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in OR semantics.
	  int doc_len;
	  uint32_t f_d_t;

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.
	  int lengthOfTheInvertedListForThisTerm[num_lists];  // Per-term list length, recorded into scores_set per posting below.
	  int num_docs_t;
	  for (int i = 0; i < num_lists; ++i) {
		// If for the pruning project, then use the same overall statistics for all the pruned index.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
		// for debug ONLY.
		// cout << "i:" << i << " num_docs_t:" << num_docs_t << endl;
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }

	  // We use this to get the next lowest docID from all the lists.
	  // Each entry is (current docID of the list, list index); exhausted lists are never inserted.
	  pair<uint32_t, int> lists_curr_postings[num_lists];  // Using a variable length array here.
	  int num_lists_remaining = 0;  // The number of lists with postings remaining.
	  for (int i = 0; i < num_lists; ++i) {
	    uint32_t curr_doc_id;
	    if ((curr_doc_id = lists[i]->NextGEQ(0)) < ListData::kNoMoreDocs) {
	      lists_curr_postings[num_lists_remaining++] = make_pair(curr_doc_id, i);
	    }
	  }

	  if (num_lists_remaining > 0) {
	    if (!kUseArrayInsteadOfHeapList) {
	      // We use our own comparator, that only checks the docID part.
	      make_heap(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListMaxDocIdCompare());
	    }
	  } else {
	    return total_num_results;
	  }

	  // For the heap based method, the lowest element will always be the first element in the array.
	  // So we can keep 'top' constant since it's just a pointer to the first element and just push/pop the heap.
	  // For the array based method, we need to initialize it to the first element in the array, and then find the lowest value at the top of the while loop.
	  // We have to find the lowest element here as well, since we need to initialize 'curr_doc_id' to the right value before we start the loop.
	  pair<uint32_t, int>* top = &lists_curr_postings[0];
	  if (kUseArrayInsteadOfHeapList) {
	    for (int i = 1; i < num_lists_remaining; ++i) {
	      if (lists_curr_postings[i].first < top->first) {
	        top = &lists_curr_postings[i];
	      }
	    }
	  }

	  int i;
	  uint32_t curr_doc_id = top->first;  // Current docID we're processing the score for.

	  while (num_lists_remaining) {
	    // Linear scan for the list positioned at the lowest docID; this becomes the next doc to score.
	    if (kUseArrayInsteadOfHeapList) {
	      top = &lists_curr_postings[0];
	      for (i = 1; i < num_lists_remaining; ++i) {
	        if (lists_curr_postings[i].first < top->first) {
	          top = &lists_curr_postings[i];
	        }
	      }
	    }

	    if (kScoreCompleteDoc)
	    {
	      curr_doc_id = top->first;
	      bm25_sum = 0;
	      // init of the variable scores_set
	      // Reset every per-term slot (0..9) so values left over from the previously scored
	      // document don't leak into this one. 'scores_set' is reused across loop iterations.
	      // NOTE(review): fields such as actualTotalScore, posting*RankInList and
	      // posting*QualifyStatus (seen in a sibling routine) are NOT reset here — confirm
	      // they are unused on this code path (ResultCompare2 sorts by totalScore).
		  scores_set.totalScore = 0.0;
		  scores_set.doc_length = 0;

		  // init variables
		  scores_set.postingFirstProbabilities0 = 0.0;
		  scores_set.postingFirstProbabilities1 = 0.0;
		  scores_set.postingFirstProbabilities2 = 0.0;
		  scores_set.postingFirstProbabilities3 = 0.0;
		  scores_set.postingFirstProbabilities4 = 0.0;
		  scores_set.postingFirstProbabilities5 = 0.0;
		  scores_set.postingFirstProbabilities6 = 0.0;
		  scores_set.postingFirstProbabilities7 = 0.0;
		  scores_set.postingFirstProbabilities8 = 0.0;
		  scores_set.postingFirstProbabilities9 = 0.0;

		  scores_set.postingSecondANDThirdProbabilities0 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities1 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities2 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities3 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities4 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities5 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities6 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities7 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities8 = 0.0;
		  scores_set.postingSecondANDThirdProbabilities9 = 0.0;

		  scores_set.postingThreeFactorProbabilities0 = 0.0;
		  scores_set.postingThreeFactorProbabilities1 = 0.0;
		  scores_set.postingThreeFactorProbabilities2 = 0.0;
		  scores_set.postingThreeFactorProbabilities3 = 0.0;
		  scores_set.postingThreeFactorProbabilities4 = 0.0;
		  scores_set.postingThreeFactorProbabilities5 = 0.0;
		  scores_set.postingThreeFactorProbabilities6 = 0.0;
		  scores_set.postingThreeFactorProbabilities7 = 0.0;
		  scores_set.postingThreeFactorProbabilities8 = 0.0;
		  scores_set.postingThreeFactorProbabilities9 = 0.0;

		  scores_set.posting0ScoreComponentPart1 = 0.0;
		  scores_set.posting1ScoreComponentPart1 = 0.0;
		  scores_set.posting2ScoreComponentPart1 = 0.0;
		  scores_set.posting3ScoreComponentPart1 = 0.0;
		  scores_set.posting4ScoreComponentPart1 = 0.0;
		  scores_set.posting5ScoreComponentPart1 = 0.0;
		  scores_set.posting6ScoreComponentPart1 = 0.0;
		  scores_set.posting7ScoreComponentPart1 = 0.0;
		  scores_set.posting8ScoreComponentPart1 = 0.0;
		  scores_set.posting9ScoreComponentPart1 = 0.0;

		  scores_set.posting0ScoreComponentPart2 = 0.0;
		  scores_set.posting1ScoreComponentPart2 = 0.0;
		  scores_set.posting2ScoreComponentPart2 = 0.0;
		  scores_set.posting3ScoreComponentPart2 = 0.0;
		  scores_set.posting4ScoreComponentPart2 = 0.0;
		  scores_set.posting5ScoreComponentPart2 = 0.0;
		  scores_set.posting6ScoreComponentPart2 = 0.0;
		  scores_set.posting7ScoreComponentPart2 = 0.0;
		  scores_set.posting8ScoreComponentPart2 = 0.0;
		  scores_set.posting9ScoreComponentPart2 = 0.0;

	      scores_set.postingScore0 = 0;
	      scores_set.postingScore1 = 0;
	      scores_set.postingScore2 = 0;
	      scores_set.postingScore3 = 0;
	      scores_set.postingScore4 = 0;
	      scores_set.postingScore5 = 0;
	      scores_set.postingScore6 = 0;
	      scores_set.postingScore7 = 0;
	      scores_set.postingScore8 = 0;
	      scores_set.postingScore9 = 0;

		  scores_set.lengthOfTheInvertedList0 = 0;
		  scores_set.lengthOfTheInvertedList1 = 0;
		  scores_set.lengthOfTheInvertedList2 = 0;
		  scores_set.lengthOfTheInvertedList3 = 0;
		  scores_set.lengthOfTheInvertedList4 = 0;
		  scores_set.lengthOfTheInvertedList5 = 0;
		  scores_set.lengthOfTheInvertedList6 = 0;
		  scores_set.lengthOfTheInvertedList7 = 0;
		  scores_set.lengthOfTheInvertedList8 = 0;
		  scores_set.lengthOfTheInvertedList9 = 0;

		  scores_set.postingTermFrequency0 = 0;
		  scores_set.postingTermFrequency1 = 0;
		  scores_set.postingTermFrequency2 = 0;
		  scores_set.postingTermFrequency3 = 0;
		  scores_set.postingTermFrequency4 = 0;
		  scores_set.postingTermFrequency5 = 0;
		  scores_set.postingTermFrequency6 = 0;
		  scores_set.postingTermFrequency7 = 0;
		  scores_set.postingTermFrequency8 = 0;
		  scores_set.postingTermFrequency9 = 0;

	      // for debug purposes.
	      // cout << curr_doc_id << " ";
	      // Can start searching from the position of 'top' since it'll be the first lowest element in the array.
	      // Scan the rest of the array for every list also positioned at 'curr_doc_id' and
	      // accumulate its partial score; each matched list is advanced past 'curr_doc_id'.
	      while (top != &lists_curr_postings[num_lists_remaining]) {
	        if (top->first == curr_doc_id) {

	          // Compute BM25 score from frequencies.
	          f_d_t = lists[top->second]->GetFreq();
	          doc_len = index_reader_.document_map().GetDocumentLength(top->first);
	          scores_set.doc_length = doc_len;
	          partial_bm25 = idf_t[top->second] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
	          //debug option1:
	          //cout << "f_d_t:" << f_d_t << " doc_len:" << doc_len << " idf_t[i]:" << idf_t[i] << " partial_bm25:"<< partial_bm25 << " ";
	          //debug option2:
	          // for debug purposes.
	          // cout << partial_bm25 << " ";

	          // for OR semantics, all I need to control is that unqualified score has NOT been added to the sum of the bm25 score.
	          // every document is still getting a chance into to the top-k even those unqualified documents are NOT added.
	          if(partial_bm25 <= lowerBoundThreshold){
	        	  // In this case, I just do NOTHING cause the that posting score is NOT qualified.
	          }
	          else{
	        	  bm25_sum += partial_bm25;
	          }


	          // Record per-term details into the slot matching the list index (0..9).
	          // Filtered postings are marked with score 0.0 and sentinel -1 list-length/frequency.
	          // NOTE(review): lists with index > 9 still contribute to bm25_sum above but have no
	          // slot here, so their details are silently dropped — confirm queries are capped at
	          // 10 terms on this path.
	          if (top->second == 0){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore0 = 0.0;
	        		  scores_set.lengthOfTheInvertedList0 = -1;
	        		  scores_set.postingTermFrequency0 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore0 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency0 = f_d_t;
	        	  }
	          }
	          else if (top->second == 1){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore1 = 0.0;
	        		  scores_set.lengthOfTheInvertedList1 = -1;
	        		  scores_set.postingTermFrequency1 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore1 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency1 = f_d_t;
	        	  }
	          }
	          else if (top->second == 2){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore2 = 0.0;
	        		  scores_set.lengthOfTheInvertedList2 = -1;
	        		  scores_set.postingTermFrequency2 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore2 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency2 = f_d_t;
	        	  }
	          }
	          else if (top->second == 3){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore3 = 0.0;
	        		  scores_set.lengthOfTheInvertedList3 = -1;
	        		  scores_set.postingTermFrequency3 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore3 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency3 = f_d_t;
	        	  }

	          }
	          else if (top->second == 4){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore4 = 0.0;
	        		  scores_set.lengthOfTheInvertedList4 = -1;
	        		  scores_set.postingTermFrequency4 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore4 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency4 = f_d_t;
	        	  }

	          }
	          else if (top->second == 5){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore5 = 0.0;
	        		  scores_set.lengthOfTheInvertedList5 = -1;
	        		  scores_set.postingTermFrequency5 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore5 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency5 = f_d_t;
	        	  }

	          }
	          else if (top->second == 6){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore6 = 0.0;
	        		  scores_set.lengthOfTheInvertedList6 = -1;
	        		  scores_set.postingTermFrequency6 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore6 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency6 = f_d_t;
	        	  }

	          }
	          else if (top->second == 7){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore7 = 0.0;
	        		  scores_set.lengthOfTheInvertedList7 = -1;
	        		  scores_set.postingTermFrequency7 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore7 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency7 = f_d_t;
	        	  }

	          }
	          else if (top->second == 8){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore8 = 0.0;
	        		  scores_set.lengthOfTheInvertedList8 = -1;
	        		  scores_set.postingTermFrequency8 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore8 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency8 = f_d_t;
	        	  }

	          }
	          else if (top->second == 9){
	        	  if(partial_bm25 <= lowerBoundThreshold){
	        		  scores_set.postingScore9 = 0.0;
	        		  scores_set.lengthOfTheInvertedList9 = -1;
	        		  scores_set.postingTermFrequency9 = -1;
	        	  }
	        	  else{
	        		  scores_set.postingScore9 = partial_bm25;
	        		  scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[top->second];
	        		  scores_set.postingTermFrequency9 = f_d_t;
	        	  }

	          }

	          ++num_postings_scored_;

	          if ((top->first = lists[top->second]->NextGEQ(top->first + 1)) == ListData::kNoMoreDocs) {
	            // Need to compact the array by one.
	            // Just copy over the last value in the array and overwrite the top value, since we'll be removing it.
	            // Now, we can declare our list one shorter.
	            // If top happens to already point to the last value in the array, this step is superfluous.
	            // The '--top' ensures the '++top' at the bottom of the loop re-examines the element
	            // we just copied into this position, so no posting is skipped.
	          --num_lists_remaining;
	            *top = lists_curr_postings[num_lists_remaining];
	            --top;
	          }
	        }
	        ++top;
	      }
	      // for debug purposes.
	      // cout << endl;
	      scores_set.totalScore = bm25_sum;

	      // TODO: may have some problems. Updated by Wei 2013/02/25
	      // Now, it is just simple checking and may have some problem
	      // If the bm25_sum == 0, then this document will NOT show up in the result list
	      // If the bm25_sum != 0, then this document will be in the result list for sorting
	      // NOTE(review): exact float comparison with 0.0 — a document whose every partial score was
	      // filtered by the threshold gets exactly 0.0 and is dropped, but this also assumes no
	      // legitimate score sums to exactly zero; confirm that's the intent.


	      if (bm25_sum == 0.0){
	    	  // This document will just passed.
	      }
	      else{
	          // Need to keep track of the top-k documents.
	          // 'results' is maintained as a min-heap on totalScore (ResultCompare2), so
	          // results[0] is always the lowest-scoring retained document.
	          if (total_num_results < num_results) {
	            // We insert a document if we don't have k documents yet.
	            results[total_num_results] = make_pair(scores_set, curr_doc_id);
	            push_heap(results, results + total_num_results + 1, ResultCompare2());
	          } else {
	            if (scores_set.totalScore > results->first.totalScore) {
	              // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	              pop_heap(results, results + num_results, ResultCompare2());
	              results[num_results - 1].first = scores_set;
	              results[num_results - 1].second = curr_doc_id;
	              push_heap(results, results + num_results, ResultCompare2());
	            }
	          }
	          ++total_num_results;
	      }
	    }
	    else
	    {
	    	//delete and do not handle this situation.
	    }
	  }

	  // Sort top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  return total_num_results;
  }
  else{
	  cout << "unsupported logic" << endl;
	  return total_num_results;
  }
}

// Standard DAAT OR mode processing for comparison purposes.
int LocalQueryProcessor::MergeLists(ListData** lists, int num_lists, Result* results, int num_results, bool pruningProjectSwitch) {
  // Setting this option to 'true' makes a considerable difference in average query latency (> 100ms).
  // When we score the complete doc, we first find the lowest docID in the array, and then scan the array for that docID, and completely score it.
  // All lists from which the docID was scored have their list pointers moved forward.
  // When we don't score the complete doc, at each turn of the while loop, we find a partial score of the lowest docID posting.
  // We add these together for a particular docID to get the complete score -- but it requires several iterations of the main while loop.
  // This is less efficient, since we have to do a complete linear search through the array for every posting.
  // On the other hand, when we score the complete doc right away, we only have to do one more linear search through all postings to score all the lists.
  // Clearly, if the majority of the docIDs are present in more than one list, we'll be getting a speedup.
  const bool kScoreCompleteDoc = true;

  // Use an array instead of a heap for selecting the list with the lowest docID at each step.
  // Using a heap for picking the list with the lowest docID is only implemented for when 'kScoreCompleteDoc' is false.
  // For compatibility with 'kScoreCompleteDoc' equal to true, you'd need to use the heap to choose the next list to score, instead of iterating through the array, which is what's done now.
  // Array based method is faster than the heap based method for choosing the lowest docID from all the lists, so this option should be set to 'true'.
  // TODO: Try another array based strategy: keep a sorted array of docIDs. When updating, only need to find the spot for the new docID and re-sort the array up to that spot.
  // TODO: Can also use a linked list for this. Then can just find the right spot, and do pointer changes. The locality here wouldn't be too good though.
  const bool kUseArrayInsteadOfHeapList = true;

  int total_num_results = 0;

  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
  const float kBm25K1 =  2.0;  // k1
  const float kBm25B = 0.75;   // b

  // We can precompute a few of the BM25 values here.
  const float kBm25NumeratorMul = kBm25K1 + 1;
  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

  // BM25 components.
  float bm25_sum = 0;  // The BM25 sum for the current document we're processing in the intersection.
  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in OR semantics.
  float partial_bm25_sum;
  int doc_len;
  uint32_t f_d_t;

  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
  float idf_t[num_lists];  // Using a variable length array here.
  int num_docs_t;
  for (int i = 0; i < num_lists; ++i) {
	// If for the pruning project, then use the same overall statistics for all the pruned index.
	if(pruningProjectSwitch){
		num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
	}
	// If not for the pruning project, we can use what the index has.
	else{
		num_docs_t = lists[i]->num_docs_complete_list();
	}

	cout << "i:" << i << " num_docs_t:" << num_docs_t << endl;

    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
  }

  // We use this to get the next lowest docID from all the lists.
  pair<uint32_t, int> lists_curr_postings[num_lists];  // Using a variable length array here.
  int num_lists_remaining = 0;  // The number of lists with postings remaining.
  for (int i = 0; i < num_lists; ++i) {
    uint32_t curr_doc_id;
    if ((curr_doc_id = lists[i]->NextGEQ(0)) < ListData::kNoMoreDocs) {
      lists_curr_postings[num_lists_remaining++] = make_pair(curr_doc_id, i);
    }
  }

  if (num_lists_remaining > 0) {
    if (!kUseArrayInsteadOfHeapList) {
      // We use our own comparator, that only checks the docID part.
      make_heap(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListMaxDocIdCompare());
    }
  } else {
    return total_num_results;
  }

  // For the heap based method, the lowest element will always be the first element in the array.
  // So we can keep 'top' constant since it's just a pointer to the first element and just push/pop the heap.
  // For the array based method, we need to initialize it to the first element in the array, and then find the lowest value at the top of the while loop.
  // We have to find the lowest element here as well, since we need to initialize 'curr_doc_id' to the right value before we start the loop.
  pair<uint32_t, int>* top = &lists_curr_postings[0];
  if (kUseArrayInsteadOfHeapList) {
    for (int i = 1; i < num_lists_remaining; ++i) {
      if (lists_curr_postings[i].first < top->first) {
        top = &lists_curr_postings[i];
      }
    }
  }

  int i;
  uint32_t curr_doc_id = top->first;  // Current docID we're processing the score for.

  while (num_lists_remaining) {
    if (kUseArrayInsteadOfHeapList) {
      top = &lists_curr_postings[0];
      for (i = 1; i < num_lists_remaining; ++i) {
        if (lists_curr_postings[i].first < top->first) {
          top = &lists_curr_postings[i];
        }
      }
    }

    if (kScoreCompleteDoc)
    {
      curr_doc_id = top->first;
      bm25_sum = 0;

      cout << curr_doc_id << " ";
      // Can start searching from the position of 'top' since it'll be the first lowest element in the array.
      while (top != &lists_curr_postings[num_lists_remaining]) {
        if (top->first == curr_doc_id) {

          // Compute BM25 score from frequencies.
          f_d_t = lists[top->second]->GetFreq();
          doc_len = index_reader_.document_map().GetDocumentLength(top->first);
          partial_bm25 = idf_t[top->second] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
          //debug option1:
          //cout << "f_d_t:" << f_d_t << " doc_len:" << doc_len << " idf_t[i]:" << idf_t[i] << " partial_bm25:"<< partial_bm25 << " ";
          //debug option2:
          cout << partial_bm25 << " ";

          bm25_sum += partial_bm25;

          ++num_postings_scored_;

          if ((top->first = lists[top->second]->NextGEQ(top->first + 1)) == ListData::kNoMoreDocs) {
            // Need to compact the array by one.
            // Just copy over the last value in the array and overwrite the top value, since we'll be removing it.
            // Now, we can declare our list one shorter.
            // If top happens to already point to the last value in the array, this step is superfluous.
          --num_lists_remaining;
            *top = lists_curr_postings[num_lists_remaining];
            --top;
          }
        }
        ++top;
      }
      cout << endl;

      // Need to keep track of the top-k documents.
      if (total_num_results < num_results) {
        // We insert a document if we don't have k documents yet.
        results[total_num_results] = make_pair(bm25_sum, curr_doc_id);
        push_heap(results, results + total_num_results + 1, ResultCompare());
      } else {
        if (bm25_sum > results->first) {
          // We insert a document only if it's score is greater than the minimum scoring document in the heap.
          pop_heap(results, results + num_results, ResultCompare());
          results[num_results - 1].first = bm25_sum;
          results[num_results - 1].second = curr_doc_id;
          push_heap(results, results + num_results, ResultCompare());
        }
      }
      ++total_num_results;
    }
    else
    {
      // Compute BM25 score from frequencies.
      f_d_t = lists[top->second]->GetFreq();
      doc_len = index_reader_.document_map().GetDocumentLength(top->first);
      partial_bm25_sum = idf_t[top->second] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

      ++num_postings_scored_;

#ifdef QUERY_PROCESSOR_DEBUG
      // Set 'kScoreCompleteDoc' to false to use this code path.
      cout << "doc_id: " << top->first << ", bm25: " << partial_bm25_sum << endl;
#endif

      // When we encounter the same docID as the one we've been processing, we update its score.
      // Otherwise, we know we're processing a new docID.
      if (top->first == curr_doc_id) {
        bm25_sum += partial_bm25_sum;
      } else if (top->first > curr_doc_id) {
        // Need to keep track of the top-k documents.
        if (total_num_results < num_results) {
          // We insert a document if we don't have k documents yet.
          results[total_num_results] = make_pair(bm25_sum, curr_doc_id);
          push_heap(results, results + total_num_results + 1, ResultCompare());
        } else {
          if (bm25_sum > results->first) {
            // We insert a document only if its score is greater than the minimum scoring document in the heap.
            pop_heap(results, results + num_results, ResultCompare());
            results[num_results - 1].first = bm25_sum;
            results[num_results - 1].second = curr_doc_id;
            push_heap(results, results + num_results, ResultCompare());
          }
        }

        curr_doc_id = top->first;
        bm25_sum = partial_bm25_sum;
        ++total_num_results;
      } else {
        assert(false);
      }

      uint32_t next_doc_id;
      if ((next_doc_id = lists[top->second]->NextGEQ(top->first + 1)) < ListData::kNoMoreDocs) {
        if (kUseArrayInsteadOfHeapList) {
          top->first = next_doc_id;
        } else {
          // Need to pop and push to make sure heap property is maintained.
          pop_heap(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListMaxDocIdCompare());
          lists_curr_postings[num_lists_remaining - 1].first = next_doc_id;
          push_heap(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListMaxDocIdCompare());
        }
      } else {
        if (kUseArrayInsteadOfHeapList) {
          // Need to compact the array by one.
          // Just copy over the last value in the array and overwrite the top value, since we'll be removing it.
          // Now, we can declare our list one shorter.
          // If top happens to already point to the last value in the array, this step is superfluous.
          *top = lists_curr_postings[num_lists_remaining - 1];
        } else {
          pop_heap(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListMaxDocIdCompare());
        }

        --num_lists_remaining;
      }
    }
  }

  if (!kScoreCompleteDoc) {
    // We always have a leftover result that we need to insert.
    // Note that there is no need to push the heap since we'll just be sorting all the results by their score next.
    if (total_num_results < num_results) {
      // We insert a document if we don't have k documents yet.
      results[total_num_results] = make_pair(bm25_sum, curr_doc_id);
    } else {
      if (bm25_sum > results->first) {
        // We insert a document only if its score is greater than the minimum scoring document in the heap.
        pop_heap(results, results + num_results, ResultCompare());
        results[num_results - 1].first = bm25_sum;
        results[num_results - 1].second = curr_doc_id;
      }
    }
    ++total_num_results;
  }

  // Sort top-k results in descending order by document score.
  sort(results, results + min(num_results, total_num_results), ResultCompare());

  return total_num_results;
}

// The two tiered WAND first merges (OR mode) and scores the top docs lists, so that we know the k-th threshold (a better approximation of the lower bound).
// TODO: It seems to me that a two tiered WAND doesn't save us any computation. We'll be evaluating the top docs lists and then we'll be able to skip some docIDs from the 2nd layers.
//       NOTE: It WOULD save computation if the number of crappy, low scoring docIDs we'll be able to skip (due to an initially high threshold)
//             exceeds the extra number of top docs docIDs we had to compute scores for.
//       It would be really beneficial if you could decrease the upperbounds on the term lists for the 2nd layers
//       (which you can't unless you don't discard the top docs and store them in accumulators).
// In standard WAND, the k-th threshold is initialized to 0.
int LocalQueryProcessor::MergeListsWand(LexiconData** query_term_data, int num_query_terms, Result* results, int* num_results, bool two_tiered) {
  // Runs the WAND (Weak AND) dynamic pruning algorithm over the query term lists and
  // fills 'results' with the top-k documents ranked by BM25.
  //
  // 'query_term_data': lexicon entries, one per query term.
  // 'num_query_terms': number of entries in 'query_term_data'.
  // 'results': caller-provided array with room for at least '*num_results' entries; on
  //            return it holds (score, docID) pairs sorted in descending score order.
  // 'num_results': in: the requested k; out: min(k, number of results found).
  // 'two_tiered': when true, first processes the top docs layers in OR mode to derive an
  //               initial top-k threshold before running WAND over the overlapping layers.
  // Returns the total number of docIDs that were considered for the top-k (may exceed k).

  // Constraints on the type of index we expect.
  assert(index_layered_);
  assert(index_overlapping_layers_);
  assert(index_num_layers_ == 2);

  const int kMaxNumResults = *num_results;

  // Holds a pointer to the list for each corresponding query term.
  ListData* list_data_pointers[num_query_terms];  // Using a variable length array here.

  // For WAND to work correctly, need term upperbounds on the whole list.
  float list_thresholds[num_query_terms];  // Using a variable length array here.

  bool single_term_query = false;
  if (num_query_terms == 1) {
    single_term_query = true;
  }

  for (int i = 0; i < num_query_terms; ++i) {
    // Open the first layer (the top docs).
    list_data_pointers[i] = index_reader_.OpenList(*query_term_data[i], 0, single_term_query);
    list_thresholds[i] = list_data_pointers[i]->score_threshold();
#ifdef IRTK_DEBUG
    cout << "Top Docs Layer for '" << string(query_term_data[i]->term(), query_term_data[i]->term_len())
        << "', Layer Num: 0, Score Threshold: " << list_data_pointers[i]->score_threshold()
        << ", Num Docs: " << list_data_pointers[i]->num_docs()
        << ", Num Blocks: " << list_data_pointers[i]->num_blocks()
        << ", Num Chunks: " << list_data_pointers[i]->num_chunks() << endl;
#endif
  }

  int total_num_results = 0;
  if (num_query_terms == 1 && two_tiered) {  // We do this optimization only if we have explicitly specified two-tier mode.
    // Do standard DAAT OR mode processing, since WAND won't help.
    if (index_layered_ && query_term_data[0]->num_layers() == 2) {
      // We have two layers, so let's run the standard DAAT OR on the first layer only.
      // If there are k results, we can stop; otherwise rerun the query on the second layer.
      total_num_results = MergeLists(list_data_pointers, num_query_terms, results, kMaxNumResults,false);
      if (total_num_results < kMaxNumResults) {
        index_reader_.CloseList(list_data_pointers[0]);
        list_data_pointers[0] = index_reader_.OpenList(*query_term_data[0], query_term_data[0]->num_layers() - 1, single_term_query);
        total_num_results = MergeLists(list_data_pointers, num_query_terms, results, kMaxNumResults,false);
      }
    } else {
      // There is only one layer, run the query on it.
      total_num_results = MergeLists(list_data_pointers, num_query_terms, results, kMaxNumResults,false);
    }
  } else {
    /*
     * We can estimate the threshold after processing the top docs lists in OR mode, but we can't decrease the upperbounds on the 2nd layers
     * because this will result in many of our high scoring documents to be skipped from the 2nd layers (including the ones from the top docs lists).
     *
     * TODO: What are some ways of decreasing the upperbound on the 2nd layers...?
     */

    float threshold = 0;
    if (two_tiered) {
      // It's possible that after processing the top docs, there is an unresolved docID (only present in some of the top docs lists, but not in others)
      // that could have a score higher than the top-k threshold we derive here.
      // For this reason, we can't early terminate here if we get k results.
      int top_docs_num_results = MergeLists(list_data_pointers, num_query_terms, results, kMaxNumResults,false);
#ifdef IRTK_DEBUG
      cout << "Num results from top docs lists: " << top_docs_num_results << endl;
#endif

      // The k-th score in the heap we get from the union of the top docs layers is our starting threshold.
      // It is a lower bound for the score necessary for a new docID to make it into our top-k.
      // The threshold is 0 if we didn't get k results from the top docs layers, meaning any docID can make it into the top-k.
      threshold = (top_docs_num_results >= kMaxNumResults) ? results[kMaxNumResults - 1].first : 0;
#ifdef IRTK_DEBUG
      cout << "Threshold from top docs lists: " << threshold << endl;
#endif
    }

    // We have to make sure that the layers are overlapping. So we'll be traversing the top-docs twice (in the second overlapping layer).
    // This is necessary because we're not using accumulators for the top-docs lists. It's only an approximate lower bound score on the top docIDs, since
    // the docID may be present in other lists, that did not make it into the top-docs.
    for (int i = 0; i < num_query_terms; ++i) {
      if (query_term_data[i]->num_layers() == 1) {
        // For a single layered list, we'll have to traverse it again.
        list_data_pointers[i]->ResetList(single_term_query);
      } else {
        // For a dual layered list, we close the first layer and open the second layer.
        index_reader_.CloseList(list_data_pointers[i]);
        list_data_pointers[i] = index_reader_.OpenList(*query_term_data[i], query_term_data[i]->num_layers() - 1, single_term_query);
      }

#ifdef IRTK_DEBUG
      cout << "Overlapping Layer for '" << string(query_term_data[i]->term(), query_term_data[i]->term_len())
          << "', Layer Num: " << (query_term_data[i]->num_layers() - 1)
          << ", Score Threshold: " << list_data_pointers[i]->score_threshold()
          << ", Num Docs: " << list_data_pointers[i]->num_docs()
          << ", Num Blocks: " << list_data_pointers[i]->num_blocks()
          << ", Num Chunks: " << list_data_pointers[i]->num_chunks() << endl;
#endif
    }

    // When 'true', advances all lists before the pivot (mWAND); when 'false', advances only the first list (original WAND).
    const bool kMWand = true;

    // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
    const float kBm25K1 =  2.0;  // k1
    const float kBm25B = 0.75;  // b

    // We can precompute a few of the BM25 values here.
    const float kBm25NumeratorMul = kBm25K1 + 1;
    const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
    const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

    // BM25 components.
    float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection.
    int doc_len;
    uint32_t f_d_t;

    // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
    float idf_t[num_query_terms];  // Using a variable length array here.
    int num_docs_t;
    for (int i = 0; i < num_query_terms; ++i) {
      num_docs_t = list_data_pointers[i]->num_docs_complete_list();
      idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
    }

    // We use this to get the next lowest docID from all the lists.
    pair<uint32_t, int> lists_curr_postings[num_query_terms]; // Using a variable length array here.
    int num_lists_remaining = 0; // The number of lists with postings remaining.
    uint32_t curr_doc_id;
    for (int i = 0; i < num_query_terms; ++i) {
      if ((curr_doc_id = list_data_pointers[i]->NextGEQ(0)) < ListData::kNoMoreDocs) {
        lists_curr_postings[num_lists_remaining++] = make_pair(curr_doc_id, i);
      }
    }

    int i, j;
    pair<uint32_t, int> pivot = make_pair(0, -1);  // The pivot can't be a pointer to the 'lists_curr_postings'
                                                   // since those values will change when we advance list pointers after scoring a docID.
    float pivot_weight;                            // The upperbound score on the pivot docID.

    /*
     * Two implementation choices here:
     * * Keep track of the number of lists remaining; requires an if statement after each nextGEQ() to check if we reached the max docID sentinel value (implemented here).
     * * Don't keep track of the number of lists remaining. Don't need if statement after each nextGEQ(), but need to sort all list postings at every turn.
     */
    while (num_lists_remaining) {
      // Sort current postings in non-descending order.
      // Can also sort all entries less than or equal to the pivot docID and merge with all higher docIDs.
      // Although probably won't be faster unless we have a significant number of terms in the query.
      sort(lists_curr_postings, lists_curr_postings + num_lists_remaining, ListDocIdCompare());

      // Select a pivot: the first list (in docID order) at which the accumulated term upperbounds reach the threshold.
      pivot_weight = 0;
      pivot.second = -1;
      for (i = 0; i < num_lists_remaining; ++i) {
        pivot_weight += list_thresholds[lists_curr_postings[i].second];
        if (pivot_weight >= threshold) {
          pivot = lists_curr_postings[i];
          break;
        }
      }

      /*
      // If using this, change the while condition to true. Don't need to check for sentinel value after NextGEQ(),
      // but need to sort all the list postings at each step.
      if(pivot.first == ListData::kNoMoreDocs) {
        break;
      }
      */

      // If we don't have a pivot (the pivot list is -1), or if the pivot docID is the sentinel value for no more docs,
      // it means that no newly encountered docID can make it into the top-k and we can quit.
      if (pivot.second == -1) {
        break;
      }

      if (pivot.first == lists_curr_postings[0].first) {
        // We have enough weight on the pivot, so score all docIDs equal to the pivot (these can be beyond the pivot as well).
        // We know we have enough weight when the docID at the pivot list equals the docID at the first list.
        bm25_sum = 0;
        for(i = 0; i < num_lists_remaining && pivot.first == lists_curr_postings[i].first; ++i) {
          // Compute the BM25 score from frequencies.
          f_d_t = list_data_pointers[lists_curr_postings[i].second]->GetFreq();
          doc_len = index_reader_.document_map().GetDocumentLength(lists_curr_postings[i].first);
          bm25_sum += idf_t[lists_curr_postings[i].second] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

          ++num_postings_scored_;

          // Advance list pointer.
          if ((lists_curr_postings[i].first = list_data_pointers[lists_curr_postings[i].second]->NextGEQ(lists_curr_postings[i].first + 1)) == ListData::kNoMoreDocs) {
            // Compact the array. Move the current posting to the end.
            --num_lists_remaining;
            pair<uint32_t, int> curr = lists_curr_postings[i];
            for(j = i; j < num_lists_remaining; ++j) {
              lists_curr_postings[j] = lists_curr_postings[j+1];
            }
            lists_curr_postings[num_lists_remaining] = curr;
            --i;
          }
        }

        // Decide whether docID makes it into the top-k.
        if (total_num_results < kMaxNumResults) {
          // We insert a document if we don't have k documents yet.
          results[total_num_results] = make_pair(bm25_sum, pivot.first);
          push_heap(results, results + total_num_results + 1, ResultCompare());
          if (total_num_results + 1 == kMaxNumResults) {
            // The heap just filled up to k entries; its minimum score is now a valid lower bound
            // on the final k-th score, so we can raise the pruning threshold right away instead
            // of waiting for a later document to beat the heap minimum.
            // Take the max so we never drop below the top docs derived threshold.
            threshold = max(threshold, results->first);
          }
        } else {
          if (bm25_sum > results->first) {
            // We insert a document only if its score is greater than the minimum scoring document in the heap.
            pop_heap(results, results + kMaxNumResults, ResultCompare());
            results[kMaxNumResults - 1].first = bm25_sum;
            results[kMaxNumResults - 1].second = pivot.first;
            push_heap(results, results + kMaxNumResults, ResultCompare());

            // Update the threshold: the new heap minimum is the current k-th best score.
            // Take the max so the threshold never decreases below an already established bound.
            threshold = max(threshold, results->first);
          }
        }
        ++total_num_results;
      } else {
        // We don't have enough weight on the pivot yet. We know this is true when the docID from the first list != docID at the pivot.
        // There are two simple strategies that we can employ:
        // * Advance any one list before the pivot (just choose the first list). This is the original WAND algorithm.
        // * Advance all lists before the pivot (saves a few sorting operations at the cost of less list skipping). This is the mWAND algorithm.
        //   Main point is that index accesses are cheaper when the index is in main memory, so we try to do less list pointer sorting operations instead.
        // In both strategies, we advance the list pointer(s) at least to the pivot docID.
        if (kMWand) {
          for (i = 0; i < num_lists_remaining; ++i) {
            // Advance list pointer.
            if ((lists_curr_postings[i].first = list_data_pointers[lists_curr_postings[i].second]->NextGEQ(pivot.first)) == ListData::kNoMoreDocs) {
              // Compact the array. Move the current posting to the end.
              --num_lists_remaining;
              pair<uint32_t, int> curr = lists_curr_postings[i];
              for (j = i; j < num_lists_remaining; ++j) {
                lists_curr_postings[j] = lists_curr_postings[j + 1];
              }
              lists_curr_postings[num_lists_remaining] = curr;
              --i;
            }
          }
        } else {
          if ((lists_curr_postings[0].first = list_data_pointers[lists_curr_postings[0].second]->NextGEQ(pivot.first)) == ListData::kNoMoreDocs) {
            // Just swap the current posting with the one at the end of the array.
            // We'll be sorting at the start of the loop, so we don't need to compact and keep the order of the postings.
            --num_lists_remaining;
            pair<uint32_t, int> curr = lists_curr_postings[0];
            lists_curr_postings[0] = lists_curr_postings[num_lists_remaining];
            lists_curr_postings[num_lists_remaining] = curr;
          }
        }
      }
    }
  }

  // Sort top-k results in descending order by document score.
  sort(results, results + min(kMaxNumResults, total_num_results), ResultCompare());

  *num_results = min(total_num_results, kMaxNumResults);
  for (int i = 0; i < num_query_terms; ++i) {
    index_reader_.CloseList(list_data_pointers[i]);
  }
  return total_num_results;
}

// TODO:
// Difference between MaxScore and WAND is that once the threshold is sufficient enough, MaxScore will ignore the rest of the new docIDs in lists
// whose upperbounds indicate that they can't make it into the top-k.
//
// WAND is more akin to AND mode, since we move all list pointers to a common docID before scoring a document (unless we skip it); the difference being that we don't require the query terms to
// appear in all docIDs. Here, we skip scoring whole docIDs.
//
// MaxScore is more akin to OR mode, since we score a posting as soon as we reach it in the postings list (with the exception that we are able to skip scoring some postings).
// Here, we skip scoring individual postings.
//
// Use the MaxScore and Two Level MaxScore algorithms.
int LocalQueryProcessor::MergeListsMaxScore(LexiconData** query_term_data, int num_query_terms, Result* results, int* num_results, bool two_tiered) {
  // Constraints on the type of index we expect.
  assert(index_layered_);
  assert(index_overlapping_layers_);
  assert(index_num_layers_ == 2);

  const int kMaxNumResults = *num_results;

  // Holds a pointer to the list for each corresponding query term.
  ListData* list_data_pointers[num_query_terms];  // Using a variable length array here.

  // For MaxScore to work correctly, need term upperbounds on the whole list.
  float list_thresholds[num_query_terms];  // Using a variable length array here.

  bool single_term_query = false;
  if (num_query_terms == 1) {
    single_term_query = true;
  }

  for (int i = 0; i < num_query_terms; ++i) {
    // Open the first layer (the top docs).
    list_data_pointers[i] = index_reader_.OpenList(*query_term_data[i], 0, single_term_query);
    list_thresholds[i] = list_data_pointers[i]->score_threshold();
#ifdef IRTK_DEBUG
    cout << "Top Docs Layer for '" << string(query_term_data[i]->term(), query_term_data[i]->term_len())
        << "', Layer Num: 0, Score Threshold: " << list_data_pointers[i]->score_threshold()
        << ", Num Docs: " << list_data_pointers[i]->num_docs()
        << ", Num Blocks: " << list_data_pointers[i]->num_blocks()
        << ", Num Chunks: " << list_data_pointers[i]->num_chunks() << endl;
#endif
  }

  int total_num_results = 0;
  if (num_query_terms == 1 && two_tiered) {  // We do this optimization only if we have explicitly specified two-tier mode.
    // Do standard DAAT OR mode processing, since Max Score won't help.
    if (index_layered_ && query_term_data[0]->num_layers() == 2) {
      // We have two layers, so let's run the standard DAAT OR on the first layer only.
      // If there are k results, we can stop; otherwise rerun the query on the second layer.
      total_num_results = MergeLists(list_data_pointers, num_query_terms, results, kMaxNumResults,false);
      if (total_num_results < kMaxNumResults) {
        index_reader_.CloseList(list_data_pointers[0]);
        list_data_pointers[0] = index_reader_.OpenList(*query_term_data[0], query_term_data[0]->num_layers() - 1, single_term_query);
        total_num_results = MergeLists(list_data_pointers, num_query_terms, results, kMaxNumResults,false);
      }
    } else {
      // There is only one layer, run the query on it.
      total_num_results = MergeLists(list_data_pointers, num_query_terms, results, kMaxNumResults,false);
    }
  } else {
    /*
     * We can estimate the threshold after processing the top docs lists in OR mode, but we can't decrease the upperbounds on the 2nd layers
     * because this will result in many of our high scoring documents to be skipped from the 2nd layers (including the ones from the top docs lists).
     *
     * TODO: What are some ways of decreasing the upperbound on the 2nd layers...?
     */

    float threshold = 0;
    if (two_tiered) {
      // It's possible that after processing the top docs, there is an unresolved docID (only present in some of the top docs lists, but not in others)
      // that could have a score higher than the top-k threshold we derive here.
      // For this reason, we can't early terminate here if we get k results.
      int top_docs_num_results = MergeLists(list_data_pointers, num_query_terms, results, kMaxNumResults,false);
#ifdef IRTK_DEBUG
      cout << "Num results from top docs lists: " << top_docs_num_results << endl;
#endif

      // The k-th score in the heap we get from the union of the top docs layers is our starting threshold.
      // It is a lower bound for the score necessary for a new docID to make it into our top-k.
      // The threshold is 0 if we didn't get k results from the top docs layers, meaning any docID can make it into the top-k.
      threshold = (top_docs_num_results >= kMaxNumResults) ? results[kMaxNumResults - 1].first : 0;
#ifdef IRTK_DEBUG
      cout << "Threshold from top docs lists: " << threshold << endl;
#endif
    }

    // We have to make sure that the layers are overlapping. So we'll be traversing the top-docs twice (in the second overlapping layer).
    // This is necessary because we're not using accumulators for the top-docs lists. It's only an approximate lower bound score on the top docIDs, since
    // the docID may be present in other lists, that did not make it into the top-docs.
    for (int i = 0; i < num_query_terms; ++i) {
      if (query_term_data[i]->num_layers() == 1) {
        // For a single layered list, we'll have to traverse it again.
        list_data_pointers[i]->ResetList(single_term_query);
      } else {
        // For a dual layered list, we close the first layer and open the second layer.
        index_reader_.CloseList(list_data_pointers[i]);
        list_data_pointers[i] = index_reader_.OpenList(*query_term_data[i], query_term_data[i]->num_layers() - 1, single_term_query);
      }

#ifdef IRTK_DEBUG
      cout << "Overlapping Layer for '" << string(query_term_data[i]->term(), query_term_data[i]->term_len())
          << "', Layer Num: " << (query_term_data[i]->num_layers() - 1)
          << ", Score Threshold: " << list_data_pointers[i]->score_threshold()
          << ", Num Docs: " << list_data_pointers[i]->num_docs()
          << ", Num Blocks: " << list_data_pointers[i]->num_blocks()
          << ", Num Chunks: " << list_data_pointers[i]->num_chunks() << endl;
#endif
    }

    // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
    const float kBm25K1 =  2.0;  // k1
    const float kBm25B = 0.75;   // b

    // We can precompute a few of the BM25 values here.
    const float kBm25NumeratorMul = kBm25K1 + 1;
    const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
    const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

    // BM25 components.
    float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection.
    int doc_len;
    uint32_t f_d_t;

    // For use with score skipping.
    float remaining_upperbound;

    // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
    float idf_t[num_query_terms];  // Using a variable length array here.
    int num_docs_t;
    for (int i = 0; i < num_query_terms; ++i) {
      num_docs_t = list_data_pointers[i]->num_docs_complete_list();
      idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
    }

    // We use this to get the next lowest docID from all the lists.
    uint32_t lists_curr_postings[num_query_terms];  // Using a variable length array here.
    for (int i = 0; i < num_query_terms; ++i) {
      lists_curr_postings[i] = list_data_pointers[i]->NextGEQ(0);
    }

    pair<float, int> list_upperbounds[num_query_terms];  // Using a variable length array here.
    int num_lists_remaining = 0;  // The number of lists with postings remaining.
    for (int i = 0; i < num_query_terms; ++i) {
      if (lists_curr_postings[i] != ListData::kNoMoreDocs) {
        list_upperbounds[num_lists_remaining++] = make_pair(list_thresholds[i], i);
      }
    }

    sort(list_upperbounds, list_upperbounds + num_lists_remaining, greater<pair<float, int> > ());

    // Precalculate the upperbounds for all possibilities.
    for (int i = num_lists_remaining - 2; i >= 0; --i) {
      list_upperbounds[i].first += list_upperbounds[i + 1].first;
    }

    /*// When a list has no more postings remaining, we can remove it right away, or wait until we iterated through the rest of the lists,
    // and remove any that have no more postings remaining. Removing them after iterating through all lists required an additional if statement.
    // What's odd is that when we remove the threshold checks (so that we can no longer early terminate), setting this option to 'false'
    // performs about 2ms faster (we wouldn't expect it to because of the extra if statement). However, when the threshold checks are in place,
    // setting this option to 'true' performs slightly faster (1-2ms). As far as I can tell, both do the same thing.
    const bool kCompactArrayRightAway = false;*/

    // When 'true', enables the use of embedded list score information to provide further efficiency gains
    // through better list skipping and less scoring computations.
    const bool kScoreSkipping = false;

    // Defines the score skipping mode to use.
    // '0' means use block score upperbounds.
    // '1' means use chunk score upperbounds.
#define SCORE_SKIPPING_MODE 1

    int i, j;
    int curr_list_idx;
    pair<float, int>* top;
    uint32_t curr_doc_id;  // Current docID we're processing the score for.
    /*bool compact_upperbounds = false;*/

    while (num_lists_remaining) {
      // Check if we can early terminate. This might happen only after we have finished traversing at least one list.
      // This is because our upperbounds don't decrease unless we are totally finished traversing one list.
      // Must check this since we initialize top to point to the first element in the list upperbounds array by default.
      if (threshold > list_upperbounds[0].first) {
        break;
      }

      top = &list_upperbounds[0];
      if (kScoreSkipping && threshold > list_upperbounds[1].first) {
#ifdef MAX_SCORE_DEBUG
        cout << "Current threshold: " << threshold << endl;
        cout << "Remaining upperbound: " << list_upperbounds[1].first << endl;
#endif

        // Only the first (highest scoring) list can contain a docID that can still make it into the top-k,
        // so we move the first list to the first docID that has an upperbound that will allow it to make it into the top-k.
#if SCORE_SKIPPING_MODE == 0
        if ((lists_curr_postings[0] = list_data_pointers[top->second]->NextGreaterBlockScore(threshold - list_upperbounds[1].first)) == ListData::kNoMoreDocs) {
#elif SCORE_SKIPPING_MODE == 1
        if ((lists_curr_postings[0] = list_data_pointers[top->second]->NextGreaterChunkScore(threshold - list_upperbounds[1].first)) == ListData::kNoMoreDocs) {
#endif
          // Can early terminate at this point.
          break;
        }
      } else {
        // Find the lowest docID that can still possibly make it into the top-k (while being able to make it into the top-k).
        for (i = 1; i < num_lists_remaining; ++i) {
          curr_list_idx = list_upperbounds[i].second;
          if (threshold > list_upperbounds[i].first) {
            break;
          }

          if (lists_curr_postings[curr_list_idx] < lists_curr_postings[top->second]) {
            top = &list_upperbounds[i];
          }
        }
      }

      // At this point, 'curr_doc_id' can either not be able to exceed the threshold score, or it can be the max possible docID sentinel value.
      curr_doc_id = lists_curr_postings[top->second];

      // We score a docID fully here, making any necessary lookups right away into other lists.
      // Disadvantage with this approach is that you'll be doing a NextGEQ() more than once for some lists on the same docID.
      bm25_sum = 0;
      for (i = 0; i < num_lists_remaining; ++i) {
        curr_list_idx = list_upperbounds[i].second;

        // Check if we can early terminate the scoring of this particular docID.
        if (threshold > bm25_sum + list_upperbounds[i].first) {
          break;
        }

        // Move to the curr docID we're scoring.
        lists_curr_postings[curr_list_idx] = list_data_pointers[curr_list_idx]->NextGEQ(curr_doc_id);

        if (lists_curr_postings[curr_list_idx] == curr_doc_id) {
          // Use the tighter score bound we have on the current list to see if we can early terminate the scoring of this particular docID.
          if (kScoreSkipping) {
            // TODO: To avoid the (i == num_lists_remaining - 1) test, can insert a dummy list with upperbound 0.
            remaining_upperbound = (i == num_lists_remaining - 1) ? 0 : list_upperbounds[i + 1].first;
#if SCORE_SKIPPING_MODE == 0
            if (threshold > bm25_sum + list_data_pointers[curr_list_idx]->GetBlockScoreBound() + remaining_upperbound) {
#elif SCORE_SKIPPING_MODE == 1
            if (threshold > bm25_sum + list_data_pointers[curr_list_idx]->GetChunkScoreBound() + remaining_upperbound) {
#endif
#ifdef MAX_SCORE_DEBUG
              cout << "Short circuiting evaluation of docID: " << curr_doc_id << " from list with " << list_data_pointers[curr_list_idx]->num_docs()
                  << " postings" << endl;
              cout << "Current BM25 sum: " << bm25_sum << endl;
              cout << "Current chunk bound for docID " << curr_doc_id << " is: " << list_data_pointers[curr_list_idx]->GetChunkScoreBound() << endl;
              cout << "Current threshold: " << threshold << endl;
              cout << "Remaining upperbound: " << remaining_upperbound << endl;
#endif

              // Can now move the list pointer further.
              lists_curr_postings[curr_list_idx] = list_data_pointers[curr_list_idx]->NextGEQ(lists_curr_postings[curr_list_idx] + 1);
              if (lists_curr_postings[curr_list_idx] == ListData::kNoMoreDocs) {
                /*if (kCompactArrayRightAway) {*/
                  --num_lists_remaining;
                  float curr_list_upperbound = list_thresholds[curr_list_idx];

                  // Compact the list upperbounds array.
                  for (j = i; j < num_lists_remaining; ++j) {
                    list_upperbounds[j] = list_upperbounds[j + 1];
                  }

                  // Recalculate the list upperbounds. Note that we only need to recalculate those entries less than i.
                  for (j = 0; j < i; ++j) {
                    list_upperbounds[j].first -= curr_list_upperbound;
                  }
                  --i;
                /*} else {
                  compact_upperbounds = true;
                }*/
              }

              break;
            }
          }

          // Compute BM25 score from frequencies.
          f_d_t = list_data_pointers[curr_list_idx]->GetFreq();
          doc_len = index_reader_.document_map().GetDocumentLength(lists_curr_postings[curr_list_idx]);
          bm25_sum += idf_t[curr_list_idx] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

          ++num_postings_scored_;

          // Can now move the list pointer further.
          lists_curr_postings[curr_list_idx] = list_data_pointers[curr_list_idx]->NextGEQ(lists_curr_postings[curr_list_idx] + 1);
        }

        if (lists_curr_postings[curr_list_idx] == ListData::kNoMoreDocs) {
          /*if (kCompactArrayRightAway) {*/
            --num_lists_remaining;
            float curr_list_upperbound = list_thresholds[curr_list_idx];

            // Compact the list upperbounds array.
            for (j = i; j < num_lists_remaining; ++j) {
              list_upperbounds[j] = list_upperbounds[j + 1];
            }

            // Recalculate the list upperbounds. Note that we only need to recalculate those entries less than i.
            for (j = 0; j < i; ++j) {
              list_upperbounds[j].first -= curr_list_upperbound;
            }
            --i;
          /*} else {
            compact_upperbounds = true;
          }*/
        }
      }

      // Need to keep track of the top-k documents.
      if (total_num_results < kMaxNumResults) {
        // We insert a document if we don't have k documents yet.
        results[total_num_results] = make_pair(bm25_sum, curr_doc_id);
        push_heap(results, results + total_num_results + 1, ResultCompare());
      } else {
        if (bm25_sum > results->first) {
          // We insert a document only if it's score is greater than the minimum scoring document in the heap.
          pop_heap(results, results + kMaxNumResults, ResultCompare());
          results[kMaxNumResults - 1].first = bm25_sum;
          results[kMaxNumResults - 1].second = curr_doc_id;
          push_heap(results, results + kMaxNumResults, ResultCompare());

          // Update the threshold.
          threshold = results->first;
        }
      }
      ++total_num_results;

      /*if (!kCompactArrayRightAway) {
        if (compact_upperbounds) {
          int num_lists = num_lists_remaining;
          num_lists_remaining = 0;
          for (i = 0; i < num_lists; ++i) {
            curr_list_idx = list_upperbounds[i].second;
            if (lists_curr_postings[curr_list_idx] != ListData::kNoMoreDocs) {
              list_upperbounds[num_lists_remaining++] = make_pair(list_thresholds[curr_list_idx], curr_list_idx);
            }
          }

          sort(list_upperbounds, list_upperbounds + num_lists_remaining, greater<pair<float, int> > ());

          // Precalculate the upperbounds for all possibilities.
          for (i = num_lists_remaining - 2; i >= 0; --i) {
            list_upperbounds[i].first += list_upperbounds[i + 1].first;
          }

          compact_upperbounds = false;
        }
      }*/
    }
  }

  // Sort top-k results in descending order by document score.
  sort(results, results + min(kMaxNumResults, total_num_results), ResultCompare());

  *num_results = min(total_num_results, kMaxNumResults);
  for (int i = 0; i < num_query_terms; ++i) {
    index_reader_.CloseList(list_data_pointers[i]);
  }
  return total_num_results;
}

int LocalQueryProcessor::IntersectLists(ListData** lists, int num_lists, Result* results, int num_results) {
	// Dispatch to the appropriate full intersection routine (no merge lists)
	// depending on whether this index carries externally pre-computed scores.
	if (index_use_precomputed_score_) {
		// Pre-computed score variant (Wei); the trailing 0 is the score threshold.
		return IntersectLists(NULL, 0, lists, num_lists, results, num_results, false, 0);
	}
	// Original online-scoring variant (Roman).
	return IntersectLists(NULL, 0, lists, num_lists, results, num_results, false);
}

int LocalQueryProcessor::IntersectListsForModifyingPreComputedScores(LexiconData** query_term_data, ListData** lists, int num_lists, string mode,POSTING_RESULT* posting_results){
	// Dispatcher: modifying pre-computed scores only makes sense when the index
	// actually stores external pre-computed scores, so the fallback path is fatal.
	if(index_use_precomputed_score_)
	{
		return IntersectListsForModifyingPreComputedScores(query_term_data, NULL, 0, lists, num_lists, true, mode,posting_results); //new version using pre-computed scores by Wei.
	}
	else
	{
		// Fix: fatal diagnostics go to stderr, not stdout, so they are not
		// interleaved with (or mistaken for) query result output.
		cerr << "This set of indexes do NOT have external pre-computed scores stored." << endl;
		cerr << "Please select another set of indexes." << endl;
		exit(1);
		// the last argument is true for pruning project.
		// return IntersectLists(NULL, 0, lists, num_lists, results, num_results, true); //original version by Roman.
	}
}


int LocalQueryProcessor::IntersectListsEspecaillyForPruningProject(ListData** lists, int num_lists, Result* results, int num_results) {
	// Pruning-project dispatcher: routes to the pre-computed-score intersection
	// or to the online-scoring intersection with pruning enabled.
	cout << "index_use_precomputed_score_:" << index_use_precomputed_score_ << endl;
	if (index_use_precomputed_score_) {
		// Pre-computed score variant (Wei). TODO: the pruning boolean is not
		// wired into this overload yet (noted 2012/07/26).
		return IntersectLists(NULL, 0, lists, num_lists, results, num_results, false, 0);
	}
	// Online-scoring variant (Roman); the trailing 'true' selects the
	// pruning-project behavior.
	return IntersectLists(NULL, 0, lists, num_lists, results, num_results, true);
}

int LocalQueryProcessor::IntersectListsEspecaillyForPruningProject2(ListData** lists, int num_lists, Result_Wei_2012* results, int num_results) {
	// Selects a pruning/threshold strategy from the configured computation method,
	// sets the corresponding threshold state on this object, and then dispatches to
	// either the external pre-computed-score intersection or the online SIGIR-2014
	// scoring intersection.
	//
	// Computation methods:
	//   0 - uniform threshold on the partial BM25 score.
	//   1 - uniform hard-coded threshold for partialBM25 {partialBM25, 3, 2_3, 1_3, 1_2_3}.
	//   2 - per-term TCP thresholds (prints the thresholds for the configured position).
	//   3 - not yet implemented.
	//   4 - per-term TCP QV thresholds (prints the thresholds for the configured position).
	//   5 - not yet implemented.
	//   6 - the ORIGINAL (unpruned) version.
	//   7 - simplified uniform-threshold variant of method 1 (relrank-based).
	int computation_method = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kUniversalComputationMethod));


	if (computation_method == 0){
		// Experimentally determined partialBM25 thresholds (threshold -> fraction kept):
		// 0.0 -> 1.0; 1.4121 -> 0.5; 2.1482 -> 0.3; 2.71155 -> 0.2; 3.1639 -> 0.15;
		// 3.77101 -> 0.1; 5.04156 -> 0.05; 6.20388 -> 0.03; 8.15333 -> 0.01.
		// (Intermediate values for 0.9-0.4 were measured and abandoned.)
		universal_threshold_socre_of_posting_ = 0.0;
		cout << "universal_threshold_socre_of_posting_(partialBM25): " << universal_threshold_socre_of_posting_ << endl;
	}
	else if(computation_method == 1){
		// Hard-coded threshold for partialBM25 {partialBM25, 3, 2_3, 1_3, 1_2_3};
		// 0.0 keeps 100% of postings. Earlier revisions read this value from the
		// kUniversalImportanceThresholdSocreOfPosting configuration entry
		// (optionally scaled by 1e6); both variants are deprecated.
		universal_threshold_socre_of_posting_ = 0.0; // 100% kept
		cout << "universal_threshold_socre_of_posting_:" << universal_threshold_socre_of_posting_ << endl;
	}
	else if(computation_method == 2){
		// TCP method: output the per-term thresholds for the configured position.
		indexPositionOfCorrespondingThresholdToUse_ = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kIndexPositionOfCorrespondingThresholdToUse));

		// Output format:
		// threshold_scores_for_each_term_based_on_partialBM25_: value1 value2 value3 value4 \n
		cout << "threshold_scores_for_each_term_based_on_partialBM25_:";
		// Fix: size_t loop index avoids a signed/unsigned comparison with size().
		for(size_t tempCounter = 0; tempCounter < queryTermPostionIndexPairs_.size(); tempCounter++){
			  cout << term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[tempCounter].first ][indexPositionOfCorrespondingThresholdToUse_] << " ";
		}
		cout << endl;

	}
	else if(computation_method == 3){
		// Not yet implemented.
	}
	else if(computation_method == 4){
		// TCP QV method: output the per-term thresholds for the configured position.
		indexPositionOfCorrespondingThresholdToUse_ = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kIndexPositionOfCorrespondingThresholdToUse));

		// Output format:
		// threshold_scores_for_each_term_based_on_partialBM25_: value1 value2 value3 value4 \n
		cout << "threshold_scores_for_each_term_based_on_partialBM25_:";
		// Fix: size_t loop index avoids a signed/unsigned comparison with size().
		for(size_t tempCounter = 0; tempCounter < queryTermPostionIndexPairs_.size(); tempCounter++){
			  cout << term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[tempCounter].first ][indexPositionOfCorrespondingThresholdToUse_] << " ";
		}
		cout << endl;
	}
	else if(computation_method == 5){
		// Not yet implemented.
	}
	else if(computation_method == 6){
		// The ORIGINAL (unpruned) version: no threshold state to set.
	}
	else if(computation_method == 7){
		// Simplified uniform-threshold variant of method 1 (hard-coded).
		// Many candidate threshold tables were evaluated between 2013/08 and
		// 2014/01 (1D, 2D, quadTree, piecewise, relrank variants with several
		// correction factors and stepGap settings); see version control history
		// for the full (threshold, fraction kept) tables.
		//
		// Currently best known method (relrank, COMPLETE 100K, stepGap 0.5,
		// 2014/01/13), thresholds in use:
		// 0.0 -> 1.0; 1.86066e-10 -> 0.5; 5.93962e-10 -> 0.3; 1.11172e-09 -> 0.2;
		// 1.56194e-09 -> 0.15; 2.49815e-09 -> 0.1; 4.75225e-09 -> 0.05;
		// 7.38717e-09 -> 0.03; 1.74678e-08 -> 0.01.
		universal_threshold_socre_of_posting_ = 0.0;
		cout << "universal_threshold_socre_of_posting_: " << universal_threshold_socre_of_posting_ << endl;
	}
	else{
		cout << "Not supported operation" << endl;
	}
	cout << "computation_method: " << computation_method << endl;

	if(index_use_precomputed_score_)
	{
		// Scores are extracted directly from the external index, so the threshold
		// must be consistent in meaning with whatever score type was stored there.
		// The threshold is supplied dynamically rather than at compile time.
		return IntersectLists2ScoreExtractedFromExternal(NULL, 0, lists, num_lists, results, num_results, false); //new version using pre-computed scores by Wei.
	}
	else
	{
		return IntersectLists2ScoreComputeOnlineForSIGIR2014(NULL, 0, lists, num_lists, results, num_results, true, false,computation_method);
	}
}

int LocalQueryProcessor::MergeListsEspeciallyForPruningProject2(ListData** lists, int num_lists, Result_Wei_2012* results, int num_results){
	// OR-semantics (merge) counterpart of IntersectListsEspecaillyForPruningProject2:
	// selects the pruning/threshold strategy from the configured computation method,
	// sets the threshold state, then dispatches to the online SIGIR-2014 merge.
	// Supported methods here: 0 (uniform partialBM25), 2 (TCP), 4 (TCP QV),
	// 7 (relrank uniform); anything else is fatal.
	int computation_method = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kUniversalComputationMethod));


	if (computation_method == 0){
		// Experimentally determined partialBM25 thresholds (threshold -> fraction kept):
		// 0.0 -> 1.0; 1.4121 -> 0.5; 2.1482 -> 0.3; 2.71155 -> 0.2; 3.1639 -> 0.15;
		// 3.77101 -> 0.1; 5.04156 -> 0.05; 6.20388 -> 0.03; 8.15333 -> 0.01.
		// (Intermediate values for 0.9-0.4 were measured and abandoned.)
		universal_threshold_socre_of_posting_ = 0.0;
		cout << "universal_threshold_socre_of_posting_(partialBM25 score): " << universal_threshold_socre_of_posting_ << endl;
	}
	else if(computation_method == 2){
		// TCP method: output the per-term thresholds for the configured position.
		indexPositionOfCorrespondingThresholdToUse_ = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kIndexPositionOfCorrespondingThresholdToUse));

		// Output format:
		// threshold_scores_for_each_term_based_on_partialBM25_: value1 value2 value3 value4 \n
		cout << "threshold_scores_for_each_term_based_on_partialBM25_:";
		// Fix: size_t loop index avoids a signed/unsigned comparison with size().
		for(size_t tempCounter = 0; tempCounter < queryTermPostionIndexPairs_.size(); tempCounter++){
			  cout << term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[tempCounter].first ][indexPositionOfCorrespondingThresholdToUse_] << " ";
		}
		cout << endl;
	}
	else if(computation_method == 4){
		// TCP QV method: output the per-term thresholds for the configured position.
		indexPositionOfCorrespondingThresholdToUse_ = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kIndexPositionOfCorrespondingThresholdToUse));

		// Output format:
		// threshold_scores_for_each_term_based_on_partialBM25_: value1 value2 value3 value4 \n
		cout << "threshold_scores_for_each_term_based_on_partialBM25_:";
		// Fix: size_t loop index avoids a signed/unsigned comparison with size().
		for(size_t tempCounter = 0; tempCounter < queryTermPostionIndexPairs_.size(); tempCounter++){
			  cout << term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[tempCounter].first ][indexPositionOfCorrespondingThresholdToUse_] << " ";
		}
		cout << endl;
	}
	else if (computation_method == 7){
		// Thresholds taken from the AND-semantics best known method:
		// relrank, COMPLETE 100K, stepGap 0.5 (2014/01/13, refined 2014/01/21
		// with more subdivided kept-percentages). (threshold -> fraction kept):
		// 0.0 -> 1.0; 1.86066e-10 -> 0.5; 5.93962e-10 -> 0.3; 1.11172e-09 -> 0.2;
		// 1.56194e-09 -> 0.15; 2.49815e-09 -> 0.1; 4.75225e-09 -> 0.05;
		// 7.38717e-09 -> 0.03; 1.74678e-08 -> 0.01.
		// Current setting keeps roughly the top 1% of postings.
		universal_threshold_socre_of_posting_ = 1.74678e-08;
		cout << "universal_threshold_socre_of_posting_: " << universal_threshold_socre_of_posting_ << endl;
	}
	else{
		// Fix: fatal diagnostics go to stderr so they are not mixed into result output.
		cerr << "Not supported operation" << endl;
		cerr << "Exiting." << endl;
		exit(1);
	}

	cout << "computation_method:" << computation_method << endl;

	// Manually forced for testing purposes: under OR semantics only the online
	// computation path is implemented; no offline score-extraction merge exists
	// yet. (Noted 2012/09/12.)
	index_use_precomputed_score_ = false;

	if(index_use_precomputed_score_)
	{
		// Unreachable until an offline score-extraction merge is implemented
		// (pruning boolean not yet wired in; noted 2012/08/28).
		return MergeLists2ScoreExtractedFromExternal(lists, num_lists, results, num_results, false, 0); //new version using pre-computed scores by Wei.
	}
	else
	{
		return MergeLists2ScoreComputeOnlineForSIGIR2014(lists, num_lists, results, num_results, true, computation_method);
	}
}

int LocalQueryProcessor::ComputeBM25Score(ListData** lists, int num_lists, Result* results, int num_results, string term) {
  // Convenience overload: no merge lists (NULL, 0), so all 'num_lists' lists
  // participate in the intersection.
  return ComputeBM25Score(NULL, 0, lists, num_lists, results, num_results, term);
}

// Returns the total number of document results found in the intersection.
// Note that there is not a guaranteed order of same scoring docIDs.
int LocalQueryProcessor::ComputeBM25Score(ListData** merge_lists, int num_merge_lists, ListData** lists, int num_lists, Result* results, int num_results, string term) {
  // Document-at-a-time BM25 evaluation. The 'merge_lists' (if any) are OR'ed
  // together; every docID produced by them (or, without merge lists, by
  // lists[0]) must then appear in ALL of 'lists' (AND semantics) to be scored.
  // The top 'num_results' documents are kept and returned sorted by score.
  //
  // We have a choice of whether to use a heap (push() / pop() an array) or just search through an array to replace low scoring results
  // and finally sorting it before returning the top-k results in sorted order.
  // For k = 10 results, an array performs only slightly better than a heap. As k increases above 10, heap should be faster.
  // In the general case, a heap should be used (unless k is less than 10), so this option should be 'false'.


  //feature generation for the machine learning project.
  // Side effect: appends "<term> <df_1> <df_2> ... \n" (per-list document
  // frequencies) to the configured IDF output file.
  string outputIDFScoreFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kIDFOutputFilePath));
  ofstream outputIDFScoreFile( outputIDFScoreFileName.c_str(), ios::out | ios::app );
  outputIDFScoreFile << term << " ";

  const bool kUseArrayInsteadOfHeap = false;
  int total_num_results = 0;  // Count of ALL intersection hits, not just the kept top-k.

  // For the array instead of heap top-k technique.
  float curr_min_doc_score;
  Result* min_scoring_result = NULL;

  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
  const float kBm25K1 =  2.0;  // k1
  const float kBm25B = 0.75;   // b

  // We can precompute a few of the BM25 values here.
  const float kBm25NumeratorMul = kBm25K1 + 1;
  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

  // BM25 components.
  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection.
  int doc_len;
  uint32_t f_d_t;

  uint32_t did = 0;  // The current candidate docID.
  uint32_t d;        // Scratch docID returned by NextGEQ().
  int i;  // Index for various loops.

  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
  float idf_t[num_lists];  // Using a variable length array here.
  int num_docs_t;
  for (i = 0; i < num_lists; ++i) {
    num_docs_t = lists[i]->num_docs_complete_list();
    outputIDFScoreFile << num_docs_t << " ";
    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
  }

  // Necessary for the merge lists.
  // TODO: Can also try the heap based method here. Can select between heap and array method based on 'num_merge_lists'.
  uint32_t min_doc_id;

  while (did < ListData::kNoMoreDocs) {
    if (merge_lists != NULL) { // For the lists which we are merging.
      // This will select the lowest docID (ignoring duplicates among the merge lists and any docIDs we have skipped past through AND mode operation).
      min_doc_id = ListData::kNoMoreDocs;
      for (i = 0; i < num_merge_lists; ++i) {
        if ((d = merge_lists[i]->NextGEQ(did)) < min_doc_id) {
          min_doc_id = d;
        }
      }

      assert(min_doc_id >= did);

      did = min_doc_id;
      // All of 'lists' must still be checked for this candidate.
      i = 0;
    } else {
      // Get next element from shortest list.
      did = lists[0]->NextGEQ(did);
      // lists[0] produced the candidate, so only lists[1..] need checking.
      i = 1;
    }

    if (did == ListData::kNoMoreDocs)
      break;

    d = did;

    // Try to find entries with same docID in other lists.
    // Stops early at the first list whose NextGEQ() skips past 'did'.
    for (; (i < num_lists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
      continue;
    }

    if (d > did) {
      // Not in intersection.
      // Restart the candidate search from the docID that caused the mismatch.
      did = d;
    } else {
      assert(d == did);

      // Compute BM25 score from frequencies.



      bm25_sum = 0;
      for (i = 0; i < num_lists; ++i) {
        // Each list is positioned at 'did', so GetFreq() is the term frequency there.
        f_d_t = lists[i]->GetFreq();
        doc_len = index_reader_.document_map().GetDocumentLength(did);

        // Wei, here, it is time for me to do analysis.
        //cout << "did:" << did << endl;
        //cout << "idf_t[0]:" << idf_t[0] << endl;
        //cout << "f_d_t:" << f_d_t << endl;
        //cout << "BM25 For this term Score:" << idf_t[i] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len) << endl;
        //cout << endl;
        //outputIDFScoreFile << idf_t[i] << " ";
        bm25_sum += idf_t[i] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

      }

      if (kUseArrayInsteadOfHeap) {
        // Use an array to maintain the top-k documents.
        if (total_num_results < num_results) {
          results[total_num_results] = make_pair(bm25_sum, did);
          // Track the minimum-scoring slot so later replacements are O(1) to find.
          if (min_scoring_result == NULL || bm25_sum < min_scoring_result->first)
            min_scoring_result = results + total_num_results;
        } else {
          if (bm25_sum > min_scoring_result->first) {
            // Replace the min scoring result with the current (higher scoring) result.
            min_scoring_result->first = bm25_sum;
            min_scoring_result->second = did;

            // Find the new min scoring document.
            // O(k) rescan; acceptable for small k.
            curr_min_doc_score = numeric_limits<float>::max();
            for (i = 0; i < num_results; ++i) {
              if (results[i].first < curr_min_doc_score) {
                curr_min_doc_score = results[i].first;
                min_scoring_result = results + i;
              }
            }
          }
        }
      } else {
        // Use a heap to maintain the top-k documents. This has to be a min heap,
        // where the lowest scoring document is on top, so that we can easily pop it,
        // and push a higher scoring document if need be.
        // ResultCompare must therefore order greatest-first so push_heap/pop_heap
        // produce a min heap at results[0].
        if (total_num_results < num_results) {
          // We insert a document if we don't have k documents yet.
          results[total_num_results] = make_pair(bm25_sum, did);
          push_heap(results, results + total_num_results + 1, ResultCompare());
        } else {
          if (bm25_sum > results->first) {
            // We insert a document only if it's score is greater than the minimum scoring document in the heap.
            pop_heap(results, results + num_results, ResultCompare());
            results[num_results - 1].first = bm25_sum;
            results[num_results - 1].second = did;
            push_heap(results, results + num_results, ResultCompare());
          }
        }
      }

      ++total_num_results;
      ++did;  // Search for next docID.
    }
  }

  // Sort top-k results in descending order by document score.
  sort(results, results + min(num_results, total_num_results), ResultCompare());

  outputIDFScoreFile << endl;
  outputIDFScoreFile.close();

  // Returns the total number of intersection hits; the caller caps the usable
  // entries in 'results' at min(num_results, total_num_results).
  return total_num_results;
}

int LocalQueryProcessor::UpdateExternalPreComputedScores(LexiconData** query_term_data, ListData** lists, string query_term, uint32_t doc_id, float new_score){
	  // Persist 'new_score' for 'doc_id' into the external index backing the
	  // first list, then return 0 (success). The third argument to the save
	  // call is false, matching the original behavior.
	  // Note: 'query_term_data' and 'query_term' are accepted for interface
	  // symmetry with related routines but are not used here.
	  ListData* target_list = lists[0];
	  target_list->SaveScoreIntoExternalIndexGivenDocID(doc_id, new_score, false);
	  return 0;
}

// Returns the total number of document results found in the intersection.
// Note that there is not a guaranteed order of same scoring docIDs.
// This function was modified by Wei to support storing BM25 scores into the index.
int LocalQueryProcessor::IntersectListsForModifyingPreComputedScores(LexiconData** query_term_data, ListData** merge_lists, int num_merge_lists, ListData** lists, int num_lists, bool scoreModificationFlag, string mode,POSTING_RESULT* posting_results){
	  //cout << "Notices:" << endl;
	  //cout << "Updated by Wei 2012/08/09 afternoon" << endl;
	  cout << "The logic is the following:" << endl;
	  cout << "(1)The scores are directly got from external index(No online computing)." << endl;
	  cout << "(2)Acting in the Interactive mode or in the Programmatic mode." << endl;

	  // Show the queryTermID to the screen.
	  for(int tempCounter = 0; tempCounter < num_lists; tempCounter++){
          string queryTermInStringFormat = "";
          string posting_result_output_string = "";
		  for(int tempCounter2 = 0; tempCounter2 < query_term_data[tempCounter]->term_len(); tempCounter2++){
			  queryTermInStringFormat += query_term_data[tempCounter]->term()[tempCounter2];
		  }
		  cout << "queryTermInStringFormat:" << queryTermInStringFormat << " queryTermIndex(ID):" << tempCounter << endl;
	  }


	  string outputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kNewScoreIntermediateFileForPruning));
	  ofstream outputFileHandler(outputFileName.c_str());

	  int total_num_results = 0;
	  uint32_t did = 0;
	  uint32_t d;
	  int doc_index = 0;
	  int posting_index = 0;
	  int total_num_posting = 0;
	  int i;  // Index for various loops.

	  // BM25 components.
	  float old_bm25_sum = 0;  // The old BM25 sum for the current document we're processing in the intersection.
	  float new_bm25_sum = 0;  // The new BM25 sum for the current document we're processing in the intersection.
	  float input_external_scores[num_lists];
	  float output_external_scores[num_lists];




	  while (did < ListData::kNoMoreDocs) {
		  for(int tempCounter = 0; tempCounter < num_lists; tempCounter++){
			  input_external_scores[tempCounter] = 0.0;
		  }

		// Get next element from shortest list.
	    did = lists[0]->NextGEQRomanRead(did,input_external_scores[0], false);
	    i = 1;

	    if (did == ListData::kNoMoreDocs)
	      break;

	    d = did;

	    // Try to find entries with same docID in other lists.
	    for (; (i < num_lists) && ((d = lists[i]->NextGEQRomanRead(did,input_external_scores[i], false) ) == did); ++i) {
	      continue;
	    }

	    if (d > did)
	    {
	      // Not in intersection.
	      did = d;
	    }
	    else
	    {
	    	assert(d == did);

	        old_bm25_sum = 0;

		    for(int tempCounter = 0; tempCounter < num_lists; tempCounter++){
		    	old_bm25_sum += input_external_scores[tempCounter];
		    }
		    new_bm25_sum = old_bm25_sum;

	        if(scoreModificationFlag){
				  if (mode == "Interactive"){
		        	  for(int tempCounter = 0; tempCounter < num_lists; tempCounter++){
						  output_external_scores[tempCounter] = -1;
					  }

		        	  for(int tempCounter = 0; tempCounter < num_lists; tempCounter++){
			              cout << "--->New Score for the posting(";
						  for(int tempCounter2 = 0; tempCounter2 < query_term_data[tempCounter]->term_len(); tempCounter2++){
							  cout << query_term_data[tempCounter]->term()[tempCounter2];
						  }
			              cout << "," << did << ") from " << input_external_scores[tempCounter] << " to:";
						  string tempInputNewScoreLine = "";
						  getline(cin,tempInputNewScoreLine);
						  if(tempInputNewScoreLine == ""){
							  cout << "(The score is NOT changed)" << endl;
							  output_external_scores[tempCounter] = input_external_scores[tempCounter];
						  }
						  else{
							  // first, write the corresponding query term.
							  for(int tempCounter2 = 0; tempCounter2 < query_term_data[tempCounter]->term_len(); tempCounter2++){
								  outputFileHandler << query_term_data[tempCounter]->term()[tempCounter2];
							  }
							  outputFileHandler << " ";

							  // write the corresponding did and new score.
							  float new_score = atof(tempInputNewScoreLine.c_str());
							  output_external_scores[tempCounter] = new_score;
							  outputFileHandler << did << " " << output_external_scores[tempCounter] << endl;
						  }
		              }

		        	  new_bm25_sum = 0;
		        	  for(int tempCounter = 0; tempCounter < num_lists; tempCounter++){
		        		  new_bm25_sum += output_external_scores[tempCounter];
		        	  }
				  }
				  else if(mode == "Programmatic"){
		        	  // updated by Wei, 2012/08/09.
		        	  for(int tempCounter = 0; tempCounter < num_lists; tempCounter++){
			              string queryTermInStringFormat = "";
			              string posting_result_output_string = "";
						  for(int tempCounter2 = 0; tempCounter2 < query_term_data[tempCounter]->term_len(); tempCounter2++){
							  queryTermInStringFormat += query_term_data[tempCounter]->term()[tempCounter2];
						  }
			              posting_result_output_string = queryTermInStringFormat + " " + boost::lexical_cast<string>( did ) + " " + boost::lexical_cast<string>( input_external_scores[tempCounter] );
			              //debug purpose
			              //cout << posting_index << " " << tempCounter << " " << posting_result_output_string << endl;

			              posting_results[posting_index].queryTerm = queryTermInStringFormat.c_str();
			              posting_results[posting_index].queryTermLength = queryTermInStringFormat.length();
			              posting_results[posting_index].queryTermID = tempCounter;
			              posting_results[posting_index].docId = did;
			              posting_results[posting_index].score = input_external_scores[tempCounter];

			              posting_index += 1;
		              }
		        	  //debug purpose
		        	  //cout << "posting_results_in_string_.size():" << posting_results_in_string_.size() << endl;
				  }
	        }
	        //debug purpose
	        //cout << "doc_index: " << doc_index << " did: " << did << " old_bm25_sum:" << old_bm25_sum << " new_bm25_sum:" << new_bm25_sum << endl;
	        //cout << endl;

		    doc_index += 1;
		    ++total_num_results;
		    ++did;  // Search for next docID.
	    }
	  }
	  outputFileHandler.close();

	  // in order to return the # of postings, I have increased the variable posting_index by 2.
	  total_num_posting = posting_index;
	  return total_num_posting;
}

// Returns the total number of document results found in the intersection.
// Note that there is not a guaranteed order of same scoring docIDs.
// Scores are read directly from the external score index (no online computing).
// Any document with at least one per-list score at or below
// universal_threshold_socre_of_posting_ is excluded from the top-k heap.
//
// NOTE: remember to LOAD/ATTACH the correct external index -- the system may
// silently proceed without it and produce no error message.
int LocalQueryProcessor::IntersectLists2ScoreExtractedFromExternal(ListData** merge_lists, int num_merge_lists, ListData** lists, int num_lists, Result_Wei_2012* results, int num_results, bool debugFlag) {
  // for debug
  cout << "LocalQueryProcessor::IntersectLists2ScoreExtractedFromExternal(...) is called." << endl;

  // Map each list position (0..9) onto the corresponding SCORES_SET member so
  // the per-list assignment below does not need a ten-way if/else chain.
  // SCORES_SET only tracks the first ten lists; extra lists still contribute
  // to bm25_sum but get no per-posting slot (same as the original chain).
  static float SCORES_SET::* const kPostingScoreMembers[10] = {
      &SCORES_SET::postingScore0, &SCORES_SET::postingScore1,
      &SCORES_SET::postingScore2, &SCORES_SET::postingScore3,
      &SCORES_SET::postingScore4, &SCORES_SET::postingScore5,
      &SCORES_SET::postingScore6, &SCORES_SET::postingScore7,
      &SCORES_SET::postingScore8, &SCORES_SET::postingScore9};

  int total_num_results = 0;
  uint32_t did = 0;
  uint32_t d;
  int curr_posting_num = 0;  // Count of intersected documents (debug output only).
  int i;  // Index for various loops.

  // BM25 components.
  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection.
  SCORES_SET scores_set;  // Per-document score breakdown handed to the result heap.

  float external_scores[num_lists];

  while (did < ListData::kNoMoreDocs) {
    for (int tempCounter = 0; tempCounter < num_lists; tempCounter++) {
      external_scores[tempCounter] = 0.0;
    }

    // Get next element from shortest list.
    did = lists[0]->NextGEQRomanRead(did, external_scores[0], false);
    i = 1;

    if (did == ListData::kNoMoreDocs)
      break;

    d = did;

    // Try to find entries with same docID in other lists.
    for (; (i < num_lists) && ((d = lists[i]->NextGEQRomanRead(did, external_scores[i], false)) == did); ++i) {
      continue;
    }

    if (d > did) {
      // Not in intersection.
      did = d;
    } else {
      // This docID is in the intersection.
      assert(d == did);

      bm25_sum = 0;

      // Value-initialize every member (totalScore, doc_length, all per-posting
      // probabilities/components/scores, list lengths, term frequencies) to
      // zero in one step, replacing the previous ~130-line field-by-field reset.
      scores_set = SCORES_SET();

      // Accumulate the per-list scores; a posting at or below the universal
      // lower-bound threshold disqualifies the whole document.
      bool postingThresholdNotQualifiedFlag = false;
      for (int tempCounter = 0; tempCounter < num_lists; tempCounter++) {
        bm25_sum += external_scores[tempCounter];
        if (tempCounter < 10) {
          scores_set.*kPostingScoreMembers[tempCounter] = external_scores[tempCounter];
          if (external_scores[tempCounter] <= universal_threshold_socre_of_posting_) {
            postingThresholdNotQualifiedFlag = true;
          }
        }
      }
      scores_set.totalScore = bm25_sum;

      if (debugFlag) {
        cout << "curr_posting_num: " << curr_posting_num << " did: " << did << " score(computed offline):" << bm25_sum << " number of lists intersect:" << num_lists << endl;
      }

      if (postingThresholdNotQualifiedFlag) {
        // The document is a candidate, but one or more of its postings failed
        // the posting threshold, so the heap insertion is skipped entirely
        // (total_num_results intentionally not incremented).
        ++did;  // Search for next docID.
      } else {
        // Use a heap to maintain the top-k documents. This has to be a min heap,
        // where the lowest scoring document is on top, so that we can easily pop it,
        // and push a higher scoring document if need be.
        if (total_num_results < num_results) {
          // We insert a document if we don't have k documents yet.
          results[total_num_results] = make_pair(scores_set, did);
          push_heap(results, results + total_num_results + 1, ResultCompare2());
        } else {
          if (scores_set.totalScore > results->first.totalScore) {
            // We insert a document only if its score is greater than the minimum scoring document in the heap.
            pop_heap(results, results + num_results, ResultCompare2());
            results[num_results - 1].first = scores_set;
            results[num_results - 1].second = did;
            push_heap(results, results + num_results, ResultCompare2());
          }
        }
        ++total_num_results;
        ++did;  // Search for next docID.
      }
      curr_posting_num += 1;  // Prepare for the next posting.
    }
  }

  // Sort top-k results in descending order by document score.
  sort(results, results + min(num_results, total_num_results), ResultCompare2());

  return total_num_results;
}

// Returns the total number of document results found in the intersection.
// Note that there is not a guaranteed order of same scoring docIDs.
// Scores are read directly from the external score index (no online computing).
//
// 'merge_lists'/'num_merge_lists' and 'computeMode' are currently unused.
// The previous unconditional four-line "Notices" banner printed per call was
// leftover debug output on the query path and has been removed; per-document
// tracing remains available through debugFlag.
int LocalQueryProcessor::IntersectLists(ListData** merge_lists, int num_merge_lists, ListData** lists, int num_lists, Result* results, int num_results, bool debugFlag, int computeMode) {
  int total_num_results = 0;
  uint32_t did = 0;
  uint32_t d;
  int curr_posting_num = 0;  // Count of intersected documents (debug output only).
  int i;  // Index for various loops.

  // BM25 components.
  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection.
  float external_scores[num_lists];  // Per-list scores read from the external index.

  while (did < ListData::kNoMoreDocs) {
    for (int tempCounter = 0; tempCounter < num_lists; tempCounter++) {
      external_scores[tempCounter] = 0.0;
    }

    // Get next element from shortest list.
    did = lists[0]->NextGEQRomanRead(did, external_scores[0], false);
    i = 1;

    if (did == ListData::kNoMoreDocs)
      break;

    d = did;

    // Try to find entries with same docID in other lists.
    for (; (i < num_lists) && ((d = lists[i]->NextGEQRomanRead(did, external_scores[i], false)) == did); ++i) {
      continue;
    }

    if (d > did) {
      // Not in intersection.
      did = d;
    } else {
      assert(d == did);

      bm25_sum = 0;
      for (int tempCounter = 0; tempCounter < num_lists; tempCounter++) {
        bm25_sum += external_scores[tempCounter];
      }

      if (debugFlag) {
        cout << "curr_posting_num: " << curr_posting_num << " did: " << did << " score(computed offline):" << bm25_sum << " number of lists intersect:" << num_lists << endl;
      }

      // Use a heap to maintain the top-k documents. This has to be a min heap,
      // where the lowest scoring document is on top, so that we can easily pop it,
      // and push a higher scoring document if need be.
      if (total_num_results < num_results) {
        // We insert a document if we don't have k documents yet.
        results[total_num_results] = make_pair(bm25_sum, did);
        push_heap(results, results + total_num_results + 1, ResultCompare());
      } else {
        if (bm25_sum > results->first) {
          // We insert a document only if its score is greater than the minimum scoring document in the heap.
          pop_heap(results, results + num_results, ResultCompare());
          results[num_results - 1].first = bm25_sum;
          results[num_results - 1].second = did;
          push_heap(results, results + num_results, ResultCompare());
        }
      }

      curr_posting_num += 1;  // Prepare for the next posting.

      ++total_num_results;
      ++did;  // Search for next docID.
    }
  }

  // Sort top-k results in descending order by document score.
  sort(results, results + min(num_results, total_num_results), ResultCompare());

  return total_num_results;
}

void LocalQueryProcessor::LoadUpAuxFilesForSecondProbabilityFactor(){
	cout << "LoadUpAuxFilesForSecondProbabilityFactor() called." << endl;

    // step1: fill the map<int,float> query_length_probability_map_
    // key: query length
	// value: probability of being a K term query

    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kQueryLengthDistributionFileName));
    cout << "inputFileName: " << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());

    // skip the only headline
	// e.g. queryLength probability(k term query)
    getline (inputfile,currentLine);

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string queryLengthInStringFormat;
		    string probablityOfBeingAKTermQueryInStringFormat;

		    iss >> queryLengthInStringFormat;
			iss >> probablityOfBeingAKTermQueryInStringFormat;

			int queryLength = atof(queryLengthInStringFormat.c_str());
			float probablityOfBeingAKTermQuery = atof(probablityOfBeingAKTermQueryInStringFormat.c_str());

			query_length_probability_map_[queryLength] = probablityOfBeingAKTermQuery;

		}
	}
	inputfile.close();

	// step2: fill the following dicts
	// map<string,float> trecID_With_Xdoc_Value_goldStandarded_map_;
	// map<string,float> trecID_With_Xdoc_Value_1D_map_;
	// map<string,float> trecID_With_Xdoc_Value_2D_map_;
	// map<string,float> trecID_With_Xdoc_Value_goodTurning_map_;
    // key: the trecID
    // value: Xdoc value

    inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTrecIDWithXdocValuesFileName));
    cout << "inputFileName: " << inputFileName << endl;
    ifstream inputfile2(inputFileName.c_str());

    // skip the only headline
	// e.g. queryLength probability(k term query)
    getline (inputfile2,currentLine);

	while ( inputfile2.good() )
	{
		getline (inputfile2,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string trecIDInStringFormat;
			string docIDInStringFormat;
		    string XdocValueGoldStandardInStringFormat;
		    string XdocValue1DInStringFormat;
		    string XdocValue2DInStringFormat;
		    string XdocValueGoodTurningInStringFormat;

		    iss >> trecIDInStringFormat;
		    iss >> docIDInStringFormat;
		    iss >> XdocValueGoldStandardInStringFormat;
		    iss >> XdocValue1DInStringFormat;
		    iss >> XdocValue2DInStringFormat;
		    iss >> XdocValueGoodTurningInStringFormat;

		    float XdocValueGoldStandard = atof(XdocValueGoldStandardInStringFormat.c_str());
		    float XdocValue1D = atof(XdocValue1DInStringFormat.c_str());
		    float XdocValue2D = atof(XdocValue2DInStringFormat.c_str());
		    float XdocValueGoodTurning = atof(XdocValueGoodTurningInStringFormat.c_str());

		    docID_With_Xdoc_Value_goldStandarded_map_[docIDInStringFormat] = XdocValueGoldStandard;
		    docID_With_Xdoc_Value_1D_map_[docIDInStringFormat] = XdocValue1D;
		    docID_With_Xdoc_Value_2D_map_[docIDInStringFormat] = XdocValue2D;
		    docID_With_Xdoc_Value_goodTurning_map_[docIDInStringFormat] = XdocValueGoodTurning;

		}
	}
	inputfile2.close();

	cout << "query_length_probability_map_[1]:" << query_length_probability_map_[1] << endl;

    if(query_length_probability_map_.size() == 0 or docID_With_Xdoc_Value_goldStandarded_map_.size() == 0 or docID_With_Xdoc_Value_1D_map_.size() == 0 or docID_With_Xdoc_Value_2D_map_.size() == 0 or docID_With_Xdoc_Value_goodTurning_map_.size() == 0){
	    GetDefaultLogger().Log("Load Up Aux Files For 2ed Probability Factor Elements NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(query_length_probability_map_.size()) + " <queryLength,probabilityOfBeingAKTermQuery> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(docID_With_Xdoc_Value_goldStandarded_map_.size()) + " <docID,XdocValueGoldStandarded> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(docID_With_Xdoc_Value_1D_map_.size()) + " <docID,XdocValue1D> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(docID_With_Xdoc_Value_2D_map_.size()) + " <docID,XdocValue2D> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(docID_With_Xdoc_Value_goodTurning_map_.size()) + " <docID,XdocValueGoodTurning> pairs have been loaded.", false);
    }
}

void LocalQueryProcessor::LoadUpTermPieceInfoForRelRank(){
	cout << "LayeredIndexGenerator::LoadUpTermPieceInfoForRelRank() called." << endl;
	// master plan
	// prepare two kinds of files
	// one is for the random posting set
	// another is for the query term set
	// both are just NOT large and easy to control

    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPartialBM25ScoreRangesAndProbabilitiesFileNameVersion2Aux3));
    cout << "inputFileName: " << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string currentTerm;
		    string currentTermLengthOfTheListInStringFormat;	// currently No USE
		    string currentTermBelongingClassLabalInStringFormat;	// currently No USE
		    string numOfPieceInStringFormat;

		    iss >> currentTerm;
			iss >> currentTermLengthOfTheListInStringFormat;
			iss >> currentTermBelongingClassLabalInStringFormat;
			iss >> numOfPieceInStringFormat;

			int currentTermBelongingClassLabalInIntFormat = atoi( currentTermBelongingClassLabalInStringFormat.c_str() );

			term_with_their_belonging_class_map_[currentTerm] = currentTermBelongingClassLabalInIntFormat;

			const unsigned int NUM_OF_PIECE = atoi(numOfPieceInStringFormat.c_str());
			for(unsigned int tempCounter = 0; tempCounter < NUM_OF_PIECE; tempCounter++){
				string currentPieceIDInStringFormat;
				string numOfPostingsInCurrentPieceInStringFormat;
				iss >> currentPieceIDInStringFormat;
				iss >> numOfPostingsInCurrentPieceInStringFormat;
				int currentPieceIDInIntFormat = atoi(currentPieceIDInStringFormat.c_str());
				long numOfPostingsInCurrentPieceInIntFormat = atol(numOfPostingsInCurrentPieceInStringFormat.c_str());


				if (term_with_piece_info_map_[currentTerm].count(currentPieceIDInIntFormat) > 0){
					cout << "Duplicated pieceID detected. Critical Error. Mark4" << endl;
					cout << "currentTerm: " << currentTerm << endl;
					cout << "numOfPieceInStringFormat: " << numOfPieceInStringFormat << endl;
					cout << "currentTermBelongingClassLabalInStringFormat: " << currentTermBelongingClassLabalInStringFormat << endl;
					exit(1);
				}
				else{
					term_with_piece_info_map_[currentTerm][currentPieceIDInIntFormat] = numOfPostingsInCurrentPieceInIntFormat;
				}

			}
		}
	}
	inputfile.close();

	// for debug ONLY
	cout << "term_with_piece_info_map_.size(): " << term_with_piece_info_map_.size() << endl;
	cout << "term_with_their_belonging_class_map_.size(): " << term_with_their_belonging_class_map_.size() << endl;
	cout << "term_with_their_belonging_class_map_['apple']: " << term_with_their_belonging_class_map_["apple"] << endl;
	cout << "term_with_piece_info_map_['apple'].size(): " << term_with_piece_info_map_["apple"].size() << endl;
	cout << "term_with_piece_info_map_['apple'][0]: " << term_with_piece_info_map_["apple"][0] << endl;
	cout << "term_with_piece_info_map_['apple'][1]: " << term_with_piece_info_map_["apple"][1] << endl;
	cout << "term_with_piece_info_map_['apple'][2]: " << term_with_piece_info_map_["apple"][2] << endl;
	cout << "term_with_piece_info_map_['apple'][3]: " << term_with_piece_info_map_["apple"][3] << endl;


    if(term_with_piece_info_map_.size() == 0){
	    GetDefaultLogger().Log("Load Up term with piece info map NOT Done--- Take Care", false);
	    GetDefaultLogger().Log("Load Up term with belonging class info map NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(term_with_piece_info_map_.size()) + " terms with their lists have been loaded.", false);
    }
}


void LocalQueryProcessor::LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable2D(){
	// Lack the list length info and the impact score ranges info for each record.
	cout << "LocalQueryProcessor::LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTableVersion2() called." << endl;

	// probability file 1
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPartialBM25ScoreRangesAndProbabilitiesFileNameVersion2));
    cout << "inputFileName: " << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string keyInStringFormat;	// currently used
			string valueInFloatFormat; // currently used

		    iss >> keyInStringFormat;
		    iss >> valueInFloatFormat;

		    float currentClassProbabilityValueInFloatFormat = atof( valueInFloatFormat.c_str() );

		    // cout << "currentLine: " << currentLine << endl;
		    // cout << "keyInStringFormat: " << keyInStringFormat << endl;
		    // cout << "currentClassProbabilityValueInFloatFormat: " << currentClassProbabilityValueInFloatFormat << endl;

			if ( class_label_with_probability_of_2D_ranges_map_.count(keyInStringFormat) > 0){
				cout << "Duplicated class label added. Critical Error. Mark1" << endl;
				exit(1);
			}
			else{
				class_label_with_probability_of_2D_ranges_map_[keyInStringFormat] = currentClassProbabilityValueInFloatFormat;
			}
		}
	}
	cout << "class_label_with_probability_of_2D_ranges_map_['1_0']: " << class_label_with_probability_of_2D_ranges_map_["1_0"] << endl;
	cout << "class_label_with_probability_of_2D_ranges_map_['69_55']: " << class_label_with_probability_of_2D_ranges_map_["69_55"] << endl;
	cout << "class_label_with_probability_of_2D_ranges_map_.size(): " << class_label_with_probability_of_2D_ranges_map_.size() << endl;
	inputfile.close();

	// probability file 2
    inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPartialBM25ScoreRangesAndProbabilitiesFileNameVersion2Aux1));
    cout << "inputFileName: " << inputFileName << endl;
	ifstream inputfile2(inputFileName.c_str());

	while ( inputfile2.good() )
	{
		getline (inputfile2,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string keyInStringFormat;	// currently used
			string valueInFloatFormat; // currently used

		    iss >> keyInStringFormat;
		    iss >> valueInFloatFormat;


		    int currentClassLabelInIntFormat = atoi( keyInStringFormat.c_str() );
		    uint32_t currentClassListLengthInIntFormat = atoi( valueInFloatFormat.c_str() );

			if ( class_label_with_lower_bounds_of_list_length_map_.count(currentClassLabelInIntFormat) > 0){
				cout << "Duplicated class label added. Critical Error. Mark2" << endl;
				exit(1);
			}
			else{
				class_label_with_lower_bounds_of_list_length_map_[currentClassLabelInIntFormat] = currentClassListLengthInIntFormat;
			}
		}
	}
	inputfile2.close();
	cout << "class_label_with_lower_bounds_of_list_length_map_[0]: " << class_label_with_lower_bounds_of_list_length_map_[0] << endl;
	cout << "class_label_with_lower_bounds_of_list_length_map_[70]: " << class_label_with_lower_bounds_of_list_length_map_[70] << endl;
	cout << "class_label_with_lower_bounds_of_list_length_map_[1305]: " << class_label_with_lower_bounds_of_list_length_map_[1305] << endl;
	cout << "class_label_with_lower_bounds_of_list_length_map_[1306]: " << class_label_with_lower_bounds_of_list_length_map_[1306] << endl;

	// probability file 3
    inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPartialBM25ScoreRangesAndProbabilitiesFileNameVersion2Aux2));
    cout << "inputFileName: " << inputFileName << endl;
	ifstream inputfile3(inputFileName.c_str());

	while ( inputfile3.good() )
	{
		getline (inputfile3,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string keyInStringFormat;	// currently used
			string valueInFloatFormat; // currently used

		    iss >> keyInStringFormat;
		    iss >> valueInFloatFormat;


		    int currentClassLabelInIntFormat = atoi( keyInStringFormat.c_str() );
		    float currentClassImpactScoreInFloatFormat = atof( valueInFloatFormat.c_str() );

			if ( class_label_with_lower_bounds_of_impact_scores_map_.count(currentClassLabelInIntFormat) > 0){
				cout << "Duplicated class label added. Critical Error. Mark3" << endl;
				exit(1);
			}
			else{
				class_label_with_lower_bounds_of_impact_scores_map_[currentClassLabelInIntFormat] = currentClassImpactScoreInFloatFormat;
			}
		}
	}
	cout << "class_label_with_lower_bounds_of_impact_scores_map_[0]: " << class_label_with_lower_bounds_of_impact_scores_map_[0] << endl;
	cout << "class_label_with_lower_bounds_of_impact_scores_map_[1]: " << class_label_with_lower_bounds_of_impact_scores_map_[1] << endl;
	cout << "class_label_with_lower_bounds_of_impact_scores_map_[55]: " << class_label_with_lower_bounds_of_impact_scores_map_[55] << endl;
	inputfile3.close();

	// for debug ONLY
	cout << "class_label_with_probability_of_2D_ranges_map_.size(): " << class_label_with_probability_of_2D_ranges_map_.size() << endl;
	cout << "class_label_with_lower_bounds_of_impact_scores_map_.size(): " << class_label_with_lower_bounds_of_impact_scores_map_.size() << endl;
	cout << "class_label_with_lower_bounds_of_list_length_map_.size(): " << class_label_with_lower_bounds_of_list_length_map_.size() << endl;

    if(class_label_with_probability_of_2D_ranges_map_.size() == 0 or class_label_with_lower_bounds_of_impact_scores_map_.size() == 0 or class_label_with_lower_bounds_of_list_length_map_.size() == 0){
	    GetDefaultLogger().Log("Load Up partialBM25 probabilities/aux1/aux2 related maps NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(class_label_with_probability_of_2D_ranges_map_.size()) + " <classLabel,probability> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(class_label_with_lower_bounds_of_impact_scores_map_.size()) + " <classLabel,impactScores> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(class_label_with_lower_bounds_of_list_length_map_.size()) + " <classLabel,listLengths> pairs have been loaded.", false);
    }

    // debug
    // exit(1);
}



void LocalQueryProcessor::LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable1D(){
	// Loads the smoothed 1D probability table combining the second and third
	// probability factors. Each data line of the input file carries six
	// whitespace-separated fields:
	//   classLabel  partialBM25LowerBound  partialBM25AverageBound  numTotalPostings  numTop10Postings  percentage
	// Only classLabel, partialBM25LowerBound and percentage are consumed; they
	// populate class_label_with_lower_bounds_map_ and class_label_with_probability_map_.
	// A duplicated class label is treated as fatal input corruption (exit(1)).
	cout << "LocalQueryProcessor::LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable() called." << endl;
	cout << "Get the smoothed version of the probability of the combination of the second and third factor." << endl;
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPartialBM25ScoreRangesAndProbabilitiesFileName));
    cout << "inputFileName:" << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	// skip the headline
	getline (inputfile,currentLine);

	// Loop on getline() directly. The previous `while (inputfile.good())` pattern
	// tests the stream state *before* reading, so on libraries that leave the
	// buffer untouched after a failed read the last line could be processed twice
	// -- which here would falsely trip the duplicate-label fatal error below.
	while ( getline(inputfile, currentLine) )
	{
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string currentClassLabelInStringFormat;	// currently used
			string currentClassPartialBM25LOWERBoundInStringFormat; // currently used
			string currentClassPartialBM25AVERAGEBoundXAxisInStringFormat;	// currently NOT used
			string currentNumofTotalPostingsInStringFormat;	// currently NOT used
			string currentNumofTOP10PostingsInStringFormat;	// currently NOT used
			string currentPercentageYAxisInStringFormat;	// currently used

		    iss >> currentClassLabelInStringFormat;
		    iss >> currentClassPartialBM25LOWERBoundInStringFormat;
		    iss >> currentClassPartialBM25AVERAGEBoundXAxisInStringFormat;
		    iss >> currentNumofTotalPostingsInStringFormat;
		    iss >> currentNumofTOP10PostingsInStringFormat;
		    iss >> currentPercentageYAxisInStringFormat;

		    int currentClassLabelInIntFormat = atoi(currentClassLabelInStringFormat.c_str());
		    float currentClassPartialBM25LOWERBoundInFloatFormat = atof(currentClassPartialBM25LOWERBoundInStringFormat.c_str());
		    float currentPercentageYAxisInFloatFormat = atof(currentPercentageYAxisInStringFormat.c_str());

			// A class label must appear exactly once; a duplicate means the input
			// table is corrupt and continuing would silently overwrite entries.
			if (class_label_with_lower_bounds_map_.count(currentClassLabelInIntFormat) > 0 || class_label_with_probability_map_.count(currentClassLabelInIntFormat) > 0){
				cout << "Duplicated class label added. Critical Error. Mark4" << endl;
				exit(1);
			}
			else{
				class_label_with_lower_bounds_map_[currentClassLabelInIntFormat] = currentClassPartialBM25LOWERBoundInFloatFormat;
				class_label_with_probability_map_[currentClassLabelInIntFormat] = currentPercentageYAxisInFloatFormat;
			}
		}
	}
	inputfile.close();

	// for debug ONLY
	cout << "class_label_with_lower_bounds_map_.size():" << class_label_with_lower_bounds_map_.size() << endl;
	cout << "class_label_with_probability_map_.size():" << class_label_with_probability_map_.size() << endl;

    if(class_label_with_lower_bounds_map_.size() == 0 || class_label_with_probability_map_.size() == 0){
	    GetDefaultLogger().Log("Load Up partialBM25 probabilities related map NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(class_label_with_lower_bounds_map_.size()) + " <classLabel,lowerBound> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(class_label_with_probability_map_.size()) + " <classLabel,probability> pairs have been loaded.", false);
    }
}

void LocalQueryProcessor::LoadUpAuxFilesForFirstProbabilityFactor(){
    // Loads the two auxiliary files backing the first probability factor:
    //   step1: freq_first_factor_probability_map_   <freq, probability>
    //   step2: terms_with_corresponding_species_belonging_to_map_ <term, freq>
    // On failure (either map ends up empty) a warning is logged, but execution
    // continues -- callers must tolerate partially loaded state.
    cout << "LoadUpAuxFilesForFirstProbabilityFactor() called." << endl;
    // step1: fill the map<int,float> freq_first_factor_probability_map_
    // key: # of times this object appears
	// value: the probability that this term will occur in the next query

    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kFirstProbabilityFactorFileName));
    cout << "inputFileName:" << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());

    // skip 4 headlines
    getline (inputfile,currentLine);
    getline (inputfile,currentLine);
    getline (inputfile,currentLine);
    getline (inputfile,currentLine);

	// Loop on getline() directly instead of testing good() before reading, so the
	// final failed read can never be processed with a stale line buffer.
	while ( getline(inputfile, currentLine) )
	{
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );
			string freqInStringFormat;
		    string placeHolder1;
		    string placeHolder2;
		    string placeHolder3;
		    string probabilityOfTheTermAppearedInTheNextQuerySlotInStringFormat;
		    string probablityOfTheTermAppearedInTheNextQueryInStringFormat;

		    iss >> freqInStringFormat;
			iss >> placeHolder1;
			iss >> placeHolder2;
			iss >> placeHolder3;
			iss >> probabilityOfTheTermAppearedInTheNextQuerySlotInStringFormat;
			iss >> probablityOfTheTermAppearedInTheNextQueryInStringFormat;

			// freq is an integer field; parse it as one (was atof with an implicit
			// double->int truncation).
			int freq = atoi(freqInStringFormat.c_str());
			// Updated by Wei 2013/08/31 afternoon. ONLY at this time, I will use the probabilityOfTheTermAppearedInTheNextQuerySlotInStringFormat
			// instead of using probablityOfTheTermAppearedInTheNextQueryInStringFormat to compute the 1st factor.
			float first_factor_probablity = atof(probabilityOfTheTermAppearedInTheNextQuerySlotInStringFormat.c_str());
			// Zero-probability rows are dropped; absent keys default to 0 downstream.
			if (first_factor_probablity != 0.0){
				freq_first_factor_probability_map_[freq] = first_factor_probablity;
			}
		}
	}
	inputfile.close();

	// step2: fill the map<string,int> terms_with_corresponding_species_belonging_to_map_
    // key: the terms which have been seen in the training queries
    // value: which freq it belongs to
    // Line format: freq  numTermsAssociated  term1 term2 ... termN

    inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kFreqCorrespondingTermsFileName));
    cout << "inputFileName:" << inputFileName << endl;
    ifstream inputfile2(inputFileName.c_str());

	while ( getline(inputfile2, currentLine) )
	{
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );
			string freqInStringFormat;
		    string numOfTermsAssociatedInStringFormat;
		    string currentTerm;
		    iss >> freqInStringFormat;
		    iss >> numOfTermsAssociatedInStringFormat;
		    int freq = atoi(freqInStringFormat.c_str());
		    int NUM_OF_TERMS_ASSOCIATED = atoi(numOfTermsAssociatedInStringFormat.c_str());
			if (NUM_OF_TERMS_ASSOCIATED != 0){
			    for(int tempCounter = 0; tempCounter < NUM_OF_TERMS_ASSOCIATED; tempCounter++ ){
					iss >> currentTerm;
					terms_with_corresponding_species_belonging_to_map_[currentTerm] = freq;
				}
			}

		}
	}
	inputfile2.close();

	// Debug probe. Use find() rather than operator[]: operator[] would *insert*
	// a zero entry for "soalr" when absent, making the map non-empty and thereby
	// masking the emptiness sanity check just below.
	map<string,int>::const_iterator soalrIt = terms_with_corresponding_species_belonging_to_map_.find("soalr");
	cout << "terms_with_corresponding_species_belonging_to_map_['soalr']:" << (soalrIt != terms_with_corresponding_species_belonging_to_map_.end() ? soalrIt->second : 0) << endl;

    if(freq_first_factor_probability_map_.size() == 0 or terms_with_corresponding_species_belonging_to_map_.size() == 0){
	    GetDefaultLogger().Log("Load Up Aux Files For 1st Probability Factor NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(freq_first_factor_probability_map_.size()) + " <freq,probability> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(terms_with_corresponding_species_belonging_to_map_.size()) + " <term freq appeared in the QL> pairs have been loaded.", false);
    }
}


void LocalQueryProcessor::LoadUpThreeFeatureValuesForMachineLearnedTraining(){
    // Loads the three per-term feature values used by the machine-learned model
    // (see ComputeThirdFactorProbability). Each line of the input file holds:
    //   term  lengthOfInvertedList  termFreqInCollection  termFreqInQueries
    // populating the three query_terms_*_map_ members keyed by term.
    // Empty maps are logged as a warning but are not fatal.
    cout << "LoadUpThreeFeatureValuesForMachineLearnedTraining() called." << endl;
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kThreeFeatureValuesForTrainingIn95KQueries));
    cout << "inputFileName:" << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());
	// Loop on getline() directly so a failed read at EOF is never processed
	// (the old `while (inputfile.good())` form could reprocess a stale buffer).
	while ( getline(inputfile, currentLine) )
	{
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );
		    string term;
		    string length_of_the_inverted_list_in_string_format;
		    string term_freq_in_collection_in_string_format;
		    string term_freq_in_queries_in_string_format;

			iss >> term;
			iss >> length_of_the_inverted_list_in_string_format;
			iss >> term_freq_in_collection_in_string_format;
			iss >> term_freq_in_queries_in_string_format;

			float length_of_the_inverted_list = atof(length_of_the_inverted_list_in_string_format.c_str());
			float term_freq_in_collection = atof(term_freq_in_collection_in_string_format.c_str());
			float term_freq_in_queries = atof(term_freq_in_queries_in_string_format.c_str());

			// Later entries for a repeated term silently overwrite earlier ones.
			query_terms_length_of_the_inverted_index_map_[term] = length_of_the_inverted_list;
			query_terms_term_freq_in_collection_map_[term] = term_freq_in_collection;
			query_terms_term_freq_in_queries_map_[term] = term_freq_in_queries;
		}
	}
	inputfile.close();

    if(query_terms_length_of_the_inverted_index_map_.size() == 0 or query_terms_term_freq_in_collection_map_.size() == 0 or query_terms_term_freq_in_queries_map_.size() == 0){
	    GetDefaultLogger().Log("Load Up Three Feature Values For Machine Learned Training NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(query_terms_length_of_the_inverted_index_map_.size()) + " <term,length_of_the_inverted_list> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(query_terms_term_freq_in_collection_map_.size()) + " <term,term_freq_in_collection> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(query_terms_term_freq_in_queries_map_.size()) + " <term,term_freq_in_queries> pairs have been loaded.", false);
    }

}

float LocalQueryProcessor::ComputeThirdFactorProbability(int queryTermPostionIndexNum,float valueOfPartialBM25ScoreComponentPart1_IDF,float valueOfPartialBM25ScoreComponentPart2_TF,float valueOfPartialBM25Score, uint32_t f_d_t,int doc_len, float &third_factor_probability_value){
	  // Scores the posting (current document, query term #queryTermPostionIndexNum)
	  // with a logistic-regression model ("Model19").
	  //
	  // Out-param third_factor_probability_value receives the raw logistic output.
	  // Returns that probability multiplied by the probability of the term
	  // appearing in the next query (chosen by queryTermPredictionModelValue_) and
	  // by BIG_NUMBER, after a round-trip through fixed-precision string formatting.
	  //
	  // New set of weights for the Logistic Regression model updated by Wei
	  // 2013/08/04 at school. Only the two BM25 score components carry non-zero
	  // weight; the other features are retained so the model can be re-enabled by
	  // editing weights only.
	  float intercept_weight_0 						= -5.729427121406548;
	  float partialBM25ScoreComponentPart1_IDF_weight_1    = 0.7311304467682848;
	  float partialBM25ScoreComponentPart2_TF_weight_2 	= 0.9032571917744138;
	  float partialBM25_weight_3 					= 0.0;
	  float length_of_the_inverted_index_weight_4   = 0.0;
	  float term_freq_in_doc_weight_5 				= 0.0;
	  float doc_words_weight_6 						= 0.0;
	  float term_freq_in_training_head95K_queries_weight_7 		= 0.0;
	  float term_freq_in_collection_weight_8 		= 0.0;
	  float posting_rank_in_doc_weight_9 			= 0.0;
	  float posting_rank_in_list_weight_10 			= 0.0;

	  /*
	  // previous set of weights for the Logistic Regression model updated by Wei 2013/06/19
	  float intercept_weight_0 = -1.8732626417531228;
	  float partialBM25ScoreComponentPart1_IDF_1 = 0.0;
	  float partialBM25ScoreComponentPart2_TF = 0.0;
	  float partialBM25_weight_3 = 0.500839240298326;
	  float length_of_the_inverted_index_weight_4 = 2.0221253339977762E-7;
	  float term_freq_in_doc_weight_5 = 0.0027364835030978286;
	  float doc_words_weight_6 = -1.529538647814492E-5;
	  float term_freq_in_95Kqueries_weight_7 = -9.83669087608801E-5;
	  float term_freq_in_collection_weight_8 = -2.214225299989675E-9;
	  float posting_rank_in_doc_weight_9 = -5.493538914727568E-4;
	  float posting_rank_in_list_weight_10 = -3.599972929911481E-8;
	  */

	  // Resolve the query term once; the original code looked up
	  // queryTermPostionIndexPairs_ a second time (as currTerm) further below.
	  string curr_term = queryTermPostionIndexPairs_[queryTermPostionIndexNum].first;
	  float valueOfCurrentPostingLengthOfTheInvertedList = 0.0;
	  float valueOfCurrentPostingTermFreqInCollection_ = 0.0;
	  float valueOfcurrentPostingTermFreqInQueries = 0.0;
	  float valueOfPostingRankInDoc = 0;	// currently can NOT feasibly implemented when applying to the whole index on 2013/08/04 at school
	  float valueOfPostingRankInList = 0;	// currently can NOT feasibly implemented when applying to the whole index on 2013/08/04 at school

	  // sub-step1: per-term feature -- inverted-list length (0.0 for unseen terms).
	  if (query_terms_length_of_the_inverted_index_map_.count( curr_term ) > 0){
		valueOfCurrentPostingLengthOfTheInvertedList = query_terms_length_of_the_inverted_index_map_[curr_term];
	  }
	  else{
		valueOfCurrentPostingLengthOfTheInvertedList = 0.0;
	  }

	  // sub-step2: per-term feature -- collection frequency (0.0 for unseen terms).
	  if(query_terms_term_freq_in_collection_map_.count(curr_term) > 0){
		valueOfCurrentPostingTermFreqInCollection_ = query_terms_term_freq_in_collection_map_[curr_term];
	  }
	  else{
		valueOfCurrentPostingTermFreqInCollection_ = 0.0;
	  }

	  // sub-step3: per-term feature -- query-log frequency (0.0 for unseen terms).
	  if (query_terms_term_freq_in_queries_map_.count(curr_term) > 0){
		valueOfcurrentPostingTermFreqInQueries = query_terms_term_freq_in_queries_map_[curr_term];
	  }
	  else{
		valueOfcurrentPostingTermFreqInQueries = 0.0;
	  }

	  // Linear combination of all features (the logit).
	  float matrixMultiplicationScore = intercept_weight_0                                  * 1 +
			                            partialBM25ScoreComponentPart1_IDF_weight_1         * valueOfPartialBM25ScoreComponentPart1_IDF +
			                            partialBM25ScoreComponentPart2_TF_weight_2          * valueOfPartialBM25ScoreComponentPart2_TF +
			  	  	  	  	  	  	  	partialBM25_weight_3                                * valueOfPartialBM25Score +
			  	  	  	  	  	        length_of_the_inverted_index_weight_4               * valueOfCurrentPostingLengthOfTheInvertedList +
			  	  	  	  	  	        term_freq_in_doc_weight_5                           * f_d_t +
			  	  	  	  	  	        doc_words_weight_6                                  * doc_len +
			  	  	  	  	  	        term_freq_in_training_head95K_queries_weight_7      * valueOfcurrentPostingTermFreqInQueries +
			  	  	  	  	  	        term_freq_in_collection_weight_8                    * valueOfCurrentPostingTermFreqInCollection_ +
			  	  	  	  	  	        posting_rank_in_doc_weight_9                        * valueOfPostingRankInDoc +
			  	  	  	  	  	        posting_rank_in_list_weight_10                      * valueOfPostingRankInList +
			  	  	  	  	  	        0.0;

	  // 1 - 1/(1+exp(s)) is algebraically identical to the standard sigmoid
	  // 1/(1+exp(-s)), so this IS the logistic probability of the positive class.
	  third_factor_probability_value = 1 - 1/(1 + exp( matrixMultiplicationScore ));

	  // The true probability is usually very small, so scale it up before the
	  // fixed-precision string round-trip below to keep significant digits.
	  const float BIG_NUMBER = 1000000;

	  float generalProbabilityOfThePostingTimes1000000BigNumber = 0.0;
	  float probabilityGivenTheQueryTermsTimes1000000BigNumber = 0.0;

	  // Probability that this term appears next, taken from the map matching the
	  // configured prediction model. NOTE(review): operator[] inserts a default
	  // 0.0 entry for unseen terms as a side effect -- presumably benign; confirm.
	  float probabilityOfThisQueryTermAppearedNext = 0.0;
	  if (queryTermPredictionModelValue_ == 1){
		  probabilityOfThisQueryTermAppearedNext = queryTermsTrueProbabilityDistributionMap_[curr_term];
	  }
	  else if (queryTermPredictionModelValue_ == 2){
		  probabilityOfThisQueryTermAppearedNext = queryTerms1DProbabilityDistributionMap_[curr_term];
	  }
	  else if (queryTermPredictionModelValue_ == 3){
		  probabilityOfThisQueryTermAppearedNext = queryTerms2DProbabilityDistributionMap_[curr_term];
	  }
	  else if (queryTermPredictionModelValue_ == 4){
		  probabilityOfThisQueryTermAppearedNext = queryTermsGoodTuringProbabilityDistributionMap_[curr_term];
	  }
	  probabilityGivenTheQueryTermsTimes1000000BigNumber = third_factor_probability_value * BIG_NUMBER;
	  generalProbabilityOfThePostingTimes1000000BigNumber = third_factor_probability_value * probabilityOfThisQueryTermAppearedNext * BIG_NUMBER;

	  // Round-trip through the fixed-precision string formatter to match the
	  // precision of scores produced elsewhere in the system.
	  probabilityGivenTheQueryTermsTimes1000000BigNumber = make_the_value_into_string_format_with_fixed_mode(probabilityGivenTheQueryTermsTimes1000000BigNumber, 6, false);
	  generalProbabilityOfThePostingTimes1000000BigNumber = make_the_value_into_string_format_with_fixed_mode(generalProbabilityOfThePostingTimes1000000BigNumber, 6, false);

	  return generalProbabilityOfThePostingTimes1000000BigNumber;
}

float LocalQueryProcessor::make_the_value_into_string_format_with_fixed_mode(float originalValue, int precisionNumber, bool debugFlag){
	  // Round-trips a float through stream formatting at the requested precision,
	  // quantizing it to the same value a formatted score string would parse to.
	  //
	  // originalValue   -- the value to quantize.
	  // precisionNumber -- stream precision (significant digits in default mode).
	  // debugFlag       -- when true, prints the intermediate string.
	  //
	  // NOTE(review): despite the name, `fixed` mode is deliberately disabled
	  // ("ss << fixed" commented out below, per Wei 2013/02/15, to stay strictly
	  // consistent with the original irtk scoring).
	  string originalValueInStringFormat = "";
	  float originalValueInFloatFormat = 0.0;
	  stringstream ss (stringstream::in | stringstream::out);

	  // option1
	  // Updated by Wei 2013/02/15, this score will be strictly corresponding to the original version of irtk
	  // ss << fixed;
	  // The manipulator must precede the value: the original code inserted
	  // setprecision AFTER originalValue, so it never took effect. (Stream default
	  // precision is 6, so output is unchanged for the existing precision-6 callers.)
	  ss << setprecision(precisionNumber) << originalValue;

	  // option2
	  // ss << originalValue;

	  originalValueInStringFormat = ss.str();

	  if (debugFlag){
		  cout << "test:" << originalValueInStringFormat << endl;
	  }

	  originalValueInFloatFormat = atof( originalValueInStringFormat.c_str() );
	  return originalValueInFloatFormat;
}

// Returns the total number of document results found in the intersection.
// Note that there is not a guaranteed order of same scoring docIDs.
int LocalQueryProcessor::IntersectLists2ScoreComputeOnlineForSIGIR2014(ListData** merge_lists, int num_merge_lists, ListData** lists, int num_lists, Result_Wei_2012* results, int num_results, bool pruningProjectSwitch, bool currentlyNOUsePlaceHolder, int computation_method) {
  // This lowerBoundThreshold can be either a partialBM25 score or a probability for the posting
  // cout << "This function LocalQueryProcessor::IntersectLists2ScoreComputeOnline(...) called in test" << endl;
  cout << "WIKI setting, option0, AND" << endl;
  // cout << "Buettcher, Clarke trec2004 setting for the parameters K1,B, option1, AND" << endl;
  // cout << "own setting option2, AND" << endl;
  // cout << "own setting option3, AND" << endl;
  // cout << "own setting option4, AND" << endl;
  // cout << "own setting option5, AND" << endl;
  // cout << "own setting option6, AND" << endl;
  int total_num_results = 0;

  // Updated by Wei 2014/01/13 night at school
  int total_num_results_been_filtered = 0;

  if (computation_method == 0){
	  // cout << "--->[serverHiddenInfo]Original Version by Roman" << endl;

	  /*
	  cout << "Notices:" << endl;
	  cout << "Updated by Wei 2013/02/18 night" << endl;
	  cout << "Roman originally developped version, online computation of the queries" << endl;
	  cout << "posting score lower bound threshold:" << lowerBoundThreshold << endl;
	  */

	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25', option0
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // Buettcher, Clarke setting for trec2004, option1
	  // const float kBm25K1 =  1.2;  // k1
	  // const float kBm25B = 0.5;   // b

	  // own setting, option2
	  // const float kBm25K1 =  1.2;  // k1
	  // const float kBm25B = 0.75;   // b

	  // own setting, option3
	  // const float kBm25K1 = 2;  // k1
	  // const float kBm25B = 0.5;   // b

	  // own setting. option4
	  // const float kBm25K1 = 2;  // k1
	  // const float kBm25B = 0.3;   // b

	  // own setting. option5
	  // const float kBm25K1 = 2;  // k1
	  // const float kBm25B = 0.1;   // b

	  // own setting. option6
	  // const float kBm25K1 = 2;  // k1
	  // const float kBm25B = 0.2;   // b

	  // We can precompute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection operation.
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in AND semantics.


	  SCORES_SET scores_set; //initialization of the SCORES_SET for the a specific document.
	  scores_set.totalScore = 0.0;
	  scores_set.doc_length = 0;

	  scores_set.docCandidateQualifyStatus = true;
	  scores_set.posting0QualifyStatus = true;
	  scores_set.posting1QualifyStatus = true;
	  scores_set.posting2QualifyStatus = true;
	  scores_set.posting3QualifyStatus = true;
	  scores_set.posting4QualifyStatus = true;
	  scores_set.posting5QualifyStatus = true;
	  scores_set.posting6QualifyStatus = true;
	  scores_set.posting7QualifyStatus = true;
	  scores_set.posting8QualifyStatus = true;
	  scores_set.posting9QualifyStatus = true;

	  scores_set.posting0RankInList = 0;
	  scores_set.posting1RankInList = 0;
	  scores_set.posting2RankInList = 0;
	  scores_set.posting3RankInList = 0;
	  scores_set.posting4RankInList = 0;
	  scores_set.posting5RankInList = 0;
	  scores_set.posting6RankInList = 0;
	  scores_set.posting7RankInList = 0;
	  scores_set.posting8RankInList = 0;
	  scores_set.posting9RankInList = 0;

	  scores_set.postingFirstProbabilities0 = 0.0;
	  scores_set.postingFirstProbabilities1 = 0.0;
	  scores_set.postingFirstProbabilities2 = 0.0;
	  scores_set.postingFirstProbabilities3 = 0.0;
	  scores_set.postingFirstProbabilities4 = 0.0;
	  scores_set.postingFirstProbabilities5 = 0.0;
	  scores_set.postingFirstProbabilities6 = 0.0;
	  scores_set.postingFirstProbabilities7 = 0.0;
	  scores_set.postingFirstProbabilities8 = 0.0;
	  scores_set.postingFirstProbabilities9 = 0.0;

	  scores_set.postingSecondANDThirdProbabilities0 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities1 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities2 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities3 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities4 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities5 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities6 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities7 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities8 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities9 = 0.0;


	  scores_set.postingThreeFactorProbabilities0 = 0.0;
	  scores_set.postingThreeFactorProbabilities1 = 0.0;
	  scores_set.postingThreeFactorProbabilities2 = 0.0;
	  scores_set.postingThreeFactorProbabilities3 = 0.0;
	  scores_set.postingThreeFactorProbabilities4 = 0.0;
	  scores_set.postingThreeFactorProbabilities5 = 0.0;
	  scores_set.postingThreeFactorProbabilities6 = 0.0;
	  scores_set.postingThreeFactorProbabilities7 = 0.0;
	  scores_set.postingThreeFactorProbabilities8 = 0.0;
	  scores_set.postingThreeFactorProbabilities9 = 0.0;

	  scores_set.posting0ScoreComponentPart1 = 0.0;
	  scores_set.posting1ScoreComponentPart1 = 0.0;
	  scores_set.posting2ScoreComponentPart1 = 0.0;
	  scores_set.posting3ScoreComponentPart1 = 0.0;
	  scores_set.posting4ScoreComponentPart1 = 0.0;
	  scores_set.posting5ScoreComponentPart1 = 0.0;
	  scores_set.posting6ScoreComponentPart1 = 0.0;
	  scores_set.posting7ScoreComponentPart1 = 0.0;
	  scores_set.posting8ScoreComponentPart1 = 0.0;
	  scores_set.posting9ScoreComponentPart1 = 0.0;

	  scores_set.posting0ScoreComponentPart2 = 0.0;
	  scores_set.posting1ScoreComponentPart2 = 0.0;
	  scores_set.posting2ScoreComponentPart2 = 0.0;
	  scores_set.posting3ScoreComponentPart2 = 0.0;
	  scores_set.posting4ScoreComponentPart2 = 0.0;
	  scores_set.posting5ScoreComponentPart2 = 0.0;
	  scores_set.posting6ScoreComponentPart2 = 0.0;
	  scores_set.posting7ScoreComponentPart2 = 0.0;
	  scores_set.posting8ScoreComponentPart2 = 0.0;
	  scores_set.posting9ScoreComponentPart2 = 0.0;


	  scores_set.postingScore0 = 0.0;
	  scores_set.postingScore1 = 0.0;
	  scores_set.postingScore2 = 0.0;
	  scores_set.postingScore3 = 0.0;
	  scores_set.postingScore4 = 0.0;
	  scores_set.postingScore5 = 0.0;
	  scores_set.postingScore6 = 0.0;
	  scores_set.postingScore7 = 0.0;
	  scores_set.postingScore8 = 0.0;
	  scores_set.postingScore9 = 0.0;

	  scores_set.lengthOfTheInvertedList0 = 0;
	  scores_set.lengthOfTheInvertedList1 = 0;
	  scores_set.lengthOfTheInvertedList2 = 0;
	  scores_set.lengthOfTheInvertedList3 = 0;
	  scores_set.lengthOfTheInvertedList4 = 0;
	  scores_set.lengthOfTheInvertedList5 = 0;
	  scores_set.lengthOfTheInvertedList6 = 0;
	  scores_set.lengthOfTheInvertedList7 = 0;
	  scores_set.lengthOfTheInvertedList8 = 0;
	  scores_set.lengthOfTheInvertedList9 = 0;

	  scores_set.postingTermFrequency0 = 0;
	  scores_set.postingTermFrequency1 = 0;
	  scores_set.postingTermFrequency2 = 0;
	  scores_set.postingTermFrequency3 = 0;
	  scores_set.postingTermFrequency4 = 0;
	  scores_set.postingTermFrequency5 = 0;
	  scores_set.postingTermFrequency6 = 0;
	  scores_set.postingTermFrequency7 = 0;
	  scores_set.postingTermFrequency8 = 0;
	  scores_set.postingTermFrequency9 = 0;

	  int doc_len;
	  uint32_t f_d_t;

	  uint32_t did = 0;
	  uint32_t d;


	  int i;  // Index for various loops.

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.

	  // updated by wei 2013/02/21
	  // lengthOfTheInvertedListForThisTerm is equal to the num of docs in the complete list
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;




	  for (i = 0; i < num_lists; ++i) {

		// If for the pruning project, then use the same overall statistics for all the pruned index.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }


	  // Necessary for the merge lists.
	  // TODO: Can also try the heap based method here. Can select between heap and array method based on 'num_merge_lists'.
	  uint32_t min_doc_id;

	  while (did < ListData::kNoMoreDocs) {
	    if (merge_lists != NULL) { // For the lists which we are merging.
	      // This will select the lowest docID (ignoring duplicates among the merge lists and any docIDs we have skipped past through AND mode operation).
	      min_doc_id = ListData::kNoMoreDocs;
	      for (i = 0; i < num_merge_lists; ++i) {
	        if ((d = merge_lists[i]->NextGEQ(did)) < min_doc_id) {
	          min_doc_id = d;
	        }
	      }

	      assert(min_doc_id >= did);

	      did = min_doc_id;
	      i = 0;
	    } else {
	      // Get next element from shortest list.
	      // I guess the list in the lists varable has already been sorted.
	      did = lists[0]->NextGEQ(did);

	      i = 1;
	    }



	    if (did == ListData::kNoMoreDocs)
	      break;

	    d = did;

	    // Try to find entries with same docID in other lists.
	    for (; (i < num_lists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
	      continue;
	    }

	    if (d > did)
	    {
	      // Not in intersection.
	      did = d;
	    }
	    else
	    {
	      assert(d == did);

	      /*
	      cout << did << " ";
	      */
	      scores_set.doc_length = doc_len;
		  scores_set.docCandidateQualifyStatus = true;
		  scores_set.posting0QualifyStatus = true;
		  scores_set.posting1QualifyStatus = true;
		  scores_set.posting2QualifyStatus = true;
		  scores_set.posting3QualifyStatus = true;
		  scores_set.posting4QualifyStatus = true;
		  scores_set.posting5QualifyStatus = true;
		  scores_set.posting6QualifyStatus = true;
		  scores_set.posting7QualifyStatus = true;
		  scores_set.posting8QualifyStatus = true;
		  scores_set.posting9QualifyStatus = true;

	      // Compute BM25 score from frequencies.
	      bm25_sum = 0;
	      bool postingThresholdNotQualifiedFlag = false;
	      for (i = 0; i < num_lists; ++i) {
	        f_d_t = lists[i]->GetFreq();
	        doc_len = index_reader_.document_map().GetDocumentLength(did);
	        partial_bm25 = idf_t[i] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);



	        if(i == 0){
	        	scores_set.postingScore0 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency0 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore0 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting0QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;
	    		}

	        }
	        else if(i == 1){
	        	scores_set.postingScore1 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency1 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore1 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting1QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;
	    		}
	        }
	        else if(i == 2){
	        	scores_set.postingScore2 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency2 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore2 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting2QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;

	    			// debug
	    			// cout << "scores_set.postingScore2: " << scores_set.postingScore2 << endl;
	    			// cout << "universal_threshold_socre_of_posting_: " << universal_threshold_socre_of_posting_ << endl;
	    			// exit(1);
	    		}
	        }
	        else if(i == 3){
	        	scores_set.postingScore3 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency3 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore3 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting3QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;
	    		}
	        }
	        else if(i == 4){
	        	scores_set.postingScore4 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency4 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore4 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting4QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;
	    		}
	        }
	        else if(i == 5){
	        	scores_set.postingScore5 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency5 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore5 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting5QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;
	    		}
	        }
	        else if(i == 6){
	        	scores_set.postingScore6 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency6 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore6 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting6QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;
	    		}
	        }
	        else if(i == 7){
	        	scores_set.postingScore7 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency7 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore7 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting7QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;
	    		}
	        }
	        else if(i == 8){
	        	scores_set.postingScore8 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency8 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore8 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting8QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;
	    		}
	        }
	        else if(i == 9){
	        	scores_set.postingScore9 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency9 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore9 <= universal_threshold_socre_of_posting_){
	    			postingThresholdNotQualifiedFlag = true;
	    			scores_set.posting9QualifyStatus = false;
	    			scores_set.docCandidateQualifyStatus = false;
	    		}
	        }
	        // Even though the score is added to bm25_sum and may be high enough to enter the top-k,
	        // the document will still NOT appear in the top-k results if the partial BM25 score of
	        // any of its postings fell below the threshold.
	        bm25_sum += partial_bm25;
	      }

	      // cout << endl;

	      scores_set.totalScore = bm25_sum;

	      // ############################################################
	      // Notes:
	      // Updated 2013/01/11 by Wei
	      // The following line has special purpose: output ALL the documents with the partialBM25 score associated with the term
	      // Can directly output all the posting with scores from here.
	      // There are 3 columns:
	      // the 1st column is: trecID for gov2
	      // the 2nd column is: docID for the polyIRToolkit internal representation
	      // the 3rd column is: partialBM25 score for this posting
	      // option1:
	      // cout << index_reader_.document_map().GetDocumentNumber( did ) << " " << did << " " << scores_set.totalScore << endl;
	      // option2:
	      // cout << did << " " << scores_set.totalScore << endl;
	      // ############################################################

	      /*
	      // version format1
	      if (!scores_set.docCandidateQualifyStatus){
	      	// although I already have an document candidate here, but one or more of the postings are NOT qualified for the posting threshold.
	      	// So, here, the injection into the priority heap operation can be ignored.
	      	// In that case, there is no real logic here.
	    	  ++total_num_results_been_filtered;  // Search for next docID.
	      }
		  // Use a heap to maintain the top-k documents. This has to be a min heap,
		  // where the lowest scoring document is on top, so that we can easily pop it,
		  // and push a higher scoring document if need be.
		  if (total_num_results < num_results) {
			// We insert a document if we don't have k documents yet.
			results[total_num_results] = make_pair(scores_set, did);
			push_heap(results, results + total_num_results + 1, ResultCompare2());
		  }
		  else
		  {
			if (scores_set.totalScore > results->first.totalScore)
			{
			  // We insert a document only if it's score is greater than the minimum scoring document in the heap.
			  pop_heap(results, results + num_results, ResultCompare2());
			  results[num_results - 1].first = scores_set;
			  results[num_results - 1].second = did;
			  push_heap(results, results + num_results, ResultCompare2());
			}
		  }
		  ++total_num_results;
		  ++did;  // Search for next docID.
		  */


	      // version format2
	      if (!scores_set.docCandidateQualifyStatus){
	      	// Although we already have a document candidate here, one or more of its postings did NOT
	      	// qualify against the posting threshold, so the insertion into the priority heap is skipped.
	      	// No further work is needed for this document.
	      	++did;  // Search for next docID.
	      }
	      else{
	          // Use a heap to maintain the top-k documents. This has to be a min heap,
	    	  // where the lowest scoring document is on top, so that we can easily pop it,
	    	  // and push a higher scoring document if need be.
	    	  if (total_num_results < num_results) {
	    	    // We insert a document if we don't have k documents yet.
	    	    results[total_num_results] = make_pair(scores_set, did);
	    	    push_heap(results, results + total_num_results + 1, ResultCompare2());
	    	  }
	    	  else
	    	  {
	    	    if (scores_set.totalScore > results->first.totalScore)
	    	    {
	    		  // We insert a document only if its score is greater than the minimum scoring document in the heap.
	    		  pop_heap(results, results + num_results, ResultCompare2());
	    		  results[num_results - 1].first = scores_set;
	    		  results[num_results - 1].second = did;
	    		  push_heap(results, results + num_results, ResultCompare2());
	    	    }
	    	  }
	          ++total_num_results;
	          ++did;  // Search for next docID.
	      }

	    }
	  }

	  // cout << "total_num_of_postings:" << total_num_results << endl;

	  // Sort top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare2());
	  cout << total_num_results_been_filtered << " results have been filtered." << endl;
	  return total_num_results;
  }
  else if (computation_method == 1){
	  cout << "3 factor probability formula logic implemented by Wei on 2013/08/02 at school" << endl;
	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // Pre compute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection operation.
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in AND semantics.
	  float valueOfPartialBM25ScoreComponentPart1_IDF;	// part1 is the idf_t[i] which has been computed completely
	  float valueOfPartialBM25ScoreComponentPart2_TF;	      // (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len)
	  // initialization of the SCORES_SET
	  SCORES_SET scores_set; //scores_set for the a specific document.
	  scores_set.totalScore = 0.0;

	  scores_set.doc_length = 0;

	  // remember to init these variables as well
	  scores_set.postingThreeFactorProbabilities0 = 0.0;
	  scores_set.postingThreeFactorProbabilities1 = 0.0;
	  scores_set.postingThreeFactorProbabilities2 = 0.0;
	  scores_set.postingThreeFactorProbabilities3 = 0.0;
	  scores_set.postingThreeFactorProbabilities4 = 0.0;
	  scores_set.postingThreeFactorProbabilities5 = 0.0;
	  scores_set.postingThreeFactorProbabilities6 = 0.0;
	  scores_set.postingThreeFactorProbabilities7 = 0.0;
	  scores_set.postingThreeFactorProbabilities8 = 0.0;
	  scores_set.postingThreeFactorProbabilities9 = 0.0;

	  scores_set.posting0ScoreComponentPart1 = 0.0;
	  scores_set.posting1ScoreComponentPart1 = 0.0;
	  scores_set.posting2ScoreComponentPart1 = 0.0;
	  scores_set.posting3ScoreComponentPart1 = 0.0;
	  scores_set.posting4ScoreComponentPart1 = 0.0;
	  scores_set.posting5ScoreComponentPart1 = 0.0;
	  scores_set.posting6ScoreComponentPart1 = 0.0;
	  scores_set.posting7ScoreComponentPart1 = 0.0;
	  scores_set.posting8ScoreComponentPart1 = 0.0;
	  scores_set.posting9ScoreComponentPart1 = 0.0;

	  scores_set.posting0ScoreComponentPart2 = 0.0;
	  scores_set.posting1ScoreComponentPart2 = 0.0;
	  scores_set.posting2ScoreComponentPart2 = 0.0;
	  scores_set.posting3ScoreComponentPart2 = 0.0;
	  scores_set.posting4ScoreComponentPart2 = 0.0;
	  scores_set.posting5ScoreComponentPart2 = 0.0;
	  scores_set.posting6ScoreComponentPart2 = 0.0;
	  scores_set.posting7ScoreComponentPart2 = 0.0;
	  scores_set.posting8ScoreComponentPart2 = 0.0;
	  scores_set.posting9ScoreComponentPart2 = 0.0;


	  scores_set.postingScore0 = 0.0;
	  scores_set.postingScore1 = 0.0;
	  scores_set.postingScore2 = 0.0;
	  scores_set.postingScore3 = 0.0;
	  scores_set.postingScore4 = 0.0;
	  scores_set.postingScore5 = 0.0;
	  scores_set.postingScore6 = 0.0;
	  scores_set.postingScore7 = 0.0;
	  scores_set.postingScore8 = 0.0;
	  scores_set.postingScore9 = 0.0;

	  scores_set.lengthOfTheInvertedList0 = 0;
	  scores_set.lengthOfTheInvertedList1 = 0;
	  scores_set.lengthOfTheInvertedList2 = 0;
	  scores_set.lengthOfTheInvertedList3 = 0;
	  scores_set.lengthOfTheInvertedList4 = 0;
	  scores_set.lengthOfTheInvertedList5 = 0;
	  scores_set.lengthOfTheInvertedList6 = 0;
	  scores_set.lengthOfTheInvertedList7 = 0;
	  scores_set.lengthOfTheInvertedList8 = 0;
	  scores_set.lengthOfTheInvertedList9 = 0;

	  scores_set.postingTermFrequency0 = 0;
	  scores_set.postingTermFrequency1 = 0;
	  scores_set.postingTermFrequency2 = 0;
	  scores_set.postingTermFrequency3 = 0;
	  scores_set.postingTermFrequency4 = 0;
	  scores_set.postingTermFrequency5 = 0;
	  scores_set.postingTermFrequency6 = 0;
	  scores_set.postingTermFrequency7 = 0;
	  scores_set.postingTermFrequency8 = 0;
	  scores_set.postingTermFrequency9 = 0;

	  int doc_len;
	  uint32_t f_d_t;

	  uint32_t did = 0;
	  uint32_t d;


	  int i;  // Index for various loops.

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.

	  // updated by wei 2013/02/21
	  // lengthOfTheInvertedListForThisTerm is equal to the num of docs in the complete list
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;

	  for (i = 0; i < num_lists; ++i) {

		// For the pruning project, use the same overall statistics for all of the pruned indexes.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }


	  // Necessary for the merge lists.
	  // TODO: Can also try the heap based method here. Can select between heap and array method based on 'num_merge_lists'.
	  uint32_t min_doc_id;

	  while (did < ListData::kNoMoreDocs) {
	    if (merge_lists != NULL) { // For the lists which we are merging.
	      // This will select the lowest docID (ignoring duplicates among the merge lists and any docIDs we have skipped past through AND mode operation).
	      min_doc_id = ListData::kNoMoreDocs;
	      for (i = 0; i < num_merge_lists; ++i) {
	        if ((d = merge_lists[i]->NextGEQ(did)) < min_doc_id) {
	          min_doc_id = d;
	        }
	      }

	      assert(min_doc_id >= did);

	      did = min_doc_id;
	      i = 0;
	    } else {
	      // Get next element from shortest list.
	      // The lists in the 'lists' variable have presumably already been sorted (shortest first) — verify at the call site.
	      did = lists[0]->NextGEQ(did);

	      i = 1;
	    }



	    if (did == ListData::kNoMoreDocs)
	      break;

	    d = did;

	    // Try to find entries with same docID in other lists.
	    for (; (i < num_lists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
	      continue;
	    }

	    if (d > did)
	    {
	      // Not in intersection.
	      did = d;
	    }
	    else
	    {
	      assert(d == did);
	      // cout << did << " ";
	      // Compute BM25 score from frequencies.
	      bm25_sum = 0;
	      bool postingThresholdNotQualifiedFlag = false;
	      for (i = 0; i < num_lists; ++i) {
	        f_d_t = lists[i]->GetFreq();
	        doc_len = index_reader_.document_map().GetDocumentLength(did);
	        valueOfPartialBM25ScoreComponentPart1_IDF = idf_t[i];
	        valueOfPartialBM25ScoreComponentPart2_TF = (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
	        partial_bm25 = valueOfPartialBM25ScoreComponentPart1_IDF * valueOfPartialBM25ScoreComponentPart2_TF;

	        /*
	        // debug ONLY for the BM25 score correctness
	        cout << "Debug begins..." << endl;
	        cout << "partial_bm25:" << partial_bm25 << endl;
	        cout << "idf_t[i](BM25ScorePart1 IDF component):" << idf_t[i] << endl;
	        cout << "bm25ScoreComponentPart2(BM25ScorePart2 TF component):" << bm25ScoreComponentPart2 << endl;
	        // cout << "f_d_t:" << f_d_t << endl;
	        // cout << "kBm25NumeratorMul:" << kBm25NumeratorMul << endl;
	        // cout << "kBm25DenominatorAdd:" << kBm25DenominatorAdd << endl;
	        // cout << "kBm25DenominatorDocLenMul:" << kBm25DenominatorDocLenMul << endl;
	        // cout << "doc_len:" << doc_len << endl;
	        cout << "Debug ends." << endl;
	        cout << endl;
	        */

	        scores_set.doc_length = doc_len;



	        if(i == 0){
	        	  scores_set.posting0ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting0ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore0 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency0 = f_d_t;
	        }
	        else if(i == 1){
	        	  scores_set.posting1ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting1ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore1 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency1 = f_d_t;
	        }
	        else if(i == 2){
	        	  scores_set.posting2ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting2ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore2 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency2 = f_d_t;
	        }
	        else if(i == 3){
	        	  scores_set.posting3ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting3ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore3 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency3 = f_d_t;
	        }
	        else if(i == 4){
	        	  scores_set.posting4ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting4ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore4 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency4 = f_d_t;
	        }
	        else if(i == 5){
	        	scores_set.posting5ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting5ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore5 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency5 = f_d_t;
	        }
	        else if(i == 6){
	        	scores_set.posting6ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting6ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore6 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency6 = f_d_t;
	        }
	        else if(i == 7){
	            scores_set.posting7ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting7ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore7 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency7 = f_d_t;
	        }
	        else if(i == 8){
	            scores_set.posting8ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting8ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore8 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency8 = f_d_t;
	        }
	        else if(i == 9){
	            scores_set.posting9ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting9ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore9 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency9 = f_d_t;
	        }

	        // Updated by Wei 2013/08/04 afternoon at school
	        // init the 3 probabilities
	        double first_factor_probability_value = 0.0;
	        float second_factor_probability_value = 0.0;
	        float third_factor_probability_value = 0.0;
	        double factor_2_3_combined_probability_value = 0.0;
	        double factor_1_3_combined_probability_value = 0.0;
	        double final_3_factors_probability_combined_value = 0.0;
	        double posting_judged_probability = 0.0;

	        // compute P(t)
	        string curr_look_up_term = queryTermPostionIndexPairs_[i].first;
            if (terms_with_corresponding_species_belonging_to_map_.count(curr_look_up_term) > 0 ){
            	// for debug
            	// cout << "mark1" << endl;
            	first_factor_probability_value = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[curr_look_up_term] ];
            }
            else{
            	// for debug
            	// cout << "mark2" << endl;
            	first_factor_probability_value = freq_first_factor_probability_map_[0];
            }

	        // compute P(Int)
	        float partialProbability = 0.0;
	        float valuePart1 = 0.0;
	        float valuePart2 = 0.0;
	        // compute the second_factor_probability_value
	        float XDocValueForGoodTurning = 0.0;
	        // Updated by Wei 2013/08/03 morning at school
	        // Use the Good-Turing estimate for now
	        string didInStringFormat = "";
	        stringstream ss;
	        ss << did;
	        ss >> didInStringFormat;
	        if (docID_With_Xdoc_Value_goodTurning_map_.count(didInStringFormat) > 0){
	        	XDocValueForGoodTurning = docID_With_Xdoc_Value_goodTurning_map_[didInStringFormat];
	        }
	        else{
	        	cout << "System Error, the didInStringFormat is NOT in the docID_With_Xdoc_Value_goodTurning_map_, mark3" << endl;
	        	exit(1);
	        }
	        // for debug ONLY
	        // cout << "XDocValueForGoodTurning:" << XDocValueForGoodTurning << endl;


	        map<int,float>::iterator iter;
	        for (iter = query_length_probability_map_.begin(); iter != query_length_probability_map_.end(); ++iter){
	        	valuePart1 = (*iter).second;
	        	valuePart2 = pow(XDocValueForGoodTurning, (*iter).first-1);
	        	partialProbability = valuePart1 * valuePart2;
	        	second_factor_probability_value += partialProbability;
	        	// for debug ONLY
	        	/*
	        	cout << "queryLength:" << (*iter).first << endl;
	        	cout << "valuePart1:" << valuePart1 << endl;
	        	cout << "valuePart2:" << valuePart2 << endl;
	        	cout << "partialProbability:" << partialProbability << endl;
	        	cout << "second_factor_probability_value:" << second_factor_probability_value << endl;
	        	cout << endl;
	        	*/
	        }

	        // compute P(TOP10)
	        // CURRENT version Updated by Wei 2013/08/04:
	        // Note: the variable third_factor_probability_value will automatically be filled
	        ComputeThirdFactorProbability(i, valueOfPartialBM25ScoreComponentPart1_IDF, valueOfPartialBM25ScoreComponentPart2_TF, partial_bm25, f_d_t, doc_len, third_factor_probability_value);

	        // OLD version:
	        // The old version multiplied by a big number, which I believe has been unnecessary since 2013/08/04:
	        // float thirdFactorProbabilityValueTimes1000000BigNumber = ComputeThirdFactorProbability(i, valueOfPartialBM25ScoreComponentPart1_IDF, valueOfPartialBM25ScoreComponentPart2_TF, partial_bm25, f_d_t, doc_len, third_factor_probability_value);

	        factor_2_3_combined_probability_value = second_factor_probability_value * third_factor_probability_value;
	        factor_1_3_combined_probability_value = first_factor_probability_value * third_factor_probability_value;
	        final_3_factors_probability_combined_value = first_factor_probability_value * second_factor_probability_value * third_factor_probability_value;

	        // many choices for the assignment of the posting_judged_probability
	        // { partial_bm25,
	        //   third_factor_probability_value,
	        //   factor_2_3_combined_probability_value,
	        //   factor_1_3_combined_probability_value,
	        //   final_3_factors_probability_combined_value}
	        posting_judged_probability = partial_bm25;

	        if(i == 0){
	        	scores_set.postingThreeFactorProbabilities0 = posting_judged_probability;
	        }
	        else if(i == 1){
	        	scores_set.postingThreeFactorProbabilities1 = posting_judged_probability;
	        }
	        else if(i == 2){
	        	scores_set.postingThreeFactorProbabilities2 = posting_judged_probability;
	        }
	        else if(i == 3){
	        	scores_set.postingThreeFactorProbabilities3 = posting_judged_probability;
	        }
	        else if(i == 4){
	        	scores_set.postingThreeFactorProbabilities4 = posting_judged_probability;
	        }
	        else if(i == 5){
	        	scores_set.postingThreeFactorProbabilities5 = posting_judged_probability;
	        }
	        else if(i == 6){
	        	scores_set.postingThreeFactorProbabilities6 = posting_judged_probability;
	        }
	        else if(i == 7){
	        	scores_set.postingThreeFactorProbabilities7 = posting_judged_probability;
	        }
	        else if(i == 8){
	        	scores_set.postingThreeFactorProbabilities8 = posting_judged_probability;
	        }
	        else if(i == 9){
	        	scores_set.postingThreeFactorProbabilities9 = posting_judged_probability;
	        }


	        // for debug section ONLY (especially for the term 'soalr')
	        /*
	        cout << "current_term: " << curr_look_up_term << endl;
	        cout << "didInStringFormat: " << didInStringFormat << endl;
	        cout << "first_factor_probability_value: " << first_factor_probability_value << endl;
	        cout << "second_factor_probability_value: " << second_factor_probability_value << endl;
	        cout << "third_factor_probability_value: " << third_factor_probability_value << endl;
	        cout << "factor_2_3_combined_probability_value: " << factor_2_3_combined_probability_value << endl;
	        cout << "factor_1_3_combined_probability_value: " << factor_1_3_combined_probability_value << endl;
	        cout << "final_3_factors_probability_combined_value: " << final_3_factors_probability_combined_value << endl;
	        // especially for the debug query term "soalr"
	        cout << "scores_set.postingThreeFactorProbabilities0: " << scores_set.postingThreeFactorProbabilities0 << endl;
	        cout << endl;
	        */

	        // in debug MODE by Wei on 2013/08/06 at school
	        // The 4 threshold choices apply both to universal_threshold_socre_of_posting_ and to the value it is compared against.

	        if (posting_judged_probability <= universal_threshold_socre_of_posting_){
				  postingThresholdNotQualifiedFlag = true;
			}
	        bm25_sum += partial_bm25;
	      }

	      scores_set.totalScore = bm25_sum;

	      // ############################################################
	      // Notes:
	      // Updated 2013/01/11 by Wei
	      // The following line has special purpose: output ALL the documents with the partialBM25 score associated with the term
	      // Can directly output all the posting with scores from here.
	      // There are 3 columns:
	      // the 1st column is: trecID for gov2
	      // the 2nd column is: docID for the polyIRToolkit internal representation
	      // the 3rd column is: partialBM25 score for this posting
	      // option1:
	      // cout << index_reader_.document_map().GetDocumentNumber( did ) << " " << did << " " << scores_set.totalScore << endl;
	      // option2:
	      // cout << did << " " << scores_set.totalScore << endl;
	      // ############################################################

	      if (postingThresholdNotQualifiedFlag){
	      	// Although we already have a document candidate here, one or more of its postings did NOT
	      	// qualify against the posting threshold, so the insertion into the priority heap is skipped.
	      	// No further work is needed for this document.
	      	++did;  // Search for next docID.
	      }
	      else{
	          // Use a heap to maintain the top-k documents. This has to be a min heap,
	    	  // where the lowest scoring document is on top, so that we can easily pop it,
	    	  // and push a higher scoring document if need be.
	    	  if (total_num_results < num_results) {
	    	    // We insert a document if we don't have k documents yet.
	    	    results[total_num_results] = make_pair(scores_set, did);
	    	    push_heap(results, results + total_num_results + 1, ResultCompare2());
	    	  }
	    	  else
	    	  {
	    	    if (scores_set.totalScore > results->first.totalScore)
	    	    {
	    		  // We insert a document only if its score is greater than the minimum scoring document in the heap.
	    		  pop_heap(results, results + num_results, ResultCompare2());
	    		  results[num_results - 1].first = scores_set;
	    		  results[num_results - 1].second = did;
	    		  push_heap(results, results + num_results, ResultCompare2());
	    	    }
	    	  }
	          ++total_num_results;
	          ++did;  // Search for next docID.
	      }
	    }
	  }

	  // Sort top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  return total_num_results;

  }
  else if(computation_method == -1){
	  cout << "Logistic Regression probability logic implemented(Under Construction by Wei 2013/06/19)" << endl;

	  // updated by Wei:2013/02/24
	  // the weight vector for the Logistic Regression I trained.
	  float intercept_weight_0 = 1.2404;
	  float partialBM25_weight_1 = -0.4866;
	  float length_of_the_inverted_index_2 = 0.0;
	  float term_freq_in_collection_3 = 0.0;
	  float term_freq_in_doc_4 = -0.0068;
	  float doc_words_5 = 0.0001;
	  float term_freq_in_queries_6 = 0.0;
	  float posting_rank_in_list_7 = 0.0;
	  float posting_rank_in_doc_8 = 0.0;

	  /*
		  // The following code is just for reference
		  uint32_t f_d_t = entry.frequency;
		  int doc_len = doc_map_reader_.GetDocumentLength(entry.doc_id);
		  returning_score = kIdfT * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
		  assert(!isnan(returning_score));

		  // intercept_weight_0;
		  // partialBM25_weight_1;
		  // length_of_the_inverted_index_2;
		  // term_freq_in_collection_3;
		  // term_freq_in_doc_4;
		  // doc_words_5;
		  // term_freq_in_queries_6;
		  // posting_rank_in_list_7;
		  // posting_rank_in_doc_8;

		  float matrixMultiplicationScore = intercept_weight_0 * 1 +
				  	  	  	  	  	  	  	partialBM25_weight_1 * returning_score +
				  	  	  	  	  	        // length_of_the_inverted_index_2 * sth +
				  	  	  	  	  	        // term_freq_in_collection_3 * sth +
				  	  	  	  	  	        term_freq_in_doc_4 * f_d_t +
				  	  	  	  	  	        doc_words_5 * doc_len +
				  	  	  	  	  	        // term_freq_in_queries_6 * sth +
				  	  	  	  	  	        // posting_rank_in_list_7 * sth +
				  	  	  	  	  	        // posting_rank_in_doc_8 * sth +
				  	  	  	  	  	        0.0;

		  float probabilityGivenTheQueryTerms = 1/(1 + exp( matrixMultiplicationScore ));

		  // The true probability is usually very small, so I need to time a big number.
		  // option1:
		  float BIG_NUMBER = 1000000;

		  // option2:
		  // float BIG_NUMBER = 1;

		  returning_score = queryTermsProbabilityDistributionMap_[term_] * probabilityGivenTheQueryTerms * BIG_NUMBER;
	  */

	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1: term-frequency saturation parameter.
	  const float kBm25B = 0.75;   // b: document-length normalization parameter.

	  // Precompute the document-independent pieces of the BM25 formula once per query.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection operation.
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in AND semantics.

	  // Per-candidate-document feature bundle: total score plus per-posting features
	  // for up to 10 query terms. Every field is zeroed by hand below -- SCORES_SET
	  // apparently has no default constructor (TODO confirm; a constructor or memset
	  // would be less error-prone).
	  SCORES_SET scores_set; //scores_set for a specific document.
	  scores_set.totalScore = 0.0;

	  scores_set.doc_length = 0;

	  // Remember to (re)initialize all the per-term slots as well.
	  scores_set.postingThreeFactorProbabilities0 = 0.0;
	  scores_set.postingThreeFactorProbabilities1 = 0.0;
	  scores_set.postingThreeFactorProbabilities2 = 0.0;
	  scores_set.postingThreeFactorProbabilities3 = 0.0;
	  scores_set.postingThreeFactorProbabilities4 = 0.0;
	  scores_set.postingThreeFactorProbabilities5 = 0.0;
	  scores_set.postingThreeFactorProbabilities6 = 0.0;
	  scores_set.postingThreeFactorProbabilities7 = 0.0;
	  scores_set.postingThreeFactorProbabilities8 = 0.0;
	  scores_set.postingThreeFactorProbabilities9 = 0.0;

	  scores_set.postingScore0 = 0.0;
	  scores_set.postingScore1 = 0.0;
	  scores_set.postingScore2 = 0.0;
	  scores_set.postingScore3 = 0.0;
	  scores_set.postingScore4 = 0.0;
	  scores_set.postingScore5 = 0.0;
	  scores_set.postingScore6 = 0.0;
	  scores_set.postingScore7 = 0.0;
	  scores_set.postingScore8 = 0.0;
	  scores_set.postingScore9 = 0.0;

	  scores_set.lengthOfTheInvertedList0 = 0;
	  scores_set.lengthOfTheInvertedList1 = 0;
	  scores_set.lengthOfTheInvertedList2 = 0;
	  scores_set.lengthOfTheInvertedList3 = 0;
	  scores_set.lengthOfTheInvertedList4 = 0;
	  scores_set.lengthOfTheInvertedList5 = 0;
	  scores_set.lengthOfTheInvertedList6 = 0;
	  scores_set.lengthOfTheInvertedList7 = 0;
	  scores_set.lengthOfTheInvertedList8 = 0;
	  scores_set.lengthOfTheInvertedList9 = 0;

	  scores_set.postingTermFrequency0 = 0;
	  scores_set.postingTermFrequency1 = 0;
	  scores_set.postingTermFrequency2 = 0;
	  scores_set.postingTermFrequency3 = 0;
	  scores_set.postingTermFrequency4 = 0;
	  scores_set.postingTermFrequency5 = 0;
	  scores_set.postingTermFrequency6 = 0;
	  scores_set.postingTermFrequency7 = 0;
	  scores_set.postingTermFrequency8 = 0;
	  scores_set.postingTermFrequency9 = 0;

	  int doc_len;     // Length of the current document (looked up from the document map).
	  uint32_t f_d_t;  // Frequency of the current term in the current document.

	  uint32_t did = 0;  // Current candidate docID.
	  uint32_t d;        // docID returned by NextGEQ() probes.


	  int i;  // Index for various loops.

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here (compiler extension, not standard C++).

	  // updated by wei 2013/02/21
	  // lengthOfTheInvertedListForThisTerm[i] is the number of docs in term i's complete (unpruned) list.
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;

	  for (i = 0; i < num_lists; ++i) {

		// For the pruning project, use the same overall statistics for all pruned indexes.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
	    // Smoothed BM25 idf: log10(1 + (N - n_t + 0.5) / (n_t + 0.5)); the '1 +' keeps it
	    // non-negative for very common terms. NOTE(review): uses log10 rather than ln, so
	    // scores differ from textbook BM25 by a constant factor -- confirm intentional.
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }


	  // Necessary for the merge lists.
	  // TODO: Can also try the heap based method here. Can select between heap and array method based on 'num_merge_lists'.
	  uint32_t min_doc_id;

	  // Document-at-a-time AND (intersection) over all lists: advance 'did' until every
	  // list is exhausted (NextGEQ() returns ListData::kNoMoreDocs).
	  while (did < ListData::kNoMoreDocs) {
	    if (merge_lists != NULL) { // For the lists which we are merging.
	      // This will select the lowest docID (ignoring duplicates among the merge lists and any docIDs we have skipped past through AND mode operation).
	      min_doc_id = ListData::kNoMoreDocs;
	      for (i = 0; i < num_merge_lists; ++i) {
	        if ((d = merge_lists[i]->NextGEQ(did)) < min_doc_id) {
	          min_doc_id = d;
	        }
	      }

	      assert(min_doc_id >= did);

	      did = min_doc_id;
	      i = 0;  // Probe all of lists[0..num_lists-1] against this candidate below.
	    } else {
	      // Get the next element from the shortest list.
	      // Presumably 'lists' has already been sorted shortest-first -- confirm at the call site.
	      did = lists[0]->NextGEQ(did);

	      i = 1;  // lists[0] trivially matches its own candidate; probe the rest.
	    }



	    if (did == ListData::kNoMoreDocs)
	      break;

	    d = did;

	    // Try to find entries with the same docID in the remaining lists.
	    for (; (i < num_lists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
	      continue;
	    }

	    if (d > did)
	    {
	      // Not in the intersection: some list skipped past 'did', so resume the search from 'd'.
	      did = d;
	    }
	    else
	    {
	      assert(d == did);
	      // Compute BM25 score from frequencies.
	      // In the intersection: document 'did' appears in every term's list, so score it.
	      //
	      // The original code repeated the per-posting feature capture and the learned
	      // posting filter verbatim for i == 0..9 (a 10-way copy-paste); the switch
	      // below collapses that into one copy with identical behavior.
	      bm25_sum = 0;
	      bool postingThresholdNotQualifiedFlag = false;
	      for (i = 0; i < num_lists; ++i) {
	        f_d_t = lists[i]->GetFreq();
	        doc_len = index_reader_.document_map().GetDocumentLength(did);
	        // Partial (per-term) BM25 contribution for this posting.
	        partial_bm25 = idf_t[i] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

	        scores_set.doc_length = doc_len;

	        // SCORES_SET has 10 hard-coded per-term slots. Terms beyond the 10th still
	        // contribute to bm25_sum but carry no per-posting features and skip the
	        // posting filter, exactly matching the original behavior.
	        switch (i) {
	          case 0:
	            scores_set.postingScore0 = partial_bm25;
	            scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency0 = f_d_t;
	            break;
	          case 1:
	            scores_set.postingScore1 = partial_bm25;
	            scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency1 = f_d_t;
	            break;
	          case 2:
	            scores_set.postingScore2 = partial_bm25;
	            scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency2 = f_d_t;
	            break;
	          case 3:
	            scores_set.postingScore3 = partial_bm25;
	            scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency3 = f_d_t;
	            break;
	          case 4:
	            scores_set.postingScore4 = partial_bm25;
	            scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency4 = f_d_t;
	            break;
	          case 5:
	            scores_set.postingScore5 = partial_bm25;
	            scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency5 = f_d_t;
	            break;
	          case 6:
	            scores_set.postingScore6 = partial_bm25;
	            scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency6 = f_d_t;
	            break;
	          case 7:
	            scores_set.postingScore7 = partial_bm25;
	            scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency7 = f_d_t;
	            break;
	          case 8:
	            scores_set.postingScore8 = partial_bm25;
	            scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency8 = f_d_t;
	            break;
	          case 9:
	            scores_set.postingScore9 = partial_bm25;
	            scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[i];
	            scores_set.postingTermFrequency9 = f_d_t;
	            break;
	          default:
	            break;
	        }

	        if (i < 10) {
	          // Learned (logistic-regression) posting filter. Active features: intercept,
	          // partial BM25 score, in-document term frequency, and document length.
	          // Currently disabled features of the model:
	          //   length_of_the_inverted_index_2, term_freq_in_collection_3,
	          //   term_freq_in_queries_6, posting_rank_in_list_7, posting_rank_in_doc_8.
	          // (partial_bm25 is exactly the postingScoreN value just stored above.)
	          float matrixMultiplicationScore = intercept_weight_0 * 1 +
	                                            partialBM25_weight_1 * partial_bm25 +
	                                            term_freq_in_doc_4 * f_d_t +
	                                            doc_words_5 * doc_len +
	                                            0.0;

	          // Logistic link, kept exactly as originally written: 1/(1 + e^x),
	          // i.e. sigmoid(-matrixMultiplicationScore).
	          float probabilityGivenTheQueryTerms = 1/(1 + exp( matrixMultiplicationScore ));

	          // The true probability is usually very small, so scale it up by a big
	          // number before comparing against the threshold (alternative scale was 1).
	          const float BIG_NUMBER = 1000000;

	          // P(query term) * P(posting qualifies | query terms), scaled.
	          float generalProbabilityOfThePosting = queryTermsTrueProbabilityDistributionMap_[ queryTermPostionIndexPairs_[i].first ] * probabilityGivenTheQueryTerms * BIG_NUMBER;

	          // One failing posting disqualifies the whole document from the top-k heap below.
	          if (generalProbabilityOfThePosting <= universal_threshold_socre_of_posting_){
	            postingThresholdNotQualifiedFlag = true;
	          }
	        }

	        bm25_sum += partial_bm25;
	      }

	      // cout << endl;

	      scores_set.totalScore = bm25_sum;

	      // ############################################################
	      // Notes:
	      // Updated 2013/01/11 by Wei
	      // The following line has special purpose: output ALL the documents with the partialBM25 score associated with the term
	      // Can directly output all the postings with scores from here.
	      // There are 3 columns:
	      // the 1st column is: trecID for gov2
	      // the 2nd column is: docID for polyIRToolkit internal representation
	      // the 3rd column is: partialBM25 score for this posting
	      // option1:
	      // cout << index_reader_.document_map().GetDocumentNumber( did ) << " " << did << " " << scores_set.totalScore << endl;
	      // option2:
	      // cout << did << " " << scores_set.totalScore << endl;
	      // ############################################################

	      if (postingThresholdNotQualifiedFlag){
	      	// At least one posting of this document failed the posting-level threshold,
	      	// so the whole document is dropped: it is never pushed into the top-k heap
	      	// and does not count towards total_num_results.
	      	++did;  // Search for next docID.
	      }
	      else{
	          // Use a heap to maintain the top-k documents. This has to be a min heap
	    	  // (via ResultCompare2), where the lowest scoring document sits at results[0],
	    	  // so that we can easily pop it and push a higher scoring document if need be.
	    	  if (total_num_results < num_results) {
	    	    // Filling phase: we insert unconditionally until we have k documents.
	    	    results[total_num_results] = make_pair(scores_set, did);
	    	    push_heap(results, results + total_num_results + 1, ResultCompare2());
	    	  }
	    	  else
	    	  {
	    	    if (scores_set.totalScore > results->first.totalScore)
	    	    {
	    		  // Heap is full: replace only if this score beats the current minimum
	    		  // (results[0], the heap top).
	    		  pop_heap(results, results + num_results, ResultCompare2());
	    		  results[num_results - 1].first = scores_set;
	    		  results[num_results - 1].second = did;
	    		  push_heap(results, results + num_results, ResultCompare2());
	    	    }
	    	  }
	          // Counts every qualified document in the intersection -- including ones not
	          // kept in the heap -- so the return value is "total hits", while the number
	          // of entries actually in 'results' is min(num_results, total_num_results).
	          ++total_num_results;
	          ++did;  // Search for next docID.
	      }
	    }
	  }

	  // Sort the top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  return total_num_results;
  }
  else if(computation_method == 2){
	  // NOTE: Intentionally left untouched (2014/01/25) pending a future addition.

	  // cout << "This is the alg to do the TCP" << endl;

	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // We can precompute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  // Document-length multiplier of the BM25 denominator: k1 * b / avgdl.
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection operation.
	  float partial_bm25; // The partial BM25 score for one (document, term) posting processed under AND semantics.

	  // Per-document score record pushed into the top-k heap. Every field is zeroed
	  // here explicitly because SCORES_SET is a plain aggregate with no constructor.
	  // NOTE(review): SCORES_SET only has named slots for up to 10 query terms
	  // (postingScore0..9 etc.), so per-term data for terms beyond the 10th is
	  // silently dropped by the i==0..9 chain below -- TODO confirm the caller
	  // guarantees num_lists <= 10.
	  SCORES_SET scores_set; // scores_set for a specific document.
	  scores_set.totalScore = 0.0;
	  scores_set.doc_length = 0;

	  scores_set.postingScore0 = 0.0;
	  scores_set.postingScore1 = 0.0;
	  scores_set.postingScore2 = 0.0;
	  scores_set.postingScore3 = 0.0;
	  scores_set.postingScore4 = 0.0;
	  scores_set.postingScore5 = 0.0;
	  scores_set.postingScore6 = 0.0;
	  scores_set.postingScore7 = 0.0;
	  scores_set.postingScore8 = 0.0;
	  scores_set.postingScore9 = 0.0;

	  scores_set.lengthOfTheInvertedList0 = 0;
	  scores_set.lengthOfTheInvertedList1 = 0;
	  scores_set.lengthOfTheInvertedList2 = 0;
	  scores_set.lengthOfTheInvertedList3 = 0;
	  scores_set.lengthOfTheInvertedList4 = 0;
	  scores_set.lengthOfTheInvertedList5 = 0;
	  scores_set.lengthOfTheInvertedList6 = 0;
	  scores_set.lengthOfTheInvertedList7 = 0;
	  scores_set.lengthOfTheInvertedList8 = 0;
	  scores_set.lengthOfTheInvertedList9 = 0;

	  scores_set.postingTermFrequency0 = 0;
	  scores_set.postingTermFrequency1 = 0;
	  scores_set.postingTermFrequency2 = 0;
	  scores_set.postingTermFrequency3 = 0;
	  scores_set.postingTermFrequency4 = 0;
	  scores_set.postingTermFrequency5 = 0;
	  scores_set.postingTermFrequency6 = 0;
	  scores_set.postingTermFrequency7 = 0;
	  scores_set.postingTermFrequency8 = 0;
	  scores_set.postingTermFrequency9 = 0;

	  int doc_len;     // Length (in tokens) of the current document.
	  uint32_t f_d_t;  // Term frequency of term t in document d.

	  uint32_t did = 0;  // Current candidate docID; also the NextGEQ() lower bound.
	  uint32_t d;        // Scratch docID returned by NextGEQ() probes.


	  int i;  // Index for various loops.

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.

	  // updated by wei 2013/02/21
	  // lengthOfTheInvertedListForThisTerm[i] is the number of documents in the
	  // complete (unpruned) inverted list of term i.
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;




	  for (i = 0; i < num_lists; ++i) {

		// For the pruning project, use the same overall (original-index) statistics
		// for every pruned index so scores stay comparable across prune levels.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
		// BM25 idf; the "1 +" inside the log keeps idf positive even for very common terms.
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }


	  // Necessary for the merge lists.
	  // TODO: Can also try the heap based method here. Can select between heap and array method based on 'num_merge_lists'.
	  uint32_t min_doc_id;

	  // Document-at-a-time AND-mode traversal, driven by the shortest list
	  // (or by the minimum over the merge lists when merging is active).
	  while (did < ListData::kNoMoreDocs) {
	    if (merge_lists != NULL) { // For the lists which we are merging.
	      // This will select the lowest docID (ignoring duplicates among the merge lists and any docIDs we have skipped past through AND mode operation).
	      min_doc_id = ListData::kNoMoreDocs;
	      for (i = 0; i < num_merge_lists; ++i) {
	        if ((d = merge_lists[i]->NextGEQ(did)) < min_doc_id) {
	          min_doc_id = d;
	        }
	      }

	      assert(min_doc_id >= did);

	      did = min_doc_id;
	      i = 0;
	    } else {
	      // Get next element from the shortest list. The 'lists' array is assumed
	      // to be ordered so lists[0] is the shortest -- TODO confirm at call site.
	      did = lists[0]->NextGEQ(did);

	      i = 1;
	    }



	    if (did == ListData::kNoMoreDocs)
	      break;

	    d = did;

	    // Try to find entries with same docID in other lists.
	    for (; (i < num_lists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
	      continue;
	    }

	    if (d > did)
	    {
	      // Not in intersection.
	      did = d;
	    }
	    else
	    {
	      assert(d == did);

	      /*
	      cout << did << " ";
	      */



	      // Compute BM25 score from frequencies. The i==0..9 chain records each
	      // term's partial score / list length / frequency into its SCORES_SET
	      // slot, and checks the partial score against that term's lower-bound
	      // threshold (indexed by original query term position, then by the
	      // configured percentage level -- presumably; verify against the
	      // definitions of those members).
	      // NOTE(review): GetDocumentLength(did) is loop-invariant across the
	      // terms of one document and could be hoisted out of this loop.
	      bm25_sum = 0;
	      bool postingThresholdNotQualifiedFlag = false;
	      for (i = 0; i < num_lists; ++i) {
	        f_d_t = lists[i]->GetFreq();
	        doc_len = index_reader_.document_map().GetDocumentLength(did);
	        partial_bm25 = idf_t[i] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

	        scores_set.doc_length = doc_len;

	        if(i == 0){
	        	scores_set.postingScore0 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency0 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore0 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}

	        }
	        else if(i == 1){
	        	scores_set.postingScore1 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency1 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore1 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 2){
	        	scores_set.postingScore2 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency2 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore2 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 3){
	        	scores_set.postingScore3 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency3 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore3 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 4){
	        	scores_set.postingScore4 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency4 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore4 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 5){
	        	scores_set.postingScore5 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency5 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore5 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 6){
	        	scores_set.postingScore6 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency6 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore6 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 7){
	        	scores_set.postingScore7 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency7 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore7 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 8){
	        	scores_set.postingScore8 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency8 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore8 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 9){
	        	scores_set.postingScore9 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency9 = f_d_t;

	    		// checking the postingScore against the lowerBoundThreshold
	    		if (scores_set.postingScore9 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_]){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        // Even though the score is always added to bm25_sum (and might be high
	        // enough for the top-k), the document will still NOT appear in the
	        // top-k results if any of its per-posting partial BM25 scores fell
	        // below that term's threshold (flag checked after this loop).
	        bm25_sum += partial_bm25;
	      }

	      // cout << endl;

	      scores_set.totalScore = bm25_sum;

	      // ############################################################
	      // Notes:
	      // Updated 2013/01/11 by Wei
	      // The following line has special purpose: output ALL the documents with the partialBM25 score associated with the term
	      // Can directly output all the postings with scores from here.
	      // There are 3 columns:
	      // the 1st column is: trecID for gov2
	      // the 2nd column is: docID for polyIRToolkit internal representation
	      // the 3rd column is: partialBM25 score for this posting
	      // option1:
	      // cout << index_reader_.document_map().GetDocumentNumber( did ) << " " << did << " " << scores_set.totalScore << endl;
	      // option2:
	      // cout << did << " " << scores_set.totalScore << endl;
	      // ############################################################

	      if (postingThresholdNotQualifiedFlag){
	      	// We have a document in the intersection, but one or more of its
	      	// postings did NOT qualify against the per-posting threshold, so the
	      	// document is skipped: no heap insertion, just advance to the next docID.
	      	++did;  // Search for next docID.
	      }
	      else{
	          // Use a heap to maintain the top-k documents. This has to be a min heap,
	    	  // where the lowest scoring document is on top, so that we can easily pop it,
	    	  // and push a higher scoring document if need be.
	    	  if (total_num_results < num_results) {
	    	    // We insert a document if we don't have k documents yet.
	    	    results[total_num_results] = make_pair(scores_set, did);
	    	    push_heap(results, results + total_num_results + 1, ResultCompare2());
	    	  }
	    	  else
	    	  {
	    	    if (scores_set.totalScore > results->first.totalScore)
	    	    {
	    		  // We insert a document only if its score is greater than the minimum scoring document in the heap.
	    		  pop_heap(results, results + num_results, ResultCompare2());
	    		  results[num_results - 1].first = scores_set;
	    		  results[num_results - 1].second = did;
	    		  push_heap(results, results + num_results, ResultCompare2());
	    	    }
	    	  }
	          // NOTE: total_num_results counts every qualifying document seen, not
	          // just the k kept in the heap; callers truncate with min() below.
	          ++total_num_results;
	          ++did;  // Search for next docID.
	      }
	    }
	  }

	  // cout << "total_num_of_postings:" << total_num_results << endl;

	  // Sort top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  return total_num_results;
  }
  else if (computation_method == 3) {
	  // DCP computation path: not implemented yet.
	  // Announce the stub and abort the process; same output and flush
	  // behavior as printing the two lines separately.
	  cout << "This is the alg to do the DCP" << endl
	       << "DCP NOT yet implemented. Sorry :)" << endl;
	  exit(1);
  }
  else if(computation_method == 4){
	  // TCP-QV: same threshold-pruned AND-mode BM25 as the TCP branch, except a
	  // posting that fails its term threshold is still accepted when the
	  // (term, docID) pair is present in the query view (queryView_).
	  // cout << "This is the alg to do the TCP-QV" << endl;

	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // We can precompute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection operation.
	  float partial_bm25; // The partial BM25 score for one (document, term) posting processed under AND semantics.

	  // Per-document score record pushed into the top-k heap; zeroed explicitly
	  // because SCORES_SET is a plain aggregate with no constructor.
	  // NOTE(review): only 10 per-term slots exist (postingScore0..9); TODO
	  // confirm the caller guarantees num_lists <= 10.
	  SCORES_SET scores_set; // scores_set for a specific document.
	  scores_set.totalScore = 0.0;
	  scores_set.doc_length = 0;

	  scores_set.postingScore0 = 0.0;
	  scores_set.postingScore1 = 0.0;
	  scores_set.postingScore2 = 0.0;
	  scores_set.postingScore3 = 0.0;
	  scores_set.postingScore4 = 0.0;
	  scores_set.postingScore5 = 0.0;
	  scores_set.postingScore6 = 0.0;
	  scores_set.postingScore7 = 0.0;
	  scores_set.postingScore8 = 0.0;
	  scores_set.postingScore9 = 0.0;

	  scores_set.lengthOfTheInvertedList0 = 0;
	  scores_set.lengthOfTheInvertedList1 = 0;
	  scores_set.lengthOfTheInvertedList2 = 0;
	  scores_set.lengthOfTheInvertedList3 = 0;
	  scores_set.lengthOfTheInvertedList4 = 0;
	  scores_set.lengthOfTheInvertedList5 = 0;
	  scores_set.lengthOfTheInvertedList6 = 0;
	  scores_set.lengthOfTheInvertedList7 = 0;
	  scores_set.lengthOfTheInvertedList8 = 0;
	  scores_set.lengthOfTheInvertedList9 = 0;

	  scores_set.postingTermFrequency0 = 0;
	  scores_set.postingTermFrequency1 = 0;
	  scores_set.postingTermFrequency2 = 0;
	  scores_set.postingTermFrequency3 = 0;
	  scores_set.postingTermFrequency4 = 0;
	  scores_set.postingTermFrequency5 = 0;
	  scores_set.postingTermFrequency6 = 0;
	  scores_set.postingTermFrequency7 = 0;
	  scores_set.postingTermFrequency8 = 0;
	  scores_set.postingTermFrequency9 = 0;

	  int doc_len;     // Length (in tokens) of the current document.
	  uint32_t f_d_t;  // Term frequency of term t in document d.

	  uint32_t did = 0;  // Current candidate docID; also the NextGEQ() lower bound.
	  uint32_t d;        // Scratch docID returned by NextGEQ() probes.


	  int i;  // Index for various loops.

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.

	  // updated by wei 2013/02/21
	  // lengthOfTheInvertedListForThisTerm[i] is the number of documents in the
	  // complete (unpruned) inverted list of term i.
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;

	  for (i = 0; i < num_lists; ++i) {

		// For the pruning project, use the same overall (original-index) statistics
		// for every pruned index so scores stay comparable across prune levels.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
		// BM25 idf; the "1 +" inside the log keeps idf positive even for very common terms.
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }


	  // Necessary for the merge lists.
	  // TODO: Can also try the heap based method here. Can select between heap and array method based on 'num_merge_lists'.
	  uint32_t min_doc_id;

	  // Document-at-a-time AND-mode traversal, driven by the shortest list
	  // (or by the minimum over the merge lists when merging is active).
	  while (did < ListData::kNoMoreDocs) {
	    if (merge_lists != NULL) { // For the lists which we are merging.
	      // This will select the lowest docID (ignoring duplicates among the merge lists and any docIDs we have skipped past through AND mode operation).
	      min_doc_id = ListData::kNoMoreDocs;
	      for (i = 0; i < num_merge_lists; ++i) {
	        if ((d = merge_lists[i]->NextGEQ(did)) < min_doc_id) {
	          min_doc_id = d;
	        }
	      }

	      assert(min_doc_id >= did);

	      did = min_doc_id;
	      i = 0;
	    } else {
	      // Get next element from the shortest list. The 'lists' array is assumed
	      // to be ordered so lists[0] is the shortest -- TODO confirm at call site.
	      did = lists[0]->NextGEQ(did);

	      i = 1;
	    }



	    if (did == ListData::kNoMoreDocs)
	      break;

	    d = did;

	    // Try to find entries with same docID in other lists.
	    for (; (i < num_lists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
	      continue;
	    }

	    if (d > did)
	    {
	      // Not in intersection.
	      did = d;
	    }
	    else
	    {
	      assert(d == did);

	      /*
	      cout << did << " ";
	      */
	      // Compute BM25 score from frequencies. As in the TCP branch, the
	      // i==0..9 chain fills each term's SCORES_SET slot and applies the
	      // per-term threshold; here, however, a below-threshold posting is
	      // forgiven when the (term, docID) pair appears in queryView_.
	      // NOTE(review): queryView_[...] uses map operator[], which
	      // default-inserts an empty entry for unseen terms -- presumably
	      // acceptable here; verify.
	      // NOTE(review): GetDocumentLength(did) is loop-invariant across the
	      // terms of one document and could be hoisted out of this loop.
	      bm25_sum = 0;
	      bool postingThresholdNotQualifiedFlag = false;
	      for (i = 0; i < num_lists; ++i) {
	        f_d_t = lists[i]->GetFreq();
	        doc_len = index_reader_.document_map().GetDocumentLength(did);
	        partial_bm25 = idf_t[i] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

	        scores_set.doc_length = doc_len;

	        if(i == 0){
	        	scores_set.postingScore0 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency0 = f_d_t;
	        	// Small example of how to use the queryView data structure:
	        	/*
	        	if ( queryView_["yellow"].count("1538296") > 0 ){
	        		cout << "yellow in the docID: 1538296" << endl;
	        	}
	        	else{
	        		cout << "yellow NOT in the docID: 1538296" << endl;
	        	}
	        	*/

	    		// Disqualify only if the score is below threshold AND the (term, doc)
	    		// pair is not covered by the query view.
	    		if (scores_set.postingScore0 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}

	        }
	        else if(i == 1){
	        	scores_set.postingScore1 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency1 = f_d_t;

	    		// checking the postingScore against threshold for this term at this percentage kept
	    		if (scores_set.postingScore1 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 2){
	        	scores_set.postingScore2 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency2 = f_d_t;

	    		// checking the postingScore against threshold for this term at this percentage kept
	    		if (scores_set.postingScore2 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 3){
	        	scores_set.postingScore3 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency3 = f_d_t;

	    		// checking the postingScore against threshold for this term at this percentage kept
	    		if (scores_set.postingScore3 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 4){
	        	scores_set.postingScore4 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency4 = f_d_t;

	    		// checking the postingScore against threshold for this term at this percentage kept
	    		if (scores_set.postingScore4 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 5){
	        	scores_set.postingScore5 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency5 = f_d_t;

	    		// checking the postingScore against threshold for this term at this percentage kept
	    		if (scores_set.postingScore5 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 6){
	        	scores_set.postingScore6 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency6 = f_d_t;

	    		// checking the postingScore against threshold for this term at this percentage kept
	    		if (scores_set.postingScore6 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 7){
	        	scores_set.postingScore7 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency7 = f_d_t;

	    		// checking the postingScore against threshold for this term at this percentage kept
	    		if (scores_set.postingScore7 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 8){
	        	scores_set.postingScore8 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency8 = f_d_t;

	    		// checking the postingScore against threshold for this term at this percentage kept
	    		if (scores_set.postingScore8 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        else if(i == 9){
	        	scores_set.postingScore9 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency9 = f_d_t;

	    		// checking the postingScore against threshold for this term at this percentage kept
	    		if (scores_set.postingScore9 <= term_thresholds_based_on_percentage_[ queryTermPostionIndexPairs_[i].first ][indexPositionOfCorrespondingThresholdToUse_] and queryView_[queryTermPostionIndexPairs_[i].first].count(did) <= 0){
	    			postingThresholdNotQualifiedFlag = true;
	    		}
	        }
	        // Even though the score is always added to bm25_sum (and might be high
	        // enough for the top-k), the document will still NOT appear in the
	        // top-k results if any of its per-posting partial BM25 scores was
	        // disqualified above (flag checked after this loop).
	        bm25_sum += partial_bm25;
	      }

	      // cout << endl;

	      scores_set.totalScore = bm25_sum;

	      // ############################################################
	      // Notes:
	      // Updated 2013/01/11 by Wei
	      // The following line has special purpose: output ALL the documents with the partialBM25 score associated with the term
	      // Can directly output all the postings with scores from here.
	      // There are 3 columns:
	      // the 1st column is: trecID for gov2
	      // the 2nd column is: docID for polyIRToolkit internal representation
	      // the 3rd column is: partialBM25 score for this posting
	      // option1:
	      // cout << index_reader_.document_map().GetDocumentNumber( did ) << " " << did << " " << scores_set.totalScore << endl;
	      // option2:
	      // cout << did << " " << scores_set.totalScore << endl;
	      // ############################################################

	      if (postingThresholdNotQualifiedFlag){
	      	// We have a document in the intersection, but one or more of its
	      	// postings did NOT qualify (below threshold and not in the query view),
	      	// so the document is skipped: no heap insertion, just advance.
	      	++did;  // Search for next docID.
	      }
	      else{
	          // Use a heap to maintain the top-k documents. This has to be a min heap,
	    	  // where the lowest scoring document is on top, so that we can easily pop it,
	    	  // and push a higher scoring document if need be.
	    	  if (total_num_results < num_results) {
	    	    // We insert a document if we don't have k documents yet.
	    	    results[total_num_results] = make_pair(scores_set, did);
	    	    push_heap(results, results + total_num_results + 1, ResultCompare2());
	    	  }
	    	  else
	    	  {
	    	    if (scores_set.totalScore > results->first.totalScore)
	    	    {
	    		  // We insert a document only if its score is greater than the minimum scoring document in the heap.
	    		  pop_heap(results, results + num_results, ResultCompare2());
	    		  results[num_results - 1].first = scores_set;
	    		  results[num_results - 1].second = did;
	    		  push_heap(results, results + num_results, ResultCompare2());
	    	    }
	    	  }
	          // NOTE: total_num_results counts every qualifying document seen, not
	          // just the k kept in the heap; callers truncate with min() below.
	          ++total_num_results;
	          ++did;  // Search for next docID.
	      }
	    }
	  }

	  // cout << "total_num_of_postings:" << total_num_results << endl;

	  // Sort top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  return total_num_results;
  }
  else if (computation_method == 5) {
	  // DCP-QV computation path: not implemented yet.
	  // Announce the stub and abort the process; same output and flush
	  // behavior as printing the two lines separately.
	  cout << "This is the alg to do the DCP-QV" << endl
	       << "DCP-QV NOT yet implemented. Sorry :)" << endl;
	  exit(1);
  }
  else if(computation_method == 6){
	  cout << "This is the alg to do the ORIGINAL logic following Roman's practice" << endl;

	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // Pre compute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection operation.
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in AND semantics.
	  float valueOfPartialBM25ScoreComponentPart1_IDF;	// part1 is the idf_t[i] which has been computed completely
	  float valueOfPartialBM25ScoreComponentPart2_TF;	      // (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len)

	  // initialization of the SCORES_SET
	  SCORES_SET scores_set; //scores_set for the a specific document.
	  scores_set.totalScore = 0.0;
	  scores_set.doc_length = 0;

	  // init variables
	  scores_set.posting0RankInList = 0;
	  scores_set.posting1RankInList = 0;
	  scores_set.posting2RankInList = 0;
	  scores_set.posting3RankInList = 0;
	  scores_set.posting4RankInList = 0;
	  scores_set.posting5RankInList = 0;
	  scores_set.posting6RankInList = 0;
	  scores_set.posting7RankInList = 0;
	  scores_set.posting8RankInList = 0;
	  scores_set.posting9RankInList = 0;

	  scores_set.postingThreeFactorProbabilities0 = 0.0;
	  scores_set.postingThreeFactorProbabilities1 = 0.0;
	  scores_set.postingThreeFactorProbabilities2 = 0.0;
	  scores_set.postingThreeFactorProbabilities3 = 0.0;
	  scores_set.postingThreeFactorProbabilities4 = 0.0;
	  scores_set.postingThreeFactorProbabilities5 = 0.0;
	  scores_set.postingThreeFactorProbabilities6 = 0.0;
	  scores_set.postingThreeFactorProbabilities7 = 0.0;
	  scores_set.postingThreeFactorProbabilities8 = 0.0;
	  scores_set.postingThreeFactorProbabilities9 = 0.0;

	  scores_set.posting0ScoreComponentPart1 = 0.0;
	  scores_set.posting1ScoreComponentPart1 = 0.0;
	  scores_set.posting2ScoreComponentPart1 = 0.0;
	  scores_set.posting3ScoreComponentPart1 = 0.0;
	  scores_set.posting4ScoreComponentPart1 = 0.0;
	  scores_set.posting5ScoreComponentPart1 = 0.0;
	  scores_set.posting6ScoreComponentPart1 = 0.0;
	  scores_set.posting7ScoreComponentPart1 = 0.0;
	  scores_set.posting8ScoreComponentPart1 = 0.0;
	  scores_set.posting9ScoreComponentPart1 = 0.0;

	  scores_set.posting0ScoreComponentPart2 = 0.0;
	  scores_set.posting1ScoreComponentPart2 = 0.0;
	  scores_set.posting2ScoreComponentPart2 = 0.0;
	  scores_set.posting3ScoreComponentPart2 = 0.0;
	  scores_set.posting4ScoreComponentPart2 = 0.0;
	  scores_set.posting5ScoreComponentPart2 = 0.0;
	  scores_set.posting6ScoreComponentPart2 = 0.0;
	  scores_set.posting7ScoreComponentPart2 = 0.0;
	  scores_set.posting8ScoreComponentPart2 = 0.0;
	  scores_set.posting9ScoreComponentPart2 = 0.0;


	  scores_set.postingScore0 = 0.0;
	  scores_set.postingScore1 = 0.0;
	  scores_set.postingScore2 = 0.0;
	  scores_set.postingScore3 = 0.0;
	  scores_set.postingScore4 = 0.0;
	  scores_set.postingScore5 = 0.0;
	  scores_set.postingScore6 = 0.0;
	  scores_set.postingScore7 = 0.0;
	  scores_set.postingScore8 = 0.0;
	  scores_set.postingScore9 = 0.0;

	  scores_set.lengthOfTheInvertedList0 = 0;
	  scores_set.lengthOfTheInvertedList1 = 0;
	  scores_set.lengthOfTheInvertedList2 = 0;
	  scores_set.lengthOfTheInvertedList3 = 0;
	  scores_set.lengthOfTheInvertedList4 = 0;
	  scores_set.lengthOfTheInvertedList5 = 0;
	  scores_set.lengthOfTheInvertedList6 = 0;
	  scores_set.lengthOfTheInvertedList7 = 0;
	  scores_set.lengthOfTheInvertedList8 = 0;
	  scores_set.lengthOfTheInvertedList9 = 0;

	  scores_set.postingTermFrequency0 = 0;
	  scores_set.postingTermFrequency1 = 0;
	  scores_set.postingTermFrequency2 = 0;
	  scores_set.postingTermFrequency3 = 0;
	  scores_set.postingTermFrequency4 = 0;
	  scores_set.postingTermFrequency5 = 0;
	  scores_set.postingTermFrequency6 = 0;
	  scores_set.postingTermFrequency7 = 0;
	  scores_set.postingTermFrequency8 = 0;
	  scores_set.postingTermFrequency9 = 0;

	  int doc_len;
	  uint32_t f_d_t;

	  uint32_t did = 0;
	  uint32_t d;


	  int i;  // Index for various loops.

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.

	  // updated by wei 2013/02/21
	  // lengthOfTheInvertedListForThisTerm is equal to the num of docs in the complete list
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;

	  for (i = 0; i < num_lists; ++i) {

		// If for the pruning project, then use the same overall statistics for all the pruned index.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }


	  // Necessary for the merge lists.
	  // TODO: Can also try the heap based method here. Can select between heap and array method based on 'num_merge_lists'.
	  uint32_t min_doc_id;

	  while (did < ListData::kNoMoreDocs) {
	    if (merge_lists != NULL) { // For the lists which we are merging.
	      // This will select the lowest docID (ignoring duplicates among the merge lists and any docIDs we have skipped past through AND mode operation).
	      min_doc_id = ListData::kNoMoreDocs;
	      for (i = 0; i < num_merge_lists; ++i) {
	        if ((d = merge_lists[i]->NextGEQ(did)) < min_doc_id) {
	          min_doc_id = d;
	        }
	      }

	      assert(min_doc_id >= did);

	      did = min_doc_id;
	      i = 0;
	    } else {
	      // Get next element from shortest list.
	      // I guess the list in the lists varable has already been sorted.
	      did = lists[0]->NextGEQ(did);

	      i = 1;
	    }



	    if (did == ListData::kNoMoreDocs)
	      break;

	    d = did;

	    // Try to find entries with same docID in other lists.
	    for (; (i < num_lists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
	      continue;
	    }

	    if (d > did)
	    {
	      // Not in intersection.
	      did = d;
	    }
	    else
	    {
	      assert(d == did);
	      // cout << did << " ";
	      // Compute BM25 score from frequencies.
	      bm25_sum = 0;
	      for (i = 0; i < num_lists; ++i) {
	        f_d_t = lists[i]->GetFreq();
	        doc_len = index_reader_.document_map().GetDocumentLength(did);
	        valueOfPartialBM25ScoreComponentPart1_IDF = idf_t[i];
	        valueOfPartialBM25ScoreComponentPart2_TF = (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
	        partial_bm25 = valueOfPartialBM25ScoreComponentPart1_IDF * valueOfPartialBM25ScoreComponentPart2_TF;
	        bm25_sum += partial_bm25;
	      }
	      scores_set.totalScore = bm25_sum;
		  // Use a heap to maintain the top-k documents. This has to be a min heap,
		  // where the lowest scoring document is on top, so that we can easily pop it,
		  // and push a higher scoring document if need be.
		  if (total_num_results < num_results) {
			// We insert a document if we don't have k documents yet.
			results[total_num_results] = make_pair(scores_set, did);
			push_heap(results, results + total_num_results + 1, ResultCompare2());
		  }
		  else
		  {
			if (scores_set.totalScore > results->first.totalScore)
			{
			  // We insert a document only if its score is greater than the minimum scoring document in the heap.
			  pop_heap(results, results + num_results, ResultCompare2());
			  results[num_results - 1].first = scores_set;
			  results[num_results - 1].second = did;
			  push_heap(results, results + num_results, ResultCompare2());
			}
		  }
		  ++total_num_results;
		  ++did;  // Search for next docID.
	    }
	  }

	  // Sort top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare2());
	  return total_num_results;
  }
  else if(computation_method == 7){
	  // cout << "simplified 3 factor probability formula logic implemented by Wei on 2013/08/31 afternoon at school" << endl;
	  // cout << "simplified 3 factor probability formula logic implemented by Wei on 2013/12/10 afternoon at school" << endl;
	  // Note: extract the rank in the list from the external index and print those numbers to the screen
	  cout << "simplified 3 factor probability formula logic implemented by Wei on 2014/03/30 night at school" << endl;

	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // option3 setting
	  // const float kBm25K1 =  2.0;  // k1
	  // const float kBm25B = 0.5;   // b

	  // Pre compute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

	  // BM25 components.
	  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection operation.
	  float partial_bm25; // The partial BM25 score for each posting(document,term) we're processing in AND semantics.
	  float valueOfPartialBM25ScoreComponentPart1_IDF;	// part1 is the idf_t[i] which has been computed completely
	  float valueOfPartialBM25ScoreComponentPart2_TF;	      // (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len)

	  // initialization of the SCORES_SET
	  SCORES_SET scores_set; //scores_set for the a specific document.
	  scores_set.totalScore = 0.0;
	  scores_set.doc_length = 0;
	  scores_set.docCandidateQualifyStatus = true;
	  scores_set.posting0QualifyStatus = true;
	  scores_set.posting1QualifyStatus = true;
	  scores_set.posting2QualifyStatus = true;
	  scores_set.posting3QualifyStatus = true;
	  scores_set.posting4QualifyStatus = true;
	  scores_set.posting5QualifyStatus = true;
	  scores_set.posting6QualifyStatus = true;
	  scores_set.posting7QualifyStatus = true;
	  scores_set.posting8QualifyStatus = true;
	  scores_set.posting9QualifyStatus = true;

	  scores_set.posting0RankInList = 0;
	  scores_set.posting1RankInList = 0;
	  scores_set.posting2RankInList = 0;
	  scores_set.posting3RankInList = 0;
	  scores_set.posting4RankInList = 0;
	  scores_set.posting5RankInList = 0;
	  scores_set.posting6RankInList = 0;
	  scores_set.posting7RankInList = 0;
	  scores_set.posting8RankInList = 0;
	  scores_set.posting9RankInList = 0;

	  scores_set.postingFirstProbabilities0 = 0.0;
	  scores_set.postingFirstProbabilities1 = 0.0;
	  scores_set.postingFirstProbabilities2 = 0.0;
	  scores_set.postingFirstProbabilities3 = 0.0;
	  scores_set.postingFirstProbabilities4 = 0.0;
	  scores_set.postingFirstProbabilities5 = 0.0;
	  scores_set.postingFirstProbabilities6 = 0.0;
	  scores_set.postingFirstProbabilities7 = 0.0;
	  scores_set.postingFirstProbabilities8 = 0.0;
	  scores_set.postingFirstProbabilities9 = 0.0;

	  scores_set.postingSecondANDThirdProbabilities0 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities1 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities2 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities3 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities4 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities5 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities6 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities7 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities8 = 0.0;
	  scores_set.postingSecondANDThirdProbabilities9 = 0.0;


	  scores_set.postingThreeFactorProbabilities0 = 0.0;
	  scores_set.postingThreeFactorProbabilities1 = 0.0;
	  scores_set.postingThreeFactorProbabilities2 = 0.0;
	  scores_set.postingThreeFactorProbabilities3 = 0.0;
	  scores_set.postingThreeFactorProbabilities4 = 0.0;
	  scores_set.postingThreeFactorProbabilities5 = 0.0;
	  scores_set.postingThreeFactorProbabilities6 = 0.0;
	  scores_set.postingThreeFactorProbabilities7 = 0.0;
	  scores_set.postingThreeFactorProbabilities8 = 0.0;
	  scores_set.postingThreeFactorProbabilities9 = 0.0;

	  scores_set.posting0ScoreComponentPart1 = 0.0;
	  scores_set.posting1ScoreComponentPart1 = 0.0;
	  scores_set.posting2ScoreComponentPart1 = 0.0;
	  scores_set.posting3ScoreComponentPart1 = 0.0;
	  scores_set.posting4ScoreComponentPart1 = 0.0;
	  scores_set.posting5ScoreComponentPart1 = 0.0;
	  scores_set.posting6ScoreComponentPart1 = 0.0;
	  scores_set.posting7ScoreComponentPart1 = 0.0;
	  scores_set.posting8ScoreComponentPart1 = 0.0;
	  scores_set.posting9ScoreComponentPart1 = 0.0;

	  scores_set.posting0ScoreComponentPart2 = 0.0;
	  scores_set.posting1ScoreComponentPart2 = 0.0;
	  scores_set.posting2ScoreComponentPart2 = 0.0;
	  scores_set.posting3ScoreComponentPart2 = 0.0;
	  scores_set.posting4ScoreComponentPart2 = 0.0;
	  scores_set.posting5ScoreComponentPart2 = 0.0;
	  scores_set.posting6ScoreComponentPart2 = 0.0;
	  scores_set.posting7ScoreComponentPart2 = 0.0;
	  scores_set.posting8ScoreComponentPart2 = 0.0;
	  scores_set.posting9ScoreComponentPart2 = 0.0;


	  scores_set.postingScore0 = 0.0;
	  scores_set.postingScore1 = 0.0;
	  scores_set.postingScore2 = 0.0;
	  scores_set.postingScore3 = 0.0;
	  scores_set.postingScore4 = 0.0;
	  scores_set.postingScore5 = 0.0;
	  scores_set.postingScore6 = 0.0;
	  scores_set.postingScore7 = 0.0;
	  scores_set.postingScore8 = 0.0;
	  scores_set.postingScore9 = 0.0;

	  scores_set.lengthOfTheInvertedList0 = 0;
	  scores_set.lengthOfTheInvertedList1 = 0;
	  scores_set.lengthOfTheInvertedList2 = 0;
	  scores_set.lengthOfTheInvertedList3 = 0;
	  scores_set.lengthOfTheInvertedList4 = 0;
	  scores_set.lengthOfTheInvertedList5 = 0;
	  scores_set.lengthOfTheInvertedList6 = 0;
	  scores_set.lengthOfTheInvertedList7 = 0;
	  scores_set.lengthOfTheInvertedList8 = 0;
	  scores_set.lengthOfTheInvertedList9 = 0;

	  scores_set.postingTermFrequency0 = 0;
	  scores_set.postingTermFrequency1 = 0;
	  scores_set.postingTermFrequency2 = 0;
	  scores_set.postingTermFrequency3 = 0;
	  scores_set.postingTermFrequency4 = 0;
	  scores_set.postingTermFrequency5 = 0;
	  scores_set.postingTermFrequency6 = 0;
	  scores_set.postingTermFrequency7 = 0;
	  scores_set.postingTermFrequency8 = 0;
	  scores_set.postingTermFrequency9 = 0;

	  int doc_len;
	  uint32_t f_d_t;

	  uint32_t did = 0;
	  uint32_t d;


	  int i;  // Index for various loops.

	  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
	  float idf_t[num_lists];  // Using a variable length array here.

	  // updated by wei 2013/02/21
	  // lengthOfTheInvertedListForThisTerm is equal to the num of docs in the complete list
	  int lengthOfTheInvertedListForThisTerm[num_lists];
	  int num_docs_t;

	  for (i = 0; i < num_lists; ++i) {

		// If for the pruning project, then use the same overall statistics for all the pruned indexes.
		if(pruningProjectSwitch){
			num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
		}
		// If not for the pruning project, we can use what the index has.
		else{
			num_docs_t = lists[i]->num_docs_complete_list();
		}

		// Updated by Wei on 2014/01/04 night at school
		// short cut for the query term: zuma
		// num_docs_t = 566;
		lengthOfTheInvertedListForThisTerm[i] = num_docs_t;
	    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
	  }

	  // Necessary for the merge lists.
	  // TODO: Can also try the heap based method here. Can select between heap and array method based on 'num_merge_lists'.
	  uint32_t min_doc_id;

	  // Note: added by Wei 2014/01/01 night at school
	  float external_scores[num_lists];

	  while (did < ListData::kNoMoreDocs) {

		// Note: added by Wei 2014/01/01 night at school
		for(int tempCounter = 0; tempCounter < num_lists; tempCounter++){
			  external_scores[tempCounter] = 0.0;
		}

		if (merge_lists != NULL) { // For the lists which we are merging.
	      // This will select the lowest docID (ignoring duplicates among the merge lists and any docIDs we have skipped past through AND mode operation).
	      min_doc_id = ListData::kNoMoreDocs;
	      for (i = 0; i < num_merge_lists; ++i) {
	        // CURRENT version since 2014/01/01 night at school
	    	if ((d = merge_lists[i]->NextGEQRomanRead(did,external_scores[i],false)) < min_doc_id) {
	    	// OLD version
	    	// if ((d = merge_lists[i]->NextGEQ(did)) < min_doc_id) {
	          min_doc_id = d;
	        }
	      }

	      assert(min_doc_id >= did);

	      did = min_doc_id;
	      i = 0;
	    } else {
	      // Get next element from shortest list.
	      // Note: assumes the 'lists' array has already been sorted by list length, so lists[0] is the shortest list.
	      // CURRENT version since 2014/01/01 night at school
	      did = lists[0]->NextGEQRomanRead(did,external_scores[0],false);
	      // OLD version
	      // did = lists[0]->NextGEQ(did);

	      i = 1;
	    }



	    if (did == ListData::kNoMoreDocs)
	      break;

	    d = did;

	    // Try to find entries with same docID in other lists.
	    // CURRENT version since 2014/01/01 night at school
	    for (; (i < num_lists) && (( d = lists[i]->NextGEQRomanRead(did,external_scores[i],false) ) == did); ++i) {
	      continue;
	    }
	    // OLD version
	    // for (; (i < num_lists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
	    //  continue;
	    // }


	    if (d > did)
	    {
	      // Not in intersection.
	      did = d;
	    }
	    else
	    {
	      // for a document which is in the intersection
	      // init some variables
	  	  scores_set.docCandidateQualifyStatus = true;
	  	  scores_set.posting0QualifyStatus = true;
	  	  scores_set.posting1QualifyStatus = true;
	  	  scores_set.posting2QualifyStatus = true;
	  	  scores_set.posting3QualifyStatus = true;
	  	  scores_set.posting4QualifyStatus = true;
	  	  scores_set.posting5QualifyStatus = true;
	  	  scores_set.posting6QualifyStatus = true;
	  	  scores_set.posting7QualifyStatus = true;
	  	  scores_set.posting8QualifyStatus = true;
	  	  scores_set.posting9QualifyStatus = true;

	      assert(d == did);
	      bm25_sum = 0;
	      for (i = 0; i < num_lists; ++i) {
	        f_d_t = lists[i]->GetFreq();
	        doc_len = index_reader_.document_map().GetDocumentLength(did);
	        valueOfPartialBM25ScoreComponentPart1_IDF = idf_t[i];
	        valueOfPartialBM25ScoreComponentPart2_TF = (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
	        partial_bm25 = valueOfPartialBM25ScoreComponentPart1_IDF * valueOfPartialBM25ScoreComponentPart2_TF;

	        /*
	        // debug ONLY for the BM25 score correctness
	        cout << "Debug begins..." << endl;
	        cout << "partial_bm25:" << partial_bm25 << endl;
	        cout << "idf_t[i](BM25ScorePart1 IDF component):" << idf_t[i] << endl;
	        cout << "bm25ScoreComponentPart2(BM25ScorePart2 TF component):" << bm25ScoreComponentPart2 << endl;
	        // cout << "f_d_t:" << f_d_t << endl;
	        // cout << "kBm25NumeratorMul:" << kBm25NumeratorMul << endl;
	        // cout << "kBm25DenominatorAdd:" << kBm25DenominatorAdd << endl;
	        // cout << "kBm25DenominatorDocLenMul:" << kBm25DenominatorDocLenMul << endl;
	        // cout << "doc_len:" << doc_len << endl;
	        cout << "Debug ends." << endl;
	        cout << endl;
	        */

	        scores_set.doc_length = doc_len;

	        if(i == 0){
	        	  scores_set.posting0RankInList = external_scores[i];
	        	  scores_set.posting0ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting0ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore0 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList0 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency0 = f_d_t;
	        }
	        else if(i == 1){
	        	  scores_set.posting1RankInList = external_scores[i];
	        	  scores_set.posting1ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting1ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore1 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList1 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency1 = f_d_t;
	        }
	        else if(i == 2){
	        	  scores_set.posting2RankInList = external_scores[i];
	        	  scores_set.posting2ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting2ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore2 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList2 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency2 = f_d_t;
	        }
	        else if(i == 3){
	        	  scores_set.posting3RankInList = external_scores[i];
	        	  scores_set.posting3ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting3ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore3 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList3 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency3 = f_d_t;
	        }
	        else if(i == 4){
	        	  scores_set.posting4RankInList = external_scores[i];
	        	  scores_set.posting4ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	  scores_set.posting4ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	  scores_set.postingScore4 = partial_bm25;
	        	  scores_set.lengthOfTheInvertedList4 = lengthOfTheInvertedListForThisTerm[i];
	        	  scores_set.postingTermFrequency4 = f_d_t;
	        }
	        else if(i == 5){
	        	scores_set.posting5RankInList = external_scores[i];
	        	scores_set.posting5ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting5ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore5 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList5 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency5 = f_d_t;
	        }
	        else if(i == 6){
	        	scores_set.posting6RankInList = external_scores[i];
	        	scores_set.posting6ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting6ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore6 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList6 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency6 = f_d_t;
	        }
	        else if(i == 7){
	        	scores_set.posting7RankInList = external_scores[i];
	        	scores_set.posting7ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting7ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore7 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList7 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency7 = f_d_t;
	        }
	        else if(i == 8){
	        	scores_set.posting8RankInList = external_scores[i];
	        	scores_set.posting8ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting8ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore8 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList8 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency8 = f_d_t;
	        }
	        else if(i == 9){
	        	scores_set.posting9RankInList = external_scores[i];
	        	scores_set.posting9ScoreComponentPart1 = valueOfPartialBM25ScoreComponentPart1_IDF;
	        	scores_set.posting9ScoreComponentPart2 = valueOfPartialBM25ScoreComponentPart2_TF;
	        	scores_set.postingScore9 = partial_bm25;
	        	scores_set.lengthOfTheInvertedList9 = lengthOfTheInvertedListForThisTerm[i];
	        	scores_set.postingTermFrequency9 = f_d_t;
	        }

	        // Updated by Wei on 2013/08/31 afternoon at school
	        // init some variables
	        double first_factor_probability_value = 0.0;
			float pieceProbability = 0.0;
			long numOfPostingsInThatPiece = 0;
	        double factor_2_3_combined_probability_value = 0.0;
	        double final_3_factors_probability_combined_value = 0.0;
	        double final_posting_probability_used_for_judgement = 0.0;

	        // (1) compute P(t)
	        string curr_look_up_term = queryTermPostionIndexPairs_[i].first;
            if (terms_with_corresponding_species_belonging_to_map_.count(curr_look_up_term) > 0 ){
            	first_factor_probability_value = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[curr_look_up_term] ];
            }
            else{
            	first_factor_probability_value = freq_first_factor_probability_map_[0];
            }

	        // (2) compute P(unknown) for the second and third factor
            // step2/2: compute the combination of the second and third factor P(unknown) called as second_AND_third_factor_combination_probability_value

		    float valueOfPartialBM25Score = partial_bm25;
		    float valueOfTermWithLengthOfTheList = lengthOfTheInvertedListForThisTerm[i];
		    float compareRelRank = 0.0;

		    assert(!isnan(valueOfPartialBM25Score));
		    assert(!isnan(valueOfTermWithLengthOfTheList));

			// The logic of computing the variable called: second_AND_third_factor_combination_probability_value
			string part1InStringFormat = "";
			string part2InStringFormat = "";

			int currentClassLabelOfListLengthInIntFormat = -1;
			int previousClassLabelOfListLengthInIntFormat = -1;
			unsigned int previousClassUpperBound = 0;
			int currentPieceID = -1;
			int currentClassLabelOfImpactScoresInIntFormat = -1;
			int previousClassLabelOfImpactScoresInIntFormat = -1;
			unsigned int currentClassLabelBasedOnRelRankInIntFormat = -1;
			string probabilityAccessKeyInStringFormat = "";

			// for debug
			// cout << "class_label_with_lower_bounds_map_.size(): " << class_label_with_lower_bounds_map_.size() << endl;
			// cout << "class_label_with_probability_of_2D_ranges_map_.size(): " << class_label_with_probability_of_2D_ranges_map_.size() << endl;
			// cout << "class_label_with_lower_bounds_of_impact_scores_map_.size(): " << class_label_with_lower_bounds_of_impact_scores_map_.size() << endl;
			// cout << "class_label_with_lower_bounds_of_list_length_map_.size(): " << class_label_with_lower_bounds_of_list_length_map_.size() << endl;
			// cout << "valueOfListOfLengthForThisTerm: " << valueOfTermWithLengthOfTheList << endl;
			// cout << "valueOfPartialBM25Score: " << valueOfPartialBM25Score << endl;

			/*
			// Used mainly on 2014Jan (believe to be CORRECT)
		    /////////////////////////////////////////////////////optimized piece approach Begins.../////////////////////////////////////////////////////////
		    // newly added on 2014/01/16 night by Wei at school
		    previousClassLabelOfListLengthInIntFormat = term_with_their_belonging_class_map_[ queryTermPostionIndexPairs_[i].first ];
		    // cout << "--->previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;

		    uint32_t currentClassUpperBound = 0;
		    uint32_t currentRankInList = external_scores[i];
		    for (currentPieceID = term_with_piece_info_map_[ queryTermPostionIndexPairs_[i].first ].size() - 1; currentPieceID >= 0; currentPieceID--) {
				  currentClassUpperBound += term_with_piece_info_map_[ queryTermPostionIndexPairs_[i].first ][currentPieceID];
				  // cout << currentPieceID << " " << currentClassUpperBound << endl;
				  if (currentClassUpperBound >= currentRankInList){
					  // cout << "should break" << endl;
					  break;
				  }
		    }
		    // at this point, the currentPieceID should be the value of that we want
		    numOfPostingsInThatPiece = term_with_piece_info_map_[ queryTermPostionIndexPairs_[i].first ][currentPieceID];
		    stringstream ss1;
		    ss1 << previousClassLabelOfListLengthInIntFormat;
		    part1InStringFormat = ss1.str();
		    stringstream ss2;
		    ss2 << currentPieceID;
		    part2InStringFormat = ss2.str();
		    probabilityAccessKeyInStringFormat = part1InStringFormat + "_" + part2InStringFormat;
		    if (class_label_with_probability_of_2D_ranges_map_.count(probabilityAccessKeyInStringFormat) > 0){
				  // every posting needs to * 10, so just ignore here
		    	  factor_2_3_combined_probability_value = class_label_with_probability_of_2D_ranges_map_[ probabilityAccessKeyInStringFormat ] / numOfPostingsInThatPiece;
		    }
		    else{
				  cout << "probabilityAccessKeyInStringFormat: " << probabilityAccessKeyInStringFormat << " not in class_label_with_probability_of_2D_ranges_map_" << endl;
				  cout << "critical error" << endl;
				  exit(1);
		    }

		    // for debug
		    // cout << "----->ONLY fit for relRank approach NOW<------" << endl;
		    // cout << "entry.doc_id: " << did << endl;
		    // cout << "entry.partialBM25: " << valueOfPartialBM25Score << endl;
		    // cout << "currentRankInList: " << currentRankInList << endl;
		    // cout << "valueOfPartialBM25Score: " << valueOfPartialBM25Score << endl;
		    // cout << "num_docs_t_: " << valueOfTermWithLengthOfTheList << endl;
		    // cout << "compareRelRank: " << compareRelRank << endl;
		    // cout << "previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
		    // cout << "previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;
		    // cout << "currentClassLabelBasedOnRelRankInIntFormat: " << currentClassLabelBasedOnRelRankInIntFormat << endl;
		    // cout << "pieceProbability: " << pieceProbability << endl;
		    // cout << "previousClassUpperBound: " << previousClassUpperBound << endl;
		    // cout << "currentPieceID: " << currentPieceID << endl;
		    // cout << "currentClassUpperBound: " << currentClassUpperBound << endl;
		    // cout << "term_with_piece_info_map_[term_].size(): " << term_with_piece_info_map_[queryTermPostionIndexPairs_[i].first].size() << endl;
		    // cout << "numOfPostingsInThatPiece: " << numOfPostingsInThatPiece << endl;
		    // cout << "second_AND_third_factor_combination_probability_value: " << factor_2_3_combined_probability_value << endl;
		    // cout << endl;
		    /////////////////////////////////////////////////////optimized piece approach Ends./////////////////////////////////////////////////////////
		    */

			// Replaceable Component for the computation of the probability of the second and third component
			// optimized relrank related (CORRECT version)
            // take the (relrank, list length) into consideration
	        // (2) compute P(unknown) for the second and third factor
            // step2/2: compute the combination of the second and third factor P(unknown) called as second_AND_third_factor_combination_probability_value
			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			  previousClassLabelOfListLengthInIntFormat = term_with_their_belonging_class_map_[ queryTermPostionIndexPairs_[i].first ];
			  previousClassUpperBound = class_label_with_lower_bounds_of_list_length_map_[previousClassLabelOfListLengthInIntFormat + 1];
			  // cout << "--->previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;

			  // CURRENT version
			  // for development purpose
			  compareRelRank = float(external_scores[i]) / float(valueOfTermWithLengthOfTheList);
			  float currentRangeUpperBound = 1.0;
			  float previousRangeUpperBound = currentRangeUpperBound;
			  unsigned int numOfPieces = int( class_label_with_lower_bounds_of_impact_scores_map_[previousClassLabelOfListLengthInIntFormat] );

			  // Updated by Wei on 2014/01/19 night
			  // This component has the stepGap to be 0.5
			  int counter = 0;
			  // debug
			  // cout << counter << " " << currentRangeUpperBound << endl;
			  counter += 1;
			  for(; counter < numOfPieces; counter++){
				  currentRangeUpperBound = currentRangeUpperBound / 2;
			  }
			  // at this point, currentRangeUpperBound has the smallest upper bound except 0

			  currentClassLabelBasedOnRelRankInIntFormat = numOfPieces - 1;
			  // debug
			  // cout << "compareRelRank: " << compareRelRank << endl;
			  // cout << "currentRangeUpperBound: " << currentRangeUpperBound << endl;

			  while (compareRelRank > currentRangeUpperBound){
				  currentRangeUpperBound = currentRangeUpperBound * 2;
				  currentClassLabelBasedOnRelRankInIntFormat -= 1;
			  }


			  /*
			  // This component has the stepGap to be 0.3
			  int counter = 0;
			  // debug
			  // cout << counter << " " << currentRangeUpperBound << endl;
			  counter += 1;
			  for(; counter < numOfPieces; counter++){
				  currentRangeUpperBound = previousRangeUpperBound - previousRangeUpperBound / 3;
				  previousRangeUpperBound = currentRangeUpperBound;
			  }
			  // at this point, currentRangeUpperBound has the smallest upper bound except 0

			  currentClassLabelBasedOnRelRankInIntFormat = numOfPieces - 1;
			  // debug
			  // cout << "compareRelRank: " << compareRelRank << endl;
			  // cout << "currentRangeUpperBound: " << currentRangeUpperBound << endl;

			  if (compareRelRank != 1){
				  while (compareRelRank > currentRangeUpperBound){
					  currentRangeUpperBound = currentRangeUpperBound * 3 / 2;
					  currentClassLabelBasedOnRelRankInIntFormat -= 1;
				  }
			  }
			  else{
				  currentClassLabelBasedOnRelRankInIntFormat = 0;
				  currentRangeUpperBound = 1;
			  }
			  // at this point, currentRangeUpperBound has rightly become the tightest upper bound of the current relRank
			  */

			  // debug
			  // cout << "currentRangeUpperBound: " << currentRangeUpperBound << endl;
			  // cout << "currentClassLabelBasedOnRelRankInIntFormat: " << currentClassLabelBasedOnRelRankInIntFormat << endl;


			  stringstream ss1;
			  ss1 << previousClassLabelOfListLengthInIntFormat;
			  part1InStringFormat = ss1.str();
			  stringstream ss2;
			  ss2 << currentClassLabelBasedOnRelRankInIntFormat;
			  part2InStringFormat = ss2.str();
			  // for debug
			  // cout << "previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
			  // cout << "previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;
			  // cout << "part1InStringFormat: " << part1InStringFormat << endl;
			  // cout << "part2InStringFormat: " << part2InStringFormat << endl;

			  probabilityAccessKeyInStringFormat = part1InStringFormat + "_" + part2InStringFormat;
			  if (class_label_with_probability_of_2D_ranges_map_.count(probabilityAccessKeyInStringFormat) > 0){
				  // relRank version with correction factor on 2014/01/06 morning by Wei at school
				  // factor_2_3_combined_probability_value =  class_label_with_probability_of_2D_ranges_map_[ probabilityAccessKeyInStringFormat ] * previousClassUpperBound / valueOfTermWithLengthOfTheList ;
				  // original relRank version
				  factor_2_3_combined_probability_value =  class_label_with_probability_of_2D_ranges_map_[ probabilityAccessKeyInStringFormat ];
			  }
			  else{
				  cout << "probabilityAccessKeyInStringFormat: " << probabilityAccessKeyInStringFormat << " not in class_label_with_probability_of_2D_ranges_map_" << endl;
				  cout << "critical error" << endl;
				  exit(1);
			  }

			  // for debug for each approach
			  // cout << "----->ONLY fit for relRank approach NOW<------" << endl;
			  // cout << "did: " << did << endl;
			  // cout << "PartialBM25Score: " << valueOfPartialBM25Score << endl;
			  // cout << "external_scores["<< i << "]: " << external_scores[i] << endl;
			  // cout << "valueOfPartialBM25Score: " << valueOfPartialBM25Score << endl;
			  // cout << "LengthOfTheList_: " << valueOfTermWithLengthOfTheList << endl;
			  // cout << "compareRelRank: " << compareRelRank << endl;
			  // cout << "previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
			  // cout << "previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;
			  // cout << "currentClassLabelBasedOnRelRankInIntFormat: " << currentClassLabelBasedOnRelRankInIntFormat << endl;
			  // cout << "pieceProbability: " << pieceProbability << endl;
			  // cout << "previousClassUpperBound: " << previousClassUpperBound << endl;
			  // cout << "numOfPostingsInThatPiece: " << numOfPostingsInThatPiece << endl;
			  // cout << "second_AND_third_factor_combination_probability_value: " << factor_2_3_combined_probability_value << endl;
			  // cout << endl;
			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


			/*
			// Replaceable Component for the computation of the probability of the second and third component
			// optimized relrank related (NOT CORRECT version)
            // take the (relrank, list length) into consideration
	        // (2) compute P(unknown) for the second and third factor
            // step2/2: compute the combination of the second and third factor P(unknown) called as second_AND_third_factor_combination_probability_value
			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			  previousClassLabelOfListLengthInIntFormat = term_with_their_belonging_class_map_[ queryTermPostionIndexPairs_[i].first ];
			  // cout << "--->previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;

			  // CURRENT version
			  // for development purpose
			  compareRelRank = float(external_scores[i]) / float(valueOfTermWithLengthOfTheList);
			  float currentRangeUpperBound = 1.0;
			  unsigned int numOfPieces = int( class_label_with_lower_bounds_of_impact_scores_map_[previousClassLabelOfListLengthInIntFormat] );

			  int counter = 0;
			  // debug
			  // cout << counter << " " << currentRangeUpperBound << endl;
			  counter += 1;
			  for(; counter < numOfPieces; counter++){
				  currentRangeUpperBound = currentRangeUpperBound / 2;
			  }
			  // at this point, currentRangeUpperBound has the smallest upper bound except 0

			  currentClassLabelBasedOnRelRankInIntFormat = numOfPieces - 1;
			  // debug
			  // cout << "compareRelRank: " << compareRelRank << endl;
			  // cout << "currentRangeUpperBound: " << currentRangeUpperBound << endl;

			  while (compareRelRank > currentRangeUpperBound){
				  currentRangeUpperBound = currentRangeUpperBound * 2;
				  currentClassLabelBasedOnRelRankInIntFormat -= 1;
			  }
			  // debug
			  // cout << "currentRangeUpperBound: " << currentRangeUpperBound << endl;
			  // cout << "currentClassLabelBasedOnRelRankInIntFormat: " << currentClassLabelBasedOnRelRankInIntFormat << endl;

			  stringstream ss1;
			  ss1 << previousClassLabelOfListLengthInIntFormat;
			  part1InStringFormat = ss1.str();
			  stringstream ss2;
			  ss2 << currentClassLabelBasedOnRelRankInIntFormat;
			  part2InStringFormat = ss2.str();
			  // for debug
			  // cout << "previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
			  // cout << "previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;
			  // cout << "part1InStringFormat: " << part1InStringFormat << endl;
			  // cout << "part2InStringFormat: " << part2InStringFormat << endl;

			  probabilityAccessKeyInStringFormat = part1InStringFormat + "_" + part2InStringFormat;
			  if (class_label_with_probability_of_2D_ranges_map_.count(probabilityAccessKeyInStringFormat) > 0){
				  pieceProbability = class_label_with_probability_of_2D_ranges_map_[ probabilityAccessKeyInStringFormat ];
				  numOfPostingsInThatPiece = term_with_piece_info_map_[ queryTermPostionIndexPairs_[i].first ][currentClassLabelBasedOnRelRankInIntFormat];
				  factor_2_3_combined_probability_value = pieceProbability / numOfPostingsInThatPiece;
			  }
			  else{
				  cout << "probabilityAccessKeyInStringFormat: " << probabilityAccessKeyInStringFormat << " not in class_label_with_probability_of_2D_ranges_map_" << endl;
				  cout << "critical error" << endl;
				  exit(1);
			  }

			  // for debug
			  // cout << "----->ONLY fit for relRank approach NOW<------" << endl;
			  // cout << "did: " << did << endl;
			  // cout << "PartialBM25Score: " << valueOfPartialBM25Score << endl;
			  // cout << "external_scores["<< i << "]: " << external_scores[i] << endl;
			  // cout << "valueOfPartialBM25Score: " << valueOfPartialBM25Score << endl;
			  // cout << "LengthOfTheList_: " << valueOfTermWithLengthOfTheList << endl;
			  // cout << "compareRelRank: " << compareRelRank << endl;
			  // cout << "previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
			  // cout << "previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;
			  // cout << "currentClassLabelBasedOnRelRankInIntFormat: " << currentClassLabelBasedOnRelRankInIntFormat << endl;
			  // cout << "pieceProbability: " << pieceProbability << endl;
			  // cout << "numOfPostingsInThatPiece: " << numOfPostingsInThatPiece << endl;
			  // cout << "second_AND_third_factor_combination_probability_value: " << factor_2_3_combined_probability_value << endl;
			  // cout << endl;
			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			*/

			/*
			// Replaceable Component for the computation of the probability of the second and third component
			// unoptimized relrank related
            // take the (relrank, list length) into consideration
	        // (2) compute P(unknown) for the second and third factor
            // step2/2: compute the combination of the second and third factor P(unknown) called as second_AND_third_factor_combination_probability_value
			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			// outside loop
			  bool outsideLoopStopFlag = false;
			  for (map<int, uint32_t>::iterator iter1 = class_label_with_lower_bounds_of_list_length_map_.begin(); iter1 != class_label_with_lower_bounds_of_list_length_map_.end(); iter1++) {
				  if (outsideLoopStopFlag){
					  break;
				  }
				  currentClassLabelOfListLengthInIntFormat = (*iter1).first;
				  previousClassLabelOfListLengthInIntFormat = currentClassLabelOfListLengthInIntFormat - 1;

				  if (valueOfTermWithLengthOfTheList < class_label_with_lower_bounds_of_list_length_map_[currentClassLabelOfListLengthInIntFormat] and valueOfTermWithLengthOfTheList >= class_label_with_lower_bounds_of_list_length_map_[previousClassLabelOfListLengthInIntFormat]){
					  // inside loop
					  // previousClassLabelOfListLengthInIntFormat has the right info here
					  // CURRENT version
					  // debug
					  // cout << "external_scores[" << i << "]: " << external_scores[i] << endl;
					  // cout << "valueOfTermWithLengthOfTheList: " << valueOfTermWithLengthOfTheList << endl;
					  compareRelRank = float(external_scores[i]) / float(valueOfTermWithLengthOfTheList);
					  float currentRangeUpperBound = 1.0;
					  unsigned int numOfPieces = int( class_label_with_lower_bounds_of_impact_scores_map_[previousClassLabelOfListLengthInIntFormat] );

					  int counter = 0;
					  // debug
					  // cout << counter << " " << currentRangeUpperBound << endl;
					  counter += 1;
					  for(; counter < numOfPieces; counter++){
						  currentRangeUpperBound = currentRangeUpperBound / 2;
					  }


					  currentClassLabelBasedOnRelRankInIntFormat = numOfPieces - 1;
					  // debug
					  // cout << "compareRelRank: " << compareRelRank << endl;
					  // cout << "smallestRangeUpperBound: " << currentRangeUpperBound << endl; // at this point, currentRangeUpperBound has the smallest upper bound except 0

					  while (compareRelRank > currentRangeUpperBound){
						  currentRangeUpperBound = currentRangeUpperBound * 2;
						  currentClassLabelBasedOnRelRankInIntFormat -= 1;
					  }
					  // debug
					  // cout << "currentRangeUpperBound: " << currentRangeUpperBound << endl;
					  // cout << "--->previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
					  // cout << "--->currentClassLabelBasedOnRelRankInIntFormat: " << currentClassLabelBasedOnRelRankInIntFormat << endl;

					  stringstream ss1;
					  ss1 << previousClassLabelOfListLengthInIntFormat;
					  part1InStringFormat = ss1.str();
					  stringstream ss2;
					  ss2 << currentClassLabelBasedOnRelRankInIntFormat;
					  part2InStringFormat = ss2.str();
					  // for debug
					  // cout << "previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
					  // cout << "previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;
					  // cout << "part1InStringFormat: " << part1InStringFormat << endl;
					  // cout << "part2InStringFormat: " << part2InStringFormat << endl;

				      probabilityAccessKeyInStringFormat = part1InStringFormat + "_" + part2InStringFormat;
				      if (class_label_with_probability_of_2D_ranges_map_.count(probabilityAccessKeyInStringFormat) > 0){
				    	  factor_2_3_combined_probability_value = class_label_with_probability_of_2D_ranges_map_[ probabilityAccessKeyInStringFormat ];
				      }
				      else{
				    	  cout << "probabilityAccessKeyInStringFormat: " << probabilityAccessKeyInStringFormat << " not in class_label_with_probability_of_2D_ranges_map_" << endl;
				    	  cout << "critical error" << endl;
				    	  exit(1);
				      }
				      outsideLoopStopFlag = true;
				      break;
				  }
			  }

			  // for debug (It is important and help me to find a critical BUG)
			  // In memory of this debug section

			  // cout << "did: " << did << endl;
			  // cout << "external_scores["<< i << "]: " << external_scores[i] << endl;
			  // cout << "PartialBM25Score: " << valueOfPartialBM25Score << endl;
			  // cout << "LengthOfTheList_: " << valueOfTermWithLengthOfTheList << endl;
			  // cout << "compareRelRank: " << compareRelRank << endl;
			  // cout << "previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
			  // cout << "previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;
			  // cout << "currentClassLabelBasedOnRelRankInIntFormat: " << currentClassLabelBasedOnRelRankInIntFormat << endl;
			  // cout << "second_AND_third_factor_combination_probability_value: " << factor_2_3_combined_probability_value << endl;
			  // cout << endl;
			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			*/


			/*
			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			// Replaceable Component for the computation of the probability of the second and third component
			// 2D
            // take the (impact score, list length) into consideration
	        // (2) compute P(unknown) for the second and third factor
            // copied the logic from the --cat command just develop yesterday night
            // step2/2: compute the combination of the second and third factor P(unknown) called as second_AND_third_factor_combination_probability_value
			// outside loop
			bool outsideLoopStopFlag = false;
			for (map<int, uint32_t>::iterator iter1 = class_label_with_lower_bounds_of_list_length_map_.begin(); iter1 != class_label_with_lower_bounds_of_list_length_map_.end(); iter1++) {
				if (outsideLoopStopFlag){
					break;
				}
				currentClassLabelOfListLengthInIntFormat = (*iter1).first;
				previousClassLabelOfListLengthInIntFormat = currentClassLabelOfListLengthInIntFormat - 1;

				if (valueOfTermWithLengthOfTheList < class_label_with_lower_bounds_of_list_length_map_[currentClassLabelOfListLengthInIntFormat] and valueOfTermWithLengthOfTheList >= class_label_with_lower_bounds_of_list_length_map_[previousClassLabelOfListLengthInIntFormat]){
					// inside loop
					// previousClassLabelOfListLengthInIntFormat has the right info here
					// debug
					// cout << "--->previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
					for (map<int, float>::iterator iter2 = class_label_with_lower_bounds_of_impact_scores_map_.begin(); iter2 != class_label_with_lower_bounds_of_impact_scores_map_.end(); iter2++) {
						currentClassLabelOfImpactScoresInIntFormat = (*iter2).first;
						previousClassLabelOfImpactScoresInIntFormat = currentClassLabelOfImpactScoresInIntFormat - 1;

						if (valueOfPartialBM25Score < class_label_with_lower_bounds_of_impact_scores_map_[currentClassLabelOfImpactScoresInIntFormat] and valueOfPartialBM25Score >= class_label_with_lower_bounds_of_impact_scores_map_[previousClassLabelOfImpactScoresInIntFormat]){
							  // previousClassLabelOfImpactScoresInIntFormat has the right info here
							  // debug
							  // cout << "--->previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;

							  stringstream ss1;
							  ss1 << previousClassLabelOfListLengthInIntFormat;
							  part1InStringFormat = ss1.str();
							  stringstream ss2;
							  ss2 << previousClassLabelOfImpactScoresInIntFormat;
							  part2InStringFormat = ss2.str();
							  // for debug
							  // cout << "previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
							  // cout << "previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;
							  // cout << "part1InStringFormat: " << part1InStringFormat << endl;
							  // cout << "part2InStringFormat: " << part2InStringFormat << endl;

							  probabilityAccessKeyInStringFormat = part1InStringFormat + "_" + part2InStringFormat;
							  if (class_label_with_probability_of_2D_ranges_map_.count(probabilityAccessKeyInStringFormat) > 0){
								  factor_2_3_combined_probability_value = class_label_with_probability_of_2D_ranges_map_[ probabilityAccessKeyInStringFormat ];
							  }
							  else{
								  cout << "probabilityAccessKeyInStringFormat: " << probabilityAccessKeyInStringFormat << " not in class_label_with_probability_of_2D_ranges_map_" << endl;
								  cout << "critical error" << endl;
								  exit(1);
							  }
							  outsideLoopStopFlag = true;
							  break;
						}
					}
				}
			}
			*/
			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			/*
            // Replaceable Component for the computation of the probability of the second and third component
            // 1D
            // ONLY take the impact score into consideration
	        // (2) compute P(unknown) for the second and third factor
            // copied the logic from the --cat command just develop yesterday night
            // step2/2: compute the combination of the second and third factor P(unknown) called as second_AND_third_factor_combination_probability_value
		    assert(!isnan(valueOfPartialBM25Score));

		    int currentClassLabelInIntFormat = -1;
		    int previousClassLabelInIntFormat = -1;
		    for (map<int, float>::iterator iter = class_label_with_lower_bounds_map_.begin(); iter != class_label_with_lower_bounds_map_.end(); iter++) {
				  currentClassLabelInIntFormat = (*iter).first;
				  previousClassLabelInIntFormat = currentClassLabelInIntFormat - 1;
				  // current testing and pass version
				  if (valueOfPartialBM25Score < class_label_with_lower_bounds_map_[currentClassLabelInIntFormat] and valueOfPartialBM25Score >= class_label_with_lower_bounds_map_[previousClassLabelInIntFormat]){

					  if (class_label_with_probability_map_.count(previousClassLabelInIntFormat) > 0){
						  factor_2_3_combined_probability_value = class_label_with_probability_map_[ previousClassLabelInIntFormat ];
					  }
					  else{
						  cout << "previousClassLabelInIntFormat:" << previousClassLabelInIntFormat << " not in class_label_with_probability_map_" << endl;
						  cout << "critical error" << endl;
						  exit(1);
					  }
					  break;
				  }
			}
		    */
			//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

	        final_3_factors_probability_combined_value = first_factor_probability_value * factor_2_3_combined_probability_value;

	        // 3 choices for the assignment of the posting_judged_probability
	        // { partial_bm25,
	        //   factor_2_3_combined_probability_value,
	        //   final_3_factors_probability_combined_value }
	        final_posting_probability_used_for_judgement = final_3_factors_probability_combined_value;

	        if(i == 0){
	        	scores_set.postingFirstProbabilities0 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities0 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities0 = final_posting_probability_used_for_judgement;
	        }
	        else if(i == 1){
	        	scores_set.postingFirstProbabilities1 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities1 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities1 = final_posting_probability_used_for_judgement;
	        }
	        else if(i == 2){
	        	scores_set.postingFirstProbabilities2 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities2 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities2 = final_posting_probability_used_for_judgement;
	        }
	        else if(i == 3){
	        	scores_set.postingFirstProbabilities3 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities3 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities3 = final_posting_probability_used_for_judgement;
	        }
	        else if(i == 4){
	        	scores_set.postingFirstProbabilities4 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities4 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities4 = final_posting_probability_used_for_judgement;
	        }
	        else if(i == 5){
	        	scores_set.postingFirstProbabilities5 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities5 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities5 = final_posting_probability_used_for_judgement;
	        }
	        else if(i == 6){
	        	scores_set.postingFirstProbabilities6 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities6 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities6 = final_posting_probability_used_for_judgement;
	        }
	        else if(i == 7){
	        	scores_set.postingFirstProbabilities7 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities7 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities7 = final_posting_probability_used_for_judgement;
	        }
	        else if(i == 8){
	        	scores_set.postingFirstProbabilities8 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities8 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities8 = final_posting_probability_used_for_judgement;
	        }
	        else if(i == 9){
	        	scores_set.postingFirstProbabilities9 = first_factor_probability_value;
	        	scores_set.postingSecondANDThirdProbabilities9 = factor_2_3_combined_probability_value;
	        	scores_set.postingThreeFactorProbabilities9 = final_posting_probability_used_for_judgement;
	        }

			// for debug the whole thing
			// cout << "valueOfPartialBM25Score: " << valueOfPartialBM25Score << endl;
			// cout << "valueOfLengthOfTheList_: " << valueOfTermWithLengthOfTheList << endl;
			// cout << "[1D]previousClassLabelInIntFormat: " << previousClassLabelInIntFormat << endl;
			// cout << "[2D]previousClassLabelOfListLengthInIntFormat: " << previousClassLabelOfListLengthInIntFormat << endl;
			// cout << "[2D]previousClassLabelOfImpactScoresInIntFormat: " << previousClassLabelOfImpactScoresInIntFormat << endl;
			// cout << "first_factor_probability_value: " << first_factor_probability_value << endl;
			// cout << "second_AND_third_factor_combination_probability_value: " << factor_2_3_combined_probability_value << endl;
			// cout << "final_3_factors_probability_combined_value: " << final_3_factors_probability_combined_value << endl;
			// cout << endl;

	        if (final_posting_probability_used_for_judgement >= universal_threshold_socre_of_posting_){
	        	// do nothing
	        }
	        else{
				  scores_set.docCandidateQualifyStatus = false;

				  if (i == 0){
					  scores_set.posting0QualifyStatus = false;
				  }
				  else if (i == 1){
					  scores_set.posting1QualifyStatus = false;
				  }
				  else if (i == 2){
					  scores_set.posting2QualifyStatus = false;
				  }
				  else if (i == 3){
					  scores_set.posting3QualifyStatus = false;
				  }
				  else if (i == 4){
					  scores_set.posting4QualifyStatus = false;
				  }
				  else if (i == 5){
					  scores_set.posting5QualifyStatus = false;
				  }
				  else if (i == 6){
					  scores_set.posting6QualifyStatus = false;
				  }
				  else if (i == 7){
					  scores_set.posting7QualifyStatus = false;
				  }
				  else if (i == 8){
					  scores_set.posting8QualifyStatus = false;
				  }
				  else if (i == 9){
					  scores_set.posting9QualifyStatus = false;
				  }
			}
	        bm25_sum += partial_bm25;
	      }
	      scores_set.totalScore = bm25_sum;

	      ////////////////////////////////////////////////////////////////////////////////////////////////////////////
	      // Updated by Wei on 2014/01/13 night at school
	      // Currently in use
          // Use a heap to maintain the top-k documents. This has to be a min heap,
    	  // where the lowest scoring document is on top, so that we can easily pop it,
    	  // and push a higher scoring document if need be.
		  if (!scores_set.docCandidateQualifyStatus){
			  ++total_num_results_been_filtered;
		  }

	      if (total_num_results < num_results) {
    	    // We insert a document if we don't have k documents yet.
    	    results[total_num_results] = make_pair(scores_set, did);
    	    push_heap(results, results + total_num_results + 1, ResultCompare2());
    	  }
    	  else
    	  {
    	    if (scores_set.totalScore > results->first.totalScore)
    	    {
    		  // We insert a document only if it's score is greater than the minimum scoring document in the heap.
    		  pop_heap(results, results + num_results, ResultCompare2());
    		  results[num_results - 1].first = scores_set;
    		  results[num_results - 1].second = did;
    		  push_heap(results, results + num_results, ResultCompare2());
    	    }
    	  }
          ++total_num_results;
          ++did;  // Search for next docID.
          ////////////////////////////////////////////////////////////////////////////////////////////////////////////

          /*
	      ////////////////////////////////////////////////////////////////////////////////////////////////////////////
	      // Updated by Wei on 2014/01/13 night at school
	      // OLD version
	      if (!scores_set.docCandidateQualifyStatus){
	      	// although I already have an document candidate here, but one or more of the postings are NOT qualified for the posting threshold.
	      	// So, here, the injection into the priority heap operation can be ignored.
	      	// In that case, there is no real logic here.
	      	++did;  // Search for next docID.
	      }
	      else{
	          // Use a heap to maintain the top-k documents. This has to be a min heap,
	    	  // where the lowest scoring document is on top, so that we can easily pop it,
	    	  // and push a higher scoring document if need be.
	    	  if (total_num_results < num_results) {
	    	    // We insert a document if we don't have k documents yet.
	    	    results[total_num_results] = make_pair(scores_set, did);
	    	    push_heap(results, results + total_num_results + 1, ResultCompare2());
	    	  }
	    	  else
	    	  {
	    	    if (scores_set.totalScore > results->first.totalScore)
	    	    {
	    		  // We insert a document only if it's score is greater than the minimum scoring document in the heap.
	    		  pop_heap(results, results + num_results, ResultCompare2());
	    		  results[num_results - 1].first = scores_set;
	    		  results[num_results - 1].second = did;
	    		  push_heap(results, results + num_results, ResultCompare2());
	    	    }
	    	  }
	          ++total_num_results;
	          ++did;  // Search for next docID.
	      }
	      ////////////////////////////////////////////////////////////////////////////////////////////////////////////
	      */
	    }
	  }

	  // Sort top-k results in descending order by document score.
	  sort(results, results + min(num_results, total_num_results), ResultCompare2());

	  // Updated by Wei on 2014/01/13 night at school
	  // cout << total_num_results_been_filtered << " results have been filtered." << endl;
	  return total_num_results;

  }
  else{
	  cout << "unsupported logic" << endl;
	  return total_num_results;
  }
}

// Returns the total number of document results found in the intersection.
// Note that there is not a guaranteed order of same scoring docIDs.
int LocalQueryProcessor::IntersectLists(ListData** merge_lists, int num_merge_lists, ListData** lists, int num_lists, Result* results, int num_results, bool pruningProjectSwitch) {
  // Intersects 'num_lists' inverted lists — optionally driving the intersection with
  // the OR-merge of 'num_merge_lists' lists — scores every docID present in all of
  // 'lists' with BM25, and stores the top 'num_results' (score, docID) pairs, sorted
  // in descending score order, into 'results'.
  //
  // Parameters:
  //   merge_lists / num_merge_lists: optional lists whose union supplies candidate
  //       docIDs (may be NULL / 0, in which case lists[0] drives the intersection).
  //   lists / num_lists: the lists every result docID must appear in.
  //   results / num_results: output array with capacity for 'num_results' entries.
  //   pruningProjectSwitch: when true, the per-term document counts used for the IDF
  //       component are taken from 'originalListLengthsVectorForPruningProjectForCurrentQueries_'
  //       (the same overall statistics for every pruned index) instead of from the
  //       lists themselves.
  //
  // Returns the total number of docIDs found in the intersection; this may exceed
  // 'num_results' (only the top-k results are actually stored).
  //
  // NOTE(review): the previously unconditional debug 'cout' statements in the
  // scoring loop have been commented out (matching this file's convention for
  // debug output); they performed per-posting I/O on the hot path.

  // We have a choice of whether to use a heap (push() / pop() an array) or just search through an array to replace low scoring results
  // and finally sorting it before returning the top-k results in sorted order.
  // For k = 10 results, an array performs only slightly better than a heap. As k increases above 10, heap should be faster.
  // In the general case, a heap should be used (unless k is less than 10), so this option should be 'false'.
  const bool kUseArrayInsteadOfHeap = false;

  int total_num_results = 0;

  // State for the array-instead-of-heap top-k technique.
  float curr_min_doc_score;
  Result* min_scoring_result = NULL;

  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
  const float kBm25K1 =  2.0;  // k1
  const float kBm25B = 0.75;   // b

  // We can precompute a few of the BM25 values here.
  const float kBm25NumeratorMul = kBm25K1 + 1;
  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

  // BM25 components.
  float bm25_sum;      // The BM25 sum for the current document we're processing in the intersection operation.
  float partial_bm25;  // The partial BM25 score for each (document, term) posting we're processing in AND semantics.
  int doc_len;
  uint32_t f_d_t;

  uint32_t did = 0;
  uint32_t d;

  int i;  // Index for various loops.

  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
  // NOTE(review): a variable length array is a GCC/Clang extension, not standard C++.
  float idf_t[num_lists];
  int num_docs_t;
  for (i = 0; i < num_lists; ++i) {
    // For the pruning project, use the same overall statistics for all the pruned
    // indices; otherwise, use whatever document count this index reports.
    if (pruningProjectSwitch) {
      num_docs_t = originalListLengthsVectorForPruningProjectForCurrentQueries_[i];
    } else {
      num_docs_t = lists[i]->num_docs_complete_list();
    }

    // debug:
    // cout << "i:" << i << " num_docs_t:" << num_docs_t << endl;

    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
  }

  // Necessary for the merge lists.
  // TODO: Can also try the heap based method here. Can select between heap and array method based on 'num_merge_lists'.
  uint32_t min_doc_id;

  while (did < ListData::kNoMoreDocs) {
    if (merge_lists != NULL) {  // For the lists which we are merging.
      // Select the lowest docID across the merge lists (ignoring duplicates among the
      // merge lists and any docIDs we have skipped past through AND mode operation).
      min_doc_id = ListData::kNoMoreDocs;
      for (i = 0; i < num_merge_lists; ++i) {
        if ((d = merge_lists[i]->NextGEQ(did)) < min_doc_id) {
          min_doc_id = d;
        }
      }

      assert(min_doc_id >= did);

      did = min_doc_id;
      i = 0;  // The merged candidate must still be checked against every list in 'lists'.
    } else {
      // Get the next element from the shortest list.
      // The lists in 'lists' are assumed to already be sorted (shortest first).
      did = lists[0]->NextGEQ(did);

      i = 1;  // lists[0] trivially contains 'did'; start checking from lists[1].
    }

    if (did == ListData::kNoMoreDocs)
      break;

    d = did;

    // Try to find entries with the same docID in the other lists.
    for (; (i < num_lists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
      continue;
    }

    if (d > did)
    {
      // Not in intersection; resume the search from the docID that skipped ahead.
      did = d;
    }
    else
    {
      assert(d == did);
      // Compute the BM25 score from the term frequencies.
      // The document length is docID-dependent only, so fetch it once per document
      // rather than once per list.
      doc_len = index_reader_.document_map().GetDocumentLength(did);
      bm25_sum = 0;
      for (i = 0; i < num_lists; ++i) {
        f_d_t = lists[i]->GetFreq();
        partial_bm25 = idf_t[i] * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
        // debug:
        // cout << "f_d_t:" << f_d_t << " doc_len:" << doc_len << " idf_t[i]:" << idf_t[i] << " partial_bm25:" << partial_bm25 << " ";
        // cout << partial_bm25 << " ";

        bm25_sum += partial_bm25;
      }

      if (kUseArrayInsteadOfHeap) {  // Use an array to maintain the top-k documents.
        if (total_num_results < num_results) {
          results[total_num_results] = make_pair(bm25_sum, did);
          if (min_scoring_result == NULL || bm25_sum < min_scoring_result->first)
            min_scoring_result = results + total_num_results;
        } else {
          if (bm25_sum > min_scoring_result->first) {
            // Replace the min scoring result with the current (higher scoring) result.
            min_scoring_result->first = bm25_sum;
            min_scoring_result->second = did;

            // Find the new min scoring document.
            curr_min_doc_score = numeric_limits<float>::max();
            for (i = 0; i < num_results; ++i) {
              if (results[i].first < curr_min_doc_score) {
                curr_min_doc_score = results[i].first;
                min_scoring_result = results + i;
              }
            }
          }
        }
      } else {
        // Use a heap to maintain the top-k documents. This has to be a min heap,
        // where the lowest scoring document is on top, so that we can easily pop it,
        // and push a higher scoring document if need be.
        if (total_num_results < num_results) {
          // We insert a document if we don't have k documents yet.
          results[total_num_results] = make_pair(bm25_sum, did);
          push_heap(results, results + total_num_results + 1, ResultCompare());
        } else {
          if (bm25_sum > results->first) {
            // We insert a document only if its score is greater than the minimum scoring document in the heap.
            pop_heap(results, results + num_results, ResultCompare());
            results[num_results - 1].first = bm25_sum;
            results[num_results - 1].second = did;
            push_heap(results, results + num_results, ResultCompare());
          }
        }
      }
      ++total_num_results;
      ++did;  // Search for next docID.
    }
  }

  // Sort top-k results in descending order by document score.
  sort(results, results + min(num_results, total_num_results), ResultCompare());

  return total_num_results;
}

// Processes queries in AND mode. Utilizes position data for the top scoring docIDs.
// The top docIDs (the number is configured within the function, by 'kNumTopPositionsToScore') are ranked according to BM25,
// and their position data is stored as well; these top scoring docIDs are then ranked along with position information,
// finally storing the new top 'num_results' docIDs into 'results'.
// Returns the total number of document results found in the intersection.
int LocalQueryProcessor::IntersectListsTopPositions(ListData** lists, int num_lists, Result* results, int num_results) {
  assert(use_positions_ == true);

  // Maintain the top docIDs in a heap of this size, scored using standard BM25.
  // Then, utilize the position information for these results and keep only the top 'num_results'.
  const int kNumTopPositionsToScore = max(200, num_results);

  const int kNumLists = num_lists;                              // The number of lists we traverse.
  const int kMaxNumResults = num_results;                       // The maximum number of results we have to return.
  const int kMaxPositions = ChunkDecoder::kMaxProperties;       // The maximum number of positions for a docID in any list.
  const int kResultPositionStride = kMaxPositions + 1;          // For each result, per list, we store all the positions, plus an integer
                                                                // indicating the number of positions stored.
  const int kResultStride = kNumLists * kResultPositionStride;  // For each result, we have 'num_lists' worth of position information.

  // We will store position information for the top candidates in this array. The first 'kNumTopPositionsToScore' results will be stored sequentially,
  // but afterwards any result that gets pushed out of the top candidates heap, will have its positions replaced.
  // We always store a pointer to the start of the positions for each candidate document.
  uint32_t* position_pool = new uint32_t[kResultStride * kNumTopPositionsToScore];

  // The k temporary docID, score, and position pointer tuples, with a score comparator to maintain the top-k results.
  ResultPositionTuple* result_position_tuples = new ResultPositionTuple[kNumTopPositionsToScore];

  int total_num_results = 0;

  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
  const float kBm25K1 =  2.0;  // k1
  const float kBm25B = 0.75;   // b

  // We can precompute a few of the BM25 values here.
  const float kBm25NumeratorMul = kBm25K1 + 1;
  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len_;

  // BM25 components.
  float bm25_sum;  // The BM25 sum for the current document we're processing in the intersection.
  int doc_len_d;   // The length for the current document we're processing in the intersection.
  // Using variable length arrays here.
  uint32_t f_d_t[kNumLists];                 // The document term frequencies, one per list.
  const uint32_t* positions_d_t[kNumLists];  // The document position pointers, one per list.
  float acc_d_t[kNumLists];                  // The term proximity accumulators, one per list.
  float idf_t[kNumLists];                    // The inverse document frequencies, one per list.

  uint32_t did = 0;
  uint32_t d;

  uint32_t num_positions;

  int i, j;

  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for each list.
  int num_docs_t;
  for (i = 0; i < kNumLists; ++i) {
    num_docs_t = lists[i]->num_docs_complete_list();
    idf_t[i] = log10(1 + (collection_total_num_docs_ - num_docs_t + 0.5) / (num_docs_t + 0.5));
  }

  while (did < ListData::kNoMoreDocs) {
    // Get next element from shortest list.
    if ((did = lists[0]->NextGEQ(did)) == ListData::kNoMoreDocs)
      break;

    d = did;

    // Try to find entries with same docID in other lists.
    for (i = 1; (i < kNumLists) && ((d = lists[i]->NextGEQ(did)) == did); ++i) {
      continue;
    }

    if (d > did) {
      // Not in intersection.
      did = d;
    } else {
      assert(d == did);

      // Compute BM25 score from frequencies.
      bm25_sum = 0;
      doc_len_d = index_reader_.document_map().GetDocumentLength(did);
      for (i = 0; i < kNumLists; ++i) {
        f_d_t[i] = lists[i]->GetFreq();
        positions_d_t[i] = lists[i]->curr_chunk_decoder().current_positions();
        bm25_sum += idf_t[i] * (f_d_t[i] * kBm25NumeratorMul) / (f_d_t[i] + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len_d);
      }

      // Use a heap to maintain the top-k documents. This has to be a min heap,
      // where the lowest scoring document is on top, so that we can easily pop it,
      // and push a higher scoring document if need be.
      if (total_num_results < kNumTopPositionsToScore) {
        // We insert a document if we don't have k documents yet.
        result_position_tuples[total_num_results].doc_id = did;
        result_position_tuples[total_num_results].doc_len = doc_len_d;
        result_position_tuples[total_num_results].score = bm25_sum;
        result_position_tuples[total_num_results].positions = &position_pool[total_num_results * kResultStride];
        for (i = 0; i < kNumLists; ++i) {
          num_positions = min(f_d_t[i], static_cast<uint32_t>(kMaxPositions));
          result_position_tuples[total_num_results].positions[i * kResultPositionStride] = num_positions;
          memcpy(&result_position_tuples[total_num_results].positions[(i * kResultPositionStride) + 1], positions_d_t[i], num_positions * sizeof(*positions_d_t[i]));
        }
        push_heap(result_position_tuples, result_position_tuples + total_num_results + 1);
      } else {
        if (bm25_sum > result_position_tuples[0].score) {
          // We insert a document only if it's score is greater than the minimum scoring document in the heap.
          pop_heap(result_position_tuples, result_position_tuples + kNumTopPositionsToScore);
          result_position_tuples[kNumTopPositionsToScore - 1].doc_id = did;
          result_position_tuples[kNumTopPositionsToScore - 1].doc_len = doc_len_d;
          result_position_tuples[kNumTopPositionsToScore - 1].score = bm25_sum;
          // Replace the positions.
          for (i = 0; i < kNumLists; ++i) {
            num_positions = min(f_d_t[i], static_cast<uint32_t>(kMaxPositions));
            result_position_tuples[kNumTopPositionsToScore - 1].positions[i * kResultPositionStride] = num_positions;
            memcpy(&result_position_tuples[kNumTopPositionsToScore - 1].positions[(i * kResultPositionStride) + 1], positions_d_t[i], num_positions * sizeof(*positions_d_t[i]));
          }
          // BUG FIX: the heap spans 'kNumTopPositionsToScore' elements (see the matching
          // pop_heap() above); the previous code pushed with 'num_results', corrupting
          // the heap invariant whenever kNumTopPositionsToScore > num_results.
          push_heap(result_position_tuples, result_position_tuples + kNumTopPositionsToScore);
        }
      }

      ++total_num_results;
      ++did;  // Search for next docID.
    }
  }

  // Utilize positions and prepare final result set.
  // Note that positions are stored in gap coded form.
  // We use a formula that rewards proximity of the query terms. It's too slow to run on all possible candidates.
  const int kNumReturnedResults = min(kNumTopPositionsToScore, total_num_results);

  // Term proximity components.
  int r;
  uint32_t k, l;
  uint32_t num_positions_top, num_positions_bottom;
  const uint32_t* positions_top, *positions_bottom;
  uint32_t positions_top_actual, positions_bottom_actual;
  int dist;
  float ids;

  for (r = 0; r < kNumReturnedResults; ++r) {
    // BUG FIX: reset the proximity accumulators for every candidate document.
    // Previously they were zeroed only once before this loop, so each candidate's
    // proximity contribution included mass accumulated from all prior candidates.
    for (i = 0; i < kNumLists; ++i) {
      acc_d_t[i] = 0;
    }

    for (i = 0; i < kNumLists; ++i) {
      num_positions_top = result_position_tuples[r].positions[i * kResultPositionStride];
      positions_top = &result_position_tuples[r].positions[i * kResultPositionStride + 1];

      for (j = i + 1; j < kNumLists; ++j) {
        num_positions_bottom = result_position_tuples[r].positions[j * kResultPositionStride];
        positions_bottom = &result_position_tuples[r].positions[j * kResultPositionStride + 1];

        positions_top_actual = 0;  // Positions are stored gap coded for each document and we need to decode the gaps on the fly.
        for (k = 0; k < num_positions_top; ++k) {
          positions_top_actual += positions_top[k];

          positions_bottom_actual = 0;  // Positions are stored gap coded for each document and we need to decode the gaps on the fly.
          for (l = 0; l < num_positions_bottom; ++l) {
            positions_bottom_actual += positions_bottom[l];

            dist = positions_top_actual - positions_bottom_actual;
            assert(dist != 0);  // This is an indication of a bug in the program.

            ids = 1.0 / (dist * dist);

            acc_d_t[i] += idf_t[i] * ids;
            acc_d_t[j] += idf_t[j] * ids;
          }
        }
      }

      // Include the normalized proximity score.
      result_position_tuples[r].score += min(1.0f, idf_t[i]) * (acc_d_t[i] * kBm25NumeratorMul) / (acc_d_t[i] + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * result_position_tuples[r].doc_len);
    }
  }

  // Sort top results in descending order by document score.
  sort(result_position_tuples, result_position_tuples + kNumReturnedResults, ResultPositionTuple());

  // Copy the top scoring documents into the final result set.
  const int kNumFinalResultSet = min(kMaxNumResults, kNumReturnedResults);
  for (i = 0; i < kNumFinalResultSet; ++i) {
    results[i].first = result_position_tuples[i].score;
    results[i].second = result_position_tuples[i].doc_id;
  }

  delete[] result_position_tuples;
  delete[] position_pool;

  return total_num_results;
}

void LocalQueryProcessor::computeBM25ScoreGivenTerm(string term, int qid, ofstream &outputFileHandler, map<string, float> &bm25LookUpDict, string flagString){
  // Computes the BM25 score of every posting in 'term's inverted list and writes the
  // scores to 'outputFileHandler'. 'flagString' selects the output format: "1" writes
  // only the score per line, "2" writes "<term> <docID> <score>" per line.
  // NOTE(review): 'qid' and 'bm25LookUpDict' are currently unused by this function.

  // All the words in the lexicon are lower case, so queries must be too, convert them to lower case.
  const int num_query_terms = 1;
  LexiconData* query_term_data[num_query_terms];  // Using a variable length array here.

  // Look up the single term in the lexicon; for AND semantics it must be present
  // for processing to proceed.
  int curr_query_term_num = 0;
  LexiconData* lex_data = index_reader_.lexicon().GetEntry(term.c_str(), term.length());
  if (lex_data != NULL) {
    query_term_data[curr_query_term_num++] = lex_data;
  } else {
    cout << "Term NOT Found in lexicon:" << term << endl;
  }

  // Upper bound on the number of per-posting BM25 scores we can collect for one term.
  const int number_of_BM25_term_score = 1000000;

  // BUG FIX: 'total_num_results' was previously read uninitialized inside an
  // 'if (false)' warning-suppression hack (undefined behavior). All three are now
  // initialized up front and the hack is removed.
  int results_size = 0;
  int total_num_results = 0;
  double query_elapsed_time = 0;

  if (curr_query_term_num == num_query_terms) {
    results_size = number_of_BM25_term_score;

    // These results are ranked from highest BM25 score to lowest.
    // BUG FIX: this used to be a stack VLA of one million Result entries (several
    // megabytes), which risks overflowing the stack; allocate on the heap instead.
    vector<Result> ranked_results(number_of_BM25_term_score);

    Timer query_time;  // Time how long it takes to answer a query.
    switch (query_algorithm_) {
      case kDaatAnd:
      case kDaatOr:
      case kDaatAndTopPositions:
        total_num_results = ComputeBM25ScoreForSpecifcTerm(query_term_data, num_query_terms, &ranked_results[0], &results_size, term);
        break;
      default:
        total_num_results = 0;
        assert(false);
    }
    query_elapsed_time = query_time.GetElapsedTime();

    // Warm-up queries are excluded from the benchmarking totals.
    if (!warm_up_mode_) {
      total_querying_time_ += query_elapsed_time;
      ++total_num_queries_;
    }

    cout.setf(ios::fixed, ios::floatfield);
    cout.setf(ios::showpoint);

    cout << "the length:" << results_size << endl;
    for (int i = 0; i < results_size; ++i) {
      switch (result_format_) {
        case kNormal:
          if (!silent_mode_){
            // No record for the term and the docID, just the BM25 score.
            if (flagString == "1"){
              outputFileHandler << ranked_results[i].first << endl;
            }
            else if(flagString == "2"){
              outputFileHandler << term << " "<< ranked_results[i].second << " "<< ranked_results[i].first << endl;
            }
            else{
              cout << "flag value do NOT recognized." << endl;
              exit(1);
            }
          }
          break;
        default:
          assert(false);
      }
    }
  }
  // When the term is not in the lexicon, all counters remain zero and nothing is written.
}

// In case of AND queries, we only count queries for which all terms are in the lexicon as part of the number of queries executed and the total elapsed querying
// time. A query that contains terms which are not in the lexicon will just terminate with 0 results and 0 running time, so we ignore these for our benchmarking
// purposes.
// In case of AND queries, we only count queries for which all terms are in the lexicon as part of the number of queries executed and the total elapsed querying
// time. A query that contains terms which are not in the lexicon will just terminate with 0 results and 0 running time, so we ignore these for our benchmarking
// purposes.
// Returns the number of result strings appended to 'resultsInString' (-1 for an empty query).
int LocalQueryProcessor::ExecuteQuery(string query_line, int qid, vector<string> &resultsInString, int max_num_results_to_return) {
  cout << "--->[serverHiddenInfo]LocalQueryProcessor::ExecuteQuery(..4.) called" << endl;
  // All the words in the lexicon are lower case, so queries must be too, convert them to lower case.
  // Wei Added: What about some capital letter can indicate something important? Like a human's name?
  for (size_t i = 0; i < query_line.size(); i++) {
    // BUG FIX: <cctype> functions have undefined behavior for negative char values
    // (e.g. non-ASCII bytes on platforms with signed char); cast through unsigned char.
    unsigned char c = static_cast<unsigned char>(query_line[i]);
    if (isupper(c))
      query_line[i] = static_cast<char>(tolower(c));

    // We need to remove punctuation from the queries, since we only index alphanumeric characters and anything separated by a non-alphanumeric
    // character is considered a token separator by our parser. Not removing punctuation will result in the token not being found in the lexicon.
    int int_val = static_cast<unsigned char>(query_line[i]);
    if (!((int_val >= 48 && int_val < 58) || (int_val >= 65 && int_val < 91) || (int_val >= 97 && int_val < 123) || (int_val == 32))) {
      query_line[i] = ' ';  // Replace it with a space.
    }
  }

  if (query_mode_ == kBatch) {
    if (!silent_mode_)
      cout << "\nSearch: " << query_line << endl;
  }

  // Tokenize the query, dropping stop words if a stop list was loaded.
  istringstream qss(query_line);
  vector<string> words;
  string term;
  while (qss >> term) {
    // Apply query time word stop list.
    // Remove words that appear in our stop list.
    if (!stop_words_.empty()) {
      if (stop_words_.find(term) == stop_words_.end()) {
        words.push_back(term);
      }
    } else {
      words.push_back(term);
    }
  }

  if (words.size() == 0) {
    if (!silent_mode_)
      cout << "Please enter a query.\n" << endl;
    return -1;
  }

  // Remove duplicate words, since there is no point in traversing lists for the same word multiple times.
  sort(words.begin(), words.end());
  words.erase(unique(words.begin(), words.end()), words.end());

  int num_query_terms = words.size();
  LexiconData* query_term_data[num_query_terms];  // Using a variable length array here.

  // For AND semantics, all query terms must exist in the lexicon for query processing to proceed.
  // For OR semantics, any of the query terms can be in the lexicon.
  enum ProcessingSemantics {
    kAnd, kOr, kUndefined
  };
  ProcessingSemantics processing_semantics;
  switch (query_algorithm_) {
    case kDaatAnd:
    case kDaatAndTopPositions:
    case kDualLayeredOverlappingDaat:
    case kDualLayeredOverlappingMergeDaat:
      processing_semantics = kAnd;
      break;
    case kDaatOr:
    case kMultiLayeredDaatOr:
    case kMultiLayeredDaatOrMaxScore:
    case kLayeredTaatOrEarlyTerminated:
    case kWand:
    case kDualLayeredWand:
    case kMaxScore:
    case kDualLayeredMaxScore:
      processing_semantics = kOr;
      break;
    default:
      processing_semantics = kUndefined;
      assert(false);
  }

  if (result_format_ == kCompare) {
    // Print the query.
    for (int i = 0; i < num_query_terms; ++i) {
      cout << words[i] << ((i != num_query_terms - 1) ? ' ' : '\n');
    }
  }

  // Resolve each term against the lexicon; terms not found are skipped.
  int curr_query_term_num = 0;
  for (int i = 0; i < num_query_terms; ++i) {
    LexiconData* lex_data = index_reader_.lexicon().GetEntry(words[i].c_str(), words[i].length());
    if (lex_data != NULL)
      query_term_data[curr_query_term_num++] = lex_data;
  }

  // OR semantics can proceed with only the terms that were found.
  if (processing_semantics == kOr) {
    num_query_terms = curr_query_term_num;
  }

  int results_size;
  int total_num_results;
  double query_elapsed_time;

  if (curr_query_term_num == num_query_terms) {
    // Here, I use the max_num_results_to_return to replace the original one: max_num_results_
    results_size = max_num_results_to_return;

    // These results are ranked from highest BM25 score to lowest.
    Result ranked_results[max_num_results_to_return];  // Using a variable length array here.

    Timer query_time;  // Time how long it takes to answer a query.
    switch (query_algorithm_) {
      case kDaatAnd:
      case kDaatOr:
      case kDaatAndTopPositions:
        total_num_results = ProcessQuery(query_term_data, num_query_terms, ranked_results, &results_size);
        break;
      case kDualLayeredOverlappingDaat:
      case kDualLayeredOverlappingMergeDaat:
        total_num_results = ProcessLayeredQuery(query_term_data, num_query_terms, ranked_results, &results_size);
        break;
      case kMultiLayeredDaatOr:
        total_num_results = ProcessMultiLayeredDaatOrQuery(query_term_data, num_query_terms, ranked_results, &results_size);
        break;
      case kMultiLayeredDaatOrMaxScore:
        total_num_results = ProcessMultiLayeredDaatOrMaxScoreQuery(query_term_data, num_query_terms, ranked_results, &results_size);
        break;
      case kLayeredTaatOrEarlyTerminated:
        total_num_results = ProcessLayeredTaatPrunedEarlyTerminatedQuery(query_term_data, num_query_terms, ranked_results, &results_size);
        break;
      case kWand:
        total_num_results = MergeListsWand(query_term_data, num_query_terms, ranked_results, &results_size, false);
        break;
      case kDualLayeredWand:
        total_num_results = MergeListsWand(query_term_data, num_query_terms, ranked_results, &results_size, true);
        break;
      case kMaxScore:
        total_num_results = MergeListsMaxScore(query_term_data, num_query_terms, ranked_results, &results_size, false);
        break;
      case kDualLayeredMaxScore:
        total_num_results = MergeListsMaxScore(query_term_data, num_query_terms, ranked_results, &results_size, true);
        break;
      default:
        total_num_results = 0;
        assert(false);
    }
    query_elapsed_time = query_time.GetElapsedTime();

    // Warm-up queries are excluded from the benchmarking totals.
    if (!warm_up_mode_) {
      total_querying_time_ += query_elapsed_time;
      ++total_num_queries_;
    }

    cout.setf(ios::fixed, ios::floatfield);
    cout.setf(ios::showpoint);

    if (result_format_ == kCompare) {
      cout << "num results: " << results_size << endl;
    }

    // Emit the ranked results in the configured output format.
    for (int i = 0; i < results_size; ++i) {
      switch (result_format_) {
        case kNormal:
          if (!silent_mode_)
          {
            string resultString = boost::lexical_cast<string>( ranked_results[i].first ) + " " + boost::lexical_cast<string>( ranked_results[i].second ) + " " + string( index_reader_.document_map().GetDocumentUrl(ranked_results[i].second) );

            resultsInString.push_back(resultString);

          }
          break;
        case kTrec:
          cout << qid << '\t' << "Q0" << '\t' << index_reader_.document_map().GetDocumentNumber(ranked_results[i].second) << '\t' << i << '\t'
              << ranked_results[i].first << '\t' << "PolyIRTK" << "\n";
          break;
        case kCompare:
          cout << setprecision(2) << setw(2) << ranked_results[i].first << "\t" << ranked_results[i].second << setprecision(6) << "\n";
          break;
        case kDiscard:
          break;
        default:
          assert(false);
      }
    }
  } else {
    // One of the query terms did not exist in the lexicon.
    results_size = 0;
    total_num_results = 0;
    query_elapsed_time = 0;
  }

  if (result_format_ == kNormal)
    if (!silent_mode_){
      cout << "The results from the polyIRToolkit has been temp* hidden." << endl;
      cout << "\nShowing " << results_size << " results out of " << total_num_results << ". (" << setprecision(1) << (query_elapsed_time * 1000)
          << setprecision(6) << " ms)\n";

    }

  return resultsInString.size();
}

void LocalQueryProcessor::UpdateInvertedIndexesPreComputedScores(string query_term, uint32_t doc_id, float new_score){
  // Replaces the precomputed score of the ('query_term', 'doc_id') posting with 'new_score'.
  cout << "Change old posting to the new posting(" << query_term << "," << doc_id << "," << new_score << ")" << endl;

  LexiconData* query_term_data[1];  // A single-term "query".
  LexiconData* lex_data = index_reader_.lexicon().GetEntry(query_term.c_str(), query_term.length());
  if (lex_data == NULL) {
    // BUG FIX: previously an uninitialized pointer was passed along when the term
    // was not in the lexicon; bail out instead.
    cout << "Term NOT Found in lexicon:" << query_term << endl;
    return;
  }
  query_term_data[0] = lex_data;

  ProcessUpdateInvertedIndexesPreComputedScores(query_term_data, query_term, doc_id, new_score);
}




// In case of AND queries, we only count queries for which all terms are in the lexicon as part of the number of queries executed and the total elapsed querying
// time. A query that contains terms which are not in the lexicon will just terminate with 0 results and 0 running time, so we ignore these for our benchmarking
// purposes.
int LocalQueryProcessor::ModifyInvertedIndexesPreComputedScores(int qid, string query_line, string semantics, string mode){
	  // Updated by Wei: 2012/08/02
	  cout << "ModifyInvertedIndexesPreComputedScores(...) called." << endl;

	  // All the words in the lexicon are lower case, so queries must be too, convert them to lower case.
	  for (size_t i = 0; i < query_line.size(); i++) {
	    if (isupper(query_line[i]))
	      query_line[i] = tolower(query_line[i]);

	    // We need to remove punctuation from the queries, since we only index alphanumeric characters and anything separated by a non-alphanumeric
	    // character is considered a token separator by our parser. Not removing punctuation will result in the token not being found in the lexicon.
	    int int_val = query_line[i];
	    if (!((int_val >= 48 && int_val < 58) || (int_val >= 65 && int_val < 91) || (int_val >= 97 && int_val < 123) || (int_val == 32))) {
	      query_line[i] = ' ';  // Replace it with a space.
	    }
	  }


	  istringstream qss(query_line);
	  vector<string> words;
	  string term;
	  while (qss >> term) {
	    // Apply query time word stop list.
	    // Remove words that appear in our stop list.
	    if (!stop_words_.empty()) {
	      if (stop_words_.find(term) == stop_words_.end()) {
	        words.push_back(term);
	      }
	    } else {
	      words.push_back(term);
	    }
	  }

	  if (words.size() == 0) {
	    if (!silent_mode_)
	      cout << "Please enter a query.\n" << endl;
	    return -1;
	  }

	  // The following are the input query line processing process.

	  // Remove duplicate words, since there is no point in traversing lists for the same word multiple times.
	  sort(words.begin(), words.end());
	  words.erase(unique(words.begin(), words.end()), words.end());

	  int num_query_terms = words.size();
	  LexiconData* query_term_data[num_query_terms];  // Using a variable length array here.

	  if(semantics == "AND"){
		  processing_semantics_ = kAnd;
	  }
	  else if (semantics == "OR"){
		  processing_semantics_ = kOr;
	  }
	  else{
	      processing_semantics_ = kUndefined;
	      assert(false);
	  }

	  int curr_query_term_num = 0;
	  for (int i = 0; i < num_query_terms; ++i) {
	    LexiconData* lex_data = index_reader_.lexicon().GetEntry(words[i].c_str(), words[i].length());
	    if (lex_data != NULL)
	      query_term_data[curr_query_term_num++] = lex_data;
	  }

	  if (processing_semantics_ == kOr) {
	    num_query_terms = curr_query_term_num;
	  }


	  int total_num_posting_scores;
	  if (curr_query_term_num == num_query_terms) {
	    switch (processing_semantics_) {
	      case kAnd:
	      case kOr:
	        total_num_posting_scores = ProcessModifyInvertedIndexesPreComputedScores(query_term_data, num_query_terms,mode);
	        break;
	      default:
	        total_num_posting_scores = 0;
	        assert(false);
	        break;
	    }
	  }
	  else{
	    cout << "One of the query terms did not exist in the lexicon." << endl;
	    total_num_posting_scores = 0;
	  }
	  return total_num_posting_scores;
	  //cout << "Displaying " << total_num_posting_scores << " results for score modification." << endl;
}

void LocalQueryProcessor::ExecuteRequest(string request_line) {
  // Parses a "<term> <docID>" request line, then reports the rank of that docID's
  // posting within the term's inverted list (plus timing information) on stdout.
  // Exits the process if the term is not in the lexicon.
  istringstream qss(request_line);

  string term;
  string docIDInStringFormat;
  uint32_t docIDInUint32_tFormat;

  qss >> term;
  qss >> docIDInStringFormat;

  // BUG FIX: atoi() returns a (signed) int, silently truncating docIDs above
  // INT_MAX even though the target is uint32_t; strtoul() covers the full range.
  docIDInUint32_tFormat = static_cast<uint32_t>(strtoul(docIDInStringFormat.c_str(), NULL, 10));

  LexiconData* term_data[1];  // Using an array with length 1 here

  int curr_query_term_num = 0;
  LexiconData* lex_data = index_reader_.lexicon().GetEntry(term.c_str(), term.length());
  if (lex_data != NULL){
    term_data[curr_query_term_num++] = lex_data;
  }
  else{
    cout << "the term: '" << term << "' is NOT in the lexicon." << endl;
    // strict rules applied. Updated by Wei on 2013/09/10 afternoon
    exit(1);
  }

  double processing_elapsed_time;
  int posting_rank_in_list;	// This's ALL I want currently

  Timer time_clock;  // Time how long it takes to answer a query.
  posting_rank_in_list = GetPostingRankInList(term_data, term, docIDInUint32_tFormat);
  processing_elapsed_time = time_clock.GetElapsedTime();

  // for DEBUG
  cout << "term: " << term << endl;
  cout << "docIDInUint32_tFormat: " << docIDInUint32_tFormat << endl;
  cout << "posting_rank_in_list: " << posting_rank_in_list << endl;
  cout << "processing_elapsed_time: " << processing_elapsed_time << endl;
  cout << endl;
}


// In case of AND queries, we only count queries for which all terms are in the lexicon as part of the number of queries executed and the total elapsed querying
// time. A query that contains terms which are not in the lexicon will just terminate with 0 results and 0 running time, so we ignore these for our benchmarking
// purposes.
// Executes one ranked query end-to-end: (1) lowercases the query and strips
// punctuation, (2) tokenizes it and applies the stop-word list, (3) removes
// duplicate terms, (4) looks every term up in the lexicon, (5) dispatches to
// the list-traversal algorithm selected by query_algorithm_, and (6) prints
// up to max_num_results_ results in the format selected by result_format_.
//
// query_line: raw query text (normalized in place).
// qid:        query identifier, used for batch logging and TREC output.
void LocalQueryProcessor::ExecuteQuery(string query_line, int qid) {
  // All the words in the lexicon are lower case, so queries must be too, convert them to lower case.

  for (size_t i = 0; i < query_line.size(); i++) {
    if (isupper(query_line[i]))
      query_line[i] = tolower(query_line[i]);

    // We need to remove punctuation from the queries, since we only index alphanumeric characters and anything separated by a non-alphanumeric
    // character is considered a token separator by our parser. Not removing punctuation will result in the token not being found in the lexicon.
    // ASCII ranges: digits 48-57, uppercase 65-90, lowercase 97-122, space 32.
    int int_val = query_line[i];
    if (!((int_val >= 48 && int_val < 58) || (int_val >= 65 && int_val < 91) || (int_val >= 97 && int_val < 123) || (int_val == 32))) {
      query_line[i] = ' ';  // Replace it with a space.
    }
  }

  if (query_mode_ == kBatch) {
    // Echo the qid and the normalized query in batch mode.
    // Updated by Wei 2013/08/12 night at school (CURRENT version); the OLD
    // version only printed when !silent_mode_.
	cout << endl;
  	cout << "qid: " << qid << endl;
  	cout << "Search: " << query_line << endl;
  }

  // Tokenize the (already normalized) query on whitespace.
  istringstream qss(query_line);
  vector<string> words;
  string term;
  while (qss >> term) {
    // Apply query time word stop list.
    // Remove words that appear in our stop list.
    if (!stop_words_.empty()) {
      if (stop_words_.find(term) == stop_words_.end()) {
        words.push_back(term);
      }
    } else {
      words.push_back(term);
    }
  }

  if (words.size() == 0) {
    if (!silent_mode_)
      cout << "Please enter a query.\n" << endl;
    return;
  }

  // Remove duplicate words, since there is no point in traversing lists for the same word multiple times.
  sort(words.begin(), words.end());
  words.erase(unique(words.begin(), words.end()), words.end());

  int num_query_terms = words.size();
  LexiconData* query_term_data[num_query_terms];  // Using a variable length array here.

  // For AND semantics, all query terms must exist in the lexicon for query processing to proceed.
  // For OR semantics, any of the query terms can be in the lexicon.
  enum ProcessingSemantics {
    kAnd, kOr, kUndefined
  };
  ProcessingSemantics processing_semantics;
  switch (query_algorithm_) {
    case kDaatAnd:
    case kDaatAndTopPositions:
    case kDualLayeredOverlappingDaat:
    case kDualLayeredOverlappingMergeDaat:
      processing_semantics = kAnd;
      break;
    case kDaatOr:
    case kMultiLayeredDaatOr:
    case kMultiLayeredDaatOrMaxScore:
    case kLayeredTaatOrEarlyTerminated:
    case kWand:
    case kDualLayeredWand:
    case kMaxScore:
    case kDualLayeredMaxScore:
      processing_semantics = kOr;
      break;
    default:
      processing_semantics = kUndefined;
      assert(false);
  }

  if (result_format_ == kCompare) {
    // Print the query.
    for (int i = 0; i < num_query_terms; ++i) {
      cout << words[i] << ((i != num_query_terms - 1) ? ' ' : '\n');
    }
  }

  // Look up every term; terms missing from the lexicon are reported and
  // simply not appended to query_term_data.
  int curr_query_term_num = 0;
  for (int i = 0; i < num_query_terms; ++i) {
    LexiconData* lex_data = index_reader_.lexicon().GetEntry(words[i].c_str(), words[i].length());
    if (lex_data != NULL){
    	query_term_data[curr_query_term_num++] = lex_data;
    }
    else{
    	cout << "the term: '" << words[i] << "' is NOT in the lexicon." << endl;
    }
  }

  // Under OR semantics the query proceeds with just the terms that were
  // found; under AND semantics the equality test below fails whenever any
  // term was missing, and the query is skipped.
  if (processing_semantics == kOr) {
    num_query_terms = curr_query_term_num;
  }

  int results_size;
  int total_num_results;
  double query_elapsed_time;

  if (curr_query_term_num == num_query_terms) {
	results_size = max_num_results_;

    // The returning results are ranked from highest score to lowest using BM25
	// current version updated by Wei 2013/06/06: heap allocation replaced the
	// old variable-length-array versions of both result buffers.
	Result* ranked_results = new Result[max_num_results_]; // Using a pointer here

    // current version updated by Wei 2013/06/06
    Result_Wei_2012* ranked_results2 = new Result_Wei_2012[max_num_results_]; // Using a pointer here

    Timer query_time;  // Time how long it takes to answer a query.
    // Dispatch to the configured algorithm.  Note that the plain DAAT
    // algorithms fill ranked_results2; all the others fill ranked_results.
    switch (query_algorithm_) {
      case kDaatAnd:
      case kDaatOr:
      case kDaatAndTopPositions:
        total_num_results = ProcessQuery2(query_term_data, num_query_terms, ranked_results2, &results_size);
        break;
      case kDualLayeredOverlappingDaat:
      case kDualLayeredOverlappingMergeDaat:
        total_num_results = ProcessLayeredQuery(query_term_data, num_query_terms, ranked_results, &results_size);
        break;
      case kMultiLayeredDaatOr:
        total_num_results = ProcessMultiLayeredDaatOrQuery(query_term_data, num_query_terms, ranked_results, &results_size);
        break;
      case kMultiLayeredDaatOrMaxScore:
        total_num_results = ProcessMultiLayeredDaatOrMaxScoreQuery(query_term_data, num_query_terms, ranked_results, &results_size);
        break;
      case kLayeredTaatOrEarlyTerminated:
        total_num_results = ProcessLayeredTaatPrunedEarlyTerminatedQuery(query_term_data, num_query_terms, ranked_results, &results_size);
        break;
      case kWand:
        total_num_results = MergeListsWand(query_term_data, num_query_terms, ranked_results, &results_size, false);
        break;
      case kDualLayeredWand:
        total_num_results = MergeListsWand(query_term_data, num_query_terms, ranked_results, &results_size, true);
        break;
      case kMaxScore:
        total_num_results = MergeListsMaxScore(query_term_data, num_query_terms, ranked_results, &results_size, false);
        break;
      case kDualLayeredMaxScore:
        total_num_results = MergeListsMaxScore(query_term_data, num_query_terms, ranked_results, &results_size, true);
        break;
      default:
        total_num_results = 0;
        assert(false);
    }
    query_elapsed_time = query_time.GetElapsedTime();

    // Warm-up runs are excluded from the benchmark statistics.
    if (!warm_up_mode_) {
      total_querying_time_ += query_elapsed_time;
      ++total_num_queries_;
    }

    cout.setf(ios::fixed, ios::floatfield);
    cout.setf(ios::showpoint);

    if (result_format_ == kCompare) {
      cout << "num results: " << results_size << endl;
    }

    for (int i = 0; i < results_size; ++i) {
      switch (result_format_) {
        case kNormal:
          if (!silent_mode_){
        	  // output option1: original output by Roman
        	  cout << setw(2) << "Score: " << ranked_results2[i].first.totalScore
        			  	  	  << "\tDocID: " << ranked_results2[i].second
        			  	  	  << "\ttrecID: " << index_reader_.document_map().GetDocumentNumber(ranked_results2[i].second)
        			  	  	  // << "\tURL: " << index_reader_.document_map().GetDocumentUrl(ranked_results2[i].second)
        			  	  	  << "\tdocLength: " << ranked_results2[i].first.doc_length
        			  	  	  << setprecision(6)
        			  	  	  << "\n";

        	  // Disabled alternatives (see version history): output option2
        	  // dumped the per-posting probability and BM25 score-component
        	  // feature vectors for debugging; output option3 (Wei,
        	  // 2014/02/22) was a free-style per-posting score and
        	  // qualify-status dump.
          }
          break;
        case kPruning:
          if (!silent_mode_){
        	  // Updated by Wei on 2014/05/17 night at school: per-result dump
        	  // of the ten posting ranks, ten inverted-list lengths and ten
        	  // per-posting scores, followed by total score, docID and TREC
        	  // document number.
        	  cout << i+1 << " " << ranked_results2[i].first.posting0RankInList << " " << ranked_results2[i].first.posting1RankInList << " " << ranked_results2[i].first.posting2RankInList << " " << ranked_results2[i].first.posting3RankInList << " " << ranked_results2[i].first.posting4RankInList << " " << ranked_results2[i].first.posting5RankInList << " " << ranked_results2[i].first.posting6RankInList << " " << ranked_results2[i].first.posting7RankInList << " " << ranked_results2[i].first.posting8RankInList << " " << ranked_results2[i].first.posting9RankInList << " ";
        	  cout << ranked_results2[i].first.lengthOfTheInvertedList0 << " " << ranked_results2[i].first.lengthOfTheInvertedList1 << " " << ranked_results2[i].first.lengthOfTheInvertedList2 << " " << ranked_results2[i].first.lengthOfTheInvertedList3 << " " << ranked_results2[i].first.lengthOfTheInvertedList4 << " " << ranked_results2[i].first.lengthOfTheInvertedList5 << " " << ranked_results2[i].first.lengthOfTheInvertedList6 << " " << ranked_results2[i].first.lengthOfTheInvertedList7 << " " << ranked_results2[i].first.lengthOfTheInvertedList8 << " " << ranked_results2[i].first.lengthOfTheInvertedList9 << " ";
        	  cout << ranked_results2[i].first.postingScore0 << " " << ranked_results2[i].first.postingScore1 << " " << ranked_results2[i].first.postingScore2 << " " << ranked_results2[i].first.postingScore3 << " " << ranked_results2[i].first.postingScore4 << " " << ranked_results2[i].first.postingScore5 << " " << ranked_results2[i].first.postingScore6 << " " << ranked_results2[i].first.postingScore7 << " " << ranked_results2[i].first.postingScore8 << " " << ranked_results2[i].first.postingScore9 << " ";
        	  cout.unsetf(ios_base::floatfield);
        	  cout << ranked_results2[i].first.totalScore << " "
        		   << ranked_results2[i].second << " "
        		   << index_reader_.document_map().GetDocumentNumber( ranked_results2[i].second )
        		   << "\n";

        	  // Disabled alternatives (2013/06/30, 2014/01/13 and 2014/01/25
        	  // versions): simpler score-only output, and probability /
        	  // qualify-status dumps with the AND-semantics
        	  // docCandidateQualifyStatus or the OR-semantics actualTotalScore
        	  // appended.
          }
          break;
        case kTrec:
          // a version for pruning, normal results output, SIGIR2014, AND semantics
          cout << qid << '\t' << "Q0" << '\t' << index_reader_.document_map().GetDocumentNumber(ranked_results2[i].second) << '\t' << i << '\t'
              << ranked_results2[i].first.totalScore << '\t' << "WeiIRTK" << "\n";
          // The (disabled) OR-semantics SIGIR2014 variant printed
          // ranked_results2[i].first.actualTotalScore instead of totalScore.

          break;
        case kCompare:
          cout << setprecision(2) << setw(2) << ranked_results[i].first << "\t" << ranked_results[i].second << setprecision(6) << "\n";
          break;
        case kDiscard:
          break;
        default:
          assert(false);
      }
    }

    // current version as using the pointer to allocate the memory, then I need to deallocate them somehow.
    // Updated by Wei 2013/06/06
    delete[] ranked_results;
    delete[] ranked_results2;
  }
  else
  {
    // One of the query terms did not exist in the lexicon.
    cout << "This query has query terms NOT in the lexicon." << endl;
	results_size = 0;
    total_num_results = 0;
    query_elapsed_time = 0;
  }

  if (result_format_ == kNormal or result_format_ == kPruning)
	cout << "Showing* " << results_size << " results out of " << total_num_results << ". (" << setprecision(1) << (query_elapsed_time * 1000)
		  << setprecision(6) << " ms)\n";
}

// Interactive "query view" mode.  NOT yet implemented: the function prints a
// warning and returns immediately.  The intended implementation -- reading
// "qid:query" lines from 'input_source' (a file name, or stdin when empty /
// "stdin" / "cin") and executing each one -- is sketched in the commented-out
// code below.
void LocalQueryProcessor::RunQueryViewQueries(const string& input_source) {
  cout << "Updated by Wei 2013/02/28" << endl;
  cout << "This function is currently under construction so PLEASE DO NOT USE THIS FUNCTION AT THIS MOMENT" << endl;
  cout << "Exiting" << endl;  // Fixed typo: message previously read "Existing".
  /*
  ifstream batch_query_file_stream;
  if (!(input_source.empty() || input_source == "stdin" || input_source == "cin")) {
    batch_query_file_stream.open(input_source.c_str());
    if (!batch_query_file_stream) {
      GetErrorLogger().Log("Could not open batch query file '" + input_source + "'.", true);
    }
  }
  istream& is = batch_query_file_stream.is_open() ? batch_query_file_stream : cin;
  vector<pair<int, string> > queries;
  string query_line;
  while (getline(is, query_line)) {
    size_t colon_pos = query_line.find(':');
    if (colon_pos != string::npos && colon_pos < (query_line.size() - 1)) {
      queries.push_back(make_pair(atoi(query_line.substr(0, colon_pos).c_str()), query_line.substr(colon_pos + 1)));
    } else {
      queries.push_back(make_pair(0, query_line));
    }
  }
  warm_up_mode_ = false;
  for (int i = 0; i < static_cast<int> (queries.size()); ++i) {
      ExecuteQuery(queries[i].second, queries[i].first);
  }
  */
}

// Updated by Wei 2013/09/11 night at school
void LocalQueryProcessor::RunBatchRequests(const string& input_source) {
  ifstream batch_query_file_stream;
  if (!(input_source.empty() || input_source == "stdin" || input_source == "cin")) {
    batch_query_file_stream.open(input_source.c_str());
    if (!batch_query_file_stream) {
      GetErrorLogger().Log("Could not open batch query file '" + input_source + "'.", true);
    }
  }

  istream& is = batch_query_file_stream.is_open() ? batch_query_file_stream : cin;

  vector<string> requestLines;
  string request_line;
  while (getline(is, request_line)) {
	requestLines.push_back(request_line);
  }

  Timer time_clock;  // Time how long it takes to answer a query.
  double overall_processing_elapsed_time;
  for (int i = 0; i < static_cast<int> (requestLines.size()); ++i) {
      ExecuteRequest(requestLines[i]);
  }
  overall_processing_elapsed_time = time_clock.GetElapsedTime();
  cout << "overall_processing_elapsed_time: " << overall_processing_elapsed_time << endl;
}


void LocalQueryProcessor::RunBatchQueries(const string& input_source, bool warmup, int num_timed_runs) {
  ifstream batch_query_file_stream;
  if (!(input_source.empty() || input_source == "stdin" || input_source == "cin")) {
    batch_query_file_stream.open(input_source.c_str());
    if (!batch_query_file_stream) {
      GetErrorLogger().Log("Could not open batch query file '" + input_source + "'.", true);
    }
  }

  istream& is = batch_query_file_stream.is_open() ? batch_query_file_stream : cin;

  vector<pair<int, string> > queries;
  string query_line;
  while (getline(is, query_line)) {
    size_t colon_pos = query_line.find(':');
    if (colon_pos != string::npos && colon_pos < (query_line.size() - 1)) {
      queries.push_back(make_pair(atoi(query_line.substr(0, colon_pos).c_str()), query_line.substr(colon_pos + 1)));
    } else {
      queries.push_back(make_pair(0, query_line));
    }
  }

  if (warmup) {
    warm_up_mode_ = true;
    for (int i = 0; i < static_cast<int> (queries.size()); ++i) {

      ExecuteQuery(queries[i].second, queries[i].first);
    }

    index_reader_.ResetStats();
  }

  warm_up_mode_ = false;
  while (num_timed_runs-- > 0) {
    for (int i = 0; i < static_cast<int> (queries.size()); ++i) {

      ExecuteQuery(queries[i].second, queries[i].first);
    }
  }
}

// Loads collection-wide statistics and structural flags from the index meta
// file, optionally asks the user whether available precomputed scores should
// be used, and verifies that the configured query algorithm is compatible
// with the index layout (layered vs. not, overlapping vs. disjoint layers,
// and number of layers).  Logs a fatal error on an incompatible combination.
void LocalQueryProcessor::LoadIndexProperties() {
  // Total number of documents in the collection.
  collection_total_num_docs_ = atol(index_reader_.meta_info().GetValue(meta_properties::kTotalNumDocs).c_str());
  if (collection_total_num_docs_ <= 0) {
    GetErrorLogger().Log("The '" + string(meta_properties::kTotalNumDocs) + "' value in the loaded index meta file seems to be incorrect.", false);
  }

  // NOTE(review): this variable is unsigned, so the '<= 0' checks below can
  // only ever catch the value 0 -- a negative atol() result wraps around to a
  // huge positive number and passes the check.
  uint64_t collection_total_document_lengths = atol(index_reader_.meta_info().GetValue(meta_properties::kTotalDocumentLengths).c_str());
  if (collection_total_document_lengths <= 0) {
    GetErrorLogger().Log("The '" + string(meta_properties::kTotalDocumentLengths) + "' value in the loaded index meta file seems to be incorrect.", false);
  }

  // Fall back to an average document length of 1 when the stats are unusable
  // (avoids a divide-by-zero below).
  if (collection_total_num_docs_ <= 0 || collection_total_document_lengths <= 0) {
    collection_average_doc_len_ = 1;
  } else {
    collection_average_doc_len_ = collection_total_document_lengths / collection_total_num_docs_;
  }

  // Positions can only be used when the index actually stores them.
  if (!index_reader_.includes_positions()) {
    use_positions_ = false;
  }

  // Determine whether this index is layered and whether the index layers are overlapping.
  // From this info, we can determine the query processing mode.
  KeyValueStore::KeyValueResult<long int> layered_index_res = index_reader_.meta_info().GetNumericalValue(meta_properties::kLayeredIndex);
  KeyValueStore::KeyValueResult<long int> overlapping_layers_res = index_reader_.meta_info().GetNumericalValue(meta_properties::kOverlappingLayers);
  KeyValueStore::KeyValueResult<long int> num_layers_res = index_reader_.meta_info().GetNumericalValue(meta_properties::kNumLayers);
  KeyValueStore::KeyValueResult<long int> precomputed_score_res = index_reader_.meta_info().GetNumericalValue(meta_properties::kIncludesPrecomputedScores);


  // TODO:
  // If there are errors reading the values for these keys (most likely missing value), we assume they're false
  // (because that would require updating the index meta file generation in some places, which should be done eventually).
  index_layered_ = layered_index_res.error() ? false : layered_index_res.value_t();
  index_overlapping_layers_ = overlapping_layers_res.error() ? false : overlapping_layers_res.value_t();
  index_num_layers_ = num_layers_res.error() ? 1 : num_layers_res.value_t();
  index_included_precomputed_score_ = precomputed_score_res.error() ? false : precomputed_score_res.value_t();


  // When the index ships precomputed scores, interactively ask whether they
  // should be used.  Any answer other than exactly "y" means "no".
  // NOTE(review): index_use_precomputed_score_ is left untouched when the
  // index has no precomputed scores -- presumably it is initialized in the
  // constructor; verify.
  if(index_included_precomputed_score_){
	  cout << "--->External pre-computed scores are ready. Do you want the system to use them ONLY for computing[y,N]:";
	  string tempInputCommandLine = "";
	  //string defaultCommandOption = "y";
	  getline(cin,tempInputCommandLine);
	  /*
	  if(tempInputCommandLine != ""){
		  tempInputCommandLine = defaultCommandOption;
	  }
	  */
	  if(tempInputCommandLine == "y"){
		  index_use_precomputed_score_ = true;
	  }
	  else{
		  index_use_precomputed_score_ = false;
	  }
  }


  // Validate the selected algorithm against the index layout; kDefault picks
  // a safe algorithm based on the layout instead.
  bool inappropriate_algorithm = false;
  switch (query_algorithm_) {
    case kDefault:  // Choose a conservative algorithm based on the index properties.
      // Note that for a layered index with overlapping layers, we can do non-layered processing
      // by just opening the last layer from each list (which contains all the docIDs in the entire list).
      if (!index_layered_ || index_overlapping_layers_) {
        query_algorithm_ = kDaatAnd;  // TODO: Default should probably be an OR mode algorithm.
        break;
      }
      if (index_layered_ && !index_overlapping_layers_) {
        query_algorithm_ = kLayeredTaatOrEarlyTerminated;
        break;
      }
      break;
    case kDaatAnd:
    case kDaatOr:
    case kWand:  // TODO: For WAND, only need a single layered index, but need term upperbounds, which is not yet supported.
    case kDualLayeredWand:
    case kMaxScore:  // TODO: For MaxScore, only need a single layered index, but need term upperbounds, which is not yet supported.
    case kDualLayeredMaxScore:
    case kDaatAndTopPositions:
      // Single-layer algorithms: reject layered indexes with disjoint layers.
      if (index_layered_ && !index_overlapping_layers_) {
        inappropriate_algorithm = true;
      }
      break;
    case kDualLayeredOverlappingDaat:
    case kDualLayeredOverlappingMergeDaat:
      // These require exactly two overlapping layers.
      if (!index_layered_ || !index_overlapping_layers_ || index_num_layers_ != 2) {
        inappropriate_algorithm = true;
      }
      break;
    case kLayeredTaatOrEarlyTerminated:
    case kMultiLayeredDaatOr:
    case kMultiLayeredDaatOrMaxScore:
      // These require a layered index with disjoint (non-overlapping) layers.
      if (!index_layered_ || index_overlapping_layers_) {
        inappropriate_algorithm = true;
      }
      break;
    case kTaatOr:
      GetErrorLogger().Log("The selected query algorithm is not yet supported.", true);
      break;
    default:
      assert(false);
  }

  if (inappropriate_algorithm) {
    GetErrorLogger().Log("The selected query algorithm is not appropriate for this index type.", true);
  }
}

// Dumps the key collection statistics used for ranking to stdout.
void LocalQueryProcessor::PrintQueryingParameters() {
  cout << "--->collection_total_num_docs_: " << collection_total_num_docs_ << endl
       << "--->collection_average_doc_len_: " << collection_average_doc_len_ << endl;
}



// Accessor for the raw posting-results buffer.  Ownership stays with this
// LocalQueryProcessor; callers must not free the returned pointer.
POSTING_RESULT* LocalQueryProcessor::posting_results_ptr(){
	return posting_results_ptr_;
}

void LocalQueryProcessor::UpdateExternalIndexFromHardDrive(){
	// for different source, we have different logic to update the external index.
	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kNewScoreIntermediateFileForPruning));
	ifstream inputFileHandler(inputFileName.c_str());
	string currentLine;
	vector<string> currentLineElements;
	string query_term = "";
	string doc_id_in_string_format = "";
	uint32_t doc_id = 0;
	string new_score_in_string_format = "";
	float new_score = 0.0;
	while ( inputFileHandler.good() )
	{
		getline (inputFileHandler,currentLine);
		if(currentLine != ""){
			boost::algorithm::trim(currentLine);
			boost::algorithm::split(currentLineElements, currentLine, boost::algorithm::is_any_of(" ") );
			if(currentLineElements.size() != 3){
				// cout << "vital problem, mark4" << endl;
				exit(1);
			}
			else{
				query_term = currentLineElements[0];
				doc_id_in_string_format = currentLineElements[1];
				doc_id = strtoul(doc_id_in_string_format.c_str(), NULL, 0);

				new_score_in_string_format = currentLineElements[2];
				new_score = atof(new_score_in_string_format.c_str());
				cout << "--->query_term:" << query_term << " doc_id:" << doc_id << " new_score:" << new_score << endl;
				UpdateInvertedIndexesPreComputedScores(query_term, doc_id, new_score);
			}
		}
	}
	inputFileHandler.close();
}





// Factory for the cache policy used to access the on-disk index.
// Configuration precedence: memory-mapped, then fully memory-resident,
// falling back to LRU-paged access. Caller owns the returned object.
CacheManager* LocalQueryProcessor::GetCacheManager(const char* index_filename) const {
  const bool use_memory_map = IndexConfiguration::GetResultValue(Configuration::GetConfiguration().GetBooleanValue(config_properties::kMemoryMappedIndex), false);
  const bool fully_resident = IndexConfiguration::GetResultValue(Configuration::GetConfiguration().GetBooleanValue(config_properties::kMemoryResidentIndex), false);

  if (use_memory_map) {
    return new MemoryMappedCachePolicy(index_filename);
  }
  if (fully_resident) {
    return new FullContiguousCachePolicy(index_filename);
  }
  return new LruCachePolicy(index_filename);
}

// Returns a reader for the external index when the chosen algorithm consumes
// it, or NULL otherwise. Caller owns the returned reader.
const ExternalIndexReader* LocalQueryProcessor::GetExternalIndexReader(QueryAlgorithm query_algorithm, const char* external_index_filename) const {
  // Only these algorithms make use of the external index data.
  bool attach_external_index;
  switch (query_algorithm) {
    case kDaatAnd:
    case kDaatOr:
    case kMaxScore:
    case kDualLayeredMaxScore:
      attach_external_index = true;
      break;
    default:
      attach_external_index = false;
      break;
  }

  if (!attach_external_index) {
    GetDefaultLogger().Log("External Index has NOT been attached --- Take Care.", false);
    return NULL;
  }
  GetDefaultLogger().Log("External Index has been attached.", false);
  return new ExternalIndexReader(external_index_filename);
}


