//==============================================================================================================================================================
// Author(s): Roman Khmelichek, Wei Jiang
//
// This class takes a standard index as input and splits the inverted lists into several pseudo lists, which we call layers here. Successive layers contain
// documents whose scores are lower than the previous layer(s). Each layer has a threshold score, which is the maximum partial BM25 score of a document in the
// list. Successive layers can also be overlapping, meaning that they will also contain the documents of all the previous layers.
//
// We assume here that any single inverted list can fit completely into main memory, mainly for simplicity. If not, we'd have to first split up each list,
// sort each piece individually by score, write out each piece to disk, then do a merge of the score sorted lists. Then, we wouldn't have to load the whole
// list into main memory in order to layer it, and it would be fully I/O efficient. However, in practice, loading the whole list into main memory is reasonable.
//==============================================================================================================================================================

#include "index_layerify.h"


#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include <iostream>
#include <iomanip>
#include <limits>
#include <fstream>
#include <sstream>

#include "coding_policy_helper.h"
#include "config_file_properties.h"
#include "configuration.h"
#include "external_index.h"
#include "globals.h"
#include "index_build.h"
#include "index_merge.h"
#include "index_reader.h"
#include "key_value_store.h"
#include "logger.h"
#include "meta_file_properties.h"
#include "timer.h"
using namespace std;

/**************************************************************************************************************************************************************
 * LayeredIndexGenerator
 *
 **************************************************************************************************************************************************************/
// Constructs the generator: sets the (currently hard-coded) output directory, creates the
// external-index and index/lexicon builders, opens the input index for merge-style reading,
// copies the input index's coding policies and meta statistics, and loads the layering
// parameters (overlap flag, number of layers, strategy) from the configuration file.
LayeredIndexGenerator::LayeredIndexGenerator(const IndexFiles& input_index_files, const string& output_index_prefix) :
  output_index_files_(output_index_prefix),
  index_(NULL),
  external_index_builder_(NULL),
  index_builder_(NULL),
  includes_contexts_(false),
  includes_positions_(false),
  overlapping_layers_(false),
  num_layers_(0),
  doc_id_compressor_(CodingPolicy::kDocId),
  frequency_compressor_(CodingPolicy::kFrequency),
  position_compressor_(CodingPolicy::kPosition),
  block_header_compressor_(CodingPolicy::kBlockHeader),
  total_num_docs_(0),
  total_unique_num_docs_(0),
  total_document_lengths_(0),
  document_posting_count_(0),
  index_posting_count_(0),
  first_doc_id_in_index_(0),
  last_doc_id_in_index_(0) {

  // Put the output index into the specific dir.
  // NOTE(review): the output directory is a machine-specific absolute path selected by
  // commenting/uncommenting lines below; consider promoting it to a configuration property.

  // option1: output the thing into the partial BM25 dir
  // for the machine dodo
  // output_index_files_.SetDirectory("/home/diaosi/outputDirForIndexes/prunedGov2Index/prunedGov2IndexBasedOn_PartialBM25");
  // for the machine pangolin
  output_index_files_.SetDirectory("/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForIndexes/prunedGov2IndexBasedOn_PartialBM25");

  // option2: output the thing into the machine learned dir
  // for the machine dodo
  // output_index_files_.SetDirectory("/home/diaosi/outputDirForIndexes/prunedGov2Index/prunedGov2IndexBasedOn_Machine_Learned");

  // The external index builder is shared with (and used through) the index builder.
  external_index_builder_ = new ExternalIndexBuilder(output_index_files_.external_index_filename().c_str());
  index_builder_ = new IndexBuilder(output_index_files_.lexicon_filename().c_str(), output_index_files_.index_filename().c_str(), block_header_compressor_,
                                    external_index_builder_);

  // Open the input index with a merge-oriented cache policy; both objects are handed to
  // the Index constructed below (presumably taking ownership — see ~LayeredIndexGenerator).
  CacheManager* cache_policy = new MergingCachePolicy(input_index_files.index_filename().c_str());
  IndexReader* index_reader = new IndexReader(IndexReader::kMerge, *cache_policy, input_index_files.lexicon_filename().c_str(),
                                              input_index_files.document_map_basic_filename().c_str(), input_index_files.document_map_extended_filename().c_str(),
                                              input_index_files.meta_info_filename().c_str(), false);

  // Coding policy for the remapped index remains the same as that of the original index.
  coding_policy_helper::LoadPolicyAndCheck(doc_id_compressor_, index_reader->meta_info().GetValue(meta_properties::kIndexDocIdCoding), "docID");
  coding_policy_helper::LoadPolicyAndCheck(frequency_compressor_, index_reader->meta_info().GetValue(meta_properties::kIndexFrequencyCoding), "frequency");
  coding_policy_helper::LoadPolicyAndCheck(position_compressor_, index_reader->meta_info().GetValue(meta_properties::kIndexPositionCoding), "position");
  coding_policy_helper::LoadPolicyAndCheck(block_header_compressor_, index_reader->meta_info().GetValue(meta_properties::kIndexBlockHeaderCoding),
                                           "block header");

  // NOTE(review): both flags are initialized to false in the init list above, so these
  // checks can only re-assign false; if the intent was to mirror the input index, they
  // should become true when the reader reports contexts/positions — confirm.
  if (!index_reader->includes_contexts())
    includes_contexts_ = false;

  if (!index_reader->includes_positions())
    includes_positions_ = false;

  // These must match in the layered index, except 'index_posting_count_' which is larger if the index layers are overlapping.
  total_num_docs_ = IndexConfiguration::GetResultValue(index_reader->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), false);
  total_unique_num_docs_ = IndexConfiguration::GetResultValue(index_reader->meta_info().GetNumericalValue(meta_properties::kTotalUniqueNumDocs), false);
  total_document_lengths_ = IndexConfiguration::GetResultValue(index_reader->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), false);
  document_posting_count_ = IndexConfiguration::GetResultValue(index_reader->meta_info().GetNumericalValue(meta_properties::kDocumentPostingCount), false);
  index_posting_count_ = IndexConfiguration::GetResultValue(index_reader->meta_info().GetNumericalValue(meta_properties::kIndexPostingCount), false);
  first_doc_id_in_index_ = IndexConfiguration::GetResultValue(index_reader->meta_info().GetNumericalValue(meta_properties::kFirstDocId), false);
  last_doc_id_in_index_ = IndexConfiguration::GetResultValue(index_reader->meta_info().GetNumericalValue(meta_properties::kLastDocId), false);

  index_ = new Index(cache_policy, index_reader);

  // Load the layering properties from the configuration file.
  overlapping_layers_ = Configuration::GetResultValue(Configuration::GetConfiguration().GetBooleanValue(config_properties::kOverlappingLayers));
  num_layers_ = Configuration::GetResultValue(Configuration::GetConfiguration().GetNumericalValue(config_properties::kNumLayers));
  if (num_layers_ <= 0 || num_layers_ > MAX_LIST_LAYERS) {
    Configuration::ErroneousValue(config_properties::kNumLayers, Stringify(num_layers_));
  }
  layering_strategy_ = Configuration::GetResultValue(Configuration::GetConfiguration().GetStringValue(config_properties::kLayeringStrategy));

  assert(includes_positions_ == false);  // TODO: We don't support layered indices with positions yet.
}

// Releases the opened index (which presumably owns the cache policy and reader passed
// to it in the constructor) and the output index builder.
// NOTE(review): 'external_index_builder_' is allocated in the constructor but not deleted
// here — verify whether IndexBuilder takes ownership of it; otherwise this leaks.
LayeredIndexGenerator::~LayeredIndexGenerator() {
  delete index_;
  delete index_builder_;
}

void LayeredIndexGenerator::OutputTrecIDAndDocIDAndDocSizeInWordsToScreen(){
	cout << "LayeredIndexGenerator::OutputTrecIDAndDocIDToScreen() called." << endl;
	for(uint32_t doc_id = 0; doc_id <= 25205178; doc_id++){
		string currentTrecID = index_->index_reader()->document_map().GetDocumentNumber(doc_id);
		int currentDocSizeInWords = index_->index_reader()->document_map().GetDocumentLength(doc_id);
		cout << currentTrecID << " " << doc_id << " " << currentDocSizeInWords << endl;
	}
}





// Loads the two auxiliary files needed by the "second probability factor":
//   step1: query_length_probability_map_ (query length -> P(query has that many terms));
//   step2: the four docID -> Xdoc value maps (gold standard, 1D, 2D, Good-Turing smoothed).
void LayeredIndexGenerator::LoadUpAuxFilesForSecondProbabilityFactor(){
	cout << "LayeredIndexGenerator::LoadUpAuxFilesForSecondProbabilityFactor() called." << endl;

	// step1: fill the map<int,float> query_length_probability_map_
	// key: query length
	// value: probability of being a K term query
	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kQueryLengthDistributionFileName));
	cout << "inputFileName:" << inputFileName << endl;

	string currentLine;
	ifstream inputfile(inputFileName.c_str());

	// skip the only headline
	// e.g. queryLength probability(k term query)
	getline (inputfile,currentLine);

	// Idiomatic read loop: iterate on getline() success instead of checking
	// stream.good() before each read.
	while (getline(inputfile, currentLine)) {
		boost::algorithm::trim(currentLine);
		if (currentLine.empty())
			continue;
		istringstream iss(currentLine);

		string queryLengthInStringFormat;
		string probablityOfBeingAKTermQueryInStringFormat;
		iss >> queryLengthInStringFormat;
		iss >> probablityOfBeingAKTermQueryInStringFormat;

		// Bug fix: the query length is integral, so parse it with atoi() instead of
		// the previous atof() + implicit float-to-int conversion.
		int queryLength = atoi(queryLengthInStringFormat.c_str());
		float probablityOfBeingAKTermQuery = atof(probablityOfBeingAKTermQueryInStringFormat.c_str());

		query_length_probability_map_[queryLength] = probablityOfBeingAKTermQuery;
	}
	inputfile.close();

	// step2: fill the following dicts
	// map<string,float> docID_With_Xdoc_Value_goldStandarded_map_;
	// map<string,float> docID_With_Xdoc_Value_1D_map_;
	// map<string,float> docID_With_Xdoc_Value_2D_map_;
	// map<string,float> docID_With_Xdoc_Value_goodTurning_map_;
	// key: the docID (as a string; the trecID column in the file is read but unused)
	// value: Xdoc value
	inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTrecIDWithXdocValuesFileName));
	cout << "inputFileName:" << inputFileName << endl;
	ifstream inputfile2(inputFileName.c_str());

	// skip the only headline
	getline (inputfile2,currentLine);

	while (getline(inputfile2, currentLine)) {
		boost::algorithm::trim(currentLine);
		if (currentLine.empty())
			continue;
		istringstream iss(currentLine);

		// Input columns: <trecID> <docID> <goldStandard> <1D> <2D> <goodTurning>.
		string trecIDInStringFormat;
		string docIDInStringFormat;
		string XdocValueGoldStandardInStringFormat;
		string XdocValue1DInStringFormat;
		string XdocValue2DInStringFormat;
		string XdocValueGoodTurningInStringFormat;

		iss >> trecIDInStringFormat;
		iss >> docIDInStringFormat;
		iss >> XdocValueGoldStandardInStringFormat;
		iss >> XdocValue1DInStringFormat;
		iss >> XdocValue2DInStringFormat;
		iss >> XdocValueGoodTurningInStringFormat;

		float XdocValueGoldStandard = atof(XdocValueGoldStandardInStringFormat.c_str());
		float XdocValue1D = atof(XdocValue1DInStringFormat.c_str());
		float XdocValue2D = atof(XdocValue2DInStringFormat.c_str());
		float XdocValueGoodTurning = atof(XdocValueGoodTurningInStringFormat.c_str());

		docID_With_Xdoc_Value_goldStandarded_map_[docIDInStringFormat] = XdocValueGoldStandard;
		docID_With_Xdoc_Value_1D_map_[docIDInStringFormat] = XdocValue1D;
		docID_With_Xdoc_Value_2D_map_[docIDInStringFormat] = XdocValue2D;
		docID_With_Xdoc_Value_goodTurning_map_[docIDInStringFormat] = XdocValueGoodTurning;
	}
	inputfile2.close();

	// Bug fix: guard the DEBUG lookup with count(); operator[] on a missing key would
	// insert a default entry into an empty map and defeat the emptiness check below.
	if (query_length_probability_map_.count(1) > 0)
		cout << "query_length_probability_map_[1]:" << query_length_probability_map_[1] << endl;

	if(query_length_probability_map_.size() == 0 or docID_With_Xdoc_Value_goldStandarded_map_.size() == 0 or docID_With_Xdoc_Value_1D_map_.size() == 0 or docID_With_Xdoc_Value_2D_map_.size() == 0 or docID_With_Xdoc_Value_goodTurning_map_.size() == 0){
		GetDefaultLogger().Log("Load Up Aux Files For 2ed Probability Factor Elements NOT Done--- Take Care", false);
	}
	else{
		// NOTE(review): the messages below say "trecID" but the maps are keyed by docID string.
		GetDefaultLogger().Log(Stringify(query_length_probability_map_.size()) + " <queryLength,probabilityOfBeingAKTermQuery> pairs have been loaded.", false);
		GetDefaultLogger().Log(Stringify(docID_With_Xdoc_Value_goldStandarded_map_.size()) + " <trecID,XdocValueGoldStandarded> pairs have been loaded.", false);
		GetDefaultLogger().Log(Stringify(docID_With_Xdoc_Value_1D_map_.size()) + " <trecID,XdocValue1D> pairs have been loaded.", false);
		GetDefaultLogger().Log(Stringify(docID_With_Xdoc_Value_2D_map_.size()) + " <trecID,XdocValue2D> pairs have been loaded.", false);
		GetDefaultLogger().Log(Stringify(docID_With_Xdoc_Value_goodTurning_map_.size()) + " <trecID,XdocValueGoodTurning> pairs have been loaded.", false);
	}
}

// Fills selected_terms_map_ with one entry per trimmed, non-empty line of the configured
// query-terms file; the mapped value (1) is just a set-membership marker.
void LayeredIndexGenerator::LoadUpSelectedTerms(){
	cout << "LayeredIndexGenerator::LoadUpSelectedTerms() called." << endl;
	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kQueryTermsONLYInputfileName));
	cout << "inputFileName:" << inputFileName << endl;

	string currentLine;
	ifstream inputfile(inputFileName.c_str());
	// Idiomatic read loop: iterate on getline() success instead of stream.good().
	while (getline(inputfile, currentLine)) {
		// Bug fix: trim BEFORE the emptiness test, so a whitespace-only line can no
		// longer slip through and insert an empty-string term.
		boost::algorithm::trim(currentLine);
		if (currentLine != "")
			selected_terms_map_[currentLine] = 1;
	}
	inputfile.close();

	if(selected_terms_map_.size() == 0){
		GetDefaultLogger().Log("Load Up selected_terms_map_ NOT Done--- Take Care", false);
	}
	else{
		GetDefaultLogger().Log(Stringify(selected_terms_map_.size()) + " terms have been loaded.", false);
	}
}

void LayeredIndexGenerator::LoadUpTermIDANDTermPairs(){
	cout << "LayeredIndexGenerator::LoadUpTermIDANDTermPairs() called." << endl;
	// The purpose is to fill the following data structure: docIDWithNumOfPostingsRecordedDict_
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTermANDTermIDFileName));
    cout << "inputFileName: " << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termIDInStringFormat;
			string termInStringFormat;
			uint32_t termIDInUint32_tFormat;

		    iss >> termIDInStringFormat;
		    iss >> termInStringFormat;

		    termIDInUint32_tFormat = atoi(termIDInStringFormat.c_str());


		    if(termWithTermIDDict_.count(termInStringFormat) > 0){
		    	cout << "duplicated term" << endl;
		    	exit(1);
		    }
		    else{
		    	termWithTermIDDict_[termInStringFormat] = termIDInUint32_tFormat;
		    	termIDWithTermDict_[termIDInUint32_tFormat] = termInStringFormat;
		    }
		}
	}

	inputfile.close();

	// for DEBUG
	cout << "termWithTermIDDict_['soalr']: " << termWithTermIDDict_["soalr"]<< endl;
	cout << "termWithTermIDDict_['so']: " << termWithTermIDDict_["so"] << endl;
	cout << "termIDWithTermDict_[33139651]: " << termIDWithTermDict_[33139651]<< endl;
	cout << "termIDWithTermDict_[33143411]: " << termIDWithTermDict_[33143411]<< endl;

    if(termWithTermIDDict_.size() == 0){
	    GetDefaultLogger().Log("Load Up termWithTermIDDict_ OR termIDWithTermDict_ NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(termWithTermIDDict_.size()) + " <term,termID> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(termIDWithTermDict_.size()) + " <termID,term> pairs have been loaded.", false);
    }
}

void LayeredIndexGenerator::LoadUpDocIDANDNumOfPostingPairs(){
	cout << "LayeredIndexGenerator::LoadUpDocIDANDNumOfPostingPairs() called." << endl;
	// The purpose is to fill the following data structure: docIDWithNumOfPostingsRecordedDict_
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocIDNumOfPostingsRecordedFileName));
    cout << "inputFileName: " << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string currentDocIDInStringFormat;
			string currentNumOfPostingsRecordedInStringFormat;
			int currentDocIDInIntFormat;
			int currentNumOfPostingsRecordedInIntFormat;


		    iss >> currentDocIDInStringFormat;
		    iss >> currentNumOfPostingsRecordedInStringFormat;

		    currentDocIDInIntFormat = atoi(currentDocIDInStringFormat.c_str());
		    currentNumOfPostingsRecordedInIntFormat = atoi(currentNumOfPostingsRecordedInStringFormat.c_str());

		    if(docIDWithNumOfPostingsRecordedDict_.count(currentDocIDInIntFormat) > 0){
		    	cout << "duplicate docID" << endl;
		    	exit(1);
		    }
		    else{
		    	docIDWithNumOfPostingsRecordedDict_[currentDocIDInIntFormat] = currentNumOfPostingsRecordedInIntFormat;
		    }
		}
	}


	/*
    // for DEBUG and TEST
	cout << "docIDWithNumOfPostingsRecordedDict_.size(): " << docIDWithNumOfPostingsRecordedDict_.size() << endl;
	cout << "docIDWithNumOfPostingsRecordedDict_[0]: " << docIDWithNumOfPostingsRecordedDict_[0] << endl;
	cout << "docIDWithNumOfPostingsRecordedDict_[25205178]: " << docIDWithNumOfPostingsRecordedDict_[25205178] << endl;
	*/

	inputfile.close();

    if(docIDWithNumOfPostingsRecordedDict_.size() == 0){
	    GetDefaultLogger().Log("Load Up docIDWithNumOfPostingsRecordedDict_ NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(docIDWithNumOfPostingsRecordedDict_.size()) + " <docID,# of postings recorded> pairs have been loaded.", false);
    }
}


void LayeredIndexGenerator::LoadUpProbabilityTableBasedOnListLengthANDRelativeRank(){
	cout << "LayeredIndexGenerator::LoadUpProbabilityTableBasedOnListLengthANDRelativeRank() called." << endl;
	// The purpose is to fill the following data structure: classLabelWithPiecesMetaInfoDict_
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kListLengthANDRelatativeRankAndProbabilitiesFileName));
    cout << "inputFileName: " << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string currentClassLabelInStringFormat;
			string currentNumOfPiecesInClassInStringFormat;
			int currentClassLabelInIntFormat;
			int currentNumOfPiecesInClassInIntFormat;


		    iss >> currentClassLabelInStringFormat;
		    iss >> currentNumOfPiecesInClassInStringFormat;

		    currentClassLabelInIntFormat = atoi(currentClassLabelInStringFormat.c_str());
		    currentNumOfPiecesInClassInIntFormat = atoi(currentNumOfPiecesInClassInStringFormat.c_str());

		    if(classLabelWithPiecesMetaInfoDict_.count(currentClassLabelInIntFormat) > 0){
		    	cout << "duplicate class label" << endl;
		    	exit(1);
		    }
		    else{
		    	classLabelWithPiecesMetaInfoDict_[currentClassLabelInIntFormat] = {};
		    }

		    // control how many times it do.
		    for(unsigned int i = 0; i < currentNumOfPiecesInClassInIntFormat; i++){
			    string currentPieceProbabilityInStringFormat;
			    float currentPieceProbabilityInFloatFormat;
			    iss >> currentPieceProbabilityInStringFormat;
			    // Does the problem lie here?
			    currentPieceProbabilityInFloatFormat = atof(currentPieceProbabilityInStringFormat.c_str());
			    if (classLabelWithPiecesMetaInfoDict_[currentClassLabelInIntFormat].count(i) > 0){
			    	cout << "duplicate piece label in current class" << endl;
			    	exit(1);
			    }
			    else{
				    // for DEBUG Only
				    if (currentPieceProbabilityInFloatFormat == 0.0){
				    	cout << "Check NOT Passed." << endl;
				    	cout << "currentClassLabelInStringFormat: " << currentClassLabelInStringFormat << endl;
				    	cout << "currentNumOfPiecesInClassInIntFormat: " << currentNumOfPiecesInClassInIntFormat << endl;
				    	cout << "currentPieceProbabilityInStringFormat: " << currentPieceProbabilityInStringFormat << endl;
				    	cout << "currentPieceProbabilityInFloatFormat: " << currentPieceProbabilityInFloatFormat << endl;
				    	exit(1);
				    }
			    	classLabelWithPiecesMetaInfoDict_[currentClassLabelInIntFormat][i] = currentPieceProbabilityInFloatFormat;
			    }
		    }
		}
	}

	/*
    // for DEBUG and TEST
	cout << "classLabelWithPiecesMetaInfoDict_.size(): " << classLabelWithPiecesMetaInfoDict_.size() << endl;
	cout << "classLabelWithPiecesMetaInfoDict_[0][0]: " << classLabelWithPiecesMetaInfoDict_[0][0] << endl;
	cout << "classLabelWithPiecesMetaInfoDict_[69][17]: " << classLabelWithPiecesMetaInfoDict_[69][17] << endl;
	cout << "classLabelWithPiecesMetaInfoDict_[70][18]: " << classLabelWithPiecesMetaInfoDict_[70][18] << endl;
	exit(1);
	*/

	inputfile.close();

    if(classLabelWithPiecesMetaInfoDict_.size() == 0){
	    GetDefaultLogger().Log("Load Up classLabelWithPiecesMetaInfoDict_ NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(classLabelWithPiecesMetaInfoDict_.size()) + " <classLabel,set of probabilies> pairs have been loaded.", false);
    }
}

void LayeredIndexGenerator::LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable(){
	cout << "LayeredIndexGenerator::LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable() called." << endl;
	cout << "Get the smoothed version of the probability of the combination of the second and third factor." << endl;
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPartialBM25ScoreRangesAndProbabilitiesFileName));
    cout << "inputFileName:" << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	// skip the headline
	getline (inputfile,currentLine);

	while ( inputfile.good() )
	{
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string currentClassLabelInStringFormat;	// currently used
			string currentClassPartialBM25LOWERBoundInStringFormat; // currently used
			string currentClassPartialBM25AVERAGEBoundXAxisInStringFormat;	// currently NOT used
			string currentNumofTotalPostingsInStringFormat;	// currently NOT used
			string currentNumofTOP10PostingsInStringFormat;	// currently NOT used
			string currentPercentageYAxisInStringFormat;	// currently used

		    iss >> currentClassLabelInStringFormat;
		    iss >> currentClassPartialBM25LOWERBoundInStringFormat;
		    iss >> currentClassPartialBM25AVERAGEBoundXAxisInStringFormat;
		    iss >> currentNumofTotalPostingsInStringFormat;
		    iss >> currentNumofTOP10PostingsInStringFormat;
		    iss >> currentPercentageYAxisInStringFormat;

		    int currentClassLabelInIntFormat = atoi(currentClassLabelInStringFormat.c_str());
		    float currentClassPartialBM25LOWERBoundInFloatFormat = atof(currentClassPartialBM25LOWERBoundInStringFormat.c_str());
		    float currentPercentageYAxisInFloatFormat = atof(currentPercentageYAxisInStringFormat.c_str());

			if (class_label_with_lower_bounds_map_.count(currentClassLabelInIntFormat) > 0 || class_label_with_probability_map_.count(currentClassLabelInIntFormat) > 0){
				cout << "Duplicated class label added. Critical Error. Mark4" << endl;
				exit(1);
			}
			else{
				class_label_with_lower_bounds_map_[currentClassLabelInIntFormat] = currentClassPartialBM25LOWERBoundInFloatFormat;
				class_label_with_probability_map_[currentClassLabelInIntFormat] = currentPercentageYAxisInFloatFormat;
			}
		}
	}
	inputfile.close();

	// for debug ONLY
	cout << "class_label_with_lower_bounds_map_.size():" << class_label_with_lower_bounds_map_.size() << endl;
	cout << "class_label_with_probability_map_.size():" << class_label_with_probability_map_.size() << endl;

    if(class_label_with_lower_bounds_map_.size() == 0 || class_label_with_probability_map_.size() == 0){
	    GetDefaultLogger().Log("Load Up partialBM25 probabilities related map NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(class_label_with_lower_bounds_map_.size()) + " <classLabel,lowerBound> pairs have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(class_label_with_probability_map_.size()) + " <classLabel,probability> pairs have been loaded.", false);
    }
}

// Fills term_with_selected_postings_map_: term -> set of randomly selected postings,
// identified by their local (within-list) index. The inner map's mapped value is an
// unused empty string; only key membership matters.
void LayeredIndexGenerator::LoadUpRandomlySelectedPostings(){
	cout << "LayeredIndexGenerator::LoadUpRandomlySelectedPostings() called." << endl;

	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kRandomlySelectedPostingListFileName));
	cout << "inputFileName:" << inputFileName << endl;

	string currentLine;
	ifstream inputfile(inputFileName.c_str());

	// Idiomatic read loop: iterate on getline() success instead of stream.good().
	while (getline(inputfile, currentLine)) {
		boost::algorithm::trim(currentLine);
		if (currentLine.empty())
			continue;
		istringstream iss(currentLine);

		string currentTerm;
		string localListIndexInStringFormat;
		iss >> currentTerm;
		iss >> localListIndexInStringFormat;

		// Bug fix: the local list index is integral, so parse it with atoi() instead
		// of the previous atof() + implicit float-to-int conversion.
		int localListIndex = atoi(localListIndexInStringFormat.c_str());

		if (term_with_selected_postings_map_[currentTerm].count(localListIndex) > 0){
			cout << "Duplicated posting selected. Critical Error. Mark4" << endl;
			exit(1);
		}
		term_with_selected_postings_map_[currentTerm][localListIndex] = "";
	}
	inputfile.close();

	// for debug ONLY
	cout << "term_with_selected_postings_map_.size():" << term_with_selected_postings_map_.size() << endl;

	if(term_with_selected_postings_map_.size() == 0){
		GetDefaultLogger().Log("Load Up term with selected postings map NOT Done--- Take Care", false);
	}
	else{
		GetDefaultLogger().Log(Stringify(term_with_selected_postings_map_.size()) + " <term,selectedPostingList> pairs have been loaded.", false);
	}
}

void LayeredIndexGenerator::buildTermIDWithTheirFirstFactorProbabilityMap(){
	// This computation is DONE in main memory
	// Tell me the range of the termID first
	// The variable I want to build is: termID_with_their_first_factor_probability_map_
	cout << "termIDWithTermDict_.size(): " << termIDWithTermDict_.size() << endl;
	cout << "terms_with_corresponding_species_belonging_to_map_.size(): " << terms_with_corresponding_species_belonging_to_map_.size() << endl;
	cout << "freq_first_factor_probability_map_.size(): " << freq_first_factor_probability_map_.size() << endl;
    for( map<uint32_t, string>::iterator it1 = termIDWithTermDict_.begin(); it1 != termIDWithTermDict_.end(); ++it1 ){
    	uint32_t key1 = it1->first;
        string value1 = it1->second;
        // cout << "it1->first: " << it1->first << endl;
        // cout << "it1->second: " << it1->second << endl;
		if (termID_with_their_first_factor_probability_map_.count(key1) > 0 ){
			cout << "Critical Error." << endl;
			cout << "key1: " << key1 << endl;
			cout << "termID_with_their_first_factor_probability_map_[key1]: " << termID_with_their_first_factor_probability_map_[key1] << endl;
			exit(1);
		}
		else{
			// cout << "key1: " << key1 << " assignment."<< endl;
			termID_with_their_first_factor_probability_map_[key1] = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[ termIDWithTermDict_[key1] ] ];
		}
    }

    cout << "DEBUG" << endl;
    cout << "probability of the term 'solar':"<< termID_with_their_first_factor_probability_map_[33143411] << endl;


    if (termID_with_their_first_factor_probability_map_.size() == 0){
    	cout << "termID_with_their_first_factor_probability_map_ has NOT been loaded." << endl;
    	exit(1);
    }
    else{
    	cout << "termID_with_their_first_factor_probability_map_.size(): " << termID_with_their_first_factor_probability_map_.size() << endl;
    }
}

// Fills top10RelatedPostingsDict_: key "<term>_<docID>", value = the number of input
// lines (i.e. query results) in which that posting contributed to a final top-10 document.
void LayeredIndexGenerator::LoadUpFinalTOP10DocumentResultRelatedPostings(){
	cout << "LayeredIndexGenerator::LoadUpFinalTOP10DocumentResultRelatedPostings() called." << endl;
	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTail5KResultsWithRelatedPieceNumFileName) );
	cout << "inputFileName:" << inputFileName << endl;

	string currentLine;
	ifstream inputfile(inputFileName.c_str());

	// Idiomatic read loop: iterate on getline() success instead of stream.good().
	while (getline(inputfile, currentLine)) {
		boost::algorithm::trim(currentLine);
		if (currentLine.empty())
			continue;
		istringstream iss(currentLine);

		// Input columns: <placeHolder0> <docID> <term> <placeHolder1> <placeHolder2>.
		string placeHolder0;
		string docIDInStringFormat;
		string termInStringFormat;
		string placeHolder1;
		string placeHolder2;

		iss >> placeHolder0;
		iss >> docIDInStringFormat;
		iss >> termInStringFormat;
		iss >> placeHolder1;
		iss >> placeHolder2;

		string postingKeyInStringFormat = termInStringFormat + "_" + docIDInStringFormat;

		// A posting can contribute to several queries' top-10 results, so count occurrences.
		if (top10RelatedPostingsDict_.count(postingKeyInStringFormat) > 0){
			top10RelatedPostingsDict_[postingKeyInStringFormat] += 1;
		}
		else{
			top10RelatedPostingsDict_[postingKeyInStringFormat] = 1;
		}
	}
	// Bug fix: close the file before the early-exit error path below, so the handle is
	// not left open when the load check fails.
	inputfile.close();

	cout << "the size of top10RelatedPostingsDict_ is: " << top10RelatedPostingsDict_.size() << endl;
	// Bug fix: guard the DEBUG lookup with count(); operator[] on a missing key would
	// insert a default entry and make the emptiness check below always pass.
	if (top10RelatedPostingsDict_.count("bronx_21911485") > 0)
		cout << "top10RelatedPostingsDict_['bronx_21911485']: " << top10RelatedPostingsDict_["bronx_21911485"] << endl;

	if (top10RelatedPostingsDict_.size() == 0){
		cout << "Loading Problem" << endl;
		exit(1);
	}
}


// Loads the two auxiliary files for the "first probability factor":
//   step1: freq_first_factor_probability_map_ (freq -> Good-Turing probability that a
//          term of that frequency occurs in the next query);
//   step2: terms_with_corresponding_species_belonging_to_map_ (term -> its freq class).
void LayeredIndexGenerator::LoadUpAuxFilesForFirstProbabilityFactor(){
	cout << "LoadUpAuxFilesForFirstProbabilityFactor() called." << endl;
	// step1: fill the map<int,float> freq_first_factor_probability_map_
	// key: # of times this object appears
	// value: the probability that this term will occur in the next query
	// (Updated by Wei 2013/08/30: the file loaded here is actually a set of
	// Good-Turing probabilities.)
	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kFirstProbabilityFactorFileName));
	cout << "inputFileName:" << inputFileName << endl;

	string currentLine;
	ifstream inputfile(inputFileName.c_str());

	// skip 4 headlines
	getline (inputfile,currentLine);
	getline (inputfile,currentLine);
	getline (inputfile,currentLine);
	getline (inputfile,currentLine);

	// Idiomatic read loop: iterate on getline() success instead of stream.good().
	while (getline(inputfile, currentLine)) {
		boost::algorithm::trim(currentLine);
		if (currentLine.empty())
			continue;
		istringstream iss(currentLine);

		// Input columns: <freq> <3 unused columns> <P(term in next query slot)> <P(term in next query)>.
		string freqInStringFormat;
		string placeHolder1;
		string placeHolder2;
		string placeHolder3;
		string probabilityOfATermAppearedInTheNextQuerySlotInStringFormat;
		string probablityOfATermAppearedInTheNextQueryInStringFormat;

		iss >> freqInStringFormat;
		iss >> placeHolder1;
		iss >> placeHolder2;
		iss >> placeHolder3;
		iss >> probabilityOfATermAppearedInTheNextQuerySlotInStringFormat;
		iss >> probablityOfATermAppearedInTheNextQueryInStringFormat;

		// Bug fix: the frequency is integral, so parse it with atoi() instead of the
		// previous atof() + implicit float-to-int conversion.
		int freq = atoi(freqInStringFormat.c_str());

		// Updated by Wei 2013/08/28: it is always correct to use
		// probablityOfATermAppearedInTheNextQuery to compute the first factor
		// probability, but the "slot" probability is used temporarily to stay
		// consistent with the term-oriented / piece-oriented results.
		float first_factor_probablity = atof(probabilityOfATermAppearedInTheNextQuerySlotInStringFormat.c_str());
		// The always correct one:
		// float first_factor_probablity = atof(probablityOfATermAppearedInTheNextQueryInStringFormat.c_str());
		if (first_factor_probablity != 0.0){
			freq_first_factor_probability_map_[freq] = first_factor_probablity;
		}
	}
	inputfile.close();

	// step2: fill the map<string,int> terms_with_corresponding_species_belonging_to_map_
	// key: the terms which have been seen in the training queries
	// value: which freq it belongs to
	inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kFreqCorrespondingTermsFileName));
	cout << "inputFileName:" << inputFileName << endl;
	ifstream inputfile2(inputFileName.c_str());

	while (getline(inputfile2, currentLine)) {
		boost::algorithm::trim(currentLine);
		if (currentLine.empty())
			continue;
		istringstream iss(currentLine);

		// Input columns: <freq> <numTerms> <term1> ... <termN>.
		string freqInStringFormat;
		string numOfTermsAssociatedInStringFormat;
		string currentTerm;
		iss >> freqInStringFormat;
		iss >> numOfTermsAssociatedInStringFormat;
		int freq = atoi(freqInStringFormat.c_str());
		int NUM_OF_TERMS_ASSOCIATED = atoi(numOfTermsAssociatedInStringFormat.c_str());
		for (int tempCounter = 0; tempCounter < NUM_OF_TERMS_ASSOCIATED; tempCounter++){
			iss >> currentTerm;
			terms_with_corresponding_species_belonging_to_map_[currentTerm] = freq;
		}
	}
	inputfile2.close();

	// Bug fix: guard the DEBUG lookups with count(); operator[] on a missing key would
	// insert default entries and defeat the emptiness checks below.
	if (terms_with_corresponding_species_belonging_to_map_.count("soalr") > 0)
		cout << "P['soalr']: " << freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_["soalr"] ] << endl;
	if (terms_with_corresponding_species_belonging_to_map_.count("so") > 0)
		cout << "P['so']: " << freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_["so"] ] << endl;

	if(freq_first_factor_probability_map_.size() == 0 or terms_with_corresponding_species_belonging_to_map_.size() == 0){
		GetDefaultLogger().Log("Load Up Aux Files For 1st Probability Factor NOT Done--- Take Care", false);
	}
	else{
		GetDefaultLogger().Log(Stringify(freq_first_factor_probability_map_.size()) + " <freq,probability> pairs have been loaded.", false);
		GetDefaultLogger().Log(Stringify(terms_with_corresponding_species_belonging_to_map_.size()) + " <term freq appeared in the QL> pairs have been loaded.", false);
	}
}

void LayeredIndexGenerator::LoadUpThreeFeatureValuesForMachineLearnedTraining(){
    // Loads three feature values for each query term seen in the 95K training
    // queries, for use by the machine-learned training:
    //   (1) the length of the term's inverted list,
    //   (2) the term's frequency in the whole collection,
    //   (3) the term's frequency in the query log.
    // Expected input file format, one term per line:
    //   <term> <list_length> <collection_freq> <query_freq>
    // Results are stored into the three query_terms_*_map_ members, keyed by term.
    cout << "LayeredIndexGenerator::LoadUpThreeFeatureValuesForMachineLearnedTraining() called." << endl;
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kThreeFeatureValuesForTrainingIn95KQueries));
    cout << "inputFileName:" << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());
	// Loop on getline() itself: the previous 'while (inputfile.good())' form tested
	// the stream state before reading, so the body ran once more after the final
	// (failed) getline().
	while ( getline(inputfile, currentLine) )
	{
		// Trim before the emptiness test so whitespace-only lines are skipped too;
		// previously they passed the check and produced empty-string map keys.
		boost::algorithm::trim(currentLine);
		if(currentLine != ""){
			istringstream iss( currentLine );
		    string term;
		    string length_of_the_inverted_list_in_string_format;
		    string term_freq_in_collection_in_string_format;
		    string term_freq_in_queries_in_string_format;

			iss >> term;
			iss >> length_of_the_inverted_list_in_string_format;
			iss >> term_freq_in_collection_in_string_format;
			iss >> term_freq_in_queries_in_string_format;

			// They are all in float types
			float length_of_the_inverted_list = atof(length_of_the_inverted_list_in_string_format.c_str());
			float term_freq_in_collection = atof(term_freq_in_collection_in_string_format.c_str());
			float term_freq_in_queries = atof(term_freq_in_queries_in_string_format.c_str());

			query_terms_length_of_the_inverted_index_map_[term] = length_of_the_inverted_list;
			query_terms_term_freq_in_collection_map_[term] = term_freq_in_collection;
			query_terms_term_freq_in_queries_map_[term] = term_freq_in_queries;
		}
	}
	inputfile.close();

	// An empty map means the input file was missing or malformed; warn loudly.
    if(query_terms_length_of_the_inverted_index_map_.size() == 0 or query_terms_term_freq_in_collection_map_.size() == 0 or query_terms_term_freq_in_queries_map_.size() == 0){
	    GetDefaultLogger().Log("Load Up Three Feature Values For Machine Learned Training NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(query_terms_length_of_the_inverted_index_map_.size()) + " <term,length_of_the_inverted_list> have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(query_terms_term_freq_in_collection_map_.size()) + " <term,term_freq_in_collection> have been loaded.", false);
	    GetDefaultLogger().Log(Stringify(query_terms_term_freq_in_queries_map_.size()) + " <term,term_freq_in_queries> have been loaded.", false);
    }

}

void LayeredIndexGenerator::CreatePrunedIndexAuxInfo(){
	  // Writes one line per lexicon term to the kFreqOfQueryTermsInCollection file:
	  //   <term> <inverted-list length> <total frequency of the term in the collection>
	  // The collection frequency is obtained by walking every posting of the term's
	  // inverted list and summing the per-document frequencies.
	  cout << "LayeredIndexGenerator::CreatePrunedIndexAuxInfo() function called in." << endl;
	  string outputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kFreqOfQueryTermsInCollection));
	  ofstream outputFileHandler(outputFileName.c_str());

	  // Traverse the whole lexicon, one inverted list at a time.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  cout << "curr_term:" << curr_term << endl;

		  const int list_length = index_->curr_list_data()->num_docs_complete_list();
		  long collection_frequency = 0;
		  uint32_t next_doc_id = 0;  // NextGEQ() starts scanning from docID 0.

		  // Visit exactly 'list_length' postings; accumulate each one's frequency.
		  for (int postings_seen = 0; postings_seen < list_length; ++postings_seen) {
			  next_doc_id = index_->curr_list_data()->NextGEQ(next_doc_id);
			  collection_frequency += index_->curr_list_data()->GetFreq();
			  ++next_doc_id;  // Step past the posting just consumed.
		  }

		  outputFileHandler << curr_term << " " << list_length << " " << collection_frequency << endl;
	  }

	  outputFileHandler.close();
	  cout << "output aux file for the collection: term_freq_in_collection..DONE" << endl;
	  cout << "LayeredIndexGenerator::CreatePrunedIndexAuxInfo() function called out." << endl;
}

void LayeredIndexGenerator::CreateCutThresholdOfEachTermBasedOnPercentageForMultipleTerms(map<string,int> & queryTerms, bool debugFlag){
	  // For every query term found in the index's lexicon, sorts the term's postings
	  // by partial BM25 score (scoring method code 2, largest to smallest) and
	  // records the score threshold obtained when only the top 1%, 5%, 10%, ..., 90%
	  // of the postings are kept (the 1.0Kept column is always 0.0). One line per
	  // term is written to the kQueryTermThresholdsKeptBasedOnPercentage file.
	  //
	  // queryTerms: terms to process (only the keys are consulted).
	  // debugFlag:  when true, dumps every (docID, score) pair before and after sorting.
	  string outputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kQueryTermThresholdsKeptBasedOnPercentage));
	  ofstream outputFileHandler(outputFileName.c_str());

	  outputFileHandler << "queryTerm" << " " << "0.01Kept" << " " << "0.05Kept" << " " << "0.1Kept" << " " << "0.2Kept" << " " << "0.3Kept" << " " << "0.4Kept" << " " << "0.5Kept" << " " << "0.6Kept" << " " << "0.7Kept" << " " << "0.8Kept" << " " << "0.9Kept" << " " << "1.0Kept"<< endl;

	  const int NUMBER_QUERY_TERMS = queryTerms.size();
	  int number_query_terms_already_deal_with = 0;

	  // Not used by scoring method 2; exists only to satisfy the comparator's constructor.
	  map<string,float> queryTermsProbabilityDistributionMap;

	  // Scoring method codes (kept for reference):
	  // 1: not sorted at all
	  // 2: sorted by partial bm25 score (largest to smallest)
	  // 3: sorted by a machine-learned model (mostly Logistic Regression)
	  // 4: sorted by partial bm25 score (smallest to largest)
	  // 5: sorted by a simpler machine-learned model

	  // Fractions of the list kept, in the same order as the header columns above,
	  // and the matching labels used for the progress output on stdout.
	  const double kKeptFractions[] = {0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9};
	  const char* kKeptLabels[] = {"dot01", "dot05", "dot1", "dot2", "dot3", "dot4", "dot5", "dot6", "dot7", "dot8", "dot9"};
	  const int kNumFractions = sizeof(kKeptFractions) / sizeof(kKeptFractions[0]);

	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  const int scoringMethodCodeForTheTerm = 2;  // 2 == partial BM25, largest to smallest.

		  // Skip lexicon terms that are not part of the query set.
		  if (queryTerms.count(curr_term) == 0) {
			  continue;
		  }

		  cout << "Processing term:" << curr_term << endl;
		  number_query_terms_already_deal_with += 1;

		  // TODO: It's better to reuse the buffer, and resize only when necessary.
		  int num_docs_in_original_list = index_->curr_list_data()->num_docs();

		  if(debugFlag){
			  cout << "number of docs in orginal inverted index:" << num_docs_in_original_list << endl;
		  }

		  // Load the whole list into memory; we need random access for sorting.
		  IndexEntry* index_entry_buffer = new IndexEntry[num_docs_in_original_list];
		  int index_entry_offset = 0;

		  while (index_->NextDocId())
		  {
			  assert(index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_index_entry = index_entry_buffer[index_entry_offset];
			  curr_index_entry.doc_id = index_->curr_doc_id();
			  curr_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  ++index_entry_offset;
		  }  // No more postings in the list.

		  // Need the average document length for computing BM25 scores.
		  // This meta info (total_document_lengths, total_num_docs, average_doc_length)
		  // maybe wrong due to some shortcut I make. (Wei: 2012/06/28)
		  long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
		  long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
		  int average_doc_length = total_document_lengths / total_num_docs;

		  DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, scoringMethodCodeForTheTerm,curr_term,queryTermsProbabilityDistributionMap);

		  if(debugFlag){
			  // Print all the docIDs in the list and their partial BM25 scores,
			  // ordered by docID (the on-disk order).
			  cout << "Printing all (docID,score) pairs ordered by docID: " << curr_term << endl;
			  for (int i = 0; i < index_entry_offset; ++i) {
				  cout << "docID: " << index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparatorWei.score(index_entry_buffer[i]) << endl;
			  }
		  }

		  // Sort the postings by the comparator's score.
		  sort(index_entry_buffer, index_entry_buffer + index_entry_offset, doc_id_score_comparatorWei);

		  if(debugFlag){
			  // Same dump as above, now ordered by score.
			  cout << "Printing all (docID,score) pairs ordered by some kind of score: " << curr_term << endl;
			  for (int i = 0; i < index_entry_offset; ++i) {
				  cout << "docID: " << index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparatorWei.score(index_entry_buffer[i]) << endl;
			  }
		  }

		  // Compute the cut thresholds for this term. Guard against an empty list:
		  // without it, every position computes to 0 and the score() calls below
		  // would read uninitialized memory at index_entry_buffer[0].
		  if (index_entry_offset > 0) {
			  outputFileHandler << curr_term;
			  for (int f = 0; f < kNumFractions; ++f) {
				  // Position of the cut for this kept-fraction (truncating, as before).
				  int position = index_entry_offset * kKeptFractions[f];
				  float threshold = doc_id_score_comparatorWei.score(index_entry_buffer[position]);
				  cout << kKeptLabels[f] << " kept " << position << " " << threshold << endl;
				  outputFileHandler << " " << threshold;
			  }
			  outputFileHandler << " " << "0.0"<< endl;
		  }

		  delete[] index_entry_buffer;
		  cout << "...Done" << endl;
		  cout << endl;

		  // Stop early once all query terms have been processed; no need to keep
		  // traversing the rest of the lexicon.
		  if(number_query_terms_already_deal_with == NUMBER_QUERY_TERMS){
			  break;
		  }
	  }

	  // Shut the embedded Python interpreter down exactly once, after the loop.
	  // Previously Py_Finalize() ran once per matched term inside the loop (and was
	  // skipped entirely when the early break fired); the interpreter may only be
	  // finalized once per initialization.
	  Py_Finalize();

	  outputFileHandler.close();
}

// Placeholder: intended to estimate the memory footprint of the layering process.
// Currently only announces on stdout that it was invoked — no estimation is done yet.
void LayeredIndexGenerator::ComputeHowMuchMemoryWillBeUsed(){
	cout << "The function LayeredIndexGenerator::ComputeHowMuchMemoryWillBeUsed() is called()." << endl;
}

// Test/stub comparator for IndexDocOptimizedEntry ordering experiments.
// Comparator convention reminder:
//   > gives descending order
//   < gives ascending order
// NOTE(review): the previous version returned 'true' unconditionally, which is not
// a strict weak ordering (comp(a, a) must be false) and is undefined behavior when
// handed to std::sort. Returning 'false' makes every pair compare equivalent — a
// valid no-op ordering — until a real scoring comparison is filled in.
bool LayeredIndexGenerator::scoreInTest(const IndexDocOptimizedEntry& l,const IndexDocOptimizedEntry& r){
	  (void)l;  // unused until a real comparison is implemented
	  (void)r;
	  return false;
}
void LayeredIndexGenerator::PrototypingOfThePostingOrientedUniformPruningMethodOptimizedVersion(bool debugFlag){
	cout << "The function LayeredIndexGenerator::PrototypingOfThePostingOrientedUniformPruningMethodOptimizedVersion(...) is called." << endl;
	// seed for generating the random numbers
	srand((unsigned)time(0));

	// The overall weights settings:
	const float a = 0.9;	// the weight for part1
	const float b = 20;	// the weight for part2, try different weights
	const unsigned int BIG_NUMBER = 50000000;	// This BIG NUMBER will be assigned to the init value of the termID
	// for production
	const int NUMBER_OF_TERMS_NEEDED_TO_PROCESS = 39000;	// This value should be set to a little larger than 38449
	// for debug
	// const unsigned int NUMBER_OF_TERMS_NEEDED_TO_PROCESS = 2;	// This value should be set to 1 OR 2 for debug
	int number_terms_processed = 0;

	// Updated by Wei 2013/09/14 night at school
	// Let's init for a very big heap (like the returning results data structures in the query processing do)
	// sub-option1:
	// for production
	// total # of documents in gov2 dataset: 25205179 (25M)
	// const unsigned int NUM_OF_DOCS_IN_GOV2_COLLECTION = 25205179;
	// sub-option2:
	// for debug (using the 1st compressed file in the gov2 dataset)
	// sub_option1:
	// total # of documents in the 1st compressed file:
	// const unsigned int NUM_OF_DOCS_IN_GOV2_COLLECTION = 1092;
	// sub_option2:
	// 1Doc (currently in development)
	// const unsigned int NUM_OF_DOCS_IN_GOV2_COLLECTION = 2;
	// sub_option3:
	// 2Doc
	// const unsigned int NUM_OF_DOCS_IN_GOV2_COLLECTION = 2;
	// 10921Doc
	// const unsigned int NUM_OF_DOCS_IN_GOV2_COLLECTION = 10921;
	// change to the selected parsing index, ONLY has 12 documents
	// const unsigned int NUM_OF_DOCS_IN_GOV2_COLLECTION = 12;
	// change to the selected parsing index, ONLY has 30 documents
	const unsigned int NUM_OF_DOCS_IN_GOV2_COLLECTION = 30;

	// NOT working (been tested on 2013/09/24 afternoon by Wei at school)
	// uint32_t total_num_of_postings_been_allocated = 0;
	// working
	unsigned long total_num_of_postings_been_allocated = 0;
	unsigned int total_num_of_documents_been_processed = 0;
	docPostingsOptimizedTuple* myDocPostingsTupleHeap = new docPostingsOptimizedTuple[NUM_OF_DOCS_IN_GOV2_COLLECTION]; // Using a pointer here

	// init and allocate the memory for the BIG heap
	Timer processing_time;
	double step0_starting_time = processing_time.GetElapsedTime();
	double step0_ending_time = 0.0;
	cout << "step0: allocate the memory for the BIG heap" << endl;
	if(docIDWithNumOfPostingsRecordedDict_.size() == 0){
		cout << "--->Use the internal system setting to set the size of the posting array for each document." << endl;
	}
	else{
		cout << "--->Use the docIDWithNumOfPostingsRecordedDict_ info to set the size of the posting array for each document." << endl;
	}

	for(uint32_t i = 0; i < NUM_OF_DOCS_IN_GOV2_COLLECTION; i++){
		total_num_of_documents_been_processed += 1;
		if(i % 1000000 == 0){
			cout << i << " docs inited." << endl;
		}
		docIDWithTupleCreatedDict_[i] = true;
		// init the MetaInfo_SET
		MetaInfo_SET meta_info_set; //initialization of the MetaInfo_SET for the a specific document.
		meta_info_set.current_doc_id = i; // Not Very Proper cause 0 actually represent a typical gov2 document
		meta_info_set.current_largest_value_of_the_posting_array = 0;
		meta_info_set.current_size_of_the_posting_array_filled = 0;
		meta_info_set.current_size_of_the_posting_array_allocated = 0;
		meta_info_set.current_beginning_index_of_the_posting_array = 0;
		meta_info_set.all_Xdoc = 0.0;
		meta_info_set.selected_Xdoc = 0.0;

		// init the index_doc_entry_buffer buffer
		int num_postings_in_current_doc = 0;
		if (docIDWithNumOfPostingsRecordedDict_.count(i) > 0){
			num_postings_in_current_doc = docIDWithNumOfPostingsRecordedDict_[i];
		}
		else{
			// Just a guess for the average # of postings in the doc. Let's try 250
			// System setting if the external document info is NOT Loaded.
			num_postings_in_current_doc = 2;
		}

		// the extreme case is not attaching memory for storing the posting info
		// IndexDocEntry* index_doc_entry_buffer = NULL;
		// meta_info_set.current_size_of_the_posting_array_allocated = 0;


		// allocate this piece of memory and maintain it
		IndexDocOptimizedEntry* index_doc_entry_buffer = new IndexDocOptimizedEntry[num_postings_in_current_doc];

		// init the value as well. Otherwise, the value will be just random.
		for(int index_doc_entry_offset = 0; index_doc_entry_offset < num_postings_in_current_doc; index_doc_entry_offset++){
			  IndexDocOptimizedEntry& curr_index_doc_entry = index_doc_entry_buffer[index_doc_entry_offset];
			  assert(index_doc_entry_offset < num_postings_in_current_doc);
			  // In debug on 2013/09/24 afternoon by Wei at school
			  // curr_index_doc_entry.term_id = rand() % 37728619;	// total # of terms in the lexicon: 37728619
			  // In production
			  curr_index_doc_entry.term_id = BIG_NUMBER; // Not Very Proper cause 0 actually represent another term

			  // In debug on 2013/09/24 afternoon by Wei at school
			  // curr_index_doc_entry.staticProbability = (float)rand()/(float)RAND_MAX;
			  // In production
			  curr_index_doc_entry.staticProbability = 0.0;
		}
		meta_info_set.current_size_of_the_posting_array_allocated = num_postings_in_current_doc;
		// Updated by Wei 2013/09/24 morning at school
		total_num_of_postings_been_allocated += meta_info_set.current_size_of_the_posting_array_allocated;
		// still regard this as an array here
		myDocPostingsTupleHeap[ i ] = make_pair(meta_info_set, index_doc_entry_buffer);
	}
	cout << "Total # of Postings Been Allocated: " << total_num_of_postings_been_allocated << endl;
	cout << "Total # of Documents Been Processed: " << total_num_of_documents_been_processed << endl;
	step0_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify(step0_ending_time - step0_starting_time), false);
	cout << endl;
	// for DEBUG
	// exit(1);

	/*
	// for debug, fill the following info for each document and will be fine.
	cout << "FAKE step1_3_3_4: all the fields in the first element of the tuple have been faked." << endl;
	double fake_step1_2_3_4_starting_time = processing_time.GetElapsedTime();
	double fake_step1_2_3_4_ending_time = 0.0;
	DocPostingOptimizedCompareClass doc_posting_optimized_compare_class(termID_with_their_first_factor_probability_map_, a, b, 0.0, 0.0);
	for(uint32_t i = 0; i < NUM_OF_DOCS_IN_GOV2_COLLECTION; i++){
		// cout << "i: " << i << endl;
		if( i % 10000 == 0){
			cout << "i: " << i << " processed." << endl;
		}
		myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled = myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_allocated;

		for(unsigned int j = 0; j < myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled; j++){
			myDocPostingsTupleHeap[i].first.all_Xdoc += termID_with_their_first_factor_probability_map_[ myDocPostingsTupleHeap[i].second[j].term_id ];
		}

		doc_posting_optimized_compare_class.setCurrentALLXDOC( myDocPostingsTupleHeap[i].first.all_Xdoc );
		doc_posting_optimized_compare_class.setCurrentSelectXDOC( myDocPostingsTupleHeap[i].first.selected_Xdoc );

		// (1) Sort every doc array
		sort(myDocPostingsTupleHeap[i].second, myDocPostingsTupleHeap[i].second + myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled, doc_posting_optimized_compare_class);

		// (2) Update the most promising posting's value to the meta_info tuple
		myDocPostingsTupleHeap[i].first.current_largest_value_of_the_posting_array = doc_posting_optimized_compare_class.score( myDocPostingsTupleHeap[i].second[0] );

	}
	fake_step1_2_3_4_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify( fake_step1_2_3_4_ending_time - fake_step1_2_3_4_starting_time ), false);
	*/


	// Updated on 2013/10/22 afternoon by Wei at school
	// for production, actually loading the postings into the main memory
	cout << "step1_3_3: Load the real postings info and compute the static probability, put them into the heap." << endl;
	double step1_2_3_starting_time = processing_time.GetElapsedTime();
	double step1_2_3_ending_time = 0.0;
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTermsWithLengthOfListAndClassLabelAndNumOfPostingsInEachPiecesFileName));
    cout << "inputFileName: " << inputFileName << endl;
    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	/*
	// for debug on 2013/10/26 night at school
	// This part of logic can help us to understand how many terms are in the lexicon and what are they? :)
	while (index_->NextTerm()){
		string curr_term = string(index_->curr_term(), index_->curr_term_len());
		cout << "traverse the team: " << curr_term << endl;
	}
	exit(1);
	*/

	// for production
	// Use NextTerm() to traverse the whole index's lexicon.
	while (index_->NextTerm()){
		string curr_term = string(index_->curr_term(), index_->curr_term_len());

		// Use the term '0' and '00' for DEBUG
		// Use the term 'so' and 'soalr' for DEBUG
		// the term "applebananapear" is NOT in the lexicon
		// Use the term '00wc' and '0197' for DEBUG
		// if (curr_term == "00wc" or curr_term == "0197"){
		if (curr_term != "applebananapear"){
		    // key: piece number
			// value: # of postings in the current piece
			// The variable currentTermPiecesInfoMap is LIST by LIST
			map<int,int> currentTermPiecesInfoMap;
			string currentTermFromFile = "";
			string noUseWildCard = "";
			string classLabelInStringFormat = "";
			int classLabelInIntFormat = -1;
			string numOfPieceCurrentTermHasInStringFormat = "";
			int numOfPieceCurrentTermHasInIntFormat = -1;

			// cout << curr_term << endl;
			// get meta info from the file
			getline (inputfile,currentLine);
			boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );
		    iss >> currentTermFromFile;

		    if(curr_term != currentTermFromFile){
		    	cout << "curr_term: " << curr_term << endl;
		    	cout << "currentTermFromFile: " << currentTermFromFile << endl;
		    	exit(1);
		    }
		    iss >> noUseWildCard;
		    iss >> classLabelInStringFormat;
		    classLabelInIntFormat = atoi(classLabelInStringFormat.c_str());
		    iss >> numOfPieceCurrentTermHasInStringFormat;
		    numOfPieceCurrentTermHasInIntFormat = atoi(numOfPieceCurrentTermHasInStringFormat.c_str());

		    for(unsigned int i = 0; i< numOfPieceCurrentTermHasInIntFormat; i++){
		    	string currentPieceNumInStringFormat = "";
		    	string numOfPostingsInCurrentPieceInStringFormat = "";
		    	iss >> currentPieceNumInStringFormat;
		    	iss >> numOfPostingsInCurrentPieceInStringFormat;
		    	int currentPieceNumInIntFormat = atoi(currentPieceNumInStringFormat.c_str());
		    	int numOfPostingsInCurrentPieceInIntFormat = atoi(numOfPostingsInCurrentPieceInStringFormat.c_str());

		    	if (currentTermPiecesInfoMap.count(currentPieceNumInIntFormat) > 0){
		    		cout << "duplicated piece #." << endl;
		    		cout << "currentPieceNumInIntFormat: " << currentPieceNumInIntFormat << endl;
		    		cout << "numOfPostingsInCurrentPieceInIntFormat: " << numOfPostingsInCurrentPieceInIntFormat << endl;
		    		exit(1);
		    	}
		    	else{
		    		currentTermPiecesInfoMap[currentPieceNumInIntFormat] = numOfPostingsInCurrentPieceInIntFormat;
		    	}

		    }


		    // for DEBUG purpose
		    // cout << "currentTermPiecesInfoMap.size(): " << currentTermPiecesInfoMap.size() << endl;
		    // cout << "currentTermPiecesInfoMap[0]: " << currentTermPiecesInfoMap[0] << endl;
		    // cout << "currentTermPiecesInfoMap[17]: " << currentTermPiecesInfoMap[17] << endl;
		    // exit(1);



			Timer time_clock;  // Time how long it takes to process a term
			double processing_elapsed_time;
			number_terms_processed += 1;
			unsigned int num_docs_in_original_list = index_->curr_list_data()->num_docs();

			// for debug
			// cout << "---># of terms processed: " << number_terms_processed << endl;
			// cout << "--->Processing term: " << curr_term << endl;
			// cout << "---># of postings in list: " << num_docs_in_original_list << endl;

			// for debug
			// cout << "--->sub_step1: load the postings...";

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			// Load the thing into the main memory cause I need to sort them
			IndexEntry* index_list_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int index_list_entry_offset = 0;
			while (index_->NextDocId())
			{

			  IndexEntry& curr_index_entry = index_list_entry_buffer[index_list_entry_offset];

			  assert(index_list_entry_offset < num_docs_in_original_list);

			  curr_index_entry.doc_id = index_->curr_doc_id();
			  curr_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  curr_index_entry.staticProbability = 0.0;
			  curr_index_entry.partialBM25 = 0.0;
			  ++index_list_entry_offset;
			}  // No more postings in the list.
			// cout << "--->Done." << endl;

			//current sorting Methods:
			//1: N/A
			//2: sort based on partial bm25 score
			//3: N/A
			//4: N/A

			// Need the average document length for computing BM25 scores.
			long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
			long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
			int average_doc_length = total_document_lengths / total_num_docs;

			// Actually, don't need this variable but just to satisfy the constructor's needs of DocIdScoreComparisonWei
			map<string,float> queryTermsProbabilityDistributionMap;
			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, 2, curr_term, queryTermsProbabilityDistributionMap);

			// standard sort function in c++.
			// Sort the index entry buffer based on certain score value
			// for debug
			// cout << "--->sub_step2: sort by score in order to get the rank...";
			sort(index_list_entry_buffer, index_list_entry_buffer + index_list_entry_offset, doc_id_score_comparatorWei);
			// for debug
			// cout << "--->Done." << endl;

			// Since now, the buffer has been sorted, the index position of the array is actually the value of the posting rank in list
			// cout << "Printing all <docID,score,value> pairs ordered by some score: " << curr_term << endl;

			// Updated by Wei 2013/09/14 afternoon at school
			// Updated by Wei 2013/09/13 afternoon at school
			// for debug
			// cout << "--->sub_step3: assign the static probability for the current posting based on the rank and the list length..." << endl;
			int currentAccumulatedRange = 0;
			float currentPostingItselfProbability = 0.0;
			int j = currentTermPiecesInfoMap.size()-1;
			currentAccumulatedRange += currentTermPiecesInfoMap[j];

			// Transfer the postings from the list to the document based.
			for (int i = 0; i < index_list_entry_offset; ++i) {
				// Step3_1 logic: assign the static probability for the current posting
				// (1) I have the rank: i + 1 and this term, and I can give you back the probability NOW.
				int absoluteRankInList = i + 1;
				if(currentAccumulatedRange >= absoluteRankInList){
					// the current Posting Itself Probability = probablity of hitting that range / actual size of that range of that term
					currentPostingItselfProbability = classLabelWithPiecesMetaInfoDict_[classLabelInIntFormat][j] / currentTermPiecesInfoMap[j];
					// The value assigned here should be the probability of this posting made into TOP10
					index_list_entry_buffer[i].staticProbability = currentPostingItselfProbability;

					/*
					// for DEBUG
					if(curr_term == "the" or curr_term == "to"){
						cout << "currentPostingItselfProbability: " << currentPostingItselfProbability << endl;
						cout << "classLabelWithPiecesMetaInfoDict_[classLabelInIntFormat][j]: " << classLabelWithPiecesMetaInfoDict_[classLabelInIntFormat][j] << endl;
						cout << "currentTermPiecesInfoMap[j]: " << currentTermPiecesInfoMap[j] << endl;
						cout << endl;
					}
					*/
				}
				else{
					// It is time to update the coverage.
					j -= 1;
					currentAccumulatedRange += currentTermPiecesInfoMap[j];
					// deal with this BORDER one
					// the current Posting Itself Probability = probablity of hitting that range / actual size of that range of that term
					currentPostingItselfProbability = classLabelWithPiecesMetaInfoDict_[classLabelInIntFormat][j] / currentTermPiecesInfoMap[j];
					// The value assigned here should be the probability of this posting made into TOP10
					index_list_entry_buffer[i].staticProbability = currentPostingItselfProbability;
				}

				// for DEBUG ONLY
				// if (index_list_entry_buffer[i].doc_id == 2432615 or index_list_entry_buffer[i].doc_id == 19263777){
				//	cout << "doc_id: " << index_list_entry_buffer[i].doc_id << endl;
				//	cout << "staticProbability: " << index_list_entry_buffer[i].staticProbability << endl;
				// }


				// Step3_2 logic: Update the corresponding heapTuple in the heap.
				// Append this posting to the end of its document's posting array and keep the
				// tuple's meta-info (fill count, all_Xdoc accumulator) consistent.

				// Check mechanism 1: the destination posting array must still have room.
				if(myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_filled < myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_allocated){
					// Case1: There is still space for the incoming posting. So no problem.
				}
				else{
					// Case2: There is NO MORE space for the incoming posting; this means the
					// pre-allocation pass disagrees with what we are loading now — critical error, exit() right now.
					cout << "--->SPACE NOT ENOUGH PROBLEM." << endl;
					cout << "--->doc_id: " << index_list_entry_buffer[i].doc_id << endl;
					cout << "--->current_size_of_the_posting_array_allocated: " << myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_allocated << endl;
					cout << "--->current_size_of_the_posting_array_filled: " << myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_filled << endl;
					exit(1);
				}

				// Check mechanism 2 (disabled)
				// Updated by Wei 2013/09/20 night at school
				// maybe this assertion will be a problem :)
				// assert(index_doc_entry_offset < num_of_postings_in_doc);

				// The new posting goes into the first unfilled slot of this document's array.
				int index_doc_entry_offset = myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_filled;
				IndexDocOptimizedEntry& curr_index_doc_entry = myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].second[index_doc_entry_offset];
				int num_of_postings_in_doc = myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_allocated;


				// add the new posting at the end of the corresponding doc list;
				// the term must already have an id in the term->id dictionary.
				if(termWithTermIDDict_.count(curr_term) > 0){
					curr_index_doc_entry.term_id = termWithTermIDDict_[curr_term];
				}
				else{
					// NOTE(review): the operator[] in the message below default-inserts
					// curr_term into the map, but we exit(1) immediately so it is harmless.
					cout << "The term: '" << curr_term << "' is NOT found in the termWithTermIDDict_." << endl;
					cout << "termWithTermIDDict_[curr_term]: " << termWithTermIDDict_[curr_term] << endl;
					exit(1);
				}

				// Comment it out on 2013/09/2 morning by Wei at school. No Need to do this updating
				// index_list_entry_buffer[i].partialBM25 = doc_id_score_comparatorWei.score( index_list_entry_buffer[i] );
				// curr_index_doc_entry.partialBM25 = index_list_entry_buffer[i].partialBM25;
				curr_index_doc_entry.staticProbability = index_list_entry_buffer[i].staticProbability;
				// curr_index_doc_entry.valueToComputeANDStoreStaticPart1 = curr_index_doc_entry.staticProbability;	//This updates the static part of the formula
				// LOAD the P(t)
				// double first_factor_probability_value = 0.0;
				// first_factor_probability_value = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[curr_term] ];
				// curr_index_doc_entry.valueToComputeANDStoreCombined = first_factor_probability_value * ( a * curr_index_doc_entry.valueToComputeANDStoreStaticPart1 + b * curr_index_doc_entry.valueToComputeANDStoreDynamicPart2 );


				// if(index_list_entry_buffer[i].doc_id == 24284542){
				// 	cout << "DEBUG" << endl;
				// 	cout << "index_doc_entry_offset: " << index_doc_entry_offset << endl;
				//	cout << "curr_index_entry.term_id: " << curr_index_entry.term_id << endl;
				//	cout << "curr_index_entry.partialBM25: " << curr_index_entry.partialBM25 << endl;
				//	cout << "curr_index_entry.valueToComputeANDStore: " << curr_index_entry.valueToComputeANDStore << endl;
				//	cout << endl;
				// }

				// Commit: bump the fill count and fold this term's first-factor probability
				// into the document's all_Xdoc accumulator.
				myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_filled += 1;
				myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.all_Xdoc += termID_with_their_first_factor_probability_map_[curr_index_doc_entry.term_id];
				// myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.all_Xdoc += freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[curr_term] ];
			}

			// for DEBUG
			// cout << "--->Done." << endl;
			// cout << endl;

			// Done with this term's decoded posting buffer.
			delete[] index_list_entry_buffer;

			// If all the terms have been processed, then we can exit the loop — NO NEED to continue to traverse the rest of the terms in the lexicon.
			if(number_terms_processed == NUMBER_OF_TERMS_NEEDED_TO_PROCESS){
				break;
			}
		}
		else{
			// Pass: this lexicon entry is not one of the terms we are processing.
		}

	}
	step1_2_3_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify( step1_2_3_ending_time - step1_2_3_starting_time ), false);
	cout << endl;



	// In production
	double step4_starting_time = processing_time.GetElapsedTime();
	double step4_ending_time = 0.0;
	// Updated by Wei 2013/09/21 afternoon at school
	cout << "step4: Loop through the heap and Sort each array..." << endl;
	// cout << "mark1:" << endl;
	// (0) Init the DocPostingOptimizedCompareClass for every document (in the total of 25M)
	// Both for step4 and step5, init ONE object ONLY and re-seed its per-document
	// state (all_Xdoc / selected_Xdoc) before each use, instead of constructing 25M comparators.
	DocPostingOptimizedCompareClass doc_posting_optimized_compare_class(termID_with_their_first_factor_probability_map_, a, b, 0.0, 0.0);

	// for each document, do the same thing: seed the shared comparator with this
	// document's accumulators, sort its posting array by the combined score, and
	// cache the best (largest) score in the tuple's meta-info for the heap.
	for(uint32_t i = 0; i < NUM_OF_DOCS_IN_GOV2_COLLECTION; i++){
		// for DEBUG
		// cout << "i:" << i << endl;
		doc_posting_optimized_compare_class.setCurrentALLXDOC( myDocPostingsTupleHeap[i].first.all_Xdoc );
		doc_posting_optimized_compare_class.setCurrentSelectXDOC( myDocPostingsTupleHeap[i].first.selected_Xdoc );

		// cout << "mark2:" << endl;
		// cout << "current_size: " << myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled << endl;
		// (1) Sort every doc array
		// current testing version1
		// sort(myDocPostingsTupleHeap[i].second, myDocPostingsTupleHeap[i].second + myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled, scoreInTest);
		// current testing version2
		// sort(myDocPostingsTupleHeap[i].second, myDocPostingsTupleHeap[i].second + myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled, DocPostingOptimizedCompareForTest());
		// current testing version3
		// NOTE: std::sort copies the comparator by value; it must carry the
		// all_Xdoc/selected_Xdoc state set above for this document.
		sort(myDocPostingsTupleHeap[i].second, myDocPostingsTupleHeap[i].second + myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled, doc_posting_optimized_compare_class);

		// cout << "mark3:" << endl;
		// (2) Update the most promising posting's value to the meta_info tuple.
		// NOTE(review): this reads second[0] even when the fill count is 0, which
		// scores an uninitialized slot for empty documents — confirm every document
		// has at least one posting, or that scoring an empty slot is benign here.
		myDocPostingsTupleHeap[i].first.current_largest_value_of_the_posting_array = doc_posting_optimized_compare_class.score( myDocPostingsTupleHeap[i].second[0] );

		/*
		// for DEBUG part
		// examine a specific doc
		if(myDocPostingsTupleHeap[i].first.current_doc_id == 0){
			cout << "current_doc_id: " << myDocPostingsTupleHeap[i].first.current_doc_id << endl;
			cout << "all_Xdoc: " << myDocPostingsTupleHeap[i].first.all_Xdoc << endl;
			cout << "selected_Xdoc: " << myDocPostingsTupleHeap[i].first.selected_Xdoc << endl;
			cout << "current_beginning_index_of_the_posting_array: " << myDocPostingsTupleHeap[i].first.current_beginning_index_of_the_posting_array << endl;
			cout << "current_largest_value_of_the_posting_array: " << myDocPostingsTupleHeap[i].first.current_largest_value_of_the_posting_array << endl;
			cout << "current_size_of_the_posting_array_allocated: " << myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_allocated << endl;
			cout << "current_size_of_the_posting_array_filled: " << myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled << endl;
			if ( myDocPostingsTupleHeap[i].first.current_doc_id == 0 ){
				for(unsigned k = 0; k < myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled; k++){
					cout << "term_id: " << myDocPostingsTupleHeap[i].second[k].term_id << endl;
					cout << "value: "<< doc_posting_optimized_compare_class.score( myDocPostingsTupleHeap[i].second[k] ) << endl;
					cout << endl;
				}
			}
			cout << endl;
		}
		*/

		//for DEBUG part
		//if (myDocPostingsTupleHeap[i].first.current_largest_value_of_the_posting_array != 0.0){
		//	cout << "current_doc_id: " << myDocPostingsTupleHeap[i].first.current_doc_id << endl;
		//	cout << "all_Xdoc: " << myDocPostingsTupleHeap[i].first.all_Xdoc << endl;
		//	cout << "selected_Xdoc: " << myDocPostingsTupleHeap[i].first.selected_Xdoc << endl;
		//	cout << "current_beginning_index_of_the_posting_array: " << myDocPostingsTupleHeap[i].first.current_beginning_index_of_the_posting_array << endl;
		//	cout << "current_size_of_the_posting_array_allocated: " << myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_allocated << endl;
		//	cout << "current_size_of_the_posting_array_filled: " << myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled << endl;
		//	cout << "current_largest_value_of_the_posting_array: " << myDocPostingsTupleHeap[i].first.current_largest_value_of_the_posting_array << endl;
		//	cout << endl;
		//}

	}

	// for debug
	// cout << "Done." << endl;
	step4_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify( step4_ending_time - step4_starting_time ), false);


	cout << "step5: Make Heap and Prepare The First Pop..." << endl;
	double step5_starting_time = processing_time.GetElapsedTime();
	double step5_ending_time = 0.0;
	// Build a max-heap over all document tuples so that myDocPostingsTupleHeap[0]
	// always holds the document whose best posting has the largest cached score.
	make_heap(myDocPostingsTupleHeap, myDocPostingsTupleHeap+NUM_OF_DOCS_IN_GOV2_COLLECTION, HeapTupleOptimizedCompare() );
	cout << "Done." << endl;
	step5_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify( step5_ending_time - step5_starting_time ), false);
	cout << endl;

	cout << "step6: Pop and Count Process Begins..." << endl;
	double step6_starting_time = processing_time.GetElapsedTime();
	double step6_ending_time = 0.0;
	// Total number of postings sitting in all per-document arrays; this drives the
	// percent-kept thresholds computed below.
	unsigned long currentTotalNumOfPostingsAccumulatedInTheHeap = 0;
	for(uint32_t i = 0; i < NUM_OF_DOCS_IN_GOV2_COLLECTION; i++){
		currentTotalNumOfPostingsAccumulatedInTheHeap += myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled;
		// for debug: one line per document floods stdout (tens of millions of lines
		// on the full collection), so it is disabled like the other debug prints.
		// cout << "i: " << i << " " << myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled << endl;
	}
	cout << "totalNumOfPostingsAccumulatedInTheHeap: " << currentTotalNumOfPostingsAccumulatedInTheHeap << endl;

	// add the 1%,5%,10%,20%,30%,40%,50%,60%,70%,80%,90% dicts:
	// one dictionary per retention level, recording which (term, doc) postings had
	// been popped by the time that level's threshold was reached. Each dictionary
	// is paired with a "stopping sign" flag below that freezes it once its
	// threshold has passed.
	map<string, int> postingsPopped1PercentDict_;
	map<string, int> postingsPopped5PercentDict_;
	map<string, int> postingsPopped10PercentDict_;
	map<string, int> postingsPopped20PercentDict_;
	map<string, int> postingsPopped30PercentDict_;
	map<string, int> postingsPopped40PercentDict_;
	map<string, int> postingsPopped50PercentDict_;
	map<string, int> postingsPopped60PercentDict_;
	map<string, int> postingsPopped70PercentDict_;
	map<string, int> postingsPopped80PercentDict_;
	map<string, int> postingsPopped90PercentDict_;
	map<string, int> postingsPopped100PercentDict_;

	// true once the corresponding percent level has been reported; the dictionary
	// stops collecting new postings from then on.
	bool postingsPopped1PercentDictStoppingSign_ = false;
	bool postingsPopped5PercentDictStoppingSign_ = false;
	bool postingsPopped10PercentDictStoppingSign_ = false;
	bool postingsPopped20PercentDictStoppingSign_ = false;
	bool postingsPopped30PercentDictStoppingSign_ = false;
	bool postingsPopped40PercentDictStoppingSign_ = false;
	bool postingsPopped50PercentDictStoppingSign_ = false;
	bool postingsPopped60PercentDictStoppingSign_ = false;
	bool postingsPopped70PercentDictStoppingSign_ = false;
	bool postingsPopped80PercentDictStoppingSign_ = false;
	bool postingsPopped90PercentDictStoppingSign_ = false;
	bool postingsPopped100PercentDictStoppingSign_ = false;


	// Pop-count thresholds for each retention level: "X% kept" fires after
	// total * X/100 postings have been popped. The double products are implicitly
	// truncated toward zero by the conversion to unsigned long.
	unsigned long numOfPostingsPoppedWhen100PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap;
	unsigned long numOfPostingsPoppedWhen90PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.9;
	unsigned long numOfPostingsPoppedWhen80PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.8;
	unsigned long numOfPostingsPoppedWhen70PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.7;
	unsigned long numOfPostingsPoppedWhen60PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.6;
	unsigned long numOfPostingsPoppedWhen50PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.5;
	unsigned long numOfPostingsPoppedWhen40PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.4;
	unsigned long numOfPostingsPoppedWhen30PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.3;
	unsigned long numOfPostingsPoppedWhen20PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.2;
	unsigned long numOfPostingsPoppedWhen10PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.1;
	unsigned long numOfPostingsPoppedWhen5PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.05;
	unsigned long numOfPostingsPoppedWhen1PercentOfPostingsKept = currentTotalNumOfPostingsAccumulatedInTheHeap * 0.01;

	cout << "# Of Postings @ 100% Kept: " << numOfPostingsPoppedWhen100PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 90% Kept: " << numOfPostingsPoppedWhen90PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 80% Kept: " << numOfPostingsPoppedWhen80PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 70% Kept: " << numOfPostingsPoppedWhen70PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 60% Kept: " << numOfPostingsPoppedWhen60PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 50% Kept: " << numOfPostingsPoppedWhen50PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 40% Kept: " << numOfPostingsPoppedWhen40PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 30% Kept: " << numOfPostingsPoppedWhen30PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 20% Kept: " << numOfPostingsPoppedWhen20PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 10% Kept: " << numOfPostingsPoppedWhen10PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 5% Kept: " << numOfPostingsPoppedWhen5PercentOfPostingsKept << endl;
	cout << "# Of Postings @ 1% Kept: " << numOfPostingsPoppedWhen1PercentOfPostingsKept << endl;

	// for debug
	// exit(1);

	// Updated by Wei on 2013/09/24 Afternoon at school
	// Outputting the related TOP10 postings (hit ratio) which have been popped from the heap to the file on disk
	string outputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kImportantPostingsBeingPoppedAtDifferentLevelsFileName));
	ofstream outputFileHandler;
	outputFileHandler.open(outputFileName.c_str());

	// Updated by Wei on 2013/10/31 Afternoon at school
	// Outputting ALL postings (hit ratio) which have been popped from the heap to the file on disk
	string outputFileName2 = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kALLPostingsBeingPoppedAtDifferentLevelsFileName));
	ofstream outputFileHandler2;
	outputFileHandler2.open(outputFileName2.c_str());

	// NO document will be totally out using the current method. Even an empty document will be pushed back and maintained in the heap.
	// Optimization chance: we could get rid of the empty documents and free the corresponding memory space.
	// Maybe somehow become important
	unsigned long numOfPoppedPostingsInTheTOP10RelatedPostingSet = 0;
	unsigned long numOfDocumentsOut = 0;	// currently does NOT function in our popping method (stays 0)
	// some research note:
	// # of postings for the 1st compressed file: 184868
	// # of postings for test: 60
	// NOTE(review): 60 is the small TEST size from the note above; for a full run
	// this should be set to the total # of postings in the dataset.
	unsigned long NUM_OF_POSTINGS_WANTED_TO_POP = 60;
	unsigned long NUM_OF_POSTINGS_CAN_BE_POP = 0;
	unsigned long NUM_OF_POSTINGS_ACTUALLY_POPPED = 0;
	// Clamp the requested pop count to what is actually available in the heap.
	if (currentTotalNumOfPostingsAccumulatedInTheHeap > NUM_OF_POSTINGS_WANTED_TO_POP){
		NUM_OF_POSTINGS_CAN_BE_POP = NUM_OF_POSTINGS_WANTED_TO_POP;
	}
	else{
		NUM_OF_POSTINGS_CAN_BE_POP = currentTotalNumOfPostingsAccumulatedInTheHeap;
	}
	cout << "NUM_OF_POSTINGS_CAN_BE_POP: " << NUM_OF_POSTINGS_CAN_BE_POP << endl;
	cout << "j_index" << " " << "current_largest_value_of_the_posting_array" << " " << "doc_id" << " " << "term" << endl;
	// Main pop loop: myDocPostingsTupleHeap[0] is the heap root, i.e. the document
	// whose best remaining posting has the globally largest score. Each iteration
	// reads that posting, records it, then (further below) pops the root, advances
	// that document's beginning index, re-scores it, and pushes it back.
	for(unsigned long j = 1; j <= NUM_OF_POSTINGS_CAN_BE_POP; j++){
		// The logic of getting the termInStringFormat.
		// NOTE(review): operator[] default-inserts on a missing term_id; assumes
		// every term_id in the arrays is present in termIDWithTermDict_.
		int tempBeginningIndex = myDocPostingsTupleHeap[0].first.current_beginning_index_of_the_posting_array;
		string termInStringFormat = termIDWithTermDict_[ myDocPostingsTupleHeap[0].second[tempBeginningIndex].term_id ];	// This line needed to be modified

		// The logic of getting the current_doc_id (numeric id -> string via stringstream)
		stringstream ss;
		ss << myDocPostingsTupleHeap[0].first.current_doc_id;
		string docIDInStringFormat;
		ss >> docIDInStringFormat;

		// Unique key identifying this (term, document) posting in the dictionaries below.
		string postingCurrentPoppingKeyInStringFormat = termInStringFormat + "_" + docIDInStringFormat;


		double staticProbabilityScore = doc_posting_optimized_compare_class.getStaticPartScore( myDocPostingsTupleHeap[0].second[tempBeginningIndex] );
		double dynamicProbabilityScore = doc_posting_optimized_compare_class.getDynamicPartScore( myDocPostingsTupleHeap[0].second[tempBeginningIndex] );
		// for debug ONLY
		// In order to view the decreasing tendency of the popping postings
		// add a SUM check here (diff is only consumed by the commented-out print below):
		double tempCheckSum = staticProbabilityScore + dynamicProbabilityScore;
		double diff = myDocPostingsTupleHeap[0].first.current_largest_value_of_the_posting_array - tempCheckSum;
		// cout << "diff: " << diff << endl;
		/*
		// Updated by Wei on 2013/10/25 afternoon at school
		// This statement comparison is just NOT right (exact float equality), design flaw — please do NOT use it :)
		if (myDocPostingsTupleHeap[0].first.current_largest_value_of_the_posting_array != tempCheckSum){
			cout << "Check NOT Passed." << endl;
			cout << "myDocPostingsTupleHeap[0].first.current_largest_value_of_the_posting_array: " << myDocPostingsTupleHeap[0].first.current_largest_value_of_the_posting_array << endl;
			cout << "staticProbabilityScore: " << staticProbabilityScore << endl;
			cout << "dynamicProbabilityScore: " << dynamicProbabilityScore << endl;
			cout << "tempCheckSum: " << tempCheckSum << endl;
			exit(1);
		}
		*/

		// for debug
		// cout << j << " " << myDocPostingsTupleHeap[0].first.current_largest_value_of_the_posting_array << " " << staticProbabilityScore << " " << dynamicProbabilityScore << " " << docIDInStringFormat << " " << termInStringFormat << endl;

		// Record the pop: hits against the TOP10-related set go to the first output
		// file; every popped posting goes to the second.
		NUM_OF_POSTINGS_ACTUALLY_POPPED += 1;
		if ( top10RelatedPostingsDict_.count(postingCurrentPoppingKeyInStringFormat) > 0){
			numOfPoppedPostingsInTheTOP10RelatedPostingSet += 1;
			outputFileHandler << termInStringFormat << " " << docIDInStringFormat << endl;
		}
		else{
			// Just did NOT touch any of the important postings so far. :(
		}

		outputFileHandler2 << termInStringFormat << " " << docIDInStringFormat << endl;

		if(!postingsPopped1PercentDictStoppingSign_){
			if( postingsPopped1PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped1PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 1%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped5PercentDictStoppingSign_){
			if( postingsPopped5PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped5PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 5%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped10PercentDictStoppingSign_){
			if( postingsPopped10PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped10PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 10%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped20PercentDictStoppingSign_){
			if( postingsPopped20PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped20PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 20%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped30PercentDictStoppingSign_){
			if( postingsPopped30PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped30PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 30%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped40PercentDictStoppingSign_){
			if( postingsPopped40PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped40PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 40%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped50PercentDictStoppingSign_){
			if( postingsPopped50PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped50PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 50%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped60PercentDictStoppingSign_){
			if( postingsPopped60PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped60PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 60%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped70PercentDictStoppingSign_){
			if( postingsPopped70PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped70PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 70%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped80PercentDictStoppingSign_){
			if( postingsPopped80PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped80PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 80%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped90PercentDictStoppingSign_){
			if( postingsPopped90PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped90PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 90%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}

		if(!postingsPopped100PercentDictStoppingSign_){
			if( postingsPopped100PercentDict_.count(postingCurrentPoppingKeyInStringFormat) <= 0 ){
				postingsPopped100PercentDict_[postingCurrentPoppingKeyInStringFormat] = 1;
			}
			else{
				cout << "Critical Error: The same posting being popped twice. 100%" << endl;
				cout << "postingCurrentPoppingKeyInStringFormat: " << postingCurrentPoppingKeyInStringFormat << endl;
				exit(1);
			}
		}



		// land mark during running: progress heartbeat every 10000 pops
		if ( j % 10000 == 0){
			cout << "j: " << j << " popped."<< endl;
		}


		if ( j == numOfPostingsPoppedWhen1PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 1% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "1%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 1% kept: " << postingsPopped1PercentDict_.size() << endl;
			outputFileHandler2 << "1%" << endl;
			outputFileHandler2 << endl;

			postingsPopped1PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen5PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 5% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "5%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 5% kept: " << postingsPopped5PercentDict_.size() << endl;
			outputFileHandler2 << "5%" << endl;
			outputFileHandler2 << endl;

			postingsPopped5PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen10PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 10% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "10%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 10% kept: " << postingsPopped10PercentDict_.size() << endl;
			outputFileHandler2 << "10%" << endl;
			outputFileHandler2 << endl;

			postingsPopped10PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen20PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 20% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "20%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 20% kept: " << postingsPopped20PercentDict_.size() << endl;
			outputFileHandler2 << "20%" << endl;
			outputFileHandler2 << endl;

			postingsPopped20PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen30PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 30% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "30%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 30% kept: " << postingsPopped30PercentDict_.size() << endl;
			outputFileHandler2 << "30%" << endl;
			outputFileHandler2 << endl;

			postingsPopped30PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen40PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 40% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "40%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 40% kept: " << postingsPopped40PercentDict_.size() << endl;
			outputFileHandler2 << "40%" << endl;
			outputFileHandler2 << endl;

			postingsPopped40PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen50PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 50% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "50%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 50% kept: " << postingsPopped50PercentDict_.size() << endl;
			outputFileHandler2 << "50%" << endl;
			outputFileHandler2 << endl;

			postingsPopped50PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen60PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 60% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "60%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 60% kept: " << postingsPopped60PercentDict_.size() << endl;
			outputFileHandler2 << "60%" << endl;
			outputFileHandler2 << endl;

			postingsPopped60PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen70PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 70% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "70%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 70% kept: " << postingsPopped70PercentDict_.size() << endl;
			outputFileHandler2 << "70%" << endl;
			outputFileHandler2 << endl;

			postingsPopped70PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen80PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 80% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "80%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 80% kept: " << postingsPopped80PercentDict_.size() << endl;
			outputFileHandler2 << "80%" << endl;
			outputFileHandler2 << endl;

			postingsPopped80PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen90PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 90% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "90%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 90% kept: " << postingsPopped90PercentDict_.size() << endl;
			outputFileHandler2 << "90%" << endl;
			outputFileHandler2 << endl;

			postingsPopped90PercentDictStoppingSign_ = true;
		}

		if ( j == numOfPostingsPoppedWhen100PercentOfPostingsKept){
			cout << "# Of Popped Postings In The TOP10 Related Posting Set when @ 100% kept: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
			outputFileHandler << "100%" << endl;
			outputFileHandler << endl;

			cout << "# Of Popped Postings when @ 100% kept: " << postingsPopped100PercentDict_.size() << endl;
			outputFileHandler2 << "100%" << endl;
			outputFileHandler2 << endl;

			postingsPopped100PercentDictStoppingSign_ = true;
		}




		// Pop the largest element
		pop_heap( myDocPostingsTupleHeap, myDocPostingsTupleHeap + NUM_OF_DOCS_IN_GOV2_COLLECTION - numOfDocumentsOut, HeapTupleOptimizedCompare());

		// implemented by Wei 2013/09/22 afternoon
		// REVERSE UPDATE back to the document posting array
		// Part1: Update the overall statistics in the first element of the heap tuple
		float tempProbability = 0.0;
		int postingArrayOffset = 0;
		// Current Version
		postingArrayOffset = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_beginning_index_of_the_posting_array;
		tempProbability = termID_with_their_first_factor_probability_map_[myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[ postingArrayOffset ].term_id];


		// OLD Version
		// string tempTerm = termIDWithTermDict_[ myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[0].term_id ];
		// tempProbability = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[tempTerm] ];

		myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.selected_Xdoc += tempProbability;
		myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_beginning_index_of_the_posting_array += 1;
		// Updated on 2013/10/25 morning by Wei at school
		// I think this is a programming flaw
		// for DEBUG: Actually, I don't think I need to decrease the value of this variable by 1
		// myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_size_of_the_posting_array_filled -= 1;

		// Part2: Update the info back into the corresponding posting array
		// (1)Sort the related doc array
		int beginningIndex = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_beginning_index_of_the_posting_array;
		int currentSizeOfPostingArrayBeingFilled = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_size_of_the_posting_array_filled;

		// init the DocPostingOptimizedCompareClass
		float numerator = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.selected_Xdoc;
		// OLD version
		// float denominator = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.all_Xdoc;
		// init the variable here
		// OLD version used weeks ago
		// DocPostingOptimizedCompareClass doc_posting_optimized_compare_class(termID_with_their_first_factor_probability_map_, a, b, numerator, denominator);
		doc_posting_optimized_compare_class.setCurrentSelectXDOC(numerator);
		// OLD version
		// doc_posting_optimized_compare_class.setCurrentALLXDOC(denominator);

		if (beginningIndex < currentSizeOfPostingArrayBeingFilled){
			// use the DocPostingOptimizedCompareClass to sort thing again
			sort(myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].second + beginningIndex, myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].second + currentSizeOfPostingArrayBeingFilled, doc_posting_optimized_compare_class);
		}
		else{
			// Just enjoy this journey
			// situation 1: beginningIndex == currentSizeOfPostingArrayBeingFilled : One posting left and you do NOT need to sort
			// situation 2: beginningIndex > currentSizeOfPostingArrayBeingFilled : NO posting left for this document
		}

		// (2)Update the most promising posting's value to the meta_info of the heapTupleArray
		if (beginningIndex < currentSizeOfPostingArrayBeingFilled){
			myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_largest_value_of_the_posting_array = doc_posting_optimized_compare_class.score( myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[beginningIndex] );
		}
		else if(beginningIndex == currentSizeOfPostingArrayBeingFilled){
			myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_largest_value_of_the_posting_array = doc_posting_optimized_compare_class.score( myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[beginningIndex] );
		}
		else{
			myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_largest_value_of_the_posting_array = 0.0;
			cout << "Some checks here: " << endl;
			cout << "beginningIndex: " << beginningIndex << endl;
			cout << "currentSizeOfPostingArrayBeingFilled: " << currentSizeOfPostingArrayBeingFilled << endl;
			cout << endl;
			exit(1);
		}


		// before pushing the document back into the heap, examining the documents a little bit more :)
		//if(myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_doc_id == 2432615 or myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_doc_id == 19263777){
		//	cout << "current_doc_id: " << myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_doc_id << endl;
		//	cout << "all_Xdoc: " << myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.all_Xdoc << endl;
		//	cout << "selected_Xdoc: " << myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.selected_Xdoc << endl;
		//	cout << "current_beginning_index_of_the_posting_array: " << myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_beginning_index_of_the_posting_array << endl;
		//	cout << "current_largest_value_of_the_posting_array: " << myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_largest_value_of_the_posting_array << endl;
		//	cout << "current_size_of_the_posting_array_allocated: " << myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_size_of_the_posting_array_allocated << endl;
		//	cout << "current_size_of_the_posting_array_filled: " << myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_size_of_the_posting_array_filled << endl;
		//	cout << endl;
		//}


		// (3) Push the document(No matter empty or NOT) back into the heap
		push_heap(myDocPostingsTupleHeap, myDocPostingsTupleHeap + NUM_OF_DOCS_IN_GOV2_COLLECTION - numOfDocumentsOut, HeapTupleOptimizedCompare());

	}
	cout << "Done." << endl;
	// option for test:
	// cout << "sort heap begins...";
	// sort_heap(myDocPostingsTupleHeap, myDocPostingsTupleHeap+NUM_OF_DOCS_IN_GOV2_COLLECTION, HeapTupleCompare());
	// cout << "Done." << endl;
	step6_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify( step6_ending_time - step6_starting_time ), false);


	// DEBUG Part
	// The following logic is JUST a check
	// WHOLE check for the data structure: myDocPostingsTupleHeap
	vector<uint32_t> docIDNeededToCheckVector;
	// docIDNeededToCheckVector.push_back(2797972);
	// docIDNeededToCheckVector.push_back(2178851);
	// docIDNeededToCheckVector.push_back(23669277);
	// docIDNeededToCheckVector.push_back(24630232);
	// docIDNeededToCheckVector.push_back(2432615);
	// docIDNeededToCheckVector.push_back(335001);
	// docIDNeededToCheckVector.push_back(6353238);
	// docIDNeededToCheckVector.push_back(2334304);
	// docIDNeededToCheckVector.push_back(19263777);
	// docIDNeededToCheckVector.push_back(21616474);

	// NO USE since 2013/10/25 afternoon by Wei at school
	// unsigned long currentTotalNumOfPostingsAccumulatedAfter = 0;
	// Following convention, the docID is represented as uint32_t
	for(uint32_t i = 0; i < NUM_OF_DOCS_IN_GOV2_COLLECTION; i++){
		// NO USE since 2013/10/25 afternoon by Wei at school
		// currentTotalNumOfPostingsAccumulatedAfter += myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled;

		bool processedFlag = false;
		for(vector<uint32_t>::iterator it = docIDNeededToCheckVector.begin(); it != docIDNeededToCheckVector.end(); it++){
			if ( *it == myDocPostingsTupleHeap[i].first.current_doc_id){
				cout << "myDocPostingsTupleHeap[i].first.current_doc_id:" << myDocPostingsTupleHeap[i].first.current_doc_id << endl;
				processedFlag = true;
				break;
			}
			else{
				// Just do NOTHING
			}
		}

		if(processedFlag){

			cout << "current_doc_id: " << myDocPostingsTupleHeap[i].first.current_doc_id << endl;
			cout << "all_Xdoc: " << myDocPostingsTupleHeap[i].first.all_Xdoc << endl;
			cout << "selected_Xdoc: " << myDocPostingsTupleHeap[i].first.selected_Xdoc << endl;
			cout << "current_beginning_index_of_the_posting_array: " << myDocPostingsTupleHeap[i].first.current_beginning_index_of_the_posting_array << endl;
			cout << "first.current_largest_value_of_the_posting_array: " << myDocPostingsTupleHeap[i].first.current_largest_value_of_the_posting_array << endl;
			cout << "current_size_of_the_posting_array_allocated: " << myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_allocated << endl;
			cout << "current_size_of_the_posting_array_filled: " << myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled << endl;
			cout << "term_id: " << myDocPostingsTupleHeap[i].second[0].term_id << endl;
			cout << "staticProbability: " << myDocPostingsTupleHeap[i].second[0].staticProbability << endl;
			cout << "term_id: " << myDocPostingsTupleHeap[i].second[1].term_id << endl;
			cout << "staticProbability: " << myDocPostingsTupleHeap[i].second[1].staticProbability << endl;
			cout << endl;
		}
	}

	cout << endl;
	// NO USE since 2013/10/25 afternoon by Wei at school
	// cout << "currentTotalNumOfPostingsAccumulatedAfter: " << currentTotalNumOfPostingsAccumulatedAfter << endl;
	// cout << "# of postings EXTRACTED: " << currentTotalNumOfPostingsAccumulatedInTheHeap - currentTotalNumOfPostingsAccumulatedAfter << endl;

	cout << "Processing Overall Statistics:" << endl;
	cout << "Checking Section:" << endl;
	unsigned long checkingTotalNumOfPostingsInHeap = 0;
	for(uint32_t i = 0; i < NUM_OF_DOCS_IN_GOV2_COLLECTION; i++){
		checkingTotalNumOfPostingsInHeap += myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled - myDocPostingsTupleHeap[i].first.current_beginning_index_of_the_posting_array;
		cout << "----->docID: " << i << endl;
		float numerator = myDocPostingsTupleHeap[i].first.current_beginning_index_of_the_posting_array;
		float denominator = myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled;
		float percentageOfPostingsPickedFromTheCurrentDocument = numerator / denominator;
		// cout << "myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled: " << denominator << endl;
		// cout << "myDocPostingsTupleHeap[i].first.current_beginning_index_of_the_posting_array: " << numerator << endl;
		cout << "----->%OfPostingsPicked: " << percentageOfPostingsPickedFromTheCurrentDocument << endl;
	}
	cout << "checkingTotalNumOfPostingsInHeap: " << checkingTotalNumOfPostingsInHeap << endl;
	cout << "totalNumOfPostingsAccumulatedInTheHeap: " << currentTotalNumOfPostingsAccumulatedInTheHeap << endl;
	cout << "NUM_OF_POSTINGS_ACTUALLY_POPPED: " << NUM_OF_POSTINGS_ACTUALLY_POPPED << endl;
	cout << "numOfPoppedPostingsInTheTOP10RelatedPostingSet: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
	// NOT yet tested on 2013/10/24 morning by Wei at school
	// cout << "# of top10 related postings: " << top10RelatedPostingsDict_.size() << endl;
	// cout << "% of top10 related postings being hit: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet / top10RelatedPostingsDict_.size() << endl;

	// cout << "Weight for a: " << a << endl;
	cout << "Weight for b: " << b << endl;
	cout << "postingsPopped1PercentDict_.size(): " << postingsPopped1PercentDict_.size() << endl;
	cout << "postingsPopped5PercentOfDict_.size(): " << postingsPopped5PercentDict_.size() << endl;
	cout << "postingsPopped10PercentDict_.size(): " << postingsPopped10PercentDict_.size() << endl;
	cout << "postingsPopped20PercentDict_.size(): " << postingsPopped20PercentDict_.size() << endl;
	cout << "postingsPopped30PercentDict_.size(): " << postingsPopped30PercentDict_.size() << endl;
	cout << "postingsPopped40PercentDict_.size(): " << postingsPopped40PercentDict_.size() << endl;
	cout << "postingsPopped50PercentDict_.size(): " << postingsPopped50PercentDict_.size() << endl;
	cout << "postingsPopped60PercentDict_.size(): " << postingsPopped60PercentDict_.size() << endl;
	cout << "postingsPopped70PercentDict_.size(): " << postingsPopped70PercentDict_.size() << endl;
	cout << "postingsPopped80PercentDict_.size(): " << postingsPopped80PercentDict_.size() << endl;
	cout << "postingsPopped90PercentDict_.size(): " << postingsPopped90PercentDict_.size() << endl;
	cout << "postingsPopped100PercentDict_.size(): " << postingsPopped100PercentDict_.size() << endl;
	cout << "outputFileName: " << outputFileName << endl;
	cout << "outputFileName2: " << outputFileName2 << endl;
	delete[] myDocPostingsTupleHeap;
	outputFileHandler.close();
	outputFileHandler2.close();
}


/*
void LayeredIndexGenerator::PrototypingOfThePostingOrientedUniformPruningMethod(bool debugFlag){
	cout << "The function LayeredIndexGenerator::PrototypingOfThePostingOrientedUniformPruningMethod(...) is called." << endl;
	// Weights Setting:
	const int BIG_NUMBER = 50000000;	// This BIG NUMBER will be assigned to the init value of the termID
	const float a = 0.9;	// Maybe need to dynamically changed for this parameter
	const float b = 0.1;	// Maybe need to dynamically changed for this parameter

	const int NUMBER_OF_TERMS_NEEDED_TO_PROCESS = 39000;	// a little larger than 38449
	int number_terms_processed = 0;
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTermsWithLengthOfListAndClassLabelAndNumOfPostingsInEachPiecesFileName));
    cout << "inputFileName: " << inputFileName << endl;
    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	// Updated by Wei 2013/09/14 night at school
	// Let's init for a very big heap (like the returning results data structures in the query processing do)
	// total # of documents in gov2 dataset: 25205179 (25M)
	int NUM_OF_DOCS_IN_GOV2_COLLECTION = 25205179;
	docPostingsTuple* myDocPostingsTupleHeap = new docPostingsTuple[NUM_OF_DOCS_IN_GOV2_COLLECTION]; // Using a pointer here
	//init and allocate the memory NOW
	Timer processing_time;
	double step0_starting_time = processing_time.GetElapsedTime();
	double step0_ending_time = 0.0;
	cout << "step0: allocate the memory for the BIG heap" << endl;
	for(uint32_t i = 0; i < NUM_OF_DOCS_IN_GOV2_COLLECTION; i++){
		if(i % 1000000 == 0){
			cout << i << " docs inited." << endl;
		}
		docIDWithTupleCreatedDict_[i] = true;
		// init the MetaInfo_SET
		MetaInfo_SET meta_info_set; // Initialization of the MetaInfo_SET for a specific document.
		meta_info_set.current_doc_id = i; // Not Very Proper cause 0 actually represent a typical gov2 document
		meta_info_set.current_largest_value_of_the_posting_array = 0;
		meta_info_set.current_size_of_the_posting_array_filled = 0;
		meta_info_set.current_size_of_the_posting_array_allocated = 0;
		meta_info_set.current_beginning_index_of_the_posting_array = 0;
		meta_info_set.all_Xdoc = 0.0;
		meta_info_set.selected_Xdoc = 0.0;

		// init the index_doc_entry_buffer buffer
		int num_postings_in_current_doc = 0;
		if (docIDWithNumOfPostingsRecordedDict_.count(i) > 0){
			num_postings_in_current_doc = docIDWithNumOfPostingsRecordedDict_[i];
		}
		else{
			// Fallback guess for the number of postings in the doc when none is recorded;
			// currently set to 2 below (an earlier experiment tried the average, ~250).
			num_postings_in_current_doc = 2;
		}

		// maintain this piece of buffer and put it somewhere in the memory
		IndexDocEntry* index_doc_entry_buffer = new IndexDocEntry[num_postings_in_current_doc];


		// for DEBUG only: see how much memory has been allocated
		// Updated by Wei 2013/09/20 morning
		// cout << "i: " << i << endl;
		// cout << "num_postings_in_current_doc: " << num_postings_in_current_doc << endl;
		// cout << "sizeof(*index_doc_entry_buffer ): " << sizeof( *index_doc_entry_buffer ) << endl;
		// cout << "sizeof( index_doc_entry_buffer ): " << sizeof( index_doc_entry_buffer ) << endl;
		// exit(1);


		// init the value as well. Otherwise, the value will be just random.
		for(int index_doc_entry_offset = 0; index_doc_entry_offset < num_postings_in_current_doc; index_doc_entry_offset++){
			  IndexDocEntry& curr_index_doc_entry = index_doc_entry_buffer[index_doc_entry_offset];
			  assert(index_doc_entry_offset < num_postings_in_current_doc);
			  curr_index_doc_entry.term_id = BIG_NUMBER; // Not Very Proper cause 0 actually represent another term
			  curr_index_doc_entry.partialBM25 = 0.0;
			  curr_index_doc_entry.staticProbability = 0.0;
			  curr_index_doc_entry.valueToComputeANDStoreStaticPart1 = 0.0;
			  curr_index_doc_entry.valueToComputeANDStoreDynamicPart2 = 0.0;
			  // LOAD the P(t)
			  double first_factor_probability_value = 0.0;
			  // There is NO assignment for the first_factor_probability_value cause the term is NOT fixed
			  // Just be N/A for the term here
			  // first_factor_probability_value = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[ N/A] ];
			  curr_index_doc_entry.valueToComputeANDStoreCombined = first_factor_probability_value * (a * curr_index_doc_entry.valueToComputeANDStoreStaticPart1 + b * curr_index_doc_entry.valueToComputeANDStoreDynamicPart2);
		}


		meta_info_set.current_size_of_the_posting_array_allocated = num_postings_in_current_doc;

		// still regard this as an array here
		myDocPostingsTupleHeap[ i ] = make_pair(meta_info_set, index_doc_entry_buffer);
	}
	step0_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify(step0_ending_time - step0_starting_time), false);

	double step1_2_3_starting_time = processing_time.GetElapsedTime();
	double step1_2_3_ending_time = 0.0;


	// Use NextTerm() to traverse the whole index's lexicon.
	while (index_->NextTerm()){
		string curr_term = string(index_->curr_term(), index_->curr_term_len());

		// Use the term '0' and '00' for DEBUG
		// Use the term 'so' and 'soalr' for DEBUG
		// the term "applebananapear" is NOT in the lexicon
		// Use the term '00wc' and '0197' for DEBUG
		// if (curr_term == "00wc" or curr_term == "0197"){
		if (curr_term == "soalr"){
		    // key: piece number
			// value: # of postings in the current piece
			map<int,int> currentTermPiecesInfoMap;
			string currentTermFromFile = "";
			string noUseWildCard = "";
			string classLabelInStringFormat = "";
			int classLabelInIntFormat = -1;
			string numOfPieceCurrentTermHasInStringFormat = "";
			int numOfPieceCurrentTermHasInIntFormat = -1;

			// cout << curr_term << endl;
			// get meta info from the file
			getline (inputfile,currentLine);
			boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );
		    iss >> currentTermFromFile;

		    if(curr_term != currentTermFromFile){
		    	cout << "curr_term: " << curr_term << endl;
		    	cout << "currentTermFromFile: " << currentTermFromFile << endl;
		    	exit(1);
		    }
		    iss >> noUseWildCard;
		    iss >> classLabelInStringFormat;
		    classLabelInIntFormat = atoi(classLabelInStringFormat.c_str());
		    iss >> numOfPieceCurrentTermHasInStringFormat;
		    numOfPieceCurrentTermHasInIntFormat = atoi(numOfPieceCurrentTermHasInStringFormat.c_str());
		    for(unsigned int i = 0; i< numOfPieceCurrentTermHasInIntFormat; i++){
		    	string currentPieceNumInStringFormat = "";
		    	string numOfPostingsInCurrentPieceInStringFormat = "";
		    	iss >> currentPieceNumInStringFormat;
		    	iss >> numOfPostingsInCurrentPieceInStringFormat;

		    	int currentPieceNumInIntFormat = atoi(currentPieceNumInStringFormat.c_str());
		    	int numOfPostingsInCurrentPieceInIntFormat = atoi(numOfPostingsInCurrentPieceInStringFormat.c_str());
		    	if (currentTermPiecesInfoMap.count(currentPieceNumInIntFormat) > 0){
		    		cout << "duplicated piece #." << endl;
		    		cout << "currentPieceNumInIntFormat: " << currentPieceNumInIntFormat << endl;
		    		cout << "numOfPostingsInCurrentPieceInIntFormat: " << numOfPostingsInCurrentPieceInIntFormat << endl;
		    		exit(1);
		    	}
		    	else{
		    		currentTermPiecesInfoMap[currentPieceNumInIntFormat] = numOfPostingsInCurrentPieceInIntFormat;
		    	}
		    }


		    // for DEBUG purpose
		    // cout << "currentTermPiecesInfoMap.size(): " << currentTermPiecesInfoMap.size() << endl;
		    // cout << "currentTermPiecesInfoMap[0]: " << currentTermPiecesInfoMap[0] << endl;
		    // cout << "currentTermPiecesInfoMap[17]: " << currentTermPiecesInfoMap[17] << endl;
		    // exit(1);



			Timer time_clock;  // Time how long it takes to process a term
			double processing_elapsed_time;
			number_terms_processed += 1;
			int num_docs_in_original_list = index_->curr_list_data()->num_docs();

			cout << "---># of terms processed: " << number_terms_processed << endl;
			cout << "--->Processing term: " << curr_term << endl;
			cout << "---># of postings in list: " << num_docs_in_original_list << endl;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			// Load the thing into the main memory cause I need to sort them
			IndexEntry* index_list_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int index_list_entry_offset = 0;

			cout << "--->sub_step1: load the postings...";
			while (index_->NextDocId())
			{

			  IndexEntry& curr_index_entry = index_list_entry_buffer[index_list_entry_offset];

			  assert(index_list_entry_offset < num_docs_in_original_list);

			  curr_index_entry.doc_id = index_->curr_doc_id();
			  curr_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  curr_index_entry.staticProbability = 0.0;
			  curr_index_entry.partialBM25 = 0.0;
			  ++index_list_entry_offset;
			}  // No more postings in the list.
			cout << "--->Done." << endl;

			//current sorting Methods:
			//1: N/A
			//2: sort based on partial bm25 score
			//3: N/A
			//4: N/A

			// Need the average document length for computing BM25 scores.
			long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
			long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
			int average_doc_length = total_document_lengths / total_num_docs;

			// Actually, don't need this variable but just to satisfy the constructor's needs of DocIdScoreComparisonWei
			map<string,float> queryTermsProbabilityDistributionMap;
			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, 2, curr_term, queryTermsProbabilityDistributionMap);

			// standard sort function in c++.
			// Sort the index entry buffer based on certain score value
			cout << "--->sub_step2: sort by score in order to get the rank...";
			sort(index_list_entry_buffer, index_list_entry_buffer + index_list_entry_offset, doc_id_score_comparatorWei);
			cout << "--->Done." << endl;

			// Since now, the buffer has been sorted, the index position of the array is actually the value of the posting rank in list
			// cout << "Printing all <docID,score,value> pairs ordered by some score: " << curr_term << endl;

			// Updated by Wei 2013/09/14 afternoon at school
			// Updated by Wei 2013/09/13 afternoon at school
			cout << "--->sub_step3: (1)assign the probability based on the rank and the list length... (2)Update the corresponding heapTuple in the heap..." << endl;

			int currentAccumulatedRange = 0;
			float currentPostingItselfProbability = 0.0;
			int j = currentTermPiecesInfoMap.size()-1;
			currentAccumulatedRange += currentTermPiecesInfoMap[j];

			for (int i = 0; i < index_list_entry_offset; ++i) {
				// Step3_1 logic:
				// (1) I have the rank: i + 1 and this term, and I can give you back the probability NOW.
				int absoluteRankInList = i + 1;
				if(currentAccumulatedRange >= absoluteRankInList){
					// the current Posting Itself Probability = probability of hitting that range / actual size of that range of that term
					currentPostingItselfProbability = classLabelWithPiecesMetaInfoDict_[classLabelInIntFormat][j] / currentTermPiecesInfoMap[j];
					// The value assigned here should be the probability of this posting made into TOP10
					index_list_entry_buffer[i].staticProbability = currentPostingItselfProbability;
				}
				else{
					// It is time to update the coverage.
					j -= 1;
					currentAccumulatedRange += currentTermPiecesInfoMap[j];
					// deal with this BORDER one
					// the current Posting Itself Probability = probability of hitting that range / actual size of that range of that term
					currentPostingItselfProbability = classLabelWithPiecesMetaInfoDict_[classLabelInIntFormat][j] / currentTermPiecesInfoMap[j];
					// The value assigned here should be the probability of this posting made into TOP10
					index_list_entry_buffer[i].staticProbability = currentPostingItselfProbability;
				}


				// Step3_2 logic: Update the corresponding heapTuple in the heap
				// special DEBUG for a specific document
				int index_doc_entry_offset = myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_filled;
				IndexDocEntry& curr_index_doc_entry = myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].second[index_doc_entry_offset];
				int num_of_postings_in_doc = myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_allocated;
				// Updated by Wei 2013/09/20 night at school
				// maybe this assertion will be a problem :)
				assert(index_doc_entry_offset < num_of_postings_in_doc);
				// Somewhat ugly here: the following data structures are for the classic posting data structure, NOT for this doc structure.
				// add the new postings at the end of corresponding doc list
				curr_index_doc_entry.term_id = termWithTermIDDict_[curr_term];
				index_list_entry_buffer[i].partialBM25 = doc_id_score_comparatorWei.score( index_list_entry_buffer[i] );
				curr_index_doc_entry.partialBM25 = index_list_entry_buffer[i].partialBM25;
				curr_index_doc_entry.staticProbability = index_list_entry_buffer[i].staticProbability;
				curr_index_doc_entry.valueToComputeANDStoreStaticPart1 = curr_index_doc_entry.staticProbability;	//This updates the static part of the formula
				// LOAD the P(t)
				double first_factor_probability_value = 0.0;
				first_factor_probability_value = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[curr_term] ];
				curr_index_doc_entry.valueToComputeANDStoreCombined = first_factor_probability_value * ( a * curr_index_doc_entry.valueToComputeANDStoreStaticPart1 + b * curr_index_doc_entry.valueToComputeANDStoreDynamicPart2 );


				//if(index_list_entry_buffer[i].doc_id == 24284542){
					//cout << "DEBUG" << endl;
					//cout << "index_doc_entry_offset: " << index_doc_entry_offset << endl;
					//cout << "curr_index_entry.term_id: " << curr_index_entry.term_id << endl;
					//cout << "curr_index_entry.partialBM25: " << curr_index_entry.partialBM25 << endl;
					//cout << "curr_index_entry.valueToComputeANDStore: " << curr_index_entry.valueToComputeANDStore << endl;
					//cout << endl;
				//}

				myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.current_size_of_the_posting_array_filled += 1;
				myDocPostingsTupleHeap[index_list_entry_buffer[i].doc_id].first.all_Xdoc += freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[curr_term] ];
			}
			cout << "--->Done." << endl;
			delete[] index_list_entry_buffer;

			// If all the terms have been processed. Then we can exit the loop and NO NEED to continue to traverse the rest of the terms in lexicon
			if(number_terms_processed == NUMBER_OF_TERMS_NEEDED_TO_PROCESS){
				break;
			}
		}
		else{
			// Pass
		}

	}
	step1_2_3_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify( step1_2_3_ending_time - step1_2_3_starting_time ), false);


	double step4_starting_time = processing_time.GetElapsedTime();
	double step4_ending_time = 0.0;
	// Updated by Wei 2013/09/18 afternoon at school
	cout << "step4: Loop through the heap and Sort each array..." << endl;
	for(uint32_t i = 0; i < NUM_OF_DOCS_IN_GOV2_COLLECTION; i++){
		// (1) Sort every doc array
		sort(myDocPostingsTupleHeap[i].second, myDocPostingsTupleHeap[i].second + myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled, DocPostingCompare());

		// (2) Update the most promising posting's value to the meta_info tuple
		myDocPostingsTupleHeap[i].first.current_largest_value_of_the_posting_array = myDocPostingsTupleHeap[i].second[0].valueToComputeANDStoreCombined;
	}
	cout << "Done." << endl;
	step4_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify( step4_ending_time - step4_starting_time ), false);
	exit(1);

	// the assignment of the important value: currentTotalNumOfPostingsAccumulated and it is NOT just a check
	int currentTotalNumOfPostingsAccumulatedBefore = 0;
	for(uint32_t i = 0; i < NUM_OF_DOCS_IN_GOV2_COLLECTION; i++){
		currentTotalNumOfPostingsAccumulatedBefore += myDocPostingsTupleHeap[i].first.current_size_of_the_posting_array_filled;
	}
	cout << "currentTotalNumOfPostingsAccumulatedBefore: " << currentTotalNumOfPostingsAccumulatedBefore << endl;



	double step5_starting_time = processing_time.GetElapsedTime();
	double step5_ending_time = 0.0;
	cout << "step5: Make Heap and Prepare to Pop...";
	make_heap(myDocPostingsTupleHeap, myDocPostingsTupleHeap+NUM_OF_DOCS_IN_GOV2_COLLECTION, HeapTupleCompare());
	cout << "Done." << endl;
	// another option:
	// cout << "sort heap begins...";
	// sort_heap(myDocPostingsTupleHeap, myDocPostingsTupleHeap+NUM_OF_DOCS_IN_GOV2_COLLECTION, HeapTupleCompare());
	// cout << "Done." << endl;
	step5_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify( step5_ending_time - step5_starting_time ), false);

	double step6_starting_time = processing_time.GetElapsedTime();
	double step6_ending_time = 0.0;
	cout << "step6: Popping and Counting Process Begins..." << endl;
	unsigned int numOfPoppedPostingsInTheTOP10RelatedPostingSet = 0;
	// This variable maintains to be 0 when I do option1, but the variable will change when I do option2
	unsigned int numOfDocumentsOut = 0;
	unsigned int NUM_OF_POSTINGS_WANTED_TO_POP = 6451948010;
	unsigned int NUM_OF_POSTINGS_CAN_BE_POP = 0;
	unsigned int NUM_OF_POSTINGS_ACTUALLY_POPPED = 0;
	if (currentTotalNumOfPostingsAccumulatedBefore > NUM_OF_POSTINGS_WANTED_TO_POP){
		NUM_OF_POSTINGS_CAN_BE_POP = NUM_OF_POSTINGS_WANTED_TO_POP;
	}
	else{
		NUM_OF_POSTINGS_CAN_BE_POP = currentTotalNumOfPostingsAccumulatedBefore;
	}
	cout << "NUM_OF_POSTINGS_CAN_BE_POP: " << NUM_OF_POSTINGS_CAN_BE_POP << endl;
	cout << "j index" << " " << "current_largest_value_of_the_posting_array" << " " << "partialBM25" << " " << "doc_id" << " " << "term" << endl;
	for(unsigned j = 0; j < NUM_OF_POSTINGS_CAN_BE_POP; j++){
		string termInStringFormat = termIDWithTermDict_[ myDocPostingsTupleHeap[0].second->term_id ];
		stringstream ss;
		ss << myDocPostingsTupleHeap[0].first.current_doc_id;
		string docIDInStringFormat;
		ss >> docIDInStringFormat;

		string postingCurrentPoppingKeyInStringFormat = termInStringFormat + "_" + docIDInStringFormat;

		cout << "j: " << j << " " << myDocPostingsTupleHeap[0].first.current_largest_value_of_the_posting_array << " " << myDocPostingsTupleHeap[0].second[0].partialBM25 << " " << docIDInStringFormat << " " << termInStringFormat << endl;

		NUM_OF_POSTINGS_ACTUALLY_POPPED += 1;
		if ( top10RelatedPostingsDict_.count(postingCurrentPoppingKeyInStringFormat) > 0){
			numOfPoppedPostingsInTheTOP10RelatedPostingSet += 1;
		}
		else{
			// Just do NOT touch any of the important postings so far. :(
		}

		// Pop the largest element
		pop_heap( myDocPostingsTupleHeap, myDocPostingsTupleHeap + NUM_OF_DOCS_IN_GOV2_COLLECTION - numOfDocumentsOut, HeapTupleCompare());

		// implemented by Wei 2013/09/17 afternoon
		// reverse update to the overall statistics
		// cout << "--->the score ADDED to the selected_Xdoc: " << myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[0].partialBM25 << endl;

		// CURRENT Version
		string tempTerm = termIDWithTermDict_[ myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[0].term_id ];
		float tempProbability = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[tempTerm] ];
		myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.selected_Xdoc += tempProbability;

		// OLD Version
		// myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.selected_Xdoc += myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[0].partialBM25;
		myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_beginning_index_of_the_posting_array += 1;
		myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_size_of_the_posting_array_filled -= 1;


		// option1: still push back every empty document into the heap
		// sort
		// (1)Sort the related doc array
		// cout << "--->mark1" << endl;
		int beginningIndex = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_beginning_index_of_the_posting_array;
		int currentSizeOfPostingArrayBeingFilled = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_size_of_the_posting_array_filled;


		// cout << "--->beginningIndex: " << beginningIndex << endl;
		// cout << "--->currentSizeOfPostingArrayBeingFilled: " << currentSizeOfPostingArrayBeingFilled << endl;
		// cout << "--->numOfDocumentsOut:" << numOfDocumentsOut << endl;
		if(beginningIndex <= currentSizeOfPostingArrayBeingFilled){
			// first do a Reverse Update to ALL the rest of the postings
			// do this on 2013/09/17 afternoon
			// Here, should only Update the Part2, but NOT Part1(Part1 is static and never changed during the game)

			// cout << "The updating logic is called." << endl;
			// cout << "current_doc_id:" << myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_doc_id << endl;
			// exit(1);
			for(unsigned k = 0; k < myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.current_size_of_the_posting_array_filled; k++){

				float numerator = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.selected_Xdoc;
				float denominator = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].first.all_Xdoc;
				myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[beginningIndex].valueToComputeANDStoreDynamicPart2 = numerator / denominator;
				float tempPart1 = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[beginningIndex].valueToComputeANDStoreStaticPart1;
				float tempPart2 = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[beginningIndex].valueToComputeANDStoreDynamicPart2;
				uint32_t termIDInUint32_tFormat = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[beginningIndex].term_id;
				// LOAD the P(t)
				double first_factor_probability_value = 0.0;
				first_factor_probability_value = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[ termIDWithTermDict_[termIDInUint32_tFormat] ] ];
				myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[beginningIndex].valueToComputeANDStoreCombined = first_factor_probability_value * (a * tempPart1 + b * tempPart2);
			}
		}
		else{
			// No need to update since beginningIndex > currentSizeOfPostingArrayBeingFilled
		}

		if (beginningIndex < currentSizeOfPostingArrayBeingFilled){
			// then sort the thing again
			sort(myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].second + beginningIndex, myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].second + currentSizeOfPostingArrayBeingFilled, DocPostingCompare());
		}
		else{
			// Just enjoy this journey
			// situation 1: beginningIndex == currentSizeOfPostingArrayBeingFilled : One posting left and you do NOT need to sort
			// situation 2: beginningIndex > currentSizeOfPostingArrayBeingFilled : NO posting left for this document
		}

		// (2)Update the most promising posting's value to the meta_info tuple
		// cout << "--->mark2" << endl;
		if (beginningIndex < currentSizeOfPostingArrayBeingFilled){
			myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_largest_value_of_the_posting_array = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[beginningIndex].valueToComputeANDStoreCombined;
		}
		else if(beginningIndex == currentSizeOfPostingArrayBeingFilled){
			myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_largest_value_of_the_posting_array = myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION - 1 - numOfDocumentsOut].second[beginningIndex].valueToComputeANDStoreCombined;
		}
		else{
			myDocPostingsTupleHeap[NUM_OF_DOCS_IN_GOV2_COLLECTION-1-numOfDocumentsOut].first.current_largest_value_of_the_posting_array = 0.0;
		}

		// (3) Push the document(No matter empty or NOT) back into the heap
		// cout << "--->mark3" << endl;
		push_heap(myDocPostingsTupleHeap, myDocPostingsTupleHeap + NUM_OF_DOCS_IN_GOV2_COLLECTION - numOfDocumentsOut, HeapTupleCompare());
	}
	cout << "Done." << endl;
	step6_ending_time = processing_time.GetElapsedTime();
	GetDefaultLogger().Log("Time Elapsed: " + Stringify( step6_ending_time - step6_starting_time ), false);

	cout << "Processing Overall Statistics:" << endl;
	cout << "NUM_OF_POSTINGS_ACTUALLY_POPPED: " << NUM_OF_POSTINGS_ACTUALLY_POPPED << endl;
	cout << "numOfPoppedPostingsInTheTOP10RelatedPostingSet: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet << endl;
	cout << "# of top10 related postings: " << top10RelatedPostingsDict_.size() << endl;
	cout << "% of top10 related postings being hit: " << numOfPoppedPostingsInTheTOP10RelatedPostingSet / top10RelatedPostingsDict_.size() << endl;
	cout << "Weight for a: " << a << endl;
	cout << "Weight for b: " << b << endl;
	delete[] myDocPostingsTupleHeap;
}
*/

void LayeredIndexGenerator::StorePostingRankInListToExternalIndex(map<string,int> & queryTermsDictForDebugging, bool debugFlag){
	  // For each term in the lexicon (or only the terms listed in
	  // 'queryTermsDictForDebugging' when it is non-empty), this routine:
	  //   1. loads the term's inverted list into main memory,
	  //   2. sorts the postings by partial BM25 score (method code 2),
	  //   3. records each posting's rank-in-list in its 'staticProbability' field,
	  //   4. re-sorts the list by docID and dumps it to the main + external index.
	  // 'debugFlag' enables verbose per-posting printing.
	  cout << "The function LayeredIndexGenerator::StorePostingRankInListToExternalIndex(...) is called." << endl;
	  bool store_value_in_external_index_flag = true; // This flag should ALWAYS be true under this function
	  int NUMBER_OF_TERMS_NEEDED_TO_PROCESS = 0; // init the variable

	  if (queryTermsDictForDebugging.size() == 0){
		  // Production mode: process every term. 40M is an upper bound on the
		  // lexicon size, i.e. effectively "all terms".
		  NUMBER_OF_TERMS_NEEDED_TO_PROCESS = 40000000;	// 40M
	  }
	  else{
		  NUMBER_OF_TERMS_NEEDED_TO_PROCESS = queryTermsDictForDebugging.size();
	  }
	  int number_terms_processed = 0;

	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());

		  // Decide whether the current lexicon term should be processed.
		  bool matchFlag = false;
		  if (queryTermsDictForDebugging.size() != 0){
			  if (queryTermsDictForDebugging.count(curr_term) > 0){
				  matchFlag = true; // set the matchFlag to be true
			  }
			  // else: JUST pass this term
		  }
		  else{
			  matchFlag = true;	// In production mode, every term in the lexicon needs to be processed
		  }

		  if (matchFlag)
		  {
			Timer time_clock;  // Time how long it takes to process a term
			double processing_elapsed_time;
			cout << "Processing term: " << curr_term << endl;
			number_terms_processed += 1;

			int num_docs_in_original_list = index_->curr_list_data()->num_docs();
			cout << "# of postings in list: " << num_docs_in_original_list << endl;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			// Load the whole list into main memory because it has to be sorted.
			IndexEntry* index_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  // Bounds check BEFORE touching the slot: the old code bound a
			  // reference to index_entry_buffer[index_entry_offset] first, which
			  // is already undefined behavior when the offset equals the size.
			  assert(index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_index_entry = index_entry_buffer[index_entry_offset];

			  curr_index_entry.doc_id = index_->curr_doc_id();
			  curr_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  curr_index_entry.staticProbability = 0;

			  ++index_entry_offset;
			}  // No more postings in the list.

			// Need the average document length for computing BM25 scores.
			// This meta info(including total_document_lengths, total_num_docs and average_doc_length) maybe wrong due to some shortcut I make.(Wei: 2012/06/28)
			long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
			long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
			int average_doc_length = total_document_lengths / total_num_docs;

			// Sorting method code 2 == sorted by partial BM25 score.
			// The probability-distribution map is empty here; it only exists to
			// satisfy the DocIdScoreComparisonWei constructor for this task.
			map<string,float> queryTermsProbabilityDistributionMap;
			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, 2, curr_term, queryTermsProbabilityDistributionMap);

			if(debugFlag){
				// For this particular term, print all the docIDs in the list and
				// their partial BM25 scores, ordered by docID.
				cout << "Printing all <docID,score,value> pairs ordered by docID: " << curr_term << endl;
				for (int i = 0; i < index_entry_offset; ++i) {
					cout << "docID:" << index_entry_buffer[i].doc_id << "	score:" << doc_id_score_comparatorWei.score(index_entry_buffer[i]) << "	value:" << index_entry_buffer[i].staticProbability << endl;
				}
			}

			// Sort the index entry buffer by the comparator's score.
			cout << "step1: sort by score...";
			sort(index_entry_buffer, index_entry_buffer + index_entry_offset, doc_id_score_comparatorWei);
			cout << "Done" << endl;

			// Now that the buffer is score-sorted, the array index IS the
			// posting's rank in the list.
			cout << "step2: assign the values...";
			for (int i = 0; i < index_entry_offset; ++i) {
				index_entry_buffer[i].staticProbability = i + 1;	// rank in list (1-based)
				if(debugFlag){
					// Print the score-sorted <docID,score,rank> triples.
					cout << "docID:" << index_entry_buffer[i].doc_id << "	score:" << doc_id_score_comparatorWei.score(index_entry_buffer[i]) << "	value:" << index_entry_buffer[i].staticProbability << endl;
				}
			}
			cout << "Done" << endl;

			int total_num_postings = index_entry_offset;
			cout << "step3: sort by docID...";
			// The ranks are stored; restore docID order before dumping.
			sort(index_entry_buffer, index_entry_buffer + total_num_postings, IndexEntryDocIdComparison());
			cout << "Done" << endl;

			cout << "step4: dump info to disk...";
			// This dumps a single inverted list into an index (main index only, or
			// both the main and external index, depending on the flag).
			DumpToIndexForPruningProjectWei(doc_id_score_comparatorWei, index_entry_buffer, total_num_postings, index_->curr_term(),
						  index_->curr_term_len(), store_value_in_external_index_flag, num_docs_in_original_list);
			// FinalizeLayer must be called before writing out the next layer.
			index_builder_->FinalizeLayer(0.0);
			cout << "Done" << endl;

			delete[] index_entry_buffer;

			processing_elapsed_time = time_clock.GetElapsedTime();
			cout << "...ALL Steps Done" << endl;
			cout << "# of terms processed: " << number_terms_processed << endl;
			cout << "processing_elapsed_time: " << processing_elapsed_time << endl;
			cout << endl;

			// If all the terms have been processed, exit early instead of
			// traversing the rest of the lexicon.
			if(number_terms_processed == NUMBER_OF_TERMS_NEEDED_TO_PROCESS){
				break;
			}
		  }
	  }
	  index_builder_->Finalize();

	  // The meta data is written only at the end of index construction.
	  // Note updated by Wei 2013/09/12 afternoon at school
	  if(store_value_in_external_index_flag){
		  WriteMetaFile(output_index_files_.meta_info_filename(),11);
	  }
	  else{
		  WriteMetaFile(output_index_files_.meta_info_filename(),12);
	  }
}

//The output index will ONLY contain the query terms you entered.
void LayeredIndexGenerator::CreatePrunedIndexForMultipleTerms(vector<string> & queryTerms, bool debugFlag,bool store_computed_score_into_external_index_flag,float percentageToKeepOfTheWholeIndex,int pruningMethodCodeOfTheWholeIndex,map<string,float> &queryTermsProbabilityDistributionMap) {
	  // For each query term: load its inverted list, sort the postings by the
	  // score selected via 'pruningMethodCodeOfTheWholeIndex', keep the top
	  // 'percentageToKeepOfTheWholeIndex' fraction (expressed as e.g. 0.9, not
	  // 90), re-sort the survivors by docID, and dump them as one layer of the
	  // output index. The output index contains ONLY the query terms given.
	  vector<float> percentageToKeepForEachQueryTerm;
	  vector<int> pruningMethodCodeForEachQueryTerm;

	  const int NUMBER_QUERY_TERMS = queryTerms.size();
	  int number_query_terms_already_deal_with = 0;

	  cout << "Configuration process begin:" << endl;
	  //TODO: need exception handler.
	  for(unsigned int tempCounter = 0; tempCounter < queryTerms.size(); tempCounter++){

		  cout << "Configuration for query term:" << queryTerms[tempCounter] << endl;
		  cout << "Percentage of postings to keep[0,100]:";

		  // Every term currently shares the same whole-index percentage and
		  // pruning method; the per-term vectors leave room for per-term settings.
		  float percentageToKeep = percentageToKeepOfTheWholeIndex;
		  cout << "You entered:" << percentageToKeep << endl;
		  percentageToKeepForEachQueryTerm.push_back(percentageToKeep);

		  cout << "Select the sorting method" << endl;
		  cout << "1: Not sorted at all" << endl;
		  cout << "2: sorted based on partial bm25 score" << endl;
		  cout << "3: sorted based on a specific machine learning model(Mostly Logistic Regression, Updated by Wei 20130223)" << endl;
		  cout << "Enter the function value[1,2,3]:";

		  int pruningMethodCode = pruningMethodCodeOfTheWholeIndex;
		  cout << "You entered:" << pruningMethodCode << endl;
		  pruningMethodCodeForEachQueryTerm.push_back(pruningMethodCode);
	  }
	  cout << "Configuration process end." << endl;
	  cout << endl << endl;

	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {

		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  bool matchFlag = false;
		  float percentageForTheTerm = -1.0;
		  int pruningMethodCodeForTheTerm = -1;

		  for(unsigned int tempCounter = 0; tempCounter < queryTerms.size(); tempCounter ++){
			  if (curr_term == queryTerms[tempCounter]){
				  matchFlag = true;
				  percentageForTheTerm = percentageToKeepForEachQueryTerm[tempCounter];
				  pruningMethodCodeForTheTerm = pruningMethodCodeForEachQueryTerm[tempCounter];
				  cout << "pruning query term:" << queryTerms[tempCounter] << " percentage to keep:" << percentageForTheTerm << " pruning method code:" << pruningMethodCodeForTheTerm << endl;
				  break;
			  }
		  }

		  // The following are the actual pruning process.
		  if (matchFlag)
		  {
			number_query_terms_already_deal_with += 1;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			int num_docs_in_original_list = index_->curr_list_data()->num_docs();
			if(debugFlag){
				cout << "number of docs in orginal inverted index:" << num_docs_in_original_list << endl;
			}

			IndexEntry* index_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  // Check the bound BEFORE binding a reference to the slot; the old
			  // order accessed one past the end first and asserted afterwards.
			  assert(index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_index_entry = index_entry_buffer[index_entry_offset];

			  curr_index_entry.doc_id = index_->curr_doc_id();
			  curr_index_entry.frequency = index_->curr_list_data()->GetFreq();

			  ++index_entry_offset;
			}  // No more postings in the list.

			// Need the average document length for computing BM25 scores.
			// This meta info(including total_document_lengths, total_num_docs and average_doc_length) maybe wrong due to some shortcut I make.(Wei: 2012/06/28)
			long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
			long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
			int average_doc_length = total_document_lengths / total_num_docs;

			//current sorting Methods:
			//1: not sorted at all(but based on docID)
			//2: sorted based on partial bm25 score
			//3: sorted based on a specific machine learning model (This machine learning model will assign the posting a score, either keep the posting OR remove it, more and more clear now)
			//4: empty

			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, pruningMethodCodeForTheTerm,curr_term,queryTermsProbabilityDistributionMap);

			if(debugFlag){
				// Print all (docID, score) pairs ordered by docID.
				cout << "Printing all (docID,score) pairs ordered by docID: " << curr_term << endl;
				for (int i = 0; i < index_entry_offset; ++i) {
					cout << "docID: " << index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparatorWei.score(index_entry_buffer[i]) << endl;
				}
			}

			// Sort by the comparator's score (mechanism chosen by the method code).
			sort(index_entry_buffer, index_entry_buffer + index_entry_offset, doc_id_score_comparatorWei);

			if(debugFlag){
				// Print all (docID, score) pairs ordered by the chosen score.
				cout << "Printing all (docID,score) pairs ordered by some kind of score: " << curr_term << endl;
				for (int i = 0; i < index_entry_offset; ++i) {
					cout << "docID: " << index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparatorWei.score(index_entry_buffer[i]) << endl;
				}
			}

			  int total_num_postings = index_entry_offset;
			  int num_postings_left = total_num_postings;
			  int num_postings_curr_layer;

			  if (num_postings_left <= 0) {
				// Empty list: nothing to layer. Free the buffer before
				// abandoning the lexicon traversal (the old code leaked it here).
				delete[] index_entry_buffer;
				break;
			  }

			  // Init set up for the 2 parameters: num_postings_curr_layer and num_postings_left. Also notice that this 2 parameters can be adjusted according to the following reasons Roman addressed.

			  // option1 (current version):
			  // percentageForTheTerm is a fraction like 0.9, 0.8, ...
			  num_postings_curr_layer = percentageForTheTerm * total_num_postings;

			  // option2 (old version):
			  // If percentageForTheTerm is represented like 90, 100, then use:
			  // num_postings_curr_layer = (percentageForTheTerm / 100.0) * total_num_postings;
			  num_postings_left -= num_postings_curr_layer;

			  /*
			  // Reason to do the adjustment for the 2 parameters (1)num_postings_curr_layer and (2)num_postings_left:
			  // We want to split so that scores in each layer are unique (i.e. the lowest scoring posting in one layer does not have the same score
			  // as the highest scoring posting in the next layer).
			  // This causes problems in early termination algorithms if not taken into account (the top-k documents returned will not be identical).

			  // The solution to this problem is the following:
			  // If the last posting of the current layer has the same score as the next n postings (which are in the next layer(s)),
			  // we move those same scoring postings into the current layer.
			  // If the next layer(s) now contain 0 documents, we push postings from layers further down into the upper layers.

			  float curr_layer_threshold, next_layer_threshold;
			  do {
				// If this is (1)the only layer or (2)no posting left. Then nothing needs to be done.
				if (0 == (num_layers_ - 1) || num_postings_left <= 0)
				  cout << "just out test" << endl;
				  break;

				int curr_layer_threshold_idx = total_num_postings - num_postings_left - num_postings_curr_layer;
				int next_layer_threshold_idx = total_num_postings - num_postings_left;
				curr_layer_threshold = doc_id_score_comparatorWei.score(index_entry_buffer[curr_layer_threshold_idx]);
				next_layer_threshold = doc_id_score_comparatorWei.score(index_entry_buffer[next_layer_threshold_idx]);
				// The current layer threshold should always be greater than the next layer threshold.
				// We add postings to the current layer until the above is true.
				if (curr_layer_threshold <= next_layer_threshold) {
				  ++num_postings_curr_layer;
				  --num_postings_left;
				} else {
				  break;
				}
			  } while (true);
			  */

			  assert(num_postings_curr_layer > 0);

			  cout << num_postings_curr_layer << " of postings are kept." << endl;
			  cout << num_postings_left << " of postings have been pruned." << endl;

			  // Here we do the actual splitting of the layers.
			  // TODO: Instead of resorting the whole buffer, it might be faster to sort only the 2nd layer by docID, and then do a merge of the layers.
			  //       This would require a different DumpToIndex() method that is more incremental, because we can't do an in-place merge of the whole array
			  //       (it would require an additional array).
			  int curr_layer_start = total_num_postings - num_postings_left - num_postings_curr_layer;
			  int layer_start = overlapping_layers_ ? 0 : curr_layer_start;
			  // The upperbound score for the whole list.
			  float score_threshold = doc_id_score_comparatorWei.score(index_entry_buffer[curr_layer_start]);

			  // Restore docID order within the layer before dumping.
			  sort(index_entry_buffer + layer_start, index_entry_buffer + curr_layer_start + num_postings_curr_layer, IndexEntryDocIdComparison());

			  // This dumps a single inverted list into an index (main index only, or
			  // both the main and external index, depending on the flag).
			  DumpToIndexForPruningProjectWei(doc_id_score_comparatorWei, index_entry_buffer + layer_start, curr_layer_start + num_postings_curr_layer - layer_start, index_->curr_term(),
						  index_->curr_term_len(), store_computed_score_into_external_index_flag, num_docs_in_original_list);

			  index_builder_->FinalizeLayer(score_threshold);  // Need to call this before writing out the next layer.

			  delete[] index_entry_buffer;

			  cout << "...Done" << endl;
			  cout << endl;

			  // If all the query terms have been processed, exit early instead of
			  // traversing the rest of the lexicon.
			  if(number_query_terms_already_deal_with == NUMBER_QUERY_TERMS){
				  break;
			  }

			  // python modules ends here.
			  Py_Finalize();
		  }
	  }
	  index_builder_->Finalize();

	  if(store_computed_score_into_external_index_flag){
		  WriteMetaFile(output_index_files_.meta_info_filename(),11);
	  }
	  else{
		  WriteMetaFile(output_index_files_.meta_info_filename(),12);
	  }
}

string LayeredIndexGenerator::make_the_value_into_string_format_with_fixed_mode(float originalValue, int precisionNumber, bool debugFlag){
	  // Renders 'originalValue' as a string with 'precisionNumber' digits of
	  // precision. NOTE: setprecision() must be applied to the stream BEFORE the
	  // value is inserted; the old code inserted the value first, which made the
	  // precision argument a silent no-op.
	  stringstream ss (stringstream::in | stringstream::out);

	  // To strictly match the original irtk fixed-point rendering, also enable:
	  // ss << fixed;
	  ss << setprecision(precisionNumber) << originalValue;

	  string originalValueInStringFormat = ss.str();
	  if (debugFlag){
		  cout << "test:" << originalValueInStringFormat << endl;
	  }
	  return originalValueInStringFormat;
}

string LayeredIndexGenerator::make_the_value_into_string_format(float originalValue){
	  // Render a float using the stream's default formatting rules.
	  stringstream converter (stringstream::in | stringstream::out);
	  converter << originalValue;
	  return converter.str();
}

string LayeredIndexGenerator::make_the_value_into_string_format(int originalValue){
	  // Render an integer using the stream's default formatting rules.
	  stringstream converter (stringstream::in | stringstream::out);
	  converter << originalValue;
	  return converter.str();
}

void LayeredIndexGenerator::BuildForwardIndex(vector<string> & queryTerms,bool debugFlag,bool store_computed_score_into_external_index_flag){
	  // Loads the inverted list of every term in 'queryTerms' into a transient
	  // in-memory buffer.
	  // NOTE(review): the buffered postings are not used or stored anywhere yet,
	  // so this function appears unfinished; 'debugFlag' and
	  // 'store_computed_score_into_external_index_flag' are currently unused.

	  // step1: load the inverted index data into the main memory.
	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());

		  // Only terms that appear in 'queryTerms' are loaded.
		  bool matchFlag = false;
		  for(unsigned int tempCounter = 0; tempCounter < queryTerms.size(); tempCounter ++){
			  if (curr_term == queryTerms[tempCounter]){
				  matchFlag = true;
				  break;
			  }
		  }

		  if (matchFlag)
		  {
			cout << "query term: " << curr_term << endl;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			int num_docs_in_original_list = index_->curr_list_data()->num_docs();

			cout << "# of postings:" << num_docs_in_original_list << endl;

			// Put the postings of the current term into memory.
			IndexEntry* curr_term_index_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int curr_term_index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  // Bounds check BEFORE binding a reference to the slot (the old
			  // order touched one past the end before asserting).
			  assert(curr_term_index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_posting_index_entry = curr_term_index_entry_buffer[curr_term_index_entry_offset];
			  curr_posting_index_entry.doc_id = index_->curr_doc_id();
			  curr_posting_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  ++curr_term_index_entry_offset;
			}  // No more postings in the list.

			// Release the buffer (the old code leaked it on every matched term).
			delete[] curr_term_index_entry_buffer;
		  }
	  }
}

//The output index will ONLY contain the query terms you entered.
void LayeredIndexGenerator::CutBasedOnUniversalImportanceScore(vector<string> & queryTerms, bool debugFlag, bool store_computed_score_into_external_index_flag, float percentageToKeepOfTheWholeIndex, int pruningMethodCodeOfTheWholeIndex) {
	  // Prunes every query term's inverted list against one global ("universal")
	  // score threshold: each posting whose score is <= 'universalCuttingThreshold'
	  // is dropped, the survivors are re-sorted by docID and dumped as a single
	  // layer. Two log files record per-term statistics and the terms whose
	  // lists were pruned away completely.
	  // The output index will ONLY contain the query terms you entered.

	  string systemLogFileBaseName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kSystemLogFileName));
	  string systemLogFileFinalName = systemLogFileBaseName + "-" + make_the_value_into_string_format(percentageToKeepOfTheWholeIndex) + "-" + make_the_value_into_string_format(pruningMethodCodeOfTheWholeIndex);
	  ofstream systemLogFileHandler;
	  systemLogFileHandler.open(systemLogFileFinalName.c_str());
	  systemLogFileHandler << "term" << " #OfPostingsInTheOriginalList" << " #OfPostingsPruned" << " #OfPostingsLeft" << endl;

	  string systemLogCompletelyPrunedTermsFileBaseName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kSystemLogFileNameCompletelyPrunedTerms));
	  string systemLogCompletelyPrunedTermsFileFinalName = systemLogCompletelyPrunedTermsFileBaseName + "-" + make_the_value_into_string_format(percentageToKeepOfTheWholeIndex) + "-" + make_the_value_into_string_format(pruningMethodCodeOfTheWholeIndex);
	  ofstream systemLogCompletelyPrunedTermsFileHandler;
	  systemLogCompletelyPrunedTermsFileHandler.open(systemLogCompletelyPrunedTermsFileFinalName.c_str());
	  systemLogCompletelyPrunedTermsFileHandler << "completely_being_pruned_terms" << endl;

	  // The threshold is currently hard-coded per target percentage (see the
	  // table below); 0.0 keeps everything. Let's try the threshold 3.70066 for
	  // the term: soalr
	  float universalCuttingThreshold = 0.0;

	  // manually do it.
	  universalCuttingThreshold = 0.0; // for percentageToKeepOfTheWholeIndex == 1.0
	  // universalCuttingThreshold = 0.394423; // for percentageToKeepOfTheWholeIndex == 0.9
	  // universalCuttingThreshold = 0.614933; // for percentageToKeepOfTheWholeIndex == 0.8
	  // universalCuttingThreshold = 0.807624; // for percentageToKeepOfTheWholeIndex == 0.7
	  // universalCuttingThreshold = 1.04028; // for percentageToKeepOfTheWholeIndex == 0.6
	  // universalCuttingThreshold = 1.28548; // for percentageToKeepOfTheWholeIndex == 0.5
	  // universalCuttingThreshold = 1.55904; // for percentageToKeepOfTheWholeIndex == 0.4
	  // universalCuttingThreshold = 1.88018; // for percentageToKeepOfTheWholeIndex == 0.3
	  // universalCuttingThreshold = 2.30847; // for percentageToKeepOfTheWholeIndex == 0.2
	  // universalCuttingThreshold = 3.00667; // for percentageToKeepOfTheWholeIndex == 0.1
	  // universalCuttingThreshold = 3.70066; // for percentageToKeepOfTheWholeIndex == 0.05
	  // universalCuttingThreshold = 5.4158; // for percentageToKeepOfTheWholeIndex == 0.01

	  // NOTE(review): an automatic mapping from percentage to threshold was
	  // attempted with a chain of float == comparisons and "is NOT working";
	  // exact float equality is unreliable -- a tolerance-based comparison (or
	  // an integer percentage key) would be needed to revive it.

	  cout << "percentageToKeepOfTheWholeIndex: " << percentageToKeepOfTheWholeIndex << endl;
	  cout << "universalCuttingThreshold:" << universalCuttingThreshold << endl;

	  // currently existing sorting methods
	  // 1: Not sorted at all
	  // 2: sorted based on partial bm25 score (largest to smallest)
	  // 3: sorted based on a specific machine learning model (Updated 20130118 by Wei)
	  // 4: sorted based on partial bm25 score (smallest to largest) (Updated 20130213 by Wei)

	  const int NUMBER_QUERY_TERMS = queryTerms.size();
	  int number_query_terms_already_deal_with = 0;

	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {
		  int num_of_postings_being_cut = 0;
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  bool matchFlag = false;
		  for(unsigned int tempCounter = 0; tempCounter < queryTerms.size(); tempCounter ++){
			  if (curr_term == queryTerms[tempCounter]){
				  matchFlag = true;
				  break;
			  }
		  }

		  // Only the query terms are pruned; everything else is skipped.
		  if (matchFlag)
		  {
			cout << "query term: " << curr_term << endl;
			number_query_terms_already_deal_with += 1;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			int num_docs_in_original_list = index_->curr_list_data()->num_docs();

			cout << "# of postings:" << num_docs_in_original_list << endl;

			// Put the postings of the current term into memory.
			IndexEntry* curr_term_index_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int curr_term_index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  // Bounds check BEFORE binding a reference to the slot.
			  assert(curr_term_index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_posting_index_entry = curr_term_index_entry_buffer[curr_term_index_entry_offset];
			  curr_posting_index_entry.doc_id = index_->curr_doc_id();
			  curr_posting_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  ++curr_term_index_entry_offset;
			}  // No more postings in the list.

			// Need the average document length for computing BM25 scores.
			// This meta info(including total_document_lengths, total_num_docs and average_doc_length) maybe wrong due to some shortcut I make.(Wei: 2012/06/28)
			long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
			long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
			int average_doc_length = total_document_lengths / total_num_docs;

			// Empty map: only needed to satisfy the comparator's constructor.
			map<string,float> queryTermsProbabilityDistributionMap;

			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, pruningMethodCodeOfTheWholeIndex,curr_term,queryTermsProbabilityDistributionMap);

			if(debugFlag){
				// Print all (docID, score) pairs ordered by docID.
				cout << "Printing all (docID,score) pairs ordered by docID: " << curr_term << endl;
				for (int i = 0; i < curr_term_index_entry_offset; ++i) {
					float currentPostingScore = doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]);
					cout << "docID: " << curr_term_index_entry_buffer[i].doc_id << ", score: " << currentPostingScore << endl;
				}
			}

			// Sort by the comparator's score (mechanism chosen by the method code).
			sort(curr_term_index_entry_buffer, curr_term_index_entry_buffer + curr_term_index_entry_offset, doc_id_score_comparatorWei);

			if(debugFlag){
				// Print all (docID, score) pairs ordered by score.
				// NOTE: this loop only prints now; the old code ALSO incremented
				// num_of_postings_being_cut here, double-counting every cut
				// posting whenever debugFlag was set (the counting loop below
				// runs unconditionally).
				cout << "Printing all (docID,score) pairs ordered by some kind of score: " << curr_term << endl;
				for (int i = 0; i < curr_term_index_entry_offset; ++i) {
					float currentPostingScore = doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]);
					cout << "docID: " << curr_term_index_entry_buffer[i].doc_id << ", score: " << currentPostingScore << endl;
				}
			}

			// Count how many postings fall at or below the universal threshold.
			for (int i = 0; i < curr_term_index_entry_offset; ++i) {
				float currentPostingScore = doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]);
				if (currentPostingScore <= universalCuttingThreshold){
					num_of_postings_being_cut += 1;
				}
			}

			int total_num_postings = curr_term_index_entry_offset;
			int num_postings_left = num_of_postings_being_cut;	// i.e. the pruned count, as logged below
			int num_postings_curr_layer = total_num_postings - num_of_postings_being_cut;
			cout << "num_of_postings_being_cut:" << num_of_postings_being_cut << endl;
			cout << "num_postings_curr_layer:" << num_postings_curr_layer << endl;

			cout << num_postings_curr_layer << " of postings are kept." << endl;
			cout << num_postings_left << " of postings have been pruned." << endl;

	  		if(num_postings_curr_layer == 0){
	  			// All the postings have been pruned for this term, so this term will NOT appear in the new inverted index.
	  			systemLogCompletelyPrunedTermsFileHandler << curr_term << endl;
	  			// Free the buffer in this path too (the old code leaked it here).
	  			delete[] curr_term_index_entry_buffer;
	  		}
	  		else{
	  			assert(num_postings_curr_layer > 0);

				// After the score sort the first entry carries the list's
				// upperbound score.
				float score_threshold = doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[0]);

				// Restore docID order over the surviving prefix before dumping.
				sort(curr_term_index_entry_buffer, curr_term_index_entry_buffer + num_postings_curr_layer, IndexEntryDocIdComparison());

				// This dumps a single inverted list into an index (main index only,
				// or both the main and external index, depending on the flag).
				DumpToIndexForPruningProjectWei(doc_id_score_comparatorWei, curr_term_index_entry_buffer, num_postings_curr_layer, index_->curr_term(),
							  index_->curr_term_len(), store_computed_score_into_external_index_flag, num_docs_in_original_list);

				index_builder_->FinalizeLayer(score_threshold);  // Need to call this before writing out the next layer.

				systemLogFileHandler << curr_term << " " << num_docs_in_original_list << " " << num_postings_left << " " << num_postings_curr_layer << endl;

				delete[] curr_term_index_entry_buffer; // delete and free the buffer we just used

				cout << "...Done" << endl;
				cout << endl;

				// python modules ends here.
				Py_Finalize();
	  		}

		    // If all the query terms have been processed, exit early instead of
		    // traversing the rest of the lexicon.
		    if(number_query_terms_already_deal_with == NUMBER_QUERY_TERMS){
				  break;
		    }
		  }
	  }

	  // Updated by Wei 2013/02/16
	  // I assume that no matter how the threshold has been set to very high, there are always postings left here for the pruned inverted index
	  index_builder_->Finalize();

	  if(store_computed_score_into_external_index_flag){
		  WriteMetaFile(output_index_files_.meta_info_filename(),11);
	  }
	  else{
		  WriteMetaFile(output_index_files_.meta_info_filename(),12);
	  }

	  systemLogFileHandler.close();
	  systemLogCompletelyPrunedTermsFileHandler.close();
}

void LayeredIndexGenerator::ConvertingOLD32BitLexiconToNEW64BitLexicon(){
	cout << "LayeredIndexGenerator::ConvertingOLD32BitLexiconToNEW64BitLexicon() begins..." << endl;
	// Walk every entry in the lexicon exactly once, counting terms and emitting a
	// progress line after each million entries; the final total is printed at the end.
	int term_count = 0;
	for (; index_->NextTerm(); ) {
		++term_count;
		if (term_count % 1000000 == 0) {
			cout << "numOfTermsInLexicon:" << term_count << endl;
		}
	}
	cout << "numOfTermsInLexicon:" << term_count << endl;
	cout << "LayeredIndexGenerator::ConvertingOLD32BitLexiconToNEW64BitLexicon() ends." << endl;
}

void LayeredIndexGenerator::OutputingEachDocumentWithTheirNumOfPostingsInIndex(){
	  cout << "LayeredIndexGenerator::OutputingEachDocumentWithTheirNumOfPostingsInIndex() begins..." << endl;

	  string outputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTrecIDDocIDNumOfPostingsRecordedInIndexFileName));
	  ofstream outputFileHandler;
	  outputFileHandler.open(outputFileName.c_str());

	  const int NUM_OF_TERMS = selected_terms_map_.size();
	  int number_terms_already_DONE = 0;

	  // Need the average document length for computing BM25 scores.
	  // This meta info(including total_document_lengths, total_num_docs and average_doc_length) maybe wrong due to some shortcut I make.(Wei: 2012/06/28)
	  long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
	  long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
	  int average_doc_length = total_document_lengths / total_num_docs;

	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  // option1:
		  /*
		  bool matchFlag = false;
		  if(selected_terms_map_.count(curr_term) > 0){
			  matchFlag = true;
			  cout << "ready to process term: " << curr_term << endl;
		  }
		  */

		  // option2:
		  bool matchFlag = true;

		  if (matchFlag)
		  {
			cout << "processing term: " << curr_term << " begins..."<< endl;
			number_terms_already_DONE += 1;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			int num_docs_in_original_list = index_->curr_list_data()->num_docs();

			cout << "# of postings in original inverted index: " << num_docs_in_original_list << endl;

			// put the postings into the memory
			IndexEntry* curr_term_index_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int curr_term_index_entry_offset = 0;


			while (index_->NextDocId())
			{
			  // current version
			  IndexEntry& curr_posting_index_entry = curr_term_index_entry_buffer[curr_term_index_entry_offset];
			  assert(curr_term_index_entry_offset < num_docs_in_original_list);
			  curr_posting_index_entry.doc_id = index_->curr_doc_id();
			  curr_posting_index_entry.frequency = index_->curr_list_data()->GetFreq();


			  // do sth and need to fill here. After the bath
			  if (docIDs_with_num_of_postings_map_.count(curr_posting_index_entry.doc_id) > 0){
				  docIDs_with_num_of_postings_map_[curr_posting_index_entry.doc_id] += 1;
			  }
			  else{
				  docIDs_with_num_of_postings_map_[curr_posting_index_entry.doc_id] = 1;
			  }

			  ++curr_term_index_entry_offset;

			}  // No more postings in the list.



			delete[] curr_term_index_entry_buffer; // delete and free the buffer we just used (important)

			cout << "docIDs_with_num_of_postings_map_.size():" << docIDs_with_num_of_postings_map_.size() << endl;
			cout << "processing term: " << curr_term << " ends."<< endl;
			cout << endl;

			// option1:
			/*
		    // If all the query terms have been processed. Then the system can exit and no need to continue to traverse the whole index's lexicon.
		    if(number_terms_already_DONE == NUM_OF_TERMS){
			 	  break;
		    }
		    */

		  }
	  }

	  map<uint32_t,int>::iterator iter; // use iter to traversing the map<uint32_t,int> data structure
	  for (iter = docIDs_with_num_of_postings_map_.begin(); iter != docIDs_with_num_of_postings_map_.end(); iter++) {
		  outputFileHandler << (*iter).first << " " << (*iter).second << endl;
	  }

	  outputFileHandler.close();
	  cout << "outputFileName: " << outputFileName << endl;
	  cout << "LayeredIndexGenerator::OutputingEachDocumentWithTheirNumOfPostingsInIndex() ends." << endl;
}

void LayeredIndexGenerator::OutputASetOfDocumentsNeededToBeParsedGivenASetOfPostingsAsInput(){
	  // For every (term, posting offset) pair listed in 'term_with_selected_postings_map_',
	  // resolves the posting at that offset to its docID and TREC id, then writes:
	  //   file1 (kGov2DocumentNeededToBeFurtherProcessFileName): each distinct TREC id exactly once;
	  //   file2 (kRandomlySelectedPostingsWithTheirCorrespondingDocIDsFileName):
	  //         "<term> <offset> <docID> <trecID>" for every selected posting.
	  cout << "Program Begins..." << endl;
	  map<uint32_t,int> localDocumentsMap;  // docIDs already written to file1 (acts as a dedup set)

	  // The output file1
	  string outputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kGov2DocumentNeededToBeFurtherProcessFileName));
	  ofstream outputFileHandler;
	  outputFileHandler.open(outputFileName.c_str());

	  // The output file2
	  string outputFileName2 = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kRandomlySelectedPostingsWithTheirCorrespondingDocIDsFileName));
	  ofstream outputFileHandler2;
	  outputFileHandler2.open(outputFileName2.c_str());

	  unsigned long totalNumOfPostingsProcessed = 0;

	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  bool matchFlag = false;
		  if(term_with_selected_postings_map_.count(curr_term) > 0){
			  matchFlag = true;
			  cout << "ready to process term: " << curr_term << endl;
		  }

		  if (matchFlag)
		  {
			cout << "processing term: " << curr_term << " begins..."<< endl;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			int num_docs_in_original_list = index_->curr_list_data()->num_docs();
			cout << "# of postings in original inverted index: " << num_docs_in_original_list << endl;

			// Decompress the whole posting list into memory so selected postings can be addressed
			// by their offset within the list.
			IndexEntry* curr_term_index_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int curr_term_index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  assert(curr_term_index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_posting_index_entry = curr_term_index_entry_buffer[curr_term_index_entry_offset];
			  curr_posting_index_entry.doc_id = index_->curr_doc_id();
			  curr_posting_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  ++curr_term_index_entry_offset;
			}  // No more postings in the list.

			// Hoisted: the original re-evaluated term_with_selected_postings_map_[curr_term] on
			// every '!= end()' check (a string-keyed map search per iteration) plus twice more below.
			map<int, string>& selected_postings = term_with_selected_postings_map_[curr_term];
			for (map<int, string>::iterator iter = selected_postings.begin(); iter != selected_postings.end(); ++iter) {
				uint32_t currentDocID = curr_term_index_entry_buffer[ iter->first ].doc_id;
				string currentTrecID = index_->index_reader()->document_map().GetDocumentNumber(currentDocID);
				// insert() tests membership and inserts in one lookup; '.second' is true only the
				// first time this docID is seen, i.e. exactly when file1 needs a line.
				if (localDocumentsMap.insert(make_pair(currentDocID, 1)).second){
					outputFileHandler << currentTrecID << endl;
				}
				outputFileHandler2 << curr_term << " " << iter->first << " " << currentDocID << " " << currentTrecID << endl;
			}
			cout << "# of postings been selected and recorded: " << selected_postings.size() << endl;
			totalNumOfPostingsProcessed += selected_postings.size();
			delete[] curr_term_index_entry_buffer; // delete and free the buffer we just used (important)

			cout << "processing term: " << curr_term << " ends."<< endl;
			cout << endl;
		  }
	  }
	  outputFileHandler.close();
	  outputFileHandler2.close();
	  cout << "totalNumOfPostingsProcessed: " << totalNumOfPostingsProcessed << endl;
	  cout << "Program Ends." << endl;
}

void LayeredIndexGenerator::ProduceProbabilitiesForRandomlySelectedPostingsBaseline(){
	  cout << "LayeredIndexGenerator::ProduceProbabilitiesForRandomlySelectedPostingsBaseline() called." << endl;
	  // For every (term, posting offset) pair in 'term_with_selected_postings_map_', loads the
	  // term's full posting list into memory, scores the selected postings with
	  // DocIdScoreComparisonWei in the *baseline* configuration (sorting method code 6), and writes
	  // one line per posting:
	  //   "<term> <offset> <docID> <partialBM25> <v1> <v2> <v3> <v4> <v5> <v6>"
	  // where v2, v3 and v5 are hard-wired to 0.0 in this baseline variant.
	  //
	  // currently existing sorting methods & values
	  // 1: Not sorted at all
	  // 2: sorted based on partial bm25 score (largest to smallest)
	  // 3: sorted based on a specific machine learning model (Updated 20130118 by Wei)
	  // 4: sorted based on partial bm25 score (smallest to largest) (Updated 20130213 by Wei)
	  // 5: sorted based on a current machine learned model (Updated 20130805 night by Wei)
	  // 6: maybe useful in sorting

	  // The output file
	  string outputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kRandomlySelectedPostingListOutputFileName));
	  ofstream outputFileHandler;
	  outputFileHandler.open(outputFileName.c_str());

	  const int NUM_OF_TERMS = term_with_selected_postings_map_.size();
	  int number_terms_already_DONE = 0;

	  // Need the average document length for computing BM25 scores.
	  // This meta info(including total_document_lengths, total_num_docs and average_doc_length) maybe wrong due to some shortcut I make.(Wei: 2012/06/28)
	  long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
	  long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
	  int average_doc_length = total_document_lengths / total_num_docs;

	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  bool matchFlag = false;
		  if(term_with_selected_postings_map_.count(curr_term) > 0){
			  matchFlag = true;
			  cout << "ready to process term: " << curr_term << endl;
		  }

		  if (matchFlag)
		  {
			cout << "processing term: " << curr_term << " begins..."<< endl;
			number_terms_already_DONE += 1;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			int num_docs_in_original_list = index_->curr_list_data()->num_docs();

			cout << "# of postings in original inverted index: " << num_docs_in_original_list << endl;

			// Decompress the whole posting list into memory so selected postings can be addressed
			// by their offset within the list.
			IndexEntry* curr_term_index_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int curr_term_index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  assert(curr_term_index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_posting_index_entry = curr_term_index_entry_buffer[curr_term_index_entry_offset];
			  curr_posting_index_entry.doc_id = index_->curr_doc_id();
			  curr_posting_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  ++curr_term_index_entry_offset;
			}  // No more postings in the list.

			// init our scoring component variable called: doc_id_score_comparatorWei
			// Current version since 2013/08/30 afternoon by Wei at school
			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, 6, curr_term, freq_first_factor_probability_map_, terms_with_corresponding_species_belonging_to_map_, query_length_probability_map_, class_label_with_lower_bounds_map_,class_label_with_probability_map_);

			// sub-step1: length of this term's inverted list (0.0 when unknown).
			if (query_terms_length_of_the_inverted_index_map_.count(curr_term) > 0){
				doc_id_score_comparatorWei.valueOfCurrentPostingLengthOfTheInvertedList_ = query_terms_length_of_the_inverted_index_map_[curr_term];
			}
			else{
				doc_id_score_comparatorWei.valueOfCurrentPostingLengthOfTheInvertedList_ = 0.0;
			}

			// sub-step2: term frequency in the collection (0.0 when unknown).
			if(query_terms_term_freq_in_collection_map_.count(curr_term) > 0){
				doc_id_score_comparatorWei.valueOfCurrentPostingTermFreqInCollection_ = query_terms_term_freq_in_collection_map_[curr_term];
			}
			else{
				doc_id_score_comparatorWei.valueOfCurrentPostingTermFreqInCollection_ = 0.0;
			}

			// sub-step3: term frequency in the query log (0.0 when unknown).
			if (query_terms_term_freq_in_queries_map_.count(curr_term) > 0){
				doc_id_score_comparatorWei.valueOfcurrentPostingTermFreqInQueries_ = query_terms_term_freq_in_queries_map_[curr_term];
			}
			else{
				doc_id_score_comparatorWei.valueOfcurrentPostingTermFreqInQueries_ = 0.0;
			}

			// map<string,map<int,string> > term_with_selected_postings_map_;
			// Hoisted: the original loop re-evaluated term_with_selected_postings_map_[curr_term]
			// on every '!= end()' check (a string-keyed map search per iteration) plus once more
			// for each of the two .size() uses below.
		    map<int, string>& selected_postings = term_with_selected_postings_map_[curr_term];
		    for (map<int, string>::iterator iter = selected_postings.begin(); iter != selected_postings.end(); ++iter) {
		    	// Render the docID in decimal; str() yields the same text the original extracted
		    	// back out with operator>>.
		    	stringstream ss;
		    	ss << curr_term_index_entry_buffer[ iter->first ].doc_id;
		    	string currentDocIDInStringFormat = ss.str();

		    	// NOTE(review): operator[] inserts a default (0.0) entry for unseen docIDs; kept
		    	// as-is because the map's growth may be relied upon elsewhere — confirm.
		    	doc_id_score_comparatorWei.XDocValue_ = docID_With_Xdoc_Value_goodTurning_map_[ currentDocIDInStringFormat ];

		    	float partialBM25Score = doc_id_score_comparatorWei.score( curr_term_index_entry_buffer[ iter->first ] );

		    	float currentPostingValue1 = doc_id_score_comparatorWei.get_1_FactorProbabilityOriginalValue();
				float currentPostingValue2 = 0.0; // baseline: factor 2 not produced individually
				float currentPostingValue3 = 0.0; // baseline: factor 3 not produced individually
				float currentPostingValue4 = doc_id_score_comparatorWei.get_2_3_FactorProbabilitiesCombinedOriginalValueBaseline();
				float currentPostingValue5 = 0.0; // baseline: 1+3 combination not produced
				float currentPostingValue6 = doc_id_score_comparatorWei.get_1_2_3_FactorProbabilitiesCombinedOriginalValueBaseline();

				outputFileHandler << curr_term << " " << iter->first << " " << currentDocIDInStringFormat << " "<< partialBM25Score << " " << currentPostingValue1 << " " << currentPostingValue2 << " " << currentPostingValue3 << " " << currentPostingValue4 << " " << currentPostingValue5 << " " << currentPostingValue6 << endl;

		    }
		    cout << "# of postings been selected and recorded: " << selected_postings.size() << endl;

			delete[] curr_term_index_entry_buffer; // delete and free the buffer we just used (important)

			cout << "processing term: " << curr_term << " ends."<< endl;
			cout << endl;

		    // If all the query terms have been processed. Then the system can exit and no need to continue to traverse the whole index's lexicon.
		    if(number_terms_already_DONE == NUM_OF_TERMS){
				  break;
		    }

		  }
	  }
	  outputFileHandler.close();
	  cout << "outputFileName: " << outputFileName << endl;
}


void LayeredIndexGenerator::ProduceProbabilitiesForRandomlySelectedPostings(){
	  cout << "LayeredIndexGenerator::ProduceProbabilitiesForRandomlySelectedPostings() called." << endl;
	  // For every (term, posting offset) pair in 'term_with_selected_postings_map_', loads the
	  // term's full posting list into memory, scores the selected postings with
	  // DocIdScoreComparisonWei (sorting method code 5, the machine-learned model), and writes
	  // one line per posting:
	  //   "<term> <offset> <docID> <partialBM25> <v1> <v2> <v3> <v4> <v5> <v6>"
	  // Unlike the Baseline variant, all six factor probabilities are produced.
	  //
	  // currently existing sorting methods & values
	  // 1: Not sorted at all
	  // 2: sorted based on partial bm25 score (largest to smallest)
	  // 3: sorted based on a specific machine learning model (Updated 20130118 by Wei)
	  // 4: sorted based on partial bm25 score (smallest to largest) (Updated 20130213 by Wei)
	  // 5: sorted based on a current machine learned model (Updated 20130805 night by Wei)

	  // The output file
	  string outputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kRandomlySelectedPostingListOutputFileName));
	  ofstream outputFileHandler;
	  outputFileHandler.open(outputFileName.c_str());

	  const int NUM_OF_TERMS = term_with_selected_postings_map_.size();
	  int number_terms_already_DONE = 0;

	  // Need the average document length for computing BM25 scores.
	  // This meta info(including total_document_lengths, total_num_docs and average_doc_length) maybe wrong due to some shortcut I make.(Wei: 2012/06/28)
	  long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
	  long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
	  int average_doc_length = total_document_lengths / total_num_docs;

	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  bool matchFlag = false;
		  if(term_with_selected_postings_map_.count(curr_term) > 0){
			  matchFlag = true;
			  cout << "ready to process term: " << curr_term << endl;
		  }

		  if (matchFlag)
		  {
			cout << "processing term: " << curr_term << " begins..."<< endl;
			number_terms_already_DONE += 1;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			int num_docs_in_original_list = index_->curr_list_data()->num_docs();

			cout << "# of postings in original inverted index: " << num_docs_in_original_list << endl;

			// Decompress the whole posting list into memory so selected postings can be addressed
			// by their offset within the list.
			IndexEntry* curr_term_index_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int curr_term_index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  assert(curr_term_index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_posting_index_entry = curr_term_index_entry_buffer[curr_term_index_entry_offset];
			  curr_posting_index_entry.doc_id = index_->curr_doc_id();
			  curr_posting_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  ++curr_term_index_entry_offset;
			}  // No more postings in the list.

			// init our scoring component variable called: doc_id_score_comparatorWei
			// Current version since 2013/08/07 morning by Wei at school
			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, 5, curr_term, freq_first_factor_probability_map_, terms_with_corresponding_species_belonging_to_map_, query_length_probability_map_, class_label_with_lower_bounds_map_, class_label_with_probability_map_);

			// sub-step1: length of this term's inverted list (0.0 when unknown).
			if (query_terms_length_of_the_inverted_index_map_.count(curr_term) > 0){
				doc_id_score_comparatorWei.valueOfCurrentPostingLengthOfTheInvertedList_ = query_terms_length_of_the_inverted_index_map_[curr_term];
			}
			else{
				doc_id_score_comparatorWei.valueOfCurrentPostingLengthOfTheInvertedList_ = 0.0;
			}

			// sub-step2: term frequency in the collection (0.0 when unknown).
			if(query_terms_term_freq_in_collection_map_.count(curr_term) > 0){
				doc_id_score_comparatorWei.valueOfCurrentPostingTermFreqInCollection_ = query_terms_term_freq_in_collection_map_[curr_term];
			}
			else{
				doc_id_score_comparatorWei.valueOfCurrentPostingTermFreqInCollection_ = 0.0;
			}

			// sub-step3: term frequency in the query log (0.0 when unknown).
			if (query_terms_term_freq_in_queries_map_.count(curr_term) > 0){
				doc_id_score_comparatorWei.valueOfcurrentPostingTermFreqInQueries_ = query_terms_term_freq_in_queries_map_[curr_term];
			}
			else{
				doc_id_score_comparatorWei.valueOfcurrentPostingTermFreqInQueries_ = 0.0;
			}

			// map<string,map<int,string> > term_with_selected_postings_map_;
			// Hoisted: the original loop re-evaluated term_with_selected_postings_map_[curr_term]
			// on every '!= end()' check (a string-keyed map search per iteration) plus once more
			// for each of the two .size() uses below.
		    map<int, string>& selected_postings = term_with_selected_postings_map_[curr_term];
		    for (map<int, string>::iterator iter = selected_postings.begin(); iter != selected_postings.end(); ++iter) {
		    	// Render the docID in decimal; str() yields the same text the original extracted
		    	// back out with operator>>.
		    	stringstream ss;
		    	ss << curr_term_index_entry_buffer[ iter->first ].doc_id;
		    	string currentDocIDInStringFormat = ss.str();

		    	// NOTE(review): operator[] inserts a default (0.0) entry for unseen docIDs; kept
		    	// as-is because the map's growth may be relied upon elsewhere — confirm.
		    	doc_id_score_comparatorWei.XDocValue_ = docID_With_Xdoc_Value_goodTurning_map_[ currentDocIDInStringFormat ];

		    	float partialBM25Score = doc_id_score_comparatorWei.score( curr_term_index_entry_buffer[ iter->first ] );

		    	float currentPostingValue1 = doc_id_score_comparatorWei.get_1_FactorProbabilityOriginalValue();
				float currentPostingValue2 = doc_id_score_comparatorWei.get_2_FactorProbabilityOriginalValue();
				float currentPostingValue3 = doc_id_score_comparatorWei.get_3_FactorProbabilityOriginalValue();
				float currentPostingValue4 = doc_id_score_comparatorWei.get_2_3_FactorProbabilitiesCombinedOriginalValue();
				float currentPostingValue5 = doc_id_score_comparatorWei.get_1_3_FactorProbabilitiesCombinedOriginalValue();
				float currentPostingValue6 = doc_id_score_comparatorWei.get_1_2_3_FactorProbabilitiesCombinedOriginalValue();

				outputFileHandler << curr_term << " " << iter->first << " " << currentDocIDInStringFormat << " "<< partialBM25Score << " " << currentPostingValue1 << " " << currentPostingValue2 << " " << currentPostingValue3 << " " << currentPostingValue4 << " " << currentPostingValue5 << " " << currentPostingValue6 << endl;

		    }
		    cout << "# of postings been selected and recorded: " << selected_postings.size() << endl;

			delete[] curr_term_index_entry_buffer; // delete and free the buffer we just used (important)

			cout << "processing term: " << curr_term << " ends."<< endl;
			cout << endl;

		    // If all the query terms have been processed. Then the system can exit and no need to continue to traverse the whole index's lexicon.
		    if(number_terms_already_DONE == NUM_OF_TERMS){
				  break;
		    }

		  }
	  }
	  outputFileHandler.close();
	  cout << "outputFileName: " << outputFileName << endl;
}

void LayeredIndexGenerator::CreateHistogram(vector<string> & terms, bool debugFlag, int sortingMethodCodeForTheTerm) {
	  cout << "temporary NOT using during the construction of the score assigner" << endl;

	  /*
	  // currently existing sorting methods & values
	  // 1: Not sorted at all
	  // 2: sorted based on partial bm25 score (largest to smallest)
	  // 3: sorted based on a specific machine learning model (Updated 20130118 by Wei)
	  // 4: sorted based on partial bm25 score (smallest to largest) (Updated 20130213 by Wei)
	  // 5: sorted based on a current machine learned model (Updated 20130805 night by Wei)

	  // current version
	  // map for (key,value) pair
	  // Key: the float point score/probability (let the c++ system itself to turn the precision)
	  // Value: freq count for the value
	  map<float, int> histogramMap1;
	  map<float, int> histogramMap2;
	  map<float, int> histogramMap3;
	  map<float, int> histogramMap4;
	  map<float, int> histogramMap5;
	  map<float, int> histogramMap6;

	  const int NUM_OF_TERMS = terms.size();
	  int number_terms_already_DONE = 0;

	  // step1: this stage is for me to load all the inverted index data into the main memory

	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  bool matchFlag = false;
		  for(unsigned int tempCounter = 0; tempCounter < terms.size(); tempCounter ++){
			  if (curr_term == terms[tempCounter]){
				  matchFlag = true;
				  break;
			  }
		  }


		  // The inverted index is NOT pruned one by one. But need to load them all into the main memory.
		  if (matchFlag)
		  {
			cout << "processing query term: " << curr_term << endl;
			number_terms_already_DONE += 1;

			// TODO: It's better to reuse the buffer, and resize only when necessary.
			int num_docs_in_original_list = index_->curr_list_data()->num_docs();

			if(debugFlag){
				cout << "# of docs in original inverted index:" << num_docs_in_original_list << endl;
			}

			// put the postings into the memory
			IndexEntry* curr_term_index_entry_buffer = new IndexEntry[num_docs_in_original_list];
			int curr_term_index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  // current version
			  IndexEntry& curr_posting_index_entry = curr_term_index_entry_buffer[curr_term_index_entry_offset];
			  assert(curr_term_index_entry_offset < num_docs_in_original_list);
			  curr_posting_index_entry.doc_id = index_->curr_doc_id();
			  curr_posting_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  ++curr_term_index_entry_offset;

			}  // No more postings in the list.

			// Need the average document length for computing BM25 scores.
			// This meta info(including total_document_lengths, total_num_docs and average_doc_length) maybe wrong due to some shortcut I make.(Wei: 2012/06/28)
			long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
			long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
			int average_doc_length = total_document_lengths / total_num_docs;

			// init our scoring component variable called: doc_id_score_comparatorWei

			// Current version since 2013/08/04
			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, sortingMethodCodeForTheTerm, curr_term, freq_first_factor_probability_map_, terms_with_corresponding_species_belonging_to_map_, query_length_probability_map_, docID_With_Xdoc_Value_goldStandarded_map_, docID_With_Xdoc_Value_1D_map_, docID_With_Xdoc_Value_2D_map_, docID_With_Xdoc_Value_goodTurning_map_);

			// OLD version and has been dumpped since 2013/08/04 afternoon by Wei
			// DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, sortingMethodCodeForTheTerm,curr_term,queryTermsTrueProbabilityDistributionMap,queryTerms1DProbabilityDistributionMap,queryTerms2DProbabilityDistributionMap,queryTermsGoodTuringProbabilityDistributionMap);

			// sub-step1:
			if (query_terms_length_of_the_inverted_index_map_.count(curr_term) > 0){
				doc_id_score_comparatorWei.valueOfCurrentPostingLengthOfTheInvertedList_ = query_terms_length_of_the_inverted_index_map_[curr_term];
			}
			else{
				doc_id_score_comparatorWei.valueOfCurrentPostingLengthOfTheInvertedList_ = 0.0;
			}

			// sub-step2:
			if(query_terms_term_freq_in_collection_map_.count(curr_term) > 0){
				doc_id_score_comparatorWei.valueOfCurrentPostingTermFreqInCollection_ = query_terms_term_freq_in_collection_map_[curr_term];
			}
			else{
				doc_id_score_comparatorWei.valueOfCurrentPostingTermFreqInCollection_ = 0.0;
			}

			// sub-step3:
			if (query_terms_term_freq_in_queries_map_.count(curr_term) > 0){
				doc_id_score_comparatorWei.valueOfcurrentPostingTermFreqInQueries = query_terms_term_freq_in_queries_map_[curr_term];
			}
			else{
				doc_id_score_comparatorWei.valueOfcurrentPostingTermFreqInQueries = 0.0;
			}

			for (int i = 0; i < curr_term_index_entry_offset; ++i) {
				// load the values into the dict
				float partialBM25Score = doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]);
				float currentPostingValue1 = doc_id_score_comparatorWei.get_1_FactorProbabilityOriginalValue();
				float currentPostingValue2 = doc_id_score_comparatorWei.get_2_FactorProbabilityOriginalValue();
				float currentPostingValue3 = doc_id_score_comparatorWei.get_3_FactorProbabilityOriginalValue();
				float currentPostingValue4 = doc_id_score_comparatorWei.get_2_3_FactorProbabilitiesCombinedOriginalValue();
				float currentPostingValue5 = doc_id_score_comparatorWei.get_1_3_FactorProbabilitiesCombinedOriginalValue();
				float currentPostingValue6 = doc_id_score_comparatorWei.get_1_2_3_FactorProbabilitiesCombinedOriginalValue();


				// for debug ONLY
				// cout << "debug begins..." << endl;
				// cout << "partialBM25Score: " << partialBM25Score << endl;
				// cout << "currentPostingValue1: " << currentPostingValue1 << endl;
				// cout << "currentPostingValue2: " << currentPostingValue2 << endl;
				// cout << "currentPostingValue3: " << currentPostingValue3 << endl;
				// cout << "currentPostingValue4: " << currentPostingValue4 << endl;
				// cout << "currentPostingValue5: " << currentPostingValue5 << endl;
				// cout << "currentPostingValue6: " << currentPostingValue6 << endl;
				// cout << "debug ends." << endl;
				// cout << endl;


				// for DEBUG only (maybe useful)
				// cout << "docID: " << curr_term_index_entry_buffer[i].doc_id << ", score: " << currentPostingScore << endl;


				// The following logic is going to update the data structure histogramMap1, histogramMap2, histogramMap3, histogramMap4 and histogramMap5 separately
				if (histogramMap1.count( currentPostingValue1 )>0){
					histogramMap1[ currentPostingValue1 ] += 1;
				}
				else{
					histogramMap1[ currentPostingValue1 ] = 1;
				}

				if (histogramMap2.count( currentPostingValue2 )>0){
					histogramMap2[ currentPostingValue2 ] += 1;
				}
				else{
					histogramMap2[ currentPostingValue2 ] = 1;
				}

				if (histogramMap3.count( currentPostingValue3 )>0){
					histogramMap3[ currentPostingValue3 ] += 1;
				}
				else{
					histogramMap3[ currentPostingValue3 ] = 1;
				}

				if (histogramMap4.count( currentPostingValue4 )>0){
					histogramMap4[ currentPostingValue4 ] += 1;
				}
				else{
					histogramMap4[ currentPostingValue4 ] = 1;
				}

				if (histogramMap5.count( currentPostingValue5 )>0){
					histogramMap5[ currentPostingValue5 ] += 1;
				}
				else{
					histogramMap5[ currentPostingValue5 ] = 1;
				}

				if (histogramMap6.count( currentPostingValue6 )>0){
					histogramMap6[ currentPostingValue6 ] += 1;
				}
				else{
					histogramMap6[ currentPostingValue6 ] = 1;
				}


			}

			delete[] curr_term_index_entry_buffer; // delete and free the buffer we just used (important)


			cout << "histogramContainer1.size():" << histogramMap1.size() << endl;
			cout << "histogramContainer2.size():" << histogramMap2.size() << endl;
			cout << "histogramContainer3.size():" << histogramMap3.size() << endl;
			cout << "histogramContainer4.size():" << histogramMap4.size() << endl;
			cout << "histogramContainer5.size():" << histogramMap5.size() << endl;
			cout << "histogramContainer6.size():" << histogramMap6.size() << endl;
			cout << endl;

		    // If all the query terms have been processed. Then the system can exit and no need to continue to traverse the whole index's lexicon.
		    if(number_terms_already_DONE == NUM_OF_TERMS){
				  break;
		    }

		  }
	  }

	  // The logic for outputting the histogram files from memory to disk
	  string outputFileName1 = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kHistogramOutputFileName1));
	  string outputFileName2 = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kHistogramOutputFileName2));
	  string outputFileName3 = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kHistogramOutputFileName3));
	  string outputFileName4 = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kHistogramOutputFileName4));
	  string outputFileName5 = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kHistogramOutputFileName5));
	  string outputFileName6 = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kHistogramOutputFileName6));

	  ofstream histogramOutputFileHandler1;
	  ofstream histogramOutputFileHandler2;
	  ofstream histogramOutputFileHandler3;
	  ofstream histogramOutputFileHandler4;
	  ofstream histogramOutputFileHandler5;
	  ofstream histogramOutputFileHandler6;

	  histogramOutputFileHandler1.open(outputFileName1.c_str());
	  histogramOutputFileHandler2.open(outputFileName2.c_str());
	  histogramOutputFileHandler3.open(outputFileName3.c_str());
	  histogramOutputFileHandler4.open(outputFileName4.c_str());
	  histogramOutputFileHandler5.open(outputFileName5.c_str());
	  histogramOutputFileHandler6.open(outputFileName6.c_str());


      map<float, int>::iterator iter;
      // output for the 1st histogramMap1
      for (iter = histogramMap1.begin(); iter != histogramMap1.end(); iter++) {
    	  histogramOutputFileHandler1 << (*iter).first << " " << (*iter).second << endl;
    	  // cout << (*iter).first << " " << (*iter).second << endl;
      }

      // output for the 2ed histogramMap2
      for (iter = histogramMap2.begin(); iter != histogramMap2.end(); iter++) {
    	  histogramOutputFileHandler2 << (*iter).first << " " << (*iter).second << endl;
    	  // cout << (*iter).first << " " << (*iter).second << endl;
      }

      // output for the 3rd histogramMap3
      for (iter = histogramMap3.begin(); iter != histogramMap3.end(); iter++) {
    	  histogramOutputFileHandler3 << (*iter).first << " " << (*iter).second << endl;
    	  // cout << (*iter).first << " " << (*iter).second << endl;
      }

      // output for the 4th histogramMap4
      for (iter = histogramMap4.begin(); iter != histogramMap4.end(); iter++) {
    	  histogramOutputFileHandler4 << (*iter).first << " " << (*iter).second << endl;
    	  // cout << (*iter).first << " " << (*iter).second << endl;
      }

      // output for the 5th histogramMap4
      for (iter = histogramMap5.begin(); iter != histogramMap5.end(); iter++) {
    	  histogramOutputFileHandler5 << (*iter).first << " " << (*iter).second << endl;
    	  // cout << (*iter).first << " " << (*iter).second << endl;
      }

      // output for the 6th histogramMap4
      for (iter = histogramMap6.begin(); iter != histogramMap6.end(); iter++) {
    	  histogramOutputFileHandler6 << (*iter).first << " " << (*iter).second << endl;
    	  // cout << (*iter).first << " " << (*iter).second << endl;
      }

      histogramOutputFileHandler1.close();
      histogramOutputFileHandler2.close();
      histogramOutputFileHandler3.close();
      histogramOutputFileHandler4.close();
      histogramOutputFileHandler5.close();
      histogramOutputFileHandler6.close();

      cout << "The output histogram files are:" << endl;
      cout << "outputFileName1: " << outputFileName1 << endl;
      cout << "outputFileName2: " << outputFileName2 << endl;
      cout << "outputFileName3: " << outputFileName3 << endl;
      cout << "outputFileName4: " << outputFileName4 << endl;
      cout << "outputFileName5: " << outputFileName5 << endl;
      cout << "outputFileName6: " << outputFileName6 << endl;
      */
}

// ****************
// The output index will ONLY contain the query terms you entered.
void LayeredIndexGenerator::CreateExternalScoreFileForEachQueryTerm(vector<string> & queryTerms, bool debugFlag, int sortingMethodCodeForTheTerm) {
	  // For every term in 'queryTerms' found in the index lexicon, load the full postings list into main
	  // memory, score each posting, order the list according to 'sortingMethodCodeForTheTerm', and write
	  // the raw float scores (one per posting, in sorted order) to a per-term binary file.
	  //
	  // currently existing sorting methods
	  // 1: Not sorted at all
	  // 2: sorted based on partial bm25 score (largest to smallest)
	  // 3: sorted based on a specific machine learning model (Updated 20130118 by Wei)
	  // 4: sorted based on partial bm25 score (smallest to largest) (Updated 20130213 by Wei)

	  const int NUMBER_QUERY_TERMS = queryTerms.size();
	  int number_query_terms_already_deal_with = 0;

	  // Use NextTerm() to traverse the whole index's lexicon; only matching terms are processed.
	  while (index_->NextTerm())
	  {
		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  bool matchFlag = (find(queryTerms.begin(), queryTerms.end(), curr_term) != queryTerms.end());

		  if (matchFlag)
		  {
			cout << "query term: " << curr_term << endl;
			number_query_terms_already_deal_with += 1;

			int num_docs_in_original_list = index_->curr_list_data()->num_docs();

			// Binary output file: one raw float score per posting, written in sorted order.
			ssize_t write_ret;
			string termScoreOutputBinaryFileName = "/home/diaosi/outputDirForTermScores/" + curr_term.substr(0,1) + "/" + curr_term + "_" + make_the_value_into_string_format(num_docs_in_original_list) + ".binary";
			int binary_scores_fd_ = open(termScoreOutputBinaryFileName.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
			if (binary_scores_fd_ < 0) {
				// Previously the failed descriptor (-1) was written to silently. Report the failure, but
				// keep consuming this term's postings so the lexicon traversal stays consistent.
				cerr << "Failed to open score output file: " << termScoreOutputBinaryFileName << endl;
			}

			if(debugFlag){
				cout << "number of docs in orginal inverted index:" << num_docs_in_original_list << endl;
			}

			// Load the whole postings list into main memory. A vector frees its storage on every exit
			// path, unlike the raw new[]/delete[] pair it replaces.
			vector<IndexEntry> curr_term_index_entry_buffer(num_docs_in_original_list);
			int curr_term_index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  // Check bounds BEFORE indexing into the buffer (the reference used to be taken first).
			  assert(curr_term_index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_posting_index_entry = curr_term_index_entry_buffer[curr_term_index_entry_offset];
			  curr_posting_index_entry.doc_id = index_->curr_doc_id();
			  curr_posting_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  ++curr_term_index_entry_offset;
			}  // No more postings in the list.

			// Need the average document length for computing BM25 scores.
			// This meta info(including total_document_lengths, total_num_docs and average_doc_length) maybe wrong due to some shortcut I make.(Wei: 2012/06/28)
			long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
			long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
			int average_doc_length = total_document_lengths / total_num_docs;

			// The comparator implements the sorting methods listed at the top of this function.
			map<string,float> queryTermsProbabilityDistributionMap;
			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, sortingMethodCodeForTheTerm,curr_term,queryTermsProbabilityDistributionMap);

			if(debugFlag){
				// Print all (docID, score) pairs while still in docID order.
				cout << "Printing all (docID,score) pairs ordered by docID: " << curr_term << endl;
				for (int i = 0; i < curr_term_index_entry_offset; ++i) {
					cout << "docID: " << curr_term_index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]) << endl;
				}
			}

			// Standard sort, ordered by the selected score computation mechanism.
			sort(curr_term_index_entry_buffer.begin(), curr_term_index_entry_buffer.begin() + curr_term_index_entry_offset, doc_id_score_comparatorWei);

			if(debugFlag){
				// Print all (docID, score) pairs again, now in score order.
				cout << "Printing all (docID,score) pairs ordered by some kind of score: " << curr_term << endl;
				for (int i = 0; i < curr_term_index_entry_offset; ++i) {
					cout << "docID: " << curr_term_index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]) << endl;
				}
			}

			// Write the sorted scores out as raw floats, one write per posting.
			cout << "Outputing the scores to a specific file for the term: " << curr_term << endl;
			if (binary_scores_fd_ >= 0) {
				for (int i = 0; i < curr_term_index_entry_offset; ++i) {
					float readyToWriteScore = doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]);
					write_ret = write(binary_scores_fd_, &readyToWriteScore, sizeof(readyToWriteScore));
					assert(write_ret == static_cast<ssize_t>(sizeof(readyToWriteScore)));
				}
				close(binary_scores_fd_);
			}

		    // If all the query terms have been processed, stop traversing the lexicon early.
		    if(number_query_terms_already_deal_with == NUMBER_QUERY_TERMS){
				  break;
		    }

		  }
	  }

}
// ****************
// Updated by Wei 2013/02/16. NOTE: this algorithm loads ALL the postings into main memory at once, so it does not scale to large indexes.
// The output index will ONLY contain the query terms you entered.
void LayeredIndexGenerator::CreatePrunedIndexForMultipleTermsBasedOnUniversalScoreImportanceOLDAndNotUsed(vector<string> & queryTerms, bool debugFlag,bool store_computed_score_into_external_index_flag,float percentageToKeepOfTheWholeIndex,int pruningMethodCodeOfTheWholeIndex) {
	  // Builds a pruned index restricted to 'queryTerms'. The pruning is "universal": a global posting
	  // budget (percentageToKeepOfTheWholeIndex of ALL loaded postings) is shared across the lists, and
	  // the globally lowest-scoring postings are removed one at a time via a min-heap over each term's
	  // current minimum score. All postings of all query terms are held in main memory at once.

	  // Open the per-term pruning log and the completely-pruned-terms log.
	  string systemLogFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kSystemLogFileName));
	  ofstream systemLogFileHandler;
	  systemLogFileHandler.open(systemLogFileName.c_str());
	  systemLogFileHandler << "term" << " #OfPostingsInTheOriginalList" << " #OfPostingsPruned" << " #OfPostingsLeft" << " currentMinValue" << " currentMinDocID" << endl;

	  string systemLogCompletelyPrunedTermsFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kSystemLogFileNameCompletelyPrunedTerms));
	  ofstream systemLogCompletelyPrunedTermsFileHandler;
	  systemLogCompletelyPrunedTermsFileHandler.open(systemLogCompletelyPrunedTermsFileName.c_str());
	  systemLogCompletelyPrunedTermsFileHandler << "completely_being_pruned_terms" << endl;

	  // currently existing pruning methods
	  // 1: Not sorted at all
	  // 2: sorted based on partial bm25 score
	  // 3: sorted based on a specific machine learning model(Updated 20130118 by Wei)

	  const int NUMBER_QUERY_TERMS = queryTerms.size();
	  int number_query_terms_already_deal_with = 0;

	  // Score-sorted postings of each query term, keyed by term.
	  map<string, vector<DocIDWithPartialBM25Score> > mapForStoringCurrTermIndexEntryBufferOfEachTerm;

	  // One bookkeeping entry per term (posting counts plus the current minimum score/docID); heapified below.
	  vector<aux_pruning_term_entry> vectoreForStoringAuxPruningTermEntry;

	  if (debugFlag){
		  cout << "mapForStoringCurrTermIndexEntryBufferOfEachTerm.max_size():" << mapForStoringCurrTermIndexEntryBufferOfEachTerm.max_size() << endl;
		  cout << "vectoreForStoringAuxPruningTermEntry.max_size():" << vectoreForStoringAuxPruningTermEntry.max_size() << endl;
	  }

	  // step1: load the postings of every matching term into main memory, score-sorted.
	  // Use NextTerm() to traverse the whole index's lexicon.
	  while (index_->NextTerm())
	  {

		  string curr_term = string(index_->curr_term(), index_->curr_term_len());
		  bool matchFlag = false;
		  float percentageForTheTerm = -1.0;
		  int pruningMethodCodeForTheTerm = -1;

		  for(unsigned int tempCounter = 0; tempCounter < queryTerms.size(); tempCounter ++){
			  if (curr_term == queryTerms[tempCounter]){
				  matchFlag = true;
				  percentageForTheTerm = percentageToKeepOfTheWholeIndex;
				  pruningMethodCodeForTheTerm = pruningMethodCodeOfTheWholeIndex;
				  if(debugFlag){
					  cout << "**********pruning preparation analysis step1" << endl;
					  cout << "pruning query term:" << queryTerms[tempCounter] << endl;
					  cout << "percentage to keep:" << percentageForTheTerm << endl;
					  cout << "pruning method code:" << pruningMethodCodeForTheTerm << endl;
				  }
				  break;
			  }
		  }

		  if (matchFlag)
		  {
			cout << "query term: " << curr_term << endl;
			number_query_terms_already_deal_with += 1;

			int num_docs_in_original_list = index_->curr_list_data()->num_docs();
			if(debugFlag){
				cout << "number of docs in orginal inverted index:" << num_docs_in_original_list << endl;
			}

			// Put the postings into memory. A vector frees its storage on every exit path, unlike the
			// raw new[]/delete[] pair it replaces.
			vector<IndexEntry> curr_term_index_entry_buffer(num_docs_in_original_list);
			int curr_term_index_entry_offset = 0;

			while (index_->NextDocId())
			{
			  // Check bounds BEFORE indexing into the buffer (the reference used to be taken first).
			  assert(curr_term_index_entry_offset < num_docs_in_original_list);
			  IndexEntry& curr_posting_index_entry = curr_term_index_entry_buffer[curr_term_index_entry_offset];
			  curr_posting_index_entry.doc_id = index_->curr_doc_id();
			  curr_posting_index_entry.frequency = index_->curr_list_data()->GetFreq();
			  ++curr_term_index_entry_offset;
			}  // No more postings in the list.

			// Need the average document length for computing BM25 scores.
			// This meta info(including total_document_lengths, total_num_docs and average_doc_length) maybe wrong due to some shortcut I make.(Wei: 2012/06/28)
			long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
			long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
			int average_doc_length = total_document_lengths / total_num_docs;

			// The comparator implements the pruning methods listed at the top of this function.
			map<string,float> queryTermsProbabilityDistributionMap;
			DocIdScoreComparisonWei doc_id_score_comparatorWei(index_->index_reader()->document_map(), num_docs_in_original_list, average_doc_length, total_num_docs, pruningMethodCodeForTheTerm,curr_term,queryTermsProbabilityDistributionMap);

			if(debugFlag){
				// Print all (docID, score) pairs while still in docID order.
				cout << "Printing all (docID,score) pairs ordered by docID: " << curr_term << endl;
				for (int i = 0; i < curr_term_index_entry_offset; ++i) {
					cout << "docID: " << curr_term_index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]) << endl;
				}
			}

			// Standard sort, ordered by the selected score computation mechanism.
			sort(curr_term_index_entry_buffer.begin(), curr_term_index_entry_buffer.begin() + curr_term_index_entry_offset, doc_id_score_comparatorWei);

			if(debugFlag){
				// Print all (docID, score) pairs again, now in score order.
				cout << "Printing all (docID,score) pairs ordered by some kind of score: " << curr_term << endl;
				for (int i = 0; i < curr_term_index_entry_offset; ++i) {
					cout << "docID: " << curr_term_index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]) << endl;
				}
			}

			// Copy (docID, frequency, score) into the per-term map, preserving the score-sorted order.
			for (int i = 0; i < curr_term_index_entry_offset; ++i) {
				DocIDWithPartialBM25Score curr_docID_with_score;
				curr_docID_with_score.doc_id = curr_term_index_entry_buffer[i].doc_id;
				curr_docID_with_score.frequency = curr_term_index_entry_buffer[i].frequency;
				curr_docID_with_score.score = doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[i]);
				mapForStoringCurrTermIndexEntryBufferOfEachTerm[curr_term].push_back(curr_docID_with_score);
			}

			if (curr_term_index_entry_offset > 0) {
				// Previously [offset-1] was indexed unconditionally, which is out of bounds for an empty list.
				cout << "Min value for the current list:" << doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[curr_term_index_entry_offset-1]) << endl;

				aux_pruning_term_entry curr_term_pruning_aux_entry(curr_term, 0, curr_term_index_entry_offset, doc_id_score_comparatorWei.score(curr_term_index_entry_buffer[curr_term_index_entry_offset-1]), curr_term_index_entry_buffer[curr_term_index_entry_offset-1].doc_id, true);
				vectoreForStoringAuxPruningTermEntry.push_back(curr_term_pruning_aux_entry);
			} else {
				cerr << "Skipping empty postings list for term: " << curr_term << endl;
			}

		    // If all the query terms have been processed, stop traversing the lexicon early.
		    if(number_query_terms_already_deal_with == NUMBER_QUERY_TERMS){
				  break;
		    }

		    // TODO(review): this finalizes the embedded Python interpreter once per matching term, and is
		    // skipped entirely for the last query term because the break above runs first. Verify the
		    // intended interpreter lifetime.
		    if (pruningMethodCodeForTheTerm == 3){
			    Py_Finalize();	// if the python modules have been used, we should Py_Finalize() it.
		    }

		  }

	  }


	  uint32_t num_of_postings = 0;
	  uint32_t num_of_postings_left = 0;
	  uint32_t num_of_postings_pruned = 0;

	  // step2: use the aux data structures and the in-memory postings to do the universal pruning.
	  cout << "vectoreForStoringAuxPruningTermEntry.size():" << vectoreForStoringAuxPruningTermEntry.size() << endl;
	  cout << "mapForStoringCurrTermIndexEntryBufferOfEachTerm.size():" << mapForStoringCurrTermIndexEntryBufferOfEachTerm.size() << endl;

	  // Count the total number of postings loaded across all query terms.
	  for( map<string, vector<DocIDWithPartialBM25Score> >::const_iterator it = mapForStoringCurrTermIndexEntryBufferOfEachTerm.begin(); it != mapForStoringCurrTermIndexEntryBufferOfEachTerm.end(); ++it )
	  {
	      num_of_postings += it->second.size();
	  }

	  num_of_postings_left = num_of_postings * percentageToKeepOfTheWholeIndex;
	  num_of_postings_pruned = num_of_postings - num_of_postings_left;
	  cout << "# of postings in total: " << num_of_postings << endl;
	  cout << "# of postings left: " << num_of_postings_left << endl;
	  cout << "# of postings pruned: " << num_of_postings_pruned << endl;

	  // Min-heap ordered by each term's current minimum score: the smallest value sits at the front.
	  make_heap(vectoreForStoringAuxPruningTermEntry.begin(), vectoreForStoringAuxPruningTermEntry.end(), MinValueCompare());

	  // Guard: front() on an empty vector is undefined; skip the pruning loop when no term matched.
	  if (!vectoreForStoringAuxPruningTermEntry.empty()) {
		  cout << "initial min heap: " << vectoreForStoringAuxPruningTermEntry.front().term_ << " " << vectoreForStoringAuxPruningTermEntry.front().current_min_value_ << '\n';

		  // Cut num_of_postings_pruned postings, always from the term owning the current global minimum.
		  for(unsigned int i =0; i < num_of_postings_pruned; i++){
			  // step1: pop the term with the globally smallest current minimum score.
			  pop_heap (vectoreForStoringAuxPruningTermEntry.begin(),vectoreForStoringAuxPruningTermEntry.end(),MinValueCompare());

			  aux_pruning_term_entry curr_working_term_pruning_aux_entry = vectoreForStoringAuxPruningTermEntry.back();
			  curr_working_term_pruning_aux_entry.num_of_posting_pruned_ += 1;
			  // Index of the next prunable posting: lists are score-sorted, so pruning eats from the tail.
			  int vectorIndex = curr_working_term_pruning_aux_entry.num_of_posting_in_the_original_list_ - 1 - curr_working_term_pruning_aux_entry.num_of_posting_pruned_;

			  if (debugFlag){
				  // boundary check
				  cout << "curr_working_term_pruning_aux_entry.term_:" << curr_working_term_pruning_aux_entry.term_ << endl;
				  cout << "vectorIndex:" << vectorIndex << endl;
				  cout << "mapForStoringCurrTermIndexEntryBufferOfEachTerm[curr_working_term_pruning_aux_entry.term_][0].score:" << mapForStoringCurrTermIndexEntryBufferOfEachTerm[curr_working_term_pruning_aux_entry.term_][0].score << endl;
				  cout << "mapForStoringCurrTermIndexEntryBufferOfEachTerm[curr_working_term_pruning_aux_entry.term_][0].doc_id:" << mapForStoringCurrTermIndexEntryBufferOfEachTerm[curr_working_term_pruning_aux_entry.term_][0].doc_id << endl;
			  }

			  if(vectorIndex >= 0){
				  // The list still has postings left to prune; record its new minimum.
				  curr_working_term_pruning_aux_entry.current_min_value_ = mapForStoringCurrTermIndexEntryBufferOfEachTerm[curr_working_term_pruning_aux_entry.term_][vectorIndex].score;
				  curr_working_term_pruning_aux_entry.current_min_docID_ = mapForStoringCurrTermIndexEntryBufferOfEachTerm[curr_working_term_pruning_aux_entry.term_][vectorIndex].doc_id;
			  }
			  else{
				  // List fully pruned: sentinel values keep this entry away from the heap front forever.
				  curr_working_term_pruning_aux_entry.current_min_value_ = 9999;
				  curr_working_term_pruning_aux_entry.current_min_docID_ = 2178851;
			  }
			  vectoreForStoringAuxPruningTermEntry.pop_back();

			  // step2: push the updated entry back into the heap.
			  vectoreForStoringAuxPruningTermEntry.push_back(curr_working_term_pruning_aux_entry);
			  push_heap(vectoreForStoringAuxPruningTermEntry.begin(),vectoreForStoringAuxPruningTermEntry.end(),MinValueCompare());
		  }
	  }

	  if(debugFlag){
		  // check point for the data structure: vectoreForStoringAuxPruningTermEntry
		  for(unsigned int i = 0; i < vectoreForStoringAuxPruningTermEntry.size(); i++){
			  cout << "**********pruning preparation analysis step2" << endl;
			  cout << "term_:" << vectoreForStoringAuxPruningTermEntry[i].term_ << endl;
			  cout << "num_of_posting_pruned_:" << vectoreForStoringAuxPruningTermEntry[i].num_of_posting_pruned_ << endl;
			  cout << "num_of_posting_in_the_original_list_:"<< vectoreForStoringAuxPruningTermEntry[i].num_of_posting_in_the_original_list_ << endl;
			  cout << "current_min_value_:" << vectoreForStoringAuxPruningTermEntry[i].current_min_value_ << endl;
			  cout << "current_min_docID_:"<< vectoreForStoringAuxPruningTermEntry[i].current_min_docID_ << endl;
		  }
	  }

	  // step3: write the surviving postings of each term back into the output index, term by term.
	  for(unsigned int i = 0; i < vectoreForStoringAuxPruningTermEntry.size(); i++){
		  string term_in_building_stage = vectoreForStoringAuxPruningTermEntry[i].term_;
		  int num_of_posting_left_in_building_stage = vectoreForStoringAuxPruningTermEntry[i].num_of_posting_in_the_original_list_ - vectoreForStoringAuxPruningTermEntry[i].num_of_posting_pruned_;

		  if (debugFlag){
			  cout << "**********pruning process analysis step3" << endl;
			  cout << "term_: " << term_in_building_stage << endl;
			  cout << "num_of_posting_pruned_: " << vectoreForStoringAuxPruningTermEntry[i].num_of_posting_pruned_ << endl;
			  cout << "num_of_posting_left_in_building_stage: " << num_of_posting_left_in_building_stage << endl;
		  }

		  if(num_of_posting_left_in_building_stage == 0){
			  // All the postings have been pruned for this term, so it will NOT appear in the new index.
			  systemLogCompletelyPrunedTermsFileHandler << term_in_building_stage << endl;
		  }
		  else{
			  // Build the entry buffer from the surviving (best-scoring) prefix of the sorted list.
			  vector<IndexEntryWeiForPruning> index_entry_buffer(vectoreForStoringAuxPruningTermEntry[i].num_of_posting_in_the_original_list_);
			  int index_entry_offset = 0;

			  // 'int j' (was unsigned) matches the signed posting count, avoiding a signed/unsigned comparison.
			  for(int j = 0; j < num_of_posting_left_in_building_stage; j++){
				  assert(index_entry_offset < vectoreForStoringAuxPruningTermEntry[i].num_of_posting_in_the_original_list_);
				  IndexEntryWeiForPruning& curr_index_entry = index_entry_buffer[index_entry_offset];

				  curr_index_entry.doc_id = mapForStoringCurrTermIndexEntryBufferOfEachTerm[ term_in_building_stage ][j].doc_id;
				  curr_index_entry.frequency = mapForStoringCurrTermIndexEntryBufferOfEachTerm[ term_in_building_stage ][j].frequency;
				  curr_index_entry.score = mapForStoringCurrTermIndexEntryBufferOfEachTerm[ term_in_building_stage ][j].score;

				  ++index_entry_offset;
			  }  // No more postings in the list.

			  // Restore docID order before dumping the list into the index.
			  sort(index_entry_buffer.begin(), index_entry_buffer.begin() + index_entry_offset, IndexEntryWeiForPruningDocIdComparison());

			  // This dumps a single inverted list into an index(ONLY main index / both the main and external index depend on the flag you set for the function.).
			  DumpToIndexForPruningProjectWeiWithoutTheArgumentDoc_id_score_comparator(index_entry_buffer.data(),index_entry_offset, vectoreForStoringAuxPruningTermEntry[i].term_.c_str(), vectoreForStoringAuxPruningTermEntry[i].term_.length(),true,vectoreForStoringAuxPruningTermEntry[i].num_of_posting_in_the_original_list_);

			  // TODO: check whether it is correct in this assignment
			  index_builder_->FinalizeLayer(vectoreForStoringAuxPruningTermEntry[i].current_min_value_);  // Need to call this before writing out the next layer.

			  systemLogFileHandler << term_in_building_stage << " " << vectoreForStoringAuxPruningTermEntry[i].num_of_posting_in_the_original_list_ << " " << vectoreForStoringAuxPruningTermEntry[i].num_of_posting_pruned_ << " " << num_of_posting_left_in_building_stage << " " << vectoreForStoringAuxPruningTermEntry[i].current_min_value_ << " " << vectoreForStoringAuxPruningTermEntry[i].current_min_docID_ << endl;

			  mapForStoringCurrTermIndexEntryBufferOfEachTerm.erase (term_in_building_stage); // erase the whole list by the term

			  if(debugFlag){
				  cout << "...Done" << endl;
				  cout << endl;
			  }
		  }
	  }

	  index_builder_->Finalize();

	  // Meta code 11 is written when the computed scores were stored into the external index, 12 otherwise.
	  if(store_computed_score_into_external_index_flag){
		  WriteMetaFile(output_index_files_.meta_info_filename(),11,percentageToKeepOfTheWholeIndex,pruningMethodCodeOfTheWholeIndex);
	  }
	  else{
		  WriteMetaFile(output_index_files_.meta_info_filename(),12,percentageToKeepOfTheWholeIndex,pruningMethodCodeOfTheWholeIndex);
	  }

	  systemLogFileHandler.close();
	  systemLogCompletelyPrunedTermsFileHandler.close();
}

// TODO: For now, we assume the whole inverted list fits in main memory and we don't index positions.
// TODO: Computing BM25 scores during the various sorting stages is expensive. When sorting, we have to do n*log(n) comparisons and thus recompute the
//       BM25 score more than necessary. We can speedup by precomputing and storing the BM25 scores.
void LayeredIndexGenerator::CreateLayeredIndex() {
  // Some static index layer properties.
  const int kLayerMinSize = CHUNK_SIZE;
  const int kMaxLayers = MAX_LIST_LAYERS;

  // TODO: Need a strategy that uses the IDF to split list into layers, so that only the top scoring documents are in the upper layers.

  // We implement three different layer splitting strategies:
  // * kPercentageLowerBounded: split layers by percentage, with a lowerbound size for each layer.
  // * kPercentageLowerUpperBounded: split layers by percentage, with lowerbound and upperbound sizes for each layer.
  // * kExponentiallyIncreasing: split by exponentially increasing bucket sizes, with a lowerbound size for each layer
  //   This strategy is based on the Anh/Moffat way of splitting, although they did this on a document level basis.
  // In each strategy, each layer (except possibly the last) will have a lowerbound size of 128 postings, regardless of whether we explicitly define a
  // lowerbound size. This is to make the most of a chunk, since we'll always be decompressing a whole chunk.
  enum LayerSplitMode {
    kPercentageLowerBounded, kPercentageLowerUpperBounded, kExponentiallyIncreasing, kUndefined
  };

  LayerSplitMode layer_splitting_strategy;

  // TODO: Disable for now...
  /*if (layering_strategy_ == "percentage-lower-bounded") {
    layer_splitting_strategy = kPercentageLowerBounded;
  } else if (layering_strategy_ == "percentage-lower-upper-bounded") {
    layer_splitting_strategy = kPercentageLowerUpperBounded;
  } else if (layering_strategy_ == "exponentially-increasing") {
    layer_splitting_strategy = kExponentiallyIncreasing;
  } else {
    layer_splitting_strategy = kUndefined;
    Configuration::ErroneousValue(config_properties::kLayeringStrategy, Stringify(layering_strategy_));
  }*/

  // If we have overlapping layers, should the threshold score include the overlapping documents?
  // This should generally be set to 'false', since all layers will then have the same threshold stored,
  // so if any algorithm desires this effect, it can just use the first layer threshold as the threshold for all subsequent overlapping layers.
  const bool kOverlappingLayerThresholdIncludesAllDocs = false;

  // Additional details for the above layering strategies.
  // TODO: Should be able to define these in the configuration file.

  //////////////////////////////////////////////////////////////////////////////
  // TODO: For testing purposes, override layering settings from the configuration file.
  // Wei: Now, I want to have that and have a try for the query processing alg.
  // So I make the corresponding changes.
  overlapping_layers_ = true;

  int min_layer_size = 32768;

  float layer_percentages[MAX_LIST_LAYERS];
  int layer_min_sizes[MAX_LIST_LAYERS];
  int layer_max_sizes[MAX_LIST_LAYERS];

  if (layering_strategy_ == "equal_2") {
    // EQUAL --- 2 LAYERS
    num_layers_ = 2;
    layer_splitting_strategy = kPercentageLowerUpperBounded;

    layer_percentages[0] = 50.0;
    layer_percentages[1] = 50.0;

    layer_min_sizes[0] = min_layer_size;
    layer_min_sizes[1] = min_layer_size;

    layer_max_sizes[0] = 0;
    layer_max_sizes[1] = 0;
  } else if (layering_strategy_ == "equal_4") {
    // EQUAL --- 4 LAYERS
    num_layers_ = 4;
    layer_splitting_strategy = kPercentageLowerUpperBounded;

    layer_percentages[0] = 25.0;
    layer_percentages[1] = 25.0;
    layer_percentages[2] = 25.0;
    layer_percentages[3] = 25.0;

    layer_min_sizes[0] = min_layer_size;
    layer_min_sizes[1] = min_layer_size;
    layer_min_sizes[2] = min_layer_size;
    layer_min_sizes[3] = min_layer_size;

    layer_max_sizes[0] = 0;
    layer_max_sizes[1] = 0;
    layer_max_sizes[2] = 0;
    layer_max_sizes[3] = 0;
  } else if (layering_strategy_ == "equal_8") {
    // EQUAL --- 8 LAYERS
    num_layers_ = 8;
    layer_splitting_strategy = kPercentageLowerUpperBounded;

    layer_percentages[0] = 12.5;
    layer_percentages[1] = 12.5;
    layer_percentages[2] = 12.5;
    layer_percentages[3] = 12.5;
    layer_percentages[4] = 12.5;
    layer_percentages[5] = 12.5;
    layer_percentages[6] = 12.5;
    layer_percentages[7] = 12.5;

    layer_min_sizes[0] = min_layer_size;
    layer_min_sizes[1] = min_layer_size;
    layer_min_sizes[2] = min_layer_size;
    layer_min_sizes[3] = min_layer_size;
    layer_min_sizes[4] = min_layer_size;
    layer_min_sizes[5] = min_layer_size;
    layer_min_sizes[6] = min_layer_size;
    layer_min_sizes[7] = min_layer_size;

    layer_max_sizes[0] = 0;
    layer_max_sizes[1] = 0;
    layer_max_sizes[2] = 0;
    layer_max_sizes[3] = 0;
    layer_max_sizes[4] = 0;
    layer_max_sizes[5] = 0;
    layer_max_sizes[6] = 0;
    layer_max_sizes[7] = 0;
  } else if (layering_strategy_ == "percentage_2") {
    // PERCENTAGE --- 2 LAYERS
    num_layers_ = 2;
    layer_splitting_strategy = kPercentageLowerUpperBounded;

    layer_percentages[0] = 25.0;
    layer_percentages[1] = 75.0;

    layer_min_sizes[0] = min_layer_size;
    layer_min_sizes[1] = min_layer_size;

    layer_max_sizes[0] = 0;
    layer_max_sizes[1] = 0;
  } else if (layering_strategy_ == "percentage_4") {
    // PERCENTAGE --- 4 LAYERS
    num_layers_ = 4;
    layer_splitting_strategy = kPercentageLowerUpperBounded;

    layer_percentages[0] = 6.25;
    layer_percentages[1] = 18.75;
    layer_percentages[2] = 18.75;
    layer_percentages[3] = 56.25;

    layer_min_sizes[0] = min_layer_size;
    layer_min_sizes[1] = min_layer_size;
    layer_min_sizes[2] = min_layer_size;
    layer_min_sizes[3] = min_layer_size;

    layer_max_sizes[0] = 0;
    layer_max_sizes[1] = 0;
    layer_max_sizes[2] = 0;
    layer_max_sizes[3] = 0;
  } else if (layering_strategy_ == "percentage_8") {
    // PERCENTAGE --- 8 LAYERS
    num_layers_ = 8;
    layer_splitting_strategy = kPercentageLowerUpperBounded;

    layer_percentages[0] = 1.5625;
    layer_percentages[1] = 4.6875;
    layer_percentages[2] = 4.6875;
    layer_percentages[3] = 4.6875;
    layer_percentages[4] = 14.0625;
    layer_percentages[5] = 14.0625;
    layer_percentages[6] = 14.0625;
    layer_percentages[7] = 42.1875;

    layer_min_sizes[0] = min_layer_size;
    layer_min_sizes[1] = min_layer_size;
    layer_min_sizes[2] = min_layer_size;
    layer_min_sizes[3] = min_layer_size;
    layer_min_sizes[4] = min_layer_size;
    layer_min_sizes[5] = min_layer_size;
    layer_min_sizes[6] = min_layer_size;
    layer_min_sizes[7] = min_layer_size;

    layer_max_sizes[0] = 0;
    layer_max_sizes[1] = 0;
    layer_max_sizes[2] = 0;
    layer_max_sizes[3] = 0;
    layer_max_sizes[4] = 0;
    layer_max_sizes[5] = 0;
    layer_max_sizes[6] = 0;
    layer_max_sizes[7] = 0;
  } else if (layering_strategy_ == "exponential_2") {
    // EXPONENTIAL --- 2 LAYERS
    num_layers_ = 2;
    layer_splitting_strategy = kExponentiallyIncreasing;

    layer_percentages[0] = 0;
    layer_percentages[1] = 0;

    layer_min_sizes[0] = min_layer_size;
    layer_min_sizes[1] = min_layer_size;

    layer_max_sizes[0] = 0;
    layer_max_sizes[1] = 0;
  } else if (layering_strategy_ == "exponential_4") {
    // EXPONENTIAL --- 4 LAYERS
    num_layers_ = 4;
    layer_splitting_strategy = kExponentiallyIncreasing;

    layer_percentages[0] = 0;
    layer_percentages[1] = 0;
    layer_percentages[2] = 0;
    layer_percentages[3] = 0;

    layer_min_sizes[0] = min_layer_size;
    layer_min_sizes[1] = min_layer_size;
    layer_min_sizes[2] = min_layer_size;
    layer_min_sizes[3] = min_layer_size;

    layer_max_sizes[0] = 0;
    layer_max_sizes[1] = 0;
    layer_max_sizes[2] = 0;
    layer_max_sizes[3] = 0;
  } else if (layering_strategy_ == "exponential_8") {
    // EXPONENTIAL --- 8 LAYERS
    num_layers_ = 8;
    layer_splitting_strategy = kExponentiallyIncreasing;

    layer_percentages[0] = 0;
    layer_percentages[1] = 0;
    layer_percentages[2] = 0;
    layer_percentages[3] = 0;
    layer_percentages[4] = 0;
    layer_percentages[5] = 0;
    layer_percentages[6] = 0;
    layer_percentages[7] = 0;

    layer_min_sizes[0] = min_layer_size;
    layer_min_sizes[1] = min_layer_size;
    layer_min_sizes[2] = min_layer_size;
    layer_min_sizes[3] = min_layer_size;
    layer_min_sizes[4] = min_layer_size;
    layer_min_sizes[5] = min_layer_size;
    layer_min_sizes[6] = min_layer_size;
    layer_min_sizes[7] = min_layer_size;

    layer_max_sizes[0] = 0;
    layer_max_sizes[1] = 0;
    layer_max_sizes[2] = 0;
    layer_max_sizes[3] = 0;
    layer_max_sizes[4] = 0;
    layer_max_sizes[5] = 0;
    layer_max_sizes[6] = 0;
    layer_max_sizes[7] = 0;
  } else {
    exit(1);
  }
  //////////////////////////////////////////////////////////////////////////////


  //////////////////////////////////////////////////////////////////////////////
  // Standard.
//  float layer_percentages[] = { 5, 5, 10, 15, 25, 40, 0, 0 };
//  // Set the minimum number of postings in each layer, 0 means no limit.
//  int layer_min_sizes[] = { 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072 };
//  // Set the maximum number of postings in each layer, 0 means no limit.
//  int layer_max_sizes[] = { 1024, 8192, 0, 0, 0, 0, 0, 0 };

//  // Equal.
//  float layer_percentages[] = { 10, 10, 10, 10, 15, 15, 15, 15 };
//  // Set the minimum number of postings in each layer, 0 means no limit.
//  int layer_min_sizes[] = { 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024 };
//  // Set the maximum number of postings in each layer, 0 means no limit.
//  int layer_max_sizes[] = { 0, 0, 0, 0, 0, 0, 0, 0 };

//  // Equal. Only a few layers.
//  float layer_percentages[] = { 25, 25, 25, 25 };
//  // Set the minimum number of postings in each layer, 0 means no limit.
//  int layer_min_sizes[] = { 4096, 4096, 4096, 4096 };
//  // Set the maximum number of postings in each layer, 0 means no limit.
//  int layer_max_sizes[] = { 0, 0, 0, 0 };

//  // Subsequent layers doubled (starting from 65536 postings). Last layer will have everything that remains.
//  float layer_percentages[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
//  // Set the minimum number of postings in each layer, 0 means no limit.
//  int layer_min_sizes[] = { 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608 };
//  // Set the maximum number of postings in each layer, 0 means no limit.
//  int layer_max_sizes[] = { 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608 };

  //////////////////////////////////////////////////////////////////////////////

  // Test that the index layering properties make sense.
  if (num_layers_ > kMaxLayers) {
    GetErrorLogger().Log("Cannot make index with more layers than " + Stringify(kMaxLayers) + " layers.", true);
  }
  assert(sizeof(layer_percentages) >= (num_layers_ * sizeof(layer_percentages[0])));
  assert(sizeof(layer_max_sizes) >= (num_layers_ * sizeof(layer_max_sizes[0])));
  assert(sizeof(layer_min_sizes) >= (num_layers_ * sizeof(layer_min_sizes[0])));
  for (int i = 0; i < num_layers_; ++i) {
    assert(layer_percentages[i] >= 0);
    assert(layer_max_sizes[i] >= 0);
    assert(layer_min_sizes[i] >= 0);
  }

  // set a Wei special counter to break.
  // currently, I do not need to process the whole lexicon.
  int breakCounter = 0;
  while (index_->NextTerm()) {
	 cout << "breakCounter:" << breakCounter << endl;
     if (breakCounter == 10){
    	 break;
     }
     breakCounter += 1;
    // Skip layering only to these lists for faster debugging.
    string curr_term = string(index_->curr_term(), index_->curr_term_len());
    /*
    if (!(curr_term == "beneficiaries" || curr_term == "insurance" || curr_term == "irs" || curr_term == "life" || curr_term == "hello" || curr_term == "world"
        || curr_term == "superior" || curr_term == "court" || curr_term == "king" || curr_term == "county" || curr_term == "divorce"))
      continue;
    */
    cout << "Layering for inverted list: " << curr_term << endl;


    // TODO: It's better to reuse the buffer, and resize only when necessary.
    int num_docs_in_list = index_->curr_list_data()->num_docs();
    IndexEntry* index_entry_buffer = new IndexEntry[num_docs_in_list];
    int index_entry_offset = 0;

    while (index_->NextDocId()) {
      IndexEntry& curr_index_entry = index_entry_buffer[index_entry_offset];

      assert(index_entry_offset < num_docs_in_list);

      curr_index_entry.doc_id = index_->curr_doc_id();
      curr_index_entry.frequency = index_->curr_list_data()->GetFreq();

      ++index_entry_offset;
    }  // No more postings in the list.

    // Need the average document length for computing BM25 scores.
    long int total_document_lengths = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalDocumentLengths), true);
    long int total_num_docs = IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetNumericalValue(meta_properties::kTotalNumDocs), true);
    int average_doc_length = total_document_lengths / total_num_docs;

    // First, we sort by docID score.
    DocIdScoreComparison doc_id_score_comparator(index_->index_reader()->document_map(), num_docs_in_list, average_doc_length, total_num_docs,2);

    /*
    // For a particular term, print all the docIDs in the list and their partial BM25 scores, and group them in chunks.
    // This is before we sort the docIDs by score.
    // Currently, it is sorted by docIDs
    if (curr_term == "a" || curr_term == "0000000000") {
      cout << "Printing docIDs and scores for list: " << curr_term << endl;

      cout << "total postings: " << index_entry_offset << endl;
      for (int i = 0; i < index_entry_offset; ++i) {
        if ((i & 127) == 0) {
          assert((i % 128) == 0);
          cout << "*New Chunk*" << endl;
        }
        cout << "docID: " << index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparator.Bm25Score(index_entry_buffer[i]) << endl;
      }
    }
    */


    //important sort operation.
    cout << "# of postings in the list:" << index_entry_offset << endl;
    sort(index_entry_buffer, index_entry_buffer + index_entry_offset, doc_id_score_comparator);

    /*
    // For a particular term, print all the docIDs in the list and their partial BM25 scores, and group them in chunks.
    // This is after we sort the docIDs by score and Wei want to have a see.
    // Currently, it is sorted by score.
    if (curr_term == "a" || curr_term == "0000000000") {
      cout << "Printing docIDs and scores for list: " << curr_term << endl;
      cout << "# of postings in the list:" << index_entry_offset << endl;
      cout << "total postings: " << index_entry_offset << endl;
      for (int i = 0; i < index_entry_offset; ++i) {
        if ((i & 127) == 0) {
          assert((i % 128) == 0);
          cout << "*New Chunk*" << endl;
        }
        cout << "docID: " << index_entry_buffer[i].doc_id << ", score: " << doc_id_score_comparator.Bm25Score(index_entry_buffer[i]) << endl;
      }
    }
    */

    // For the exponentially increasing bucket size implementation.
    float base = pow(index_entry_offset, 1.0 / num_layers_);

    /*// The exponentially increasing bucket sizes for the initial layers are too small.  To solve this problem, we make sure that the first layer
    // is sized to at least the minimum number of postings specified by the user.  We then select an integer x such that the (initial_layer_size) * (2**x)
    // is at least the minimum_layer_size.  Successive layers will be multiplied by 2**(x-i) where i the layer number; we do not multiply by negative powers of
    // 2.  This makes further layers also bigger but by a smaller power of 2 since it's already exponentially bigger as is.
    int x = 0;
    if (layer_min_sizes[0] != 0) {
      int initial_layer_min_size = layer_min_sizes[0];
      int initial_layer_size = max(1.0, (base - 1.0) * pow(base, 0));

      float size_up_factor = initial_layer_min_size / initial_layer_size;
      size_up_factor = max(1.0f, size_up_factor);
      x = ceil(log2(size_up_factor));
    }*/

    // Initially, exponential layers are lower-bounded by a Fibonacci-like sequence.
    const int kFirstLayer = 16384;
    const int kSecondLayer = 32768;
    int exponential_prev_min = 0;
    int exponential_next_min = 0;

    float list_score_threshold = 0;  // The upperbound score for the whole list.
    int total_num_postings = index_entry_offset;
    int num_postings_left = total_num_postings;
    int num_postings_curr_layer;

    cout << "num_layers_:" << num_layers_ << endl;

    for (int i = 0; i < num_layers_; ++i) {
      if (num_postings_left <= 0) {
        break;
      }

      switch (layer_splitting_strategy) {
        case kPercentageLowerBounded:
          num_postings_curr_layer = (layer_percentages[i] / 100.0) * total_num_postings;
          break;
        case kPercentageLowerUpperBounded:
          num_postings_curr_layer = (layer_percentages[i] / 100.0) * total_num_postings;
          if (layer_max_sizes[i] != 0)  // A 0 means that the number of postings for this layer is not bounded.
            num_postings_curr_layer = min(num_postings_curr_layer, layer_max_sizes[i]);
          break;
        case kExponentiallyIncreasing:
          num_postings_curr_layer = (base - 1.0) * pow(base, i);

          // Follow a Fibonacci-like sequence for the first few small layers.
          if (exponential_next_min < kFirstLayer) {
            exponential_prev_min = kFirstLayer;
            exponential_next_min = kFirstLayer;
          } else if (exponential_next_min < kSecondLayer) {
            exponential_prev_min = kFirstLayer;
            exponential_next_min = kSecondLayer;
          } else {
            int tmp = exponential_prev_min;
            exponential_prev_min = exponential_next_min;
            exponential_next_min = tmp + exponential_next_min;
          }

          if (num_postings_curr_layer < exponential_next_min) {
            num_postings_curr_layer = exponential_next_min;
          }

          /*// Modify our exponential bucket size to make the initial layers bigger.
          assert(x >= 0);
          num_postings_curr_layer = num_postings_curr_layer * pow(2, x);
          // Decrease 'x' for successive layers.
          if (x > 0)
            --x;*/

          if (layer_min_sizes[i] != 0)  // A 0 means that the number of postings for this layer is not bounded.
            num_postings_curr_layer = max(num_postings_curr_layer, layer_min_sizes[i]);
          break;
        default:
          num_postings_curr_layer = 0;
          assert(false);
          break;
      }

      // Potentially, due to the layering parameters, we will get more postings in the current layer than the total remaining postings,
      // and we have to normalize for that.
      num_postings_curr_layer = min(num_postings_curr_layer, num_postings_left);

      // Make each layer the minimum size (if there are enough postings remaining).
      if (num_postings_curr_layer < kLayerMinSize) {
        if (num_postings_left >= kLayerMinSize) {
          num_postings_curr_layer = kLayerMinSize;
        }
      }

      num_postings_left -= num_postings_curr_layer;

      // If the next layer will have less postings than the current layer, we will make the current layer the last layer by putting all the
      // remaining postings into it.
      if (num_postings_left < num_postings_curr_layer) {
        num_postings_curr_layer += num_postings_left;
        num_postings_left = 0;
      }

      // Make sure that if this is the last layer, it contains all the remaining postings.
      if (i == (num_layers_ - 1) && num_postings_left > 0) {
        num_postings_curr_layer += num_postings_left;
        num_postings_left = 0;
      }

      // We want to split so that scores in each layer are unique (i.e. the lowest scoring posting in one layer does not have the same score
      // as the highest scoring posting in the next layer).
      // This causes problems in early termination algorithms if not taken into account (the top-k documents returned will not be identical).
      // The solution to this problem is the following:
      // If the last posting of the current layer has the same score as the next n postings (which are in the next layer(s)),
      // we move those same scoring postings into the current layer.
      // If the next layer(s) now contain 0 documents, we push postings from layers further down into the upper layers.
      float curr_layer_threshold, next_layer_threshold;
      do {
        // If this is the layer layer, nothing needs to be done.
        if (i == (num_layers_ - 1) || num_postings_left <= 0)
          break;

        int curr_layer_threshold_idx = total_num_postings - num_postings_left - num_postings_curr_layer;
        int next_layer_threshold_idx = total_num_postings - num_postings_left;
        curr_layer_threshold = doc_id_score_comparator.score(index_entry_buffer[curr_layer_threshold_idx]);
        next_layer_threshold = doc_id_score_comparator.score(index_entry_buffer[next_layer_threshold_idx]);
        // The current layer threshold should always be greater than the next layer threshold.
        // We add postings to the current layer until the above is true.
        if (curr_layer_threshold <= next_layer_threshold) {
          ++num_postings_curr_layer;
          --num_postings_left;
        } else {
          break;
        }
      } while (true);

      assert(num_postings_curr_layer > 0);

      // Here we do the actual splitting of the layers.(I like it by Wei)
      // TODO: Instead of resorting the whole buffer, it might be faster to sort only the 2nd layer by docID, and then do a merge of the layers.
      //       This would require a different DumpToIndex() method that is more incremental, because we can't do an in-place merge of the whole array
      //       (it would require an additional array).
      int curr_layer_start = total_num_postings - num_postings_left - num_postings_curr_layer;

      int layer_start = overlapping_layers_ ? 0 : curr_layer_start;
      float score_threshold = doc_id_score_comparator.score(index_entry_buffer[curr_layer_start]);
      if (i == 0) {
        list_score_threshold = score_threshold;
      }
      if (kOverlappingLayerThresholdIncludesAllDocs) {
        score_threshold = list_score_threshold;
      }
      sort(index_entry_buffer + layer_start, index_entry_buffer + curr_layer_start + num_postings_curr_layer, IndexEntryDocIdComparison());
      DumpToIndex(doc_id_score_comparator, index_entry_buffer + layer_start, curr_layer_start + num_postings_curr_layer - layer_start, index_->curr_term(),
                  index_->curr_term_len(), false);
      index_builder_->FinalizeLayer(score_threshold);  // Need to call this before writing out the next layer.

      //print out some info for Wei to understand.
      cout << "curr_layer_start:" << curr_layer_start << endl;
      cout << "score_threshold:" << score_threshold << endl;

    }

    delete[] index_entry_buffer;
  }

  index_builder_->Finalize();

  WriteMetaFile(output_index_files_.meta_info_filename(),0);
}

// This dumps a single list (or one layer of a list) into the index, one chunk at a time.
// 'index_entries' must be sorted by ascending docID; 'doc_id_score_comparator' supplies the
// per-posting BM25 score used for chunk max-scores and (optionally) for scores stored in the
// external index when 'store_computed_score_into_external_index_flag' is set.
void LayeredIndexGenerator::DumpToIndex(const DocIdScoreComparison& doc_id_score_comparator, IndexEntry* index_entries, int num_index_entries,
                                        const char* curr_term, int curr_term_len, bool store_computed_score_into_external_index_flag) {
  // Since the following input arrays will be used as input to the various coding policies, and the coding policy might apply a blockwise coding compressor
  // (which would pad the array to the block size), the following rules apply:
  // For the docID and frequency arrays, the block size is expected to be the chunk size.
  // For the position and context arrays, the block size is expected to be a multiple of the maximum positions/contexts possible for a particular docID.
  // Some alternative designs would be to define a fixed maximum block size and make sure the arrays are properly sized for this maximum
  // (the position/context arrays in particular).
  // Another alternative is to make these arrays dynamically allocated.

  assert(doc_id_compressor_.block_size() == 0 || ChunkEncoder::kChunkSize == doc_id_compressor_.block_size());
  assert(frequency_compressor_.block_size() == 0 || ChunkEncoder::kChunkSize == frequency_compressor_.block_size());
  assert(position_compressor_.block_size() == 0 || (ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties) % position_compressor_.block_size() == 0);

  // Per-chunk scratch buffers, refilled on every outer-loop iteration.
  uint32_t doc_ids[ChunkEncoder::kChunkSize];
  uint32_t frequencies[ChunkEncoder::kChunkSize];


  // Scores computed during the layering/pruning process, buffered per chunk so they can be
  // stored in the external index (only populated when the flag is set).
  vector<float> scores_stored_in_index;



  //float scores_stored_in_index[ChunkEncoder::kChunkSize];

  uint32_t positions[ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties];
  unsigned char contexts[ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties];

  // Delta-encoding state, carried across chunk boundaries.
  uint32_t prev_chunk_last_doc_id = 0;
  uint32_t prev_doc_id = 0;

  int index_entries_offset = 0;
  while(index_entries_offset < num_index_entries) {
    int doc_ids_offset = 0;
    int properties_offset = 0;
    // Gather up to one chunk's worth of postings into the scratch buffers.
    for (doc_ids_offset = 0; doc_ids_offset < ChunkEncoder::kChunkSize && index_entries_offset < num_index_entries; ++doc_ids_offset) {
      const IndexEntry& curr_index_entry = index_entries[index_entries_offset];

      // docIDs are stored gap-encoded relative to the previous posting.
      doc_ids[doc_ids_offset] = curr_index_entry.doc_id - prev_doc_id;



      // Check for duplicate docIDs (when the difference between the 'curr_index_entry.doc_id' and 'prev_doc_id' is zero), which is considered a bug.
      // But since 'prev_doc_id' is initialized to 0, which is a valid doc,
      // we have a case where the 'curr_index_entry.doc_id' could start from 0, which is an exception to the rule.
      // Thus, if this is the first iteration and 'curr_index_entry.doc_id' is 0, it is an acceptable case.
      assert(doc_ids[doc_ids_offset] != 0 || (index_entries_offset == 0 && curr_index_entry.doc_id == 0));
      prev_doc_id = curr_index_entry.doc_id;

      frequencies[doc_ids_offset] = curr_index_entry.frequency;

      if(store_computed_score_into_external_index_flag){
    	  scores_stored_in_index.push_back( doc_id_score_comparator.score(curr_index_entry) );
      }

      if (includes_positions_) {
        // Positions beyond kMaxProperties per docID are dropped.
        uint32_t num_positions = min(curr_index_entry.frequency, static_cast<uint32_t> (ChunkEncoder::kMaxProperties));
        for (uint32_t j = 0; j < num_positions; ++j) {
          positions[properties_offset++] = curr_index_entry.positions[j];
        }
      }

      ++index_entries_offset;
    }

    if(store_computed_score_into_external_index_flag){
    	ChunkEncoder chunk(doc_ids, frequencies, scores_stored_in_index, (includes_positions_ ? positions : NULL), (includes_contexts_ ? contexts : NULL), doc_ids_offset,
    	                       properties_offset, prev_chunk_last_doc_id, doc_id_compressor_, frequency_compressor_, position_compressor_);

        chunk.set_max_score(GetChunkMaxScore(doc_id_score_comparator, index_entries + (index_entries_offset - doc_ids_offset), doc_ids_offset));

        prev_chunk_last_doc_id = chunk.last_doc_id();

        // The chunk is the basic storage unit for us to deal with.

        // Variant that also persists the precomputed scores into the external index.
        index_builder_->AddWithStoredScores(chunk, curr_term, curr_term_len);



        // Reset the per-chunk score buffer for the next chunk.
        scores_stored_in_index.clear();
    }
    else{
        ChunkEncoder chunk(doc_ids, frequencies, (includes_positions_ ? positions : NULL), (includes_contexts_ ? contexts : NULL), doc_ids_offset,
                           properties_offset, prev_chunk_last_doc_id, doc_id_compressor_, frequency_compressor_, position_compressor_);
        chunk.set_max_score(GetChunkMaxScore(doc_id_score_comparator, index_entries + (index_entries_offset - doc_ids_offset), doc_ids_offset));

        prev_chunk_last_doc_id = chunk.last_doc_id();

        // The chunk is the basic storage unit for us to deal with.
        index_builder_->Add(chunk, curr_term, curr_term_len);


    }
  }
}

// Writes one posting list (already in ascending docID order) into the index, chunk by chunk.
// Identical in behavior to DumpToIndex(), except that each posting's score was precomputed
// and is read straight off the IndexEntryWeiForPruning record instead of being derived from
// a comparator. 'specialNumberValue' is accepted for interface compatibility but unused here.
void LayeredIndexGenerator::DumpToIndexForPruningProjectWeiWithoutTheArgumentDoc_id_score_comparator(IndexEntryWeiForPruning* index_entries, int num_index_entries,
                                        const char* curr_term, int curr_term_len, bool store_computed_score_into_external_index_flag, int specialNumberValue) {
  // The coding policies may pad blockwise-compressed arrays up to their block size, so the
  // scratch arrays below must line up with those block sizes: the docID/frequency blocks must
  // equal the chunk size, and the position block must divide the per-chunk property capacity.
  assert(doc_id_compressor_.block_size() == 0 || ChunkEncoder::kChunkSize == doc_id_compressor_.block_size());
  assert(frequency_compressor_.block_size() == 0 || ChunkEncoder::kChunkSize == frequency_compressor_.block_size());
  assert(position_compressor_.block_size() == 0 || (ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties) % position_compressor_.block_size() == 0);

  // Per-chunk scratch buffers.
  uint32_t doc_id_gaps[ChunkEncoder::kChunkSize];
  uint32_t freqs[ChunkEncoder::kChunkSize];
  uint32_t position_buf[ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties];
  unsigned char context_buf[ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties];

  // Precomputed scores gathered per chunk for the external index (only when the flag is set).
  vector<float> chunk_scores;

  // Delta-encoding state carried across chunks.
  uint32_t last_doc_id_prev_chunk = 0;
  uint32_t previous_doc_id = 0;

  int consumed = 0;  // Number of input postings processed so far.
  while (consumed < num_index_entries) {
    int num_docs = 0;        // Postings gathered into the current chunk.
    int num_properties = 0;  // Positions gathered into the current chunk.

    // Fill the scratch buffers with up to one chunk's worth of postings.
    while (num_docs < ChunkEncoder::kChunkSize && consumed < num_index_entries) {
      const IndexEntryWeiForPruning& entry = index_entries[consumed];

      // Gap-encode the docID against the previous posting. A zero gap would mean a duplicate
      // docID (a bug) — except that the very first posting may legitimately be docID 0, since
      // 'previous_doc_id' starts at 0.
      doc_id_gaps[num_docs] = entry.doc_id - previous_doc_id;
      assert(doc_id_gaps[num_docs] != 0 || (consumed == 0 && entry.doc_id == 0));
      previous_doc_id = entry.doc_id;

      freqs[num_docs] = entry.frequency;

      if (store_computed_score_into_external_index_flag) {
        chunk_scores.push_back(entry.score);
      }

      if (includes_positions_) {
        // At most kMaxProperties positions are kept per docID.
        uint32_t position_count = min(entry.frequency, static_cast<uint32_t>(ChunkEncoder::kMaxProperties));
        for (uint32_t p = 0; p < position_count; ++p) {
          position_buf[num_properties++] = entry.positions[p];
        }
      }

      ++consumed;
      ++num_docs;
    }

    // The entries that went into this chunk start 'num_docs' before the consumption point.
    IndexEntryWeiForPruning* chunk_entries = index_entries + (consumed - num_docs);

    if (store_computed_score_into_external_index_flag) {
      // Variant that also persists the precomputed scores into the external index.
      ChunkEncoder chunk(doc_id_gaps, freqs, chunk_scores, (includes_positions_ ? position_buf : NULL), (includes_contexts_ ? context_buf : NULL),
                         num_docs, num_properties, last_doc_id_prev_chunk, doc_id_compressor_, frequency_compressor_, position_compressor_);
      chunk.set_max_score(GetChunkMaxScoreWeiWithoutTheArgumentDoc_id_score_comparator(chunk_entries, num_docs));
      last_doc_id_prev_chunk = chunk.last_doc_id();
      index_builder_->AddWithStoredScores(chunk, curr_term, curr_term_len);
      chunk_scores.clear();  // Start the next chunk with an empty score buffer.
    } else {
      ChunkEncoder chunk(doc_id_gaps, freqs, (includes_positions_ ? position_buf : NULL), (includes_contexts_ ? context_buf : NULL),
                         num_docs, num_properties, last_doc_id_prev_chunk, doc_id_compressor_, frequency_compressor_, position_compressor_);
      chunk.set_max_score(GetChunkMaxScoreWeiWithoutTheArgumentDoc_id_score_comparator(chunk_entries, num_docs));
      last_doc_id_prev_chunk = chunk.last_doc_id();
      index_builder_->Add(chunk, curr_term, curr_term_len);
    }
  }
}


// Dumps a single (already pruned) inverted list into the output index, one
// chunk of up to ChunkEncoder::kChunkSize postings at a time.
// 'index_entries' is expected in ascending docID order, since docIDs are
// gap-encoded against the previous posting in the list.
// When 'store_computed_score_into_external_index_flag' is set, each posting's
// 'staticProbability' is also handed to the ChunkEncoder and the chunk is
// added through AddWithStoredScores(); otherwise a plain chunk is built and
// added through Add().
// NOTE(review): 'specialNumberValue' is never read in this function -- confirm
// whether it is still needed in the interface.
void LayeredIndexGenerator::DumpToIndexForPruningProjectWei(DocIdScoreComparisonWei& doc_id_score_comparator, IndexEntry* index_entries, int num_index_entries,
                                        const char* curr_term, int curr_term_len, bool store_computed_score_into_external_index_flag, int specialNumberValue) {
  // Since the following input arrays will be used as input to the various coding policies, and the coding policy might apply a blockwise coding compressor
  // (which would pad the array to the block size), the following rules apply:
  // For the docID and frequency arrays, the block size is expected to be the chunk size.
  // For the position and context arrays, the block size is expected to be a multiple of the maximum positions/contexts possible for a particular docID.
  // Some alternative designs would be to define a fixed maximum block size and make sure the arrays are properly sized for this maximum
  // (the position/context arrays in particular).
  // Another alternative is to make these arrays dynamically allocated.
  // cout << "LayeredIndexGenerator::DumpToIndexForPruningProjectWei(...) is called." << endl;
  assert(doc_id_compressor_.block_size() == 0 || ChunkEncoder::kChunkSize == doc_id_compressor_.block_size());
  assert(frequency_compressor_.block_size() == 0 || ChunkEncoder::kChunkSize == frequency_compressor_.block_size());
  assert(position_compressor_.block_size() == 0 || (ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties) % position_compressor_.block_size() == 0);

  uint32_t doc_ids[ChunkEncoder::kChunkSize];
  uint32_t frequencies[ChunkEncoder::kChunkSize];


  //wei added 2012/07/05. This variable is used for storing scores computed during the layering or pruning process and also want to store these scores in the external index.
  vector<float> scores_stored_in_index;



  //float scores_stored_in_index[ChunkEncoder::kChunkSize];

  uint32_t positions[ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties];
  unsigned char contexts[ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties];

  // Last docID of the previously emitted chunk; the first docID of each chunk
  // is gap-encoded against it by the ChunkEncoder.
  uint32_t prev_chunk_last_doc_id = 0;
  // Last docID seen overall; used for the per-posting gap encoding below.
  uint32_t prev_doc_id = 0;

  int index_entries_offset = 0;
  while(index_entries_offset < num_index_entries) {
    int doc_ids_offset = 0;
    int properties_offset = 0;
    // Fill one chunk's worth of gap-encoded docIDs, frequencies, optional
    // scores, and optional positions.
    for (doc_ids_offset = 0; doc_ids_offset < ChunkEncoder::kChunkSize && index_entries_offset < num_index_entries; ++doc_ids_offset) {
      const IndexEntry& curr_index_entry = index_entries[index_entries_offset];

      // Gap-encode the docID relative to the previous posting.
      doc_ids[doc_ids_offset] = curr_index_entry.doc_id - prev_doc_id;



      // Check for duplicate docIDs (when the difference between the 'curr_index_entry.doc_id' and 'prev_doc_id' is zero), which is considered a bug.
      // But since 'prev_doc_id' is initialized to 0, which is a valid doc,
      // we have a case where the 'curr_index_entry.doc_id' could start from 0, which is an exception to the rule.
      // Thus, if this is the first iteration and 'curr_index_entry.doc_id' is 0, it is an acceptable case.
      // Wei: this make it a little more complicated than expected. 2012/06/28
      assert(doc_ids[doc_ids_offset] != 0 || (index_entries_offset == 0 && curr_index_entry.doc_id == 0));
      prev_doc_id = curr_index_entry.doc_id;

      frequencies[doc_ids_offset] = curr_index_entry.frequency;

      if(store_computed_score_into_external_index_flag){
    	  // lazy warning response by Wei 2013/09/10 afternoon
    	  float tempValue = curr_index_entry.staticProbability;
    	  scores_stored_in_index.push_back( tempValue );
      }

      if (includes_positions_) {
        // Copy at most kMaxProperties positions for this docID.
        uint32_t num_positions = min(curr_index_entry.frequency, static_cast<uint32_t> (ChunkEncoder::kMaxProperties));
        for (uint32_t j = 0; j < num_positions; ++j) {
          positions[properties_offset++] = curr_index_entry.positions[j];
        }
      }

      ++index_entries_offset;
    }

    if(store_computed_score_into_external_index_flag){
    	ChunkEncoder chunk(doc_ids, frequencies, scores_stored_in_index, (includes_positions_ ? positions : NULL), (includes_contexts_ ? contexts : NULL), doc_ids_offset,
    	                       properties_offset, prev_chunk_last_doc_id, doc_id_compressor_, frequency_compressor_, position_compressor_);

        // Note: GetChunkMaxScoreWei() sorts the chunk's entries by score as a
        // side effect; by this point their docIDs/frequencies have already been
        // copied into the arrays above.
        chunk.set_max_score(GetChunkMaxScoreWei(doc_id_score_comparator, index_entries + (index_entries_offset - doc_ids_offset), doc_ids_offset));

        prev_chunk_last_doc_id = chunk.last_doc_id();

        // The chunk is the basic storage unit for us to deal with.

        //Wei version(with external stored scores)
        index_builder_->AddWithStoredScores(chunk, curr_term, curr_term_len);



        // Reset the variables which need to be reset.
        scores_stored_in_index.clear();
    }
    else{
        ChunkEncoder chunk(doc_ids, frequencies, (includes_positions_ ? positions : NULL), (includes_contexts_ ? contexts : NULL), doc_ids_offset,
                           properties_offset, prev_chunk_last_doc_id, doc_id_compressor_, frequency_compressor_, position_compressor_);
        chunk.set_max_score(GetChunkMaxScoreWei(doc_id_score_comparator, index_entries + (index_entries_offset - doc_ids_offset), doc_ids_offset));

        prev_chunk_last_doc_id = chunk.last_doc_id();

        // The chunk is the basic storage unit for us to deal with.
        index_builder_->Add(chunk, curr_term, curr_term_len);
    }
  }
}



float LayeredIndexGenerator::GetChunkMaxScore(const DocIdScoreComparison& doc_id_score_comparator, IndexEntry* chunk_entries, int num_chunk_entries) {
  sort(chunk_entries, chunk_entries + num_chunk_entries, doc_id_score_comparator);
  return doc_id_score_comparator.score(chunk_entries[0]);
}

float LayeredIndexGenerator::GetChunkMaxScoreWei(DocIdScoreComparisonWei& doc_id_score_comparator, IndexEntry* chunk_entries, int num_chunk_entries) {
  // Same contract as GetChunkMaxScore(), but with the Wei comparator: sort the
  // chunk's postings in comparator order (highest score first, in place) and
  // report the score of the front entry as the chunk maximum.
  std::sort(chunk_entries, chunk_entries + num_chunk_entries, doc_id_score_comparator);
  return doc_id_score_comparator.score(*chunk_entries);
}

float LayeredIndexGenerator::GetChunkMaxScoreWeiWithoutTheArgumentDoc_id_score_comparator(IndexEntryWeiForPruning* chunk_entries, int num_chunk_entries) {
  // Returns the maximum pre-computed score over the chunk's entries; no
  // comparator is needed because the scores are already stored on the entries.
  // An empty chunk yields 0.0f. Assumes scores are non-negative (a chunk of
  // all-negative scores would also report 0.0f) -- TODO confirm with callers.
  //
  // Fixes: removed the unused 'currentMaxScoreIndex' local and the
  // signed/unsigned comparison between the loop index and 'num_chunk_entries'.
  float chunk_max_score = 0.0f;
  for (int i = 0; i < num_chunk_entries; ++i) {
    if (chunk_entries[i].score > chunk_max_score) {
      chunk_max_score = chunk_entries[i].score;
    }
  }
  return chunk_max_score;
}

// Writes the index meta file for the layered/pruned index. This overload does
// not record the whole-index pruning percentage or pruning method code; see
// the four-argument overload below for the variant that does.
void LayeredIndexGenerator::WriteMetaFile(const std::string& meta_filename, int functionValue) {
  KeyValueStore index_metafile;

  // functionValue = 0 means writing the original layered Index properties
  // functionValue = 11 means writing the new Index pruning properties with pre-computed scores
  // functionValue = 12 means writing the new Index pruning properties without pre-computed scores
  // They have minor differences.

  // Index layer properties.
  if (functionValue == 0){
	  index_metafile.AddKeyValuePair(meta_properties::kLayeredIndex, Stringify(true));
	  index_metafile.AddKeyValuePair(meta_properties::kNumLayers, Stringify(num_layers_));
	  index_metafile.AddKeyValuePair(meta_properties::kOverlappingLayers, Stringify(overlapping_layers_));
  }
  else if (functionValue == 11){
	  // Updated by Wei 2013/01/27: either can be pruned index or layered index.
	  // But can not be both because conflicting of the semantics
	  index_metafile.AddKeyValuePair(meta_properties::kPrunedIndex, Stringify(true));
	  // index_metafile.AddKeyValuePair(meta_properties::kLayeredIndex, Stringify(true));
	  index_metafile.AddKeyValuePair(meta_properties::kIncludesPrecomputedScores, Stringify(true));

	  index_metafile.AddKeyValuePair(meta_properties::kNumLayers, Stringify(num_layers_));
	  index_metafile.AddKeyValuePair(meta_properties::kOverlappingLayers, Stringify(overlapping_layers_));


  }
  else if (functionValue == 12){
	  // Updated by Wei 2013/01/27: either can be pruned index or layered index.
	  // But can not be both because conflicting of the semantics
	  index_metafile.AddKeyValuePair(meta_properties::kPrunedIndex, Stringify(true));
	  // index_metafile.AddKeyValuePair(meta_properties::kLayeredIndex, Stringify(true));
	  index_metafile.AddKeyValuePair(meta_properties::kIncludesPrecomputedScores, Stringify(false));

	  index_metafile.AddKeyValuePair(meta_properties::kNumLayers, Stringify(num_layers_));
	  index_metafile.AddKeyValuePair(meta_properties::kOverlappingLayers, Stringify(overlapping_layers_));


  }



  index_metafile.AddKeyValuePair(meta_properties::kIncludesPositions, Stringify(includes_positions_));
  index_metafile.AddKeyValuePair(meta_properties::kIncludesContexts, Stringify(includes_contexts_));
  index_metafile.AddKeyValuePair(meta_properties::kIndexDocIdCoding, IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetStringValue(meta_properties::kIndexDocIdCoding), false));
  index_metafile.AddKeyValuePair(meta_properties::kIndexFrequencyCoding, IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetStringValue(meta_properties::kIndexFrequencyCoding), false));
  index_metafile.AddKeyValuePair(meta_properties::kIndexPositionCoding, IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetStringValue(meta_properties::kIndexPositionCoding), false));
  index_metafile.AddKeyValuePair(meta_properties::kIndexBlockHeaderCoding, IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetStringValue(meta_properties::kIndexBlockHeaderCoding), false));

  index_metafile.AddKeyValuePair(meta_properties::kTotalNumChunks, Stringify(index_builder_->total_num_chunks())); // computed locally
  index_metafile.AddKeyValuePair(meta_properties::kTotalNumPerTermBlocks, Stringify(index_builder_->total_num_per_term_blocks())); // computed locally

  index_metafile.AddKeyValuePair(meta_properties::kTotalDocumentLengths, Stringify(total_document_lengths_)); //derived from original index
  index_metafile.AddKeyValuePair(meta_properties::kTotalNumDocs, Stringify(total_num_docs_)); //derived from original index
  index_metafile.AddKeyValuePair(meta_properties::kTotalUniqueNumDocs, Stringify(total_unique_num_docs_)); // computed locally

  index_metafile.AddKeyValuePair(meta_properties::kFirstDocId, Stringify(first_doc_id_in_index_)); //derived from original index
  index_metafile.AddKeyValuePair(meta_properties::kLastDocId, Stringify(last_doc_id_in_index_)); //derived from original index
  index_metafile.AddKeyValuePair(meta_properties::kDocumentPostingCount, Stringify(document_posting_count_)); //derived from original index

  // Updated by Wei: 2013/01/27. Let this waring message still shows is a good idea
  if ((!overlapping_layers_ && index_posting_count_ != index_builder_->posting_count()) ||
      (overlapping_layers_ && index_posting_count_ > index_builder_->posting_count())) {
    GetErrorLogger().Log("Inconsistency in the '" + string(meta_properties::kIndexPostingCount) + "' meta file property detected: "
        + "value from original index meta file doesn't add up to the value calculated by the index builder.", false);
  }

  index_metafile.AddKeyValuePair(meta_properties::kIndexPostingCount, Stringify(index_builder_->posting_count()));
  index_metafile.AddKeyValuePair(meta_properties::kNumUniqueTerms, Stringify(index_builder_->num_unique_terms()));

  index_metafile.AddKeyValuePair(meta_properties::kTotalHeaderBytes, Stringify(index_builder_->total_num_block_header_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalDocIdBytes, Stringify(index_builder_->total_num_doc_ids_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalFrequencyBytes, Stringify(index_builder_->total_num_frequency_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalPositionBytes, Stringify(index_builder_->total_num_positions_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalWastedBytes, Stringify(index_builder_->total_num_wasted_space_bytes()));

  index_metafile.WriteKeyValueStore(meta_filename.c_str());
}

// float percentageToKeepOfTheWholeIndex
// int pruningMethodCodeOfTheWholeIndex
void LayeredIndexGenerator::WriteMetaFile(const std::string& meta_filename, int functionValue, float percentageToKeepOfTheWholeIndex, int pruningMethodCodeOfTheWholeIndex) {
  KeyValueStore index_metafile;

  // functionValue = 0 means writing the original layered Index properties
  // functionValue = 11 means writing the new Index pruning properties with pre-computed scores
  // functionValue = 12 means writing the new Index pruning properties without pre-computed scores
  // They have minor differences.

  // Index layer properties.
  if (functionValue == 0){
	  index_metafile.AddKeyValuePair(meta_properties::kLayeredIndex, Stringify(true));
	  index_metafile.AddKeyValuePair(meta_properties::kNumLayers, Stringify(num_layers_));
	  index_metafile.AddKeyValuePair(meta_properties::kOverlappingLayers, Stringify(overlapping_layers_));
  }
  else if (functionValue == 11){
	  // Updated by Wei 2013/01/27: either can be pruned index or layered index.
	  // But can not be both because conflicting of the semantics
	  index_metafile.AddKeyValuePair(meta_properties::kPrunedIndex, Stringify(true));
	  // index_metafile.AddKeyValuePair(meta_properties::kLayeredIndex, Stringify(true));
	  index_metafile.AddKeyValuePair(meta_properties::kIncludesPrecomputedScores, Stringify(true));

	  index_metafile.AddKeyValuePair(meta_properties::kNumLayers, Stringify(num_layers_));
	  index_metafile.AddKeyValuePair(meta_properties::kOverlappingLayers, Stringify(overlapping_layers_));

	  // Updated by Wei 2013/02/11 add two more meta properties into the index meta file
	  index_metafile.AddKeyValuePair(meta_properties::kPercentageToKeepOfTheWholeIndex, Stringify(percentageToKeepOfTheWholeIndex));
	  index_metafile.AddKeyValuePair(meta_properties::kPruningMethodCodeOfTheWholeIndex, Stringify(pruningMethodCodeOfTheWholeIndex));
  }
  else if (functionValue == 12){
	  // Updated by Wei 2013/01/27: either can be pruned index or layered index.
	  // But can not be both because conflicting of the semantics
	  index_metafile.AddKeyValuePair(meta_properties::kPrunedIndex, Stringify(true));
	  // index_metafile.AddKeyValuePair(meta_properties::kLayeredIndex, Stringify(true));
	  index_metafile.AddKeyValuePair(meta_properties::kIncludesPrecomputedScores, Stringify(false));

	  index_metafile.AddKeyValuePair(meta_properties::kNumLayers, Stringify(num_layers_));
	  index_metafile.AddKeyValuePair(meta_properties::kOverlappingLayers, Stringify(overlapping_layers_));

	  // Updated by Wei 2013/02/11 add two more meta properties into the index meta file
	  index_metafile.AddKeyValuePair(meta_properties::kPercentageToKeepOfTheWholeIndex, Stringify(percentageToKeepOfTheWholeIndex));
	  index_metafile.AddKeyValuePair(meta_properties::kPruningMethodCodeOfTheWholeIndex, Stringify(pruningMethodCodeOfTheWholeIndex));
  }



  index_metafile.AddKeyValuePair(meta_properties::kIncludesPositions, Stringify(includes_positions_));
  index_metafile.AddKeyValuePair(meta_properties::kIncludesContexts, Stringify(includes_contexts_));
  index_metafile.AddKeyValuePair(meta_properties::kIndexDocIdCoding, IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetStringValue(meta_properties::kIndexDocIdCoding), false));
  index_metafile.AddKeyValuePair(meta_properties::kIndexFrequencyCoding, IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetStringValue(meta_properties::kIndexFrequencyCoding), false));
  index_metafile.AddKeyValuePair(meta_properties::kIndexPositionCoding, IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetStringValue(meta_properties::kIndexPositionCoding), false));
  index_metafile.AddKeyValuePair(meta_properties::kIndexBlockHeaderCoding, IndexConfiguration::GetResultValue(index_->index_reader()->meta_info().GetStringValue(meta_properties::kIndexBlockHeaderCoding), false));

  index_metafile.AddKeyValuePair(meta_properties::kTotalNumChunks, Stringify(index_builder_->total_num_chunks())); // computed locally
  index_metafile.AddKeyValuePair(meta_properties::kTotalNumPerTermBlocks, Stringify(index_builder_->total_num_per_term_blocks())); // computed locally

  index_metafile.AddKeyValuePair(meta_properties::kTotalDocumentLengths, Stringify(total_document_lengths_)); //derived from original index
  index_metafile.AddKeyValuePair(meta_properties::kTotalNumDocs, Stringify(total_num_docs_)); //derived from original index
  index_metafile.AddKeyValuePair(meta_properties::kTotalUniqueNumDocs, Stringify(total_unique_num_docs_)); // computed locally

  index_metafile.AddKeyValuePair(meta_properties::kFirstDocId, Stringify(first_doc_id_in_index_)); //derived from original index
  index_metafile.AddKeyValuePair(meta_properties::kLastDocId, Stringify(last_doc_id_in_index_)); //derived from original index
  index_metafile.AddKeyValuePair(meta_properties::kDocumentPostingCount, Stringify(document_posting_count_)); //derived from original index

  // Updated by Wei: 2013/01/27. Let this waring message still shows is a good idea
  if ((!overlapping_layers_ && index_posting_count_ != index_builder_->posting_count()) ||
      (overlapping_layers_ && index_posting_count_ > index_builder_->posting_count())) {
    GetErrorLogger().Log("Inconsistency in the '" + string(meta_properties::kIndexPostingCount) + "' meta file property detected: "
        + "value from original index meta file doesn't add up to the value calculated by the index builder.", false);
  }

  index_metafile.AddKeyValuePair(meta_properties::kIndexPostingCount, Stringify(index_builder_->posting_count()));
  index_metafile.AddKeyValuePair(meta_properties::kNumUniqueTerms, Stringify(index_builder_->num_unique_terms()));

  index_metafile.AddKeyValuePair(meta_properties::kTotalHeaderBytes, Stringify(index_builder_->total_num_block_header_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalDocIdBytes, Stringify(index_builder_->total_num_doc_ids_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalFrequencyBytes, Stringify(index_builder_->total_num_frequency_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalPositionBytes, Stringify(index_builder_->total_num_positions_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalWastedBytes, Stringify(index_builder_->total_num_wasted_space_bytes()));

  index_metafile.WriteKeyValueStore(meta_filename.c_str());
}

// Calls the embedded Python function
// "pythonModuleForCallingFromC_generateHighLevelFeaturesIncludingRankInTheDoc"
// to build the high level feature string (including the rank of this posting
// within the document) for the given (term, trecID) pair.
// Returns the feature string on success and "" on any failure.
// NOTE(review): on the "Cannot convert argument" paths below, execution falls
// through and PyTuple_SetItem()/the call proceed with a NULL value; the member
// 'pModule_' is DECREF'ed although this function does not own that reference;
// and in the "Call failed" branch 'pFunc_' is DECREF'ed and then XDECREF'ed
// again at the bottom. Confirm these error paths are intentional (in practice
// PyString_FromString on a valid C string should not fail).
string DocIdScoreComparisonWei::get_high_level_features_including_rank_in_the_doc(string term, string trecID, bool debugFlag){
	    if (debugFlag){
	    	cout << "term:" << term  << " " << "trecID:" << trecID << endl;
	    }
	    string high_level_features_including_rank_in_the_doc = "";
	    // cout << "test point 0" << endl;
	    if (pModule_ != NULL) {
			pFunc_ = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_generateHighLevelFeaturesIncludingRankInTheDoc");
			// pFunc_local = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_assignTheRank");
			// pFunc is a new reference
			// cout << "test point 1" << endl;
			if (pFunc_ && PyCallable_Check(pFunc_)) {
				pArgs_ = PyTuple_New(2);
				pValue_ = PyString_FromString( term.c_str() );

				if (!pValue_) {
					Py_DECREF(pArgs_);
					Py_DECREF(pModule_);
					fprintf(stderr, "Cannot convert argument\n");
				}
				// pValue reference stolen here:
				PyTuple_SetItem(pArgs_, 0, pValue_);

				// argument 2:
				pValue_ = PyString_FromString( trecID.c_str() );

				if (!pValue_) {
					Py_DECREF(pArgs_);
					Py_DECREF(pModule_);
					fprintf(stderr, "Cannot convert argument\n");
				}
				// pValue reference stolen here:
				PyTuple_SetItem(pArgs_, 1, pValue_);

				// cout << "test point 2" << endl;

				pValue_ = PyObject_CallObject(pFunc_, pArgs_);

				Py_DECREF(pArgs_);
				if (pValue_ != NULL) {
					high_level_features_including_rank_in_the_doc = PyString_AsString(pValue_);
					// printf("posting_rank_in_doc(returned): %ld\n", posting_rank_in_doc);

					Py_DECREF(pValue_);
				}
				else {
					Py_DECREF(pFunc_);
					Py_DECREF(pModule_);
					PyErr_Print();
					fprintf(stderr,"Call failed\n");
				}
			}
			else {
				if (PyErr_Occurred())
					PyErr_Print();
				fprintf(stderr, "Cannot find function \"%s\"\n", "pythonModuleForCallingFromC_generateHighLevelFeaturesIncludingRankInTheDoc");
			}
			Py_XDECREF(pFunc_);
	    }
	    else{
	    	PyErr_Print();
	    	fprintf(stderr, "Failed to load the module");
	    	high_level_features_including_rank_in_the_doc = "";
	    }
	    if(debugFlag){
	    	cout << "high_level_features_including_rank_in_the_doc(returned):" << high_level_features_including_rank_in_the_doc << endl;
	    	cout << endl;
	    }
	    return high_level_features_including_rank_in_the_doc;
}

// Calls the embedded Python function
// "pythonModuleForCallingFromC_getTermFreqInQueries" to look up how often
// 'term' occurs in the query log. Returns the frequency on success, -1 if the
// Python module was not loaded, and 0 if the call itself fails.
// NOTE(review): on the "Cannot convert argument" path below, execution falls
// through and PyTuple_SetItem() proceeds with a NULL value; 'pModule_' is
// DECREF'ed although this function does not own that reference; and in the
// "Call failed" branch 'pFunc_' is DECREF'ed and then XDECREF'ed again at the
// bottom. Confirm these error paths are intentional.
int DocIdScoreComparisonWei::get_term_freq_in_queries(string term, bool debugFlag){
	long term_freq_in_queries = 0;
    if (pModule_ != NULL) {
		pFunc_ = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_getTermFreqInQueries");
		// pFunc_local = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_assignTheRank");
		// pFunc is a new reference
		// cout << "test point 1" << endl;
		if (pFunc_ && PyCallable_Check(pFunc_)) {
			pArgs_ = PyTuple_New(1);
			pValue_ = PyString_FromString( term.c_str() );

			if (!pValue_) {
				Py_DECREF(pArgs_);
				Py_DECREF(pModule_);
				fprintf(stderr, "Cannot convert argument\n");
			}
			// pValue reference stolen here:
			PyTuple_SetItem(pArgs_, 0, pValue_);

			pValue_ = PyObject_CallObject(pFunc_, pArgs_);

			Py_DECREF(pArgs_);
			if (pValue_ != NULL) {
				term_freq_in_queries = PyInt_AsLong(pValue_);
				// printf("posting_rank_in_list: %ld\n", posting_rank_in_list);
				Py_DECREF(pValue_);
			}
			else {
				Py_DECREF(pFunc_);
				Py_DECREF(pModule_);
				PyErr_Print();
				fprintf(stderr,"Call failed\n");
			}
		}
		else {
			if (PyErr_Occurred())
				PyErr_Print();
			fprintf(stderr, "Cannot find function \"%s\"\n", "pythonModuleForCallingFromC_getTermFreqInQueries");
		}
		Py_XDECREF(pFunc_);
    }
    else{
    	PyErr_Print();
    	fprintf(stderr, "Failed to load the module");
    	term_freq_in_queries = -1;
    }
    if(debugFlag){
    	cout << "term_freq_in_queries:" << term_freq_in_queries << endl;
    	cout << endl;
    }
    return term_freq_in_queries;
}

// Calls the embedded Python function
// "pythonModuleForCallingFromC_assignTheRankByDocID" to look up the rank of
// the posting identified by (term, documentIdentifier) within its list.
// Returns the rank on success, -1 if the Python module was not loaded, and 0
// if the call itself fails.
// NOTE(review): on the "Cannot convert argument" paths below, execution falls
// through and PyTuple_SetItem()/the call proceed with a NULL value; 'pModule_'
// is DECREF'ed although this function does not own that reference; and in the
// "Call failed" branch 'pFunc_' is DECREF'ed and then XDECREF'ed again at the
// bottom. These paths mirror the sibling functions and are left unchanged here.
int DocIdScoreComparisonWei::get_posting_rank_in_list(string term, string documentIdentifier, bool debugFlag){
	    if (debugFlag){
	    	cout << "term:" << term  << " " << "documentIdentifier:" << documentIdentifier << endl;
	    }
	    long posting_rank_in_list = 0;

	    if (pModule_ != NULL) {
			pFunc_ = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_assignTheRankByDocID");
			// pFunc is a new reference
			if (pFunc_ && PyCallable_Check(pFunc_)) {
				pArgs_ = PyTuple_New(2);
				pValue_ = PyString_FromString( term.c_str() );

				if (!pValue_) {
					Py_DECREF(pArgs_);
					Py_DECREF(pModule_);
					fprintf(stderr, "Cannot convert argument\n");
				}
				// pValue reference stolen here:
				PyTuple_SetItem(pArgs_, 0, pValue_);

				// argument 2:
				pValue_ = PyString_FromString( documentIdentifier.c_str() );

				if (!pValue_) {
					Py_DECREF(pArgs_);
					Py_DECREF(pModule_);
					fprintf(stderr, "Cannot convert argument\n");
				}
				// pValue reference stolen here:
				PyTuple_SetItem(pArgs_, 1, pValue_);

				pValue_ = PyObject_CallObject(pFunc_, pArgs_);

				Py_DECREF(pArgs_);
				if (pValue_ != NULL) {
					posting_rank_in_list = PyInt_AsLong(pValue_);
					Py_DECREF(pValue_);
				}
				else {
					Py_DECREF(pFunc_);
					Py_DECREF(pModule_);
					PyErr_Print();
					fprintf(stderr,"Call failed\n");
				}
			}
			else {
				if (PyErr_Occurred())
					PyErr_Print();
				// Fixed: report the name of the function we actually tried to
				// resolve ("...assignTheRankByDocID"); the previous message
				// incorrectly referred to "...assignTheRankByTrecID".
				fprintf(stderr, "Cannot find function \"%s\"\n", "pythonModuleForCallingFromC_assignTheRankByDocID");
			}
			Py_XDECREF(pFunc_);
	    }
	    else{
	    	PyErr_Print();
	    	fprintf(stderr, "Failed to load the module");
	    	posting_rank_in_list = -1;
	    }
	    if(debugFlag){
	    	cout << "posting_rank_in_list:" << posting_rank_in_list << endl;
	    	cout << endl;
	    }
	    return posting_rank_in_list;
}

// Loads a serialized SfWeightVector model from 'file_name' into '*w',
// replacing (and freeing) any model previously held there.
// Exits the process with status 1 if the file cannot be opened.
void DocIdScoreComparisonWei::load_model_from_file_wei(const string& file_name, SfWeightVector** w) {
  if (*w != NULL) {
    delete *w;
    // Fixed: null out the pointer immediately so '*w' never dangles if
    // anything between the delete and the reassignment below fails/throws.
    *w = NULL;
  }

  // version without std
  fstream model_stream;
  model_stream.open(file_name.c_str(), fstream::in);
  if (!model_stream) {
    cerr << "Error opening model input file " << file_name << endl;
    exit(1);
  }

  cerr << "-----> Reading model from: " << file_name << endl;
  // The entire model is serialized on a single line of the file.
  string model_string;
  getline(model_stream, model_string);
  model_stream.close();

  *w = new SfWeightVector(model_string);
  assert(*w != NULL);
}

// Destructor. Intentionally empty: the Python interpreter state ('pModule_')
// appears to be shared across instances, so the DECREF/Py_Finalize calls were
// disabled (see the 2013/01/21 note below) -- confirm this is still desired.
DocIdScoreComparisonWei::~DocIdScoreComparisonWei(){
    // final step: deconstruct the things we do NOT need
	  // for try, magic 20130121
    // Py_DECREF(pModule_);
    // Py_Finalize();
}

// One-time initialization: asks the embedded Python module to load the
// auxiliary data needed for the "rank in the list" feature into memory.
// Failures are reported to stderr but are not fatal.
// NOTE(review): in the "Call failed" branch 'pFunc_' is DECREF'ed and then
// XDECREF'ed again at the bottom, and the non-owned 'pModule_' reference is
// DECREF'ed -- confirm these error paths are intentional.
void DocIdScoreComparisonWei::pre_load_aux_file_for_feature_rank_in_the_list(){
	// cout << "test 1.2.1" << endl;
	pFunc_ = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_loadTheAuxInfoIntoMemoryForFeatureRankInTheList");
	// pFunc is a new reference
	// cout << "test 1.2.2" << endl;
	if (pFunc_ && PyCallable_Check(pFunc_)) {
		pArgs_ = PyTuple_New(0);
		// cout << "test 1.2.3" << endl;
		pValue_ = PyObject_CallObject(pFunc_, pArgs_);
		// cout << "test 1.2.4" << endl;
		Py_DECREF(pArgs_);
		if (pValue_ != NULL) {
			// printf("Result of call: %ld\n", PyInt_AsLong(pValue_));
			Py_DECREF(pValue_);
		}
		else {
			Py_DECREF(pFunc_);
			Py_DECREF(pModule_);
			PyErr_Print();
			fprintf(stderr,"Call failed\n");

		}
	}
	else {
		if (PyErr_Occurred())
			PyErr_Print();
		fprintf(stderr, "Cannot find the function");
	}
	Py_XDECREF(pFunc_);
	// cout << "test 1.2.5" << endl;
}

// One-time initialization: asks the embedded Python module to load the
// auxiliary data needed for the high level features (including the rank of a
// posting within its document) into memory.
// Failures are reported to stderr but are not fatal.
// NOTE(review): same error-path caveats as the sibling pre_load_* functions
// (double DECREF of 'pFunc_', DECREF of the non-owned 'pModule_') -- confirm.
void DocIdScoreComparisonWei::pre_load_aux_file_for_high_level_features_including_rank_in_the_doc(){
	// call the function pythonModuleForCallingFromC_loadTheAuxInfoIntoMemory
	pFunc_ = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_loadTheAuxInfoIntoMemoryForHighLevelFeaturesIncludingRankInTheDoc");
	// pFunc is a new reference

	if (pFunc_ && PyCallable_Check(pFunc_)) {
		pArgs_ = PyTuple_New(0);
		pValue_ = PyObject_CallObject(pFunc_, pArgs_);
		Py_DECREF(pArgs_);
		if (pValue_ != NULL) {
			// printf("Result of call: %ld\n", PyInt_AsLong(pValue_));
			Py_DECREF(pValue_);
		}
		else {
			Py_DECREF(pFunc_);
			Py_DECREF(pModule_);
			PyErr_Print();
			fprintf(stderr,"Call failed\n");

		}
	}
	else {
		if (PyErr_Occurred())
			PyErr_Print();
		fprintf(stderr, "Cannot find the function");
	}
	Py_XDECREF(pFunc_);
}

// One-time initialization: asks the embedded Python module to load the query
// term list data for this comparator's term ('term_') into memory.
// Failures are reported to stderr but are not fatal.
// NOTE(review): same error-path caveats as the sibling functions (NULL value
// falls through to PyTuple_SetItem, double DECREF of 'pFunc_', DECREF of the
// non-owned 'pModule_') -- confirm these paths are intentional.
void DocIdScoreComparisonWei::pre_load_the_actual_query_term_list_data_into_memory(){
	// load the experimental term: 000sites to test the it is OK
	pFunc_ = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_loadTheActualQueryTermListDataIntoMemory");
	// pFunc is a new reference

	if (pFunc_ && PyCallable_Check(pFunc_)) {
		pArgs_ = PyTuple_New(1);
		pValue_ = PyString_FromString( term_.c_str() );

		if (!pValue_) {
			Py_DECREF(pArgs_);
			Py_DECREF(pModule_);
			fprintf(stderr, "Cannot convert argument\n");
		}
		// pValue reference stolen here:
		PyTuple_SetItem(pArgs_, 0, pValue_);

		pValue_ = PyObject_CallObject(pFunc_, pArgs_);

		Py_DECREF(pArgs_);
		if (pValue_ != NULL) {
			// printf("Result of call: %ld\n", PyInt_AsLong(pValue_));
			Py_DECREF(pValue_);
		}
		else {
			Py_DECREF(pFunc_);
			Py_DECREF(pModule_);
			PyErr_Print();
			fprintf(stderr,"Call failed\n");
		}
	}
	else {
		if (PyErr_Occurred())
			PyErr_Print();
		fprintf(stderr, "Cannot find the function \"%s\"\n", "pythonModuleForCallingFromC_loadTheActualQueryTermListDataIntoMemory");
	}
	Py_XDECREF(pFunc_);
	// python modules init process ends.
}

// One-time initialization: asks the embedded Python module to load the
// auxiliary file with term frequencies in the query log into memory.
// Failures are reported to stderr but are not fatal.
// NOTE(review): same error-path caveats as the sibling pre_load_* functions
// (double DECREF of 'pFunc_', DECREF of the non-owned 'pModule_') -- confirm.
void DocIdScoreComparisonWei::pre_load_aux_file_freq_Of_terms_in_queries(){
	// call the function pythonModuleForCallingFromC_loadTheAuxInfoIntoMemory
	pFunc_ = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_loadTheAuxFileForFreqOfTermsInQueries");
	// pFunc is a new reference

	if (pFunc_ && PyCallable_Check(pFunc_)) {
		pArgs_ = PyTuple_New(0);
		pValue_ = PyObject_CallObject(pFunc_, pArgs_);
		Py_DECREF(pArgs_);
		if (pValue_ != NULL) {
			// printf("Result of call: %ld\n", PyInt_AsLong(pValue_));
			Py_DECREF(pValue_);
		}
		else {
			Py_DECREF(pFunc_);
			Py_DECREF(pModule_);
			PyErr_Print();
			fprintf(stderr,"Call failed\n");

		}
	}
	else {
		if (PyErr_Occurred())
			PyErr_Print();
		fprintf(stderr, "Cannot find the function");
	}
	Py_XDECREF(pFunc_);
}

///////////////////////////////////////////////////////////////////////////////Will be dumped 2013/08/07
// Deprecated constructor (slated for removal, see the note above).
// 'sorting_method_code' selects the posting order this comparator imposes:
//   1 - order by docID (nothing to precompute).
//   2 - order by partial BM25 score, largest first; the docID-independent
//       BM25 components are precomputed here.
// Any other code is unsupported and terminates the process.
DocIdScoreComparisonWei::DocIdScoreComparisonWei(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code, string term, map<string,float> queryTermsTrueProbabilityDistributionMap):
	  sorting_method_code_(sorting_method_code),
	  term_(term),
	  doc_map_reader_(doc_map_reader),
	  queryTermsTrueProbabilityDistributionMap_(queryTermsTrueProbabilityDistributionMap){

	  if (sorting_method_code_ == 1){
		  // Based on docID
	  }
	  else if(sorting_method_code_ == 2){
		  // Based on partial BM25 score (largest to smallest)
		  // compute the partial BM25 score components here.
		  kBm25K1 = 2.0;   // BM25 k1 parameter.
		  kBm25B = 0.75;   // BM25 b (length normalization) parameter.
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  // Smoothed inverse document frequency for this term.
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));
		  num_docs_t_ = num_docs_t;

		  // cout << "DEBUG" << endl;
		  // cout << "kIdfT:" << kIdfT << endl;
		  // cout << "num_docs_t_:" << num_docs_t_ << endl;

	  }
	  else{
		  cout << "NOT supported method anymore since 2013/07/17 by Wei" << endl;
		  exit(1);
	  }
  }
///////////////////////////////////////////////////////////////////////////////



// Main comparator constructor. Builds a posting comparator for one term's
// inverted list; the probability maps supply the factors used by methods 5/6.
// Supported sorting methods:
//   1 - sort by docID (no setup);
//   2 - partial BM25 score, largest to smallest;
//   3 - machine-learned score: boots an embedded Python interpreter, preloads
//       auxiliary feature files, sets feature-scaling ranges, and loads an
//       sofia-ml style weight vector from a hard-coded model path;
//   4 - partial BM25 score, smallest to largest;
//   5 - probability-of-making-top-K model: BM25 constants plus a hard-coded
//       logistic-regression weight set (Model19);
//   6 - BM25 constants only; probabilities are looked up later via the
//       class-label bound/probability maps in score().
// Note: the reference-typed map parameters are stored as members, so the
// caller must keep those maps alive for the comparator's lifetime.
DocIdScoreComparisonWei::DocIdScoreComparisonWei(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code, string term, map<int,float>& freq_first_factor_probability_map, map<string,int>& terms_with_corresponding_species_belonging_to_map, map<int,float>& query_length_probability_map, map<int, float>& class_label_with_lower_bounds_map, map<int, float>& class_label_with_probability_map):
	  sorting_method_code_(sorting_method_code),
	  term_(term),
	  doc_map_reader_(doc_map_reader),
	  freq_first_factor_probability_map_(freq_first_factor_probability_map),
	  terms_with_corresponding_species_belonging_to_map_(terms_with_corresponding_species_belonging_to_map),
	  query_length_probability_map_(query_length_probability_map),
	  class_label_with_lower_bounds_map_(class_label_with_lower_bounds_map),
	  class_label_with_probability_map_(class_label_with_probability_map){

	  if (sorting_method_code_ == 1){
		  // Based on docID
	  }
	  else if(sorting_method_code_ == 2){
		  // Based on partial BM25 score (largest to smallest)
		  cout << "sorting_method_code_:" << sorting_method_code_ << " initialization begins..."<< endl;
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));
		  num_docs_t_ = num_docs_t;
		  cout << "sorting_method_code_:" << sorting_method_code_ << " initialization ends."<< endl;
	  }
	  else if(sorting_method_code_ == 3){
		// Updated 2013/01/22
		// currently using python2.7 at the machine dodo
		cout << "sorting_method_code_:" << sorting_method_code_ << " initialization begins..."<< endl;
		// Boot the embedded Python interpreter used for the ML feature pipeline.
		Py_Initialize();
		PyRun_SimpleString("from time import time,ctime\n" "print '-----> Today is',ctime(time())\n");

		pName_ = PyString_FromString("pythonModuleForCallingFromC");

		/* Error checking of pName left out */

		// NOTE(review): pModule_ is not checked for NULL here; a failed import
		// will surface later inside the pre_load_* helpers — confirm intended.
		pModule_ = PyImport_Import(pName_);
		Py_DECREF(pName_);

		pre_load_aux_file_for_feature_rank_in_the_list();
		pre_load_aux_file_freq_Of_terms_in_queries();
		// pre_load_aux_file_for_high_level_features_including_rank_in_the_doc();
		pre_load_the_actual_query_term_list_data_into_memory();

		  // The following are the scaling weights
		  // [Rmin_, Rmax_] is the target range; each [Dmin_featureN_, Dmax_featureN_]
		  // pair is that feature's observed data range used by scale_the_value().
		  Rmin_ = -1.0;
		  Rmax_ = 1.0;

		  Dmin_feature1_ = 0.001967;
		  Dmax_feature1_ = 17.711489;

		  Dmin_feature2_ = 2.0;
		  Dmax_feature2_ = 23077260;

		  Dmin_feature3_ = 1.0;
		  Dmax_feature3_ = 11531;

		  Dmin_feature4_ = 4.0;
		  Dmax_feature4_ = 87803;

		  Dmin_feature5_ = 1;
		  Dmax_feature5_ = 12169;


		  Dmin_feature6_ = 1;
		  Dmax_feature6_ = 23062469;

		  /*
		  Dmin_feature7_ = 10;
		  Dmax_feature7_ = 262140;

		  Dmin_feature8_ = 0;
		  Dmax_feature8_ = 228291;

		  Dmin_feature9_ = 0;
		  Dmax_feature9_ = 625.8;

		  Dmin_feature10_ = 0;
		  Dmax_feature10_ = 3913;

		  Dmin_feature11_ = 0;
		  Dmax_feature11_ = 1;

		  Dmin_feature12_ = 0;
		  Dmax_feature12_ = 1;

		  Dmin_feature13_ = 0;
		  Dmax_feature13_ = 1;

		  Dmin_feature14_ = 0;
		  Dmax_feature14_ = 1;

		  Dmin_feature15_ = 0;
		  Dmax_feature15_ = 1;

		  Dmin_feature16_ = 1;
		  Dmax_feature16_ = 87443;

		  Dmin_feature17_ = 1;
		  Dmax_feature17_ = 23062469;
		  */

		  // Based on partial BM25 score
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));
		  num_docs_t_ = num_docs_t;
		  // Based on machine learning score assigner (Updated by Wei 2013/01/18)
		  // SfWeightVector* w = NULL;
		  // w = new SfWeightVector(20);

		  // Model1
		  // Model2: minimal features contained from the toolkit(BM25), 4 features, 5 dimensions
		  // Model3
		  // Model4
		  // Model5: currently using

		  // This dimension setting is for Model5
		  // NOTE(review): hard-coded machine-specific model path; must exist at
		  // runtime or load_model_from_file_wei will have nothing to read.
		  w_ = new SfWeightVector(20);
		  load_model_from_file_wei("/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model5/model", &w_);
		  cout << "sorting_method_code_:" << sorting_method_code_ << " initialization ends."<< endl;
	  }
	  else if(sorting_method_code_ == 4){
		  // Based on partial BM25 score (smallest to largest)
		  cout << "sorting_method_code_:" << sorting_method_code_ << " initialization begins..."<< endl;
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));
		  num_docs_t_ = num_docs_t;
		  cout << "sorting_method_code_:" << sorting_method_code_ << " initialization ends."<< endl;
	  }
	  else if(sorting_method_code_ == 5){
		  // Based on the probability of a specific posting that can make into TOP100, TOP10 for example
		  cout << "sorting_method_code_:" << sorting_method_code_ << " initialization begins..."<< endl;

		  // step1: compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));
		  num_docs_t_ = num_docs_t;



		  // step2: assign some logistic regression weights here
		  // Updated by Wei 2013/07/17
		  // This is the set of weights for the model X (Fill it when necessary by Wei on 2014/08/04)
		  // Current option1
		  /*
          === Classifier model (full training set) ===
          Logistic Regression with ridge parameter of 1.0E-8
          Coefficients...
          	  	  	  	  	  	  	  	  	  	  	   Class
          Variable                                     TOP1000
          ==========================================================================
		  partialBM25ScoreComponentPart1_IDF_weight_1_    =  1.0871194792851835;
		  partialBM25ScoreComponentPart2_TF_weight_2_     =  0.3728600012338109;
		  partialBM25_weight_3_                           =  0.11206566785653087;
		  length_of_the_inverted_index_weight_4_          =  9.744775805361068E-8;
		  term_freq_in_doc_weight_5_                      = -4.69988990569863E-4;
		  doc_words_weight_6_                             =  1.8479680483335113E-5;
		  term_freq_in_training_head95K_queries_weight_7_ = -3.896558795003812E-5;
		  term_freq_in_collection_weight_8_               = -3.3684749578451364E-10;
		  posting_rank_in_doc_weight_9_                   = -4.53826451398889E-6;
		  posting_rank_in_list_weight_10_                 =  1.8682187429337345E-8;
          intercept_weight_0_                             = -2.532240648254239;
          */

		  /*
		  intercept_weight_0_                             = -2.532240648254239;
		  partialBM25ScoreComponentPart1_IDF_weight_1_    =  1.0871194792851835;
		  partialBM25ScoreComponentPart2_TF_weight_2_     =  0.3728600012338109;
		  partialBM25_weight_3_                           =  0.11206566785653087;
		  length_of_the_inverted_index_weight_4_          =  9.744775805361068E-8;
		  term_freq_in_doc_weight_5_                      = -4.69988990569863E-4;
		  doc_words_weight_6_                             =  1.8479680483335113E-5;
		  term_freq_in_training_head95K_queries_weight_7_ = -3.896558795003812E-5;
		  term_freq_in_collection_weight_8_               = -3.3684749578451364E-10;
		  posting_rank_in_doc_weight_9_                   = -4.53826451398889E-6;
		  posting_rank_in_list_weight_10_                 =  1.8682187429337345E-8;
		  */

		  // Updated by Wei 2013/08/04
		  // This is the set of weights for the Model19
		  // Only the intercept and the two BM25-component weights are active;
		  // every other feature weight is zeroed out in this model.
		  intercept_weight_0_                             =-5.729427121406548;
		  partialBM25ScoreComponentPart1_IDF_weight_1_    = 0.7311304467682848;
		  partialBM25ScoreComponentPart2_TF_weight_2_     = 0.9032571917744138;
		  partialBM25_weight_3_                           = 0.0;
		  length_of_the_inverted_index_weight_4_          = 0.0;
		  term_freq_in_doc_weight_5_                      = 0.0;
		  doc_words_weight_6_                             = 0.0;
		  term_freq_in_training_head95K_queries_weight_7_ = 0.0;
		  term_freq_in_collection_weight_8_               = 0.0;
		  posting_rank_in_doc_weight_9_                   = 0.0;
		  posting_rank_in_list_weight_10_                 = 0.0;


		  cout << "sorting_method_code_:" << sorting_method_code_ << " initialization ends."<< endl;
	  }
	  else if(sorting_method_code_ == 6){
		  cout << "sorting_method_code_:" << sorting_method_code_ << " initialization begins..."<< endl;

		  // step1: compute the partial BM25 score component here (Cause I still need to return this score as a checking value)
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));
		  num_docs_t_ = num_docs_t;

		  cout << "sorting_method_code_:" << sorting_method_code_ << " initialization ends."<< endl;
	  }
  }

// Linearly rescales originalValue from the data range [Dmin_feature, Dmax_feature]
// onto the target range [Rmin, Rmax] (used to normalize ML feature values).
// Returns the rescaled value.
// Fix: the previous form divided by (Dmax_feature - Dmin_feature) unguarded,
// so a degenerate data range produced +/-inf or NaN; such a range now maps
// everything to the lower target bound.
float DocIdScoreComparisonWei::scale_the_value(float originalValue, float Rmin, float Rmax, float Dmin_feature, float Dmax_feature){
	const float domain_span = Dmax_feature - Dmin_feature;
	if (domain_span == 0.0f) {
		return Rmin;
	}
	// Algebraically identical to the two-term form
	//   v*(Rmax-Rmin)/span + (Rmin*Dmax - Rmax*Dmin)/span
	// but factored as shift-scale-shift, which makes the mapping obvious.
	return Rmin + (originalValue - Dmin_feature) * (Rmax - Rmin) / domain_span;
}

// Renders a float as a string using the default stream formatting
// (the same text operator<< would produce).
string DocIdScoreComparisonWei::make_the_value_into_string_format(float originalValue){
	  ostringstream formatter;
	  formatter << originalValue;
	  return formatter.str();
}


// Computes the sort key for one posting according to sorting_method_code_:
//   1 - negated, scaled docID (so the descending comparator yields docID order);
//   2, 4 - partial BM25 score;
//   3 - unimplemented (aborts);
//   5 - returns the partial BM25 score, but as a side effect computes and
//       stores the three probability factors P(t), P(Int), P(TOPk) and their
//       combinations into the *FactorProbability* member fields;
//   6 - returns the partial BM25 score, but as a side effect computes P(t)
//       and a table-lookup combined probability into the *Baseline_ members.
// Methods 5/6 are therefore NOT pure: callers read the probability results
// from the member fields after calling score().
float DocIdScoreComparisonWei::score(const IndexEntry& entry){
	  float returning_score = 0.0;

	  if(sorting_method_code_ == 1){
		  // do nothing.
		  // Negate and scale the docID so that "largest score first" sorting
		  // produces ascending docID order.
		  returning_score = -float(entry.doc_id)/100000000;
		  // for debugging.
		  //cout << "returning_score:" << returning_score << endl;
	  }
	  else if(sorting_method_code_ == 2){
		  // Partial BM25: idf * (k1+1)*tf / (tf + k1*(1-b) + k1*b*doclen/avgdoclen),
		  // using the constants precomputed in the constructor.
		  uint32_t f_d_t = entry.frequency;
		  int doc_len = doc_map_reader_.GetDocumentLength(entry.doc_id);
		  returning_score = kIdfT * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
		  assert(!isnan(returning_score));
	  }
	  else if(sorting_method_code_ == 3){
		  cout << "Updated by Wei on 2013/09/16 at school" << endl;
		  cout << "This score() function is CURRENTLY under construction." << endl;
		  cout << "No logic has been implemented." << endl;
		  exit(1);
	  }
	  else if(sorting_method_code_ == 4){
		  // Same partial BM25 formula as method 2; only the comparator
		  // direction differs (see operator()).
		  uint32_t f_d_t = entry.frequency;
		  int doc_len = doc_map_reader_.GetDocumentLength(entry.doc_id);
		  returning_score = kIdfT * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
		  assert(!isnan(returning_score));
	  }
	  else if(sorting_method_code_ == 5){
		  // Updated by Wei 2013/08/05 night at school
		  // the returning value will be partialBM25 score
		  // But inside the function, various kinds of probabilities and their combinations will be computed
		  float first_factor_probability_value = 0.0;
		  float second_factor_probability_value = 0.0;
		  float third_factor_probability_value = 0.0;

	      // step1/3: compute P(t)
		  // compute the first_factor_probability_value
		  // Terms are bucketed into frequency "species"; unknown terms fall
		  // back to the species-0 probability.
	      string curr_look_up_term = term_;
          if (terms_with_corresponding_species_belonging_to_map_.count(curr_look_up_term) > 0 ){
          	// for debug
          	// cout << "mark1" << endl;
          	first_factor_probability_value = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[curr_look_up_term] ];
          }
          else{
          	// for debug
          	// cout << "mark2" << endl;
          	first_factor_probability_value = freq_first_factor_probability_map_[0];
          }

	      // step2/3: compute P(Int)
          // compute the second_factor_probability_value
	      float partialProbability = 0.0;
	      float valuePart1 = 0.0;
	      float valuePart2 = 0.0;

	      // !!!!! This maybe the part which is really slow.
	      // string currentDocIDInStringFormat;
	      // stringstream ss;
	      // ss << entry.doc_id;
	      // ss >> currentDocIDInStringFormat;

	      // OLD version
	      // float XDocValueForGoodTurning_ = 0.0;

	      // Updated by Wei 2013/08/03 morning by Wei at school
	      // Use the good turing for now

	      // CURRENT version
	      // XDocValueForGoodTurning_ will be assigned score in the layered_index_generator.ProduceProbabilitiesForRandomlySelectedPostings() function

	      // OLD version
	      /*
	      if (docID_With_Xdoc_Value_map_.count(currentDocIDInStringFormat) > 0){
	    	  XDocValueForGoodTurning_ = docID_With_Xdoc_Value_map_[currentDocIDInStringFormat];
	      }
	      else{
	          cout << "System Error, docID is NOT in the docID_With_Xdoc_Value_goodTurning_map_, mark3" << endl;
	          exit(1);
	      }
	      */

	      // for debug ONLY
	      // cout << "XDocValueForGoodTurning:" << XDocValueForGoodTurning << endl;

	      // P(Int) = sum over query lengths L of P(L) * XDocValue_^(L-1).
	      // XDocValue_ is a member set externally before score() is called.
	      map<int,float>::iterator iter;
	      for (iter = query_length_probability_map_.begin(); iter != query_length_probability_map_.end(); ++iter){
				valuePart1 = (*iter).second;
				valuePart2 = pow(XDocValue_, (*iter).first-1);
				partialProbability = valuePart1 * valuePart2;
				second_factor_probability_value += partialProbability;
				// for debug ONLY
				/*
				cout << "queryLength:" << (*iter).first << endl;
				cout << "valuePart1:" << valuePart1 << endl;
				cout << "valuePart2:" << valuePart2 << endl;
				cout << "partialProbability:" << partialProbability << endl;
				cout << "second_factor_probability_value:" << second_factor_probability_value << endl;
				cout << endl;
				*/
	      }


	      // step3/3: compute P(TOP10)
          // compute the third_factor_probability_value
		  uint32_t f_d_t = entry.frequency;
		  int doc_len = doc_map_reader_.GetDocumentLength(entry.doc_id);
		  float valueOfPartialBM25ScoreComponentPart1_IDF = kIdfT;
		  float valueOfPartialBM25ScoreComponentPart2_TF = (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
		  float valueOfPartialBM25Score = valueOfPartialBM25ScoreComponentPart1_IDF * valueOfPartialBM25ScoreComponentPart2_TF;
		  assert(!isnan(valueOfPartialBM25Score));
		  // Rank-in-doc/rank-in-list features are not available here; their
		  // weights are zero in Model19, so the zeros are harmless.
		  float valueOfPostingRankInDoc = 0.0;
		  float valueOfPostingRankInList = 0.0;

		  // the following are for debug ONLY
		  /*
		  cout << "checking" << endl;
		  cout << "basic:" << endl;
		  cout << "entry.doc_id:" << entry.doc_id << endl;
		  cout << "returning_score:" << returning_score << endl;
		  cout << "f_d_t:" << f_d_t << endl;
		  cout << "doc_len:" << doc_len << endl;
		  */

		  // Linear model: dot product of the Model19 weights with the feature
		  // vector (intercept term included as weight_0 * 1).
		  float matrixMultiplicationScore = intercept_weight_0_                                  * 1 +
				                            partialBM25ScoreComponentPart1_IDF_weight_1_         * valueOfPartialBM25ScoreComponentPart1_IDF +
				                            partialBM25ScoreComponentPart2_TF_weight_2_          * valueOfPartialBM25ScoreComponentPart2_TF +
				  	  	  	  	  	  	  	partialBM25_weight_3_                                * valueOfPartialBM25Score +
				  	  	  	  	  	        length_of_the_inverted_index_weight_4_               * valueOfCurrentPostingLengthOfTheInvertedList_ +
				  	  	  	  	  	        term_freq_in_doc_weight_5_                           * f_d_t +
				  	  	  	  	  	        doc_words_weight_6_                                  * doc_len +
				  	  	  	  	  	        term_freq_in_training_head95K_queries_weight_7_      * valueOfcurrentPostingTermFreqInQueries_ +
				  	  	  	  	  	        term_freq_in_collection_weight_8_                    * valueOfCurrentPostingTermFreqInCollection_ +
				  	  	  	  	  	        posting_rank_in_doc_weight_9_                        * valueOfPostingRankInDoc + // currently can not feasibly provide the value of this feature
				  	  	  	  	  	        posting_rank_in_list_weight_10_                      * valueOfPostingRankInList + // currently can not feasibly provide the value of this feature
				  	  	  	  	  	        0.0;

		  // Updated by Wei 2013/07/17 night:
		  // For this formula, I need to clearly understand when it is 1 - sth, when it is just sth.
		  // If the
		  // Logistic link: sigmoid(x) = 1/(1+e^-x); written here as
		  // 1 - 1/(1+e^x), which is algebraically the same quantity.
		  third_factor_probability_value = 1 - 1/(1 + exp( matrixMultiplicationScore ));

		  /*
		  // for debug ONLY
		  cout << "entry.doc_id:" << entry.doc_id << endl;
		  cout << "queryTermsTrueProbabilityDistributionMap_[term_]:" << queryTermsTrueProbabilityDistributionMap_[term_] << endl;
		  cout << "queryTerms1DProbabilityDistributionMap_[term_]:" << queryTerms1DProbabilityDistributionMap_[term_] << endl;
		  cout << "queryTerms2DProbabilityDistributionMap_[term_]:" << queryTerms2DProbabilityDistributionMap_[term_] << endl;
		  cout << "queryTermsGoodTuringProbabilityDistributionMap_[term_]:" << queryTermsGoodTuringProbabilityDistributionMap_[term_] << endl;
		  cout << "featureName" << " " << "weightOfFeature" << " " << "valueOfFeature" << endl;
		  cout << "intercept_weight_0_" << " " << intercept_weight_0_ << " " << "1" << endl;
		  cout << "partialBM25ScoreComponentPart1_IDF_weight_1_" << " " << partialBM25ScoreComponentPart1_IDF_weight_1_ << " " << valueOfPartialBM25ScoreComponentPart1_IDF << endl;
		  cout << "partialBM25ScoreComponentPart2_TF_weight_2_" << " " << partialBM25ScoreComponentPart2_TF_weight_2_ << " " << valueOfPartialBM25ScoreComponentPart2_TF << endl;
		  cout << "partialBM25_weight_3_" << " " << partialBM25_weight_3_ << " " << valueOfPartialBM25Score << endl;
		  cout << "length_of_the_inverted_index_weight_4_" << " " << length_of_the_inverted_index_weight_4_ << " " << valueOfCurrentPostingLengthOfTheInvertedList_ << endl;
		  cout << "term_freq_in_doc_weight_5_" << " " << term_freq_in_doc_weight_5_ << " " << f_d_t << endl;
		  cout << "doc_words_weight_6_" << " " << doc_words_weight_6_ << " " << doc_len << endl;
		  cout << "term_freq_in_training_head95K_queries_weight_7_" << " " << term_freq_in_training_head95K_queries_weight_7_ << " " << valueOfcurrentPostingTermFreqInQueries << endl;
		  cout << "term_freq_in_collection_weight_8_" << " " << term_freq_in_collection_weight_8_ << " " << valueOfCurrentPostingTermFreqInCollection_ << endl;
		  cout << "posting_rank_in_doc_weight_9_" << " " << posting_rank_in_doc_weight_9_ << " " << valueOfPostingRankInDoc << endl;
		  cout << "posting_rank_in_list_weight_10_" << " " << posting_rank_in_list_weight_10_ << " " << valueOfPostingRankInList << endl;
		  cout << "third_factor_probability_value:" << third_factor_probability_value << endl;
		  cout << endl;
		  */

		  // Updated by Wei 2013/08/05 night. Needed to remove this BIG_NUMBER related thing one day.
		  // The following is the old view. Updated by Wei 2013/07/17 night
		  // The true probability is usually very small, so I need to time a big number.
		  // option1 for BIG_NUMBER:
		  float BIG_NUMBER = 1000000;

		  // I think this will become an unused variable
		  thirdFactorProbabilityValueTimesBigNumberValue_ = third_factor_probability_value * BIG_NUMBER;

		  // current version
		  // Publish the individual factors and their products through member
		  // fields; callers pick these up after score() returns.
		  firstFactorProbabilityOriginalValue_ = first_factor_probability_value;
		  secondFactorProbabilityOriginalValue_ = second_factor_probability_value;
		  thirdFactorProbabilityOriginalValue_ = third_factor_probability_value;
		  final_1_3_FactorsProbabilityCombinedOriginalValue_ = firstFactorProbabilityOriginalValue_ * thirdFactorProbabilityOriginalValue_;
		  final_2_3_FactorsProbabilityCombinedOriginalValue_ = secondFactorProbabilityOriginalValue_ * thirdFactorProbabilityOriginalValue_;
		  final_1_2_3_FactorsProbabilityCombinedOriginalValue_ = firstFactorProbabilityOriginalValue_ * secondFactorProbabilityOriginalValue_ * thirdFactorProbabilityOriginalValue_;

		  /*
		  // for DEBUG only
		  cout << "current_term: " << curr_look_up_term << endl;
		  cout << "current_DocID: " << currentDocIDInStringFormat << endl;
		  cout << "valueOfPartialBM25Score:" <<valueOfPartialBM25Score << endl;
		  cout << "first_factor_probability_value_: " << firstFactorProbabilityOriginalValue_ << endl;
		  cout << "second_factor_probability_value_: " << secondFactorProbabilityOriginalValue_ << endl;
		  cout << "third_factor_probability_value_: " << thirdFactorProbabilityOriginalValue_ << endl;
		  cout << "final_2_3_FactorsProbabilityCombinedOriginalValue_: " << final_2_3_FactorsProbabilityCombinedOriginalValue_ << endl;
		  cout << "final_1_3_FactorsProbabilityCombinedOriginalValue_: " << final_1_3_FactorsProbabilityCombinedOriginalValue_ << endl;
		  cout << "final_1_2_3_FactorsProbabilityCombinedOriginalValue_ : " << final_1_2_3_FactorsProbabilityCombinedOriginalValue_<< endl;
		  cout << endl;
		  */

		  returning_score = valueOfPartialBM25Score;
	  }
	  else if(sorting_method_code_ == 6){
		  // Updated by Wei 2013/08/29 afternoon at school
		  // the returning value will still be the partialBM25 score
		  // But inside the function, various kinds of probabilities and their combinations will be computed
		  float first_factor_probability_value = 0.0;
		  float second_AND_third_factor_combination_probability_value = 0.0;

	      // step1/2: compute P(t)
		  // compute the first_factor_probability_value
		  // Same species-bucket lookup as method 5.
	      string curr_look_up_term = term_;
          if (terms_with_corresponding_species_belonging_to_map_.count(curr_look_up_term) > 0 ){
        	  first_factor_probability_value = freq_first_factor_probability_map_[ terms_with_corresponding_species_belonging_to_map_[curr_look_up_term] ];
          }
          else{
        	  first_factor_probability_value = freq_first_factor_probability_map_[0];
          }

          // step2/2: compute the combination of the second and third factor P(unknown) called as second_AND_third_factor_combination_probability_value
          // basically, based on the partialBM25 score, we can go through the look up table and find the probability of what we want
		  uint32_t f_d_t = entry.frequency;
		  int doc_len = doc_map_reader_.GetDocumentLength(entry.doc_id);
		  float valueOfPartialBM25ScoreComponentPart1_IDF = kIdfT;
		  float valueOfPartialBM25ScoreComponentPart2_TF = (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
		  float valueOfPartialBM25Score = valueOfPartialBM25ScoreComponentPart1_IDF * valueOfPartialBM25ScoreComponentPart2_TF;
		  assert(!isnan(valueOfPartialBM25Score));

		  // Find the score bucket [lower_bound(label-1), lower_bound(label))
		  // that contains valueOfPartialBM25Score, then take that bucket's
		  // probability from class_label_with_probability_map_.
		  // NOTE(review): a score below the first bucket's lower bound or at or
		  // above the last never matches; that case is caught by the zero-value
		  // check below, which aborts — confirm the bounds table covers the
		  // full score range.
		  map<int, float>::iterator iter;
		  int currentClassLabelInIntFormat = -1;
		  int previousClassLabelInIntFormat = -1;
		  for (iter = class_label_with_lower_bounds_map_.begin(); iter != class_label_with_lower_bounds_map_.end(); iter++) {
			  currentClassLabelInIntFormat = (*iter).first;
			  previousClassLabelInIntFormat = currentClassLabelInIntFormat - 1;
			  // current testing and pass version
			  if (valueOfPartialBM25Score < class_label_with_lower_bounds_map_[currentClassLabelInIntFormat] and valueOfPartialBM25Score >= class_label_with_lower_bounds_map_[previousClassLabelInIntFormat]){

				  if (class_label_with_probability_map_.count(previousClassLabelInIntFormat) > 0){
					  second_AND_third_factor_combination_probability_value = class_label_with_probability_map_[ previousClassLabelInIntFormat ];
				  }
				  else{
					  cout << "previousClassLabelInIntFormat:" << previousClassLabelInIntFormat << " not in class_label_with_probability_map_" << endl;
					  cout << "critical error" << endl;
					  exit(1);
				  }
				  break;
			  }

			  // OLD version in test
			  // if (valueOfPartialBM25Score >= (*iter-1).second and valueOfPartialBM25Score < (*iter).second){
				  // second_AND_third_factor_combination_probability_value = class_label_with_probability_map_[(*iter-1).first];
			  // }

		  }

		  if(second_AND_third_factor_combination_probability_value == 0){
			  cout << "second_AND_third_factor_combination_probability_value:" << second_AND_third_factor_combination_probability_value << endl;
			  cout << "valueOfPartialBM25Score:" << valueOfPartialBM25Score << endl;
			  cout << "previousClassLabelInIntFormat:" << previousClassLabelInIntFormat << endl;
			  cout << "figure this out, boy." << endl;
			  cout << "critical error" << endl;
			  exit(1);
		  }

		  // The following variables will NOT BE updated since 2013/08/29 afternoon at school by Wei
		  // I think this will become an unused variable
		  // (1) thirdFactorProbabilityValueTimesBigNumberValue_
		  // (2) secondFactorProbabilityOriginalValue_
		  // (3) thirdFactorProbabilityOriginalValue_
		  // (4) final_1_3_FactorsProbabilityCombinedOriginalValue_
		  // (5) final_1_2_3_FactorsProbabilityCombinedOriginalValue_

		  // The following variables will still BE updated since 2013/08/29 afternoon at school by Wei
		  firstFactorProbabilityOriginalValue_ = first_factor_probability_value;
		  final_2_3_FactorsProbabilityCombinedOriginalValueBaseline_ = second_AND_third_factor_combination_probability_value;
		  final_1_2_3_FactorsProbabilityCombinedOriginalValueBaseline_ = firstFactorProbabilityOriginalValue_ * final_2_3_FactorsProbabilityCombinedOriginalValueBaseline_;

		  /*
		  // for DEBUG only
		  cout << "current_term: " << curr_look_up_term << endl;
		  cout << "current_DocID: " << currentDocIDInStringFormat << endl;
		  cout << "valueOfPartialBM25Score:" <<valueOfPartialBM25Score << endl;
		  cout << "first_factor_probability_value_: " << firstFactorProbabilityOriginalValue_ << endl;
		  cout << "second_factor_probability_value_: " << secondFactorProbabilityOriginalValue_ << endl;
		  cout << "third_factor_probability_value_: " << thirdFactorProbabilityOriginalValue_ << endl;
		  cout << "final_2_3_FactorsProbabilityCombinedOriginalValue_: " << final_2_3_FactorsProbabilityCombinedOriginalValue_ << endl;
		  cout << "final_1_3_FactorsProbabilityCombinedOriginalValue_: " << final_1_3_FactorsProbabilityCombinedOriginalValue_ << endl;
		  cout << "final_1_2_3_FactorsProbabilityCombinedOriginalValue_ : " << final_1_2_3_FactorsProbabilityCombinedOriginalValue_<< endl;
		  cout << endl;
		  */

		  returning_score = valueOfPartialBM25Score;
	  }
	  return returning_score;
}

// Strict weak ordering over postings. Method 4 orders scores ascending
// (smallest first); every other sorting method orders them descending.
bool DocIdScoreComparisonWei::operator()(const IndexEntry& lhs, const IndexEntry& rhs){
	return (sorting_method_code_ == 4)
			? (score(lhs) < score(rhs))   // ascending by score
			: (score(lhs) > score(rhs));  // descending by score
}

// Bookkeeping record for the pruning pass of one term's inverted list:
// how many postings were pruned so far vs. the original list length, the
// current minimum (value, docID) frontier, and whether postings remain.
// Pure member-initialization; no additional logic.
aux_pruning_term_entry::aux_pruning_term_entry(string term, uint32_t num_of_posting_pruned, uint32_t num_of_posting_in_the_original_list, float current_min_value, uint32_t current_min_docID, bool whether_there_are_still_postings_to_prune):
	term_(term),
	num_of_posting_pruned_(num_of_posting_pruned),
	num_of_posting_in_the_original_list_(num_of_posting_in_the_original_list),
	current_min_value_(current_min_value),
	current_min_docID_(current_min_docID),
	whether_there_are_still_postings_to_prune_(whether_there_are_still_postings_to_prune){
}

// Comparator over IndexDocOptimizedEntry items. Combines the per-term first
// factor probability P(t) (looked up by term id) with a static probability
// and an XDOC ratio weighted by b (see score()).
// Note: the map is held by reference — the caller must keep it alive for the
// comparator's lifetime. Pure member-initialization; no additional logic.
DocPostingOptimizedCompareClass::DocPostingOptimizedCompareClass(const map<uint32_t, float>& termID_with_their_first_factor_probability_map, float a, float b, float currentSelectXDOC, float currentALLXDOC):
	  termID_with_their_first_factor_probability_map_(termID_with_their_first_factor_probability_map),
	  a_(a),	// currently NO USE at all since 2013/10/30 afternoon
	  b_(b),
	  currentSelectXDOC_(currentSelectXDOC),
	  currentALLXDOC_(currentALLXDOC){
	  	  // No need to have any logic here
}

// Descending ordering: the entry with the larger combined score sorts first.
bool DocPostingOptimizedCompareClass::operator ()(const IndexDocOptimizedEntry& lhs, const IndexDocOptimizedEntry& rhs){
	const bool lhs_sorts_first = score(lhs) > score(rhs);
	return lhs_sorts_first;
}

double DocPostingOptimizedCompareClass::getStaticPartScore(const IndexDocOptimizedEntry& entry){
	  /*
	  // Notes:
	  // (1) Sort elements in range
	  // (2) Sorts the elements in the range [first,last) into ascending(increasing) order.
	  // (3) The elements are compared using operator< for the first version, and comp for the second.
	  // (4) Equivalent elements are not guaranteed to keep their original relative order (see stable_sort).
	  // The operator should be >(descending order), but NOT <
	  */

	  // version try4:
	  double returningScore = -1.0;
	  // This 50000000 is the very LARGE big number here which outside the termID range from 0 - around 37M
	  if (entry.term_id == 50000000){
		  returningScore = -1.0;
	  }
	  else{
		  // cout << termID_with_their_first_factor_probability_map_.size() << endl;
		  // scoring function weeks ago since 2013/10/24 afternoon
		  // returningScore = termID_with_their_first_factor_probability_map_.find(entry.term_id)->second * ( a_ * entry.staticProbability + b_ * currentSelectXDOC_ / currentALLXDOC_);
		  // scoring function used since 2013/10/24 afternoon (so called NEW Formula)
		  // Updated on 2013/10/25 morning by Wei at school, ONLY the static probability part
		  returningScore = termID_with_their_first_factor_probability_map_.find(entry.term_id)->second * entry.staticProbability;
		  // cout << "returningScore: " << returningScore << endl;
		  // NOT working version DEBUGGING for hours on 2013/09/22 night by Wei at school
		  // returningScore = termID_with_their_first_factor_probability_map_[ entry.term_id ] * ( a_ * entry.staticProbability + b_ * currentSelectXDOC_ / currentALLXDOC_);
	  }
	  // cout << "entry.term_id: " << entry.term_id << endl;
	  // cout << "returningScore: " << returningScore << endl;
	  return returningScore;
}

double DocPostingOptimizedCompareClass::getDynamicPartScore(const IndexDocOptimizedEntry& entry){
	  /*
	  // Notes:
	  // (1) Sort elements in range
	  // (2) Sorts the elements in the range [first,last) into ascending(increasing) order.
	  // (3) The elements are compared using operator< for the first version, and comp for the second.
	  // (4) Equivalent elements are not guaranteed to keep their original relative order (see stable_sort).
	  // The operator should be >(descending order), but NOT <
	  */

	  // version try4:
	  double returningScore = -1.0;
	  // This 50000000 is the very LARGE big number here which outside the termID range from 0 - around 37M
	  if (entry.term_id == 50000000){
		  returningScore = -1.0;
	  }
	  else{
		  // cout << termID_with_their_first_factor_probability_map_.size() << endl;
		  // scoring function weeks ago since 2013/10/24 afternoon
		  // returningScore = termID_with_their_first_factor_probability_map_.find(entry.term_id)->second * ( a_ * entry.staticProbability + b_ * currentSelectXDOC_ / currentALLXDOC_);
		  // scoring function used since 2013/10/24 afternoon (so called NEW Formula)
		  // Updated on 2013/10/25 morning by Wei at school, ONLY the dynamic probability part(with a weight b_ attached to it)
		  returningScore = termID_with_their_first_factor_probability_map_.find(entry.term_id)->second * entry.staticProbability * b_ * currentSelectXDOC_ / currentALLXDOC_;
		  // for DEBUG
		  // cout << "DEBUG" << endl;
		  // cout << "termID_with_their_first_factor_probability_map_.find(entry.term_id)->second: " << termID_with_their_first_factor_probability_map_.find(entry.term_id)->second << endl;
		  // cout << "entry.staticProbability: " << entry.staticProbability << endl;
		  // cout << "b_: " << b_ << endl;
		  // cout << "currentSelectXDOC_ / currentALLXDOC_: " << currentSelectXDOC_ / currentALLXDOC_ << endl;
		  // cout << "currentSelectXDOC_: " << currentSelectXDOC_ << endl;
		  // cout << "currentALLXDOC_: " << currentALLXDOC_ << endl;
		  // cout << "returningScore: " << returningScore << endl;
		  // NOT working version DEBUGGING for hours on 2013/09/22 night by Wei at school
		  // returningScore = termID_with_their_first_factor_probability_map_[ entry.term_id ] * ( a_ * entry.staticProbability + b_ * currentSelectXDOC_ / currentALLXDOC_);
	  }
	  // cout << "entry.term_id: " << entry.term_id << endl;
	  // cout << "returningScore: " << returningScore << endl;
	  return returningScore;
}

double DocPostingOptimizedCompareClass::score(const IndexDocOptimizedEntry& entry){
	  // Computes the selection score for a document posting entry.
	  //
	  // NOTE on usage: std::sort() orders ascending via operator< by default;
	  // the comparator consuming this score must use operator> to obtain the
	  // required descending (highest-score-first) order.
	  //
	  // Returns -1.0 for invalid/sentinel entries and for term ids that are not
	  // present in the first-factor probability map; otherwise returns the
	  // "NEW Formula" score in use since 2013/10/24.

	  // Sentinel term id marking an invalid/padding entry. Real term ids range
	  // from 0 to roughly 37M, so 50,000,000 lies safely outside that range.
	  const unsigned long kInvalidTermId = 50000000;
	  if (entry.term_id == kInvalidTermId) {
		  return -1.0;
	  }

	  // Look up the first-factor probability for this term. The previous code
	  // dereferenced find()'s result unconditionally, which is undefined
	  // behavior when the term id is absent from the map; treat a missing
	  // term id the same as an invalid entry instead.
	  const auto it = termID_with_their_first_factor_probability_map_.find(entry.term_id);
	  if (it == termID_with_their_first_factor_probability_map_.end()) {
		  return -1.0;
	  }

	  // "NEW Formula" (since 2013/10/24): first-factor probability times the
	  // static probability, boosted by the dynamic selection ratio weighted
	  // by b_.
	  // NOTE(review): assumes currentALLXDOC_ is non-zero — confirm at the
	  // call site before scoring.
	  return it->second * entry.staticProbability * ( 1 + b_ * currentSelectXDOC_ / currentALLXDOC_);
}
















