//==============================================================================================================================================================
// Author(s): Roman Khmelichek, Wei Jiang
//
// TODO: DOCNO can be determined algorithmically from the docID (instead of storing as a string)? We also have to consider docID remapping.
// TODO: Need to implement modes 'kSingleDoc' and 'kStandard'.
//
// Some WARC records in ClueWeb09 are malformed. See 'http://www.umiacs.umd.edu/~jimmylin/cloud9/docs/content/clue.html' for more information. Here, we handle the
// common problem of having an extra new line in the WARC header. A few of the records also happen to have garbled URLs.
//==============================================================================================================================================================

#ifndef PARSERINL_H_
#define PARSERINL_H_

// Enables debugging output for this module.
// #define PARSERINL_DEBUG

// C system headers.
#include <errno.h>
#include <stdint.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

// C++ standard library.
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

// Project headers.
#include "config_file_properties.h"
#include "configuration.h"

using namespace std;

// Constructs a parser for the given parsing mode and document type.
// 'callback' receives per-term and per-document events during parsing and
// must not be NULL.
template<class Callback>
  Parser<Callback>::Parser(const Parser<Callback>::ParsingMode& parsing_mode, const Parser<Callback>::DocType& doc_type, Callback* callback) :
    parsing_mode_(parsing_mode), doc_type_(doc_type), callback_(callback) {
    assert(callback_ != NULL);

    // Preload the set of docIDs required by phase-2 pruning.
    LoadUpGov2DocumentSetNeededToBeExtracted();
  }

// Stub: loading of the ClueWeb09 spam-report dictionary is not implemented
// yet. The input stream is accepted but never read. Always returns 0.
template<class Callback>
  int Parser<Callback>::LoadClueweb2009SpamReportDict(ifstream& inputfileForDocument) {
    return 0;
  }

// Loads the set of docIDs that must be extracted during phase-2 pruning into
// 'priority_docIDs_'. The input file (path taken from the configuration key
// kGov2DocumentNeededToBeFurtherProcessFileName) contains one entry per line;
// the docID is the first whitespace-delimited token on the line.
// Logs the number of IDs loaded; a count of zero is flagged as suspicious.
template<class Callback>
  void Parser<Callback>::LoadUpGov2DocumentSetNeededToBeExtracted(){
	cout << "Load up docIDs needed to parse" << endl;
	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kGov2DocumentNeededToBeFurtherProcessFileName));
	ifstream inputFileHandler(inputFileName.c_str());
	if (!inputFileHandler.is_open()) {
		// Fail loudly instead of silently loading an empty priority set.
		GetDefaultLogger().Log("Could not open priority docID file: " + inputFileName, false);
		return;
	}

	string currentLine;
	string docID_str;
	// 'while (getline(...))' instead of 'while (stream.good())': the latter
	// runs the loop body once more after the final line, re-inserting the
	// previous (or an empty) token.
	while (getline(inputFileHandler, currentLine)) {
		boost::algorithm::trim(currentLine);
		if (currentLine.empty())
			continue;  // Skip blank lines; atoi("") would otherwise insert docID 0.
		istringstream iss(currentLine);
		if (!(iss >> docID_str))
			continue;  // No token on this line.
		uint32_t docID = atoi(docID_str.c_str());
		priority_docIDs_[docID] = 1;
	}
	inputFileHandler.close();

	// Check point: warn when nothing was loaded.
	if (priority_docIDs_.size() == 0){
		GetDefaultLogger().Log( Stringify(priority_docIDs_.size() ) + " docIDs loaded.  --- Take Care", false);
	}
	else{
		GetDefaultLogger().Log( Stringify(priority_docIDs_.size() ) + " docIDs loaded.", false);
	}
}

// Debug utility: runs the same HTML/TREC tag-parsing loop as the indexing
// path over one document buffer and prints the word position and context byte
// of every occurrence of the single term 'term' ('term_len' bytes, not
// NUL-terminated). 'beginningPosition' / 'endingPosition' are accepted but
// not used by the scan itself. Always returns 0.
template<class Callback>
  int Parser<Callback>::ShowContentOfSpecifcDocument(const char* buf, int buf_len,const long beginningPosition, const long endingPosition,const char* term, int term_len){
	string lookUpTerm = "";
	string currentWord = "";
	// Copy the raw term bytes into a std::string so it can be compared below.
	for(int tempCounter = 0; tempCounter < term_len; tempCounter++){
		lookUpTerm += term[tempCounter];
	}
	cout << "lookUpTerm:" << lookUpTerm << endl;

	/*
	cout << "*********************web page content begin" << endl;
	for(int tempCounter = 0; tempCounter < buf_len; tempCounter++){
		cout << buf[tempCounter];
	}
	cout << "*********************web page content End." << endl;
	*/

	cout << "*********************positions & contexts begin" << endl;

	const char* curr_p = buf;

    assert(buf != NULL);
    assert(buf_len > 0);
    uint32_t doc_id = 0;

    Tag tag_ret;  // The special type of tag we encountered.
    unsigned char context = '\0';  // Bit array for the context.
    uint32_t position = 0;         // Tracks position of each word, final position for a document is it's size in words.

    // For parsing HTML.
    bool in_closing_tag = false;  // True when we're parsing a closing tag.
    bool in_script = false;       // True when we're parsing contents of script tag.

    // For TREC documents.
    bool in_doc = false;     // True when we're parsing contents of doc tag.
    bool in_docno = false;   // True when we're parsing contents of docno tag.
    bool in_dochdr = false;  // True when we're parsing contents of dochdr tag.

    // Track the starting point of various things we want to parse out.
    const char* word_p;        // Standalone word.
    //const char* url_p;         // TREC document URL.
    //const char* docno_p;       // TREC document number.
    const char* tag_p = NULL;  // Tracks the starting point of a tag; doubles as a flag as to whether we're currently in a tag.

    // Dead code: reads the set-but-unused flags above to silence compiler
    // warnings; never executed.
    if(false){
    	cout << in_doc << endl;
    	cout << in_docno << endl;
    	cout << in_dochdr << endl;
    }

    // The main parsing loop
    while (IsWithinBounds(curr_p, buf, buf_len)) {
      if (!IsIndexable(*curr_p)) {
        if (*curr_p != '>') {
          if (*curr_p == '<') {
            tag_p = curr_p;  // Remember where the tag opened.
          }
          ++curr_p;
          continue;
        }

        // A '>' with no matching '<' seen: treat as plain non-indexable text.
        if (!tag_p) {
          ++curr_p;
          continue;
        }

        // At this point, we must have just seen the end of a closing tag, '>'.
        ++curr_p;
        tag_ret = ProcessTag(tag_p, curr_p - tag_p, in_closing_tag, doc_id);

        switch (tag_ret) {
          case kTagNot:
            break;

          case kTagB:
            UpdateContext(context, in_closing_tag, kContextB);
            break;

          case kTagI:
            UpdateContext(context, in_closing_tag, kContextI);
            break;

          case kTagH:
            UpdateContext(context, in_closing_tag, kContextH);
            break;

          case kTagTitle:
            UpdateContext(context, in_closing_tag, kContextT);
            break;

          case kTagScript:
            in_script = in_closing_tag ? false : true;
            break;

          //If this tag is to be </Html>.
          // NOTE(review): when this is an OPENING <html> tag (in_closing_tag
          // is false) control falls through into the kTagDoc case below —
          // presumably intentional, but confirm and mark with a fallthrough
          // comment/attribute if so.
          case kTagHtml:
            if (doc_type_ != kWarc)
              break;

            if (in_closing_tag) {
              in_doc = false;



              break;
            }

          //If this tag is to be </Doc>.
          case kTagDoc:
            if (doc_type_ != kWarc)
              break;

            if (in_closing_tag) {
              in_doc = false;







            }
            else {
              in_doc = true;
            }
            break;

          case kTagDocno:
            if (doc_type_ != kTrec)
              break;

            in_docno = in_closing_tag ? false : true;
            break;

          case kTagDochdr:
            if (doc_type_ != kTrec)
              break;

            in_dochdr = in_closing_tag ? false : true;
            break;

          default:
            break;
        }

        tag_p = NULL;
        continue;
      }
      // Ignore everything between <script></script> tags and ignore inner contents of tags.
      if (in_script || tag_p) {
        ++curr_p;
        continue;
      }
      // Collect one run of indexable characters as the current word.
      word_p = curr_p;
      while (IsWithinBounds(curr_p, buf, buf_len) && IsIndexable(*curr_p)) {
        ++curr_p;
      }

      //callback_->ProcessTerm(word_p, curr_p - word_p, doc_id, position++, context);
      currentWord = "";
      for(int tempCounter = 0; tempCounter < curr_p - word_p; tempCounter++){
    	  currentWord += word_p[tempCounter];
      }
      // Print a hit (and advance 'position' either way so positions match
      // what the real indexing pass would assign).
      if(lookUpTerm == currentWord){
    	  cout << lookUpTerm << " " << position++ << " " << int(context) << endl;
      }
      else{
    	  position++;
      }
    }
    cout << "*********************positions & contexts end." << endl;
	return 0;
}

// Debug utility: like the single-term overload, but looks up many entries at
// once. Each element of 'queryID_Term_docIDList' has the form
// "queryID_term" (split on '_'); matching term occurrences in the buffer are
// collected as alternating (position, context) values keyed by
// "queryID term", then dumped to stdout and to 'outputFileHandler' together
// with 'currentDocID'. 'beginningPosition' / 'endingPosition' are accepted
// but not used. Always returns 0.
template<class Callback>
  int Parser<Callback>::ShowContentOfSpecifcDocument(const char* buf, int buf_len,const long beginningPosition, const long endingPosition,vector<string> &queryID_Term_docIDList, string currentDocID, ofstream &outputFileHandler){
	//cout << "length:" << queryID_Term_docIDList.size() << endl;

	// Maps term -> queryID (the name suggests docIDs, but element [1] is the
	// term and [0] the queryID below).
	// NOTE(review): if the same term appears under several queryIDs, later
	// entries overwrite earlier ones in this map — confirm that is intended.
	map<string, string> lookUpTermDocIDDict;
	map<string, vector<int> > outputDict;  // "queryID term" -> [pos, ctx, pos, ctx, ...]
	map<string, vector<int> >::iterator outputDictIter;
	string currentWord = "";
	cout << "Looking for the following pairs:" << endl;
	for(unsigned int tempCounter = 0; tempCounter < queryID_Term_docIDList.size(); tempCounter++){
		vector<string> elementsForTheLine;
	    boost::algorithm::split(elementsForTheLine, queryID_Term_docIDList[tempCounter], boost::algorithm::is_any_of("_") );
		lookUpTermDocIDDict[ elementsForTheLine[1] ] = elementsForTheLine[0];
		outputDict[ elementsForTheLine[0] + " " + elementsForTheLine[1]].clear();
		cout << elementsForTheLine[0] << " " << elementsForTheLine[1] << " " << currentDocID << endl;
	}


	/*
	cout << "*********************web page content begin" << endl;
	for(int tempCounter = 0; tempCounter < buf_len; tempCounter++){
		cout << buf[tempCounter];
	}
	cout << "*********************web page content End." << endl;
	*/

	//cout << "*********************parsing begin" << endl;

	const char* curr_p = buf;

    assert(buf != NULL);
    assert(buf_len > 0);
    uint32_t doc_id = 0;

    Tag tag_ret;  // The special type of tag we encountered.
    unsigned char context = '\0';  // Bit array for the context.
    uint32_t position = 0;         // Tracks position of each word, final position for a document is it's size in words.

    // For parsing HTML.
    bool in_closing_tag = false;  // True when we're parsing a closing tag.
    bool in_script = false;       // True when we're parsing contents of script tag.

    // For TREC documents.
    bool in_doc = false;     // True when we're parsing contents of doc tag.
    bool in_docno = false;   // True when we're parsing contents of docno tag.
    bool in_dochdr = false;  // True when we're parsing contents of dochdr tag.

    // Track the starting point of various things we want to parse out.
    const char* word_p;        // Standalone word.
    // For clueweb data ONLY, currently. I comment them out currently.
    //const char* url_p;         // TREC document URL.
    //const char* docno_p;       // TREC document number.
    const char* tag_p = NULL;  // Tracks the starting point of a tag; doubles as a flag as to whether we're currently in a tag.


    // Dead code: reads the set-but-unused flags above to silence compiler
    // warnings; never executed.
    if(false){
    	cout << in_doc << endl;
    	cout << in_docno << endl;
    	cout << in_dochdr << endl;
    }

    // The main parsing loop
    while (IsWithinBounds(curr_p, buf, buf_len)) {
      if (!IsIndexable(*curr_p)) {
        if (*curr_p != '>') {
          if (*curr_p == '<') {
            tag_p = curr_p;  // Remember where the tag opened.
          }
          ++curr_p;
          continue;
        }

        // A '>' with no matching '<' seen: treat as plain non-indexable text.
        if (!tag_p) {
          ++curr_p;
          continue;
        }

        // At this point, we must have just seen the end of a closing tag, '>'.
        ++curr_p;
        tag_ret = ProcessTag(tag_p, curr_p - tag_p, in_closing_tag, doc_id);

        switch (tag_ret) {
          case kTagNot:
            break;

          case kTagB:
            UpdateContext(context, in_closing_tag, kContextB);
            break;

          case kTagI:
            UpdateContext(context, in_closing_tag, kContextI);
            break;

          case kTagH:
            UpdateContext(context, in_closing_tag, kContextH);
            break;

          case kTagTitle:
            UpdateContext(context, in_closing_tag, kContextT);
            break;

          case kTagScript:
            in_script = in_closing_tag ? false : true;
            break;

          //If this tag is to be </Html>.
          // NOTE(review): when this is an OPENING <html> tag (in_closing_tag
          // is false) control falls through into the kTagDoc case below —
          // presumably intentional, but confirm and mark with a fallthrough
          // comment/attribute if so.
          case kTagHtml:
            if (doc_type_ != kWarc)
              break;

            if (in_closing_tag) {
              in_doc = false;



              break;
            }

          //If this tag is to be </Doc>.
          case kTagDoc:
            if (doc_type_ != kWarc)
              break;

            if (in_closing_tag) {
              in_doc = false;







            }
            else {
              in_doc = true;
            }
            break;

          case kTagDocno:
            if (doc_type_ != kTrec)
              break;

            in_docno = in_closing_tag ? false : true;
            break;

          case kTagDochdr:
            if (doc_type_ != kTrec)
              break;

            in_dochdr = in_closing_tag ? false : true;
            break;

          default:
            break;
        }

        tag_p = NULL;
        continue;
      }
      // Ignore everything between <script></script> tags and ignore inner contents of tags.
      if (in_script || tag_p) {
        ++curr_p;
        continue;
      }
      // Collect one run of indexable characters as the current word.
      word_p = curr_p;
      while (IsWithinBounds(curr_p, buf, buf_len) && IsIndexable(*curr_p)) {
        ++curr_p;
      }

      //callback_->ProcessTerm(word_p, curr_p - word_p, doc_id, position++, context);
      currentWord = "";
      for(int tempCounter = 0; tempCounter < curr_p - word_p; tempCounter++){
    	  currentWord += word_p[tempCounter];
      }

      // Record a hit as a (position, context) pair; advance 'position' either
      // way so positions match what the real indexing pass would assign.
      if(lookUpTermDocIDDict.count(currentWord) > 0){
    	  //cout << currentWord << " " << position << " " << int(context) << endl;
    	  outputDict[ lookUpTermDocIDDict[currentWord] + " " + currentWord].push_back(position++);
    	  outputDict[ lookUpTermDocIDDict[currentWord] + " " + currentWord].push_back(int(context));
      }
      else{
    	  position++;
      }
    }
    //cout << "*********************parsing end." << endl;
    // Dump the collected results: "<queryID> <term> <docID> <count> <pos ctx ...>".
    cout << "*********************outputDict results shown begin." << endl;
    for(outputDictIter=outputDict.begin(); outputDictIter != outputDict.end(); outputDictIter++){
    	cout << (*outputDictIter).first << " " << currentDocID << " " << (*outputDictIter).second.size() << " ";
    	outputFileHandler << (*outputDictIter).first << " " << currentDocID << " " << (*outputDictIter).second.size() << " ";
    	for(unsigned int tempCounter = 0; tempCounter < (*outputDictIter).second.size(); tempCounter++){
    		cout << (*outputDictIter).second[tempCounter] << " ";
    		outputFileHandler << (*outputDictIter).second[tempCounter] << " ";
    	}
    	cout << endl;
    	outputFileHandler << endl;
    }
    cout << "*********************outputDict results shown end." << endl;
	return 0;
}

// Debug utility: echoes the raw document buffer to stdout, then runs the
// standard tag-parsing loop over it.
// NOTE(review): 'lookUpTermDocIDDict' is never populated in this function, so
// the term-matching branch near the end is dead code and 'outputDict' always
// stays empty — the function effectively just prints the buffer and walks the
// tags. Confirm whether population code was removed intentionally.
// Always returns 0.
template<class Callback>
  int Parser<Callback>::ShowContentAndSimpleParse(const char* buf, int buf_len){

	map<string, string> lookUpTermDocIDDict;  // Never filled in — see note above.
	map<string, vector<int> > outputDict;
	map<string, vector<int> >::iterator outputDictIter;
	string currentWord = "";

	// Echo the raw buffer.
	cout << "*********************web page content begin" << endl;
	for(int tempCounter = 0; tempCounter < buf_len; tempCounter++){
		cout << buf[tempCounter];
	}
	cout << "*********************web page content End." << endl;

	cout << "*********************parsing begin" << endl;

	const char* curr_p = buf;

    assert(buf != NULL);
    assert(buf_len > 0);
    uint32_t doc_id = 0;

    Tag tag_ret;  // The special type of tag we encountered.
    unsigned char context = '\0';  // Bit array for the context.
    uint32_t position = 0;         // Tracks position of each word, final position for a document is it's size in words.

    // For parsing HTML.
    bool in_closing_tag = false;  // True when we're parsing a closing tag.
    bool in_script = false;       // True when we're parsing contents of script tag.

    // For TREC documents.
    bool in_doc = false;     // True when we're parsing contents of doc tag.
    bool in_docno = false;   // True when we're parsing contents of docno tag.
    bool in_dochdr = false;  // True when we're parsing contents of dochdr tag.

    // Track the starting point of various things we want to parse out.
    const char* word_p;        // Standalone word.
    // For clueweb data ONLY, currently. I comment them out currently.
    //const char* url_p;         // TREC document URL.
    //const char* docno_p;       // TREC document number.
    const char* tag_p = NULL;  // Tracks the starting point of a tag; doubles as a flag as to whether we're currently in a tag.


    // Dead code: reads the set-but-unused flags above to silence compiler
    // warnings; never executed.
    if(false){
    	cout << in_doc << endl;
    	cout << in_docno << endl;
    	cout << in_dochdr << endl;
    }

    // The main parsing loop
    while (IsWithinBounds(curr_p, buf, buf_len)) {
      if (!IsIndexable(*curr_p)) {
        if (*curr_p != '>') {
          if (*curr_p == '<') {
            tag_p = curr_p;  // Remember where the tag opened.
          }
          ++curr_p;
          continue;
        }

        // A '>' with no matching '<' seen: treat as plain non-indexable text.
        if (!tag_p) {
          ++curr_p;
          continue;
        }

        // At this point, we must have just seen the end of a closing tag, '>'.
        ++curr_p;
        tag_ret = ProcessTag(tag_p, curr_p - tag_p, in_closing_tag, doc_id);

        switch (tag_ret) {
          case kTagNot:
            break;

          case kTagB:
            UpdateContext(context, in_closing_tag, kContextB);
            break;

          case kTagI:
            UpdateContext(context, in_closing_tag, kContextI);
            break;

          case kTagH:
            UpdateContext(context, in_closing_tag, kContextH);
            break;

          case kTagTitle:
            UpdateContext(context, in_closing_tag, kContextT);
            break;

          case kTagScript:
            in_script = in_closing_tag ? false : true;
            break;

          //If this tag is to be </Html>.
          // NOTE(review): when this is an OPENING <html> tag (in_closing_tag
          // is false) control falls through into the kTagDoc case below —
          // presumably intentional, but confirm and mark with a fallthrough
          // comment/attribute if so.
          case kTagHtml:
            if (doc_type_ != kWarc)
              break;

            if (in_closing_tag) {
              in_doc = false;



              break;
            }

          //If this tag is to be </Doc>.
          case kTagDoc:
            if (doc_type_ != kWarc)
              break;

            if (in_closing_tag) {
              in_doc = false;
            }
            else {
              in_doc = true;
            }
            break;

          case kTagDocno:
            if (doc_type_ != kTrec)
              break;

            in_docno = in_closing_tag ? false : true;
            break;

          case kTagDochdr:
            if (doc_type_ != kTrec)
              break;

            in_dochdr = in_closing_tag ? false : true;
            break;

          default:
            break;
        }

        tag_p = NULL;
        continue;
      }
      // Ignore everything between <script></script> tags and ignore inner contents of tags.
      if (in_script || tag_p) {
        ++curr_p;
        continue;
      }
      // Collect one run of indexable characters as the current word.
      word_p = curr_p;
      while (IsWithinBounds(curr_p, buf, buf_len) && IsIndexable(*curr_p)) {
        ++curr_p;
      }

      //callback_->ProcessTerm(word_p, curr_p - word_p, doc_id, position++, context);
      currentWord = "";
      for(int tempCounter = 0; tempCounter < curr_p - word_p; tempCounter++){
    	  currentWord += word_p[tempCounter];
      }

      // Dead branch: lookUpTermDocIDDict is always empty here (see note at top),
      // so only the position counter advances.
      if(lookUpTermDocIDDict.count(currentWord) > 0){
    	  //cout << currentWord << " " << position << " " << int(context) << endl;
    	  outputDict[ lookUpTermDocIDDict[currentWord] + " " + currentWord].push_back(position++);
    	  outputDict[ lookUpTermDocIDDict[currentWord] + " " + currentWord].push_back(int(context));
      }
      else{
    	  position++;
      }
    }

    cout << "*********************parsing end." << endl;
	return 0;
}

// Returns the number of documents parsed if parsing mode is set to 'MANY_DOC', otherwise 0.
// TODO: The base URL can be set by a <base> tag within the page and by the Content-Location field in the web server's HTTP response header.
//        These cases are not currently covered.
// Dispatches the buffer to the TREC phase-2-pruning parser.
// Returns the number of documents parsed when the parsing mode is 'MANY_DOC',
// otherwise 0. WARC (ClueWeb09) input is not supported by this path.
template<class Callback>
  int Parser<Callback>::ParseDocumentCollectionAndExtractingInfoForPhase2Pruning(const char* buf, int buf_len, uint32_t& doc_id, int& avg_doc_length) {
    assert(buf != NULL);
    assert(buf_len > 0);

    const char* curr_p = buf;  // Current read position within the buffer.
    int num_docs_parsed = 0;

    switch (doc_type_) {
      case kWarc:
        // ClueWeb09 (WARC) input is not handled by this code path.
        std::cout << "Not Supportive Doc Type for the clueweb09." << std::endl;
        break;

      case kTrec:
        // History of alternatives, kept for reference:
        //   ParseTrecDataBufferForPhase2Pruning(...) — older version; had bugs (2013/07/14).
        //   ParseTrecDataBufferForSelectedParsingUpdatedOn20131104Afternoon(...) —
        //     attempt at selected (per-docID) parsing; unfinished.
        // The call below works for full indexing but not for selected parsing.
        num_docs_parsed += ParseTrecDataBufferForPhase2PruningNewTryUpdatedOn20130714Night(buf, buf_len, doc_id, avg_doc_length, curr_p, true);
        break;

      default:
        // Unsupported doc_type_.
        std::cout << "Not Supportive Doc Type." << std::endl;
        break;
    }
    return num_docs_parsed;
  }


// Returns the number of documents parsed if parsing mode is set to 'MANY_DOC', otherwise 0.
// TODO: The base URL can be set by a <base> tag within the page and by the Content-Location field in the web server's HTTP response header.
//        These cases are not currently covered.
// Parses a whole collection bundle. For WARC input: skips the leading
// 'warcinfo' record, then iterates 'response' records, parsing each payload
// with ParseWarcDataBuffer. For TREC input: delegates to ParseTrecDataBuffer.
// Returns the number of documents parsed when the parsing mode is 'MANY_DOC',
// otherwise 0. 'doc_id' and 'avg_doc_length' are advanced as documents are
// consumed.
template<class Callback>
  int Parser<Callback>::ParseDocumentCollection(const char* buf, int buf_len, uint32_t& doc_id, int& avg_doc_length) {
    assert(buf != NULL);
    assert(buf_len > 0);

    int num_docs_parsed = 0;

    const char* curr_p = buf;  // Tracks the current point in the buffer.

    if (doc_type_ == kWarc){
        // The WARC format starts each bundle with 6 info lines, which we skip here.
        // the WARC-Type is:warcinfo

        WarcHeader warc_header;

        curr_p += ProcessWarcInfoHeader(buf, buf_len, curr_p, &warc_header);

        // Skip the warcinfo record's payload as well.
        curr_p += warc_header.content_length;

        while (true) {
          // If parsing the WARC format, need to process the header first for each document.
          // This warc header type is:response

          int header_bytes = ProcessWarcResponseAndHTTPHeader(buf, buf_len, curr_p, &warc_header, doc_id);

          // Zero header bytes signals no further response records in the buffer.
          if (header_bytes == 0)
            break;

          curr_p += header_bytes;
          const char* content_start = curr_p;

		  // NOTE(review): the return value of ParseWarcDataBuffer is added to
		  // num_docs_parsed here AND num_docs_parsed is incremented again two
		  // lines below — if the callee's return counts this document too,
		  // documents are double-counted. Confirm the callee's return
		  // convention.
		  num_docs_parsed += ParseWarcDataBuffer(content_start, warc_header.content_length, doc_id, avg_doc_length, curr_p, &warc_header);

		  // Since we're parsing one document at a time, we need to update the docID count, average doc length here.
		  ++doc_id;
		  ++num_docs_parsed;

		  // The parser must consume exactly the advertised Content-Length;
		  // anything else means the record (or the parser) is broken.
		  if ((curr_p - content_start) != warc_header.content_length) {
			assert(false);
		  }
        }
    }
    else if (doc_type_ == kTrec){
    	// For general purpose
    	num_docs_parsed += ParseTrecDataBuffer(buf, buf_len, doc_id, avg_doc_length, curr_p);
    	// For special purpose of outputting the bigrams of the documents
    	// num_docs_parsed += ParseTrecDataBufferForBigrams(buf, buf_len, doc_id, avg_doc_length, curr_p);
    }
    else{
        std::cout << "Not Supportive Doc Type, please wait for the update" << std::endl;
    }
    return num_docs_parsed;
  }

// Parses the payload of one WARC document. Every indexable word is emitted
// via callback_->ProcessTerm with its position and context byte; when the
// buffer is exhausted, the final word count is reported via
// callback_->ProcessDocLength and accumulated into 'avg_doc_length'.
// Returns doc_id - initial_doc_id (ProcessTag may advance doc_id; with this
// call pattern the caller also increments doc_id itself — see the NOTE in
// ParseDocumentCollection).
template<class Callback>
  int Parser<Callback>::ParseWarcDataBuffer(const char* buf, int buf_len, uint32_t& doc_id, int& avg_doc_length, const char*& curr_p, WarcHeader* header) {

	string currentWord;  // NOTE(review): declared but never used in this function.

    assert(buf != NULL);
    assert(buf_len > 0);

    uint32_t initial_doc_id = doc_id;

    Tag tag_ret;  // The special type of tag we encountered.

    unsigned char context = '\0';  // Bit array for the context.
    uint32_t position = 0;         // Tracks position of each word, final position for a document is it's size in words.

    // For parsing HTML.
    bool in_closing_tag = false;  // True when we're parsing a closing tag.
    bool in_script = false;       // True when we're parsing contents of script tag.

    // Track the starting point of various things we want to parse out.
    const char* word_p;        // Standalone word.
    const char* tag_p = NULL;  // Tracks the starting point of a tag; doubles as a flag as to whether we're currently in a tag.



    // The main parsing loop
    while (IsWithinBounds(curr_p, buf, buf_len)) {
      if (!IsIndexable(*curr_p)) {
        if (*curr_p != '>') {
          if (*curr_p == '<') {
            tag_p = curr_p;  // Remember where the tag opened.
          }
          ++curr_p;
          continue;
        }

        // A '>' with no matching '<' seen: treat as plain non-indexable text.
        if (!tag_p) {
          ++curr_p;
          continue;
        }

        // At this point, we must have just seen the end of a closing tag, '>'.
        ++curr_p;
        tag_ret = ProcessTag(tag_p, curr_p - tag_p, in_closing_tag, doc_id);

        // Only the context-affecting tags matter here; doc-length accounting
        // was moved out of the switch (see the 2014/03/04 note below).
        switch (tag_ret) {
          case kTagNot:
            break;

          case kTagB:
            UpdateContext(context, in_closing_tag, kContextB);
            break;

          case kTagI:
            UpdateContext(context, in_closing_tag, kContextI);
            break;

          case kTagH:
            UpdateContext(context, in_closing_tag, kContextH);
            break;

          case kTagTitle:
            UpdateContext(context, in_closing_tag, kContextT);
            break;

          case kTagScript:
            in_script = in_closing_tag ? false : true;
            break;

          // If this tag is to be </Html>.
          // Updated by Wei on 2014/03/04 at school
          // The following is the WRONG implementation: Because not every clueweb09 document downloaded will have a </Html> tag, so it is NOT right to update the doc length here.
          /*
          case kTagHtml:
            if (in_closing_tag) {
              // The position at this time is actually the document length.
              avg_doc_length += position;

              // Call the following function to update the doc length in words (Duplicated words included)
              std::cout << position << " " << doc_id << std::endl;
              callback_->ProcessDocLength(position, doc_id);
            }
            break;

          default:
            break;
          */
        }

        tag_p = NULL;
        continue;
      }

      // Ignore everything between <script></script> tags and ignore inner contents of tags.
      if (in_script || tag_p) {
        ++curr_p;
        continue;
      }

      // Collect one run of indexable characters as the current word.
      word_p = curr_p;
      while (IsWithinBounds(curr_p, buf, buf_len) && IsIndexable(*curr_p)) {
        ++curr_p;
      }

      // Emit the word; 'position' is post-incremented so positions are 0-based.
      callback_->ProcessTerm(word_p, curr_p - word_p, doc_id, position++, context,false);

    }

    // End of buffer reached: 'position' now equals the document length in
    // words, so report it here instead of at </html> (see note above).
    avg_doc_length += position;
    callback_->ProcessDocLength(position, doc_id);
    // std::cout << position << " " << doc_id << std::endl;
    return doc_id - initial_doc_id;
  }

// Updated by Wei on 2013/11/04 afternoon at school
template<class Callback>
int Parser<Callback>::ParseTrecDataBufferForSelectedParsingUpdatedOn20131104Afternoon(const char* buf, int buf_len, uint32_t& current_doc_id, int& avg_doc_length, const char*& curr_p, bool usePriorityDocumentListFlag) {
	// Updated by Wei on 2015/02/24 @ Shanghai, code deletion
	// The implementation was deleted; keep a well-defined stub. Previously
	// control flowed off the end of this non-void function, which is
	// undefined behavior in C++. Return 0 ("no documents parsed") so any
	// remaining caller gets a sane count.
	return 0;
}


// Updated by Wei on 2013/11/02 night at school
// Let all the callback_ functions listens to the usePriorityDocumentListFlag.
//
// Scans the buffer starting at curr_p (bounded by buf / buf_len) for
// TREC-formatted documents, dispatching standalone terms, DOCNOs, URLs, and
// per-document lengths to callback_.  curr_p is advanced in place so that
// parsing can resume where it left off; current_doc_id is incremented for
// every completed document when parsing_mode_ == kManyDoc, and
// avg_doc_length accumulates the final word position (document length in
// words) of each completed document.  Returns the number of documents
// completed during this call (current_doc_id - initial_doc_id).
//
// NOTE(review): usePriorityDocumentListFlag is effectively unused here —
// every "version in test" branch that consulted it is commented out, so all
// documents are processed unconditionally.  Confirm before removing it.
template<class Callback>
int Parser<Callback>::ParseTrecDataBufferForPhase2PruningNewTryUpdatedOn20130714Night(const char* buf, int buf_len, uint32_t& current_doc_id, int& avg_doc_length, const char*& curr_p, bool usePriorityDocumentListFlag) {

	// for debug ONLY
	std::cout << "Updated on 2013/11/04 morning by Wei at school." << std::endl;
	std::cout << "Parser<Callback>::ParseTrecDataBufferForPhase2PruningNewTryUpdatedOn20130714Night(...) is called." << std::endl;

	assert(buf != NULL);
	assert(buf_len > 0);

	uint32_t initial_doc_id = current_doc_id;

	Tag tag_ret;  // The special type of tag we encountered.

	unsigned char context = '\0';  // Bit array for the context.
	uint32_t position = 0;         // Tracks position of each word, final position for a document is it's size in words.

	// For parsing HTML.
	bool in_closing_tag = false;  // True when we're parsing a closing tag.
	bool in_script = false;       // True when we're parsing contents of script tag.

	// For TREC documents.
	bool in_doc = false;     // True when we're parsing contents of doc tag.
	bool in_docno = false;   // True when we're parsing contents of docno tag.
	bool in_dochdr = false;  // True when we're parsing contents of dochdr tag.

	// Track the starting point of various things we want to parse out.
	const char* word_p;        // Standalone word.
	const char* url_p;         // TREC document URL.
	const char* docno_p;       // TREC document number.
	string docno_p_in_string_format;	// TREC document number in string format
	const char* tag_p = NULL;  // Tracks the starting point of a tag; doubles as a flag as to whether we're currently in a tag.

	//this is the dummy code:
	// Never executes; presumably here only to silence an unused-variable
	// warning for in_doc (which is set below but otherwise never read).
	if(false){
		 cout << in_doc << endl;
	}

	while (IsWithinBounds(curr_p, buf, buf_len)) {
	  if (!IsIndexable(*curr_p)) {
		if (*curr_p != '>') {
		  // A '<' begins a (potential) tag; remember where it started.
		  if (*curr_p == '<') {
			tag_p = curr_p;
		  }
		  ++curr_p;
		  continue;
		}

		// A '>' with no matching '<' seen: not inside a tag, just skip it.
		if (!tag_p) {
		  ++curr_p;
		  continue;
		}

		// At this point, we must have just seen the end of a closing tag, '>'.
		++curr_p;

		// Updated on 2013/11/04 morning by Wei at school
		// version in test
		/*
		if (usePriorityDocumentListFlag){
			  if( priority_document_needed_to_process_dict_.count(docno_p_in_string_format) > 0 ){
				  // callback_->GetEdgeCollectionController()->setCurrDocLen(position);
				  callback_->ProcessDocLength(position, current_doc_id);
			  }
		}
		*/
		// original version
		// Classify the tag we just finished scanning ([tag_p, curr_p)).
		tag_ret = ProcessTag(tag_p, curr_p - tag_p, in_closing_tag, current_doc_id);

		switch (tag_ret) {
		  case kTagNot:
			break;

		  case kTagB:
			UpdateContext(context, in_closing_tag, kContextB);
			break;

		  case kTagI:
			UpdateContext(context, in_closing_tag, kContextI);
			break;

		  case kTagH:
			UpdateContext(context, in_closing_tag, kContextH);
			break;

		  case kTagTitle:
			UpdateContext(context, in_closing_tag, kContextT);
			break;

		  case kTagScript:
			in_script = in_closing_tag ? false : true;
			break;

		  case kTagDoc:
			if (doc_type_ != kTrec)
			  break;

			if (in_closing_tag) {
			  in_doc = false;

			  // The position at this time is actually the document length.
			  avg_doc_length += position;
			  /*
			  #ifdef PARSERINL_DEBUG
				std::cout << "doc_length is: " << position << std::endl;
				std::cout << "doc_id is: " << doc_id << std::endl;
			  #endif
			  */


			  std::cout << "docLength(in words): " << position << std::endl;

			  // Updated by Wei on 2013/11/02 night at school
			  // mark1
			  // original version
			  // callback_->GetEdgeCollectionController()->setCurrDocLen(position);
			  callback_->ProcessDocLength(position, current_doc_id);
			  // version in test
			  /*
			  if (usePriorityDocumentListFlag){
				  if( priority_document_needed_to_process_dict_.count(docno_p_in_string_format) > 0 ){
					  // callback_->GetEdgeCollectionController()->setCurrDocLen(position);
					  callback_->ProcessDocLength(position, current_doc_id);
				  }
			  }
			  */

			  // I should put the graph generation here because it marks the ending of parsing the whole document
			  // Updated by Wei 2013/07/14 night
			  /*
			  if (usePriorityDocumentListFlag){
				  if( priority_document_needed_to_process_dict_.count(docno_p_in_string_format) > 0 ){
					  if (callback_->GetEdgeCollectionController()->GetProperThreshold() <= 180){
						  callback_->GetEdgeCollectionController()->GenerateDocumentPostingGraphMethod3( current_doc_id, Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kTermPairEdgeFreqThreshold)),false, true, false, false, false, false, true );
					  }
					  else{
						  callback_->GetEdgeCollectionController()->GenerateDocumentPostingGraphMethod1( current_doc_id, Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kTermPairEdgeFreqThreshold)),false, true, false, false, false, false, true );
					  }

				  }
				  else{
					  // should clear the variable for the current document
					  // even NOT doing the graph generation part, you still need to clear up the data structures.
					  callback_->GetEdgeCollectionController()->ClearDocIDRelatedTermDicts(current_doc_id); // erasing the whole doc by key
					  callback_->GetEdgeCollectionController()->ClearTermsBothInCurrentDocIDAndQueryTraceVector(); // clear the terms_both_in_current_doc_id_and_query_trace_vector_
				  }
				  std::cout << std::endl;
			  }
			  else{
				  if (callback_->GetEdgeCollectionController()->GetProperThreshold() <= 180){
					  callback_->GetEdgeCollectionController()->GenerateDocumentPostingGraphMethod3( current_doc_id, Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kTermPairEdgeFreqThreshold)), false, true, false, false, false, false, true );
				  }
				  else{
					  callback_->GetEdgeCollectionController()->GenerateDocumentPostingGraphMethod1( current_doc_id, Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kTermPairEdgeFreqThreshold)), false, true, false, false, false, false, true );
				  }
			  }
			  */

			  // This only applies when we're parsing multiple documents in one go.
			  if (parsing_mode_ == kManyDoc) {
				context = 0;
				position = 0;
				++current_doc_id;

				// Need to reset certain properties before moving on to the next document.
				in_script = false;
			  }
			}
			else {
			  in_doc = true;
			}
			break;

		  case kTagDocno:
			if (doc_type_ != kTrec)
			  break;

			in_docno = in_closing_tag ? false : true;
			break;

		  case kTagDochdr:
			if (doc_type_ != kTrec)
			  break;

			in_dochdr = in_closing_tag ? false : true;
			break;

		  default:
			break;
		}

		tag_p = NULL;
		continue;
	  }

	  // Ignore everything between <script></script> tags and ignore inner contents of tags.
	  if (in_script || tag_p) {
		++curr_p;
		continue;
	  }

	  if (doc_type_ == kTrec)
	  {
		// Capture the DOCNO (the TREC document identifier) up to the next '<'.
		if (in_docno) {
		  docno_p = curr_p;
		  while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != '<') {
			++curr_p;
		  }


		  // for debug ONLY
		  docno_p_in_string_format = string(docno_p,curr_p - docno_p);

		  std::cout << "doc_id(internal): " << current_doc_id << std::endl;
		  std::cout << "docno(trecID): " << docno_p_in_string_format << std::endl;

		  // Updated on 2013/11/02 night by Wei at school
		  // Mark2
		  // original version
		  // callback_->GetEdgeCollectionController()->setCurrDocTrecID(docno_p_in_string_format);
		  callback_->ProcessDocno(docno_p, curr_p - docno_p, current_doc_id);
		  // version in test
		  /*
		  if (usePriorityDocumentListFlag){
			  if( priority_document_needed_to_process_dict_.count(docno_p_in_string_format) > 0 ){
				  callback_->GetEdgeCollectionController()->setCurrDocTrecID(docno_p_in_string_format);
				  callback_->ProcessDocno(docno_p, curr_p - docno_p, current_doc_id);
			  }
		  }
		  */


		  continue;
		} else if (in_dochdr) {
		  // DOCHDR: the first line holds the URL.  Index its indexable runs
		  // as terms under the URL context bit, then report the raw URL.
		  BitSet(context, kContextU);

		  url_p = curr_p;
		  while (IsWithinBounds(url_p, buf, buf_len) && *url_p != '\n') {
			if (!IsIndexable(*url_p)) {
			  url_p++;
			  continue;
			}

			word_p = url_p;
			while (IsWithinBounds(url_p, buf, buf_len) && IsIndexable(*url_p)) {
			  url_p++;
			}

			callback_->ProcessTerm(word_p, url_p - word_p, current_doc_id, position++, context, false);

		  }

		  BitUnset(context, kContextU);
		  /*
		  #ifdef PARSERINL_DEBUG
			std::cout << "url is: " << curr_p << std::endl;
			std::cout << "url_len is: " << url_p - curr_p << std::endl;
			std::cout << "doc_id is: " << doc_id << std::endl;
		  #endif
		  */

		  // Updated on 2013/11/02 night by Wei at school
		  // Mark4
		  // original version
		  callback_->ProcessUrl(curr_p, url_p - curr_p, current_doc_id);
		  // version in test
		  /*
		  if (usePriorityDocumentListFlag){
			  if( priority_document_needed_to_process_dict_.count(docno_p_in_string_format) > 0 ){
				  callback_->ProcessUrl(curr_p, url_p - curr_p, current_doc_id);
			  }
		  }
		  */
		  curr_p = url_p + 1;
		  // Skip the rest of the dochdr contents (making sure that we're at the end of the dochdr).
		  while (IsWithinBounds(curr_p, buf, buf_len)) {
			if (*curr_p == '<') {
			  const char kDocHdrClosingTag[] = "</DOCHDR>";
			  // Make sure it's actually the closing tag.
			  if (IsWithinBounds(curr_p + sizeof(kDocHdrClosingTag) - 1, buf, buf_len) && strncasecmp(curr_p, kDocHdrClosingTag, sizeof(kDocHdrClosingTag) - 1) == 0) {
				break;
			  }
			}
			++curr_p;
		  }

		  continue;
		}
	  }

	  // Ordinary document text: consume one maximal run of indexable
	  // characters and emit it as a term at the current word position.
	  word_p = curr_p;
	  while (IsWithinBounds(curr_p, buf, buf_len) && IsIndexable(*curr_p)) {
		++curr_p;
	  }

	  // Updated on 2013/11/02 night by Wei at school
	  // Mark5
	  // original version
	  callback_->ProcessTerm(word_p, curr_p - word_p, current_doc_id, position++, context, false);
	  // version in test
	  /*
	  if (usePriorityDocumentListFlag){
		  if( priority_document_needed_to_process_dict_.count(docno_p_in_string_format) > 0 ){
			  callback_->ProcessTerm(word_p, curr_p - word_p, current_doc_id, position++, context, false);
		  }
	  }
	  */
	}
	// Number of documents completed during this call.
	return current_doc_id - initial_doc_id;
}



/*
template<class Callback>
  void Parser<Callback>::ParseIncomingPostingStreamForFormat4(){
	// step3: load incoming sorted posting stream
	string incomingPostingStreamFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kIncomingPostingStreamFileNameForFormat2));
	ifstream inputfile2(incomingPostingStreamFileName.c_str());
	uint32_t lineCounter = 0;
	string currentLine = "";
	while ( inputfile2.good() ){
		getline (inputfile2,currentLine);
		lineCounter += 1;
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termID;
			string docIDInStr;
			string probabilityInStr;
			string impactScoreInStr;

		    iss >> termID;
		    iss >> docIDInStr;
		    iss >> probabilityInStr;
		    iss >> impactScoreInStr;

		    uint32_t docID = strtoul(docIDInStr.c_str(), NULL, 0);
		    float impactScore = atof(impactScoreInStr.c_str());
		    uint32_t impactScoreInPositionForm = impactScore * 1000000;

		    // cout << termID << " " << docID << " " << probabilityInStr << " " << impactScoreInStr << endl;
		    // Just for reference
		    // callback_->ProcessTerm(tempTerm2.c_str(), 6, 0, impactScoreInPositionForm2, 0, impactScore2);
		    string term = "apple";
		    callback_->ProcessTerm(term.c_str(), term.length(), docID, impactScoreInPositionForm, 0, impactScore);
		    if (lineCounter % 100000 == 0){
		    	cout << lineCounter << " " << "line processed." << endl;
		    }
		}
	}
	exit(1);

	cout << "Begins..." << endl;
	string tempTerm1 = "apple";
	float impactScore1 = 7.417143;
	uint32_t impactScoreInPositionForm1 = impactScore1 * 1000000;
    callback_->ProcessTerm(tempTerm1.c_str(), 5, 0, impactScoreInPositionForm1, 0, impactScore1);

	string tempTerm2 = "banana";
	float impactScore2 = 7.439318;
	uint32_t impactScoreInPositionForm2 = impactScore2 * 1000000;
    callback_->ProcessTerm(tempTerm2.c_str(), 6, 0, impactScoreInPositionForm2, 0, impactScore2);

	uint32_t counter = 0;
	string fakeURL = "http://www.loveWei.com";

	// fake fill the URL for the document collection
	callback_->ProcessDocno("GX123456789012345", 17, 0);
	callback_->ProcessUrl(fakeURL.c_str(), 22, 0);
	callback_->ProcessDocLength(2, 0);


	GetPostingCollectionController().Finish();
	cout << "End." << endl;
}
*/

/*
template<class Callback>
  void Parser<Callback>::ParseIncomingPostingStreamForFormat3(){
	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource2));
    string currentLine;
	ifstream inputfile(infoIncomingSourceFileName.c_str());
	uint32_t lineCounter = 0;
	uint32_t docID = 0;
	uint32_t numOfPostings = 0;
	map<uint32_t,uint32_t> docIDAndSizeDict;
	map<uint32_t,string> docIDAndTrecIDDict;
	while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string docIDInStr;
			string trecIDInStr;
			string numOfPostingsInStr;

		    iss >> docIDInStr;
		    iss >> trecIDInStr;
		    iss >> numOfPostingsInStr;

		    docID = strtoul(docIDInStr.c_str(), NULL, 0);
		    numOfPostings = strtoul(numOfPostingsInStr.c_str(), NULL, 0);

		    docIDAndSizeDict[docID] = numOfPostings;
		    docIDAndTrecIDDict[docID] = trecIDInStr;
		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}

    string tempTerm1 = "apple";
    callback_->ProcessTerm(tempTerm1.c_str(), 5, 0, 0, 0, false);

	uint32_t counter = 0;
	string fakeURL = "http://www.loveWei.com";
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		// fake fill the URL for the document collection
		callback_->ProcessDocno(docIDAndTrecIDDict[docID].c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;
	GetPostingCollectionController().Finish();
}
*/

// NOTE(review): This function is currently an intentional no-op — its entire
// body is commented out.  The retained (disabled) implementation loaded a
// term/termID mapping, replayed a sorted posting stream into the index via
// callback_->ProcessTerm, and then faked the per-document auxiliary data
// (docno/URL/length).  The dead code is kept below for reference; confirm
// with the author before deleting it or re-enabling it.
template<class Callback>
  void Parser<Callback>::ParseIncomingPostingStreamForFormat2(){
	/*
	// step1: load term and termID transfer
	cout << "Load term, termID info" << endl;
	map<string,string> termANDTermIDDict;
	map<string,string> termIDANDTermDict;
	// termIDANDTermDict["23990334"] = "irs";
	// termIDANDTermDict["28508548"] = "nyc";
	// termANDTermIDDict["irs"] = 23990334;
	// termANDTermIDDict["nyc"] = 28508548;
	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource));
    string currentLine;
	ifstream inputfile(infoIncomingSourceFileName.c_str());
	uint32_t lineCounter = 0;
	while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termID;
			string term;
			string wildCat;

		    iss >> termID;
		    iss >> term;
		    iss >> wildCat;

		    termANDTermIDDict[term] = termID;
		    termIDANDTermDict[termID] = term;

		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}
	cout << "termANDTermIDDict['000000002']:" << termANDTermIDDict["000000002"] << endl;
	cout << "termIDANDTermDict['10000']:" << termIDANDTermDict["10000"] << endl;
	cout << "termANDTermIDDict.size():" << termANDTermIDDict.size() << endl;
	cout << "termIDANDTermDict.size():" << termIDANDTermDict.size() << endl;

	// step2: load doc and docSize
	map<uint32_t,uint32_t> docIDAndSizeDict;
	uint32_t counter = 0;
	for(; counter <25205179;counter += 1){
		docIDAndSizeDict[counter] = 0;
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs init." << endl;
	    }
	}


	// traditional way of converting
	// Updated by Wei on 20141009
	// step3: load incoming sorted posting stream
	string incomingPostingStreamFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kIncomingPostingStreamFileNameForFormat2));
	cout << "incomingPostingStreamFileName: " << incomingPostingStreamFileName << endl;
	ifstream inputfile2(incomingPostingStreamFileName.c_str());
	lineCounter = 0;
	while ( inputfile2.good() ){
		getline (inputfile2,currentLine);
		lineCounter += 1;
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termID;
			string docIDInStr;
			string probabilityInStr;
			string impactScoreInStr;

		    iss >> termID;
		    iss >> docIDInStr;
		    iss >> probabilityInStr;
		    iss >> impactScoreInStr;

		    uint32_t docID = strtoul(docIDInStr.c_str(), NULL, 0);
		    float impactScore = atof(impactScoreInStr.c_str());
		    uint32_t impactScoreInPositionForm = impactScore * 1000000;
		    docIDAndSizeDict[docID] += 1;
		    // cout << termID << " " << docID << " " << probabilityInStr << " " << impactScoreInStr << endl;
		    // Just for reference
		    // callback_->ProcessTerm(tempTerm2.c_str(), 6, 0, impactScoreInPositionForm2, 0, impactScore2);
		    callback_->ProcessTerm(termIDANDTermDict[termID].c_str(), termIDANDTermDict[termID].length(), docID, impactScoreInPositionForm, 0, impactScore);
		    if (lineCounter % 100000 == 0){
		    	cout << lineCounter << " " << "line processed." << endl;
		    }
		}
	}

	// step4: fake fill the aux data structure
	cout << "fake fill begins2..." << endl;
	// fake fill the URL for the document collection
	string fakeURL = "http://www.loveWei.com";
	string fakeDocno = "GX000-00-00000000";
	// unsigned int fakeDocSize = 1;
	counter = 0;
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		callback_->ProcessDocno(fakeDocno.c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;
	GetPostingCollectionController().Finish();
	inputfile.close();
	inputfile2.close();
	*/
}

// NOTE(review): This function is currently an intentional no-op — its entire
// body is commented out.  The retained (disabled) implementation loaded a
// term/termID mapping, read a binary posting file (docID/termID/score
// triples, 12 bytes each) into the index via callback_->ProcessTerm, and
// then faked the per-document auxiliary data (docno/URL/length).  The dead
// code is kept below for reference; confirm with the author before deleting
// it or re-enabling it.
template<class Callback>
  void Parser<Callback>::BuildInvertedIndexForPostingHits(){
	/*
	// step1: load the whole term and termID mapping table
	cout << "STEP1" << endl;
	cout << "load the whole term and termID mapping table" << endl;
	cout << "Load term, termID info" << endl;
	map<string,string> termANDTermIDDict;
	map<string,string> termIDANDTermDict;

	// handle the VERY BIG number bug in my code
	termANDTermIDDict["thiswww8912thisthis"] = "50000000";
	termIDANDTermDict["50000000"] = "thiswww8912thisthis";
	termANDTermIDDict["thiswww8913thisthis"] = "100000000";
	termIDANDTermDict["100000000"] = "thiswww8913thisthis";
	// termIDANDTermDict["23990334"] = "irs";
	// termIDANDTermDict["28508548"] = "nyc";
	// termANDTermIDDict["irs"] = 23990334;
	// termANDTermIDDict["nyc"] = 28508548;


	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource));
    ifstream inputfile(infoIncomingSourceFileName.c_str());
    cout << "infoIncomingSourceFileName: " << infoIncomingSourceFileName << endl;
    string currentLine;
    uint32_t lineCounter = 0;
    while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termID;
			string term;
			string wildCat;

		    iss >> termID;
		    iss >> term;
		    iss >> wildCat;

		    termANDTermIDDict[term] = termID;
		    termIDANDTermDict[termID] = term;

		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}


	// cout << "termANDTermIDDict['000000002']:" << termANDTermIDDict["000000002"] << endl;
	// cout << "termIDANDTermDict['10000']:" << termIDANDTermDict["10000"] << endl;
	cout << "termANDTermIDDict.size():" << termANDTermIDDict.size() << endl;
	cout << "termIDANDTermDict.size():" << termIDANDTermDict.size() << endl;


	// step2: load doc and docSize init table
	cout << "STEP2" << endl;
	cout << "load doc and docSize init table" << endl;
	map<uint32_t,uint32_t> docIDAndSizeDict;
	uint32_t counter = 0;
	for(; counter <25205179;counter += 1){
		docIDAndSizeDict[counter] = 0;
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs init." << endl;
	    }
	}

	// This is the main operation
	// step3: load the postings into the index
	cout << "STEP3" << endl;
	cout << "load the postings into the index" << endl;
	string inputBinaryFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPostingIncomingForPostingHit));
    cout << "inputFileName: " << inputBinaryFileName << endl;
    int posting_info_fd;
    posting_info_fd = open(inputBinaryFileName.c_str(), O_RDONLY);	// the input is in Binary format
    struct stat stat_buf;
    fstat(posting_info_fd, &stat_buf);
    off_t POSTING_INFO_FILE_SIZE = stat_buf.st_size;
    off_t num_bytes_read = 0;
    uint32_t termID = 100000000;
    uint32_t docID = 100000000;
    float partialBM25Score = 0.0;
    uint32_t impactScoreInPositionForm = 0;
    string termIDInStringFormat = "";
    uint64_t numOfPostingRecorded = 0;
    while (num_bytes_read < POSTING_INFO_FILE_SIZE){
    	read(posting_info_fd, &docID, 4);
    	read(posting_info_fd, &termID, 4);
    	read(posting_info_fd, &partialBM25Score, 4);
    	cout << docID << " " << termID << " " << partialBM25Score << endl;
    	docIDAndSizeDict[docID] += 1;
    	impactScoreInPositionForm = partialBM25Score * 1000000;
        std::stringstream ss;
        ss << termID;
        ss >> termIDInStringFormat;
    	callback_->ProcessTerm(termIDANDTermDict[termIDInStringFormat].c_str(), termIDANDTermDict[termIDInStringFormat].length(), docID, impactScoreInPositionForm, 0, partialBM25Score);
    	num_bytes_read += 12;
    	numOfPostingRecorded +=1;
    }

	// step4: fake fill the aux data structure
    cout << "STEP4" << endl;
	cout << "fake fill begins2..." << endl;
	// fake fill the URL for the document collection
	string fakeURL = "http://www.loveWei.com";
	string fakeDocno = "GX000-00-00000000";
	// unsigned int fakeDocSize = 1;
	counter = 0;
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		callback_->ProcessDocno(fakeDocno.c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	// cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;
	GetPostingCollectionController().Finish();
	// inputfile.close();
	*/
}

// TODO: tomorrow's work. Build Inverted Index For normalized Posting Hit
//
// NOTE(review): This function is currently an intentional no-op — its entire
// body is commented out.  The retained (disabled) implementation loaded a
// term/termID mapping, then streamed a binary per-document posting file
// through a manually managed read buffer, filtering postings by a
// normalized-hit threshold before feeding them to callback_->ProcessTerm,
// and finally faked the per-document auxiliary data (docno/URL/length).
// The dead code is kept below for reference; confirm with the author before
// deleting it or re-enabling it.
template<class Callback>
  void Parser<Callback>::BuildInvertedIndexForNormalizedPostingHit(){
	/*
	cout << "BuildInvertedIndexForNormalizedPostingHit()" << endl;
	cout << "Begins..." << endl;
	uint32_t normalizedPostingLowerBoundHittingThreshold = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kNormalizedPostingHitLowerBound));
	cout << "normalized posting Lower Bound Hitting Threshold: " << normalizedPostingLowerBoundHittingThreshold << endl;

	// step1: load the whole term and termID mapping table
	cout << "load the whole term and termID mapping table" << endl;
	cout << "Load term, termID info" << endl;
	map<string,uint32_t> termANDTermIDDict;
	map<uint32_t,string> termIDANDTermDict;
	termANDTermIDDict["thiswww8912thisthis"] = 50000000;
	termIDANDTermDict[50000000] = "thiswww8912thisthis";
	termANDTermIDDict["thiswww8913thisthis"] = 100000000;
	termIDANDTermDict[100000000] = "thiswww8913thisthis";

	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource));
    ifstream inputfile(infoIncomingSourceFileName.c_str());
    cout << "infoIncomingSourceFileName: " << infoIncomingSourceFileName << endl;
    string currentLine;
    uint32_t lineCounter = 0;
    while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termIDInStringFormat;
			uint32_t termID;
			string term;
			string wildCat;

		    iss >> termIDInStringFormat;
		    iss >> term;
		    iss >> wildCat;

		    termID = atol(termIDInStringFormat.c_str());
		    termANDTermIDDict[term] = termID;
		    termIDANDTermDict[termID] = term;

		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}
	// cout << "termANDTermIDDict['000000002']:" << termANDTermIDDict["000000002"] << endl;
	// cout << "termIDANDTermDict['10000']:" << termIDANDTermDict["10000"] << endl;
	cout << "termANDTermIDDict.size():" << termANDTermIDDict.size() << endl;
	cout << "termIDANDTermDict.size():" << termIDANDTermDict.size() << endl;


	// step2: load doc and docSize init table
	cout << "load doc and docSize init table" << endl;
	map<uint32_t,uint32_t> docIDAndSizeDict;
	uint32_t counter = 0;
	for(; counter <25205179;counter += 1){
		docIDAndSizeDict[counter] = 0;
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs init." << endl;
	    }
	}

	// step3-2: load the corresponding document posting array into the main memory
	cout << "step1_2_3: Load the real postings and make them back into inverted index format." << endl;
	// "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/GOV2_documentPostingArray_0M_1M_docHit_postingHit_added";
	string inputBinaryFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kNormalizedPostingHitFileName));
	uint32_t docID = 100000000;
	uint32_t numOfPostings = -1;
	uint32_t docHit = 0;
	uint32_t termID = 100000000;
	string termIDInStringFormat = "";
	float partialBM25Score = 0.0;
	uint32_t postingHit = 0.0;
	uint64_t comparizonValue = 0;
	float pt = 0.0;
	int read_ret = 0;

	int posting_info_fd;
	posting_info_fd = open(inputBinaryFileName.c_str(), O_RDONLY);	// the input is in Binary format
	struct stat stat_buf;
	fstat(posting_info_fd, &stat_buf);
	off_t posting_info_file_size = stat_buf.st_size;
	off_t num_bytes_read = 0;

	cout << "posting_info_file_size: " << posting_info_file_size << endl;

	// Size of buffer for reading postings
	// 1024 * 1024 Bytes
	// int postingInfoBufferSize = 1073741824;
	int postingInfoBufferSize = 1048576;
	char* posting_info_buffer = new char[postingInfoBufferSize];	// Pointer to the current portion of the posting info we're buffering.
	char* posting_info_buffer_ptr = posting_info_buffer;                    // Current position in the posting info buffer.
	read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
	// uint32_t numOfDocsProcessed = 0;
	uint64_t numOfPostingRecorded = 0;
	while (num_bytes_read < posting_info_file_size) {
	  // We need to load more data in this case.
	  if (posting_info_buffer_ptr + (4 + 4 + 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark1", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }

	  // get the docID
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&docID, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;

	  // get the numOfPostings
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&numOfPostings, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;

	  // get the docHit
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&docHit, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;
	  cout << "docID: " << docID << " numOfPostings: " << numOfPostings << " numOfPostingRecorded: " << numOfPostingRecorded << " docHit: " << docHit << endl;

	  // let's do the debug game now
	  //if(docID == 38){
	  //	  break;
	  //}

	  int doc_entry_size = 4 + 4 + 4 + numOfPostings * 4 * 4;
	  if (doc_entry_size > postingInfoBufferSize) {
		  postingInfoBufferSize = doc_entry_size;
		  delete[] posting_info_buffer;
		  posting_info_buffer = new char[postingInfoBufferSize];
		  lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		  read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		  if (read_ret < 0) {
			GetErrorLogger().LogErrno("trying to read,mark2", errno, true);
		  }
		  posting_info_buffer_ptr = posting_info_buffer;
	  }

	  // We need to read more postings into the buffer.
	  if (posting_info_buffer_ptr + (numOfPostings * 4 * 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark3", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }

		for(uint32_t i = 0; i< numOfPostings; i++){
			assert((posting_info_buffer_ptr + (4 + 4 + 4 + 4)) <= (posting_info_buffer + postingInfoBufferSize));
			// get termID
			memcpy(&termID, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get BM25 impact score
			memcpy(&partialBM25Score, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get postingHit of a posting
			memcpy(&postingHit, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get pt of a posting
			memcpy(&pt, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			comparizonValue = postingHit / pt / 10; // drop 1 digit
			// debug
			// cout << i << " " << termID << " " << partialBM25Score << " " << postingHit << " " << pt << " " << comparizonValue << endl;

			if (comparizonValue >= normalizedPostingLowerBoundHittingThreshold){
				// cout << i << " " << termIDANDTermDict[termID].c_str() << " " << termID << " " << partialBM25Score << " " << (postingHit+1) << endl;
				// impact score
				uint32_t impactScoreInPositionForm = partialBM25Score * 1000000;
				callback_->ProcessTerm(termIDANDTermDict[termID].c_str(), termIDANDTermDict[termID].length(), docID, impactScoreInPositionForm, 0, partialBM25Score);
				// the posting hit
				// special shortcut for this add 1 operation
				// uint32_t postingHitInPositionForm = (postingHit+1);
				// callback_->ProcessTerm(termIDANDTermDict[termID].c_str(), termIDANDTermDict[termID].length(), docID, postingHitInPositionForm, 0, partialBM25Score);
			}
			else{
				// do nothing here
			}

			numOfPostingRecorded += 1;
		}
	}
	delete[] posting_info_buffer;
	posting_info_buffer = NULL;
	cout << "--->num_bytes_read: " << num_bytes_read << endl;

	// step4: fake fill the aux data structure
	cout << "fake fill begins2..." << endl;
	// fake fill the URL for the document collection
	string fakeURL = "http://www.loveWei.com";
	string fakeDocno = "GX000-00-00000000";
	// unsigned int fakeDocSize = 1;
	counter = 0;
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		callback_->ProcessDocno(fakeDocno.c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	// cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;
	GetPostingCollectionController().Finish();
	// inputfile.close();
	cout << "Ends." << endl;
	*/
}

template<class Callback>
  void Parser<Callback>::BuildInvertedIndexForPostingHitAndDocHit(){
	// NOTE(review): This method is currently a NO-OP — its entire body is wrapped
	// in the block comment below and nothing executes when it is called.
	// The disabled code appears to rebuild an inverted index from a binary
	// docID/numOfPostings/docHit posting file (kDocHitPostingHitFileName),
	// keeping only postings where (postingHit + 1) >= kPostingHitLowerBound,
	// and then filling fake docno/URL/doc-length metadata for 25,205,179 docs
	// (presumably the GOV2 collection size — TODO confirm) before calling
	// GetPostingCollectionController().Finish(). Verify the record layout
	// (3 x 4-byte header, then numOfPostings x 3 x 4-byte entries) against the
	// file producer before re-enabling.
	/*
	cout << "BuildInvertedIndexForPostingHitAndDocHit" << endl;
	cout << "Begins..." << endl;
	uint32_t postingLowerBoundHittingThreshold = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kPostingHitLowerBound));
	cout << "posting Lower Bound Hitting Threshold: " << postingLowerBoundHittingThreshold << endl;
	// step1: load the whole term and termID mapping table
	cout << "load the whole term and termID mapping table" << endl;
	cout << "Load term, termID info" << endl;
	map<string,uint32_t> termANDTermIDDict;
	map<uint32_t,string> termIDANDTermDict;

	termANDTermIDDict["thiswww8912thisthis"] = 50000000;
	termIDANDTermDict[50000000] = "thiswww8912thisthis";
	termANDTermIDDict["thiswww8913thisthis"] = 100000000;
	termIDANDTermDict[100000000] = "thiswww8913thisthis";

	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource));
    ifstream inputfile(infoIncomingSourceFileName.c_str());
    cout << "infoIncomingSourceFileName: " << infoIncomingSourceFileName << endl;
    string currentLine;
    uint32_t lineCounter = 0;
    while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termIDInStringFormat;
			uint32_t termID;
			string term;
			string wildCat;

		    iss >> termIDInStringFormat;
		    iss >> term;
		    iss >> wildCat;

		    termID = atol(termIDInStringFormat.c_str());
		    termANDTermIDDict[term] = termID;
		    termIDANDTermDict[termID] = term;

		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}
	// cout << "termANDTermIDDict['000000002']:" << termANDTermIDDict["000000002"] << endl;
	// cout << "termIDANDTermDict['10000']:" << termIDANDTermDict["10000"] << endl;

	cout << "termANDTermIDDict.size():" << termANDTermIDDict.size() << endl;
	cout << "termIDANDTermDict.size():" << termIDANDTermDict.size() << endl;


	// step2: load doc and docSize init table
	cout << "load doc and docSize init table" << endl;
	map<uint32_t,uint32_t> docIDAndSizeDict;
	uint32_t counter = 0;
	for(; counter <25205179;counter += 1){
		docIDAndSizeDict[counter] = 0;
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs init." << endl;
	    }
	}

	// step3-2: load the corresponding document posting array into the main memory
	cout << "step1_2_3: Load the real postings and make them back into inverted index format." << endl;
	// "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/GOV2_documentPostingArray_0M_1M_docHit_postingHit_added";
	string inputBinaryFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocHitPostingHitFileName));
	uint32_t docID = 100000000;
	uint32_t numOfPostings = -1;
	uint32_t docHit = 0;
	uint32_t termID = 100000000;
	string termIDInStringFormat = "";
	float partialBM25Score = 0.0;
	uint32_t postingHit = 0.0;
	int read_ret = 0;

	int posting_info_fd;
	posting_info_fd = open(inputBinaryFileName.c_str(), O_RDONLY);	// the input is in Binary format
	struct stat stat_buf;
	fstat(posting_info_fd, &stat_buf);
	off_t posting_info_file_size = stat_buf.st_size;
	off_t num_bytes_read = 0;

	cout << "posting_info_file_size: " << posting_info_file_size << endl;

	// Size of buffer for reading postings
	// 1024 * 1024 Bytes
	// int postingInfoBufferSize = 1073741824;
	int postingInfoBufferSize = 1048576;
	char* posting_info_buffer = new char[postingInfoBufferSize];	// Pointer to the current portion of the posting info we're buffering.
	char* posting_info_buffer_ptr = posting_info_buffer;                    // Current position in the posting info buffer.
	read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
	// uint32_t numOfDocsProcessed = 0;
	uint64_t numOfPostingRecorded = 0;
	while (num_bytes_read < posting_info_file_size) {
	  // We need to load more data in this case.
	  if (posting_info_buffer_ptr + (4 + 4 + 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark1", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }

	  // get the docID
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&docID, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;

	  // get the numOfPostings
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&numOfPostings, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;

	  // get the docHit
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&docHit, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;
	  cout << "docID: " << docID << " numOfPostings: " << numOfPostings << " numOfPostingRecorded: " << numOfPostingRecorded << " docHit: " << docHit << endl;

	  // let's do the debug game now
	  //if(docID == 38){
	  //	  break;
	  //}

	  int doc_entry_size = 4 + 4 + 4 + numOfPostings * 3 * 4;
	  if (doc_entry_size > postingInfoBufferSize) {
		  postingInfoBufferSize = doc_entry_size;
		  delete[] posting_info_buffer;
		  posting_info_buffer = new char[postingInfoBufferSize];
		  lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		  read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		  if (read_ret < 0) {
			GetErrorLogger().LogErrno("trying to read,mark2", errno, true);
		  }
		  posting_info_buffer_ptr = posting_info_buffer;
	  }

	  // We need to read more postings into the buffer.
	  if (posting_info_buffer_ptr + (numOfPostings * 3 * 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark3", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }

		for(uint32_t i = 0; i< numOfPostings; i++){
			assert((posting_info_buffer_ptr + (4 + 4 + 4)) <= (posting_info_buffer + postingInfoBufferSize));
			// get termID
			memcpy(&termID, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get BM25 impact score
			memcpy(&partialBM25Score, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get postingHit of a posting
			memcpy(&postingHit, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			if (postingHit+1 >= postingLowerBoundHittingThreshold){
				// cout << i << " " << termIDANDTermDict[termID].c_str() << " " << termID << " " << partialBM25Score << " " << (postingHit+1) << endl;
				// impact score
				uint32_t impactScoreInPositionForm = partialBM25Score * 1000000;
				callback_->ProcessTerm(termIDANDTermDict[termID].c_str(), termIDANDTermDict[termID].length(), docID, impactScoreInPositionForm, 0, partialBM25Score);
				// the posting hit
				// special shortcut for this add 1 operation
				// uint32_t postingHitInPositionForm = (postingHit+1);
				// callback_->ProcessTerm(termIDANDTermDict[termID].c_str(), termIDANDTermDict[termID].length(), docID, postingHitInPositionForm, 0, partialBM25Score);
			}
			else{
				// do nothing here
			}

			numOfPostingRecorded += 1;
		}
	}
	delete[] posting_info_buffer;
	posting_info_buffer = NULL;
	cout << "--->num_bytes_read: " << num_bytes_read << endl;

	// step4: fake fill the aux data structure
	cout << "fake fill begins2..." << endl;
	// fake fill the URL for the document collection
	string fakeURL = "http://www.loveWei.com";
	string fakeDocno = "GX000-00-00000000";
	// unsigned int fakeDocSize = 1;
	counter = 0;
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		callback_->ProcessDocno(fakeDocno.c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	// cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;
	GetPostingCollectionController().Finish();
	// inputfile.close();
	cout << "Ends." << endl;
	*/
}

template<class Callback>
  void Parser<Callback>::BuildInvertedIndexForDocHitsMethod2(){
	// NOTE(review): This method is currently a NO-OP — the whole body is inside
	// the block comment below. The disabled code appears to: (1) load a
	// term<->termID mapping (as strings) from kInfoIncomingSource, (2) load a
	// set of selected docIDs from kDocHitDocumentPartitionFileName, (3) scan a
	// list of 26 posting-array files (only the first path is filled in; the
	// other 25 are empty strings, which would make open() fail for them) and
	// replay postings belonging to selected docs into callback_->ProcessTerm(),
	// and (4) fill fake docno/URL metadata for 25,205,179 docs. Also note the
	// disabled code declares docIDSelectedDict as map<uint32_t,bool> but does
	// `+= 1` on its values — verify intent before re-enabling.
	/*
	cout << "Parser<Callback>::BuildInvertedIndexForDocHitsMethod2() is called." << endl;
	// step1: load the whole term and termID mapping table
	cout << "load the whole term and termID mapping table" << endl;
	cout << "Load term, termID info" << endl;
	map<string,string> termANDTermIDDict;
	map<string,string> termIDANDTermDict;

	// handle the VERY BIG number bug in my code
	termANDTermIDDict["thiswww8912thisthis"] = "50000000";
	termIDANDTermDict["50000000"] = "thiswww8912thisthis";
	termANDTermIDDict["thiswww8913thisthis"] = "100000000";
	termIDANDTermDict["100000000"] = "thiswww8913thisthis";
	// termIDANDTermDict["23990334"] = "irs";
	// termIDANDTermDict["28508548"] = "nyc";
	// termANDTermIDDict["irs"] = 23990334;
	// termANDTermIDDict["nyc"] = 28508548;
	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource));
    ifstream inputfile(infoIncomingSourceFileName.c_str());
    cout << "infoIncomingSourceFileName: " << infoIncomingSourceFileName << endl;
    string currentLine;
    uint32_t lineCounter = 0;
    while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termID;
			string term;
			string wildCat;

		    iss >> termID;
		    iss >> term;
		    iss >> wildCat;

		    termANDTermIDDict[term] = termID;
		    termIDANDTermDict[termID] = term;

		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}
	// cout << "termANDTermIDDict['000000002']:" << termANDTermIDDict["000000002"] << endl;
	// cout << "termIDANDTermDict['10000']:" << termIDANDTermDict["10000"] << endl;
	cout << "termANDTermIDDict.size():" << termANDTermIDDict.size() << endl;
	cout << "termIDANDTermDict.size():" << termIDANDTermDict.size() << endl;

	// step2: load doc and docSize init table
	cout << "load doc and docSize init table" << endl;
	map<uint32_t,uint32_t> docIDAndSizeDict;
	uint32_t counter = 0;
	for(; counter <25205179;counter += 1){
		docIDAndSizeDict[counter] = 0;
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs init." << endl;
	    }
	}

	// current version in development
	// step3-1: load the set of documents needed to parse
	cout << "load the set of documents needed to extract and put them into index" << endl;
	map<uint32_t,bool> docIDSelectedDict;
	// in test
	// string ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_sortedByDocID_1";
	// in production
	string ifn = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocHitDocumentPartitionFileName));
	ifstream ifh(ifn.c_str());
	lineCounter = 0;
	while ( ifh.good() ){
		getline (ifh,currentLine);
		lineCounter += 1;
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string docIDInStr;
			string wildCatValue0;
			string wildCatValue1;
			string wildCatValue2;
			string wildCatValue3;
			string wildCatValue4;

			iss >> docIDInStr;
		    iss >> wildCatValue0;
		    iss >> wildCatValue1;
		    iss >> wildCatValue2;
		    iss >> wildCatValue3;
		    iss >> wildCatValue4;

		    uint32_t docID = strtoul(docIDInStr.c_str(), NULL, 0);
		    docIDSelectedDict[docID] += 1;
		}
		// for debug purposes
		// if (lineCounter == 1000){
		// 	break;
		// }

	}
	ifh.close();
	cout << "docIDDict.size(): " << docIDSelectedDict.size() << endl;

	// step3-2: load the corresponding document posting array into the main memory
	cout << "step1_2_3: Load the real postings and make them back into inverted index format." << endl;
	vector<string> inputBinaryFileNames;
	string ifn1 = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_0M_1M";
	string ifn2 = "";
	string ifn3 = "";
	string ifn4 = "";
	string ifn5 = "";
	string ifn6 = "";
	string ifn7 = "";
	string ifn8 = "";
	string ifn9 = "";
	string ifn10 = "";
	string ifn11 = "";
	string ifn12 = "";
	string ifn13 = "";
	string ifn14 = "";
	string ifn15 = "";
	string ifn16 = "";
	string ifn17 = "";
	string ifn18 = "";
	string ifn19 = "";
	string ifn20 = "";
	string ifn21 = "";
	string ifn22 = "";
	string ifn23 = "";
	string ifn24 = "";
	string ifn25 = "";
	string ifn26 = "";

	inputBinaryFileNames.push_back(ifn1);
	inputBinaryFileNames.push_back(ifn2);
	inputBinaryFileNames.push_back(ifn3);
	inputBinaryFileNames.push_back(ifn4);
	inputBinaryFileNames.push_back(ifn5);
	inputBinaryFileNames.push_back(ifn6);
	inputBinaryFileNames.push_back(ifn7);
	inputBinaryFileNames.push_back(ifn8);
	inputBinaryFileNames.push_back(ifn9);
	inputBinaryFileNames.push_back(ifn10);
	inputBinaryFileNames.push_back(ifn11);
	inputBinaryFileNames.push_back(ifn12);
	inputBinaryFileNames.push_back(ifn13);
	inputBinaryFileNames.push_back(ifn14);
	inputBinaryFileNames.push_back(ifn15);
	inputBinaryFileNames.push_back(ifn16);
	inputBinaryFileNames.push_back(ifn17);
	inputBinaryFileNames.push_back(ifn18);
	inputBinaryFileNames.push_back(ifn19);
	inputBinaryFileNames.push_back(ifn20);
	inputBinaryFileNames.push_back(ifn21);
	inputBinaryFileNames.push_back(ifn22);
	inputBinaryFileNames.push_back(ifn23);
	inputBinaryFileNames.push_back(ifn24);
	inputBinaryFileNames.push_back(ifn25);
	inputBinaryFileNames.push_back(ifn26);

	for(uint32_t counter = 0; counter < inputBinaryFileNames.size(); counter++){
		// string inputBinaryFileName = "";
		string inputBinaryFileName = inputBinaryFileNames[counter];
		uint32_t docID = 100000000;
		int numOfPostings = -1;
		uint32_t termID = 100000000;
		string termIDInStringFormat = "";
		float staticProbablity = 0.0;
		float dynamicProbablity = 0.0;
		float finalProbablityForComparizon = 0.0;
		float partialBM25Score = 0.0;
		int read_ret = 0;

		int posting_info_fd;
		posting_info_fd = open(inputBinaryFileName.c_str(), O_RDONLY);	// the input is in Binary format
		struct stat stat_buf;
		fstat(posting_info_fd, &stat_buf);
		off_t posting_info_file_size = stat_buf.st_size;
		off_t num_bytes_read = 0;

		cout << "posting_info_file_size: " << posting_info_file_size << endl;

		// Size of buffer for reading postings
		// 1024 * 1024 Bytes
		// int postingInfoBufferSize = 1073741824;
		int postingInfoBufferSize = 1048576;
		char* posting_info_buffer = new char[postingInfoBufferSize];	// Pointer to the current portion of the posting info we're buffering.
		char* posting_info_buffer_ptr = posting_info_buffer;                    // Current position in the posting info buffer.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		// uint32_t numOfDocsProcessed = 0;
		uint64_t numOfPostingRecorded = 0;
		while (num_bytes_read < posting_info_file_size) {

		  // lseek(posting_info_fd, num_bytes_read, SEEK_SET);

		  // We need to load more data in this case.
		  if (posting_info_buffer_ptr + (4 + 4) > posting_info_buffer + postingInfoBufferSize) {
			lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
			read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
			if (read_ret < 0) {
			  GetErrorLogger().LogErrno("trying to read,mark1", errno, true);
			}
			posting_info_buffer_ptr = posting_info_buffer;
		  }


		  // get the docID
		  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
		  memcpy(&docID, posting_info_buffer_ptr, 4);
		  posting_info_buffer_ptr += 4;
		  num_bytes_read += 4;

		  // get the numOfPostings
		  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
		  memcpy(&numOfPostings, posting_info_buffer_ptr, 4);
		  posting_info_buffer_ptr += 4;
		  num_bytes_read += 4;

		  if (docIDSelectedDict.count(docID) > 0){
			  docIDAndSizeDict[docID] = numOfPostings;
			  // for debug
			  // numOfDocsProcessed += 1;
			  cout << "docID: " << docID << " numOfPostings: " << numOfPostings << " numOfPostingRecorded: " << numOfPostingRecorded << endl;

		  }

		  int doc_entry_size = 4 + 4 + numOfPostings * 5 * 4;
		  if (doc_entry_size > postingInfoBufferSize) {
			  postingInfoBufferSize = doc_entry_size;
			  delete[] posting_info_buffer;
			  posting_info_buffer = new char[postingInfoBufferSize];
			  lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
			  read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
			  if (read_ret < 0) {
				GetErrorLogger().LogErrno("trying to read,mark2", errno, true);
			  }
			  posting_info_buffer_ptr = posting_info_buffer;
		  }

		  // We need to read more postings into the buffer.
		  if (posting_info_buffer_ptr + (numOfPostings * 5 * 4) > posting_info_buffer + postingInfoBufferSize) {
			lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
			read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
			if (read_ret < 0) {
			  GetErrorLogger().LogErrno("trying to read,mark3", errno, true);
			}
			posting_info_buffer_ptr = posting_info_buffer;
		  }

			for(unsigned int i = 0; i< numOfPostings; i++){
				assert((posting_info_buffer_ptr + (4 + 4 + 4 + 4 + 4)) <= (posting_info_buffer + postingInfoBufferSize));
				memcpy(&termID, posting_info_buffer_ptr, 4);
				posting_info_buffer_ptr += 4;
				num_bytes_read += 4;

				// ignore the static probability value cause we will get combination probability directly
				posting_info_buffer_ptr += 4;
				num_bytes_read += 4;

				// ignore the dynamic probability value cause we will get combination probability directly
				posting_info_buffer_ptr += 4;
				num_bytes_read += 4;

				// get the combined final probability value from here
				memcpy(&finalProbablityForComparizon, posting_info_buffer_ptr, 4);
				posting_info_buffer_ptr += 4;
				num_bytes_read += 4;

				// get the impact score of BM25, that's the thing I really needed.
				memcpy(&partialBM25Score, posting_info_buffer_ptr, 4);
				posting_info_buffer_ptr += 4;
				num_bytes_read += 4;

				if(docIDSelectedDict.count(docID)>0){
					std::stringstream ss;
					ss << termID;
					ss >> termIDInStringFormat;
					// cout << termID << " " << partialBM25Score << endl;
					uint32_t impactScoreInPositionForm = partialBM25Score * 1000000;
					// cout << termIDANDTermDict[termIDInStringFormat] << " " << termIDANDTermDict[termIDInStringFormat] << " " << docID << " " << partialBM25Score << endl;
					// debug
					//if(termIDANDTermDict[termIDInStringFormat].length() == 0){
					//	cout << termIDInStringFormat << endl;
					//	cout << termIDANDTermDict[termIDInStringFormat] << endl;
					//	exit(1);
					//}

					callback_->ProcessTerm(termIDANDTermDict[termIDInStringFormat].c_str(), termIDANDTermDict[termIDInStringFormat].length(), docID, impactScoreInPositionForm, 0, partialBM25Score);
					numOfPostingRecorded += 1;
				}
			}
		}
		delete[] posting_info_buffer;
		posting_info_buffer = NULL;
		cout << "--->num_bytes_read: " << num_bytes_read << endl;
	}

	// step4: fake fill the aux data structure
	cout << "fake fill begins2..." << endl;
	// fake fill the URL for the document collection
	string fakeURL = "http://www.loveWei.com";
	string fakeDocno = "GX000-00-00000000";
	// unsigned int fakeDocSize = 1;
	counter = 0;
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		callback_->ProcessDocno(fakeDocno.c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	// cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;
	GetPostingCollectionController().Finish();
	inputfile.close();
	*/
}

template<class Callback>
  void Parser<Callback>::BuildInvertedIndexForDocHitsAndPostingHitHybridMethod_withGlobalUPPAdded_20141215(){
	// NOTE(review): Only the trace line below executes; the full implementation
	// that follows it is commented out, so this method currently does nothing
	// else. The disabled code appears to: (1) load a term<->termID mapping from
	// kInfoIncomingSource, (2) load a selected-docID set from
	// kDocHitDocumentPartitionFileName, (3) scan a 7-field-per-posting binary
	// file (kNormalizedPostingHitFileName) and keep postings of selected docs
	// when postingHit >= 10 or combinedProbability >= 4.79607500381e-08 (both
	// thresholds hard-coded — presumably tuned offline, TODO confirm), and
	// (4) fill fake docno/URL/doc-length metadata for 25,205,179 docs.
	cout << "Parser<Callback>::BuildInvertedIndexForDocHitsAndPostingHitHybridMethod_withGlobalUPPAdded_20141215() is called." << endl;
	/*
	// step1: load the whole term and termID mapping table
	cout << "load the whole term and termID mapping table" << endl;
	cout << "Load term, termID info" << endl;
	map<string,uint32_t> termANDTermIDDict;
	map<uint32_t,string> termIDANDTermDict;
	termANDTermIDDict["thiswww8912thisthis"] = 50000000;
	termIDANDTermDict[50000000] = "thiswww8912thisthis";
	termANDTermIDDict["thiswww8913thisthis"] = 100000000;
	termIDANDTermDict[100000000] = "thiswww8913thisthis";

	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource));
    ifstream inputfile(infoIncomingSourceFileName.c_str());
    cout << "infoIncomingSourceFileName: " << infoIncomingSourceFileName << endl;
    string currentLine;
    uint32_t lineCounter = 0;
    while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termIDInStringFormat;
			uint32_t termID;
			string term;
			string wildCat;

		    iss >> termIDInStringFormat;
		    iss >> term;
		    iss >> wildCat;

		    termID = atol(termIDInStringFormat.c_str());
		    termANDTermIDDict[term] = termID;
		    termIDANDTermDict[termID] = term;

		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}
	// cout << "termANDTermIDDict['000000002']:" << termANDTermIDDict["000000002"] << endl;
	// cout << "termIDANDTermDict['10000']:" << termIDANDTermDict["10000"] << endl;
	cout << "termANDTermIDDict.size():" << termANDTermIDDict.size() << endl;
	cout << "termIDANDTermDict.size():" << termIDANDTermDict.size() << endl;

	// step2: load doc and docSize init table
	cout << "load doc and docSize init table" << endl;
	map<uint32_t,uint32_t> docIDAndSizeDict;
	uint32_t counter = 0;
	for(; counter <25205179;counter += 1){
		docIDAndSizeDict[counter] = 0;
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs init." << endl;
	    }
	}

	// current version in development
	// step3-1: load the set of documents needed to parse
	cout << "load the set of documents needed to extract and put them into index" << endl;
	map<uint32_t,bool> docIDSelectedDict;
	// in test
	// string ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_sortedByDocID_1";
	// in production
	string ifn = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocHitDocumentPartitionFileName));
	ifstream ifh(ifn.c_str());
	cout << "ifn: " << ifn << endl;
	lineCounter = 0;
	while ( ifh.good() ){
		getline (ifh,currentLine);
		lineCounter += 1;
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string docIDInStr;
			string wildCatValue0;
			string wildCatValue1;
			string wildCatValue2;
			string wildCatValue3;
			string wildCatValue4;

			iss >> docIDInStr;
		    iss >> wildCatValue0;

		    // cout << docIDInStr << " " << wildCatValue0 << endl;
		    uint32_t docID = strtoul(docIDInStr.c_str(), NULL, 0);
		    docIDSelectedDict[docID] += 1;
		}
	}
	ifh.close();
	cout << "docIDDict.size(): " << docIDSelectedDict.size() << endl;

	// step3-2: load the corresponding document posting array into the main memory
	cout << "step1_2_3: Load the real postings and make them back into inverted index format." << endl;
	// basePath:
	// /home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/theAllInOneFeatureArrays_20141215/
	// fileNames:
	// GOV2_documentPostingArray_0M_1M_features_allInOne
	
	string inputBinaryFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kNormalizedPostingHitFileName));
	uint32_t docID = 100000000;
	uint32_t numOfPostings = -1;
	uint32_t docHit = 0;
	uint32_t termID = 100000000;
	string termIDInStringFormat = "";
	float partialBM25Score = 0.0;
	uint32_t postingHit = 0.0;
	uint64_t comparizonValue = 0;
	float staticProbability = 0.0;
	float dynamicProbability = 0.0;
	float combinedProbability = 0.0;
	float pt = 0.0;
	int read_ret = 0;

	int posting_info_fd;
	posting_info_fd = open(inputBinaryFileName.c_str(), O_RDONLY);	// the input is in Binary format
	struct stat stat_buf;
	fstat(posting_info_fd, &stat_buf);
	off_t posting_info_file_size = stat_buf.st_size;
	off_t num_bytes_read = 0;

	cout << "posting_info_file_size: " << posting_info_file_size << endl;

	// Size of buffer for reading postings
	// 1024 * 1024 Bytes
	// int postingInfoBufferSize = 1073741824;
	int postingInfoBufferSize = 1048576;
	char* posting_info_buffer = new char[postingInfoBufferSize];	// Pointer to the current portion of the posting info we're buffering.
	char* posting_info_buffer_ptr = posting_info_buffer;                    // Current position in the posting info buffer.
	read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
	// uint32_t numOfDocsProcessed = 0;
	uint64_t numOfPostingRecorded = 0;
	while (num_bytes_read < posting_info_file_size) {
	  // We need to load more data in this case.
	  if (posting_info_buffer_ptr + (4 + 4 + 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark1", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }

	  // get the docID
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&docID, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;

	  // get the numOfPostings
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&numOfPostings, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;

	  // get the docHit
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&docHit, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;

	  cout << "docID: " << docID << " numOfPostings: " << numOfPostings << " numOfPostingRecorded: " << numOfPostingRecorded << " docHit: " << docHit << endl;


	  int doc_entry_size = 4 + 4 + 4 + numOfPostings * 4 * 7;
	  if (doc_entry_size > postingInfoBufferSize) {
		  postingInfoBufferSize = doc_entry_size;
		  delete[] posting_info_buffer;
		  posting_info_buffer = new char[postingInfoBufferSize];
		  lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		  read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		  if (read_ret < 0) {
			GetErrorLogger().LogErrno("trying to read,mark2", errno, true);
		  }
		  posting_info_buffer_ptr = posting_info_buffer;
	  }

	  // We need to read more postings into the buffer.
	  if (posting_info_buffer_ptr + (numOfPostings * 4 * 7) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark3", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }

		for(uint32_t i = 0; i< numOfPostings; i++){
			assert((posting_info_buffer_ptr + (4 + 4 + 4 + 4 + 4 + 4 + 4)) <= (posting_info_buffer + postingInfoBufferSize));
			// get termID
			memcpy(&termID, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get the static probability
			memcpy(&staticProbability, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get the dynamic probability
			memcpy(&dynamicProbability, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get the combined probability
			memcpy(&combinedProbability, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get BM25 impact score
			memcpy(&partialBM25Score, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get postingHit of a posting
			memcpy(&postingHit, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get pt of a posting
			memcpy(&pt, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// debug line
			// cout << i << " " << docID << " " << termIDANDTermDict[termID] << " " << termID << " " << staticProbability << " " << dynamicProbability << " " << combinedProbability << " " << partialBM25Score << " " << postingHit << " " << pt << endl;

			// 3 conditions:
			if (docIDSelectedDict.count(docID) > 0){
				if (postingHit >= 10 or combinedProbability >= 4.79607500381e-08){
					// debug line
					// cout << i << " " << docID << " " << termIDANDTermDict[termID] << " " << termID << " " << partialBM25Score << " " << postingHit << endl;

					// impact score
					uint32_t impactScoreInPositionForm = partialBM25Score * 1000000;
					callback_->ProcessTerm(termIDANDTermDict[termID].c_str(), termIDANDTermDict[termID].length(), docID, impactScoreInPositionForm, 0, partialBM25Score);

					numOfPostingRecorded += 1;
					docIDAndSizeDict[docID] += 1;
				}
			}
		}

		// debug
		//if (docID == 1){
		//	break;
		//}

	}
	delete[] posting_info_buffer;
	posting_info_buffer = NULL;
	cout << "--->num_bytes_read: " << num_bytes_read << endl;

	// step4: fake fill the aux data structure
	cout << "fake fill begins2..." << endl;
	// fake fill the URL for the document collection
	string fakeURL = "http://www.loveWei.com";
	string fakeDocno = "GX000-00-00000000";
	// unsigned int fakeDocSize = 1;
	counter = 0;
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		callback_->ProcessDocno(fakeDocno.c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	// cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;
	GetPostingCollectionController().Finish();
	// inputfile.close();
	cout << "Ends." << endl;
	*/
}

// NOTE(review): This method is currently a disabled stub. The entire original
// implementation below is commented out; at runtime the method only announces
// that it was called. The dead code is retained verbatim as a reference for
// the binary posting-array format it parsed (per document: docID(4B),
// numOfPostings(4B), docHit(4B), then numOfPostings tuples of
// termID/partialBM25Score/postingHit/pt, 4 bytes each).
template<class Callback>
  void Parser<Callback>::BuildInvertedIndexForDocHitsAndPostingHitHybridMethod_20141212(){
	cout << "Parser<Callback>::BuildInvertedIndexForDocHitsAndPostingHitHybridMethod_20141212() is called." << endl;
	/*
	// step1: load the whole term and termID mapping table
	cout << "load the whole term and termID mapping table" << endl;
	cout << "Load term, termID info" << endl;
	map<string,uint32_t> termANDTermIDDict;
	map<uint32_t,string> termIDANDTermDict;
	termANDTermIDDict["thiswww8912thisthis"] = 50000000;
	termIDANDTermDict[50000000] = "thiswww8912thisthis";
	termANDTermIDDict["thiswww8913thisthis"] = 100000000;
	termIDANDTermDict[100000000] = "thiswww8913thisthis";

	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource));
    ifstream inputfile(infoIncomingSourceFileName.c_str());
    cout << "infoIncomingSourceFileName: " << infoIncomingSourceFileName << endl;
    string currentLine;
    uint32_t lineCounter = 0;
    while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termIDInStringFormat;
			uint32_t termID;
			string term;
			string wildCat;

		    iss >> termIDInStringFormat;
		    iss >> term;
		    iss >> wildCat;

		    termID = atol(termIDInStringFormat.c_str());
		    termANDTermIDDict[term] = termID;
		    termIDANDTermDict[termID] = term;

		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}
	// cout << "termANDTermIDDict['000000002']:" << termANDTermIDDict["000000002"] << endl;
	// cout << "termIDANDTermDict['10000']:" << termIDANDTermDict["10000"] << endl;
	cout << "termANDTermIDDict.size():" << termANDTermIDDict.size() << endl;
	cout << "termIDANDTermDict.size():" << termIDANDTermDict.size() << endl;

	// step2: load doc and docSize init table
	cout << "load doc and docSize init table" << endl;
	map<uint32_t,uint32_t> docIDAndSizeDict;
	uint32_t counter = 0;
	for(; counter <25205179;counter += 1){
		docIDAndSizeDict[counter] = 0;
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs init." << endl;
	    }
	}

	// current version in development
	// step3-1: load the set of documents needed to parse
	cout << "load the set of documents needed to extract and put them into index" << endl;
	map<uint32_t,bool> docIDSelectedDict;
	// in test
	// string ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_sortedByDocID_1";
	// in production
	string ifn = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocHitDocumentPartitionFileName));
	ifstream ifh(ifn.c_str());
	cout << "ifn: " << ifn << endl;
	lineCounter = 0;
	while ( ifh.good() ){
		getline (ifh,currentLine);
		lineCounter += 1;
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string docIDInStr;
			string wildCatValue0;
			string wildCatValue1;
			string wildCatValue2;
			string wildCatValue3;
			string wildCatValue4;

			iss >> docIDInStr;
		    iss >> wildCatValue0;

		    // cout << docIDInStr << " " << wildCatValue0 << endl;
		    uint32_t docID = strtoul(docIDInStr.c_str(), NULL, 0);
		    docIDSelectedDict[docID] += 1;
		}
	}
	ifh.close();
	cout << "docIDDict.size(): " << docIDSelectedDict.size() << endl;

	// step3-2: load the corresponding document posting array into the main memory
	cout << "step1_2_3: Load the real postings and make them back into inverted index format." << endl;
	// "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/docHitsANDPostingHit/GOV2_documentPostingArray_0M_1M_docHit_postingHit_added";
	
	string inputBinaryFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kNormalizedPostingHitFileName));
	uint32_t docID = 100000000;
	uint32_t numOfPostings = -1;
	uint32_t docHit = 0;
	uint32_t termID = 100000000;
	string termIDInStringFormat = "";
	float partialBM25Score = 0.0;
	uint32_t postingHit = 0.0;
	uint64_t comparizonValue = 0;
	float pt = 0.0;
	int read_ret = 0;

	int posting_info_fd;
	posting_info_fd = open(inputBinaryFileName.c_str(), O_RDONLY);	// the input is in Binary format
	struct stat stat_buf;
	fstat(posting_info_fd, &stat_buf);
	off_t posting_info_file_size = stat_buf.st_size;
	off_t num_bytes_read = 0;

	cout << "posting_info_file_size: " << posting_info_file_size << endl;

	// Size of buffer for reading postings
	// 1024 * 1024 Bytes
	// int postingInfoBufferSize = 1073741824;
	int postingInfoBufferSize = 1048576;
	char* posting_info_buffer = new char[postingInfoBufferSize];	// Pointer to the current portion of the posting info we're buffering.
	char* posting_info_buffer_ptr = posting_info_buffer;                    // Current position in the posting info buffer.
	read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
	// uint32_t numOfDocsProcessed = 0;
	uint64_t numOfPostingRecorded = 0;
	while (num_bytes_read < posting_info_file_size) {
	  // We need to load more data in this case.
	  if (posting_info_buffer_ptr + (4 + 4 + 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark1", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }

	  // get the docID
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&docID, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;

	  // get the numOfPostings
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&numOfPostings, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;

	  // get the docHit
	  assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
	  memcpy(&docHit, posting_info_buffer_ptr, 4);
	  posting_info_buffer_ptr += 4;
	  num_bytes_read += 4;
	  cout << "docID: " << docID << " numOfPostings: " << numOfPostings << " numOfPostingRecorded: " << numOfPostingRecorded << " docHit: " << docHit << endl;

	  // let's do the debug game now
	  //if(docID == 38){
	  //	  break;
	  //}

	  int doc_entry_size = 4 + 4 + 4 + numOfPostings * 4 * 4;
	  if (doc_entry_size > postingInfoBufferSize) {
		  postingInfoBufferSize = doc_entry_size;
		  delete[] posting_info_buffer;
		  posting_info_buffer = new char[postingInfoBufferSize];
		  lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		  read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		  if (read_ret < 0) {
			GetErrorLogger().LogErrno("trying to read,mark2", errno, true);
		  }
		  posting_info_buffer_ptr = posting_info_buffer;
	  }

	  // We need to read more postings into the buffer.
	  if (posting_info_buffer_ptr + (numOfPostings * 4 * 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark3", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }

		for(uint32_t i = 0; i< numOfPostings; i++){
			assert((posting_info_buffer_ptr + (4 + 4 + 4 + 4)) <= (posting_info_buffer + postingInfoBufferSize));
			// get termID
			memcpy(&termID, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get BM25 impact score
			memcpy(&partialBM25Score, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get postingHit of a posting
			memcpy(&postingHit, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get pt of a posting
			memcpy(&pt, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			if (postingHit >= 2 and docIDSelectedDict.count(docID) > 0){
				// debug line
				// cout << i << " " << docID << " " << termIDANDTermDict[termID] << " " << termID << " " << partialBM25Score << " " << postingHit << endl;

				// impact score
				uint32_t impactScoreInPositionForm = partialBM25Score * 1000000;
				callback_->ProcessTerm(termIDANDTermDict[termID].c_str(), termIDANDTermDict[termID].length(), docID, impactScoreInPositionForm, 0, partialBM25Score);

				numOfPostingRecorded += 1;
				docIDAndSizeDict[docID] += 1;
			}
			else{
				// do nothing here
			}

		}
	}
	delete[] posting_info_buffer;
	posting_info_buffer = NULL;
	cout << "--->num_bytes_read: " << num_bytes_read << endl;

	// step4: fake fill the aux data structure
	cout << "fake fill begins2..." << endl;
	// fake fill the URL for the document collection
	string fakeURL = "http://www.loveWei.com";
	string fakeDocno = "GX000-00-00000000";
	// unsigned int fakeDocSize = 1;
	counter = 0;
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		callback_->ProcessDocno(fakeDocno.c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	// cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;
	GetPostingCollectionController().Finish();
	// inputfile.close();
	cout << "Ends." << endl;
	*/
}

// NOTE(review): This method is currently a no-op. The entire original
// implementation below is commented out, so calling it has no effect. The
// dead code is retained verbatim as a reference for the binary document
// posting-array format it parsed (per document: docID(4B), numOfPostings(4B),
// then numOfPostings tuples of termID/staticProb/dynamicProb/combinedProb/
// partialBM25Score, 4 bytes each).
template<class Callback>
  void Parser<Callback>::BuildInvertedIndexForDocHits(){
	/*
	// step1: load the whole term and termID mapping table
	cout << "load the whole term and termID mapping table" << endl;
	cout << "Load term, termID info" << endl;
	map<string,string> termANDTermIDDict;
	map<string,string> termIDANDTermDict;

	// termIDANDTermDict["23990334"] = "irs";
	// termIDANDTermDict["28508548"] = "nyc";
	// termANDTermIDDict["irs"] = 23990334;
	// termANDTermIDDict["nyc"] = 28508548;

	// handle the VERY BIG number bug in my code
	termANDTermIDDict["thiswww8912thisthis"] = "50000000";
	termIDANDTermDict["50000000"] = "thiswww8912thisthis";
	termANDTermIDDict["thiswww8913thisthis"] = "100000000";
	termIDANDTermDict["100000000"] = "thiswww8913thisthis";
	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource));
    ifstream inputfile(infoIncomingSourceFileName.c_str());
    string currentLine;
    uint32_t lineCounter = 0;
    while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termID;
			string term;
			string wildCat;

		    iss >> termID;
		    iss >> term;
		    iss >> wildCat;

		    termANDTermIDDict[term] = termID;
		    termIDANDTermDict[termID] = term;

		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}
	// cout << "termANDTermIDDict['000000002']:" << termANDTermIDDict["000000002"] << endl;
	// cout << "termIDANDTermDict['10000']:" << termIDANDTermDict["10000"] << endl;
	cout << "termANDTermIDDict.size():" << termANDTermIDDict.size() << endl;
	cout << "termIDANDTermDict.size():" << termIDANDTermDict.size() << endl;

	// step2: load doc and docSize init table
	cout << "load doc and docSize init table" << endl;
	map<uint32_t,uint32_t> docIDAndSizeDict;
	uint32_t counter = 0;
	for(; counter <25205179;counter += 1){
		docIDAndSizeDict[counter] = 0;
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs init." << endl;
	    }
	}

	// current version in development
	// step3-1: load the set of documents needed to parse
	cout << "load the set of documents needed to extract and put them into index" << endl;
	map<uint32_t,bool> docIDSelectedDict;
	// in test
	// string ifn = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/tierFromJuan_20141009/tier1_OR_sortedByDocID_1";
	// in production
	string ifn = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocHitDocumentPartitionFileName));
	ifstream ifh(ifn.c_str());
	lineCounter = 0;
	while ( ifh.good() ){
		getline (ifh,currentLine);
		lineCounter += 1;
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string docIDInStr;
			string trecIDInStr;

		    iss >> docIDInStr;
		    iss >> trecIDInStr;

		    uint32_t docID = strtoul(docIDInStr.c_str(), NULL, 0);
		    docIDSelectedDict[docID] += 1;
		}
		// for debug purposes
		// if (lineCounter == 1000){
		// 	break;
		// }

	}
	ifh.close();
	cout << "docIDDict.size(): " << docIDSelectedDict.size() << endl;


	// step3-2: load the corresponding document posting array into the main memory
	cout << "step1_2_3: Load the real postings and make them back into inverted index format." << endl;
	// string inputBinaryFileName = "/home/vgc/wei/workspace/NYU_IRTK/data/unigramFromWei/documentPostingArrays/gov2/essentialityFactorAdded_20140929/GOV2_documentPostingArray_0M_1M";
	// kDocumentPostingArrayFileToSupportDocHits
	string inputBinaryFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocumentPostingArrayFileToSupportDocHits));
	uint32_t docID = 100000000;
	int numOfPostings = -1;
	uint32_t termID = 100000000;
	string termIDInStringFormat = "";
	float staticProbablity = 0.0;
	float dynamicProbablity = 0.0;
	float finalProbablityForComparizon = 0.0;
	float partialBM25Score = 0.0;
	int read_ret = 0;

    int posting_info_fd;
    posting_info_fd = open(inputBinaryFileName.c_str(), O_RDONLY);	// the input is in Binary format
    struct stat stat_buf;
    fstat(posting_info_fd, &stat_buf);
    off_t posting_info_file_size = stat_buf.st_size;
    off_t num_bytes_read = 0;

    cout << "posting_info_file_size: " << posting_info_file_size << endl;

    // Size of buffer for reading postings
    // 1024 * 1024 Bytes
    // int postingInfoBufferSize = 1073741824;
    int postingInfoBufferSize = 1048576;
    char* posting_info_buffer = new char[postingInfoBufferSize];	// Pointer to the current portion of the posting info we're buffering.
    char* posting_info_buffer_ptr = posting_info_buffer;                    // Current position in the posting info buffer.
    read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
    // uint32_t numOfDocsProcessed = 0;
    uint64_t numOfPostingRecorded = 0;
    while (num_bytes_read < posting_info_file_size) {

      // lseek(posting_info_fd, num_bytes_read, SEEK_SET);

	  // We need to load more data in this case.
	  if (posting_info_buffer_ptr + (4 + 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark1", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }


      // get the docID
      assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
      memcpy(&docID, posting_info_buffer_ptr, 4);
      posting_info_buffer_ptr += 4;
      num_bytes_read += 4;

      // get the numOfPostings
      assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
      memcpy(&numOfPostings, posting_info_buffer_ptr, 4);
      posting_info_buffer_ptr += 4;
      num_bytes_read += 4;

      if (docIDSelectedDict.count(docID) > 0){
    	  docIDAndSizeDict[docID] = numOfPostings;
    	  // for debug
    	  // numOfDocsProcessed += 1;
    	  cout << "docID: " << docID << " numOfPostings: " << numOfPostings << " numOfPostingRecorded: " << numOfPostingRecorded << endl;
      }

	  int doc_entry_size = 4 + 4 + numOfPostings * 5 * 4;
	  if (doc_entry_size > postingInfoBufferSize) {
		  postingInfoBufferSize = doc_entry_size;
		  delete[] posting_info_buffer;
		  posting_info_buffer = new char[postingInfoBufferSize];
		  lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		  read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		  if (read_ret < 0) {
			GetErrorLogger().LogErrno("trying to read,mark2", errno, true);
		  }
		  posting_info_buffer_ptr = posting_info_buffer;
	  }

	  // We need to read more postings into the buffer.
	  if (posting_info_buffer_ptr + (numOfPostings * 5 * 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark3", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }

		for(unsigned int i = 0; i< numOfPostings; i++){
			assert((posting_info_buffer_ptr + (4 + 4 + 4 + 4 + 4)) <= (posting_info_buffer + postingInfoBufferSize));
			memcpy(&termID, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// ignore the static probability value cause we will get combination probability directly
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// ignore the dynamic probability value cause we will get combination probability directly
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get the combined final probability value from here
			memcpy(&finalProbablityForComparizon, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			// get the impact score of BM25, that's the thing I really needed.
			memcpy(&partialBM25Score, posting_info_buffer_ptr, 4);
			posting_info_buffer_ptr += 4;
			num_bytes_read += 4;

			if(docIDSelectedDict.count(docID)>0){
				std::stringstream ss;
				ss << termID;
				ss >> termIDInStringFormat;
				// cout << termID << " " << partialBM25Score << endl;
				uint32_t impactScoreInPositionForm = partialBM25Score * 1000000;
				// cout << termIDANDTermDict[termIDInStringFormat] << " " << termIDANDTermDict[termIDInStringFormat] << " " << docID << " " << partialBM25Score << endl;
				// debug
				//if(termIDANDTermDict[termIDInStringFormat].length() == 0){
				//	cout << termIDInStringFormat << endl;
				//	cout << termIDANDTermDict[termIDInStringFormat] << endl;
				//	exit(1);
				//}

				callback_->ProcessTerm(termIDANDTermDict[termIDInStringFormat].c_str(), termIDANDTermDict[termIDInStringFormat].length(), docID, impactScoreInPositionForm, 0, partialBM25Score);
				numOfPostingRecorded += 1;
			}
		}
    }
    delete[] posting_info_buffer;
    posting_info_buffer = NULL;
	cout << "--->num_bytes_read: " << num_bytes_read << endl;

	// step4: fake fill the aux data structure
	cout << "fake fill begins2..." << endl;
	// fake fill the URL for the document collection
	string fakeURL = "http://www.loveWei.com";
	string fakeDocno = "GX000-00-00000000";
	// unsigned int fakeDocSize = 1;
	counter = 0;
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		callback_->ProcessDocno(fakeDocno.c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	// cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;
	GetPostingCollectionController().Finish();
	inputfile.close();
	*/
}

/*
template<class Callback>
  void Parser<Callback>::ParseIncomingPostingStreamForFormat1(){
	cout << "correct version 20140922" << endl;
	string infoIncomingSourceFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kInfoIncomingSource));
	cout << "infoIncomingSourceFileName: " << infoIncomingSourceFileName << endl;
	if (infoIncomingSourceFileName == "N/A"){
		exit(1);
	}

	string incomingPostingStreamFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kIncomingPostingStreamFileNameForFormat1));
	cout << "incomingPostingStreamFileName: " << incomingPostingStreamFileName << endl;
	if (incomingPostingStreamFileName == "N/A"){
    	exit(1);
    }
	incomingPostingStreamFileName = "/home/vgc/wei/workspace/NYU_IRTK/results/unigramFromWei/gov2/" + incomingPostingStreamFileName;
	cout << "incomingPostingStreamFileName: " << incomingPostingStreamFileName << endl;
	// step1: load term and termID transfer
	cout << "Load term, termID info" << endl;
	// %, # of postings
	// 10
	// 64519480.1
	// 129038960.2
	// 193558440.3
	// 258077920.4
	// 322597400.5
	// 387116880.6
	// 451636360.7
	// 516155840.8
	// 580675320.9
	// 645194801.0
	// 1290389602.0
	// 1935584403.0
	// 2580779204.0
	// 3225974005.0
	// 3871168806.0
	// 4516363607.0
	// 5161558408.0
	// 5806753209.0

	uint64_t NUM_OF_POSTINGS_NEEDED_TO_PROCESS = atol(Configuration::GetConfiguration().GetValue(config_properties::kNumPostingsToProcess).c_str());

	map<string,uint32_t> termANDTermIDDict;
	map<uint32_t,string> termIDANDTermDict;
	// termIDANDTermDict[23990334] = "irs";
	// termIDANDTermDict[28508548] = "nyc";
	// termANDTermIDDict["irs"] = 23990334;
	// termANDTermIDDict["nyc"] = 28508548;

	string currentLine;
	ifstream inputfile(infoIncomingSourceFileName.c_str());
	uint32_t lineCounter = 0;
	cout << "NUM_OF_POSTINGS_NEEDED_TO_PROCESS(really): " << NUM_OF_POSTINGS_NEEDED_TO_PROCESS << endl;
	while ( inputfile.good() ){
		getline (inputfile,currentLine);
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );

			string termIDInStrFormat;
			uint32_t termID;
			string term;
			string wildCat;

		    iss >> termIDInStrFormat;
		    iss >> term;
		    iss >> wildCat;

		    termID = atoi(termIDInStrFormat.c_str());
		    termANDTermIDDict[term] = termID;
		    termIDANDTermDict[termID] = term;

		    lineCounter += 1;
		    if(lineCounter % 1000000 == 0){
		    	cout << lineCounter << " lines processed." << endl;
		    }
		}
	}
	cout << "termANDTermIDDict[000000002]:" << termANDTermIDDict["000000002"] << endl;
	cout << "termIDANDTermDict[10000]:" << termIDANDTermDict[10000] << endl;
	cout << "termANDTermIDDict.size():" << termANDTermIDDict.size() << endl;
	cout << "termIDANDTermDict.size():" << termIDANDTermDict.size() << endl;


	// step2:
	// key: docID
	// value: a list of termIDs
	map<uint32_t,uint32_t> docIDAndSizeDict;
	uint32_t counter = 0;
	for(; counter <25205179;counter += 1){
		docIDAndSizeDict[counter] = 0;
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs init." << endl;
	    }
	}
	map<uint32_t,vector<uint32_t> > documentPostingTermIDArrayDict;
	map<uint32_t,vector<float> > documentPostingScoreArrayDict;

    int posting_info_fd;
    posting_info_fd = open(incomingPostingStreamFileName.c_str(), O_RDONLY);	// the input is in Binary format
    struct stat stat_buf;
    fstat(posting_info_fd, &stat_buf);
    off_t posting_info_file_size = stat_buf.st_size;
    off_t num_bytes_read = 0;

    cout << "posting_info_file_size: " << posting_info_file_size << endl;

    // Size of buffer for reading postings
    // 1024 * 1024 Bytes
    // int postingInfoBufferSize = 1073741824;
    int read_ret = 0;
    int postingInfoBufferSize = 1048576;
    char* posting_info_buffer = new char[postingInfoBufferSize];	// Pointer to the current portion of the posting info we're buffering.
    char* posting_info_buffer_ptr = posting_info_buffer;                    // Current position in the posting info buffer.
    read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
    uint64_t num_posting_processed = 0;
    while (num_bytes_read < posting_info_file_size) {

	  // We need to load more data in this case(In the buffer, there is not enough for one posting tuple information(16 bytes))
	  if (posting_info_buffer_ptr + (4 + 4 + 4 + 4) > posting_info_buffer + postingInfoBufferSize) {
		lseek(posting_info_fd, num_bytes_read, SEEK_SET);  // Seek just past where we last read data.
		read_ret = read(posting_info_fd, posting_info_buffer, postingInfoBufferSize);
		if (read_ret < 0) {
		  GetErrorLogger().LogErrno("trying to read,mark1", errno, true);
		}
		posting_info_buffer_ptr = posting_info_buffer;
	  }


	  // format of the incoming posting stream
	  // termID(4 bytes),docID(4 bytes),probability(4 bytes),impactScore(4 bytes)
      // get the docID
	  uint32_t termID = 0;
	  uint32_t docID = 0;
	  float probability = 0;
	  float impactScore = 0;

	  // get the termID
      assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
      memcpy(&termID, posting_info_buffer_ptr, 4);
      posting_info_buffer_ptr += 4;
      num_bytes_read += 4;

      // get the docID
      assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
      memcpy(&docID, posting_info_buffer_ptr, 4);
      posting_info_buffer_ptr += 4;
      num_bytes_read += 4;

      // get the probability
      assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
      memcpy(&probability, posting_info_buffer_ptr, 4);
      posting_info_buffer_ptr += 4;
      num_bytes_read += 4;

      // get the impact score
      assert((posting_info_buffer_ptr + 4) <= (posting_info_buffer + postingInfoBufferSize));
      memcpy(&impactScore, posting_info_buffer_ptr, 4);
      posting_info_buffer_ptr += 4;
      num_bytes_read += 4;
      num_posting_processed += 1;

      // cout << num_posting_processed++ << " " << termID << " " << docID << " " << probability << " " << impactScore << endl;

      docIDAndSizeDict[docID] += 1;
      documentPostingTermIDArrayDict[docID].push_back(termID);
      documentPostingScoreArrayDict[docID].push_back(impactScore);

  	  if(num_posting_processed % 100000 == 0){
  		  cout << "num_posting_processed: " << num_posting_processed << " processed." << endl;
  	  }

      if(num_posting_processed == NUM_OF_POSTINGS_NEEDED_TO_PROCESS){
    	  cout << "Done" << endl;
    	  break;
      }

    }

    cout << documentPostingTermIDArrayDict.size() << endl;
    cout << documentPostingScoreArrayDict.size() << endl;
    cout << "num_posting_processed: " << num_posting_processed << endl;
    // Let's deal with the posting popping problems.
    string tempTerm = "";
    float tempScore = 0.0;
    uint32_t impactScoreInPositionForm = 0;
    for(uint32_t docID = 0; docID < 25205179; docID++){
    	if (documentPostingTermIDArrayDict.count(docID) > 0){
    		for(uint32_t counter = 0; counter < documentPostingTermIDArrayDict[docID].size(); counter++){
    			tempTerm = termIDANDTermDict[documentPostingTermIDArrayDict[docID][counter]];
    			tempScore = documentPostingScoreArrayDict[docID][counter];
    			impactScoreInPositionForm = tempScore * 1000000;
    			// cout << documentPostingTermIDArrayDict[docID][counter] << " " << tempTerm << " " << docID << " " << tempScore << endl;
    			callback_->ProcessTerm(tempTerm.c_str(), tempTerm.length(), docID, impactScoreInPositionForm, 0, tempScore);
    		}
    	}
    	else{
    		// do NOT do anything
    	}
    }

	// step4: fake fill the aux data structure
	cout << "fake fill begins2..." << endl;
	// fake fill the URL for the document collection
	string fakeURL = "http://www.loveWei.com";
	string fakeDocno = "GX000-00-00000000";
	// unsigned int fakeDocSize = 1;
	counter = 0;
	for(; counter <25205179;counter += 1){
	    uint32_t docID = counter;
		callback_->ProcessDocno(fakeDocno.c_str(), 17, docID);
	    callback_->ProcessUrl(fakeURL.c_str(), 22, docID);
	    callback_->ProcessDocLength(docIDAndSizeDict[docID], docID);
	    if(counter % 1000000 == 0){
	    	cout << counter << " docs processed." << endl;
	    }
	}
	cout << "docIDAndSizeDict[22470813]: " << docIDAndSizeDict[22470813] << endl;

    GetPostingCollectionController().Finish();
}
*/


// Scans the TREC data in 'buf' starting at 'curr_p', tokenizing indexable
// words and dispatching them (plus DOCNO, URL, and document-length events) to
// the callback.
//
// Parameters:
//   buf            - start of the raw data buffer.
//   buf_len        - length of 'buf' in bytes; must be > 0.
//   doc_id         - current document id; incremented on each closing </DOC>
//                    when parsing_mode_ == kManyDoc (in/out).
//   avg_doc_length - accumulates the total word count of completed documents
//                    (in/out); despite the name it is a running sum, not an
//                    average.
//   curr_p         - current parse position within 'buf' (in/out); advanced
//                    to wherever parsing stopped.
// Returns: the number of documents completed during this call
//          (final doc_id minus initial doc_id).
template<class Callback>
  int Parser<Callback>::ParseTrecDataBuffer(const char* buf, int buf_len, uint32_t& doc_id, int& avg_doc_length, const char*& curr_p) {
  assert(buf != NULL);
  assert(buf_len > 0);

  uint32_t initial_doc_id = doc_id;

  Tag tag_ret;  // The special type of tag we encountered.

  unsigned char context = '\0';  // Bit array for the context.
  uint32_t position = 0;         // Tracks position of each word, final position for a document is it's size in words.

  // For parsing HTML.
  bool in_closing_tag = false;  // True when we're parsing a closing tag.
  bool in_script = false;       // True when we're parsing contents of script tag.

  // For TREC documents.
  bool in_doc = false;     // True when we're parsing contents of doc tag.
  bool in_docno = false;   // True when we're parsing contents of docno tag.
  bool in_dochdr = false;  // True when we're parsing contents of dochdr tag.

  // Track the starting point of various things we want to parse out.
  const char* word_p;        // Standalone word.
  const char* url_p;         // TREC document URL.
  const char* docno_p;       // TREC document number.
  const char* tag_p = NULL;  // Tracks the starting point of a tag; doubles as a flag as to whether we're currently in a tag.

  // 'in_doc' is written by the kTagDoc case below but never read afterwards;
  // this no-op read keeps "set but not used" compiler warnings quiet without
  // the dead 'if (false)' branch the original used.
  (void) in_doc;

  while (IsWithinBounds(curr_p, buf, buf_len)) {
    if (!IsIndexable(*curr_p)) {
      if (*curr_p != '>') {
        if (*curr_p == '<') {
          tag_p = curr_p;
        }
        ++curr_p;
        continue;
      }

      if (!tag_p) {
        ++curr_p;
        continue;
      }

      // At this point, we must have just seen the end of a closing tag, '>'.
      ++curr_p;
      tag_ret = ProcessTag(tag_p, curr_p - tag_p, in_closing_tag, doc_id);

      switch (tag_ret) {
        case kTagNot:
          break;

        case kTagB:
          UpdateContext(context, in_closing_tag, kContextB);
          break;

        case kTagI:
          UpdateContext(context, in_closing_tag, kContextI);
          break;

        case kTagH:
          UpdateContext(context, in_closing_tag, kContextH);
          break;

        case kTagTitle:
          UpdateContext(context, in_closing_tag, kContextT);
          break;

        case kTagScript:
          in_script = !in_closing_tag;
          break;

        case kTagDoc:
          if (doc_type_ != kTrec)
            break;

          if (in_closing_tag) {
            in_doc = false;

            // The position at this time is actually the document length.
            avg_doc_length += position;
            /*
            #ifdef PARSERINL_DEBUG
              std::cout << "doc_length is: " << position << std::endl;
              std::cout << "doc_id is: " << doc_id << std::endl;
            #endif
            */
            callback_->ProcessDocLength(position, doc_id);

            // This only applies when we're parsing multiple documents in one go.
            if (parsing_mode_ == kManyDoc) {
              context = 0;
              position = 0;
              ++doc_id;

              // Need to reset certain properties before moving on to the next document.
              in_script = false;
            }
          }
          else {
            in_doc = true;
          }
          break;

        case kTagDocno:
          if (doc_type_ != kTrec)
            break;

          in_docno = !in_closing_tag;
          break;

        case kTagDochdr:
          if (doc_type_ != kTrec)
            break;

          in_dochdr = !in_closing_tag;
          break;

        default:
          break;
      }

      tag_p = NULL;
      continue;
    }

    // Ignore everything between <script></script> tags and ignore inner contents of tags.
    if (in_script || tag_p) {
      ++curr_p;
      continue;
    }

    if (doc_type_ == kTrec)
    {
      if (in_docno) {
        // Everything up to the next '<' is the document number.
        docno_p = curr_p;
        while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != '<') {
          ++curr_p;
        }

        /*
        #ifdef PARSERINL_DEBUG
          std::cout << "docno is: " << docno_p << std::endl;
          std::cout << "docno_len is: " << curr_p - docno_p << std::endl;
          std::cout << "doc_id is: " << doc_id << std::endl;
        #endif
        */
        callback_->ProcessDocno(docno_p, curr_p - docno_p, doc_id);

        continue;
      } else if (in_dochdr) {
        // The first line of the DOCHDR is the URL; index its words with the
        // URL context bit set.
        BitSet(context, kContextU);

        url_p = curr_p;
        while (IsWithinBounds(url_p, buf, buf_len) && *url_p != '\n') {
          if (!IsIndexable(*url_p)) {
            url_p++;
            continue;
          }

          word_p = url_p;
          while (IsWithinBounds(url_p, buf, buf_len) && IsIndexable(*url_p)) {
            url_p++;
          }

          //IndexingParserCallback::ProcessTerm called(), amazing and I do not know why?
          // std::cout << "test2 in" << std::endl;
          callback_->ProcessTerm(word_p, url_p - word_p, doc_id, position++, context, true);
          // std::cout << "test2 out" << std::endl;
        }

        BitUnset(context, kContextU);
        /*
        #ifdef PARSERINL_DEBUG
          std::cout << "url is: " << curr_p << std::endl;
          std::cout << "url_len is: " << url_p - curr_p << std::endl;
          std::cout << "doc_id is: " << doc_id << std::endl;
        #endif
        */
        callback_->ProcessUrl(curr_p, url_p - curr_p, doc_id);

        curr_p = url_p + 1;
        // Skip the rest of the dochdr contents (making sure that we're at the end of the dochdr).
        while (IsWithinBounds(curr_p, buf, buf_len)) {
          if (*curr_p == '<') {
            const char kDocHdrClosingTag[] = "</DOCHDR>";
            // Make sure it's actually the closing tag.
            if (IsWithinBounds(curr_p + sizeof(kDocHdrClosingTag) - 1, buf, buf_len) && strncasecmp(curr_p, kDocHdrClosingTag, sizeof(kDocHdrClosingTag) - 1) == 0) {
              break;
            }
          }
          ++curr_p;
        }

        continue;
      }
    }

    // Ordinary document text: consume one maximal run of indexable characters
    // and emit it as a term at the current position/context.
    word_p = curr_p;
    while (IsWithinBounds(curr_p, buf, buf_len) && IsIndexable(*curr_p)) {
      ++curr_p;
    }
    // std::cout << "test3 in" << std::endl;
    callback_->ProcessTerm(word_p, curr_p - word_p, doc_id, position++, context, true);
    // std::cout << "test3 out" << std::endl;
  }
  return doc_id - initial_doc_id;
}

// Parses TREC-formatted documents out of 'buf' (length 'buf_len') starting at 'curr_p', emitting
// every indexable term through callback_->ProcessTerm(). This variant additionally echoes each
// term of "priority" documents (docIDs present in priority_docIDs_) to stdout as
// "-----> <docID> <term>" lines, which the downstream bigram extraction consumes.
//
// Side effects: 'doc_id' is incremented past each closing </DOC> tag (kManyDoc mode only),
// 'avg_doc_length' is increased by each completed document's length in words, and 'curr_p' is
// advanced to wherever parsing stopped in the buffer.
// Returns the number of closing </DOC> tags crossed in this buffer (doc_id - initial_doc_id).
template<class Callback>
  int Parser<Callback>::ParseTrecDataBufferForBigrams(const char* buf, int buf_len, uint32_t& doc_id, int& avg_doc_length, const char*& curr_p) {
  std::cout << "ParseTrecDataBufferForBigrams() is called." << endl;
  assert(buf != NULL);
  assert(buf_len > 0);

  bool output_term_flag = false; // for the output of the bigrams
  uint32_t initial_doc_id = doc_id;
  Tag tag_ret;  // The special type of tag we encountered.

  unsigned char context = '\0';  // Bit array for the context.
  uint32_t position = 0;         // Tracks position of each word, final position for a document is it's size in words.

  // For parsing HTML.
  bool in_closing_tag = false;  // True when we're parsing a closing tag.
  bool in_script = false;       // True when we're parsing contents of script tag.

  // For TREC documents.
  bool in_doc = false;     // True when we're parsing contents of doc tag.
  bool in_docno = false;   // True when we're parsing contents of docno tag.
  bool in_dochdr = false;  // True when we're parsing contents of dochdr tag.

  // Track the starting point of various things we want to parse out.
  const char* word_p;        // Standalone word.
  const char* url_p;         // TREC document URL.
  const char* docno_p;       // TREC document number.
  const char* tag_p = NULL;  // Tracks the starting point of a tag; doubles as a flag as to whether we're currently in a tag.

  // Dead code: never executed; presumably kept to silence the compiler's
  // unused-variable warning for 'in_doc' — TODO confirm.
  if(false){
	 cout << in_doc << endl;
  }

  // Decide up front whether the current document's terms should be echoed to stdout.
  if (priority_docIDs_.count(doc_id) > 0){
	  output_term_flag = true;
	  // std::cout << "doc_id: " << doc_id << " " << output_term_flag << std::endl;
  }
  else{
	  output_term_flag = false;
	  // std::cout << "doc_id: " << doc_id << " " << output_term_flag << std::endl;
  }

  while (IsWithinBounds(curr_p, buf, buf_len)) {
    // Non-indexable characters: either tag machinery ('<', '>') or separators to skip.
    if (!IsIndexable(*curr_p)) {
      if (*curr_p != '>') {
        if (*curr_p == '<') {
          tag_p = curr_p;
        }
        ++curr_p;
        continue;
      }

      // A '>' with no recorded '<' start: stray character, ignore it.
      if (!tag_p) {
        ++curr_p;
        continue;
      }

      // At this point, we must have just seen the end of a closing tag, '>'.
      ++curr_p;
      tag_ret = ProcessTag(tag_p, curr_p - tag_p, in_closing_tag, doc_id);

      // Update the term context bits / parser state according to the tag we just saw.
      switch (tag_ret) {
        case kTagNot:
          break;

        case kTagB:
          UpdateContext(context, in_closing_tag, kContextB);
          break;

        case kTagI:
          UpdateContext(context, in_closing_tag, kContextI);
          break;

        case kTagH:
          UpdateContext(context, in_closing_tag, kContextH);
          break;

        case kTagTitle:
          UpdateContext(context, in_closing_tag, kContextT);
          break;

        case kTagScript:
          in_script = in_closing_tag ? false : true;
          break;

        case kTagDoc:
          if (doc_type_ != kTrec)
            break;

          if (in_closing_tag) {
            in_doc = false;

            // The position at this time is actually the document length.
            avg_doc_length += position;
            callback_->ProcessDocLength(position, doc_id);
            // debug
            // std::cout << "doc_length:" << " " << doc_id << " " << position << std::endl;

            // This only applies when we're parsing multiple documents in one go.
            if (parsing_mode_ == kManyDoc) {
              context = 0;
              position = 0;
              ++doc_id;
              // Re-evaluate the stdout-echo flag for the document we just entered.
              if (priority_docIDs_.count(doc_id) > 0){
            	  output_term_flag = true;
            	  // std::cout << doc_id << " " << output_term_flag << std::endl;
              }
              else{
            	  output_term_flag = false;
            	  // std::cout << doc_id << " " << output_term_flag << std::endl;
              }
              // Need to reset certain properties before moving on to the next document.
              in_script = false;
            }
          }
          else {
            in_doc = true;
          }
          break;

        case kTagDocno:
          if (doc_type_ != kTrec)
            break;

          in_docno = in_closing_tag ? false : true;
          break;

        case kTagDochdr:
          if (doc_type_ != kTrec)
            break;

          in_dochdr = in_closing_tag ? false : true;
          break;

        default:
          break;
      }

      tag_p = NULL;
      continue;
    }

    // Ignore everything between <script></script> tags and ignore inner contents of tags.
    if (in_script || tag_p) {
      ++curr_p;
      continue;
    }

    if (doc_type_ == kTrec)
    {
      // Everything between <DOCNO> and the next '<' is the document number.
      if (in_docno) {
        docno_p = curr_p;
        while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != '<') {
          ++curr_p;
        }
        callback_->ProcessDocno(docno_p, curr_p - docno_p, doc_id);

        continue;
      } else if (in_dochdr) {
        // Inside <DOCHDR>: the first line is the document URL. Index its components as terms
        // with the URL context bit set, then report the whole URL.
        BitSet(context, kContextU);

        url_p = curr_p;
        while (IsWithinBounds(url_p, buf, buf_len) && *url_p != '\n') {
          if (!IsIndexable(*url_p)) {
            url_p++;
            continue;
          }

          word_p = url_p;
          while (IsWithinBounds(url_p, buf, buf_len) && IsIndexable(*url_p)) {
            url_p++;
          }

          callback_->ProcessTerm(word_p, url_p - word_p, doc_id, position++, context, true);
          // Echo the term for priority documents (consumed by bigram extraction).
          if (output_term_flag){
        	  std::string curr_term = std::string(word_p, url_p - word_p);
        	  std::cout << "-----> " << doc_id << " " << curr_term << std::endl;
          }
        }

        BitUnset(context, kContextU);
        callback_->ProcessUrl(curr_p, url_p - curr_p, doc_id);

        curr_p = url_p + 1;
        // Skip the rest of the dochdr contents (making sure that we're at the end of the dochdr).
        while (IsWithinBounds(curr_p, buf, buf_len)) {
          if (*curr_p == '<') {
            const char kDocHdrClosingTag[] = "</DOCHDR>";
            // Make sure it's actually the closing tag.
            if (IsWithinBounds(curr_p + sizeof(kDocHdrClosingTag) - 1, buf, buf_len) && strncasecmp(curr_p, kDocHdrClosingTag, sizeof(kDocHdrClosingTag) - 1) == 0) {
              break;
            }
          }
          ++curr_p;
        }

        continue;
      }
    }

    // Ordinary document text: scan one indexable word and report it.
    word_p = curr_p;
    while (IsWithinBounds(curr_p, buf, buf_len) && IsIndexable(*curr_p)) {
      ++curr_p;
    }

    callback_->ProcessTerm(word_p, curr_p - word_p, doc_id, position++, context, true);
    // Echo the term for priority documents (consumed by bigram extraction).
    if (output_term_flag){
  	  std::string curr_term = std::string(word_p, curr_p - word_p);
  	  std::cout << "-----> " << doc_id << " " << curr_term << std::endl;
    }
  }
  return doc_id - initial_doc_id;
}

// Classifies the tag in ['tag', 'tag' + 'tag_len') and reports whether it is a closing tag via
// 'in_closing_tag'. Any tag whose name starts with 'a'/'A' has its first quoted attribute value
// reported to the callback as a link (for 'doc_id') and is classified kTagNot.
template<class Callback>
  typename Parser<Callback>::Tag Parser<Callback>::ProcessTag(const char* tag, int tag_len, bool& in_closing_tag, uint32_t doc_id) {
    // Caller must ensure tag_len is always at least 2, for tag "<>".
    assert(tag_len >= 2);

    const char* name_p = tag + 1;  // Skip the opening '<'.

    // A '/' immediately after '<' marks a closing tag.
    in_closing_tag = (*name_p == '/');
    if (in_closing_tag) {
      ++name_p;
    }

    const char first = *name_p;

    if (first == 'a' || first == 'A') {
      // Treated as a potential anchor: extract the first quoted attribute value as the link.
      const char* link_start = NULL;
      const char* link_end = NULL;

      for (++name_p; IsWithinBounds(name_p, tag, tag_len); ++name_p) {
        if (*name_p != '"' && *name_p != '\'')
          continue;
        if (link_start == NULL) {
          link_start = name_p + 1;  // Opening quote: value begins just past it.
        } else {
          link_end = name_p;        // Closing quote: value ends here.
          break;
        }
      }

      if (link_start != NULL && link_end != NULL) {
        callback_->ProcessLink(link_start, link_end - link_start, doc_id);
      }

      return Parser::kTagNot;
    }

    if (first == 'b' || first == 'B') {
      return IsValidTag(name_p, tag, tag_len, "b") ? Parser::kTagB : Parser::kTagNot;
    }

    if (first == 'i' || first == 'I') {
      return IsValidTag(name_p, tag, tag_len, "i") ? Parser::kTagI : Parser::kTagNot;
    }

    if (first == 'e' || first == 'E') {
      // <em> is treated the same as <i>.
      return IsValidTag(name_p, tag, tag_len, "em") ? Parser::kTagI : Parser::kTagNot;
    }

    if (first == 'h' || first == 'H') {
      // Headings <h1> through <h6>; otherwise check for <html>.
      if (AreValidTags(name_p, tag, tag_len, "h", "1", "6", 2)) {
        return Parser::kTagH;
      }
      return IsValidTag(name_p, tag, tag_len, "html") ? Parser::kTagHtml : Parser::kTagNot;
    }

    if (first == 't' || first == 'T') {
      return IsValidTag(name_p, tag, tag_len, "title") ? Parser::kTagTitle : Parser::kTagNot;
    }

    if (first == 's' || first == 'S') {
      // <strong> is treated the same as <b>; <script> toggles script skipping in the caller.
      if (IsValidTag(name_p, tag, tag_len, "strong")) {
        return Parser::kTagB;
      }
      return IsValidTag(name_p, tag, tag_len, "script") ? Parser::kTagScript : Parser::kTagNot;
    }

    if (first == 'd' || first == 'D') {
      // TREC structural tags; the longer names are tested first so "doc" doesn't shadow them.
      if (IsValidTag(name_p, tag, tag_len, "dochdr")) {
        return Parser::kTagDochdr;
      }
      if (IsValidTag(name_p, tag, tag_len, "docno")) {
        return Parser::kTagDocno;
      }
      return IsValidTag(name_p, tag, tag_len, "doc") ? Parser::kTagDoc : Parser::kTagNot;
    }

    return Parser::kTagNot;
  }

// Returns true if the tag whose name begins at 'curr_tag_p' (just past '<' or '</', within the
// tag span ['tag', 'tag' + 'tag_len')) is exactly 'curr_tag_name', optionally followed by spaces
// and terminated by '>'. Tags carrying attributes (any non-space before '>') are rejected.
// The name comparison is case-insensitive.
template<class Callback>
  bool Parser<Callback>::IsValidTag(const char* curr_tag_p, const char* tag, int tag_len, const char curr_tag_name[]) {
    const size_t curr_tag_name_len = strlen(curr_tag_name);

    // 'tag' is a slice of the input buffer and is not NUL-terminated, so make sure the candidate
    // name fits inside the span before handing it to strncasecmp(); otherwise the comparison
    // could read past the end of the tag. (A name that doesn't fit could never match anyway,
    // since the span always ends with '>'.)
    if (!IsWithinBounds(curr_tag_p + curr_tag_name_len - 1, tag, tag_len)) {
      return false;
    }

    if (!strncasecmp(curr_tag_p, curr_tag_name, curr_tag_name_len)) {
      curr_tag_p += curr_tag_name_len;
      // Skip any trailing spaces; the next non-space character must be the closing '>'.
      for (; IsWithinBounds(curr_tag_p, tag, tag_len); ++curr_tag_p) {
        if (*curr_tag_p != ' ') {
          if (*curr_tag_p == '>')
            return true;
          else
            break;
        }
      }
    }

    return false;
  }

// Returns true iff 'curr' still points inside the 'len'-byte buffer beginning at 'start',
// i.e. its offset from 'start' has not yet reached 'len'.
template<class Callback>
  inline bool Parser<Callback>::IsWithinBounds(const char* curr, const char* start, int len) {
    return curr < (start + len);
  }

// Returns true if the tag whose name begins at 'curr_tag_p' (within the tag span
// ['tag', 'tag' + 'tag_len')) consists of 'curr_tag_base' followed by a suffix that compares
// between 'start_range' and 'end_range' over 'range_len' bytes, then optional spaces and the
// closing '>'. Used to match numbered tag families such as <h1> through <h6>. The base
// comparison is case-insensitive; the range comparison is an exact byte comparison.
template<class Callback>
  bool Parser<Callback>::AreValidTags(const char* curr_tag_p, const char* tag, int tag_len, const char curr_tag_base[], const char start_range[],
                                      const char end_range[], int range_len) {
    const size_t curr_tag_base_len = strlen(curr_tag_base);

    // 'tag' is a non-NUL-terminated slice of the input buffer, so bounds-check before calling
    // strncasecmp(); otherwise it could read past the end of the tag span. (A base that doesn't
    // fit could never match anyway, since the span always ends with '>'.)
    if (!IsWithinBounds(curr_tag_p + curr_tag_base_len - 1, tag, tag_len)) {
      return false;
    }

    if (!strncasecmp(curr_tag_p, curr_tag_base, curr_tag_base_len)) {
      curr_tag_p += curr_tag_base_len;

      // The range suffix must also lie fully within the tag span.
      if (IsWithinBounds(curr_tag_p + range_len - 1, tag, tag_len)) {
        if (strncmp(curr_tag_p, start_range, range_len) >= 0 && strncmp(curr_tag_p, end_range, range_len) <= 0) {
          curr_tag_p += range_len;

          // Skip trailing spaces; the next non-space character must be the closing '>'.
          for (; IsWithinBounds(curr_tag_p, tag, tag_len); ++curr_tag_p) {
            if (*curr_tag_p != ' ') {
              if (*curr_tag_p == '>')
                return true;
              else
                break;
            }
          }
        }
      }
    }

    return false;
  }

// Scans the WARC record header that starts at 'curr_p' (within 'buf' of length 'buf_len'),
// parsing "Key: value" lines until the blank line that terminates the header. The only field
// extracted is Content-Length, which is stored into header->content_length so the caller knows
// how long the record's payload is. Returns the number of bytes consumed from 'curr_p'.
template<class Callback>
  int Parser<Callback>::ProcessWarcInfoHeader(const char* buf, int buf_len, const char* curr_p, WarcHeader* header) {
      const char* header_start = curr_p;

      while (IsWithinBounds(curr_p, buf, buf_len)) {
        // Header fields are parsed only after a '\n'; the record's first line (the WARC version
        // line) is skipped one character at a time by the else branch below.
        if (*curr_p == '\n') {
          ++curr_p;

          // Double new line means we read the complete header.
          if (IsWithinBounds(curr_p, buf, buf_len) && *curr_p == '\n') {
            ++curr_p;
            break;
          }

          // Key runs from here up to the ':' separator.
          const char* key_start = curr_p;
          int key_length = 0;
          while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != ':') {
            ++curr_p;
            ++key_length;
          }

          #ifdef PARSERINL_DEBUG
            std::string warc_key = std::string(key_start, key_length);
            //std::cout << "WARC key: " << warc_key << std::endl;
          #endif



          ++curr_p;

          // Tolerate the conventional single space after "Key:".
          if (IsWithinBounds(curr_p, buf, buf_len) && *curr_p == ' ') {
            ++curr_p;
          }

          // Value runs from here to the end of the line.
          const char* value_start = curr_p;
          int value_length = 0;

          while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != '\n') {
            ++curr_p;
            ++value_length;
          }

          const char kContentLengthStr[] = "Content-Length";
          const int kContentLengthStrLen = sizeof(kContentLengthStr) - 1;

          #ifdef PARSERINL_DEBUG
            std::string warc_value = std::string(value_start, value_length);
            //std::cout << "WARC value: " << warc_value << std::endl;
          #endif

          // We need to know how long the document content is.
          if (key_length == kContentLengthStrLen && strncmp(key_start, kContentLengthStr, kContentLengthStrLen) == 0) {
            char content_length_buf[11];  // Has to fit a 4 byte integer and a terminating null character.
            assert(value_length < 11);
            memcpy(content_length_buf, value_start, value_length);
            content_length_buf[value_length] = '\0';
            header->content_length = atoi(content_length_buf);
          }
        } else {
          ++curr_p; // Skips past the 'WARC/0.18' part.
        }
      }

      return (curr_p - header_start);
  }


// Extracts the host name from 'inputURL', assuming a scheme prefix of the "http://" shape:
// the characters between position 7 (just past "http://") and the next '/' form the host,
// with a leading "www." stripped. Returns "" when no '/' follows the scheme prefix
// (scheme-only or malformed URLs).
//
// Cleanup vs. the previous version: removed the unused locals ('line', the redundant copy 'x',
// and the dead query-extraction block whose result was never used) and replaced
// boost::starts_with() with std::string::compare(), which behaves identically here.
template<class Callback>
	string Parser<Callback>::parseURLForDomainName(string inputURL){
		string host_name = "";
		trim(inputURL);  // Project helper: strips surrounding whitespace in place.

		// Find the '/' that terminates the host part, skipping the scheme prefix.
		size_t sp = inputURL.find_first_of( '/', 7 /* skip http:// part */ );
		if ( sp != string::npos ) {
			// find_first_of() returned a position >= 7, so this iterator arithmetic is safe.
			string base_url( inputURL.begin()+7, inputURL.begin()+sp );
			// Drop a leading "www." so "www.example.com" and "example.com" agree.
			// compare() returns non-zero for hosts shorter than 4 chars, same as starts_with.
			if (base_url.compare(0, 4, "www.") == 0){
				host_name = base_url.substr(4);
			}
			else{
				host_name = base_url;
			}
		}
		return host_name;
	}

// Scans the WARC "response" record header that starts at 'curr_p' (within 'buf' of length
// 'buf_len'), parsing "Key: value" lines until the blank line that ends the header. Extracts:
//   - WARC-Target-URI: stored in header->url/url_len and reported via callback_->ProcessUrl();
//   - WARC-TREC-ID:    stored in header->docno/docno_len and reported via callback_->ProcessDocno();
//   - Content-Length:  stored in header->content_length.
// NOTE: header->url and header->docno are pointers into 'buf'; they remain valid only as long
// as the caller's buffer does. Returns the number of bytes consumed from 'curr_p'.
template<class Callback>
  int Parser<Callback>::ProcessWarcResponseAndHTTPHeader(const char* buf, int buf_len, const char* curr_p, WarcHeader* header, uint32_t& doc_id) {
      const char* header_start = curr_p;
      header->fastPrioritySkip = false;

      while (IsWithinBounds(curr_p, buf, buf_len))
      {
        // Header fields are parsed only after a '\n'; the record's first line (the WARC version
        // line) is skipped one character at a time by the else branch below.
        if (*curr_p == '\n')
        {
          ++curr_p;

          // Double new line means we read the complete header.
          if (IsWithinBounds(curr_p, buf, buf_len) && *curr_p == '\n') {
            ++curr_p;
            break;
          }

          // Key runs from here up to the ':' separator.
          const char* key_start = curr_p;
          int key_length = 0;
          while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != ':') {
            ++curr_p;
            ++key_length;
          }
          std::string warc_key = std::string(key_start, key_length);

          ++curr_p;

          // Tolerate the conventional single space after "Key:".
          if (IsWithinBounds(curr_p, buf, buf_len) && *curr_p == ' ') {
            ++curr_p;
          }

          const char* value_start = curr_p;
          int value_length = 0;

          // The value pointers are captured now; their lengths are known only after the
          // end-of-line scan below, which is when the callbacks are invoked.
          if(warc_key == "WARC-Target-URI"){
        	  header->url = value_start;
          }

          if(warc_key == "WARC-TREC-ID"){
        	  header->docno = value_start;
          }

          // Value runs to the end of the line.
          while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != '\n') {
            ++curr_p;
            ++value_length;
          }
          std::string warc_value = std::string(value_start, value_length);
          // std::cout << "warc_key: " << warc_key << std::endl;
          // std::cout << "warc_value: " << warc_value << std::endl;

          if(warc_key == "WARC-Target-URI"){
        	  header->url_len = value_length;
        	  callback_->ProcessUrl(header->url, header->url_len, doc_id);
          }

          if(warc_key == "WARC-TREC-ID"){
        	  header->docno_len = value_length;
        	  callback_->ProcessDocno(header->docno, header->docno_len, doc_id);
        	  // std::cout << "warc_value: " << warc_value << std::endl;
          }

          if(warc_key == "Content-Length"){
        	  header->content_length = atoi(warc_value.c_str());
          }
        }
        else
        {
          ++curr_p; // Skips past the 'WARC/0.18' part. Correct, this operation also gives me some hint.
        }
      }

      return (curr_p - header_start);
  }

#endif /* PARSERINL_H_ */
