//==============================================================================================================================================================
// Author(s): Roman Khmelichek, Wei Jiang
//
// We don't accumulate the whole vocabulary in main memory during indexing, instead we write out runs, (partial indices) and then merge them at the end.
// We accumulate postings in memory for each term in compressed form (using variable byte encoding). We use var byte coding even for non gap coded docIDs and
// positions (gap coding would produce much smaller integers, so compression would work better) since most docIDs and positions will be smaller than a full
// integer, so var byte coding would help in this case too.
//
// TODO: Need a way to handle encoding of "magic bytes" into our blocks. These can contain fixed width or variable width data. Can use contexts as a test case
//       of fixed width magic bytes.
//
// TODO: Might want to have per list compression (different compression algorithms for different lists, since they might have different characteristics). Also,
//       maybe per chunk compression (like OptPForDelta).
//
// TODO: Try new method of in-memory compression of postings. Collect n postings (or maybe until we run out of space in a block), then recompress them
//       (and sort them if they're not in order) so that we can compact the postings even further, with a good compression method.
//
// TODO: If building an index without positions, contexts, and other per docID information, can optimize usage of the memory pool as follows. For each
//       TermBlock, buffer the frequency count in memory for the last docID inserted for that particular term. Once we get a new docID (or we dump the run),
//       can write the buffered frequency count into the memory pool. This allows more efficient use of the memory pool since we write each docID only once
//       for a particular term, as opposed to multiple times, and then calculating the frequency later.
//==============================================================================================================================================================

#include "posting_collection.h"


#include <algorithm>
#include <boost/algorithm/string.hpp>
#include <cassert>
#include <cctype>
#include <cstdlib>
#include <cstring>
#include <math.h>
#include <iostream>
#include <iomanip>      // std::setprecision
#include <limits>
#include <string>
#include <utility>

#include "coding_policy_helper.h"
#include "config_file_properties.h"
#include "configuration.h"
#include "globals.h"
#include "index_build.h"
#include "index_util.h"
#include "logger.h"
#include "meta_file_properties.h"

using namespace boost;
using namespace std;
using namespace logger;

// Process-wide PostingCollectionController accessor (Meyers singleton:
// the instance is constructed on first use).
PostingCollectionController& GetPostingCollectionController() {
  static PostingCollectionController instance;
  return instance;
}

// Process-wide EdgeCollectionController accessor (Meyers singleton:
// the instance is constructed on first use).
EdgeCollectionController& GetEdgeCollectionController() {
  static EdgeCollectionController instance;
  return instance;
}

// Process-wide MemoryPoolManager accessor (Meyers singleton:
// the instance is constructed on first use).
MemoryPoolManager& GetMemoryPoolManager() {
  static MemoryPoolManager instance;
  return instance;
}

/**************************************************************************************************************************************************************
 * TermBlock
 *
 **************************************************************************************************************************************************************/
// Encoding / decoding for postings into memory pool blocks.
// Shared scratch buffer for one compressed posting: up to 5 var-byte bytes for
// the docID gap, up to 5 for the position gap, plus 1 context byte (see
// EncodePosting()), hence a capacity of 11 bytes.
unsigned char TermBlock::compressed_tmp_posting[11] __attribute__ ((aligned (64)));  // Aligning to a typical cache line should result in
                                                                                     // slightly better performance.
// Number of valid bytes currently held in 'compressed_tmp_posting'; set by EncodePosting().
int TermBlock::compressed_tmp_posting_len;

// Constructs a TermBlock for 'term' of length 'term_len'. A lowercased copy of
// the term is stored; note the copy is NOT null-terminated — 'term_len_'
// records its length. No pool blocks are acquired yet; the block list stays
// empty until the first posting is added.
TermBlock::TermBlock(const char* term, int term_len) :
  memory_pool_manager_(&GetMemoryPoolManager()),
  term_(new char[term_len]),
  term_len_(term_len),
  index_positions_(true),
  index_contexts_(false),
  prev_doc_id_(0),
  prev_position_(0),
  block_list_(NULL),
  last_block_(NULL),
  curr_block_position_(NULL),
  next_(NULL) {
  // Lowercase while copying; the unsigned char cast keeps tolower() well
  // defined for bytes above 127.
  for (int pos = 0; pos < term_len; ++pos) {
    term_[pos] = tolower(static_cast<unsigned char>(term[pos]));
  }
}

// Starts this term's block list with its very first block: head and tail of
// the list are one and the same node.
void TermBlock::InitBlockList(unsigned char* block_start) {
  block_list_ = last_block_ = new BlockList(block_start);
}

void TermBlock::AddBlockToList(unsigned char* block_start) {
  BlockList* new_block = new BlockList(block_start);
  last_block_->set_next_block(new_block);
  last_block_ = new_block;
}

// Releases the lowercased term copy and every BlockList node. The pool memory
// the nodes point into is owned by the MemoryPoolManager, not by us.
TermBlock::~TermBlock() {
  ClearBlockList();
  delete[] term_;
}

void TermBlock::ClearBlockList() {
  BlockList* curr_block = block_list_;
  BlockList* next_block;

  while (curr_block != NULL) {
    next_block = curr_block->next_block();
    delete curr_block;
    curr_block = next_block;
  }

  block_list_ = NULL;
  last_block_ = NULL;
}

// Var-byte encodes 'num' into 'out' and stores the number of bytes written
// into '*len'. Format: 7 payload bits per byte, most significant group first.
// The least significant bit of each byte is a continuation flag — 1 means
// another byte follows, 0 marks the final byte of the integer.
void TermBlock::Encode(uint32_t num, unsigned char* out, int* len) {
  unsigned char groups[5];  // A 32-bit value spans at most ceil(32 / 7) = 5 groups.
  int num_groups = 0;
  // Peel off 7-bit groups, least significant first; payload sits in bits 1..7
  // so bit 0 stays free for the continuation flag.
  do {
    groups[num_groups++] = (num & 0x7F) << 1;
    num >>= 7;
  } while (num != 0);
  *len = num_groups;

  // Emit most significant group first; every byte but the last carries the flag.
  for (int g = num_groups - 1; g > 0; --g) {
    *out++ = groups[g] | 0x01;
  }
  *out = groups[0] & ~0x01;
}

// Decodes one var-byte integer (as produced by Encode()) starting at 'in',
// stores the number of bytes consumed into '*len', and returns the value.
// The least significant bit of each byte is the continuation flag: 1 means
// another byte follows, 0 marks the final byte.
uint32_t TermBlock::Decode(const unsigned char* in, int* len) {
  uint32_t num = (*in >> 1);
  *len = 1;
  // Bug fix: a 32-bit value can occupy up to 5 var-byte groups — Encode()
  // emits 5 bytes for values >= 2^28 and GetVarByteInt() buffers up to 5 —
  // but the previous 4-way unrolled version stopped after 4 bytes and
  // silently returned a truncated value for 5-byte encodings.
  while ((*in & 0x1) != 0 && *len < 5) {
    ++in;
    num = (num << 7) | (*in >> 1);
    ++(*len);
  }
  return num;
}

// Returns the next var byte encoded integer from the current position in our postings list.
// Upon reaching the end of the postings list, returns the max value of uint32_t.
//
// A single var byte integer may straddle a pool block boundary, so its bytes
// are first gathered into a local buffer (following the block chain as
// needed) and only then decoded. Advances 'curr_block_position_' and, when a
// block is exhausted, 'block_list_' — so this consumes the list as it reads.
uint32_t TermBlock::GetVarByteInt() {
  // No blocks were ever allocated for this term (or the list was cleared).
  if (block_list_ == NULL)
    return numeric_limits<uint32_t>::max();

  const int kMaxVarByteDataLen = 5; // 5 bytes for a 32 bit integer.
  unsigned char varbyte_data[kMaxVarByteDataLen];
  unsigned char* varbyte_data_ptr = varbyte_data;
  int varbyte_data_len = 0;

  bool no_more = false;
  while (true) {
    while (block_list_->IsWithinBlock(curr_block_position_)) {
      // A cleared continuation bit marks the final byte of this integer.
      if (!VarByteHasMore(curr_block_position_)) {
        no_more = true;
      }

      assert((varbyte_data_ptr - varbyte_data) < 5);
      *(varbyte_data_ptr++) = *(curr_block_position_++);
      ++varbyte_data_len;

      if (no_more) {
        break;
      }
    }

    if (no_more) {
      break;
    } else {
      // Ran off the end of the current block mid-integer; keep collecting
      // bytes from the start of the next block in the chain.
      block_list_ = block_list_->next_block();
      if (block_list_ == NULL) {
        return numeric_limits<uint32_t>::max();  // Indicates we reached the end of the postings list.
      }

      curr_block_position_ = block_list_->block();
    }
  }

  int decoded_int_len;
  uint32_t decoded_int = Decode(varbyte_data, &decoded_int_len);
  assert(decoded_int != numeric_limits<uint32_t>::max());  // We reserved this value as an indicator of the end of the postings list.
  return decoded_int;
}

// Returns the value of the next byte from our postings list, advancing the
// read cursor. Upon reaching the end of the postings list (no blocks, or the
// block chain is exhausted), returns the max value of uint32_t.
uint32_t TermBlock::GetByte() {
  while (block_list_ != NULL) {
    if (block_list_->IsWithinBlock(curr_block_position_)) {
      return *(curr_block_position_++);
    }

    // Current block exhausted; move the cursor to the start of the next one.
    block_list_ = block_list_->next_block();
    if (block_list_ != NULL) {
      curr_block_position_ = block_list_->block();
    }
  }
  return numeric_limits<uint32_t>::max();  // End of the postings list.
}

// Returns true upon decoding the next posting in the postings list for this term and stores the info into 'decoded_posting'.
// Returns false when there are no more postings in the postings lists for this term and 'decoded_posting' is not modified.
bool TermBlock::DecodePosting(DecodedPosting* decoded_posting) {
  // The docID was stored incremented by one (see EncodePosting()), so:
  //   0          => zero-filled pool memory, i.e. nothing more was ever written here;
  //   UINT32_MAX => we walked off the end of the block chain.
  // Either way, the list is over.
  uint32_t doc_id_plus_one = GetVarByteInt();
  if (doc_id_plus_one == 0 || doc_id_plus_one == numeric_limits<uint32_t>::max()) {
    return false;
  }
  decoded_posting->set_doc_id(doc_id_plus_one - 1);

  // Position is only encoded when positions are being indexed; otherwise 0.
  uint32_t position = 0;
  if (index_positions_) {
    position = GetVarByteInt();
    assert(position != numeric_limits<uint32_t>::max());  // End of list only makes sense at the start of the posting.
  }
  decoded_posting->set_position(position);

  // Context is a single raw byte, only present when contexts are being indexed.
  uint32_t context = 0;
  if (index_contexts_) {
    context = GetByte();
    assert(context != numeric_limits<uint32_t>::max());  // End of list only makes sense at the start of the posting.
  }
  decoded_posting->set_context(context);

  return true;
}

// This interface assumes the postings were accumulated in sorted order (which is true unless we're doing document reordering while indexing).
//
// '*num_docs' initially holds the size of the 'doc_ids' and 'frequencies' arrays.
// '*num_properties' initially holds the size of the 'positions' and 'contexts' arrays.
//
// The accumulated postings for this term will be decoded into the appropriate arrays,
// and '*num_docs' will hold the number of documents which were decoded,
// and '*num_properties' will hold the number of document properties (positions and contexts) which were decoded
// (this is the summation of all the frequency values).
//
// Returns true when some postings have been decoded and false when no postings were able to be decoded.
//
// Note that 'doc_ids' receives docID *gaps* as stored (not absolute docIDs);
// 'prev_chunk_last_doc_id' is only used to reconstruct absolute docIDs for the
// overflow comparison. Postings for the document numbered 'overflow_doc_id'
// are additionally copied into 'overflow_postings' (up to
// '*num_overflow_postings' of them); on return '*num_overflow_postings' holds
// how many were actually captured.
bool TermBlock::DecodePostings(uint32_t* doc_ids, uint32_t* frequencies, uint32_t* positions, unsigned char* contexts, int* num_docs, int* num_properties,
                               DecodedPosting* prev_posting, bool* prev_posting_valid, Posting* overflow_postings, int* num_overflow_postings,
                               uint32_t overflow_doc_id, uint32_t prev_chunk_last_doc_id) {
  assert(doc_ids != NULL);
  assert(frequencies != NULL);
  assert(positions != NULL);
  assert(contexts != NULL);
  assert(num_docs != NULL && *num_docs > 0);
  assert(num_properties != NULL && *num_properties > 0);
  assert(prev_posting != NULL);
  assert(prev_posting_valid != NULL);

  int num_docs_decoded = 0;
  int num_properties_decoded = 0;
  int num_properties_decoded_per_doc = 0;  // Per-document count, capped at ChunkEncoder::kMaxProperties below.

  uint32_t curr_decoded_doc_id = prev_chunk_last_doc_id;
  uint32_t overflow_posting_position = 0;  // To decode the position gaps in the overflow posting.
  int overflow_postings_i = 0;

  // 'prev_posting' may hold a posting read ahead (but not consumed) by the
  // previous call; use it first instead of decoding a fresh one.
  if (!*prev_posting_valid) {
    if (DecodePosting(prev_posting) == false) {
      *num_docs = 0;
      *num_properties = 0;
      *num_overflow_postings = 0;
      return false;
    }
  } else {
    *prev_posting_valid = false;
  }

  // Decode the doc id from the gaps, and see if it's an overflow doc id.
  curr_decoded_doc_id += prev_posting->doc_id();

  if (*num_overflow_postings > 0 && curr_decoded_doc_id == overflow_doc_id) {
    assert(overflow_postings != NULL);
    assert(overflow_postings_i < *num_overflow_postings);

    overflow_posting_position += prev_posting->position();
    overflow_postings[overflow_postings_i++] = Posting(NULL, 0, curr_decoded_doc_id, overflow_posting_position, prev_posting->context());
  }

  // Emit the first posting: each new document starts with frequency 1, which
  // is then bumped for every continuation posting (docID gap of 0) below.
  doc_ids[num_docs_decoded] = prev_posting->doc_id();
  frequencies[num_docs_decoded] = 1;
  ++num_docs_decoded;

  positions[num_properties_decoded] = prev_posting->position();
  contexts[num_properties_decoded] = prev_posting->context();
  ++num_properties_decoded;
  ++num_properties_decoded_per_doc;

  // We read ahead one more document than we were allocated for to check whether the next document is a continuation of the previous one.
  // If it wasn't, since we can't "put back" the posting into the block, we'll store it into prev_posting and set prev_posting_valid to true, otherwise set it to false.
  DecodedPosting curr_posting;
  while ((num_docs_decoded < (*num_docs + 1)) && DecodePosting(&curr_posting)) {
    if (curr_posting.doc_id() == 0) {
      // A continuation of the same document.
      ++frequencies[num_docs_decoded - 1];
    } else if(num_docs_decoded == *num_docs) {
      // The posting we decoded was not a continuation of the same doc id
      // and we have no more room to put it into the output array.
      *prev_posting = curr_posting;
      *prev_posting_valid = true;
      break;
    }
    else {
      // Found a new document.
      prev_posting->set_doc_id(curr_posting.doc_id());

      doc_ids[num_docs_decoded] = curr_posting.doc_id();
      frequencies[num_docs_decoded] = 1;
      ++num_docs_decoded;

      num_properties_decoded_per_doc = 0;
    }

    // We're truncating the number of per document properties. However, this will not affect the frequency of the document.
    if (num_properties_decoded_per_doc < ChunkEncoder::kMaxProperties) {
      assert(num_properties_decoded < *num_properties);
      positions[num_properties_decoded] = curr_posting.position();
      contexts[num_properties_decoded] = curr_posting.context();
      ++num_properties_decoded;
      ++num_properties_decoded_per_doc;
    }

    // Decode the doc id from the gaps, and see if it's an overflow doc id.
    curr_decoded_doc_id += curr_posting.doc_id();

    if (*num_overflow_postings > 0 && curr_decoded_doc_id == overflow_doc_id) {
      assert(overflow_postings != NULL);
      assert(overflow_postings_i < *num_overflow_postings);

      overflow_posting_position += curr_posting.position();
      overflow_postings[overflow_postings_i++] = Posting(NULL, 0, curr_decoded_doc_id, overflow_posting_position, curr_posting.context());
    }
  }

  *num_docs = num_docs_decoded;
  *num_properties = num_properties_decoded;
  *num_overflow_postings = overflow_postings_i;
  return true;
}

// Compresses 'posting' into the shared static buffer 'compressed_tmp_posting'
// and records its length in 'compressed_tmp_posting_len'. Layout: var-byte
// (docID gap + 1), then (optionally) a var-byte position gap, then
// (optionally) a raw context byte.
void TermBlock::EncodePosting(const Posting& posting) {
  const uint32_t curr_doc_id = posting.doc_id();
  const uint32_t curr_position = posting.position();

  // If we're continuing to process the same document, can always take position deltas.
  uint32_t position_gap = curr_position;
  if (prev_doc_id_ == curr_doc_id) {
    assert((prev_position_ == 0) ? position_gap >= prev_position_ : position_gap > prev_position_);
    position_gap -= prev_position_;
  }
  prev_position_ = curr_position;

  // Since docIDs are assigned such that they are always monotonically increasing, we take docID deltas.
  assert(curr_doc_id >= prev_doc_id_);
  const uint32_t doc_id_gap = curr_doc_id - prev_doc_id_;
  prev_doc_id_ = curr_doc_id;

  // We increment all docID gaps by 1 so they're never 0 (a zero byte in the
  // pool marks the end of a postings list). Remember to decrement by 1 when decoding!
  int piece_len;
  Encode(doc_id_gap + 1, compressed_tmp_posting, &piece_len);
  compressed_tmp_posting_len = piece_len;

  if (index_positions_) {
    Encode(position_gap, compressed_tmp_posting + compressed_tmp_posting_len, &piece_len);
    compressed_tmp_posting_len += piece_len;
  }

  if (index_contexts_) {
    // Context is a single raw (uncompressed) byte.
    compressed_tmp_posting[compressed_tmp_posting_len++] = posting.context();
  }
}

// Returns true when the compressed posting was successfully inserted into the TermBlock, and false
// when the memory pool either ran out of blocks or the last block remaining did not have enough space to
// fit the complete compressed posting.
//
// Reads the posting previously prepared by EncodePosting() out of the shared
// static buffer and copies it into this term's pool blocks, allocating and
// chaining additional blocks when the posting straddles a block boundary.
bool TermBlock::AddCompressedPosting() {
  // We don't init it in the constructor because it's possible we have initialized a TermBlock
  // but we actually don't have any space in the memory pool.
  if (curr_block_position_ == NULL) {
    curr_block_position_ = memory_pool_manager_->AllocateBlock();
    if (curr_block_position_ == NULL) {
      return false;
    } else {
      // Only do this if it's the first posting we're adding to this term block.
      if (block_list_ == NULL) {
        InitBlockList(curr_block_position_);
      } else {
        AddBlockToList(curr_block_position_);
      }
    }
  }

  // If we need to dump run, set flag that we have a compressed posting in the buffer that needs to be put into a new TermBlock.
  // Checking up front guarantees we never write a partial posting into the pool.
  if (!memory_pool_manager_->HaveSpace(curr_block_position_, compressed_tmp_posting_len)) {
    return false;
  }

  int remaining_posting_bytes = compressed_tmp_posting_len;

  // Copy the posting into the pool, one (possibly partial) block at a time.
  while (remaining_posting_bytes > 0) {
    unsigned char* next_block_start = memory_pool_manager_->GetNextBlockStart(curr_block_position_);

    // Free bytes left before the current block ends.
    int space_left_curr_block = next_block_start - curr_block_position_;

    int curr_write = min(space_left_curr_block, remaining_posting_bytes);

    // Offset into the compressed posting of the first byte not yet written.
    int offset = compressed_tmp_posting_len - remaining_posting_bytes;

    memcpy(curr_block_position_, compressed_tmp_posting + offset, curr_write);

    remaining_posting_bytes -= curr_write;
    curr_block_position_ += curr_write;

    // If we require an additional block.
    if (remaining_posting_bytes > 0) {
      // Allocate a new block.
      curr_block_position_ = memory_pool_manager_->AllocateBlock();

      assert(curr_block_position_ != NULL);  // We DumpRun() prior to avoid this exact situation.

      AddBlockToList(curr_block_position_);
    } else if (remaining_posting_bytes == 0 && curr_block_position_ == next_block_start) {
      // We fully filled this block.
      // A new block will be allocated if we encounter another posting for this term
      // with code at the beginning of this function.
      curr_block_position_ = NULL;
    }
  }

  // Compressed posting was successfully added.
  return true;
}

// Compresses 'posting' into the shared temporary buffer and then copies it
// into this term's pool blocks. Returns false when the pool is out of space
// (the caller is expected to dump the run and retry).
bool TermBlock::AddPosting(const Posting& posting) {
  EncodePosting(posting);
  // Our only assumption now is that the maximum compressed size of a single
  // posting never exceeds the size of the memory pool.
  assert(compressed_tmp_posting_len <= GetMemoryPoolManager().kMemoryPoolSize);
  const bool added = AddCompressedPosting();
  return added;
}

/**************************************************************************************************************************************************************
 * MemoryPoolManager
 *
 **************************************************************************************************************************************************************/
// Reads the pool and block sizes from the configuration, allocates the single
// backing buffer, and zero-fills it. atol() yields 0 on a missing or
// malformed value, which we treat as a fatal misconfiguration.
MemoryPoolManager::MemoryPoolManager() :
  kMemoryPoolSize(atol(Configuration::GetConfiguration().GetValue(config_properties::kMemoryPoolSize).c_str())),
  kBlockSize(atol(Configuration::GetConfiguration().GetValue(config_properties::kMemoryPoolBlockSize).c_str())),
  memory_pool_(new unsigned char[kMemoryPoolSize]),
  curr_allocated_block_(memory_pool_) {
  if (kMemoryPoolSize == 0) {
    GetErrorLogger().Log("Incorrect configuration value for 'memory_pool_size'", true);
  }

  if (kBlockSize == 0) {
    GetErrorLogger().Log("Incorrect configuration value for 'memory_pool_block_size'", true);
  }

  // The 'kBlockSize != 0' guard avoids a division by zero here in case
  // Log() does not terminate the process on a fatal error — TODO confirm
  // Log()'s fatal-error behavior.
  // Bug fix: the message previously referred to 'term_block_size', a property
  // name that does not exist; the actual property is 'memory_pool_block_size'.
  if (kBlockSize != 0 && kMemoryPoolSize % kBlockSize != 0) {
    GetErrorLogger().Log("Incorrect configuration: 'memory_pool_size' must be a multiple of 'memory_pool_block_size'", true);
  }

  Init();
}

MemoryPoolManager::~MemoryPoolManager() {
  // Release the single backing allocation; all pool blocks live inside it.
  delete[] memory_pool_;
}

void MemoryPoolManager::Init() {
  memset(memory_pool_, 0, kMemoryPoolSize);
}

// Answers the question:
// Can we allocate enough blocks to be able to write out the posting with 'posting_len' into the current memory pool,
// starting from position 'curr_block_pos' in the currently allocated block?
bool MemoryPoolManager::HaveSpace(unsigned char* curr_block_pos, int posting_len) {
  // Whatever fits into the tail of the current block is already paid for.
  int remaining = posting_len - (GetNextBlockStart(curr_block_pos) - curr_block_pos);

  // Every further chunk of the posting would consume one fresh block off the
  // top of the pool; simulate those allocations.
  unsigned char* hypothetical_end = curr_allocated_block_;
  while (remaining > 0) {
    hypothetical_end += kBlockSize;
    remaining -= kBlockSize;
  }

  // There is space as long as the simulated allocations stay inside the pool.
  return hypothetical_end < (memory_pool_ + kMemoryPoolSize);
}

// Hands out the next free block of the pool, or NULL when the pool is
// exhausted. Blocks are carved off sequentially; Reset() rewinds the cursor.
unsigned char* MemoryPoolManager::AllocateBlock() {
  // Bug fix: the old code incremented 'curr_allocated_block_' unconditionally
  // and then tested 'curr != end' for exact equality. After the pool was
  // exhausted once, the cursor marched past the end, the equality test no
  // longer matched, and subsequent calls returned invalid pointers outside
  // the pool. Checking the bound before incrementing makes exhaustion sticky
  // (and avoids pointer arithmetic past the end of the array).
  if (curr_allocated_block_ >= (memory_pool_ + kMemoryPoolSize)) {
    return NULL;
  }

  unsigned char* curr = curr_allocated_block_;
  curr_allocated_block_ += kBlockSize;
  return curr;
}

// Recycles the whole pool after a run has been dumped to disk.
void MemoryPoolManager::Reset() {
  Init();  // Zero-fill so stale postings can't be misread as live data.
  curr_allocated_block_ = memory_pool_;  // Next allocation starts at the front again.
}

/**************************************************************************************************************************************************************
 * EdgeCollectionController
 * This class has two purposes:
 * (1) output the gov2 documents together with their connected edges into one file
 * (2) output the gov2 documents together with their forward index into another file
 **************************************************************************************************************************************************************/
// Opens the five gov2 output streams (paths come from the configuration
// file), writes a header line to the first four, and preloads the
// dictionaries used by the scoring functions below. The lexicon is also
// loaded here, sized by the 'lexicon_size' configuration value.
EdgeCollectionController::EdgeCollectionController() :
		  outputFileHandlerForDocumentEdges_( (Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kGov2DocumentWithTheirConnectedEdgesFileName))).c_str() ),
		  outputFileHandlerForDocumentForwardIndex_( (Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kGov2DocumentWithTheirForwardIndex))).c_str() ),
		  outputFileHandlerForDocumentCompletePostingSet_( (Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kGov2DocumentWithTheirCompletedPostingSet))).c_str() ),
		  outputFileHandlerForDocumentXdocValue_( (Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kGov2DocumentWithTheirXdocValue))).c_str() ),
		  outputFileHandlerForDocumentDistinctSetOfTerms_( (Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kGov2DocumentWithTheirNumOfPostingsRecordedAndTheDistinctSetOfTerms))).c_str() ),
		  overall_total_graph_generation_time_(0.0),
		  average_graph_generation_time_(0.0),
		  num_documents_processed_(0),
		  kLexiconSize(Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kLexiconSize))),
		  lexicon_(kLexiconSize, (Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kGov2OriginalLexiconUsedForPruningFileName))).c_str(), true){
	// Header rows describing the columns of each output file.
	outputFileHandlerForDocumentEdges_ << "trecID" << " " << "threshold" << "(" << Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kTermPairEdgeFreqThreshold)) << ")" << " " << "numOfEdges" << " " << "edgeIDs" << endl;
	outputFileHandlerForDocumentForwardIndex_ << "trecID" << " " << "docSizeInWords(BM25 Component)" << " " << "docPostingsRecorded(Both appear in query trace and in current document)"<< " " << "postings" << endl;
	outputFileHandlerForDocumentCompletePostingSet_ << "trecID" << " " << "docSizeInWords(verification check)" << " " << "docSizeInWords(BM25 Component,actual computation value)" << " " << "postingList" << endl;
	outputFileHandlerForDocumentXdocValue_ << "trecID" << " " << "docSizeInWords(verification check)" << " " << "docSizeInWords(BM25 Component,actual computation value)" << " " << "docSizeInDistinctWords" << " " << "XdocValue(goldStandarded)" << " " << "XdocValue(1D)" << " " << "XdocValue(2D)" << " " << "XdocValue(goodTurning)"<< endl;
	// outputFileHandlerForDocumentDistinctSetOfTerms_ << "trecID" << " " << "docID" << " " << "docPostingsRecorded(ONLY in current document, not with any query trace)" << " " << "setOfDistinctTerms" << endl;
	// Preload the lookup dictionaries consulted during scoring / edge generation.
	LoadUpPopularTermPairUniqueIDAndFreqIntoDict();
	LoadUpPopularTermPairListIntoDict();
	LoadUpQueryTermsProbabilityDistribution();
	LoadUpMetaInfoLengthOfTheInvertedIndex();
}

// Flushes and closes all output streams.
// Consistency fix: the constructor opens five streams but only three were
// explicitly closed here; the Xdoc-value and distinct-set-of-terms streams
// relied on the ofstream destructor alone. Close all five uniformly.
EdgeCollectionController::~EdgeCollectionController() {
	outputFileHandlerForDocumentEdges_.close();
	outputFileHandlerForDocumentForwardIndex_.close();
	outputFileHandlerForDocumentCompletePostingSet_.close();
	outputFileHandlerForDocumentXdocValue_.close();
	outputFileHandlerForDocumentDistinctSetOfTerms_.close();
}

// Weights the (scaled) conditional posting probability by the term's prior
// probability taken from the gold-standard query-term distribution.
// Other candidate prior dicts available in this class:
//   queryTerms1DPredictedProbabilityDistributionMap_
//   queryTerms2DPredictedProbabilityDistributionMap_
//   queryTermsGoodTurningProbabilityDistributionMap_
float EdgeCollectionController::getPostingUniversalProbabilityGivenThePriorProbability(string term, uint32_t docID, float postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber){
	// Note: operator[] inserts a default (0.0) entry for unseen terms.
	float prior = queryTermsGoldStandardRealProbabilityDistributionMap_[term];
	return prior * postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber;
}


// Scores one (document, term) posting with a simple logistic-regression model
// trained offline months earlier; inputs are the partial BM25 score, the
// term's in-document frequency ('f_d_t') and the current document length.
// The resulting probability is tiny, so it is scaled by a big constant before
// being returned.
float EdgeCollectionController::getPostingUniversalProbabilityGivenTheQueryTermsTimesBigNumber(string term, uint32_t docID, unsigned int f_d_t,float partialBM25Score){
	  // Model weights. Most features came out with zero weight, so only four
	  // terms survive in the dot product; the zero-weight features were:
	  // inverted-index length, collection term frequency, query-trace term
	  // frequency, posting rank in list, posting rank in document.
	  float intercept_weight_0 = 1.2404;
	  float partialBM25_weight_1 = -0.4866;
	  float term_freq_in_doc_4 = -0.0068;
	  float doc_words_5 = 0.0001;

	  float matrixMultiplicationScore = intercept_weight_0 * 1 +
	                                    partialBM25_weight_1 * partialBM25Score +
	                                    term_freq_in_doc_4 * f_d_t +
	                                    doc_words_5 * getCurrDocLen() +
	                                    0.0;

	  float probabilityGivenTheQueryTerms = 1/(1 + exp( matrixMultiplicationScore ));

	  // The true probability is usually very small, so I need to time a big number.
	  // option1:
	  float BIG_NUMBER = 1000000;
	  // option2:
	  // float BIG_NUMBER = 1;

	  float postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber = probabilityGivenTheQueryTerms * BIG_NUMBER;

	  assert(!isnan(postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber));

	  return postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber;
}


// if method_value == 1, get the meta info for this specific term in the lexicon (traditional right way to do I think, on 2013/07/14)
// if method_value == 2, get the meta info for this specific term in the in-memory dict (shortcut way to do I think, on 2013/07/14)
//
// Shared BM25 math for both lookup methods: given the term's in-document
// frequency, the current document length, and the number of documents in the
// collection containing the term, compute the per-term (partial) BM25
// contribution. Extracted to remove the verbatim duplication that previously
// existed in both branches of getPartialBM25Score().
static float ComputePartialBm25Contribution(unsigned int f_d_t, float doc_len, uint32_t num_docs_containing_term) {
	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  const float kBm25K1 =  2.0;  // k1
	  const float kBm25B = 0.75;   // b

	  // We can precompute a few of the BM25 values here.
	  const float kBm25NumeratorMul = kBm25K1 + 1;
	  const float kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
	  const int collection_average_doc_len = 971;	   // The average size of document in words
	  uint32_t collection_total_num_docs = 25205179;   // The total number of documents in the indexed collection.
	  const float kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len;

	  float idf_t = log10(1 + (collection_total_num_docs - num_docs_containing_term + 0.5) / (num_docs_containing_term + 0.5));
	  return idf_t * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
}

// The partial BM25 score for one posting (document, term); returns 0.0 when
// the term's document frequency cannot be found via the chosen method.
float EdgeCollectionController::getPartialBM25Score(string term, uint32_t docID, unsigned int f_d_t, int method_value){
	  float returningScore = 0.0;

	  if (method_value == 1){
		  // Method 1: document frequency comes from the on-disk lexicon entry.
		  LexiconData* lex_data = lexicon().GetEntry(term.c_str(), term.length());
		  if (lex_data != NULL){
			  returningScore = ComputePartialBm25Contribution(f_d_t, getCurrDocLen(), lex_data->layer_num_docs(0));
		  }
		  else{
			  cout << "Can NOT find the entry for the term '" << term << "' in the current lexicon,MARK1" << endl;
		  }
	  }
	  else if (method_value == 2){
		  // Method 2: document frequency comes from the preloaded in-memory dict.
		  // (count() is checked first so operator[] never inserts a new entry.)
		  if( termsWithLengthOfTheInvertedIndexMap_.count(term)>0 ){
			  returningScore = ComputePartialBm25Contribution(f_d_t, getCurrDocLen(), termsWithLengthOfTheInvertedIndexMap_[term]);
		  }
		  else{
			  cout << "Can NOT find the entry for the term '" << term << "' in the termsWithLengthOfTheInvertedIndexMap_,MARK1" << endl;
		  }
	  }
	  // NOTE(review): the helper takes the document frequency as uint32_t —
	  // assumes both layer_num_docs(0) and the map's mapped type fit in 32
	  // bits, which holds for document counts in this collection; verify.
	  return returningScore;
}


void EdgeCollectionController::InsertTermIntoCorrespondingTermDicts(string term,uint32_t doc_id){
	  // Records one occurrence of `term` in document `doc_id`.
	  //
	  // Two pairs of data structures are maintained:
	  //   (1) doc_id_with_all_terms_in_current_doc_dict_ /
	  //       all_terms_in_current_doc_id_vector_
	  //       -- every term seen in the current document.
	  //   (2) doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_ /
	  //       terms_both_in_current_doc_id_and_query_trace_vector_
	  //       -- only terms that also occur in the query trace (i.e. are keys
	  //       of popularTermWithSecondPartsAndFreqDict_).
	  //
	  // Each map stores a per-term occurrence count; each vector stores every
	  // DISTINCT term once (first occurrence only). std::map::operator[]
	  // value-initializes a missing count to 0, so the old explicit
	  // "new doc" / "old doc" branches collapse into a single increment.

	  // (1) all terms in the current document.
	  // NOTE(review): the previous version pushed the term into
	  // all_terms_in_current_doc_id_vector_ on EVERY occurrence, even though
	  // its own comment said duplicates were not wanted and the query-term
	  // branch below only pushes on first occurrence. It now pushes each
	  // distinct term exactly once, matching that stated intent.
	  int& all_terms_count = doc_id_with_all_terms_in_current_doc_dict_[doc_id][ term ];
	  if (all_terms_count == 0){
		  // First time this term is seen in this document.
		  all_terms_in_current_doc_id_vector_.push_back(term);
	  }
	  all_terms_count += 1;

	  // (2) terms that also appear in the query trace.
	  // step1: check whether this is a query term
	  if ( popularTermWithSecondPartsAndFreqDict_.count( term ) > 0 ){
		  int& query_term_count = doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id][ term ];
		  if (query_term_count == 0){
			  // First occurrence of this query term in this document.
			  terms_both_in_current_doc_id_and_query_trace_vector_.push_back(term);
		  }
		  query_term_count += 1;
	  }
	  else{
		  // This term is NOT in the query trace, so it is tracked only by (1).
	  }
}

size_t EdgeCollectionController::GetProperThreshold(){
	// The threshold is simply the number of query-trace terms collected for
	// the current document so far.
	const size_t proper_threshold = terms_both_in_current_doc_id_and_query_trace_vector_.size();
	return proper_threshold;
}

void EdgeCollectionController::LoadUpMetaInfoLengthOfTheInvertedIndex() {
    // Loads "<term> <inverted-list-length>" pairs from the file named by the
    // kTermsWithTheirLengthOfInvertedIndexValue configuration entry into
    // termsWithLengthOfTheInvertedIndexMap_. Blank lines are skipped.
    cout << "EdgeCollectionController::LoadUpMetaInfoLengthOfTheInvertedIndex() called." << endl;
    string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTermsWithTheirLengthOfInvertedIndexValue));
    cout << "inputFileName:" << inputFileName << endl;

    string currentLine;
	ifstream inputfile(inputFileName.c_str());
	// Loop on the result of getline() itself. The old pattern
	// `while (inputfile.good()) { getline(...); ... }` evaluates the body once
	// more after the final successful read and (pre-C++11, where a failed
	// getline leaves the string unchanged) could process the last line twice.
	while ( getline(inputfile, currentLine) )
	{
		if(currentLine != ""){
		    boost::algorithm::trim(currentLine);
			istringstream iss( currentLine );
		    string term;
		    string length_of_the_inverted_list_in_string_format;

			iss >> term;
			iss >> length_of_the_inverted_list_in_string_format;

			// atof() yields double; the map stores float, which is precise
			// enough for list lengths used here as bucket boundaries.
			float length_of_the_inverted_list = atof(length_of_the_inverted_list_in_string_format.c_str());

			termsWithLengthOfTheInvertedIndexMap_[term] = length_of_the_inverted_list;
		}
	}
	inputfile.close();

	// Log success/failure so a misconfigured or empty input file is noticed.
    if(termsWithLengthOfTheInvertedIndexMap_.size() == 0){
	    GetDefaultLogger().Log("Load Up terms with the meta value: length_of_the_inverted_index_map_ NOT Done--- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(termsWithLengthOfTheInvertedIndexMap_.size()) + " <term,length_of_the_inverted_list> have been loaded.", false);
    }
}



void EdgeCollectionController::LoadUpQueryTermsProbabilityDistribution() {
	// Loads per-term probability estimates from the file named by the
	// kQueryTermProbablityDistributionFileNameAdvanced configuration entry.
	// Expected line format (after one header line):
	//   <term> <goldStandardRealProb> <1DPredictedProb> <2DPredictedProb> <goodTuringProb>
	// Each column is stored into its corresponding *DistributionMap_ member.
	string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kQueryTermProbablityDistributionFileNameAdvanced));
    string currentLine;
	ifstream inputfile(inputFileName.c_str());

	// skip the headline
	getline (inputfile,currentLine);

	// Loop on getline() itself; the old `while (inputfile.good())` pattern
	// runs the body once more after the last successful read and could
	// process the final line twice on pre-C++11 libraries.
	while ( getline(inputfile, currentLine) )
	{
		if(currentLine != ""){
		      boost::algorithm::trim(currentLine);

			  istringstream iss( currentLine );
		      string term;
		      string goldStandardRealProbability;
		      string predicted1DProbability;
		      string predicted2DProbability;
		      string predictedGoodTuringProbability;

			  iss >> term;
			  iss >> goldStandardRealProbability;
			  iss >> predicted1DProbability;
			  iss >> predicted2DProbability;
			  iss >> predictedGoodTuringProbability;

			  queryTermsGoldStandardRealProbabilityDistributionMap_[term] = atof(goldStandardRealProbability.c_str());
			  queryTerms1DPredictedProbabilityDistributionMap_[term] = atof(predicted1DProbability.c_str());
			  queryTerms2DPredictedProbabilityDistributionMap_[term] = atof(predicted2DProbability.c_str());
			  queryTermsGoodTurningProbabilityDistributionMap_[term] = atof(predictedGoodTuringProbability.c_str());
		}
	}
	inputfile.close();

	// The "UNK*" fall-back entries are constants, yet the old code re-assigned
	// them once per input line inside the loop. They are hoisted here; the
	// guard preserves the old observable behavior that an empty input file
	// leaves the maps empty and triggers the "NOT done" warning below.
	if(queryTermsGoodTurningProbabilityDistributionMap_.size() > 0){
		// gold standard probability of an unseen term is defined to be 0
		queryTermsGoldStandardRealProbabilityDistributionMap_["UNK"] = 0.0;
		// 1D estimate: the summed probability of column 0 in the predicted probability table
		queryTerms1DPredictedProbabilityDistributionMap_["UNK_SUM_0"] = 0.000057172;
		// 2D estimates: bucketed by the length of the term's inverted list
		queryTerms2DPredictedProbabilityDistributionMap_["UNK_ROW_1_0"] = 1.15262492427e-05;
		queryTerms2DPredictedProbabilityDistributionMap_["UNK_ROW_2_0"] = 0.000660982699686;
		queryTerms2DPredictedProbabilityDistributionMap_["UNK_ROW_3_0"] = 0.004744877;
		queryTerms2DPredictedProbabilityDistributionMap_["UNK_ROW_4_0"] = 0.013339567;
		queryTerms2DPredictedProbabilityDistributionMap_["UNK_ROW_5_0"] = 0.032566185;
		// Good-Turing probability mass reserved for unseen terms
		queryTermsGoodTurningProbabilityDistributionMap_["UNK"] = 0.0000000012845;
	}

	// for debug ONLY (note: operator[] inserts "available" with 0.0 if absent,
	// exactly as the original code did)
	string testTerm = "available";
	cout << testTerm << " " << queryTermsGoodTurningProbabilityDistributionMap_[testTerm] << endl;

    if(queryTermsGoodTurningProbabilityDistributionMap_.size() == 0){
	    GetDefaultLogger().Log("Load Up Query Terms Probability Distribution is NOT done --- Take Care", false);
    }
    else{
	    GetDefaultLogger().Log(Stringify(queryTermsGoodTurningProbabilityDistributionMap_.size()) + " Query Terms Probabilities have been loaded.", false);
    }
}

void EdgeCollectionController::LoadUpPopularTermPairUniqueIDAndFreqIntoDict(){
	  int termPairFreqThreshold = Configuration::GetResultValue<long int>(Configuration::GetConfiguration().GetNumericalValue(config_properties::kTermPairEdgeFreqThreshold));

	  string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPopularTermPairNoDirectionProbabilityFileName));

      ifstream inputFileHandler(inputFileName.c_str());

	  string currentLine = "";

	  vector<string> elementsForCurrentLine;

	  // there is currently NO header file involved
	  // get rid of the header line
	  // getline (inputFileHandler,currentLine);

	  while ( inputFileHandler.good() )
	  {
		  getline (inputFileHandler,currentLine);
		  if(currentLine != ""){
		      trim(currentLine);
		      split(elementsForCurrentLine, currentLine, is_any_of(" ") );
		      // current layout of the data file
		      // index value 0: query term pair index number (This index number can also be the uniqueID)
		      // index value 1: query term pair
		      // index value 2: query term pair freq
		      popularTermPairWithUniqueID_[ elementsForCurrentLine[1] ] = atoi( elementsForCurrentLine[0].c_str() );
		      popularTermPairWithFreq_[ elementsForCurrentLine[1] ] = atoi( elementsForCurrentLine[2].c_str() );
		      if ( atoi( elementsForCurrentLine[2].c_str() ) < termPairFreqThreshold ){
		    	  // Just NOT store this term pair into the term Pairs Above The Threshold Vector
		      }
		      else{
		    	  termPairsAboveTheThresholdVector_.push_back(elementsForCurrentLine[1]);
		      }
		  }
	  }
	  inputFileHandler.close();

	  // for debug ONLY
	  // traversal of the whole map
	  // float totalProbability = 0.0;
	  // for(map<string,float>::iterator iter = popularTermPairWithNoDirectionProbability_.begin(); iter != popularTermPairWithNoDirectionProbability_.end(); iter++)
	  // {
	     // cout << iter->first << " " << iter->second << endl;
		 // totalProbability += iter->second;
	  // }
	  // cout << "totalProbability:" << totalProbability << endl;

	  if(popularTermPairWithUniqueID_.size() == 0 or popularTermPairWithFreq_.size() == 0 or termPairsAboveTheThresholdVector_.size() == 0){
		  GetDefaultLogger().Log("Load Popular Term Pair List with their uniqueID is NOT Done --- Take Care", false);
		  GetDefaultLogger().Log("Load Popular Term Pair List with their freq is NOT Done --- Take Care", false);
		  GetDefaultLogger().Log("Load Popular Term Pair Above The Threshold is NOT Done --- Take Care", false);
	  }
	  else{
		  GetDefaultLogger().Log(Stringify(popularTermPairWithUniqueID_.size()) + " popular term pairs with their uniqueID have been loaded.", false);
		  GetDefaultLogger().Log(Stringify(popularTermPairWithFreq_.size()) + " popular term pairs with their freq have been loaded.", false);
		  GetDefaultLogger().Log(Stringify(termPairsAboveTheThresholdVector_.size()) + " popular term pairs which above the threshold have been loaded.", false);
	  }
}



void EdgeCollectionController::LoadUpPopularTermPairListIntoDict(){

	  // load the term pair into the class variable called map<string,int> popularTermPairDict_

      string inputFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPopularTermPairListFileName));

      ifstream inputFileHandler(inputFileName.c_str());

	  string currentLine = "";

	  vector<string> elementsForCurrentLine;

	  while ( inputFileHandler.good() )
	  {
		  getline (inputFileHandler,currentLine);
		  if(currentLine != ""){
		      trim(currentLine);
		      split(elementsForCurrentLine, currentLine, is_any_of(" ") );
		      int NUM_OF_SECOND_PARTS = atoi( elementsForCurrentLine[1].c_str() );
		      // For the case which NUM_OF_SECOND_PARTS == 0:
		      // The popularTermWithSecondPartsAndFreqDict_ are defined as following:
		      // map<string,map<string,int>  > popularTermWithSecondPartsAndFreqDict_;
		      // In this case, with a special symbol "N/A" and with the freq 0 will be added
		      popularTermWithSecondPartsAndFreqDict_[ elementsForCurrentLine[0] ]["N/A"] = 0;

		      // For the case which NUM_OF_SECOND_PARTS != 0:
		      for(int i = 0; i < NUM_OF_SECOND_PARTS; i++){
		    	  // Updated by Wei 2013/03/03
		    	  // I set the value to be 1 means there are second parts.
		    	  // I set the value to be 0 means there are NOT valid second parts.
		    	  // TODO: Ideally, I should set this value to be the freq which the second parts appeared.
		    	  // The variable popularTermWithSecondPartsAndFreqDict_ will be dynamically changed (remove some elements)

		    	  // popularTermPairWithFreq_[ elementsForCurrentLine[1] ] = atoi( elementsForCurrentLine[2].c_str() );
		    	  string testedKey1 = elementsForCurrentLine[0] + "_" + elementsForCurrentLine[2+i];
		    	  string testedKey2 = elementsForCurrentLine[2+i] + "_" + elementsForCurrentLine[0];
		    	  // for debug ONLY
		    	  // cout << "testedKey1:" << testedKey1 << endl;
		    	  // cout << "testedKey2:" << testedKey2 << endl;
		    	  if(elementsForCurrentLine[0] == elementsForCurrentLine[2+i]){
		    		  // do nothing for this situation.
		    		  // popularTermWithSecondPartsAndFreqDict_ will NOT be updated.
		    	  }
		    	  else{
			    	  if ( popularTermPairWithFreq_.count(testedKey1) > 0 ){
			    		  popularTermWithSecondPartsAndFreqDict_[ elementsForCurrentLine[0] ][elementsForCurrentLine[2+i]] = popularTermPairWithFreq_[testedKey1];
			    	  }
			    	  else if ( popularTermPairWithFreq_.count(testedKey2) > 0 ){
			    		  popularTermWithSecondPartsAndFreqDict_[ elementsForCurrentLine[0] ][elementsForCurrentLine[2+i]] = popularTermPairWithFreq_[testedKey2];
			    	  }
			    	  else{
			    		  cout << "unexpected behavior" << endl;
			    		  cout << "testedKey1:" << testedKey1 << endl;
			    		  cout << "testedKey2:" << testedKey2 << endl;
			    	  }
		    	  }
		      }
		  }
	  }
	  inputFileHandler.close();

	  // for debug ONLY
	  // traversal of the whole map

	  // for(map<string,int>::iterator iter = popularTermPairDict_.begin(); iter != popularTermPairDict_.end(); iter++)
	  // {
	  //   cout << iter->first << " " << iter->second << endl;
	  // }


	  if(popularTermWithSecondPartsAndFreqDict_.size() == 0){
		  GetDefaultLogger().Log("Load Popular Term Pair List is NOT Done --- Take Care", false);
	  }
	  else{
		  GetDefaultLogger().Log(Stringify(popularTermWithSecondPartsAndFreqDict_.size()) + " terms with their second parts(Including 1 term query) have been loaded.", false);
	  }
}

// Emits the per-document output artifacts for `doc_id` -- forward index, edge
// list, complete posting set, Xdoc value, and distinct term set (each gated by
// its own flag) -- then clears the per-document accumulators and updates the
// running timing statistics.
//
// Parameters:
//   doc_id                     -- the document being flushed; (uint32_t)-1
//                                 acts as a "no document" sentinel (see below).
//   thresholdFreq              -- not referenced in this method's body.
//   debugMode                  -- if true, prints the collected query-trace terms.
//   switchForCurrDoc           -- selects getCurrDocTrecID() vs getPrevDocTrecID()
//                                 as the TREC id written to each output stream.
//   output*Flag                -- enable/disable each individual output file.
void EdgeCollectionController::GenerateDocumentPostingGraphMethod3(uint32_t doc_id, int thresholdFreq, bool debugMode, bool switchForCurrDoc, bool outputForwardIndexFlag, bool outputEdgesFlag, bool outputCompletePostingSetFlag, bool outputXdocValueFlag, bool outputDistinctSetOfTermsFlag){
	// Updated by Wei on 2013/09/26 afternoon at school
	// This is the trick I played with myself(It takes me half an hour to figure it out).
	// Some shortcut switches:
	// When true, skip the ForwardIndex/Edges/CompletePostingSet logic entirely
	// and only produce the XdocValue output (much faster).
	bool outputXDocONLYFlag = false;	// If the flag is set to True, then, the function logic will ignore all the logic for outputing ForwardIndex, Edges, CompletePostingSet and ONLY do logic for XdocValue. And this will be fast

	// Start timing Generate Document Posting Graph process.
	Timer generate_graph_time;

	cout << "--->Generating document posting dependency graph for doc_id:" << doc_id << " " << "using Graph Generation Method3" << endl;
	cout << "--->Overall Computing Statistics:" << endl;
	cout << "--->(1)terms_both_in_current_doc_id_and_query_trace_vector_.size():" << terms_both_in_current_doc_id_and_query_trace_vector_.size() << endl;
	cout << "--->(2)doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id].size():" << doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id].size() << endl;
	cout << "--->(3)all_terms_in_current_doc_id_vector_.size():" << all_terms_in_current_doc_id_vector_.size() << endl;
	cout << "--->(4)doc_id_with_all_terms_in_current_doc_dict_[doc_id].size():" << doc_id_with_all_terms_in_current_doc_dict_[doc_id].size() << endl;

	// if set this comparison value to be != -1, it means that all the documents will be evaluated.
	// if set this comparison value to be == a specific docID, it means that ONLY that specific docID will be evaluated
	// NOTE: doc_id is uint32_t, so `doc_id != -1` compares against the wrapped
	// value UINT32_MAX -- an intentional "always true unless sentinel" check.
	if (doc_id != -1){
		  if (!outputXDocONLYFlag){
			  cout << "--->debugMode:" << debugMode << endl;
			  if(debugMode){
				  cout << "--->debugMode is set to True" << endl;
				  cout << "--->output all the terms BOTH in current doc id and query trace vector" << endl;

				  for(unsigned i = 0; i < terms_both_in_current_doc_id_and_query_trace_vector_.size(); i++){
					  cout << terms_both_in_current_doc_id_and_query_trace_vector_[i] << " ";
				  }
				  cout << endl;
			  }
			  cout << "--->Generation Done(real)" << endl;

			  // Put the real term pairs into numbers and integers
			  // Here, I assume all the term pair has been ordered(actually, when in practice, there is NO order involved) by lexicon graphic order
			  vector<int> currentDocEdgesSortedByID;

			  // Q: do I need this operation?
			  // A(current answer): YES, because the combination library require the sequence to be sorted in the first hand.
			  sort(terms_both_in_current_doc_id_and_query_trace_vector_.begin(),terms_both_in_current_doc_id_and_query_trace_vector_.end()); // sort the vector

			  // Enumerate all 2-term combinations of the sorted query-trace
			  // terms; after each next_combination() call the chosen pair sits
			  // at indices [0] and [1].
			  // NOTE(review): indices [0]/[1] (and begin()+R) are accessed
			  // without checking that the vector holds at least 2 elements --
			  // with 0 or 1 query-trace terms this reads out of bounds. TODO
			  // confirm callers guarantee >= 2 terms before invoking this path.
			  const int R = 2;
			  int N = 0;	// combination counter (kept for bookkeeping; not otherwise used)
			  string term_pair = "";
			  do {
				  ++N;
				  term_pair = terms_both_in_current_doc_id_and_query_trace_vector_[0] + "_" + terms_both_in_current_doc_id_and_query_trace_vector_[1];
				  // directly from here, I can do comparison, no need to add the term pair into the vector
				  if ( popularTermPairWithUniqueID_.count(term_pair) > 0){
					  currentDocEdgesSortedByID.push_back( popularTermPairWithUniqueID_[ term_pair ] );
				  }
				  else{
					  // The combination of this two query terms does NOT exist in a certain query
					  // Doesn't need to handle anything.
				  }
			  } while (next_combination(terms_both_in_current_doc_id_and_query_trace_vector_.begin(), terms_both_in_current_doc_id_and_query_trace_vector_.begin() + R, terms_both_in_current_doc_id_and_query_trace_vector_.end()));


			  // Updated by Wei 2013/08/12
			  // Forward index line format:
			  //   <trecID> <docLen> <numQueryTerms> (term,bm25,probTimesBigNum,prob) ...
			  if(outputForwardIndexFlag){
				  // logic for building the outputing file: outputFileHandlerForDocumentForwardIndex_ begins...
				  if (! switchForCurrDoc){
					  outputFileHandlerForDocumentForwardIndex_ << getPrevDocTrecID() << " ";
				  }
				  else{
					  outputFileHandlerForDocumentForwardIndex_ << getCurrDocTrecID() << " ";
				  }
				  outputFileHandlerForDocumentForwardIndex_ << getCurrDocLen() << " ";
				  outputFileHandlerForDocumentForwardIndex_ << terms_both_in_current_doc_id_and_query_trace_vector_.size() << " ";
				  for(unsigned i = 0; i < terms_both_in_current_doc_id_and_query_trace_vector_.size(); i++){
					  int freq_in_doc = doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id][ terms_both_in_current_doc_id_and_query_trace_vector_[i] ];
					  // method_value 2 selects the termsWithLengthOfTheInvertedIndexMap_-based idf.
					  float partialBM25Score = getPartialBM25Score(terms_both_in_current_doc_id_and_query_trace_vector_[i], doc_id, freq_in_doc, 2);
					  float postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber = getPostingUniversalProbabilityGivenTheQueryTermsTimesBigNumber(terms_both_in_current_doc_id_and_query_trace_vector_[i], doc_id,freq_in_doc,partialBM25Score);
					  float postingUniversalProbability = getPostingUniversalProbabilityGivenThePriorProbability(terms_both_in_current_doc_id_and_query_trace_vector_[i], doc_id,postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber);
					  outputFileHandlerForDocumentForwardIndex_ << "("
																<< terms_both_in_current_doc_id_and_query_trace_vector_[i] << ","
																<< partialBM25Score << ","
																<< postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber << ","
																<< postingUniversalProbability
																<< ")"
																<< " ";
				  }
				  outputFileHandlerForDocumentForwardIndex_ << endl;
				  // logic for building the outputing file: outputFileHandlerForDocumentForwardIndex_ ends.
			  }

			  // Updated by Wei 2013/08/12
			  // Edge line format: <trecID> <numEdges> <edgeID> ...
			  if (outputEdgesFlag){
				  // logic for building the outputing file: outputFileHandlerForDocumentEdges_ begins...
				  // sort the vector
				  sort(currentDocEdgesSortedByID.begin(), currentDocEdgesSortedByID.end());

				  // dump the info to hard drive logic
				  if (! switchForCurrDoc){
					  outputFileHandlerForDocumentEdges_ << getPrevDocTrecID() << " ";
				  }
				  else{
					  outputFileHandlerForDocumentEdges_ << getCurrDocTrecID() << " ";
				  }
				  outputFileHandlerForDocumentEdges_ << currentDocEdgesSortedByID.size() << " ";
				  for(unsigned i = 0; i < currentDocEdgesSortedByID.size(); i++){
						outputFileHandlerForDocumentEdges_ << currentDocEdgesSortedByID[i] << " ";
				  }
				  outputFileHandlerForDocumentEdges_ << endl;
				  // logic for building the outputing file: outputFileHandlerForDocumentEdges_ ends.
			  }


			  // Updated by Wei 2013/07/14
			  // Complete posting set line format:
			  //   <trecID> <docLen> <numTerms> (term,bm25) ...
			  if(outputCompletePostingSetFlag){
				  // logic for building the outputing file: outputFileHandlerForDocumentCompletePostingSet_ begins...
				  if (! switchForCurrDoc){
					  outputFileHandlerForDocumentCompletePostingSet_ << getPrevDocTrecID() << " ";
				  }
				  else{
					  outputFileHandlerForDocumentCompletePostingSet_ << getCurrDocTrecID() << " ";
				  }
				  outputFileHandlerForDocumentCompletePostingSet_ << getCurrDocLen() << " ";
				  outputFileHandlerForDocumentCompletePostingSet_ << all_terms_in_current_doc_id_vector_.size() << " ";
				  for(unsigned i = 0; i < all_terms_in_current_doc_id_vector_.size(); i++){
					  int freq_in_doc = doc_id_with_all_terms_in_current_doc_dict_[doc_id][ all_terms_in_current_doc_id_vector_[i] ];
					  float partialBM25Score = getPartialBM25Score(all_terms_in_current_doc_id_vector_[i], doc_id, freq_in_doc, 2);

					  outputFileHandlerForDocumentCompletePostingSet_ << "("
																<< all_terms_in_current_doc_id_vector_[i] << ","
																<< partialBM25Score
																<< ")"
																<< " ";
				  }
				  outputFileHandlerForDocumentCompletePostingSet_ << endl;
				  // logic for building the outputing file: outputFileHandlerForDocumentCompletePostingSet_ ends.
			  }

		  }

		  // Updated by Wei 2013/08/12
		  // Xdoc value: sums, over every distinct term in the document, four
		  // probability estimates (gold standard, 1D, 2D, Good-Turing); unseen
		  // terms fall back to the pre-loaded "UNK*" entries.
		  if (outputXdocValueFlag){
			  // logic for building the outputing file: outputFileHandlerForDocumentXdocValue_ begins...
			  if (! switchForCurrDoc){
				  outputFileHandlerForDocumentXdocValue_ << getPrevDocTrecID() << " ";
			  }
			  else{
				  outputFileHandlerForDocumentXdocValue_ << getCurrDocTrecID() << " ";
			  }
			  // for DEBUG
			  outputFileHandlerForDocumentXdocValue_ << endl;
			  outputFileHandlerForDocumentXdocValue_ << doc_id_with_all_terms_in_current_doc_dict_[doc_id].size() << endl;

			  // real thing (completed on 2013/07/30 night)
		  	  float accumulatedGoldStandardRealProbabilityXdocValue = 0.0;
	          float accumulated1DPredictedProbabilityXdocValue = 0.0;
			  float accumulated2DpredictedProbabilityXdocValue = 0.0;
		  	  float accumulatedGoodTurningProbabilityXdocValue = 0.0;
		      map<string,int>::iterator iter;
			  for(iter = doc_id_with_all_terms_in_current_doc_dict_[doc_id].begin(); iter != doc_id_with_all_terms_in_current_doc_dict_[doc_id].end(); iter++){
				  float currentTermGoldStandardRealProbability = 0.0;
				  float currentTerm1DPredictedProbability = 0.0;
				  float currentTerm2DpredictedProbability = 0.0;
				  float currentTermGoodTurningProbability = 0.0;
				  string currentTerm = (*iter).first;
				  if (queryTermsGoodTurningProbabilityDistributionMap_.count( currentTerm ) > 0){
					  // Known query term: read all four estimates directly.
					  currentTermGoldStandardRealProbability = queryTermsGoldStandardRealProbabilityDistributionMap_[ currentTerm ];
					  currentTerm1DPredictedProbability = queryTerms1DPredictedProbabilityDistributionMap_[ currentTerm ];
					  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ currentTerm ];
					  currentTermGoodTurningProbability = queryTermsGoodTurningProbabilityDistributionMap_[ currentTerm ];
				  }
				  else{
					  // assign the probability naturally to 0 because it is just NOT seen
					  // NOTE(review): these "UNK*" reads use operator[], which
					  // inserts a 0-valued entry if the key is missing --
					  // presumably LoadUpQueryTermsProbabilityDistribution has
					  // always populated them; confirm load order.
					  currentTermGoldStandardRealProbability = queryTermsGoldStandardRealProbabilityDistributionMap_[ "UNK" ];
					  // assign the probability to what our best guess for the 1D UNK_SUM_0
					  currentTerm1DPredictedProbability = queryTerms1DPredictedProbabilityDistributionMap_[ "UNK_SUM_0" ];

					  // assign the probability also based on the length of the inverted list of that specific term
					  // Bucket boundaries (1/100/665/2473/9964/25205180) match
					  // the UNK_ROW_* rows loaded by LoadUpQueryTermsProbabilityDistribution.
					  if (termsWithLengthOfTheInvertedIndexMap_.count(currentTerm) > 0){
						  if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 1 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] < 100){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_1_0" ];
						  }
						  else if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 100 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] < 665){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_2_0" ];
						  }
						  else if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 665 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] < 2473){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_3_0" ];
						  }
						  else if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 2473 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] < 9964){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_4_0" ];
						  }
						  else if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 9964 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] <= 25205180){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_5_0" ];
						  }
						  else{
							  // List length outside every bucket (e.g. 0 or > 25205180).
							  cout << "Can NOT assign the term: '" << currentTerm << "' a probability,MARK3" << "termsWithLengthOfTheInvertedIndexMap_[currentTerm]:" << termsWithLengthOfTheInvertedIndexMap_[currentTerm] << endl;
						  }
					  }
					  else{
						  cout << "Can NOT find the entry for the term '" << currentTerm << "' in the termsWithLengthOfTheInvertedIndexMap_,MARK2" << endl;
						  currentTerm2DpredictedProbability = 0.0;
					  }


					  // assign the unseen good turing probability to the unseen words
					  currentTermGoodTurningProbability = queryTermsGoodTurningProbabilityDistributionMap_[ "UNK" ];
				  }

				  // for DEBUG
				  outputFileHandlerForDocumentXdocValue_ << "---> " << currentTerm << " " << currentTermGoldStandardRealProbability << " " << currentTerm1DPredictedProbability << " " << currentTerm2DpredictedProbability << " " << currentTermGoodTurningProbability << endl;

				  accumulatedGoldStandardRealProbabilityXdocValue += currentTermGoldStandardRealProbability;
				  accumulated1DPredictedProbabilityXdocValue += currentTerm1DPredictedProbability;
				  accumulated2DpredictedProbabilityXdocValue += currentTerm2DpredictedProbability;
				  accumulatedGoodTurningProbabilityXdocValue += currentTermGoodTurningProbability;
			  }

			  // current version: one line with the four accumulated sums.
			  outputFileHandlerForDocumentXdocValue_ << accumulatedGoldStandardRealProbabilityXdocValue << " "
					  	  	  	  	  	  	  	  	 << accumulated1DPredictedProbabilityXdocValue << " "
					  	  	  	  	  	  	  	  	 << accumulated2DpredictedProbabilityXdocValue << " "
					  	  	  	  	  	  	  	  	 << accumulatedGoodTurningProbabilityXdocValue << endl;
			  // logic for building the outputing file: outputFileHandlerForDocumentXdocValue_ ends.
		  }


		  // Updated by Wei 2013/09/26 night at school
		  // Distinct term set line format:
		  //   <trecID> <docID> <docLen> <numDistinctTerms> <term> ...
		  if(outputDistinctSetOfTermsFlag){
			  // logic for building the outputing file: outputFileHandlerForDocumentDistinctSetOfTerms_ begins...
			  if (! switchForCurrDoc){
				  outputFileHandlerForDocumentDistinctSetOfTerms_ << getPrevDocTrecID() << " ";
			  }
			  else{
				  outputFileHandlerForDocumentDistinctSetOfTerms_ << getCurrDocTrecID() << " ";
			  }
			  outputFileHandlerForDocumentDistinctSetOfTerms_ << doc_id << " ";
			  outputFileHandlerForDocumentDistinctSetOfTerms_ << getCurrDocLen() << " ";
			  outputFileHandlerForDocumentDistinctSetOfTerms_ << doc_id_with_all_terms_in_current_doc_dict_[doc_id].size() << " ";

			  // output the terms
		      map<string,int>::iterator iter;
			  for(iter = doc_id_with_all_terms_in_current_doc_dict_[doc_id].begin(); iter != doc_id_with_all_terms_in_current_doc_dict_[doc_id].end(); iter++){
				  string currentTerm = (*iter).first;
				  outputFileHandlerForDocumentDistinctSetOfTerms_ << currentTerm << " ";
			  }
			  outputFileHandlerForDocumentDistinctSetOfTerms_ << endl;
			  // logic for building the outputing file: outputFileHandlerForDocumentDistinctSetOfTerms_ ends.
		  }

		  // CLEANING PROCESS
		  // (1)clear the vector terms_both_in_current_doc_id_and_query_trace_vector_
		  terms_both_in_current_doc_id_and_query_trace_vector_.clear();

		  // (2)the local vector currentDocEdgesSortedByID is scoped to the
		  // block above and destroyed automatically -- nothing to clear here.

		  // (3)clear the vector all_terms_in_current_doc_id_vector_
		  all_terms_in_current_doc_id_vector_.clear();

		  // (4)clear the used variable: doc_id_with_all_terms_in_current_doc_dict_[doc_id]
		  doc_id_with_all_terms_in_current_doc_dict_[doc_id].clear();
	}
	else{
		cout << "--->Generation Done(fake)" << endl;
		// There is currently NO LOGIC for the sentinel doc_id case.
	}

	cout << "--->Erasion Done(real)" << endl;
	// current version: erase the per-doc map entries and the query-trace
	// vector (presumably idempotent with the clears above -- confirm in the
	// two helpers' definitions).
	ClearDocIDRelatedTermDicts(doc_id);
	ClearTermsBothInCurrentDocIDAndQueryTraceVector();

	// Timing bookkeeping: per-doc elapsed time plus running total and average.
	num_documents_processed_ += 1;
	current_graph_generation_time_ = generate_graph_time.GetElapsedTime();
	overall_total_graph_generation_time_ += current_graph_generation_time_;
	average_graph_generation_time_ = overall_total_graph_generation_time_ / num_documents_processed_;

	cout << "--->Number Of Documents Processed: " << num_documents_processed_ << endl;
	cout << "--->Current Graph Generation Time Elapsed: " << current_graph_generation_time_ << " seconds"<< endl;
	cout << "--->Overall Total Graph Generation Time Elapsed: " << overall_total_graph_generation_time_ << " seconds"<< endl;
	cout << "--->Average Graph Generation Time Elapsed: " << average_graph_generation_time_ << " seconds"<< endl;

	cout << endl;
}

// Generates the posting dependency graph for one document (Graph Generation Method 1)
// and optionally writes up to five per-document output files (forward index, edge list,
// complete posting set, Xdoc value, distinct term set). Afterwards it clears the
// per-document bookkeeping structures and updates the running timing statistics.
//
// Parameters:
//   doc_id                        - internal docID of the document being processed.
//   thresholdFreq                 - minimum pair frequency for an edge to be considered.
//   debugMode                     - not referenced in this body; TODO confirm whether it is still needed.
//   switchForCurrDoc              - false => label output lines with getPrevDocTrecID(), true => getCurrDocTrecID().
//   outputForwardIndexFlag        - emit the document forward index line.
//   outputEdgesFlag               - emit the sorted edge-ID line.
//   outputCompletePostingSetFlag  - emit the complete posting set line.
//   outputXdocValueFlag           - emit the accumulated probability (Xdoc value) line.
//   outputDistinctSetOfTermsFlag  - emit the distinct set of terms line.
void EdgeCollectionController::GenerateDocumentPostingGraphMethod1(uint32_t doc_id, int thresholdFreq, bool debugMode, bool switchForCurrDoc, bool outputForwardIndexFlag, bool outputEdgesFlag, bool outputCompletePostingSetFlag, bool outputXdocValueFlag, bool outputDistinctSetOfTermsFlag){
	// Updated by Wei on 2013/09/26 afternoon at school
	// This is the trick I played with myself(It takes me half an hour to figure it out)
	// Some shortcut switches:
	bool outputXDocONLYFlag = false; // If the flag is set to True, then, the function logic will ignore all the logic for outputing ForwardIndex, Edges, CompletePostingSet and ONLY do logic for XdocValue. And this will be fast

	// Start timing Generate Document Posting Graph process.
	Timer generate_graph_time;

	cout << "--->Generating document posting dependency graph for doc_id:" << doc_id << " " << "using Graph Generation Method1" << endl;
	cout << "--->Overall Computing Statistics:" << endl;
	cout << "--->(1)terms_both_in_current_doc_id_and_query_trace_vector_.size():" << terms_both_in_current_doc_id_and_query_trace_vector_.size() << endl;
	cout << "--->(2)doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id].size():" << doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id].size() << endl;
	cout << "--->(3)all_terms_in_current_doc_id_vector_.size():" << all_terms_in_current_doc_id_vector_.size() << endl;
	cout << "--->(4)doc_id_with_all_terms_in_current_doc_dict_[doc_id].size():" << doc_id_with_all_terms_in_current_doc_dict_[doc_id].size() << endl;

	/*
	// for debug ONLY begins...
	map<string,int>::iterator iter;
	int tempCounterForDebug = 0;
	for(iter = doc_id_with_all_terms_in_current_doc_dict_[doc_id].begin(); iter != doc_id_with_all_terms_in_current_doc_dict_[doc_id].end(); iter++){
		tempCounterForDebug += (*iter).second;
	}
	cout << "--->(5)tempCounterForDebug:" << tempCounterForDebug << endl;
	// for debug ONLY ends.
	*/

	// for debug ONLY
	// output the "you_your" freq
	// cout << popularTermWithSecondPartsAndFreqDict_[ "you" ][ "your" ] << endl;

	// if set this comparison value to be -1, it means that all the documents will be evaluated.
	// if set this comparison value to be a specific docID, it means that I just want to debug that specific docID
	// NOTE(review): doc_id is uint32_t, so "doc_id != -1" compares against 0xFFFFFFFF after
	// integral conversion; the branch is taken for every docID except the max sentinel — confirm intent.
	if (doc_id != -1){
		if (!outputXDocONLYFlag){

			cout << "--->Generation Done(real)" << endl;

			// for debug ONLY
			/*
			for(map<string,int>::iterator iterForDocWords = doc_id_with_terms_dict_[doc_id].begin(); iterForDocWords != doc_id_with_terms_dict_[doc_id].end(); iterForDocWords++){
				cout << iterForDocWords->first << " ";
			}
			cout << endl;
			*/

			// deal with 2 data structures here. popularTermPairDict_ and doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_
			// map<uint32_t,map<string,int> > doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_;
			// map<string,map<string,int>  > popularTermWithSecondPartsAndFreqDict_;

			// Edge IDs collected for this document; sorted before being written out below.
			vector<int> currentDocEdgesSortedByID;
			// step1: let's have 2 iters, one for docWords, and one for the second part query term
			for(map<string,int>::iterator iterForDocWords = doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id].begin(); iterForDocWords != doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id].end(); iterForDocWords++){
				if( popularTermWithSecondPartsAndFreqDict_.count( iterForDocWords->first ) > 0 ){
					// this docWord is ALSO a query term
					for(map<string,int>::iterator iterForSecondPartQueryTerm = popularTermWithSecondPartsAndFreqDict_[iterForDocWords->first].begin(); iterForSecondPartQueryTerm != popularTermWithSecondPartsAndFreqDict_[iterForDocWords->first].end(); iterForSecondPartQueryTerm++){
						if(iterForSecondPartQueryTerm->second < thresholdFreq){
							// this edge will NOT be evaluated cause it is lower than the thresholdFreq, and do NOT need to consider.
							// we remove the entry of the second part query term of its associated second part dict
							// node1: iterForDocWords->first
							// node2: iterForSecondPartQueryTerm->first
							// TODO: need to check the logic again because it may have some problem
							// NOTE(review): if iterForSecondPartQueryTerm->first == iterForDocWords->first, this erase
							// targets the element the inner iterator currently points at (same map, same key), which
							// invalidates the iterator — confirm this self-pair case cannot occur here.
							popularTermWithSecondPartsAndFreqDict_[iterForSecondPartQueryTerm->first].erase(iterForDocWords->first);
						}
						else{
							if (doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id].count(iterForSecondPartQueryTerm->first) > 0 and iterForDocWords->first != iterForSecondPartQueryTerm->first){
								// for debug ONLY
								// cout << "	node1: " << iterForDocWords->first << endl;
								// cout << "	node2: " << iterForSecondPartQueryTerm->first << endl;

								// The pair key may have been recorded in either order, so try both orderings.
								string testedKey1 = iterForDocWords->first + "_" + iterForSecondPartQueryTerm->first;
								string testedKey2 = iterForSecondPartQueryTerm->first + "_" + iterForDocWords->first;

								// example for assigning popularTermWithSecondPartsAndFreqDict_
								// popularTermWithSecondPartsAndFreqDict_[ elementsForCurrentLine[0] ][elementsForCurrentLine[2+i]] = popularTermPairWithFreq_[testedKey1];

								if( popularTermPairWithUniqueID_.count(testedKey1) > 0){
									// for debug ONLY
									// cout << "	edge id(in dict): " << popularTermPairWithUniqueID_[key] << endl;
									currentDocEdgesSortedByID.push_back( popularTermPairWithUniqueID_[testedKey1] );
								}
								else if (popularTermPairWithUniqueID_.count(testedKey2) > 0){
									currentDocEdgesSortedByID.push_back( popularTermPairWithUniqueID_[testedKey2] );
								}
								else{
									// Neither ordering of the pair is in the unique-ID dict; emit -1 as a sentinel edge ID.
									// for debug ONLY
									cout << "unexpected situation:" << endl;
									cout << "	node1: " << iterForDocWords->first << endl;
									cout << "	node2: " << iterForSecondPartQueryTerm->first << endl;
									cout << "	edge id(not in dict): " << "-1" << endl;
									currentDocEdgesSortedByID.push_back( -1 );
								}
								// for debug ONLY
								// cout << endl;

								// The specific second part query term word will have its own second part connection.
								// In order NOT to double count the same edge, especially for the case which this edge do NOT have direction.
								// we remove the entry of the second part query term of its associated second part dict
								// node1: iterForDocWords->first
								// node2: iterForSecondPartQueryTerm->first
								popularTermWithSecondPartsAndFreqDict_[iterForSecondPartQueryTerm->first].erase(iterForDocWords->first);
							}
						}
					}
				}
				else{
					// this docWord is NOT a query term
				}
			}

			  // Updated by Wei 2013/08/12 morning
			  if(outputForwardIndexFlag){
				  // logic for building the outputing file: outputFileHandlerForDocumentForwardIndex_ begins...

				  // terms_both_in_current_doc_id_and_query_trace_vector_ has already been sorted
				  // dump the info to hard drive logic
				  if (! switchForCurrDoc){
					  outputFileHandlerForDocumentForwardIndex_ << getPrevDocTrecID() << " ";
				  }
				  else{
					  outputFileHandlerForDocumentForwardIndex_ << getCurrDocTrecID() << " ";
				  }
				  outputFileHandlerForDocumentForwardIndex_ << getCurrDocLen() << " ";
				  outputFileHandlerForDocumentForwardIndex_ << terms_both_in_current_doc_id_and_query_trace_vector_.size() << " ";
				  // One "(term,partialBM25,probGivenQueryTerms,universalProb)" tuple per query-trace term.
				  for(unsigned i = 0; i < terms_both_in_current_doc_id_and_query_trace_vector_.size(); i++){
					  int freq_in_doc = doc_id_with_terms_which_both_appear_in_current_doc_AND_QL_dict_[doc_id][ terms_both_in_current_doc_id_and_query_trace_vector_[i] ];
					  float partialBM25Score = getPartialBM25Score(terms_both_in_current_doc_id_and_query_trace_vector_[i], doc_id, freq_in_doc, 2);
					  float postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber = getPostingUniversalProbabilityGivenTheQueryTermsTimesBigNumber(terms_both_in_current_doc_id_and_query_trace_vector_[i], doc_id,freq_in_doc,partialBM25Score);
					  float postingUniversalProbability = getPostingUniversalProbabilityGivenThePriorProbability(terms_both_in_current_doc_id_and_query_trace_vector_[i], doc_id,postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber);
					  outputFileHandlerForDocumentForwardIndex_ << "("
					  	  	  	  	  	  	  	  	  	  	  	<< terms_both_in_current_doc_id_and_query_trace_vector_[i] << ","
							  	  	  	  	  	  	  	  	    << partialBM25Score << ","
							  	  	  	  	  	  	  	  	    << postingUniversalProbabilityGivenTheQueryTermsTimesBigNumber << ","
							  	  	  	  	  	  	  	  	    << postingUniversalProbability
							  	  	  	  	  	  	  	  	    << ")"
							  	  	  	  	  	  	  	  	    << " ";
				  }
				  outputFileHandlerForDocumentForwardIndex_ << endl;
				  // logic for building the outputing file: outputFileHandlerForDocumentForwardIndex_ ends.
			  }

			  // Updated by Wei 2013/08/12 morning
			  if (outputEdgesFlag){
					// logic for building the outputing file: outputFileHandlerForDocumentEdges_ begins...
					// sort the vector
					sort(currentDocEdgesSortedByID.begin(), currentDocEdgesSortedByID.end());
					// dump the info to hard drive logic
				    if (! switchForCurrDoc){
					  outputFileHandlerForDocumentEdges_ << getPrevDocTrecID() << " ";
				    }
				    else{
					  outputFileHandlerForDocumentEdges_ << getCurrDocTrecID() << " ";
				    }
					outputFileHandlerForDocumentEdges_ << currentDocEdgesSortedByID.size() << " ";
					for(unsigned i = 0; i < currentDocEdgesSortedByID.size(); i++){
						outputFileHandlerForDocumentEdges_ << currentDocEdgesSortedByID[i] << " ";
					}
					outputFileHandlerForDocumentEdges_ << endl;
					// logic for building the outputing file: outputFileHandlerForDocumentEdges_ ends.
			  }

			  // Updated by Wei 2013/08/12
			  if(outputCompletePostingSetFlag){
				  // logic for building the outputing file: outputFileHandlerForDocumentCompletePostingSet_ begins...
				  if (! switchForCurrDoc){
					  outputFileHandlerForDocumentCompletePostingSet_ << getPrevDocTrecID() << " ";
				  }
				  else{
					  outputFileHandlerForDocumentCompletePostingSet_ << getCurrDocTrecID() << " ";
				  }
				  outputFileHandlerForDocumentCompletePostingSet_ << getCurrDocLen() << " ";
				  outputFileHandlerForDocumentCompletePostingSet_ << all_terms_in_current_doc_id_vector_.size() << " ";
				  // One "(term,partialBM25)" tuple per distinct term in the document.
				  for(unsigned i = 0; i < all_terms_in_current_doc_id_vector_.size(); i++){
					  int freq_in_doc = doc_id_with_all_terms_in_current_doc_dict_[doc_id][ all_terms_in_current_doc_id_vector_[i] ];
					  float partialBM25Score = getPartialBM25Score(all_terms_in_current_doc_id_vector_[i], doc_id, freq_in_doc, 2);

					  outputFileHandlerForDocumentCompletePostingSet_ << "("
					  	  	  	  	  	  	  	  	  	  	  	<< all_terms_in_current_doc_id_vector_[i] << ","
							  	  	  	  	  	  	  	  	    << partialBM25Score
							  	  	  	  	  	  	  	  	    << ")"
							  	  	  	  	  	  	  	  	    << " ";
				  }
				  outputFileHandlerForDocumentCompletePostingSet_ << endl;
				  // logic for building the outputing file: outputFileHandlerForDocumentCompletePostingSet_ ends.
			  }
		}

		// Updated by Wei 2013/08/12
		if (outputXdocValueFlag){
			  // logic for building the outputing file: outputFileHandlerForDocumentXdocValue_ begins...
			  if (! switchForCurrDoc){
				  outputFileHandlerForDocumentXdocValue_ << getPrevDocTrecID() << " ";
			  }
			  else{
				  outputFileHandlerForDocumentXdocValue_ << getCurrDocTrecID() << " ";
			  }
			  // for DEBUG
			  // NOTE(review): these two debug lines change the output file format (extra newline +
			  // term count on its own line) — confirm downstream consumers expect this layout.
			  outputFileHandlerForDocumentXdocValue_ << endl;
			  outputFileHandlerForDocumentXdocValue_ << doc_id_with_all_terms_in_current_doc_dict_[doc_id].size() << endl;
			  // outputFileHandlerForDocumentXdocValue_ << getCurrDocLen() << " ";
			  // outputFileHandlerForDocumentXdocValue_ << doc_id_with_all_terms_in_current_doc_dict_[doc_id].size() << " ";

			  /*
			  // toy debug
			  for(unsigned i = 0; i < doc_id_with_all_terms_in_current_doc_dict_[doc_id].size(); i++){
				  outputFileHandlerForDocumentXdocValue_ << "("
						  	  	  	  	  	  	  	  	    << "N/A" << ","
						  	  	  	  	  	  	  	  	 // << doc_id_with_all_terms_in_current_doc_dict_[doc_id][i] << ","
						  	  	  	  	  	  	  	  	    << "0.0"
						  	  	  	  	  	  	  	  	    << ")"
						  	  	  	  	  	  	  	  	    << " ";
			  }
			  outputFileHandlerForDocumentXdocValue_ << endl;
			  */

			  // real thing (completed on 2013/07/30 night)
			  // Accumulate four probability estimates over every distinct term of the document:
			  // gold-standard, 1D predicted, 2D predicted and Good-Turing. Unseen terms fall back
			  // to the various "UNK" entries of the distribution maps.
		  	  float accumulatedGoldStandardRealProbabilityXdocValue = 0.0;
	          float accumulated1DPredictedProbabilityXdocValue = 0.0;
			  float accumulated2DpredictedProbabilityXdocValue = 0.0;
		  	  float accumulatedGoodTurningProbabilityXdocValue = 0.0;
		      map<string,int>::iterator iter;
			  for(iter = doc_id_with_all_terms_in_current_doc_dict_[doc_id].begin(); iter != doc_id_with_all_terms_in_current_doc_dict_[doc_id].end(); iter++){
				  float currentTermGoldStandardRealProbability = 0.0;
				  float currentTerm1DPredictedProbability = 0.0;
				  float currentTerm2DpredictedProbability = 0.0;
				  float currentTermGoodTurningProbability = 0.0;
				  string currentTerm = (*iter).first;
				  if (queryTermsGoodTurningProbabilityDistributionMap_.count( currentTerm ) > 0){
					  // Seen term: read all four distributions directly.
					  currentTermGoldStandardRealProbability = queryTermsGoldStandardRealProbabilityDistributionMap_[ currentTerm ];
					  currentTerm1DPredictedProbability = queryTerms1DPredictedProbabilityDistributionMap_[ currentTerm ];
					  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ currentTerm ];
					  currentTermGoodTurningProbability = queryTermsGoodTurningProbabilityDistributionMap_[ currentTerm ];
				  }
				  else{
					  // assign the probability naturally to 0 because it is just NOT seen
					  currentTermGoldStandardRealProbability = queryTermsGoldStandardRealProbabilityDistributionMap_[ "UNK" ];
					  // assign the probability to what our best guess for the 1D UNK_SUM_0
					  currentTerm1DPredictedProbability = queryTerms1DPredictedProbabilityDistributionMap_[ "UNK_SUM_0" ];

					  // assign the probability also based on the length of the inverted list of that specific term
					  // (the bucket boundaries 1/100/665/2473/9964/25205180 select which UNK_ROW_*_0 entry is used)
					  if (termsWithLengthOfTheInvertedIndexMap_.count(currentTerm) > 0){
						  if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 1 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] < 100){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_1_0" ];
						  }
						  else if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 100 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] < 665){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_2_0" ];
						  }
						  else if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 665 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] < 2473){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_3_0" ];
						  }
						  else if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 2473 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] < 9964){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_4_0" ];
						  }
						  else if (termsWithLengthOfTheInvertedIndexMap_[currentTerm] >= 9964 and termsWithLengthOfTheInvertedIndexMap_[currentTerm] <= 25205180){
							  currentTerm2DpredictedProbability = queryTerms2DPredictedProbabilityDistributionMap_[ "UNK_ROW_5_0" ];
						  }
						  else{
							  cout << "Can NOT assign the term: '" << currentTerm << "' a probability,MARK3" << "termsWithLengthOfTheInvertedIndexMap_[currentTerm]:" << termsWithLengthOfTheInvertedIndexMap_[currentTerm] << endl;
						  }
					  }
					  else{
						  cout << "Can NOT find the entry for the term '" << currentTerm << "' in the termsWithLengthOfTheInvertedIndexMap_,MARK2" << endl;
						  currentTerm2DpredictedProbability = 0.0;
					  }


					  // assign the unseen good turing probability to the unseen words
					  currentTermGoodTurningProbability = queryTermsGoodTurningProbabilityDistributionMap_[ "UNK" ];
				  }

				  // for DEBUG
				  outputFileHandlerForDocumentXdocValue_ << "---> " << currentTerm << " " << currentTermGoldStandardRealProbability << " " << currentTerm1DPredictedProbability << " " << currentTerm2DpredictedProbability << " " << currentTermGoodTurningProbability << endl;

				  accumulatedGoldStandardRealProbabilityXdocValue += currentTermGoldStandardRealProbability;
				  accumulated1DPredictedProbabilityXdocValue += currentTerm1DPredictedProbability;
				  accumulated2DpredictedProbabilityXdocValue += currentTerm2DpredictedProbability;
				  accumulatedGoodTurningProbabilityXdocValue += currentTermGoodTurningProbability;
			  }

			  // current version
			  outputFileHandlerForDocumentXdocValue_ << accumulatedGoldStandardRealProbabilityXdocValue << " "
					  	  	  	  	  	  	  	  	 << accumulated1DPredictedProbabilityXdocValue << " "
					  	  	  	  	  	  	  	  	 << accumulated2DpredictedProbabilityXdocValue << " "
					  	  	  	  	  	  	  	  	 << accumulatedGoodTurningProbabilityXdocValue << endl;
			  // logic for building the outputing file: outputFileHandlerForDocumentXdocValue_ ends.
		}

	    // Updated by Wei 2013/09/26 night at school
	    if(outputDistinctSetOfTermsFlag){
	    	// logic for building the outputing file: outputFileHandlerForDocumentDistinctSetOfTerms_ begins...
	    	if (! switchForCurrDoc){
	    		outputFileHandlerForDocumentDistinctSetOfTerms_ << getPrevDocTrecID() << " ";
	    	}
	    	else{
	    		outputFileHandlerForDocumentDistinctSetOfTerms_ << getCurrDocTrecID() << " ";
	    	}
	    	outputFileHandlerForDocumentDistinctSetOfTerms_ << doc_id << " ";
	    	outputFileHandlerForDocumentDistinctSetOfTerms_ << getCurrDocLen() << " ";
	    	outputFileHandlerForDocumentDistinctSetOfTerms_ << doc_id_with_all_terms_in_current_doc_dict_[doc_id].size() << " ";

	    	// output the terms
	    	map<string,int>::iterator iter;
	    	for(iter = doc_id_with_all_terms_in_current_doc_dict_[doc_id].begin(); iter != doc_id_with_all_terms_in_current_doc_dict_[doc_id].end(); iter++){
	    		string currentTerm = (*iter).first;
	    		outputFileHandlerForDocumentDistinctSetOfTerms_ << currentTerm << " ";
	    	}
	    	outputFileHandlerForDocumentDistinctSetOfTerms_ << endl;
	    	// logic for building the outputing file: outputFileHandlerForDocumentDistinctSetOfTerms_ ends.
	    }

		// CLEANING PROCESS
		// (1)clear the vector terms_both_in_current_doc_id_and_query_trace_vector_
		terms_both_in_current_doc_id_and_query_trace_vector_.clear();

		// (2)clear the vector currentDocEdgesSortedByID
        // I don't know whether there will be a problem here. Updated by Wei 2013/07/30
		// currentDocEdgesSortedByID.clear();

		// (3)clear the vector all_terms_in_current_doc_id_vector_
		all_terms_in_current_doc_id_vector_.clear();

		// (4)clear the used variable: doc_id_with_all_terms_in_current_doc_dict_[doc_id]
		doc_id_with_all_terms_in_current_doc_dict_[doc_id].clear();
	}
	else{
		cout << "--->Generation Done(fake)" << endl;
		// There is currently NO LOGIC for this
	}

	cout << "--->Erasion Done(real)" << endl;

	// new version
	ClearDocIDRelatedTermDicts(doc_id);
	ClearTermsBothInCurrentDocIDAndQueryTraceVector();

	// old version
	// doc_id_with_terms_dict_.erase(doc_id); // erasing the whole doc by key
	// terms_both_in_current_doc_id_and_query_trace_vector_.clear();

	// Update running statistics: per-document time, total and average over all processed docs.
	num_documents_processed_ += 1;
	current_graph_generation_time_ = generate_graph_time.GetElapsedTime();
	overall_total_graph_generation_time_ += current_graph_generation_time_;
	average_graph_generation_time_ = overall_total_graph_generation_time_ / num_documents_processed_;

	// option1: use the GetDefaultLogger().Log(...)
	/*
	GetDefaultLogger().Log("Number Of Documents Processed: " + Stringify(num_documents_processed_), false);
	GetDefaultLogger().Log("Current Graph Generation Time Elapsed: " + Stringify(current_graph_generation_time_) + " seconds", false);
	GetDefaultLogger().Log("Overall Total Graph Generation Time Elapsed: " + Stringify(overall_total_graph_generation_time_) + " seconds", false);
	GetDefaultLogger().Log("Average Graph Generation Time Elapsed: " + Stringify(average_graph_generation_time_) + " seconds", false);
	*/

	// option2: use the cout output function
	cout << "--->Number Of Documents Processed: " << num_documents_processed_ << endl;
	cout << "--->Current Graph Generation Time Elapsed: " << current_graph_generation_time_ << " seconds"<< endl;
	cout << "--->Overall Total Graph Generation Time Elapsed: " << overall_total_graph_generation_time_ << " seconds"<< endl;
	cout << "--->Average Graph Generation Time Elapsed: " << average_graph_generation_time_ << " seconds"<< endl;

	cout << endl;
}


/**************************************************************************************************************************************************************
 * PostingCollectionController
 *
 **************************************************************************************************************************************************************/
// Starts with run #0; the initial in-memory posting collection begins at docID 0.
// The document map is written to "index.dmap_basic" / "index.dmap_extended".
PostingCollectionController::PostingCollectionController() :
  index_count_(0),
  posting_collection_(new PostingCollection(index_count_, 0)),
  document_map_writer_("index.dmap_basic", "index.dmap_extended"),
  posting_count_(0) {
}

// Releases the current in-memory posting collection. If Finish() was called
// first, posting_collection_ is NULL and this delete is a no-op.
PostingCollectionController::~PostingCollectionController() {
  delete posting_collection_;
}

// Flushes the remaining in-memory postings to disk as the final run.
// 'false' indicates a normal (not out-of-memory) dump, so no overflow
// postings are collected. The collection is then destroyed and NULLed so the
// destructor's delete is a no-op.
void PostingCollectionController::Finish()
{
  posting_collection_->DumpRun(false);
  posting_count_ += posting_collection_->posting_count();
  delete posting_collection_;
  posting_collection_ = NULL;
}

// Inserts a posting into the current in-memory posting collection. If the
// collection is full (insert fails), the current run is dumped to disk, the
// memory pool is reset, and a fresh PostingCollection is started at this
// posting's docID. Any overflow postings (same docID as the leftover posting)
// are migrated into the new collection first, then the leftover posting itself,
// so that docIDs never overlap between runs.
void PostingCollectionController::InsertPosting(const Posting& posting) {
  // If the original posting collection is not able to insert the posting, a new posting collection must be started.
  if (!posting_collection_->InsertPosting(posting))
  {
    // Dump the full run; 'true' makes DumpRun() collect the overflow postings.
    posting_collection_->DumpRun(true);
    posting_count_ += posting_collection_->posting_count();

    GetMemoryPoolManager().Reset();
    PostingCollection* new_posting_collection = new PostingCollection(++index_count_, posting.doc_id());

    // Move the overflow postings (same docID as the leftover posting) into the new collection.
    PostingCollection::OverflowPostings overflow_postings = posting_collection_->GetOverflowPostings();
    if (overflow_postings.postings() != NULL && overflow_postings.num_postings() > 0) {
      for (int i = 0; i < overflow_postings.num_postings(); ++i) {
        bool status = new_posting_collection->InsertPosting(overflow_postings.postings()[i]);
        if (!status)
          assert(false);
      }
    }

    // Add the leftover posting to the new index last, since it was inserted after the overflow postings.
    // The fresh collection must accept it; previously this return value was silently ignored,
    // which would have dropped the posting on failure.
    bool status = new_posting_collection->InsertPosting(posting);
    if (!status)
      assert(false);

    delete posting_collection_;
    posting_collection_ = new_posting_collection;
  }
}

// Records the length of document 'doc_id' in the document map.
void PostingCollectionController::SaveDocLength(int doc_length, uint32_t doc_id) {
  document_map_writer_.AddDocLen(doc_length, doc_id);
}

// Records the URL of document 'doc_id' in the document map. 'url' need not be NUL-terminated; 'url_len' gives its length.
void PostingCollectionController::SaveDocUrl(const char* url, int url_len, uint32_t doc_id) {
  document_map_writer_.AddDocUrl(url, url_len, doc_id);
}

// Records the TREC document number of document 'doc_id' in the document map. 'docno' need not be NUL-terminated.
void PostingCollectionController::SaveDocno(const char* docno, int docno_len, uint32_t doc_id) {
  document_map_writer_.AddDocNum(docno, docno_len, doc_id);
}

/**************************************************************************************************************************************************************
 * PostingCollection
 *
 **************************************************************************************************************************************************************/
// Builds one in-memory posting run. 'index_count' numbers this run's output files;
// 'starting_doc_id' is the first docID that will appear in this run.
// Configuration drives the hash table size, whether positions/contexts are indexed,
// and which coding policy each stream (docID/frequency/position/block header) uses.
// prev_doc_id_ starts at uint32_t max so the very first posting is always treated as a new document.
PostingCollection::PostingCollection(int index_count, uint32_t starting_doc_id) :
  kHashTableSize(atol(Configuration::GetConfiguration().GetValue(config_properties::kHashTableSize).c_str())),
  term_block_table_(kHashTableSize),
  overflow_postings_(NULL),
  first_doc_id_in_index_(starting_doc_id),
  last_doc_id_in_index_(0),
  num_overflow_postings_(-1),
  prev_doc_id_(numeric_limits<uint32_t>::max()),
  prev_doc_length_(0),
  total_document_lengths_(0),
  total_num_docs_(0),
  total_unique_num_docs_(0),
  index_count_(index_count),
  posting_count_(0),
  // The redundant '? true : false' ternaries were dropped; the comparison is already a bool.
  kIndexPositions(Configuration::GetConfiguration().GetValue(config_properties::kIncludePositions) == "true"),
  kIndexContexts(Configuration::GetConfiguration().GetValue(config_properties::kIncludeContexts) == "true"),
  doc_id_compressor_(CodingPolicy::kDocId),
  frequency_compressor_(CodingPolicy::kFrequency),
  position_compressor_(CodingPolicy::kPosition),
  block_header_compressor_(CodingPolicy::kBlockHeader) {
  // A non-positive hash table size is a fatal misconfiguration.
  if (kHashTableSize <= 0) {
    GetErrorLogger().Log("Incorrect configuration value for '" + string(config_properties::kHashTableSize) + "'", true);
  }

  // Load each coding policy from configuration; LoadPolicyAndCheck aborts on an invalid policy.
  coding_policy_helper::LoadPolicyAndCheck(doc_id_compressor_, Configuration::GetConfiguration().GetValue(config_properties::kIndexingDocIdCoding), "docID");
  coding_policy_helper::LoadPolicyAndCheck(frequency_compressor_, Configuration::GetConfiguration().GetValue(config_properties::kIndexingFrequencyCoding), "frequency");
  coding_policy_helper::LoadPolicyAndCheck(position_compressor_, Configuration::GetConfiguration().GetValue(config_properties::kIndexingPositionCoding), "position");
  coding_policy_helper::LoadPolicyAndCheck(block_header_compressor_, Configuration::GetConfiguration().GetValue(config_properties::kIndexingBlockHeaderCoding), "block header");
}

// overflow_postings_ is allocated with new[] in DumpRun() (when there are
// overflow postings), so it must be released with delete[]. NULL-safe.
PostingCollection::~PostingCollection() {
  delete[] overflow_postings_;
}

// Inserts one posting into the in-memory run. Returns true on success; returns
// false when the memory pool is exhausted, in which case this posting becomes
// the "leftover" posting and last_doc_id_in_index_ is rolled back so its docID
// belongs to the next run. Postings are assumed to arrive with monotonically
// non-decreasing docIDs.
bool PostingCollection::InsertPosting(const Posting& posting) {
  // New docID: reset the overflow-posting counter for this docID.
  if (posting.doc_id() != last_doc_id_in_index_) {
    num_overflow_postings_ = -1;
    last_doc_id_in_index_ = posting.doc_id();
  }

  // Counts postings seen so far for the current docID (potential overflow postings).
  ++num_overflow_postings_;

  TermBlock* curr_tb = term_block_table_.Insert(posting.term(), posting.term_len());
  // Set these properties only on a newly created term block.
  if (curr_tb->block_list() == NULL) {
    curr_tb->set_index_positions(kIndexPositions);
    curr_tb->set_index_contexts(kIndexContexts);
  }

  bool status = curr_tb->AddPosting(posting);
  if (status) {
    // Find the length of each document based on the position of the last posting from a particular docID.
    // Also tracks the unique number of documents we have processed.
    // NOTE(review): on the new-docID branch prev_doc_length_ is not refreshed from
    // posting.position(), so a document with exactly one posting appears to reuse
    // the previous document's length at the next boundary — confirm this is intended.
    if (posting.doc_id() != prev_doc_id_) {
      total_document_lengths_ += prev_doc_length_;
      prev_doc_id_ = posting.doc_id();
      ++total_unique_num_docs_;
    } else {
      prev_doc_length_ = posting.position();
    }
    ++posting_count_;
  } else {
    // This is the leftover posting, so we won't have any same docIDs as the leftover posting in this index.
    // This assumes that the postings inserted into the index all have monotonically increasing docIDs, which should always be the case.
    // All postings with the leftover posting docID will be saved and inserted into the index for the next run.
    last_doc_id_in_index_ = posting.doc_id() - 1;
    if (posting.doc_id() == prev_doc_id_) {
      --total_unique_num_docs_;
    }
  }

  return status;
}

// If 'out_of_memory_dump' is true, then we need to look for overflow postings, otherwise we don't.
// The leftover posting (there is only one), is the posting that we tried inserting, but didn't have enough room in the
// memory pool to insert, so it will have to go into the next index we write out.  The overflow postings are those postings
// that have the same docID as the leftover posting.  These postings will also have to be inserted into the next index we
// write out. The reason for this is to make sure that docIDs don't overlap between indices; this helps to make merging them easier.
// Writes this in-memory run to disk: sorts the term blocks lexicographically,
// decodes each term's postings chunk by chunk, re-encodes them through the
// IndexBuilder, and writes the run's meta file. When 'out_of_memory_dump' is
// true, postings sharing the leftover posting's docID are extracted into
// overflow_postings_ (retrieved later via GetOverflowPostings()) instead of
// being written, so docIDs never overlap between consecutive runs.
void PostingCollection::DumpRun(bool out_of_memory_dump) {
  GetDefaultLogger().Log("Dumping Run # " + Stringify(index_count_), false);

  // Set to the range of docIDs in this index.
  total_num_docs_ = last_doc_id_in_index_ - first_doc_id_in_index_ + 1;

  if (!out_of_memory_dump) {
    // The total document lengths must be adjusted since we never see a new docID.
    total_document_lengths_ += prev_doc_length_;
  }

  int num_term_blocks = term_block_table_.num_elements();
  TermBlock** term_blocks = new TermBlock*[num_term_blocks];

  // Collect the term blocks into a flat array so they can be sorted.
  int i = 0;
  for (MoveToFrontHashTable<TermBlock>::Iterator it = term_block_table_.begin(); it != term_block_table_.end(); ++it) {
    TermBlock* curr_term_block = *it;

    // We don't include the TermBlock with the leftover posting if it has an empty block list (dereferencing it will cause a SEG fault).
    // Note that only the TermBlock that contains the leftover posting could possibly have a NULL block list
    // (since the TermBlock was initialized, but there was no room in the memory pool to insert the posting, that is, if this is the only posting for this term).
    if (curr_term_block->block_list() == NULL) {
      --num_term_blocks;
    } else {
      term_blocks[i++] = curr_term_block;
    }
  }

  sort(&term_blocks[0], &term_blocks[num_term_blocks], TermBlockCompare());

  IndexFiles curr_index_files = IndexFiles(0, index_count_);

  // From the posting collection stage to the index builder stage.
  IndexBuilder* index_builder = new IndexBuilder(curr_index_files.lexicon_filename().c_str(), curr_index_files.index_filename().c_str(),
                                                 block_header_compressor_);

  // Since the following input arrays will be used as input to the various coding policies, and the coding policy might apply a blockwise coding compressor
  // (which would pad the array to the block size), the following rules apply:
  // For the docID and frequency arrays, the block size is expected to be the chunk size.
  // For the position and context arrays, the block size is expected to be a multiple of the maximum positions/contexts possible for a particular docID.
  // Some alternative designs would be to define a fixed maximum block size and make sure the arrays are properly sized for this maximum
  // (the position/context arrays in particular).
  // Another alternative is to make these arrays dynamically allocated.
  assert(doc_id_compressor_.block_size() == 0 || ChunkEncoder::kChunkSize == doc_id_compressor_.block_size());
  assert(frequency_compressor_.block_size() == 0 || ChunkEncoder::kChunkSize == frequency_compressor_.block_size());
  assert(position_compressor_.block_size() == 0 || (ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties) % position_compressor_.block_size() == 0);

  // Scratch buffers reused for every chunk of every term.
  uint32_t doc_ids[ChunkEncoder::kChunkSize];
  uint32_t frequencies[ChunkEncoder::kChunkSize];
  uint32_t positions[ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties];
  unsigned char contexts[ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties];

  // NOTE(review): a previously allocated overflow_postings_ buffer would leak here;
  // in the current usage DumpRun appears to run once per PostingCollection — confirm before reuse.
  overflow_postings_ = NULL;
  int overflow_postings_offset = 0;

  if (!out_of_memory_dump)
    num_overflow_postings_ = 0;

  if (num_overflow_postings_ > 0)
    overflow_postings_ = new Posting[num_overflow_postings_];

  int num_overflow_postings_remaining = num_overflow_postings_;

  // Go through each term in lexicographical order.
  for (i = 0; i < num_term_blocks; ++i) {
    TermBlock* curr_term_block = term_blocks[i];

    // Save the block list since we're gonna be modifying it.
    // We need to restore it before deleting the term block to avoid a memory leak.
    BlockList* start_of_list = curr_term_block->block_list();

    // Set our position in the term block to the beginning.
    curr_term_block->ResetCurrBlockPosition();

    DecodedPosting prev_posting;
    bool prev_posting_valid = false;

    uint32_t prev_chunk_last_doc_id = 0;
    int num_docs;
    int num_properties;
    int num_overflow_postings;

    do {
      // Collect all chunks of the current term.
      num_docs = ChunkEncoder::kChunkSize;
      num_properties = ChunkEncoder::kChunkSize * ChunkEncoder::kMaxProperties;
      num_overflow_postings = num_overflow_postings_remaining;

      // DecodePostings fills the scratch buffers with up to one chunk of postings and
      // deposits any postings beyond last_doc_id_in_index_ into the overflow buffer.
      bool have_chunk = curr_term_block->DecodePostings(doc_ids, frequencies, positions, contexts, &num_docs, &num_properties, &prev_posting,
                                                        &prev_posting_valid, overflow_postings_ + overflow_postings_offset, &num_overflow_postings,
                                                        last_doc_id_in_index_ + 1, prev_chunk_last_doc_id);

      if (num_overflow_postings > 0) {
        // The last decoded docID belongs to the overflow set: drop it from the chunk
        // and tag the extracted overflow postings with this term.
        num_properties -= min(frequencies[num_docs - 1], static_cast<uint32_t> (ChunkEncoder::kMaxProperties));
        num_docs -= 1;

        for (int j = overflow_postings_offset; j < (overflow_postings_offset + num_overflow_postings); ++j) {
          overflow_postings_[j].set_term(curr_term_block->term());
          overflow_postings_[j].set_term_len(curr_term_block->term_len());
        }

        num_overflow_postings_remaining -= num_overflow_postings;
        overflow_postings_offset += num_overflow_postings;
      }

      if (have_chunk && num_docs > 0) {
        assert(num_properties > 0);
        // Positions/contexts are only passed through when the index is configured to store them.
        ChunkEncoder chunk(doc_ids, frequencies, (kIndexPositions ? positions : NULL), (kIndexContexts ? contexts : NULL), num_docs, num_properties,
                           prev_chunk_last_doc_id, doc_id_compressor_, frequency_compressor_, position_compressor_);
        prev_chunk_last_doc_id = chunk.last_doc_id();
        index_builder->Add(chunk, curr_term_block->term(), curr_term_block->term_len());
      }
    } while (num_docs == ChunkEncoder::kChunkSize);  // a short chunk means the term's postings are exhausted

    // Restore block list.
    curr_term_block->set_block_list(start_of_list);
  }

  index_builder->Finalize();

  WriteMetaFile(index_builder, curr_index_files.meta_info_filename());

  delete[] term_blocks;
  delete index_builder;
}

// Writes the index meta file for the finished index.
// Records the index layout (whether positions/contexts are included), the
// coding policies copied through from the indexing configuration, and the
// aggregate collection/index statistics gathered during the indexing run.
// All values are serialized through Stringify(), which renders a value the
// same way the previous per-value ostringstream code did; the key order is
// unchanged so existing meta file readers are unaffected.
void PostingCollection::WriteMetaFile(const IndexBuilder* index_builder, const string& meta_filename) {
  KeyValueStore index_metafile;

  // Index layout properties.
  index_metafile.AddKeyValuePair(meta_properties::kIncludesPositions, Stringify(kIndexPositions));
  index_metafile.AddKeyValuePair(meta_properties::kIncludesContexts, Stringify(kIndexContexts));

  // Coding policies, copied through from the indexing configuration.
  index_metafile.AddKeyValuePair(meta_properties::kIndexDocIdCoding,
                                 Stringify(Configuration::GetConfiguration().GetValue(config_properties::kIndexingDocIdCoding)));
  index_metafile.AddKeyValuePair(meta_properties::kIndexFrequencyCoding,
                                 Stringify(Configuration::GetConfiguration().GetValue(config_properties::kIndexingFrequencyCoding)));
  index_metafile.AddKeyValuePair(meta_properties::kIndexPositionCoding,
                                 Stringify(Configuration::GetConfiguration().GetValue(config_properties::kIndexingPositionCoding)));
  index_metafile.AddKeyValuePair(meta_properties::kIndexBlockHeaderCoding,
                                 Stringify(Configuration::GetConfiguration().GetValue(config_properties::kIndexingBlockHeaderCoding)));

  // Index structure statistics.
  index_metafile.AddKeyValuePair(meta_properties::kTotalNumChunks, Stringify(index_builder->total_num_chunks()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalNumPerTermBlocks, Stringify(index_builder->total_num_per_term_blocks()));

  // Collection statistics accumulated by this posting collection.
  index_metafile.AddKeyValuePair(meta_properties::kTotalDocumentLengths, Stringify(total_document_lengths_));
  index_metafile.AddKeyValuePair(meta_properties::kTotalNumDocs, Stringify(total_num_docs_));
  index_metafile.AddKeyValuePair(meta_properties::kTotalUniqueNumDocs, Stringify(total_unique_num_docs_));
  index_metafile.AddKeyValuePair(meta_properties::kFirstDocId, Stringify(first_doc_id_in_index_));
  index_metafile.AddKeyValuePair(meta_properties::kLastDocId, Stringify(last_doc_id_in_index_));
  index_metafile.AddKeyValuePair(meta_properties::kDocumentPostingCount, Stringify(posting_count_));

  // Index size statistics reported by the builder.
  index_metafile.AddKeyValuePair(meta_properties::kIndexPostingCount, Stringify(index_builder->posting_count()));
  index_metafile.AddKeyValuePair(meta_properties::kNumUniqueTerms, Stringify(index_builder->num_unique_terms()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalHeaderBytes, Stringify(index_builder->total_num_block_header_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalDocIdBytes, Stringify(index_builder->total_num_doc_ids_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalFrequencyBytes, Stringify(index_builder->total_num_frequency_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalPositionBytes, Stringify(index_builder->total_num_positions_bytes()));
  index_metafile.AddKeyValuePair(meta_properties::kTotalWastedBytes, Stringify(index_builder->total_num_wasted_space_bytes()));

  index_metafile.WriteKeyValueStore(meta_filename.c_str());
}

// TODO: If the load factor in the hash table is too high, we can dump the run to disk.
// A high load factor means we might have to look through a long chain of TermBlocks to find the right one, adversely affecting performance.
// Although, this is mitigated by the move-to-front extension to the hash table, so the load factor can get fairly high while still achieving good performance.
// Returns true once the term hash table's load factor exceeds kLoadFactor,
// signaling that the in-memory run should be dumped to disk (see the comment
// above: a high load factor means long TermBlock chains must be traversed,
// degrading lookup performance, mitigated somewhat by move-to-front).
//
// BUG FIX: the previous implementation returned 'false' when the load factor
// exceeded the threshold and 'true' otherwise — the inverse of what the
// function name and the surrounding comments describe. It now reports true
// exactly when the threshold has been reached.
bool PostingCollection::ReachedThreshold() const {
  const int kLoadFactor = 20;  // Picked arbitrarily; could experiment with reasonable value if used.
  double load_factor = static_cast<double> (term_block_table_.num_elements()) / static_cast<double> (kHashTableSize);
  return load_factor > kLoadFactor;
}

/*
// Updated by Wei 2013/03/13. This function is currently NO USE at all.
// I use the Method1 and Method3 instead.
void EdgeCollectionController::GenerateDocumentPostingGraphMethod2(uint32_t doc_id, int thresholdFreq){
	// Start timing Generate Document Posting Graph process.
	Timer generate_graph_time;

	cout << "--->Generating document posting dependency graph for doc_id:" << doc_id << " " << "using Graph Generation Method2" << endl;
	cout << "--->terms_both_in_current_doc_id_and_query_trace_vector_.size():" << terms_both_in_current_doc_id_and_query_trace_vector_.size() << endl;
	cout << "--->doc_id_with_terms_dict_[doc_id].size():" << doc_id_with_terms_dict_[doc_id].size() << endl;

	// for debug ONLY
	// output the "you_your" freq
	// cout << popularTermWithSecondPartsAndFreqDict_[ "you" ][ "your" ] << endl;

	// if set this comparison value to be != -1, it means that all the documents will be evaluated.
	// if set this comparison value to be == a specific docID, it means that I just want to debug that specific docID
	if (doc_id != -1){
		  cout << "--->Generation Done(real)" << endl;
		  // cout << "step1:do the combinations for the vector current_doc_id_with_terms_vector_" << endl;

		  sort(terms_both_in_current_doc_id_and_query_trace_vector_.begin(),terms_both_in_current_doc_id_and_query_trace_vector_.end()); // sort the vector

		  const int r = 2;
		  int N = 0;
		  do {
		      ++N;
		      string term_pair = terms_both_in_current_doc_id_and_query_trace_vector_[0] + "_" + terms_both_in_current_doc_id_and_query_trace_vector_[1];
			  current_doc_id_with_term_pair_vector_.push_back(term_pair);
		  } while (next_combination(terms_both_in_current_doc_id_and_query_trace_vector_.begin(), terms_both_in_current_doc_id_and_query_trace_vector_.begin() + r, terms_both_in_current_doc_id_and_query_trace_vector_.end()));

		  // for debug ONLY
		  // cout << "Found " << N << " combinations of size " << r << " without repetitions"
		  //		<< " from a set of " << current_doc_id_with_terms_vector_.size() << " elements." << endl;


		  // for debug ONLY
		  // cout << "mark1" << endl;

		  sort(current_doc_id_with_term_pair_vector_.begin(), current_doc_id_with_term_pair_vector_.end());
		  terms_both_in_current_doc_id_and_query_trace_vector_.clear();

		  // for debug ONLY
		  // cout << "mark2" << endl;

		  // let's do the intersection process

		  // TODO: example of how to use the max(3.14,2.73)
		  vector<string> intersectionResults( max( current_doc_id_with_term_pair_vector_.size(), termPairsAboveTheThresholdVector_.size() ) );
		  vector<string>::iterator it;

		  // let's do the intersection process

		  // needed to be debugged
		  // current_doc_id_with_term_pair_vector_, current_doc_id_with_term_pair_vector_ + current_doc_id_with_term_pair_vector_.size()
		  // it = set_intersection(current_doc_id_with_term_pair_vector_, current_doc_id_with_term_pair_vector_ + current_doc_id_with_term_pair_vector_.size(), iter->second, iter->second + iter->second.size(), intersectionResults.begin());


		  // example of how to use the set_intersection
		  // vector<int> first;
		  // first.push_back(5);
		  // first.push_back(10);
		  // first.push_back(15);
		  // first.push_back(20);
		  // first.push_back(25);

		  // vector<int> second;
		  // second.push_back(10);
		  // second.push_back(20);
		  // second.push_back(30);
		  // second.push_back(40);
		  // second.push_back(50);

		  // vector<int> intersectionResultsForInt;
		  // std::set_intersection (first.begin(), first.end(), second.begin(), second.end(), intersectionResultsForInt.begin());


		  // for debug ONLY
		  // cout << "current_doc_id_with_term_pair_vector_.size():" << current_doc_id_with_term_pair_vector_.size() << endl;
		  // cout << "termPairsAboveTheThresholdVector_.size():" << termPairsAboveTheThresholdVector_.size() << endl;



		  // it = set_intersection(current_doc_id_with_term_pair_vector_.begin(), current_doc_id_with_term_pair_vector_.end(), iter->second.begin(), iter->second.end(), intersectionResults.begin());
		  it = set_intersection(current_doc_id_with_term_pair_vector_.begin(), current_doc_id_with_term_pair_vector_.end(), termPairsAboveTheThresholdVector_.begin(), termPairsAboveTheThresholdVector_.end(), intersectionResults.begin());
		  // remember to empty the repetitive use of the data structure: current_doc_id_with_term_pair_vector_
		  current_doc_id_with_term_pair_vector_.clear();

		  intersectionResults.resize( it - intersectionResults.begin());



		  // for debug ONLY
		  // cout << "mark3" << endl;
		  // cout << "intersectionResults.size():" << intersectionResults.size() << endl;


		  // Put the real term pairs into numbers and integers
		  // Here, I assume all the term pairs have been ordered lexicographically (in practice, there is actually NO ordering guarantee)
		  vector<int> currentDocEdgesSortedByID;

		  // do the actual transfer
		  for(int tempCounter = 0; tempCounter < intersectionResults.size(); tempCounter++){
			  if( popularTermPairWithUniqueID_.count( intersectionResults[tempCounter] ) > 0){
					// for debug ONLY
					// cout << "	edge id(in dict): " << popularTermPairWithUniqueID_[key] << endl;
					currentDocEdgesSortedByID.push_back( popularTermPairWithUniqueID_[ intersectionResults[tempCounter] ] );
			  }
			  else{
					// for debug ONLY
					cout << "unexpected situation:" << endl;
					cout << "	the edge: " << intersectionResults[tempCounter] << endl;
					// I have to put an -1 into the vector
					currentDocEdgesSortedByID.push_back( -1 );
			  }
		  }


		  // sort the vector
		  sort(currentDocEdgesSortedByID.begin(), currentDocEdgesSortedByID.end());

		  // dump the info to hard drive logic
		  outputFileHandlerForDocumentEdges_ << doc_id << " ";
		  outputFileHandlerForDocumentEdges_ << currentDocEdgesSortedByID.size() << " ";
		  for(unsigned i = 0; i < currentDocEdgesSortedByID.size(); i++){
				outputFileHandlerForDocumentEdges_ << currentDocEdgesSortedByID[i] << " ";
		  }
		  outputFileHandlerForDocumentEdges_ << endl;


	}
	else{
		cout << "--->Generation Done(fake)" << endl;
		// There is currently NO LOGIC for this
	}

	cout << "--->Erasion Done(real)" << endl;
	doc_id_with_terms_dict_.erase(doc_id); // erasing the whole doc by key
	terms_both_in_current_doc_id_and_query_trace_vector_.clear();

	num_documents_processed_ += 1;
	current_graph_generation_time_ = generate_graph_time.GetElapsedTime();
	overall_total_graph_generation_time_ += current_graph_generation_time_;
	average_graph_generation_time_ = overall_total_graph_generation_time_ / num_documents_processed_;

	GetDefaultLogger().Log("Number Of Documents Processed: " + Stringify(num_documents_processed_), false);
	GetDefaultLogger().Log("Current Graph Generation Time Elapsed: " + Stringify(current_graph_generation_time_) + " seconds", false);
	GetDefaultLogger().Log("Overall Total Graph Generation Time Elapsed: " + Stringify(overall_total_graph_generation_time_) + " seconds", false);
	GetDefaultLogger().Log("Average Graph Generation Time Elapsed: " + Stringify(average_graph_generation_time_) + " seconds", false);
	cout << endl;
}
*/
