//==============================================================================================================================================================
// Author(s): Roman Khmelichek, Wei Jiang
//
//==============================================================================================================================================================

#ifndef INDEX_LAYERIFY_H_
#define INDEX_LAYERIFY_H_

// Enables debugging output for this module.
// #define INDEX_LAYERIFY_DEBUG

#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <map>

// for the machine pangolin
// #include <python2.6/Python.h>

// for the machine dodo
#include <python2.7/Python.h>

#include <sstream>
#include <stdint.h>
#include <string>
#include <vector>

#include "coding_policy.h"
#include "document_map.h"
#include "index_layout_parameters.h"
#include "index_util.h"
#include "sofia-ml-methods.h"


using namespace std;


typedef struct MetaInfo_SET {
	// Seems that all the variables init here are necessary
	// Per-document bookkeeping carried as the first element of the heap
	// tuples (docPostingsTuple / docPostingsOptimizedTuple) defined below.
	uint32_t current_doc_id;                            // docID this posting array belongs to
	double current_largest_value_of_the_posting_array;  // largest posting value; the heap comparators key on this field
	int current_size_of_the_posting_array_filled;       // number of slots currently filled
	int current_size_of_the_posting_array_allocated;    // capacity allocated for the posting array
	int current_beginning_index_of_the_posting_array;   // index of the first not-yet-consumed slot
	double selected_Xdoc;                               // NOTE(review): Xdoc tallies — exact semantics not visible in this header; confirm in .cc
	double all_Xdoc;
} MetaInfo_SET;

// Updated by Wei 2013/09/14 afternoon at school
// Still in test and I am NOT 100% sure that is the right way to implement stuff
// The tuple will have 2 values.
// The 1st value is a MetaInfo_SET struct; its current_largest_value_of_the_posting_array field records the largest probability among the postings inside the document
// The 2nd value is a pointer to the actual array of postings
// This data structure also follows Prof's practice
typedef std::pair<MetaInfo_SET, IndexDocEntry*> docPostingsTuple;

// Updated by Wei 2013/09/20 morning at school
// The tuple will have 2 values.
// The 1st value is a struct called MetaInfo_SET
// The 2nd value is a pointer to the actual array of postings for that document
typedef std::pair<MetaInfo_SET, IndexDocOptimizedEntry*> docPostingsOptimizedTuple;

/**************************************************************************************************************************************************************
 * DocPostingOptimizedCompare
 * Updated by Wei 2013/09/21 afternoon at school
 * If the room here is NOT big enough for the logic, then move the logic to the .cc file:)
 **************************************************************************************************************************************************************/
class DocPostingOptimizedCompareClass{
public:
	// Scorer/comparator over IndexDocOptimizedEntry items. The scoring
	// member functions (getStaticPartScore / getDynamicPartScore / score /
	// operator()) are defined in the corresponding .cc file.
	DocPostingOptimizedCompareClass(const map<uint32_t, float>& termID_with_their_first_factor_probability_map, float a, float b, float currentSelectXDOC, float currentALLXDOC);

	// Refresh the Xdoc counters consulted by the dynamic score part.
	void setCurrentSelectXDOC(float currentSelectXDOC){
		currentSelectXDOC_ = currentSelectXDOC;
	}
	void setCurrentALLXDOC(float currentALLXDOC){
		currentALLXDOC_ = currentALLXDOC;
	}
	double getStaticPartScore(const IndexDocOptimizedEntry& entry);   // first-factor-probability component (defined in .cc)
	double getDynamicPartScore(const IndexDocOptimizedEntry& entry);  // Xdoc-dependent component (defined in .cc)
	double score(const IndexDocOptimizedEntry& entry);                // combined score (defined in .cc)
	bool operator()(const IndexDocOptimizedEntry& lhs, const IndexDocOptimizedEntry& rhs);  // comparator form for sorting (defined in .cc)

private:
  const map<uint32_t, float>& termID_with_their_first_factor_probability_map_;  // termID -> first-factor probability (referenced, not owned)
  float a_;	// The weight controlling the static probability part (NOT USED Since 2013/10/24 afternoon by Wei at school)
  float b_;	// The weight controlling the dynamic probability part (USED ONLY Since 2013/10/24 afternoon by Wei at school)
  float currentSelectXDOC_;
  float currentALLXDOC_;
};

/**************************************************************************************************************************************************************
 * DocPostingOptimizedCompareForTest
 * Updated by Wei 2013/09/22 night at school
 **************************************************************************************************************************************************************/
struct DocPostingOptimizedCompareForTest{
  // Placeholder comparator used only for exercising the sort plumbing.
  //
  // BUG FIX: the original returned `true` unconditionally, which means
  // comp(x, x) == true. That violates the strict-weak-ordering contract
  // required by std::sort / std::make_heap and is undefined behavior.
  // Returning `false` unconditionally is the valid "all elements are
  // equivalent" comparator, keeping the sort a well-defined no-op.
  bool operator()(const IndexDocOptimizedEntry& l, const IndexDocOptimizedEntry& r) const {
	  // The operators:
	  // > descending order
	  // < ascending order
	  return false;
  }
};

/**************************************************************************************************************************************************************
 * DocPostingCompare
 * Updated by Wei 2013/09/14 night at school
 **************************************************************************************************************************************************************/
struct DocPostingCompare{
  bool operator()(const IndexDocEntry& l, const IndexDocEntry& r) const {
	  /*
	  // Notes:
	  (1) Sort elements in range
	  (2) Sorts the elements in the range [first,last) into ascending(increasing) order.
	  (3) The elements are compared using operator< for the first version, and comp for the second.
	  (4) Equivalent elements are not guaranteed to keep their original relative order (see stable_sort).
	  */
	  // The operator should be >(descending order), but NOT <
	  // option1:
	  return l.valueToComputeANDStoreCombined > r.valueToComputeANDStoreCombined;
	  // option2:
	  // return l.partialBM25 > r.partialBM25;
  }
};

/**************************************************************************************************************************************************************
 * HeapTupleOptimizedCompare
 * Updated by Wei 2013/09/21 night at school
 **************************************************************************************************************************************************************/
struct HeapTupleOptimizedCompare {
  bool operator()(const docPostingsOptimizedTuple& l, const docPostingsOptimizedTuple& r) const {
	  // option1:
	  return l.first.current_largest_value_of_the_posting_array < r.first.current_largest_value_of_the_posting_array;
  }
};

/**************************************************************************************************************************************************************
 * HeapTupleCompare
 * Updated by Wei 2013/09/14 night at school
 **************************************************************************************************************************************************************/
struct HeapTupleCompare {
  bool operator()(const docPostingsTuple& l, const docPostingsTuple& r) const {
	  // option1:
	  return l.first.current_largest_value_of_the_posting_array < r.first.current_largest_value_of_the_posting_array;
	  // option2:
	  // return l.second[0].partialBM25 < r.second[0].partialBM25;
  }
};

/**************************************************************************************************************************************************************
 * LayeredIndexGenerator
 *
 **************************************************************************************************************************************************************/
class DocIdScoreComparison;
class DocIdScoreComparisonWei;
class ExternalIndexBuilder;
class IndexBuilder;

class LayeredIndexGenerator {
public:
  LayeredIndexGenerator(const IndexFiles& input_index_files, const std::string& output_index_prefix);
  ~LayeredIndexGenerator();

  void CreateLayeredIndex();

  // Updated by Wei 2013/03/21
  // build the forward index.
  // input: original inverted index
  // output: forward index while maintaining the same info from the original inverted index
  void BuildForwardIndex(vector<string> & queryTerms,bool debugFlag,bool store_computed_score_into_external_index_flag);

  // Updated by Wei on 2013/09/22 Sun at school
  bool scoreInTest(const IndexDocOptimizedEntry& l,const IndexDocOptimizedEntry& r);

  // Updated by Wei on 2013/09/20 afternoon at school
  void ComputeHowMuchMemoryWillBeUsed();

  // Updated by Wei on 2013/09/20 morning at school
  void PrototypingOfThePostingOrientedUniformPruningMethodOptimizedVersion(bool debugFlag);

  // Updated by Wei on 2013/09/12 night at school
  void PrototypingOfThePostingOrientedUniformPruningMethod(bool debugFlag);
  void StorePostingRankInListToExternalIndex(map<string,int> & queryTermsDictForDebugging, bool debugFlag);

  // The following function has two purposes.
  // (1) It is a good prototype for the previous literature. Basically, it iterates through every query term
  // (2) Also, I can use this function to store the probability into the external index
  void CreatePrunedIndexForMultipleTerms(vector<string> & queryTerms, bool debugFlag,bool store_computed_score_into_external_index_flag,float percentageToKeepOfTheWholeIndex,int pruningMethodCodeOfTheWholeIndex,map<string,float> &queryTermsProbabilityDistributionMap);
  void CreatePrunedIndexForMultipleTermsBasedOnUniversalScoreImportanceOLDAndNotUsed(vector<string> & queryTerms, bool debugFlag,bool store_computed_score_into_external_index_flag,float percentageToKeepOfTheWholeIndex,int pruningMethodCodeOfTheWholeIndex);
  void CreateExternalScoreFileForEachQueryTerm(vector<string> & queryTerms, bool debugFlag, int pruningMethodCodeForTheTerm);
  void CutBasedOnUniversalImportanceScore(vector<string> & queryTerms, bool debugFlag, bool store_computed_score_into_external_index_flag, float percentageToKeepOfTheWholeIndex, int pruningMethodCodeOfTheWholeIndex);

  // Updated by Wei on 2013/09/24 night at school
  void OutputASetOfDocumentsNeededToBeParsedGivenASetOfPostingsAsInput();

  // Updated by Wei on 2013/08/08 night at school
  void ConvertingOLD32BitLexiconToNEW64BitLexicon();

  // Updated by Wei on 2013/08/07 night at school
  void OutputingEachDocumentWithTheirNumOfPostingsInIndex();

  // Updated by Wei on 2013/08/06 night at school
  void ProduceProbabilitiesForRandomlySelectedPostings();

  // Updated by Wei on 2013/08/28 night at school
  // Updated by Wei on 2013/12/05 night at school with the score command code == 7
  void ProduceProbabilitiesForRandomlySelectedPostingsBaseline();

  // Updated by Wei on 2014/01/03 afternoon at school
  // test the sort
  void SortTest();

  // Updated by Wei on 2014/04/26 afternoon at school
  void OutputRankInListForEachPosting();


  // Updated by Wei on 2013/08/05 night at school
  void CreateHistogram(vector<string> & queryTerms, bool debugFlag, int sortingMethodCodeForTheTerm);
  void CreateCutThresholdOfEachTermBasedOnPercentageForMultipleTerms(map<string,int> & queryTerms, bool debugFlag);


  // Helpers for formatting numbers as strings (used when writing aux files).
  string make_the_value_into_string_format_with_fixed_mode(float originalValue, int precisionNumber, bool debugFlag);
  string make_the_value_into_string_format(float originalValue);
  string make_the_value_into_string_format(int originalValue);

  // Updated by Wei 2013/01/27 night
  // This function outputs an aux file with 2 columns
  // 1st column: curr_term
  // 2nd column: num_docs_in_unpruned_list (# of postings for this term)
  void CreatePrunedIndexAuxInfo();

  // Updated by Wei 2013/08/07 night
  void LoadUpSelectedTerms();

  // Updated by Wei 2013/09/15 afternoon
  void LoadUpTermIDANDTermPairs();

  // Updated by Wei 2013/09/14 afternoon
  void LoadUpDocIDANDNumOfPostingPairs();

  // Updated by Wei 2013/09/13 afternoon
  void LoadUpProbabilityTableBasedOnListLengthANDRelativeRank();

  // Updated by Wei 2013/08/28 night
  void LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable1D();

  // Updated by Wei 2013/12/05 night
  void LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable2D();

  // Updated by Wei 2014/01/05 afternoon
  void LoadUpTermPieceInfoForRelRank();

  // Updated by Wei 2013/08/06 night
  void LoadUpRandomlySelectedPostings();

  // Updated by Wei on 2013/09/22 afternoon at school
  void buildTermIDWithTheirFirstFactorProbabilityMap();

  // Reused by Wei on 2013/09/19 afternoon at school
  void LoadUpFinalTOP10DocumentResultRelatedPostings();

  // Reused by Wei on 2013/09/17 night at school
  // Updated by Wei on 2013/08/05 night at school
  void LoadUpAuxFilesForFirstProbabilityFactor();
  void LoadUpAuxFilesForSecondProbabilityFactor();
  void OutputTrecIDAndDocIDAndDocSizeInWordsToScreen();

  void LoadUpThreeFeatureValuesForMachineLearnedTraining();

  // Updated by Wei 2013/12/07 afternoon at school
  // Purpose: in order to take the terms' list length into consideration
  void LoadTermsWithTheirLengthOfList();

private:
  // Updated by Wei 2013/01/27.
  // TODO: This function should be reverted back to the original one
  void DumpToIndex(const DocIdScoreComparison& doc_id_score_comparator, IndexEntry* index_entries, int num_index_entries, const char* curr_term,
                   int curr_term_len,bool store_computed_score_into_external_index_flag);
  /*
  void DumpToIndexForPruningProject(const DocIdScoreComparison& doc_id_score_comparator, IndexEntry* index_entries, int num_index_entries,
                                          const char* curr_term, int curr_term_len, bool store_computed_score_into_external_index_flag, int specialNumberValue);
  */

  void DumpToIndexForPruningProjectWeiWithoutTheArgumentDoc_id_score_comparator(IndexEntryWeiForPruning* index_entries, int num_index_entries,
                                          const char* curr_term, int curr_term_len, bool store_computed_score_into_external_index_flag, int specialNumberValue);

  void DumpToIndexForPruningProjectWei(DocIdScoreComparisonWei& doc_id_score_comparator, IndexEntry* index_entries, int num_index_entries,
                                          const char* curr_term, int curr_term_len, bool store_computed_score_into_external_index_flag, int specialNumberValue);
  float GetChunkMaxScore(const DocIdScoreComparison& doc_id_score_comparator, IndexEntry* chunk_entries, int num_chunk_entries);
  float GetChunkMaxScoreWei(DocIdScoreComparisonWei& doc_id_score_comparator, IndexEntry* chunk_entries, int num_chunk_entries);
  float GetChunkMaxScoreWeiWithoutTheArgumentDoc_id_score_comparator(IndexEntryWeiForPruning* chunk_entries, int num_chunk_entries);

  void WriteMetaFile(const std::string& meta_filename, int functionValue, float percentageToKeepOfTheWholeIndex, int pruningMethodCodeOfTheWholeIndex);
  void WriteMetaFile(const std::string& meta_filename, int functionValue);

  IndexFiles output_index_files_;                 // The index filenames for the layered index.
  Index* index_;                                  // The index we're creating layers for.
  ExternalIndexBuilder* external_index_builder_;  // Responsible for building the external index, necessary for maximum block and chunk score information and potentially more info.
  IndexBuilder* index_builder_;                   // The current layered index we're building.

  // Some index properties.
  bool includes_contexts_;
  bool includes_positions_;

  // Layering settings for the new index to be generated.
  bool overlapping_layers_;
  int num_layers_;
  std::string layering_strategy_;



  // Compressors to be used for various parts of the index.
  CodingPolicy doc_id_compressor_;
  CodingPolicy frequency_compressor_;
  CodingPolicy position_compressor_;
  CodingPolicy block_header_compressor_;

  // The following properties are derived from the original index meta info file.
  uint32_t total_num_docs_;          // The total number of documents.
  uint32_t total_unique_num_docs_;   // The total number of unique documents.
  uint64_t total_document_lengths_;  // The total document lengths of all documents.
  uint64_t document_posting_count_;  // The total document posting count.
  uint64_t index_posting_count_;     // The total index posting count.
  uint32_t first_doc_id_in_index_;   // The first docID in the index.
  uint32_t last_doc_id_in_index_;    // The last docID in the index.

  // Per-term statistics keyed by the term string.
  map<string,float> query_terms_length_of_the_inverted_index_map_;
  map<string,float> query_terms_term_freq_in_collection_map_;
  map<string,float> query_terms_term_freq_in_queries_map_;

  // Updated by Wei 2013/08/05 night
  // for the first probability factor
  // aux maps for the first probability factor
  map<int,float> freq_first_factor_probability_map_;
  map<string,int> terms_with_corresponding_species_belonging_to_map_;

  // Updated by Wei 2013/08/05 night
  // for the second probability factor
  // aux maps for the second probability factor
  map<int,float> query_length_probability_map_;
  map<string,float> docID_With_Xdoc_Value_goldStandarded_map_;
  map<string,float> docID_With_Xdoc_Value_1D_map_;
  map<string,float> docID_With_Xdoc_Value_2D_map_;
  map<string,float> docID_With_Xdoc_Value_goodTurning_map_;

  // Updated by Wei 2013/08/06 night
  // for the randomly selected postings mechanism
  map<string,map<int,string> > term_with_selected_postings_map_;

  // Updated by Wei 2014/01/05 afternoon
  // key: term
  // value: map<int,long>
  //   key: pieceID
  //   value: # of postings in that piece
  map<string,map<int,long> > term_with_piece_info_map_;
  // Updated by Wei 2014/01/05 afternoon
  // key: term
  // value: belonging class ID based on list length
  map<string,int > term_with_their_belonging_class_map_;

  // Updated by Wei 2013/09/22 sun at school
  map<uint32_t, float> termID_with_their_first_factor_probability_map_;

  // Updated by Wei 2013/09/19 afternoon at school
  map<string, int> top10RelatedPostingsDict_;

  // Updated by Wei 2013/09/16 afternoon at school
  map<uint32_t, string> termIDWithTermDict_;

  // Updated by Wei 2013/09/15 afternoon at school
  map<string, uint32_t> termWithTermIDDict_;

  // Updated by Wei 2013/09/14 afternoon at school
  // key: docID in uint32_t format
  // value: True if the docID related heap tuple has been created. False otherwise.
  map<uint32_t, bool > docIDWithTupleCreatedDict_;
  // key: docID in uint32_t format
  // value: # of postings recorded in doc in int format
  map<uint32_t, int > docIDWithNumOfPostingsRecordedDict_;

  // Updated by Wei 2013/09/13 afternoon at school
  // 1st level
  // key: classLabel in int format
  // value: a dict containing pieces info
  //   2nd level
  //   key: the pieceID in int format
  //   value: the probability that the next posting will hit this piece area in float format
  map<int, map<int,float> > classLabelWithPiecesMetaInfoDict_;

  // Updated by Wei 2013/08/29 morning
  // key: class label in int format
  // value: class partial bm25 lower bound in float format
  // This is the variable(1 out of 2) for the naive pruning method based on some partialBM25 ranges
  map<int, float> class_label_with_lower_bounds_map_;

  // Updated by Wei 2013/12/05 night
  // 2 dicts, the outside dict and the inside dict
  // One is int format and one is float format
  // Load this two dicts
  map<int, uint32_t> class_label_with_lower_bounds_of_list_length_map_;
  // Updated by Wei on 2014/01/02 night at school
  // Note: the variable called class_label_with_lower_bounds_of_impact_scores_map_ has two purposes:
  // option1:
  // for impact score and quadtree etc:
  // 	key: impact score class label
  // 	value: impact score class lower bound

  // option2:
  // for the relrank(piecewise) etc:
  //	key: class label based on list length
  //	value: # of pieces/gaps/ranges for that class label
  map<int, float> class_label_with_lower_bounds_of_impact_scores_map_;
  map<string, float> class_label_with_probability_of_2D_ranges_map_;
  map<string,int> terms_with_length_of_the_lists_map_;

  // Updated by Wei 2013/12/21 night
  map<string, float> class_label_with_quad_tree_probability_map_;

  // key: class label in int format
  // value: class probability in float format
  // This is the variable(2 out of 2) for the naive pruning method based on some partialBM25 ranges
  map<int, float> class_label_with_probability_map_;

  // Updated by Wei 2013/08/07 night
  map<string,int> selected_terms_map_;
  map<uint32_t,int> docIDs_with_num_of_postings_map_;

};

/**************************************************************************************************************************************************************
 * DocIdScoreComparison
 *
 * Uses the partial BM25 score to compare two documents from the same list.
 **************************************************************************************************************************************************************/
class DocIdScoreComparison {
public:
	  // Intentionally empty no-op; presumably kept as a quick smoke-test
	  // hook for callers holding a const reference — TODO confirm it is
	  // still needed.
	  void hello() const{
	  }

	  // Asks the embedded Python helper
	  // pythonModuleForCallingFromC_assignTheRank for the rank-in-list of
	  // the posting identified by (term, trecID). Requires pModule_ to
	  // have been set up by the sorting_method_code == 3 constructor.
	  // Returns the rank reported by Python, or 0 on any failure.
	  //
	  // BUG FIXES vs. the original:
	  //  - error message named "multiply" instead of the actual function;
	  //  - argument-conversion failure paths DECREF'd pArgs_local and then
	  //    kept using it (use-after-free) and DECREF'd the member pModule_
	  //    (over-release, since the destructor DECREFs it too) — now we
	  //    release only what this function owns and bail out;
	  //  - call-failure path DECREF'd pFunc_local which the tail
	  //    Py_XDECREF released a second time;
	  //  - the long result is now cast explicitly to the int return type.
	  int get_posting_rank_in_list_test(string term, string trecID){

		    PyObject *pFunc_local, *pArgs_local, *pValue_local;
		    long posting_rank_in_list = 0;
		    cout << "test point 0" << endl;
		    pFunc_local = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_assignTheRank");
			// pFunc_local is a new reference
			cout << "test point 1" << endl;
			if (pFunc_local && PyCallable_Check(pFunc_local)) {
				pArgs_local = PyTuple_New(2);

				// argument 1: the query term

				pValue_local = PyString_FromString( term.c_str() );

				if (!pValue_local) {
					Py_DECREF(pArgs_local);
					Py_DECREF(pFunc_local);
					fprintf(stderr, "Cannot convert argument\n");
					return static_cast<int>(posting_rank_in_list);
				}
				// pValue reference stolen here:
				PyTuple_SetItem(pArgs_local, 0, pValue_local);

				// argument 2: the TREC document id

				pValue_local = PyString_FromString( trecID.c_str() );

				if (!pValue_local) {
					Py_DECREF(pArgs_local);
					Py_DECREF(pFunc_local);
					fprintf(stderr, "Cannot convert argument\n");
					return static_cast<int>(posting_rank_in_list);
				}
				// pValue reference stolen here:
				PyTuple_SetItem(pArgs_local, 1, pValue_local);

				cout << "test point 2" << endl;

				pValue_local = PyObject_CallObject(pFunc_local, pArgs_local);

				Py_DECREF(pArgs_local);
				if (pValue_local != NULL) {
					posting_rank_in_list = PyInt_AsLong(pValue_local);
					printf("posting_rank_in_list: %ld\n", posting_rank_in_list);
					Py_DECREF(pValue_local);
				}
				else {
					// pFunc_local is released by the Py_XDECREF below.
					PyErr_Print();
					fprintf(stderr,"Call failed\n");
				}
			}
			else {
				if (PyErr_Occurred())
					PyErr_Print();
				fprintf(stderr, "Cannot find function \"%s\"\n", "pythonModuleForCallingFromC_assignTheRank");
			}
			Py_XDECREF(pFunc_local);

		    return static_cast<int>(posting_rank_in_list);
	  }

	  // Disabled stub: the const-qualified rank lookup cannot run the
	  // Python bridge (the working, non-const implementation lives in
	  // get_posting_rank_in_list_test above). Prints a notice and
	  // always returns -1; both parameters are intentionally unused.
	  int get_posting_rank_in_list(string term, string trecID) const{
		    cout << "This function can NOT be used." << endl;
		    return -1;
	  }

	void LoadModelFromFileWei(const string& file_name, SfWeightVector** w) {
	  if (*w != NULL) {
	    delete *w;
	  }

	  /*
	  // version with std
	  std::fstream model_stream;
	  model_stream.open(file_name.c_str(), std::fstream::in);
	  if (!model_stream) {
	    std::cerr << "Error opening model input file " << file_name << std::endl;
	    exit(1);
	  }

	  std::cerr << "Reading model from: " << file_name << std::endl;
	  string model_string;
	  std::getline(model_stream, model_string);
	  model_stream.close();
	  std::cerr << "   Done." << std::endl;

	  *w = new SfWeightVector(model_string);
	  assert(*w != NULL);
	  */

	  // version without std
	  fstream model_stream;
	  model_stream.open(file_name.c_str(), fstream::in);
	  if (!model_stream) {
	    cerr << "Error opening model input file " << file_name << endl;
	    exit(1);
	  }

	  cerr << "Reading model from: " << file_name << endl;
	  string model_string;
	  getline(model_stream, model_string);
	  model_stream.close();
	  // cerr << "   Done." << endl;

	  *w = new SfWeightVector(model_string);
	  assert(*w != NULL);

	}

  // Tears down the embedded Python interpreter state.
  // NOTE(review): pModule_ is only assigned (and Py_Initialize only
  // called) on the sorting_method_code == 3 path of the term-taking
  // constructor; on every other construction path this Py_DECREF reads
  // an uninitialized pointer and Py_Finalize runs without a matching
  // Py_Initialize. Confirm all construction paths before relying on
  // this destructor.
  ~DocIdScoreComparison(){
      // final step: deconstruct the things we do NOT need
      Py_DECREF(pModule_);
      Py_Finalize();
  }

  // Constructor without a term argument. sorting_method_code selects the
  // comparison strategy:
  //   1 — docID order (no extra state initialized);
  //   2 — partial BM25: precompute the BM25 constants and idf;
  //   3 — machine-learned score: BM25 constants plus the feature scaling
  //       bounds and the sofia-ml model (Model2, 5 dimensions).
  // NOTE(review): unlike the term-taking overload, this path never calls
  // Py_Initialize / never sets pModule_, yet the destructor DECREFs
  // pModule_ unconditionally — confirm which overload callers use.
  DocIdScoreComparison(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code):
	  sorting_method_code_(sorting_method_code),
	  doc_map_reader_(doc_map_reader){

	  if (sorting_method_code_ == 1){
		  // Based on docID
	  }
	  else if(sorting_method_code_ == 2){
		  // Based on partial BM25 score
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));

		  num_docs_t_ = num_docs_t;
	  }
	  else if(sorting_method_code_ == 3){

		  // Scaling ranges for the ML features: raw feature values in
		  // [Dmin, Dmax] are mapped into [Rmin_, Rmax_] = [-1, 1].
		  Rmin_ = -1.0;
		  Rmax_ = 1.0;

		  Dmin_feature1_ = 0.001967;
		  Dmax_feature1_ = 17.711489;

		  Dmin_feature2_ = 2.0;
		  Dmax_feature2_ = 23077260;

		  Dmin_feature3_ = 1.0;
		  Dmax_feature3_ = 11531;

		  Dmin_feature4_ = 4.0;
		  Dmax_feature4_ = 87803;

		  // Based on partial BM25 score
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));

		  num_docs_t_ = num_docs_t;
		  // Based on machine learning score assigner (Updated by Wei 2013/01/18)
		  // SfWeightVector* w = NULL;
		  // w = new SfWeightVector(20);

		  // Model1
		  // Model2: minimal features contained from the toolkit(BM25), 4 features, 5 dimensions
		  // Model3
		  // Model4
		  // Model5
		  // Model6
		  // Model7
		  // Model8
		  // Model9

		  // This dimension setting is for Model2.
		  // (LoadModelFromFileWei deletes this placeholder and replaces it
		  // with the model read from disk.)
		  w_ = new SfWeightVector(5);
		  LoadModelFromFileWei("/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model2/model", &w_);
	  }
  }

  // Constructor overload that also records the term being processed.
  // sorting_method_code selects the comparison strategy:
  //   1 — docID order (no extra state initialized);
  //   2 — partial BM25: precompute the BM25 constants and idf;
  //   3 — like the other overload's ML path, but additionally boots an
  //       embedded Python interpreter, imports
  //       "pythonModuleForCallingFromC" into pModule_, and preloads its
  //       aux data. The destructor assumes this path ran.
  DocIdScoreComparison(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code, string term):
	  sorting_method_code_(sorting_method_code),
	  term_(term),
	  doc_map_reader_(doc_map_reader){

	  if (sorting_method_code_ == 1){
		  // Based on docID
	  }
	  else if(sorting_method_code_ == 2){
		  // Based on partial BM25 score
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));

		  num_docs_t_ = num_docs_t;
	  }
	  else if(sorting_method_code_ == 3){

		// updated 2013/01/21
		// python modules init process begins...
		Py_Initialize();

		pName_ = PyString_FromString("pythonModuleForCallingFromC");

		/* Error checking of pName left out */

		// NOTE(review): pModule_ is not checked for NULL before use below
		// (or in the destructor); an import failure here would crash later.
		pModule_ = PyImport_Import(pName_);
		Py_DECREF(pName_);

		PyRun_SimpleString("from time import time,ctime\n"
						   "print 'Today is',ctime(time())\n");

		// call the function pythonModuleForCallingFromC_loadTheAuxInfoIntoMemory
		pFunc_ = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_loadTheAuxInfoIntoMemory");
		// pFunc is a new reference

		if (pFunc_ && PyCallable_Check(pFunc_)) {
			pArgs_ = PyTuple_New(0);
			pValue_ = PyObject_CallObject(pFunc_, pArgs_);
			Py_DECREF(pArgs_);
			if (pValue_ != NULL) {
				printf("Result of call: %ld\n", PyInt_AsLong(pValue_));
				Py_DECREF(pValue_);
			}
			else {
				Py_DECREF(pFunc_);
				Py_DECREF(pModule_);
				PyErr_Print();
				fprintf(stderr,"Call failed\n");

			}
		}
		else {
			if (PyErr_Occurred())
				PyErr_Print();
			fprintf(stderr, "Cannot find the function");
		}
		Py_XDECREF(pFunc_);

		/*
		// load the experimental term: 000sites to test the it is OK
		pFunc_ = PyObject_GetAttrString(pModule_, "pythonModuleForCallingFromC_loadTheActualDataIntoMemory");
		// pFunc is a new reference

		if (pFunc_ && PyCallable_Check(pFunc_)) {
			pArgs_ = PyTuple_New(1);
			pValue_ = PyString_FromString( term_.c_str() );

			if (!pValue_) {
				Py_DECREF(pArgs_);
				Py_DECREF(pModule_);
				fprintf(stderr, "Cannot convert argument\n");
			}
			// pValue reference stolen here:
			PyTuple_SetItem(pArgs_, 0, pValue_);

			pValue_ = PyObject_CallObject(pFunc_, pArgs_);

			Py_DECREF(pArgs_);
			if (pValue_ != NULL) {
				printf("Result of call: %ld\n", PyInt_AsLong(pValue_));
				Py_DECREF(pValue_);
			}
			else {
				Py_DECREF(pFunc_);
				Py_DECREF(pModule_);
				PyErr_Print();
				fprintf(stderr,"Call failed\n");
			}
		}
		else {
			if (PyErr_Occurred())
				PyErr_Print();
			fprintf(stderr, "Cannot find the function \"%s\"\n", "pythonModuleForCallingFromC_loadTheActualDataIntoMemory");
		}
		Py_XDECREF(pFunc_);
		// python modules init process ends.
		*/

		  // Scaling ranges for the ML features: raw feature values in
		  // [Dmin, Dmax] are mapped into [Rmin_, Rmax_] = [-1, 1].
		  Rmin_ = -1.0;
		  Rmax_ = 1.0;

		  Dmin_feature1_ = 0.001967;
		  Dmax_feature1_ = 17.711489;

		  Dmin_feature2_ = 2.0;
		  Dmax_feature2_ = 23077260;

		  Dmin_feature3_ = 1.0;
		  Dmax_feature3_ = 11531;

		  Dmin_feature4_ = 4.0;
		  Dmax_feature4_ = 87803;

		  // Based on partial BM25 score
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));

		  num_docs_t_ = num_docs_t;
		  // Based on machine learning score assigner (Updated by Wei 2013/01/18)
		  // SfWeightVector* w = NULL;
		  // w = new SfWeightVector(20);

		  // Model1
		  // Model2: minimal features contained from the toolkit(BM25), 4 features, 5 dimensions
		  // Model3
		  // Model4
		  // Model5
		  // Model6
		  // Model7
		  // Model8
		  // Model9

		  // This dimension setting is for Model2.
		  // (LoadModelFromFileWei deletes this placeholder and replaces it
		  // with the model read from disk.)
		  w_ = new SfWeightVector(5);
		  LoadModelFromFileWei("/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model2/model", &w_);
	  }
  }



  float score(const IndexEntry& entry) const {
	  float returning_score = 0.0;

	  if(sorting_method_code_ == 1){
		  // do nothing.
		  returning_score = -float(entry.doc_id)/100000000;
		  // for debugging.
		  //cout << "returning_score:" << returning_score << endl;
	  }
	  else if(sorting_method_code_ == 2){
		  uint32_t f_d_t = entry.frequency;
		  int doc_len = doc_map_reader_.GetDocumentLength(entry.doc_id);
		  returning_score = kIdfT * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
		  assert(!isnan(returning_score));
	  }
	  else if(sorting_method_code_ == 3){
		  // vector<float> predictions;
		  // SfDataSet test_data("/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/prune-classify20130117-scaled_test.dat",40,!false);
		  // SfDataSet test_data("/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/prune-classify20130117-scaled_test.dat");

		  /*
		  SfDataSet data_set_2(!false);
		  data_set_2.AddVector("1 1:-0.75604 2:-0.845027 3:-0.999306 4:-0.963644 5:0.333333 6:-0.678994 7:-0.960852 8:-0.801709 9:-0.990538 10:-0.999867 11:-0.948377 12:-1 13:-1 14:-1 15:1 16:-1 17:-0.986025 18:-0.932678");
		  data_set_2.AddVector("1 1:-0.830307 2:-0.411056 3:-0.996357 4:-0.963644 5:0.333333 6:-0.730276 7:-0.960852 8:-0.801709 9:-0.990538 10:-0.999867 11:-0.948377 12:-1 13:-1 14:-1 15:1 16:-1 17:-0.981382 18:-0.982682");
		  data_set_2.AddVector("1 1:-0.976443 2:0.848874 3:-0.984042 4:-0.963644 5:0.333333 6:1 7:-0.960852 8:-0.801709 9:-0.990538 10:-0.999867 11:-0.948377 12:-1 13:1 14:-1 15:1 16:-1 17:-0.996866 18:-0.994331");
		  data_set_2.AddVector("1 1:-0.658205 2:-0.874374 3:-0.997918 4:-0.963644 5:0.333333 6:-0.866206 7:-0.960852 8:-0.801709 9:-0.990538 10:-0.999867 11:-0.948377 12:-1 13:-1 14:-1 15:1 16:-1 17:-0.993847 18:-0.968835");
		  sofia_ml::SvmPredictionsOnTestSet(data_set_2, *w, &predictions);
		  */

		  /*
		  // option1:
		  sofia_ml::SvmPredictionsOnTestSet(test_data, *w, &predictions);
		  for (unsigned int i = 0; i < predictions.size(); ++i) {
		      std::cout << i << ":"<< predictions[i] << std::endl;
		  }
		  */

		  // option2:try to run on a single example.
		  // 1st, do the minimal set of features first
		  // currently have models listing:
		  // Model1
		  // Model2: minimal features contained from the toolkit(BM25)
		  // Model3
		  // Model4
		  // Model5
		  // Model6
		  // Model7
		  // Model8
		  // Model9

		  // The label provided here doesn't mean anything
		  // string currentTrainingInstanceString = "1 1:-0.75604 2:-0.845027 3:-0.999306 4:-0.963644";

		  uint32_t f_d_t = entry.frequency;
		  int doc_len = doc_map_reader_.GetDocumentLength(entry.doc_id);
		  float partialBM25Score = kIdfT * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);

		  // feature1 generation
		  stringstream ss1 (stringstream::in | stringstream::out);

		  float partialBM25Score_scaled = 0.0;
		  partialBM25Score_scaled = partialBM25Score * (Rmax_ - Rmin_) / (Dmax_feature1_ - Dmin_feature1_) + (Rmin_ * Dmax_feature1_ - Rmax_ * Dmin_feature1_) / (Dmax_feature1_ - Dmin_feature1_);

		  ss1 << partialBM25Score_scaled;
		  string partialBM25ScoreInStringFormat = ss1.str();

		  // feature2 generation
		  stringstream ss2 (stringstream::in | stringstream::out);
		  float num_docs_t_scaled = 0.0;
		  num_docs_t_scaled = num_docs_t_ * (Rmax_ - Rmin_) / (Dmax_feature2_ - Dmin_feature2_) + (Rmin_ * Dmax_feature2_ - Rmax_ * Dmin_feature2_) / (Dmax_feature2_ - Dmin_feature2_);

		  ss2 << num_docs_t_scaled;
		  string num_docs_t_InStringFormat = ss2.str();

		  // feature3 generation
		  stringstream ss3 (stringstream::in | stringstream::out);
		  float f_d_t_scaled = 0.0;
		  f_d_t_scaled = f_d_t * (Rmax_ - Rmin_) / (Dmax_feature3_ - Dmin_feature3_) + (Rmin_ * Dmax_feature3_ - Rmax_ * Dmin_feature3_) / (Dmax_feature3_ - Dmin_feature3_);
		  ss3 << f_d_t_scaled;
		  string f_d_tInStringFormat = ss3.str();

		  // feature4 generation
		  stringstream ss4 (stringstream::in | stringstream::out);
		  float doc_len_scaled = 0.0;
		  doc_len_scaled = doc_len * (Rmax_ - Rmin_) / (Dmax_feature4_ - Dmin_feature4_) + (Rmin_ * Dmax_feature4_ - Rmax_ * Dmin_feature4_) / (Dmax_feature4_ - Dmin_feature4_);
		  ss4 << doc_len_scaled;
		  string doc_lenInStringFormat = ss4.str();


		  // aux file generation
		  stringstream ss5 (stringstream::in | stringstream::out);
		  ss5 << entry.doc_id;
		  string doc_idInStringFormat = ss5.str();

		  // feature5 generation: posting_rank_in_the_list
	      // try to assign the rank given the term and the trecID
		  int posting_rank_in_list = get_posting_rank_in_list(term_, doc_idInStringFormat);
		  cout << "posting_rank_in_list:" << posting_rank_in_list << endl;

		  // string currentTrainingInstanceString = string("1") + "2";
		  string currentTrainingInstanceString = string("1") + " " + "1:" + partialBM25ScoreInStringFormat + " " + "2:" + num_docs_t_InStringFormat + " " + "3:" + f_d_tInStringFormat + " " + "4:" + doc_lenInStringFormat;
		  // string currentTrainingInstanceString = "1 1:-0.75604 2:-0.845027 3:-0.999306 4:-0.963644";
		  // cout << currentTrainingInstanceString << " " << doc_idInStringFormat << endl;
		  SfSparseVector singleTrainingInstance(currentTrainingInstanceString.c_str(),!false);
		  returning_score = sofia_ml::SingleSvmPrediction(singleTrainingInstance,*w_);
		  // cout << "returning_score:" << returning_score << endl;
	  }

	  return returning_score;
  }

  // Strict-weak-ordering predicate: ranks the posting with the higher score
  // first (descending-score order).
  bool operator()(const IndexEntry& lhs, const IndexEntry& rhs) const {
    const float lhs_score = score(lhs);
    const float rhs_score = score(rhs);
    return rhs_score < lhs_score;
  }

private:
  // Scoring strategy selector:
  //   1: not sorted at all (ascending docID order is preserved)
  //   2: sorted based on partial bm25 score
  //   3: sorted based on a specific machine learned score and a hard-cutoff (Updated by Wei 20130118)
  int sorting_method_code_;

  // The current query term which this comparator instance is working on.
  string term_;

  // For method 1 (not sorted at all): no extra state is needed.

  // For method 2: sorted based on partial bm25 score.
  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
  float kBm25K1;  // k1
  float kBm25B;   // b

  // Precomputed BM25 components, set once in the constructor and reused for
  // every posting scored by this instance.
  float kBm25NumeratorMul;          // k1 + 1
  float kBm25DenominatorAdd;        // k1 * (1 - b)
  float kBm25DenominatorDocLenMul;  // k1 * b / average_doc_len
  float kIdfT;  // Inverse document frequency component. It is not document dependent, so we can compute it just once for the entire list.
  int num_docs_t_; // this is actually the feature: term_freq_in_collection

  // Supplies per-document lengths for BM25; not owned by this class.
  const DocumentMapReader& doc_map_reader_;

  // For method 3: sorted based on a specific machine learned score and a hard-cutoff (Updated by Wei 20130118).
  // NOTE(review): raw pointer allocated in the constructor (new SfWeightVector)
  // -- confirm a matching delete exists in the destructor.
  SfWeightVector* w_;

  // Hard-coded min-max scaling bounds. These are ONLY valid for Model2:
  // raw feature values are mapped from [Dmin_featureN_, Dmax_featureN_]
  // into the target range [Rmin_, Rmax_].
  float Rmin_;
  float Rmax_;

  float Dmin_feature1_;  // feature 1: partial BM25 score
  float Dmax_feature1_;

  float Dmin_feature2_;  // feature 2: num_docs_t (term_freq_in_collection)
  float Dmax_feature2_;

  float Dmin_feature3_;  // feature 3: in-document term frequency (f_d_t)
  float Dmax_feature3_;

  float Dmin_feature4_;  // feature 4: document length
  float Dmax_feature4_;

  // Embedded-Python handles for the auxiliary feature lookups.
  // NOTE(review): raw PyObject* members -- verify the Py_XDECREF/teardown
  // happens exactly once (see the commented-out init code in the constructor).
  PyObject *pName_, *pModule_;
  PyObject *pFunc_, *pArgs_, *pValue_;

};
};

/**************************************************************************************************************************************************************
 * DocIdScoreComparisonWei
 *
 * Uses many different kinds of score to compare two documents from the same list.
 **************************************************************************************************************************************************************/
class DocIdScoreComparisonWei {
public:
	  ~DocIdScoreComparisonWei();
	  // NOTE(review): this class holds raw pointers (w_ and the Py* members)
	  // and declares a destructor but no copy constructor / copy assignment.
	  // If instances are ever copied (e.g. a comparator passed by value to
	  // std::sort), that risks a double free -- confirm against the .cc file.

	  // Several overloaded constructors are provided; each corresponds to a
	  // different generation of the sorting/pruning experiments.

	  // Updated on 2013/08/29 by Wei at school:
	  // sorting method = 6 has been added.
	  // Updated on 2013/08/04 by Wei at school:
	  // Note: upgraded the class to support the 3-factor probability formula.
	  // Updated by Wei 2013/06/17.
	  DocIdScoreComparisonWei(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code, string term, map<int,float>& freq_first_factor_probability_map, map<string,int>& terms_with_corresponding_species_belonging_to_map, map<int,float>& query_length_probability_map, map<int, float>& class_label_with_lower_bounds_map, map<int, float>& class_label_with_probability_map);

	  // Updated on 2013/09/13 by Wei at school.
	  // Kept so that older call sites remain compatible.
	  // NOTE(review): the map parameter is taken by value, so every
	  // construction copies the whole distribution map -- consider making it
	  // const& here and in the .cc definition.
	  DocIdScoreComparisonWei(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code, string term, map<string,float> queryTermsProbabilityDistributionMap);

	  // Updated on 2013/12/07 night by Wei at school.
	  DocIdScoreComparisonWei(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code, string term, map<int,float>& freq_first_factor_probability_map, map<string,int>& terms_with_corresponding_species_belonging_to_map, map<int, uint32_t>& class_label_with_lower_bounds_of_list_length_map, map<int, float>& class_label_with_lower_bounds_of_impact_scores_map , map<string, float>& class_label_with_probability_of_2D_ranges_map);

	  // Feature-extraction helpers (implemented in the .cc file).
	  string get_high_level_features_including_rank_in_the_doc(string term, string trecID, bool debugFlag);
	  int get_term_freq_in_queries(string term, bool debugFlag);
	  int get_posting_rank_in_list(string term, string docID, bool debugFlag);
	  void load_model_from_file_wei(const string& file_name, SfWeightVector** w);
	  // Preloading routines that read auxiliary files into the maps below.
	  void pre_load_aux_file_for_feature_rank_in_the_list();
	  void pre_load_aux_file_for_high_level_features_including_rank_in_the_doc();
	  void pre_load_the_actual_query_term_list_data_into_memory();
	  void pre_load_aux_file_freq_Of_terms_in_queries();

	  // Min-max scaling of a raw feature from [Dmin_feature, Dmax_feature]
	  // into [Rmin, Rmax], plus a float-to-string helper.
	  float scale_the_value(float originalValue, float Rmin, float Rmax, float Dmin_feature, float Dmax_feature);
	  string make_the_value_into_string_format(float originalValue);
	  // Scoring entry points; score() drives the comparison in operator().
	  float score(const IndexEntry& entry);
	  bool operator()(const IndexEntry& lhs, const IndexEntry& rhs);
	  string get_term(){
		  return term_;
	  }

	  // Accessors for the probability values maintained by this class
	  // ("TimesBigNumberValue" variants expose the scaled form, the
	  // "OriginalValue" variants the unscaled form).
	  float get_probabilityGivenTheQueryTermsTimesBigNumberValue(){
		  return thirdFactorProbabilityValueTimesBigNumberValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionTimesBigNumberValue(){
		  return probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionTimesBigNumberValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionTimesBigNumberValue(){
		  return probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionTimesBigNumberValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionTimesBigNumberValue(){
		  return probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionTimesBigNumberValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionTimesBigNumberValue(){
		  return probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionTimesBigNumberValue_;
	  }

	  float get_1_FactorProbabilityOriginalValue(){
		  return firstFactorProbabilityOriginalValue_;
	  }

	  float get_2_FactorProbabilityOriginalValue(){
		  return secondFactorProbabilityOriginalValue_;
	  }

	  float get_3_FactorProbabilityOriginalValue(){
		  return thirdFactorProbabilityOriginalValue_;
	  }

	  float get_1_2_3_FactorProbabilitiesCombinedOriginalValue(){
		  return final_1_2_3_FactorsProbabilityCombinedOriginalValue_;
	  }

	  float get_1_3_FactorProbabilitiesCombinedOriginalValue(){
		  return final_1_3_FactorsProbabilityCombinedOriginalValue_;
	  }

	  float get_2_3_FactorProbabilitiesCombinedOriginalValue(){
		  return final_2_3_FactorsProbabilityCombinedOriginalValue_;
	  }

	  // Updated by Wei 2013/08/29 afternoon at school.
	  float get_1_2_3_FactorProbabilitiesCombinedOriginalValueBaseline(){
		  return final_1_2_3_FactorsProbabilityCombinedOriginalValueBaseline_;
	  }

	  // Updated by Wei 2013/08/29 afternoon at school.
	  float get_2_3_FactorProbabilitiesCombinedOriginalValueBaseline(){
		  return final_2_3_FactorsProbabilityCombinedOriginalValueBaseline_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionOriginalValue(){
		  return probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionOriginalValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionOriginalValue(){
		  return probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionOriginalValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionOriginalValue(){
		  return probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionOriginalValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionOriginalValue(){
		  return probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionOriginalValue_;
	  }

	  // Public data members (shortcut for direct access from outside).
	  float valueOfCurrentPostingLengthOfTheInvertedList_;
	  float valueOfCurrentPostingTermFreqInCollection_;
	  float valueOfcurrentPostingTermFreqInQueries_;
	  float XDocValue_;
	  // Scoring strategy selector (values 1..7; meanings are documented at the
	  // respective branches in the .cc implementation of score()).
	  unsigned int sorting_method_code_;
	  // The current query term which this instance is working on.
	  string term_;

	  // Updated by Wei on 2013/08/04 at school.
	  // Aux maps for the first probability factor.
	  map<int,float> freq_first_factor_probability_map_;
	  map<string,int> terms_with_corresponding_species_belonging_to_map_;
	  // Aux map for the second probability factor.
	  map<int,float> query_length_probability_map_;
	  map<int, uint32_t> class_label_with_lower_bounds_of_list_length_map_;
	  map<int, float> class_label_with_lower_bounds_of_impact_scores_map_;
	  map<string, float> class_label_with_probability_of_2D_ranges_map_;

	  // Updated by Wei 2014/01/05 afternoon at school.
	  // key: term
	  // value: map<int,long>
	  //   key: pieceID
	  //   value: # of postings in that piece
	  map<string,map<int,long> > term_with_piece_info_map_;

	  // Updated by Wei 2014/01/05 afternoon.
	  // key: term
	  // value: belonging class ID based on list length
	  map<string,int > term_with_their_belonging_class_map_;

private:
	  // For method 1 (not sorted at all): no extra state is needed.

	  // For method 2: sorted based on partial bm25 score.
	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  float kBm25K1;  // k1
	  float kBm25B;   // b

	  // Precomputed BM25 components, set once in the constructor.
	  float kBm25NumeratorMul;
	  float kBm25DenominatorAdd;
	  float kBm25DenominatorDocLenMul;
	  float kIdfT;  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for the entire list.
	  int num_docs_t_; // this is actually the feature: term_freq_in_collection
	  // Supplies per-document lengths; not owned by this class.
	  const DocumentMapReader& doc_map_reader_;

	  // Dumped (unused) since 2013/08/04 by Wei at school; kept for the older
	  // constructor overloads.
	  map<string,float> queryTermsTrueProbabilityDistributionMap_;
	  map<string,float> queryTerms1DProbabilityDistributionMap_;
	  map<string,float> queryTerms2DProbabilityDistributionMap_;
	  map<string,float> queryTermsGoodTuringProbabilityDistributionMap_;

	  // Currently used since 2013/08/30 by Wei at school; these two variables
	  // should be initialized in the constructor.
	  // key: class label in int format
	  // value: class partial bm25 lower bound in float format
	  // This is variable 1 of 2 for the naive pruning method based on
	  // partialBM25 ranges.
	  map<int, float> class_label_with_lower_bounds_map_;

	  // key: class label in int format
	  // value: class probability in float format
	  // This is variable 2 of 2 for the naive pruning method based on
	  // partialBM25 ranges.
	  map<int, float> class_label_with_probability_map_;




	  // map<string,float> docID_With_Xdoc_Value_map_;


	  // For method 3: sorted based on a specific machine learned score and a
	  // hard-cutoff (Updated by Wei 20130118).
	  // NOTE(review): raw pointer -- confirm the destructor releases it.
	  SfWeightVector* w_;

	  // Hard-coded min-max scaling bounds. These are now ONLY valid for model4:
	  // raw features map from [Dmin_featureN_, Dmax_featureN_] into [Rmin_, Rmax_].
	  float Rmin_;
	  float Rmax_;

	  float Dmin_feature1_;
	  float Dmax_feature1_;

	  float Dmin_feature2_;
	  float Dmax_feature2_;

	  float Dmin_feature3_;
	  float Dmax_feature3_;

	  float Dmin_feature4_;
	  float Dmax_feature4_;

	  float Dmin_feature5_;
	  float Dmax_feature5_;

	  float Dmin_feature6_;
	  float Dmax_feature6_;

	  /*
	  float Dmin_feature7_;
	  float Dmax_feature7_;

	  float Dmin_feature8_;
	  float Dmax_feature8_;

	  float Dmin_feature9_;
	  float Dmax_feature9_;

	  float Dmin_feature10_;
	  float Dmax_feature10_;

	  float Dmin_feature11_;
	  float Dmax_feature11_;

	  float Dmin_feature12_;
	  float Dmax_feature12_;

	  float Dmin_feature13_;
	  float Dmax_feature13_;

	  float Dmin_feature14_;
	  float Dmax_feature14_;

	  float Dmin_feature15_;
	  float Dmax_feature15_;

	  float Dmin_feature16_;
	  float Dmax_feature16_;

	  float Dmin_feature17_;
	  float Dmax_feature17_;
	  */

	  // Updated by Wei 2013/07/17.
	  // Weights for the Logistic Regression method (trained with weka).
	  float intercept_weight_0_;
	  float partialBM25ScoreComponentPart1_IDF_weight_1_;
	  float partialBM25ScoreComponentPart2_TF_weight_2_;
	  float partialBM25_weight_3_;
	  float length_of_the_inverted_index_weight_4_;
	  float term_freq_in_doc_weight_5_;
	  float doc_words_weight_6_;
	  float term_freq_in_training_head95K_queries_weight_7_;
	  float term_freq_in_collection_weight_8_;
	  float posting_rank_in_doc_weight_9_;
	  float posting_rank_in_list_weight_10_;

	  // Scaled ("times big number") probability values exposed via the public
	  // getters above.
	  float thirdFactorProbabilityValueTimesBigNumberValue_;
	  float probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionTimesBigNumberValue_;
	  float probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionTimesBigNumberValue_;
	  float probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionTimesBigNumberValue_;
	  float probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionTimesBigNumberValue_;

	  // Updated by Wei 2013/08/04: unscaled probability values.
	  float firstFactorProbabilityOriginalValue_;
	  float secondFactorProbabilityOriginalValue_;
	  float thirdFactorProbabilityOriginalValue_;
	  float final_1_3_FactorsProbabilityCombinedOriginalValue_;
	  float final_2_3_FactorsProbabilityCombinedOriginalValue_;
	  float final_1_2_3_FactorsProbabilityCombinedOriginalValue_;

	  // Updated by Wei 2013/08/29: baseline variants.
	  float final_2_3_FactorsProbabilityCombinedOriginalValueBaseline_;
	  float final_1_2_3_FactorsProbabilityCombinedOriginalValueBaseline_;

	  float probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionOriginalValue_;
	  float probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionOriginalValue_;
	  float probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionOriginalValue_;
	  float probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionOriginalValue_;


	  // Embedded-Python handles for the auxiliary feature lookups.
	  // NOTE(review): raw PyObject* members -- verify refcount teardown in the .cc.
	  PyObject *pName_, *pModule_;
	  PyObject *pFunc_, *pArgs_, *pValue_;
};

/**************************************************************************************************************************************************************
 * aux file for pruning term entry
 *
 **************************************************************************************************************************************************************/
class aux_pruning_term_entry {
public:
	// Per-term bookkeeping entry for the pruning pass; the constructor is
	// defined in the .cc file and is assumed to copy each argument into the
	// matching member below.
	// NOTE(review): the posting-count parameters are uint32_t but the members
	// are int -- values above INT_MAX would narrow; confirm this is intended.
	aux_pruning_term_entry(string term, uint32_t num_of_posting_pruned, uint32_t num_of_posting_in_the_original_list, float current_min_value, uint32_t current_min_docID, bool whether_there_are_still_postings_to_prune);
	string term_;                               // the term this entry tracks
	int num_of_posting_pruned_;                 // postings pruned so far
	int num_of_posting_in_the_original_list_;   // original list length
	float current_min_value_;                   // smallest remaining impact value (see MinValueCompare)
	uint32_t current_min_docID_;                // docID carrying that minimum value
	bool whether_there_are_still_postings_to_prune_;
private:
};

/**************************************************************************************************************************************************************
 * MinValueCompare
 *
 **************************************************************************************************************************************************************/
// Comparator over aux_pruning_term_entry that looks only at
// current_min_value_; every other field is ignored.
struct MinValueCompare {
  // Ranks the entry with the larger minimum value first (descending order).
  bool operator()(const aux_pruning_term_entry& l, const aux_pruning_term_entry& r) const {
    return r.current_min_value_ < l.current_min_value_;
  }
};




#endif /* INDEX_LAYERIFY_H_ */
