//==============================================================================================================================================================
// Author(s): Roman Khmelichek, Wei Jiang
//
//==============================================================================================================================================================

#ifndef INDEX_LAYERIFY_H_
#define INDEX_LAYERIFY_H_

// Enables debugging output for this module.
// #define INDEX_LAYERIFY_DEBUG

#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <map>

// across all servers
// #include <python2.7/Python.h>
// vidaserver1
// #include <python2.6/Python.h>

#include <sstream>
#include <stdint.h>
#include <string>
#include <vector>

#include "coding_policy.h"
#include "document_map.h"
#include "external_index.h"
#include "index_layout_parameters.h"
#include "index_util.h"
#include "sofia-ml-methods.h"
#include "posting_quantization.h"

using namespace std;


// Bookkeeping record for one document's in-memory posting array while it is
// being filled / consumed. Used as the `.first` member of the
// docPostings*Tuple pair types declared below; the HeapTuple*Compare
// comparators order tuples on current_largest_value_of_the_posting_array.
typedef struct MetaInfo_SET {
	// Seems that all the variables init here are necessary
	uint32_t current_doc_id;                            // docID this record describes
	double current_largest_value_of_the_posting_array;  // heap-ordering key (see HeapTuple*Compare)
	int current_size_of_the_posting_array_filled;       // entries written so far
	int current_size_of_the_posting_array_allocated;    // capacity of the backing array
	int current_beginning_index_of_the_posting_array;   // presumably the first not-yet-consumed slot — confirm at use sites
	double selected_Xdoc;                               // NOTE(review): Xdoc statistics; semantics not visible in this header
	double all_Xdoc;
} MetaInfo_SET;

// Updated by Wei 2013/09/14 afternoon at school
// Still in test and I am NOT 100% sure that is the right way to implement stuff
// The tuple will have 2 values.
// The 1st value is the MetaInfo_SET bookkeeping record (its
// current_largest_value_of_the_posting_array field is what HeapTupleCompare
// orders by).
// The 2nd value is a pointer to the actual array of postings
// This data structure also follows Prof's practice
typedef std::pair<MetaInfo_SET, IndexDocEntry*> docPostingsTuple;

// Updated by Wei 2013/09/20 morning at school
// The tuple will have 2 values.
// The 1st value is a structure called MetaInfo_SET
// The 2nd value is a pointer to the actual array of postings for that document
typedef std::pair<MetaInfo_SET, IndexDocOptimizedEntry*> docPostingsOptimizedTuple;

// Updated by Wei 2014/02/11 afternoon at school
// The tuple will have 2 values.
// The 1st value is a structure called MetaInfo_SET
// The 2nd value is a pointer to the actual array of postings for that document
typedef std::pair<MetaInfo_SET, IndexDocReOptimizedEntry*> docPostingsReOptimizedTuple;

/**************************************************************************************************************************************************************
 * DocPostingOptimizedCompare
 * Updated by Wei 2013/09/21 afternoon at school
 * If the room here is NOT big enough for the logic, then move the logic to the .cc file:)
 **************************************************************************************************************************************************************/
// Comparator / scorer for IndexDocOptimizedEntry values.
// score() combines a static part and a dynamic part (see getStaticPartScore /
// getDynamicPartScore, defined in the .cc file); operator() makes the class
// usable as an STL comparison functor. The XDOC setters allow the dynamic
// inputs to be refreshed between comparisons without rebuilding the object.
class DocPostingOptimizedCompareClass{
public:
	// `termID_with_their_first_factor_probability_map` is held by reference —
	// the caller must keep it alive for the lifetime of this object.
	// `a` / `b` are blend weights (see the member comments below for which is
	// still in use).
	DocPostingOptimizedCompareClass(const map<uint32_t, float>& termID_with_their_first_factor_probability_map, float a, float b, float currentSelectXDOC, float currentALLXDOC);

	// Installs the auxiliary-entry buffer consulted during scoring.
	// Non-owning: the caller retains ownership of the buffer.
	void setIndexDocAuxEntryBuffer(IndexDocAuxEntry* index_doc_aux_entry_buffer){
		index_doc_aux_entry_buffer_ = index_doc_aux_entry_buffer;
	}

	/*
	void setCurrentPostingFirstProbability(float currentPostingFirstProbability){
		currentPostingFirstProbability_ = currentPostingFirstProbability;
	}
	*/

	// Refreshes the "selected Xdoc" input of the dynamic score part.
	void setCurrentSelectXDOC(float currentSelectXDOC){
		currentSelectXDOC_ = currentSelectXDOC;
	}

	// Refreshes the "all Xdoc" input of the dynamic score part.
	void setCurrentALLXDOC(float currentALLXDOC){
		currentALLXDOC_ = currentALLXDOC;
	}

	// Last computed static-part probability (set by the scoring methods in the .cc file).
	double getStaticProbability(){
		return staticProbability_;
	}

	// Last computed dynamic-part probability (set by the scoring methods in the .cc file).
	double getDynamicProbability(){
		return dynamicProbability_;
	}

	// Last computed partial BM25 contribution (set during scoring).
	float getPartialBM25(){
		return partialBM25_;
	}

	double getStaticPartScore(const IndexDocOptimizedEntry& entry);
	double getDynamicPartScore(const IndexDocOptimizedEntry& entry);
	double score(const IndexDocOptimizedEntry& entry);
	bool operator()(const IndexDocOptimizedEntry& lhs, const IndexDocOptimizedEntry& rhs);

private:
  const map<uint32_t, float>& termID_with_their_first_factor_probability_map_;
  float a_;	// The weight controlling the static probability part (NOT USED Since 2013/10/24 afternoon by Wei at school)
  float b_;	// The weight controlling the dynamic probability part (USED ONLY Since 2013/10/24 afternoon by Wei at school)
  float currentSelectXDOC_;
  float currentALLXDOC_;
  // float currentPostingFirstProbability_;
  // Updated on 2013/11/10 afternoon by Wei at school (still in development and debug)
  IndexDocAuxEntry* index_doc_aux_entry_buffer_;  // non-owning; see setIndexDocAuxEntryBuffer()
  double staticProbability_;
  double dynamicProbability_;
  float partialBM25_;
};

/**************************************************************************************************************************************************************
 * DocPostingOptimizedCompareForTest
 * Updated by Wei 2013/09/22 night at school
 **************************************************************************************************************************************************************/
struct DocPostingOptimizedCompareForTest{
  // Trivial "no-op" comparator used for testing: treats all entries as
  // equivalent, so sorting with it leaves the relative order unspecified.
  //
  // FIX: the original returned `true` unconditionally, which is not a valid
  // strict weak ordering — a Compare must be irreflexive (comp(x, x) must be
  // false), and passing a non-conforming comparator to std::sort or the heap
  // algorithms is undefined behavior (real implementations can crash or loop
  // forever). Returning `false` is the valid trivial ordering and preserves
  // the "compare nothing" intent.
  bool operator()(const IndexDocOptimizedEntry& l, const IndexDocOptimizedEntry& r) const {
	  // The operators:
	  // > descending order
	  // < ascending order
	  return false;
  }
};

/**************************************************************************************************************************************************************
 * DocPostingCompare
 * Updated by Wei 2013/09/14 night at school
 **************************************************************************************************************************************************************/
struct DocPostingCompare{
  // Orders IndexDocEntry values so std::sort yields a DESCENDING sequence of
  // valueToComputeANDStoreCombined (largest first). Note std::sort arranges
  // elements so comp(earlier, later) holds, and does not keep the relative
  // order of equivalent elements — use stable_sort if that matters.
  bool operator()(const IndexDocEntry& l, const IndexDocEntry& r) const {
	  // Written with mirrored operands: r.x < l.x is exactly l.x > r.x,
	  // i.e. bigger combined values sort first.
	  // Alternative sort key, kept for reference:
	  //   l.partialBM25 > r.partialBM25
	  return r.valueToComputeANDStoreCombined < l.valueToComputeANDStoreCombined;
  }
};

/**************************************************************************************************************************************************************
 * HeapTupleReOptimizedCompare
 * Updated by Wei 2014/02/11 afternoon at school
 **************************************************************************************************************************************************************/
struct HeapTupleReOptimizedCompare {
  bool operator()(const docPostingsReOptimizedTuple& l, const docPostingsReOptimizedTuple& r) const {
	  // option1:
	  return l.first.current_largest_value_of_the_posting_array < r.first.current_largest_value_of_the_posting_array;
  }
};

/**************************************************************************************************************************************************************
 * HeapTupleOptimizedCompare
 * Updated by Wei 2013/09/21 night at school
 **************************************************************************************************************************************************************/
struct HeapTupleOptimizedCompare {
  // Heap ordering for docPostingsOptimizedTuple, keyed on the largest value
  // currently stored in the tuple's posting array (tuple with the bigger
  // value compares greater, so it sits on top of a standard max-heap).
  bool operator()(const docPostingsOptimizedTuple& l, const docPostingsOptimizedTuple& r) const {
	  const double left_key  = l.first.current_largest_value_of_the_posting_array;
	  const double right_key = r.first.current_largest_value_of_the_posting_array;
	  return left_key < right_key;
  }
};

/**************************************************************************************************************************************************************
 * HeapTupleCompare
 * Updated by Wei 2013/09/14 night at school
 **************************************************************************************************************************************************************/
struct HeapTupleCompare {
  // Heap ordering for docPostingsTuple: compares by the largest value seen in
  // each tuple's posting array, so the standard-library max-heap keeps the
  // tuple with the biggest value on top.
  bool operator()(const docPostingsTuple& l, const docPostingsTuple& r) const {
	  // Equivalent to l.key < r.key, spelled with mirrored operands.
	  // Alternative key, kept for reference:
	  //   l.second[0].partialBM25 < r.second[0].partialBM25
	  return r.first.current_largest_value_of_the_posting_array > l.first.current_largest_value_of_the_posting_array;
  }
};

/**************************************************************************************************************************************************************
 * LayeredIndexGenerator
 *
 **************************************************************************************************************************************************************/
class DocIdScoreComparison;
class DocIdScoreComparisonWei;
class ExternalIndexBuilder;
class IndexBuilder;

// Drives the construction of a layered (pruned / reordered) index from an
// existing inverted index, plus a large collection of research utilities:
// forward-index building, posting pruning/popping experiments, probability
// table loading, and assorted aux-file loaders. Most methods are experiments
// added over time (see the per-method date comments); their bodies live in
// the .cc file.
class LayeredIndexGenerator {
public:
  LayeredIndexGenerator(const IndexFiles& input_index_files, const std::string& output_index_prefix);
  ~LayeredIndexGenerator();

  void Make64BitLexicon();
  void Compute3DDenominatorTable();
  void RecordTermUpperBoundsOfTermsForWANDANDMaxScore();
  void CreateLayeredIndex();

  // Updated by Wei 2013/03/21
  // build the forward index.
  // input: original inverted index
  // output: forward index while maintaining the same info from the original inverted index
  void BuildForwardIndex(vector<string> & queryTerms,bool debugFlag,bool store_computed_score_into_external_index_flag);

  // Updated by Wei on 2013/09/22 Sun at school
  bool scoreInTest(const IndexDocOptimizedEntry& l,const IndexDocOptimizedEntry& r);

  // Updated by Wei on 2013/09/20 afternoon at school
  void ComputeHowMuchMemoryWillBeUsed();

  int codePostDummy(int termid, double score, unsigned char *data_buffer_write_ptr,unsigned char *data_buffer_read_ptr);

  // Updated by Wei on 2014/02/19 night at school
  void quantization();

  // Updated by Wei on 2014/03/10 afternoon at school
  void do_simple_popping(const uint32_t NUM_OF_DOCS_IN_GOV2_COLLECTION_LOWER_BOUND, const uint32_t NUM_OF_DOCS_IN_GOV2_COLLECTION_UPPER_BOUND, string inputBinaryFileName, string outputFileNameInBinaryFormat,const float b, bool dynamicWeightSwitch, bool staticMode);

  // Updated by Wei on 2014/06/05 afternoon at school
  // DO NOT USE anymore
  // Updated by Wei on 2014/02/10 afternoon at school
  // void do_simple_popping_Since20140304Afternoon(bool debugFlag);

  // Updated by Wei on 2014/03/04 night at school
  void load_aux_file_for_posting_info_file_navigation(string inputFileName);

  // Updated by Wei on 2013/09/20 morning at school
  void PrototypingOfThePostingOrientedUniformPruningMethodOptimizedVersion(bool debugFlag);

  // Updated by Wei on 2014/06/07
  void OutputSimpleDocumentPostingArray();

  // Updated by Wei on 2014/09/30 at school
  void SetUpperBoundTermScoresInLexicon();

  // Updated by Wei on 2014/06/17 at school
  void GetRankFromExternalIndex(bool debugFlag);

  // Updated by Wei on 2014/06/14
  // has the potential to be dumped (superseded by the 2D/3D-table variants below)
  void OfflineDocumentAnalysis_onlineRankComputing(bool debugFlag);

  // Updated by Wei on 2014/06/17
  void OfflineDocumentAnalysis_offlineRankRetrieving_using2DTable(bool debugFlag);

  // Updated by Wei on 2014/10/25
  void OfflineDocumentAnalysis_offlineRankRetrieving_using3DTable(bool debugFlag);

  // Updated by Wei on 2013/11/06 night at school
  void DocumentAnalyzingAboutTheirPrefixBehaviour_OLD_VERSION(bool debugFlag);

  // Updated by Wei on 2013/09/12 night at school
  void PrototypingOfThePostingOrientedUniformPruningMethod(bool debugFlag);
  void StorePostingRankInListToExternalIndex(map<string,int> & queryTermsDictForDebugging, bool debugFlag);
  void OutputImpactScoreForGov2();
  void ComputePostingRankInListForClueweb09B();
  // The following function has two purposes.
  // (1) It is a good prototype for the previous literature. Basically, it iterates through every query term
  // (2) Also, I can use this function to store the probability into the external index
  void CreatePrunedIndexForMultipleTerms(vector<string> & queryTerms, bool debugFlag,bool store_computed_score_into_external_index_flag,float percentageToKeepOfTheWholeIndex,int pruningMethodCodeOfTheWholeIndex,map<string,float> &queryTermsProbabilityDistributionMap);
  void CreatePrunedIndexForMultipleTermsBasedOnUniversalScoreImportanceOLDAndNotUsed(vector<string> & queryTerms, bool debugFlag,bool store_computed_score_into_external_index_flag,float percentageToKeepOfTheWholeIndex,int pruningMethodCodeOfTheWholeIndex);
  void CreateExternalScoreFileForEachQueryTerm(vector<string> & queryTerms, bool debugFlag, int pruningMethodCodeForTheTerm);
  void CutBasedOnUniversalImportanceScore(vector<string> & queryTerms, bool debugFlag, bool store_computed_score_into_external_index_flag, float percentageToKeepOfTheWholeIndex, int pruningMethodCodeOfTheWholeIndex);

  // Updated by Wei on 2013/09/24 night at school
  void OutputASetOfDocumentsNeededToBeParsedGivenASetOfPostingsAsInput();

  // Updated by Wei on 2013/08/08 night at school
  void CheckNewlyGenerated64BitIndex();

  // Updated by Wei on 2013/08/07 night at school
  void OutputingDocIDANDNumOfPostingsStoredInIndex();

  // Updated by Wei on 2013/08/06 night at school
  void ProduceProbabilitiesForRandomlySelectedPostings();

  // Updated by Wei on 2014/06/15 at school
  void ProduceScoresForRandomlySelectedPostings();

  // Updated by Wei on 2013/08/05 night at school
  void CreateHistogram(vector<string> & queryTerms, bool debugFlag, int sortingMethodCodeForTheTerm);
  void CreateCutThresholdOfEachTermBasedOnPercentageForMultipleTerms();
  void CreateCutThresholdOfEachTermBasedOnPercentageForMultipleTerms2();


  // Number-to-string formatting helpers (bodies in the .cc file).
  string make_the_value_into_string_format_with_fixed_mode(float originalValue, int precisionNumber, bool debugFlag);
  string make_the_value_into_string_format(float originalValue);
  string make_the_value_into_string_format(int originalValue);

  // Updated by Wei on 2014/09/07
  // 1st column: curr_term
  // 2nd column: length of the list in index
  void OutputTermAndTermListLength();

  // Updated by Wei 2014/10/08
  void LoadDocIDAndBinValue(map<uint32_t,int> &docIDAndBinValue);

  // Updated by Wei 2014/07/18
  void LoadUpSelectedTerms(map<string,int> &queryTerms);

  // Updated by Wei 2013/09/15 afternoon
  void LoadUpTermIDANDTermPairs();

  // Updated by Wei 2013/09/14 afternoon
  void LoadUpDocIDANDNumOfPostingPairs();

  // Updated by Wei 2013/09/13 afternoon
  void LoadUpProbabilityTableBasedOnListLengthANDRelativeRank();

  // Updated by Wei 2013/08/28 night
  void LoadUpTheCombinationOfSecondANDThirdFactorProbabilityTable();

  // Updated by Wei 2013/08/06 night
  void LoadUpRandomlySelectedPostings();

  // Updated by Wei on 2013/09/22 afternoon at school
  void buildTermIDWithTheirFirstFactorProbabilityMap();

  // Updated by Wei on 2014/06/13 at school
  void LoadFirstFactorProbability();


  // Reused by Wei on 2013/09/17 night at school
  // Updated by Wei on 2013/08/05 night at school
  void LoadUpAuxFilesForFirstProbabilityFactor();
  void LoadUpAuxFilesForSecondProbabilityFactor();
  void OutputDocIDANDTrecIDANDDocSizeInWords();

  void LoadUpThreeFeatureValuesForMachineLearnedTraining();

  void LoadUpEssentialityForPosting();

  void LoadUp3DTableProbabilities();

  void LoadUp2DTableProbabilities();

  void LoadUpDocHitInfoForThirdDimension();

  void makeIRToolkitCompatibleIndex();

private:
  // Returns a reader for the external index file; caller presumably owns the
  // returned object — confirm against the .cc implementation.
  const ExternalIndexReader* GetExternalIndexReader(const char* external_index_filename) const;

  // Updated by Wei 2013/01/27.
  // TODO: This function should be reverted back to the original one
  void DumpToIndex(const DocIdScoreComparison& doc_id_score_comparator, IndexEntry* index_entries, int num_index_entries, const char* curr_term,
                   int curr_term_len,bool store_computed_score_into_external_index_flag);
  /*
  void DumpToIndexForPruningProject(const DocIdScoreComparison& doc_id_score_comparator, IndexEntry* index_entries, int num_index_entries,
                                          const char* curr_term, int curr_term_len, bool store_computed_score_into_external_index_flag, int specialNumberValue);
  */

  void DumpToIndexForPruningProjectWeiWithoutTheArgumentDoc_id_score_comparator(IndexEntryWeiForPruning* index_entries, int num_index_entries,
                                          const char* curr_term, int curr_term_len, bool store_computed_score_into_external_index_flag, int specialNumberValue);

  void DumpToIndexForPruningProjectWei(DocIdScoreComparisonWei& doc_id_score_comparator, IndexEntry* index_entries, int num_index_entries,
                                          const char* curr_term, int curr_term_len, bool store_computed_score_into_external_index_flag, int specialNumberValue);
  float GetChunkMaxScore(const DocIdScoreComparison& doc_id_score_comparator, IndexEntry* chunk_entries, int num_chunk_entries);
  float GetChunkMaxScoreWei(DocIdScoreComparisonWei& doc_id_score_comparator, IndexEntry* chunk_entries, int num_chunk_entries);
  float GetChunkMaxScoreWeiWithoutTheArgumentDoc_id_score_comparator(IndexEntryWeiForPruning* chunk_entries, int num_chunk_entries);

  void WriteMetaFile(const std::string& meta_filename, int functionValue, float percentageToKeepOfTheWholeIndex, int pruningMethodCodeOfTheWholeIndex);
  void WriteMetaFile(const std::string& meta_filename, int functionValue);

  IndexFiles output_index_files_;                 // The index filenames for the layered index.
  Index* index_;                                  // The index we're creating layers for.
  ExternalIndexBuilder* external_index_builder_;  // Responsible for building the external index, necessary for maximum block and chunk score information and potentially more info.
  IndexBuilder* index_builder_;                   // The current layered index we're building.

  // Some index properties.
  bool includes_contexts_;
  bool includes_positions_;

  // Layering settings for the new index to be generated.
  bool overlapping_layers_;
  int num_layers_;
  std::string layering_strategy_;



  // Compressors to be used for various parts of the index.
  CodingPolicy doc_id_compressor_;
  CodingPolicy frequency_compressor_;
  CodingPolicy position_compressor_;
  CodingPolicy block_header_compressor_;

  // The following properties are derived from the original index meta info file.
  uint32_t total_num_docs_;          // The total number of documents.
  uint32_t total_unique_num_docs_;   // The total number of unique documents.
  uint64_t total_document_lengths_;  // The total document lengths of all documents.
  uint64_t document_posting_count_;  // The total document posting count.
  uint64_t index_posting_count_;     // The total index posting count.
  uint32_t first_doc_id_in_index_;   // The first docID in the index.
  uint32_t last_doc_id_in_index_;    // The last docID in the index.

  // Per-query-term statistics (keyed by the term string).
  map<string,float> query_terms_length_of_the_inverted_index_map_;
  map<string,float> query_terms_term_freq_in_collection_map_;
  map<string,float> query_terms_term_freq_in_queries_map_;

  // Updated by Wei 2013/08/05 night
  // for the first probability factor
  // aux maps for the first probability factor
  map<int,float> freq_first_factor_probability_map_;
  map<string,int> terms_with_corresponding_species_belonging_to_map_;

  // Updated by Wei 2013/08/05 night
  // for the second probability factor
  // aux maps for the second probability factor
  map<int,float> query_length_probability_map_;
  map<string,float> docID_With_Xdoc_Value_goldStandarded_map_;
  map<string,float> docID_With_Xdoc_Value_1D_map_;
  map<string,float> docID_With_Xdoc_Value_2D_map_;
  map<string,float> docID_With_Xdoc_Value_goodTurning_map_;

  // Updated by Wei 2014/08/14
  map<float,float> posting_essentiality_probablity_map_;

  // Updated by Wei 2014/06/15
  // for the randomly selected postings mechanism
  map<string,map<int,string> > term_with_selected_postings_map_;

  // Updated by Wei 2013/09/22 sun at school
  map<uint32_t, float> termID_with_their_first_factor_probability_map_;

  // Updated by Wei 2013/09/19 afternoon at school
  map<string, int> top10RelatedPostingsDict_;

  // Updated by Wei 2013/09/16 afternoon at school
  // termID -> term (inverse of termWithTermIDDict_ below).
  map<uint32_t, string> termIDWithTermDict_;

  // Updated by Wei 2013/09/15 afternoon at school
  map<string, uint32_t> termWithTermIDDict_;


  // key: docID in uint32_t format
  // value: # of postings recorded in doc in int format
  map<uint32_t, int > docIDWithNumOfPostingsRecordedDict_;

  // Updated by Wei on 2014/03/04 night at school
  // Byte offsets used to seek within the posting-info file (see
  // load_aux_file_for_posting_info_file_navigation above).
  map<uint32_t, off_t > docIDMarkWithBeginningByteDict_;

  // Updated by Wei 2013/09/13 afternoon at school
  // 1st level
  // key: classLabel in int format
  // value: a dict containing pieces info
  	  // 2nd level
      // key: the pieceID in int format
  	  // value: the probability that the next posting will hit this piece area in float format
  map<int, map<int,float> > classLabelWithPiecesMetaInfoDict_;

  // Updated by Wei 2013/08/29 morning
  // key: class label in int format
  // value: class partial bm25 lower bound in float format
  // This is the variable(1 out of 2) for the naive pruning method based on some partialBM25 ranges
  map<int, float> class_label_with_lower_bounds_map_;

  // key: class label in int format
  // value class probability in float format
  // This is the variable(2 out of 2) for the naive pruning method based on some partialBM25 ranges
  map<int, float> class_label_with_probability_map_;

  // Updated by Wei 2013/08/07 night
  map<string,int> selected_terms_map_;
  map<uint32_t,int> docIDs_with_num_of_postings_map_;

  // Updated by Wei 2014/02/04 night at school.
  map<int, uint32_t> class_label_with_lower_bounds_of_list_length_map_;
  map<int, float> class_label_with_lower_bounds_of_impact_scores_map_;
  map<string, float> class_label_with_probability_;
  map<uint32_t,string> docIDAndDocHitBelongingClassDict_;

};

/**************************************************************************************************************************************************************
 * DocIdScoreComparison
 *
 * Uses the partial BM25 score to compare two documents from the same list.
 **************************************************************************************************************************************************************/
class DocIdScoreComparison {
public:
	  void hello() const{
	  }

	void LoadModelFromFileWei(const string& file_name, SfWeightVector** w) {
	  if (*w != NULL) {
	    delete *w;
	  }

	  /*
	  // version with std
	  std::fstream model_stream;
	  model_stream.open(file_name.c_str(), std::fstream::in);
	  if (!model_stream) {
	    std::cerr << "Error opening model input file " << file_name << std::endl;
	    exit(1);
	  }

	  std::cerr << "Reading model from: " << file_name << std::endl;
	  string model_string;
	  std::getline(model_stream, model_string);
	  model_stream.close();
	  std::cerr << "   Done." << std::endl;

	  *w = new SfWeightVector(model_string);
	  assert(*w != NULL);
	  */

	  // version without std
	  fstream model_stream;
	  model_stream.open(file_name.c_str(), fstream::in);
	  if (!model_stream) {
	    cerr << "Error opening model input file " << file_name << endl;
	    exit(1);
	  }

	  cerr << "Reading model from: " << file_name << endl;
	  string model_string;
	  getline(model_stream, model_string);
	  model_stream.close();
	  // cerr << "   Done." << endl;

	  *w = new SfWeightVector(model_string);
	  assert(*w != NULL);

	}

  ~DocIdScoreComparison(){
      // final step: deconstruct the things we do NOT need
      // Py_DECREF(pModule_);
      // Py_Finalize();
  }

  DocIdScoreComparison(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code):
	  sorting_method_code_(sorting_method_code),
	  doc_map_reader_(doc_map_reader){

	  if (sorting_method_code_ == 1){
		  // Based on docID
	  }
	  else if(sorting_method_code_ == 2){
		  // Based on partial BM25 score
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));

		  num_docs_t_ = num_docs_t;
	  }
	  else if(sorting_method_code_ == 3){

		  Rmin_ = -1.0;
		  Rmax_ = 1.0;

		  Dmin_feature1_ = 0.001967;
		  Dmax_feature1_ = 17.711489;

		  Dmin_feature2_ = 2.0;
		  Dmax_feature2_ = 23077260;

		  Dmin_feature3_ = 1.0;
		  Dmax_feature3_ = 11531;

		  Dmin_feature4_ = 4.0;
		  Dmax_feature4_ = 87803;

		  // Based on partial BM25 score
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));

		  num_docs_t_ = num_docs_t;
		  // Based on machine learning score assigner (Updated by Wei 2013/01/18)
		  // SfWeightVector* w = NULL;
		  // w = new SfWeightVector(20);

		  // Model1
		  // Model2: minimal features contained from the toolkit(BM25), 4 features, 5 dimensions
		  // Model3
		  // Model4
		  // Model5
		  // Model6
		  // Model7
		  // Model8
		  // Model9

		  // This dimension setting is for Model2
		  w_ = new SfWeightVector(5);
		  LoadModelFromFileWei("/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model2/model", &w_);
	  }
	  else if(sorting_method_code_ == 4){
		  // Updated by Wei on 2014/09/30
		  // init nothing.
	  }
  }

  DocIdScoreComparison(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code, string term):
	  sorting_method_code_(sorting_method_code),
	  term_(term),
	  doc_map_reader_(doc_map_reader){

	  if (sorting_method_code_ == 1){
		  // Based on docID
	  }
	  else if(sorting_method_code_ == 2){
		  // Based on partial BM25 score
		  // compute the partial BM25 score component here.
		  kBm25K1 = 2.0;
		  kBm25B = 0.75;
		  kBm25NumeratorMul = kBm25K1 + 1;
		  kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B);
		  kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / average_doc_len;
		  kIdfT = log10(1 + (total_num_docs - num_docs_t + 0.5) / (num_docs_t + 0.5));

		  num_docs_t_ = num_docs_t;
	  }
  }



  float score(const IndexEntry& entry) const {
	  float returning_score = 0.0;

	  if(sorting_method_code_ == 1){
		  // do nothing.
		  returning_score = -float(entry.doc_id)/100000000;
		  // for debugging.
		  //cout << "returning_score:" << returning_score << endl;
	  }
	  else if(sorting_method_code_ == 2){
		  uint32_t f_d_t = entry.frequency;
		  int doc_len = doc_map_reader_.GetDocumentLength(entry.doc_id);
		  returning_score = kIdfT * (f_d_t * kBm25NumeratorMul) / (f_d_t + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * doc_len);
		  assert(!isnan(returning_score));
	  }
	  else if(sorting_method_code_ == 3){
		  return 0.0;
	  }
	  else if(sorting_method_code_ == 4){
		  return entry.partialBM25;
	  }
	  return returning_score;
  }

  bool operator()(const IndexEntry& lhs, const IndexEntry& rhs) const {
    return score(lhs) > score(rhs);
  }

private:
  // 1: not sorted at all
  // 2: sorted based on partial bm25 score
  // 3: sorted based on a specific machine learned score and a hard-cutoff (Updated by Wei 20130118)
  int sorting_method_code_;

  // the current query term which this class is working on
  string term_;

  // For method1: not sorted at all

  // For method2: sorted based on partial bm25 score
  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
  float kBm25K1;  // k1
  float kBm25B;   // b

  // We can precompute a few of the BM25 values here.
  float kBm25NumeratorMul;
  float kBm25DenominatorAdd;
  float kBm25DenominatorDocLenMul;
  float kIdfT;  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for the entire list.
  int num_docs_t_; // this is actually the feature: term_freq_in_collection

  const DocumentMapReader& doc_map_reader_;

  // For method3: sorted based on a specific machine learned score and a hard-cutoff (Updated by Wei 20130118)
  // The variables can be set here
  SfWeightVector* w_;

  // Let's hard code the scaling weights here. This is ONLY for model2:
  float Rmin_;
  float Rmax_;

  float Dmin_feature1_;
  float Dmax_feature1_;

  float Dmin_feature2_;
  float Dmax_feature2_;

  float Dmin_feature3_;
  float Dmax_feature3_;

  float Dmin_feature4_;
  float Dmax_feature4_;
};

/**************************************************************************************************************************************************************
 * DocIdScoreComparisonWei
 *
 * Uses many different kinds of score to compare two documents from the same list.
 **************************************************************************************************************************************************************/
class DocIdScoreComparisonWei {
public:
	  // NOTE(review): this class declares a destructor but owns a raw
	  // pointer (w_) and declares no copy constructor / copy assignment —
	  // copying an instance would double-free or leak. Confirm against the
	  // .cpp whether copies ever happen; if not, consider deleting them.
	  ~DocIdScoreComparisonWei();
	  // Full-featured constructor: takes the auxiliary probability maps used
	  // by the machine-learned sorting methods. The map parameters are taken
	  // by non-const reference — presumably to avoid copies; verify the
	  // implementation does not mutate them.
	  DocIdScoreComparisonWei(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code, string term, map<int,float>& freq_first_factor_probability_map, map<string,int>& terms_with_corresponding_species_belonging_to_map, map<int,float>& query_length_probability_map, map<int, float>& class_label_with_lower_bounds_map, map<int, float>& class_label_with_probability_map);
	  // Reduced constructor: only a single query-term probability
	  // distribution is supplied (passed by value — a full map copy).
	  DocIdScoreComparisonWei(const DocumentMapReader& doc_map_reader, int num_docs_t, int average_doc_len, int total_num_docs, int sorting_method_code, string term, map<string,float> queryTermsProbabilityDistributionMap);

	  // Feature-extraction helpers; definitions live in the .cpp.
	  string get_high_level_features_including_rank_in_the_doc(string term, string trecID, bool debugFlag);
	  int get_term_freq_in_queries(string term, bool debugFlag);
	  int get_posting_rank_in_list(string term, string docID, bool debugFlag);
	  // Loads a sofia-ml weight vector from file into *w (caller owns it).
	  void load_model_from_file_wei(const string& file_name, SfWeightVector** w);
	  // Pre-loading routines that populate the auxiliary in-memory maps
	  // before scoring begins.
	  void pre_load_aux_file_for_feature_rank_in_the_list();
	  void pre_load_aux_file_for_high_level_features_including_rank_in_the_doc();
	  void pre_load_the_actual_query_term_list_data_into_memory();
	  void pre_load_aux_file_freq_Of_terms_in_queries();

	  // Linearly rescales originalValue from range [Dmin_feature, Dmax_feature]
	  // into [Rmin, Rmax] — TODO confirm formula against the .cpp.
	  float scale_the_value(float originalValue, float Rmin, float Rmax, float Dmin_feature, float Dmax_feature);
	  string make_the_value_into_string_format(float originalValue);
	  // Computes this entry's sort key according to sorting_method_code_.
	  float score(const IndexEntry& entry);
	  // Comparison predicate used when sorting postings.
	  bool operator()(const IndexEntry& lhs, const IndexEntry& rhs);
	  // Returns the query term this comparator instance is bound to.
	  string get_term(){
		  return term_;
	  }

	  // ----- Accessors for the most recently computed probability values.
	  // All of these return state cached by score(); they are only
	  // meaningful after score() has been called at least once.

	  float get_probabilityGivenTheQueryTermsTimesBigNumberValue(){
		  return thirdFactorProbabilityValueTimesBigNumberValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionTimesBigNumberValue(){
		  return probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionTimesBigNumberValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionTimesBigNumberValue(){
		  return probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionTimesBigNumberValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionTimesBigNumberValue(){
		  return probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionTimesBigNumberValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionTimesBigNumberValue(){
		  return probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionTimesBigNumberValue_;
	  }

	  float get_1_FactorProbabilityOriginalValue(){
		  return firstFactorProbabilityOriginalValue_;
	  }

	  float get_2_FactorProbabilityOriginalValue(){
		  return secondFactorProbabilityOriginalValue_;
	  }

	  float get_3_FactorProbabilityOriginalValue(){
		  return thirdFactorProbabilityOriginalValue_;
	  }

	  float get_1_2_3_FactorProbabilitiesCombinedOriginalValue(){
		  return final_1_2_3_FactorsProbabilityCombinedOriginalValue_;
	  }

	  float get_1_3_FactorProbabilitiesCombinedOriginalValue(){
		  return final_1_3_FactorsProbabilityCombinedOriginalValue_;
	  }

	  float get_2_3_FactorProbabilitiesCombinedOriginalValue(){
		  return final_2_3_FactorsProbabilityCombinedOriginalValue_;
	  }

	  // Updated by Wei 2013/08/29 afternoon at school
	  float get_1_2_3_FactorProbabilitiesCombinedOriginalValueBaseline(){
		  return final_1_2_3_FactorsProbabilityCombinedOriginalValueBaseline_;
	  }

	  // Updated by Wei 2013/08/29 afternoon at school
	  float get_2_3_FactorProbabilitiesCombinedOriginalValueBaseline(){
		  return final_2_3_FactorsProbabilityCombinedOriginalValueBaseline_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionOriginalValue(){
		  return probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionOriginalValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionOriginalValue(){
		  return probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionOriginalValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionOriginalValue(){
		  return probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionOriginalValue_;
	  }

	  float get_probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionOriginalValue(){
		  return probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionOriginalValue_;
	  }

	  // public members (short cut for direct access from outside)
	  // NOTE(review): exposing these as public mutable floats bypasses any
	  // invariants — consider accessors if this header is ever reworked.
	  float valueOfCurrentPostingLengthOfTheInvertedList_;
	  float valueOfCurrentPostingTermFreqInCollection_;
	  float valueOfcurrentPostingTermFreqInQueries_;
	  float XDocValue_;

private:
	  // value listing:
	  // 1: not sorted at all
	  // 2: sorted based on partial bm25 score
	  // 3: sorted based on a specific machine learned score and a hard-cutoff (Updated by Wei 20130118)
	  int sorting_method_code_;

	  // the current query term which this class(score assigner) is working on
	  string term_;

	  // For method1: not sorted at all

	  // For method2: sorted based on partial bm25 score
	  // BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
	  float kBm25K1;  // k1
	  float kBm25B;   // b

	  // We can precompute a few of the BM25 values here.
	  float kBm25NumeratorMul;
	  float kBm25DenominatorAdd;
	  float kBm25DenominatorDocLenMul;
	  float kIdfT;  // Compute the inverse document frequency component. It is not document dependent, so we can compute it just once for the entire list.
	  int num_docs_t_; // this is actually the feature: term_freq_in_collection

	  // Reference to the shared document map; the referenced reader must
	  // outlive this comparator.
	  const DocumentMapReader& doc_map_reader_;

	  // been dummped since 2013/08/04 by Wei at school
	  map<string,float> queryTermsTrueProbabilityDistributionMap_;
	  map<string,float> queryTerms1DProbabilityDistributionMap_;
	  map<string,float> queryTerms2DProbabilityDistributionMap_;
	  map<string,float> queryTermsGoodTuringProbabilityDistributionMap_;

	  // currently used since 2013/08/30 by Wei at school, this two variables should be init in the constructor function
	  // key: class label in int format
	  // value: class partial bm25 lower bound in float format
	  // This is the variable(1 out of 2) for the naive pruning method based on some partialBM25 ranges
	  map<int, float> class_label_with_lower_bounds_map_;

	  // key: class label in int format
	  // value class probability in float format
	  // This is the variable(2 out of 2) for the naive pruning method based on some partialBM25 ranges
	  map<int, float> class_label_with_probability_map_;

	  // currently used since 2013/08/04 by Wei at school
	  // aux maps for the first probability factor
	  map<int,float> freq_first_factor_probability_map_;
	  map<string,int> terms_with_corresponding_species_belonging_to_map_;

	  // aux maps for the second probability factor
	  map<int,float> query_length_probability_map_;

	  // map<string,float> docID_With_Xdoc_Value_map_;


	  // For method3: sorted based on a specific machine learned score and a hard-cutoff (Updated by Wei 20130118)
	  // The variables can be set here
	  // NOTE(review): raw owning(?) pointer — presumably released in the
	  // destructor; confirm in the .cpp (a smart pointer would make the
	  // ownership explicit).
	  SfWeightVector* w_;

	  // Let's hard code the scaling weights here. This is now ONLY for model4:
	  // [Rmin_, Rmax_] is the target range; each [Dmin_featureN_, Dmax_featureN_]
	  // pair is the source range for feature N (see scale_the_value()).
	  float Rmin_;
	  float Rmax_;

	  float Dmin_feature1_;
	  float Dmax_feature1_;

	  float Dmin_feature2_;
	  float Dmax_feature2_;

	  float Dmin_feature3_;
	  float Dmax_feature3_;

	  float Dmin_feature4_;
	  float Dmax_feature4_;

	  float Dmin_feature5_;
	  float Dmax_feature5_;

	  float Dmin_feature6_;
	  float Dmax_feature6_;

	  /*
	  float Dmin_feature7_;
	  float Dmax_feature7_;

	  float Dmin_feature8_;
	  float Dmax_feature8_;

	  float Dmin_feature9_;
	  float Dmax_feature9_;

	  float Dmin_feature10_;
	  float Dmax_feature10_;

	  float Dmin_feature11_;
	  float Dmax_feature11_;

	  float Dmin_feature12_;
	  float Dmax_feature12_;

	  float Dmin_feature13_;
	  float Dmax_feature13_;

	  float Dmin_feature14_;
	  float Dmax_feature14_;

	  float Dmin_feature15_;
	  float Dmax_feature15_;

	  float Dmin_feature16_;
	  float Dmax_feature16_;

	  float Dmin_feature17_;
	  float Dmax_feature17_;
	  */

	  // Updated by Wei 2013/07/17
	  // The variables here are for the Logistic Regression method (currently using the machine learning tool called weka)
	  float intercept_weight_0_;
	  float partialBM25ScoreComponentPart1_IDF_weight_1_;
	  float partialBM25ScoreComponentPart2_TF_weight_2_;
	  float partialBM25_weight_3_;
	  float length_of_the_inverted_index_weight_4_;
	  float term_freq_in_doc_weight_5_;
	  float doc_words_weight_6_;
	  float term_freq_in_training_head95K_queries_weight_7_;
	  float term_freq_in_collection_weight_8_;
	  float posting_rank_in_doc_weight_9_;
	  float posting_rank_in_list_weight_10_;


	  // Caches for the probability values surfaced through the public
	  // getters above; written by score().
	  float thirdFactorProbabilityValueTimesBigNumberValue_;
	  float probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionTimesBigNumberValue_;
	  float probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionTimesBigNumberValue_;
	  float probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionTimesBigNumberValue_;
	  float probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionTimesBigNumberValue_;

	  // Updated by Wei 2013/08/04
	  float firstFactorProbabilityOriginalValue_;
	  float secondFactorProbabilityOriginalValue_;
	  float thirdFactorProbabilityOriginalValue_;
	  float final_1_3_FactorsProbabilityCombinedOriginalValue_;
	  float final_2_3_FactorsProbabilityCombinedOriginalValue_;
	  float final_1_2_3_FactorsProbabilityCombinedOriginalValue_;

	  // Updated by Wei 2013/08/29
	  float final_2_3_FactorsProbabilityCombinedOriginalValueBaseline_;
	  float final_1_2_3_FactorsProbabilityCombinedOriginalValueBaseline_;

	  float probabilityNOTGivenQueryTermsUsingTrueQueryTermProbabilityDistributionOriginalValue_;
	  float probabilityNOTGivenQueryTermsUsingOur1DQueryTermProbabilityDistributionOriginalValue_;
	  float probabilityNOTGivenQueryTermsUsingOur2DQueryTermProbabilityDistributionOriginalValue_;
	  float probabilityNOTGivenQueryTermsUsingGoodTuringQueryTermProbabilityDistributionOriginalValue_;

};

/**************************************************************************************************************************************************************
 * aux file for pruning term entry
 *
 **************************************************************************************************************************************************************/
class aux_pruning_term_entry {
public:
	// Per-term bookkeeping entry used while pruning posting lists.
	// NOTE(review): the constructor takes num_of_posting_pruned and
	// num_of_posting_in_the_original_list as uint32_t, but the matching
	// members below are declared int — a signed/unsigned mismatch that
	// silently narrows for counts above INT_MAX. Confirm intended type.
	aux_pruning_term_entry(string term, uint32_t num_of_posting_pruned, uint32_t num_of_posting_in_the_original_list, float current_min_value, uint32_t current_min_docID, bool whether_there_are_still_postings_to_prune);
	string term_;                                   // the query/index term this entry tracks
	int num_of_posting_pruned_;                     // postings removed from this term's list so far
	int num_of_posting_in_the_original_list_;       // list length before any pruning
	float current_min_value_;                       // smallest remaining score; sort key for MinValueCompare
	uint32_t current_min_docID_;                    // docID holding current_min_value_
	bool whether_there_are_still_postings_to_prune_; // false once the list is exhausted
private:
};

/**************************************************************************************************************************************************************
 * MinValueCompare
 *
 **************************************************************************************************************************************************************/
struct MinValueCompare {
  // Orders pruning entries by descending current_min_value_; every other
  // field of aux_pruning_term_entry is ignored by this comparator.
  bool operator()(const aux_pruning_term_entry& l, const aux_pruning_term_entry& r) const {
    const float left_min = l.current_min_value_;
    const float right_min = r.current_min_value_;
    return right_min < left_min;
  }
};




#endif /* INDEX_LAYERIFY_H_ */
