//==============================================================================================================================================================
// Author(s): Roman Khmelichek, Wei Jiang
//
//==============================================================================================================================================================

#include "document_collection.h"


/**************************************************************************************************************************************************************
 * Document
 *
 **************************************************************************************************************************************************************/
// Wraps a single raw document: stores raw pointers into the collection buffer
// (presumably non-owning -- the buffer outlives the Document; confirm against
// callers) plus the docID assigned at parse time.
Document::Document(const char* doc_buf, int doc_len, const char* url_buf, int url_len, uint32_t doc_id) :
  doc_buf_(doc_buf), doc_len_(doc_len), url_buf_(url_buf), url_len_(url_len), doc_id_(doc_id) {
}

/**************************************************************************************************************************************************************
 * DocumentCollection
 *
 **************************************************************************************************************************************************************/
// A collection starts out unprocessed, defaulted to English, with a zeroed
// [initial, final] docID range; the range is filled in via set_initial_doc_id()
// / set_final_doc_id() once the collection has been parsed.
DocumentCollection::DocumentCollection(const string& file_path) :
  processed_(false), file_path_(file_path), lang_(DocumentCollection::ENGLISH), initial_doc_id_(0), final_doc_id_(0) {
}

/*
 * Decompresses this collection's file into *document_collection_buf and
 * returns document_collection_buf_len, the number of uncompressed bytes.
 */
int DocumentCollection::Fill(char** document_collection_buf, int* document_collection_buf_size) {
  int document_collection_buf_len;
  // UncompressFile is expected to set all three out-parameters; the buffer may
  // be (re)allocated and its size updated to fit the uncompressed data --
  // TODO confirm against UncompressFile's contract (the length is returned
  // uninitialized if UncompressFile does not write it).
  UncompressFile(file_path_.c_str(), document_collection_buf, document_collection_buf_size, &document_collection_buf_len);
  return document_collection_buf_len;
}

/**************************************************************************************************************************************************************
 * Class IndexCollection
 *
 **************************************************************************************************************************************************************/

// Add document collection using the path given by the argument.
// The file is opened once purely as an existence/readability probe; on success
// the path is queued as a DocumentCollection, otherwise the failure is logged
// (non-fatal) and the path is skipped.
void IndexCollection::AddDocumentCollection(const string& path) {
  ifstream ifs(path.c_str(), ifstream::in);
  // Check the stream state while it is still open. The previous code called
  // close() first and then tested fail(), which conflated an open failure
  // with a close failure.
  if (ifs) {
    ifs.close();
    DocumentCollection doc_collection(path);
    doc_collections_.push_back(doc_collection);
  } else {
    GetErrorLogger().Log("Could not open document collection file '" + path + "'. Skipping...", false);
  }
}

// Empty document collections.
// Drops every registered collection; docIDs already assigned elsewhere are
// not affected by this call.
void IndexCollection::EmptyDocumentCollections() {
	doc_collections_.clear();
	//GetErrorLogger().Log("doc_collections_.clear()", false);

}


// Reads one collection path per line from the input stream and registers each
// non-empty path via AddDocumentCollection().
void IndexCollection::ProcessDocumentCollections(istream& is) {
  string line;
  while (getline(is, line)) {
    if (!line.empty()) {
      AddDocumentCollection(line);
    }
  }
}

/**************************************************************************************************************************************************************
 * CollectionIndexer
 *
 **************************************************************************************************************************************************************/
// Reads the collection buffer size from the configuration and allocates the
// reusable decompression buffer up front.
// NOTE(review): members are initialized in declaration order, so this relies on
// document_collection_buffer_size_ being declared before
// document_collection_buffer_ in the header -- confirm.
CollectionIndexer::CollectionIndexer() :
  document_collection_buffer_size_(atol(Configuration::GetConfiguration().GetValue(config_properties::kDocumentCollectionBufferSize).c_str())),
      document_collection_buffer_(new char[document_collection_buffer_size_]), parser_callback_(&GetPostingCollectionController(), &GetEdgeCollectionController() ),
      parser_(Parser<IndexingParserCallback>::kManyDoc, GetAndVerifyDocType(), &parser_callback_), doc_id_(0), avg_doc_length_(0) {

  // atol() yields 0 for a missing or unparsable setting. Note the zero-length
  // buffer has already been allocated (new char[0] is legal) by the time this
  // check runs; the true argument presumably makes the log fatal -- confirm.
  if (document_collection_buffer_size_ == 0){
	  GetErrorLogger().Log("Check configuration setting for '" + string(config_properties::kDocumentCollectionBufferSize) + "'.", true);
  }
}

// Releases the decompression buffer allocated in the constructor.
CollectionIndexer::~CollectionIndexer() {
  delete[] document_collection_buffer_;
}

// Reads the configured document collection format string and maps it to the
// parser's DocType enum; hands unrecognized values to the configuration error
// path (which reports the bad setting).
Parser<IndexingParserCallback>::DocType CollectionIndexer::GetAndVerifyDocType() {
  const string format_value =
      Configuration::GetResultValue(Configuration::GetConfiguration().GetStringValue(config_properties::kDocumentCollectionFormat));

  Parser<IndexingParserCallback>::DocType doc_type =
      Parser<IndexingParserCallback>::GetDocumentCollectionFormat(format_value.c_str());
  if (doc_type == Parser<IndexingParserCallback>::kNoSuchDocType) {
    Configuration::ErroneousValue(config_properties::kDocumentCollectionFormat, format_value);
  }

  return doc_type;
}

// Loads "<doc_id> <priority>" pairs (space separated, one per line) from the
// configured priority-list file into priorityLookUpDict. Logs a fatal-style
// message (second Log argument true) when nothing was loaded.
void CollectionIndexer::LoadUpWarcDocumentsPriorityList(map<string, int> &priorityLookUpDict){
    //This should be another function to load the priority list.
    //I just directly do logic here and this is not the proper way. I will have to factor my code a bit later.
    string priorityListFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPriorityWarcDocumentsIDsListFilePath));
    string priorityLine;
    vector<string> priorityListElements;
    ifstream priorityInputfile(priorityListFileName.c_str());

    // Loop on the result of getline() itself; testing good() before reading
    // (as the old code did) can act on a failed read at end-of-file.
    while (getline(priorityInputfile, priorityLine)) {
        if (priorityLine.empty())
            continue;
        trim(priorityLine);
        split(priorityListElements, priorityLine, is_any_of(" "));
        // Skip malformed lines instead of indexing past the end of the
        // token vector (whitespace-only lines split to a single token).
        if (priorityListElements.size() >= 2) {
            priorityLookUpDict[ priorityListElements[0] ] = atoi( priorityListElements[1].c_str() );
        }
    }
    priorityInputfile.close();

    if(priorityLookUpDict.size() == 0){
        GetDefaultLogger().Log("Load Priority Document IDs List is NOT Done", true);
    }
    else{
        GetDefaultLogger().Log(Stringify(priorityLookUpDict.size()) + " Priority Document IDs tuples have been loaded.", false);
    }
}

// Loads the Clueweb09 spam-rank dictionary ("<doc_id> <score>" per line,
// space separated) from the configured file into lookUpDict. Logs a
// fatal-style message when nothing was loaded.
void CollectionIndexer::LoadUpWarcSpamScoreList(map<string, long> &lookUpDict){
    //This should be another function to load this spam report.
    //I just directly do logic here and this is not the proper way. I will have to factor my code a bit later.
    string dictFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kSpamRankDictFile));
    string line;
    vector<string> elements;
    ifstream inputfile(dictFileName.c_str());

    // Loop on the result of getline() itself; testing good() before reading
    // (as the old code did) can act on a failed read at end-of-file.
    while (getline(inputfile, line)) {
        if (line.empty())
            continue;
        trim(line);
        split(elements, line, is_any_of(" "));
        // Skip malformed lines instead of indexing past the end of the
        // token vector (whitespace-only lines split to a single token).
        if (elements.size() >= 2) {
            lookUpDict[ elements[0] ] = atol( elements[1].c_str() );
        }
    }
    inputfile.close();

    if(lookUpDict.size() == 0){
        GetDefaultLogger().Log("Load Spam Rank List for Clueweb09 dataset is NOT Done", true);
    }
    else{
        GetDefaultLogger().Log(Stringify(lookUpDict.size()) + " Spam Score Document tuples have been loaded (Wei:2012/10/02 May have some problems, it is NOT loading the completed whole file).", false);
    }
}



// Loads the popular-site list from the configured comma-separated file into
// lookUpDictForPopularSite, keyed by the second field with the first field as
// the value. Logs a fatal-style message when nothing was loaded.
void CollectionIndexer::LoadUpPopularSiteList(map<string, string> &lookUpDictForPopularSite){
    //This should be another function to load this popular site report from disk.
    //I just directly do logic here and this is not the proper way. I will have to factor my code a bit later.
    string dataFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPopularSiteFile));
    string currentLine;
    vector<string> elementsForPopularSites;
    ifstream inputfileForPopularSites(dataFileName.c_str());

    // Loop on the result of getline() itself; testing good() before reading
    // (as the old code did) can act on a failed read at end-of-file.
    while (getline(inputfileForPopularSites, currentLine)) {
        if (currentLine.empty())
            continue;
        trim(currentLine);
        split(elementsForPopularSites, currentLine, is_any_of(",") );
        // Skip malformed lines instead of indexing past the end of the
        // token vector (lines without a comma split to a single token).
        if (elementsForPopularSites.size() >= 2) {
            lookUpDictForPopularSite[ elementsForPopularSites[1] ] = elementsForPopularSites[0];
        }
    }
    inputfileForPopularSites.close();

    if(lookUpDictForPopularSite.size() == 0){
        GetDefaultLogger().Log("Load Popular Sites List is NOT Done", true);
    }
    else{
        GetDefaultLogger().Log(Stringify(lookUpDictForPopularSite.size()) + " popular sites have been loaded.", false);
    }
}

//Special function for wei to use, updated 2012/06/27
// Decompresses every registered collection in turn and asks the parser to show
// the document at byte range [beginningPosition, endingPosition) within the
// uncompressed buffer, highlighting the given term.
void CollectionIndexer::showContentOfDocumentForWei(const long beginningPosition, const long endingPosition,const char* term, int term_len)
{
  for (vector<DocumentCollection>::iterator i = doc_collections_.begin(); i != doc_collections_.end(); ++i)
  {
    GetDefaultLogger().Log("Processing: " + i->file_path(), false);

    // Fill() reports the uncompressed length, which is not needed here because
    // the caller supplies explicit byte offsets; the call is kept for its
    // side effect of loading the buffer. (Replaces the old dead if(false)
    // block that only existed to silence a set-but-unused warning.)
    int document_collection_buffer_len = i->Fill(&document_collection_buffer_, &document_collection_buffer_size_);
    (void) document_collection_buffer_len;

    char* document_starting_point = document_collection_buffer_ + beginningPosition;
    int document_size_in_bytes = endingPosition - beginningPosition;
    parser_.ShowContentOfSpecifcDocument(document_starting_point, document_size_in_bytes ,beginningPosition,endingPosition,term,term_len);
  }
}

//Special function for wei to use, updated 2012/06/27
// Shows the document at byte range [beginningPosition, endingPosition). When
// reloadCompressedFileFlag is set, every registered collection is decompressed
// into the shared buffer first; otherwise the buffer's existing contents are
// reused. Output goes through outputFileHandler.
void CollectionIndexer::showContentOfDocumentForWei(const long beginningPosition, const long endingPosition, vector<string> &queryID_Term_docIDList, string currentDocID, bool reloadCompressedFileFlag,ofstream &outputFileHandler)
{
	char* document_starting_point;
	int document_size_in_bytes = endingPosition-beginningPosition;
	if (reloadCompressedFileFlag){
		for (vector<DocumentCollection>::iterator i = doc_collections_.begin(); i != doc_collections_.end(); ++i)
		{
			GetDefaultLogger().Log("Retrieve and Uncompress: " + i->file_path() + " in memory", false);

			// Length is unused (caller supplies explicit offsets); keep the
			// call for its buffer-loading side effect. (Replaces the old dead
			// if(false) block that only silenced a set-but-unused warning.)
			int document_collection_buffer_len = i->Fill(&document_collection_buffer_, &document_collection_buffer_size_);
			(void) document_collection_buffer_len;

			document_starting_point = document_collection_buffer_ + beginningPosition;
			parser_.ShowContentOfSpecifcDocument(document_starting_point, document_size_in_bytes ,beginningPosition,endingPosition,queryID_Term_docIDList,currentDocID, outputFileHandler);
		}
	}
	else{
		//use the old buffer and will be fine.
		GetDefaultLogger().Log("Directly use the uncompressed file in memory", false);
		document_starting_point = document_collection_buffer_ + beginningPosition;
		parser_.ShowContentOfSpecifcDocument(document_starting_point, document_size_in_bytes ,beginningPosition,endingPosition,queryID_Term_docIDList,currentDocID, outputFileHandler);
	}
}


//Special function for wei to use, updated 2012/06/27
// Shows and simple-parses the document at byte range
// [beginningPosition, endingPosition). When reloadCompressedFileFlag is set,
// every registered collection is decompressed into the shared buffer first;
// otherwise the buffer's existing contents are reused.
void CollectionIndexer::showContentOfDocumentForWei(long beginningPosition, long endingPosition, string currentDocID, bool reloadCompressedFileFlag)
{
	char* document_starting_point;
	int document_size_in_bytes = endingPosition-beginningPosition;
	if (reloadCompressedFileFlag){
		for (vector<DocumentCollection>::iterator i = doc_collections_.begin(); i != doc_collections_.end(); ++i)
		{
			GetDefaultLogger().Log("Retrieve and Uncompress: " + i->file_path() + " in memory", false);

			// Length is unused (caller supplies explicit offsets); keep the
			// call for its buffer-loading side effect. (Replaces the old dead
			// if(false) block that only silenced a set-but-unused warning.)
			int document_collection_buffer_len = i->Fill(&document_collection_buffer_, &document_collection_buffer_size_);
			(void) document_collection_buffer_len;

			document_starting_point = document_collection_buffer_ + beginningPosition;
			parser_.ShowContentAndSimpleParse(document_starting_point, document_size_in_bytes);
		}
	}
	else{
		//use the old buffer and will be fine.
		GetDefaultLogger().Log("Directly use the uncompressed file in memory", false);
		document_starting_point = document_collection_buffer_ + beginningPosition;
		parser_.ShowContentAndSimpleParse(document_starting_point, document_size_in_bytes);
	}
}

// This function is used for parsing gov2 dataset ONLY currently.
// Walks every registered collection, decompresses it, and runs the phase-2
// pruning extraction pass over it, recording each collection's [initial, final]
// docID range. The WARC branch is intentionally unimplemented (fatal log).
void CollectionIndexer::ParseDocumentCollectionsAndExtractingInfoForPhase2Pruning()
{
  // First, read the collection type from the .conf file to tell whether it is WARC type or TREC type.
  string collectionFormat = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocumentCollectionFormat));

  int total_num_docs_found = 0;

  //TODO: currently, just do a string comparison. In future, more proper way should be needed.
  if (collectionFormat == "warc"){
	  GetDefaultLogger().Log("dataset input format:CLUEWEB2009", false);
	  GetDefaultLogger().Log("This dataset format is currently NOT supported", true);
  }
  else if (collectionFormat == "trec"){
	  GetDefaultLogger().Log("dataset input format:GOV2", false);
	  for (vector<DocumentCollection>::iterator i = doc_collections_.begin(); i != doc_collections_.end(); ++i){
	    GetDefaultLogger().Log("Processing: " + i->file_path(), false);
	    // Decompress this collection into the shared buffer.
	    int document_collection_buffer_len = i->Fill(&document_collection_buffer_, &document_collection_buffer_size_);
	    // doc_id_ is carried across collections so docIDs are globally unique;
	    // the parser advances it (and avg_doc_length_) by reference.
	    i->set_initial_doc_id(doc_id_);
	    int num_docs_found = parser_.ParseDocumentCollectionAndExtractingInfoForPhase2Pruning(document_collection_buffer_, document_collection_buffer_len, doc_id_, avg_doc_length_);
	    i->set_processed(true);
	    GetDefaultLogger().Log("Found: " + Stringify(num_docs_found) + " documents.", false);
	    // doc_id_ now points one past the last assigned ID.
	    i->set_final_doc_id(doc_id_ - 1);
	    total_num_docs_found += num_docs_found;
	  }
  }
  else{
	  GetDefaultLogger().Log("Unknown NOT supported dataset format", true);
  }

  GetDefaultLogger().Log("Total number of documents found(total_num_docs_found): " + Stringify(total_num_docs_found), false);
  GetDefaultLogger().Log("Total number of documents indexed(doc_id_): " + Stringify(doc_id_), false);

  // Flush/finalize the posting collection after all collections are parsed.
  GetPostingCollectionController().Finish();
}

//This function is used for parsing either the TREC or WARC dataset.
//Currently it only properly supports part of the WARC dataset; TREC support still needs to be fixed up.
// Main indexing entry point: decompresses and parses every registered
// collection, assigning globally-unique docIDs (doc_id_ is carried across
// collections) and finalizing the posting collection at the end.
void CollectionIndexer::ParseDocumentCollections()
{
  // First, read the collection type from the .conf file to tell whether it is WARC type or TREC type.
  string collectionFormat = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocumentCollectionFormat));

  // For debugging purposes.
  // cout << "*" << collectionFormat << "*" << endl;
  int total_num_docs_found = 0;

  //TODO: currently, just do a string comparison. In future, more proper way should be needed.
  if (collectionFormat == "warc"){
	  GetDefaultLogger().Log("dataset input format:CLUEWEB2009", false);
	  // Just execute the normal clueweb09 paring logic will be fine.
	  // NOTE(review): this flag is hard-coded, so the else branch below (the
	  // learning-to-impact filtered parse) is currently dead code kept for
	  // future use.
	  bool normalParsingLogicForClueweb09Flag = true;

	  std::cout << "normalParsingLogicForClueweb09Flag:" << normalParsingLogicForClueweb09Flag << std::endl;

	  if(normalParsingLogicForClueweb09Flag){

		  // Normal path: parse every collection unconditionally.
		  for (vector<DocumentCollection>::iterator i = doc_collections_.begin(); i != doc_collections_.end(); ++i)
		  {
			GetDefaultLogger().Log("Processing: " + i->file_path(), false);
		    // Decompress this collection into the shared buffer.
		    int document_collection_buffer_len = i->Fill(&document_collection_buffer_, &document_collection_buffer_size_);

		    // The parser advances doc_id_ and avg_doc_length_ by reference.
		    i->set_initial_doc_id(doc_id_);
		    int num_docs_parsed = parser_.ParseDocumentCollection(document_collection_buffer_, document_collection_buffer_len, doc_id_, avg_doc_length_);
		    i->set_processed(true);
		    GetDefaultLogger().Log("Parsed: " + Stringify(num_docs_parsed) + " documents.", false);
		    // doc_id_ now points one past the last assigned ID.
		    i->set_final_doc_id(doc_id_ - 1);

		    total_num_docs_found += num_docs_parsed;


		  }
	  }
	  else{
		  // Wei: 2012/10/02. This is for the learning to impact project.

		  // Here, let's do some pre-processing work. Load up 3 very important lists and the minimun number of docs to parse
		  // They are: priorityLookUpDict, spamLookUpDict, popularSitelookUpDict and minimun number of docs to parse

		  // Step1: Load the priorityLookUpDict
		  map<string, int> priorityLookUpDict;
		  LoadUpWarcDocumentsPriorityList(priorityLookUpDict);

		  // Step2: Load the spamLookUpDict
		  map<string, long> spamScoreLookUpDict;
		  LoadUpWarcSpamScoreList(spamScoreLookUpDict);

		  // Step3: Load the popularSitelookUpDict
		  map<string, string> lookUpDictForPopularSite;
		  LoadUpPopularSiteList(lookUpDictForPopularSite);

		  // this is the mechanism for controlling the total number of docs to parse. In order to simplify, we just comment out this fuctionality first.
		  // Step4: minimun number of documents needed to parse.
		  string minNumberOfDocumentsNeededToCollectInStringFormat = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kMinNumberOfDocumentsNeedToCollect));
		  const long Min_NUMBER_OF_DOCUMENTS_TO_COLLECT = atol( minNumberOfDocumentsNeededToCollectInStringFormat.c_str() );


		  // Filtered path: same loop as above but the parser overload also
		  // receives the three lookup dictionaries, and parsing stops early
		  // once enough documents have been collected.
		  for (vector<DocumentCollection>::iterator i = doc_collections_.begin(); i != doc_collections_.end(); ++i)
		  {
			GetDefaultLogger().Log("# of priority documents left: " + Stringify(priorityLookUpDict.size()), false);

			GetDefaultLogger().Log("Processing: " + i->file_path(), false);
		    int document_collection_buffer_len = i->Fill(&document_collection_buffer_, &document_collection_buffer_size_);

		    i->set_initial_doc_id(doc_id_);
		    int num_docs_parsed = parser_.ParseDocumentCollection(document_collection_buffer_, document_collection_buffer_len, doc_id_, avg_doc_length_, priorityLookUpDict, spamScoreLookUpDict, lookUpDictForPopularSite);
		    i->set_processed(true);
		    GetDefaultLogger().Log("Parsed: " + Stringify(num_docs_parsed) + " documents.", false);
		    i->set_final_doc_id(doc_id_ - 1);

		    total_num_docs_found += num_docs_parsed;

		    if(Min_NUMBER_OF_DOCUMENTS_TO_COLLECT < total_num_docs_found){
		    	//Already exceed the requirement in this case.
		    	break;
		    }
		  }
	  }
  }
  else if (collectionFormat == "trec"){
	  GetDefaultLogger().Log("dataset input format:GOV2", false);
	  for (vector<DocumentCollection>::iterator i = doc_collections_.begin(); i != doc_collections_.end(); ++i)
	  {
	    GetDefaultLogger().Log("Processing: " + i->file_path(), false);
	    int document_collection_buffer_len = i->Fill(&document_collection_buffer_, &document_collection_buffer_size_);
	    i->set_initial_doc_id(doc_id_);
	    int num_docs_parsed = parser_.ParseDocumentCollection(document_collection_buffer_, document_collection_buffer_len, doc_id_, avg_doc_length_);
	    i->set_processed(true);
	    GetDefaultLogger().Log("Parsed: " + Stringify(num_docs_parsed) + " documents.", false);
	    i->set_final_doc_id(doc_id_ - 1);
	    total_num_docs_found += num_docs_parsed;
	  }
  }
  else{
	  GetDefaultLogger().Log("Unknown NOT supported dataset format", true);
  }

  GetDefaultLogger().Log("Total number of documents parsed(total_num_docs_found): " + Stringify(total_num_docs_found), false);
  GetDefaultLogger().Log("Total number of documents indexed(doc_id_): " + Stringify(doc_id_), false);

  // Flush/finalize the posting collection after all collections are parsed.
  GetPostingCollectionController().Finish();
}


void CollectionIndexer::OutputDocumentCollectionDocIdRanges(const char* filename) {
  ofstream document_collections_doc_id_ranges_stream(filename);
  if (!document_collections_doc_id_ranges_stream) {
    GetErrorLogger().Log("Could not open '" + string(filename) + "' for writing.", true);
  }

  document_collections_doc_id_ranges_stream << "'Document Collection Filename'" << "\t" << "'Initial DocID'" << "\t" << "'Final DocID'" << "\n";
  for (vector<DocumentCollection>::iterator i = doc_collections_.begin(); i != doc_collections_.end(); ++i) {
    if (i->processed())
      document_collections_doc_id_ranges_stream << i->file_path() << "\t" << i->initial_doc_id() << "\t" << i->final_doc_id() << "\n";
  }
  document_collections_doc_id_ranges_stream.close();
}

/**************************************************************************************************************************************************************
 * CollectionUrlExtractor
 *
 **************************************************************************************************************************************************************/
// Reads the collection buffer size from the configuration and allocates the
// decompression buffer up front (mirrors CollectionIndexer's constructor).
// NOTE(review): the member-initializer list relies on declaration order in the
// header (size declared before buffer) -- confirm.
CollectionUrlExtractor::CollectionUrlExtractor() :
  document_collection_buffer_size_(atol(Configuration::GetConfiguration().GetValue(config_properties::kDocumentCollectionBufferSize).c_str())),
      document_collection_buffer_(new char[document_collection_buffer_size_]),
      parser_(Parser<DocUrlRetrievalParserCallback>::kManyDoc, GetAndVerifyDocType(), &parser_callback_), doc_id_(0), avg_doc_length_(0) {
  // atol() yields 0 for a missing/unparsable setting; the true argument
  // presumably makes the log fatal -- confirm.
  if (document_collection_buffer_size_ == 0)
    GetErrorLogger().Log("Check configuration setting for '" + string(config_properties::kDocumentCollectionBufferSize) + "'.", true);
}

// Releases the decompression buffer allocated in the constructor.
CollectionUrlExtractor::~CollectionUrlExtractor() {
  delete[] document_collection_buffer_;
}

// Reads the configured document collection format string and maps it to the
// parser's DocType enum; hands unrecognized values to the configuration error
// path (which reports the bad setting).
Parser<DocUrlRetrievalParserCallback>::DocType CollectionUrlExtractor::GetAndVerifyDocType() {
  const string format_value =
      Configuration::GetResultValue(Configuration::GetConfiguration().GetStringValue(config_properties::kDocumentCollectionFormat));

  Parser<DocUrlRetrievalParserCallback>::DocType doc_type =
      Parser<DocUrlRetrievalParserCallback>::GetDocumentCollectionFormat(format_value.c_str());
  if (doc_type == Parser<DocUrlRetrievalParserCallback>::kNoSuchDocType) {
    Configuration::ErroneousValue(config_properties::kDocumentCollectionFormat, format_value);
  }

  return doc_type;
}
/*
void CollectionUrlExtractor::ParseTrec(const char* document_urls_filename) {
	  // Step1: Load the priorityLookUpDict
		map<string, int> priorityLookUpDict;
		//LoadUpWarcDocumentsPriorityList(priorityLookUpDict);

	  // Step2: Load the spamLookUpDict
		map<string, long> spamScoreLookUpDict;
		//LoadUpWarcSpamScoreList(spamScoreLookUpDict);
	  // Step3: Load the popularSitelookUpDict
		map<string, string> lookUpDictForPopularSite;
		//LoadUpPopularSiteList(lookUpDictForPopularSite);
  int total_num_docs_found = 0;
  for (vector<DocumentCollection>::iterator i = doc_collections_.begin(); i != doc_collections_.end(); ++i) {
    GetDefaultLogger().Log("Processing: " + i->file_path(), false);

    int document_collection_buffer_len = i->Fill(&document_collection_buffer_, &document_collection_buffer_size_);

    i->set_initial_doc_id(doc_id_);
    int num_docs_parsed = parser_.ParseDocumentCollection(document_collection_buffer_, document_collection_buffer_len, doc_id_, avg_doc_length_,priorityLookUpDict,spamScoreLookUpDict,lookUpDictForPopularSite);
    i->set_processed(true);
    GetDefaultLogger().Log("Found: " + Stringify(num_docs_parsed) + " documents.", false);
    i->set_final_doc_id(doc_id_ - 1);

    total_num_docs_found += num_docs_parsed;
  }

  GetDefaultLogger().Log("Total number of documents found: " + Stringify(total_num_docs_found), false);

  // Sort the URL and docID pairs.
  sort(parser_callback_.document_urls().begin(), parser_callback_.document_urls().end());

  // Write the new mapped docID, original docID, and URL of the sorted URL and docID pairs to a file.
  ofstream document_urls_stream(document_urls_filename);
  if (!document_urls_stream) {
    GetErrorLogger().Log("Could not open '" + string("document_urls") + "' for writing.", true);
  }

  uint32_t mapped_doc_id = 0;
  for (std::vector<std::pair<std::string, uint32_t> >::iterator i = parser_callback_.document_urls().begin(); i != parser_callback_.document_urls().end(); ++i) {
    document_urls_stream << mapped_doc_id << " " << i->second << " " << i->first << "\n";
    ++mapped_doc_id;
  }
  document_urls_stream.close();
}
*/
