//==============================================================================================================================================================
// Author(s): Roman Khmelichek Wei Jiang
//
// TODO: DOCNO can be determined algorithmically from the docID (instead of storing as a string)? We also have to consider docID remapping.
// TODO: Need to implement modes 'kSingleDoc' and 'kStandard'.
//
// Some WARC records in ClueWeb09 are malformed. See 'http://www.umiacs.umd.edu/~jimmylin/cloud9/docs/content/clue.html' for more information. Here, we handle the
// common problem of having an extra new line in the WARC header. A few of the records also happen to have garbled URLs.
//==============================================================================================================================================================

#ifndef PARSERINL_H_
#define PARSERINL_H_

// Enables debugging output for this module.
#define PARSERINL_DEBUG

#include <strings.h>  // strncasecmp() (POSIX)

#include <cassert>
#include <cctype>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <string>
#include <vector>



#ifdef PARSERINL_DEBUG
#include <string>
#include <iostream>
#endif

template<class Callback>
  Parser<Callback>::Parser(const Parser<Callback>::ParsingMode& parsing_mode, const Parser<Callback>::DocType& doc_type, Callback* callback) :
    parsing_mode_(parsing_mode), doc_type_(doc_type), callback_(callback) {
    // 'callback' receives all parse events (terms, doc lengths, links, ...) and
    // must outlive this parser; a NULL callback is a programming error.
    assert(callback_ != NULL);
  }

template<class Callback>
  int Parser<Callback>::LoadClueweb2009SpamReportDict(ifstream &inputfileForDocument) {
	// TODO: Unimplemented stub — always returns 0.  Judging by the name it is
	// meant to load the ClueWeb09 spam report from 'inputfileForDocument'; the
	// actual loading logic currently lives inline in ParseDocumentCollection().
	return 0;
}

// Returns the number of documents parsed if parsing mode is set to 'MANY_DOC', otherwise 0.
// TODO: The base URL can be set by a <base> tag within the page and by the Content-Location field in the web server's HTTP response header.
//        These cases are not currently covered.
template<class Callback>
  int Parser<Callback>::ParseDocumentCollection(const char* buf, int buf_len, uint32_t& doc_id, int& avg_doc_length) {
    // Top-level driver over a buffer holding one or more documents.
    //   buf / buf_len:  raw collection bytes (must be non-NULL / positive).
    //   doc_id:         id assigned to the next document; advanced once per doc.
    //   avg_doc_length: running SUM of document lengths (the caller averages it).
    // For kWarc input this additionally loads three side dictionaries (priority
    // ids, spam-rank offsets, popular sites) from configured files and writes one
    // feature line per document to the configured output file.
    assert(buf != NULL);
    assert(buf_len > 0);

    int num_docs_parsed = 0;

    const char* curr_p = buf;  // Tracks the current point in the buffer.

    if (doc_type_ == kWarc){
      // The WARC format starts each bundle with 6 info lines, which we skip here.
      // the WARC-Type is:warcinfo

      WarcHeader warc_header;
      curr_p += ProcessWarcInfoHeader(buf, buf_len, curr_p, &warc_header);
      curr_p += warc_header.content_length;

      /******************************************************************************************************************************/
      //This should be another function to load the priority list.
      //I just directly do logic here and this is not the proper way. I will have to factor my code a bit later.
      // File format: "<id> <priority>" per line, space separated.
      // NOTE(review): 'priorityLookUpDict' is loaded and validated but never read
      // below in this function — presumably intended for a later stage; confirm.
      string priorityListFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPriorityWarcDocumentsIDsListFilePath));
      map<string, int> priorityLookUpDict;
      map<string, int>::iterator priorityLookUpDictIter;
	  string priorityLine = "";
	  ifstream priorityInputfile(priorityListFileName.c_str());
	  vector<string> priorityListElements;
	  // NOTE(review): the "while (stream.good()) { getline(...); }" pattern can
	  // run one extra iteration at EOF; harmless here because empty lines are
	  // skipped, but "while (getline(...))" is the safer idiom.
	  while ( priorityInputfile.good() )
	  {
		  getline (priorityInputfile,priorityLine);
		  if(priorityLine != ""){
		      trim(priorityLine);
		      split(priorityListElements, priorityLine, is_any_of(" ") );
		      priorityLookUpDict[ priorityListElements[0] ] = atoi( priorityListElements[1].c_str() );
		  }
	  }
	  priorityInputfile.close();

	  // An empty dictionary means the configured file was missing/empty: abort.
	  if(priorityLookUpDict.size() == 0){
		  cout << "Load Task is NOT OK" << endl;
		  exit(1);
	  }
	  else{
		  cout << priorityLookUpDict.size() << " priority ids have been loaded." << endl;
	  }
      /******************************************************************************************************************************/
      /******************************************************************************************************************************/
      //This should be another function to load this spam report.
      //I just directly do logic here and this is not the proper way. I will have to factor my code a bit later.

      // Maps "<segment>-<part>" (the middle of a WARC-TREC-ID) to a byte offset
      // into the spam-rank file; getHamProbablity() seeks to that offset later.
      string dictFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kSpamRankDictFile));
      map<string, long> lookUpDict;
      map<string, long>::iterator lookUpDictIter;
	  string line = "";
	  ifstream inputfile(dictFileName.c_str());
	  vector<string> elements;

	  while ( inputfile.good() )
	  {
		  getline (inputfile,line);
		  if(line != ""){
		      trim(line);
		      split(elements, line, is_any_of(" ") );
		      // If this is the case, fu*k
		      lookUpDict[ elements[0] ] = atol( elements[1].c_str() );
		  }
	  }
	  inputfile.close();

	  /*
	  //traversal of the whole map
	  for(lookUpDictIter = lookUpDict.begin(); lookUpDictIter != lookUpDict.end(); lookUpDictIter++)
	  {
		  cout << lookUpDictIter->first << " " << lookUpDictIter->second << endl;
	  }
	  */

	  if(lookUpDict.size() == 0){
		  cout << "Load Task is NOT OK" << endl;
		  exit(1);
	  }
	  else{
		  cout << lookUpDict.size() << " spam score tuples have been loaded." << endl;
	  }
	  /******************************************************************************************************************************/

	  /******************************************************************************************************************************/
      //This should be another function to load this popular site report from disk.
      //I just directly do logic here and this is not the proper way. I will have to factor my code a bit later.
      // File format: "<rank>,<hostname>" per line; keyed by hostname below so we
      // can test membership against warc_header.host_name.
      string dataFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kPopularSiteFile));
      map<string, string> lookUpDictForPopularSite;
      map<string, string>::iterator lookUpDictIterForPopularSite;
	  string currentLine = "";
	  ifstream inputfileForPopularSites(dataFileName.c_str());
	  vector<string> elementsForPopularSites;

	  while ( inputfileForPopularSites.good() )
	  {
		  getline (inputfileForPopularSites,currentLine);
		  if(currentLine != ""){
		      trim(currentLine);
		      split(elementsForPopularSites, currentLine, is_any_of(",") );
		      // If this is the case, fu*k
		      lookUpDictForPopularSite[ elementsForPopularSites[1] ] = elementsForPopularSites[0];
		  }
	  }
	  inputfileForPopularSites.close();

	  //traversal of the whole map

	  //for(lookUpDictIterForPopularSite = lookUpDictForPopularSite.begin(); lookUpDictIterForPopularSite != lookUpDictForPopularSite.end(); lookUpDictIterForPopularSite++)
	  //{
	  //  cout << lookUpDictIterForPopularSite->first << " " << lookUpDictIterForPopularSite->second << endl;
	  //}

	  cout << lookUpDictForPopularSite.size() << " popular sites have been loaded." << endl;
	  /******************************************************************************************************************************/


      // Per-document feature output file (one line per document, written below).
      // NOTE(review): 'ios::out | ios::out' repeats the same flag — probably
      // meant ios::trunc or ios::app; as written it is equivalent to plain ios::out.
      string outputDocumentResultFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kDocumentFeatureOutputFilePath));
      ofstream outputfileForDocument( outputDocumentResultFileName.c_str(), ios::out | ios::out );


      // Main per-document loop: header -> body -> feature extraction -> output.
      while (true) {
    	  //std::cout << "in range1" << std::endl;
        // If using the WARC format, need to process the header first for each document.
        // Right, each document will also have a warc header.
        // This warc header type is:response
        int header_bytes = ProcessWarcResponseAndHTTPHeader(buf, buf_len, curr_p, &warc_header, doc_id);
        if (header_bytes == 0)
          break;

        //original ver.
        curr_p += header_bytes;
        const char* content_start = curr_p;

        //the original ver
        //std::cout << "in:" << num_docs_parsed << std::endl;
        // NOTE(review): ParseWarcDataBuffer() returns doc_id - initial_doc_id and
        // never increments doc_id, so this adds 0; the real count is the
        // ++num_docs_parsed below.  Confirm that is intended.
        num_docs_parsed += ParseWarcDataBuffer(content_start, warc_header.content_length, doc_id, avg_doc_length, curr_p, &warc_header);
        //std::cout << "out:" << num_docs_parsed << std::endl;


        // Since we're parsing one document at a time, we need to update the docID count, average doc length here.
        ++doc_id;
        ++num_docs_parsed;

        // testing stage.
        // Write the features into the feature file.
        /******************************************************************************************************************************/
        //Here, let's grab the spam value from the file.
        // The WARC-TREC-ID looks like "clueweb09-enNNNN-MM-XXXXX"; the dictionary
        // is keyed by the "enNNNN-MM" middle portion.
        vector<string> elementsForLookUpString;
        split(elementsForLookUpString, warc_header.feature_WARC_TREC_ID, is_any_of("-") );
        string searchString = elementsForLookUpString[1] + "-" + elementsForLookUpString[2];
        //cout << "search string:" << searchString << endl;

		if(lookUpDict.find(searchString) != lookUpDict.end()){
			lookUpDictIter = lookUpDict.find(searchString);
			//cout << "Find Entry:" << lookUpDictIter->second << endl;
			warc_header.feature_killing_spam_score = getHamProbablity(lookUpDictIter->second , warc_header.feature_WARC_TREC_ID);
		}
		else{
			// -1 marks "spam score unknown" in the feature output.
			cout << "Can not find the probability value for:" << warc_header.feature_WARC_TREC_ID << endl;
			warc_header.feature_killing_spam_score = -1;
		}
        /******************************************************************************************************************************/


		/******************************************************************************************************************************/
		//Here,let's figure it out whether this url(web page) is popular or not.

		//cout << "the host name I want is:" << warc_header.host_name << endl;
		//cout << "simple test" << endl;
		//cout << lookUpDictForPopularSite.count("google.com");

		if ( lookUpDictForPopularSite.count(warc_header.host_name) > 0){
			warc_header.feature_isPopular = true;
		}
		else{
			warc_header.feature_isPopular = false;
		}
		/******************************************************************************************************************************/

		//TODO output url as well
        outputfileForDocument << warc_header.feature_WARC_TREC_ID << " " << warc_header.urlInStringFormat << " "<< warc_header.feature_doc_id << " " << warc_header.feature_killing_spam_score << " " << warc_header.feature_isPopular << " " << warc_header.feature_domainName << " " << warc_header.feature_urlLength << " " << warc_header.feature_documentSize << " " << warc_header.feature_number_nesting_levels << " "<< warc_header.feature_numberOfWordsInDoc << " " << warc_header.feature_numberOfDistantWordsInDoc << endl;
        //std::cout << "num WARC docs parsed: " << num_docs_parsed << std::endl;

        // Sanity check: the parser must have consumed exactly the record body.
        if ((curr_p - content_start) != warc_header.content_length) {
          assert(false);
        }
        //std::cout << "out range1" << std::endl;
      }

      outputfileForDocument.close();
    }
    else if (doc_type_ == kTrec)
    {
        //Supportive doc_type_ Trec
        num_docs_parsed += ParseTrecDataBuffer(buf, buf_len, doc_id, avg_doc_length, curr_p);
    }
    else{
        //No Supportive doc_type_
        std::cout << "Not Supportive Doc Type, please wait for the update" << std::endl;
    }
    return num_docs_parsed;
  }

template<class Callback>
  int Parser<Callback>::getHamProbablity(long offset, string WARCTRECIDNeededToLookUp) {
    // Looks up the "ham" (non-spam) percentage for the document whose WARC-TREC-ID
    // equals 'WARCTRECIDNeededToLookUp' in the spam-rank file configured via
    // config_properties::kSpamRankFile.
    //
    //   offset:  byte offset into the spam-rank file (taken from the in-memory
    //            offset dictionary) at which the sequential scan starts, so we do
    //            not have to scan from the beginning of the file.
    // Returns the percentage value, or -1 if no matching line is found.
    //
    // Expected line format (space separated): "<percentage> <id>", where the
    // first four '-'-separated components of <id> form the WARC-TREC-ID.
    string dataFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kSpamRankFile));
    ifstream myinputFileActualLookUp(dataFileName.c_str());
    string line = "";
    int percentageValue = -1;

    // TODO: there is still no upper bound on the scan (we rely on finding a match
    // or hitting EOF), but malformed lines no longer cause out-of-range indexing.
    myinputFileActualLookUp.seekg(offset, ios::beg);

    // Loop on getline() itself: testing 'good()' before reading (the previous
    // version) can run one extra iteration on a failed stream at EOF.
    while ( getline(myinputFileActualLookUp, line) ) {
      if (line == "")
        continue;

      trim(line);

      vector<string> elementsForGoal1;
      vector<string> elementsForGoal2;

      split(elementsForGoal1, line, is_any_of(" ") );
      if (elementsForGoal1.size() < 2)
        continue;  // Malformed line: need "<percentage> <id>".

      split(elementsForGoal2, elementsForGoal1[1], is_any_of("-") );
      if (elementsForGoal2.size() < 4)
        continue;  // Malformed id: need at least four '-'-separated parts.

      string currentMatchingString = elementsForGoal2[0] + "-" + elementsForGoal2[1] + "-" + elementsForGoal2[2] + "-" + elementsForGoal2[3];

      if (currentMatchingString == WARCTRECIDNeededToLookUp) {
        percentageValue = atoi( elementsForGoal1[0].c_str() );
        break;
      }
    }
    myinputFileActualLookUp.close();
    return percentageValue;
  }

template<class Callback>
  int Parser<Callback>::ParseWarcDataBuffer(const char* buf, int buf_len, uint32_t& doc_id, int& avg_doc_length, const char*& curr_p, WarcHeader* header) {
	//std::cout << "Process Web Pages Data Buffer(WARC format)" << std::endl;
	// Parses the body of ONE WARC record ('buf' .. 'buf' + 'buf_len'), emitting
	// each indexable word to 'callback_' and to a per-term feature file, and
	// filling the word-count features of 'header'.  'curr_p' is advanced through
	// the buffer so the caller can verify the whole record body was consumed.
	// NOTE(review): doc_id is never incremented here, so the return value
	// (doc_id - initial_doc_id) is always 0; the caller counts documents itself.
	int wordCounter = 0;
	// NOTE(review): 'distantWordCounter' is never used; the distinct-word feature
	// comes from wordLookUpDictForEachDocument.size() instead.
	int distantWordCounter = 0;
	string currentWord;
	// Set of words seen in this document (value is unused; only membership counts).
	map<string, int> wordLookUpDictForEachDocument;
	map<string, int>::iterator wordUpDictIterForEachDocument;
	// NOTE(review): the term feature file is re-opened (append mode) once per
	// document — consider hoisting this to the caller for performance.
    string outputTermResultFileName = Configuration::GetResultValue( Configuration::GetConfiguration().GetStringValue(config_properties::kTermFeaturePartialOutputFilePath));
    ofstream outputfileForTerm( outputTermResultFileName.c_str(), ios::out | ios::app);


    assert(buf != NULL);
    assert(buf_len > 0);

    uint32_t initial_doc_id = doc_id;

    Tag tag_ret;  // The special type of tag we encountered.

    unsigned char context = '\0';  // Bit array for the context.
    uint32_t position = 0;         // Tracks position of each word, final position for a document is it's size in words.

    // For parsing HTML.
    bool in_closing_tag = false;  // True when we're parsing a closing tag.
    bool in_script = false;       // True when we're parsing contents of script tag.

    // For TREC documents.
    bool in_doc = false;     // True when we're parsing contents of doc tag.
    bool in_docno = false;   // True when we're parsing contents of docno tag.
    bool in_dochdr = false;  // True when we're parsing contents of dochdr tag.

    // Track the starting point of various things we want to parse out.
    const char* word_p;        // Standalone word.
    const char* url_p;         // TREC document URL.
    const char* docno_p;       // TREC document number.
    const char* tag_p = NULL;  // Tracks the starting point of a tag; doubles as a flag as to whether we're currently in a tag.

    // The main parsing loop
    while (IsWithinBounds(curr_p, buf, buf_len)) {
      // Non-indexable characters: either markup ('<', '>') or separators.
      if (!IsIndexable(*curr_p)) {
        if (*curr_p != '>') {
          if (*curr_p == '<') {
            tag_p = curr_p;
          }
          ++curr_p;
          continue;
        }

        // A '>' with no matching '<' seen: ignore it.
        if (!tag_p) {
          ++curr_p;
          continue;
        }

        // At this point, we must have just seen the end of a closing tag, '>'.
        ++curr_p;
        tag_ret = ProcessTag(tag_p, curr_p - tag_p, in_closing_tag, doc_id);

        switch (tag_ret) {
          case kTagNot:
            break;

          case kTagB:
            UpdateContext(context, in_closing_tag, kContextB);
            break;

          case kTagI:
            UpdateContext(context, in_closing_tag, kContextI);
            break;

          case kTagH:
            UpdateContext(context, in_closing_tag, kContextH);
            break;

          case kTagTitle:
            UpdateContext(context, in_closing_tag, kContextT);
            break;

          case kTagScript:
            in_script = in_closing_tag ? false : true;
            break;

          //If this tag is to be </Html>.
          case kTagHtml:
            if (doc_type_ != kWarc)
              break;

            if (in_closing_tag) {
              in_doc = false;
              // The position at this time is actually the document length.
              avg_doc_length += position;

              //wei: I do not think this is necessary. I have deal with it in the function called: WarcResponse Header ONLY
              //callback_->ProcessDocLength(position, doc_id);
              break;
            }
            // NOTE(review): no 'break' here — an OPENING <html> tag falls through
            // into the kTagDoc case below (setting in_doc = true).  Looks
            // intentional, but confirm.

          //If this tag is to be </Doc>.
          case kTagDoc:
            if (doc_type_ != kWarc)
              break;

            if (in_closing_tag) {
              in_doc = false;

              // The position at this time is actually the document length.
              avg_doc_length += position;


              //wei: I do not think this is necessary. I have deal with it in the function called: WarcResponse Header ONLY
              //callback_->ProcessDocLength(position, doc_id);


            }
            else {
              in_doc = true;
            }
            break;

          case kTagDocno:
            if (doc_type_ != kTrec)
              break;

            in_docno = in_closing_tag ? false : true;
            break;

          case kTagDochdr:
            if (doc_type_ != kTrec)
              break;

            in_dochdr = in_closing_tag ? false : true;
            break;

          default:
            break;
        }

        tag_p = NULL;
        continue;
      }

      // Ignore everything between <script></script> tags and ignore inner contents of tags.
      if (in_script || tag_p) {
        ++curr_p;
        continue;
      }



      // Scan one whole word: [word_p, curr_p).
      word_p = curr_p;
      while (IsWithinBounds(curr_p, buf, buf_len) && IsIndexable(*curr_p)) {
        ++curr_p;
      }



      //This is just for temp implementation.
      // Lower-cased copy of the word, used for the per-document distinct-word set
      // and for the term feature file.
      currentWord = "";
      for(int iCounter = 0; iCounter < curr_p - word_p; iCounter++){
		  //outputfileForTerm << word_p[iCounter];
      	currentWord += tolower(word_p[iCounter]);
      }
      // <=0:is not an element of mymap. >0:is an element of mymap.
      if ( wordLookUpDictForEachDocument.count(currentWord) <= 0){
    	  wordLookUpDictForEachDocument[currentWord] = 1;
      }
      // Term feature line: "<word> <doc_id> <position> <context>".
      outputfileForTerm << currentWord;
	  outputfileForTerm << " ";
	  outputfileForTerm << doc_id;
	  outputfileForTerm << " ";
	  outputfileForTerm << position;
	  outputfileForTerm << " ";
	  outputfileForTerm << (int)context;
	  outputfileForTerm << std::endl;

      //generate the term level feature
	  wordCounter ++;



      callback_->ProcessTerm(word_p, curr_p - word_p, doc_id, position++, context);

    }

    // Document-level word-count features for the caller's feature output line.
    header->feature_numberOfWordsInDoc = wordCounter;
    header->feature_numberOfDistantWordsInDoc = wordLookUpDictForEachDocument.size();
    outputfileForTerm.close();
    return doc_id - initial_doc_id;
  }

template<class Callback>
  int Parser<Callback>::ParseTrecDataBuffer(const char* buf, int buf_len, uint32_t& doc_id, int& avg_doc_length, const char*& curr_p) {

  //every dataset format is calling this ParseBuffer function and I have to add something into this function for the support of WARC data.
  // Parses a buffer of TREC-format documents (<DOC>/<DOCNO>/<DOCHDR> markup with
  // HTML bodies), emitting terms, docnos, URLs and doc lengths to 'callback_'.
  // 'doc_id' is incremented per </DOC> in kManyDoc mode; the return value is the
  // number of documents completed (doc_id - initial_doc_id).
  /*
  #ifdef PARSERINL_DEBUG
	std::cout << "ParseBuffer Called: " << std::endl;
  #endif
  */


  assert(buf != NULL);
  assert(buf_len > 0);

  uint32_t initial_doc_id = doc_id;

  Tag tag_ret;  // The special type of tag we encountered.

  unsigned char context = '\0';  // Bit array for the context.
  uint32_t position = 0;         // Tracks position of each word, final position for a document is it's size in words.

  // For parsing HTML.
  bool in_closing_tag = false;  // True when we're parsing a closing tag.
  bool in_script = false;       // True when we're parsing contents of script tag.

  // For TREC documents.
  bool in_doc = false;     // True when we're parsing contents of doc tag.
  bool in_docno = false;   // True when we're parsing contents of docno tag.
  bool in_dochdr = false;  // True when we're parsing contents of dochdr tag.

  // Track the starting point of various things we want to parse out.
  const char* word_p;        // Standalone word.
  const char* url_p;         // TREC document URL.
  const char* docno_p;       // TREC document number.
  const char* tag_p = NULL;  // Tracks the starting point of a tag; doubles as a flag as to whether we're currently in a tag.

  // Main loop: alternates between tag handling and word emission.
  while (IsWithinBounds(curr_p, buf, buf_len)) {
    if (!IsIndexable(*curr_p)) {
      if (*curr_p != '>') {
        if (*curr_p == '<') {
          tag_p = curr_p;
        }
        ++curr_p;
        continue;
      }

      // A '>' with no matching '<' seen: ignore it.
      if (!tag_p) {
        ++curr_p;
        continue;
      }

      // At this point, we must have just seen the end of a closing tag, '>'.
      ++curr_p;
      tag_ret = ProcessTag(tag_p, curr_p - tag_p, in_closing_tag, doc_id);

      switch (tag_ret) {
        case kTagNot:
          break;

        case kTagB:
          UpdateContext(context, in_closing_tag, kContextB);
          break;

        case kTagI:
          UpdateContext(context, in_closing_tag, kContextI);
          break;

        case kTagH:
          UpdateContext(context, in_closing_tag, kContextH);
          break;

        case kTagTitle:
          UpdateContext(context, in_closing_tag, kContextT);
          break;

        case kTagScript:
          in_script = in_closing_tag ? false : true;
          break;

        case kTagDoc:
          if (doc_type_ != kTrec)
            break;

          if (in_closing_tag) {
            // </DOC>: the document is complete.
            in_doc = false;

            // The position at this time is actually the document length.
            avg_doc_length += position;
            /*
            #ifdef PARSERINL_DEBUG
              std::cout << "doc_length is: " << position << std::endl;
              std::cout << "doc_id is: " << doc_id << std::endl;
            #endif
            */
            callback_->ProcessDocLength(position, doc_id);

            // This only applies when we're parsing multiple documents in one go.
            if (parsing_mode_ == kManyDoc) {
              context = 0;
              position = 0;
              ++doc_id;

              // Need to reset certain properties before moving on to the next document.
              in_script = false;
            }
          } else {
            in_doc = true;


          }
          break;

        case kTagDocno:
          if (doc_type_ != kTrec)
            break;

          in_docno = in_closing_tag ? false : true;
          break;

        case kTagDochdr:
          if (doc_type_ != kTrec)
            break;

          in_dochdr = in_closing_tag ? false : true;
          break;

        default:
          break;
      }

      tag_p = NULL;
      continue;
    }

    // Ignore everything between <script></script> tags and ignore inner contents of tags.
    if (in_script || tag_p) {
      ++curr_p;
      continue;
    }

    if (doc_type_ == kTrec)
    {
      if (in_docno) {
        // Inside <DOCNO>: everything up to the next '<' is the document number.
        docno_p = curr_p;
        while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != '<') {
          ++curr_p;
        }

        /*
        #ifdef PARSERINL_DEBUG
          std::cout << "docno is: " << docno_p << std::endl;
          std::cout << "docno_len is: " << curr_p - docno_p << std::endl;
          std::cout << "doc_id is: " << doc_id << std::endl;
        #endif
        */
        callback_->ProcessDocno(docno_p, curr_p - docno_p, doc_id);

        continue;
      } else if (in_dochdr) {
        // Inside <DOCHDR>: the first line is the URL.  Index its words with the
        // URL context bit set, then skip the rest of the header.
        BitSet(context, kContextU);

        url_p = curr_p;
        while (IsWithinBounds(url_p, buf, buf_len) && *url_p != '\n') {
          if (!IsIndexable(*url_p)) {
            url_p++;
            continue;
          }

          word_p = url_p;
          while (IsWithinBounds(url_p, buf, buf_len) && IsIndexable(*url_p)) {
            url_p++;
          }

          //IndexingParserCallback::ProcessTerm called(), amazing and I do not why?
          callback_->ProcessTerm(word_p, url_p - word_p, doc_id, position++, context);
        }

        BitUnset(context, kContextU);
        /*
        #ifdef PARSERINL_DEBUG
          std::cout << "url is: " << curr_p << std::endl;
          std::cout << "url_len is: " << url_p - curr_p << std::endl;
          std::cout << "doc_id is: " << doc_id << std::endl;
        #endif
        */
        callback_->ProcessUrl(curr_p, url_p - curr_p, doc_id);

        curr_p = url_p + 1;
        // Skip the rest of the dochdr contents (making sure that we're at the end of the dochdr).
        while (IsWithinBounds(curr_p, buf, buf_len)) {
          if (*curr_p == '<') {
            const char kDocHdrClosingTag[] = "</DOCHDR>";
            // Make sure it's actually the closing tag.
            if (IsWithinBounds(curr_p + sizeof(kDocHdrClosingTag) - 1, buf, buf_len) && strncasecmp(curr_p, kDocHdrClosingTag, sizeof(kDocHdrClosingTag) - 1) == 0) {
              break;
            }
          }
          ++curr_p;
        }

        continue;
      }
    }

    // Ordinary document body word: [word_p, curr_p).
    word_p = curr_p;
    while (IsWithinBounds(curr_p, buf, buf_len) && IsIndexable(*curr_p)) {
      ++curr_p;
    }

    callback_->ProcessTerm(word_p, curr_p - word_p, doc_id, position++, context);
  }

  return doc_id - initial_doc_id;
}

template<class Callback>
  typename Parser<Callback>::Tag Parser<Callback>::ProcessTag(const char* tag, int tag_len, bool& in_closing_tag, uint32_t doc_id) {
    // Classifies the tag in [tag, tag + tag_len) and reports through
    // 'in_closing_tag' whether it is a closing tag.  Anchor tags additionally
    // forward their first quoted attribute value (the link target) to the
    // callback.  Tags that are not special for indexing return kTagNot.
    // Caller must ensure tag_len is always at least 2, for tag "<>".
    assert(tag_len >= 2);

    const char* curr_tag_p = tag + 1;

    // A leading '/' marks a closing tag.
    in_closing_tag = (*curr_tag_p == '/');
    if (in_closing_tag)
      ++curr_tag_p;

    // Dispatch on the first letter of the tag name.
    switch (*curr_tag_p) {
      case 'a':
      case 'A': {
        // Anchor: scan for the first quoted attribute value and report it as a
        // link.  Anchors are never reported as a special tag type.
        const char* link_start = NULL;
        const char* link_end = NULL;

        ++curr_tag_p;
        while (IsWithinBounds(curr_tag_p, tag, tag_len)) {
          if (*curr_tag_p == '"' || *curr_tag_p == '\'') {
            if (link_start == NULL) {
              link_start = curr_tag_p + 1;  // Just past the opening quote.
            } else {
              link_end = curr_tag_p;        // The closing quote.
              break;
            }
          }
          ++curr_tag_p;
        }

        if (link_start != NULL && link_end != NULL)
          callback_->ProcessLink(link_start, link_end - link_start, doc_id);

        return Parser::kTagNot;
      }
      case 'b':
      case 'B': {
        if (IsValidTag(curr_tag_p, tag, tag_len, "b"))
          return Parser::kTagB;
        return Parser::kTagNot;
      }
      case 'i':
      case 'I': {
        if (IsValidTag(curr_tag_p, tag, tag_len, "i"))
          return Parser::kTagI;
        return Parser::kTagNot;
      }
      case 'e':
      case 'E': {
        // <em> is treated the same as <i>.
        if (IsValidTag(curr_tag_p, tag, tag_len, "em"))
          return Parser::kTagI;
        return Parser::kTagNot;
      }
      case 'h':
      case 'H': {
        // Headings <h1>..<h6> first, then <html> itself.
        if (AreValidTags(curr_tag_p, tag, tag_len, "h", "1", "6", 2))
          return Parser::kTagH;
        if (IsValidTag(curr_tag_p, tag, tag_len, "html"))
          return Parser::kTagHtml;
        return Parser::kTagNot;
      }
      case 't':
      case 'T': {
        if (IsValidTag(curr_tag_p, tag, tag_len, "title"))
          return Parser::kTagTitle;
        return Parser::kTagNot;
      }

      case 's':
      case 'S': {
        // <strong> is treated the same as <b>; <script> toggles script skipping.
        if (IsValidTag(curr_tag_p, tag, tag_len, "strong"))
          return Parser::kTagB;
        if (IsValidTag(curr_tag_p, tag, tag_len, "script"))
          return Parser::kTagScript;
        return Parser::kTagNot;
      }
      case 'd':
      case 'D': {
        // Longest names first so "doc" does not shadow "docno"/"dochdr".
        if (IsValidTag(curr_tag_p, tag, tag_len, "dochdr"))
          return Parser::kTagDochdr;
        if (IsValidTag(curr_tag_p, tag, tag_len, "docno"))
          return Parser::kTagDocno;
        if (IsValidTag(curr_tag_p, tag, tag_len, "doc"))
          return Parser::kTagDoc;
        return Parser::kTagNot;
      }
      default: {
        break;
      }
    }

    return Parser::kTagNot;
  }

template<class Callback>
  bool Parser<Callback>::IsValidTag(const char* curr_tag_p, const char* tag, int tag_len, const char curr_tag_name[]) {
    // A tag matches when it begins with 'curr_tag_name' (case-insensitive) and
    // the name is followed — after any run of spaces — directly by the closing
    // '>'.  This rejects both longer names sharing the prefix (e.g. "b" vs
    // "body") and tags carrying attributes.
    const size_t curr_tag_name_len = strlen(curr_tag_name);

    if (strncasecmp(curr_tag_p, curr_tag_name, curr_tag_name_len) != 0)
      return false;

    const char* p = curr_tag_p + curr_tag_name_len;
    while (IsWithinBounds(p, tag, tag_len) && *p == ' ')
      ++p;

    return IsWithinBounds(p, tag, tag_len) && *p == '>';
  }

template<class Callback>
  inline bool Parser<Callback>::IsWithinBounds(const char* curr, const char* start, int len) {
    // True while 'curr' still points inside the 'len'-byte buffer that begins at
    // 'start'; written as a comparison against the one-past-the-end position.
    return curr < (start + len);
  }

template<class Callback>
  bool Parser<Callback>::AreValidTags(const char* curr_tag_p, const char* tag, int tag_len, const char curr_tag_base[], const char start_range[],
                                      const char end_range[], int range_len) {
    // Matches a numbered family of tags such as <h1>..<h6>: the tag must start
    // with 'curr_tag_base' (case-insensitive) followed by a suffix that compares
    // within [start_range, end_range] under strncmp over 'range_len' characters,
    // then optional spaces and the closing '>'.
    // NOTE(review): the only call site passes range_len = 2 with single-character
    // bounds ("1", "6"), which makes the suffix compare consume the character
    // AFTER the digit and then skip it via 'curr_tag_p += range_len' — so a bare
    // "<h1>" appears to be rejected while "<h1 >" matches.  Confirm whether
    // range_len should be 1.
    const size_t curr_tag_base_len = strlen(curr_tag_base);

    if (!strncasecmp(curr_tag_p, curr_tag_base, curr_tag_base_len)) {
      curr_tag_p += curr_tag_base_len;

      // Make sure the suffix fits inside the tag before comparing it.
      if (IsWithinBounds(curr_tag_p + range_len - 1, tag, tag_len)) {
        if (strncmp(curr_tag_p, start_range, range_len) >= 0 && strncmp(curr_tag_p, end_range, range_len) <= 0) {
          curr_tag_p += range_len;

          // After the suffix only spaces may precede the closing '>'.
          for (; IsWithinBounds(curr_tag_p, tag, tag_len); ++curr_tag_p) {
            if (*curr_tag_p != ' ') {
              if (*curr_tag_p == '>')
                return true;
              else
                break;
            }
          }
        }
      }
    }

    return false;
  }

template<class Callback>
  int Parser<Callback>::ProcessWarcInfoHeader(const char* buf, int buf_len, const char* curr_p, WarcHeader* header) {
      // Parses the 'warcinfo' record header that opens every WARC bundle.
      //   buf / buf_len:  bounds of the whole buffer.
      //   curr_p:         start of the header (points at the "WARC/0.18" line).
      //   header:         receives the Content-Length value so the caller can
      //                   skip over the record body; other keys are ignored.
      // Key/value pairs ("Key: value" lines) are read until the blank line
      // (double '\n') that terminates the header.  Returns the number of bytes
      // consumed, i.e. the full header length.
      #ifdef PARSERINL_DEBUG
        // Previously this trace printed unconditionally on every bundle; keep it
        // behind the module's debug flag like all other debug output.
        std::cout << "Process ProcessWarcInfoHeader" << std::endl;
      #endif
      const char* header_start = curr_p;

      while (IsWithinBounds(curr_p, buf, buf_len)) {
        if (*curr_p == '\n') {
          ++curr_p;

          // Double new line means we read the complete header.
          if (IsWithinBounds(curr_p, buf, buf_len) && *curr_p == '\n') {
            ++curr_p;
            break;
          }

          // Key runs from here up to the ':' separator.
          const char* key_start = curr_p;
          int key_length = 0;
          while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != ':') {
            ++curr_p;
            ++key_length;
          }

          #ifdef PARSERINL_DEBUG
            std::string warc_key = std::string(key_start, key_length);
            //std::cout << "WARC key: " << warc_key << std::endl;
          #endif

          // Step over the ':'.  NOTE(review): no bounds check on this increment;
          // a header truncated exactly at the ':' leaves curr_p one past the end,
          // but the IsWithinBounds() tests below keep all reads in range.
          ++curr_p;

          // Skip a single optional space after the colon.
          if (IsWithinBounds(curr_p, buf, buf_len) && *curr_p == ' ') {
            ++curr_p;
          }

          // Value runs to the end of the line (the '\n' is consumed at loop top).
          const char* value_start = curr_p;
          int value_length = 0;

          while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != '\n') {
            ++curr_p;
            ++value_length;
          }

          const char kContentLengthStr[] = "Content-Length";
          const int kContentLengthStrLen = sizeof(kContentLengthStr) - 1;

          #ifdef PARSERINL_DEBUG
            std::string warc_value = std::string(value_start, value_length);
            //std::cout << "WARC value: " << warc_value << std::endl;
          #endif

          // We need to know how long the document content is.
          if (key_length == kContentLengthStrLen && strncmp(key_start, kContentLengthStr, kContentLengthStrLen) == 0) {
            char content_length_buf[11];  // Has to fit a 4 byte integer and a terminating null character.
            assert(value_length < 11);
            memcpy(content_length_buf, value_start, value_length);
            content_length_buf[value_length] = '\0';
            header->content_length = atoi(content_length_buf);
          }
        } else {
          ++curr_p; // Skips past the 'WARC/0.18' part.
        }
      }

      return (curr_p - header_start);
  }






template<class Callback>
  int Parser<Callback>::ProcessWarcResponseAndHTTPHeader(const char* buf, int buf_len, const char* curr_p, WarcHeader* header, uint32_t& doc_id) {
    // Parses the WARC record header only (not the HTTP header) starting at
    // 'curr_p', filling in 'header' and invoking the callback for the fields we
    // care about: 'WARC-Target-URI', 'WARC-TREC-ID', and 'Content-Length'.
    // Header parsing stops at the first double newline.
    // Returns the number of bytes consumed starting from 'curr_p'.
    const char* header_start = curr_p;

    while (IsWithinBounds(curr_p, buf, buf_len)) {
      if (*curr_p != '\n') {
        ++curr_p;  // Skips past the 'WARC/0.18' version line.
        continue;
      }
      ++curr_p;

      // Double newline means we read the complete header.
      if (IsWithinBounds(curr_p, buf, buf_len) && *curr_p == '\n') {
        ++curr_p;
        break;
      }

      // Parse the key of a 'Key: Value' header line.
      const char* key_start = curr_p;
      int key_length = 0;
      while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != ':') {
        ++curr_p;
        ++key_length;
      }
      std::string warc_key(key_start, key_length);

      // Skip the ':' separator (guarded, in case the key loop hit the end of
      // the buffer on truncated input) and an optional single space after it.
      if (IsWithinBounds(curr_p, buf, buf_len)) {
        ++curr_p;
      }
      if (IsWithinBounds(curr_p, buf, buf_len) && *curr_p == ' ') {
        ++curr_p;
      }

      const char* value_start = curr_p;
      int value_length = 0;
      while (IsWithinBounds(curr_p, buf, buf_len) && *curr_p != '\n') {
        ++curr_p;
        ++value_length;
      }
      std::string warc_value(value_start, value_length);

      if (warc_key == "WARC-Target-URI") {
        // TODO: Use a real URL parser. The host/domain/nesting-level features
        // below are temporary placeholder values until one is integrated.
        header->host_name = "google.com";
        header->feature_domainName = "com";
        header->feature_urlLength = warc_value.size();
        header->feature_number_nesting_levels = 3;

        // Store the URL in the header-owned string first, and point 'url' at
        // THAT string — pointing at the loop-local 'warc_value' would leave a
        // dangling pointer once this iteration ends.
        header->urlInStringFormat = warc_value;
        header->url = header->urlInStringFormat.c_str();
        header->url_len = header->urlInStringFormat.length();

        callback_->ProcessUrl(header->url, header->url_len, doc_id);
      } else if (warc_key == "WARC-TREC-ID") {
        header->feature_doc_id = doc_id;
        // Same dangling-pointer concern as the URL above: back 'docno' with
        // the header-owned copy, not the loop-local 'warc_value'.
        header->feature_WARC_TREC_ID = warc_value;
        header->docno = header->feature_WARC_TREC_ID.c_str();
        header->docno_len = header->feature_WARC_TREC_ID.length();
        std::cout << "\tProcessing:" << warc_value << std::endl;

        callback_->ProcessDocno(header->docno, header->docno_len, doc_id);
      } else if (warc_key == "Content-Length") {
        // We need to know how long the document content is.
        header->feature_documentSize = warc_value;
        const int content_length = atoi(warc_value.c_str());
        header->content_length = content_length;
        callback_->ProcessDocLength(content_length, doc_id);
      }
    }

    return (curr_p - header_start);
  }

#endif /* PARSERINL_H_ */
