/*
 * Page.cpp
 *
 *  Created on: Feb 27, 2012
 *      Author: beebe
 */

#include "Page.h"

#include <stdio.h>

#include <algorithm>
#include <iostream>
#include <string>


//!Downloads the page at urlText, then tokenizes and parses it, populating
//!the word list, link list, and description members.
Page::Page(const string & urlText):rawHtml(""),wordCount(0),description(""),hasDescription(false){
	this->loadPageToString(urlText);
	allLinks = new LinkedList<class Url>();
	// NOTE: removed an unused local copy of rawHtml and a large block of
	// commented-out test HTML that served no runtime purpose.
	HTMLTokenizer tokenizer(rawHtml);
	parsePage(tokenizer);
}
//!Releases the heap-allocated link list owned by this page.
Page::~Page(){
	delete allLinks;
	allLinks = NULL; // guard against accidental double delete
}



//!Returns the array holding every valid word found on the page.
//!Use getWordCount() for the number of populated entries.
string * Page::getAllWords(){
	return this->allWords;
}
int Page::getWordCount(){
	return wordCount;
}
//!Returns the list containing every valid link found on the page.
//!The Page retains ownership of the list.
LinkedList<Url> * Page::getUrlList(){
	return this->allLinks;
}
//!Returns a pointer to the page description (title, header, or
//!first ~100 characters of body text, whichever was found first).
string * Page::GetDescription(){
	return &(this->description);
}
//!Returns a pointer to the page's base url (the final location after
//!any redirects were followed during download).
string * Page::GetBaseurl(){
	return &(this->baseUrl);
}

//!Returns the page as it was download from the internet
std::string * Page::getRawHtml(){
	return &rawHtml;
}


//!Takes in a page that has already been tokenized and walks every token,
//!extracting the description, all indexable words, and all links.
//!Results are saved to the associated member lists.
//!Returns true unconditionally.
bool Page::parsePage(HTMLTokenizer tokenizer){
	bool indexing = false;
	string descState = "";

	while(tokenizer.HasNextToken()){
		HTMLToken token = tokenizer.GetNextToken();
		string text = token.GetValue();
		StringUtil::ToLower(text);

		// Keep refining the description until one has been locked in.
		if(!hasDescription)
			descState = extractDescription(token.GetType(), text, descState);

		// Record the token's link, if it is an anchor tag.
		extractLinks(token, text);

		// Indexing is toggled by <title>/<body> boundaries; only text tokens
		// (type 3 -- assumed from usage, confirm against HTMLToken) while
		// indexing contribute words.
		indexing = setIndexable(indexing, token.GetType(), text);
		if(indexing && token.GetType() == 3){
			StringUtil::Trim(text);
			seperateWords(text);
		}
	}

	// Fallback: no title/header description was found, so use the first
	// chunk of body text accumulated by extractDescription().
	if(!hasDescription)
		description = first50;
	return true;
}

//!Takes in a string and splits out all the individual words,
//!appending each valid one to allWords.
//!
//!word-chars = a-z,0-9,_,-
//!{non-word-chars}WORD{non-word-chars}WORD{non-word-chars}WORD...
void Page::seperateWords(string & words){
	// -1 tells the token iterator to yield the text BETWEEN matches,
	// i.e. the regex acts as a delimiter.
	const int NOT_MATCH = -1;
	string text = words;

	// Any run of non-word characters delimits words (case-insensitive).
	boost::regex re("[^a-z0-9_-]", boost::regex_constants::icase);
	boost::sregex_token_iterator no_match;
	boost::sregex_token_iterator delimFinder(text.begin(), text.end(), re, NOT_MATCH);

	// FIX: the original advanced with "*delimFinder++;" which dereferenced
	// the iterator and threw the result away; plain pre-increment is the
	// correct (and cheaper) way to step to the next token.
	for(; delimFinder != no_match; ++delimFinder)
	{
		string tok(delimFinder->first, delimFinder->second);
		// NOTE(review): allWords has no visible bounds check here -- confirm
		// its capacity cannot be exceeded by very word-heavy pages.
		if(isValidWord(tok))
			allWords[wordCount++]=tok;
	}

}

//!A helper function to toggle indexing on and off.
//!Used specifically by parsePage().
//!type 0 (opening tag) of title/body turns indexing on;
//!type 1 (closing tag) of title/body turns it off;
//!everything else leaves the current state untouched.
bool Page::setIndexable(bool current, int type, string value){
	// Only <title> and <body> boundaries affect the indexing state.
	if(value.compare("title") == 0 || value.compare("body") == 0){
		if(type == 0)
			return true;   // entering an indexable section
		if(type == 1)
			return false;  // leaving an indexable section
	}
	return current;
}



//! Saves the webpage to rawHtml.
//! Downloads the page at `url`; redirects are followed and the final
//! location is stored in baseUrl. On failure an error is reported and
//! rawHtml is left with whatever was read so far.
void Page::loadPageToString(const string & url){
	try
	{
		string path(url);
		URLInputStream stream(path);
		//Handle redirections (Find out where we redirected to)
		baseUrl = stream.GetLocation();
		while (!stream.IsDone()){
			rawHtml += stream.Read();
		}
	}
	// FIX: diagnostics now go to stderr instead of stdout so they cannot
	// pollute any pipelined/normal program output.
	catch (std::exception &e)
	{
		std::cerr << "Exception Occurred:" << e.what() << std::endl;
	}
	catch (CS240Exception &e)
	{
		std::cerr << "Exception Occurred:" << e.GetMessage() << std::endl;
	}
	catch (...)
	{
		std::cerr << "Unknown Exception Occurred" << std::endl;
	}
}

//! Returns true if the tag is considered a header.
//! Only h1-h4 qualify; h5/h6 are not treated as headers here.
bool Page::isHeaderTag(string & tag){
	return tag.compare("h1") == 0
	    || tag.compare("h2") == 0
	    || tag.compare("h3") == 0
	    || tag.compare("h4") == 0;
}

//!Helper method used almost only by the first50 accumulation.
//!Counts the space characters (' ') in the string; note that tabs and
//!newlines are NOT counted, matching the original behavior.
int Page::count_whitespace(string& s) {
  // std::count replaces the hand-rolled loop, which compared a signed
  // int index against the unsigned s.size().
  return static_cast<int>(std::count(s.begin(), s.end(), ' '));
}

//!Note that this function can accept any kind of InputStream, not just
//!URLInputStream. Drains the stream, echoing its contents to cout.
void Page::print_contents (InputStream &stream){
    for (; !stream.IsDone(); ) {
        std::cout << stream.Read();
    }
}

//!A valid word must start with a lowercase letter ('a'-'z').
//!(Tokens are lowercased before reaching this point, so in practice this
//!means "starts with a letter".)
bool Page::isValidWord(string & tok){
	// A direct character test replaces the boost::regex "^[a-z].*" match,
	// which compiled a regex on every call just to inspect tok[0]. For the
	// newline-free tokens produced by seperateWords() the result is
	// identical, and the empty string is still rejected.
	return !tok.empty() && tok[0] >= 'a' && tok[0] <= 'z';
}

//!Checks the file extension for a valid html page.
bool Page::SuffixIsHtml(string & urlPath){
	// FIX: seven of the original entries ("cgi","jsp","asp","aspx","php",
	// "pl","cfm") were missing the leading dot, so any path merely ENDING
	// in those letters (e.g. "myphp") was accepted as html.
	string suffixes[] = {".html",".htm",".shtml",".cgi",".jsp",
	                     ".asp",".aspx",".php",".pl",".cfm"};
	// Derive the count from the array instead of hard-coding 10.
	const int count = static_cast<int>(sizeof(suffixes) / sizeof(suffixes[0]));
	for(int i = 0; i < count; i++){
		if(StringUtil::IsSuffix(urlPath,suffixes[i]))
			return true;
	}
	return false;
}

//!Checks a url path to make sure it points to a valid html page:
//!a directory ('/' terminated), an extensionless path, or a path whose
//!suffix is a known html-producing extension.
bool Page::isValidHtmlPage(string & urlPath){
	// Guard: the original indexed urlPath[size()-1] on an empty string (UB).
	if(urlPath.empty())
		return false;
	if(urlPath[urlPath.size()-1] == '/'){
		return true;
	// BUG FIX: string::find returns string::npos when '.' is absent. The
	// original compared against NULL (i.e. 0), which actually meant
	// "the first character is a dot", not "there is no dot".
	} else if(urlPath.find('.') == string::npos){
		return true;
	} else if(SuffixIsHtml(urlPath)){
		return true;
	} else
		return false;
}

//!Run with parsePage()
//!Extracts a description on priority
//!!1.Title
//!!2.Header Tag
//!!3.First 100 characters
//!
//!Token types (assumed from how parsePage uses them -- confirm against
//!HTMLToken): 0 = opening tag, 1 = closing tag, 3 = text node.
//!Side effects: appends to the `description` or `first50` members and may
//!set `hasDescription`. Returns the next FSM state, which the caller feeds
//!back in on the following token.
string Page::extractDescription(int type, string & value,string & state){
	//States for the small Finite state machine below
	//inBetween
	//inTitle
	//inHeader

	//State Changer
	if(value.compare("body")==0 && type==0)
		state = "inBetween";
	else if(value.compare("title")==0 && type==0)
		state = "inTitle";
	else if(value.compare("title")==0 && type==1){
		state = "inBetween";
		//Leaving </title>: lock in the description if any text was collected
		if(description.compare("")!=0)
			hasDescription = true;
	}else if(isHeaderTag(value) && type==0)
		state = "inHeader";
	else if(isHeaderTag(value) && type==1){
		state = "inBetween";
		//Leaving a header tag: lock in the description if any text was collected
		if(description.compare("")!=0)
			hasDescription = true;
	} else {//If it's a title tag grab the inbetween
		if(state.compare("inTitle")==0){
			description += value;
		} else if(state.compare("inHeader")==0 && type==3){
			description += value;
		} else if(state.compare("inBetween")==0 && type==3){
			//Fallback path: accumulate up to 100 non-space characters of body
			//text into first50 (used by parsePage when no title/header
			//produced a description; note the member name says 50 but the
			//limit here is 100).
			int curPos = 0;
			first50 += " ";
			StringUtil::Trim(value);
			while(curPos < value.size() && (first50.size() - count_whitespace(first50)) < 100){
				first50 += value[curPos];
				curPos++;
			}
		}

	}
	return state;
}
//!Run with parsePage()
//!Extracts a link from an opening <a> tag, builds a Url resolved against
//!baseUrl, and inserts it into allLinks if it points to a valid html page.
void Page::extractLinks(HTMLToken & currentToken, string & value){
	if(value.compare("a")==0 && currentToken.GetType()==0){
		string urlString = currentToken.GetAttribute("href");
		// FIX: the original heap-allocated the Url with new, inserted a COPY
		// of it into the list, and only deleted it on the rejection path --
		// every accepted link leaked its Url. A stack object removes the
		// new/delete pair entirely; Insert() still receives a copy.
		Url tempUrl(urlString, baseUrl);
		string tempPath = tempUrl.GetPath();
		if(isValidHtmlPage(tempPath)){
			allLinks->Insert(tempUrl, NULL);
		}
	}
}

