/*
 * WebCrawler.h
 *
 *  Created on: Feb 15, 2012
 *      Author: beebe
 */

#ifndef WEBCRAWLER_H_
#define WEBCRAWLER_H_

#include <stdio.h>
#include <stdlib.h>
#include "LinkedList.h"
#include "Page.h"
#include "Url.h"
#include "BST.h"
#include "Index.h"

//! Crawls a website starting from a single URL, indexing the words on each
//! page it visits. Tracks visited and excluded links in BSTs, queues newly
//! discovered URLs in a linked list, and writes the resulting index out as XML.
//! NOTE(review): uses unqualified `string` — presumably std::string pulled in
//! via one of the project headers; confirm <string> is included transitively.
class WebCrawler{
public:
	//! Constructs the crawler.
	//!
	//! @param startingUrl  The first URL to fetch; presumably also defines the
	//!                     base directory tested by IsInWebsite() — confirm in .cpp.
	WebCrawler(string startingUrl);

	//! Destructor. NOTE(review): the class holds five raw owning pointers
	//! (startUrl, urlqueu, excludedLinks, visitedLinks, myIndex) — verify the
	//! .cpp deletes all of them here.
	~WebCrawler();
	// NOTE(review): leftover commented-out code — in-class initializers like
	// this are C++11; the member is declared (uninitialized) below instead.
	//Index * myindex = new Index();
	//!  Initiates the webcrawl beginning with the first link
	//!
	//!  @return True if the crawl was a success
	bool beginCrawl();

	//! Returns true if the url is in the base directory we started in
	//!
	//! @param url  Candidate URL (taken by non-const reference — presumably
	//!             so the implementation can normalize it in place; confirm).
	bool IsInWebsite(string &  url);

	//! Returns true if we have already indexed this page
	//!
	//! @param url  URL to test against the visitedLinks BST.
	bool IsVisited(string & url);

	//! Load a file into the stop word BST
	//!
	//! @param loc  Path to the stop-word file.
	//! @return True on success — presumably false if the file cannot be
	//!         opened; confirm against the .cpp.
	bool loadStopWords(string loc);

	//! Prints the index to a file
	//!
	//! @param loc  Output file path for the XML dump.
	void printXml(string& loc);
private:
	//! First url to be parsed (owned; raw pointer — see destructor note)
	Url * startUrl;

	//! Linked List with the urls we will be indexing.
	//! NOTE(review): member name "urlqueu" is a misspelling of "urlqueue";
	//! kept as-is because the out-of-view .cpp references it by this name.
	LinkedList<Url> * urlqueu;

	//! A list of all the links that we shouldn't index
	BST * excludedLinks;

	//! BST of urls we have visited and indexed already
	BST * visitedLinks;

	//! Number of entries actually populated in stopWords (array is fixed-size).
	int stopWordCount;
	//! Array of words we should ignore.
	//! NOTE(review): 300,000 std::string objects inline makes sizeof(WebCrawler)
	//! several megabytes — stack-allocating this class may overflow the stack;
	//! a std::vector (or reusing the BST) would be safer. Left unchanged here
	//! because the .cpp depends on this layout.
	string stopWords[300000];

	//! Our index
	Index * myIndex;

	//! Helper function to extract urls from a page
	//! and put them into the urlqueu
	//!
	//! @param temppage  Page whose links are scanned (non-owning pointer — confirm).
	void extractUrls(Page * temppage);

	//! Helper function to extract words from a page
	//! and insert them into the index
	//!
	//! @param temppage  Page whose text is tokenized.
	//! @param path      Path/URL recorded alongside each word in the index.
	void extractWords(Page * temppage, const string & path);

	//! Add a url to the visited links signaling we have indexed it
	//! Also adds a description
	//!
	//! @param temppage  Page that was just indexed.
	//! @param path      Its URL/path, stored in the visitedLinks BST.
	void saveToVisited(Page * temppage, const string & path);

};


#endif /* WEBCRAWLER_H_ */


