<?php
	//Web crawler driver script: sets configuration, opens the log, connects to
	//the database, and kicks off the crawl via initCrawl().
	//NOTE(review): this file relies on the mysql_* extension, which was removed
	//in PHP 7 — it only runs as written on PHP 5.x.

	//delay between crawling pages
	$crawlDelay = 10;
	
	//file where we log activity
	$logfile = fopen("log.txt", 'a+');
	
	//user agent string
	$userAgentString = "vark-websys";
	
	//$disallowedPaths is an associative array where the key is the domain and 
		//the value is an array of paths disallowed by robots.txt
	$disallowedPaths = array();
	
	//global variable for the domain we are crawling 
		//(Except for debugging purposes, this is set in initCrawl())
	$domain = "";
	
	//set the user-agent string used when we make requests
	ini_set('user_agent', $userAgentString);
	//set the length of time we spend waiting for a page to respond.
	ini_set('default_socket_timeout', 30);	
	
	//use my own error handler so I'm not inundated with 404s because some domains 
		//don't have a robots.txt page.
		//(only E_WARNING is routed to the handler; other levels get php's default)
	set_error_handler("handle404Warnings", E_WARNING);

	//connect to the db
	//NOTE(review): hard-coded root credentials with an empty password — fine on
	//a throwaway dev box, but these should come from config anywhere shared.
	$conn = mysql_connect("localhost", "root", "");
	$db = mysql_select_db( 'vandes4_PROJECT',   $conn);
	
	//start crawling
	initCrawl();	
	
	//Crawling functions
	//============================================================================
	
	//initCrawl finds the first url in the db with a session value of 0 
	//(indicating not yet crawled) and starts crawling there.  It then repeats 
	//until all pages have been crawled, and finally resets session flags.
	function initCrawl()
	{	
		global $domain;
		global $crawlDelay;
		
		//assume the domain we are crawling is the domain of the very first url
		$sql = "select full_path from crawl limit 1";
		$result = mysql_query($sql) or die("Error in initCrawl while selecting first url: ".mysql_error());
		$row = mysql_fetch_assoc($result);
		//BUG FIX: with an empty crawl table mysql_fetch_assoc returns false and
		//the original handed null to getDomain(); bail out instead.
		if($row === false)
		{
			return;
		}
		$url = $row['full_path'];
		
		$domain = getDomain($url);
		parseRobotsDotText($domain);
		
		initLog();
		logMessage("Beginning crawl... \n===================");
		//start with the first url we haven't crawled in this session of crawling
		$sql = "select crawl_id, full_path from crawl where session = 0 limit 1";
		$result = mysql_query($sql) or die("Error in initCrawl while selecting first url to crawl: ".mysql_error());
		
		if(mysql_num_rows($result) < 1)
		{
			logError("No unvisited link to crawl.  Update session values.");
			logError("Exiting.");
			return;
		}
		
		$row = mysql_fetch_assoc($result);
		$url = $row['full_path'];
		$id = $row['crawl_id'];
		logMessage("Starting crawl at ".$url.", with id ".$id);
		
		//give the first page a generous time budget; crawl() extends it per page
		set_time_limit(300);
		crawl($url, $id);
		
		//keep pulling uncrawled urls (session = 0) until none remain
		while(1)
		{
			$sql = "select full_path, crawl_id from crawl where session = 0 limit 1";
			$result = mysql_query($sql) or die("Error in initCrawl while selecting next url: ".mysql_error());
			if(mysql_num_rows($result) == 0)
			{
				break;
			}
			$row = mysql_fetch_assoc($result);
			crawl($row['full_path'], $row['crawl_id']);
		}
		
		finishCrawl();
	}

	//finishCrawl logs a success message, resets every url's session flag back
	//to 0 so the next run starts fresh, and closes the log file.
	function finishCrawl()
	{
		global $logfile;

		//clear the per-session "visited" markers on every row
		mysql_query("update crawl set session = 0") or die("Error in finishCrawl: ".mysql_error());

		logMessage("Finished successfully.");

		fclose($logfile);
	}

	//crawl fetches one page, records the links it contains (with their anchor
	//text as keywords), marks the page visited, and stamps its crawl date.
	//Inputs: $url, the url of the page, and $id, the crawl_id of the page in the 
		//db.
	function crawl($url, $id)
	{
		global $crawlDelay;
		
		if(!shouldCrawl($url, $id))
		{
			logError("Skipping ".$url);
			markVisited($id);
			return;
		}
		logMessage("crawling ".$url);
		
		//grab the page content
		$page = file_get_contents($url);
		//BUG FIX: the original tested ($page == false), which also matched a
		//successfully-fetched page whose body is "" or "0"; only a real fetch
		//failure (=== false) should be treated as an error.
		if($page === false)
		{
			logError("404'd at ".$url);
			markVisited($id);
			return;
		}
		
		//we are about to crawl for keywords.  
		//Delete all current keywords coming from this page so we don't create 
		//duplicates.
		deleteKeywordsFrom($url);
		
		//capture href targets and the anchor text that follows each of them
		$regex = '/href[ \t]*=[ \t]*"([^"]+)"[^>]*>([^<]+)/su';
		preg_match_all($regex, $page, $matches);

		//handle links and their text
		for($i = 0; $i < count($matches[0]); $i++)
		{
			$link = $matches[1][$i];
			$keyword = strtolower(trim($matches[2][$i]));
		
			//absolutize any relative paths...
			$link = relToAbsolute($link, $url);
			
			//only keep http links inside our domain, with non-empty anchor
			//text, that robots.txt lets us visit
			if(isHttp($link) && 
				 urlIsInDomain($link) && 
				 $keyword != "" && 
				 robotsAllowed($link))
			{
				$link = formatUrl($link);
				$keyword = mysql_real_escape_string($keyword);
				insertUrl($link);
				insertUrlKeywordPair($link, $keyword, $id);
			}
		}
		
		//we grabbed the page, mark it as crawled.
		markVisited($id);
		
		//update the last crawled date in the db
		$sql = "update crawl set date = NOW() where full_path = '".
						mysql_real_escape_string($url)."'";
		mysql_query($sql) or die("Error in crawl while updating crawl date: ".mysql_error());
		
		//be polite: extend the script's time limit, then pause between pages
		set_time_limit($crawlDelay + 3600);
		sleep($crawlDelay);
	}
	
	//Logging functions
	//============================================================================
	
	//initLog writes a blank gap to the logfile so a new crawl session is
	//visually separated from the previous one.
	function initLog()
	{
		global $logfile;

		//two newlines: the separator between sessions
		fputs($logfile, "\n\n");
	}
	
	//logMessage prints a timestamped message to the log file.
	//Inputs: $message, the text to log (a newline is appended).
	function logMessage($message)
	{	
		global $logfile;
		
		$message = "[".timestamp()."]\t\t".$message;
		fwrite($logfile, $message);
		fwrite($logfile, "\n");
		//BUG FIX: flush() only flushes php's *output* buffer; fflush() is what
		//pushes the file handle's buffered writes to disk, keeping the log
		//current while a long crawl is still running.
		fflush($logfile);
	}
	
	//logError prints a timestamped message to the log file, at a different 
	//indentation than a message (single space vs. two tabs) so it stands out.
	//Inputs: $message, the error text to log (a newline is appended).
	function logError($message)
	{	
		global $logfile;
		
		$message = "[".timestamp()."] ".$message;
		fwrite($logfile, $message);
		fwrite($logfile, "\n");
		//BUG FIX: flush() only flushes php's *output* buffer; fflush() is what
		//actually flushes this file handle's buffered writes to disk.
		fflush($logfile);
	}
	
	//timestamp returns the current local time as a "YYYY-MM-DD HH:MM:SS" string.
	function timestamp()
	{
		//strftime() is deprecated as of PHP 8.1; date() yields the same format.
		return date("Y-m-d H:i:s");
	}
	
	//URL functions
	//============================================================================
	
	//relToAbsolute converts a relative url to an absolute one, resolved against
	//$base, the page the relative url was found on.
	//Inputs:
	//	link: the (possibly relative) url
	//	base: the page where the url was found
	//Outputs: an absolute url; already-absolute links pass through unchanged.
	function relToAbsolute($link, $base)
	{
		//drop any fragment identifier (hash) from the link
		if(preg_match('"#.*$"', $link, $fragMatch, PREG_OFFSET_CAPTURE) > 0)
		{
			$link = substr($link, 0, $fragMatch[0][1]);
		}

		//a link that already starts with a protocol is absolute as-is
		if(preg_match('"^[^:]+:"', $link) > 0)
		{
			return $link;
		}

		//if the base url ends with a file name, strip that file name off
		if(preg_match('"[^:]+://[^/]+/([^/]+/)*([^.]+\.[^.]+)$"', $base, $fileMatch) > 0)
		{
			$base = substr($base, 0, strlen($base) - strlen($fileMatch[2]));
		}

		//join the two pieces with exactly one slash between them
		return trim($base, "/")."/".trim($link, "/");
	}
		
	// isValidFileType determines if the file found at a URL is of the 
	//appropriate type (text/html) for us to work with.
	// Inputs:  $headers, the array of header lines returned for the url.
	// Outputs: true if a Content-Type header specifies text/html,
	//false otherwise.  (Comparing only the first 23 chars deliberately
	//ignores any "; charset=..." suffix; lowercasing makes it case-insensitive.)
	function isValidFileType($headers)
	{
		//(removed the unused $isLink local from the original)
		foreach ($headers as $header) 
		{
			if(strtolower(substr($header, 0, 23)) == 'content-type: text/html')
			{
				return true;
			}
		}
		
		return false;
	}
	
	//urlIsInDomain checks if a url is in the global $domain that we are crawling.
	//Inputs: 
	//	url: an absolute url
	//Outputs: true if $url is in $domain or in a subdomain of $domain, false 
		//otherwise.
	function urlIsInDomain($url)
	{
		global $domain;
		//fast path: url literally starts with the domain string
		if(substr($url, 0, strlen($domain)) == $domain)
		{
			return true;
		}
		
		//now check if it's in a subdomain of $domain...
		//second capture of $regex grabs "rpi.edu" from 
			//http://www.rpi.edu OR from http://www.cs.rpi.edu
		$regex = '"[^:]+://([^./]+\.)*([^./]+\.[^./]+)"';
		
		//BUG FIX: the original read $matches[2] without checking the match
		//result; if BOTH the url and the domain failed to parse (e.g. dotless
		//hosts), null == null made unrelated hosts compare as equal.
		if(preg_match($regex, $url, $matches) < 1)
		{
			return false;
		}
		$urlDomainSimple = $matches[2];
		
		if(preg_match($regex, $domain, $matches) < 1)
		{
			return false;
		}
		$domainSimple = $matches[2];
		
		return $domainSimple == $urlDomainSimple;
	}
	
	//urlRedirectsOutsideDomain checks if a url redirects outside the domain 
	//Inputs: 
	//	headers: the headers from url
	//	url: an absolute url
	//Outputs: true if there is a location header indicating that the page is 
		//redirected outside the domain, false otherwise.
	function urlRedirectsOutsideDomain($headers, $url)
	{
		global $domain;
		//walk the headers last-to-first: in reverse order, the Location header
		//of the final hop appears right after the terminating 200 status line.
		$headers = array_reverse($headers, true);
		$atFinalDestination = false;
		foreach ($headers as $header)
		{
			if($atFinalDestination && substr($header, 0, 9) == "Location:")
			{
				$realUrl = trim(substr($header, 9));
				//Location may be relative; resolve it against the request url
				if(!isAbsolute($realUrl))
				{
					$realUrl = relToAbsolute($realUrl, $url);
				}
				
				//NOTE(review): urlIsInDomain takes a single parameter; the
				//extra $domain argument here is silently ignored by php.
				if(urlIsInDomain($realUrl, $domain))
				{
					return false;
				}
				return true;
			}
			//NOTE(review): matches only the literal "HTTP/1.1 200 OK" status
			//line — other protocol versions or reason phrases never set this.
			if($header == "HTTP/1.1 200 OK")
			{
				$atFinalDestination = true;
			}
		}
		
		//Maybe there was no redirection at all, so no location header.
		return false;
	}
	
	//getDomain extracts the "<scheme>://<host>" portion of a url.
	//Inputs:
	//	url: an absolute url
	//Outputs: the scheme-plus-host string, or false if none can be found.
	function getDomain($url)
	{
		//scheme (anything but ':'), then '://', then the host (up to first '/')
		if(preg_match('"[^:]+://[^/]+"', $url, $found) > 0)
		{
			return $found[0];
		}
		return false;
	}
	
	//isHttp checks if a url uses the plain http scheme.
	//Inputs:
	//	absoluteUrl: an absolute url.
	//Outputs: true only for "http:" urls (https and other schemes return false).
	function isHttp($absoluteUrl)
	{
		return substr($absoluteUrl, 0, 5) === "http:";
	}
	
	//shouldCrawl checks if a url should be crawled.
	//Inputs:
	//	url: an absolute url
	//	id: the crawl_id of the url in the db
	//Outputs: true if the url:
	//		uses http
	//		is not disallowed by robots.txt
	//		is in the domain or a subdomain of the domain we are crawling
	//		doesn't redirect outside the domain
	//		points to a desired file type
	//		has been modified since the time we last crawled.
	function shouldCrawl($url, $id)
	{
		global $domain;
		//cheap string checks come first; the header fetch below costs a request
		if(!isHttp($url))
		{
			logError($url." is not using http");
			return false;
		}
	
		if(!robotsAllowed($url))
		{
			logError("Robots.txt doesn't allow ".$url);
			return false;
		}
		
		if(!urlIsInDomain($url))
		{
			logError($url." is not in the domain ".$domain.".");
			return false;
		}
		
		//grab the headers so we can check what type of file this is and 
			//when it was last modified.
		$headers = get_headers($url);
		if($headers == false)
		{
			//unreachable page: set session = 1 here so the selection loop in
			//initCrawl() won't pick this url again during this session
			$sql = "update crawl set session = 1 where crawl_id = ".
						mysql_real_escape_string($id)."";
			mysql_query($sql) or die("Error in shouldCrawl while trying to set session: ".mysql_error());
			logError("404'd at ".$url);
			return false;
		}
		
		if(urlRedirectsOutsideDomain($headers, $url))
		{
			logError($url." redirects outside the domain ".$domain);
			return false;
		}
		
		if(!isValidFileType($headers))
		{
			logError($url." is not a desired file type.");
			return false;
		}
		
		if(!modifiedSinceLastCrawl($id, $headers))
		{
			logError($url." has not been modified since last crawl.");
			return false;
		}
		
		/*
		//check that we have not already been here
		if(visited($url))
		{
			logError($url." has been visited.");
			return false;
		}*/
		
		return true;
	}
	
	//isAbsolute checks if a url is absolute.
	//Inputs:
	//	link: an absolute or relative url
	//Outputs: true when the url begins with something shaped like a protocol
	//(one or more chars containing no ':' or '.', followed by ':'), else false.
	function isAbsolute($link)
	{
		return preg_match("/^[^:.]+:/", $link) === 1;
	}
	
	//formatUrl prepares a url for insertion in the database: drops fragment
	//ids (hashes), decodes html entities, SQL-escapes, and url-encodes spaces.
	function formatUrl($url)
	{
		$pieces = parse_url($url);

		//if the url can't be parsed into at least scheme + host, leave it alone
		if($pieces == false || !isset($pieces['host']) || !isset($pieces['scheme']))
		{
			return $url;
		}

		//rebuild from the parts we keep; the fragment is dropped by omission
		$rebuilt = $pieces['scheme']."://".$pieces['host'];

		if(isset($pieces['path']))
		{
			$rebuilt .= $pieces['path'];
		}

		if(isset($pieces['query']))
		{
			$rebuilt .= "?".$pieces['query'];
		}

		//decode entities like &amp; first, then escape for the database
		$rebuilt = mysql_real_escape_string(html_entity_decode($rebuilt));

		return urlEncodeSpaces($rebuilt);
	}
	
	//urlEncodeSpaces replaces every space in a url with its percent-encoding, %20.
	function urlEncodeSpaces($url)
	{
		//str_replace is simpler (and cheaper) than a regex for a fixed,
		//single-character search string
		return str_replace(" ", "%20", $url);
	}
	
	//DB functions
	//============================================================================
	
	//insertUrl inserts a url into the crawl table unless it is already present.
	function insertUrl($url)
	{
		//avoid creating duplicate rows for the same path
		if(urlIsInDB($url))
		{
			return;
		}
		$safeUrl = mysql_real_escape_string($url);
		//the zeroed date marks this url as never yet crawled
		$sql = "insert into crawl (full_path, date) values ('".$safeUrl."', '0000-00-00 00:00:00')";
		mysql_query($sql) or die("Error in insertUrl: ".mysql_error());
	}
	
	//insertUrlKeywordPair inserts the individual words of a keyword phrase into
	//the db for a url.
	//Inputs: 
	//	url: the absolute url this keyword is for
	//	keyword: the keyword phrase (split into words before insertion)
	//	sourceId: the id of the url that this keyword came from
	//Outputs: true if the url exists in the crawl table, false otherwise.
	function insertUrlKeywordPair($url, $keyword, $sourceId)
	{
		//look up the crawl_id of the target url
		$sql = "select crawl_id from crawl where full_path = '".
						mysql_real_escape_string($url)."'";
		$result = mysql_query($sql);
		if(mysql_num_rows($result) > 0)
		{
			$row = mysql_fetch_assoc($result);
			$id = $row['crawl_id'];
			//split the phrase on runs of non-alphanumeric characters.
			//BUG FIX: the original class was [^0-9a-x] — a typo for [^0-9a-z] —
			//which treated 'y' and 'z' as separators and mangled any keyword
			//containing them.  (The old leading [\s]| alternative was redundant:
			//whitespace is already non-alphanumeric.)
			$words = preg_split("/[^0-9a-z]+/", $keyword);
			foreach ($words as $word)
			{
				$word = trim($word);
				if($word != "")
				{
					$sql = "insert into keyword (word, crawl_id, source_id) values ('".
									mysql_real_escape_string($word)."', ".
									mysql_real_escape_string($id).", ".
									mysql_real_escape_string($sourceId).")";
					$result = mysql_query($sql) or die("Error in insertUrlKeyWordPair: ".mysql_error());
				}
			}
			return true;
		}
		else
		{
			return false;
		}
	}
	
	
	//markVisited changes the session field of the url with the crawl_id $id to 1,
		//marking it as visited.
	function markVisited($id)
	{
		$sql = "update crawl set session = 1 where crawl_id = ".
						mysql_real_escape_string($id);
		mysql_query($sql) or die("Error in markVisited: ".mysql_error());
	}

	//urlIsInDB checks if a url exists in the database.
	//Outputs: true if the url has a row in the crawl table, false otherwise.
	function urlIsInDB($url)
	{
		$sql = "select crawl_id from crawl where full_path = '".
						mysql_real_escape_string($url)."'";
		$result = mysql_query($sql) or die("Error in urlIsInDB: ".mysql_error());
		//any matching row at all means the url is already known
		return mysql_num_rows($result) > 0;
	}
	
	//modifiedSinceLastCrawl checks if the url with id $id has been modified since 
	//it was last crawled.
	//Inputs:
	//	id: the crawl_id of the url
	//	headers: the headers from the url
	//Outputs: false if a parseable Last-Modified header exists and its date is 
	//before we last crawled; true otherwise (no header, unparseable date, or 
	//genuinely newer content all count as "modified").
	function modifiedSinceLastCrawl($id, $headers)
	{
		//fetch when we last crawled this url
		$sql = "select date from crawl where crawl_id = ".
						mysql_real_escape_string($id)." limit 1";
		$result = mysql_query($sql) or die("Error in modifiedSinceLastCrawl: ".mysql_error());
		$row = mysql_fetch_assoc($result);
		$lastCrawled = strtotime($row['date']);
		
		$lastModified = 0;
		foreach ($headers as $header) 
		{
			//BUG FIX: HTTP header field names are case-insensitive (RFC 7230);
			//the original exact-substring check missed e.g. "last-modified:".
			if(strncasecmp($header, 'Last-Modified:', 14) == 0)
			{
				//guard: strtotime returns false for unparseable dates; keep 0
				//so we fall through to "assume modified" below
				$parsed = strtotime(trim(substr($header, 14)));
				if($parsed !== false)
				{
					$lastModified = $parsed;
				}
			}
		}
		
		//no usable Last-Modified header: assume the page changed
		if($lastModified == 0)
		{
			$lastModified = time();
		}
		
		return $lastModified > $lastCrawled;
	}
	
	//deleteKeywordsFrom deletes keywords that came from $url from the db, for use 
	//when we are re-crawling $url.
	function deleteKeywordsFrom($url)
	{
		$sql = "select crawl_id from crawl where full_path = '".
						mysql_real_escape_string($url)."'";
		$result = mysql_query($sql) or die("Error in deleteKeywordsFrom while finding crawl_id: ".mysql_error());
		
		//BUG FIX: the original fetched unconditionally; for an unknown url the
		//fetch returns false and the delete below became
		//"... where source_id = " (empty id), a SQL syntax error.
		if(mysql_num_rows($result) == 0)
		{
			return;
		}
		
		$row = mysql_fetch_assoc($result);
		$id = $row['crawl_id'];
		
		$sql = "delete from keyword where source_id = ".
						mysql_real_escape_string($id);
		$result = mysql_query($sql) or die("Error in deleteKeywordsFrom while deleting keyword: ".mysql_error());
	}

	//Robots.txt functions 
	//============================================================================
	
	//parseRobotsDotText tries to fetch a robots.txt file from $domain.  If one 
	//exists, it records the Disallow paths that apply to us (our agent or "*") 
	//in the global $disallowedPaths map, keyed by domain.
	function parseRobotsDotText($domain)
	{
		global $disallowedPaths;
		global $userAgentString;
		
		//always create the entry so robotsAllowed() won't refetch this domain
		$disallowedPaths[$domain] = array();
		
		$robotsDotText = file_get_contents(relToAbsolute("robots.txt", $domain));
		if(!$robotsDotText)
		{
			return;
		}
		
		//split the file into rule sections; $ruleses[0] is the text BEFORE the
		//first User-agent line, so section $i+1 belongs to agent $i below.
		$regex = "/User-agent: .*/";
		$ruleses = preg_split($regex, $robotsDotText);
		
		//BUG FIX: the original called preg_match here, which captures only the
		//FIRST User-agent line; with several agents the rules were mispaired
		//(off by one) and later agents were never seen.
		$regex= "/User-agent: (.*)/";
		preg_match_all($regex, $robotsDotText, $matches);
		$agents = $matches[1];
		
		for($i = 0; $i < count($agents); $i++)
		{
			$agent = trim($agents[$i]);
			//split this agent's section into lines.
			//(the original pattern /[\n|\r]/ also split on literal '|' — inside
			//a character class '|' is not alternation.)
			$rules = preg_split("/[\n\r]+/", $ruleses[$i + 1]);
			
			if($agent == "*" || 
				 substr($agent, 0, strlen($userAgentString)) == $userAgentString)
			{
				foreach($rules as $rule)
				{
					if(substr($rule, 0, 9) == "Disallow:")
					{
						$disallowedPath = trim(substr($rule, 9));
						//an empty Disallow value means everything is allowed
						if($disallowedPath == "")
						{
							break;
						}
						//store as an absolute path with a trailing slash so
						//robotsAllowed() can use a simple prefix comparison
						$disallowedPath = relToAbsolute($disallowedPath, $domain);
						if(substr($disallowedPath, strlen($disallowedPath) - 1) != "/")
						{
							$disallowedPath = $disallowedPath."/";
						}
						array_push($disallowedPaths[$domain], $disallowedPath); 
					}
				}
			}
		}
	}
	
	
	//robotsAllowed checks whether robots.txt permits us to crawl $url.
	function robotsAllowed($url)
	{	
		global $disallowedPaths;

		//normalize with a trailing slash so prefix comparisons line up
		if(substr($url, strlen($url) - 1) != "/")
		{
			$url = $url."/";
		}

		//lazily fetch and parse robots.txt the first time we meet a domain
		$domain = getDomain($url);
		if(!isset($disallowedPaths[$domain]))
		{
			parseRobotsDotText($domain);
		}

		//disallowed entries are absolute, slash-terminated prefixes
		foreach ($disallowedPaths[$domain] as $blocked)
		{
			if(substr(urldecode($url), 0, strlen($blocked)) == $blocked)
			{	
				logError("Robots not allowed at ".$url);
				return false;
			}
		}
		return true;
	}

	
	//Error-handling functions
	//============================================================================
	
	//handle404Warnings is used to suppress 404 errors, since they do not indicate 
		//exceptional behavior in a web crawler.  Especially since we are trying to 
		//fetch robots.txt (which may not exist) for every subdomain we encounter, 
		//it is useful to filter 404 warnings out of the output so that more 
		//relevant warnings/errors are more visible.
	function handle404Warnings($errorLevel, $errorString)
	{
		if($errorLevel == E_WARNING)
		{	
			$regex = "+file_get_contents\([^)]*\) \[<a href='function\.file-get-contents'>function\.file-get-contents</a>\]: failed to open stream: HTTP request failed! HTTP/1\.1 404 Not Found+";
			if(preg_match($regex, $errorString) > 0)
			{
				//signify that the error or warning is handled and don't print anything.
				return true;
			}
		}
		
		logError($errorString);
		//error or warning not handled, let php do its default.
		return false;
	}
	
	
?>