<?php
require_once '../utils/Validator.php';
require_once '../classes/WCException.php';

/**
 * Scraper class is used to 'scrape' a url for content and return the content either as a list of values
 * or as a XML document.
 * 
 * @author daniel.klco
 *
 */
class Scraper{
	//TODO: Need to be able to configure the scraper after creating it.

	// URL most recently validated and passed to scrape()
	private $url;
	// User agent string sent with the cURL request; Googlebot by default
	private $userAgent = 'Googlebot/2.1 (http://www.googlebot.com/bot.html)';
	// Map of friendly names to full user agent strings, selectable via the constructor
	private $USER_AGENT = array('Google_Image'=>'Googlebot-Image/1.0 ( http://www.googlebot.com/bot.html)',
		'MSN_Live'=>'msnbot-Products/1.0 (+http://search.msn.com/msnbot.htm)',
		'Yahoo'=>'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
		'Firefox'=>'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6',
		'IE7'=>'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
		'IE6'=>'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
		'Safari'=>'Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en) AppleWebKit/522.11 (KHTML, like Gecko) Safari/3.0.2',
		'Opera'=>'Opera/9.00 (Windows NT 5.1; U; en)');
	// cURL behaviour flags, applied verbatim in scrape()
	private $failOnError=true;
	private $followLocation=true;
	private $autoReferer=true;
	private $returnTransfer=true;
	// Request timeout in seconds
	private $timeOut=10;
	// Raw HTML captured by the last successful scrape(); unset by free()
	private $html;

	/**
	 * Public constructor, takes a string representation of the user agent to use as well as
	 * additional attributes to configure how cURL runs (not implemented yet).
	 *
	 * Replaces the legacy PHP4-style `Scraper()` constructor, which is no longer invoked
	 * on `new` since PHP 8. Unknown/omitted user agent names fall back to the default.
	 *
	 * @param $ua string|null key into the USER_AGENT map (e.g. 'Firefox', 'Yahoo')
	 * @param $additional_attributes array|null reserved for future cURL configuration
	 */
	public function __construct($ua=null,$additional_attributes=null){
		// BUG FIX: the original read a local $USER_AGENT, so the selected
		// agent was never applied; the property must be read via $this.
		if($ua !== null && isset($this->USER_AGENT[$ua])){
			$this->userAgent = $this->USER_AGENT[$ua];
		}
		//TODO: Set additional attributes
		//setAdditionalAttributes();
	}

	/**
	 * Unsets the html attribute to save memory.
	 *
	 * @return void
	 */
	public function free(){
		unset($this->html);
	}

	/**
	 * Performs the scrape of the website and saves the results into the internal 'html' attribute.
	 *
	 * @param $target_url string URL to fetch; validated via Validator::isURL
	 * @return boolean true on success
	 * @throws WCException with CURL_BAD_URL for an invalid URL, or with the cURL error
	 *         number/message when the transfer fails
	 */
	public function scrape($target_url){
		if(Validator::isURL($target_url)){
			$this->url = $target_url;
		}else{
			throw new WCException(WCException::CURL_BAD_URL,'Error: Invalid URL "'.$target_url.'" specified.');
		}

		//TODO: Set additional attributes
		//setAdditionalAttributes();

		// make the cURL request to $target_url
		$ch = curl_init();
		curl_setopt($ch,CURLOPT_USERAGENT,$this->userAgent);
		curl_setopt($ch,CURLOPT_URL,$this->url);
		curl_setopt($ch,CURLOPT_FAILONERROR,$this->failOnError);
		curl_setopt($ch,CURLOPT_FOLLOWLOCATION,$this->followLocation);
		curl_setopt($ch,CURLOPT_AUTOREFERER,$this->autoReferer);
		curl_setopt($ch,CURLOPT_RETURNTRANSFER,$this->returnTransfer);
		curl_setopt($ch,CURLOPT_TIMEOUT,$this->timeOut);
		$this->html=curl_exec($ch);

		// With CURLOPT_RETURNTRANSFER enabled, curl_exec returns false on failure.
		// Compare strictly so an empty (or "0") body is not misreported as an error,
		// and close the handle on both paths to avoid leaking it.
		if ($this->html === false){
			$errno = curl_errno($ch);
			$error = curl_error($ch);
			curl_close($ch);
			throw new WCException($errno,'Error retrieving HTML: '.$error);
		}
		curl_close($ch);
		return true;
	}

	/**
	 * Returns a node list of the elements matching the provided xpath.
	 *
	 * @param $xpath_str string XPath expression evaluated against the scraped HTML
	 * @return DOMNodeList - nodes matching the xpath in the html stored from the scrape
	 * @throws WCException with CURL_BAD_HTML if no HTML has been loaded (or free() was called)
	 */
	public function getNodeList($xpath_str){
		// empty() also covers the property having been unset() by free(),
		// without triggering an undefined-property warning.
		if(empty($this->html)){
			throw new WCException(WCException::CURL_BAD_HTML,'Error: HTML not loaded.');
		}

		// parse the html into a DOMDocument; real-world HTML is rarely valid,
		// so libxml parse warnings are suppressed
		$dom = new DOMDocument();
		@$dom->loadHTML($this->html);

		// evaluate the xpath against the parsed document
		$xpath = new DOMXPath($dom);
		return $xpath->evaluate($xpath_str);
	}

	/**
	 * Returns a new DOMDocument containing the elements matching the provided xpath,
	 * wrapped under a root element named 'result'.
	 *
	 * @param $xpath_str string XPath expression
	 * @return DOMDocument
	 * @throws WCException propagated from getNodeList when no HTML is loaded
	 */
	public function getXML($xpath_str){
		// getNodeList throws if the site has not been scraped; any such error
		// is intentionally left to the caller to handle.
		$nodeList = $this->getNodeList($xpath_str);

		$newDom = new DOMDocument('1.0','UTF-8');
		$root = $newDom->createElement('result');
		$root = $newDom->appendChild($root);

		// append all nodes from $nodeList to the new dom, as children of $root
		// (importNode is required to move nodes between documents)
		foreach($nodeList as $domElement){
		   $domNode = $newDom->importNode($domElement, true);
		   $root->appendChild($domNode);
		}
		return $newDom;
	}

	/**
	 * Returns an array of strings representing the text values of the elements
	 * matching the provided xpath.
	 *
	 * @param $xpath_str string XPath expression
	 * @return array of string node values
	 * @throws WCException propagated from getNodeList when no HTML is loaded
	 */
	public function getValues($xpath_str){
		$nodeList = $this->getNodeList($xpath_str);

		$strings = array();
		foreach($nodeList as $domElement){
		   // BUG FIX: DOMNode exposes the text content as the nodeValue
		   // property; node_value() does not exist and fataled here.
		   $strings[] = $domElement->nodeValue;
		}
		return $strings;
	}
}
?>