<?php

/**
 * @package Analyser
 * @author Yves Peeters
 * @todo make ini file work, use Yaml?
 * @todo add option to import all known links from google
 * @todo add option to import all links from sitemap.xml file
 */
/*
  $ini = parse_ini_file("config.ini",true);

  require_once ($ini['Classes']['curl']);
  require_once ($ini['Classes']['pagespider']);
  require_once ($ini['Classes']['url']);
  require_once ($ini['Classes']['dbspider']);
  require_once ($ini['Classes']['link']);
  require_once ($ini['Classes']['memory']);
  unset($ini);
 */
include_once "PHP_Curl_Crawler.php";
include_once "PageSpider.php";
include_once "SpiderDB.php";
include_once "Url.php";
include_once "Link.php";
include_once "MemoryUsage.php";
include_once "simple_html_dom.php";
include_once "Pdo.php";
include_once "Logger.php";

/**
 * Spiders one or more websites for urls
 * 
 * Takes a url and spidertype. <br />
 * 
 * A pagequeue is created in the database and the url is added.
 * Spider then parses the first page in the queue (the url that was received as parameter) and adds all found links to the queue.
 * After that spider looks for the next unparsed page and adds the result to the back of the queue.
 * This repeats until the entire queue has been checked.
 * 
 * The script can take some time to finish. It is advised to give php more memory and time with<br />
 * <code>
 * ini_set("memory_limit","xxxM");
 * set_time_limit(xxx);// time in seconds
 * </code>
 * Example:
 * <code>
 * $url = 'http://code.google.com/p/siteresearch/';
 * $scraper = new Spider($url);
 * </code>
 * @todo create class to setup spider and database (installer)
 * @package Analyser
 */
class Spider {

    /**
     * Holds the start url; also serves as the domain boundary when
     * $spiderType is 'domain'.
     * @var String
     */
    private $domain;
    /**
     * Valid spidering modes: a single domain, or the entire internet
     * (not advised). All spider options must be listed in this array.
     * @var Array
     */
    private $spiderTypeOptions = array('domain', 'world');
    /**
     * Currently active spider type (one of $spiderTypeOptions).
     * @var String
     */
    private $spiderType = "domain";
    /**
     * Url of the page currently being parsed.
     * @var String
     */
    private $current_page;
    /**
     * Database access object holding the page queue.
     * @var SpiderDB
     */
    private $db;
    /**
     * When true, index file names (see $Indexes) are stripped from found urls.
     * @var boolean
     */
    private $stripIndexfromUrl = false;

    /**
     * When true, query-string parameters are removed from found urls.
     * @var boolean
     */
    private $removeparameters = true;
    /**
     * File names treated as directory indexes (candidates for stripping).
     * @var array
     */
    private $Indexes = array('index.php', 'index.aspx', 'index.asp', 'index.html', 'index.htm');
    /**
     * Logger instance writing to logs/spiderlog.txt.
     * @var Logger
     */
    private $logger;

    /**
     * Validate parameters and start the spider.
     *
     * Seeds the database queue with $url and immediately begins crawling.
     * {@source}
     * @todo remove dependencys (inject SpiderDB/Logger instead of constructing them here)
     * @param String $url        start url (also the domain boundary for 'domain' spidering)
     * @param String $spiderType one of $spiderTypeOptions ('domain' or 'world')
     * @todo spiderType should support subdomains as well
     */
    public function __construct($url, $spiderType = 'domain') {
        $this->logger = new Logger("logs/spiderlog.txt");
        // strict comparison: only the exact option strings are accepted
        if (in_array($spiderType, $this->spiderTypeOptions, true) && is_string($url)) {
            // BUG FIX: the original assigned to non-existent dynamic properties
            // ($this->setTpiderType / $this->setDomain) instead of calling the
            // setters, so $spiderType and $domain were never actually stored and
            // the domain filter in validateUrls() ran against a null domain.
            $this->setSpiderType($spiderType);
            $this->setDomain($url);
            $this->db = new SpiderDB(); // remove dependency
            $this->db->initSpider();
            $this->addUrls(array($url), "1"); // seed the queue with the start url
            $this->startSpider();
        } else {
            $this->logger->log("wrong  parameters");
        }
    }

    public function getDomain() {
        return $this->domain;
    }

    public function setDomain($domain) {
        $this->domain = $domain;
    }

    public function getSpiderTypeOptions() {
        return $this->spiderTypeOptions;
    }

    public function setSpiderTypeOptions($spiderTypeOptions) {
        $this->spiderTypeOptions = $spiderTypeOptions;
    }

    public function getSpiderType() {
        return $this->spiderType;
    }

    public function setSpiderType($spiderType) {
        $this->spiderType = $spiderType;
    }

    public function getCurrent_page() {
        return $this->current_page;
    }

    public function setCurrent_page($current_page) {
        $this->current_page = $current_page;
    }

    public function getDb() {
        return $this->db;
    }

    public function setDb($db) {
        $this->db = $db;
    }

    public function getStripIndexfromUrl() {
        return $this->stripIndexfromUrl;
    }

    public function setStripIndexfromUrl($stripIndexfromUrl) {
        $this->stripIndexfromUrl = $stripIndexfromUrl;
    }

    public function getRemoveparameters() {
        return $this->removeparameters;
    }

    public function setRemoveparameters($removeparameters) {
        $this->removeparameters = $removeparameters;
    }

    public function getIndexes() {
        return $this->Indexes;
    }

    public function setIndexes($Indexes) {
        $this->Indexes = $Indexes;
    }

    /**
     * initialise DB connection (currently a no-op; SpiderDB is set up
     * directly in the constructor)
     * {@source}
     */
    private function initDB() {
        //$this->db->init();
    }

    /**
     * Main spider loop: keeps pulling unparsed urls from the queue until
     * it is empty.
     *
     * For each url: if the page exists (HTTP 200) its links are extracted
     * and queued; if it redirects (301) the redirect target is queued;
     * otherwise the url is flagged as bad.
     * {@source}
     */
    private function startSpider() {

        $this->logger->log("run spider");
        while (count($this->db->getUrlsToDo()) > 0) { // check db for urls that have not been parsed
            $this->logger->log("found unparsed urls");
            $this->current_page = $this->db->getUrlToWorkOn(); // get a page to parse
            $this->logger->log("spider " . $this->current_page);
            $crwlr = new PHP_Curl_Crawler();
            $crwlr->init();

            if ($crwlr->pageExists($this->current_page)) { // check if the page exists
                $this->db->setResponsHeader(200, $this->current_page);
                $this->db->setUrlAsGood($this->current_page); // set found in db to true
                $crwlr->close();

                $crwlr->init();
                $page = new PageSpider($crwlr->getContent($this->current_page)); // get page content
                $crwlr->close();

                unset($crwlr);

                $this->addUrls($page->getPageUrls(), $this->current_page); // add found urls to queue
            } else {
                // PERF FIX: the original called pageMoved() twice in a row
                // (two HTTP requests); fetch the result once and reuse it.
                $moved = $crwlr->pageMoved($this->current_page);
                $this->logger->log("page doesn't exists");
                $this->logger->log("page moved: " . $moved);
                if ($moved) { // current url has a redirect header: queue the redirect location
                    $this->db->setResponsHeader(301, $this->current_page);
                    $newurl = array($crwlr->getRedirectLocation($this->current_page));
                    $this->addUrls($newurl, $this->current_page); // add found urls to queue
                    // BUG FIX: the original interpolated the array itself
                    // (logging the literal string "Array"); log the target url.
                    $this->logger->log("moved url points to {$newurl[0]}");
                    //@todo add redirect to seperate table for easy access.
                } else {
                    $this->db->setUrlAsBad($this->current_page); // set found in db to false
                }
                // LEAK FIX: the original never closed the curl handle on this
                // path; release it before the next iteration.
                $crwlr->close();
                unset($crwlr);
            }
        }
    }

    /**
     * Filters, deduplicates and queues a list of urls found on $page.
     *
     * Urls already done or already queued are skipped.
     * {@source}
     * @param Array  $urls urls found on the page
     * @param String $page url of the page they were found on ("1" for the seed)
     */
    private function addUrls($urls, $page) {
        if (!is_array($urls)) { // idiomatic replacement for the (array)$urls !== $urls trick
            echo "addUrls expects array";
        } else {
            if ($this->removeparameters) {
                $urls = $this->removeParameters($urls);
            }
            $urls = $this->validateUrls($urls);
            $done = $this->db->getUrlsDone();
            $todo = $this->db->getUrlsToDo();
            $urls = array_diff($urls, $done, $todo); // get list of urls that have not been checked
            $urls = array_unique($urls); // keep only the unique urls
            $this->db->addUrls($urls, $page);
        }
    }

    /**
     * returns one url to start scraping
     * {@source}
     * @return String
     */
    private function getUrlToWorkOn() {
        $todo = $this->db->getUrlToWorkOn();
        //echo (MemoryUsage::getUsage()),$todo,"\n";
        return $todo;
    }

    /**
     * Removes all query parameters from each url (everything from the
     * ? symbol onwards), delegating to Url::removeParameters().
     * @param array $urls
     * @return array
     */
    private function removeParameters($urls) {
        $returnarray = array();
        foreach ($urls as $url) {
            $strippedurl = Url::removeParameters($url);
            array_push($returnarray, $strippedurl);
        }
        return $returnarray;
    }

    /**
     * Validates and returns a proper list of urls (array):
     * makes urls absolute instead of relative<br />
     * optionally strips index file names from urls<br />
     * checks that the url is in our domain (unless spiderType is 'world')<br />
     * keeps only urls that point to pages (per checkIfLink::is_page)<br />
     * {@source}
     * @param Array $urls
     * @return Array
     */
    private function validateUrls($urls) {
        //@todo remove stripindex from spider and place it in url class
        if ($this->stripIndexfromUrl) {
            $temparray = array();
            foreach ($urls as $url) {
                foreach ($this->Indexes as $index) {
                    $url = str_replace($index, "", $url);
                }
                array_push($temparray, $url);
            }
            $urls = $temparray;
        }
        $validurls = array();
        foreach ($urls as $url) {
            // resolve relative urls against the page they were found on
            array_push($validurls, Url::relative_to_abolute_url($this->current_page, $url));
        }
        if ($this->spiderType == 'domain') {
            $validurls = Url::remove_urls_not_in_domain($validurls, $this->domain);
        }
        $validpages = array();
        foreach ($validurls as $page) {
            if (checkIfLink::is_page($page)) {
                array_push($validpages, $page);
            }
        }
        unset($validurls);
        return $validpages;
    }

}

?>
