<?php

/*
 * Crawler.php
 * 
 * Copyright 2013 Kaushal L R <exp10r3r@vyadhamarg.info>
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 * 
 * 
 */

// Crawling a site may take a while, so raise the script's execution
// time limit; this script is allowed to run for up to this many seconds.
set_time_limit(10000);

// Include the NuSOAP client, the PHPCrawl main class and the Simple HTML DOM parser
require_once "../lib/nusoap/lib/nusoap.php";
include("../lib/PHPCrawl/libs/PHPCrawler.class.php");
require_once '../lib/simple_html_dom.php';

/**
 * Configuration for: Error reporting
 * Useful to show every little problem during development, but only show hard errors in production
 */
//error_reporting(E_ALL);
//ini_set("display_errors", 1);

/**
 * This Class extends the PHPCrawler class and has been implemented to run over Web Services.
 */
class SpiderBot extends PHPCrawler {

    /**
     * Called by PHPCrawl once for every unique URL it discovers.
     *
     * Re-fetches the page text with Simple HTML DOM and passes the plain
     * text together with the URL to the remote Indexer web service
     * (SOAP operation "indexDocument") for indexing. Progress is echoed
     * as it goes so the crawl can be watched live.
     *
     * @param PHPCrawlerDocumentInfo $DocInfo Document descriptor supplied by
     *        PHPCrawl (URL, referer, HTTP status, bytes received, etc.).
     */
    function handleDocumentInfo(PHPCrawlerDocumentInfo $DocInfo) {
        // A fresh SOAP client per document; the WSDL describes the indexer service.
        $client = new nusoap_client("http://spiderbot.local/WebServices/IndexerService.php?wsdl", true);
        $error = $client->getError();
        if ($error) {
            // Escape: the error text may contain markup-significant characters.
            echo "<h2>Constructor error</h2><pre>" . htmlspecialchars($error) . "</pre>";
        }

        // Linebreak depends on SAPI: "\n" in CLI mode, otherwise "<br />".
        if (PHP_SAPI === "cli") {
            $lb = "\n";
        } else {
            $lb = "<br />";
        }

        // Print the URL and the HTTP status code.
        // Crawled URLs are untrusted input -> escape before echoing into HTML.
        echo "Page requested: " . htmlspecialchars($DocInfo->url) . " (" . $DocInfo->http_status_code . ")" . $lb;

        // Print the referring URL (also untrusted).
        echo "Referer-page: " . htmlspecialchars($DocInfo->referer_url) . $lb;

        // Only index documents whose content was actually received.
        if ($DocInfo->received === true) {
            echo "Content received: " . $DocInfo->bytes_received . " bytes" . $lb;
            echo '<b> Calling indexer</b>';

            // file_get_html() returns false on failure; guard before
            // dereferencing ->plaintext (would be a fatal error on false).
            $html = file_get_html($DocInfo->url);
            if ($html === false) {
                echo "Could not re-fetch document for indexing" . $lb;
            } else {
                $result = $client->call("indexDocument", array(
                    "documentData" => $html->plaintext,
                    "url" => $DocInfo->url,
                ));
                if ($client->fault) {
                    echo '<br/><b> Fault occurred</b>';
                } else {
                    $err = $client->getError();
                    if ($err) {
                        // Display the SOAP error (escaped).
                        echo '<h2>Error</h2><pre>' . htmlspecialchars($err) . '</pre>';
                    } else {
                        // Display the service result.
                        echo '<h2>Result</h2><pre>';
                        print_r($result);
                        echo '</pre>';
                    }
                }
            }
        } else {
            echo "Content not received" . $lb;
        }

        echo $lb;

        // Flush output so progress is visible while the crawl is still running.
        flush();
    }

}

// Create an instance of the crawler, configure its behaviour
// (see the PHPCrawler class reference for more options and details)
// and start the crawling process.

$crawler = new SpiderBot();

// URL to crawl: comes from the POST request, so it is untrusted input.
// Validate it before handing it to the crawler (previously an absent or
// malformed value was passed through unchecked).
$startUrl = isset($_POST['URL']) ? trim($_POST['URL']) : '';
if (filter_var($startUrl, FILTER_VALIDATE_URL) === false) {
    die("Invalid or missing URL parameter.");
}
$crawler->setURL($startUrl);

// Only receive content of files with content-type "text/html".
$crawler->addContentTypeReceiveRule("#text/html#");

// Ignore links to pictures; don't even request them.
$crawler->addURLFilterRule("#\.(jpg|jpeg|gif|png)$# i");

// Store and send cookie-data like a browser does.
$crawler->enableCookieHandling(true);

// Cap the traffic pulled from the site at 500 * 128 = 64 000 bytes
// (~62.5 KB) — for testing we don't want to "suck" the whole site.
$crawler->setTrafficLimit(500 * 128);

// Escape the user-supplied URL before reflecting it into the page.
echo "<b>Indexing : " . htmlspecialchars($startUrl) . "</b><br/>";

// That's enough configuration — start crawling.
$crawler->go();

// After the process has finished, print a short report
// (see getProcessReport() for more information).
$report = $crawler->getProcessReport();

// Linebreak depends on SAPI: "\n" in CLI mode, otherwise "<br />".
if (PHP_SAPI === "cli") {
    $lb = "\n";
} else {
    $lb = "<br />";
}

echo "Summary:" . $lb;
echo "Links followed: " . $report->links_followed . $lb;
echo "Documents received: " . $report->files_received . $lb;
echo "Bytes received: " . $report->bytes_received . " bytes" . $lb;
echo "Process runtime: " . $report->process_runtime . " sec" . $lb;