<?php

declare(strict_types=1);

// Bootstrap: load the NuSOAP library (SOAP client/server toolkit).
// Anchor the path to this file's directory with __DIR__ — a bare
// relative path ("../lib/...") resolves against the process's current
// working directory, so the include would fail whenever this script is
// run from the CLI or pulled in through another include chain.
require_once __DIR__ . "/../lib/nusoap/lib/nusoap.php";
//$client = new nusoap_client("http://localhost/spidercrawler/trunk/SpiderBot/WebServices/IndexerService.php");
//
//$error = $client->getError();
//if ($error) {
//    echo "<h2>Constructor error</h2><pre>" . $error . "</pre>";
//}
//$result = "";
//$result = $client->call("getProd", array("category" => "books"));
//$result2 = $client->call("getProduct");
//$result3 = $client->call("getAsh");
//$result4 = $client->call("getPublication", array("year" => "2010", "author" => "Herbert"));
//
//$result = $client->call("getProd", array("category" => "books", "author" => "rushdie")); //calling the SOAP method
////checks for fault
//if ($client->fault) {
//    echo "<h2>Fault</h2><pre>";
//    print_r($result);
//    echo "</pre>";
//} else {
//    $error = $client->getError();
//    if ($error) {
//        echo "<h2>Error</h2><pre>" . $error . "</pre>";
//    } else {
//        echo "<h2>Books</h2><pre>";
//        echo $result;
//        echo "</pre>";
//        echo "<hr/><h2> Project : " . $result2 . " </h2>";
//        echo "<hr/><h2> Name:" . $result3 . "</h2>";
//        echo "<hr/><h2> Publication:" . $result4 . "</h2>";
//    }
//}
// Now, create an instance of your class, define the behaviour
// of the crawler (see class-reference for more options and details)
// and start the crawling-process. 
//
//$crawler = new SpiderBot();
//$crawler->client = new nusoap_client("http://localhost/spidercrawler/trunk/SpiderBot/WebServices/IndexerService.php");
//$error = $crawler->client->getError();
//if ($error) {
//echo "<h2>Constructor error</h2><pre>" . $error . "</pre>";
//} else {
//    echo "Works just fine0";
//}
//// URL to crawl
//$crawler->setURL("http://localhost/spidercrawler/trunk/SpiderBot/WebServices/client_book.php");
//
//// Only receive content of files with content-type "text/html"
//$crawler->addContentTypeReceiveRule("#text/html#");
//
//// Ignore links to pictures, don't even request pictures
//$crawler->addURLFilterRule("#\.(jpg|jpeg|gif|png)$# i");
//
//// Store and send cookie-data like a browser does
//$crawler->enableCookieHandling(true);
//
//// Set the traffic-limit to 1 MB (in bytes,
//// for testing we don't want to "suck" the whole site)
//$crawler->setTrafficLimit(1000 * 1024);
//
//// That's enough, now here we go
//$crawler->go();
//
//// At the end, after the process is finished, we print a short
//// report (see method getProcessReport() for more information)
//$report = $crawler->getProcessReport();
//
//if (PHP_SAPI == "cli") {
//    $lb = "\n";
//} else {
//    $lb = "<br />";
//}
//
//echo "Summary:" . $lb;
//echo "Links followed: " . $report->links_followed . $lb;
//echo "Documents received: " . $report->files_received . $lb;
//echo "Bytes received: " . $report->bytes_received . " bytes" . $lb;
//echo "Process runtime: " . $report->process_runtime . " sec" . $lb;
