package br.ufpe.cin.rdfilter.main;

import java.net.URI;
import java.util.ArrayList;
import java.util.List;

import org.semanticweb.yars.nx.Node;
import org.semanticweb.yars.nx.Resource;
import br.ufpe.cin.rdfilter.config.CrawlerConfig;

import com.ontologycentral.ldspider.Crawler;
import com.ontologycentral.ldspider.frontier.BasicFrontier;
import com.ontologycentral.ldspider.frontier.Frontier;
import com.ontologycentral.ldspider.hooks.content.ContentHandler;
import com.ontologycentral.ldspider.hooks.content.ContentHandlerNx;
import com.ontologycentral.ldspider.hooks.content.ContentHandlerRdfXml;
import com.ontologycentral.ldspider.hooks.content.ContentHandlers;
import com.ontologycentral.ldspider.hooks.links.LinkFilter;
import com.ontologycentral.ldspider.hooks.links.LinkFilterSelect;
import com.ontologycentral.ldspider.hooks.sink.Sink;
import com.ontologycentral.ldspider.hooks.sink.SinkSparul;

/**
 * Manages the crawling process from a given seed list, using a
 * breadth-first approach with parameters defined by the system, and stores
 * the result in a specified triple store. This class is used to discover
 * new data sources on the Web of Data.
 *
 * @author Alberto
 */

public class CrawlingManager {

	/** LDSpider crawler instance that performs the breadth-first traversal. */
	private final Crawler crawler;

	/** Seed URIs used to initialize the crawl frontier. */
	private final List<URI> seeds;

	/**
	 * Creates a crawling manager for the given seed list.
	 *
	 * @param seeds the URIs the crawl starts from; must not be {@code null}
	 * @throws IllegalArgumentException if {@code seeds} is {@code null}
	 */
	public CrawlingManager(ArrayList<URI> seeds) {
		// Fail fast here rather than with an NPE inside executeCrawling().
		if (seeds == null) {
			throw new IllegalArgumentException("seeds must not be null");
		}
		this.crawler = new Crawler();
		this.seeds = seeds;
	}

	/**
	 * Executes the crawl: seeds the frontier, restricts link following to a
	 * fixed set of equivalence/reference predicates (owl:sameAs,
	 * rdfs:seeAlso, owl:equivalentClass), registers handlers for RDF/XML and
	 * NX content, directs all output triples to the SPARQL/Update endpoint
	 * configured in {@code CrawlerConfig.ENDPOINT_OUTPUT}, and crawls
	 * breadth-first bounded by the depth, URI and PLD limits from
	 * {@code CrawlerConfig}.
	 */
	public void executeCrawling() {

		// Seed the frontier with the URIs the crawl should start from.
		Frontier frontier = new BasicFrontier();
		for (URI seed : seeds) {
			frontier.add(seed);
		}

		// Predicates whose links the crawler is allowed to follow; all three
		// express identity or cross-reference between resources, which is how
		// new data sources are discovered.
		List<Node> predicates = new ArrayList<Node>();
		predicates.add(new Resource("http://www.w3.org/2002/07/owl#sameAs"));
		predicates.add(new Resource(
				"http://www.w3.org/2000/01/rdf-schema#seeAlso"));
		predicates.add(new Resource(
				"http://www.w3.org/2002/07/owl#equivalentClass"));

		// Configure the frontier and predicate whitelist as the link filter.
		// (The boolean flag is passed through to LinkFilterSelect; its exact
		// semantics are defined by the LDSpider API.)
		LinkFilter links = new LinkFilterSelect(frontier, predicates, true);
		crawler.setLinkFilter(links);

		// Accept both RDF/XML and NX serializations of crawled documents.
		ContentHandler contentHandler = new ContentHandlers(
				new ContentHandlerRdfXml(), new ContentHandlerNx());
		crawler.setContentHandler(contentHandler);

		// Store the crawl output in the configured SPARQL/Update endpoint.
		Sink sink = new SinkSparul(CrawlerConfig.ENDPOINT_OUTPUT, true);
		crawler.setOutputCallback(sink);

		// Start the breadth-first crawl with the configured limits.
		crawler.evaluateBreadthFirst(frontier, CrawlerConfig.DEPTH,
				CrawlerConfig.URI_LIMIT, CrawlerConfig.PLD_LIMIT);
	}
}