package crawler;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

import rdfdistiller.RDFDistiller;
import sparql.QueryService;

import com.hp.hpl.jena.rdf.model.Model;
import com.hp.hpl.jena.rdf.model.ModelFactory;

import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;

public class Crawler extends WebCrawler {
	// Sequence number used to name output files ("Recipe<i>.rdf").
	int i = 0;
	// NOTE(review): 'm' and 'b' are never read or written anywhere in this
	// class; kept only because they are package-visible — confirm they are
	// unused elsewhere in the package, then remove.
	int m = 0;
	boolean b = false;

	/** File extensions we never want to fetch (styles, scripts, media, archives). */
	private final static Pattern FILTERS = Pattern
			.compile(".*(\\.(css|js|bmp|gif|jpe?g"
					+ "|png|tiff?|mid|mp2|mp3|mp4"
					+ "|wav|avi|mov|mpeg|ram|m4v|pdf"
					+ "|rm|smil|wmv|swf|wma|zip|rar|gz))$");

	/**
	 * Decides whether the given url should be crawled: only URLs under the
	 * hard-coded food.com recipe prefix that do not match the binary/media
	 * extension filter.
	 */
	@Override
	public boolean shouldVisit(WebURL url) {
		String href = url.getURL().toLowerCase();
		return !FILTERS.matcher(href).matches()
				&& href.startsWith("http://www.food.com/recipe/creamy-c");
	}

	public Crawler() {
		super();
	}

	/**
	 * Called when a page is fetched and ready to be processed. Runs the RDF
	 * distiller on HTML pages whose parse data does not mention "photo",
	 * skipping review, slideshow and malformed ("null") URLs.
	 */
	@Override
	public void visit(Page page) {
		// This URL is what we pass to the Distiller.
		String url = page.getWebURL().getURL();

		// Only HTML pages; crude content-based filter to skip photo pages.
		if (page.getParseData() instanceof HtmlParseData
				&& !page.getParseData().toString().contains("photo")) {

			// More specific filtering to exclude review pages and pages
			// with a slideshow, which share our URL prefix.
			if (!url.contains("review") && !url.contains("null")
					&& !url.contains("slideshow")) {
				System.out.println("URL" + url);
				getUrls(url);
			} else {
				// Progress marker for skipped pages.
				System.out.print(".");
			}
		}
	}

	/**
	 * Calls the Distiller for the specified url, parses the resulting RDF/XML
	 * into a Jena model and writes it to the next numbered "Recipe<i>.rdf"
	 * file. The file counter only advances after a successful write.
	 *
	 * @param url the page URL to distill
	 */
	public void getUrls(String url) {
		InputStream resultsStream = null;
		FileOutputStream out = null;
		try {
			resultsStream = RDFDistiller.distillRDF(url, "rdfa", "rdfxml");

			Model rdfGraph = ModelFactory.createDefaultModel();
			rdfGraph.read(resultsStream, null, "RDF/XML");

			// Saving distilled data to a file.
			out = new FileOutputStream("Recipe" + i + ".rdf");
			rdfGraph.write(out);
			i++;
		} catch (Exception e) {
			// Broad catch kept deliberately: one bad page must not abort the
			// whole crawl. TODO: replace printStackTrace with real logging.
			e.printStackTrace();
		} finally {
			// The original version leaked both streams on every call.
			closeQuietly(resultsStream);
			closeQuietly(out);
		}
	}

	/** Best-effort close: ignores null and any failure during close(). */
	private static void closeQuietly(java.io.Closeable c) {
		if (c != null) {
			try {
				c.close();
			} catch (Exception ignored) {
				// nothing useful to do on close failure
			}
		}
	}
}
