package com.book.inteliget.crawler;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;

import com.book.inteliget.readMain.RDFDistiller;
import com.hp.hpl.jena.rdf.model.Model;
import com.hp.hpl.jena.rdf.model.ModelFactory;

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;

/**
 * Crawler restricted to Goodreads "book/show" pages. For each visited page it
 * logs basic page statistics and distills the embedded RDFa into a Jena RDF
 * model, which is printed to stdout in Turtle syntax.
 */
public class MyCrawler extends WebCrawler {
	/** Static-resource extensions (media, styles, archives) that are never crawled. */
	private static final Pattern FILTERS = Pattern
			.compile(".*(\\.(css|js|bmp|gif|jpe?g"
					+ "|png|tiff?|mid|mp2|mp3|mp4"
					+ "|wav|avi|mov|mpeg|ram|m4v|pdf"
					+ "|rm|smil|wmv|swf|wma|zip|rar|gz))$");

	/**
	 * Decides whether the given URL should be crawled: only Goodreads
	 * book pages that are not static media resources.
	 *
	 * @param url candidate URL discovered by the crawler
	 * @return {@code true} if the URL should be visited
	 */
	@Override
	public boolean shouldVisit(WebURL url) {
		String href = url.getURL().toLowerCase();
		return !FILTERS.matcher(href).matches()
				&& href.startsWith("http://www.goodreads.com/book/show");
	}

	/**
	 * Distills the RDFa embedded at {@code url} into an RDF/XML stream, loads
	 * it into a Jena model, and writes the model to stdout as Turtle.
	 * Failures are logged and swallowed so one bad page cannot abort the crawl.
	 *
	 * @param url page URL whose RDFa should be distilled
	 */
	public void runMain(String url) {
		InputStream resultsStream = null;
		try {
			resultsStream = RDFDistiller.distillRDF(url, "rdfa", "rdfxml");

			Model rdfGraph = ModelFactory.createDefaultModel();
			rdfGraph.read(resultsStream, null);

			rdfGraph.write(System.out, "TURTLE");
		} catch (Exception e) {
			// Best-effort per page: report the failure but keep crawling.
			e.printStackTrace();
		} finally {
			// The distiller stream was previously leaked; always close it.
			if (resultsStream != null) {
				try {
					resultsStream.close();
				} catch (IOException ignored) {
					// Nothing useful to do if close itself fails.
				}
			}
		}
	}

	/**
	 * Called by crawler4j when a page has been fetched and parsed. Logs page
	 * statistics for HTML pages and triggers RDF distillation for the URL.
	 *
	 * @param page the fetched page, ready for processing
	 */
	@Override
	public void visit(Page page) {
		String url = page.getWebURL().getURL();
		if (page.getParseData() instanceof HtmlParseData) {
			HtmlParseData htmlParseData = (HtmlParseData) page.getParseData();
			String text = htmlParseData.getText();
			String html = htmlParseData.getHtml();
			List<WebURL> links = htmlParseData.getOutgoingUrls();
			System.out.println("Text length: " + text.length());
			System.out.println("Html length: " + html.length());
			System.out.println("Number of outgoing links: " + links.size());
			System.out.println("Url: " + url);
			System.out.println("RDF: ");
			runMain(url);
		}
	}
}
