package br.rnp.linkcrawler;

import java.io.IOException;
import java.io.StringReader;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.List;
import java.util.Queue;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentLinkedQueue;

import javax.swing.text.html.HTMLEditorKit;

import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.ResponseHandler;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.BasicResponseHandler;
import org.apache.http.impl.client.DefaultHttpClient;

/**
 * Simple breadth-first web crawler.
 *
 * <p>Starting from a seed URL (see {@link #crawlFrom(URL)}), it repeatedly
 * fetches pages, extracts their links and follows those that match the
 * optional host ({@link #within(String)}) and path-prefix
 * ({@link #under(String...)}) restrictions, up to a fixed visit limit.
 * Progress is reported on {@code System.out}.
 */
public class Crawler {

	/** Safety cap on the number of pages fetched in a single crawl. */
	private static final int MAX_VISITS = 10000;

	/** Seed URL the crawl starts from. */
	private final URL url;
	/** If non-null, only links on this host are followed. */
	private String host;
	/** If set, only links whose path starts with one of these prefixes are followed. */
	private String[] paths;
	/** All links discovered so far (ordered/deduplicated by URLComparator). */
	private final SortedSet<URL> links = new TreeSet<URL>(new URLComparator());
	/** Pages already fetched, to avoid revisiting. */
	private final SortedSet<URL> visited = new TreeSet<URL>(new URLComparator());

	private Crawler(URL url) {
		this.url = url;
	}

	/**
	 * Restricts the crawl to links on the given host.
	 *
	 * @param host host name links must match; {@code null} means "same host
	 *             as the link itself" (i.e. no restriction)
	 * @return this crawler, for chaining
	 */
	public Crawler within(String host) {
		this.host = host;
		return this;
	}

	/**
	 * Restricts the crawl to links whose path starts with one of the given
	 * prefixes. Optional; when never called, all paths are followed.
	 *
	 * @param paths allowed path prefixes
	 * @return this crawler, for chaining
	 */
	public Crawler under(String... paths) {
		this.paths = paths;
		return this;
	}

	/**
	 * Runs the crawl and returns every link discovered.
	 *
	 * <p>Breadth-first traversal: pages are polled from a queue, their links
	 * accumulated into {@link #links}, and the unvisited ones that pass the
	 * host/path filters are enqueued. Stops when the queue is empty or
	 * {@value #MAX_VISITS} pages have been fetched.
	 *
	 * @return the sorted set of all links found (includes filtered-out ones)
	 */
	public SortedSet<URL> getLinks() {
		// Default to "any path" when under(...) was never called; previously
		// this method dereferenced a null array and threw NullPointerException.
		String[] allowedPaths = (paths == null || paths.length == 0)
				? new String[] { "" } : paths;

		Queue<URL> toVisit = new ConcurrentLinkedQueue<URL>();
		toVisit.add(url);
		while (!toVisit.isEmpty() && visited.size() < MAX_VISITS) {
			System.out.println("To Visit: " + toVisit.size() + " visited: "
					+ visited.size());
			URL current = toVisit.poll();
			List<URL> currentLinks = getLink(current);
			links.addAll(currentLinks);
			visited.add(current);
			// Drop links already fetched or already queued.
			currentLinks.removeAll(visited);
			// NOTE(review): contains() on the queue uses URL.equals, which can
			// trigger DNS resolution per comparison — consider a wrapper type.
			currentLinks.removeAll(toVisit);
			for (URL candidate : currentLinks) {
				// No host restriction means "stay wherever the link points".
				String allowedHost = (host == null) ? candidate.getHost() : host;
				for (String path : allowedPaths) {
					if (candidate.getPath().startsWith(path)
							&& candidate.getHost().equals(allowedHost)) {
						toVisit.add(candidate);
						break;
					}
				}
			}
		}
		return links;
	}

	/**
	 * Fetches a single page over HTTP and parses the links out of its body.
	 *
	 * <p>Best-effort: on protocol or I/O failure the error is reported to
	 * {@code System.out} and whatever links were parsed so far are returned
	 * (possibly an empty list). The HTTP connection is always shut down.
	 *
	 * @param url page to fetch
	 * @return links extracted by {@link LinkParserCallback}
	 * @throws IllegalArgumentException if {@code url} is not a valid URI
	 */
	private static List<URL> getLink(URL url) {
		HttpClient httpclient = new DefaultHttpClient();
		HTMLEditorKit.Parser parser = new HTMLParser().getParser();
		LinkParserCallback callback = new LinkParserCallback(url);
		ResponseHandler<String> responseHandler = new BasicResponseHandler();
		try {
			HttpGet httpget;
			try {
				httpget = new HttpGet(url.toURI());
			} catch (URISyntaxException e) {
				// Preserve the cause so the offending URI can be diagnosed.
				throw new IllegalArgumentException(
						"Link inicial não é uma URI válida.", e);
			}
			System.out.println("executing request " + httpget.getURI());
			String responseBody = httpclient.execute(httpget, responseHandler);
			parser.parse(new StringReader(responseBody), callback, true);
		} catch (ClientProtocolException e) {
			// Include the URL and exception instead of silently dropping them.
			System.out.println("Erro ao processar o link " + url + ": " + e);
		} catch (IOException e) {
			System.out.println("Erro de I/O ao processar o link " + url + ": " + e);
		} finally {
			// Always release the connection, even on failure.
			httpclient.getConnectionManager().shutdown();
			System.out.println("closed connection");
		}
		return callback.getLinks();
	}

	/**
	 * Static factory: creates a crawler seeded at the given URL.
	 *
	 * @param url starting page of the crawl
	 * @return a new, unconfigured crawler
	 */
	public static Crawler crawlFrom(URL url) {
		return new Crawler(url);
	}
}
