package com.google.code.jcrawler.main;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.httpclient.HttpException;

import com.google.code.jcrawler.curl.CurlRequest;
import com.google.code.jcrawler.output.SitemapOutput;

public class Main {
	// URLs already discovered; TreeSet keeps the final sitemap output sorted.
	static Set<String> indexedUrl = new TreeSet<String>();
	// Number of times walk() has been entered — used only for progress logging.
	static long recursionLevel = 0;
	// Base host being crawled, e.g. "http://example.com" (no trailing slash).
	static String host = "";

	/**
	 * Matches {@code href=...} attributes while excluding static assets
	 * (css/js/images/pdf) and hrefs preceded by "?", "<!", "&" or "fb".
	 * Compiled once instead of on every {@link #matchesHref} call, since the
	 * crawl invokes it recursively for every discovered page.
	 */
	private static final Pattern HREF_PATTERN = Pattern
			.compile("\\s*(?i)(?<!\\b(\\?|\\<\\!|\\&|fb)\\b)(?!\\b(.*css|.*js|.*gif|.*ico|.*jpg|.*png|.*bmp|.*pdf)\\b)href\\s*=\\s*(\"([^\"]*\")|'[^']*'|([^'\">\\s]+))");

	/**
	 * Main entry of the OpenJcrawler.
	 *
	 * @param args args[0] = host that you want to crawl, without the trailing "/";
	 *             args[1] = destination folder for the generated sitemap.xml
	 * @throws HttpException if an HTTP-level error occurs while crawling
	 * @throws IOException   if a network or file I/O error occurs
	 */
	public static void main(String[] args) throws HttpException, IOException {
		// Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
		if (args.length < 2) {
			System.err.println("Usage: Main <host-without-trailing-slash> <output-folder>");
			return;
		}
		host = args[0];
		String home = host.concat("/");
		indexedUrl.add(home);
		walk(home);
		System.out.println(indexedUrl.size());
		new SitemapOutput(home).exportToFile(indexedUrl, args[1]);
	}

	/**
	 * Recursively crawls {@code path}: fetches it, extracts hrefs, normalizes
	 * each one to an absolute URL under {@link #host}, and recurses into every
	 * URL not seen before (deduplicated via {@link #indexedUrl}).
	 *
	 * @param path absolute URL of the page to crawl
	 * @throws HttpException if an HTTP-level error occurs
	 * @throws IOException   if a network I/O error occurs
	 */
	public static void walk(String path) throws HttpException, IOException {
		Set<String> listFound = matchesHref(path, host);
		System.out.println("-------------------------------------------");
		System.out.println("Recursao Level : " + (++recursionLevel));
		System.out.println("-------------------------------------------");
		for (String s : listFound) {
			// Turn relative hrefs into absolute URLs rooted at host.
			if (!s.startsWith(host)) {
				if (!s.startsWith("/")) {
					// Relative to the current page: resolve against the parent
					// directory of path (strip the last path segment).
					String pathTemp = path;
					if (!path.endsWith("/")) {
						if (!path.equals(host)) {
							pathTemp = path.substring(0, path.lastIndexOf("/"));
						}
						s = "/".concat(s);
					}
					s = pathTemp.concat(s);
				} else {
					// Root-relative href: prepend the host.
					s = host.concat(s);
				}
			}
			// Set.add returns true only for URLs not indexed yet, so each page
			// is crawled at most once.
			if (indexedUrl.add(s)) {
				System.out.println("Nova Url:" + s);
				walk(s);
			}
		}
	}

	/**
	 * Fetches {@code path} and returns the set of candidate href targets found
	 * in its body. Links to other hosts, javascript:, fragments ("#") and
	 * backslash-prefixed values are discarded; links starting with {@code host}
	 * are returned relative to it.
	 *
	 * @param path absolute URL of the page to fetch
	 * @param host base host; absolute links starting with it are kept (relativized)
	 * @return set of discovered href values (possibly empty, never null)
	 * @throws HttpException if an HTTP-level error occurs
	 * @throws IOException   if a network I/O error occurs
	 */
	public static Set<String> matchesHref(String path, String host) throws HttpException, IOException {
		Set<String> foundUrls = new HashSet<String>();
		String content = CurlRequest.get(path);
		if (content != null && !content.equals("")) {
			// Skip everything before <body> so links in <head> are ignored.
			// Bug fix: the old code always did indexOf("<body>") + 4, which is
			// the wrong offset ("<body>" is 6 chars) and, when the tag was
			// absent (indexOf == -1), skipped 3 arbitrary characters — or threw
			// StringIndexOutOfBoundsException on pages shorter than 3 chars.
			int bodyStart = content.indexOf("<body>");
			String searchable = bodyStart >= 0
					? content.substring(bodyStart + "<body>".length())
					: content;
			Matcher m = HREF_PATTERN.matcher(searchable);
			while (m.find()) {
				// Group 3 is the whole href value, possibly quoted.
				String url = m.group(3);
				url = url.replace("\"", "").replace("'", "").trim();
				if (!url.startsWith("http") && !url.startsWith("javascript") && !url.startsWith("#") && !url.startsWith("\\")
						&& !url.trim().equals("")) {
					foundUrls.add(url);
				} else {
					// Absolute link on the same host: keep it, relative to host.
					if (url.startsWith(host)) {
						url = url.replace(host, "");
						if (!url.trim().equals("")) {
							foundUrls.add(url);
						}
					}
				}
			}
		}
		return foundUrls;
	}

}
