import java.io.*;
import java.net.*;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.*;

/**
 * Web crawler that can apply regular expressions to HTML.
 *
 * @version 1.0
 * @since 2013-02-13
 */
public class RegexCrawler {
	/**
	 * Gets the contents of an HTML document.
	 *
	 * @param url The URL to get the contents from.
	 * 
	 * @return A string containing the HTML document, or null on error.
	 */
	public String getUrlDocument(String url) {
		URL u;

		try {
			u = new URL(url);
		} catch (MalformedURLException ex) {
			System.err.println(ex);
			return null;
		}

		try {
			String line;
			StringBuffer doc = new StringBuffer();
			BufferedReader rdr = new BufferedReader(new InputStreamReader(u.openStream()));

			while ((line = rdr.readLine()) != null) {
				doc.append(line + "\n");
			}

			return doc.toString();
		} catch (IOException ex) {
			System.err.println(ex);
			return null;
		}
	}

	/**
	 * Applies a regular expression to a document and outputs the results.
	 *
	 * See the following link for an overview of the regular expression grammar
	 * implemented by Java.
	 *
	 * http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html
	 *
	 * @param doc The HTML document contents.
	 * @param regex The regular expression to apply.
	 */
	public void extractData(String doc, String regex) {
		Pattern p = Pattern.compile(regex);
		Matcher m = p.matcher(doc);

		System.out.println("Matching pattern " + regex);

		while (m.find()) {
			for (int i = 0; i <= m.groupCount(); i++) {
				System.out.println("Group(" + i + ") : " + m.group(i));
			}
		}
	}

	public static void main(String[] args) {
		if (args.length != 1) {
			System.err.println("Usage:  java RegexCrawler <url>");
			System.exit(1);
		}

		String url = args[0];
		RegexCrawler crawler = new RegexCrawler();
		String doc = crawler.getUrlDocument(url);

		if (doc != null) {
			//a. The html tags for links with the form 
			//<a href=”http://xxx”> ... </a> where xxx and ... can be any sequence of characters or digits.
			crawler.extractData(doc, "<a\\s+href=\"(http://\\S+)\">.*</a>");

			//b. Modify your code in a) to extract links with the format 
			//<a href=”xxx”> ... </a> where xxx and ... can be any sequence of characters or digits but xxx cannot start with http://
			// [^http://]
			// (?!X) - negative lookahead operator
			crawler.extractData(doc, "<a\\s+href=\"(?!http://)(\\S+)\">.*</a>");

			//c. Modify your code in b) to extract links with the format
			//<a href=”http://xxx”> ... </a> and <a href=”xxx”> ... </a> where xxx and ... can be any sequence of characters or digits
			crawler.extractData(doc, "<a\\s+href=\"(\\S+)\">.*</a>");
			
			//d. Modify your code in c) to extract links of images with the format
			//<img ... src="xxx" ... /> where xxx and ... can be any sequence of characters or digits
			// [^>] - everything until >
			crawler.extractData(doc, "<img[^>]*src=\"([^\"]*).*/>");
		}
	}
}
