/*
 * TCSS 422 - Spring quarter 2011
 * Team: 	Googlers
 * Members: Deepa Sahni, Krisnil Gounder, Michael Schweiger
 * Date: 	April 25, 2011
 */
package web;

import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import main.Main;

import buffers.URLBuffer;

/**
 * A parser that extracts hypertext links and words from a page of html.
 * Links are resolved against the page's own URL, pushed to the shared
 * {@link URLBuffer} so the retriever can fetch more pages, and the word
 * report is handed to a {@link Gatherer2}.
 * @author Michael Schweiger - API framework
 * @author Krisnil Gounder - Page Parser implementation.
 * @version 0.1
 */
public final class PageParser implements Runnable {
	
	/**
	 * Matches an anchor href attribute; group 1 is the bare URL
	 * (quote characters, spaces and '>' terminate it).
	 */
	private static final Pattern hrefMatch = Pattern.compile("<a href=[\'\"]?([^\'\" >]+)");
	/**
	 * Matches a single html tag (non-greedy so adjacent tags match separately).
	 */
	private static final Pattern tagMatch = Pattern.compile("<[^>]*?>");
	/**
	 * Matches the whole head section; DOTALL lets it span line breaks.
	 */
	private static final Pattern headMatch = Pattern.compile("<head[^>]*>(.*?)</head>", Pattern.DOTALL);
	/**
	 * Matches a script element.  DOTALL added so multi-line scripts are
	 * stripped too — without it only single-line scripts were removed.
	 */
	private static final Pattern scriptMatch = Pattern.compile("<script[^>]*>(.*?)</script>", Pattern.DOTALL);
	/**
	 * Matches a run of word characters (may match empty; empty hits are
	 * filtered out when words are collected).
	 */
	private static final Pattern wordMatch = Pattern.compile("\\w*");
	/**
	 * Matches runs of tabs and line terminators.
	 */
	private static final Pattern spaceMatch = Pattern.compile("[\t\r\n]*");
	/**
	 * Matches html character entities such as &amp;nbsp;.
	 */
	private static final Pattern asciiMatch = Pattern.compile("&.*?;");
	/**
	 * Milliseconds this parser spent on its page.  Instance-scoped: the
	 * previous static field was shared — and raced on — by every parser
	 * thread, so the recorded time belonged to whichever thread wrote last.
	 */
	private long parseTimeMs = 0;
		
	/**
	 * Parsed data, in report order: url, link count, parse time, then words.
	 */
	private List<String> wordsFound = new ArrayList<String>();
	/**
	 * Absolute links found on the page.
	 */
	private List<String> linksFound = new ArrayList<String>();

	//********************************************************************
	//	Private fields given by constructor
	//********************************************************************
	/** URL this page was fetched from; base for resolving relative links. */
	private String my_url;
	/** Sink that receives the finished word/link report. */
	private Gatherer2 my_gatherer;
	/** Raw html (or plain text) of the page being parsed. */
	private String my_page;
	
	/**
	 * Constructs a parser for one fetched page.
	 * 
	 * @param url The URL the page came from.  Cannot be null.
	 * @param the_gatherer The gatherer that receives the parsed report.  Cannot be null.
	 * @param the_page The page as a string of text.  Cannot be null.
	 * @throws IllegalArgumentException if any param is null.
	 */
	public PageParser(String url, Gatherer2 the_gatherer, String the_page)
	throws IllegalArgumentException {
		// Fail fast: run() dereferences my_url immediately, so a null URL
		// is rejected here rather than surfacing later as an NPE.
		if (url == null) {
			throw new IllegalArgumentException("URL string cannot be null!");
		} else if (the_page == null) {
			throw new IllegalArgumentException("Page string cannot be null!");
		} else if (the_gatherer == null) {
			throw new IllegalArgumentException("The gatherer cannot be null!");
		} else {
			my_url = url;
			my_gatherer = the_gatherer;
			my_page = the_page;		
		}
		
	}
	
	/**
	 * Removes every occurrence of the given pattern from the page.
	 * 
	 * @param page the text to strip.
	 * @param the_p the pattern whose matches are removed.
	 * @param spaceAppend if true, each removed match is replaced by a single
	 *        space so adjacent words are not fused together.
	 * @return the stripped page text.
	 */
	private String removeMatch(String page, final Pattern the_p, final boolean spaceAppend) {
		// StringBuilder instead of String += — the old concatenation loop
		// was O(n^2) on large pages.
		StringBuilder sb = new StringBuilder(page.length());
		for (String s : the_p.split(page)) {
			sb.append(s);
			if (spaceAppend) {
				sb.append(' ');
			}
		}
		return sb.toString();
	}
	
	/**
	 * Returns the number of links found so far on this page.
	 * 
	 * @return the link count (0 until {@link #run()} has executed).
	 */
	public int getNumOfLinks() {
		return linksFound.size();
	}

	/**
	 * Does all the real work.  Parses through the page and identifies links.
	 * Adds the links to URLBuffer and sends the final results to Gatherer.
	 */
	public void run() {
		if (my_url.endsWith(".txt")) {
			// Plain text: nothing to strip, forward the whole page with
			// zero links and zero parse time.
			my_gatherer.addPage(my_url + " 0 0 " + my_page);
			return;
		}
		
		long startTime = System.currentTimeMillis();
		wordsFound.add(my_url);
		
		my_page = removeMatch(my_page, headMatch, false);
		my_page = removeMatch(my_page, scriptMatch, false);
		Matcher hrefMatches = hrefMatch.matcher(my_page);
		
		// Identify links
		while (hrefMatches.find()) {
			// Group 1 is the bare URL — the old code re-split the full
			// "<a href=..." match on quotes and fed junk fragments into the
			// resolver (and silently dropped unquoted hrefs altogether).
			String link = hrefMatches.group(1);
			if (link.contains("javascript:")) {
				continue;
			}
			
			if (link.contains("#")) {
				link = link.split("#")[0];  //take part before the # sign
			}
			if (link.isEmpty()) {
				// Pure fragment anchor ("#top"): nothing left to resolve.
				continue;
			}
			
			try {
				URI base = new URI(my_url);
				URI resolved = base.resolve(link);
				//If no syntax exception was caught, the uri resolved ok.
				try {
					URLBuffer.getInstance().add(resolved.toURL());
					linksFound.add(resolved.toString());
				} catch (MalformedURLException e) {
					//shouldn't get here, but in case it does, output an error message and continue.
					System.err.println("Error creating URL " + resolved.toString());
				}
			} catch (URISyntaxException e) {
				//ignore, URI didn't resolve
			} catch (IllegalArgumentException e) {
				//ignore, URI didn't resolve
			}
		}

		//remove tags, entities and whitespace, then collect the words
		if (!my_page.isEmpty()) {
			my_page = removeMatch(my_page, tagMatch, false);
			my_page = removeMatch(my_page, asciiMatch, true);
			my_page = removeMatch(my_page, spaceMatch, false);
			
			if (!my_page.isEmpty()) {
				Matcher wordMatches = wordMatch.matcher(my_page);

				while (wordMatches.find()) {
					String word = wordMatches.group();
					if (!word.startsWith(" ") && !word.equals("")) {
						wordsFound.add(word);
					}
				}
			}
		}
		
		parseTimeMs = System.currentTimeMillis() - startTime; //in ms
		
		// Report format: "<url> <linkCount> <parseMs> <word> <word> ...".
		wordsFound.add(1, Integer.toString(linksFound.size()));
		wordsFound.add(2, Long.toString(parseTimeMs));
		
		//Convert the words found array to a string WITHOUT the comma separation.
		StringBuilder builder = new StringBuilder();
		for (String w : wordsFound) {
			builder.append(w).append(' ');
		}
		my_gatherer.addPage(builder.toString()); 
		Main.finishParser();
	}
	
	
	/**
	 * Prints the parsed words and links to stdout, for testing purposes.
	 */
	public void testOutput() {
		System.out.println("Words in the page: " + wordsFound);
		System.out.println("Links in the page: " + linksFound);
	}
	
}
