package page;

import java.util.*;

import queue.BlockingObject;

import data.DataGatherer;

/**
 * Parses web page data for URLs and keywords.
 * Can be executed as single-threaded via parse() method
 * or multi-threaded, using setPage() and start() methods.
 * 
 * @author Ken "David" McClain
 * @author Peter Mosher
 * @version 1
 */
public class PageParser extends Thread {
	
	/** Map of keywords to the number of times each was seen on the current page. */
	final private Map<String, Integer> keywords = new HashMap<String, Integer>();
	
	/** Queue to add newly discovered URLs to (shared with the crawler). */
	final private LinkedList<String> queue;
	
	/** Set of URLs this parser has already enqueued; prevents duplicate queue entries. */
	final private Set<String> visitedURLs = new HashSet<String>();
	
	/** Data Gatherer to report parsed data to. */
	final private DataGatherer dataGatherer;
	
	/** Flag to let PageBuffer know we are still working. Guarded by this object's monitor. */
	private boolean busy = false;
	
	/** The page to parse, given by PageBuffer in the multi-threaded implementation. */
	private Page pageToParse;
	
	/** Objects to notify when this thread finishes executing (may be unset in single-threaded use). */
	private BlockingObject blockToNotify, blockedUntilFinished;
	
	/**
	 * Constructor, sets list of keywords, queue, and data gatherer.
	 * 
	 * @param the_keywords Array of keywords to count within the data to parse.
	 * @param the_urlQueue Queue to enqueue URLs into.
	 * @param the_dataGatherer Data gatherer to report statistics to.
	 */
	public PageParser(final String[] the_keywords, 
					  final LinkedList<String> the_urlQueue, 
					  final DataGatherer the_dataGatherer) {
		this.queue = the_urlQueue;
		
		// Seed every keyword with a zero count so parse() can assume the key exists.
		for (String keyword : the_keywords) {
			keywords.put(keyword, 0);
		}
		
		this.dataGatherer = the_dataGatherer;
	}
	
	/** 
	 * Let PageBuffer know the thread is still running.
	 * @return True if thread is still parsing, false if idle.
	 */
	public synchronized boolean isBusy() { return busy; }
	
	/** 
	 * Sets the page to parse. Used in multi-threaded implementation.
	 * @param page The page to parse when this.start() is invoked.
	 */
	public void setPage(final Page page) {
		this.pageToParse = page;
	}
	
	/**
	 * Sets the object checked by PageBuffer (the parser loop).
	 * @param blockToNotify The object to notify when this thread finishes executing.
	 */
	public void setBlockingObject(final BlockingObject blockToNotify) {
		this.blockToNotify = blockToNotify;
	}
	
	/**
	 * Sets the object checked by ModelMain (the main while loop).
	 * @param blockedUntilFinished The object to notify when thread finishes executing.
	 */
	public void setBlockedUntilFinished(final BlockingObject blockedUntilFinished) {
		this.blockedUntilFinished = blockedUntilFinished;
	}
	
	/**
	 * @param busy True if the thread is busy, false otherwise.
	 */
	public synchronized void setBusy(boolean busy) {
		this.busy = busy;
	}
	
	/** Threaded. "busy" is true while this thread is executing. */
	@Override
	public void run() {
		
		setBusy(true);
		
		try {
			parse(pageToParse.getUrl(), pageToParse.getBody());
		} finally {
			// Always clear the busy flag and wake any waiters, even if parse()
			// threw; otherwise PageBuffer / ModelMain could wait forever on a
			// thread that has already died.
			setBusy(false);
			
			// Notify the blocking object in case the PageBuffer object 
			// was waiting for a thread to become available.
			if (blockToNotify != null) {
				synchronized (blockToNotify) {
					blockToNotify.notify();
				}
			}
			
			// Notify ModelMulti that this thread has finished.
			// This wakes up the main loop and checks if we are done.
			if (blockedUntilFinished != null) {
				synchronized (blockedUntilFinished) {
					blockedUntilFinished.notify();
				}
			}
		}
		
	}
	
	/**
	 * Parses web page data: counts total words, tallies keyword occurrences,
	 * extracts hyperlinks into the queue, and reports results to the DataGatherer.
	 * 
	 * @param fromURL URL the data was retrieved from. For HREF link purposes.
	 * @param data String representation of the web page data.
	 */
	public void parse(final String fromURL, final String data) {
		
		final long startTime = System.currentTimeMillis(); // The time we started parsing.
		
		// Reset keyword counts to 0 for this page.
		for (Map.Entry<String, Integer> entry : keywords.entrySet()) {
			entry.setValue(0);
		}
		
		int wordCount = 0; // Total # of words found.
		int urlCount = 0;  // Total # of URLs found.
		
		// Strip HTML tags so markup is not counted as words.
		final String dataNoTags = stripTags(data);
		
		// Iterate over every whitespace-delimited "word" in the page body.
		final StringTokenizer tokens = new StringTokenizer(dataNoTags);
		while (tokens.hasMoreTokens()) {
			
			wordCount++; // Increment total # of words on this page.
			
			final String token = tokens.nextToken(); // Current word
			
			// Tally the word if it is one of our keywords (exact, case-sensitive match).
			if (keywords.containsKey(token)) {
				keywords.put(token, keywords.get(token) + 1);
			}
			
		}
		
		// Plain-text pages contain no markup, so skip link extraction for .txt files.
		if (!fromURL.toLowerCase().endsWith(".txt")) {
			// Find URLs, add them to queue.
			urlCount = extractURLs(fromURL, data);
		}
		
		// Total time taken to parse this page.
		final long timeTaken = System.currentTimeMillis() - startTime;
		
		// Report a SNAPSHOT of the counts to the DataGatherer. The internal map
		// is reset to zeros on the next parse() call, so sharing the live
		// reference would silently corrupt previously reported statistics if
		// the gatherer retains it.
		dataGatherer.parsed(fromURL, new HashMap<String, Integer>(keywords),
							wordCount, urlCount, timeTaken);
		
	}
	
	
	/** @return 'data' with all HTML tags removed. */
	private final String stripTags(final String data) {
		// Non-greedy match removes each <...> tag individually.
		return data.replaceAll("\\<.*?>","");
	}
	
	
	/**
	 * Adds found URLs to "queue". This is a helper method for 'parse'.
	 * Relative links are resolved against 'fromURL'; each qualifying URL is
	 * enqueued at most once (tracked in visitedURLs).
	 * 
	 * @param fromURL Originated URL.
	 * @param data The web page data (including tags).
	 * @return Number of URLs encountered, including URLs not added to queue.
	 */
	private int extractURLs(String fromURL, String data) {
		int urlCount = 0; // Number of URLs found.
		
		// Iterate over every hyperlink found
		for (String url : between(data, "href=\"", "\"")) {
			
			// Drop any fragment identifier; "#..." points within the same page.
			final int hash = url.indexOf('#');
			if (hash != -1) {
				url = url.substring(0, hash);
			}
			
			// Now we need to resolve relative addressing against fromURL...
			if (url.startsWith("../")) {
				// Parent-relative: back up one directory from the current page.
				// NOTE(review): only a single leading "../" is resolved here.
				url = fromURL.substring(0, fromURL.lastIndexOf('/')) + url.substring(2);
				
			} else if (url.startsWith("/")) {
				// Site-root relative. indexOf('/', 8) skips past "http://" /
				// "https://" to find the slash ending the host name.
				url = fromURL.substring(0, fromURL.indexOf('/', 8)) + url;
				
			} else if (url.startsWith("./")) {
				// Current-directory relative addressing.
				url = fromURL.substring(0, fromURL.lastIndexOf('/')) + url.substring(1);

			} else if (url.startsWith("http")) {
				// Already absolute; don't need to change anything.
				
			} else {
				// Bare relative path; resolve against the current directory.
				if (fromURL.indexOf('/', 8) == -1) {
					fromURL += '/'; // Host-only URL: ensure a slash to split on.
				}
				url = fromURL.substring(0, fromURL.lastIndexOf('/') + 1) + url;
			}
			
			// Avoid Mobus' blog and mail links.
			if (!url.contains("questioneverything.typepad.com") && !url.startsWith("mailto:")) {
				// Ensure the URL uses an extension we know how to parse.
				if (url.endsWith(".html") || url.endsWith(".htm") || url.endsWith(".txt")) {
					// BUG FIX: record the URL so it is never enqueued twice.
					// Previously nothing was ever added to visitedURLs, so the
					// duplicate check below always passed. Set.add() returns
					// false when the element was already present.
					if (visitedURLs.add(url)) {
						queue.add(url);
					}
				}
				
			}
			urlCount++;
		}
		
		return urlCount;
	}
	
	/**
	 * Helper method which returns all strings contained within 'original' between 'beginning' and 'ending'.
	 * Ignores case when locating the delimiters; returned substrings preserve
	 * the original casing.
	 * 
	 * @param original Text to search.
	 * @param beginning Opening delimiter (matched case-insensitively).
	 * @param ending Closing delimiter (matched case-insensitively).
	 * @return all strings contained within 'original' between 'beginning' and 'ending'.
	 */
	private ArrayList<String> between(String original, String beginning, String ending) {
		ArrayList<String> result = new ArrayList<String>();
		
		// Search in lowercase so delimiter matching is case-insensitive.
		// NOTE(review): index math assumes toLowerCase() preserves length,
		// which holds for the ASCII delimiters used in this class.
		String originalLower = original.toLowerCase();
		String beginningLower = beginning.toLowerCase();
		String endingLower = ending.toLowerCase();
		
		int i = originalLower.indexOf(beginningLower);
		while (i != -1) {
			int j = originalLower.indexOf(endingLower, i + beginning.length());
			if (j == -1) 
				break; // Unterminated delimiter; stop scanning.
			result.add(original.substring(i + beginning.length(), j));
			i = originalLower.indexOf(beginningLower, j + ending.length());
		}
		
		return result;
	}
	
}
