/*
 * University Of Washington - Tacoma
 * TCSS-422, Operating Systems
 * Assignment 1 - Web Crawler
 * April 26, 2011
 * 
 * Team: Kernel Monkeys
 * Andrew Boguk
 * Sergiy Kulyk
 * Nicholas Swartzendruber
 */

package crawler.engine;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Observable;
import java.util.Set;
import java.util.concurrent.BlockingQueue;


/**
 * The PageParser is used to parse information from a webpage.
 * This webpage data must be contained in an ordered (same as 
 * the order it appears in the html) list where each index contains
 * a string representing sequential lines of html. The class is runnable
 * so that it may be run on its own thread. The constructor takes a series
 * of parameters that will initialize the object to run for one
 * single webpage. When running, the page parser will extract hyperlinks
 * and actual content. Upon completion, the content extracted will be placed
 * in the data gatherer, and any unvisited hyperlinks will be placed in the
 * RetrieveQueue. Any observers will also be notified. 
 * 
 * @author Nicholas Swartzendruber
 * @version 1.0
 */
public class PageParser extends Observable implements Runnable {

    //Constants

    /**
     * A delimiter used to remove *most* of the punctuation when splitting
     * page content into individual words.
     */
    private static final String DELIMITER = "[\\s.\t\",?:!)(\\[\\];\\-\\~]";

    //Fields

    /** Queue to place hyperlinks in. */
    final private BlockingQueue<String> retrieveQueue;

    /** Set that contains webpages that have already been visited. */
    final private Set<String> alreadyVisited;

    /** Data gatherer object to report to. */
    final private DataGatherer dataGatherer;

    /** A list that contains the content of a webpage or txt file. */
    final private List<String> webpageContent;

    /** A map of words to look for, mapped to their occurrence counts. */
    final private Map<String, Integer> wordMap;

    /** A variable to keep track of the current total word count. */
    private int wc;

    /** A variable to keep track of url count. */
    private int urlCount;

    /**
     * Initializes the page parser. The information passed into
     * the constructor will be available for when the object is
     * executed on its own thread.
     *
     * @param retrieveQueue The queue to place new hyperlinks in.
     * @param alreadyVisited A set of strings containing URL's that have already
     *                       been visited. Needs to be thread safe.
     * @param words          A set of words to look for. Needs to be thread safe.
     * @param dataGatherer   The data gatherer object to unload on when the page parser
     *                       has finished its job.
     * @param webpageContent A list of strings that contain data from an HTML or text page.
     *                       Each index of the list must represent sequential lines as found
     *                       on the actual HTML or txt file. The first index MUST be the website
     *                       where the content is from.
     */
    public PageParser(final BlockingQueue<String> retrieveQueue,
                      final Set<String> alreadyVisited,
                      final Set<String> words,
                      final DataGatherer dataGatherer,
                      final List<String> webpageContent) {
        this.retrieveQueue  = retrieveQueue;
        this.alreadyVisited = alreadyVisited;
        this.dataGatherer   = dataGatherer;
        this.webpageContent = webpageContent;
        this.wordMap        = new HashMap<String, Integer>();

        wc = 0;
        urlCount = 0;

        //Another thread may mutate the shared word set; iterate under its lock.
        synchronized (words) {
            //Populate our map with keys, all lowercased so lookups are case-insensitive.
            for (String word : words) {
                wordMap.put(word.toLowerCase(), 0);
            }
        }
    }

    /**
     * Called when the thread starts. This method will take
     * all the information passed to the constructor, and
     * parse the information from webpageContent. When all
     * the data has been parsed, the content will be passed
     * to data gatherer, and any new links will be placed into the
     * retrieveQueue. Any observers will also be notified upon completion.
     */
    @Override
    public void run() {
        //The URL of the page being parsed; filled in from the first list entry.
        String webpage = "";
        long start = System.currentTimeMillis();

        //True once we have seen the <body> tag; content before it is ignored.
        boolean hitBody = false;

        //The most recently opened tag name; carried across lines so that a tag
        //split over multiple lines is still recognized.
        String tag = "";
        boolean finishTag = false;

        //True if the source is a plain text file rather than HTML.
        boolean isText = false;

        for (String line : webpageContent) {
            //The first index of the list is the page's own URL.
            if (webpage.isEmpty()) {
                webpage = line;
                if (line.endsWith(".txt")) {
                    isText = true;
                }
                continue;
            }
            //Special variable to keep track if we want to break out of the inner loop.
            boolean breakOut = false;

            line = line.trim().toLowerCase();

            while (!line.isEmpty() && !breakOut) {

                line = line.trim();
                //We have a tag (or are finishing one started on a previous line).
                if ((line.charAt(0) == '<' && !isText) || finishTag) {
                    boolean isClosing = line.length() > 2 && !finishTag ? line.charAt(1) == '/' : false;

                    if (!isClosing) {
                        int index = 0;

                        //Move up to the first space or '>' to isolate the tag name.
                        while (line.length() > index &&
                               line.charAt(index) != ' ' &&
                               line.charAt(index) != '>') {
                            index++;
                        }

                        //Grab the actual tag; keep the previous one when finishing
                        //a tag that spilled over from an earlier line.
                        tag = finishTag ? tag : line.substring(1, index);

                        //Skip the line if the tag does not close on this line.
                        if (line.indexOf('>') == -1) {
                            breakOut = true;
                            finishTag = true;
                            continue;
                        }

                        //Make sure we don't have a self closing tag, and
                        //that it's not a comment.
                        if (line.indexOf('>') == 0 || (line.charAt(line.indexOf('>') - 1) != '/' &&
                            !tag.startsWith("!--"))) {
                            if (tag.equalsIgnoreCase("a")) {
                                parseLink(webpage, line.substring(0, line.indexOf('>') + 1));
                            } else if (tag.equalsIgnoreCase("body")) {
                                hitBody = true;
                            }
                        }
                    }
                    //Consume everything up to and including the '>'.
                    //Bug fix: a closing tag with no '>' on this line previously left
                    //the line unchanged (substring(0)), spinning this loop forever.
                    //Skip the remainder of the line instead.
                    int closeIndex = line.indexOf('>');
                    if (closeIndex == -1) {
                        line = "";
                    } else {
                        line = line.substring(closeIndex + 1);
                    }
                    finishTag = false;
                } else {
                    //We have content.
                    int index = line.indexOf('<');
                    if (index == -1) {
                        if (hitBody && !tag.equalsIgnoreCase("script")) {
                            //Entire line is content.
                            parseContent(line);
                        }
                        line = "";
                    } else {
                        if (hitBody && !tag.equalsIgnoreCase("script")) {
                            //Content runs up to the start of the next tag.
                            parseContent(line.substring(0, index));
                        }
                        line = line.substring(index);
                    }
                }
            }
        }

        //Add all of our data to the data gatherer.
        dataGatherer.addData(wordMap, wc, urlCount, webpage,
                             (int) (System.currentTimeMillis() - start));

        //Let any observers know this page has been fully parsed.
        setChanged();
        notifyObservers();
    }

    /**
     * A helper method used to parse content and collect information about it.
     * Updates the per-word counts for tracked words and the total word count.
     *
     * @param content The content to parse.
     */
    private void parseContent(final String content) {
        String[] tok = content.split(DELIMITER);

        for (int i = 0; i < tok.length; i++) {
            String word = tok[i].toLowerCase().trim();
            if (!word.isEmpty()) {
                if (wordMap.containsKey(word)) {
                    //Increment count for that word.
                    wordMap.put(word, wordMap.get(word) + 1);
                }
                //Increment total word count.
                wc++;
            }
        }
    }

    /**
     * A helper method used to parse out links from an anchor tag. Only links
     * ending in .txt/.html/.htm are followed; relative links are resolved
     * against the current page. New, unvisited links are enqueued.
     *
     * @param currentPage The current webpage that's being parsed.
     * @param tag The full anchor tag, elements and all.
     */
    private void parseLink(final String currentPage, final String tag) {

        //Find out what kind of quote was used around the href value.
        char delim = '"';
        if (!tag.contains(delim + "")) {
            delim = '\'';
        }
        //Make sure it really is an anchor tag.
        if (tag.length() > 3 && tag.charAt(1) == 'a' && tag.charAt(2) == ' ') {
            int index = tag.indexOf("href");
            //If an href is found.
            if (index != -1) {
                urlCount++;

                //Grab the url between the quotes. 6 is needed because we need to skip
                //to right after the first quote (1h 2r 3e 4f 5= 6")
                int index2 = tag.indexOf(delim, index + 6);
                String href = !(index2 == -1) ? tag.substring(index + 6, index2).trim() : "";
                String url = "";
                //Limit the links that we follow.
                if (!href.endsWith(".txt") &&
                    !href.endsWith(".html") &&
                    !href.endsWith(".htm")) {
                    return;
                }

                if (href.startsWith("http://") ||
                    href.startsWith("https://")) {
                    //Absolute address
                    url = href;
                } else if (href.isEmpty() || href.charAt(0) == '#') {
                    //Break out if we have an invalid link.
                    return;
                } else {
                    //Relative address: resolve against the current page.
                    URL absolute;
                    try {
                        URL base = new URL(currentPage);
                        absolute = new URL(base, href);
                    } catch (MalformedURLException e) {
                        //Bug fix: previously fell through and dereferenced a null
                        //URL, throwing a NullPointerException. Skip the bad link.
                        e.printStackTrace();
                        return;
                    }
                    url = absolute.toExternalForm();
                }

                //If we have not visited the url, then add it.
                if (url.startsWith("http") && alreadyVisited.add(url)) {
                    retrieveQueue.add(url);
                }
            }
        }
    }
}