//Fusion of the 422 Proportion
//Dustin Striplin, Tyler Simrell, Lawrence Grass, and Jacob Hall
//Finished 5/1/13

package controller;


import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.concurrent.atomic.AtomicInteger;

import model.PageInfo;

import net.htmlparser.jericho.Config;
import net.htmlparser.jericho.Element;
import net.htmlparser.jericho.HTMLElementName;
import net.htmlparser.jericho.LoggerProvider;
import net.htmlparser.jericho.MasonTagTypes;
import net.htmlparser.jericho.MicrosoftConditionalCommentTagTypes;
import net.htmlparser.jericho.Renderer;
import net.htmlparser.jericho.Source;


/**
 * Parses .htm, .html, .txt files
 * @author Dustin Striplin, Lawrence Grass
 *
 */
/**
 * Parses .htm, .html, and .txt pages: extracts the hyperlinks from each page
 * and counts keyword occurrences, using the Jericho HTML parser. A Parser may
 * run as its own worker thread, pulling pages from the MainController's
 * shared (blocking) buffers.
 *
 * @author Dustin Striplin, Lawrence Grass
 */
public class Parser implements Runnable {
	// String constants used when classifying and normalizing URLs.
	private static final String HTTP = "http://";
	private static final String HTTPS = "https://";
	private static final String FTP = "ftp://";
	private static final String HTML = ".html";
	private static final String HTM = ".htm";
	private static final String TXT = ".txt";

	// Thread-safe counter so each threaded Parser gets a unique name even if
	// several Parsers are constructed concurrently (the old int++ was racy).
	private static final AtomicInteger threadNum = new AtomicInteger();

	private Thread thread;
	private final MainController mc; // needed for access to blocked data structures

	/**
	 * Initializes the parser and, if requested, starts it on its own thread.
	 *
	 * @param mc       the controller providing the shared page buffers
	 * @param threaded whether or not to run this parser as a thread
	 */
	public Parser(final MainController mc, final boolean threaded) {
		MicrosoftConditionalCommentTagTypes.register();
		MasonTagTypes.register();
		Config.LoggerProvider = LoggerProvider.DISABLED;
		this.mc = mc;
		if (threaded) {
			thread = new Thread(this, "Parser" + threadNum.incrementAndGet());
			thread.start(); // starts running the thread, run() method executed
		}
	}

	/**
	 * Worker loop: repeatedly takes a page from the controller, parses it,
	 * and hands the discovered links back. Exits when the controller reports
	 * that parsing should stop, or when the thread is interrupted.
	 */
	@Override
	public void run() {
		boolean running = true; // set to false when the max number of pages has been retrieved
		// loop can be broken if the limit of pages has been reached
		while (running) {
			try {
				if (mc.continueParsing()) {
					PageInfo pi = mc.takeItemPageBuffer(); // can block
					if (pi == null) continue; // no more parsing needed
					long parseTime = System.nanoTime();
					ArrayList<PageInfo> links = parse(pi);
					parseTime = System.nanoTime() - parseTime;
					pi.setTimeToParsePage(parseTime);
					mc.insertItemsToRetrieve(links); // can block
					mc.addPageInfo(pi);
				} else {
					running = false;
				}
			} catch (InterruptedException e) {
				e.printStackTrace();
				// Restore the interrupt flag AND leave the loop; the original
				// kept running == true, so the thread spun on repeated
				// InterruptedExceptions instead of stopping as intended.
				Thread.currentThread().interrupt();
				running = false;
			}
		}
	}

	/**
	 * Parses one retrieved page: extracts its hyperlinks and records keyword
	 * occurrences on the given PageInfo.
	 *
	 * @param pi the page to parse; its content and URL must already be set
	 * @return an ArrayList&lt;PageInfo&gt; containing the links found on the page
	 */
	public ArrayList<PageInfo> parse(PageInfo pi) {
		Source source = new Source(pi.getPageContent());
		source.fullSequentialParse(); // used to increase performance
		ArrayList<PageInfo> links = getHyperLinks(source, pi.getURL());
		pi.setNumURLOnPage(links.size());
		pi.setPageLimit(mc.getPageLimit()); // total number of pages to be retrieved,
											// for reference in each page
		checkKeywords(source, pi);
		return links;
	}

	/**
	 * Gets all of the hyperlinks from a webpage. Uses the Jericho HTML parser
	 * that was written by Martin Jericho.
	 *
	 * @param source the website's source object created by the Jericho parser
	 * @param url    the url of the website we are parsing
	 * @return the links on the webpage, resolved to absolute URLs
	 */
	private ArrayList<PageInfo> getHyperLinks(final Source source, String url) {
		final String parent = url;

		// Normalize the base URL ONCE (the original redundantly re-checked it
		// on every link): drop a trailing '/' and strip a file-name component
		// so relative links resolve against the containing directory.
		if (!url.isEmpty() && url.charAt(url.length() - 1) == '/') {
			url = url.substring(0, url.length() - 1);
		}
		if (url.endsWith(HTML) || url.endsWith(HTM) || url.endsWith(TXT)) {
			int lastSlash = url.lastIndexOf('/');
			if (lastSlash >= 0) { // guard: bare "file.html" has no '/' to strip to
				url = url.substring(0, lastSlash);
			}
		}

		List<Element> links = source.getAllElements(HTMLElementName.A); // all <a href> tags
		ArrayList<PageInfo> formattedLinks = new ArrayList<PageInfo>();

		for (Element ele : links) {
			String tmp = ele.getAttributeValue("href"); // returns null if there is no value
			if (tmp == null) continue;

			if (tmp.contains(FTP)) continue; // ignore ftp sites
			if (tmp.contains("questioneverything.typepad.com")) continue; // avoid professor's blog

			int frag = tmp.indexOf('#');
			if (frag >= 0) { // remove any fragment references
				tmp = tmp.substring(0, frag);
			}
			if (tmp.isEmpty()) continue; // could be empty after removing '#'

			if (tmp.charAt(0) == '/') { // root-relative: resolve against the base
				tmp = url + tmp;
			} else if (!tmp.startsWith(HTTP) && !tmp.startsWith(HTTPS)) {
				// BUG FIX: the original used contains("http://"), which (a)
				// missed https links — mangling them into base + "/https://…" —
				// and (b) wrongly kept relative links whose query string
				// merely contained "http://". startsWith pins both down.
				tmp = url + '/' + tmp;
			}

			PageInfo newInfo = new PageInfo(tmp); // new PageInfo for items to be retrieved
			newInfo.setKeyWordMap(mc.getKeywordMap());
			newInfo.setParent(parent);
			formattedLinks.add(newInfo);
		}
		return formattedLinks;
	}

	/**
	 * Parses the rendered document text to find instances of keywords; every
	 * match increments that keyword's count, and the total word count is
	 * recorded on the PageInfo.
	 *
	 * @param source the Jericho source of the file being read
	 * @param pi     the PageInfo for the current URL being processed
	 */
	private void checkKeywords(final Source source, PageInfo pi) {
		Renderer rend = new Renderer(source);
		StringTokenizer allText = new StringTokenizer(rend.toString());
		int total = 0;
		while (allText.hasMoreTokens()) {
			total++;
			String current = allText.nextToken().toLowerCase();
			// Single map lookup instead of containsKey + get; a non-null
			// value means the word is a keyword, so bump its count.
			Integer count = pi.getKeyWordMap().get(current);
			if (count != null) {
				pi.getKeyWordMap().put(current, count + 1);
			}
		}
		pi.setNumWordsOnPage(total);
	}
}
