package multithreaded;

import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

import net.htmlparser.jericho.Element;
import net.htmlparser.jericho.HTMLElementName;
import net.htmlparser.jericho.Segment;
import net.htmlparser.jericho.TextExtractor;

import utility.PageInfo;


/**
 * Threaded version of the page parser.
 * Constantly checks the my_pages Blocking Queue for a new page to parse.
 * If a page is not available it waits until one is or it is interrupted.
 * When a page is available, it removes it from the queue and parses the text
 * and urls on the page, then enqueues them in the appropriate Blocking Queue for
 * the other threads in the crawler. 
 * @author Daniel Anderson and Luc H. Le
 * @version TCSS422 2012
 */
public class ThreadedPageParser implements Runnable {

	/** How long doWork() waits for a page before re-checking my_finish (ms). */
	private static final long POLL_TIMEOUT_MS = 100;

	/**
	 * Amount of time (ms) spent parsing pages so far.
	 * Written only by the parser thread; volatile so other threads see fresh values.
	 */
	private volatile long my_parse_time;

	/** Number of pages parsed so far (single writer, volatile for cross-thread reads). */
	private volatile int my_parsed_count;

	/** Number of URLs parsed so far (single writer, volatile for cross-thread reads). */
	private volatile int my_url_count;

	/** Queue of pages to be parsed. Public for compatibility with existing callers. */
	public BlockingQueue<PageInfo> my_pages;

	/** Queue of urls that the parser fills as it parses pages. */
	public BlockingQueue<String> my_unfiltered_urls;

	/** Queue of text that the parser fills as it parses pages. */
	public BlockingQueue<String> my_text;

	/**
	 * Ends the thread when set to true. Volatile: finish() is called from
	 * another thread and the run() loop must observe the change.
	 */
	private volatile boolean my_finish;

	/**
	 * Constructor for ThreadedPageParser.
	 * @param pages Queue of pages to be parsed.
	 * @param unfiltered Queue of URLs that will be filled by the parser.
	 * @param text Queue of text from pages that will be filled by the parser.
	 */
	public ThreadedPageParser(BlockingQueue<PageInfo> pages, BlockingQueue<String> unfiltered, BlockingQueue<String> text) {
		my_parse_time = 0;
		my_parsed_count = 0;
		my_url_count = 0;
		my_pages = pages;
		my_text = text;
		my_unfiltered_urls = unfiltered;
		my_finish = false;
	}//end of constr

	/**
	 * Run method for the ThreadedPageParser thread.
	 * Ends when my_finish is set to true by outside sources
	 * or the thread is interrupted.
	 */
	@Override
	public void run() {
		while (!my_finish) {
			try {
				doWork();
			} catch (InterruptedException e) {
				// Restore the interrupt flag so the thread's owner can observe it,
				// then shut down cleanly.
				Thread.currentThread().interrupt();
				my_finish = true;
			}
		}
	}//end of run

	/** Ends the thread (takes effect within one poll timeout, or immediately on interrupt). */
	public void finish() {
		my_finish = true;
	}//end of finish

	/**
	 * Get the amount of time spent parsing so far.
	 * @return Total parse time in milliseconds.
	 */
	public long getParseTime() {
		return my_parse_time;
	}//end of get

	/**
	 * Get the number of parsed pages so far.
	 * @return Count of pages fully parsed.
	 */
	public int getParseCount() {
		return my_parsed_count;
	}//end of get

	/**
	 * Get the number of URLs parsed so far.
	 * @return Count of URLs enqueued for filtering.
	 */
	public int getURLCount() {
		return my_url_count;
	}//end of get

	/**
	 * Get the number of URLs parsed so far.
	 * @return Count of URLs enqueued for filtering.
	 * @deprecated Misspelled name kept for backward compatibility; use {@link #getURLCount()}.
	 */
	@Deprecated
	public int geURLCount() {
		return getURLCount();
	}//end of get

	/**
	 * Method where the main work of the thread takes place when
	 * there are available pages to parse.
	 * Blocks (bounded) on the page queue rather than busy-polling, so the
	 * thread is idle when no pages are available but still notices finish().
	 * @throws InterruptedException The thread was interrupted and should exit.
	 */
	private void doWork() throws InterruptedException {
		// poll() with a timeout replaces the old isEmpty()/take() pair, which
		// busy-spun through run() whenever the queue was empty.
		PageInfo page = my_pages.poll(POLL_TIMEOUT_MS, TimeUnit.MILLISECONDS);
		if (page == null) {
			return; // timed out with nothing to do; run() re-checks my_finish
		}
		long start = System.currentTimeMillis();
		Segment seg = new Segment(page.getSource(), 0, page.getSource().length());
		parseText(seg);
		parseURLs(seg, page.getURL());
		my_parse_time += System.currentTimeMillis() - start;
		my_parsed_count++;
	}//end of doWork

	/**
	 * Parse a segment from a html document into text and
	 * add to the text queue to be analyzed.
	 * @param seg The segment to be parsed into text.
	 */
	private void parseText(Segment seg) {
		TextExtractor te = new TextExtractor(seg);
		// NOTE(review): add() throws if my_text is a bounded queue at capacity;
		// assumes the crawler uses unbounded queues.
		my_text.add(te.toString());
	}//end of parseText

	/**
	 * Takes a segment of html and parses for urls within the page.
	 * @param seg A segment of html to be parsed for urls.
	 * @param parent The url that this segment came from.
	 */
	private void parseURLs(Segment seg, String parent) {
		for (Element anchor : seg.getAllElements(HTMLElementName.A)) {
			// Read the attribute once; getAttributeValue already returns a String.
			String href = anchor.getAttributeValue("href");
			if (href != null) {
				String url = buildURL(parent, href);
				if (!url.equals("null")) { // "null" is buildURL's reject sentinel
					my_url_count++;
					my_unfiltered_urls.add(url);
				}
			}
		}
	}//end of parseURLs

	/**
	 * Checks if a URL is relative or absolute and constructs a proper URL.
	 * @param prime Root URL of webpage.
	 * @param sub Possible relative URL on webpage.
	 * @return A valid URL, or the sentinel string "null" when the link is rejected.
	 */
	private String buildURL(String prime, String sub) {
		String str = "null";
		if (!prime.endsWith("/")) {
			// Trim back to the parent's directory; if prime has no slash at
			// all, leave it untouched instead of reducing it to "".
			int slash = prime.lastIndexOf("/");
			if (slash >= 0) {
				prime = prime.substring(0, slash + 1);
			}
		}
		if (sub.startsWith("/")) {
			sub = sub.substring(1);
		}
		if (sub.endsWith(".txt") || sub.endsWith(".html") || sub.endsWith("/")) { //TODO make this not filter out legit webpages.
			str = prime + sub;
		}
		// Absolute links pass through unchanged; https links were previously dropped.
		if (sub.startsWith("http://") || sub.startsWith("https://")) {
			str = sub;
		}
		return str;
	}//end of buildURL
}//end of PageParser
