/* Liviu Patrasco & Holly Beach
 * TCSS422 Project 1: Web Crawler
 * February 6, 2012
 */

import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.BlockingQueue;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/** The MThreadedPageParser acts as a consumer of
 * raw data and a producer of parsed data.  It is
 * designed to work in a multi-threaded environment using
 * blocking queues.
 * @author Liviu and Holly
 */
public class MThreadedPageParser implements Runnable {
	/** Sentinel published downstream when the retriever reports no more documents. */
	public static final Page NO_MORE_PAGES_MARKER = new Page("X");
	/** Sentinel published downstream when the retriever ran out of URLs. */
	public static final Page OUT_OF_URLS_MARKER = new Page("NOT ENOUGH URLS");
	/** Sentinel published downstream when the retriever hit a communication error. */
	public static final Page COMM_ERROR_MARKER = new Page("COMM ERROR");
	/** Sentinel published downstream when the retriever (or this parser) was interrupted. */
	public static final Page INTERRUPTED_MARKER = new Page("INTERRUPTED");

	/** Raw documents produced by the retriever; this class consumes them. */
	private final BlockingQueue<RawPage> retrieved_docs_q;
	/** Discovered URLs fed back to the retriever; this class produces them. */
	private final BlockingQueue<String> url_q;
	/** Parsed pages for downstream consumers; this class produces them. */
	private final BlockingQueue<Page> parsed_pages_q;
	/** Loop-termination flag, set once a stop marker (or an interrupt) is seen. */
	private boolean done = false;

	/**
	 * Constructs a parser wired to the crawler's shared queues.
	 *
	 * @param ret_docs queue of retrieved raw documents to consume
	 * @param urls queue to which newly discovered URLs are added
	 * @param parsed_pages queue to which parsed pages (and stop markers) are added
	 */
	public MThreadedPageParser(final BlockingQueue<RawPage> ret_docs,
			final BlockingQueue<String> urls, final BlockingQueue<Page> parsed_pages) {
		retrieved_docs_q = ret_docs;
		url_q = urls;
		parsed_pages_q = parsed_pages;
	}

	/**
	 * Consumes raw pages until a stop marker arrives, translating each marker
	 * into this class's corresponding sentinel and parsing every ordinary
	 * document.  Every iteration publishes exactly one {@link Page} downstream.
	 */
	@Override
	public void run() {
		while (!done) {
			try {
				final RawPage raw_page = retrieved_docs_q.take();
				final Page parsed_page;
				// Translate the retriever's stop markers into our own sentinels
				// so downstream consumers learn why the pipeline ended.
				if (raw_page.equals(MThreadedPageRetriever.NO_MORE_DOCS_MARKER)) {
					parsed_page = NO_MORE_PAGES_MARKER;
					done = true;
				} else if (raw_page.equals(MThreadedPageRetriever.NO_MORE_URLS_MARKER)) {
					parsed_page = OUT_OF_URLS_MARKER;
					done = true;
				} else if (raw_page.equals(MThreadedPageRetriever.INTERRUPTED_MARKER)) {
					parsed_page = INTERRUPTED_MARKER;
					done = true;
				} else if (raw_page.equals(MThreadedPageRetriever.COMM_ERROR_MARKER)) {
					parsed_page = COMM_ERROR_MARKER;
					done = true;
				} else {
					parsed_page = parse(raw_page);
				}
				parsed_pages_q.add(parsed_page);
			} catch (InterruptedException e) {
				// Restore the interrupt status, tell downstream consumers we were
				// interrupted, and stop.  (Swallowing the exception and continuing
				// made this thread impossible to cancel via interruption.)
				Thread.currentThread().interrupt();
				parsed_pages_q.add(INTERRUPTED_MARKER);
				done = true;
			}
		}
	}

	/**
	 * Parses one raw document: extracts its body text and absolute links,
	 * timing how long the parse took.
	 *
	 * @param raw_page the retrieved document together with its source URL
	 * @return a Page holding the document text, its links, and the parse time in ms
	 */
	private Page parse(final RawPage raw_page) {
		final List<String> link_list = new LinkedList<String>();
		final long start = System.currentTimeMillis();
		final Document doc = Jsoup.parse(raw_page.page, raw_page.from_url);
		// Plain-text files contain no anchors, so skip link extraction for them.
		if (!raw_page.from_url.endsWith(".txt")) {
			for (final Element link : doc.select("a[href]")) {
				// "abs:href" resolves the link against the page's base URL.
				final String href = link.attr("abs:href");
				if (href != null && !href.isEmpty()) {
					url_q.add(href);     // feed the new URL back to the retriever
					link_list.add(href); // record it on the parsed page
				}
			}
		}
		final long stop = System.currentTimeMillis();
		return new Page(doc.body().text(), link_list, stop - start);
	}
}
