package page;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * PageParser class parses HTML code for URLs and words.
 * 
 * @version February 6, 2012
 * @author Jay Fernandez & Kyle Richardson
 * @author Kyle Richardson
 */
public class PageParser implements Runnable {

	/** Running total of URLs discovered across all parsed documents. */
	private int urlsFound = 0;

	/** Word lists from parsed pages, consumed via {@link #getParsedPage()}. */
	private final LinkedBlockingQueue<List<String>> parsed = new LinkedBlockingQueue<List<String>>();

	/** Source of fetched documents to parse. */
	private final PageRetriever retriever;

	/** Total milliseconds spent inside {@link #parse()}. */
	private long parseTime;

	/** Number of documents parsed so far. */
	private int parseCount;

	// volatile: written by forceQuit() from another thread, read by the run()
	// loop — without it the stop signal may never become visible.
	private volatile boolean cont;

	/**
	 * Creates a parser that drains documents from the given retriever.
	 * 
	 * @param retriever source of fetched documents; must not be null
	 */
	public PageParser (PageRetriever retriever) {
		this.retriever = retriever;
		parseTime = 0;
		parseCount = 0;
		cont = true;
	}

	/**
	 * Parses the web page for all URLs.
	 * 
	 * @param doc the web page document
	 * @return list of absolute link addresses found in the page
	 */
	public List<String> parseURLs(Document doc) {
		final List<String> pages = new ArrayList<String>();
		
		Elements links = doc.select("a");
		for (Element link : links) {
			pages.add(link.attr("abs:href"));
		}
		urlsFound += pages.size();
		return pages;
	}
	
	/**
	 * Parses the web page for all used words by extracting only the text.
	 * 
	 * @param doc the web page document
	 * @return list of lower-cased words, with empty tokens removed
	 */
	public List<String> parseWords(Document doc) {
		final List<String> words = new ArrayList<String>();
		
		String text = doc.body().text().toLowerCase();
		for (String word : text.split("[ /\\?,.!@#$%^&*(){}+]+")) {
			// split() yields a leading empty token when the text begins
			// with a delimiter; don't record empty "words".
			if (!word.isEmpty()) {
				words.add(word);
			}
		}
		return words;
	}
	
	/** @return running total of URLs found across all parsed pages */
	public int getParsed(){
		return urlsFound;
	}
	
	/** @return number of parsed word lists waiting to be consumed */
	public int checkParsedPages() {
		return parsed.size();
	}
	
	/**
	 * Takes the next parsed word list, blocking until one is available.
	 * 
	 * @return word list of the next parsed page
	 * @throws InterruptedException if interrupted while waiting
	 */
	public List<String> getParsedPage() throws InterruptedException {
		return parsed.take();
	}

	/**
	 * Parses one document if the retriever has one ready: hands discovered
	 * URLs back to the retriever and queues the page's words. Updates the
	 * timing and count statistics.
	 * 
	 * @throws InterruptedException if interrupted while queueing results
	 */
	private void parse() throws InterruptedException {
		if (retriever.checkIfReady() > 0) {
			long start = System.currentTimeMillis();
			Document toBeParsed = retriever.getNextDocument();
			retriever.addAllPages(parseURLs(toBeParsed));
			parsed.put(parseWords(toBeParsed));
			long end = System.currentTimeMillis();
			parseTime = parseTime + end - start;
			parseCount++;
		}
	}
	
	/** @return total milliseconds spent parsing documents */
	public long getParseTime() {
		return parseTime;
	}
	
	/** @return number of documents parsed */
	public int getParseCount() {
		return parseCount;
	}
	
	/** Signals the run loop to stop after the current iteration. */
	public void forceQuit() {
		cont = false;
	}

	/**
	 * Continuously parses documents until {@link #forceQuit()} is called or
	 * the thread is interrupted.
	 * NOTE(review): this busy-spins while the retriever has nothing ready —
	 * consider a blocking hand-off in PageRetriever instead.
	 */
	@Override
	public void run(){
		while (cont) {
			try {
				parse();
			} catch (InterruptedException e) {
				// Stop the loop, but restore the interrupt status so any
				// enclosing code can still observe the interruption.
				cont = false;
				Thread.currentThread().interrupt();
			}
		}
	}
}
