/* Liviu Patrasco & Holly Beach
 * TCSS422 Project 1: Web Crawler
 * February 6, 2012
 */import java.util.LinkedList;
import java.util.List;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/** The PageParser acts as a consumer of 
 * raw data and a producer of parsed data.  It is
 * designed to work in a single threaded environment.
 * <p>
 * Not thread-safe: internal state is held in unsynchronized lists.
 * @author Liviu and Holly
 */
public class PageParser {
	// Every page parsed since construction or the last reset().
	private List<Page> parsed_pages = new LinkedList<Page>();
	// Links extracted from the most recently parsed page only.
	private List<String> new_links = new LinkedList<String>();

	public PageParser() {
		// nothing to do
	}
	
	/**
	 * Parses one raw page: extracts its text body and, for non-text
	 * pages, every absolute outgoing link; records the elapsed parse
	 * time in the resulting {@link Page}; and replaces the contents
	 * returned by {@link #getNew_links()}.
	 * 
	 * @param raw_page raw html or text page to be parsed; its
	 *        {@code page} and {@code from_url} fields must be non-null
	 */
	public void parse(final RawPage raw_page) {
		// Use the monotonic nanoTime() clock for durations:
		// currentTimeMillis() can jump backwards on clock adjustment
		// and produce negative or wildly wrong elapsed times.
		final long start = System.nanoTime();
		final List<String> link_list = new LinkedList<String>(); 

		final Document doc = Jsoup.parse(raw_page.page, raw_page.from_url);
		if (!raw_page.from_url.endsWith(".txt")) {//only get links from non text files.
			final Elements links = doc.select("a[href]");
			for (final Element link : links) {
				// "abs:href" resolves relative hrefs against from_url;
				// attr() returns "" (never null) when resolution fails,
				// so an emptiness check alone is sufficient.
				final String href = link.attr("abs:href");
				if (!href.isEmpty()) {
					link_list.add(href);
				}
			}	
		}
		final long elapsed_ms = (System.nanoTime() - start) / 1000000L;
		// Jsoup.parse always builds a full html/head/body skeleton,
		// so doc.body() is non-null even for plain-text input.
		parsed_pages.add(new Page(doc.body().text(), link_list, elapsed_ms));
		new_links.clear();
		new_links.addAll(link_list);
	}

	/**
	 * @return the parsed_pages accumulated so far; this is the live
	 *         internal list — callers should not mutate it
	 */
	protected List<Page> getParsed_pages() {
		return parsed_pages;
	}

	/** Discards all pages accumulated so far. */
	protected void reset() {
		parsed_pages.clear();
	}

	/**
	 * @return the new_links from the most recently parsed html page;
	 *         this is the live internal list — callers should not
	 *         mutate it
	 */
	protected List<String> getNew_links() {
		return new_links;
	}
}
