/**
 * HTML Parser for the 422 project.
 * Takes HTML documents from an input queue, passes discovered links to
 * the data retriever through an output queue, and supplies page data to
 * the reporter.
 * 
 * @author Zach Turk, Dickson Kwong, Stephen Amos
 */
package parser;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Observable;
import java.util.concurrent.BlockingQueue;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class Parser extends Observable implements Runnable 
{
	/**
	 * Input queue of HTML documents waiting to be parsed.
	 */
	private final BlockingQueue<Document> queue;

	/**
	 * Output queue of URLs handed to the data retriever.
	 */
	private final BlockingQueue<URL> url_queue;

	/**
	 * The HTML document currently being processed.
	 */
	private Document buffer;

	/**
	 * Keywords the parser counts on each page.
	 * The assignment caps this at 10, but no limit is enforced here.
	 */
	private final String[] keywords;

	/**
	 * The well-formed links parsed from the current page.
	 */
	private URL[] links;

	/**
	 * Shared record of links already visited (or already queued).
	 */
	private final List<URL> visited_links;

	/**
	 * Number of crawlable URLs (http/https ending in .html, .htm, or /)
	 * found in the current buffer document. Recomputed from scratch on
	 * every call to {@link #getLinksFound()}.
	 */
	private int urls_found_in_this_buffer;

	/**
	 * Constructor for Parser.
	 * @param document_queue Input - The queue of documents to be parsed.
	 * @param url_queue Output - The queue of URLs to write to.
	 * @param keywords The keywords for the parser to look for.
	 * @param visited_urls Shared collection of URLs already visited.
	 */
	public Parser(BlockingQueue<Document> document_queue, BlockingQueue<URL> url_queue, 
			String[] keywords, List<URL>visited_urls) {
		queue = document_queue;
		this.url_queue = url_queue;
		this.keywords = keywords;
		visited_links = visited_urls;
	}

	/**
	 * Loads the parser with the next page in the queue, blocking until
	 * one is available.
	 */
	private void loadNext() {
		try {
			buffer = queue.take();
		} catch (InterruptedException e) {
			// Restore the interrupt status so the owning thread can
			// observe the interruption instead of it being swallowed.
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * Queues a URL for retrieval and records it as visited.
	 *
	 * @param url the URL to enqueue.
	 */
	private void addURL(URL url) {
		try {
			url_queue.put(url);
			visited_links.add(url);
		} catch (InterruptedException e) {
			// Preserve the interrupt instead of discarding it.
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * Decides whether a URL is acceptable for crawling: http/https
	 * protocol, a path ending in .html, .htm, or /, no fragment marker,
	 * and not on the excluded questioneverything.typepad host.
	 * Shared by {@link #setLinks()} and {@link #calculateLinkCount()},
	 * which previously duplicated this test verbatim.
	 *
	 * @param url the URL to check.
	 * @return true if the URL should be crawled.
	 */
	private static boolean isCrawlable(URL url) {
		String protocol = url.getProtocol();
		String file = url.getFile();
		String full = url.toString();
		return (protocol.equals("http") || protocol.equals("https"))
				&& (file.endsWith(".html") || file.endsWith(".htm") || file.endsWith("/"))
				&& !full.contains("#")
				&& !full.contains("questioneverything.typepad");
	}

	/**
	 * Parses the anchors out of the current buffer, records every
	 * well-formed URL, and enqueues the crawlable, not-yet-visited ones
	 * for retrieval (tracking them in the visited collection).
	 */
	private void setLinks() {
		//Get the links
		Elements linklist = buffer.select("a[href]");
		List<URL> parsed = new ArrayList<URL>(linklist.size());

		//Add the new links (unvisited) to url_queue.
		//Track the new links in the visited_links list.
		for (Element link : linklist) {
			try {
				URL url = new URL(link.attr("abs:href"));
				parsed.add(url);
				if (!visited_links.contains(url) && isCrawlable(url)) {
					addURL(url);
				}
			} catch (MalformedURLException e) {
				// Skip anchors whose href is not a valid absolute URL.
			}
		}
		// Keep only well-formed URLs: the old fixed-size array left null
		// slots for malformed hrefs and countLinks() over-counted them.
		this.links = parsed.toArray(new URL[0]);
	}

	/**
	 * Returns the links found on the page.
	 * 
	 * @return the well-formed links found on the page.
	 */
	public URL[] getLinks() {
		return links;
	}

	/**
	 * Returns the number of links on the page.
	 * 
	 * @return The number of well-formed links on the page.
	 */
	public int countLinks() {
		return links.length;
	}

	/**
	 * Returns the number of words on the page.
	 * 
	 * @return The number of whitespace-separated words on the page.
	 */
	public int countWords() {
		String text = buffer.text().trim();
		// A blank page has zero words; "".split(...) would report one.
		if (text.isEmpty()) {
			return 0;
		}
		// Split on runs of whitespace so multiple spaces or line breaks
		// do not produce empty "words".
		return text.split("\\s+").length;
	}

	/**
	 * Returns the number of occurrences of each keyword on the page.
	 * Matching is case-insensitive on the page side; keywords are
	 * assumed to be supplied in lower case.
	 * NOTE(review): the original checked {@code text == null} AFTER
	 * calling toLowerCase() on it, so that check could never fire; it
	 * has been removed as dead code.
	 * 
	 * @return The per-keyword occurrence counts, indexed like keywords.
	 */
	public int[] countKeywords() {
		String text = buffer.text().toLowerCase();
		int[] keywordCount = new int[keywords.length];

		for (int i = 0; i < keywords.length; i++) {
			// Count overlapping occurrences by advancing one character
			// past each match.
			int index = text.indexOf(keywords[i]);
			while (index != -1) {
				keywordCount[i]++;
				index = text.indexOf(keywords[i], index + 1);
			}
		}
		return keywordCount;
	}

	/**
	 * Runs the parser: takes one document from the input queue,
	 * processes its links, and notifies observers.
	 */
	@Override
	public void run() {
		//remove a document from the input queue and process
		loadNext();
		setLinks();
		setChanged();
		notifyObservers();
	}
	
	
	/**
	 * Gets the amount of crawlable links found at the retrieved page.
	 * 
	 * @return the amount of crawlable links found at the retrieved page.
	 */
	public int getLinksFound()
	{
		calculateLinkCount();
		return urls_found_in_this_buffer;
	}
	
	/**
	 * Gets the buffer's URL address.
	 * 
	 * @return the buffer's URL address.
	 */
	public String getURLAddress()
	{
		return buffer.baseUri();
	}
	
	/**
	 * Calculates the amount of crawlable links found at the retrieved
	 * page, storing the result in urls_found_in_this_buffer.
	 */
	private void calculateLinkCount() 
	{
		// Reset first: the original never cleared the counter, so every
		// call to getLinksFound() accumulated on top of earlier calls.
		urls_found_in_this_buffer = 0;

		for (Element link : buffer.select("a[href]")) 
		{
			try 
			{
				URL url = new URL(link.attr("abs:href"));
				if (isCrawlable(url)) 
				{
					urls_found_in_this_buffer++;
				}
			}
			catch (MalformedURLException e) 
			{ 
				// Malformed hrefs are not countable links.
			}
		}
	}
	

}
