import java.net.MalformedURLException;
import java.net.URL;
import java.util.Locale;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.TreeMap;
import java.util.concurrent.BlockingQueue;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class ExternalParser implements Runnable 
{
	/**
	 * Host for the instructor's blog; links pointing back at it are never re-queued.
	 */
	private static final String MOBUS_BLOG = "questioneverything.typepad.com";
	
	/**
	 * The Queue to add new URL Strings to.
	 */
	private final BlockingQueue<String> my_url_queue;
	
	/**
	 * The Document to parse.
	 */
	private final Document my_data;
	
	/**
	 * The DataGatherer to report data to.
	 */
	private final DataGatherer my_gatherer;
	
	/**
	 * Constructs a new ExternalParser that will parse the data Document looking for new URL Strings to
	 * add to the URL Queue and report its findings to the DataGatherer.
	 * @param the_url_queue The Queue to add new URL Strings to.
	 * @param the_data The data Document to parse.
	 * @param the_data_gatherer The DataGatherer object to report to.
	 */
	public ExternalParser(final BlockingQueue<String> the_url_queue, final Document the_data, final DataGatherer the_data_gatherer)
	{
		my_url_queue = the_url_queue;
		my_data = the_data;
		my_gatherer = the_data_gatherer;
	}
	
	/**
	 * Parses the Document given as a constructor argument. First tokens are extracted and keywords specified by the 
	 * DataGatherer are stored in a map to count their occurrences. Second URL Strings are extracted and offered to
	 * the URL Queue after being sanitized. Finally other data collected is given to the DataGatherer. 
	 */
	@Override
	public void run() 
	{
		final Elements links = my_data.select("a[href]"); 
		final long start = System.currentTimeMillis();
		final StringTokenizer data_tokens = new StringTokenizer(my_data.text(), " ");
		final Map<String, Integer> token_map = new TreeMap<String, Integer>();
		int number_of_words = 0;
		int number_of_urls = 0;
		
		while(data_tokens.hasMoreTokens())
		{
			// Lower-case once per token (the original recomputed it up to four times) and use
			// Locale.ROOT so keyword matching is locale-independent (e.g. Turkish dotless-i).
			final String token = data_tokens.nextToken().toLowerCase(Locale.ROOT);
			
			if(my_gatherer.getKeywords().contains(token))
			{
				final Integer count = token_map.get(token);
				token_map.put(token, count == null ? 1 : count + 1);
			}
			number_of_words++;
		}
		
		for(Element page : links)
		{
			// "abs:href" resolves relative links against the page's base URI; it yields "" when
			// the link cannot be made absolute, which all the checks below safely reject.
			String url = page.attr("abs:href");
			
			// Strip fragment and query components so duplicate pages are not queued twice.
			url = url.contains("#") ? url.substring(0, url.indexOf('#')) : url; 
			url = url.contains("?") ? url.substring(0, url.indexOf('?')) : url; 
			
			// startsWith (not contains) so only http/https links qualify; the old contains("http")
			// also accepted any URL whose path merely mentioned "http" (e.g. ftp://x/http-notes).
			if(!url.contains(MOBUS_BLOG) && url.startsWith("http"))
			{	
				try 
				{
					final URL url_object = new URL(url);			
					final String path = url_object.getPath();//It is difficult to extract the path so the URL class is used.
					
					// Queue only pages likely to be HTML/text: a known text extension, or no
					// extension at all. (!path.contains(".") already covers "" and "/".)
					if(path.contains(".html") || path.contains(".htm") || path.contains(".txt") || !path.contains("."))
					{								
						my_url_queue.offer(url);
					}
					
				} 
				catch(final MalformedURLException the_exception) 
				{
					// Best-effort crawl: a malformed link is reported and skipped, not fatal.
					the_exception.printStackTrace();
				}
			}

			number_of_urls++;
		}
		
		// baseUri() already returns a String, so no toString() is needed.
		my_gatherer.parsePassedData(System.currentTimeMillis() - start, token_map, number_of_words, my_data.baseUri(), number_of_urls);			
	}
}
