/*
 * Improbability Drive
 * Phillip Cardon, Thach Nguyen, Cristopher Claeys
 * 4/26/2011
 */
package background;

import java.net.MalformedURLException;
import java.net.URL;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import structures.GathererStruct;
import structures.Tuple;
import buffers.SynchronizedBuffer;

/**
 * PageParser. URL detection code based on information retrieved from
 * http://jsoup.org/cookbook/extracting-data/working-with-urls
 * Note: Links are filtered to only keep htm, html, and txt files.
 * @author Jonathan Hedley - Contributed JSoup library
 * @author Thach Nguyen
 */
public class PageParser extends Thread
{
    //Constants
    /**
     * Conversion ratio from nanoseconds to milliseconds.
     */
    private static final int RATIO = 1000000;

    //Instance Variables
    /**
     * Buffer containing URLs of pages which need to be retrieved.
     */
    private final SynchronizedBuffer<URL> pageToRetrieveBuffer;

    /**
     * Buffer of retrieved pages, each paired with its source URL.
     */
    private final SynchronizedBuffer<Tuple<Document, URL>> pageBuffer;

    /**
     * The Data-Gatherer for data mining.
     */
    private final DataGatherer myGatherer;

    /**
     * Flag to kill the thread.
     * BUG FIX: this was {@code static}, so a single kill() stopped every
     * PageParser instance and each constructor call revived all of them.
     * It is now per-instance and {@code volatile} so that a kill() issued
     * from another thread is guaranteed to be visible to run().
     */
    private volatile boolean alive;

    /**
     * Constructor.
     * @param theRetrieveBuffer buffer to place outgoing links found on parsed pages.
     * @param thePages buffer of fetched pages awaiting parsing.
     * @param theGatherer collector that receives per-page parse statistics.
     */
    public PageParser(SynchronizedBuffer<URL> theRetrieveBuffer,
                      SynchronizedBuffer<Tuple<Document, URL>> thePages,
                      DataGatherer theGatherer)
    {
        pageToRetrieveBuffer = theRetrieveBuffer;
        pageBuffer = thePages;
        myGatherer = theGatherer;
        alive = true;
    }

    /**
     * Polls the page buffer, extracts and filters anchor links from each
     * page (keeping only htm, html, txt, and directory-style URLs), queues
     * them for retrieval, and reports parse statistics to the gatherer.
     * Loops until {@link #kill()} is called.
     */
    @Override
    public void run()
    {
        Document page;
        Tuple<Document, URL> fromBuffer;
        do
        {
            fromBuffer = null;
            page = null;
            // Check-then-dequeue must be atomic: another parser could drain
            // the buffer between size() and dequeue().
            synchronized (pageBuffer)
            {
                if (pageBuffer.size() > 0)
                {
                    fromBuffer = pageBuffer.dequeue();
                    page = fromBuffer.getFirst();
                }
            }

            if (fromBuffer != null && page != null) //check for null page
            {
                int linkCount = 0;
                long startTime = System.nanoTime();
                Elements links = page.select("a"); //Extract all links
                for (Element link : links)
                {
                    String theLink = link.attr("abs:href");
                    // Strip any fragment so "page.html#section" passes the
                    // extension filter and duplicates collapse to one URL.
                    if (theLink.contains("#"))
                    {
                        theLink = theLink.substring(0, theLink.indexOf('#'));
                    }
                    // Filter: only follow htm/html/txt files and directories.
                    if (theLink.endsWith("html") || theLink.endsWith("htm") ||
                        theLink.endsWith("txt") || theLink.endsWith("/"))
                    {
                        try
                        {
                            pageToRetrieveBuffer.enqueue(new URL(theLink));
                            linkCount++;
                        } catch (MalformedURLException mue)
                        {
                            // Best-effort: skip the bad link but say which one.
                            System.err.println("Skipping malformed link: " + theLink);
                        }
                    }
                }
                long parseTime = (System.nanoTime() - startTime) / RATIO;
                myGatherer.add(new GathererStruct(linkCount, fromBuffer.getSecond(),
                               page.body().text(), parseTime));
            }

        } while (alive);
    } //end run()

    /**
     * Method to kill this parser thread; run() exits after its current pass.
     */
    public void kill()
    {
        alive = false;
    }
}
