/**
 * Application Name: TCSS422_WebCrawler
 * Group Name: The Other Guys
 * Members: Scott Freeman, Anthony Melcher, Jason Green
 * Date: November 10, 2011
 * 
 * Related libraries: Jericho HTML Page Parser (author unknown) 
 * 					  http://jericho.htmlparser.net/docs/index.html
 */
package jericho_parser_wrapper;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URLConnection;
import java.util.concurrent.TimeUnit;

import model.Page;
import net.htmlparser.jericho.Source;
import controller.Spider;

/**
 * This class will take a URL and make a connection
 * over the Internet to retrieve the resulting content
 * from the page retrieved.
 */
public class PageRetriever implements Runnable {

	/**
	 * Polls one URL from the shared {@code Spider.urlBuffer}, fetches the
	 * corresponding HTML document over the network, and offers the resulting
	 * content to {@code Spider.pageBuffer} as a new {@link Page}. Returns
	 * immediately when no URL is available. Network and parse failures are
	 * deliberately treated as "skip this page" so one bad URL cannot kill
	 * the worker thread.
	 */
	@Override
	public void run() {
		try {
			// Effectively non-blocking poll (1 ns timeout): if the queue is
			// momentarily empty, give up and let the task be rescheduled.
			final URI url = Spider.urlBuffer.poll(1, TimeUnit.NANOSECONDS);
			if (url == null) {
				return;
			}

			final URLConnection connection = url.toURL().openConnection();
			// Short timeouts keep slow or unresponsive hosts from stalling
			// the crawl; the page is simply skipped on timeout (IOException).
			connection.setConnectTimeout(100);
			connection.setReadTimeout(100);

			final Source src = new Source(connection);
			Spider.pageBuffer.offer(new Page(src.toString(), url), 1, TimeUnit.NANOSECONDS);
			// Release Jericho's internal parse cache as soon as we are done.
			src.clearCache();

			Spider.retrievedPages.incrementAndGet();
		} catch (final IOException ignored) {
			// Also covers MalformedURLException (a subclass). Dead links and
			// unreachable hosts are expected during a crawl: skip the page.
		} catch (final NullPointerException ignored) {
			// Defensive: the Jericho parser can NPE on malformed content —
			// TODO(review): confirm and, if so, report upstream. Skip the page.
		} catch (final InterruptedException e) {
			// Restore the interrupt status so the owning executor/thread can
			// observe the interruption and shut down cleanly.
			Thread.currentThread().interrupt();
		}
	}
}
