/**
 * Application Name: TCSS422_WebCrawler
 * Group Name: The Other Guys
 * Members: Scott Freeman, Anthony Melcher, Jason Green
 * Date: November 10, 2011
 * 
 * Related libraries: Jericho HTML Page Parser (author unknown) 
 * 					  http://jericho.htmlparser.net/docs/index.html
 */
package controller;

import java.net.URI;
import java.net.URISyntaxException;
import EDU.oswego.cs.dl.util.concurrent.BoundedBuffer;
import EDU.oswego.cs.dl.util.concurrent.PooledExecutor;
import report.DataGatherer;

import jericho_parser_wrapper.PageParser;
import jericho_parser_wrapper.PageRetriever;

/**
 * This class represents the multi-threaded
 * version of the web crawler. It retrieves web
 * pages, parses the pages, and collects
 * data about them, using one thread pool for
 * page retrieval and another for page parsing.
 */
public class MultiThread  {
	/**
	 * The root seed page url address.
	 */
	private String url;
	
	/**
	 * The page parser thread pool.
	 */
	private PooledExecutor parser;
	
	/**
	 * The page retriever thread pool.
	 */
	private PooledExecutor retriever;
	
	/**
	 * The number of pages to process.
	 */
	private int maxPages;	
	
	/**
	 * Instantiates a MultiThread object.
	 * 
	 * @param seed the root URL to start from
	 */
	public MultiThread(final String seed) {
		url = seed;
		parser = new PooledExecutor();
		retriever = new PooledExecutor();
		maxPages = Spider.maxPages.get();
	}
	
	/**
	 * This method sets the Parser thread pool to a 
	 * fixed size. If the number of threads passed in
	 * is 0 then the thread pool will be in an automatic
	 * mode in which the pool manages the thread count.
	 * 
	 * @param count the number of threads to set
	 */
	public void setParserThreadCount(final int count) {
		if(count != 0) {			
			parser = new PooledExecutor(new BoundedBuffer(count), count);
			parser.createThreads(count);	
			parser.setKeepAliveTime(1000);
			parser.discardWhenBlocked();
		}
	}
	
	/**
	 * This method sets the Retriever thread pool to a 
	 * fixed size. If the number of threads passed in
	 * is 0 then the thread pool will be in an automatic
	 * mode in which the pool manages the thread count.
	 * 
	 * @param count the number of threads to set
	 */
	public void setRetrieverThreadCount(final int count) {
		if(count != 0) {
			retriever = new PooledExecutor(new BoundedBuffer(count), count);
			retriever.createThreads(count);
			// BUG FIX: this previously set the keep-alive time on the
			// parser pool (copy-paste error), leaving the retriever pool
			// with its default keep-alive. Configure the retriever pool.
			retriever.setKeepAliveTime(1000);
			retriever.discardWhenBlocked();
		}
	}	
	
	/**
	 * This method starts the web crawler. It handles
	 * getting new HTML pages, parsing the pages, collecting
	 * data, and reporting the data. It loops until the
	 * number of processed pages reaches {@code maxPages},
	 * then shuts down both pools and drains any remaining
	 * collected page data before returning.
	 * 
	 * @throws URISyntaxException if the seed URL is in an invalid format
	 */
	public void execute() throws URISyntaxException {
		final DataGatherer dataGatherer = new DataGatherer();
		// NOTE: the original code wrapped dataGatherer in a Thread but only
		// ever invoked run() (never start()), so the gatherer always executed
		// synchronously on this thread; calling it directly is equivalent
		// and makes that explicit.
		
		Spider.urlBuffer.add(new URI(url));
		while(true) {	
			if((Spider.DATA_PAGE_LIST.size() + dataGatherer.pagesRetrieved) >= maxPages) {
				parser.shutdownNow();
				retriever.shutdownNow();
				
				// Drain any data left in the page list before exiting.
				do {
					dataGatherer.run();
				} while(!Spider.DATA_PAGE_LIST.isEmpty());
				
				break;
			} else {
				dataGatherer.run();
			}	
			
			if(!Spider.urlBuffer.isEmpty()) {
				try {
					retriever.execute(new PageRetriever());						
				} catch (final InterruptedException ignored) {
					// Best-effort hand-off: if interrupted while queueing,
					// skip this cycle; the URL remains buffered for later.
				}					
			} 
			
			if(!Spider.pageBuffer.isEmpty()) {
				try {
					parser.execute(new PageParser());					
				} catch (final InterruptedException ignored) {
					// Best-effort hand-off: if interrupted while queueing,
					// skip this cycle; the page remains buffered for later.
				}		
			} 
		}
	}
}
