/**
 * 
 */
package controller;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import report.DataGatherer;

import jericho_parser_wrapper.PageParser;
import jericho_parser_wrapper.PageRetriever;

/**
 * Multi-threaded web crawler controller. Coordinates two separate
 * thread pools — one for retrieving HTML pages and one for parsing
 * them — and drives a data gatherer that collects and reports results.
 *
 * @author jasongreen
 *
 */
public class MultiThread {
	/**
	 * The root seed page URL address.
	 */
	private String url;
	/** Maximum number of pages to retrieve and to process. */
	private int maxPages;
	/** Thread pool that runs {@code PageParser} tasks. */
	private ExecutorService parser;
	/** Thread pool that runs {@code PageRetriever} tasks. */
	private ExecutorService retriever;
	// NOTE(review): public mutable static counter — not referenced in this
	// class; presumably incremented elsewhere in the project. Kept as-is for
	// backward compatibility.
	public static int counter = 0;
	
	/**
	 * Instantiates a MultiThread crawler controller. Both thread pools
	 * default to cached (self-sizing) pools.
	 * 
	 * @param seed the root URL to start from
	 * @param maxPages the maximum number of pages to retrieve and process
	 */
	public MultiThread(final String seed, final int maxPages) {
		url = seed;
		this.maxPages = maxPages;
		parser = Executors.newCachedThreadPool();
		retriever = Executors.newCachedThreadPool();
	}
	
	/**
	 * This method sets the Parser thread pool to a 
	 * fixed size. If the number of threads passed in
	 * is 0 then the thread pool will be in an automatic
	 * mode in which the pool manages the thread count.
	 * 
	 * @param count the number of threads to set
	 */
	public void setParserThreadCount(final int count) {
		if (count != 0) {
			// Shut down the old pool before replacing it, otherwise its
			// threads would leak (the original code dropped the reference
			// without stopping the pool).
			parser.shutdownNow();
			parser = Executors.newFixedThreadPool(count);
		}
	}
	
	/**
	 * This method sets the Retriever thread pool to a 
	 * fixed size. If the number of threads passed in
	 * is 0 then the thread pool will be in an automatic
	 * mode in which the pool manages the thread count.
	 * 
	 * @param count the number of threads to set
	 */
	public void setRetrieverThreadCount(final int count) {
		if (count != 0) {
			// Shut down the old pool before replacing it (see setParserThreadCount).
			retriever.shutdownNow();
			retriever = Executors.newFixedThreadPool(count);
		}
	}
	
	/**
	 * This method starts the web crawler. It handles
	 * getting new HTML pages, parses the pages, collects
	 * data, and reports the data. Runs until the data gatherer
	 * has seen {@code maxPages} pages, then stops both pools.
	 * 
	 * @throws URISyntaxException if the seed URL is in an invalid format
	 */
	public void execute() throws URISyntaxException {
		final DataGatherer dataGatherer = new DataGatherer();
		
		Spider.urlBuffer.add(new URI(url));
		
		// Busy-wait dispatch loop: each iteration schedules at most one
		// retrieval task and one parse task, then runs the gatherer
		// synchronously on this thread.
		while (true) {
			// Guard with isShutdown() so we never submit to a stopped pool,
			// which would throw RejectedExecutionException.
			if (!retriever.isShutdown() && Spider.urlBuffer.size() > 0
					&& Spider.retrievedPages.get() < maxPages) {
				retriever.execute(new PageRetriever());
			} else if (!retriever.isShutdown() && Spider.retrievedPages.get() >= maxPages) {
				retriever.shutdownNow();
			}
			
			if (!parser.isShutdown() && Spider.pageBuffer.size() > 0
					&& Spider.processedPages.get() < maxPages) {
				// execute() instead of submit(): the Future was discarded anyway.
				parser.execute(new PageParser());
			} else if (!parser.isShutdown() && Spider.processedPages.get() >= maxPages) {
				parser.shutdownNow();
			}
			
			if (dataGatherer.pagesRetrieved == maxPages) {
				break;
			}
			// BUG FIX: the original wrapped this in `new Thread() {...}.run()`.
			// Calling run() (not start()) executes on the current thread anyway,
			// so the Thread wrapper was dead weight; call the gatherer directly.
			dataGatherer.run();
		}
		parser.shutdownNow();
		retriever.shutdownNow();
		System.out.printf("\nURL Buffer Size: %-5d Page Buffer Size: %-5d Processed Pages: %-5d DataGatherer Pages Retrieved: %-5d", 
				Spider.urlBuffer.size(), Spider.pageBuffer.size(), Spider.processedPages.get(), dataGatherer.pagesRetrieved);
	}
}
