import java.util.*;

import page.PageParser;
import page.PageRetriever;
import data.DataGatherer;


/**
 * Model which contains all of the functionality of the Spider program.
 * This class manages all of the objects required by the Spider.
 * 
 * Basically, we can use this class to execute the 
 * spider program with one command: execute(...)
 * 
 * @author Ken "David" McClain 
 * @author Peter Mosher
 * @version 1
 */
public class ModelSingle {
	
	/** Constructor, does nothing. */
	public ModelSingle() { }
	
	/** Creates all necessary objects and ensures they work together.
	 * 
	 * @param pageLimit Maximum number of pages to retrieve.
	 * @param seedURL   Starting webpage.
	 * @param keywords  Collection of Strings to look for and key count of.
	 */
	public void execute(final int pageLimit, final String seedURL, 
			final String[] keywords) {
		
		final DataGatherer gatherer = new DataGatherer(pageLimit);
		
		// Work queue of URLs still to crawl, seeded with the starting page.
		// Kept as LinkedList because PageParser receives it directly.
		final LinkedList<String> pending = new LinkedList<String>();
		pending.add(seedURL);
		
		final PageRetriever retriever = new PageRetriever();
		final PageParser parser = new PageParser(keywords, pending, gatherer);
		
		// URLs already fetched, so each page is retrieved at most once.
		final Set<String> seen = new HashSet<String>();
		
		// Crawl until no work remains or the gatherer reports the page limit hit.
		while (!pending.isEmpty() && !gatherer.hitLimit()) {
			final String current = pending.remove();
			
			// Set.add returns false for duplicates; skip URLs seen before.
			if (!seen.add(current)) {
				continue;
			}
			
			// Fetch the page synchronously — the multi-threaded
			// implementation (.start) is deliberately NOT used here.
			final String html = retriever.getHTML(current);
			if ("".equals(html)) {
				continue;
			}
			
			// Parsing reports to the DataGatherer & Reporter automatically;
			// again called directly rather than via .start.
			parser.parse(current, html);
		}
	}
	
}
