/*
 * University Of Washington - Tacoma
 * TCSS-422, Operating Systems
 * Assignment 1 - Web Crawler
 * April 26, 2011
 * 
 * Team: Kernel Monkeys
 * Andrew Boguk
 * Sergiy Kulyk
 * Nicholas Swartzendruber
 */

package crawler.main;

/**
 * This program is a web crawler (spider) that retrieves web pages
 * from the Internet and searches for the keywords that were provided
 * to it. It works only with .html, .htm and .txt files. While crawling
 * the web, it accumulates statistics and prints them on a per-page basis.
 * 
 * @author Andrew Boguk
 * @author Sergiy Kulyk 
 * @version April 25, 2011
 */
public class Crawler 
{
	/**
	 * The usage line printed when the arguments are missing or invalid.
	 */
	public static final String USAGE = 
		"Usage: java -jar crawler.jar <num_threads> <output_file_name>";
	
	
	/**
	 * Start the program. Validates the command line arguments, then hands
	 * control to {@link CrawlerUI}.
	 * 
	 * @param the_args Command line arguments expected:
	 *                 [0] - number of threads to use,
	 *                 [1] - output file name.
	 */
	public static void main(final String[] the_args) {
		
		if (the_args.length < 2) {
			System.out.println(USAGE);
			return;
		}
		
		int threads;
		try {
			threads = Integer.parseInt(the_args[0]);
		} catch (final NumberFormatException e) {
			// A non-numeric thread count previously crashed with an
			// uncaught NumberFormatException; show the usage line instead.
			System.out.println(USAGE);
			return;
		}
		
		// Clamp to at least one worker thread.
		if (threads < 1) {
			threads = 1;
		}
		
		final CrawlerUI ui = new CrawlerUI(threads, the_args[1]);
		ui.run();
	}
}