/**
 * Application Name: TCSS422_WebCrawler
 * Group Name: The Other Guys
 * Members: Scott Freeman, Anthony Melcher, Jason Green
 * Date: November 10, 2011
 * 
 * Related libraries: Jericho HTML Page Parser (author unknown) 
 * 					  http://jericho.htmlparser.net/docs/index.html
 */
package controller;

import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.InputMismatchException;
import java.util.Map;
import java.util.Scanner;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;

import model.Page;

/**
 * This class is the program start point. It displays
 * various UI menus and handles user action accordingly
 * to start the program. 
 */
public class Spider {
	
	/**
	 * The allowed file types when crawling the web.
	 * (Plain HashSet instead of the old double-brace initializer, which
	 * created a serializable anonymous subclass for no benefit.)
	 */
	public final static Set<String> VALID_PAGE_TYPE = 
		new HashSet<String>(Arrays.asList(".html", ".htm", ".txt"));
	
	/**
	 * A URL not to make a connection to.
	 */
	public final static String DONT_GO_HERE = "http://questioneverything.typepad.com/";
	
	/**
	 * The maximum number of pages allowed to be processed.
	 */
	private static final int PAGE_LIMIT = 10000;
	
	/**
	 * The number of threads for the Page Parser thread pool.
	 */
	private static final int DEFAULT_PARSER_THREAD = 15;
	
	/**
	 * The number of threads for the Page Retriever thread pool.
	 */
	private static final int DEFAULT_RETRIEVER_THREAD = 25;

	/**
	 * A UI prompt for user input.
	 */
	private final static String INPUT_PROMPT = "  Input >> ";
	
	/**
	 * A generic error message.
	 */
	private final static String INPUT_ERROR = "\n  *** Input Error ***";
	
	/**
	 * The collection that stores web pages that have 
	 * already been retrieved from the Web.
	 */
	public static LinkedBlockingQueue<Page> pageBuffer;
	
	/**
	 * The collection that stores URLs that will be
	 * processed sometime in the future.
	 */
	public static LinkedBlockingQueue<URI> urlBuffer;
	
	/**
	 * The collection that stores a history of the 
	 * URLs that have already been visited.
	 */
	public static Set<URI> urlHistory;
	
	/**
	 * The collection that keeps track of the keywords
	 * and the number of occurrences of each word.
	 */
	public static Map<String,AtomicInteger> keywordsMap;
	
	/**
	 * The collection that stores the various data from
	 * web pages to be processed for statistics.
	 */
	public static LinkedBlockingQueue<Page> DATA_PAGE_LIST;
	
	/**
	 * The number of web pages to process.
	 */
	public static AtomicInteger maxPages;
	
	/**
	 * Determines which file to write to: {@code true} selects the file
	 * for the single-threaded version, {@code false} the file for the
	 * multi-threaded version.
	 */
	public static boolean file_out_selection;
	
	/**
	 * The number of pages that have already been processed.
	 */
	public static AtomicInteger processedPages;
	
	/**
	 * The number of pages that have already been retrieved.
	 */
	public static AtomicInteger retrievedPages;
	
	/**
	 * The default root URL seed if one is not given
	 * by the user.
	 */
	private static String defaultURL = "http://faculty.washington.edu/gmobus/";
	
	/**
	 * The default word search list if search words are not 
	 * given by the user.
	 */
	private static String defaultWordSearch = "intelligence artificial agent university research science robot";
	
	/**
	 * This method is the main portal to start execution
	 * of the program.
	 */
	public static void main(String... args) {
		displayUI();
	}
	
	/**
	 * This method instantiates the various collections and
	 * class variables used by a parsing object. This
	 * method also turns off the Jericho HTML Page Parser
	 * logging data.
	 * 
	 * @param pages the max number of pages to process.
	 */
	private static void init(final int pages) {
		// turn off parser debug info
		net.htmlparser.jericho.Config.LoggerProvider = 
			net.htmlparser.jericho.LoggerProvider.DISABLED;
		
		retrievedPages = new AtomicInteger();
		processedPages = new AtomicInteger();
		maxPages = new AtomicInteger(pages);
		
		keywordsMap = new HashMap<String, AtomicInteger>();
		pageBuffer = new LinkedBlockingQueue<Page>();
		// Bounded so the retriever threads cannot outrun the parsers forever.
		urlBuffer = new LinkedBlockingQueue<URI>(5000);
						
		urlHistory = Collections.synchronizedSet(new HashSet<URI>(50000));
		DATA_PAGE_LIST = new LinkedBlockingQueue<Page>();
	}

	
	/**
	 * Splits a space-separated word list into the keyword map and
	 * initializes a zeroed counter for each keyword. At most 10 keywords
	 * are accepted; any extra words are silently ignored.
	 * 
	 * @param word the space-separated keyword list; if empty, the
	 *             default word-search list is used instead
	 */
	private static void addKeyWord(String word) {
		if(word.isEmpty()) {
			word = defaultWordSearch;
		}
		Scanner scan = new Scanner(word);
		scan.useDelimiter(" ");
		int count = 0;
		// Cap at 10 keywords, per the menu's advertised maximum.
		while(scan.hasNext() && count++ < 10) {
			keywordsMap.put(scan.next().toLowerCase(), new AtomicInteger(0));
		}
		scan.close();
	}
	
	/**
	 * This method is named horribly. We know. Its purpose is to
	 * try to add a URL to the url history collection.
	 * 
	 * <p>NOTE(review): {@code urlHistory} is already a synchronizedSet,
	 * so the method-level lock is extra insurance rather than strictly
	 * required for a single {@code add()} call.</p>
	 * 
	 * @param url the URL to try to add
	 * @return {@code true} if the URL was added, {@code false} if it was
	 *         already present
	 */
	public static synchronized boolean sycSetAdd (final URI url) {		
		   return urlHistory.add(url);
    }
	
	/**
	 * This method is the initial portal for the Main menu
	 * of the program. It loops until the user chooses to quit.
	 * 
	 * <p>NOTE(review): this class creates several {@code Scanner}s over
	 * {@code System.in}; because each Scanner buffers input, mixing them
	 * is fragile. Preserved as-is to avoid changing interactive
	 * behavior.</p>
	 */
	public static void displayUI() {
		init(1);
		boolean repeat = true;
		System.out.println(" Greetings! This is The Other Guys' webcrawler project.\n  How may we assist you?");
		
		while(repeat) {
			resetData();

			System.out.println("\n _____________________________________________________");
			System.out.println("  1: Crawl the web");
			System.out.println("  2: Set Keyword to search (10 Maximum)");
			System.out.println("  3: Quit");
			System.out.print(INPUT_PROMPT);
			Scanner choice = new Scanner(System.in);
			try {
				switch(choice.nextInt()) {
					case 1 : 
						displayCrawlChoice(); 
						break;
					case 2 : 
						keywordsMap.clear();
						System.out.println("\n  What to search for?  " +
								"(10 Maximum. Press Enter to use default word search list)");
						System.out.print(INPUT_PROMPT);
						addKeyWord(new Scanner(System.in).nextLine().toLowerCase());
						break;
					case 3 :
						repeat = false; 
						break;
					default:
						System.out.println(INPUT_ERROR);
				}
			} catch (final InputMismatchException e) {
				// Non-numeric menu input; the bad token is consumed below.
				System.out.println(INPUT_ERROR);
			}
			// Discard the remainder of the input line (including any bad token).
			choice.nextLine();
		}
		System.out.println("\n ** Program has terminated **");
	}

	/**
	 * This method displays and executes the actions associated with the
	 * two versions of the program: the single-threaded and
	 * multi-threaded web crawlers. After a crawl finishes, the user is
	 * asked whether to run another query.
	 */
	private static void displayCrawlChoice() {
		boolean repeat = true;
		
		while(repeat) {
			System.out.println("\n _____________________________________________________");
			System.out.println("  1: Use a Single Threaded Implementation");
			System.out.println("  2: Use a Multi-Threaded Implementation");
			System.out.println("  3: Return to previous Menu");
			System.out.print(INPUT_PROMPT);
			Scanner choice = new Scanner(System.in);
			
			try {
				switch(choice.nextInt()) {
					case 1 : {
						final int pages = readPageCount();
						if(pages < 0) {
							break;
						}
						maxPages = new AtomicInteger(pages);
						file_out_selection = true;
						new SingleThread(readSeed()).execute();
						repeat = false;
						break;
					}
					case 2 : {
						final int pages = readPageCount();
						if(pages < 0) {
							break;
						}
						maxPages = new AtomicInteger(pages);
						file_out_selection = false;
						// new URL(...) validates the seed; an invalid one
						// throws MalformedURLException, handled below.
						MultiThread multi = new MultiThread(new URL(readSeed()).toString());
						multi.setParserThreadCount(DEFAULT_PARSER_THREAD);
						multi.setRetrieverThreadCount(DEFAULT_RETRIEVER_THREAD);
						multi.execute();
						repeat = false;
						break;
					}
					case 3:
						// Back to the previous menu; skip the re-query prompt.
						return;
					default:
						System.out.println(INPUT_ERROR);
				}
			} catch (final URISyntaxException e) {
				System.out.println("\n  ** We're sorry, the website you attempted to set as the root was invalid.");
			} catch (final IllegalArgumentException e) {
				// Also covers NumberFormatException from the page-count parse.
				System.out.println(INPUT_ERROR);
			} catch (final InputMismatchException e) {
				System.out.println(INPUT_ERROR);
			} catch (final MalformedURLException e) {
				System.out.println("\n  ** We're sorry, the website you attempted to set as the root was invalid.");
			}
		}
		System.out.println("  Do you want to perform a new query? (Yes/No)");
		System.out.print(INPUT_PROMPT);
		
		String answer = new Scanner(System.in).next().toLowerCase();
		if("no".equals(answer) || "n".equals(answer)) {
			System.out.println("\n ** Program has terminated **");
			System.exit(0);
		}
	}
	
	/**
	 * Prompts for and reads the number of web pages to search.
	 * 
	 * @return the page count, or -1 (after printing an error) when the
	 *         value is not in the range 1..PAGE_LIMIT
	 */
	private static int readPageCount() {
		System.out.println("\n  How many web pages should we search?");
		System.out.print(INPUT_PROMPT);
		
		final int pageAmount = Integer.parseInt(new Scanner(System.in).next());
		if(pageAmount > 0 && pageAmount <= PAGE_LIMIT) {
			return pageAmount;
		}
		System.out.println("\n  ** Maximum amount of pages exceeded.");
		return -1;
	}
	
	/**
	 * Prompts for and reads the root website seed, substituting the
	 * default root URL when the user just presses Enter.
	 * 
	 * @return a non-empty URL string to crawl from
	 */
	private static String readSeed() {
		System.out.println("\n  What is the root website?  " +
				"(Press Enter to use default root website)");
		System.out.print(INPUT_PROMPT);
		
		String seed = validateURLFormat();
		if(seed.isEmpty()) {
			seed = defaultURL;
		}
		return seed;
	}
	
	/**
	 * Reads a URL from standard input and prepends "http://" when no
	 * http/https protocol was supplied.
	 * 
	 * <p>Fix: the protocol check is now case-insensitive while the URL
	 * itself is returned as typed — the old code lowercased the whole
	 * URL, corrupting case-sensitive paths.</p>
	 * 
	 * @return the (possibly fixed) URL string, or an empty string if the
	 *         user pressed Enter without typing anything
	 */
	private static String validateURLFormat() {
		String seed = new Scanner(System.in).nextLine();
		final String lower = seed.toLowerCase();
		if(!seed.isEmpty() && !lower.startsWith("http://") && !lower.startsWith("https://")) {
			seed = "http://" + seed;
		}
		return seed;
	}
	
	/**
	 * This method clears/resets the various collections used for storing
	 * data and zeroes the page counters. Keyword counters are reset to 0
	 * but the keywords themselves are kept.
	 */
	private static void resetData() {
		for(AtomicInteger counter : keywordsMap.values()) {
			counter.set(0);
		}
		urlBuffer.clear();
		pageBuffer.clear();
		DATA_PAGE_LIST.clear();
		urlHistory.clear();
		processedPages.set(0);
		retrievedPages.set(0);		
	}
}


