/*
 * Web Crawler
 * Date: November 01, 2011
 * Group JEDi
 * Author(s): James Lovato, Efim Todorasco, Daniel Garrison
 */
package multithread;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Observable;
import java.util.Observer;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.jsoup.nodes.Document;

import singlethread.CrawlerPageRetriever;
import singlethread.CrawlerParser;
/**
 * This is the multi-threaded crawler.  This will contain the Thread pools for
 * the ThreadedParsers and ThreadedRetrievers.  It will also contain the thread-safe
 * queues that the ThreadedParsers and ThreadedRetrievers will draw from.
 * @author James Lovato, Efim Todorasco, Daniel Garrison
 * @version Fall 2011
 *
 */
public class ThreadedCrawler implements Observer {
	/**
	 * Thread-safe queue of retrieved page documents waiting to be parsed.
	 * Retrievers put, parsers take.
	 */
	private final BlockingQueue<Document> my_page_documents = new LinkedBlockingQueue<Document>();
	
	/**
	 * Thread-safe queue of URL strings waiting to be retrieved.
	 * Parsers put newly-discovered links, retrievers take.
	 */
	private final BlockingQueue<String> my_links = new LinkedBlockingQueue<String>();
	
	/**
	 * The words to search for on each page.
	 */
	private final String[] my_search_words;

	/**
	 * The pool of parser threads.
	 */
	private final ExecutorService my_pool_of_parsers;
	
	/**
	 * The pool of retriever threads.
	 */
	private final ExecutorService my_pool_of_retrievers;
	
	/**
	 * Count of pages fully parsed.  NOTE(review): despite the name, this is
	 * incremented by ParserPopUp after a successful parse, not by retrievers.
	 */
	private final AtomicInteger my_pages_retrieved;
	
	/**
	 * Count of documents successfully downloaded and queued for parsing.
	 * NOTE(review): despite the name, this is incremented by RetrieverPopUp,
	 * not by parsers.
	 */
	private final AtomicInteger my_page_parse_counter;
	
	/**
	 * The total number of pages to retrieve and parse.
	 */
	private final int my_number_of_pages;
	
	/**
	 * Set of already-seen URL strings, used to enqueue each link only once.
	 * Wrapped with Collections.synchronizedSet; batch operations additionally
	 * synchronize on the set itself.
	 */
	private final Set<String> my_finished_links;
	
	/**
	 * The data gatherer that aggregates per-page metrics.
	 */
	private final ThreadSafeGatherer my_data_gatherer;
	
	/**
	 * Constructor for the ThreadedCrawler.
	 * This implementation limits the number of ThreadedParsers and ThreadedRetrievers
	 * equally with the_thread_max.
	 * @param the_number_of_pages the number of pages to be crawled.
	 * @param the_search_words the list of words to be searched for on each page.
	 * @param the_thread_max the max number of threads (for each type not total combined)
	 * @param the_seed_url the original url address to start the crawler.
	 * @param the_report_observer observer notified by the gatherer when reporting is done.
	 */
	public ThreadedCrawler(int the_number_of_pages, String[] the_search_words, int the_thread_max, String the_seed_url,
			Observer the_report_observer) {
		my_data_gatherer = new ThreadSafeGatherer(the_number_of_pages, the_search_words, the_report_observer,
				the_thread_max);
		my_number_of_pages = the_number_of_pages;
		my_links.add(the_seed_url);
		my_search_words = the_search_words;
		my_finished_links = Collections.synchronizedSet(new HashSet<String>());
		my_pages_retrieved = new AtomicInteger();
		my_page_parse_counter = new AtomicInteger();
		my_pool_of_retrievers = Executors.newFixedThreadPool(the_thread_max);
		my_pool_of_parsers = Executors.newFixedThreadPool(the_thread_max);
		// Observe the gatherer so the pools can be force-stopped when it finishes.
		my_data_gatherer.addObserver(this);
		
		// Kick off the crawl with a single retriever for the seed URL.
		my_pool_of_retrievers.execute(new RetrieverPopUp());
	}
	
	/**
	 * A retriever task that downloads one page, queues the resulting document
	 * for parsing, and schedules one parser task for it.
	 * 
	 * @author James Lovato, Efim Todorasco, Daniel Garrison
	 * @version Fall 2011
	 *
	 */
	private class RetrieverPopUp implements Runnable {
		/**
		 * The single-threaded page retriever doing the actual download.
		 */
		private final CrawlerPageRetriever retriever;
		
		public RetrieverPopUp() {
			retriever = new CrawlerPageRetriever();
		}
		
		public void run() {
			if (my_page_parse_counter.get() >= my_number_of_pages) {
				return; // crawl target already reached; nothing to do
			}
			try {
				final String url_string = my_links.take();
				try {
					final Document document = retriever.pageRetriever(url_string);
					if (document != null) {
						my_page_documents.put(document);
						final int current_page = my_page_parse_counter.incrementAndGet();
						if (current_page <= my_number_of_pages) {
							try {
								// One parser task per queued document.
								my_pool_of_parsers.execute(new ParserPopUp());
							} catch (RejectedExecutionException e) {
								// Pool was shut down by a parser that hit the page target.
								System.err.println("Parser task rejected: " + e.getMessage());
							}
						}
					}
					// NOTE(review): a null document consumes a link without queuing a
					// document or incrementing the counter, which can leave the crawl
					// short of its page target — confirm this is acceptable.
				} catch (IOException e) {
					// Best-effort: a link that fails to download is silently dropped.
					// NOTE(review): this also makes no progress toward the page target.
				}
			} catch (InterruptedException e) {
				// take() may complete despite the interrupt; restore the interrupt
				// flag so the executor can shut this worker down cleanly.
				// See http://www.ibm.com/developerworks/java/library/j-jtp05236/index.html
				Thread.currentThread().interrupt();
			}
		}
	}
	
	/**
	 * A parser task that parses one queued document, enqueues any previously
	 * unseen links it contains (spawning a retriever task for each), and
	 * submits the page's metrics to the gatherer.
	 * @author James Lovato, Efim Todorasco, Daniel Garrison
	 */
	private class ParserPopUp implements Runnable {
		/**
		 * The parser object that extracts words and links from a document.
		 */
		private final CrawlerParser parser;
		
		/**
		 * Constructor creates a new parsing object.
		 */
		public ParserPopUp() {
			parser = new CrawlerParser(my_search_words);
		}
		
		/**
		 * Takes the next document from the queue and parses it.
		 */
		public void run() {
			final long start_time = System.nanoTime();
			if (my_pages_retrieved.get() >= my_number_of_pages) {
				return; // crawl target already reached; nothing to do
			}
			try {
				final Document document = my_page_documents.take();
				try {
					parser.parsePage(document);
					final int current_page_number = my_pages_retrieved.incrementAndGet();
					final List<String> links = parser.getMyLinks();
					final int number_of_links = links.size();
					/*
					 * Synchronize on my_finished_links so that the uniqueness check
					 * and the enqueue of a new link form one atomic step — two
					 * parsers must not both treat the same URL as new.
					 */
					synchronized (my_finished_links) {
						for (final String link : links) {
							// Set.add returns true only for links never seen before.
							if (my_finished_links.add(link)) {
								my_links.put(link);
								if (current_page_number < my_number_of_pages) {
									try {
										// One retriever task per newly discovered link.
										my_pool_of_retrievers.execute(new RetrieverPopUp());
									} catch (RejectedExecutionException e) {
										// Pool already shut down: crawl target reached elsewhere.
										System.err.println("Retriever task rejected: " + e.getMessage());
									}
								} else if (current_page_number == my_number_of_pages) {
									// Page target reached: stop accepting new work.
									// shutdown() is idempotent, so hitting this for
									// several links on the final page is harmless.
									my_pool_of_retrievers.shutdown();
									my_pool_of_parsers.shutdown();
								}
							}
						}
					}
					final DataMetrics page_data = new DataMetrics(current_page_number, parser.numberOfWords(),
							number_of_links, parser.wordCounts(), document.baseUri(),
							System.nanoTime() - start_time);
					my_data_gatherer.submit(page_data);
				} catch (IOException e) {
					// Best-effort: a document that fails to parse is silently dropped.
				}
			} catch (InterruptedException e) {
				// take() may complete despite the interrupt; restore the interrupt
				// flag so the executor can shut this worker down cleanly.
				// See http://www.ibm.com/developerworks/java/library/j-jtp05236/index.html
				Thread.currentThread().interrupt();
			}
		}
	}

	/**
	 * Observes ThreadSafeGatherer; force-stops both pools when it signals
	 * that gathering is complete.
	 */
	@Override
	public void update(Observable the_observed, Object the_arg) {
		my_pool_of_retrievers.shutdownNow();
		my_pool_of_parsers.shutdownNow();
	}
}
