/*
 * Web Crawler
 * Date: November 01, 2011
 * Group JEDi
 * Author(s): James Lovato, Efim Todorasco, Daniel Garrison
 */
package singlethread;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;

import org.jsoup.nodes.Document;

/**
 * Class to perform web crawling.  Despite the class name, this
 * implementation (package {@code singlethread}) runs on a single thread.
 * @author James Lovato, Efim Todorasco, Daniel Garrison
 * @version 10/28/11
 */
public class CrawlerMultiThread {

	/**
	 * Seed URL where crawling begins.
	 */
	private static final String SEED_URL = "http://faculty.washington.edu/gmobus/";

	/**
	 * Clocked time (nanoseconds, from System.nanoTime) at the start of web crawling.
	 */
	private long start_time;

	/**
	 * Clocked time (nanoseconds, from System.nanoTime) at the end of web crawling.
	 */
	private long end_time;

	/**
	 * The number of pages that have been crawled through.
	 */
	private int page_count;

	/**
	 * Remaining budget of pages still to be crawled; decremented per
	 * successfully retrieved page.
	 */
	private int my_count;

	/**
	 * FIFO queue of URLs waiting to be crawled.
	 */
	private Queue<String> my_to_do = new LinkedBlockingQueue<String>();

	/**
	 * A collection of links that have already been visited.
	 * HashSet gives O(1) membership checks and guarantees no duplicates.
	 */
	private Set<String> my_have_done = new HashSet<String>();

	/**
	 * Constructor accepts the number of pages to be crawled.
	 * @param the_count Number of pages to be crawled.
	 */
	public CrawlerMultiThread(final int the_count) {
		my_count = the_count;
	}

	/**
	 * Starts up the web crawling by seeding the queue with the initial link.
	 * Note: the original code also fetched the seed page here and discarded
	 * the result; fillQueue() retrieves the same link anyway, so that extra
	 * fetch downloaded the seed page twice and has been removed.
	 * @throws IOException if page retrieval fails.
	 */
	public void start() throws IOException {
		my_to_do.add(SEED_URL);
		my_have_done.add(SEED_URL);
		fillQueue();
	}

	/**
	 * Removes links from the front of the queue and retrieves each page
	 * until the queue empties or the page budget is exhausted.  Failed
	 * retrievals (null documents) do not count against the budget.
	 * Elapsed time is recorded in start_time/end_time.
	 * @throws IOException if page retrieval fails.
	 */
	public void fillQueue() throws IOException {
		start_time = System.nanoTime();
		while (!my_to_do.isEmpty() && my_count > 0) {
			final String link = my_to_do.remove();
			final CrawlerPageRetriever pr = new CrawlerPageRetriever();
			final Document document = pr.pageRetriever(link);
			// Only successfully retrieved pages consume the budget.
			if (document != null) {
				my_count--;
			}
		}
		end_time = System.nanoTime();
	}

	/**
	 * Records each tag as visited and enqueues only those not seen before,
	 * so my_to_do never holds duplicates.  The supplied list is emptied,
	 * matching the original contract.
	 * Bug fix: page_count was previously incremented twice per call, and
	 * the progress line printed the same value on both sides of " - ".
	 * @param tags The list of tags to be added to the queues.
	 */
	public void loadList(List<String> tags) {
		page_count++;
		System.err.println(page_count);
		for (final String tag : tags) {
			// Set.add returns true only when the element was not present,
			// replacing the old size-comparison check.
			if (my_have_done.add(tag)) {
				my_to_do.add(tag);
			}
		}
		tags.clear();
	}
}
