/*
 * Web Crawler
 * Date: November 01, 2011
 * Group JEDi
 * Author(s): James Lovato, Efim Todorasco, Daniel Garrison
 */
package singlethread;
import java.io.IOException;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Observer;
import java.util.Queue;
import java.util.Set;

import multithread.DataMetrics;

import org.jsoup.nodes.Document;
/**
 * The single threaded implementation of the crawler.
 * 
 * @author James Lovato, Efim Todorasco, Daniel Garrison
 * @version 10/20/11
 */
public class CrawlerSingleThread {
	/**
	 * The starting time in nanotime for when this crawler begins.
	 */
	private long my_start_time;
	
	/**
	 * The estimated number of elements the hashset has per page.
	 * Used to presize {@link #my_have_done} and avoid rehashing.
	 */
	private static final int HASHSET_LINKS_PER_PAGE = 25;
	
	/**
	 * Count of the pages that are still to be crawled through
	 * (decremented as pages are parsed).
	 */
	private int my_count;
	
	/**
	 * The current page number that is being parsed.  Dropped pages do not count.
	 */
	private int my_current_page_number;
	
	/**
	 * List of URLs in queue to be crawled through.
	 */
	private Queue<String> my_to_do = new LinkedList<String>();
	
	/**
	 * A collection of links that have already been visited.
	 * Using hashset to ensure quick insert times and no duplicates.
	 */
	private Set<String> my_have_done;
	
	/**
	 * The list of words being searched for in each document.
	 */
	private String[] my_search_words;
	
	/**
	 * Total time spent parsing all pages, in nanoseconds.
	 */
	private long my_total_parse_time;
	
	/**
	 * Observer handed to the Reporter for progress callbacks.
	 * NOTE(review): never assigned anywhere in this class, so the Reporter
	 * receives null unless a setter/injection exists elsewhere — confirm.
	 */
	private Observer my_observer;
	
	/**
	 * Constructor accepts the number of pages to be crawled.
	 * 
	 * @param the_count Number of pages to be crawled.
	 * @param the_search_words The words to count occurrences of on each page.
	 */
	public CrawlerSingleThread(final int the_count, String[] the_search_words) {
		my_search_words = the_search_words;
		my_count = the_count;
		// Presize for the expected total link volume (x2 for load-factor headroom).
		my_have_done = new HashSet<String>(the_count * HASHSET_LINKS_PER_PAGE * 2);
	}
	
	/**
	 * Starts up the web crawling by seeding the work queue with the initial link.
	 * 
	 * @param the_sid The starting URL.
	 * @throws IOException If retrieving or parsing a page fails.
	 */
	public void start(String the_sid) throws IOException {
		// Fix: the original fetched the_sid here and discarded the result;
		// fillQueue() retrieves it again, so the extra network round trip
		// served no purpose and has been removed.
		my_to_do.add(the_sid);
		my_have_done.add(the_sid);
		fillQueue();
	}
	
	/**
	 * Removes the document at the front of the queue and sends it off
	 * to be parsed and retrieves the list of new documents which need 
	 * to be parsed.  Loops until the queue is empty or the requested
	 * page count has been reached, then emits the final report.
	 * 
	 * @throws IOException If retrieving a page fails.
	 */
	public void fillQueue() throws IOException {
		long parse_start;
		long parse_stop;
		Document document;
		CrawlerPageRetriever pr = new CrawlerPageRetriever();
		CrawlerParser cp = new CrawlerParser(my_search_words);
		DataGatherer data_gatherer = new DataGatherer(my_search_words, my_count);
		Reporter reporter = new Reporter(my_count, my_search_words, my_observer);
		int link_count;
		
		my_start_time = System.nanoTime();
		
		while (!my_to_do.isEmpty() && my_count > 0) {
			String url = my_to_do.remove();
			document = pr.pageRetriever(url);
			// Skip unfetchable pages and plain-text resources; neither counts
			// toward my_count or my_current_page_number.
			if (document != null && !url.endsWith("txt")) {
				parse_start = System.nanoTime();
				my_current_page_number++;
				cp.parsePage(document);
				my_count--;
				// Capture the link count BEFORE loadList drains the parser's
				// link list, so both consumers below see the real value.
				link_count = cp.numberOfLinks();
				loadList(cp.getMyLinks());
				parse_stop = System.nanoTime();
				my_total_parse_time += parse_stop - parse_start;
				// Fix: was cp.numberOfLinks() here, read after loadList had
				// already emptied the link list — use the captured count.
				data_gatherer.submit(new DataMetrics(0, cp.numberOfWords(), link_count, 
						cp.wordCounts(), document.baseUri(), parse_stop - parse_start));
				reporter.reportPage(url, my_current_page_number, cp.wordCounts(), 
						parse_stop - parse_start, link_count);
			}
			// else: unusable document — drop it silently.
		}
		reporter.finalReport(System.nanoTime() - my_start_time, my_total_parse_time);
	}
	
	/**
	 * Adds each tag to the hashset my_have_done.  Tags not previously visited
	 * are appended to my_to_do, so the waiting queue never holds duplicates.
	 * The supplied list is emptied before returning (preserving the original
	 * contract, since callers hand over the parser's internal list).
	 * 
	 * @param tags The list of tags to be loaded into the waiting queue.
	 */
	public void loadList(List<String> tags) {
		// Fix: the original removed index 0 each iteration (O(n^2) element
		// shifting) and compared set sizes to detect duplicates.  Set.add's
		// boolean return gives the same answer in a single O(n) pass.
		for (String tag : tags) {
			if (my_have_done.add(tag)) {
				my_to_do.add(tag);
			}
		}
		tags.clear();
	}
}
