package webcrawler;

import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Observable;
import java.util.Observer;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;

import org.jsoup.nodes.Document;

import parser.Parser;
import retriever.PageRetriever;

/**
 * Runs the parser and retriever to gather statistical data
 * based on keywords entered by the user.
 * 
 * @author Zach Turk, Dickson Kwong, Stephen Amos
 *
 */
public class DataGatherer implements Observer
{
	/**
	 * The number of worker threads running at a time.
	 */
	private int nthreads = 10;
	
	/**
	 * The queue of URLs waiting to be retrieved.
	 */
	private BlockingQueue<URL> url_queue = new LinkedBlockingQueue<URL>();
	
	/**
	 * The queue of downloaded page sources waiting to be parsed.
	 */
	private BlockingQueue<Document> source_queue = new LinkedBlockingQueue<Document>();
	
	/**
	 * The collection of links that have already been visited.
	 */
	private ArrayList<URL> visited_places = new ArrayList<URL>();
	
	/**
	 * The seed URL the crawl starts from.
	 */
	private String seed_url;
	
	/**
	 * The list of keywords to search for.
	 */
	private String[] keywords;
	
	/**
	 * The number of pages parsed so far.
	 */
	private int pages_retrieved = 0;
	
	/**
	 * Total words parsed across all pages.
	 */
	private int total_words;
	
	/**
	 * Average words per page.
	 */
	private double average_words_per_page;
	
	/**
	 * Average URLs per page.
	 */
	private double average_urls_per_page;
	
	/**
	 * Total URLs found thus far from the pages parsed.
	 */
	private int total_urls;
	
	/**
	 * The number of parser tasks currently scheduled.
	 */
	private int parsers;
	
	/**
	 * The number of retriever tasks currently scheduled.
	 */
	private int retrievers;
	
	/**
	 * Crawl start time, from System.nanoTime().
	 */
	private long timer;
	
	/**
	 * A map where each keyword provided by the user has an associated value.
	 * This value is the total amount of times that word has been found during
	 * the web crawling process.
	 */
	Map<String, Double> keyword_occurences = new HashMap<String, Double>();
	
	/**
	 * The amount of pages to be retrieved until termination of the program.
	 */
	private int pages_to_retrieve;
	
	/**
	 * The executor running retriever and parser tasks.
	 */
	private ExecutorService pool;
	
	/**
	 * Debug mode turns on some reporting.
	 */
	private boolean debug;
	
	/**
	 * The file writer for file output.
	 */
	FileWriter outFile;
	
	/**
	 * The output print writer.
	 */
	PrintWriter out;
	
	/**
	 * Constructs a data gatherer and enqueues the seed URL.
	 * 
	 * @param the_seed_url the URL the crawl starts from.
	 * @param the_keywords the keywords whose occurrences are counted.
	 * @param retrieve the number of pages to parse before terminating.
	 * @param threads the number of worker threads to run.
	 * @param the_debug_flag whether extra debug reporting is printed.
	 */
	DataGatherer(String the_seed_url, String[] the_keywords, int retrieve, int threads, boolean the_debug_flag)
	{
		// Report file suffix: "ST" for a single-threaded run, "MT" for multi-threaded.
		final String file_suffix = (threads == 1) ? "ST" : "MT";
		
		try 
		{
			outFile = new FileWriter("spiderRun" + file_suffix + ".txt");
			out = new PrintWriter(outFile);
		} 
		catch (IOException e) 
		{
			// BUG FIX: the failure was silently swallowed, leaving 'out' null and
			// guaranteeing a NullPointerException on the first update(). Report
			// the problem and fall back to an in-memory writer so the crawl can
			// still run (console output is unaffected).
			System.err.println("Could not open report file: " + e);
			out = new PrintWriter(new java.io.StringWriter());
		}
		
		seed_url = the_seed_url;
		keywords = the_keywords;
		pages_to_retrieve = retrieve;
		
		// Every keyword starts with a zero occurrence count.
		for (String keyword : the_keywords)
		{
			keyword_occurences.put(keyword, 0.0);
		}
		
		setSeed(the_seed_url);
		
		nthreads = threads;
		debug = the_debug_flag;
	}
	
	/**
	 * Enqueues the given URL as the starting point of the crawl.
	 * 
	 * @param url the seed URL to enqueue.
	 */
	private void setSeed(String url) {
		try {
			// BUG FIX: previously read the seed_url field and ignored the
			// parameter; it only worked because the caller passed the same value.
			url_queue.put(new URL(url));
		} catch (Exception e) {
			// A malformed or un-enqueueable seed leaves the crawl with no work;
			// report it instead of failing silently.
			System.err.println("Could not enqueue seed URL '" + url + "': " + e);
		}
	}

	/**
	 * Starts the crawl: spawns the initial retriever tasks and starts the timer.
	 */
	public void run()
	{
		pool = Executors.newFixedThreadPool(nthreads);
		for (int i = 0; i < nthreads; i++) {
			startRetriever();
		}
		timer = System.nanoTime();
	} 

	/**
	 * Creates, registers, and schedules a new page retriever task.
	 */
	private void startRetriever() {
		PageRetriever retriever = new PageRetriever(url_queue, source_queue);
		retrievers++;
		retriever.addObserver(this);
		pool.execute(retriever);
	}

	/**
	 * Creates, registers, and schedules a new parser task.
	 */
	private void startParser() {
		Parser parser = new Parser(source_queue, url_queue, keywords, visited_places);
		parsers++;
		parser.addObserver(this);
		pool.execute(parser);
	}

	/**
	 * Called by a retriever or parser task when it finishes. Terminates the
	 * program once enough pages have been parsed; otherwise accumulates the
	 * parser's statistics, reports them, and schedules the next task, choosing
	 * between retrieving and parsing based on the current queue sizes.
	 * 
	 * @param arg0 the task (PageRetriever or Parser) that finished.
	 * @param arg1 unused.
	 */
	@Override
	public synchronized void update(Observable arg0, Object arg1) 
	{
		if (pages_retrieved >= pages_to_retrieve) {
			finishRun();
		}
		if (arg0 instanceof PageRetriever) {
			retrievers--;
			// Rebalance: parse when pages have piled up (or there is nothing
			// left to retrieve), otherwise keep retrieving.
			if (url_queue.size() == 0 || source_queue.size() > average_urls_per_page) {
				startParser();
			} else {
				startRetriever();
			}
			return;
		}
		
		Parser parser = (Parser) arg0;
		
		total_urls += parser.getLinksFound();
		pages_retrieved++;
		total_words += parser.countWords();
		// BUG FIX: both operands were ints, so the division truncated before
		// being stored in the double field (and the truncated value also skewed
		// the scheduling heuristic below). Cast first to keep the fraction.
		average_words_per_page = (double) total_words / pages_retrieved;
		average_urls_per_page = (double) total_urls / pages_retrieved;
		
		// Fold this page's per-keyword hit counts into the running totals.
		int[] counts = parser.countKeywords();
		for (int i = 0; i < keywords.length; i++)
		{
			double old_value = keyword_occurences.get(keywords[i]);
			keyword_occurences.put(keywords[i], old_value + counts[i]);
		}
		
		printPageReport(parser);
		
		parsers--;
		// Rebalance: retrieve when the page queue is empty or many links are
		// waiting, otherwise keep parsing.
		if (source_queue.size() == 0 || url_queue.size() > average_urls_per_page) {
			startRetriever();
		} else {
			startParser();
		}
	}

	/**
	 * Writes the final timing statistics, closes the report file, and exits.
	 */
	private void finishRun() {
		double final_timer = (System.nanoTime() - timer) / 1000000.0;
		
		out.append("\r\n");
		out.append("Average parse time per page: " + final_timer / pages_retrieved + " Milliseconds");
		out.append("\r\n");
		out.append("Finished in: " + final_timer + " Milliseconds");
		
		System.out.println();
		System.err.println("Average parse time per page: " + final_timer / pages_retrieved + " Milliseconds");
		System.err.println("Finished in: " + final_timer + " Milliseconds");
		
		out.close();
		System.exit(0);
	}

	/**
	 * Writes the per-page statistics for the given finished parser to both the
	 * console and the report file.
	 * 
	 * @param parser the parser whose page was just processed.
	 */
	private void printPageReport(Parser parser) {
		System.out.println("...................");
		out.append("...................");
		out.append("\r\n");
		if (debug) {
			System.err.println("Links: " + url_queue.size() + " Pages: " + source_queue.size());
			out.append("Links: " + url_queue.size() + " Pages: " + source_queue.size());
			out.append("\r\n");
			System.err.println("Parsers: " + parsers + " Retrievers: " + retrievers);
			out.append("Parsers: " + parsers + " Retrievers: " + retrievers);
			out.append("\r\n");
		}
		System.out.println("Parsed: " + parser.getURLAddress());
		out.append("Parsed: " + parser.getURLAddress());
		out.append("\r\n");
		
		System.out.println("Pages Retrieved: " + pages_retrieved);
		out.append("Pages Retrieved: " + pages_retrieved);
		out.append("\r\n");
		
		System.out.println("Total URLs parsed thus far: " + total_urls);
		out.append("Total URLs parsed thus far: " + total_urls);
		out.append("\r\n");
		
		System.out.println("Words at this page: " + parser.countWords());
		out.append("Words at this page: " + parser.countWords());
		out.append("\r\n");
		
		System.out.println("Average words per page: " + average_words_per_page);
		out.append("Average words per page: " + average_words_per_page);
		out.append("\r\n");
		
		System.out.println("Average URLs per page: " + average_urls_per_page);
		out.append("Average URLs per page: " + average_urls_per_page);
		out.append("\r\n");
		
		for (String word : keywords)
		{
			System.out.print("Keyword: " + "[" + word + "]");
			out.append("Keyword: " + "[" + word + "]");
			
			System.out.print("\t Ave. Hits per Page: " + keyword_occurences.get(word) + "/" + pages_retrieved);
			out.append("\t Ave. Hits per Page: " + keyword_occurences.get(word) + "/" + pages_retrieved);
						
			System.out.println("\t Total Hits: "  + keyword_occurences.get(word));
			out.append("\t Total Hits: "  + keyword_occurences.get(word));
			out.append("\r\n");
		}
		System.out.println("...................");
		out.append("...................");
		out.append("\r\n");
		// Flush so the report survives an abnormal termination; previously the
		// file was only flushed when closed at the very end of the run.
		out.flush();
	}
}