/*
 * Team Java
 * Michael Pogson, Michael Satranjr, Stephen Hunter
 * Project 1
 * 4/26/12
 */

package controller;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import queue.ListQueue;
import queue.QueueADT;
import view.ResultsFrame;

import httpclient.*;

/**
 * Coordinates the crawl: owns the shared URL/website queues, the retriever
 * and parser worker pools, and the statistics gatherer, and reports progress
 * to the console and GUI.  In threaded mode the worker threads pull work
 * through the synchronized grab/enque methods; in unthreaded mode a single
 * loop drives one retriever and one parser directly.
 * @author Michael Pogson, Michael Satran Jr, Stephen Hunter
 * @version Spring 2012
 */
public class Manager implements Runnable {
	
	/** GUI frame used to display crawl results. */
	private final ResultsFrame my_frame;
	
	/**
	 * The Max urls that we will search.
	 * NOTE(review): a mutable public static field written by the instance
	 * method {@link #setMaxURLs(int)}; kept as-is for compatibility.
	 */
	public static int MAX_URLS = 1000;
	
	/**
	 * Time used to count how long it takes to parse a page; written by the
	 * parser through {@link #setTime(double)}.
	 */
	public double my_time;
	
	/**
	 * The amount of urls discovered by the parser so far.
	 */
	public int my_url_count;
	
	/** Retrieved websites waiting to be handed to a parser (threaded mode). */
	private QueueADT<Website> my_websites;
	
	/** URLs discovered but not yet retrieved. */
	private QueueADT<URL> my_urls;
	
	/** Every website that has been successfully retrieved. */
	private List<Website> my_retrieved_websites;
	
	/**
	 * Pool of threads used for the parser.
	 */
	private Thread[] parser_pool;
	
	/**
	 * Pool of threads used for the retriever.
	 */
	private Thread[] retriever_pool;
	
	/** Runnable retriever instances backing {@link #retriever_pool}. */
	private Retriever[] retriever_pool_objects;
	
	/** Runnable parser instances backing {@link #parser_pool}. */
	private Parser[] parser_pool_objects;
	
	/**
	 * Retriever used by the single-threaded crawl.
	 */
	private Retriever my_retriever;
	
	/**
	 * Parser used by the single-threaded crawl.
	 */
	private Parser my_parser;
	
	/**
	 * Collects keyword and word-count statistics from parsed pages.
	 */
	private DataGatherer my_gatherer;
	
	/**
	 * Key words the user wants to search for.
	 */
	private List<String> my_keywords;
	
	/**
	 * Overall program start time, in nanoseconds.
	 */
	private long my_start_time;
	
	/** Accumulated per-page parse time in milliseconds (unthreaded mode only). */
	private double parsing_times;
	
	/** Pages parsed so far; starts at 1 to avoid division by zero in printData(). */
	private int parsing_count;

	/**
	 * Creates a manager object and sets up some data.
	 * @param seed the websites to start at
	 * @param keywords the keywords that we are going to search for
	 */
	public Manager(List<URL> seed, List<String> keywords)
	{
		my_start_time = System.nanoTime();
		//Started at 1 in order to avoid divide by zero.
		parsing_count = 1;
		parsing_times = 0;
		my_keywords = new LinkedList<String>();
		my_url_count = 0;
		Map<String, Integer> keyword_map = new HashMap<String, Integer>();
		my_urls = new ListQueue<URL>();
		for (URL href : seed)
		{
			my_urls.enque(href);
		}
		
		//Every keyword starts with a zero hit count.
		for (String keyword : keywords)
		{
			keyword_map.put(keyword, 0);
			my_keywords.add(keyword);
		}
		my_retrieved_websites = new LinkedList<Website>();
		my_websites = new ListQueue<Website>();
		my_gatherer = new DataGatherer(keyword_map);
		my_frame = new ResultsFrame();
	}
	
	/**
	 * This method creates the objects needed to crawl the web in
	 * single-threaded mode and shows the results window.
	 */
	public void initiate()
	{
		my_frame.setUp();
		my_retriever = new Retriever(null);
		my_parser = new Parser(my_gatherer, this);
	}
	
	/**
	 * Starts the crawl with multithreading: one thread per retriever and
	 * parser instance, all sharing this manager's queues.
	 * @param retriever_thread_amt amount of threads to use for the retriever
	 * @param parser_thread_amt amount of threads to use for the parser
	 */
	public void startThreaded(int retriever_thread_amt, int parser_thread_amt)
	{
		//Initialize Thread Pools
		parser_pool = new Thread[parser_thread_amt];
		retriever_pool = new Thread[retriever_thread_amt];
		parser_pool_objects = new Parser[parser_thread_amt];
		retriever_pool_objects = new Retriever[retriever_thread_amt];
		
		//Create and start the parser threads.
		for (int i = 0; i < parser_thread_amt; i++)
		{
			parser_pool_objects[i] = new Parser(my_gatherer, this);
			parser_pool[i] = new Thread(parser_pool_objects[i]);
			parser_pool[i].start();
		}
		
		//Create and start the retriever threads.
		for (int i = 0; i < retriever_thread_amt; i++)
		{
			retriever_pool_objects[i] = new Retriever(this, null);
			retriever_pool[i] = new Thread(retriever_pool_objects[i]);
			retriever_pool[i].start();
		}
	}
	
	/**
	 * Stops all the worker threads when the program is done.  Safe to call
	 * even if {@link #startThreaded(int, int)} was never invoked.
	 */
	public void terminateThreads()
	{
		//Null guards: the pools only exist after startThreaded(); without
		//them this method would throw NullPointerException in unthreaded
		//mode (it is reachable through enqueURL()).
		if (retriever_pool_objects != null)
		{
			for (int i = 0; i < retriever_pool_objects.length; i++)
			{
				retriever_pool_objects[i].stop();
			}
		}
		
		if (parser_pool_objects != null)
		{
			for (int i = 0; i < parser_pool_objects.length; i++)
			{
				parser_pool_objects[i].stop();
			}
		}
	}
	
	/**
	 * Runs the crawl on the calling thread using the single retriever and
	 * parser created by {@link #initiate()}.
	 */
	public void startUnthreaded()
	{
		//Crawl until the page limit is hit.  Also stop when the URL queue
		//drains: in single-threaded mode nothing else can refill it, so a
		//"spin until MAX_URLS" loop would busy-wait forever on an empty queue.
		while (my_gatherer.getNumOfPages() < MAX_URLS && !my_urls.isEmpty())
		{
			try 
			{
				my_retriever.setURL(my_urls.deque());
				Website web = my_retriever.call();
				long parse_time = System.nanoTime();
				//Hard-coded skip of a known-problematic site: retrieve the
				//next queued URL instead so the crawl does not stall on it.
				if (web.my_url.toString().equals("http://questioneverything.typepad.com/"))
				{
					my_retriever.setURL(my_urls.deque());
					web = my_retriever.call();
					System.out.println(web.my_url.toString());
				}
				my_parser.setWebsite(web);
				
				List<URL> parsed_urls = my_parser.call();
				
				parsing_times += (System.nanoTime() - parse_time) / (double)1000000;
				parsing_count++;
				for (URL url : parsed_urls)
				{
					my_urls.enque(url);
				}
				my_retrieved_websites.add(web);
				my_url_count += parsed_urls.size();
			}
			catch (Exception ignored)
			{
				//Best effort: a page that fails to download or parse is
				//dropped and the crawl moves on to the next URL.
			}
		}
		
		//Drain whatever URLs remain, still honoring the page limit, and
		//report progress after every page.
		while (my_retrieved_websites.size() < MAX_URLS && !my_urls.isEmpty())
		{
			try
			{
				my_retriever.setURL(my_urls.deque());
				Website web = my_retriever.call();
				my_retrieved_websites.add(web);
				long parse_time = System.nanoTime();
				my_parser.setWebsite(web);
				my_parser.run();
				parsing_times += (System.nanoTime() - parse_time) / (double)1000000;
				parsing_count++;
				printData(web.my_url);
			}
			catch (Exception ignored)
			{
				//Best effort: skip pages that fail and keep draining.
			}
		}
	}
	
	/**
	 * Builds a progress report for the given page and writes it to both the
	 * console and the results window.
	 * @param url_parsed url that was parsed
	 */
	public void printData(URL url_parsed)
	{
		long running_time = System.nanoTime() - my_start_time;
		
		StringBuilder sb = new StringBuilder();
		sb.append("\n\nParsed: " + url_parsed + "\n" +
				  "Pages Retrieved: " + my_gatherer.getNumOfPages() + "\n" +
				  "Average words per page: " + my_gatherer.averageWordsPerPage() + "\n" +
				  "Keyword\t\tAvg. Hits Per Page\t\t Total Hits" + "\n");
		for (String keyword : my_keywords)
		{
			sb.append(keyword + "\t\t" + my_gatherer.getAverageForWord(keyword) + "\t\t\t\t" + my_gatherer.getTotalHitsForWord(keyword) + "\n");
		}
		
		//NOTE(review): this divides my_time (assigned by the parser through
		//setTime) by parsing_count, while the separately accumulated
		//parsing_times field is never reported -- confirm which field the
		//parser actually maintains as the cumulative parse time.
		sb.append("Page Limit: " + MAX_URLS + "\n" +
				  "Average Parse Time Per Page: " + my_time/parsing_count + " Msec" + "\n" +
				  "Total Running Time: " + running_time / (double)1000000000 + " Sec" + "\n\n");
		
		System.out.println(sb.toString());
		my_frame.write(sb.toString());
	}
	
	/**
	 * Queues a retrieved website for parsing and records it in the list of
	 * retrieved sites.  Synchronized because retriever threads call this
	 * concurrently while parser threads drain the same queue.
	 * @param web the website to hand to the parsers
	 */
	public synchronized void enqueWebsite(Website web)
	{
		my_websites.enque(web);
		my_retrieved_websites.add(web);
	}
	
	/**
	 * Enqueues the url that was found on the page, unless the crawl has
	 * already reached its limit.  Synchronized because parser threads call
	 * this concurrently while retriever threads drain the same queue.
	 * @param url the url to be put into the queue.
	 */
	public synchronized void enqueURL(URL url)
	{
		if (my_urls.size() + my_retrieved_websites.size() <= MAX_URLS)
		{
			my_urls.enque(url);
		}
		else if (my_retrieved_websites.size() >= MAX_URLS)
		{
			//Page limit reached: shut the worker pools down.
			terminateThreads();
		}
		//Otherwise the url is intentionally dropped: enough work is already
		//queued to reach MAX_URLS.
	}
	
	/**
	 * Hands a batch of queued URLs to a retriever thread.
	 * @param amount the maximum number of URLs to hand out
	 * @return a queue holding up to amount URLs (fewer if the queue drains)
	 */
	public synchronized QueueADT<URL> grabURLSToRetrieve(int amount)
	{
		QueueADT<URL> grabbed_urls = new ListQueue<URL>();
		
		for (int i = 0; i < amount; i++)
		{
			if (my_urls.isEmpty())
			{
				//Nothing left to hand out; stop early.
				break;
			}
			try
			{
				grabbed_urls.enque(my_urls.deque());
			}
			catch (Exception e)
			{
				//Defensive retry carried over from the original; should not
				//trigger now that access is synchronized.
				i -= 1;
			}
		}
		
		return grabbed_urls;
	}
	
	/**
	 * Hands a batch of retrieved websites to a parser thread.
	 * @param amount the maximum number of websites to hand out
	 * @return a queue holding up to amount websites (fewer if the queue drains)
	 */
	public synchronized QueueADT<Website> grabWebsitesToParse(int amount)
	{
		QueueADT<Website> grabbed_websites = new ListQueue<Website>();
		
		for (int i = 0; i < amount; i++)
		{
			if (my_websites.isEmpty())
			{
				//Nothing left to hand out; stop early.
				break;
			}
			try
			{
				grabbed_websites.enque(my_websites.deque());
			}
			catch (Exception e)
			{
				//Defensive retry carried over from the original; should not
				//trigger now that access is synchronized.
				i -= 1;
			}
		}
		
		return grabbed_websites;
	}
	
	/**
	 * This method sets the max number of urls that we are going to parse.
	 * Note that this writes the static MAX_URLS field shared by all managers.
	 * @param the_max max number of urls to look at
	 */
	public void setMaxURLs(final int the_max) {
		MAX_URLS = the_max;
	}
	
	/**
	 * The total time the page took to parse.
	 * @param the_time the total time it took to parse a page.
	 */
	public void setTime(double the_time) {
		my_time = the_time;
	}

	/**
	 * Runs the single-threaded crawl; lets a caller push the whole crawl
	 * onto a thread of its own.
	 */
	@Override
	public void run() {
		startUnthreaded();
	}
}
