//Fusion of the 422 Proportion
//Dustin Striplin, Tyler Simrell, Lawrence Grass, and Jacob Hall
//Finished 5/1/13

package controller;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Observable;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;

import crawlercommons.robots.BaseRobotRules;

import model.DataGatherer;
import model.PageInfo;
import model.PageRetriever;
import model.SetupInfo;

/**
 * Controls the parsing & retrieving of threads
 * @author Dustin Striplin, Tyler Simrell, Lawrence Grass, Jacob Hall
 *
 */
public class MainController extends Observable{
	
	/*
	 * The file name for the single threaded text file with the statistical results
	 * from the program's analysis
	 */
	private static final String ST_FILE_NAME = "spiderRunST.txt";
	
	/*
	 * The file name for the multi-threaded text file with the statistical results
	 * from the program's analysis
	 */
	private static final String MT_FILE_NAME = "spiderRunMT.txt";
	
	// Shared marker value for alreadyFrequented, which is used as a set; the
	// mapped value is never read, so one immutable sentinel suffices.
	private static final Object PRESENT = new Object();
	
	// Count of pages handed to the page buffer so far.
	// NOTE(review): incremented without synchronization in insertItemPageBuffer;
	// with several retriever threads the count may drift slightly past the
	// configured limit — confirm whether an exact cap is required.
	private int pagesRetrieved = 0;
	private DataGatherer dataGatherer;
	
		/*ArrayBlockingQueue will make a thread wait if the queue is empty
		 *unfortunately it also blocks when the queue is full, we need to 
		 *check if the queue is full before adding to it in order to avoid
		 *a block.*/
	private ArrayBlockingQueue<PageInfo> pageBuffer;     // retrieved pages waiting to be parsed
	private ArrayBlockingQueue<PageInfo> pageToRetrieve; // discovered links waiting to be fetched
	private ConcurrentHashMap<String, Object> alreadyFrequented; // URLs already queued (used as a set)
	private Map<String, BaseRobotRules> accessMap; //synchronized to prevent simultaneous access.
	private SetupInfo setupInfo;
	
	private int pageBufferSize;
	
	private int pageToRetrieveSize;
	
	// Output file for the run statistics; replaced with ST_FILE_NAME or
	// MT_FILE_NAME in startButton() according to the threading mode.
	private String fileName = "xxx.txt";
	
	public MainController(){
		// All crawl state is initialized in startButton(), once SetupInfo is known.
	}
	
	/**
	 * Initializes all crawl state from the given configuration and starts the
	 * crawl, either multi-threaded or on a single worker thread.
	 * @param setup the user-supplied crawl configuration
	 * @param data the collector that accumulates per-page statistics
	 */
	public void startButton(SetupInfo setup, DataGatherer data){
		setupInfo = setup;
		dataGatherer = data;
		pagesRetrieved = 0;
		// Both queues are bounded by the page limit: neither ever needs to hold
		// more entries than the number of pages the run will process.
		pageBufferSize = setupInfo.getPageLimit();
		pageToRetrieveSize = setupInfo.getPageLimit();
		pageBuffer = new ArrayBlockingQueue<PageInfo>(pageBufferSize);
		pageToRetrieve = new ArrayBlockingQueue<PageInfo>(pageToRetrieveSize);
		alreadyFrequented = new ConcurrentHashMap<String, Object>(pageBufferSize, .85f, 8); //It should never be bigger than the buffer
		insertSeedURL();  //inserts a new PageInfo into the queue
		accessMap = Collections.synchronizedMap(new HashMap<String, BaseRobotRules>()); //only one thread at a time may access.
		if(setupInfo.isMultiThreaded()) {
			fileName = MT_FILE_NAME;
			multiThreadExecution();
		} 
		else 
		{ 
			fileName = ST_FILE_NAME;
			SingleThread worker = new SingleThread();
			Thread workerThread = new Thread(worker, "worker thread");
			workerThread.start();
		}
	}
	
	/**
	 * Will run program in multithreaded mode. One retriever and one parser are
	 * started up front so the seed page begins flowing immediately; the loops
	 * start at 1 to account for that first instance of each.
	 */
	private void multiThreadExecution() {
		new PageRetriever(this, true); //create seed retriever
		new Parser(this, true);   //start getting links for the new retrievers
		for(int i = 1; i < setupInfo.getMaxRetrievers(); i++) {
			new PageRetriever(this, true);
		}
		for(int i = 1; i < setupInfo.getMaxParsers(); i++) {
			new Parser(this, true);
		}
	}
	
	/**
	 * Runs program in single thread mode: one retriever and one parser are
	 * driven in lockstep from this loop until the page limit is reached, then
	 * observers are notified with the completed DataGatherer.
	 */
	private void singleThreadExecution()
	{
		Parser pageParser = new Parser(this, false);
		PageRetriever pr = new PageRetriever(this, false);
		PageInfo currentPI = null;
		
		while(pagesRetrieved < setupInfo.getPageLimit())
		{
			try 
			{
				currentPI = pageToRetrieve.take();
				currentPI.setPageStartTime(System.nanoTime());
				pr.setCurrentPageInfo(currentPI);
				// NOTE(review): a null retriever output appears to mean the page
				// could not be fetched — confirm against PageRetriever.getOutput().
				if(pr.getOutput() == null) continue;
				insertItemPageBuffer(currentPI);
				if(currentPI.getPageContent() == null)
				{
					continue;
				}

				// Time the parse itself; discovered links are queued for retrieval.
				long parseTime = System.nanoTime();
				insertItemsToRetrieve(pageParser.parse(currentPI));
				parseTime = System.nanoTime() - parseTime;
				currentPI.setTimeToParsePage(parseTime);
				addPageInfo(currentPI);
			} 
			catch(InterruptedException exception)
			{
				exception.printStackTrace();
				Thread.currentThread().interrupt(); //Stops the thread
			} 
			catch (IOException e) {/*do nothing, link dead*/ }
		}
		setChanged();
		notifyObservers(dataGatherer);
	}
	
	/**
	 * inserts an item into the page buffer, the buffer is a blocking queue
	 * @param pi the new PageInfo to be parsed
	 * @return whether or not the maximum amount of pages have been reached
	 * @throws InterruptedException if interrupted while waiting for buffer space
	 */
	public boolean insertItemPageBuffer(PageInfo pi) throws InterruptedException {
		if(pagesRetrieved < setupInfo.getPageLimit()) {
			pi.setPageEndTime(System.nanoTime());
			pageBuffer.put(pi);
			pagesRetrieved++; //outside loop so it will go past 
		} else {
			return false; //max pages has been reached
		}
		return true;  //max pages has not been reached
	}
	
	/**
	 * Takes an item from the page buffer & removes the head of the queue, the buffer
	 * is a blocking queue.
	 * NOTE(review): this method blocks inside take() while holding the monitor,
	 * so only one parser can wait on the buffer at a time; producers are
	 * unaffected because insertItemPageBuffer is not synchronized.
	 * @return The PageItem at the head of the queue, null if parsing is done
	 * @throws InterruptedException if interrupted while waiting for an item
	 */
	public synchronized PageInfo takeItemPageBuffer() throws InterruptedException {
		if(continueParsing())
			return pageBuffer.take();
		else
			return null;
	}
	
	/**
	 * Puts a list of items in the page to retrieve queue without ever blocking.
	 * If the queue fills up, adding the remaining links is aborted in order to
	 * speed up the program. Duplicate URLs are skipped; putIfAbsent closes the
	 * check-then-act race so two parser threads cannot queue the same URL, and
	 * offer() (unlike put()) can never block even if another thread fills the
	 * queue between a capacity check and the insert.
	 * @param piList the ArrayList of items
	 * @throws InterruptedException declared for interface compatibility with callers
	 */
	public void insertItemsToRetrieve(ArrayList<PageInfo> piList) throws InterruptedException {
		for(PageInfo pi: piList) 
		{
			String URL = pi.getURL();
			// Only the thread that wins the insert may queue the page.
			if(alreadyFrequented.putIfAbsent(URL, PRESENT) == null)
			{
				if(!pageToRetrieve.offer(pi))
				{
					// Queue is full: roll back the marker so the link can be
					// retried later, and abandon the rest of the list.
					alreadyFrequented.remove(URL);
					return;
				}
			}
		}
	}
	
	/**
	 * Takes an item from the page to retrieve buffer & removes the head of the queue, the buffer
	 * is a blocking queue.
	 * @return The PageItem at the head of the queue
	 * @throws InterruptedException if interrupted while waiting for an item
	 */
	public PageInfo takeItemToRetrieve() throws InterruptedException {
		return pageToRetrieve.take();
	}
	
	/**
	 * used by the page parser to see if it should grab a new pageInfo, or if it
	 * should close the thread. If the max pages have not been retrieved, or if the 
	 * page buffer is not empty, then this method will return true.
	 * @return whether or not to continue retrieving pages
	 */
	public boolean continueParsing() {
		return ((pagesRetrieved < setupInfo.getPageLimit()) || !pageBuffer.isEmpty());
	}
	
	/**
	 * Adds the parsed PageInfo to the DataGatherer as well as notifies the GUI
	 * that a new page is done.
	 * @param pi the PageInfo
	 */
	public void addPageInfo(final PageInfo pi) {
		dataGatherer.addPageInfo(pi);
		setChanged();
		notifyObservers(pi);
	}
	
	/**
	 * @return the keyword map held by SetupInfo — presumably a defensive copy;
	 *         verify against SetupInfo.getKeywords()
	 */
	public HashMap<String, Integer> getKeywordMap() {
		return setupInfo.getKeywords();
	}
	
	/**
	 * Number defaults to the ABSOLUTE_PAGE_LIMIT static variable set in the
	 * SetupInfo Class if the user exceeds that value
	 * @return the page limit that was set by the user interface.
	 */
	public int getPageLimit()
	{
		return setupInfo.getPageLimit();
	}

	/**
	 * Returns the number of pages processed as determined by the dataGatherer, which is
	 * the last Object to handle, process, and print out the pages.  Value can be used 
	 * as a reasonable estimate as to the percentage of the entire process that has been
	 * completed for the UI progress bar.
	 * @return the number of pages that have been processed by the dataGatherer
	 */
	public int getNumPagesRetrieved()
	{
		return dataGatherer.getNumPagesProcessed();
	}
	
	/**
	 * Calls the printFile() method in the DataGatherer class which will print all of
	 * the program's statistics to a text file.
	 */
	public void printDataToFile()
	{
		dataGatherer.printFile(fileName);
	}
	
	/**
	 * Seeds the crawl: wraps the configured seed URL in a PageInfo and places
	 * it in the retrieval queue. Called before any worker threads exist, so the
	 * non-blocking add() cannot fail on the freshly created, empty queue.
	 */
	private void insertSeedURL() {
		PageInfo tmp = new PageInfo(setupInfo.getSeedUrl());
		tmp.setKeyWordMap(setupInfo.getKeywords());
		tmp.setPageLimit(setupInfo.getPageLimit());
		pageToRetrieve.add(tmp); //add does not block, put does
	}
	
	/**
	 * Looks up a url to see if it is allowed by the site's robots.txt rules.
	 * Precondition: containsKey(baseURL) is true — a missing entry throws
	 * NullPointerException. NOTE(review): another thread could in principle
	 * act between a caller's containsKey() check and this call; rules are only
	 * ever added (never removed), so the entry cannot disappear.
	 * @param baseURL the base domain
	 * @param requestedUrl the url that we are attempting to be access.
	 * @return whether or not the url is blocked.
	 */
	public boolean isAllowed(String baseURL, String requestedUrl) {
		return accessMap.get(baseURL).isAllowed(requestedUrl);
	}
	
	/**
	 * Add the RobotRules to the HashMap. It is possible that another thread just added
	 * the rules, in that case the rules will be overwritten; this risk is taken to increase
	 * speed. It would take longer to lock the HashMap and wait for a server request for robots.txt
	 * than to just replace the existing rules.
	 * @param baseURL the website's baseUrl
	 * @param rules The robot rules we acquired from the baseURL's robots.txt
	 */
	public void addRobotRules(String baseURL, BaseRobotRules rules) {
		accessMap.put(baseURL, rules);
	}
	
	/**
	 * Checks if the baseUrl is in the map. The map is a synchronized wrapper, so
	 * the lookup cannot interleave with a concurrent addition.
	 * @param baseURL The website's base URL.
	 * @return Whether or not the key is found in the map
	 */
	public boolean containsKey(String baseURL) {
		return accessMap.containsKey(baseURL);
	}
	
	/**
	 * Runnable adapter that drives the single-threaded crawl loop on the
	 * worker thread started in startButton().
	 */
	private class SingleThread implements Runnable
	{
		@Override
		public void run() 
		{
			singleThreadExecution();	
		}	
	}
	
	
}
