//Fusion of the 422 Proportion
//Dustin Striplin, Tyler Simrell, Lawrence Grass, and Jacob Hall
//Finished 5/1/13

package model;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.atomic.AtomicInteger;

import net.htmlparser.jericho.Source;

import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.varia.NullAppender;

import controller.MainController;
import crawlercommons.fetcher.SimpleHttpFetcher;
import crawlercommons.fetcher.UserAgent;
import crawlercommons.robots.BaseRobotRules;
import crawlercommons.robots.RobotUtils;
import crawlercommons.robots.SimpleRobotRulesParser;


/**
 * PageRetriever.java - Reads in a URL and puts its contents into
 * a PageInfo class, honoring each host's robots.txt rules. May run
 * either as a dedicated worker thread or in single-threaded mode.
 * @author Tyler Simrell, Dustin Striplin, Lawrence Grass
 * @version 4/20/13
 */
public class PageRetriever implements Runnable
{
	
	/**
	 * The PageInfo object of the currently running web site.
	 */
	private PageInfo currentPI;
	
	/**
	 * The link back to the main controller that allows access to data
	 * structures (data structures can block). 
	 */
	private MainController controller;
	
	/**
	 * Counter used to give each retriever thread a unique name.
	 * AtomicInteger so concurrent construction cannot produce duplicate names.
	 */
	private static final AtomicInteger threadNum = new AtomicInteger();
	
	/**
	 * The thread object (null in single-threaded mode).
	 */
	private Thread thread;
	
	/**
	 * Parses robots.txt files.
	 */
	private SimpleRobotRulesParser robotParser;
	
	/**
	 * Fetches robots.txt files over HTTP with tight timeouts so a slow
	 * host cannot stall the crawl.
	 */
	private SimpleHttpFetcher fetcher;
	
	
	/**
	 * Wires this retriever to the main controller and configures the
	 * robots.txt fetcher. When threaded, a worker thread is started
	 * immediately that pulls pages from the controller until told to stop.
	 * @param mc The main controller supplying work items and shared state.
	 * @param threaded True to spawn and start a dedicated worker thread.
	 */
	public PageRetriever(final MainController mc, final boolean threaded)
	{
		controller = mc;
		robotParser = new SimpleRobotRulesParser();
		fetcher = new SimpleHttpFetcher(new UserAgent("User-Agent", "TCSS422", " "));
		fetcher.setConnectionTimeout(1000);
		fetcher.setSocketTimeout(1000);
		fetcher.setMaxRedirects(0);
		fetcher.setMaxConnectionsPerHost(1);
		fetcher.setMaxRetryCount(0);
		BasicConfigurator.configure(new NullAppender()); //don't display log4j logs in console
		if(threaded)
		{
			//incrementAndGet is atomic, so concurrently-built retrievers get unique names
			thread = new Thread(this, "retriever " + threadNum.incrementAndGet());
			thread.start();
		}
	}
	
	/**
	 * This method gets the BufferedReader from the URL object, then
	 * puts it into the respective PageInfo object. After the whole page
	 * has been put into a string, it then gets put into the PageInfo object
	 * and returned.
	 * @return The PageInfo object with the HTML data within it. Returns null if the website
	 * 			is blocked by robots.txt, times out, or is not an HTML content type.
	 * @throws IOException If opening or reading the connection fails for a
	 * reason other than a timeout.
	 */
	public PageInfo getOutput() throws IOException
	{
		URL currentURL = validateLegalURL();
		if(currentURL == null) return null; //if it's not legal then quit
		HttpURLConnection conn = (HttpURLConnection) currentURL.openConnection();
		try
		{
			conn.setConnectTimeout(1000);
			conn.setReadTimeout(1000);
			conn.setRequestProperty("User-Agent", "TCSS422 spider ninja");
			String contentType = conn.getContentType();
			//startsWith, not equals: headers commonly read "text/html; charset=UTF-8"
			if(contentType != null && !contentType.startsWith("text/html"))
				return null;
			//try-with-resources closes the stream even if Jericho parsing throws.
			//NOTE(review): assumes UTF-8; ideally the charset would be parsed out
			//of the Content-Type header — confirm against the sites crawled.
			try(BufferedReader rdr = new BufferedReader(
					new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8)))
			{
				currentPI.setPageContent(new Source(rdr));
			}
		}
		catch(java.net.SocketTimeoutException exception)
		{
			return null; //slow host: skip this page rather than stall the crawl
		}
		finally
		{
			conn.disconnect(); //release the connection on every exit path
		}
		
		return currentPI;
	}
	
	/**
	 * Used for single threaded mode, this method allows for manually
	 * putting a new PageInfo object into the currentPI variable
	 * @param newPI The new page info object
	 */
	public void setCurrentPageInfo(PageInfo newPI)
	{
		currentPI = newPI; 
	}

	/**
	 * Worker loop: repeatedly takes a page from the controller, downloads
	 * it, and hands the result back. Exits when the controller refuses the
	 * page (insertItemPageBuffer returns false) or the thread is interrupted.
	 */
	@Override
	public void run() 
	{
		boolean running = true;
		
		while(running)
		{
			try
			{
				currentPI = controller.takeItemToRetrieve();
				currentPI.setPageStartTime(System.nanoTime());
				currentPI.setPageLimit(controller.getPageLimit());
				PageInfo thePage = getOutput(); //if this wasn't a valid link null is returned
				if(thePage != null) 
					running = controller.insertItemPageBuffer(thePage);
			}
			catch(InterruptedException exception)
			{
				Thread.currentThread().interrupt(); //restore the interrupt status
				running = false; //actually exit: otherwise the loop spins, re-throwing
				                 //from the blocking take() on every iteration
			} 
			catch (IOException e) {	
				//best-effort crawl: a page that fails to download is simply skipped
			}
		}
	}
	
	/**
	 * Checks to see if the url has been blocked by robots.txt. If there is no info on the 
	 * url's base url then get it from the base url's robots.txt and cache the
	 * rules in the controller for later lookups.
	 * @return A url if we can search the site, otherwise return null.
	 */
	private URL validateLegalURL() {
		URL currentURL = null;
		try {
			currentURL = new URL(currentPI.getURL());
			String baseURL = "http://" + currentURL.getHost(); //get base for checking robots.txt
			if(!controller.containsKey(baseURL)) {
				//fetch the robot rules and add them to the controller's HashMap
				BaseRobotRules rules = RobotUtils.getRobotRules(fetcher, robotParser,
						new URL(baseURL + "/robots.txt"));
				controller.addRobotRules(baseURL, rules);
			}
			if(!controller.isAllowed(baseURL, currentPI.getURL())) { //if the site is not allowed then stop
				return null;
			}
		} catch (MalformedURLException e) {
			//a url we cannot even parse must not be fetched; returning null here
			//also prevents skipping the robots.txt check when the rules URL is bad
			return null;
		}
		return currentURL;
	}
}
