package nz.ac.massey.spidernetpn.webcrawler;

import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.Authenticator;
import java.net.MalformedURLException;
import java.net.PasswordAuthentication;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.log4j.Appender;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.HTMLLayout;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.SimpleLayout;
import org.apache.log4j.WriterAppender;

import edu.uci.ics.jung.graph.DirectedGraph;
import edu.uci.ics.jung.graph.DirectedSparseMultigraph;
import edu.uci.ics.jung.graph.util.Graphs;

/**
 * <p>
 * A web crawler backed by a fixed pool of three worker threads. Start a crawl
 * by calling {@link #submit(String)}; limit how much is crawled with
 * {@link #setMaxUrls(int)} (absolute number of pages) and
 * {@link #setMaxDepth(int)} (link distance from the first URL). Discovered
 * links are recorded in a directed graph of {@link URL} vertices, available
 * through {@link #getGraph()}.
 * </p>
 *
 * <p>
 * A running crawl can be suspended with {@link #pause()} and resumed with
 * {@link #unpause()}. Dropping all references to the crawler lets the garbage
 * collector shut the worker pool down (see {@link #finalize()}); workers close
 * their own connections.
 * </p>
 *
 * @author rose
 * @see #submit(String)
 */
public class WebCrawler extends BlacklistedWebpages {
	private static final Logger LOGGER = Logger.getLogger(WebCrawler.class);
	static {
		String pattern = "Milliseconds since program start: %r %n";
		pattern += "Classname of caller: %C %n";
		pattern += "Date in ISO8601 format: %d{ISO8601} %n";
		pattern += "Location of log event: %l %n";
		pattern += "Message: %m %n %n";

		// FIXME Get this appender working.
		//LOGGER.addAppender(new ConsoleAppender(new PatternLayout(pattern)));

	}

	/** Number of worker threads in the crawl pool (also used by unpause()). */
	private static final int POOL_SIZE = 3;

	/** Characters accepted inside an href attribute value. */
	private static final String VALID_HTTP_CHARS = "[a-zA-Z0-9_=~./:&#%\\?\\-]*";
	/** Matches href="..." attributes; the quoted value is extracted in run(). */
	private static final Pattern HTTP_PATTERN = Pattern
			.compile("href[ ]*=[ ]*\"" + VALID_HTTP_CHARS + "\"");

	private Collection<ActionListener> listeners = new HashSet<ActionListener>();
	private ExecutorService threadPool = null;
	private DirectedGraph<URL, Integer> directedGraph = new DirectedSparseMultigraph<URL, Integer>();
	// URLs already handed to a worker. Wrapped in a synchronized set so that
	// the add() in submit(String, int) is atomic across worker threads.
	private Set<String> visitedUrls = Collections.synchronizedSet(new HashSet<String>());

	private int maxUrls = Integer.MAX_VALUE;
	private int maxDepth = Integer.MAX_VALUE;

	private int edge = 0; // Unique identifier for links in the graph.
	// Tasks drained from the pool by pause(); re-submitted by unpause().
	private List<Runnable> pausedThreads;

	/**
	 * Default constructor.
	 *
	 * @param useProxy
	 *            When true, sets the JVM-wide HTTP proxy properties and a
	 *            default {@link Authenticator} before any crawling starts.
	 */
	public WebCrawler(boolean useProxy) {
		threadPool = Executors.newFixedThreadPool(POOL_SIZE);
		if (useProxy) {
			System.getProperties().setProperty("http.proxySet", "true");
			System.setProperty("http.proxyHost", "tur-cache2.massey.ac.nz");
			System.setProperty("http.proxyPort", "8080");

			// SECURITY: hard-coded proxy credentials should be moved to
			// external configuration and rotated; do not keep them in source.
			Authenticator.setDefault(new Authenticator() {
				protected PasswordAuthentication getPasswordAuthentication() {
					return new PasswordAuthentication("SEAT\\06147763",
							"9988".toCharArray());
				}
			});
		}
	}

	/**
	 * Limits how deep the crawl may go: pages whose link distance from the
	 * originally submitted URL exceeds {@code max} are not fetched.
	 *
	 * @param max
	 *            the maximum link depth
	 */
	public synchronized void setMaxDepth(int max) {
		this.maxDepth = max;
	}

	/**
	 * Allows you to listen out for updates to the graph.
	 *
	 * @param listener
	 *            The listener that'll be notified of any added edges on the
	 *            graph.
	 */
	public synchronized void addActionListener(ActionListener listener) {
		listeners.add(listener);
	}

	/**
	 * @param max
	 *            Limits the number of URL's the crawler will crawl.
	 */
	public synchronized void setMaxUrls(int max) {
		this.maxUrls = max;
	}

	/**
	 * Records a link in the directed graph and notifies listeners.
	 *
	 * @param from
	 *            The url we're linking out of.
	 * @param to
	 *            The end point. This doesn't necessarily link back.
	 * @throws FoundMaxException
	 *             if the graph already holds the configured maximum number of
	 *             vertices; the pool is paused before this is thrown.
	 */
	private synchronized void addLink(URL from, URL to) throws FoundMaxException {
		// Check the limit OUTSIDE the catch-all below. Previously the
		// FoundMaxException raised here was swallowed by catch (Exception),
		// so callers never saw the "reached max" signal.
		pauseThreadsIfReachedMax();
		try {
			directedGraph.addEdge(edge++, from, to);

			for (ActionListener listener : listeners)
				listener.actionPerformed(new ActionEvent(from,
						edge,
						"Found another edge on the Digraph."));
		} catch (Exception e) {
			// Graph/listener failures are non-fatal; log and carry on.
			LOGGER.debug("Failed to add edge from " + from.getPath()
					+ " to " + to.getPath());
		}
	}

	/**
	 * This is what is called from the GO button, or whatever adds the first URL
	 * to the program. You can call this as many times as you like to add more
	 * URL's.
	 *
	 * @param url
	 *            A string representing a URL.
	 * @throws MalformedURLException
	 *             Thrown when the url input isn't recognised.
	 * @throws FoundMaxException
	 *             declared for API compatibility; the depth-0 submission path
	 *             does not normally raise it.
	 */
	public synchronized void submit(String url) throws MalformedURLException,
			FoundMaxException {
		int currentDepth = 0;

		if (threadPool.isShutdown()) {
			// Pool is paused: queue the task so unpause() can run it later.
			// Guard against null: the pool may have been stopped by
			// finalize(), which does not populate pausedThreads.
			if (pausedThreads == null)
				pausedThreads = new ArrayList<Runnable>();
			pausedThreads.add(new RunnableDownloaderThread(url, currentDepth));
		} else {
			submit(url, currentDepth);
		}
	}

	/**
	 * Pauses the crawler once the graph holds at least {@code maxUrls}
	 * vertices.
	 *
	 * @throws FoundMaxException
	 *             always thrown when the limit has been reached, after the
	 *             pool has been paused.
	 */
	protected synchronized void pauseThreadsIfReachedMax() throws FoundMaxException {
		if (maxUrls <= directedGraph.getVertexCount()) {
			pause();
			throw new FoundMaxException("Paused because we have found the " +
					"maximum number of URLs");
		}
	}

	/**
	 * @param depth
	 *            the link depth to test
	 * @return true when {@code depth} exceeds the configured maximum depth.
	 */
	protected synchronized boolean hasReachedMaxDepth(int depth) {
		return maxDepth < depth;
	}

	/**
	 * Queues a URL for crawling unless it is too deep or already visited.
	 *
	 * @param url
	 *            the URL to crawl
	 * @param depth
	 *            link distance of this URL from the first submission
	 */
	private void submit(String url, int depth) throws MalformedURLException,
			FoundMaxException {
		// Stop if this link is already deeper than the configured limit.
		if (this.hasReachedMaxDepth(depth)) {
			return;
		}

		// Set.add() returns false when the url was already present; using the
		// return value makes check-and-mark atomic, closing the race the old
		// contains()/add() pair had between worker threads.
		if (visitedUrls.add(url)) {
			threadPool.execute(new RunnableDownloaderThread(url, depth));
		}
	}

	/**
	 * @param url
	 *            the link to test
	 * @return true when the url's file extension (dot included) appears in
	 *         {@code BLACKLIST}.
	 */
	private boolean isOnBlackList(final String url) {
		final int locationOfDot = url.lastIndexOf('.');

		// If no extension, stop here.
		if (locationOfDot < 0)
			return false;

		// Extract extension (the dot is kept as part of it).
		final String extension = url.substring(locationOfDot);

		return BLACKLIST.contains(extension.toLowerCase());
	}

	/**
	 * This method here will return you what's currently been crawled by the web
	 * crawler.
	 *
	 * @return An unmodifiable directed graph, linking URL objects together.
	 */
	public DirectedGraph<URL, Integer> getGraph() {
		return Graphs.unmodifiableDirectedGraph(directedGraph);
	}

	/**
	 * This is called when the garbage collector takes this object away. It'll
	 * try to stop active threads.
	 */
	@Override
	protected synchronized void finalize() throws Throwable {
		threadPool.shutdownNow();
		super.finalize();
	}

	/**
	 * Stops the worker pool, remembering any tasks that had not yet started so
	 * {@link #unpause()} can resume them.
	 */
	public synchronized void pause() {
		if (!threadPool.isShutdown())
			pausedThreads = threadPool.shutdownNow();
	}

	/**
	 * Restarts a paused crawler with a fresh pool, re-submitting the tasks
	 * captured by {@link #pause()}.
	 */
	public synchronized void unpause() {
		if (threadPool.isShutdown()) {
			threadPool = Executors.newFixedThreadPool(POOL_SIZE);
			if (pausedThreads != null)
				for (Runnable pausedThread : pausedThreads)
					threadPool.execute(pausedThread);
		}
	}

	/**
	 * This inner class accesses variables inside the outer class. It represents
	 * a running <b>thread</b> that pulls down a URL to strip it of its links.
	 *
	 * @author rose
	 */
	class RunnableDownloaderThread implements Runnable {
		private URL url;
		private InputStream connection;
		private int depth;

		/**
		 * Constructor. This method takes in the URL we're to process and will
		 * wait till the run() method is invoked.
		 *
		 * @param url
		 *            The URL we're going to crawl.
		 * @param depth
		 *            Link distance of this URL from the first submission.
		 * @throws MalformedURLException
		 *             if the url's syntax is invalid; checked here so bad
		 *             input fails fast, before the task is queued.
		 */
		public RunnableDownloaderThread(String url, int depth)
				throws MalformedURLException {
			this.url = new URL(url);
			this.depth = depth;
		}

		/**
		 * This method is called when the thread executes. It follows a process
		 * like this:<br/>
		 * <li>Buffer and pull down HTML from the URL's address</li> <li>Match
		 * each line of the page to a pattern matcher that looks like another
		 * URL</li> <li>Extract this URL, add it to the list of URL's to process
		 * and record the link</li>
		 */
		@Override
		public void run() {
			// NOTE: the old guard "visitedUrls.contains(url)" compared a URL
			// against a Set<String> and so was always false (and URL.equals
			// can trigger DNS lookups). De-duplication already happens in
			// submit(String, int) before this task is queued, so it is gone.

			// Open URL connection and stream.
			try {
				connection = url.openConnection().getInputStream();

				BufferedReader pageReader = new BufferedReader(
						new InputStreamReader(connection));
				LOGGER.info("URL stream: " + url + " opened.");

				String line = pageReader.readLine();
				while (line != null && !hasReachedMaxDepth(depth)) {
					// Check for urls using regular expressions.
					final Matcher matcher = HTTP_PATTERN.matcher(line);

					// For every url we find
					while (matcher.find() && !hasReachedMaxDepth(depth)) {
						String anotherURL = matcher.group();

						// Keep only the text between the quotes of
						// href="..." (quotes excluded).
						int beginIndex = anotherURL.indexOf('"') + 1;
						int endIndex = anotherURL.lastIndexOf('"');
						// FIX: Strings are immutable — the original called
						// trim() and discarded the result.
						anotherURL = anotherURL.substring(beginIndex, endIndex)
								.trim();

						// If the link is an anchor, remove the anchor part of
						// the link. (FIX: the original discarded the result
						// of substring(), so anchors were never stripped.)
						int anchorLocation = anotherURL.lastIndexOf('#');
						if (0 <= anchorLocation)
							anotherURL = anotherURL.substring(0, anchorLocation);

						anotherURL = toAbsoluteLink(anotherURL);

						try {
							if (!isOnBlackList(anotherURL)) {
								// add the url to the queue of urls to be
								// processed
								submit(anotherURL, depth + 1);

								// add this link to the graph
								addLink(url, new URL(anotherURL));
							}
						} catch (MalformedURLException e) {
							/*
							 * Fail silently. If the URL isn't valid at this
							 * stage, then we'll just keep looking for others.
							 */
						} catch (FoundMaxException e) {
							// We've found the maximum number of URLs we're
							// going to find; stop scanning this page.
							return;
						}
					}

					line = pageReader.readLine();
				}
			} catch (IOException e) {
				// Covers MalformedURLException too; nothing to crawl.
				return;
			} catch (Exception e) {
				// Defensive: never let a worker kill the pool thread.
			} finally {
				if (connection != null) {
					try {
						connection.close();
					} catch (IOException e) {
						// Best effort only; nothing useful to do here.
					}
				}
			}
		}

		/**
		 * Resolves a link found on this page to an absolute URL.
		 *
		 * @param link
		 *            an href value: absolute ("http://host/x"), host-relative
		 *            ("/index.html"), or page-relative ("index.html").
		 * @return the absolute form of {@code link}, with "&" re-escaped as
		 *         "&amp;amp;" for later XML serialisation.
		 */
		private String toAbsoluteLink(String link) {
			String absoluteUrl = null;
			String host = this.url.getHost();
			String protocol = this.url.getProtocol() + "://";

			// for cases like href="http://www.something.com/"
			if (link.contains("://"))
				absoluteUrl = link;

			// for cases like href="/index.html"
			else if (link.startsWith("/")) {
				absoluteUrl = protocol + host + link;
			}

			// for cases like "href=index.html"
			else {
				String path = this.url.getPath();

				// Cut off any pages at the end.
				int lastSlash = path.lastIndexOf("/") + 1;
				path = path.substring(0, lastSlash);
				absoluteUrl = protocol + host + path + link;
			}

			// HACK: needed to save to XML. Unescape first so an existing
			// "&amp;" is not double-escaped, then escape every "&".
			String newURL = absoluteUrl.replaceAll("&amp;", "&");
			absoluteUrl = newURL.replaceAll("&", "&amp;");
			LOGGER.debug("Hacked URL to replace & with &amp; " + absoluteUrl);

			return absoluteUrl;
		}

		/**
		 * Close the connection if forced to shut down.
		 */
		@Override
		protected void finalize() throws Throwable {
			if (connection != null) {
				try {
					connection.close();
				} catch (IOException e) {
					// Best effort only.
				}
			}
			super.finalize();
		}
	}
}
