/*
 * Copyright 2013 Solace Systems, Inc.
 * 
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

package com.solacesystems.tools.ant;

import java.util.List;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentSkipListSet;

import org.ccil.cowan.tagsoup.Parser;
import org.xml.sax.InputSource;
import com.solacesystems.tools.ant.LinkFinder.LinkEventListener;

/**
 * Manages a queue of documents to parse and scan for links
 * @author swilson
 *
 */
public class Crawler {
	
	private static final String[] DEFAULT_EXTS = {"html", "htm"};
	private static final String[] REMOTE_SCHEMES = {"http", "https", "ftp"};
	
	// Pending documents; concurrent because link events can enqueue new
	// targets while crawl() is draining the queue.
	private Queue<Target> m_Targets = new ConcurrentLinkedQueue<Target>();
	// Every target ever enqueued; membership here suppresses duplicate visits.
	private Set<Target> m_Visited = new ConcurrentSkipListSet<Target>();
	private Parser m_Parser = new Parser();
	private final LinkFinder m_LinkFinder;
	private List<String> m_ParsedExts = Arrays.asList(DEFAULT_EXTS.clone());
	// Replaced wholesale by addRemoteScheme() (copy-on-write); never mutated in place.
	private List<String> m_RemoteSchemes = Arrays.asList(REMOTE_SCHEMES.clone());
	
	private boolean m_FollowLinks = false;
	private boolean m_FollowRemote = false;
	private boolean m_StripQueryAndFragment = false;
	// Assigned once in the constructor, so final (consistent with m_LinkFinder).
	private final EventHandler m_Handler;
	
	/**
	 * Abstract base class that interested parties should subclass to receive
	 * events from the Crawler. The default implementations simply report to
	 * {@code System.err}.
	 * @author swilson
	 *
	 */
	public static abstract class EventHandler {
		/**
		 * Called by a Crawler when an exception is thrown during the processing
		 * of a file.
		 * @param location Closest location in a document to where the exception was thrown
		 * @param ex The exception that was thrown
		 */
		public void error(Location location, Exception ex) {
			System.err.println("Unhandled exception while processing file: " + location.toString());
			ex.printStackTrace();
		}
		
		/**
		 * Called by a Crawler when a document is discovered to be missing
		 * @param file The file that is missing
		 * @param locations Array of Location objects where the missing file is referenced
		 */
		public void missing(URI file, Location[] locations) {
			System.err.println("File not found: " + file);
			for (Location location : locations) {
				System.err.println("\tReferenced from: " + location.toString());
			}
		}
	}
	
	/**
	 * Non-abstract class that is the default event handler for the Crawler.
	 * Inherits the stderr-reporting behavior of {@link EventHandler} unchanged.
	 * @author swilson
	 *
	 */
	private static class NullEventHandler extends EventHandler {}
	
	/**
	 * Holds a document to be scanned and the first reference to said document.
	 * Identity (equals/hashCode/compareTo) is based solely on the URI, so two
	 * targets for the same URI from different locations are considered equal.
	 * @author swilson
	 *
	 */
	static final class Target implements Comparable<Target> {
		private final URI m_URI;
		private final Location m_Location;
		
		/**
		 * @param location Where the URI was first referenced
		 * @param uri The document to be scanned
		 */
		public Target(Location location, URI uri) {
			this.m_URI = uri;
			this.m_Location = location;
		}
		
		public URI getURI() {
			return m_URI;
		}
		
		public Location getLocation() {
			return m_Location;
		}
		
		@Override
		public boolean equals(Object obj) {
			if (obj instanceof Target) {
				return ((Target)obj).m_URI.equals(this.m_URI);
			}
			return false;
		}
		
		@Override
		public int hashCode() {
			return m_URI.hashCode();
		}

		@Override
		public int compareTo(Target o) {
			// Consistent with equals: both delegate to the URI only.
			return m_URI.compareTo(o.m_URI);
		}
	}
	
	/**
	 * Creates a Crawler and initializes it with the default event handler
	 */
	public Crawler() {
		this(new NullEventHandler());
	}
	
	/**
	 * Create a Crawler and initialize it with the specified event handler
	 * @param crawlerEventHandler The object which will receive notifications from the Crawler
	 */
	public Crawler(EventHandler crawlerEventHandler) {
		this(crawlerEventHandler, new LinkFinder());
	}
	
	/**
	 * Package-private constructor allowing a LinkFinder to be injected (for tests).
	 * @param crawlerEventHandler The object which will receive notifications from the Crawler
	 * @param linkFinder The SAX handler that discovers links in parsed documents
	 */
	Crawler(EventHandler crawlerEventHandler, LinkFinder linkFinder) {
		m_LinkFinder = linkFinder;
		m_Handler = crawlerEventHandler;
		m_Parser.setContentHandler(m_LinkFinder);
		m_Parser.setErrorHandler(m_LinkFinder);
		
		m_LinkFinder.addLinkEventListener(new LinkEventListener() {
			@Override
			public void linkFound(Location location, URI uri) {
				if (!m_FollowLinks) {
					// It might be better style to use inheritance to disable this
					return;
				}
				
				// Check if the uri is remote. A null scheme means a relative
				// (and therefore local) reference, so it must not be rejected;
				// calling equalsIgnoreCase on it would throw an NPE.
				String scheme = uri.getScheme();
				if (!m_FollowRemote && scheme != null && !scheme.equalsIgnoreCase("file")) {
					return;
				}
				
				// Check if the file type is allowed. Extensionless paths are
				// allowed through (idx < 0): they may be directories or
				// extension-free HTML pages.
				if (uri.getPath() != null) {
					String[] components = uri.getPath().split("/");
					if (components.length > 0) {
						String last = components[components.length-1];
						int idx = last.lastIndexOf(".");
						if (idx >= 0) {
							last = last.substring(idx+1).toLowerCase();
							if (!m_ParsedExts.contains(last)) {
								return;
							}
						}
					}
				}
			
				if (m_StripQueryAndFragment) {
					// Strip the query and fragment so that page.html?a=1#top and
					// page.html dedupe to a single visit.
					try {
						uri = new URI(uri.getScheme(),
								uri.getUserInfo(),
								uri.getHost(),
								uri.getPort(),
								uri.getPath(),
								null,
								null);
					} catch (URISyntaxException e) {
						error(location, e);
						return;
					}
				}
				
				addTarget(new Target(location, uri));
			}
			
			@Override
			public void error(Location location, Exception ex) {
				Crawler.this.raiseError(location, ex);
			}
		});
	}
	
	private void raiseError(Location location, Exception e) {
		m_Handler.error(location, e);
	}
	
	/**
	 * Add a new target to be parsed
	 * @param uri The target to be parsed
	 */
	public void addTarget(URI uri) {
		addTarget(new Target(new Location(), uri));
	}
	
	private void addTarget(Target target) {
		// The set's add is atomic, so this dedupe-then-enqueue is thread safe:
		// only the thread that first adds the target gets to enqueue it.
		if (m_Visited.add(target)) {
			m_Targets.add(target);
		}
	}
	
	/**
	 * Add a new target to be parsed from a String
	 * @param path URI of target to parse
	 * @throws URISyntaxException When the given path cannot be parsed into a URI
	 */
	public void addTarget(String path) throws URISyntaxException {
		addTarget(new URI(path));
	}
	
	/**
	 * @return The next queued target, or null when the queue is empty
	 */
	protected Target nextTarget() {
		return m_Targets.poll();
	}
	
	/**
	 * The bread and butter of the Crawler. Call this to process <b>all</b>
	 * targets, including those discovered during the crawling process.
	 */
	public void crawl() {
		while (crawlOne());
	}
	
	/**
	 * Crawl a single page. Parse failures are reported through the event
	 * handler rather than thrown.
	 * @return True if a document was crawled, or false if the queue is empty
	 */
	public boolean crawlOne() {
		Target currentT = nextTarget();
		
		if (currentT == null) {
			return false;
		}
		
		URI currentTarget = currentT.getURI();
		
		try {
			InputSource source;
			// A null scheme means a relative/local reference; guard it before
			// lowercasing to avoid an NPE on schemeless URIs.
			String scheme = currentTarget.getScheme();
			if (scheme != null && m_RemoteSchemes.contains(scheme.toLowerCase())) {
				// Special Handling for remote streams to check the MIME type
				URL currentURL = currentTarget.toURL();
				URLConnection connection = currentURL.openConnection();
				connection.connect();
				
				// getContentType() returns null when the type is unknown;
				// treat that the same as non-HTML and skip parsing.
				String contentType = connection.getContentType();
				if (contentType == null || !contentType.toLowerCase().contains("html")) {
					return true; // Not html, don't care!
				}
				
				source = new InputSource(connection.getInputStream());
				source.setSystemId(currentTarget.toString());
				
				// TODO: Maybe use the charset information provided here
				
			} else {
				source = new InputSource(currentTarget.toString());
			}
			
			m_Parser.parse(source);
		} catch (IOException e) {
			// An unreadable document is reported as "missing", citing the
			// location that first referenced it.
			m_Handler.missing(currentTarget,
					new Location[] {currentT.getLocation()});
		} catch (Exception e) {
			m_Handler.error(currentT.getLocation(), e);
		}
		
		return true;
	}
	
	/**
	 * If true the crawler will follow links found in documents
	 * @return True if the crawler should follow links
	 */
	public boolean getFollowLinks() {
		return m_FollowLinks;
	}
	
	/**
	 * If true the crawler will follow links found in documents
	 * @param v Set to true to make the Crawler follow links
	 */
	public void setFollowLinks(boolean v) {
		m_FollowLinks = v;
	}
	
	/**
	 * If true the crawler will follow links that are on remote servers.
	 * 
	 * This is really just a simple test to see if the URIs have the file schema
	 * or not.
	 * @param v Set to true to make the Crawler follow remote links
	 */
	public void setFollowRemote(boolean v) {
		m_FollowRemote = v;
	}
	
	/**
	 * If true the crawler will follow links that are on remote servers.
	 * 
	 * This is really just a simple test to see if the URIs have the file schema
	 * or not.
	 * @return True if the crawler will follow remote links
	 */
	public boolean getFollowRemote() {
		return m_FollowRemote;
	}
	
	/**
	 * If true, the crawler will remove the query and fragment (hash, anchor)
	 * portions of URIs found in documents.
	 * @param v True if the crawler will strip query and fragments from URIs
	 */
	public void setStripQueryAndFragment(boolean v) {
		m_StripQueryAndFragment = v;
	}
	
	/**
	 * If true, the crawler will remove the query and fragment (hash, anchor)
	 * portions of URIs found in documents.
	 * @return True if the crawler will strip query and fragments from URIs
	 */
	public boolean getStripQueryAndFragment() {
		return m_StripQueryAndFragment;
	}

	/**
	 * Method for adding new remote schemes.
	 * Copy-on-write so a concurrent reader in crawlOne() never observes a
	 * partially-modified list.
	 * @param scheme New scheme to add
	 */
	void addRemoteScheme(String scheme) {
		List<String> schemes = new ArrayList<String>(m_RemoteSchemes);
		schemes.add(scheme);
		this.m_RemoteSchemes = schemes;
	}
}
