// HTMLParser Library - A java-based parser for HTML
// http://htmlparser.org
// Copyright (C) 2006 Somik Raha
//
// Revision Control Information
//
// $URL: https://svn.sourceforge.net/svnroot/htmlparser/trunk/parser/src/main/java/org/htmlparser/Parser.java $
// $Author: derrickoswald $
// $Date: 2006-09-17 21:02:25 -0400 (Sun, 17 Sep 2006) $
// $Revision: 8 $
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the Common Public License; either
// version 1.0 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Common Public License for more details.
//
// You should have received a copy of the Common Public License
// along with this library; if not, the license is available from
// the Open Source Initiative (OSI) website:
//   http://opensource.org/licenses/cpl1.0.php

package edu.hit.pact.pgse.crawler.util;

import java.net.URLConnection;

import org.htmlparser.Node;
import org.htmlparser.NodeFactory;
import org.htmlparser.NodeFilter;
import org.htmlparser.PrototypicalNodeFactory;
import org.htmlparser.lexer.Lexer;
import org.htmlparser.lexer.Page;
import org.htmlparser.util.DefaultParserFeedback;
import org.htmlparser.util.IteratorImpl;
import org.htmlparser.util.NodeIterator;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;
import org.htmlparser.util.ParserFeedback;

/**
 * The main parser class, adapted from the HTML Parser library for use by
 * the crawler. Unlike the library's original <code>Parser</code>, this
 * class has no public constructors: instances are created with the static
 * factory {@link #createParser(String, String, String) createParser()},
 * which parses an in-memory HTML string associated with a given source
 * URL.
 * <p>
 * The Parser provides access to the contents of the page, via a
 * {@link #elements() NodeIterator} or a {@link #parse(NodeFilter)
 * NodeList}.
 * <p>
 * Typical usage of the parser is: <code>
 * <pre>
 * CustomParser parser = CustomParser.createParser ("http://whatever", html, null);
 * NodeList list = parser.parse (null);
 * // do something with your list of nodes.
 * </pre>
 * </code>
 * </p>
 * <p>
 * What types of nodes and what can be done with them is dependant on the setup,
 * but in general a node can be converted back to HTML and it's children
 * (enclosed nodes) and parent can be obtained, because nodes are nested. See
 * the {@link Node} interface.
 * </p>
 * <p>
 * For example, if the URL contains:<br>
 * <code>
 * {@.html
 * <html>
 * <head>
 * <title>Mondays -- What a bad idea.</title>
 * </head>
 * <body BGCOLOR="#FFFFFF">
 * Most people have a pathological hatred of Mondays...
 * </body>
 * </html>}
 * </code><br>
 * and the example code above is used, the list contain only one element, the
 * {@.html <html>} node. This node is a {@link org.htmlparser.tags tag},
 * which is an object of class {@link org.htmlparser.tags.Html Html} if the
 * default {@link NodeFactory} (a {@link PrototypicalNodeFactory}) is used.
 * </p>
 * <p>
 * To get at further content, the children of the top level nodes must be
 * examined. When digging through a node list one must be conscious of the
 * possibility of whitespace between nodes, e.g. in the example above: <code>
 * <pre>
 * Node node = list.elementAt (0);
 * NodeList sublist = node.getChildren ();
 * System.out.println (sublist.size ());
 * </pre>
 * </code> would print out 5, not 2, because there are newlines after {@.html
 *  <html>}, {@.html </head>} and {@.html </body>} that
 * are children of the HTML node besides the {@.html <head>} and
 * {@.html <body>} nodes.
 * </p>
 * <p>
 * Because processing nodes is so common, two interfaces are provided to ease
 * this task, {@link org.htmlparser.filters filters} and
 * {@link org.htmlparser.visitors visitors}.
 */
public class CustomParser {
	/**
	 * Feedback object used to report information, warnings and errors.
	 * Never <code>null</code>; defaults to {@link #DEVNULL}.
	 */
	private ParserFeedback mFeedback;

	/**
	 * The html lexer associated with this parser. It owns the current
	 * {@link Page} and the {@link NodeFactory}.
	 */
	private Lexer mLexer;

	/**
	 * A quiet message sink. Use this for no feedback.
	 */
	public static final ParserFeedback DEVNULL = new DefaultParserFeedback(
			DefaultParserFeedback.QUIET);

	/**
	 * A verbose message sink. Use this for output on <code>System.out</code>.
	 */
	public static final ParserFeedback STDOUT = new DefaultParserFeedback();

	/**
	 * Creates a parser on an in-memory HTML string.
	 * 
	 * @param url
	 *            The URL the HTML was retrieved from. It is recorded on the
	 *            underlying {@link Page} (e.g. so relative links can be
	 *            resolved against it); no connection is opened.
	 * @param html
	 *            The string containing HTML.
	 * @param charset
	 *            <em>Optional</em>. The character set encoding of the page.
	 *            If charset is <code>null</code> the default character set
	 *            is used.
	 * @return A parser with the <code>html</code> string as input.
	 * @exception IllegalArgumentException
	 *                if <code>url</code> or <code>html</code> is
	 *                <code>null</code>.
	 */
	public static CustomParser createParser(String url, String html,
			String charset) {
		if (null == url) {
			throw new IllegalArgumentException("url cannot be null");
		}
		if (null == html) {
			throw new IllegalArgumentException("html cannot be null");
		}

		Page page = new Page(html, charset);
		CustomParser ret = new CustomParser(new Lexer(page), STDOUT);
		ret.setURL(url);

		return (ret);
	}

	/**
	 * Construct a parser using the provided lexer and feedback object. This
	 * would be used to create a parser for special cases where the normal
	 * creation of a lexer on a URLConnection needs to be customized.
	 * 
	 * @param lexer
	 *            The lexer to draw characters from.
	 * @param fb
	 *            The object to use when information, warning and error messages
	 *            are produced. If <em>null</em> no feedback is provided.
	 */
	private CustomParser(Lexer lexer, ParserFeedback fb) {
		setFeedback(fb);
		setLexer(lexer);
		setNodeFactory(new PrototypicalNodeFactory());
	}

	/**
	 * Record the URL on the page this parser is reading from. Note that this
	 * only sets the URL property of the existing page; it does not open a
	 * connection or create a new lexer.
	 * 
	 * @param url
	 *            The new URL for the parser.
	 */
	private void setURL(String url) {
		getLexer().getPage().setUrl(url);
	}

	/**
	 * Set the encoding for the page this parser is reading from.
	 * 
	 * @param encoding
	 *            The new character set to use.
	 * @throws ParserException
	 *             If the encoding change causes characters that have already
	 *             been consumed to differ from the characters that would have
	 *             been seen had the new encoding been in force.
	 * @see org.htmlparser.util.EncodingChangeException
	 */
	public void setEncoding(String encoding) throws ParserException {
		getLexer().getPage().setEncoding(encoding);
	}

	/**
	 * Set the lexer for this parser. The current NodeFactory is transferred to
	 * (set on) the given lexer, since the lexer owns the node factory object.
	 * It does not adjust the <code>feedback</code> object.
	 * 
	 * @param lexer
	 *            The lexer object to use.
	 * @see #setNodeFactory
	 * @see #getLexer
	 * @exception IllegalArgumentException
	 *                if <code>lexer</code> is <code>null</code>.
	 */
	public void setLexer(Lexer lexer) {
		if (null == lexer) {
			throw new IllegalArgumentException("lexer cannot be null");
		}
		// move a node factory that's been set to the new lexer
		NodeFactory factory = null;
		if (null != getLexer()) {
			factory = getLexer().getNodeFactory();
		}
		if (null != factory) {
			lexer.setNodeFactory(factory);
		}
		mLexer = lexer;
		// warn about content that's not likely text
		String type = mLexer.getPage().getContentType();
		if (type != null && !type.startsWith("text")) {
			getFeedback().warning(
					"URL " + mLexer.getPage().getUrl()
							+ " does not contain text");
		}
	}

	/**
	 * Returns the lexer associated with the parser.
	 * 
	 * @return The current lexer.
	 * @see #setLexer
	 */
	public Lexer getLexer() {
		return (mLexer);
	}

	/**
	 * Set the current node factory.
	 * 
	 * @param factory
	 *            The new node factory for the current lexer.
	 * @exception IllegalArgumentException
	 *                if <code>factory</code> is <code>null</code>.
	 */
	public void setNodeFactory(NodeFactory factory) {
		if (null == factory) {
			throw new IllegalArgumentException("node factory cannot be null");
		}
		getLexer().setNodeFactory(factory);
	}

	/**
	 * Sets the feedback object used in scanning.
	 * 
	 * @param fb
	 *            The new feedback object to use. If this is null a
	 *            {@link #DEVNULL silent feedback object} is used.
	 * @see #getFeedback
	 */
	public void setFeedback(ParserFeedback fb) {
		if (null == fb) {
			mFeedback = DEVNULL;
		} else {
			mFeedback = fb;
		}
	}

	/**
	 * Returns the current feedback object.
	 * 
	 * @return The feedback object currently being used.
	 * @see #setFeedback
	 */
	public ParserFeedback getFeedback() {
		return (mFeedback);
	}

	//
	// Public methods
	//
	/**
	 * Returns an iterator (enumeration) over the top level html nodes.
	 * {@link org.htmlparser.nodes Nodes} can be of three main types:
	 * <ul>
	 * <li>{@link org.htmlparser.nodes.TagNode TagNode}</li>
	 * <li>{@link org.htmlparser.nodes.TextNode TextNode}</li>
	 * <li>{@link org.htmlparser.nodes.RemarkNode RemarkNode}</li>
	 * </ul>
	 * In general, when parsing with an iterator or processing a NodeList,
	 * recursion via {@link Node#getChildren()} is needed to reach nested
	 * nodes. For example: <code>
	 * <pre>
	 * void processMyNodes (Node node)
	 * {
	 *     if (node instanceof TextNode)
	 *     {
	 *         // downcast to TextNode
	 *         TextNode text = (TextNode)node;
	 *         // do whatever processing you want with the text
	 *         System.out.println (text.getText ());
	 *     }
	 *     if (node instanceof RemarkNode)
	 *     {
	 *         // downcast to RemarkNode
	 *         RemarkNode remark = (RemarkNode)node;
	 *         // do whatever processing you want with the comment
	 *     }
	 *     else if (node instanceof TagNode)
	 *     {
	 *         // downcast to TagNode
	 *         TagNode tag = (TagNode)node;
	 *         // do whatever processing you want with the tag itself
	 *         // ...
	 *         // process recursively (nodes within nodes) via getChildren()
	 *         NodeList nl = tag.getChildren ();
	 *         if (null != nl)
	 *             for (NodeIterator i = nl.elements (); i.hasMoreElements (); )
	 *                 processMyNodes (i.nextNode ());
	 *     }
	 * }
	 * 
	 * CustomParser parser = CustomParser.createParser (url, html, null);
	 * for (NodeIterator i = parser.elements (); i.hasMoreElements (); )
	 *     processMyNodes (i.nextNode ());
	 * </pre>
	 * </code>
	 * 
	 * @throws ParserException
	 *             If a parsing error occurs.
	 * @return An iterator over the top level nodes (usually the
	 *         <code>&lt;html&gt;</code> tag).
	 */
	public NodeIterator elements() throws ParserException {
		return (new IteratorImpl(getLexer(), getFeedback()));
	}

	/**
	 * Parse the given resource, using the filter provided. This can be used to
	 * extract information from specific nodes. When used with a
	 * <code>null</code> filter it returns an entire page which can then be
	 * modified and converted back to HTML (Note: the synthesis use-case is not
	 * handled very well; the parser is more often used to extract information
	 * from a web page).
	 * <p>
	 * For example, to replace the entire contents of the HEAD with a single
	 * TITLE tag you could do this:
	 * 
	 * <pre>
	 * NodeList nl = parser.parse (null); // here is your two node list
	 * NodeList heads = nl.extractAllNodesThatMatch (new TagNameFilter (&quot;HEAD&quot;))
	 * if (heads.size () &gt; 0) // there may not be a HEAD tag
	 * {
	 *     Head head = heads.elementAt (0); // there should be only one
	 *     head.removeAll (); // clean out the contents
	 *     Tag title = new TitleTag ();
	 *     title.setTagName (&quot;title&quot;);
	 *     title.setChildren (new NodeList (new TextNode (&quot;The New Title&quot;)));
	 *     Tag title_end = new TitleTag ();
	 *     title_end.setTagName (&quot;/title&quot;);
	 *     title.setEndTag (title_end);
	 *     head.add (title);
	 * }
	 * System.out.println (nl.toHtml ()); // output the modified HTML
	 * </pre>
	 * 
	 * @return The list of matching nodes (for a <code>null</code> filter this
	 *         is all the top level nodes).
	 * @param filter
	 *            The filter to apply to the parsed nodes, or <code>null</code>
	 *            to retrieve all the top level nodes.
	 * @throws ParserException
	 *             If a parsing error occurs.
	 */
	public NodeList parse(NodeFilter filter) throws ParserException {
		NodeList ret = new NodeList();

		for (NodeIterator e = elements(); e.hasMoreNodes();) {
			Node node = e.nextNode();
			if (null != filter) {
				// keep only the nodes (and nested nodes) accepted by the filter
				node.collectInto(ret, filter);
			} else {
				ret.add(node);
			}
		}

		return (ret);
	}

	/**
	 * Extract all nodes matching the given filter.
	 * 
	 * @see Node#collectInto(NodeList, NodeFilter)
	 * @param filter
	 *            The filter to be applied to the nodes. Must not be
	 *            <code>null</code>.
	 * @throws ParserException
	 *             If a parse error occurs.
	 * @return A list of nodes matching the filter criteria, i.e. for which the
	 *         filter's accept method returned <code>true</code>.
	 */
	public NodeList extractAllNodesThatMatch(NodeFilter filter)
			throws ParserException {
		NodeList ret = new NodeList();

		for (NodeIterator e = elements(); e.hasMoreNodes();) {
			e.nextNode().collectInto(ret, filter);
		}

		return (ret);
	}

}
