package crawler.web.component;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

import library.Tracer;
import library.bean.Coordinator;
import library.functions.Function1PR;
import library.functions.Function2PR;
import library.utils.HtmlEscaper;

import org.apache.commons.lang.StringUtils;
import org.htmlparser.Node;
import org.htmlparser.Parser;
import org.htmlparser.nodes.TextNode;
import org.htmlparser.tags.CompositeTag;
import org.htmlparser.tags.Html;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.tags.MetaTag;
import org.htmlparser.tags.ScriptTag;
import org.htmlparser.util.NodeIterator;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;
import org.htmlparser.util.SimpleNodeIterator;

import crawler.web.beans.CrawlPageContext;
import crawler.web.beans.CrawlURL;

/**
 * @author waynechen Mar 16, 2012 11:03:17 AM
 */
/**
 * HTML "digging" helpers built on the org.htmlparser DOM: locating the root
 * {@code <html>} node, collapsing whitespace in extracted text, reading meta
 * tags, harvesting links and text nodes, and (in the nested {@link GoogleMap})
 * scraping Google-Maps coordinates embedded in script tags.
 *
 * <p>All methods are static and stateless; the class is thread-safe as long as
 * the htmlparser node trees passed in are not shared across threads.
 *
 * @author waynechen Mar 16, 2012 11:03:17 AM
 */
public class HtmlDigger {
	/**
	 * Matches {@code http://host/} as group 1 followed by the rest of the URL.
	 * FIX: flags are bit masks and must be combined with {@code |}; the
	 * original used {@code +}, which only produced the right value by accident.
	 */
	private static final Pattern urlPattern = Pattern.compile("(http://[^/]*?" + "/)(.*?)", Pattern.CASE_INSENSITIVE | Pattern.DOTALL);

	/**
	 * Returns the first {@link Html} node produced by the parser, or
	 * {@code null} if the document contains none.
	 *
	 * @param parser the parser to scan; it is {@link Parser#reset() reset} first
	 * @return the first top-level {@code <html>} node, or {@code null}
	 * @throws ParserException if the underlying parse fails
	 */
	public static Html findHtmlNode(Parser parser) throws ParserException {
		parser.reset();
		for (NodeIterator it = parser.elements(); it.hasMoreNodes();) {
			Node node = it.nextNode();
			if (node instanceof Html)
				return (Html) node;
		}
		return null;
	}

	/**
	 * Strips {@code &nbsp;} entities and ASCII whitespace, collapses any
	 * remaining Unicode space characters (e.g. NO-BREAK SPACE {@code \u00a0},
	 * ZERO WIDTH SPACE {@code \u200B}) between words into a single blank, and
	 * finally unescapes HTML entities via {@code HtmlEscaper.html2Text}.
	 *
	 * @param string raw text extracted from an HTML node
	 * @return the cleaned-up text
	 */
	public static String collapse(String string) {
		// First pass removes entity spaces and everything \s matches; the loop
		// below only ever sees Unicode spaces that \s does not cover.
		string = string.replaceAll("&nbsp;", "").replaceAll("\\s", "");
		StringBuilder buffer = new StringBuilder();
		// State machine: 0 = at start (drop leading spaces), 1 = separator
		// pending (emit one ' ' before the next visible char), 2 = inside a
		// word. NOTE: the original derived the initial state from the tail of
		// `buffer`, but the buffer is always empty at this point, so that
		// computation (and the lineEnd/lineSign_size fields feeding it) was
		// dead code and has been removed.
		int state = 0;
		int chars = string.length();
		for (int i = 0; i < chars; i++) {
			char character = string.charAt(i);
			switch (character) {
			case '\u0020':
			case '\u0009':
			case '\u000C':
			case '\u200B':
			case '\u00a0':
			case '\r':
			case '\n':
				if (0 != state) {
					state = 1;
				}
				break;
			default:
				if (1 == state) {
					buffer.append(' ');
				}
				state = 2;
				buffer.append(character);
			}
		}
		return HtmlEscaper.html2Text(buffer.toString().trim());
	}

	/**
	 * Reads a {@code <meta>} tag into the crawl context: {@code keywords} and
	 * {@code title} both populate the context keywords, {@code description}
	 * populates the description. Tags without a {@code name} attribute are
	 * ignored.
	 *
	 * @param context crawl context to fill
	 * @param node the meta tag to inspect
	 */
	public static void parseMetaTag(CrawlPageContext context, MetaTag node) {
		String name = node.getAttribute("name");
		if (StringUtils.isNotBlank(name)) {
			String content = node.getAttribute("content");
			if (name.equals("keywords") || name.equals("title")) {
				context.setKeywords(content);
			} else if (name.equals("description")) {
				context.setDescription(content);
			}
		}
	}

	/**
	 * Extracts the site domain ({@code http://host}, no trailing slash) from a
	 * link string. If several URLs occur in the string the LAST match wins
	 * (original behaviour, preserved); returns the empty string when nothing
	 * matches.
	 *
	 * @param link arbitrary string possibly containing an http URL
	 * @return the domain part without trailing '/', or {@code ""}
	 */
	public static String getSiteDomainURL(String link) {
		Matcher matcher = urlPattern.matcher(link);
		String url = "";
		while (matcher.find()) {
			String withSlash = matcher.group(1); // e.g. "http://host/"
			url = withSlash.substring(0, withSlash.length() - 1).trim(); // drop the '/'
		}
		return url;
	}

	/**
	 * Collects every {@code <a>} link in the page into the context's URL list,
	 * with the collapsed link text as the URL title. No nodes are removed
	 * (the callback returns {@code null}).
	 *
	 * @param root the page's {@code <html>} node
	 * @param context crawl context receiving the URLs
	 */
	public static void digURL(Html root, CrawlPageContext context) {
		HtmlDigger.digSubNodes(LinkTag.class, root, context, new Function2PR<Node, Object, Node>() {
			@Override
			public Node apply(Node a, Object b) {
				LinkTag linkTag = (LinkTag) a;
				CrawlPageContext context = (CrawlPageContext) b;
				CrawlURL url = new CrawlURL(linkTag.getLink().trim());
				url.setTitle(HtmlDigger.collapse(linkTag.getLinkText()));
				context.getUrlList().add(url);
				return null;
			}
		});
	}

	/**
	 * Appends the text of every {@link TextNode} under {@code root} to
	 * {@code sb}, each followed by {@code split} (treated as {@code ""} when
	 * {@code null}); nodes rejected by {@code filter} are skipped. Text is
	 * HTML-unescaped via {@code HtmlEscaper.html2Text} before appending.
	 *
	 * @param root node whose subtree is scanned
	 * @param sb destination buffer
	 * @param split separator appended after each text node; may be {@code null}
	 * @param filter optional predicate selecting which text nodes to keep
	 */
	public static void digAllText(CompositeTag root, StringBuilder sb, final String split, final Function1PR<Node, Boolean> filter) {
		HtmlDigger.digSubNodes(TextNode.class, root, sb, new Function2PR<Node, Object, Node>() {
			@Override
			public Node apply(Node a, Object b) {
				String s = (split == null) ? "" : split;
				TextNode text = (TextNode) a;
				if (filter != null && !filter.apply(text))
					return null;
				StringBuilder sb = (StringBuilder) b;
				sb.append(convert(text)).append(s);
				return null;
			}

			private String convert(TextNode text) {
				return HtmlEscaper.html2Text(text.getText());
			}
		});
	}

	/**
	 * Visits every descendant of {@code root} of type {@code clazz}; when the
	 * callback returns a non-null {@link Node}, that node (and, for composite
	 * nodes, all matching descendants) is removed from the search result list.
	 *
	 * @param clazz node type to extract
	 * @param root parent node to search under
	 * @param context opaque context forwarded to {@code func}
	 * @param func callback; a non-null return value marks the node for removal
	 * @return {@code true} if at least one node was removed
	 */
	public static Boolean digSubNodes(//
	final Class<?> clazz, CompositeTag root, Object context, Function2PR<Node, Object, Node> func) {
		boolean removedAny = false;
		// Composite tags support a deep, type-filtered search.
		final NodeList ns = root.searchFor(clazz, true);
		for (int i = 0; i < ns.size(); i++) {
			Node node = func.apply(ns.elementAt(i), context);
			if (node != null) {
				removedAny = true;
				// Composite node: first purge its matching descendants from
				// the result list so they are not visited after removal.
				if (node instanceof CompositeTag) {
					travelTree((CompositeTag) node, null, false, new Function2PR<Node, Object, Boolean>() {
						@Override
						public Boolean apply(Node a, Object b) {
							if (clazz.equals(a.getClass())) {
								Tracer.debug("removing child node: " + a);
								ns.remove(a);
							}
							return true;
						}
					}, null);
				}
				// NOTE(review): the i-- assumes the returned node occupied
				// index i in `ns`; if the callback ever returns a different
				// node this skips/repeats entries — confirm callers only
				// return the node they were handed (current callers do).
				i--;
				Tracer.debug("removing node: " + node);
				ns.remove(node);
			}
		}
		return removedAny;
	}

	/**
	 * Depth-first traversal of {@code root}'s children. Nodes accepted by
	 * {@code filter} are passed to {@code func}; a {@code null} filter means
	 * {@code func} is never invoked (only recursion happens) — this mirrors the
	 * original behaviour.
	 *
	 * @param root node whose subtree is traversed
	 * @param context opaque context forwarded to {@code func}
	 * @param breakAfterFirstCall stop scanning the current sibling list after
	 *            the first accepted node (recursion into siblings already
	 *            visited still occurs)
	 * @param func callback; a non-null {@code true} return marks success
	 * @param filter predicate selecting nodes to hand to {@code func}
	 * @return {@code true} if any callback invocation returned {@code true}
	 */
	public static Boolean travelTree(//
	CompositeTag root, Object context, boolean breakAfterFirstCall, //
			Function2PR<Node, Object, Boolean> func, //
			Function1PR<Node, Boolean> filter//
	) {
		boolean applied = false;
		NodeList children = root.getChildren();
		if (children != null) {
			SimpleNodeIterator it = children.elements();
			while (it.hasMoreNodes()) {
				Node node = it.nextNode();
				if (filter != null && filter.apply(node)) {
					Boolean tmp = func.apply(node, context);
					if (tmp != null)
						applied = tmp || applied;
					if (breakAfterFirstCall)
						break;
				}
				if (node instanceof CompositeTag) {
					applied = travelTree((CompositeTag) node, context, breakAfterFirstCall, func, filter) || applied;
				}
			}
		}
		return applied;
	}

	/**
	 * Extracts latitude/longitude pairs that Google-Maps pages embed in a
	 * script tag, e.g. {@code var Info = new Array("23.16038","113.29664"...}.
	 */
	public static class GoogleMap {
		//http://maps.google.com/maps?ll=24.33297,102.4929&z=12&t=m&hl=en-US
		//var Info = new Array("23.16038","113.29664"
		private static final Pattern mapPattern = Pattern.compile(//
		"Array\\(\"([\\.\\d]+)\",\"([\\.\\d]+)\"");

		/**
		 * Scans the page for the first script tag containing
		 * {@code var Info = new Array} and parses the first coordinate pair
		 * out of it. Returns an empty {@link Coordinator} when none is found.
		 *
		 * @param html the page root
		 * @return coordinator with lat/lng set when a match was found
		 */
		public static Coordinator parseCoorFromHtml(Html html) {
			final Coordinator coor = new Coordinator();
			HtmlDigger.travelTree(html, null, true, new Function2PR<Node, Object, Boolean>() {
				@Override
				public Boolean apply(Node n, Object b) {
					ScriptTag s = (ScriptTag) n;
					String attribute = s.getStringText();
					Matcher m = mapPattern.matcher(attribute);
					if (m.find()) {
						coor.setLat(m.group(1));
						coor.setLng(m.group(2));
					}
					// null is deliberate: travelTree ignores null results.
					return null;
				}
			}, new Function1PR<Node, Boolean>() {
				@Override
				public Boolean apply(Node a) {
					if (a instanceof ScriptTag) {
						ScriptTag s = (ScriptTag) a;
						String text = s.getStringText();
						if (text.contains("var Info = new Array"))
							return true;
					}
					return false;
				}
			});
			return coor;
		}
	}
}

//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
