package com.yaoandw.crawler;



import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.log4j.Logger;
import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.NodeClassFilter;
import org.htmlparser.filters.OrFilter;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.tags.MetaTag;
import org.htmlparser.tags.ScriptTag;
import org.htmlparser.tags.TitleTag;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

public class HtmlParserTool {
	private static Logger logger = Logger.getLogger(HtmlParserTool.class);

	// Compiled once instead of on every getLinks() call: matches absolute
	// http:// URLs embedded in arbitrary text (inline JavaScript, mostly).
	private static final Pattern URL_PATTERN = Pattern
			.compile("http://([\\w-]+\\.)+[\\w-]+(/[\\w- ./?%&=]*)?");

	/**
	 * Extracts all links from the page at {@code url}.
	 *
	 * @param url    the page to fetch and parse
	 * @param filter optional link filter; {@code null} accepts every link
	 * @return the set of accepted link URLs (never {@code null})
	 */
	public static Set<String> extracLinks(String url, LinkFilter filter) {
		return extracLinksAndText(url, filter).getLinks();
	}

	/**
	 * Fetches the page at {@code url} and extracts its links, body text, title,
	 * content type and meta description.
	 *
	 * @param url    the page to fetch and parse
	 * @param filter optional link filter; {@code null} accepts every link
	 * @return an {@link ExtracPageInfo} holding the extracted data; on a parse
	 *         failure the URL is queued via {@code LinkQueue.addBadUrl} and an
	 *         info object with empty/default fields is returned
	 */
	public static ExtracPageInfo extracLinksAndText(String url, LinkFilter filter) {
		Set<String> links = new HashSet<String>();
		String text = "";
		String title = "";
		String contentType = "";
		String metaDescription = "";
		try {
			// Normalize URLs containing Chinese (non-ASCII) characters.
			url = UrlTool.processUrl(url);
			Parser parser = new Parser(url);
			// Encoding must be set explicitly here: a wrong encoding corrupts the
			// extracted tag contents. TODO auto-detect encodings other than GBK.
			parser.setEncoding("gbk");
			contentType = parser.getLexer().getPage().getContentType();
			// Only parse textual content; skip binaries, images, etc.
			if (contentType != null && !contentType.startsWith("text")) {
				return new ExtracPageInfo(links, text, contentType, title, metaDescription);
			}
			// Filter accepting <frame> tags so their src attribute can be extracted.
			NodeFilter frameFilter = new NodeFilter() {
				public boolean accept(Node node) {
					return node.getText().startsWith("frame src=");
				}
			};
			// Chain of OrFilters selecting <a>, <frame>, <title>, <meta>, <script>.
			OrFilter linkFilter = new OrFilter(new NodeClassFilter(LinkTag.class), frameFilter);
			OrFilter titleFilter = new OrFilter(new NodeClassFilter(TitleTag.class), linkFilter);
			OrFilter metaFilter = new OrFilter(new NodeClassFilter(MetaTag.class), titleFilter);
			OrFilter scriptFilter = new OrFilter(new NodeClassFilter(ScriptTag.class), metaFilter);
			// All tags matching any of the filters above.
			NodeList list = parser.extractAllNodesThatMatch(scriptFilter);
			for (int i = 0; i < list.size(); i++) {
				Node tag = list.elementAt(i);
				if (tag instanceof ScriptTag) {
					// Pull http:// URLs out of inline JavaScript via regex.
					ScriptTag scriptTag = (ScriptTag) tag;
					Set<String> scriptLinks = getLinks(scriptTag.getStringText(), filter);
					if (scriptLinks != null && !scriptLinks.isEmpty())
						links.addAll(scriptLinks);
				} else if (tag instanceof MetaTag) { // <meta> tag
					MetaTag metaTag = (MetaTag) tag;
					if ("description".equals(metaTag.getAttribute("name"))) {
						String content = metaTag.getAttribute("content");
						// Guard: the content attribute may be absent; keep the
						// empty-string default rather than storing null.
						if (content != null)
							metaDescription = content;
					}
				} else if (tag instanceof TitleTag) { // <title> tag
					title = ((TitleTag) tag).getTitle();
				} else if (tag instanceof LinkTag) { // <a> tag
					String linkUrl = ((LinkTag) tag).getLink();
					if (filter == null || filter.accept(linkUrl))
						links.add(linkUrl);
				} else { // <frame> tag: extract the src attribute, e.g. <frame src="test.html"/>
					String frame = tag.getText();
					int start = frame.indexOf("src=");
					if (start == -1)
						continue; // malformed frame tag: no src attribute, skip
					frame = frame.substring(start);
					int end = frame.indexOf(" ");
					if (end == -1)
						end = frame.indexOf(">");
					if (end <= 5)
						continue; // no usable value after src=, skip instead of crashing
					// Skip the leading src=" (5 chars) and drop the trailing quote.
					String frameUrl = frame.substring(5, end - 1);
					if (filter == null || filter.accept(frameUrl))
						links.add(frameUrl);
				}
			}
			text = parser.getLexer().getPage().getText();
		} catch (ParserException e) {
			logger.error("", e);
			LinkQueue.addBadUrl(url);
		}
		return new ExtracPageInfo(links, text, contentType, title, metaDescription);
	}

	/**
	 * Parses URLs out of JavaScript source with a simple regular expression.
	 * Complex cases would need a real JS engine (e.g. V8) plus HTML parsing. TODO
	 *
	 * @param stringText the JavaScript source text
	 * @param filter     optional link filter; {@code null} accepts every URL
	 * @return the set of matching (and accepted) URLs, never {@code null}
	 */
	private static Set<String> getLinks(String stringText, LinkFilter filter) {
		Set<String> links = new HashSet<String>();
		Matcher matcher = URL_PATTERN.matcher(stringText);
		while (matcher.find()) {
			String linkUrl = matcher.group();
			if (filter == null || filter.accept(linkUrl))
				links.add(linkUrl);
		}
		return links;
	}

	/** Manual smoke test for {@link #getLinks(String, LinkFilter)}. */
	public static void main(String[] args) {
		String stringText = " var thunder_url = \"http://txt.bxwx.org/packdown/fulltxt//29/29173.txt\";"
				+ "var thunder_pid = \"30977\";"
				+ " var thunder_url = \"http://txt.bxwx.org/packdown/fulltxt//29/29173.txt1\";"
				+ "var restitle = \"\";";
		getLinks(stringText, null);
	}
}
