package com.flute.icrawler.policy.updatepolicy.util;

import java.io.ByteArrayInputStream;
import java.io.StringReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;

import org.apache.log4j.Logger;
import org.w3c.dom.Document;
import org.wltea.analyzer.IKSegmentation;
import org.wltea.analyzer.Lexeme;

import com.flute.icrawler.htmlparser.impl.BasicHtmlParser;

/**
 * Extracts the distinct word segments of an HTML page after stripping
 * boilerplate tags (scripts, styles, frames, ...). Used by the update
 * policy to fingerprint page content.
 */
public class HtmlAnalyzer {
	private static final Logger logger = Logger.getLogger(HtmlAnalyzer.class);

	/** Upper-cased tag names whose nodes are removed before text extraction. */
	private static final List<String> filterTags = new ArrayList<String>();

	/** Lower-cased attribute names to filter (not referenced elsewhere in this class). */
	private static final List<String> filterAttrs = new ArrayList<String>();

	private static final String[] tags = { "script", "noscript", "iframe", "frame", "link", "style", "meta", "base", "form",
			"object", "img", "embed", "br" };

	private static final String[] attrs = { "style", "class", "target" };

	static {
		initFilter();
	}

	/**
	 * Populates the filter lists from the raw arrays. {@link Locale#ROOT} keeps
	 * case conversion locale-independent (e.g. avoids the Turkish dotless-i
	 * turning "iframe" into an unmatched tag name).
	 */
	private static void initFilter() {
		for (String tag : tags) {
			filterTags.add(tag.toUpperCase(Locale.ROOT));
		}
		for (String attr : attrs) {
			filterAttrs.add(attr.toLowerCase(Locale.ROOT));
		}
	}

	/**
	 * Segments the first third of the tag-filtered page text into words.
	 *
	 * @param content raw HTML content of the page
	 * @return the set of distinct lexemes, or {@code null} if parsing or
	 *         segmentation failed — callers must null-check
	 */
	public static Set<String> getSegmentation(String content) {
		try {
			String value = getFilterValue(content);
			// Only the first third of the extracted text is analyzed —
			// presumably to focus on the main content; TODO confirm intent.
			return analyzer(value.substring(0, value.length() / 3));
		} catch (Exception e) {
			// Log with a message and the full stack trace, not just the throwable.
			logger.error("Failed to segment page content", e);
		}
		return null;
	}

	/**
	 * Parses the HTML and returns the document's text with the filtered tags
	 * removed.
	 *
	 * @param value raw HTML
	 * @return text content of the document after tag filtering
	 * @throws Exception if the HTML cannot be parsed
	 */
	private static String getFilterValue(String value) throws Exception {
		// StandardCharsets.UTF_8 makes the encoding explicit and avoids the
		// checked UnsupportedEncodingException of getBytes(String).
		Document doc = new BasicHtmlParser()
				.getDocument(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)));
		return DOMUtil.getNodeValue(doc, filterTags);
	}

	/**
	 * Tokenizes the given plain text with the IK segmenter in smart mode.
	 *
	 * @param content plain text to segment
	 * @return the set of distinct, trimmed lexeme texts
	 * @throws Exception if segmentation fails
	 */
	private static Set<String> analyzer(String content) throws Exception {
		Set<String> set = new HashSet<String>();
		IKSegmentation segmenter = new IKSegmentation(new StringReader(content), true);
		Lexeme lexeme;
		while ((lexeme = segmenter.next()) != null) {
			set.add(lexeme.getLexemeText().trim());
		}
		return set;
	}

}
