package com.thinkgem.jeesite.modules.search.core.util;

import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class IKUtil {

	/**
	 * Tokenizes the given text with the IK analyzer and joins the tokens
	 * with a trailing '|' separator (e.g. {@code "tok1|tok2|"}).
	 *
	 * @param text          the text to tokenize
	 * @param isIntelligent {@code true} for smart (coarse-grained) segmentation,
	 *                      {@code false} for finest-grained segmentation
	 * @param filedName     the Lucene field name handed to the analyzer;
	 *                      {@code null} is treated as ""
	 * @return all tokens joined as "token|token|...|", or "" if there are none
	 * @throws IOException if the token stream fails while reading
	 */
	public String getFenci(String text, boolean isIntelligent, String filedName)
			throws IOException {
		if (filedName == null) {
			filedName = "";
		}
		// try-with-resources guarantees the analyzer, the reader and the
		// token stream are all closed even if tokenization throws —
		// the original code leaked them on any exception and never
		// closed the TokenStream at all.
		try (Analyzer anal = new IKAnalyzer(isIntelligent);
				StringReader reader = new StringReader(text);
				TokenStream ts = anal.tokenStream(filedName, reader)) {
			// addAttribute (rather than getAttribute) never returns null.
			CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
			// Lucene's TokenStream contract requires reset() before the
			// first incrementToken() and end() after the last one.
			ts.reset();
			StringBuilder result = new StringBuilder();
			while (ts.incrementToken()) {
				result.append(term.toString()).append('|');
			}
			ts.end();
			return result.toString();
		}
	}

	/**
	 * Counts how often each keyword occurs in a '|'-separated token string
	 * (the format produced by {@link #getFenci}).
	 *
	 * @param source tokens separated by '|'
	 * @return map from each distinct token to its occurrence count
	 */
	public Map<String, Integer> getCipin(String source) {
		Map<String, Integer> resultMap = new HashMap<String, Integer>();
		for (String word : source.split("\\|")) {
			// The map alone tracks "seen" — the original's parallel
			// HashSet duplicated resultMap.keySet() for no benefit.
			Integer count = resultMap.get(word);
			resultMap.put(word, count == null ? 1 : count + 1);
		}
		return resultMap;
	}
}
