package cn.hchaojie.lucene;

import java.io.IOException;
import java.util.Arrays;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;

import com.hankcs.lucene.HanLPAnalyzer;

/**
 * Demo: tokenize a Chinese sentence with {@link HanLPAnalyzer} and print each
 * token's text, character offsets, and part-of-speech type.
 */
public class LuceneNlpChineseAnalyzer {
	public static void main(String[] args) {
		// String sentence = "Overlapping ordered SpanNearQuery spans should not match.";
		// String sentence = "江泽民崔永元炮轰范冰冰毛泽东全文检索是将整本书java、整篇文章中的任意内容信息查找出来的检索啊";
		String sentence = "林志玲亮相网友:确定不是波多野结衣";

//		// stop words
//		CharArraySet stopWords = new CharArraySet(Arrays.asList("江泽民", "啊"), true);

		// try-with-resources: both the analyzer and the token stream are AutoCloseable
		try (
			Analyzer analyzer = new HanLPAnalyzer();
			// Term = field name (where the keyword lives) + keyword text
			TokenStream ts = analyzer.tokenStream("name", sentence);) {

			// Register attributes BEFORE reset(), per the TokenStream consumer workflow.
			// Start/end character offsets of each token in the original text.
			OffsetAttribute offsetAttr = ts.addAttribute(OffsetAttribute.class);

			// The token's text content.
			CharTermAttribute charTermAttr = ts.addAttribute(CharTermAttribute.class);

			// Part-of-speech / token type. Use addAttribute, not getAttribute:
			// getAttribute throws IllegalArgumentException when the attribute is
			// absent, while addAttribute returns the existing or a new instance.
			TypeAttribute typeAttr = ts.addAttribute(TypeAttribute.class);

			ts.reset();

			// Iterate over every token in the stream.
			while (ts.incrementToken()) {
				int start = offsetAttr.startOffset();
				int end = offsetAttr.endOffset();
				String term = charTermAttr.toString();
				String type = typeAttr.type();

				// %n emits the platform line separator (printf's portable newline).
				System.out.printf("%s [%d, %d] %s%n", term, start, end, type);
			}

			// end() finalizes offsets for the last token; required by the contract.
			ts.end();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
}
