package cn.chencaiju.lucene;

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

/**
 * Minimal Lucene demo: tokenizes a fixed sentence with {@link StandardAnalyzer}
 * and prints each term together with its character offsets.
 */
public class LuceneAnalyzer {
	/** Sample text to analyze; a Java import statement, so it splits on dots and words. */
	private static final String SENTENCE =
			"import org.apache.lucene.analysis.standard.StandardAnalyzer;";

	public static void main(String[] args) throws IOException {
		// try-with-resources closes both the analyzer and the token stream even if
		// iteration throws. The original leaked the TokenStream (close() was never
		// called) and skipped analyzer.close() on any exception.
		try (Analyzer analyzer = new StandardAnalyzer();
				TokenStream tokenStream = analyzer.tokenStream("name", SENTENCE)) {

			// Attribute views into the stream's current token: offsets and term text.
			OffsetAttribute offsetAttr = tokenStream.addAttribute(OffsetAttribute.class);
			CharTermAttribute charTermAttr = tokenStream.addAttribute(CharTermAttribute.class);

			// Lucene workflow contract: reset() before the first incrementToken().
			tokenStream.reset();

			// Walk the stream, printing each term with its [start, end) offsets.
			while (tokenStream.incrementToken()) {
				String term = charTermAttr.toString();
				int start = offsetAttr.startOffset();
				int end = offsetAttr.endOffset();
				System.out.printf("%s [%d, %d]%n", term, start, end);
			}

			// end() finalizes offset state after the last token; close() is then
			// handled by try-with-resources.
			tokenStream.end();
		}
	}
}
