package com.and.analyzer;

import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class Analyzers {

	/**
	 * Tokenizes the given text with the dictionary-based IK analyzer and
	 * discards the result (convenience entry point for manual testing).
	 *
	 * @param text the text to tokenize
	 * @throws Exception if tokenization fails
	 */
	public static void test(String text) throws Exception {
		testAnalyzer(new IKAnalyzer(), text); // dictionary-based segmentation
	}

	/**
	 * Tokenizes the given text with the supplied analyzer and returns the
	 * produced terms in stream order.
	 *
	 * @param analyzer the Lucene analyzer to tokenize with
	 * @param text     the text to tokenize
	 * @return the terms emitted by the analyzer, in order of appearance
	 * @throws Exception if the token stream cannot be consumed
	 */
	public static List<String> testAnalyzer(Analyzer analyzer, String text)
			throws Exception {
		List<String> terms = new ArrayList<String>();
		// Field name is arbitrary here; analysis does not depend on it for
		// per-field-agnostic analyzers like IKAnalyzer.
		TokenStream tokenStream = analyzer.tokenStream("content",
				new StringReader(text));
		// Capture the attribute instance once: addAttribute returns the same
		// object that getAttribute would look up on every iteration.
		TermAttribute termAttribute = tokenStream
				.addAttribute(TermAttribute.class);
		try {
			while (tokenStream.incrementToken()) {
				terms.add(termAttribute.term());
			}
		} finally {
			// Close the stream so the underlying StringReader is released.
			tokenStream.close();
		}
		return terms;
	}
}
