package lucene.analyzer;

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

/**
 * Demonstrates how three built-in Lucene analyzers tokenize the same Chinese
 * sentence: {@code StandardAnalyzer} (per-character CJK), {@code WhitespaceAnalyzer}
 * (splits on whitespace only), and {@code SimpleAnalyzer} (splits on non-letters,
 * lowercases). Terms are printed separated by '|'.
 */
public class VariousAnalyzers {

	private static String str = "中华人民共和国简称中国，是一个有13亿人口的国家";

	public static void main(String[] args) throws Exception {

		Analyzer analyzer = null;

		analyzer = new StandardAnalyzer(); // standard tokenizer (splits CJK per character)
		System.out.println("标准分词" + analyzer.getClass().getName());
		printAnalyzer(analyzer);

		analyzer = new WhitespaceAnalyzer(); // whitespace tokenizer (no split: input has no spaces)
		System.out.println("空格分词" + analyzer.getClass().getName());
		printAnalyzer(analyzer);

		analyzer = new SimpleAnalyzer(); // letter tokenizer + lowercase
		System.out.println("简单分词" + analyzer.getClass().getName());
		printAnalyzer(analyzer);
	}

	/**
	 * Tokenizes {@link #str} with the given analyzer and prints each term
	 * followed by '|'. The analyzer is closed afterwards, so every analyzer
	 * instance passed in is single-use.
	 *
	 * @param analyzer the analyzer to demonstrate; closed before returning
	 * @throws Exception if tokenization fails
	 */
	public static void printAnalyzer(Analyzer analyzer) throws Exception {
		// BUG FIX: the first argument of tokenStream() is the FIELD NAME, not the
		// text to analyze — the original passed the text itself as the field name.
		// try-with-resources guarantees the stream is closed, and end() is called
		// as required by the TokenStream consumer contract (reset -> incrementToken* -> end -> close).
		try (TokenStream tokenStream = analyzer.tokenStream("content", new StringReader(str))) {
			// addAttribute() is the documented consumer idiom; getAttribute() throws
			// if the attribute was not registered by the tokenizer chain.
			CharTermAttribute termAttribute = tokenStream.addAttribute(CharTermAttribute.class);
			tokenStream.reset();
			while (tokenStream.incrementToken()) {
				System.out.print(termAttribute.toString() + "|");
			}
			tokenStream.end();
		}
		System.out.println("\n");
		analyzer.close();
	}
}
