package com.example.fenci;

import java.io.IOException;
import java.io.UncheckedIOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
//import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 
/**
 * Small demo that tokenizes a Chinese sentence with Lucene's
 * {@code StandardTokenizer} and prints each token on its own line.
 */
public class TokenizerExample {
    public static void main(String[] args) {
        String text = "这是一个分词的示例";

        // StandardTokenizer handles CJK input (a whitespace tokenizer would
        // emit the whole sentence as one token). Analyzer is Closeable, so
        // manage it with try-with-resources.
        try (Analyzer standardAnalyzer = new Analyzer() {
            @Override
            protected TokenStreamComponents createComponents(String fieldName) {
                return new TokenStreamComponents(new StandardTokenizer());
            }
        }) {
            tokenize(text, standardAnalyzer);
        }
    }

    /**
     * Tokenizes {@code text} with the given analyzer and prints each token
     * to standard output, one per line.
     *
     * @param text     the input text to tokenize
     * @param analyzer the Lucene analyzer that supplies the token stream
     * @throws UncheckedIOException if the underlying stream fails
     */
    private static void tokenize(String text, Analyzer analyzer) {
        // try-with-resources guarantees close() runs even when an exception
        // is thrown mid-stream (the original leaked the stream on failure).
        try (TokenStream tokenStream = analyzer.tokenStream("text", text)) {
            // Per the TokenStream contract, attributes are added before reset().
            CharTermAttribute termAttr = tokenStream.addAttribute(CharTermAttribute.class);
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                System.out.println(termAttr.toString());
            }
            tokenStream.end();
        } catch (IOException e) {
            // Preserve the cause instead of swallowing it via printStackTrace().
            throw new UncheckedIOException("Tokenization failed", e);
        }
    }
}