package cn.com.dmg.myspringboot.hanlp;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.StringReader;

public class TokenizerExample {
    public static void main(String[] args) {
        String text = "2021年HanLPv2.1为生产环境带来次世代最先进的多语种NLP技术。阿婆主来到北京立方庭参观自然语义科技公司。";

        // 使用空白分词
//        Analyzer whitespaceAnalyzer = new Analyzer() {
//            @Override
//            protected TokenStreamComponents createComponents(String fieldName) {
//                return new TokenStreamComponents(new WhitespaceTokenizer());
//            }
//        };

        // 使用标准分词
        Analyzer standardAnalyzer = new Analyzer() {
            @Override
            protected TokenStreamComponents createComponents(String fieldName) {
                return new TokenStreamComponents(new StandardTokenizer());
            }
        };

        // 分词并打印结果
        //tokenize(text, whitespaceAnalyzer);
        tokenize(text, standardAnalyzer);
    }

    private static void tokenize(String text, Analyzer analyzer) {
        try (TokenStream tokenStream = analyzer.tokenStream("", new StringReader(text))) {
            tokenStream.reset();
            CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
            while (tokenStream.incrementToken()) {
                System.out.println(charTermAttribute.toString());
            }
            tokenStream.end();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
