package org.study.lucene.api.analyzer;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Demo of tokenization with Lucene's {@link StandardAnalyzer}, run against one
 * English and one Chinese sentence. (StandardAnalyzer splits Chinese into
 * single characters; a dedicated Chinese analyzer would produce words.)
 *
 * @author Administrator
 * @date 2021-12-29
 */
public class StandardAnalyzerTest {
    public static void main(String[] args) throws IOException {
        String english = "Analysis is one of the main causes of slow indexing. Simply put, the more you analyze the slower analyze the indexing (in most cases).";
        String chinese = "交易中台架构设计：海量并发的高扩展，新业务秒级接入";
        testSmartChineseAnalyzer(english);
        testSmartChineseAnalyzer(chinese);
    }

    /**
     * Tokenizes {@code text} with {@link StandardAnalyzer} and prints the resulting terms.
     * <p>
     * NOTE: despite its name (kept for backward compatibility with existing callers),
     * this method uses {@code StandardAnalyzer}, not {@code SmartChineseAnalyzer}.
     * <p>
     * Reference: https://blog.csdn.net/u010357298/article/details/80776902
     *
     * @param text the raw text to tokenize
     * @throws IOException if the token stream fails while being consumed
     */
    public static void testSmartChineseAnalyzer(String text) throws IOException {
        System.out.println("【使用 StandardAnalyzer】原语句：" + text);
        try (Analyzer analyzer = new StandardAnalyzer();
             TokenStream tokenStream = analyzer.tokenStream("testField", text)) {
            // Register attributes BEFORE reset(): Lucene's documented TokenStream
            // workflow is addAttribute -> reset -> incrementToken* -> end -> close.
            // CharTermAttribute is the documented way to read term text; the original
            // OffsetAttribute.toString() only worked because Lucene's packed attribute
            // impl happens to extend CharTermAttributeImpl.
            CharTermAttribute termAttribute = tokenStream.addAttribute(CharTermAttribute.class);
            tokenStream.reset();
            List<String> tokens = new ArrayList<>();
            while (tokenStream.incrementToken()) {
                tokens.add(termAttribute.toString());
            }
            // end() records final offset state; close() is handled by try-with-resources.
            tokenStream.end();
            System.out.println("【使用 StandardAnalyzer】分词结果：" + tokens);
        }
    }
}
