package com.yaorange.common.core.utils;

import lombok.extern.slf4j.Slf4j;
import org.wltea.analyzer.IKSegmentation;
import org.wltea.analyzer.Lexeme;

import java.io.IOException;
import java.io.StringReader;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

/**
 * Utility class for tokenizing text with the IK Chinese analyzer.
 *
 * @author coach tam
 * @email 327395128@qq.com
 * @create 2023/4/28
 * @since 1.0.0
 */
@Slf4j
public class IKAnalyzerUtils {

    /** Noninstantiable utility class — all members are static. */
    private IKAnalyzerUtils() {
    }

    /**
     * Splits a sentence into its distinct tokens using the IK segmenter.
     *
     * @param sentence        the text to tokenize; a null or empty value yields an empty set
     * @param isMaxWordLength {@code true} for coarse-grained segmentation (ik_smart),
     *                        {@code false} for fine-grained segmentation (ik_max_word)
     * @return the set of distinct token texts; an empty set when the input is empty
     *         or segmentation fails with an I/O error (the error is logged, not rethrown)
     */
    public static Set<String> parseSentence(String sentence, boolean isMaxWordLength) {
        if (StringUtils.isEmpty(sentence)) {
            return Collections.emptySet();
        }
        Set<String> keywords = new HashSet<>();
        // try-with-resources guarantees the reader is closed even if segmentation throws
        try (StringReader reader = new StringReader(sentence)) {
            IKSegmentation segmenter = new IKSegmentation(reader, isMaxWordLength);
            Lexeme lexeme;
            while ((lexeme = segmenter.next()) != null) {
                keywords.add(lexeme.getLexemeText());
            }
        } catch (IOException e) {
            // Best-effort contract: log the failure and fall through to whatever was collected.
            log.error("解析相关分词异常", e);
        }
        return keywords;
    }

    /** Ad-hoc smoke test: tokenizes a sample string fine-grained and prints the tokens. */
    public static void main(String[] args) {
        Set<String> stringSet = parseSentence("java数据类型", false);
        System.out.println(stringSet);
    }
}
