package com.atguigu.util;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

public class KeywordUtil {

    // Utility class — not meant to be instantiated.
    private KeywordUtil() {
    }

    /**
     * Splits the given text into search keywords using the IK Chinese analyzer.
     *
     * @param keyWord the text to segment; {@code null} or empty yields an empty list
     * @return the list of tokens produced by the segmenter, in input order
     * @throws IOException if the underlying segmenter fails while reading the input
     */
    public static List<String> splitKeyWord(String keyWord) throws IOException {

        List<String> list = new ArrayList<>();

        // Guard against null/empty input: nothing to segment.
        if (keyWord == null || keyWord.isEmpty()) {
            return list;
        }

        // useSmart = true  -> ik_smart mode: greedy, longest-match segmentation (fewer, longer words)
        // useSmart = false -> ik_max_word mode: finest granularity, emits every possible word
        IKSegmenter ikSegmenter = new IKSegmenter(new StringReader(keyWord), false);

        // Drain the segmenter; next() returns null once the input is exhausted.
        Lexeme lexeme = ikSegmenter.next();
        while (lexeme != null) {
            list.add(lexeme.getLexemeText());
            lexeme = ikSegmenter.next();
        }

        return list;
    }


    public static void main(String[] args) throws IOException {
        // Expected output, useSmart = true : [尚, 硅谷, 大数, 据, 项目, 之, flink, 实, 时数, 仓]
        // Expected output, useSmart = false: [尚, 硅谷, 大数, 数据项, 数据, 项目, 之, flink, 实时, 时数, 仓]
        System.out.println(splitKeyWord("尚硅谷大数据项目之Flink实时数仓"));
    }

}
