package util;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.IOException;
import java.io.StringReader;
import java.util.Collection;
import java.util.HashSet;

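/**
 * Small utility around the IK Analyzer (org.wltea.analyzer) for
 * Chinese word segmentation.
 */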
public class IKanalyzerUtil {

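    /**
     * Segments the given text and returns the distinct terms.
     *
     * @param kw text to segment
     * @return an unordered set of the lexemes found in {@code kw}
     * @throws IOException if the segmenter fails to read the input
     */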
    public static Collection<String> analyzer(String kw) throws IOException {
        Collection<String> result = new HashSet<>();
        // Wrap the input in an in-memory Reader (StringReader) for the segmenter.
        StringReader reader = new StringReader(kw);
        // Tokenize with IKSegmenter: true selects smart mode; false selects
        // max_word mode, which splits more aggressively and yields more tokens.
        IKSegmenter ikSegmenter = new IKSegmenter(reader, true);
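        // next() returns one Lexeme at a time and null once the input is exhausted.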
        Lexeme next = ikSegmenter.next();
        while (next != null) {
            String lexemeText = next.getLexemeText();
            result.add(lexemeText);
            next = ikSegmenter.next();
        }
        return result;
    }

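    /**
     * Quick manual check: segments the sample sentence "我是中国人"
     * ("I am Chinese") and prints the resulting terms; the exact tokens
     * depend on the segmentation mode and the bundled dictionary.
     */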
    public static void main(String[] args) throws IOException {
        System.out.println(IKanalyzerUtil.analyzer("我是中国人"));
    }
}
