package Yanxin.atguigu.yx.util;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

/**
 * Static helper for tokenizing text with the IK Analyzer.
 *
 * <p>Thread-safe: each call builds its own {@link IKSegmenter}, and no
 * shared mutable state is kept.
 */
public class IkUtil {

    /** Utility class — not meant to be instantiated. */
    private IkUtil() {
    }

    /**
     * Splits the given text into tokens using the IK analyzer in smart mode.
     *
     * @param keyword the text to segment; must not be {@code null}
     * @return the tokens in order of appearance (may contain duplicates);
     *         never {@code null}
     */
    public static List<String> split(String keyword) {
        List<String> result = new ArrayList<>();
        // StringReader lets IKSegmenter consume the in-memory string as a stream;
        // try-with-resources guarantees it is released.
        try (StringReader reader = new StringReader(keyword)) {
            // Second argument 'true' selects smart (coarse-grained) segmentation.
            IKSegmenter seg = new IKSegmenter(reader, true);
            // Drain the segmenter: next() returns null once the input is exhausted.
            for (Lexeme lexeme = seg.next(); lexeme != null; lexeme = seg.next()) {
                result.add(lexeme.getLexemeText());
            }
        } catch (IOException e) {
            // A StringReader cannot actually fail with IOException in practice;
            // keep the original best-effort behavior and return what was collected.
            e.printStackTrace();
        }
        return result;
    }

    /**
     * Splits the given text into tokens, optionally removing duplicates.
     *
     * <p>When deduplicating, the first occurrence of each token is kept and
     * the original token order is preserved (via {@link LinkedHashSet}),
     * so results are deterministic.
     *
     * @param keyword            the text to segment; must not be {@code null}
     * @param isDuplicateRemoval whether duplicate tokens should be removed
     * @return the (possibly deduplicated) token list; never {@code null}
     */
    public static List<String> split(String keyword, boolean isDuplicateRemoval) {
        List<String> tokens = split(keyword);
        if (isDuplicateRemoval) {
            // LinkedHashSet removes duplicates while keeping insertion order.
            return new ArrayList<>(new LinkedHashSet<>(tokens));
        }
        return tokens;
    }

}
