package com.jackstraw.jack_base.util;

import org.apdplat.word.WordSegmenter;
import org.apdplat.word.segmentation.Word;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * 分词
 */
/**
 * Word-segmentation utilities: tokenizes Chinese text with the IK analyzer
 * and the apdplat "word" segmenter, returning distinct tokens as sets.
 */
public class SegmenterUtil {

    /** Utility class; prevent instantiation. */
    private SegmenterUtil() {
    }

    /**
     * Returns the union of the IK and "word" segmenter results for the given text.
     *
     * @param text the text to segment
     * @return the distinct tokens produced by both segmenters
     */
    public static Set<String> allSegment(String text) {
        Set<String> resultSet = new HashSet<>();
        resultSet.addAll(ikSegment(text));
        resultSet.addAll(wordSegment(text));
        return resultSet;
    }


    /**
     * Segments the text with the IK analyzer (smart mode) and returns the
     * distinct tokens.
     *
     * <p>If the underlying reader fails mid-stream, the tokens collected so
     * far are returned (best-effort, matching the original intent) instead of
     * looping forever on a stale lexeme.
     *
     * @param text the text to segment
     * @return the distinct IK tokens
     */
    public static Set<String> ikSegment(String text) {
        Set<String> resultSet = new HashSet<>();
        // true => smart (coarse-grained) segmentation mode.
        IKSegmenter ik = new IKSegmenter(new StringReader(text), true);
        try {
            // Iterate until the segmenter is exhausted. The original caught
            // IOException *inside* a while(true) loop, which re-added the
            // previous lexeme (or NPE'd on the first pass) and could spin
            // forever after a failed read; the try now encloses the loop.
            for (Lexeme lex = ik.next(); lex != null; lex = ik.next()) {
                resultSet.add(lex.getLexemeText());
            }
        } catch (IOException e) {
            // StringReader should never throw; if it somehow does, keep the
            // best-effort results collected so far.
            e.printStackTrace();
        }
        return resultSet;
    }


    /**
     * Segments the text with the apdplat "word" segmenter and returns the
     * distinct tokens.
     *
     * @param text the text to segment
     * @return the distinct "word" segmenter tokens
     */
    public static Set<String> wordSegment(String text) {
        Set<String> resultSet = new HashSet<>();
        for (Word word : WordSegmenter.seg(text)) {
            resultSet.add(word.getText());
        }
        return resultSet;
    }


}
