package com.slipper.core.similarity;

import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.NotionalTokenizer;

import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * Static helpers around HanLP's notional tokenizer: word segmentation,
 * stop-word management (delegated to {@link HanLPStopWord}), and word-frequency counting.
 */
public final class HanLPUtil {

    /** Utility class — static methods only, never instantiated. */
    private HanLPUtil() {
        throw new UnsupportedOperationException("Utility class");
    }

    /**
     * Adds stop words to the stop-word list.
     *
     * @param stopWords stop words to add
     */
    public static void addStopWord(String... stopWords) {
        HanLPStopWord.add(stopWords);
    }

    /**
     * Adds stop words to the stop-word list.
     *
     * @param wordList stop words to add
     */
    public static void addStopWord(List<String> wordList) {
        HanLPStopWord.add(wordList.toArray(new String[0]));
    }

    /**
     * Segments text with HanLP's notional tokenizer, then filters out stop words.
     *
     * @param text text to segment
     * @return segmented words with stop words removed
     */
    public static List<String> splitWord(String text) {
        List<Term> termList = NotionalTokenizer.segment(text);
        // Term#word is a public field on HanLP's Term, hence the lambda rather than a method ref.
        List<String> words = termList.stream().map(it -> it.word).collect(Collectors.toList());
        return HanLPStopWord.clean(words);
    }

    /**
     * Segments text and counts how often each word occurs.
     *
     * @param text text to segment
     * @return map from word to its occurrence count
     */
    public static Map<String, Long> splitWordAndFrequency(String text) {
        return splitWord(text).stream()
                .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
    }
}
