package com.wugh.fast.kernel.util;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Word-segmentation utility backed by HanLP.
 *
 * @author 吴冠辉
 * @date 2018/7/2 18:38
 * @deprecated use hutool's tokenizer utility {@code TokenizerUtil} instead (requires hutool-extra)
 */
@Deprecated
public class ParticipleUtils {

    /** Utility class — not instantiable. */
    private ParticipleUtils() {
    }

    /**
     * Segments the given text into individual words using HanLP.
     *
     * @param string the text to segment; may be {@code null} or blank
     * @return the segmented words in order, or an empty list when the input is blank
     */
    public static List<String> participle(String string) {
        if (StringUtils.isBlank(string)) {
            return new ArrayList<>(0);
        }
        // `word` is a public field on Term, so a lambda (not a method reference) is required.
        return HanLP.segment(string).stream()
                .map(term -> term.word)
                .collect(Collectors.toList());
    }

    /**
     * Segments the text and builds a SQL LIKE pattern from the resulting words,
     * e.g. input "广东养老" produces {@code %广东%养老%}.
     *
     * <p>NOTE(review): segmented words are not escaped, so any literal {@code %} or
     * {@code _} inside a word acts as a LIKE wildcard — confirm callers accept this.
     *
     * @param string the text to segment
     * @return the LIKE pattern such as {@code %广东%养老%}, or {@code ""} when segmentation yields no words
     */
    public static String participle2LikeString(String string) {
        List<String> words = participle(string);
        if (CollectionUtils.isEmpty(words)) {
            return "";
        }
        // joining with "%" as delimiter, prefix, and suffix yields %w1%w2%...%
        return words.stream().collect(Collectors.joining("%", "%", "%"));
    }
}
