package com.lzx.utils;

import com.huaban.analysis.jieba.JiebaSegmenter;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * @author: 如寄
 * @version: v1.0
 * @description: com.test
 * @date:2021/4/14
 */
public class TermUtils {
    // Stop-word list, loaded once from the bundled resource file in the constructor.
    private final List<String> stopWord = new ArrayList<>();

    // Matches optionally-signed digit-only tokens (note: also matches the empty string,
    // but such tokens are already rejected by the length-< 2 check).
    private static final Pattern NUMERIC_PATTERN = Pattern.compile("^[-\\+]?[\\d]*$");

    // Matches any punctuation/symbol/digit character (ASCII and full-width) whose
    // presence disqualifies a token. Compiled once instead of per call.
    private static final Pattern SYMBOL_PATTERN =
            Pattern.compile("[-_——=—～３１４０８７９６1２23467890*+!　 ) (！@#$&。，、‘；“”：}{】【？》《.,?/';:|~`·（）……￥#  ／－]");

    /**
     * Loads the stop-word list from {@code /stopwords/stopwords.txt} on the classpath.
     * Each line of the file is treated as one stop word (trimmed). If the resource is
     * missing or unreadable, the list is left empty and the instance is still usable.
     */
    public TermUtils() {
        // try-with-resources fixes the original reader leak; the charset is pinned to
        // UTF-8 because the stop-word file contains Chinese text and the original
        // InputStreamReader used the platform default charset.
        try (InputStream in = this.getClass().getResourceAsStream("/stopwords/stopwords.txt")) {
            if (in == null) {
                // Missing resource would otherwise throw an uncaught NPE below.
                System.err.println("stopwords resource /stopwords/stopwords.txt not found");
                return;
            }
            try (BufferedReader reader =
                         new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    stopWord.add(line.trim());
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Segments {@code str} with jieba and filters the result: drops tokens shorter
     * than two characters, purely numeric tokens, tokens containing any
     * punctuation/symbol character, and all stop words.
     *
     * @param str the raw text to segment
     * @return the surviving terms, in segmentation order
     * @throws IOException declared for backward compatibility with existing callers
     */
    public List<String> RemoveOfStopWord(String str) throws IOException {
        JiebaSegmenter segmenter = new JiebaSegmenter(); // jieba word segmentation
        List<String> tokens = segmenter.sentenceProcess(str);
        List<String> termList = new ArrayList<>();
        for (String token : tokens) {
            if (token.length() < 2
                    || NUMERIC_PATTERN.matcher(token).matches()
                    || SYMBOL_PATTERN.matcher(token).find()) {
                continue;
            }
            termList.add(token);
        }
        termList.removeAll(stopWord);
        return termList;
    }
}
