package ltd.hxya.novel.common.utils;

import com.alibaba.fastjson.JSON;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import lombok.extern.slf4j.Slf4j;
import ltd.hxya.novel.common.bean.Word;
import ltd.hxya.novel.common.bean.WordNutureEnum;
import org.apache.commons.io.FileUtils;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
 
/**
 * Chinese word-segmentation utility built on HanLP (中文分词工具类).
 */

@Slf4j
public class Tokenizer {

    /**
     * Classpath location of the exclusion keyword file (one word per line, UTF-8).
     * Loaded from the classpath instead of the original hard-coded absolute
     * Windows path, so the class works on any machine/deployment.
     */
    private static final String EXCLUDE_KEYWORD_RESOURCE = "/exclude_keyword.txt";

    /**
     * Part-of-speech tags whose words are discarded by {@link #filterSegment(String)}.
     * Built once instead of rebuilding a mutable list on every call.
     */
    private static final List<String> USELESS_NATURES = Collections.unmodifiableList(Arrays.asList(
            WordNutureEnum.E.key,
            WordNutureEnum.W.key,
            WordNutureEnum.UJ.key,
            WordNutureEnum.R.key,
            WordNutureEnum.U.key,
            WordNutureEnum.P.key,
            WordNutureEnum.Q.key,
            WordNutureEnum.UV.key,
            WordNutureEnum.Z.key,
            WordNutureEnum.B.key,
            WordNutureEnum.C.key,
            WordNutureEnum.M.key,
            WordNutureEnum.UL.key,
            WordNutureEnum.D.key,
            WordNutureEnum.V.key,
            WordNutureEnum.A.key,
            WordNutureEnum.AG.key,
            WordNutureEnum.NX.key,
            WordNutureEnum.F.key));

    /** Utility class — not instantiable. */
    private Tokenizer() {
    }

    /**
     * Segments a sentence with HanLP's standard tokenizer.
     *
     * @param sentence the Chinese text to segment
     * @return one {@code Word} per token, carrying the token text and its part-of-speech tag
     */
    public static List<Word> segment(String sentence) {
        // 1. Segment using HanLP's standard tokenizer.
        List<Term> termList = HanLP.segment(sentence);

        // 2. Re-wrap each Term (term.word = the token, term.nature = its part of speech).
        return termList.stream()
                .map(term -> new Word(term.word, term.nature.toString()))
                .collect(Collectors.toList());
    }

    /**
     * Segments a sentence and discards tokens whose part of speech is on the
     * useless-nature list, then drops tokens listed in the exclusion keyword file.
     *
     * @param sentence the Chinese text to segment
     * @return the filtered tokens wrapped as {@code Word} objects
     * @throws IOException if the exclusion keyword resource cannot be read
     */
    public static List<Word> filterSegment(String sentence) throws IOException {
        // 1. Segment using HanLP's standard tokenizer.
        List<Term> termList = HanLP.segment(sentence);
        // Use the class logger instead of System.out (the @Slf4j logger is already present).
        log.debug("Segmentation result: {}", termList);

        // 2. Drop tokens whose part of speech is considered useless.
        termList = termList.stream()
                .filter(term -> !USELESS_NATURES.contains(term.nature.toString()))
                .collect(Collectors.toList());

        // 3. Drop tokens explicitly listed in the exclusion keyword file.
        termList = secondFilterWord(termList);

        // 4. Re-wrap each remaining Term into a Word.
        return termList.stream()
                .map(term -> new Word(term.word, term.nature.toString()))
                .collect(Collectors.toList());
    }

    /**
     * Removes terms whose word appears in the exclusion keyword file.
     * The file is resolved from the classpath; if it is absent the input
     * list is returned unchanged (best-effort filtering, logged as a warning).
     *
     * @param termList the terms to filter
     * @return the terms whose word is not in the exclusion list
     * @throws IOException if the resource exists but cannot be read
     */
    private static List<Term> secondFilterWord(List<Term> termList) throws IOException {
        InputStream in = Tokenizer.class.getResourceAsStream(EXCLUDE_KEYWORD_RESOURCE);
        if (in == null) {
            // No exclusion file bundled — nothing to filter out.
            log.warn("Exclusion keyword resource {} not found on classpath; skipping second filter",
                    EXCLUDE_KEYWORD_RESOURCE);
            return termList;
        }

        List<String> keywords = new ArrayList<>();
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                keywords.add(line);
            }
        }

        return termList.stream()
                .filter(term -> !keywords.contains(term.word))
                .collect(Collectors.toList());
    }

}
