package com.maistars.spider.infrastructure.hanlp;

import com.hankcs.hanlp.HanLP;
import com.maistars.spider.common.util.HtmlUtil;
import org.jsoup.Jsoup;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

/**
 * HanLP-backed document utilities: keyword extraction, automatic
 * summarization, and sentence similarity.
 *
 * @author dhr
 * @date 2020/5/19 1:08 PM
 */
public class HanlpDocService {

    /** Sample document fed to the extractors by {@link #main(String[])} as a smoke test. */
    private static final String SAMPLE_DOC = "为data的父目录即可，比如data目录是/Users/hankcs/Documents/data，那么root=/Users/hankcs/Documents/ 。\n" +
            "\n" +
            "如果选用mini词典的话，则需要修改配置文件：\n" +
            "CoreDictionaryPath=data/dictionary/CoreNatureDictionary.mini.txt\n" +
            "BiGramDictionaryPath=data/dictionary/CoreNatureDictionary.ngram.mini.txt\n" +
            "\n" +
            "最后将HanLP.properties放入classpath即可，对于任何项目，都可以放到src目录或resources目录下，编译时IDE会自动将其复制到classpath中。\n" +
            "\n" +
            "如果放置不当，HanLP会智能提示当前环境下的合适路径，并且尝试从项目根目录读取数据集。\n" +
            "\n" +
            "调用方法\n" +
            "HanLP几乎所有的功能都可以通过工具类HanLP快捷调用，当你想不起来调用方法时，只需键入HanLP.，IDE应当会给出提示，并展示HanLP完善的文档。\n" +
            "\n" +
            "推荐用户始终通过工具类HanLP调用，这么做的好处是，将来HanLP升级后，用户无需修改调用代码。";

    /** Utility class — not instantiable. */
    private HanlpDocService() {
    }

    /**
     * Points HanLP at its data directory by setting the {@code HANLP_ROOT}
     * system property. Must be called before the first HanLP API call to
     * take effect.
     *
     * @param dataRoot parent directory of HanLP's {@code data} folder
     */
    public static void setDataRoot(String dataRoot) {
        System.setProperty("HANLP_ROOT", dataRoot);
    }

    /**
     * Extracts keywords from a (possibly HTML) document.
     *
     * @param doc       source text; HTML markup is stripped first
     * @param maxLength maximum number of keywords to return
     * @return the extracted keywords
     */
    public static List<String> extractKeyword(String doc, int maxLength) {
        return HanLP.extractKeyword(HtmlUtil.htmlReplace(doc), maxLength);
    }

    /**
     * Extracts keywords and joins them with commas.
     *
     * @param doc       source text; HTML markup is stripped first
     * @param maxLength maximum number of keywords to extract
     * @return comma-separated keywords
     */
    public static String extractKeywordString(String doc, int maxLength) {
        // Delegate to the filtering overload, mirroring extractSummaryString.
        return extractKeywordString(doc, maxLength, false);
    }

    /**
     * Extracts keywords and joins them with commas, optionally filtering
     * special characters from the results.
     *
     * @param doc       source text; HTML markup is stripped first
     * @param maxLength maximum number of keywords to extract
     * @param isFilter  whether to pass the keywords through
     *                  {@code HtmlUtil.specialCharFilte} before joining
     * @return comma-separated keywords
     */
    public static String extractKeywordString(String doc, int maxLength, boolean isFilter) {
        List<String> keywords = HanLP.extractKeyword(HtmlUtil.htmlReplace(doc), maxLength);
        if (isFilter) {
            keywords = HtmlUtil.specialCharFilte(keywords);
        }
        return String.join(",", keywords);
    }

    /**
     * Extracts the most significant sentences of the document
     * (extractive summary).
     *
     * @param doc  source text; HTML markup is stripped first
     * @param size number of key sentences wanted
     * @return the key sentences
     */
    public static List<String> extractSummary(String doc, int size) {
        return HanLP.extractSummary(HtmlUtil.htmlReplace(doc), size);
    }

    /**
     * Extracts the key sentences and joins them with commas.
     *
     * @param doc  source text; HTML markup is stripped first
     * @param size number of key sentences wanted
     * @return comma-separated key sentences
     */
    public static String extractSummaryString(String doc, int size) {
        return extractSummaryString(doc, size, false);
    }

    /**
     * Builds a length-bounded abstract of the document.
     *
     * @param doc       source text (passed to HanLP as-is, no HTML stripping)
     * @param maxLength maximum length of the summary
     * @return the generated summary
     */
    public static String getSummaryString(String doc, int maxLength) {
        return HanLP.getSummary(doc, maxLength);
    }

    /**
     * Extracts the key sentences, optionally filtering special characters,
     * and joins them with commas.
     *
     * @param doc      source text; HTML markup is stripped first
     * @param size     number of key sentences wanted
     * @param isFilter whether to pass the sentences through
     *                 {@code HtmlUtil.specialCharFilte} before joining
     * @return comma-separated key sentences
     */
    public static String extractSummaryString(String doc, int size, boolean isFilter) {
        List<String> summaries = HanLP.extractSummary(HtmlUtil.htmlReplace(doc), size);
        if (isFilter) {
            summaries = HtmlUtil.specialCharFilte(summaries);
        }
        return String.join(",", summaries);
    }

    /**
     * Computes the cosine similarity of the term-frequency vectors of two
     * sentences after HanLP segmentation.
     *
     * @param sentence1 first sentence (may contain HTML; it is stripped)
     * @param sentence2 second sentence (may contain HTML; it is stripped)
     * @return similarity in [0, 1]; 0.0 when either sentence yields no terms
     *         after segmentation and punctuation filtering (avoids NaN)
     */
    public static double getSimilarity(String sentence1, String sentence2) {
        List<String> sent1Words = getSplitWords(sentence1);
        List<String> sent2Words = getSplitWords(sentence2);
        List<String> allWords = mergeList(sent1Words, sent2Words);

        int[] statistic1 = statistic(allWords, sent1Words);
        int[] statistic2 = statistic(allWords, sent2Words);

        double dividend = 0;
        double divisor1 = 0;
        double divisor2 = 0;
        for (int i = 0; i < statistic1.length; i++) {
            dividend += statistic1[i] * statistic2[i];
            divisor1 += Math.pow(statistic1[i], 2);
            divisor2 += Math.pow(statistic2[i], 2);
        }

        double denominator = Math.sqrt(divisor1) * Math.sqrt(divisor2);
        // An empty sentence produces a zero vector; guard against 0/0 = NaN.
        return denominator == 0 ? 0.0 : dividend / denominator;
    }

    /**
     * Builds the term-frequency vector of {@code sentWords} over the
     * vocabulary {@code allWords}.
     */
    private static int[] statistic(List<String> allWords, List<String> sentWords) {
        int[] result = new int[allWords.size()];
        for (int i = 0; i < allWords.size(); i++) {
            result[i] = Collections.frequency(sentWords, allWords.get(i));
        }
        return result;
    }

    /** Union of the two lists, first-seen order, without duplicates. */
    private static List<String> mergeList(List<String> list1, List<String> list2) {
        List<String> result = new ArrayList<>(list1);
        result.addAll(list2);
        return result.stream().distinct().collect(Collectors.toList());
    }

    /**
     * Segments a sentence with HanLP after stripping HTML, dropping terms
     * that are bare punctuation.
     */
    private static List<String> getSplitWords(String sentence) {
        // Strip HTML tags and non-breaking space entities before segmentation.
        sentence = Jsoup.parse(sentence.replace("&nbsp;","")).body().text();
        // Punctuation is segmented into standalone terms — filter those out.
        return HanLP.segment(sentence).stream().map(a -> a.word).
                filter(s -> !"`~!@#$^&*()=|{}':;',\\[\\].<>/?~！@#￥……&*（）——|{}【】‘；：”“'。，、？ ".contains(s)).collect(Collectors.toList());
    }

    /** Smoke test: run the extractors against the built-in sample document. */
    public static void main(String[] args) {
        System.out.println(extractKeyword(SAMPLE_DOC, 20));
        System.out.println(extractSummary(SAMPLE_DOC, 10));
    }

}
