package com.ocr.utils.impl;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.seg.common.Term;
import com.ocr.utils.beans.WordLabe;
import org.jsoup.Jsoup;

import java.util.*;
import java.util.stream.Collectors;

/**
 * @author 化粪池堵塞的凶手
 */
public class SimilarityUtil {

    /**
     * Stop words removed from keyword-extraction results.
     * Kept public and mutable for backward compatibility; callers may add entries.
     */
    public static List<String> s = new ArrayList<>();

    /**
     * Characters/tokens filtered out of segmentation output. A segmented word is
     * dropped when it appears as a substring of this string (original semantics,
     * preserved: this also drops short multi-char tokens like "@#").
     */
    private static final String FILTER_CHARS =
            "`~!@#$^&*()=|{}':;',\\[\\].<>/?~！@#￥……&*（）——|{}【】‘；：”“'。，、？ ";

    static {
        s.add("she");s.add("he");s.add("I");s.add("the");s.add("to");s.add("you");s.add("的");s.add("is");s.add("s");s.add("是");s.add("他");s.add("她");s.add("一");
        // Register domain terms so HanLP segments them as single words.
        CustomDictionary.add("子类");
        CustomDictionary.add("父类");
    }

    /** Utility class — not instantiable from outside. */
    private SimilarityUtil() {
    }

    /**
     * Computes the cosine similarity of two sentences over their word-frequency vectors.
     *
     * @param sentence1 first sentence (may contain HTML, which is stripped)
     * @param sentence2 second sentence (may contain HTML, which is stripped)
     * @return cosine similarity in [0, 1]; 0.0 when either sentence yields no words
     *         (avoids the NaN that a raw division by zero would produce)
     */
    public static double getSimilarity(String sentence1, String sentence2) {
        List<String> sent1Words = getSplitWords(sentence1);
        List<String> sent2Words = getSplitWords(sentence2);
        List<String> allWords = mergeList(sent1Words, sent2Words);

        int[] vector1 = statistic(allWords, sent1Words);
        int[] vector2 = statistic(allWords, sent2Words);

        double dot = 0;
        double norm1 = 0;
        double norm2 = 0;
        for (int i = 0; i < vector1.length; i++) {
            dot += (double) vector1[i] * vector2[i];
            norm1 += (double) vector1[i] * vector1[i];
            norm2 += (double) vector2[i] * vector2[i];
        }

        // Guard: an empty word vector would make the denominator 0 and the result NaN.
        if (norm1 == 0 || norm2 == 0) {
            return 0.0;
        }
        return dot / (Math.sqrt(norm1) * Math.sqrt(norm2));
    }

    /**
     * Builds the frequency vector of {@code sentWords} over the vocabulary {@code allWords}.
     * Uses a single counting pass (O(n + m)) instead of Collections.frequency per word (O(n*m)).
     *
     * @param allWords  vocabulary defining the vector dimensions
     * @param sentWords words of one sentence
     * @return frequency count of each vocabulary word within the sentence
     */
    private static int[] statistic(List<String> allWords, List<String> sentWords) {
        Map<String, Integer> freq = new HashMap<>(Math.max(16, sentWords.size() * 2));
        for (String word : sentWords) {
            freq.merge(word, 1, Integer::sum);
        }
        int[] result = new int[allWords.size()];
        for (int i = 0; i < allWords.size(); i++) {
            result[i] = freq.getOrDefault(allWords.get(i), 0);
        }
        return result;
    }

    /**
     * Merges two word lists into a de-duplicated vocabulary, preserving first-seen order.
     */
    private static List<String> mergeList(List<String> list1, List<String> list2) {
        Set<String> merged = new LinkedHashSet<>(list1);
        merged.addAll(list2);
        return new ArrayList<>(merged);
    }

    /**
     * Segments a sentence into words, stripping HTML tags first and then
     * dropping punctuation terms (HanLP emits punctuation as separate terms).
     */
    private static List<String> getSplitWords(String sentence) {
        // Strip HTML tags and &nbsp; entities before segmentation.
        String plainText = Jsoup.parse(sentence.replace("&nbsp;", "")).body().text();
        // " I " suffix additionally drops the standalone English pronoun here
        // (original behavior — applies only to this method, not wordDemoFun).
        String filter = FILTER_CHARS + "I ";
        return HanLP.segment(plainText).stream()
                .map(term -> term.word)
                .filter(word -> !filter.contains(word))
                .collect(Collectors.toList());
    }

    /**
     * Extracts the top keywords of {@code content} by word frequency, minus stop words.
     *
     * @param content text to analyze
     * @param topNum  maximum number of keywords considered before stop-word removal
     * @return keyword strings, highest frequency first
     */
    public static List<String> getKeyWord(String content, int topNum) {
        List<WordLabe> list = new SimilarityUtil().wordDemoFun(content, topNum);
        List<String> strings = new ArrayList<>(list.size());
        for (WordLabe wordLabe : list) {
            strings.add(wordLabe.getLabel());
        }
        strings.removeAll(s);
        return strings;
    }

    /**
     * Segments {@code content} and computes per-word frequencies.
     *
     * @param content text to segment
     * @param topNum  maximum number of entries to return
     * @return up to {@code topNum} words with their frequencies, sorted by frequency descending
     */
    public List<WordLabe> wordDemoFun(String content, int topNum) {
        // Segment, then drop punctuation terms.
        List<Term> termList = HanLP.segment(content);
        List<String> words = termList.stream()
                .map(term -> term.word)
                .filter(word -> !FILTER_CHARS.contains(word))
                .collect(Collectors.toList());

        // Count frequencies in one pass (replaces O(n^2) Collections.frequency-per-word).
        Map<String, Integer> counts = new HashMap<>(16);
        for (String word : words) {
            counts.merge(word, 1, Integer::sum);
        }

        List<WordLabe> extractLabelList = new ArrayList<>(counts.size());
        for (Map.Entry<String, Integer> entry : counts.entrySet()) {
            extractLabelList.add(new WordLabe(entry.getKey(), entry.getValue()));
        }

        // Sort by frequency descending; Integer.compare avoids int-subtraction overflow.
        extractLabelList.sort((o1, o2) -> Integer.compare(o2.getWordFrequency(), o1.getWordFrequency()));

        // Copy the subList so no view backed by the internal list escapes.
        return extractLabelList.size() > topNum
                ? new ArrayList<>(extractLabelList.subList(0, topNum))
                : extractLabelList;
    }

    /** Ad-hoc smoke test. */
    public static void main(String[] args) {
        List<WordLabe> list = new SimilarityUtil().wordDemoFun("这是测试呀测试测呀 斯卡萨", 5);
        for (WordLabe i : list) {
            System.out.println(i);
        }
    }
}
