package util;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;

import java.io.FileNotFoundException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListSet;

/**
 * Text similarity scoring.
 * Method: Jaccard similarity coefficient — the similarity of two documents is the size of the
 * intersection of their token sets divided by the size of their union.
 * Algorithm steps:
 * 1. Tokenize
 * 2. Lemmatize (stem extraction)
 * 3. Remove stop words
 * 4. Compute the intersection (deduplicated) and count its distinct words: intersectionSize
 * 5. Compute the union (deduplicated) and count its distinct words: unionSize
 * 6. Divide the value from step 4 by the value from step 5: intersectionSize / (double) unionSize
 */
public class JaccardTextSimilarity {

    protected static final Logger LOGGER = LoggerFactory.getLogger(JaccardTextSimilarity.class);

    /**
     * Computes the Jaccard similarity coefficient of two documents over their lemma sets.
     *
     * <p>Both documents are lemmatized via {@link #getlema(String)}; the score is
     * {@code |intersection| / |union|} of the resulting distinct-lemma sets.
     *
     * @param document1 first document text
     * @param document2 second document text
     * @return a score in [0.0, 1.0]; two documents that both produce no tokens are
     *         considered identical and score 1.0 (the original formulation returned NaN
     *         from the 0/0 division in that case)
     */
    public static double getSimilarity(String document1, String document2) {
        // Distinct lemma sets for each document.
        Set<String> words1 = new HashSet<>(getlema(document1));
        Set<String> words2 = new HashSet<>(getlema(document2));

        // Union of both lemma sets.
        Set<String> unionSet = new HashSet<>(words1);
        unionSet.addAll(words2);
        int unionSize = unionSet.size();

        // Guard against 0/0 -> NaN: two empty token sets are treated as identical.
        if (unionSize == 0) {
            return 1.0;
        }

        // Intersection: retainAll mutates words1, which is a private copy, so this is safe.
        // (Replaces the original parallelStream + ConcurrentSkipListSet, which was
        // needless concurrency for a simple in-memory set operation.)
        words1.retainAll(words2);
        int intersectionSize = words1.size();

        double score = intersectionSize / (double) unionSize;
        LOGGER.debug("intersection size: {}", intersectionSize);
        LOGGER.debug("union size: {}", unionSize);
        LOGGER.debug("similarity = {}/(double){} = {}", intersectionSize, unionSize, score);
        return score;
    }

    /**
     * Lemmatizes the given text with the shared StanfordCoreNLP pipeline.
     *
     * @param text raw input text
     * @return the lemma of every token, in document order (duplicates preserved)
     */
    public static List<String> getlema(String text) {
        List<String> wordslist = new ArrayList<>();
        // Shared pipeline instance (tokenize, ssplit, pos, lemma) — constructing a
        // StanfordCoreNLP per call would be prohibitively expensive.
        StanfordCoreNLP pipeline = PipelineUtil.getInstance().getPipeline();
        Annotation document = new Annotation(text);
        pipeline.annotate(document);
        for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
            for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
                // The lemma annotation is the normalized (dictionary) form of the token.
                wordslist.add(token.get(CoreAnnotations.LemmaAnnotation.class));
            }
        }
        return wordslist;
    }

    /**
     * Demo entry point: scores the similarity of the classpath resources "text1" and "text2".
     *
     * <p>NOTE(review): {@code getResource(...).getPath()} breaks for paths containing
     * URL-encoded characters (e.g. spaces) and NPEs if the resource is missing — confirm
     * the resources exist on the classpath before running.
     */
    public static void main(String[] args) throws UnsupportedEncodingException, FileNotFoundException {
        String text1 = FileTool.readFileByLines(JaccardTextSimilarity.class.getClassLoader().getResource("text1").getPath());
        String text2 = FileTool.readFileByLines(JaccardTextSimilarity.class.getClassLoader().getResource("text2").getPath());
        System.out.println(getSimilarity(text1, text2));
    }
}


