package com.samp.solr.hanlp;

import java.util.*;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.seg.common.Term;

/**
 * Computes a cosine similarity score between two Chinese text documents.
 *
 * <p>Documents are segmented with HanLP (with a custom dictionary and extra
 * stop words applied), then each distinct word becomes one dimension of a
 * weight vector per document: a word scores its "exist" weight in a document
 * that contains it and the "no exist" weight otherwise. The similarity is the
 * cosine of the angle between the two vectors.
 */
public class SimilarityAnalyze {

    /** Weight for a word that is absent from one of the two documents. */
    public static double DEFAULT_NO_EXIST_WEIHT = 0.1;
    /** Weight for a word that is present in a document. */
    public static double DEFAULT_EXIST_WEIHT = 1.0;

    /** Guards one-time registration of the custom and stop-word dictionaries. */
    private static boolean dictionariesLoaded = false;

    /**
     * Segments the given text into words, applying the custom dictionary and
     * removing stop words.
     *
     * @param doc raw text to tokenize
     * @return segmented words with stop words removed
     */
    public static List<String> tokenize(String doc) {
        ensureDictionariesLoaded();
        Segment segment = HanLP.newSegment().enableCustomDictionary(true);
        List<Term> termList = segment.seg(doc);
        CoreStopWordDictionary.apply(termList);

        List<String> tokenizeList = new ArrayList<>(termList.size());
        for (Term term : termList) {
            // Read the word field directly. The previous approach of splitting
            // term.toString() on "/" broke for any word containing a slash.
            tokenizeList.add(term.word);
        }
        System.out.println("分词结果：" + tokenizeList);
        return tokenizeList;
    }

    /** Registers the custom dictionary and extra stop words exactly once. */
    private static synchronized void ensureDictionariesLoaded() {
        if (dictionariesLoaded) {
            return;
        }
        loadCustomDict();
        loadStopWords();
        dictionariesLoaded = true;
    }

    /** Adds application-specific vocabulary to HanLP's custom dictionary. */
    private static void loadCustomDict() {
        for (String customWord : Arrays.asList("李校长", "抓到")) {
            CustomDictionary.add(customWord);
        }
    }

    /** Adds extra stop words on top of HanLP's core stop-word dictionary. */
    private static void loadStopWords() {
        for (String stopWord : Arrays.asList("的", "在", "里", "被", "了")) {
            CoreStopWordDictionary.add(stopWord);
        }
    }

    /**
     * Returns the weight of a word when it is present in a document.
     * "李四" is boosted as a known important term.
     *
     * @param word the word to weigh
     * @return the presence weight for this word
     */
    public static double getWordExistWight(String word) {
        if ("李四".equals(word)) {
            return 3.0;
        }
        return DEFAULT_EXIST_WEIHT;
    }

    /**
     * Returns the weight of a word when it is absent from a document.
     *
     * @param word the word to weigh (currently unused; all absent words share one weight)
     * @return the absence weight for this word
     */
    public static double getWordNoExistWight(String word) {
        return DEFAULT_NO_EXIST_WEIHT;
    }

    /**
     * Computes the cosine similarity between two tokenized documents.
     *
     * @param doc1 tokens of the first document
     * @param doc2 tokens of the second document
     * @return cosine similarity of the two weight vectors; 0.0 when either
     *         input is null or empty
     */
    public static double computeSimilarity(List<String> doc1, List<String> doc2) {
        if (doc1 == null || doc1.isEmpty() || doc2 == null || doc2.isEmpty()) {
            return 0.0;
        }
        // word -> {score in doc1, score in doc2}; a word repeated within one
        // document is only counted once.
        Map<String, double[]> dataMap = new HashMap<>();
        for (String word : doc1) {
            if (word != null && !word.trim().isEmpty()) {
                dataMap.put(word, new double[] {getWordExistWight(word), getWordNoExistWight(word)});
            }
        }
        for (String word : doc2) {
            if (word != null && !word.trim().isEmpty()) {
                double[] compareData = dataMap.get(word);
                if (compareData != null) {
                    // Word appears in both documents: doc2's slot gets the exist weight.
                    compareData[1] = getWordExistWight(word);
                } else {
                    dataMap.put(word, new double[] {getWordNoExistWight(word), getWordExistWight(word)});
                }
            }
        }
        // Cosine similarity: dot(v1, v2) / (|v1| * |v2|).
        double sum1 = 0, sum2 = 0, sum12 = 0;
        for (Map.Entry<String, double[]> entry : dataMap.entrySet()) {
            double[] compareData = entry.getValue();
            System.out.println("word:[" + entry.getKey() + "] data[0] =[" + compareData[0]
                    + "], data[1]=[" + compareData[1] + "]");
            sum12 += compareData[0] * compareData[1];
            sum1 += compareData[0] * compareData[0];
            sum2 += compareData[1] * compareData[1];
        }
        System.out.println("sum1=" + sum1 + ", sum2=" + sum2 + ", sum12 = " + sum12);
        return sum12 / Math.sqrt(sum1 * sum2);
    }

    /** Demo entry point: compares two similar sentences and prints the score. */
    public static void main(String[] args) {
        String doc1 = "张三的哥哥：王五，在游戏厅里玩游戏,玩WOW,被李校长抓到了";
        String doc2 = "张三的弟弟：王五，在手机上玩游戏,玩WOW,被李校长抓到了";
        List<String> tokenList1 = tokenize(doc1);
        List<String> tokenList2 = tokenize(doc2);
        double similarity = computeSimilarity(tokenList1, tokenList2);
        System.out.println(similarity);
    }
}
