package com.shijunhao.graduation.service.business.impl;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.shijunhao.graduation.domain.bean.Keyword;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.*;

@Component
public class SimilarAnalyzer {

    // word -> inverse document frequency, loaded from the bundled idf dictionary
    private HashMap<String, Double> idfMap;
    // words excluded from TF computation (particles, punctuation, ...)
    private HashSet<String> stopWordsSet;
    // median idf, used as the fallback weight for words absent from the idf dictionary
    private double idfMedian;

    private final JiebaSegmenter segmenter = new JiebaSegmenter();

    /**
     * Loads the idf dictionary and the stop-word set from classpath resources.
     * Runs once after bean construction; {@link #analyze(String)} also calls it
     * lazily as a safety net.
     */
    @PostConstruct
    private void init() {
        if (stopWordsSet == null) {
            stopWordsSet = new HashSet<>();
            loadStopWords(stopWordsSet, this.getClass().getResourceAsStream("/static/dict/stop_words.txt"));
        }
        if (idfMap == null) {
            idfMap = new HashMap<>();
            loadIDFMap(idfMap, this.getClass().getResourceAsStream("/static/dict/idf_dict.txt"));
        }
    }

    /**
     * Extracts the top-N keywords of a text, ordered by descending tf-idf weight.
     *
     * @param content text to analyze
     * @param topN    maximum number of keywords to return
     * @return at most {@code topN} keywords, highest weight first
     */
    public List<Keyword> analyze(String content, int topN) {
        List<Keyword> keywordList = this.analyze(content);

        // Bulk-truncate the tail in one operation instead of the original
        // one-element-at-a-time remove(topN) loop (which shifted the tail each time).
        if (keywordList.size() > topN) {
            keywordList.subList(topN, keywordList.size()).clear();
        }
        return keywordList;
    }

    /**
     * Extracts all keywords of a text with their tf-idf weights.
     * Words missing from the idf dictionary fall back to the median idf value.
     *
     * @param content text to analyze
     * @return all keywords, sorted via {@link Collections#sort} (Keyword's natural order)
     */
    public List<Keyword> analyze(String content) {
        // Lazy init in case the bean lifecycle did not invoke @PostConstruct.
        if (stopWordsSet == null || idfMap == null) {
            init();
        }

        List<Keyword> keywordList = new ArrayList<>();
        for (Map.Entry<String, Double> entry : getTF(content).entrySet()) {
            // Unknown words get the median idf so they are neither dropped nor over-weighted.
            double idf = idfMap.getOrDefault(entry.getKey(), idfMedian);
            keywordList.add(new Keyword(entry.getKey(), idf * entry.getValue()));
        }

        Collections.sort(keywordList);
        return keywordList;
    }

    /**
     * Cosine similarity of two keyword vectors:
     * sum(v1[i]*v2[i]) / (sqrt(sum(v1[i]^2)) * sqrt(sum(v2[i]^2))).
     *
     * @param keywords1 first keyword list
     * @param keywords2 second keyword list
     * @return cosine similarity in [0,1]; 0 when either vector is empty or all-zero
     *         (the original returned NaN from a 0/0 division in that case)
     */
    public double similar(List<Keyword> keywords1, List<Keyword> keywords2) {
        double dot = 0;
        double norm1 = 0;
        double norm2 = 0;

        for (Keyword k : keywords1) {
            // Keyword equality is assumed to be word-based so indexOf finds the
            // matching term in the other vector; absent terms contribute 0.
            int index = keywords2.indexOf(k);
            dot += k.getTfidfvalue() * (index == -1 ? 0 : keywords2.get(index).getTfidfvalue());
            norm1 += k.getTfidfvalue() * k.getTfidfvalue();
        }
        for (Keyword k : keywords2) {
            norm2 += k.getTfidfvalue() * k.getTfidfvalue();
        }

        // Guard the zero-norm case instead of dividing by zero.
        if (norm1 == 0 || norm2 == 0) {
            return 0;
        }
        return dot / (Math.sqrt(norm1) * Math.sqrt(norm2));
    }

    /**
     * Segments the text and computes term frequencies:
     * tf(w) = count(w) / total count of all retained words.
     * Stop words and single-character tokens are skipped.
     *
     * @param content text to segment; null/empty yields an empty map
     * @return map of word to tf value
     */
    private Map<String, Double> getTF(String content) {
        Map<String, Double> tfMap = new HashMap<>();
        if (content == null || content.isEmpty()) {
            return tfMap;
        }

        Map<String, Integer> freqMap = new HashMap<>();
        int wordSum = 0;
        for (String segment : segmenter.sentenceProcess(content)) {
            // Stop words and single-character tokens are not counted.
            if (!stopWordsSet.contains(segment) && segment.length() > 1) {
                wordSum++;
                freqMap.merge(segment, 1, Integer::sum);
            }
        }

        // No retained word at all: avoid a 0/0 division below.
        if (wordSum == 0) {
            return tfMap;
        }

        for (Map.Entry<String, Integer> entry : freqMap.entrySet()) {
            // Fix: the original computed freq * 0.1 / wordSum, scaling every tf
            // down 10x; standard tf is count / total (the 0.1 was apparently a
            // typo for a double-forcing 1.0).
            tfMap.put(entry.getKey(), entry.getValue() / (double) wordSum);
        }

        return tfMap;
    }

    /**
     * Loads the stop-word set, one word per line, UTF-8.
     *
     * @param set destination set
     * @param in  resource stream; null (resource missing) leaves the set empty
     */
    private void loadStopWords(Set<String> set, InputStream in) {
        if (in == null) {
            // Resource not found on the classpath; keep the set empty rather than NPE.
            System.err.println("stop-word resource not found; stop-word set is empty");
            return;
        }
        // try-with-resources closes the reader on every path; UTF-8 is explicit
        // because the dictionary is Chinese and the platform default may differ.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                set.add(line.trim());
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Loads the idf dictionary ("word idfValue" per line, space-separated, UTF-8)
     * and computes {@link #idfMedian} as the fallback for unknown words.
     *
     * @param map destination word-to-idf map
     * @param in  resource stream; null (resource missing) leaves the map empty
     */
    private void loadIDFMap(Map<String, Double> map, InputStream in) {
        if (in == null) {
            System.err.println("idf dictionary resource not found; idf map is empty");
            return;
        }
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                String[] kv = line.trim().split(" ");
                // Skip malformed lines instead of throwing ArrayIndexOutOfBounds.
                if (kv.length >= 2) {
                    map.put(kv[0], Double.parseDouble(kv[1]));
                }
            }

            // Median of all idf values, used for words absent from the dictionary.
            List<Double> idfList = new ArrayList<>(map.values());
            Collections.sort(idfList);
            if (!idfList.isEmpty()) {
                idfMedian = idfList.get(idfList.size() / 2);
            }
        } catch (Exception e) {
            // Broad catch kept deliberately: a NumberFormatException from a bad
            // line must not abort bean initialization, matching original behavior.
            e.printStackTrace();
        }
    }
}

