package com.qianxinyao.analysis.jieba.keyword;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;

import com.huaban.analysis.jieba.JiebaSegmenter;

/**
 * @author Tom Qian
 * @email tomqianmaple@outlook.com
 * @github https://github.com/bluemapleman
 * @date Oct 20, 2018
 * tfidf算法原理参考：http://www.cnblogs.com/ywl925/p/3275878.html
 * 部分实现思路参考jieba分词：https://github.com/fxsjy/jieba
 */
public class TFIDFAnalyzer {

    // Shared across all instances; populated lazily on the first analyze() call.
    static HashMap<String, Double> idfMap;
    static HashSet<String> stopWordsSet;
    // Fallback idf for words missing from the dictionary: the median of all known idf values.
    static double idfMedian;

    /**
     * Runs a tf-idf analysis over the given text and returns the highest-scoring keywords.
     *
     * @param content the text/document to analyze
     * @param topN    maximum number of keywords to return; if the text contains fewer
     *                distinct candidate words, all of them are returned
     * @return keywords sorted by descending tf-idf value, at most topN entries
     */
    public List<Keyword> analyze(String content, int topN) {
        List<Keyword> keywordList = new ArrayList<>();

        // Lazy one-time load of the bundled stop-word list and idf dictionary.
        // NOTE(review): not thread-safe; concurrent first calls may load twice — acceptable
        // here since the loads are idempotent, but confirm if used from multiple threads.
        if (stopWordsSet == null) {
            stopWordsSet = new HashSet<>();
            loadStopWords(stopWordsSet, this.getClass().getResourceAsStream("/stop_words.txt"));
        }
        if (idfMap == null) {
            idfMap = new HashMap<>();
            loadIDFMap(idfMap, this.getClass().getResourceAsStream("/idf_dict.txt"));
        }

        Map<String, Double> tfMap = getTF(content);
        for (Map.Entry<String, Double> entry : tfMap.entrySet()) {
            String word = entry.getKey();
            // Words absent from the idf dictionary fall back to the median idf
            // (new/slang words may need to be added to the dictionary periodically).
            double idf = idfMap.containsKey(word) ? idfMap.get(word) : idfMedian;
            keywordList.add(new Keyword(word, idf * entry.getValue()));
        }

        Collections.sort(keywordList);

        // Trim to the topN best entries (copy so we do not return a subList view).
        if (keywordList.size() > topN) {
            keywordList = new ArrayList<>(keywordList.subList(0, topN));
        }
        return keywordList;
    }

    /**
     * Computes term frequencies for the document.
     * Documented formula: tf = N(i,j) / sum(N(k,j) for all k), where N(i,j) is the number of
     * occurrences of word i in the document and the denominator is the total candidate-word count.
     *
     * @param content the document text; null or empty yields an empty map
     * @return map from word to its (scaled) tf value
     */
    private Map<String, Double> getTF(String content) {
        Map<String, Double> tfMap = new HashMap<>();
        if (content == null || content.isEmpty()) {
            return tfMap;
        }

        JiebaSegmenter segmenter = new JiebaSegmenter();
        List<String> segments = segmenter.sentenceProcess(content);
        // Debug/word-cloud output of the raw counts, kept from the original implementation.
        print(frequencyOfListElements(segments));

        Map<String, Integer> freqMap = new HashMap<>();

        int wordSum = 0;
        for (String segment : segments) {
            // Stop words and single-character words are not counted.
            if (isCandidate(segment)) {
                wordSum++;
                Integer count = freqMap.get(segment);
                freqMap.put(segment, count == null ? 1 : count + 1);
            }
        }

        // NOTE(review): the 0.1 factor deviates from the formula documented above; it scales
        // every tf by the same constant, so the ranking is unaffected. Kept so output values
        // stay backward-compatible with the documented example results.
        for (Map.Entry<String, Integer> entry : freqMap.entrySet()) {
            tfMap.put(entry.getKey(), entry.getValue() * 0.1 / wordSum);
        }

        return tfMap;
    }

    /**
     * A segment is a tf candidate if it is not a stop word and is longer than one character.
     * Treats an unloaded (null) stop-word set as empty instead of throwing NPE.
     */
    private static boolean isCandidate(String segment) {
        return (stopWordsSet == null || !stopWordsSet.contains(segment)) && segment.length() > 1;
    }

    /**
     * Prints each word and its occurrence count; this data could instead be stored
     * and used for a word-cloud display.
     *
     * @param frequencyOfListElements word-to-count map; null or empty prints nothing
     */
    private void print(Map<String, Integer> frequencyOfListElements) {
        if (frequencyOfListElements == null) {
            return;
        }
        for (Map.Entry<String, Integer> entry : frequencyOfListElements.entrySet()) {
            System.out.println(entry.getKey() + " ---  " + entry.getValue() + " 次");
        }
    }

    /**
     * Counts occurrences of each item (stop words and single-character words excluded)
     * and returns them sorted by count, highest first.
     *
     * @param items segmented words; may be null or empty
     * @return map ordered by descending count; empty (never null) when there is nothing to count
     */
    public static Map<String, Integer> frequencyOfListElements(List<String> items) {
        Map<String, Integer> map = new HashMap<String, Integer>();
        if (items != null) {
            for (String temp : items) {
                if (isCandidate(temp)) {
                    Integer count = map.get(temp);
                    map.put(temp, (count == null) ? 1 : count + 1);
                }
            }
        }
        return sort(map);
    }

    /**
     * Sorts the map by value, descending.
     *
     * @param oriMap source map; may be null or empty
     * @return a LinkedHashMap in descending-value order; empty (never null) for empty input,
     *         so callers can iterate without a null check
     */
    private static Map<String, Integer> sort(Map<String, Integer> oriMap) {
        Map<String, Integer> sortedMap = new LinkedHashMap<String, Integer>();
        if (oriMap == null || oriMap.isEmpty()) {
            return sortedMap;
        }
        List<Map.Entry<String, Integer>> entryList =
                new ArrayList<Map.Entry<String, Integer>>(oriMap.entrySet());
        Collections.sort(entryList, new MapValueComparator());
        for (Map.Entry<String, Integer> entry : entryList) {
            sortedMap.put(entry.getKey(), entry.getValue());
        }
        return sortedMap;
    }

    /**
     * Orders map entries by descending value.
     */
    static class MapValueComparator implements Comparator<Map.Entry<String, Integer>> {
        @Override
        public int compare(Map.Entry<String, Integer> map1, Map.Entry<String, Integer> map2) {
            return map2.getValue().compareTo(map1.getValue());
        }
    }

    /**
     * Loads the default jieba stop-word list (one word per line, UTF-8).
     * url:https://github.com/yanyiwu/nodejieba/blob/master/dict/stop_words.utf8
     *
     * @param set target set to fill with trimmed stop words
     * @param in  stream over the stop-word resource; a null stream is reported and skipped
     */
    private void loadStopWords(Set<String> set, InputStream in) {
        if (in == null) {
            System.err.println("stop words resource not found");
            return;
        }
        // Explicit UTF-8: the dictionary file is UTF-8, and the platform default may differ.
        try (BufferedReader bufr =
                     new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while ((line = bufr.readLine()) != null) {
                set.add(line.trim());
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Loads the idf dictionary ("word idf" per line, UTF-8). Ideally idf values would be
     * computed from one's own corpus, but jieba ships a good ready-made dictionary, so it is
     * used by default. Also computes the median idf used for out-of-vocabulary words.
     * url:https://raw.githubusercontent.com/yanyiwu/nodejieba/master/dict/idf.utf8
     *
     * @param map target map from word to idf value
     * @param in  stream over the idf resource; a null stream is reported and skipped
     */
    private void loadIDFMap(Map<String, Double> map, InputStream in) {
        if (in == null) {
            System.err.println("idf dictionary resource not found");
            return;
        }
        try (BufferedReader bufr =
                     new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while ((line = bufr.readLine()) != null) {
                String[] kv = line.trim().split(" ");
                // Skip malformed lines rather than throwing ArrayIndexOutOfBoundsException.
                if (kv.length >= 2) {
                    map.put(kv[0], Double.parseDouble(kv[1]));
                }
            }
        } catch (IOException | NumberFormatException e) {
            e.printStackTrace();
        }

        // Median of the idf values; guarded so an empty dictionary cannot throw.
        if (!map.isEmpty()) {
            List<Double> idfList = new ArrayList<>(map.values());
            Collections.sort(idfList);
            idfMedian = idfList.get(idfList.size() / 2);
        }
    }

    public static void main(String[] args) {
        String content = "结巴分词(java版) jieba-analysis\n" +
                "首先感谢jieba分词原作者fxsjy，没有他的无私贡献，我们也不会结识到结巴 分词. 同时也感谢jieba分词java版本的实现团队huaban，他们的努力使得Java也能直接做出效果很棒的分词。\n" +
                "\n" +
                "不过由于huaban已经没有再对java版进行维护，所以我自己对项目进行了开发。除了结巴分词(java版)所保留的原项目针对搜索引擎分词的功能(cutforindex、cutforsearch)，我加入了tfidf的关键词提取功能，并且实现的效果和python的jieba版本的效果一模一样！\n" +
                "\n" +
                "（以下内容在基于jieba-java版本README.md的基础上，加入了对我新加入的tfidf关键词提取模块的相关说明)\n" +
                "\n" +
                "简介\n" +
                "支持分词模式\n" +
                "Search模式，用于对用户查询词分词\n" +
                "Index模式，用于对索引文档分词\n" +
                "特性\n" +
                "支持多种分词模式\n" +
                "全角统一转成半角\n" +
                "用户词典功能\n" +
                "conf 目录有整理的搜狗细胞词库\n" +
                "因为性能原因，最新的快照版本去除词性标注，也希望有更好的 Pull Request 可以提供该功能。\n" +
                "新特性：tfidf算法提取关键词\n" +
                "\n" +
                "    public static void main(String[] args)\n" +
                "    {\n" +
                "        String content=\"孩子上了幼儿园 安全防拐教育要做好\";\n" +
                "        int topN=5;\n" +
                "        TFIDFAnalyzer tfidfAnalyzer=new TFIDFAnalyzer();\n" +
                "        List<Keyword> list=tfidfAnalyzer.analyze(content,topN);\n" +
                "        for(Keyword word:list)\n" +
                "            System.out.println(word.getName()+\":\"+word.getTfidfvalue()+\",\");\n" +
                "        // 防拐:0.1992,幼儿园:0.1434,做好:0.1065,教育:0.0946,安全:0.0924\n" +
                "    }\n" +
                "如何使用\n" +
                "Demo\n" +
                "\n" +
                "@Test\n" +
                "public void testDemo() {\n" +
                "    JiebaSegmenter segmenter = new JiebaSegmenter();\n" +
                "    String[] sentences =\n" +
                "        new String[] {\"这是一个伸手不见五指的黑夜。我叫孙悟空，我爱北京，我爱Python和C++。\", \"我不喜欢日本和服。\", \"雷猴回归人间。\",\n" +
                "                      \"工信处女干事每月经过下属科室都要亲口交代24口交换机等技术性器件的安装工作\", \"结果婚的和尚未结过婚的\"};\n" +
                "    for (String sentence : sentences) {\n" +
                "        System.out.println(segmenter.process(sentence, SegMode.INDEX).toString());\n" +
                "    }\n" +
                "}\n" +
                "算法(wiki补充...)\n" +
                " 基于 trie 树结构实现高效词图扫描\n" +
                " 生成所有切词可能的有向无环图 DAG\n" +
                " 采用动态规划算法计算最佳切词组合\n" +
                " 基于 HMM 模型，采用 Viterbi (维特比)算法实现未登录词识别\n" +
                "性能评估\n" +
                "测试机配置\n" +
                "Processor 2 Intel(R) Pentium(R) CPU G620 @ 2.60GHz\n" +
                "Memory：8GB\n" +
                "\n" +
                "分词测试时机器开了许多应用(eclipse、emacs、chrome...)，可能\n" +
                "会影响到测试速度\n" +
                "测试文本\n" +
                "\n" +
                "测试结果(单线程，对测试文本逐行分词，并循环调用上万次)";

        // Pre-process the raw text to strip noise before analysis.
        content = originalContentProcessing(content);

        // topN = content.length() effectively requests all keywords.
        int topN = content.length();
        TFIDFAnalyzer tfidfAnalyzer = new TFIDFAnalyzer();
        List<Keyword> list = tfidfAnalyzer.analyze(content, topN);
        for (Keyword word : list) {
            System.out.print(word.getName() + ":" + word.getTfidfvalue() + "\n");
        }
    }

    /**
     * Pre-processes the raw text; currently just removes newlines.
     *
     * @param content raw input text
     * @return the text with all newline characters removed
     */
    private static String originalContentProcessing(String content) {
        // replace (literal) instead of replaceAll (regex) — no pattern needed for "\n".
        return content.replace("\n", "");
    }
}

