package com.qianxinyao.analysis.jieba.keyword;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.POSToken;
import com.huaban.analysis.jieba.POSTokenizer;
import com.huaban.analysis.jieba.SegToken;
/**
 * @author Tom Qian
 * @email tomqianmaple@outlook.com
 * @github https://github.com/bluemapleman
 * @date Oct 20, 2018
 * tfidf算法原理参考：http://www.cnblogs.com/ywl925/p/3275878.html
 * 部分实现思路参考jieba分词：https://github.com/fxsjy/jieba
 */
public class TFIDFAnalyzer
{
	
	static HashMap<String,Double> idfMap;
	static HashSet<String> stopWordsSet;
	static double idfMedian;
	
	/**
	 * tfidf分析方法
	 * @param content 需要分析的文本/文档内容
	 * @param topN 需要返回的tfidf值最高的N个关键词，若超过content本身含有的词语上限数目，则默认返回全部
	 * @return
	 */
	public List<Keyword> analyze(String content, int topN) {
		List<Keyword> keywordList = new ArrayList<>();

		if (stopWordsSet == null) {
			stopWordsSet = new HashSet<>();
			loadStopWords(stopWordsSet, this.getClass().getResourceAsStream("/stop_words.txt"));
		}
		if (idfMap == null) {
			idfMap = new HashMap<>();
			loadIDFMap(idfMap, this.getClass().getResourceAsStream("/processed_dict2.txt"));
		}

		Map<String, Double> tfMap = getTF(content);

		for (String word : tfMap.keySet()) {
			if (idfMap.containsKey(word)) {
				keywordList.add(new Keyword(word, idfMap.get(word) * tfMap.get(word)));
			} else {
				keywordList.add(new Keyword(word, idfMedian * tfMap.get(word)));
			}
		}

		Collections.sort(keywordList, Comparator.comparingDouble(Keyword::getTfidfvalue).reversed());

		if (keywordList.size() > topN) {
			keywordList = keywordList.subList(0, topN);
		}

		return keywordList;
	}
	
	/**
	 * tf值计算公式
	 * tf=N(i,j)/(sum(N(k,j) for all k))
	 * N(i,j)表示词语Ni在该文档d（content）中出现的频率，sum(N(k,j))代表所有词语在文档d中出现的频率之和
	 * @param content
	 * @return
	 */
	private Map<String, Double> getTF(String content) {
		Map<String, Double> tfMap = new HashMap<>();
		if (content == null || content.equals("")) {
			return tfMap;
		}

		JiebaSegmenter segmenter = new JiebaSegmenter();
		POSTokenizer posTokenizer = new POSTokenizer();

		// 获取分词结果
		List<SegToken> segments = segmenter.process(content, JiebaSegmenter.SegMode.SEARCH);
		// 获取分词结果及其词性
		List<POSToken> postokens = posTokenizer.process(content, segments);

		Map<String, Integer> freqMap = new HashMap<>();
		int wordSum = 0;

		for (POSToken postoken : postokens) {
			String word = postoken.word;
			String pos = postoken.flag; // 获取词性

			// 停用词不予考虑，单字词不予考虑
			if ((idfMap.get(word) !=null) && word.length() > 1) {
				// 只保留名词和动词
				if (pos.startsWith("n") || pos.startsWith("v")) {
					wordSum++;
					freqMap.put(word, freqMap.getOrDefault(word, 0) + 1);
				}
			}
		}

		// 计算double型的tf值
		for (String word : freqMap.keySet()) {
			tfMap.put(word, (double) freqMap.get(word) / wordSum);
		}

		return tfMap;
	}



	/**
	 * 默认jieba分词的停词表
	 * url:https://github.com/yanyiwu/nodejieba/blob/master/dict/stop_words.utf8
	 * @param set
	 * @param filePath
	 */
	private void loadStopWords(Set<String> set, InputStream in){
		BufferedReader bufr;
		try
		{
			bufr = new BufferedReader(new InputStreamReader(in));
			String line=null;
			while((line=bufr.readLine())!=null) {
				set.add(line.trim());
			}
			try
			{
				bufr.close();
			}
			catch (IOException e)
			{
				e.printStackTrace();
			}
		}
		catch (Exception e)
		{
			e.printStackTrace();
		}
	}
	
	/**
	 * idf值本来需要语料库来自己按照公式进行计算，不过jieba分词已经提供了一份很好的idf字典，所以默认直接使用jieba分词的idf字典
	 * url:https://raw.githubusercontent.com/yanyiwu/nodejieba/master/dict/idf.utf8
	 * @param set
	 * @param filePath
	 */
	private void loadIDFMap(Map<String,Double> map, InputStream in ){
		BufferedReader bufr;
		try
		{
			bufr = new BufferedReader(new InputStreamReader(in));
			String line=null;
			while((line=bufr.readLine())!=null) {
				String[] kv=line.trim().split(" ");
				map.put(kv[0],Double.parseDouble(kv[1]));
			}
			try
			{
				bufr.close();
			}
			catch (IOException e)
			{
				e.printStackTrace();
			}
			
			// 计算idf值的中位数
			List<Double> idfList=new ArrayList<>(map.values());
			Collections.sort(idfList);
			idfMedian=idfList.get(idfList.size()/2);
		}
		catch (Exception e)
		{
			e.printStackTrace();
		}
	}


	static String content = "维修工单故障说明：设备编号[W20242011029]的水泵突发故障没电了。表现为水泵运行时发出异常噪音且震动剧烈,然后停电。初步判断可能是内部零件松动。该故障影响生产进度，急需维修。需准备相应工具检查内部结构，更换可能松动的零件，尽快恢复设备正常运行。";

	public static void main(String[] args)
	{

		JiebaSegmenter segmenter = new JiebaSegmenter();
		System.out.println(segmenter.sentenceProcess(content));

		System.out.println("\n");
		String[] sentences =
				new String[] {content};
		for (String sentence : sentences) {
			System.out.println(segmenter.process(sentence, JiebaSegmenter.SegMode.INDEX).toString());
		}
		System.out.println("\n");
//		String content="孩子上了幼儿园 安全防拐教育要做好";
		int topN=10;
		TFIDFAnalyzer tfidfAnalyzer=new TFIDFAnalyzer();
		List<Keyword> list=tfidfAnalyzer.analyze(content,topN);
		for(Keyword word:list)
			System.out.print(word.getName()+":"+word.getTfidfvalue()+",");
	}
}

