package com.util.textProcess;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import com.mysql.jdbc.Connection;
import com.mysql.jdbc.PreparedStatement;
import com.util.textProcess.NLPIR.CLibrary;

/**
 * Extracts the single highest-weighted keyword of one knowledgeInfo article
 * using TF-IDF over the whole knowledgeInfo corpus.
 *
 * <p>Pipeline (see {@link #getTF_IDF(Long)}): load stop words, segment the
 * target article with NLPIR, compute tf for it, segment every article to
 * compute idf, multiply, and return the top-scoring term.
 *
 * <p>NOTE(review): state is held in static maps, so this class is NOT
 * thread-safe; maps are cleared on each run to keep repeated calls correct.
 */
public class CalculateTF_IDF {
	private static CLibrary nLPIR = NLPIR.getInstance();
	static ConnectionPool pool = null;
	static Connection conn;
	static PreparedStatement stmt;

	// Stop words loaded from NLPIR.stopWordPath (UTF-8, one word per line).
	static Set<String> stopWordSet = new HashSet<String>();
	// Term counts of the query article after segmentation + stop-word removal.
	static Map<String, Integer> word_segs = new HashMap<String, Integer>();
	// Normalized term frequency of the query article: count(w,d) / size(d).
	static HashMap<String, Double> tf = new HashMap<String, Double>();
	/**
	 * key: word, value: number of documents containing that word.
	 */
	private static Map<String, Integer> containWordOfAllDocNumberMap = new HashMap<String, Integer>();
	/**
	 * key: word, value: inverse document frequency idf(w) = log(n / (docs(w, D) + 1)),
	 * i.e. the log of the total document count over the (smoothed) number of
	 * documents containing w.
	 */
	private static Map<String, Double> idfMap = new HashMap<String, Double>();
	/**
	 * key: document id (infoId), value: that document's term-count map.
	 */
	private static Map<Long, Map<String, Integer>> allFilesSegsMap = new HashMap<Long, Map<String, Integer>>();
	/**
	 * key: word, value: tf * idf for the query article.
	 */
	private static Map<String, Double> tfIdfMap = new HashMap<String, Double>();

	static {
		// Initialize the NLPIR segmenter and obtain a pooled DB connection.
		try {
			int init_flag = NLPIR.init_NLPIR();
			if (init_flag == 0) {
				System.out.println("NLPIR初始化失败！");
			} else {
				System.out.println("NLPIR初始化成功！");
			}
			pool = ConnectionPool.getInstance();
			conn = (Connection) pool.getConnection();
		} catch (UnsupportedEncodingException e) {
			// Initialization failure leaves conn null; subsequent calls will fail fast.
			e.printStackTrace();
		}
	}

	/**
	 * Reads every article of the corpus.
	 *
	 * @return map of infoId to contentBody for all rows of knowledgeInfo
	 * @throws SQLException on any JDBC failure
	 */
	public static HashMap<Long, String> readAllNews()
			throws FileNotFoundException, IOException, SQLException {
		String sql = "SELECT infoId,contentBody FROM knowledgeInfo";
		HashMap<Long, String> map = new HashMap<Long, String>();
		stmt = (PreparedStatement) conn.prepareStatement(sql);
		try {
			ResultSet rs = stmt.executeQuery();
			try {
				while (rs.next()) {
					map.put(rs.getLong("infoId"), rs.getString("contentBody"));
				}
			} finally {
				// Close explicitly: the original leaked both rs and stmt.
				rs.close();
			}
		} finally {
			stmt.close();
		}
		return map;
	}

	/**
	 * Reads the body of a single article.
	 *
	 * @param newsid the infoId to look up
	 * @return the article's contentBody, or "" if no row matches
	 * @throws SQLException on any JDBC failure
	 */
	public static String readNews(long newsid) throws FileNotFoundException,
			IOException, SQLException {
		String sql = "SELECT infoId,contentBody FROM knowledgeInfo WHERE infoId=?";
		String content = "";
		stmt = (PreparedStatement) conn.prepareStatement(sql);
		try {
			stmt.setLong(1, newsid);
			ResultSet rs = stmt.executeQuery();
			try {
				while (rs.next()) {
					content = rs.getString("contentBody");
				}
			} finally {
				rs.close();
			}
		} finally {
			stmt.close();
		}
		return content;
	}

	/**
	 * Loads the stop-word list, one word per line, UTF-8 encoded.
	 *
	 * @return the set of stop words
	 * @throws IOException if the file cannot be read
	 */
	private static Set<String> getStopWordSet() throws IOException {
		BufferedReader br = new BufferedReader(new InputStreamReader(
				new FileInputStream(NLPIR.stopWordPath), "UTF-8"));
		Set<String> stopWordSet_pri = new HashSet<String>();
		try {
			String stopWord;
			while ((stopWord = br.readLine()) != null) {
				stopWordSet_pri.add(stopWord);
			}
		} finally {
			// finally guarantees the reader closes even if readLine throws.
			br.close();
		}
		return stopWordSet_pri;
	}

	/**
	 * Segments the given text with NLPIR and counts each non-stop-word token.
	 *
	 * @param content raw article text
	 * @return map of token to occurrence count (stop words and empty tokens excluded)
	 */
	private static Map<String, Integer> segString(String content) {
		Map<String, Integer> words = new HashMap<String, Integer>();
		// NLPIR_ParagraphProcess(content, 0): segment without POS tags; tokens
		// come back space-separated.
		String[] cutWords = nLPIR.NLPIR_ParagraphProcess(content, 0).split(" ");
		// Single filtered counting pass replaces the original null-marking /
		// rejoin / re-split dance (which also silently dropped literal "null" tokens).
		for (String word : cutWords) {
			if (word.equals("") || stopWordSet.contains(word)) {
				continue;
			}
			Integer count = words.get(word);
			words.put(word, count == null ? 1 : count + 1);
		}
		return words;
	}

	/**
	 * Converts term counts to term frequency: tf(w,d) = count(w, d) / size(d),
	 * where size(d) is the TOTAL token count of the document (the original
	 * divided by the distinct-word count, contradicting its own doc; since the
	 * denominator is constant per document, the keyword ranking is unchanged).
	 *
	 * @param segWordsResult term counts from {@link #segString(String)}
	 * @return the shared static tf map, repopulated for this document
	 */
	private static HashMap<String, Double> cal_tf(
			Map<String, Integer> segWordsResult) {
		// Clear first so a previous article's values cannot leak into this run.
		tf.clear();
		if (segWordsResult == null || segWordsResult.size() == 0) {
			return tf;
		}
		double total = 0d;
		for (Integer count : segWordsResult.values()) {
			total += count;
		}
		for (Entry<String, Integer> entry : segWordsResult.entrySet()) {
			tf.put(entry.getKey(), entry.getValue() / total);
		}
		return tf;
	}

	/**
	 * Counts, for every word, how many documents contain it.
	 *
	 * @param allSegsMap per-document term-count maps
	 * @return the shared static document-frequency map, repopulated
	 */
	private static Map<String, Integer> containWordOfAllDocNumber(
			Map<Long, Map<String, Integer>> allSegsMap) {
		// Reset: the original accumulated across calls, double-counting documents.
		containWordOfAllDocNumberMap.clear();
		if (allSegsMap == null || allSegsMap.size() == 0) {
			return containWordOfAllDocNumberMap;
		}
		for (Map<String, Integer> fileSegs : allSegsMap.values()) {
			// Skip documents that produced no terms.
			if (fileSegs == null || fileSegs.size() == 0) {
				continue;
			}
			// Each document contributes at most 1 per distinct word.
			for (String seg : fileSegs.keySet()) {
				Integer n = containWordOfAllDocNumberMap.get(seg);
				containWordOfAllDocNumberMap.put(seg, n == null ? 1 : n + 1);
			}
		}
		return containWordOfAllDocNumberMap;
	}

	/**
	 * Computes idf(w) = log(n / (docs(w, D) + 1)) for every word of the query
	 * article ({@link #word_segs}); the +1 smoothing avoids division by zero.
	 *
	 * @param allSegsMap per-document term-count maps for the whole corpus
	 * @return the shared static idf map, repopulated
	 */
	public static Map<String, Double> idf(
			Map<Long, Map<String, Integer>> allSegsMap) {
		idfMap.clear();
		if (allSegsMap == null || allSegsMap.size() == 0) {
			return idfMap;
		}
		containWordOfAllDocNumberMap = containWordOfAllDocNumber(allSegsMap);
		double docCount = allSegsMap.size();
		for (String word : word_segs.keySet()) {
			double docsWithWord = 0d;
			Integer docs = containWordOfAllDocNumberMap.get(word);
			if (docs != null) {
				docsWithWord = docs;
			}
			idfMap.put(word, Math.log(docCount / (docsWithWord + 1.0d)));
		}
		return idfMap;
	}

	/**
	 * Combines tf and idf: tfidf(w) = tf(w) * idf(w).
	 *
	 * @param TfMap term frequencies of the query article (the original ignored
	 *              this parameter and read the static field instead)
	 * @param idf   inverse document frequencies
	 * @return the shared static tf-idf map, repopulated
	 */
	public static Map<String, Double> tfIdf(Map<String, Double> TfMap,
			Map<String, Double> idf) {
		tfIdfMap.clear();
		if (TfMap == null || idf == null) {
			return tfIdfMap;
		}
		for (Entry<String, Double> entry : TfMap.entrySet()) {
			Double idfValue = idf.get(entry.getKey());
			// Null guard: the original unboxed a possibly-missing idf value (NPE).
			if (idfValue != null) {
				tfIdfMap.put(entry.getKey(), entry.getValue() * idfValue);
			}
		}
		return tfIdfMap;
	}

	/**
	 * End-to-end keyword extraction for one article.
	 *
	 * @param infoId id of the article in knowledgeInfo
	 * @return a single-entry map {top word -> its tf-idf}; empty if the article
	 *         yields no terms. (The original's reverse indexing returned the
	 *         LOWEST-scoring word despite its "take the first value" comment.)
	 * @throws SQLException on any JDBC failure
	 * @throws IOException  if the stop-word file cannot be read
	 */
	public static Map<String, Double> getTF_IDF(Long infoId)
			throws FileNotFoundException, IOException, SQLException {
		stopWordSet = getStopWordSet();
		// Load and segment the target article.
		String content = readNews(infoId);
		word_segs = segString(content);
		HashMap<String, Double> tfResult = cal_tf(word_segs);
		// Segment the whole corpus for the idf statistics; reset between runs.
		allFilesSegsMap.clear();
		HashMap<Long, String> news_map = readAllNews();
		for (Entry<Long, String> news : news_map.entrySet()) {
			allFilesSegsMap.put(news.getKey(), segString(news.getValue()));
		}
		idfMap = idf(allFilesSegsMap);
		tfIdfMap = tfIdf(tfResult, idfMap);
		// Sort descending by score and keep the single best term.
		List<Map.Entry<String, Double>> entryList = new ArrayList<Map.Entry<String, Double>>(
				tfIdfMap.entrySet());
		Collections.sort(entryList,
				new Comparator<Map.Entry<String, Double>>() {
					@Override
					public int compare(Entry<String, Double> o1,
							Entry<String, Double> o2) {
						return o2.getValue().compareTo(o1.getValue());
					}
				});
		Map<String, Double> result_map = new HashMap<String, Double>();
		if (!entryList.isEmpty()) {
			Entry<String, Double> top = entryList.get(0);
			result_map.put(top.getKey(), top.getValue());
		}
		return result_map;
	}

}
