package com.util.textProcess;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import com.mysql.jdbc.Connection;
import com.mysql.jdbc.PreparedStatement;
import com.util.textProcess.NLPIR.CLibrary;

/**
 * <p>
 * Title: TfIdfAlgorithm
 * </p>
 * <p>
 * Description: TF-IDF implementation over news documents stored in MySQL.
 * Term counts come from NLPIR word segmentation; the final document vectors
 * keep only the terms that NLPIR extracted as keywords for each document.
 * </p>
 *
 * <p>
 * NOTE(review): all intermediate maps are static and accumulate across calls;
 * call {@link #getDocVector()} once per JVM, or clear the maps before reuse.
 * This class is not thread-safe (shared static {@code stmt} and maps).
 * </p>
 *
 * @createDate：2013-8-25
 * @author xq
 * @version 1.0
 */
public class TfIdfAlgorithm {
	/** Shared connection pool, obtained once in the static initializer. */
	static ConnectionPool pool = null;
	/** Shared database connection taken from the pool. */
	static Connection conn;
	/** Most recent prepared statement; kept as a field for legacy callers. */
	static PreparedStatement stmt;

	static {
		pool = ConnectionPool.getInstance();
		conn = (Connection) pool.getConnection();
	}

	// Stop words loaded from NLPIR.stopWordPath (filled by getStopWordSet()).
	static Set<String> stopWordSet = new HashSet<String>();

	// NLPIR segmentation engine (initialized further below in a static block).
	public static CLibrary nLPIR = NLPIR.getInstance();

	/**
	 * tf of every document. key: document id, value: term -&gt; tf.
	 */
	private static Map<Long, Map<String, Double>> allTfMap = new HashMap<Long, Map<String, Double>>();

	/**
	 * Segmentation counts of every document. key: document id,
	 * value: term -&gt; raw occurrence count.
	 */
	private static Map<Long, Map<String, Integer>> allSegsMap = new HashMap<Long, Map<String, Integer>>();

	/**
	 * idf of every term: log of total document count n over the number of
	 * documents docs(w, D) containing the term (Inverse Document Frequency).
	 */
	private static Map<String, Double> idfMap = new HashMap<String, Double>();

	/**
	 * Document frequency. key: term, value: number of documents containing it.
	 */
	private static Map<String, Integer> containWordOfAllDocNumberMap = new HashMap<String, Integer>();

	/**
	 * TF-IDF of every document. key: document id, value: term -&gt; tf*idf.
	 */
	private static Map<Long, Map<String, Double>> tfIdfMap = new HashMap<Long, Map<String, Double>>();

	/** Final document vectors restricted to each document's NLPIR keywords. */
	static HashMap<Long, LinkedHashMap<String, Double>> docsVector = new HashMap<Long, LinkedHashMap<String, Double>>();

	/** Document id -> raw content body. */
	private static HashMap<Long, String> allFileMap = new HashMap<Long, String>();

	/** Document id -> '#'-separated NLPIR keyword string. */
	private static HashMap<Long, String> allKeyWordsMap = new HashMap<Long, String>();

	/**
	 * Reads every document from the knowledgeInfo table.
	 * Side effect: fills {@link #allKeyWordsMap} with each document's
	 * stored keyword string.
	 *
	 * @return document id -> content body
	 * @throws SQLException on database failure
	 */
	public static HashMap<Long, String> readAllNews()
			throws FileNotFoundException, IOException, SQLException {
		String sql = "SELECT infoId,contentBody,nlpirKeyWords FROM knowledgeInfo";
		stmt = (PreparedStatement) conn.prepareStatement(sql);
		HashMap<Long, String> map = new HashMap<Long, String>();
		// Close statement and result set even on failure (the original
		// leaked both).
		try {
			ResultSet rs = stmt.executeQuery();
			try {
				while (rs.next()) {
					Long infoId = rs.getLong("infoId");
					map.put(infoId, rs.getString("contentBody"));
					allKeyWordsMap.put(infoId, rs.getString("nlpirKeyWords"));
				}
			} finally {
				rs.close();
			}
		} finally {
			stmt.close();
		}
		return map;
	}

	/**
	 * Reads the content body of a single document.
	 *
	 * @param newsid primary key in knowledgeInfo
	 * @return the content body, or "" when the id does not exist
	 * @throws SQLException on database failure
	 */
	public static String readNews(long newsid) throws FileNotFoundException,
			IOException, SQLException {
		String sql = "SELECT infoId,contentBody FROM knowledgeInfo WHERE infoId=?";
		stmt = (PreparedStatement) conn.prepareStatement(sql);
		String content = "";
		// Close statement and result set even on failure (the original
		// leaked both).
		try {
			stmt.setLong(1, newsid);
			ResultSet rs = stmt.executeQuery();
			try {
				while (rs.next()) {
					content = rs.getString("contentBody");
				}
			} finally {
				rs.close();
			}
		} finally {
			stmt.close();
		}
		return content;
	}

	/**
	 * Segments content with NLPIR and counts term occurrences into target.
	 * Empty tokens, stop words and the literal token "null" (preserved legacy
	 * filter) are skipped. Shared by {@link #segString(String)} and
	 * {@link #segStr(String)}, which previously duplicated this logic.
	 *
	 * @param content raw text to segment
	 * @param target  map receiving term -> count (its type decides ordering)
	 * @return the target map, for convenience
	 */
	private static Map<String, Integer> countSegments(String content,
			Map<String, Integer> target) {
		String[] tokens = nLPIR.NLPIR_ParagraphProcess(content, 0).split(" ");
		for (String token : tokens) {
			if (token.equals("") || token.equals("null")
					|| stopWordSet.contains(token)) {
				continue;
			}
			Integer old = target.get(token);
			target.put(token, old == null ? 1 : old + 1);
		}
		return target;
	}

	/**
	 * Segments a string with NLPIR and counts each term's occurrences
	 * (unordered result).
	 *
	 * @param content raw text
	 * @return term -> occurrence count
	 */
	private static Map<String, Integer> segString(String content) {
		return countSegments(content, new HashMap<String, Integer>());
	}

	/**
	 * Segments a string with NLPIR and counts each term's occurrences,
	 * preserving first-seen order (LinkedHashMap).
	 *
	 * @param content raw text
	 * @return term -> occurrence count in insertion order
	 */
	public static Map<String, Integer> segStr(String content) {
		return countSegments(content, new LinkedHashMap<String, Integer>());
	}

	/**
	 * Returns up to num highest-frequency terms of length &gt; 1, ordered by
	 * descending count.
	 *
	 * @param num   maximum number of terms to return
	 * @param words term -> count statistics
	 * @return the top terms in descending count order
	 */
	public static Map<String, Integer> getMostFrequentWords(int num,
			Map<String, Integer> words) {

		Map<String, Integer> keywords = new LinkedHashMap<String, Integer>();
		List<Map.Entry<String, Integer>> info = new ArrayList<Map.Entry<String, Integer>>(
				words.entrySet());
		// compareTo avoids the int-subtraction overflow antipattern of the
		// original (obj2.getValue() - obj1.getValue()).
		Collections.sort(info, new Comparator<Map.Entry<String, Integer>>() {
			public int compare(Map.Entry<String, Integer> obj1,
					Map.Entry<String, Integer> obj2) {
				return obj2.getValue().compareTo(obj1.getValue());
			}
		});

		// Emit the high-frequency terms, skipping single-character tokens.
		int count = 0;
		for (Map.Entry<String, Integer> entry : info) {
			if (entry.getKey().length() > 1) {
				if (count >= num) {
					break;
				}
				keywords.put(entry.getKey(), entry.getValue());
				count++;
			}
		}
		return keywords;
	}

	/**
	 * Converts raw counts to term frequency: tf(w, d) = count(w, d) / size(d).
	 * NOTE(review): size(d) here is the number of DISTINCT terms in the
	 * document, not the total token count — original convention, preserved.
	 *
	 * @param segWordsResult term -> raw count for one document
	 * @return term -> tf (empty map for null/empty input)
	 */
	private static HashMap<String, Double> tf(
			Map<String, Integer> segWordsResult) {

		HashMap<String, Double> tf = new HashMap<String, Double>();// normalized
		if (segWordsResult == null || segWordsResult.size() == 0) {
			return tf;
		}
		double size = segWordsResult.size();
		for (Map.Entry<String, Integer> entry : segWordsResult.entrySet()) {
			tf.put(entry.getKey(), entry.getValue() / size);
		}
		return tf;
	}

	/**
	 * Computes tf for every document in the database.
	 * Side effects: fills {@link #allFileMap}, {@link #allSegsMap} and
	 * {@link #allTfMap}.
	 *
	 * @return document id -> (term -> tf)
	 * @throws SQLException on database failure
	 */
	public static Map<Long, Map<String, Double>> allTf() throws SQLException {
		try {
			allFileMap = readAllNews();
			for (Long fileId : allFileMap.keySet()) {
				Map<String, Integer> segs = segString(allFileMap.get(fileId));
				allSegsMap.put(fileId, segs);
				allTfMap.put(fileId, tf(segs));
			}
		} catch (FileNotFoundException ffe) {
			ffe.printStackTrace();
		} catch (IOException io) {
			io.printStackTrace();
		}
		return allTfMap;
	}

	/**
	 * Segments every document and returns the per-document counts
	 * (insertion-ordered maps).
	 *
	 * @return document id -> (term -> count)
	 * @throws SQLException on database failure
	 */
	public static Map<Long, Map<String, Integer>> wordSegCount()
			throws SQLException {
		try {
			allFileMap = readAllNews();
			for (Long fileId : allFileMap.keySet()) {
				allSegsMap.put(fileId, segStr(allFileMap.get(fileId)));
			}
		} catch (FileNotFoundException ffe) {
			ffe.printStackTrace();
		} catch (IOException io) {
			io.printStackTrace();
		}
		return allSegsMap;
	}

	/**
	 * Counts, for every term, the number of documents that contain it
	 * (document frequency).
	 *
	 * @param allSegsMap document id -> (term -> count)
	 * @return term -> document frequency
	 */
	private static Map<String, Integer> containWordOfAllDocNumber(
			Map<Long, Map<String, Integer>> allSegsMap) {
		if (allSegsMap == null || allSegsMap.size() == 0) {
			return containWordOfAllDocNumberMap;
		}

		for (Long id : allSegsMap.keySet()) {
			Map<String, Integer> fileSegs = allSegsMap.get(id);
			// Skip documents with no segmentation result.
			if (fileSegs == null || fileSegs.size() == 0) {
				continue;
			}
			// Each distinct term counts once per document.
			for (String seg : fileSegs.keySet()) {
				Integer old = containWordOfAllDocNumberMap.get(seg);
				containWordOfAllDocNumberMap.put(seg, old == null ? 1 : old + 1);
			}
		}
		return containWordOfAllDocNumberMap;
	}

	/**
	 * Computes idf = log(n / (docs(w, D) + 1)) for every term, where n is the
	 * total number of documents.
	 *
	 * @param allSegsMap document id -> (term -> count)
	 * @return term -> idf
	 */
	public static Map<String, Double> idf(
			Map<Long, Map<String, Integer>> allSegsMap) {
		if (allSegsMap == null || allSegsMap.size() == 0) {
			return idfMap;
		}
		containWordOfAllDocNumberMap = containWordOfAllDocNumber(allSegsMap);
		// BUG FIX: n must be the total DOCUMENT count. The original used
		// containWordOfAllDocNumberMap.size() — the vocabulary size — which
		// contradicts the documented formula idf = log(n / docs(w, D)) and
		// inflated every idf value. The +1 denominator smoothing is kept.
		double docCount = allSegsMap.size();
		for (Map.Entry<String, Integer> entry : containWordOfAllDocNumberMap
				.entrySet()) {
			idfMap.put(entry.getKey(),
					Math.log(docCount / (entry.getValue() + 1.0d)));
		}
		return idfMap;
	}

	/**
	 * Combines tf and idf: tf-idf(w, d) = tf(w, d) * idf(w).
	 *
	 * @param allTfMap document id -> (term -> tf)
	 * @param idf      term -> idf
	 * @return document id -> (term -> tf*idf)
	 */
	public static Map<Long, Map<String, Double>> tfIdf(
			Map<Long, Map<String, Double>> allTfMap, Map<String, Double> idf) {

		for (Map.Entry<Long, Map<String, Double>> doc : allTfMap.entrySet()) {
			Map<String, Double> docTfIdf = new HashMap<String, Double>();
			for (Map.Entry<String, Double> term : doc.getValue().entrySet()) {
				Double idfValue = idf.get(term.getKey());
				// A term missing from the idf table contributes weight 0
				// (the original threw a NullPointerException on unboxing).
				docTfIdf.put(term.getKey(), term.getValue()
						* (idfValue == null ? 0.0d : idfValue.doubleValue()));
			}
			tfIdfMap.put(doc.getKey(), docTfIdf);
		}
		return tfIdfMap;
	}

	/**
	 * Loads the stop-word list (one word per line, UTF-8) from
	 * NLPIR.stopWordPath.
	 *
	 * @return the set of stop words
	 * @throws IOException when the file cannot be read
	 */
	private static Set<String> getStopWordSet() throws IOException {
		Set<String> stopWordSet_pri = new HashSet<String>();
		BufferedReader br = new BufferedReader(new InputStreamReader(
				new FileInputStream(NLPIR.stopWordPath), "UTF-8"));
		// Close the reader even when readLine throws (the original leaked
		// the stream on failure).
		try {
			String stopWord;
			while ((stopWord = br.readLine()) != null) {
				stopWordSet_pri.add(stopWord);
			}
		} finally {
			br.close();
		}
		return stopWordSet_pri;
	}

	static {
		// Initialize NLPIR and import the stop-word black list.
		int init_flag;
		try {
			init_flag = NLPIR.init_NLPIR();
			if (init_flag == 0) {
				System.out.println("NLPIR初始化失败！");
			} else {
				System.out.println("NLPIR初始化成功！");
				// Register stop words with the segmenter.
				int addStopWordFlag = nLPIR
						.NLPIR_ImportKeyBlackList(NLPIR.stopWordPath);
				if (addStopWordFlag == 0) {
					System.out.println("去停用词失败！");
				} else {
					System.out.println("去停用词成功且停用词数目为：" + addStopWordFlag);
				}
			}

		} catch (UnsupportedEncodingException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Builds a tf-idf vector for every document, restricted to the terms that
	 * NLPIR extracted as keywords for that document (stored '#'-separated in
	 * the database and cached in {@link #allKeyWordsMap}).
	 *
	 * @return document id -> (keyword -> tf*idf), in {@link #docsVector}
	 * @throws IOException  when the stop-word file cannot be read
	 * @throws SQLException on database failure
	 */
	public static HashMap<Long, LinkedHashMap<String, Double>> getDocVector()
			throws IOException, SQLException {
		stopWordSet = getStopWordSet();
		Map<Long, Map<String, Double>> allTfMap = allTf();
		Map<String, Double> idfMap = idf(allSegsMap);
		tfIdfMap = tfIdf(allTfMap, idfMap);

		for (Map.Entry<Long, Map<String, Double>> doc : tfIdfMap.entrySet()) {
			Long id = doc.getKey();
			Map<String, Double> tfIdf = doc.getValue();
			// Feature terms for this document.
			Set<String> keyWordSet = new HashSet<String>();
			String keywords_str = allKeyWordsMap.get(id);
			// Guard: a document without a stored keyword string gets an empty
			// vector (the original threw a NullPointerException on split()).
			if (keywords_str != null) {
				for (String keyword : keywords_str.split("\\#")) {
					keyWordSet.add(keyword);
				}
			}
			LinkedHashMap<String, Double> linkmap = new LinkedHashMap<String, Double>();
			for (String word : tfIdf.keySet()) {
				if (keyWordSet.contains(word)) {
					linkmap.put(word, tfIdf.get(word));
				}
			}
			docsVector.put(id, linkmap);
		}
		return docsVector;
	}
}
