package com.ruoyi.nlp.protocol.imp;

import java.io.*;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import com.ruoyi.nlp.buildwords.Config;
import com.ruoyi.nlp.protocol.INlpService;
import com.ruoyi.nlp.utils.RuleUtil;
import com.ruoyi.nlp.utils.SQLManager;
import com.ruoyi.nlp.utils.TxtReadUtil;
import com.ruoyi.nlp.buildwords.BuildNewWords;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.corpus.dependency.CoNll.CoNLLSentence;
import com.hankcs.hanlp.corpus.dependency.CoNll.CoNLLWord;
import com.hankcs.hanlp.dictionary.CoreSynonymDictionary;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.seg.common.Term;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Service;

/**
 * NLP service implementation backed by HanLP plus several word dictionaries
 * (database word tables and classpath txt resources).
 *
 * NOTE (original author): inject this service with the @Lazy annotation,
 * because constructing it loads every dictionary up front.
 */
@Service
public class NlpServiceImp implements INlpService {

	// HanLP segmenter, created once in the constructor
	Segment segment = null;
	// database access helper for the word tables
	SQLManager aSQLManager = null;
	// part-of-speech categories, lazily loaded from nlp/pos_category.txt
	List<HashMap<String, String>> posCategoryList;
	// dependency-relation entries, lazily loaded from nlp/dependency.txt
	List<HashMap<String, String>> dependencyList;


	// Dictionary-segmentation state. All containers below are static: they are
	// shared by every instance and populated once by the first constructor run.

	// general word table + thesauri combined
	private static final HashSet<String> dic_all = new HashSet<>();
	private static int max_length_all = 0;

	// general word table only
	private static final HashSet<String> dic_general = new HashSet<>();
	private static int max_length_general = 0;

	// thesauri only
	private static final HashSet<String> dic_thesauri = new HashSet<>();
	private static int max_length_thesauri = 0;

	// keywords loaded from nlp/keywords.txt
	private static final List<String> dic_keywords = new ArrayList<>();
	private static int max_length_keywords = 0;

	// keyword plural form -> singular form (from nlp/keywords_plurals.txt)
	private static final HashMap<String,String> plurals_keywords = new HashMap<String,String>();

	// stop words: database rows plus a few hard-coded entries
	private static final HashSet<String> stop_keywords = new HashSet<>();

	/**
	 * Builds the service and eagerly loads every dictionary it uses: stop
	 * words and word tables from the database (via SQLManager), plus the
	 * keyword / keyword-plural txt files from the classpath. The dictionary
	 * containers are static, so only the first instance pays this cost.
	 *
	 * Fixes over the original: readers are closed via try-with-resources
	 * (they leaked on any read error), a missing classpath resource now
	 * raises an explicit FileNotFoundException instead of an NPE, and the
	 * "word≡alias" splitting — previously duplicated three times and calling
	 * split() up to six times per entry — is factored into addPairedWords().
	 */
	public NlpServiceImp() {

		// HanLP segmenter
		if (segment == null) {
			segment = HanLP.newSegment();
		}
		// database access
		if (aSQLManager == null) {
			aSQLManager = new SQLManager();
		}

		// stop words: database rows plus a few hard-coded entries
		if (stop_keywords.size() == 0) {
			stop_keywords.addAll(aSQLManager.getNlpStopWord());
		}
		stop_keywords.add("is");
		stop_keywords.add("not");
		stop_keywords.add("方法");

		// [general + thesauri] combined dictionary, plus keyword plural forms
		if (dic_all.size() == 0) {
			lordBuddhaBless();
			try {
				System.out.println("开始初始化[普通词表+叙词表]词典");
				int max = 1;
				int count = 0;
				// general word table
				int[] stats = addPairedWords(aSQLManager.getGeneralWord(), dic_all, max);
				max = stats[0];
				count += stats[1];
				// thesauri table
				stats = addPairedWords(aSQLManager.getThesauriWord(), dic_all, max);
				max = stats[0];
				count += stats[1];
				// keyword plural forms (classpath resource, "singular###plural" per line)
				try {
					System.out.println("开始初始化[关键词-复数]词典");
					try (BufferedReader br = openResource("nlp/keywords_plurals.txt")) {
						String s;
						while ((s = br.readLine()) != null) {
							String[] pair = s.split("###");
							String word = pair[0].toLowerCase();
							String plural = pair[1].toLowerCase();
							// first mapping for a plural wins, as before
							if (!plurals_keywords.containsKey(plural)) {
								plurals_keywords.put(plural, word);
							}
							dic_all.add(plural);
							count++;
							if (plural.length() > max) {
								max = plural.length();
							}
						}
					}
					System.out.println("完成初始化[关键词-复数]词典，词数目：" + count);
				} catch (IOException ex) {
					ex.printStackTrace();
					System.err.println("[关键词-复数]词典装载失败:" + ex.getMessage());
				}

				max_length_all = max;
				System.out.println("完成初始化[普通词表+叙词表]词典，词数目：" + count);
				System.out.println("最大分词长度：" + max_length_all);

			} catch (Exception ex) {
				ex.printStackTrace();
				System.err.println("[普通词表+叙词表+关键词-复数]词典装载失败:" + ex.getMessage());
			}
		}

		// general word table only
		if (dic_general.size() == 0) {
			try {
				System.out.println("开始初始化[普通词表]词典");
				int[] stats = addPairedWords(aSQLManager.getGeneralWord(), dic_general, 1);
				max_length_general = stats[0];
				System.out.println("完成初始化[普通词表]词典，词数目：" + stats[1]);
				System.out.println("最大分词长度：" + max_length_general);
			} catch (Exception ex) {
				ex.printStackTrace();
				System.err.println("[普通词表]词典装载失败:" + ex.getMessage());
			}
		}

		// thesauri table only
		if (dic_thesauri.size() == 0) {
			try {
				System.out.println("开始初始化[叙词表]词典");
				int[] stats = addPairedWords(aSQLManager.getThesauriWord(), dic_thesauri, 1);
				max_length_thesauri = stats[0];
				System.out.println("完成初始化[叙词表]词典，词数目：" + stats[1]);
				System.out.println("最大分词长度：" + max_length_thesauri);
			} catch (Exception ex) {
				ex.printStackTrace();
				System.err.println("[叙词表]词典装载失败:" + ex.getMessage());
			}
		}

		// keyword dictionary (classpath resource, one keyword per line)
		if (dic_keywords.size() == 0) {
			try {
				System.out.println("开始初始化[关键词]词典");
				int max = 1;
				int count = 0;
				try (BufferedReader br = openResource("nlp/keywords.txt")) {
					String s;
					while ((s = br.readLine()) != null) {
						dic_keywords.add(s);
						count++;
						if (s.length() > max) {
							max = s.length();
						}
					}
				}
				max_length_keywords = max;
				System.out.println("完成初始化[关键词]词典，词数目：" + count);
				System.out.println("最大分词长度：" + max_length_keywords);
			} catch (IOException ex) {
				ex.printStackTrace();
				System.err.println("[关键词]词典装载失败:" + ex.getMessage());
			}
		}
	}

	/**
	 * Adds both halves of each "word≡alias" entry to {@code dic}. Only the
	 * first two pieces of the split are used, matching the original loading
	 * loops; split() is now called once per entry instead of up to six times.
	 *
	 * @param wordList raw "word≡alias" rows from the database
	 * @param dic target dictionary
	 * @param max current maximum word length
	 * @return {updated maximum word length, number of pieces added}
	 */
	private static int[] addPairedWords(List<String> wordList, Set<String> dic, int max) {
		int count = 0;
		for (String wordName : wordList) {
			String[] parts = wordName.split("≡");
			int limit = Math.min(parts.length, 2);
			for (int i = 0; i < limit; i++) {
				dic.add(parts[i]);
				count++;
				if (parts[i].length() > max) {
					max = parts[i].length();
				}
			}
		}
		return new int[] { max, count };
	}

	/**
	 * Opens a UTF-8 buffered reader over a classpath resource.
	 *
	 * @param path resource path relative to the classpath root
	 * @throws IOException if the resource is missing (previously a raw NPE on
	 *             the null stream) or cannot be opened
	 */
	private BufferedReader openResource(String path) throws IOException {
		InputStream in = getClass().getClassLoader().getResourceAsStream(path);
		if (in == null) {
			throw new FileNotFoundException("classpath resource not found: " + path);
		}
		return new BufferedReader(new InputStreamReader(in, "utf-8"));
	}

	/**
	 * Resolves the display color for a part-of-speech code.
	 * If several entries share the code, the last one wins (preserved from
	 * the original implementation).
	 *
	 * @param natureStr part-of-speech code, e.g. "n"
	 * @return the matching color, or "" when the code is unknown
	 */
	private String getNatureColor(String natureStr) {
		if (posCategoryList == null || posCategoryList.size() == 0) {
			getAllPosCategory();
		}
		String matchedColor = "";
		for (HashMap<String, String> entry : posCategoryList) {
			String code = entry.get("code");
			if (natureStr.equals(code)) {
				matchedColor = entry.get("color");
			}
		}
		return matchedColor;
	}

	/**
	 * Resolves the human-readable name for a part-of-speech code.
	 * If several entries share the code, the last one wins (preserved from
	 * the original implementation).
	 *
	 * @param natureStr part-of-speech code
	 * @return the matching name, or "" when the code is unknown
	 */
	private String getNatureName(String natureStr) {
		if (posCategoryList == null || posCategoryList.size() == 0) {
			getAllPosCategory();
		}
		String matchedName = "";
		for (HashMap<String, String> entry : posCategoryList) {
			String code = entry.get("code");
			if (natureStr.equals(code)) {
				matchedName = entry.get("name");
			}
		}
		return matchedName;
	}

	/**
	 * Maps a dependency-relation display name to its code.
	 * If several entries share the name, the last one wins (preserved from
	 * the original implementation).
	 *
	 * @param depRelStr dependency-relation name as produced by the parser
	 * @return the matching code, or "" when the name is unknown
	 */
	private String getDepRelCode(String depRelStr) {
		if (dependencyList == null || dependencyList.size() == 0) {
			getAllDependency();
		}
		String matchedCode = "";
		for (HashMap<String, String> entry : dependencyList) {
			String name = entry.get("name");
			if (name.equals(depRelStr)) {
				matchedCode = entry.get("code");
			}
		}
		return matchedCode;
	}

	/**
	 * Lazily loads the dependency-relation table from classpath resource
	 * nlp/dependency.txt (parsed by TxtReadUtil.txt2String) and caches it in
	 * {@link #dependencyList}.
	 *
	 * Fix: the reader is now closed via try-with-resources (it leaked in the
	 * original), and a missing resource raises an explicit exception instead
	 * of an NPE on the null stream.
	 *
	 * @return the cached list; may stay null when loading fails (unchanged
	 *         from the original behavior)
	 */
	@Override
	public List<HashMap<String, String>> getAllDependency() {
		if (dependencyList == null || dependencyList.size() == 0) {
			String path = "nlp/dependency.txt";
			try {
				InputStream in = getClass().getClassLoader().getResourceAsStream(path);
				if (in == null) {
					throw new FileNotFoundException(path);
				}
				try (BufferedReader br = new BufferedReader(new InputStreamReader(in, "utf-8"))) {
					dependencyList = TxtReadUtil.txt2String(br);
				}
			} catch (Exception e) {
				System.out.println("获取文件" + path + "内容失败");
			}
		}
		return dependencyList;
	}

	/**
	 * Lazily loads the part-of-speech category table from classpath resource
	 * nlp/pos_category.txt (parsed by TxtReadUtil.txt2StringForPos) and
	 * caches it in {@link #posCategoryList}.
	 *
	 * Fix: the reader is now closed via try-with-resources (it leaked in the
	 * original), and a missing resource raises an explicit exception instead
	 * of an NPE on the null stream.
	 *
	 * @return the cached list; may stay null when loading fails (unchanged
	 *         from the original behavior)
	 */
	@Override
	public List<HashMap<String, String>> getAllPosCategory() {
		if (posCategoryList == null || posCategoryList.size() == 0) {
			String path = "nlp/pos_category.txt";
			try {
				InputStream in = getClass().getClassLoader().getResourceAsStream(path);
				if (in == null) {
					throw new FileNotFoundException(path);
				}
				try (BufferedReader br = new BufferedReader(new InputStreamReader(in, "utf-8"))) {
					posCategoryList = TxtReadUtil.txt2StringForPos(br);
				}
			} catch (Exception e) {
				System.out.println("获取文件" + path + "内容失败");
			}
		}
		return posCategoryList;
	}

	/**
	 * Segments text WITHOUT the custom dictionary and with part-of-speech
	 * tagging, returning one map per term with keys: word, color, nature,
	 * frequency.
	 *
	 * Fix: Term.toString() ("word/nature") was split three times per term;
	 * it is now split once.
	 *
	 * @param text input text
	 * @return list of term descriptions, in segmentation order
	 */
	@Override
	public List<HashMap<String, Object>> posTagging(String text) {
		List<HashMap<String, Object>> r = new ArrayList<HashMap<String, Object>>();
		segment.enableCustomDictionary(false).enablePartOfSpeechTagging(true);
		List<Term> list = segment.seg(text);
		for (Term aTerm : list) {
			String[] parts = aTerm.toString().split("/");
			HashMap<String, Object> mp = new HashMap<String, Object>();
			mp.put("word", parts[0]);
			mp.put("color", getNatureColor(parts[1]));
			mp.put("nature", parts[1]);
			mp.put("frequency", aTerm.getFrequency());
			r.add(mp);
		}
		return r;
	}

	/**
	 * Segments text WITH the custom dictionary enabled and with
	 * part-of-speech tagging, returning one map per term with keys: word,
	 * color, nature, frequency.
	 *
	 * Fix: Term.toString() ("word/nature") was split three times per term;
	 * it is now split once. The debug print of each term is kept as-is.
	 *
	 * @param text input text
	 * @return list of term descriptions, in segmentation order
	 */
	@Override
	public List<HashMap<String, Object>> posTaggingByWords(String text) {
		List<HashMap<String, Object>> r = new ArrayList<HashMap<String, Object>>();
		segment.enableCustomDictionary(true).enablePartOfSpeechTagging(true);
		List<Term> list = segment.seg(text);
		for (Term aTerm : list) {
			System.out.println("aTerm=" + aTerm);
			String[] parts = aTerm.toString().split("/");
			HashMap<String, Object> mp = new HashMap<String, Object>();
			mp.put("word", parts[0]);
			mp.put("color", getNatureColor(parts[1]));
			mp.put("nature", parts[1]);
			mp.put("frequency", aTerm.getFrequency());
			r.add(mp);
		}
		return r;
	}

	/**
	 * Segments text with all HanLP named-entity recognizers enabled (custom
	 * dictionary off), returning one map per term with keys: word, color,
	 * nature, frequency.
	 *
	 * Fix: Term.toString() ("word/nature") was split three times per term;
	 * it is now split once.
	 *
	 * @param text input text
	 * @return list of term descriptions, in segmentation order
	 */
	@Override
	public List<HashMap<String, Object>> recognizeEntity(String text) {
		// enable every named-entity recognizer HanLP offers
		segment.enableCustomDictionary(false).enableAllNamedEntityRecognize(
				true);
		List<HashMap<String, Object>> r = new ArrayList<HashMap<String, Object>>();
		List<Term> list = segment.seg(text);
		for (Term aTerm : list) {
			String[] parts = aTerm.toString().split("/");
			HashMap<String, Object> mp = new HashMap<String, Object>();
			mp.put("word", parts[0]);
			mp.put("color", getNatureColor(parts[1]));
			mp.put("nature", parts[1]);
			mp.put("frequency", aTerm.getFrequency());
			r.add(mp);
		}
		return r;
	}

	/**
	 * Runs HanLP dependency parsing and returns one map per word with keys:
	 * dep_rel (mapped to its code), father_id (head word id), id, postag
	 * (coarse POS tag), word (lemma).
	 *
	 * Fixes: sentence.getWordArray() was called on every loop iteration and
	 * is now hoisted; a large block of commented-out demo code was removed.
	 *
	 * @param text input sentence
	 * @return list of word descriptions in CoNLL order
	 */
	@Override
	public List<HashMap<String, Object>> dependencyParser(String text) {
		List<HashMap<String, Object>> r = new ArrayList<HashMap<String, Object>>();
		CoNLLSentence sentence = HanLP.parseDependency(text);
		System.out.println(sentence);
		CoNLLWord[] wordArray = sentence.getWordArray();
		for (CoNLLWord aCoNLLWord : wordArray) {
			HashMap<String, Object> mp = new HashMap<String, Object>();
			mp.put("dep_rel", getDepRelCode(aCoNLLWord.DEPREL));
			mp.put("father_id", aCoNLLWord.HEAD.ID);
			mp.put("id", aCoNLLWord.ID);
			mp.put("postag", aCoNLLWord.CPOSTAG);
			mp.put("word", aCoNLLWord.LEMMA);
			r.add(mp);
		}
		return r;
	}

	/**
	 * Extracts an automatic summary via HanLP.
	 *
	 * @param text source document
	 * @param count maximum number of summary sentences to return
	 * @return summary sentences produced by HanLP.extractSummary
	 */
	@Override
	public List<String> autoSummary(String text, int count) {
		List<String> sentences = HanLP.extractSummary(text, count);
		return sentences;
	}

	/**
	 * Extracts keywords via HanLP.
	 *
	 * @param text source document
	 * @param count maximum number of keywords to return
	 * @return keywords produced by HanLP.extractKeyword
	 */
	@Override
	public List<String> autoKeyword(String text, int count) {
		List<String> keywords = HanLP.extractKeyword(text, count);
		return keywords;
	}

	/**
	 * Computes, for every word A in {@code words}, its semantic distance and
	 * similarity to {@code word} using HanLP's core synonym dictionary.
	 *
	 * @param words candidate words (word A of each pair)
	 * @param word the fixed comparison word (word B of each pair)
	 * @return one map per pair with keys: wordA, wordB, distance, similarity
	 */
	@Override
	public List<HashMap<String, Object>> wordDistance(String[] words,
			String word) {
		List<HashMap<String, Object>> results = new ArrayList<HashMap<String, Object>>();
		for (int i = 0; i < words.length; i++) {
			String wordA = words[i];
			HashMap<String, Object> row = new HashMap<String, Object>();
			row.put("wordA", wordA);
			row.put("wordB", word);
			row.put("distance", CoreSynonymDictionary.distance(wordA, word));
			row.put("similarity", CoreSynonymDictionary.similarity(wordA, word));
			results.add(row);
		}
		return results;
	}

	/**
	 * Forward-maximum-matching segmentation against the keyword dictionary.
	 * Single-character matches are dropped, like every other segBy* method.
	 *
	 * Fix: the body was a line-for-line duplicate of the generic matcher
	 * {@code segBywords}; it now delegates to it.
	 *
	 * @param text text to segment
	 * @return matched keywords (length &gt; 1) in order of appearance
	 */
	@Override
	public List<String> segByKeywords(String text) {
		return segBywords(dic_keywords, max_length_keywords, text);
	}

	/**
	 * Forward-maximum-matching segmentation against the combined
	 * [general + thesauri] dictionary. Candidates shrink one character at a
	 * time until they match; single-character results are discarded.
	 *
	 * @param text text to segment
	 * @return matched words (length &gt; 1) in order of appearance
	 */
	@Override
	public List<String> segByAllWords(String text) {
		List<String> result = new ArrayList<>();
		int pos = 0;
		while (pos < text.length()) {
			// probe window: at most max_length_all characters from pos
			int end = pos + max_length_all;
			if (end > text.length()) {
				end = text.length();
			}
			String candidate = text.substring(pos, end);
			// shrink until the candidate is in the dictionary or a single char
			while (!dic_all.contains(candidate)) {
				if (candidate.length() == 1) {
					break;
				}
				candidate = candidate.substring(0, candidate.length() - 1);
			}
			if (candidate.length() > 1 && dic_all.contains(candidate)) {
				result.add(candidate);
			}
			// advance past the consumed characters
			pos += candidate.length();
		}
		return result;
	}

	/**
	 * Forward-maximum-matching segmentation against the general word table
	 * only. Candidates shrink one character at a time until they match;
	 * single-character results are discarded.
	 *
	 * @param text text to segment
	 * @return matched words (length &gt; 1) in order of appearance
	 */
	@Override
	public List<String> segByGeneralWords(String text) {
		List<String> result = new ArrayList<>();
		int pos = 0;
		while (pos < text.length()) {
			// probe window: at most max_length_general characters from pos
			int end = pos + max_length_general;
			if (end > text.length()) {
				end = text.length();
			}
			String candidate = text.substring(pos, end);
			// shrink until the candidate is in the dictionary or a single char
			while (!dic_general.contains(candidate)) {
				if (candidate.length() == 1) {
					break;
				}
				candidate = candidate.substring(0, candidate.length() - 1);
			}
			if (candidate.length() > 1 && dic_general.contains(candidate)) {
				result.add(candidate);
			}
			// advance past the consumed characters
			pos += candidate.length();
		}
		return result;
	}

	/**
	 * Forward-maximum-matching segmentation against the thesauri dictionary
	 * only. Candidates shrink one character at a time until they match;
	 * single-character results are discarded.
	 *
	 * @param text text to segment
	 * @return matched words (length &gt; 1) in order of appearance
	 */
	@Override
	public List<String> segByThesauriWords(String text) {
		List<String> result = new ArrayList<>();
		int pos = 0;
		while (pos < text.length()) {
			// probe window: at most max_length_thesauri characters from pos
			int end = pos + max_length_thesauri;
			if (end > text.length()) {
				end = text.length();
			}
			String candidate = text.substring(pos, end);
			// shrink until the candidate is in the dictionary or a single char
			while (!dic_thesauri.contains(candidate)) {
				if (candidate.length() == 1) {
					break;
				}
				candidate = candidate.substring(0, candidate.length() - 1);
			}
			if (candidate.length() > 1 && dic_thesauri.contains(candidate)) {
				result.add(candidate);
			}
			// advance past the consumed characters
			pos += candidate.length();
		}
		return result;
	}

	/**
	 * Segments text against the combined [general + thesauri] dictionary and
	 * tags every chunk (including unmatched single characters, unlike
	 * segByAllWords) with a highlight color: "#EE9A00" for dictionary hits,
	 * "#FFDEAD" otherwise.
	 *
	 * BUG FIX: the original declared {@code mp} as null and never
	 * instantiated it, so the first {@code mp.put(...)} always threw a
	 * NullPointerException. A fresh map is now created per chunk.
	 *
	 * @param text text to segment
	 * @return list of maps with keys: word, color
	 */
	@Override
	public List<HashMap<String, Object>> segByAllWordsColor(String text) {
		List<HashMap<String, Object>> r = new ArrayList<HashMap<String, Object>>();
		while (text.length() > 0) {
			int len = max_length_all;
			if (text.length() < len) {
				len = text.length();
			}
			// probe the longest window, then shrink until it matches
			String tryWord = text.substring(0, len);
			while (!dic_all.contains(tryWord)) {
				if (tryWord.length() == 1) {
					break;
				}
				tryWord = tryWord.substring(0, tryWord.length() - 1);
			}
			HashMap<String, Object> mp = new HashMap<String, Object>();
			mp.put("word", tryWord);
			mp.put("color", dic_all.contains(tryWord) ? "#EE9A00" : "#FFDEAD");
			r.add(mp);
			// drop the consumed characters
			text = text.substring(tryWord.length());
		}
		return r;
	}

	/**
	 * Weighted word extraction keeping every word whose accumulated weight
	 * reaches {@code thresholdHeight}. Delegates to the private overload with
	 * topNumber fixed to 0 (threshold mode).
	 */
	@Override
	public List<String> segHeightByAllWords(String title, String keywords,
			String summary, String text, String keywordsSeparator,
			double titleHeight, double keywordsHeight, double summaryHeight,
			double textHeight, double thresholdHeight) {
		return segHeightByAllWords(title, keywords, summary, text,
				keywordsSeparator, titleHeight, keywordsHeight, summaryHeight,
				textHeight, thresholdHeight, 0);
	}

	/**
	 * Weighted word extraction keeping the {@code topNumber} highest-weighted
	 * words. Delegates to the private overload with thresholdHeight fixed to
	 * 0.0 (top-N mode).
	 */
	@Override
	public List<String> segHeightByAllWords(String title, String keywords,
			String summary, String text, String keywordsSeparator,
			double titleHeight, double keywordsHeight, double summaryHeight,
			double textHeight, int topNumber) {
		return segHeightByAllWords(title, keywords, summary, text,
				keywordsSeparator, titleHeight, keywordsHeight, summaryHeight,
				textHeight, 0.0, topNumber);
	}

	/**
	 * Scores words across the four document fields. Each field is segmented
	 * with segByAllWords (keywords are instead split on the separator and
	 * kept only if present in the general/thesauri word tables — a keyword
	 * present in BOTH tables is counted twice, as in the original). Per-field
	 * weights accumulate per word; results are sorted by descending weight
	 * and filtered either by threshold ({@code thresholdHeight > 0}) or by
	 * count ({@code topNumber > 0}).
	 *
	 * Fixes: the four copy-pasted accumulation loops now share a helper, and
	 * the hand-written sign comparator is replaced with Double.compare.
	 *
	 * @return entries formatted as "word#score", highest score first
	 */
	private List<String> segHeightByAllWords(String title, String keywords,
			String summary, String text, String keywordsSeparator,
			double titleHeight, double keywordsHeight, double summaryHeight,
			double textHeight, double thresholdHeight, int topNumber) {
		List<String> r = new ArrayList<String>();
		Map<String, Double> wordMap = new HashMap<String, Double>();

		// title words
		List<String> titleList = segByAllWords(title);

		// keywords: keep only those present in the word tables
		List<String> keywordsList = new ArrayList<String>();
		String[] keywordsArray = keywords.split(keywordsSeparator);
		for (int i = 0; i < keywordsArray.length; i++) {
			if (aSQLManager.verificationGeneralWord(keywordsArray[i])) {
				keywordsList.add(keywordsArray[i]);
			}
			if (aSQLManager.verificationThesauriWord(keywordsArray[i])) {
				keywordsList.add(keywordsArray[i]);
			}
		}

		// summary and body words
		List<String> summaryList = segByAllWords(summary);
		List<String> textList = segByAllWords(text);

		// accumulate per-field weights
		for (String wordName : titleList) {
			addHeight(wordMap, wordName, titleHeight);
		}
		for (String wordName : keywordsList) {
			if (!"".equals(wordName)) {
				addHeight(wordMap, wordName, keywordsHeight);
			}
		}
		for (String wordName : textList) {
			addHeight(wordMap, wordName, textHeight);
		}
		for (String wordName : summaryList) {
			addHeight(wordMap, wordName, summaryHeight);
		}

		// sort by weight, highest first
		List<Map.Entry<String, Double>> wordList = new ArrayList<Map.Entry<String, Double>>(
				wordMap.entrySet());
		Collections.sort(wordList, new Comparator<Map.Entry<String, Double>>() {
			public int compare(Map.Entry<String, Double> o1,
					Map.Entry<String, Double> o2) {
				return Double.compare(o2.getValue(), o1.getValue());
			}
		});

		System.out.println("wordList=" + wordList);

		if (thresholdHeight > 0) {
			// threshold mode: keep every word at or above the threshold
			for (int i = 0; i < wordList.size(); i++) {
				String wordName = wordList.get(i).getKey();
				double score = wordList.get(i).getValue();
				if (score >= thresholdHeight) {
					r.add(wordName + "#" + score);
				}
			}
		} else if (topNumber > 0) {
			// top-N mode: keep the highest-weighted words
			int n = Math.min(topNumber, wordList.size());
			for (int i = 0; i < n; i++) {
				String wordName = wordList.get(i).getKey();
				double score = wordList.get(i).getValue();
				r.add(wordName + "#" + score);
			}
		}

		return r;
	}

	/** Adds {@code height} to the accumulated weight of {@code wordName}. */
	private static void addHeight(Map<String, Double> wordMap, String wordName,
			double height) {
		Double current = wordMap.get(wordName);
		wordMap.put(wordName, current == null ? height : current + height);
	}

	/**
	 * Generic forward-maximum-matching segmentation against an arbitrary word
	 * list. Starting at each position, the longest window (up to
	 * {@code maxWordLength}) is probed and shrunk one character at a time
	 * until it is found in {@code words}; single-character chunks are never
	 * added to the result.
	 *
	 * @param words dictionary to match against
	 * @param maxWordLength longest word length in the dictionary
	 * @param text text to segment
	 * @return matched words (length &gt; 1) in order of appearance
	 */
	public List<String> segBywords(List<String> words, int maxWordLength,
			String text) {
		List<String> matched = new ArrayList<>();
		int pos = 0;
		while (pos < text.length()) {
			// probe window: at most maxWordLength characters from pos
			int end = Math.min(pos + maxWordLength, text.length());
			String candidate = text.substring(pos, end);
			// shrink until the candidate is in the dictionary or a single char
			while (!words.contains(candidate)) {
				if (candidate.length() == 1) {
					break;
				}
				candidate = candidate.substring(0, candidate.length() - 1);
			}
			if (words.contains(candidate) && candidate.length() > 1) {
				matched.add(candidate);
			}
			// advance past the consumed characters
			pos += candidate.length();
		}
		return matched;
	}

	/**
	 * Loads dictionary file "nlp/&lt;dicName&gt;.txt" from the classpath into
	 * {@code initWords} (one word per line) when the list is still empty.
	 *
	 * BUG FIX: the original assigned the computed maximum word length to the
	 * {@code maxWordLength} parameter, which has no effect for the caller
	 * (Java passes primitives by value). The value is now returned instead;
	 * existing statement-style calls still compile. Also: the reader is
	 * closed via try-with-resources, and the file is loaded through the
	 * classloader like the rest of this class instead of a filesystem path.
	 *
	 * @param dicName base name of the dictionary file
	 * @param initWords target word list, filled in place
	 * @param maxWordLength value returned unchanged when loading is skipped
	 *            or fails
	 * @return the maximum word length found, or {@code maxWordLength} when
	 *         nothing was loaded
	 */
	private int loadDic(String dicName, List<String> initWords,
			int maxWordLength) {
		if (initWords.size() == 0) {
			try {
				System.out.println("开始初始化" + dicName + "词典");
				int max = 1;
				int count = 0;
				InputStream in = getClass().getClassLoader()
						.getResourceAsStream("nlp/" + dicName + ".txt");
				if (in == null) {
					throw new FileNotFoundException("nlp/" + dicName + ".txt");
				}
				try (BufferedReader br = new BufferedReader(
						new InputStreamReader(in, "utf-8"))) {
					String s;
					while ((s = br.readLine()) != null) {
						initWords.add(s);
						count++;
						if (s.length() > max) {
							max = s.length();
						}
					}
				}
				maxWordLength = max;
				System.out.println("完成初始化" + dicName + "词典，词数目：" + count);
				System.out.println("最大分词长度：" + maxWordLength);
			} catch (IOException ex) {
				ex.printStackTrace();
				System.err.println(dicName + "词典装载失败:" + ex.getMessage());
			}
		}
		return maxWordLength;
	}

	/**
	 * Discovers new words in the given text using the configured thresholds.
	 * The settings are written into the global {@code Config} before running.
	 *
	 * @param text source text
	 * @param maxLen maximum candidate word length
	 * @param minCiPin minimum word frequency
	 * @param zyd minimum left/right entropy threshold
	 * @param ngd minimum cohesion threshold
	 * @return discovered words as maps, from BuildNewWords.getWordsMap()
	 * @throws IOException if the discovery run fails
	 */
	@Override
	public List<Map<String, Object>> bulidWords(String text, int maxLen,
			int minCiPin, double zyd, int ngd) throws IOException {
		Config.maxLen = maxLen;
		Config.minCiPin = minCiPin;
		Config.minLeftRight = zyd;
		Config.minWhole = ngd;
		BuildNewWords discoverer = new BuildNewWords();
		discoverer.run(text);
		return discoverer.getWordsMap();
	}

	/**
	 * Produces up to {@code limit} classification labels by running new-word
	 * discovery with fixed, permissive settings and taking the first words
	 * found. An I/O failure during discovery is logged and yields an empty
	 * result (unchanged from the original behavior).
	 *
	 * @param text source text
	 * @param limit maximum number of labels to return
	 * @return discovered words, at most {@code limit} of them
	 */
	@Override
	public List<String> automaticTextClassification(String text, int limit) {
		// fixed discovery settings
		Config.maxLen = 15;
		Config.minCiPin = 1;
		Config.minLeftRight = 0;
		Config.minWhole = 0;
		BuildNewWords discoverer = new BuildNewWords();
		List<String> labels = new ArrayList<String>();
		List<Map<String, Object>> candidates = null;
		try {
			discoverer.run(text);
			candidates = discoverer.getWordsMap();
		} catch (IOException e) {
			e.printStackTrace();
		}
		if (candidates != null && candidates.size() > 0) {
			int n = Math.min(limit, candidates.size());
			for (int i = 0; i < n; i++) {
				labels.add(candidates.get(i).get("word").toString());
			}
		}
		return labels;
	}

	/**
	 * Classifies a document: each non-empty field is normalized (brackets,
	 * hyphens and slashes replaced by spaces; lower-cased), segmented against
	 * the keyword dictionary, and the union of matched keywords is passed to
	 * {@code RuleUtil.getClassNameAndCode}.
	 *
	 * Fix: the identical replaceAll chain was duplicated four times; it is
	 * now factored into {@link #normalizeForKeywordSeg(String)}.
	 *
	 * @return class name/code mapping produced by RuleUtil
	 */
	public Map<String,String> getClassAndCode(String title,String summary,String keywords,String text){
		List<String> words = new ArrayList<String>();
		if (title != null && title.length() != 0) {
			words.addAll(segByKeywords(normalizeForKeywordSeg(title)));
		}
		if (keywords != null && keywords.length() != 0) {
			words.addAll(segByKeywords(normalizeForKeywordSeg(keywords)));
		}
		if (summary != null && summary.length() != 0) {
			words.addAll(segByKeywords(normalizeForKeywordSeg(summary)));
		}
		if (text != null && text.length() != 0) {
			words.addAll(segByKeywords(normalizeForKeywordSeg(text)));
		}
		Set<String> wordSet = new HashSet<String>(words);
		return RuleUtil.getClassNameAndCode(wordSet);
	}

	/**
	 * Replaces '(', ')', '-' and '/' with spaces and lower-cases the text —
	 * equivalent to the four chained replaceAll calls the original repeated
	 * for every field.
	 */
	private static String normalizeForKeywordSeg(String s) {
		return s.replaceAll("[()\\-/]", " ").toLowerCase();
	}

	/**
	 * Looks up the singular form of a keyword given its plural form.
	 *
	 * @param wordPlurals plural form (lower-case, as stored at load time)
	 * @return the singular form, or null when the plural is unknown
	 */
	@Override
	public String getWordByPlurals(String wordPlurals) {
		String singular = plurals_keywords.get(wordPlurals);
		return singular;
	}

	/**
	 * Tells whether the given word is a stop word.
	 *
	 * @param wordStop word to check
	 * @return true when the word is in the stop-word set
	 */
	@Override
	public boolean getWordByStop(String wordStop) {
		boolean isStopWord = stop_keywords.contains(wordStop);
		return isStopWord;
	}
	/**
	 * Prints a Buddha ASCII-art banner ("Buddha bless, no bugs") to stdout.
	 * Called once by the constructor before the first dictionary load;
	 * purely cosmetic.
	 */
	public void lordBuddhaBless(){
		 System.out.println(" ......................我佛慈悲......................");
	       System.out.println("                       _oo0oo_                      ");
	       System.out.println("                      o8888888o                     ");
	       System.out.println("                      88\" . \"88                     ");
	       System.out.println("                      (| -_- |)                     ");
	       System.out.println("                      0\\  =  /0                     ");
	       System.out.println("                    ___/‘---’\\___                   ");
	       System.out.println("                  .' \\|       |/ '.                 ");
	       System.out.println("                 / \\\\|||  :  |||// \\                ");
	       System.out.println("                / _||||| -卍-|||||_ \\               ");
	       System.out.println("               |   | \\\\\\  -  /// |   |              ");
	       System.out.println("               | \\_|  ''\\---/''  |_/ |              ");
	       System.out.println("               \\  .-\\__  '-'  ___/-. /              ");
	       System.out.println("             ___'. .'  /--.--\\  '. .'___            ");
	       System.out.println("          .\"\" ‘<  ‘.___\\_<|>_/___.’ >’ \"\".          ");
	       System.out.println("         | | :  ‘- \\‘.;‘\\ _ /’;.’/ - ’ : | |        ");
	       System.out.println("         \\  \\ ‘_.   \\_ __\\ /__ _/   .-’ /  /        ");
	       System.out.println("     =====‘-.____‘.___ \\_____/___.-’___.-’=====     ");
	       System.out.println("                       ‘=---=’                      ");
	       System.out.println("                                                    ");
	       System.out.println("....................佛祖保佑 ,永无BUG...................");
	}
}
