package com.huiquan.vocab.service;

import com.huiquan.foundation.constant.SubmitStatus;
import com.huiquan.foundation.util.BusinessUtil;
import com.huiquan.framework.base.BaseService;
import com.huiquan.framework.base.ReturnData;
import com.huiquan.framework.constant.BaseContants;
import com.huiquan.framework.utils.CollectionUtil;
import com.huiquan.framework.utils.FileUtils;
import com.huiquan.framework.utils.ReturnUtil;
import com.huiquan.vocab.dao.VocabDyadInitDao;
import com.huiquan.vocab.dao.VocabDyadRemoveDao;
import com.huiquan.vocab.dao.VocabElementDao;
import com.huiquan.vocab.domain.*;
import com.huiquan.vocab.utils.Word2VecAlgo;
import org.ansj.domain.Result;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.DicAnalysis;
import org.apache.commons.lang.StringUtils;
import org.nlpcn.commons.lang.tire.domain.Forest;
import org.nlpcn.commons.lang.tire.library.Library;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.util.*;

/**
 * Builds and refreshes the dyad (word-pair) vocabulary tables: candidate
 * generation from symptom segmentations, de-duplication via "remove" rules,
 * paging for manual review, and several offline word2vec data-preparation
 * utilities.
 */
@Service
public class VocabDyadInitService extends BaseService {

	// DAO driving the multi-step dyad initialization SQL (pre/pre2/pre3 tables, pages, ...).
	@Autowired
	private VocabDyadInitDao vocabDyadInitDao;
	// DAO for the remove (de-duplication) rules.
	@Autowired
	private VocabDyadRemoveDao vocabDyadRemoveDao;
	// Sibling service used in step 8 to submit the finished dyad types.
	@Autowired
	private VocabDyadService vocabDyadService;
	// DAO for the element split rules used by the word2vec utilities.
	@Autowired
	private VocabElementDao vocabElementDao;
	/**
	 * Initializes the dyads in several stages: step 1 standardizes the small
	 * words of each symptom via SQL and seeds the pre table; step 2 splits the
	 * de-combinations in Java; step 3 maintains the order field and builds the
	 * second pre table; step 4 de-duplicates via the remove rules into the third
	 * pre table; step 5 converts the prepared dyads into reviewable remove rows;
	 * step 6 inserts the dyad rows; step 7 initializes ordering and pages;
	 * step 8 submits the finished types.
	 *
	 * <p>Concurrent calls are no-ops while a run is in progress.
	 *
	 * @return success payload (also returned when an init run is already active)
	 */
	public ReturnData init() {

		if (!SubmitStatus.getSubmitRecords().contains(SubmitStatus.TYPE_DYAD_INIT)) {

			// Mark the dyad init as running so concurrent calls skip the body.
			SubmitStatus.addSubmitRecords(SubmitStatus.TYPE_DYAD_INIT);

			try {
				LOGGER.info("Init dyad start!");

				init1_1();
				LOGGER.info("Init dyad step 1 end!");

				init2();
				LOGGER.info("Init dyad step 2 (o+de,s+de,si+de,pre) end!");

				init3();
				LOGGER.info("Init dyad step 3 (order_vocabulary,pre2) end!");

				init4();
				LOGGER.info("Init dyad step 4 (pre3,remove) end!");

				init5();
				LOGGER.info("Init dyad step 5 (init remove object) end!");

				init6();
				LOGGER.info("Init dyad step 6 (init dyad object) end!");

				init7();
				LOGGER.info("Init dyad step 7 (init order page) end!");

				init8();
				LOGGER.info("Init dyad step 8 (submit p+p,p+o,o+o) end!");

				LOGGER.info("Init dyad end!");
			} finally {
				// Always release the running flag; otherwise a failure in any step
				// would leave it set and block every future init call.
				SubmitStatus.removeSubmitRecords(SubmitStatus.TYPE_DYAD_INIT);
			}
		}

		return ReturnUtil.success();
	}

	// Step 1: SQL-side preparation — builds the merged segmentation data and
	// seeds the pre (candidate dyad) table.
	private void init1_1() {

		// Build the p+o merged segmentation table.
		vocabDyadInitDao.initPOMerge();

		// Rebuild the "symptom segmentation & standard word" table.
		vocabDyadInitDao.initSymptomElemenStd();

		// Maintain the p+o merged rows inside that table (Java-side merging).
		initSymptomElemenStdPOMerge();

		// Seed the pre table from the standardized elements.
		vocabDyadInitDao.initPreByElementStd();
	}

	/**
	 * Rewrites the rows of the "symptom segmentation & standard word" table for
	 * every symptom whose p+o word runs must be merged: the stale rows of those
	 * symptoms are deleted first, then the merged replacement rows are inserted.
	 */
	private void initSymptomElemenStdPOMerge() {

		// Symptoms whose p+o segments need to be merged.
		List<VocabSymptomPOMerge> spomList = vocabDyadInitDao.retrieveSymptomPOMerge();

		// Re-segmented rows after merging, plus the comma-joined affected symptom ids.
		List<VocabSymptomElementStd> mergeResult = new ArrayList<>();
		StringJoiner symptomIds = new StringJoiner(",");
		for (VocabSymptomPOMerge merge : spomList) {
			mergeResult.addAll(getStdsBySymptomPOMerge(merge));
			symptomIds.add(String.valueOf(merge.getSymptomId()));
		}

		// Only touch the table when at least one symptom is affected.
		if (symptomIds.length() > 0) {
			// Delete the stale segmentation rows of these symptoms first...
			vocabDyadInitDao.deleteSymptomElementStdBySymptomIds(symptomIds.toString());

			// ...then insert the merged replacement rows.
			if (!mergeResult.isEmpty()) {
				vocabDyadInitDao.batchInsertSymptomElementStd(mergeResult);
			}
		}
	}

	/**
	 * Rebuilds the segmentation rows of one symptom by collapsing the p+o word
	 * runs listed in the merge record into single merged words.
	 *
	 * <p>vocabs/props/stds are ","-prefixed, comma-joined parallel lists;
	 * poArray/poStd are tab-separated lists of merged runs (each run itself a
	 * ","-wrapped word sequence) and their standard words.
	 *
	 * @param merge merge description for one symptom
	 * @return the re-segmented rows for that symptom
	 */
	private List<VocabSymptomElementStd> getStdsBySymptomPOMerge(VocabSymptomPOMerge merge) {
		Long symptomId = merge.getSymptomId();
		String vocabs = merge.getVocabs();
		String props = merge.getProps();
		String stds = merge.getStds();
		String[] poArrays = merge.getPoArray().split("\t");
		String[] poStds = merge.getPoStd().split("\t");

		for (int i = 0; i < poArrays.length; i++) {
			String array = poArrays[i];
			String std = poStds[i];
			String[] vocabsSplit = vocabs.substring(1).split(",");
			String[] propsSplit = props.substring(1).split(",");
			String[] stdsSplit = stds.substring(1).split(",");

			// Partial matches against the current po run are buffered in the temp
			// variables and only committed once the full run is confirmed.
			String vocabsTemp = "", propsTemp = "", stdsTemp = "";
			vocabs = ",";
			props = ",";
			stds = ",";
			for (int j = 0; j < vocabsSplit.length; j++) {
				if (array.equals("," + vocabsTemp + vocabsSplit[j] + ",")) {
					// Buffer plus current word completes the po run: commit it as one
					// merged word carrying the run's standard word.
					vocabs += (vocabsTemp + vocabsSplit[j]).replace(",", "") + ",";
					stds += std.replace(",", "") + ",";
					if ((propsTemp + propsSplit[j]).indexOf("oa") > -1) {
						props += "oa,";
					} else {
						props += "o,";
					}

					vocabsTemp = "";
					propsTemp = "";
					stdsTemp = "";
				} else if (array.startsWith("," + vocabsTemp + vocabsSplit[j] + ",")) {
					// Buffer plus current word is a proper prefix of the po run: keep buffering.
					vocabsTemp = vocabsTemp + vocabsSplit[j] + ",";
					propsTemp = propsTemp + propsSplit[j] + ",";
					stdsTemp = stdsTemp + stdsSplit[j] + ",";
				} else {
					// No match: decide what to do with any buffered words first.
					if (vocabsTemp.isEmpty()) {
						// Nothing buffered: the current word goes straight into the result.
						vocabs += vocabsSplit[j] + ",";
						props += propsSplit[j] + ",";
						stds += stdsSplit[j] + ",";
					} else {
						// Flush the buffered words into the result unchanged.
						vocabs += vocabsTemp;
						props += propsTemp;
						stds += stdsTemp;

						// If the current word starts a new po run, re-seed the buffer with
						// it; otherwise clear the buffer and commit the word directly.
						if (array.startsWith("," + vocabsSplit[j] + ",")) {
							vocabsTemp = vocabsSplit[j] + ",";
							propsTemp = propsSplit[j] + ",";
							stdsTemp = stdsSplit[j] + ",";
						} else {
							vocabs += vocabsSplit[j] + ",";
							props += propsSplit[j] + ",";
							stds += stdsSplit[j] + ",";

							vocabsTemp = "";
							propsTemp = "";
							stdsTemp = "";
						}
					}
				}
			}

			// Flush any words still buffered when the word list ends.
			if (!vocabsTemp.isEmpty()) {
				vocabs += vocabsTemp;
				props += propsTemp;
				stds += stdsTemp;
			}
		}

		// With all po runs merged, emit one row per remaining word.
		List<VocabSymptomElementStd> result = new ArrayList<>();
		String[] vocabsSplit = vocabs.substring(1).split(",");
		String[] propsSplit = props.substring(1).split(",");
		String[] stdsSplit = stds.substring(1).split(",");
		for (int i = 0; i < vocabsSplit.length; i++) {
			VocabSymptomElementStd std = new VocabSymptomElementStd();
			std.setSymptomId(symptomId);
			std.setVocabulary(vocabsSplit[i]);
			std.setProperty(propsSplit[i]);
			std.setStd(stdsSplit[i]);

			result.add(std);
		}

		return result;
	}

	// Rebuilds the o+s search segmentation table.
	public void initOSSearch() {
		LOGGER.info("init o+s search  begin!");
		vocabDyadInitDao.initOSSearch();
		LOGGER.info("init o+s search  end!");
	}

	/**
	 * Step 2: splits the s+de, o+de and si+de records down to their finest
	 * pairs and adds them to the pre table.
	 */
	public void init2() {
		LOGGER.info("Init2_2 start!");
		initDePre("s+de");
		LOGGER.info("init2_2 s+de success!");
		initDePre("o+de");
		LOGGER.info("init2_2 o+de success!");
		initDePre("si+de");
		LOGGER.info("init2_2 si+de success!");

	}

	/**
	 * Splits every record of the given de-combination type down to its finest
	 * pairs and stores them in the pre table.
	 *
	 * @param type dyad type to process: "s+de", "si+de" or "o+de"
	 */
	private void initDePre(String type) {
		// Records of this type that still need splitting.
		List<VocabDyadPre> valueList = vocabDyadInitDao.retrieveDeList(type);
		if (valueList == null || valueList.isEmpty()) {
			return;
		}

		// Collect every pair produced by the split.
		List<VocabDyadPre> addList = new ArrayList<>();
		for (VocabDyadPre value : valueList) {
			addList.addAll(getDeNeedToAddList(type, value));
		}

		// Guard the batch insert: batch mappers typically fail on an empty list.
		if (!addList.isEmpty()) {
			vocabDyadInitDao.batchInsertPre(addList);
		}
	}

	/**
	 * Builds the finest-grained pre records for one row of a de-combination
	 * type. For s+de / si+de the (last) keyword word is paired with every other
	 * word, keeping left/right order relative to the keyword; for o+de every
	 * o/oa word is paired with every later de word (and vice versa).
	 *
	 * @param type  dyad type: "s+de", "si+de" or "o+de"
	 * @param value source pre record; array/partPattern/stdArray are
	 *              ","-prefixed comma-joined parallel lists
	 * @return the generated pre records (possibly empty)
	 */
	private List<VocabDyadPre> getDeNeedToAddList(String type, VocabDyadPre value) {
		List<VocabDyadPre> addList = new ArrayList<>();
		String[] words = value.getArray().substring(1).split(",");
		String[] properties = value.getPartPattern().substring(1).split(",");
		String[] stds = value.getStdArray().substring(1).split(",");

		if (StringUtils.equals(type, "s+de") || StringUtils.equals(type, "si+de")) {
			// Locate the keyword: the (last) "s" word for s+de, the (last) "si" for si+de.
			String tempWord = "";
			String tempProperty = "";
			String tempStd = "";
			for (int i = 0; i < words.length; i++) {
				if (StringUtils.equals(type, "s+de") && StringUtils.equals(properties[i], "s")) {
					tempWord = words[i];
					tempProperty = "s";
					tempStd = stds[i];
				} else if (StringUtils.equals(type, "si+de") && StringUtils.equals(properties[i], "si")) {
					tempWord = words[i];
					tempProperty = "si";
					tempStd = stds[i];
				}
			}

			// Pair every other word with the keyword: words before the keyword go on
			// its left, words after it on its right.
			boolean beforeKeyword = true;
			for (int i = 0; i < words.length; i++) {
				if (StringUtils.equals(tempWord, words[i])) {
					beforeKeyword = false;
					continue;
				}
				addList.add(getAddPreValue(value.getSymptomId(), type,
						beforeKeyword ? "," + words[i] + "," + tempWord + "," : "," + tempWord + "," + words[i] + ",",
						beforeKeyword ? "," + properties[i] + "," + tempProperty + ","
								: "," + tempProperty + "," + properties[i] + ",",
						sortedSynonym(stds[i], tempStd),
						beforeKeyword ? "," + stds[i] + "," + tempStd + "," : "," + tempStd + "," + stds[i] + ","));
			}
		} else if (StringUtils.equals(type, "o+de")) {
			// Pair every o/oa word with every following de word (and vice versa).
			for (int i = 0; i < words.length; i++) {
				for (int j = i; j < words.length; j++) {
					if (isODePair(properties[i], properties[j])) {
						addList.add(getAddPreValue(value.getSymptomId(), "o+de",
								"," + words[i] + "," + words[j] + ",",
								"," + properties[i] + "," + properties[j] + ",",
								sortedSynonym(stds[i], stds[j]),
								"," + stds[i] + "," + stds[j] + ","));
					}
				}
			}
		}
		return addList;
	}

	/** Concatenates two standard words in natural (lexicographic) order. */
	private String sortedSynonym(String std1, String std2) {
		return std1.compareTo(std2) <= 0 ? std1 + std2 : std2 + std1;
	}

	/** True when one property is o/oa and the other is de, in either order. */
	private boolean isODePair(String prop1, String prop2) {
		boolean firstIsO = StringUtils.equals(prop1, "o") || StringUtils.equals(prop1, "oa");
		boolean secondIsO = StringUtils.equals(prop2, "o") || StringUtils.equals(prop2, "oa");
		return (firstIsO && StringUtils.equals(prop2, "de"))
				|| (StringUtils.equals(prop1, "de") && secondIsO);
	}

	/**
	 * Builds a single pre-table record from its already-formatted fields.
	 * The order field is intentionally left blank; it is filled in later (step 3).
	 *
	 * @param symptomId   owning symptom id
	 * @param type        dyad type of the record
	 * @param array       ","-wrapped word list
	 * @param partPattern ","-wrapped property list
	 * @param stdSynonym  concatenated, sorted standard-word pair
	 * @param stdArray    ","-wrapped standard-word list
	 * @return the populated record
	 */
	private VocabDyadPre getAddPreValue(long symptomId, String type, String array, String partPattern,
			String stdSynonym, String stdArray) {
		VocabDyadPre pre = new VocabDyadPre();
		pre.setSymptomId(symptomId);
		pre.setType(type);
		pre.setStdSynonym(stdSynonym);
		pre.setStdArray(stdArray);
		pre.setPartPattern(partPattern);
		pre.setArray(array);
		pre.setOrderVocabulary("");
		return pre;
	}

	/**
	 * Step 8: submits the finished p+p, p+o and o+o dyad types and rebuilds the
	 * o+s search segmentation table.
	 */
	private void init8() {
		LOGGER.info("Init8 start!");

		vocabDyadService.stdSubmitBase("p+p");
		LOGGER.info("Submit p+p success!");

		vocabDyadService.stdSubmitBase("p+o");
		LOGGER.info("Submit p+o success!");

		vocabDyadService.stdSubmitBase("o+o");
		// Fixed copy-paste bug: this previously logged "Submit p+o success!" again.
		LOGGER.info("Submit o+o success!");

		// Rebuild the o+s search segmentation table.
		initOSSearch();
	}

	/**
	 * Step 7: turns the per-order-word counts into review pages of roughly 20
	 * rows each and stores them.
	 *
	 * NOTE(review): the grouping below assumes retrieveStdOrder() returns rows
	 * ordered by type (rows of one type must be contiguous, otherwise an
	 * earlier group of the same type is silently replaced) — confirm in the mapper.
	 */
	public void init7() {
		LOGGER.info("Init7 start!");

		List<VocabDyadStdOrder> stdOrderList = vocabDyadInitDao.retrieveStdOrder();

		if (!stdOrderList.isEmpty()) {
			// Group the order rows by type (relies on type-contiguous input, see above).
			Map<String, List<VocabDyadStdOrder>> orderMap = new HashMap<>();
			String type = "";
			List<VocabDyadStdOrder> tempList = new ArrayList<>();

			for (VocabDyadStdOrder order : stdOrderList) {
				if (!type.equals(order.getType())) {
					type = order.getType();
					tempList = new ArrayList<>();
					orderMap.put(type, tempList);
				}
				tempList.add(order);
			}

			// Convert each type's order rows into pages. A page holding fewer than
			// 20 rows is topped up with complementary rows taken from the END of the
			// list (the list comes back sorted by row count per the original note).
			List<VocabDyadStdOrder> pageList = new ArrayList<>();
			VocabDyadStdOrder order, tailOrder;
			int pageNo;
			for (String key : orderMap.keySet()) {
				pageNo = 1; // current page number within this type
				tempList = orderMap.get(key);
				int tail = tempList.size() - 1; // backwards cursor for top-up rows
				int pageCount;
				for (int i = 0; i <= tail; i++) {
					order = tempList.get(i);
					pageCount = order.getCnt();
					// Order words are collected as a quoted, comma-joined SQL-ready list.
					String orderVocabulary = "'" + order.getOrderVocabulary() + "'";
					// Fewer than 20 rows and tail rows still available: top the page up.
					while (pageCount < 20 && i < tail) {
						tailOrder = tempList.get(tail);
						pageCount += tailOrder.getCnt();
						orderVocabulary += ",'" + tailOrder.getOrderVocabulary() + "'";
						tail -= 1;
					}
					// Build the page row.
					VocabDyadStdOrder page = new VocabDyadStdOrder();
					page.setCnt(pageCount);
					page.setFlag(order.getFlag());
					page.setType(order.getType());
					page.setOrderVocabulary(orderVocabulary);
					page.setPageNo(pageNo);
					pageList.add(page);

					pageNo++;
				}
			}

			// Persist the pages.
			vocabDyadInitDao.batchInsertPage(pageList);
			LOGGER.info("Batch insert success!");
		}
	}

	/**
	 * Step 6: inserts dyad rows generated from pre3 (filtering out combinations
	 * already verified) and initializes their sentences.
	 */
	private void init6() {
		LOGGER.info("Init6 start!");

		vocabDyadInitDao.insertDyadByPre3();

		vocabDyadInitDao.initDyadSentence();

		LOGGER.info("Insert dyad success!");
	}

	/**
	 * Step 4: de-duplicates the pre list with the remove rules (two passes:
	 * first o+oa rules, then all other usable rules) and stores the result into
	 * the third pre table (pre3) in batches.
	 */
	private void init4() {
		LOGGER.info("Init4 start!");

		// 'o+in','o+in+s','o+oa' (plus 'p+o'/'o+s' rows whose segmentation has two
		// or more o/oa words).
		Map<String, Object> param = new HashMap<String, Object>();
		List<VocabDyadPre> preList = vocabDyadInitDao.retrieveRemovePreList(param);
		LOGGER.info("Get pre list success,size is " + preList.size());

		// Maintain the standard words of the remove rules (a remove rule only works
		// when it originates from a dyad; otherwise it has no effect).
		vocabDyadInitDao.initRemoveStd(null);
		LOGGER.info("Init remove std success!");

		// First pass: apply all o+oa remove rules.
		Map<String, Object> param1 = new HashMap<>();
		param1.put("removeFlag", BaseContants.FLAG_YES);
		param1.put("types", "'o+oa'");
		List<VocabDyadRemove> firstRemoveList = vocabDyadRemoveDao.retrieveList(param1);
		removePreList(preList, firstRemoveList);

		// Second pass: apply every usable remove rule except o+oa.
		Map<String, Object> param2 = new HashMap<>();
		param2.put("removeFlag", BaseContants.FLAG_YES);
		param2.put("noTypes", "'o+oa'");
		List<VocabDyadRemove> removeList = vocabDyadRemoveDao.retrieveList(param2);
		LOGGER.info("Get remove list success,size is " + removeList.size());
		removePreList(preList, removeList);

		// Persist pre3 in batches of 5000 rows.
		vocabDyadInitDao.deleteRemovePre3();
		LOGGER.info("Insert into pre3 start!");
		int size = preList.size();
		for (int start = 0; start < size; start += 5000) {
			int end = Math.min(start + 5000, size);
			vocabDyadInitDao.batchInsertPre3(preList.subList(start, end));
		}

		// Drop rows that are no longer dyads after the remove passes.
		vocabDyadInitDao.deleteWrongPre3();

		// Maintain the synonym column of pre3.
		vocabDyadInitDao.setSynonymToPre3(new HashMap<String, Object>());
	}

	/**
	 * Applies the remove rules to the pre list IN PLACE, rewriting each matching
	 * record's word/property/std lists, type, order word and remove trace, and
	 * repeating full passes until no rule shrinks a record any further.
	 *
	 * @param preList    pre records to de-duplicate (mutated in place)
	 * @param removeList remove rules to apply
	 * @return the set of pre records modified by at least one rule
	 */
	protected Set<VocabDyadPre> removePreList(List<VocabDyadPre> preList, List<VocabDyadRemove> removeList) {
		// Index the pre records by each of their standard words.
		Map<String, Set<VocabDyadPre>> typePresMap = new HashMap<>();
		Set<VocabDyadPre> vocabPreSet;
		for (VocabDyadPre pre : preList) {
			// The remove trace fields must not stay null, even when no rule applies.
			if (pre.getRemoveArray() == null) {
				pre.setRemoveArray("");
				pre.setRemoveRa("");
				pre.setRemoveRp("");
			}

			String[] preVocabs = pre.getStdArray().substring(1).split(",");
			for (String preVocab : preVocabs) {
				vocabPreSet = typePresMap.get(preVocab);
				// First occurrence of this word: create its bucket.
				if (vocabPreSet == null) {
					vocabPreSet = new HashSet<>();
					typePresMap.put(preVocab, vocabPreSet);
				}
				vocabPreSet.add(pre);
			}
		}
		LOGGER.info("Get type pres map success!");

		// Working variables for the rule matching below.
		String[] removeStdVocabs;
		String[] removeStdProps;
		// pre std words, remove std words, pre properties, pre words
		List<String> preStdVocabularyList, removeStdVocabularyList, prePropertyList, preVocabularyList;
		// remove source words, remove result properties / words / std words
		String[] removeVocabularies, removeResultProperties, removeResultVocabularies, removeResultStdVocabularies;
		// Independent cursor into the result arrays — results are shorter than sources.
		int resultIndex = 0;

		// Every pre record that was modified by some rule.
		Set<VocabDyadPre> removedSet = new HashSet<>();

		boolean patternFlag = true; // whether the pre records may still contain removable data
		while (patternFlag) {
			patternFlag = false; // flips back to true whenever a rule deletes words, forcing another pass

			LOGGER.info("Start to match remove!");
			for (VocabDyadRemove remove : removeList) {
				// Candidate records: every pre whose std words include all rule std words.
				removeStdVocabs = remove.getStdArray().substring(1).split(",");
				removeStdProps = remove.getPartPattern().substring(1).split(",");

				Set<VocabDyadPre> patternPreSet = getPatternPreSet(removeStdVocabs, removeStdProps, typePresMap);
				// Process every candidate of this rule.
				if (!patternPreSet.isEmpty()) {
					for (VocabDyadPre patternPre : patternPreSet) {
						preStdVocabularyList = CollectionUtil
								.getListByArray(patternPre.getStdArray().substring(1).split(","));
						prePropertyList = CollectionUtil
								.getListByArray(patternPre.getPartPattern().substring(1).split(","));

						// Type gate: an o+oa rule matches any record type containing "o";
						// other rules require the record type to contain the rule's type.
						boolean removeTypeFlag = (remove.getType().equals("o+oa")
								&& patternPre.getType().indexOf("o") > -1)
								|| patternPre.getType().indexOf(remove.getType()) > -1;

						// The record must pass the type gate AND contain all rule std words.
						if (removeTypeFlag && preContainsRemoveStd(preStdVocabularyList, remove, prePropertyList)) {
							preVocabularyList = CollectionUtil
									.getListByArray(patternPre.getArray().substring(1).split(","));
							removeStdVocabularyList = CollectionUtil
									.getListByArray(remove.getStdArray().substring(1).split(","));
							removeVocabularies = remove.getArray().substring(1).split(",");
							removeResultProperties = remove.getResultPattern().substring(1).split(",");
							removeResultVocabularies = remove.getResultArray().substring(1).split(",");
							removeResultStdVocabularies = remove.getResultStdArray().substring(1).split(",");
							resultIndex = 0;
							// Walk the rule's source words one by one.
							boolean removeFlag = false, removeWordFlag = false;
							for (int i = 0; i < removeVocabularies.length; i++) {
								if (removeResultVocabularies.length > resultIndex
										&& removeResultVocabularies[resultIndex].equals(removeVocabularies[i])) {
									// The word survives in the result: rewrite the property at
									// every matching record position (o+oa rules may flip the
									// property besides deleting words)...
									for (int j = 0; j < preStdVocabularyList.size(); j++) {
										if (preStdVocabularyList.get(j).equals(removeStdVocabularyList.get(i))) {
											prePropertyList.set(j, removeResultProperties[resultIndex]);
											// ...and the std word follows the property change (o <-> oa).
											preStdVocabularyList.set(j, removeResultStdVocabularies[resultIndex]);
											removeFlag = true;
											removedSet.add(patternPre);
										}
									}
									resultIndex++;
								} else {
									// The word is absent from the result: delete its last
									// occurrence from the record's three parallel lists.
									for (int j = preStdVocabularyList.size() - 1; j >= 0; j--) {
										if (preStdVocabularyList.get(j).equals(removeStdVocabularyList.get(i))) {
											prePropertyList.remove(j);
											preVocabularyList.remove(j);
											preStdVocabularyList.remove(j);

											removeFlag = true;
											removedSet.add(patternPre);

											removeWordFlag = true;
											break;
										}
									}
								}
							}
							if (getONoByList(prePropertyList) > 1) {
								// More than one plain "o" left: do not commit this rewrite.
								removeFlag = false;
							} else if (removeWordFlag) {
								// Words were actually deleted: schedule another full pass.
								patternFlag = true;
							}

							if (removeFlag) {
								patternPre.setArray(BusinessUtil.getStringFromListBySeparator(preVocabularyList, ","));
								patternPre.setPartPattern(
										BusinessUtil.getStringFromListBySeparator(prePropertyList, ","));
								patternPre.setStdArray(
										BusinessUtil.getStringFromListBySeparator(preStdVocabularyList, ","));
								patternPre.setStdSynonym(
										BusinessUtil.getStringFromListBySeparator(preStdVocabularyList, ","));

								// Maintain the record type.
								patternPre.setType(getTypeByPattern(patternPre.getType(), patternPre.getPartPattern()));
								// Maintain the order word ("in" records are ordered by "in" elsewhere).
								if (patternPre.getType().indexOf("in") < 0) {
									patternPre.setOrderVocabulary(
											getOByPattern(patternPre.getPartPattern(), patternPre.getStdArray()));
								}

								// Record the applied rule on the pre3 row for auditing.
								setRemoveInfo2Pre3(remove, patternPre);

								// Unregister the record from word buckets it no longer contains.
								for (int i = 0; i < removeStdVocabs.length; i++) {
									// Drop the record from the bucket when the std word is gone.
									boolean vocabExist = patternPre.getStdArray()
											.indexOf("," + removeStdVocabs[i] + ",") > -1;
									if (typePresMap.get(removeStdVocabs[i]) != null && !vocabExist) {
										typePresMap.get(removeStdVocabs[i]).remove(patternPre);
									}
								}
							}
						}
					}
				}
			}
		}
		return removedSet;
	}

	/**
	 * Counts how many entries in the property list are exactly "o".
	 *
	 * @param prePropertyList property list of one pre record
	 * @return number of "o" properties
	 */
	private int getONoByList(List<String> prePropertyList) {
		// Collections.frequency uses equals(), matching the original element-wise check.
		return Collections.frequency(prePropertyList, "o");
	}

	/**
	 * Intersects the candidate sets of every standard word, so only the pre
	 * records containing ALL of the given words remain.
	 *
	 * @param vocabs      standard words of the remove rule
	 * @param props       matching property list (forwarded, not filtered on)
	 * @param typePresMap index from standard word to the pre records containing it
	 * @return mutable set of pre records containing every word; possibly empty
	 */
	protected Set<VocabDyadPre> getPatternPreSet(String[] vocabs, String[] props,
			Map<String, Set<VocabDyadPre>> typePresMap) {
		// Start from the first word's candidates and narrow with each further word.
		Set<VocabDyadPre> matched = getPreSetByStd(vocabs[0], props[0], typePresMap);
		for (int idx = 1; idx < vocabs.length && !matched.isEmpty(); idx++) {
			Set<VocabDyadPre> candidates = getPreSetByStd(vocabs[idx], props[idx], typePresMap);
			if (candidates.isEmpty()) {
				// One word has no records at all: the intersection is empty.
				return new HashSet<>();
			}
			matched.retainAll(candidates);
		}
		return matched;
	}

	/**
	 * Returns a mutable copy of the pre-record set registered for a standard
	 * word (empty set when the word is unknown). The copy protects the index
	 * from the caller's retainAll mutation. {@code prop} is currently unused
	 * but kept for interface stability.
	 */
	private Set<VocabDyadPre> getPreSetByStd(String vocab, String prop, Map<String, Set<VocabDyadPre>> typePresMap) {
		Set<VocabDyadPre> registered = typePresMap.get(vocab);
		return registered == null ? new HashSet<>() : new HashSet<>(registered);
	}

	/**
	 * Checks whether the pre record's standard words contain the remove rule's
	 * standard words as a consecutive, in-order run (no inversion allowed).
	 *
	 * @param preStdVocabularyList standard words of the pre record
	 * @param remove               remove rule whose stdArray is matched
	 * @param prePropertyList      property list of the pre record (currently
	 *                             unused, kept for interface stability)
	 * @return true when the full remove sequence occurs in order
	 */
	protected boolean preContainsRemoveStd(List<String> preStdVocabularyList, VocabDyadRemove remove,
			List<String> prePropertyList) {
		String[] removeStdVocabularies = remove.getStdArray().substring(1).split(",");

		// Match the remove words in order; a mismatch restarts the pattern.
		int removeIndex = 0;
		for (int i = 0; i < preStdVocabularyList.size(); i++) {
			if (preStdVocabularyList.get(i).equals(removeStdVocabularies[removeIndex])) {
				// All remove words matched: success.
				if (removeIndex == removeStdVocabularies.length - 1) {
					return true;
				}
				removeIndex++;
			} else {
				// Fixed naive-restart bug: after a mismatch the current word must be
				// re-tested against the FIRST remove word, otherwise runs such as
				// [A, A, B] would never match the pattern [A, B].
				removeIndex = preStdVocabularyList.get(i).equals(removeStdVocabularies[0]) ? 1 : 0;
			}
		}

		return false;
	}

	/**
	 * Returns the word aligned with the first "o" property, or "" when the
	 * pattern contains no "o". Both inputs are comma-joined parallel lists.
	 */
	private String getOByPattern(String partPattern, String array) {
		String[] props = partPattern.split(",");
		String[] words = array.split(",");
		int idx = 0;
		for (String prop : props) {
			if ("o".equals(prop)) {
				return words[idx];
			}
			idx++;
		}
		return "";
	}

	/**
	 * Downgrades an "o+in+s" record to "in+s" once its pattern has lost every
	 * "o" property; every other type passes through unchanged.
	 */
	private String getTypeByPattern(String type, String partPattern) {
		boolean lostAllO = partPattern.indexOf("o") < 0;
		return ("o+in+s".equals(type) && lostAllO) ? "in+s" : type;
	}

	/**
	 * Appends this remove rule's trace (source array, result array, result
	 * pattern) to the pre3 record, '|'-terminated, for later auditing.
	 */
	private void setRemoveInfo2Pre3(VocabDyadRemove remove, VocabDyadPre pre3) {
		String terminator = "|";
		String arrayTrace = pre3.getRemoveArray() + remove.getArray() + terminator;
		String resultArrayTrace = pre3.getRemoveRa() + remove.getResultArray() + terminator;
		String resultPatternTrace = pre3.getRemoveRp() + remove.getResultPattern() + terminator;
		pre3.setRemoveArray(arrayTrace);
		pre3.setRemoveRa(resultArrayTrace);
		pre3.setRemoveRp(resultPatternTrace);
	}

	/**
	 * Step 5: generates remove rows for the o+o, o+in+s and similar types from
	 * the prepared dyad data.
	 */
	private void init5() {
		// Seed the remove table via SQL.
		vocabDyadInitDao.initRemove();
	}

	/**
	 * Step 3: maintains each pre record's order word (taken from its important
	 * property words) and frequency, de-duplicates into bas_vocab_dyad_pre2,
	 * and resets the third pre table.
	 */
	private void init3() {
		// Maintain the order word and count of the pre records.
		vocabDyadInitDao.updatePreOrderVocabAndCnt();
		LOGGER.info("Update pre order vocabulary and cnt success!");

		// Generate bas_vocab_dyad_pre2.
		vocabDyadInitDao.initPre2();
		LOGGER.info("Init pre2 success!");

		// Reset the third pre table.
		vocabDyadInitDao.initPre3();
		LOGGER.info("Init pre3 success!");

	}

	/**
	 * Offline utility: segments the complaint corpus with the user dictionaries
	 * and writes "word&lt;TAB&gt;nature" lines (one blank line between records)
	 * to an export file for word2vec training.
	 *
	 * <p>NOTE(review): paths are hard-coded to a developer machine, and
	 * FileReader uses the platform default charset — confirm it matches the
	 * corpus encoding.
	 *
	 * @throws Exception if the dictionaries cannot be loaded or the export fails
	 */
	public void wordVecDataInit1() throws Exception {
		Forest forest = Library.makeForest("D:\\home\\library\\userLibrary\\Disease.dic");
		Forest forest2 = Library.makeForest("D:\\home\\library\\ambiguity.dic");
		Forest forest3 = Library.makeForest("D:\\home\\library\\userLibrary\\Additional.dic");
		Forest forest4 = Library.makeForest("D:\\home\\library\\userLibrary\\Inspection.dic");
		Forest forest5 = Library.makeForest("D:\\home\\library\\userLibrary\\Organ.dic");
		Forest forest6 = Library.makeForest("D:\\home\\library\\userLibrary\\Symptom_Independent.dic");
		Forest forest7 = Library.makeForest("D:\\home\\library\\userLibrary\\Time.dic");
		Forest forest8 = Library.makeForest("D:\\home\\library\\userLibrary\\Tongyici_dic.dic");
		Forest forest9 = Library.makeForest("D:\\home\\library\\userLibrary\\userLibrary.dic");
		File file = new File("D:\\home\\brzs_converted_fuzhen.txt");
		StringBuilder exportStr = new StringBuilder();

		// Read the corpus line by line and segment each record with the dictionaries.
		// try-with-resources guarantees the reader is closed even on failure.
		try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
			String tempString;
			while ((tempString = reader.readLine()) != null) {
				if (StringUtils.isNotBlank(tempString)) {
					// Each line is "complaintText<SOH>count"; only the text is segmented.
					String[] brzsAndCnt = tempString.split((char) 1 + "");
					Result result = new DicAnalysis().setAmbiguityForest(forest2)
							.setForests(forest, forest3, forest4, forest5, forest6, forest7, forest8, forest9)
							.parseStr(brzsAndCnt[0]);
					for (int i = 0; i < result.size(); i++) {
						Term term = result.get(i);
						// Unknown natures come back as the string "null"; export them as "x".
						exportStr.append(term.getName()).append("\t")
								.append("null".equals(term.getNatureStr()) ? "x" : term.getNatureStr()).append("\n");
					}
					exportStr.append("\n");
				}
			}
		} catch (Exception e) {
			// Best-effort by design: log the failure and still export whatever was segmented.
			e.printStackTrace();
		}
		FileUtils.generateFile("D:\\home\\brzs_fzl_ansjCut.txt", exportStr.toString());
	}

	/**
	 * Offline utility: rewrites the nature-tagged segmentation file by applying
	 * the element split rules ("word\tnature\tROLE" lines; rows matching a rule
	 * are expanded into their split words, others pass through unchanged).
	 *
	 * <p>NOTE(review): paths are hard-coded to a developer machine; FileReader
	 * uses the platform default charset — confirm the file encodings match.
	 *
	 * @throws Exception if the export fails
	 */
	public void wordVecDataInit2() throws Exception {
		// Rule file mapping upper-case role names to their lower-case form,
		// one "lower UPPER" pair per line.
		File file = new File("D:\\home\\AttrMapping.txt");
		Map<String, String> upRoleAndLow = new HashMap<>();
		try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
			String tempString;
			while ((tempString = reader.readLine()) != null) {
				if (StringUtils.isNotBlank(tempString)) {
					String[] role = tempString.split(" ");
					upRoleAndLow.put(role[1], role[0]);
				}
			}
		} catch (Exception e) {
			// Best-effort: continue with whatever rules were loaded.
			e.printStackTrace();
		}

		// Split rules keyed "vocab\1prop" -> one or more "splitVocab\1splitProp"
		// entries joined by \3.
		List<Map<String, String>> roleLists = vocabElementDao.retrieveElementSplit();
		Map<String, String> roleMap = new HashMap<>();
		for (Map<String, String> role : roleLists) {
			String vocab = role.get("vocabulary");
			String property = upRoleAndLow.get(role.get("property"));
			String splitVocab = role.get("split_vocab");
			String splitProp = upRoleAndLow.get(role.get("split_prop"));
			String key = vocab + (char) 1 + property;
			String entry = splitVocab + (char) 1 + splitProp;
			String existing = roleMap.get(key);
			if (existing != null) {
				roleMap.put(key, existing + (char) 3 + entry);
			} else {
				roleMap.put(key, entry);
			}
		}

		// Apply the split rules to the tagged file line by line.
		File file2 = new File("D:\\home\\brzs_fzl_ansjCut_natureTag_result.txt");
		StringBuilder fileStr = new StringBuilder();
		try (BufferedReader reader = new BufferedReader(new FileReader(file2))) {
			String tempString;
			while ((tempString = reader.readLine()) != null) {
				if (StringUtils.isNotBlank(tempString)) {
					String[] dataArr = tempString.split("\t");
					String splitValue = roleMap.get(dataArr[0] + (char) 1 + dataArr[2]);
					if (splitValue != null) {
						// Expand the rule: \1 becomes "\t<nature>\t", \3 becomes a newline.
						fileStr.append(splitValue.replace((char) 1 + "", "\t" + dataArr[1] + "\t")
								.replace((char) 3 + "", "\n")).append("\n");
					} else {
						fileStr.append(tempString).append("\n");
					}
				} else {
					// Blank lines separate records; keep them.
					fileStr.append("\n");
				}
			}
		} catch (Exception e) {
			// Best-effort: export whatever was rewritten so far.
			e.printStackTrace();
		}
		FileUtils.generateFile("D:\\home\\brzs_fzl_ansjCut_natureTag_result_role.txt", fileStr.toString());
	}

	// Offline utility: parses all synonym dyads from the export files, sums the
	// word2vec vectors of each dyad's words, and stores each dyad's 20 most
	// similar dyads (cosine similarity). O(n^2) over the dyad set.
	public void wordVecDataInit3() throws Exception {
		
		// Collect every distinct synonym dyad from the 31 relation files.
		Set<String> dyadSet = new HashSet<>();
		for(int i = 1;i<=31;i++){
			File file = new File("/root/data/symptom_relation_file/20170920_"+i);
			BufferedReader reader = null;
			String tempString = null;
			try {
				// Read the file line by line; columns are \1-separated.
				reader = new BufferedReader(new FileReader(file));
				while ((tempString = reader.readLine()) != null) {
					if (StringUtils.isNotBlank(tempString)) {
						String[] dyadArr = tempString.split((char) 1 + "",-1);
						// The second-to-last column is the synonym dyad.
						if (StringUtils.isNotBlank(dyadArr[dyadArr.length - 2])) {
							dyadSet.add(dyadArr[dyadArr.length - 2]);
						}
					}
				}
			} catch (Exception e) {
				e.printStackTrace();
			} finally {
				if (reader != null) {
					reader.close();
				}
			}
		}

		// Vector of each dyad: element-wise sum of its words' vectors.
		// assumes 200-dimensional word2vec vectors — TODO confirm against the model.
		Map<String, float[]> dyadAndVec = new HashMap<>();
		// Words without a vector are simply skipped.
		for (String dyadRow : dyadSet) {
			String[] dyadArr = dyadRow.substring(1, dyadRow.length()).split(",");
			float[] sum = new float[200];
			for (String dyad : dyadArr) {
				float[] dyadFloat = Word2VecAlgo.getWordVector(dyad);
				if(dyadFloat != null){
					for (int i = 0; i < dyadFloat.length; i++) {
						sum[i] = sum[i] + dyadFloat[i];
					}
				}
			}
			dyadAndVec.put(dyadRow, sum);
		}
		// Pairwise similarity pass: for each dyad, collect the other dyads with
		// the 20 highest similarity scores.
		List<Map<String , String>> dyadAndAllSimilar = new ArrayList<>();
		String dyad2;
		float[] vec2;
		double similar;
		String splitStr = (char)1+"";
		long startTime = System.currentTimeMillis();
		LOGGER.info("开始执行"+startTime+"");
		int j = 1;
		for (Map.Entry<String, float[]> entry : dyadAndVec.entrySet()) {
			String dyad = entry.getKey();
			float[] vec = entry.getValue();
			// Dyads grouped by their (rounded) similarity score.
			Map<Double, String> similarAndDyad = new HashMap<>();
			// Distinct similarity scores, sorted later to pick the top 20.
			List<Double> similarList = new ArrayList<>();
			for (Map.Entry<String, float[]> entry2 : dyadAndVec.entrySet()) {
				dyad2 = entry2.getKey();
				vec2 = entry2.getValue();
				if(!StringUtils.equals(dyad2, dyad)){
					// Cosine similarity of the two dyad vectors (NaN for zero vectors).
					similar = similarity(vec, vec2);
					if(!Double.isNaN(similar)){
						if(!similarList.contains(similar)){
							similarList.add(similar);
						}
						if(similarAndDyad.containsKey(similar)){
							similarAndDyad.put(similar, similarAndDyad.get(similar)+splitStr+dyad2);
						}else{
							similarAndDyad.put(similar, dyad2);
						}
					}
				}
			}
			// Sort the scores descending.
			Collections.sort(similarList,Collections.reverseOrder());
			// Take the dyads belonging to the top 20 scores. NOTE(review): a score
			// shared by several dyads contributes all of them, so more than 20
			// dyads may be emitted for one row.
			StringBuffer allDyad = new StringBuffer();
			if(similarList.size()>=20){
				for(int i= 0 ; i <20;i++){
					allDyad.append(similarAndDyad.get(similarList.get(i))).append(splitStr);
				}
			}else{
				for(int i= 0 ; i <similarList.size();i++){
					allDyad.append(similarAndDyad.get(similarList.get(i))).append(splitStr);
				}
			}
			// Record the dyad together with its similar dyads.
			Map<String, String> dyadAndSimilarDyadValue = new HashMap<>();
			dyadAndSimilarDyadValue.put("dyad", dyad);
			dyadAndSimilarDyadValue.put("similarDyad", allDyad.toString());
			dyadAndAllSimilar.add(dyadAndSimilarDyadValue);
			j++;
			// Progress log roughly every 500 dyads.
			if(j%500==0){
				LOGGER.info((System.currentTimeMillis()-startTime)+"");
			}
		}
		LOGGER.info("开始更新"+(System.currentTimeMillis()-startTime)+"");
		vocabDyadInitDao.batchInsertDyadSynonymyReference(dyadAndAllSimilar);
		LOGGER.info("更新结束"+(System.currentTimeMillis()-startTime)+"");

	}

	/**
	 * Cosine similarity of two equally-sized feature vectors, rounded UP to 4
	 * decimal places. Returns 0 when either vector is null; returns NaN when a
	 * vector has zero norm (callers rely on the NaN to filter such pairs, so it
	 * is deliberately not mapped to 0 here).
	 *
	 * @param feat1 first feature vector
	 * @param feat2 second feature vector (same length as feat1)
	 * @return cosine similarity in [-1, 1], ceiling-rounded; 0 or NaN as above
	 */
	public  double similarity(float[] feat1, float[] feat2) {
		// Missing vectors are treated as completely dissimilar.
		if (feat1 == null || feat2 == null) {
			return 0;
		}

		double normSq1 = 0;
		double normSq2 = 0;
		double dot = 0;
		for (int idx = 0; idx < feat1.length; idx++) {
			normSq1 += feat1[idx] * feat1[idx];
			normSq2 += feat2[idx] * feat2[idx];
			dot += feat1[idx] * feat2[idx];
		}

		// cos = dot / (|a| * |b|), then ceiling-rounded to 4 decimals.
		double cos = dot / (Math.sqrt(normSq1) * Math.sqrt(normSq2));
		return Math.ceil(cos * 10000) / 10000;
	}
	
}
