package com.huiquan.analysis.service;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.huiquan.analysis.constants.AnalysisDictionaryConstant;
import com.huiquan.analysis.constants.AnalysisSentenceConstant;
import com.huiquan.analysis.constants.RedisConstant;
import com.huiquan.analysis.dao.AnalysisDictionaryDao;
import com.huiquan.analysis.dao.AnalysisSentenceDao;
import com.huiquan.analysis.dao.AnalysisVocabularyDao;
import com.huiquan.analysis.domain.*;
import com.huiquan.analysis.utils.ListUtil;
import com.huiquan.foundation.util.BusinessUtil;
import com.huiquan.framework.base.BaseService;
import com.huiquan.framework.base.ReturnCode;
import com.huiquan.framework.base.ReturnData;
import com.huiquan.framework.utils.*;
import com.huiquan.sphinx.SphinxClient;
import com.huiquan.sphinx.SphinxException;
import com.huiquan.sphinx.SphinxMatch;
import com.huiquan.sphinx.SphinxResult;
import org.apache.commons.lang.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.ValueOperations;
import org.springframework.stereotype.Service;
import org.springframework.web.servlet.ModelAndView;

import javax.servlet.http.HttpServletRequest;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.*;

/**
 * Service for maintaining analysis sentences and their word segmentation
 * (分词): edit-screen preparation, vocabulary editing, pronoun references,
 * subdivision-sentence trees and the related redis caches.
 */
@Service
public class AnalysisSentenceService extends BaseService {

    // DAO for sentence rows and their context / subdivision queries.
    @Autowired
    private AnalysisSentenceDao analysisSentenceDao;
    // DAO for vocabulary (word segmentation) rows and extension tables.
    @Autowired
    private AnalysisVocabularyDao analysisVocabularyDao;

    // Redis template caching labeled/unlabeled sentence lists.
    @Autowired
    private RedisTemplate<String, List<AnalysisSentence>> redisTemplate;

    // Redis template for miscellaneous values (e.g. per-user unlabeled markers).
    @Autowired
    private RedisTemplate<String, Object> redisStrTemplate;

    // DAO for the user dictionary. NOTE(review): field name drops the "ary"
    // of "Dictionary" — kept as-is since other code may reference it.
    @Autowired
    private AnalysisDictionaryDao analysisDictionDao;

    // Sphinx search host, injected from configuration.
    @Value("${sphinx.host}")
    private String sphinxHost;

    /**
     * Prepares the model for the vocabulary edit screen of one sentence.
     *
     * @param idStr      sentence id as a decimal string
     * @param type       sentence type (e.g. {@code TYPE_XBS} = present illness history)
     * @param secondType second-level classification passed through to the view
     * @return the "analysis/sentence_edit" view with sentence, vocabulary and
     *         option-map model attributes
     */
    public ModelAndView preEdit(String idStr, int type, String secondType) {
        // Load the sentence and its word segmentation by id.
        Long id = Long.parseLong(idStr);
        AnalysisSentence analysisSentence = analysisSentenceDao.retrieveObjectById(id);
        List<AnalysisVocabulary> vocabularys = analysisVocabularyDao.retriveListBySid(id);
        Map<String, String> xbsValue = null;
        // For present-illness-history sentences, fetch the context row to learn
        // the second-level type used when building the property options.
        if (type == AnalysisSentenceConstant.TYPE_XBS) {
            xbsValue = analysisSentenceDao.selectContextBySid(analysisSentence.getId());
        }
        // Build the character/property option maps, then add any values present
        // in the current segmentation that are missing from the predefined sets.
        Map<String, String> characterMap = new LinkedHashMap<>();
        characterMap.putAll(AnalysisSentenceConstant.getPropertyMapByType(AnalysisSentenceConstant.CHARACTER_LABEL));
        Map<String, String> propertyMap = new LinkedHashMap<>();
        propertyMap.putAll(AnalysisSentenceConstant.getPropertyMapByTypeAndSecondType(type,
                xbsValue == null ? "" : xbsValue.get("second_type")));


        for (AnalysisVocabulary term : vocabularys) {
            if (!characterMap.containsKey(term.getCharacteristic())) {
                characterMap.put(term.getCharacteristic(), term.getCharacteristic());
            }
            if (!propertyMap.containsKey(term.getProperty())) {
                propertyMap.put(term.getProperty(), term.getProperty());
            }
            // Split the stored pronoun references (comma-separated) for display.
            List<String> pronounContentList = new ArrayList<>();
            if (StringUtils.isNotBlank(term.getPronounContent())) {

                pronounContentList = CollectionUtil.getListByArray(term.getPronounContent().split(","));
            }
            term.setShowPronounList(pronounContentList);
        }

        List<AnalysisVocabularySourceDto> vocabSources = getVocabSourceDto(vocabularys);

        Map<String, Object> map = new HashMap<>();
        map.put("id", id);
        map.put("sentence", analysisSentence.getSentence());
        map.put("vocabularys", vocabSources);
        map.put("characterMap", characterMap);
        map.put("position1List", AnalysisSentenceConstant.getPositionByType(AnalysisSentenceConstant.POSITION1_LABEL));
        map.put("position2List", AnalysisSentenceConstant.getPositionByType(AnalysisSentenceConstant.POSITION2_LABEL));
        map.put("propertyMap", propertyMap);
        map.put("secondType", secondType);

        // Present-illness-history sentences also show surrounding context.
        if (analysisSentence.getType() == AnalysisSentenceConstant.TYPE_XBS) {
            // NOTE(review): this repeats the selectContextBySid call made above
            // (when the `type` parameter was XBS) — could be reused.
            Map<String, String> context = analysisSentenceDao.selectContextBySid(id);
            map.put("aboveSentence1", context.get("above_sentence1"));
            map.put("aboveSentence2", context.get("above_sentence2"));
            map.put("aboveSentence3", context.get("above_sentence3"));
            map.put("belowSentence1", context.get("below_sentence1"));
            map.put("belowSentence2", context.get("below_sentence2"));
            map.put("belowSentence3", context.get("below_sentence3"));

            map.put("secondTypeMap", AnalysisSentenceConstant.secondTypeMap);
        }

        // Sentences that may contain M0 subdivisions: expose the subdivision tree.
//        if (analysisSentence.getType() == AnalysisSentenceConstant.TYPE_XBS && StringUtils.equals(secondType, AnalysisSentenceConstant.XBS_SOURCE_TYPE_IMAGEOLOGY_INFO)) {
        if (analysisSentence.getType() == AnalysisSentenceConstant.TYPE_XBS) {
            List<AnalysisSentenceSubdivision> subdivsionSentence = getSubSentence(id);
            map.put("subdivsionSentence", subdivsionSentence);
            // Direct subdivision sentences for maintenance on the edit screen.
            List<AnalysisSentenceSubdivision> sentences = analysisSentenceDao.retrieveSubObjectById(id, true);
            map.put("subSentences", sentences);
        }


        return new ModelAndView("analysis/sentence_edit", map);
    }

    /**
     * Iteratively (breadth-first) collects all existing subdivision sentences
     * reachable from {@code sid} whose vocabulary contains an M0 marker
     * (position2 == "0"), then appends the sentences without such a marker.
     * Only M0 sentences are expanded further; non-M0 sentences are leaves.
     *
     * @param sid root sentence id; {@code null} or non-positive yields an empty list
     * @return M0 sentences in discovery order, followed by all non-M0 sentences
     * @deprecated superseded by {@link #getSubSentence(Long)}
     */
    @Deprecated
    private List<AnalysisSentenceSubdivision> getSubdivisionSentence(Long sid) {

        // Loop flag: true while the previous level produced expandable children.
        boolean hasChild = true;
        List<AnalysisSentenceSubdivision> rtnList = new ArrayList<>();
        List<AnalysisSentenceSubdivision> rtnListWithOutM0 = new ArrayList<>();
        List<Long> temp = new ArrayList<>();
        // Seed the first iteration with the root id.
        temp.add(sid);
        if (sid != null && sid > 0) {
            // First level reads from the root table (flag passed to the DAO).
            boolean first = true;
            while (hasChild) {
                List<Long> temp2 = new ArrayList<>();
                // Query children of every id in the current level.
                for (Long tempSid : temp) {
                    List<AnalysisSentenceSubdivision> sentences = analysisSentenceDao.retrieveSubObjectById(tempSid, first);
                    if (sentences != null) {
                        for (AnalysisSentenceSubdivision sentence : sentences) {
                            boolean hasPosition20 = false;
                            List<AnalysisVocabulary> tempVocabularys = analysisVocabularyDao.retriveSubListBySid(sentence.getId());
                            sentence.setVocabularys(tempVocabularys);
                            for (AnalysisVocabulary vocabulary : tempVocabularys) {
                                // An M0 word means this sentence has further subdivisions.
                                if ("0".equals(vocabulary.getPosition2())) {
                                    temp2.add(sentence.getId());
                                    rtnList.add(sentence);
                                    hasPosition20 = true;
                                    break;
                                }
                            }
                            if (!hasPosition20) {
                                rtnListWithOutM0.add(sentence);
                            }
                        }
                    }
                }

                // Descend into the next level, if any.
                if (temp2.size() > 0) {
                    temp = temp2;
                    first = false;
                    hasChild = true;
                } else {
                    hasChild = false;
                }
            }
        }
        rtnList.addAll(rtnListWithOutM0);
        return rtnList;

    }


    /**
     * Iteratively (breadth-first) collects every subdivision sentence reachable
     * from the given sentence id, attaching each sentence's vocabulary and
     * linking children to their parent via {@code setChildSentences}.
     *
     * @param sid root sentence id; {@code null} or non-positive yields an empty list
     * @return all discovered subdivision sentences in breadth-first order
     */
    private List<AnalysisSentenceSubdivision> getSubSentence(Long sid) {

        List<AnalysisSentenceSubdivision> collected = new ArrayList<>();
        if (sid == null || sid <= 0) {
            return collected;
        }

        // Seed the traversal with a placeholder node that only carries the root id.
        AnalysisSentenceSubdivision root = new AnalysisSentenceSubdivision();
        root.setId(sid);
        List<AnalysisSentenceSubdivision> currentLevel = new ArrayList<>();
        currentLevel.add(root);
        // The DAO needs to know whether the id refers to the root sentence table.
        boolean rootLevel = true;

        while (!currentLevel.isEmpty()) {
            List<AnalysisSentenceSubdivision> nextLevel = new ArrayList<>();
            for (AnalysisSentenceSubdivision parent : currentLevel) {
                List<AnalysisSentenceSubdivision> children =
                        analysisSentenceDao.retrieveSubObjectById(parent.getId(), rootLevel);
                if (children == null) {
                    continue;
                }
                parent.setChildSentences(children);
                for (AnalysisSentenceSubdivision child : children) {
                    nextLevel.add(child);
                    collected.add(child);
                    // Attach the child's vocabulary when it has any.
                    List<AnalysisVocabulary> childVocabs =
                            analysisVocabularyDao.retriveSubListBySid(child.getId());
                    if (childVocabs != null && childVocabs.size() > 0) {
                        child.setVocabularys(childVocabs);
                    }
                }
            }
            currentLevel = nextLevel;
            rootLevel = false;
        }
        return collected;
    }

    /**
     * Converts vocabulary entities into display DTOs, merging in the per-word
     * "source" classification for present-illness-history sentences.
     *
     * @param vocabularys segmentation of one sentence (all entries share a sid)
     * @return one DTO per vocabulary entry, in the same order; empty for
     *         null/empty input
     */
    public List<AnalysisVocabularySourceDto> getVocabSourceDto(List<AnalysisVocabulary> vocabularys) {

        List<AnalysisVocabularySourceDto> resultList = new ArrayList<>();
        // Fix: the original dereferenced vocabularys.get(0) unconditionally and
        // threw on an empty or null list.
        if (vocabularys == null || vocabularys.isEmpty()) {
            return resultList;
        }

        List<AnalysisVocabularySourceDto> sourceDtos = new ArrayList<>();
        if (vocabularys.get(0).getType() == AnalysisSentenceConstant.TYPE_XBS) {
            sourceDtos = analysisVocabularyDao.retrieveSourceDtoByVids(vocabularys.get(0).getSid());
            // Defensive: treat a null DAO result like "no sources".
            if (sourceDtos == null) {
                sourceDtos = new ArrayList<>();
            }
        }

        for (int i = 0; i < vocabularys.size(); i++) {
            AnalysisVocabulary vocabulary = vocabularys.get(i);

            AnalysisVocabularySourceDto result = new AnalysisVocabularySourceDto();
            result.setId(vocabulary.getId());
            result.setType(vocabulary.getType());
            result.setSid(vocabulary.getSid());
            result.setVocabulary(vocabulary.getVocabulary());
            result.setCharacteristic(vocabulary.getCharacteristic());
            result.setProperty(vocabulary.getProperty());
            result.setInitial(vocabulary.getInitial());
            result.setPosition1(vocabulary.getPosition1());
            result.setPosition2(vocabulary.getPosition2());
            result.setShowPronounList(vocabulary.getShowPronounList());
            result.setPronounContent(vocabulary.getPronounContent());
            result.setIsPronoun(vocabulary.getIsPronoun());
            // Fix: the original indexed sourceDtos.get(i) whenever the list was
            // non-empty, throwing IndexOutOfBounds when it was shorter than the
            // vocabulary list.
            if (i < sourceDtos.size()) {
                result.setSource(sourceDtos.get(i).getSource());
            } else {
                result.setSource("");
            }

            resultList.add(result);
        }

        return resultList;
    }

    /**
     * Applies a vocabulary edit for one sentence: validates the words against
     * the sentence text, updates the dictionary, persists sentence + words +
     * pronoun references, and refreshes the labeled/unlabeled redis caches.
     *
     * @param typeStr        sentence type as a decimal string
     * @param secondType     second-level classification (XBS sentences)
     * @param idStr          sentence id as a decimal string
     * @param user           editing user (audit fields)
     * @param vocabularyStrs edited word texts, in sentence order
     * @param characterStrs  edited word characters, parallel to vocabularyStrs
     * @param propertyStrs   edited word properties, parallel to vocabularyStrs
     * @param position1Strs  edited position1 labels, parallel to vocabularyStrs
     * @param position2Strs  edited position2 labels, parallel to vocabularyStrs
     * @param req            request carrying pronoun checkbox parameters
     * @param sourceStrs     per-word source classification (XBS sentences)
     * @return success, or ANALYSIS_VOCABULARYS_CHECK_ERROR when the words do
     *         not reassemble the sentence
     * @throws Exception propagated from persistence/redis operations
     */
    public ReturnData edit(String typeStr, String secondType, String idStr, User user, String[] vocabularyStrs,
                           String[] characterStrs, String[] propertyStrs, String[] position1Strs, String[] position2Strs,
                           HttpServletRequest req, String[] sourceStrs) throws Exception {
        Long id = Long.parseLong(idStr);
        int type = Integer.parseInt(typeStr);
        // Load the sentence being edited.
        AnalysisSentence analysisSentence = analysisSentenceDao.retrieveObjectById(id);

        // Verify the submitted words concatenate back into the sentence text.
        boolean vocabularyFlag = checkVocabulary(vocabularyStrs, analysisSentence.getSentence());
        if (!vocabularyFlag) {
            return ReturnUtil.fail(ReturnCode.ANALYSIS_VOCABULARYS_CHECK_ERROR);
        }

        // Assemble the parallel parameter arrays into vocabulary entities.
        List<AnalysisVocabulary> vocabularys = getVocabularysByParam(vocabularyStrs, characterStrs, propertyStrs,
                position1Strs, position2Strs, id, type);
        // Diff old vs. new segmentation and maintain the dictionary accordingly.
        // NOTE(review): `result` is currently ignored because the failure check
        // below is commented out — dictionary errors do not abort the edit.
        List<AnalysisVocabulary> rawVocabularys = analysisVocabularyDao.retriveListBySid(id);
        ReturnData result = updateDictionary(vocabularys, rawVocabularys, analysisSentence, user, req, secondType);
//        if (!result.getReturnCode().getCode().equals("0")) {
//            return result;
//        }
        if (AnalysisSentenceConstant.TYPE_XBS == type) {
            // Read the pronoun flags/reference indexes from the request.
            getPronounCountList(vocabularys, req);
        }

        // Persist the sentence and its words.
        updateSentenceAndVocabularys(analysisSentence, vocabularys, user);
        // Persist pronoun references into the extension table.
        savePronoun(vocabularys, analysisSentence.getId());
        analysisSentence = analysisSentenceDao.retrieveObjectById(id);

        LOGGER.info("更新redis开始");
        // Move the sentence from the "unlabeled" redis list to the "labeled" one
        // for its type. The (char)1 bytes are the cache-key field separator.
        if (type == AnalysisSentenceConstant.TYPE_CT) {
            removeAnalySentenceceOfRedis(id, RedisConstant.KEY_PATTERN_MEDICAL_UNLABELED);
            addAnalySentenceceInRedis(analysisSentence, RedisConstant.KEY_PATTERN_MEDICAL_LABELED, vocabularys);
        } else if (type == AnalysisSentenceConstant.TYPE_XBS) {
            removeAnalySentenceceOfRedis(id, RedisConstant.KEY_PATTERN_HPI_UNLABELED_NO_SECOND_TYPE + (char) 1 + secondType + (char) 1 + "*");
            addAnalySentenceceInRedis(analysisSentence, RedisConstant.KEY_PATTERN_HPI_LABELED_NO_SECOND_TYPE + (char) 1 + secondType + (char) 1 + "*", vocabularys);

            // Persist the per-word clause/source classification for XBS sentences.
            addSource(vocabularys, sourceStrs);

        } else if (type == AnalysisSentenceConstant.TYPE_ZS) {
            removeAnalySentenceceOfRedis(id, RedisConstant.KEY_PATTERN_COMPLAIN_UNLABELED);
            addAnalySentenceceInRedis(analysisSentence, RedisConstant.KEY_PATTERN_COMPLAIN_LABELED, vocabularys);
        }
        LOGGER.info("更新redis完成");


        // Maintain the per-user "still unlabeled" marker list in redis: drop
        // entries whose sentence no longer exists or that refer to this id.
        ValueOperations<String, Object> valueopsOther = redisStrTemplate.opsForValue();
        Object unlabel = valueopsOther.get(RedisConstant.MARK_SENTENCE_UNLABEL);
        List<String> unlabelList = new ArrayList<>();
        if (unlabel != null) {
            unlabelList = (List<String>) unlabel;
        }
        Iterator<String> iter = unlabelList.iterator();
        while (iter.hasNext()) {
            String item = iter.next();
            String[] itemArr = item.split((char) 1 + "");
            AnalysisSentence newSentence = analysisSentenceDao.retrieveObjectById(Long.parseLong(itemArr[0]));
            // NOTE(review): contains(idStr) is a substring match, so id "12"
            // also matches entries for id "123" — presumably the id should be
            // compared against itemArr[0] exactly; confirm before changing.
            if (newSentence == null || item.contains(idStr)) {
                iter.remove();
            }
        }
        valueopsOther.set(RedisConstant.MARK_SENTENCE_UNLABEL, unlabelList);

        return ReturnUtil.success();
    }

    /**
     * Populates each vocabulary entry's pronoun flag and referenced-word index
     * list from the HTTP request parameters ("isPronoun" and "pronoun_&lt;i&gt;").
     *
     * @param vocabularys segmentation entries to enrich, in form order
     * @param req         request carrying the pronoun checkbox parameters
     */
    private void getPronounCountList(List<AnalysisVocabulary> vocabularys, HttpServletRequest req) {

        // Fix: the body was an exact duplicate of the (List, String[], Map)
        // overload below — delegate so the parsing logic lives in one place.
        Map<String, Object> map = req.getParameterMap();

        getPronounCountList(vocabularys, (String[]) map.get("isPronoun"), map);
    }

    /**
     * Populates each vocabulary entry's pronoun flag and referenced-word index
     * list from pre-extracted request parameters.
     *
     * @param vocabularys segmentation entries to enrich, in form order
     * @param isPronouns  "0"/"1" per word, parallel to {@code vocabularys}
     * @param paramMap    request parameter map holding "pronoun_&lt;i&gt;" arrays
     *                    of "0"/"1" checkbox values (1-based word positions)
     */
    private void getPronounCountList(List<AnalysisVocabulary> vocabularys, String[] isPronouns, Map<String, Object> paramMap) {

        // Walk words 1-based to match the form's parameter naming.

        for (int i = 1; i <= vocabularys.size(); i++) {
            vocabularys.get(i - 1).setIsPronoun(Integer.parseInt(isPronouns[i - 1]));
            // Only pronoun words ("1") can reference other words.
            if (StringUtils.equals(isPronouns[i - 1], "1")) {
                String[] pronounContent = (String[]) paramMap.get("pronoun_" + i);
                if (pronounContent != null) {
                    StringBuilder indexVocab = new StringBuilder();
                    for (int j = 1; j <= pronounContent.length; j++) {
                        // Record the (1-based) position of each referenced word.
                        if (StringUtils.equals(pronounContent[j - 1], "1")) {
                            // The checkbox list omits the word itself, so positions
                            // at or past the current word are shifted by one.
                            // NOTE(review): assumes the form renders pronoun_<i>
                            // without a self entry — confirm against the JSP/page.
                            if (j >= i) {
                                indexVocab.append(j + 1).append(",");
                            } else {
                                indexVocab.append(j).append(",");
                            }
                        }
                    }
                    // Trailing comma is stripped later by savePronoun().
                    vocabularys.get(i - 1).setPronounContent(indexVocab.toString());
                }
            }
        }
    }

    /**
     * Persists pronoun reference data into the vocabulary extension table,
     * translating 1-based word positions into the freshly generated row ids.
     *
     * @param vocabularys edited segmentation with pronoun flags/positions set
     * @param id          sentence id whose rows were just (re)inserted
     */
    private void savePronoun(List<AnalysisVocabulary> vocabularys, Long id) {

        // Re-read the inserted rows so the new database ids are available.
        List<AnalysisVocabulary> persisted = analysisVocabularyDao.retriveListBySid(id);
        List<AnalysisVocabulary> enlargeRows = new ArrayList<>();

        for (int idx = 0; idx < vocabularys.size(); idx++) {
            AnalysisVocabulary vocab = vocabularys.get(idx);
            if (vocab.getIsPronoun() == null) {
                continue;
            }

            String content = vocab.getPronounContent();
            if (StringUtils.isNotBlank(content)) {
                // Map each referenced 1-based position onto its persisted row id.
                StringBuilder mapped = new StringBuilder();
                for (String position : content.split(",")) {
                    if (mapped.length() > 0) {
                        mapped.append(",");
                    }
                    mapped.append(persisted.get(Integer.parseInt(position) - 1).getId());
                }
                vocab.setPronounContent(mapped.toString());
            } else {
                vocab.setPronounContent("");
            }
            vocab.setId(persisted.get(idx).getId());
            enlargeRows.add(vocab);
        }

        if (!enlargeRows.isEmpty()) {
            // Insert the extension rows...
            analysisVocabularyDao.batchInsertEnlarge(enlargeRows);
            // ...then purge extension rows whose vocabulary no longer exists.
            analysisVocabularyDao.deleteEnlarge();
        }
    }

    /**
     * Persists pronoun reference data for subdivision vocabularies, translating
     * 1-based word positions into the generated subdivision row ids and
     * updating each row in place.
     *
     * @param vocabularys edited subdivision segmentation with pronoun data set
     * @param id          subdivision sentence id whose rows were just inserted
     */
    private void savePronounSubdiv(List<AnalysisVocabulary> vocabularys, Long id) {

        // Re-read the inserted subdivision rows to obtain their new ids.
        List<AnalysisVocabulary> persisted = analysisVocabularyDao.retriveSubListBySid(id);

        for (int idx = 0; idx < vocabularys.size(); idx++) {
            AnalysisVocabulary vocab = vocabularys.get(idx);
            if (vocab.getIsPronoun() == null) {
                continue;
            }

            String content = vocab.getPronounContent();
            if (StringUtils.isNotBlank(content)) {
                // Map each referenced 1-based position onto its persisted row id.
                StringBuilder mapped = new StringBuilder();
                for (String position : content.split(",")) {
                    if (mapped.length() > 0) {
                        mapped.append(",");
                    }
                    mapped.append(persisted.get(Integer.parseInt(position) - 1).getId());
                }
                vocab.setPronounContent(mapped.toString());
            } else {
                vocab.setPronounContent("");
            }
            vocab.setId(persisted.get(idx).getId());
            // Unlike savePronoun, each row is updated individually here.
            analysisVocabularyDao.updateSubVocab(vocab);
        }
    }

    /**
     * Deletes every subdivision record reachable from the given sentence id
     * (breadth-first), then removes the orphaned subdivision vocabularies.
     *
     * @param sid       root sentence id; {@code null} or non-positive is a no-op
     * @param isSidFlag whether {@code sid} refers to the root sentence table
     *                  (passed through to the DAO for the first level only)
     */
    private void deleteSubdivisionBySid(Long sid, boolean isSidFlag) {

        List<Long> toDelete = new ArrayList<>();

        if (sid != null && sid > 0) {
            List<Long> frontier = new ArrayList<>();
            frontier.add(sid);
            // Only the first level uses the caller-supplied table flag.
            boolean topLevel = isSidFlag;
            while (!frontier.isEmpty()) {
                List<Long> next = new ArrayList<>();
                for (Long parentId : frontier) {
                    List<AnalysisSentenceSubdivision> children = analysisSentenceDao.retrieveSubObjectById(parentId, topLevel);
                    if (children == null) {
                        continue;
                    }
                    for (AnalysisSentenceSubdivision child : children) {
                        next.add(child.getId());
                        toDelete.add(child.getId());
                    }
                }
                frontier = next;
                topLevel = false;
            }
        }

        if (!toDelete.isEmpty()) {
            analysisSentenceDao.deleteSubdivisionBySidList(toDelete);
            // Clean up vocabulary rows left without a subdivision sentence.
            analysisVocabularyDao.deleteSubdivisionVocab();
        }
    }

    /**
     * Compares the segmentation before and after editing to decide whether
     * anything actually changed.
     *
     * @param rawVocabularys segmentation before the edit
     * @param vocabularys    segmentation after the edit (same order)
     * @return {@code true} when both lists have the same size and every entry
     *         matches on vocabulary, characteristic, property, position1 and
     *         position2 (the latter four compared trimmed)
     */
    private boolean checkVocabularySame(List<AnalysisVocabulary> rawVocabularys, List<AnalysisVocabulary> vocabularys) {
        if (rawVocabularys.size() != vocabularys.size()) {
            return false;
        }
        for (int i = 0; i < rawVocabularys.size(); i++) {
            AnalysisVocabulary raw = rawVocabularys.get(i);
            AnalysisVocabulary cur = vocabularys.get(i);
            // Fix: use null-safe StringUtils.trim — calling .trim() directly
            // threw NullPointerException when any field was null. getVocabulary
            // is intentionally compared untrimmed, as before.
            if (!StringUtils.equals(StringUtils.trim(raw.getPosition2()), StringUtils.trim(cur.getPosition2()))
                    || !StringUtils.equals(StringUtils.trim(raw.getPosition1()), StringUtils.trim(cur.getPosition1()))
                    || !StringUtils.equals(StringUtils.trim(raw.getCharacteristic()), StringUtils.trim(cur.getCharacteristic()))
                    || !StringUtils.equals(raw.getVocabulary(), cur.getVocabulary())
                    || !StringUtils.equals(StringUtils.trim(raw.getProperty()), StringUtils.trim(cur.getProperty()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Persists each subdivision sentence together with its vocabulary rows.
     *
     * @param subdivisionSentence sentences to save, each carrying its words
     * @param sid                 parent sentence id
     * @param isSidFlag           whether {@code sid} refers to the root table
     * @param type                sentence type, stored as a string
     * @param secondType          second-level classification
     * @param user                editing user (audit fields)
     */
    private void saveSubdivisionSentence(List<AnalysisSentence> subdivisionSentence, Long sid, boolean isSidFlag, Integer type, String secondType, User user) {
        for (AnalysisSentence item : subdivisionSentence) {
            // Insert the sentence first to obtain its generated id...
            Long generatedId = analysisSentenceDao.insertSubdivisionSentence(item, sid, isSidFlag, type + "", secondType, user);
            // ...then persist this sentence's words under that id.
            analysisVocabularyDao.batchInsertSubdivisionVocab(item.getVocabularys(), generatedId, type);
        }
    }

    /**
     * Calls the remote path-splitting (分路径) service with the given words and
     * converts its JSON response into sentence objects, one per returned
     * vocabulary list.
     *
     * @param vocabularies words of the sentence to split
     * @return reconstructed sentences with their vocabularies; empty when the
     *         service returns nothing or a non-200 code
     */
    public List<AnalysisSentence> getSubdivisionByVocab(List<AnalysisVocabulary> vocabularies) {
        List<AnalysisSentence> sentences = new ArrayList<>();

        // Serialize the word list as the single form parameter.
        Map<String, String> param = new HashMap<>();
        param.put("vocabularyList", net.sf.json.JSONArray.fromObject(getMapListByVocab(vocabularies)).toString());

        // NOTE(review): endpoint is hard-coded; consider moving to configuration.
        String jsonStr = HttpRequest.sendPost("http://172.16.0.206:9099/analysis", param);
        if (jsonStr == null || jsonStr.isEmpty()) {
            LOGGER.info("分路径接口调用返回为空");
            return sentences;
        }

        JSONObject response = JSON.parseObject(jsonStr);
        if (response.getIntValue("code") != 200) {
            return sentences;
        }

        // data.vocabularyLists is an array of word arrays; each word array is
        // one reconstructed sentence.
        JSONArray sentenceArrays = response.getJSONObject("data").getJSONArray("vocabularyLists");
        for (int j = 0; j < sentenceArrays.size(); j++) {
            JSONArray wordArray = sentenceArrays.getJSONArray(j);
            AnalysisSentence sentence = new AnalysisSentence();
            List<AnalysisVocabulary> words = new ArrayList<>();
            StringBuilder fullText = new StringBuilder();
            for (int i = 0; i < wordArray.size(); i++) {
                JSONObject word = wordArray.getJSONObject(i);
                AnalysisVocabulary vocab = new AnalysisVocabulary();
                vocab.setVocabulary(word.getString("vocabulary"));
                vocab.setCharacteristic(word.getString("character"));
                vocab.setProperty(word.getString("property"));
                vocab.setPosition1(word.getString("position1"));
                vocab.setPosition2(word.getString("position2"));
                fullText.append(vocab.getVocabulary());
                words.add(vocab);
            }
            sentence.setSentence(fullText.toString());
            sentence.setVocabularys(words);
            sentences.add(sentence);
        }
        return sentences;
    }

    /**
     * Converts vocabulary entities into the plain key/value maps expected by
     * the remote path-splitting service.
     *
     * @param vocabularies words to convert
     * @return one map per word with keys vocabulary/character/property/position1/position2
     */
    private List<Map<String, String>> getMapListByVocab(List<AnalysisVocabulary> vocabularies) {

        List<Map<String, String>> payload = new ArrayList<>();
        for (AnalysisVocabulary item : vocabularies) {
            Map<String, String> entry = new HashMap<>();
            entry.put("vocabulary", item.getVocabulary());
            entry.put("character", item.getCharacteristic());
            entry.put("property", item.getProperty());
            entry.put("position1", item.getPosition1());
            entry.put("position2", item.getPosition2());
            payload.add(entry);
        }
        return payload;
    }


    /**
     * Validates positional labeling rules over a sentence's words:
     * (1) each of the markers BB, MM, EE must occur either zero or &gt;= 2 times;
     * (2) two consecutive "B" words (or two consecutive "M" words) must both
     * have a non-blank position2.
     *
     * @param vocabularys words of one sentence, in order
     * @return {@code null} when all rules pass, otherwise a failure ReturnData
     *         with ANALYSIS_POSITION_CHECK_ERROR
     * @deprecated no longer called by the current edit flow
     */
    @Deprecated
    public ReturnData checkVocabRule(List<AnalysisVocabulary> vocabularys) {
        // Counters for rule (1): BB/MM/EE occurrences.
        int bb = 0;
        int mm = 0;
        int ee = 0;
        boolean continueB = false;
        boolean continueM = false;
        // position2 of the PREVIOUS word, carried across iterations for rule (2).
        String position2 = "";
        for (AnalysisVocabulary vocabulary : vocabularys) {
            // Rule (1): count the pairwise markers.
            if ("BB".equals(vocabulary.getPosition1())) {
                bb++;
            } else if ("MM".equals(vocabulary.getPosition1())) {
                mm++;
            } else if ("EE".equals(vocabulary.getPosition1())) {
                ee++;
            }

            // Rule (2): two consecutive B (or M) words need position2 on both.
            // NOTE(review): rc.setDesc mutates what appears to be a shared
            // ReturnCode constant — confirm ReturnCode is not a singleton enum
            // before relying on concurrent use.
            if ("B".equals(vocabulary.getPosition1())) {
                if (continueB) {
                    if (StringUtils.isBlank(position2) || StringUtils.isBlank(vocabulary.getPosition2())) {
                        ReturnCode rc = ReturnCode.ANALYSIS_POSITION_CHECK_ERROR;
                        rc.setDesc("两个连续的B或者两个连续的M，而对应的position2不能为空");
                        return ReturnUtil.fail(rc);
                    }
                }
                continueB = true;
            } else {
                continueB = false;
            }
            if ("M".equals(vocabulary.getPosition1())) {
                if (continueM) {
                    if (StringUtils.isBlank(position2) || StringUtils.isBlank(vocabulary.getPosition2())) {
                        ReturnCode rc = ReturnCode.ANALYSIS_POSITION_CHECK_ERROR;
                        rc.setDesc("两个连续的B或者两个连续的M，而对应的position2不能为空");
                        return ReturnUtil.fail(rc);
                    }
                }
                continueM = true;
            } else {
                continueM = false;
            }
            position2 = vocabulary.getPosition2();
        }
        if (bb == 1 || mm == 1 || ee == 1) {
            ReturnCode rc = ReturnCode.ANALYSIS_POSITION_CHECK_ERROR;
            rc.setDesc("如果存在BB,MM,EE,个数必须要>=2");
            return ReturnUtil.fail(rc);
        }
        return null;
    }

    /**
     * Replaces the per-word source classification of one sentence: deletes the
     * previous classification rows, then batch-inserts the new ones (only when
     * at least one non-empty source was supplied).
     *
     * @param vocabularys words of the sentence (all sharing a sid); empty/null is a no-op
     * @param sourceStrs  source label per word, parallel to {@code vocabularys};
     *                    missing trailing entries are treated as empty
     */
    private void addSource(List<AnalysisVocabulary> vocabularys, String[] sourceStrs) {

        // Fix: the original dereferenced vocabularys.get(0) unconditionally.
        if (vocabularys == null || vocabularys.isEmpty()) {
            return;
        }

        // Remove this sentence's previous classification rows.
        analysisVocabularyDao.deleteSourceBySid(vocabularys.get(0).getSid());

        List<AnalysisVocabularySourceDto> sourceList = new ArrayList<>();

        boolean addFlag = false;
        for (int i = 0; i < vocabularys.size(); i++) {
            AnalysisVocabulary vocabulary = vocabularys.get(i);
            AnalysisVocabularySourceDto source = new AnalysisVocabularySourceDto();

            // Fix: indexing sourceStrs[i] threw ArrayIndexOutOfBounds when the
            // array was shorter than the vocabulary list (or null).
            String sourceValue = (sourceStrs != null && i < sourceStrs.length) ? sourceStrs[i] : "";

            source.setSid(vocabulary.getSid());
            source.setVocabulary(vocabulary.getVocabulary());
            source.setProperty(vocabulary.getProperty());
            source.setCharacteristic(vocabulary.getCharacteristic());
            source.setPosition1(vocabulary.getPosition1());
            source.setPosition2(vocabulary.getPosition2());
            source.setSource(sourceValue);

            sourceList.add(source);

            if (sourceValue != null && !sourceValue.isEmpty()) {
                addFlag = true;
            }
        }

        // Only write rows when at least one word actually has a source.
        if (addFlag) {
            analysisVocabularyDao.batchInsertSource(sourceList);
        }
    }

    /**
     * Maintains the analysis dictionaries after a sentence's segmentation has been edited.
     * Walks the pre-edit and post-edit token lists with two cursors; wherever they diverge it
     * grows the shorter side until both spell the same phrase, classifying the difference as a
     * merge (several old tokens became one new token) or a split (one old token became several).
     * Merges go to the user/time dictionary, splits to the ambiguity dictionary. When the new
     * entry conflicts with an existing one, the prepared context is stashed in the HTTP session
     * under "objMap" and a confirmation request is returned instead of writing.
     *
     * @param vocabularys    token list after the edit
     * @param rawVocabularys token list before the edit
     * @param sentence       the sentence being edited
     * @param user           operating user, recorded as the dictionary modifier
     * @param req            current request; conflict context is stored in its session
     * @param secondType     secondary classification used to pick dictionary types
     * @return success, or DYNAMIC_DESC_EXCEPTION asking the user to confirm a conflict
     */
    private ReturnData updateDictionary(List<AnalysisVocabulary> vocabularys, List<AnalysisVocabulary> rawVocabularys,
                                        AnalysisSentence sentence, User user, HttpServletRequest req, String secondType) {
        LOGGER.info("开始维护词典，sentence=" + sentence.getSentence());
        Map<String, Object> searchParam = null;
        int newIndex = 0, rawIndex = 0, index = 0, count = 0;
        String newStr = "", rawStr = "";
        boolean mergeFlag = true;
        String chara = null;
        // Two cursors, one per token list, advanced in lockstep from the front.
        while (newIndex < vocabularys.size() && rawIndex < rawVocabularys.size()) {
            AnalysisVocabulary rawVocabulary = rawVocabularys.get(rawIndex);
            AnalysisVocabulary newVocabulary = vocabularys.get(newIndex);

            if (!rawVocabulary.getVocabulary().equals(newVocabulary.getVocabulary())) {
                // Tokens diverge here; grow each side until the phrases line up again.
                newStr = newVocabulary.getVocabulary();
                rawStr = rawVocabulary.getVocabulary();
                index = newIndex;
                while (!newStr.equals(rawStr)) {
                    if (rawStr.contains(newStr)) {
                        // Old phrase contains the new one: extend the new side — this is a split, not a merge.
                        newStr += vocabularys.get(newIndex + 1).getVocabulary();
                        newIndex++;
                        mergeFlag = false;
                    } else if (newStr.contains(rawStr)) {
                        // New phrase contains the old one: extend the old side (merge case).
                        rawStr += rawVocabularys.get(rawIndex + 1).getVocabulary();
                        rawIndex++;
                        chara = rawVocabulary.getCharacteristic();
                    } else {
                        // Neither contains the other: the lists are inconsistent — log and leave the inner loop.
                        LOGGER.error("Sentence update error 1, sid=[{}], rawStr=[{}], newStr=[{}]",
                                new Object[]{sentence.getId(), rawStr, newStr});
                        break;
                    }
                }
                // After the loop newStr and rawStr must match; anything else is an anomaly worth logging.
                if (!newStr.equals(rawStr)) {
                    LOGGER.error("Sentence update error 2, sid=[{}], rawStr=[{}], newStr=[{}]",
                            new Object[]{sentence.getId(), rawStr, newStr});
                } else {
                    if (mergeFlag) {
                        // Merge case: several raw tokens became one new token.
                        AnalysisDictionary dic = new AnalysisDictionary();
                        dic.setVocabulary(newStr);
                        dic.setDictionary(newStr + '\t' + newVocabulary.getCharacteristic() + '\t' + "1000");
                        dic.setDeleteFlag(AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_DELETE_FLAG_N);
                        // "mqt" characteristic routes the phrase to the time dictionary.
                        if ("mqt".equals(chara)) {
                            dic.setType(AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_TIME);
                        } else {
                            dic.setType(getUserLibraryDictType(sentence.getType(), secondType));
                        }
                        dic.setModifierId(user.getUserId());
                        dic.setModifierName(user.getRealName());
                        // Check whether the phrase already exists in the target (user/time) dictionary.
                        searchParam = new HashMap<>();
                        if ("mqt".equals(chara)) {
                            searchParam.put("type", AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_TIME);
                        } else {
                            searchParam.put("type", getUserLibraryDictType(sentence.getType(), secondType));
                        }
                        searchParam.put("keyword", newStr);
                        count = analysisDictionDao.selectAnalysisDictionaryCount(searchParam);
                        if (count > 0) {
                            // Conflict: park the prepared context in the session and ask the user to confirm.
                            Map<String, Object> map = new HashMap<>();
                            map.put("mergeFlag", mergeFlag);
                            map.put("searchParam", searchParam);
                            map.put("newStr", newStr);
                            map.put("dic", dic);
                            map.put("vocabularys", vocabularys);
                            map.put("sentence", sentence);
                            req.getSession().setAttribute("objMap", map);
                            // NOTE(review): setDesc mutates the shared ReturnCode constant — confirm this is safe under concurrency.
                            ReturnCode returnCode = ReturnCode.DYNAMIC_DESC_EXCEPTION;
                            returnCode.setDesc(newStr + "  分词规则与用户词典冲突，是否确定提交？");
                            return new ReturnData(returnCode, map);
                        }
                        // Remove the phrase from the external dictionary if present.
                        if (analysisDictionDao.selectOutsideDictionaryCountByVocabulary(newStr) > 0) {
                            analysisDictionDao.deleteOutsideDictionaryByVocabulary(newStr);
                        }
                        // If the phrase already exists in the ambiguity dictionary...
                        searchParam.put("type", getAmbiguityDictType(sentence.getType(), secondType));
                        count = analysisDictionDao.selectAnalysisDictionaryCount(searchParam);
                        if (count > 0) {
                            // ...store the whole sentence's segmentation there instead.
                            dic.setVocabulary(sentence.getSentence());
                            dic.setDictionary(this.getSentenceDictionary(sentence, vocabularys));
                            dic.setType(getAmbiguityDictType(sentence.getType(), secondType));
                        }
                        // Finally insert the dictionary entry.
                        LOGGER.info("插入词典，dic：{}", dic.toString());
                        analysisDictionDao.insertAnalysisDictionary(dic);
                    } else {
                        // Split case: one raw token became several new tokens.
                        // Dictionary line: phrase \t (token \t characteristic)* over the split range.
                        StringBuilder builder = new StringBuilder(newStr);
                        for (; index <= newIndex; index++) {
                            builder.append('\t').append(vocabularys.get(index).getVocabulary()).append('\t')
                                    .append(vocabularys.get(index).getCharacteristic());
                        }
                        AnalysisDictionary dic = new AnalysisDictionary();
                        dic.setVocabulary(newStr);
                        dic.setDictionary(builder.toString());
                        dic.setDeleteFlag(AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_DELETE_FLAG_N);
                        dic.setType(getAmbiguityDictType(sentence.getType(), secondType));
                        dic.setModifierId(user.getUserId());
                        dic.setModifierName(user.getRealName());
                        // Check whether the phrase already exists in the ambiguity dictionary.
                        searchParam = new HashMap<>();
                        searchParam.put("type", getAmbiguityDictType(sentence.getType(), secondType));
                        searchParam.put("keyword", newStr);
                        count = analysisDictionDao.selectAnalysisDictionaryCount(searchParam);
                        if (count > 0) {
                            // Conflict: park the prepared context in the session and ask the user to confirm.
                            Map<String, Object> map = new HashMap<>();
                            map.put("mergeFlag", mergeFlag);
                            map.put("searchParam", searchParam);
                            map.put("newStr", newStr);
                            map.put("dic", dic);
                            map.put("vocabularys", vocabularys);
                            map.put("sentence", sentence);
                            req.getSession().setAttribute("objMap", map);
                            ReturnCode returnCode = ReturnCode.DYNAMIC_DESC_EXCEPTION;
                            returnCode.setDesc(newStr + " 分词规则与歧义词典冲突，是否确定提交？");
                            return new ReturnData(returnCode, map);
                        }
                        // Remove the phrase from the external dictionary if present.
                        if (analysisDictionDao.selectOutsideDictionaryCountByVocabulary(newStr) > 0) {
                            analysisDictionDao.deleteOutsideDictionaryByVocabulary(newStr);
                        }
                        // If the phrase also exists in the user dictionary...
                        searchParam.put("type", getUserLibraryDictType(sentence.getType(), secondType));
                        count = analysisDictionDao.selectAnalysisDictionaryCount(searchParam);
                        if (count > 0) {
                            // ...store the whole sentence's segmentation in the ambiguity dictionary instead.
                            dic.setVocabulary(sentence.getSentence());
                            dic.setDictionary(this.getSentenceDictionary(sentence, vocabularys));
                            dic.setType(getAmbiguityDictType(sentence.getType(), secondType));
                        }
                        // Finally insert the dictionary entry.
                        LOGGER.info("插入词典，dic：{}", dic.toString());
                        analysisDictionDao.insertAnalysisDictionary(dic);
                    }
                }

                // Reset per-difference temporaries.
                // NOTE(review): chara is not reset here, so an "mqt" seen in one difference leaks into later ones — confirm intended.
                newStr = "";
                rawStr = "";
                mergeFlag = true;
            }
            newIndex++;
            rawIndex++;
        }
        LOGGER.info("维护词典完成");
        return ReturnUtil.success();
    }

    /**
     * Resolves the ambiguity-dictionary type for a sentence type / second type pair.
     * Chief-complaint sentences map to the fixed ambiguity type; every other type is looked up
     * by secondType. NOTE(review): the map lookup can return null, which would throw an NPE on
     * unboxing for an unknown secondType — confirm callers always pass mapped values.
     *
     * @param type       sentence type
     * @param secondType secondary classification used for non-ZS lookups
     * @return ambiguity dictionary type constant
     */
    private int getAmbiguityDictType(int type, String secondType) {
        return type == AnalysisSentenceConstant.TYPE_ZS
                ? AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY
                : AnalysisDictionaryConstant.ANALYSISDictionaryAmbiguitySecondTypeMap.get(secondType);
    }

    /**
     * Resolves the user-library dictionary type for a sentence type / second type pair.
     * Chief-complaint sentences map to the fixed user-library type; every other type is looked
     * up by secondType. NOTE(review): the map lookup can return null, which would throw an NPE
     * on unboxing for an unknown secondType — confirm callers always pass mapped values.
     *
     * @param type       sentence type
     * @param secondType secondary classification used for non-ZS lookups
     * @return user-library dictionary type constant
     */
    private int getUserLibraryDictType(int type, String secondType) {
        return type == AnalysisSentenceConstant.TYPE_ZS
                ? AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY
                : AnalysisDictionaryConstant.ANALYSISDictionaryUserLibrarySecondTypeMap.get(secondType);
    }

    /**
     * Force-commits a dictionary change after the user confirmed a conflict reported by
     * updateDictionary(). The two original branches were duplicates differing only in which
     * dictionary was checked for a conflicting entry (ambiguity dictionary for merges, user
     * library for splits); they are folded into one flow with identical behavior.
     *
     * @param mergeFlag   true = tokens were merged, false = a token was split
     * @param searchParam search condition prepared by updateDictionary (contains "keyword")
     * @param newStr      the merged/split phrase
     * @param dic         dictionary entry prepared by updateDictionary
     * @param vocabularys full token list of the sentence
     * @param sentence    the sentence being edited
     * @param user        operating user (kept for interface compatibility; unused here)
     * @param secondType  secondary classification (kept for interface compatibility; unused here)
     * @return success
     */
    public ReturnData forceEditAndUpdateDictionary(boolean mergeFlag, Map<String, Object> searchParam, String newStr,
                                                   AnalysisDictionary dic, List<AnalysisVocabulary> vocabularys, AnalysisSentence sentence, User user, String secondType) {
        // Remove the phrase from the external dictionary if present (identical in both original branches).
        if (analysisDictionDao.selectOutsideDictionaryCountByVocabulary(newStr) > 0) {
            analysisDictionDao.deleteOutsideDictionaryByVocabulary(newStr);
        }
        // Merge case checks the ambiguity dictionary; split case checks the user library.
        searchParam.put("type", mergeFlag
                ? AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY
                : AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY);
        int count = analysisDictionDao.selectAnalysisDictionaryCount(searchParam);
        if (count > 0) {
            // Conflict remains: store the whole sentence's segmentation in the ambiguity dictionary.
            dic.setVocabulary(sentence.getSentence());
            dic.setDictionary(this.getSentenceDictionary(sentence, vocabularys));
            dic.setType(AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY);
            analysisDictionDao.insertAnalysisDictionary(dic);
        } else {
            // No conflict: update the existing entry.
            analysisDictionDao.updateAnalysisDictionary(dic);
        }
        return ReturnUtil.success();
    }

    /**
     * Builds the dictionary line for a whole sentence:
     * sentence text followed by "\t token \t characteristic" for each token, in order.
     *
     * @param sentence    the sentence whose text leads the line
     * @param vocabularys ordered token list of the sentence
     * @return tab-separated dictionary line
     */
    private String getSentenceDictionary(AnalysisSentence sentence, List<AnalysisVocabulary> vocabularys) {
        StringBuilder line = new StringBuilder(sentence.getSentence());
        for (AnalysisVocabulary vocab : vocabularys) {
            line.append('\t').append(vocab.getVocabulary()).append('\t').append(vocab.getCharacteristic());
        }
        return line.toString();
    }

    // Path of the shell script (config key sphinx.index.rotate) run by updateSphinxIndex()
    // to rotate/rebuild a Sphinx index.
    @Value("${sphinx.index.rotate}")
    private String rotateIndexScript;

    /**
     * Persists an edited sentence: replaces its token rows, logically removes the old tokens
     * from the Sphinx index, and marks the sentence as labeled. The delete-then-insert order
     * is intentional — the old rows must be gone before the new segmentation is written.
     *
     * @param analysisSentence sentence to update (label status and modifier are set here)
     * @param vocabularys      new token rows to insert
     * @param user             operating user, recorded as the modifier
     * @throws Exception propagated from DAO / search-engine calls
     */
    private void updateSentenceAndVocabularys(AnalysisSentence analysisSentence, List<AnalysisVocabulary> vocabularys,
                                              User user) throws Exception {
        Long sid = analysisSentence.getId();
        LOGGER.info("开始删除历史词语");
        // Delete the sentence's previous token rows.
        analysisVocabularyDao.deleteVocabulaysBySid(sid);
        LOGGER.info("逻辑删除搜索引擎中的历史词语");
        // Logically delete the old tokens from the search engine.
        deleteVocabularysToSphinxBySid(sid, analysisSentence.getType());
        LOGGER.info("新增修改后词语");
        // Insert the edited tokens.
        analysisVocabularyDao.batchInsert(vocabularys);
        LOGGER.info("修改句子");
        // Mark the sentence as labeled and record who changed it.
        analysisSentence.setLabelStatus(AnalysisSentenceConstant.LABEL_STATUS_LABELED);
        analysisSentence.setModifierId(user.getUserId());
        analysisSentenceDao.update(analysisSentence);
        LOGGER.info("更新分词和句子完成");
    }

    /**
     * Rotates/rebuilds one Sphinx index by running the configured rotate script.
     * Fixes: failures are logged at error level with the full stack trace (previously
     * printStackTrace() plus an info-level message), and the "completed" message is no
     * longer emitted when the script throws.
     *
     * @param index name of the index to rotate
     */
    public void updateSphinxIndex(String index) {
        LOGGER.info("搜索引擎：更新索引[{}]开始", index);
        String[] cmds = new String[]{rotateIndexScript, index};
        try {
            BusinessUtil.excuteShell(cmds);
            LOGGER.info("搜索引擎：更新索引[{}]完成", index);
        } catch (IOException e) {
            // Keep the cause attached so the failure is diagnosable from the log.
            LOGGER.error("搜索引擎异常：更新索引[{}]失败", index, e);
        }
    }

    /**
     * Rotates the given Sphinx index asynchronously on a freshly spawned thread.
     * NOTE(review): one raw thread per call — consider an executor if call volume grows.
     *
     * @param index name of the index to rotate
     */
    public void asynchronousUpdateSphinxIndex(String index) {
        new Thread(new ExecuteUpdateSphinx(index)).start();
    }

    /**
     * Logically deletes all vocabulary documents of a sentence from the Sphinx index by
     * setting their label_status attribute to -1.
     *
     * @param sid  sentence id whose token documents should be removed
     * @param type sentence type; selects the index pair "vocabulary{type}" / "vocabulary{type}_delta"
     * @throws SphinxException when a search-engine call fails
     */
    private void deleteVocabularysToSphinxBySid(long sid, int type) throws SphinxException {
        int sphinxPort = 9312;
        SphinxClient sphinxClient = new SphinxClient(sphinxHost, sphinxPort);
        String indexStr = "vocabulary" + type + "|vocabulary" + type + "_delta";
        // Find every document whose sid attribute matches exactly.
        SphinxResult sphinxResult = sphinxClient.Query("(@sid=\"^" + sid + "$\")", indexStr);
        if (sphinxResult == null || sphinxResult.matches == null || sphinxResult.matches.length == 0) {
            // Nothing matched. NOTE(review): logged at info level although the message says ERROR — confirm severity.
            LOGGER.info("deleteVocabularysToSphinxBySid ERROR,SID:" + sid);
            return;
        }
        // One (docId, newValue) row reused per match: label_status := -1 marks logical deletion.
        long[][] value = new long[1][2];
        value[0][1] = -1L;
        for (SphinxMatch m : sphinxResult.getMatches()) {
            value[0][0] = m.docId;
            sphinxClient.UpdateAttributes("vocabulary" + type, new String[]{"label_status"}, value);
        }
        sphinxClient.FlushAttributes();
    }

    /**
     * Maps the parallel request arrays (one entry per token) into AnalysisVocabulary entities.
     * All arrays are indexed in lockstep and are expected to have equal length.
     *
     * @param vocabularyStrs token texts
     * @param characterStrs  per-token characteristics
     * @param propertyStrs   per-token properties
     * @param position1Strs  per-token position-1 labels
     * @param position2Strs  per-token position-2 labels
     * @param sid            owning sentence id
     * @param type           sentence type
     * @return one entity per token, in input order
     */
    private List<AnalysisVocabulary> getVocabularysByParam(String[] vocabularyStrs, String[] characterStrs,
                                                           String[] propertyStrs, String[] position1Strs, String[] position2Strs, Long sid, int type) {
        List<AnalysisVocabulary> result = new ArrayList<>();
        for (int idx = 0; idx < vocabularyStrs.length; idx++) {
            AnalysisVocabulary entity = new AnalysisVocabulary();
            entity.setSid(sid);
            entity.setVocabulary(vocabularyStrs[idx]);
            entity.setCharacteristic(characterStrs[idx]);
            entity.setProperty(propertyStrs[idx]);
            entity.setPosition1(position1Strs[idx]);
            entity.setPosition2(position2Strs[idx]);
            entity.setType(type);
            // First character is stored as the index initial; assumes non-empty token strings — TODO confirm.
            entity.setInitial(vocabularyStrs[idx].substring(0, 1));
            result.add(entity);
        }
        return result;
    }

    /**
     * Checks that the submitted tokens, concatenated in order, reproduce the original sentence
     * exactly — i.e. the segmentation neither drops, adds, nor alters any character.
     * Fixes: single-threaded StringBuilder instead of StringBuffer, and the boolean result is
     * returned directly instead of via an if/return-true/return-false chain.
     *
     * @param vocabularyStrs token strings in sentence order; null or empty fails the check
     * @param sentence       the original sentence text
     * @return true when the concatenation equals the sentence
     */
    public boolean checkVocabulary(String[] vocabularyStrs, String sentence) {
        if (vocabularyStrs == null || vocabularyStrs.length == 0) {
            return false;
        }
        StringBuilder joined = new StringBuilder();
        for (String vocabulary : vocabularyStrs) {
            joined.append(vocabulary);
        }
        return joined.toString().equals(sentence);
    }

    /**
     * Builds the model for the sentence-list screen: loads the full filtered list (through the
     * redis-backed cache), pages it in memory (20 per page), and attaches the dropdown data.
     * Fix: the page-end computation uses Math.min instead of a hand-rolled ternary.
     *
     * @param startStr     page start as string; null means page 1
     * @param type         sentence type (ZS / XBS / ...)
     * @param labelStatus  label status filter
     * @param searchParams optional token search conditions
     * @param secondType   secondary classification (only applied for TYPE_XBS)
     * @return ModelAndView for "analysis/index"
     * @throws SphinxException when the search-engine query fails
     */
    public ModelAndView list(String startStr, int type, int labelStatus, List<AnalysisSearchParam> searchParams,
                             String secondType) throws SphinxException {
        int start = startStr == null ? 1 : Integer.parseInt(startStr);

        // Assemble filter conditions.
        Map<String, Object> param = new HashMap<>();
        param.put("type", type);
        param.put("labelStatus", labelStatus);
        if (searchParams != null && !searchParams.isEmpty()) {
            param.put("searchParams", searchParams);
        }
        param.put("orderStr", "gmt_modified desc");
        if (type == AnalysisSentenceConstant.TYPE_XBS) {
            param.put("secondType", secondType);
        }
        // Load the full result list, then page it in memory below.
        List<AnalysisSentence> list = selectSentenceList(param, type, labelStatus, secondType);

        int totalSize = list.size();

        // Paging bookkeeping.
        Map<String, Integer> pageNo = GetListUtil.getPageNoMap(totalSize, start);
        int endPage = pageNo.get("endPage");
        start = pageNo.get("start");
        int startIndex = pageNo.get("startIndex");

        if (startIndex < totalSize) {
            // Fixed page size of 20 — assumed to match GetListUtil's paging math; TODO confirm.
            int endIndex = Math.min(startIndex + 20, totalSize);
            list = list.subList(startIndex, endIndex);
        }

        Map<String, Object> map = GetListUtil.getReturnMap2(totalSize, start, startIndex, endPage, list, null);
        // Blank first entry so the property dropdown renders an empty option.
        LinkedHashMap<String, String> propertyMap = new LinkedHashMap<>();
        propertyMap.put(" ", " ");

        propertyMap.putAll(AnalysisSentenceConstant.getPropertyMapByTypeAndSecondType(type, secondType));
        map.put("propertyMap", propertyMap);
        map.put("position1List", AnalysisSentenceConstant.getPositionByType(AnalysisSentenceConstant.POSITION1_LABEL));
        map.put("position2List", AnalysisSentenceConstant.getPositionByType(AnalysisSentenceConstant.POSITION2_LABEL));
        if (type == AnalysisSentenceConstant.TYPE_XBS) {
            map.put("secondTypeMap", AnalysisSentenceConstant.secondTypeMap);
            map.put("errorMsg", getSourceErrMsg());
        }
        return new ModelAndView("analysis/index", map);
    }

    /**
     * Builds the user-visible conflict message for sentence-source classification mismatches.
     * Fix: the message is assembled with a StringBuilder instead of repeated String
     * concatenation inside the loop.
     *
     * @return "" when there are no conflicts; otherwise a "分句分类有冲突：" prefix followed by
     *         one "sentence->source；" segment per conflicting row
     */
    private String getSourceErrMsg() {
        List<Map<String, String>> sourceErrList = analysisVocabularyDao.retrieveSourceErrMsg();
        if (sourceErrList == null || sourceErrList.isEmpty()) {
            return "";
        }
        StringBuilder errMsg = new StringBuilder("分句分类有冲突：");
        for (Map<String, String> sourceErr : sourceErrList) {
            errMsg.append(sourceErr.get("sentence"))
                    .append("->")
                    .append(sourceErr.get("source"))
                    .append("；");
        }
        return errMsg.toString();
    }

    // Shell script (config key zuci.script) run second in preview() — presumably produces the
    // word-composition ("zuci") output; confirm against deployment config.
    @Value("${zuci.script}")
    private String zuciScript;
    // Shell script (config key translate.script) run first in preview() on the token file.
    @Value("${translate.script}")
    private String translateScript;
    // Directory prefix for the temporary files preview() writes.
    @Value("${zuci.tmpfile.path}")
    private String zuciTmpFilePath;

    /**
     * Generates a "zuci" (word-composition) preview for an edited segmentation without saving it.
     * Flow: validate the tokens against the sentence, write them to a temp file, run the
     * translate script on it, then rewrite the file with the translated segmentation and run
     * the zuci script. Fix: both temp-file writes use try-with-resources so the output streams
     * are closed even when a write fails (previously they leaked on exception).
     *
     * @param idStr          sentence id as a string
     * @param vocabularyStrs token strings (must concatenate to the sentence text)
     * @param characterStrs  per-token characteristics
     * @param propertyStrs   per-token properties
     * @return success payload with key "zuci" holding the script output, or the literal
     *         "error" when the translate script produced no usable output
     * @throws Exception on parse, I/O or shell failures
     */
    public ReturnData preview(String idStr, String[] vocabularyStrs, String[] characterStrs, String[] propertyStrs)
            throws Exception {
        Long id = Long.parseLong(idStr);
        // Load the sentence being previewed.
        AnalysisSentence sentence = analysisSentenceDao.retrieveObjectById(id);

        // The tokens must reproduce the sentence exactly.
        if (!checkVocabulary(vocabularyStrs, sentence.getSentence())) {
            return ReturnUtil.fail(ReturnCode.ANALYSIS_VOCABULARYS_CHECK_ERROR);
        }

        // Wrap the raw arrays into vocabulary entities; both position arrays are left null for previews.
        String[] temp = new String[vocabularyStrs.length];
        List<AnalysisVocabulary> vocabularys = getVocabularysByParam(vocabularyStrs, characterStrs, propertyStrs, temp,
                temp, 0L, 0);

        String csStr = System.currentTimeMillis() + ".txt";
        File file = new File(zuciTmpFilePath + csStr);

        // One "token \t characteristic \t property" line per token.
        StringBuffer vocabSb = new StringBuffer();
        for (AnalysisVocabulary vocabulary : vocabularys) {
            vocabSb.append(vocabulary.getVocabulary());
            vocabSb.append("\t");
            vocabSb.append(vocabulary.getCharacteristic());
            vocabSb.append("\t");
            vocabSb.append(vocabulary.getProperty());
            vocabSb.append("\n");
        }
        try (BufferedOutputStream bOS = new BufferedOutputStream(new FileOutputStream(file))) {
            bOS.write(vocabSb.toString().getBytes("utf-8"));
            bOS.flush();
        }

        String[] cmds = new String[3];
        cmds[0] = translateScript;
        cmds[1] = file.getAbsolutePath();
        cmds[2] = csStr;

        // Run the translate script and inspect its tab-separated output.
        String result = BusinessUtil.excuteShell(cmds);
        String[] resultSplit = result.split("\t");

        if (resultSplit.length > 1) {
            String splitStr = resultSplit[1];
            // Rewrite the temp file with the translated segmentation and run the zuci script on it.
            file.delete();
            try (BufferedOutputStream bOS = new BufferedOutputStream(new FileOutputStream(file))) {
                bOS.write((sentence.getSentence() + String.valueOf((char) 1) + "1" + "\t" + splitStr + "\n")
                        .getBytes("utf-8"));
                bOS.flush();
            }
            cmds[0] = zuciScript;
            result = BusinessUtil.excuteShell(cmds);
        } else {
            result = "error";
        }

        Map<String, Object> map = new HashMap<>();
        map.put("zuci", result);

        return ReturnUtil.success(map);
    }

    /**
     * Rebuilds every cached sentence list in redis from the database / search engine, then
     * resets the logical-delete id list.
     * Fixes: (1) the "other empty search" branch previously tested NULL_SEARCH_COMPLAIN_LABELED
     * twice instead of NULL_SEARCH_MEDICAL_LABELED — the parallel branch in selectSentenceList
     * shows the intended trio, so medical-labeled keys no longer fall through to the sphinx
     * branch; (2) the inline id-join is replaced by the existing getIdListStr() helper, matching
     * the sibling method.
     *
     * @throws SphinxException when a search-engine query fails
     */
    public void updateRedisDataByDB() throws SphinxException {
        ValueOperations<String, List<AnalysisSentence>> valueops = redisTemplate.opsForValue(); // redis accessor
        Set<String> keys = redisTemplate.keys("*");
        Iterator<String> iterator = keys.iterator();
        List<AnalysisSentence> dataList = null;
        String key = null;
        String secondType = null;
        Map<String, Object> param = null;
        while (iterator.hasNext()) {
            key = iterator.next();
            // Skip bookkeeping keys that do not hold sentence lists.
            if (RedisConstant.KEY_ANALYSIS_DELETE.equals(key) || key.startsWith(RedisConstant.MARK_SENTENCE_UNLABEL)) {
                continue;
            }
            // Rebuild the search condition from the cache key.
            param = getSearchParamMapByKey(key);
            int type = Integer.valueOf(param.get("type").toString());
            secondType = null;
            if (type == AnalysisSentenceConstant.TYPE_XBS) {
                // Key segments appear to be separated by \u0001; the second type is the trailing segment — TODO confirm.
                secondType = key.substring(key.lastIndexOf(1) + 1);
            }
            // Reload this key's list from the backing store.
            if (RedisConstant.NULL_SEARCH_COMPLAIN_UNLABELED.equals(key)) {
                // Chief complaint, empty search.
                dataList = analysisSentenceDao.retrieveZsListNullSearch();
                if (ListUtil.isNotEmpty(dataList)) {
                    List<HashMap<String, Object>> mapList = analysisSentenceDao
                            .selectVocabularySidPropertyMapper(getIdListStr(dataList));
                    dataList = removeSameItem(dataList, mapList);
                }
            } else if (key.startsWith(RedisConstant.NULL_SEARCH_HPI_UNLABELED_NO_SECOND_TYPE) && key.split("\01").length == 4) {
                // History of present illness, unlabeled, empty search.
                dataList = analysisSentenceDao.retrieveXbsListNullSearchUnlabeled(secondType);
            } else if (key.startsWith(RedisConstant.NULL_SEARCH_HPI_LABELED_NO_SECOND_TYPE) && key.split("\01").length == 4) {
                // History of present illness, labeled, empty search.
                dataList = analysisSentenceDao.retrieveXbsListNullSearchLabeled(secondType);
            } else if (RedisConstant.NULL_SEARCH_MEDICAL_UNLABELED.equals(key)
                    || RedisConstant.NULL_SEARCH_MEDICAL_LABELED.equals(key)
                    || RedisConstant.NULL_SEARCH_COMPLAIN_LABELED.equals(key)) {
                // Remaining empty-search keys go straight to the DB.
                dataList = analysisSentenceDao.retrieveListNoLimit(param);
            } else {
                // Real search: requery the Sphinx engine.
                // TODO
                // list = analysisSentenceDao.retrieveListNoLimit(param);
                @SuppressWarnings("unchecked")
                List<AnalysisSearchParam> searchParams = (List<AnalysisSearchParam>) param.get("searchParams");

                int labelStatus = Integer.valueOf(param.get("labelStatus").toString());
                dataList = getListFromSphinx(searchParams, type, labelStatus, secondType);
            }
            // Overwrite the cached entry with the fresh list.
            valueops.set(key, dataList);
        }

        // Reset the logical-delete id list.
        ValueOperations<String, Object> valueopsOther = redisStrTemplate.opsForValue();
        valueopsOther.set(RedisConstant.KEY_ANALYSIS_DELETE, ",");

    }

    /**
     * Returns the sentence list for the given filter, reading through a redis cache.
     * Cache hit: the cached list is non-null, non-empty and not exactly 4000 entries.
     * Otherwise the list is reloaded from the database or the Sphinx engine and written back.
     * NOTE(review): 4000 looks like a legacy truncation size that forces a refresh — confirm.
     *
     * @param param       DB filter conditions (type, labelStatus, searchParams, orderStr, ...)
     * @param type        sentence type
     * @param labelStatus label status filter
     * @param secondType  secondary classification (used for TYPE_XBS keys)
     * @return sentences matching the filter
     * @throws SphinxException when the search-engine query fails
     */
    private List<AnalysisSentence> selectSentenceList(Map<String, Object> param, int type, int labelStatus,
                                                      String secondType) throws SphinxException {
        ValueOperations<String, List<AnalysisSentence>> valueops = redisTemplate.opsForValue(); // redis accessor
        // Build the cache key for this filter.
        String key = getSelectKey(param, secondType);
        // Try the cache first.
        List<AnalysisSentence> list = valueops.get(key);

        // Cache hit: non-null, non-empty, and not exactly 4000 entries.
        if (list != null && list.size() > 0 && list.size() != 4000) {
            return list;
        }
        // Cache miss (null / empty / exactly 4000): reload from the backing stores.
        if (RedisConstant.NULL_SEARCH_COMPLAIN_UNLABELED.equals(key)) {
            // Chief complaint, unlabeled, empty search.
            list = analysisSentenceDao.retrieveZsListNullSearch();
            if (ListUtil.isNotEmpty(list)) {
                List<HashMap<String, Object>> mapList = analysisSentenceDao
                        .selectVocabularySidPropertyMapper(getIdListStr(list));
                list = removeSameItem(list, mapList);
            }
        } else if (key.startsWith(RedisConstant.NULL_SEARCH_HPI_UNLABELED_NO_SECOND_TYPE) && key.split("\01").length == 4) {
            // History of present illness, unlabeled, empty search.
            list = analysisSentenceDao.retrieveXbsListNullSearchUnlabeled(secondType);
            // De-duplicate when non-empty.
            if (ListUtil.isNotEmpty(list)) {
                List<HashMap<String, Object>> mapList = analysisSentenceDao
                        .selectVocabularySidPropertyMapper(getIdListStr(list));
                list = removeSameItem(list, mapList);
            }
        } else if (key.startsWith(RedisConstant.NULL_SEARCH_HPI_LABELED_NO_SECOND_TYPE) && key.split("\01").length == 4) {
            // History of present illness, labeled, empty search.
            list = analysisSentenceDao.retrieveXbsListNullSearchLabeled(secondType);
        } else if (RedisConstant.NULL_SEARCH_MEDICAL_UNLABELED.equals(key)
                || RedisConstant.NULL_SEARCH_MEDICAL_LABELED.equals(key)
                || RedisConstant.NULL_SEARCH_COMPLAIN_LABELED.equals(key)) {
            // Remaining empty-search keys go straight to the DB.
            list = analysisSentenceDao.retrieveListNoLimit(param);
        } else {
            // Real search: query the Sphinx engine.
            // TODO
            // list = analysisSentenceDao.retrieveListNoLimit(param);
            @SuppressWarnings("unchecked")
            List<AnalysisSearchParam> searchParams = (List<AnalysisSearchParam>) param.get("searchParams");
            list = getListFromSphinx(searchParams, type, labelStatus, secondType);

            // Search results may still contain logically deleted records — filter them out.
            Iterator<AnalysisSentence> iter = list.iterator();
            ValueOperations<String, Object> valueopsOther = redisStrTemplate.opsForValue();
            Object deleteId = valueopsOther.get(RedisConstant.KEY_ANALYSIS_DELETE);
            if (deleteId != null) {
                while (iter.hasNext()) {
                    AnalysisSentence item = iter.next();
                    // NOTE(review): substring match over the id list can false-positive (e.g. id 12 vs 123) — confirm.
                    if (deleteId.toString().contains(item.getId() + "")) {
                        iter.remove();
                    }
                }
            }

        }
        // Write the (re)loaded list back to redis.
        addAnalysisSentenceListInRedis(key, list);
        return list;
    }

    /**
     * Joins the ids of the given sentences into a comma-separated, single-quoted string,
     * e.g. {@code '1','2','3'}; an empty list yields "".
     *
     * @param list sentences whose ids are joined
     * @return quoted, comma-separated id string
     */
    private String getIdListStr(List<AnalysisSentence> list) {
        StringJoiner joiner = new StringJoiner(",");
        for (AnalysisSentence item : list) {
            joiner.add("'" + item.getId() + "'");
        }
        return joiner.toString();
    }


    /**
     * 从sphinx搜索引擎中得到数据列表
     *
     * @param searchParams
     * @param type
     * @param labelStatus
     * @param secondType
     * @return
     * @throws SphinxException
     */
    private List<AnalysisSentence> getListFromSphinx(List<AnalysisSearchParam> searchParams, int type, int labelStatus,
                                                     String secondType) throws SphinxException {
        List<AnalysisSentence> result = new ArrayList<>();
        if (searchParams == null || searchParams.size() == 0) {
            return result;
        }
        int sphinxPort = 9312;
        SphinxClient sphinxClient = new SphinxClient(sphinxHost, sphinxPort);
        // 设置排序模式
        sphinxClient.SetSortMode(SphinxClient.SPH_SORT_ATTR_DESC, "gmt_modified");
        // 分页查询的范围
        sphinxClient.SetLimits(0, 200000, 200000);
        // 过滤器设置，true为！=，false为=
        sphinxClient.SetFilter("label_status", labelStatus, false);
        AnalysisSentence sentence = null;
        SphinxResult sphinxResult = null;
        String queryStr = null;
        String indexStr = null;
        LinkedHashMap<Long, String[]> resultMap = null;
        // 查询
        for (int i = 0; i < searchParams.size(); i++) {
            queryStr = getQueryVocabularyStr(searchParams.get(i));
            if (type == AnalysisSentenceConstant.TYPE_XBS) {
                queryStr = queryStr + "&(@second_type=\"^" + secondType + "$\")";
            }
            indexStr = "vocabulary" + type + "|vocabulary" + type + "_delta";
            LOGGER.info("查询语句为" + queryStr);
            LOGGER.info("索引语句为" + indexStr);
            sphinxResult = sphinxClient.Query(queryStr, indexStr);
            if (sphinxResult == null || sphinxResult.getMatches() == null || sphinxResult.getMatches().length == 0) {
                return result;
            }
            if (i == 0) {
                resultMap = getResultMap(sphinxResult.getMatches());
            } else {
                // 得到多个查询结果中的交集部分
                resultMap = ListUtil.getRepeatResult(resultMap, getResultMap(sphinxResult.getMatches()));
            }
        }
        if (resultMap == null || resultMap.size() == 0) {
            return result;
        }
        for (Map.Entry<Long, String[]> entry : resultMap.entrySet()) {
            sentence = new AnalysisSentence();
            sentence.setId(entry.getKey());
            sentence.setSentence(entry.getValue()[0]);
            sentence.setGmtModified(DateUtils.getDateStrFromUnixTimestamp(entry.getValue()[1]));
            result.add(sentence);
            if (result.size() >= 5000) {
                return result;
            }
        }
        return result;
    }

    /**
     * Converts raw Sphinx matches into an insertion-ordered map keyed by sentence id,
     * de-duplicating repeated ids. Attribute layout assumed: index 0 = sid,
     * 1 = sentence text, 7 = gmt_modified — TODO confirm against the index definition.
     *
     * @param matches raw engine matches
     * @return sid -> {sentence, gmtModified}, in first-seen order
     */
    private LinkedHashMap<Long, String[]> getResultMap(SphinxMatch[] matches) {
        LinkedHashMap<Long, String[]> bySid = new LinkedHashMap<>();
        for (SphinxMatch match : matches) {
            String sidStr = String.valueOf(match.getAttrValues().get(0));
            String sentenceText = String.valueOf(match.getAttrValues().get(1));
            String modifiedAt = String.valueOf(match.getAttrValues().get(7));
            bySid.put(Long.valueOf(sidStr), new String[]{sentenceText, modifiedAt});
        }
        return bySid;
    }

    /**
     * Builds the SphinxQL extended-match query string for one vocabulary search condition.
     * Always matches the exact word; optionally constrains property, position1 and position2.
     * For "reverse" conditions (opposite flag == "1") the query matches every known
     * property/position EXCEPT the requested one.
     *
     * Bug fix: the forward/reverse decision used {@code "0".endsWith(flag)}, which is
     * true for the empty string only by accident and throws NPE for a null flag.
     * Replaced with a null-safe {@code !"1".equals(flag)} so that "0", "" and null all
     * take the forward branch (matching the previous non-null behavior and the flag
     * semantics used in canSelect), and only "1" triggers the reverse branch.
     *
     * @param searchParam one search condition; may be null
     * @return the SphinxQL fragment, or null when searchParam is null
     */
    private String getQueryVocabularyStr(AnalysisSearchParam searchParam) {
        if (searchParam == null) {
            return null;
        }
        StringBuilder sb = new StringBuilder();
        // Exact word match.
        sb.append("(@vocabulary=\"^").append(searchParam.getVocabulary()).append("$\")");
        if (StringUtils.isNotEmpty(searchParam.getProperty())) {
            if (!"1".equals(searchParam.getOppositePropertyFlag())) {
                sb.append("&(@property=").append(searchParam.getProperty()).append(")");
            } else {
                // Reverse search: match every property except the requested one.
                List<String> propertyList = new ArrayList<String>(Arrays
                        .asList(new String[]{"O", "O_AD", "P", "R", "R_AD", "S_AD", "S_I", "DI", "DE", "IN", "AD"}));
                propertyList.remove(searchParam.getProperty());
                sb.append("&(@property=").append(StringUtils.join(propertyList, "|")).append(")");
            }
        }
        if (StringUtils.isNotEmpty(searchParam.getPosition1())) {
            if (!"1".equals(searchParam.getOppositePosition1Flag())) {
                sb.append("&(@position1=").append(searchParam.getPosition1()).append(")");
            } else {
                // Reverse search: match every position1 except the requested one.
                List<String> position1List = new ArrayList<String>(
                        Arrays.asList(new String[]{"N", "B", "M", "E", "S", "BB", "MM", "EE"}));
                position1List.remove(searchParam.getPosition1());
                sb.append("&(@position1=").append(StringUtils.join(position1List, "|")).append(")");
            }
        }
        if (StringUtils.isNotEmpty(searchParam.getPosition2())) {
            // position2 has no reverse mode; always an exact match.
            sb.append("&(@position2=^").append(searchParam.getPosition2()).append("$)");
        }
        return sb.toString();
    }

    /**
     * Removes sentences whose property sequence is highly similar (similarity > 0.93)
     * to any of the up-to-10 immediately preceding sentences in the list. The first
     * sentence is always kept; a sentence with no property data cannot be compared and
     * is kept as well.
     *
     * Fixes over the previous version: the current item's missing-property check is
     * hoisted out of the comparison window (it used to re-run and re-log up to 10 times
     * per item), and the window loop breaks as soon as one similar neighbor is found.
     *
     * @param list    candidate sentences, in ranking order
     * @param mapList rows carrying "sid" (sentence id) and "property" (property string)
     * @return the filtered list (a new list, except for the trivial passthrough cases)
     */
    private List<AnalysisSentence> removeSameItem(List<AnalysisSentence> list, List<HashMap<String, Object>> mapList) {
        if (list == null || list.size() <= 1) {
            return list;
        }
        // sentence id -> property string, for similarity comparison
        HashMap<Long, String> propertyMap = new HashMap<>();
        for (HashMap<String, Object> map : mapList) {
            propertyMap.put((long) map.get("sid"), String.valueOf(map.get("property")));
        }
        List<AnalysisSentence> result = new ArrayList<>();
        result.add(list.get(0));
        for (int i = 1; i < list.size(); i++) {
            String currentProperty = propertyMap.get(list.get(i).getId());
            if (StringUtils.isEmpty(currentProperty)) {
                // No property data: cannot compare, keep the sentence (same outcome as before).
                LOGGER.info("ID为:" + list.get(i).getId() + "的语句找不到对应的属性集合");
                result.add(list.get(i));
                continue;
            }
            boolean keep = true;
            // Compare against up to 10 preceding list entries.
            for (int j = 1; j <= 10 && (i - j) >= 0; j++) {
                String prevProperty = propertyMap.get(list.get(i - j).getId());
                if (StringUtils.isEmpty(prevProperty)) {
                    LOGGER.info("ID为:" + list.get(i - j).getId() + "的语句找不到对应的属性集合");
                    continue;
                }
                double similarity = SimilarityAlgo.getSimilarity(currentProperty, prevProperty);
                if (similarity > 0.93D) {
                    keep = false;
                    break; // already too similar — no need to check the rest of the window
                }
            }
            if (keep) {
                result.add(list.get(i));
            }
        }
        return result;
    }

    /**
     * Caches a sentence search-result list in redis under the given key, after evicting
     * stale cached results that share the same 4-character key prefix.
     *
     * The "empty search" cache entries are deliberately kept warm: the four
     * complain/medical constants are dropped from the eviction set directly, and the HPI
     * empty-search entries (which carry a second-type suffix) are filtered out by prefix.
     *
     * @param key  cache key for this result (see getSelectKey); assumed >= 4 chars
     * @param list sentence list to cache
     */
    private void addAnalysisSentenceListInRedis(String key, List<AnalysisSentence> list) {
        ValueOperations<String, List<AnalysisSentence>> valueops = redisTemplate.opsForValue(); // redis value ops
        String keyPattern = key.substring(0, 4) + "*";
        Set<String> keys = redisTemplate.keys(keyPattern);
        // Defensive fix: keys() may return null on some Spring Data Redis versions —
        // previously this NPE'd; with no matches there is simply nothing to evict.
        if (keys != null && !keys.isEmpty()) {
            // Never evict the empty-search cache entries.
            keys.remove(RedisConstant.NULL_SEARCH_COMPLAIN_LABELED);
            keys.remove(RedisConstant.NULL_SEARCH_MEDICAL_LABELED);
            keys.remove(RedisConstant.NULL_SEARCH_COMPLAIN_UNLABELED);
            keys.remove(RedisConstant.NULL_SEARCH_MEDICAL_UNLABELED);
            // HPI empty-search keys carry a second-type suffix, so match by prefix.
            Iterator<String> iterator = keys.iterator();
            while (iterator.hasNext()) {
                String k = iterator.next();
                if (k.startsWith(RedisConstant.NULL_SEARCH_HPI_UNLABELED_NO_SECOND_TYPE)
                        || k.startsWith(RedisConstant.NULL_SEARCH_HPI_LABELED_NO_SECOND_TYPE)) {
                    iterator.remove();
                }
            }
            redisTemplate.delete(keys);
        }
        // Store the new result list.
        valueops.set(key, list);
    }

    /**
     * Removes the sentence with the given id from every cached sentence list whose key
     * matches the pattern, writing each updated list back to redis.
     *
     * Defensive fixes: {@code keys()} may return null on some Spring Data Redis versions,
     * and a cached entry may expire between {@code keys()} and {@code get()} — both cases
     * previously caused an NPE and are now skipped.
     *
     * @param id         sentence id to remove
     * @param keyPattern redis key pattern selecting the affected cached lists
     */
    public void removeAnalySentenceceOfRedis(Long id, String keyPattern) {
        ValueOperations<String, List<AnalysisSentence>> valueops = redisTemplate.opsForValue(); // redis value ops
        Set<String> keys = redisTemplate.keys(keyPattern);
        if (keys == null || keys.isEmpty()) {
            return; // nothing cached under this pattern
        }
        for (String key : keys) {
            List<AnalysisSentence> data = valueops.get(key);
            if (data == null) {
                continue; // entry disappeared between keys() and get()
            }
            // Overwrite the cached list with the id filtered out.
            valueops.set(key, removeElementsById(data, id));
        }
    }

    /**
     * Updates every cached sentence list matching the key pattern with the given sentence:
     * any stale copy is removed, and when the sentence still satisfies the search
     * conditions encoded in the cache key it is re-inserted at the head of the list.
     *
     * Defensive fixes: {@code keys()} may return null on some Spring Data Redis versions,
     * and a cached entry may expire between {@code keys()} and {@code get()} — both cases
     * previously caused an NPE and are now skipped.
     *
     * @param analysisSentence sentence to (re)insert
     * @param keyPattern       redis key pattern selecting the affected cached lists
     * @param vocabularys      the sentence's vocabulary entries, used to re-check conditions
     */
    private void addAnalySentenceceInRedis(AnalysisSentence analysisSentence, String keyPattern,
                                           List<AnalysisVocabulary> vocabularys) {
        ValueOperations<String, List<AnalysisSentence>> valueops = redisTemplate.opsForValue(); // redis value ops
        Set<String> keys = redisTemplate.keys(keyPattern);
        if (keys == null || keys.isEmpty()) {
            return; // nothing cached under this pattern
        }
        for (String key : keys) {
            List<AnalysisSentence> data = valueops.get(key);
            if (data == null) {
                continue; // entry disappeared between keys() and get()
            }
            // Does the sentence, after its word changes, still satisfy this key's conditions?
            boolean flag = canSelect(getSearchParamsByKey(key, analysisSentence.getType()), vocabularys);
            // Overwrite the cached list with the updated one.
            valueops.set(key, addElementsInList(data, analysisSentence, flag));
        }
    }

    /**
     * Builds the redis cache key for a search. Fields are joined with the control
     * character \u0001 in the order: type, labelStatus, [secondType — only for 现病史
     * (HPI) sentences], then each search param's seven fields, and finally orderStr.
     *
     * @param map        search parameters ("type", "labelStatus", "searchParams", "orderStr")
     * @param secondType second-level label; appended only for HPI when non-empty
     * @return the composed cache key
     */
    private String getSelectKey(Map<String, Object> map, String secondType) {
        final char sep = (char) 1;
        int type = (int) map.get("type");
        StringBuilder key = new StringBuilder();
        key.append(type).append(sep).append(map.get("labelStatus"));
        // Only HPI keys carry the second-level label segment.
        if (type == AnalysisSentenceConstant.TYPE_XBS && StringUtils.isNotEmpty(secondType)) {
            key.append(sep).append(secondType);
        }
        @SuppressWarnings("unchecked")
        List<AnalysisSearchParam> searchParams = (List<AnalysisSearchParam>) map.get("searchParams");
        if (searchParams != null) {
            for (AnalysisSearchParam p : searchParams) {
                key.append(sep).append(p.getId())
                        .append(sep).append(p.getVocabulary())
                        .append(sep).append(p.getProperty())
                        .append(sep).append(p.getPosition1())
                        .append(sep).append(p.getPosition2())
                        .append(sep).append(p.getOppositePropertyFlag())
                        .append(sep).append(p.getOppositePosition1Flag());
            }
        }
        key.append(sep).append(map.get("orderStr"));
        return key.toString();
    }

    /**
     * Reverse-parses a redis cache key (fields separated by \u0001 — see getSelectKey)
     * back into the list of search params embedded in it.
     *
     * @param key  the cache key
     * @param type sentence type; 现病史 (HPI) keys carry an extra secondType segment,
     *             shifting where the search params start
     * @return the parsed search params, or null when the key holds none
     */
    private List<AnalysisSearchParam> getSearchParamsByKey(String key, int type) {
        // Keys not ending in "desc" carry an extra trailing segment; strip it first.
        // lastIndexOf(1) searches for code point 1, i.e. the \u0001 separator character.
        if (!key.endsWith("desc")) {
            key = key.substring(0, key.lastIndexOf(1));
        }
        String[] s = key.split(String.valueOf((char) 1));
        // Fixed leading segments: type, labelStatus (+ secondType for HPI keys).
        // i = index of the first search-param segment; limit = minimum segment count
        // for any params to be present at all.
        int limit = 3;
        int i = 2;
        if (AnalysisSentenceConstant.TYPE_XBS == type) {
            limit = 4;
            i = 3;
        }
        if (s.length <= limit) {
            return null;
        }
        List<AnalysisSearchParam> result = new ArrayList<>();
        AnalysisSearchParam param = null;
        // Each search param occupies 7 consecutive segments (see getSelectKey).
        // The loop stops 2 segments short of the end — presumably the trailing
        // segments hold the order clause; confirm against getSelectKey's layout.
        for (; i < s.length - 2; ) {
            param = new AnalysisSearchParam();
            param.setId(Long.valueOf(s[i++]));
            param.setVocabulary(s[i++]);
            param.setProperty(s[i++]);
            param.setPosition1(s[i++]);
            param.setPosition2(s[i++]);
            param.setOppositePropertyFlag(s[i++]);
            param.setOppositePosition1Flag(s[i++]);
            result.add(param);
        }
        return result;
    }

    /**
     * Reverse-parses a redis cache key into the parameter map used for a database search.
     *
     * @param key cache key (fields separated by \u0001 — see getSelectKey)
     * @return map with "type", "labelStatus", "orderStr" and, when present, "searchParams"
     */
    private Map<String, Object> getSearchParamMapByKey(String key) {
        String[] segments = key.split(String.valueOf((char) 1));
        Map<String, Object> paramMap = new HashMap<>();
        paramMap.put("type", segments[0]);
        paramMap.put("labelStatus", segments[1]);
        // Keys ending in "desc" place the order clause last; otherwise it sits second to last.
        int orderIndex = key.endsWith("desc") ? segments.length - 1 : segments.length - 2;
        paramMap.put("orderStr", segments[orderIndex]);
        List<AnalysisSearchParam> searchParams = getSearchParamsByKey(key, Integer.parseInt(segments[0]));
        if (searchParams != null && !searchParams.isEmpty()) {
            paramMap.put("searchParams", searchParams);
        }
        return paramMap;
    }

    /**
     * Removes every entry with the given id from the list, in place.
     *
     * @param list sentence list to filter (modified in place)
     * @param id   id of the sentence(s) to drop
     * @return the same list instance, with matching entries removed
     */
    private List<AnalysisSentence> removeElementsById(List<AnalysisSentence> list, Long id) {
        for (Iterator<AnalysisSentence> it = list.iterator(); it.hasNext(); ) {
            if (id.equals(it.next().getId())) {
                it.remove();
            }
        }
        return list;
    }

    /**
     * Adds a sentence to the head of the list, first removing any existing entry with
     * the same id so the sentence never appears twice.
     *
     * @param list    sentence list (modified in place)
     * @param element sentence to add
     * @param flag    when true, insert the sentence at the head; when false, only the
     *                removal of the stale copy takes place
     * @return the same list instance
     */
    private List<AnalysisSentence> addElementsInList(List<AnalysisSentence> list, AnalysisSentence element,
                                                     boolean flag) {
        Long id = element.getId();
        // Drop any existing entry carrying the same id.
        for (Iterator<AnalysisSentence> it = list.iterator(); it.hasNext(); ) {
            if (id.equals(it.next().getId())) {
                it.remove();
            }
        }
        // Re-insert at the head when the sentence still belongs in this list.
        if (flag) {
            list.add(0, element);
        }
        return list;
    }

    /**
     * Checks whether a sentence's vocabulary entries still satisfy every search condition.
     *
     * Flag semantics: opposite*Flag "0" means forward search (the value must match),
     * "1" means reverse search (the value must NOT match); any other flag value leaves
     * that condition unconstrained for the flagged field.
     *
     * @param searchParams search conditions; null means "no conditions" and always matches
     * @param vocabularys  the sentence's vocabulary entries; null never matches
     * @return true when every condition is satisfied, false otherwise
     */
    private boolean canSelect(List<AnalysisSearchParam> searchParams, List<AnalysisVocabulary> vocabularys) {
        if (searchParams == null) {
            return true;
        }
        if (vocabularys == null) {
            return false;
        }
        for (AnalysisSearchParam serachParam : searchParams) {
            boolean flag = true; // records whether the current search param has been satisfied
            for (AnalysisVocabulary analysisVocabulary : vocabularys) {
                // Only vocabulary entries with the same word text are candidates.
                // NOTE(review): the method fails fast on the FIRST same-word occurrence
                // violating a condition, even if a later occurrence would satisfy it —
                // confirm this "every occurrence must comply" semantics is intended.
                if (serachParam.getVocabulary().equals(analysisVocabulary.getVocabulary())) {
                    /* Property condition present */
                    if (StringUtils.isNotEmpty(serachParam.getProperty())) {
                        /* Forward search and the property differs: fail. */
                        if ("0".equals(serachParam.getOppositePropertyFlag())
                                && !serachParam.getProperty().equals(analysisVocabulary.getProperty())) {
                            return false;
                        }
                        /* Reverse search and the property matches: fail. */
                        if ("1".equals(serachParam.getOppositePropertyFlag())
                                && serachParam.getProperty().equals(analysisVocabulary.getProperty())) {
                            return false;
                        }
                    }
                    /* Position1 condition present */
                    if (StringUtils.isNotEmpty(serachParam.getPosition1())) {
                        /* Forward search and position1 differs: fail. */
                        if ("0".equals(serachParam.getOppositePosition1Flag())
                                && !serachParam.getPosition1().equals(analysisVocabulary.getPosition1())) {
                            return false;
                        }
                        /* Reverse search and position1 matches: fail. */
                        if ("1".equals(serachParam.getOppositePosition1Flag())
                                && serachParam.getPosition1().equals(analysisVocabulary.getPosition1())) {
                            return false;
                        }
                    }
                    /* Position2 condition present but not matched: fail (no reverse mode). */
                    if (StringUtils.isNotEmpty(serachParam.getPosition2())
                            && !serachParam.getPosition2().equals(analysisVocabulary.getPosition2())) {
                        return false;
                    }
                    // Every present condition matched this occurrence: mark the param satisfied.
                    flag = false;
                }
            }
            /* No occurrence satisfied the current search param: fail. */
            if (flag) {
                return false;
            }
        }
        return true;

    }

    /**
     * Deletes a sentence: moves it into the deleted table, evicts it from the matching
     * cached redis result lists, and records its id in the deletion-filter redis key
     * (Sphinx index deletion is too expensive, so deleted ids are filtered at display time).
     *
     * @param typeStr    sentence type as string
     * @param secondType second-level label, used to build 现病史 (HPI) cache key patterns
     * @param idStr      sentence id as string
     * @return success result
     */
    public ReturnData delete(String typeStr, String secondType, String idStr) {
        Long id = Long.parseLong(idStr);
        int type = Integer.parseInt(typeStr);

        // Move the sentence from the analysis table into the deleted table.
        removeAnalysisToDel(id);

        AnalysisSentence analysisSentence = analysisSentenceDao.retrieveDelObjectById(id);

        LOGGER.info("更新redis开始");
        // Pick the cache key pattern matching this sentence's type and label status.
        String keyPattern = "";
        int labelStatus = analysisSentence.getLabelStatus();
        if (labelStatus == 1) {
            if (type == AnalysisSentenceConstant.TYPE_CT) {
                keyPattern = RedisConstant.KEY_PATTERN_MEDICAL_LABELED;
            } else if (type == AnalysisSentenceConstant.TYPE_XBS) {
                keyPattern = RedisConstant.KEY_PATTERN_HPI_LABELED_NO_SECOND_TYPE + (char) 1 + secondType + (char) 1 + "*";
            } else if (type == AnalysisSentenceConstant.TYPE_ZS) {
                keyPattern = RedisConstant.KEY_PATTERN_COMPLAIN_LABELED;
            }
        } else if (labelStatus == 0) {
            if (type == AnalysisSentenceConstant.TYPE_CT) {
                keyPattern = RedisConstant.KEY_PATTERN_MEDICAL_UNLABELED;
            } else if (type == AnalysisSentenceConstant.TYPE_XBS) {
                keyPattern = RedisConstant.KEY_PATTERN_HPI_UNLABELED_NO_SECOND_TYPE + (char) 1 + secondType + (char) 1 + "*";
            } else if (type == AnalysisSentenceConstant.TYPE_ZS) {
                keyPattern = RedisConstant.KEY_PATTERN_COMPLAIN_UNLABELED;
            }
        }
        removeAnalySentenceceOfRedis(id, keyPattern);
        // Append the id to the comma-delimited deletion-filter value.
        ValueOperations<String, Object> valueops = redisStrTemplate.opsForValue();
        Object existing = valueops.get(RedisConstant.KEY_ANALYSIS_DELETE);
        String updated = (existing == null ? "," : existing.toString()) + idStr + ",";
        valueops.set(RedisConstant.KEY_ANALYSIS_DELETE, updated);
        LOGGER.info("更新redis完成");
        return ReturnUtil.success();
    }

    /**
     * Marks a sentence as impossible to tag: moves it into the deleted table with the
     * cannot-tag flag, evicts it from the cached unlabeled-HPI result lists, and records
     * its id in the deletion-filter redis key so search results hide it at display time.
     *
     * @param typeStr    sentence type as string (currently unused here)
     * @param secondType second-level label, used to build the HPI cache key pattern
     * @param idStr      sentence id as string
     * @return success result
     */
    public ReturnData canNotTag(String typeStr, String secondType, String idStr) {
        Long id = Long.parseLong(idStr);
        // Move the sentence into the deleted table, flagged as cannot-tag.
        analysisSentenceDao.insertCannotTag(id);

        LOGGER.info("更新redis开始");
        // Evict the sentence from the cached unlabeled-HPI result lists.
        String keyPattern = RedisConstant.KEY_PATTERN_HPI_UNLABELED_NO_SECOND_TYPE + (char) 1 + secondType + (char) 1 + "*";
        removeAnalySentenceceOfRedis(id, keyPattern);
        // Append the id to the comma-delimited deletion-filter value.
        ValueOperations<String, Object> valueops = redisStrTemplate.opsForValue();
        Object existing = valueops.get(RedisConstant.KEY_ANALYSIS_DELETE);
        String updated = (existing == null ? "," : existing.toString()) + idStr + ",";
        valueops.set(RedisConstant.KEY_ANALYSIS_DELETE, updated);
        LOGGER.info("更新redis完成");
        return ReturnUtil.success();
    }

    /**
     * Marks a previously cannot-tag sentence as taggable again: restores it from the
     * deleted table into the analysis tables under a new id, and re-inserts it into the
     * cached unlabeled-HPI result lists.
     *
     * @param typeStr    sentence type as string (parsed for validation; not otherwise used)
     * @param secondType second-level label, used to build the HPI cache key pattern
     * @param idStr      old (deleted-table) sentence id as string
     * @return success result
     */
    public ReturnData canTag(String typeStr, String secondType, String idStr) {
        Long id = Long.parseLong(idStr);
        int type = Integer.parseInt(typeStr);

        // Restore the sentence and its vocabulary rows; a new id is generated.
        long newId = analysisSentenceDao.recoverCanTag(id);
        analysisSentenceDao.recoverCanTagVocab(id, newId);

        // Reload the restored sentence and vocabulary under the new id.
        AnalysisSentence analysisSentence = analysisSentenceDao.retrieveObjectById(newId);
        List<AnalysisVocabulary> vocabularys = analysisVocabularyDao.retrieveVocabById(newId);

        LOGGER.info("更新redis开始");
        // Re-insert the sentence into the cached unlabeled-HPI result lists.
        String keyPattern = RedisConstant.KEY_PATTERN_HPI_UNLABELED_NO_SECOND_TYPE + (char) 1 + secondType + (char) 1 + "*";
        addAnalySentenceceInRedis(analysisSentence, keyPattern, vocabularys);
        // NOTE(review): this APPENDS the old id to the deletion-filter key (same as the
        // original code), which keeps the old id filtered out of search results — the new
        // id is the live one. Behavior preserved; confirm this is intended.
        ValueOperations<String, Object> valueops = redisStrTemplate.opsForValue();
        Object existing = valueops.get(RedisConstant.KEY_ANALYSIS_DELETE);
        String updated = (existing == null ? "," : existing.toString()) + idStr + ",";
        valueops.set(RedisConstant.KEY_ANALYSIS_DELETE, updated);
        LOGGER.info("更新redis完成");
        return ReturnUtil.success();
    }

    /**
     * Moves a sentence (and, per the DAO's contract, its related rows) from the analysis
     * table into the deleted table via {@code insertDel}.
     *
     * @param id sentence id to delete
     */
    private void removeAnalysisToDel(Long id) {
        analysisSentenceDao.insertDel(id);
    }

    /**
     * Lists deleted sentences with paging and an optional filter on sentence text.
     *
     * Fix: removed a dead store — {@code start} was parsed from {@code startStr} and then
     * unconditionally overwritten by the value from {@code GetListUtil} (which receives
     * {@code startStr} itself). A malformed {@code startStr} no longer throws here;
     * GetListUtil handles it — confirm its behavior for non-numeric input.
     *
     * @param startStr        requested page/start as string; may be null/empty
     * @param sentence        optional text filter
     * @param type            sentence type
     * @param exactSearchFlag "1" for exact (tab-delimited) search, otherwise fuzzy
     * @param countPerPageStr optional page size override
     * @param secondType      optional second-level label filter
     * @return the list view model
     */
    public ModelAndView deleteList(String startStr, String sentence, int type, String exactSearchFlag, String countPerPageStr, String secondType) {

        Map<String, Object> param = new HashMap<>();
        // Optional filter on sentence text.
        if (StringUtils.isNotEmpty(sentence)) {
            if ("1".equals(exactSearchFlag)) {
                // Exact search: sentence text is tab-delimited in storage.
                param.put("sentence", "%\t" + sentence + "\t%");
            } else {
                // Fuzzy search: treat common separators as wildcards.
                param.put("sentence",
                        "%" + sentence.replace(";", "%").replace("；", "%").replace(",", "%").replace("，", "%") + "%");
            }
        }
        param.put("type", type);
        if (StringUtils.isNotBlank(secondType)) {
            param.put("secondType", secondType);
        }
        int totalSize = analysisSentenceDao.retrieveDelSize(param);

        // Paging values are derived from startStr by GetListUtil.
        Map<String, Integer> pageNo;
        if (StringUtils.isNotEmpty(countPerPageStr)) {
            int countPerPage = Integer.parseInt(countPerPageStr);
            pageNo = GetListUtil.getPageNoParam(totalSize, startStr, countPerPage);
            param.put("countPerPage", countPerPage);
        } else {
            pageNo = GetListUtil.getPageNoParam(totalSize, startStr);
        }
        int endPage = pageNo.get("endPage");
        int start = pageNo.get("start");
        int startIndex = pageNo.get("startIndex");

        List<AnalysisSentence> list = new ArrayList<>();
        if (startIndex < totalSize) {
            param.put("startIndex", startIndex);
            list = analysisSentenceDao.retrieveDelList(param);
        }

        Map<String, String> showSearch = new LinkedHashMap<>();
        showSearch.put("sentence", "句子");

        Map<String, String> searchMap = new HashMap<>();
        searchMap.put("sentence", sentence);

        Map<String, Object> map = GetListUtil.getReturnMap2(totalSize, start, startIndex, endPage, list, searchMap);
        map.put("showSearch", showSearch);
        map.put("showSearchValue", searchMap);

        // HPI pages additionally need the second-type dropdown and source error info.
        if (type == AnalysisSentenceConstant.TYPE_XBS) {
            map.put("secondTypeMap", AnalysisSentenceConstant.secondTypeMap);
            map.put("errorMsg", getSourceErrMsg());
        }
        return new ModelAndView("analysis/index", map);

    }

    /**
     * Shows the list of sentences marked as impossible to tag, with in-memory paging
     * (fixed page size of 20) and an optional filter on sentence text.
     *
     * NOTE(review): countPerPageStr is accepted but never used — the page size is fixed
     * at 20 here; confirm whether it should be honored like in deleteList.
     *
     * @param startStr        requested page/start as string; null means first page
     * @param sentence        optional text filter
     * @param type            sentence type
     * @param exactSearchFlag "1" for exact (tab-delimited) search, otherwise fuzzy
     * @param countPerPageStr page size override (currently ignored)
     * @param secondType      second-level label filter
     * @return the list view model
     * @throws SphinxException declared but apparently not thrown by this body — confirm
     */
    public ModelAndView canNotTagList(String startStr, String sentence, int type, String exactSearchFlag, String countPerPageStr,
                                      String secondType) throws SphinxException {
        int start = startStr == null ? 1 : Integer.parseInt(startStr);

        // Build the filter conditions.
        Map<String, Object> param = new HashMap<>();
        if (StringUtils.isNotEmpty(sentence)) {
            if ("1".equals(exactSearchFlag)) {
                // Exact search: sentence text is tab-delimited in storage.
                param.put("sentence", "%\t" + sentence + "\t%");
            } else {
                // Fuzzy search: treat common separators as wildcards.
                param.put("sentence", "%" + sentence.replace(";", "%").replace("；", "%").replace(",", "%").replace("，", "%") + "%");
            }
        }
        param.put("type", type);
        param.put("secondType", secondType);

        // Fetch the full cannot-tag result set; paging is done in memory below.
        List<AnalysisSentence> list = analysisSentenceDao.retrieveXbsCanNotTag(param);
        int totalSize = list == null ? 0 : list.size();

        // Derive paging values.
        Map<String, Integer> pageNo = GetListUtil.getPageNoMap(totalSize, start);
        int endPage = pageNo.get("endPage");
        start = pageNo.get("start");
        int startIndex = pageNo.get("startIndex");

        // Slice the current page (at most 20 entries).
        if (startIndex < totalSize) {
            list = list.subList(startIndex, Math.min(startIndex + 20, totalSize));
        }

        Map<String, String> showSearch = new LinkedHashMap<>();
        showSearch.put("sentence", "句子");

        Map<String, String> searchMap = new HashMap<>();
        searchMap.put("sentence", sentence);

        Map<String, Object> map = GetListUtil.getReturnMap2(totalSize, start, startIndex, endPage, list, searchMap);
        map.put("showSearch", showSearch);
        map.put("showSearchValue", searchMap);
        map.put("secondTypeMap", AnalysisSentenceConstant.secondTypeMap);

        return new ModelAndView("analysis/index", map);
    }


    /**
     * Saves an edited word segmentation for a subdivision sentence: validates that the
     * words reassemble the sentence, silently updates the dictionary, and replaces the
     * stored vocabulary and pronoun rows.
     *
     * @param typeStr        sentence type as string
     * @param secondType     second-level label, forwarded to the dictionary update
     * @param idStr          subdivision sentence id as string
     * @param user           current user
     * @param vocabularyStrs parallel arrays describing the edited segmentation
     * @param characterStrs  see vocabularyStrs
     * @param propertyStrs   see vocabularyStrs
     * @param position1Strs  see vocabularyStrs
     * @param position2Strs  see vocabularyStrs
     * @param req            current request, forwarded to helpers
     * @return success, or a validation failure when the words do not match the sentence
     */
    public ReturnData subdivisionEdit(String typeStr, String secondType, String idStr, User user, String[] vocabularyStrs, String[] characterStrs, String[] propertyStrs, String[] position1Strs, String[] position2Strs, HttpServletRequest req) {
        Long id = Long.parseLong(idStr);
        int type = Integer.parseInt(typeStr);

        // Load the subdivision sentence being edited.
        AnalysisSentenceSubdivision subSentence = analysisSentenceDao.retrieveSubdivisionObjectById(id);

        // Reject the edit when the submitted words do not reassemble the sentence text.
        if (!checkVocabulary(vocabularyStrs, subSentence.getSentence())) {
            return ReturnUtil.fail(ReturnCode.ANALYSIS_VOCABULARYS_CHECK_ERROR);
        }

        // Assemble vocabulary entities from the parallel parameter arrays.
        List<AnalysisVocabulary> vocabularys = getVocabularysByParam(vocabularyStrs, characterStrs, propertyStrs,
                position1Strs, position2Strs, id, type);

        // Previous segmentation, needed to diff against for the dictionary update.
        List<AnalysisVocabulary> rawVocabularys = analysisVocabularyDao.retriveSubListBySid(id);

        // Wrap the subdivision row in a plain sentence object for the dictionary update.
        AnalysisSentence sentenceForDict = new AnalysisSentence();
        sentenceForDict.setSentence(subSentence.getSentence());
        sentenceForDict.setId(subSentence.getId());
        sentenceForDict.setType(subSentence.getType());

        // Collect pronoun-related data for this segmentation.
        getPronounCountList(vocabularys, req);

        // Update the dictionary silently (no user prompt).
        updateDictionary(vocabularys, rawVocabularys, sentenceForDict, user, req, secondType);

        // Replace the stored vocabulary rows with the edited segmentation.
        analysisVocabularyDao.deleteSubVocabulaysBySid(subSentence.getId());
        analysisVocabularyDao.batchInsertSubdivisionVocab(vocabularys, subSentence.getId(), type);

        // Refresh the pronoun rows in the subdivision vocabulary table.
        savePronounSubdiv(vocabularys, subSentence.getId());
        return ReturnUtil.success();
    }

    /**
     * Updates the validity flag of a subdivision sentence.
     *
     * Fix: commons-lang 2.x {@code StringUtils.isNumeric("")} returns {@code true}, so an
     * empty flag previously slipped through the guard and was written to the database.
     * The flag must now be non-empty AND numeric for the update to run; any other value
     * is silently ignored (preserving the original lenient behavior).
     *
     * @param idStr subdivision sentence id as string
     * @param user  current user, recorded as the modifier
     * @param check new validity flag; must be a non-empty numeric string to take effect
     * @return success result (always — invalid flags are ignored, not rejected)
     */
    public ReturnData changeValid(String idStr, User user, String check) {
        if (StringUtils.isNotEmpty(check) && StringUtils.isNumeric(check)) {
            analysisSentenceDao.updateSubValidFlag(Long.parseLong(idStr), check, user.getUserId());
        }

        return ReturnUtil.success();
    }

    /**
     * Runnable wrapper that rebuilds the given Sphinx index via the enclosing service's
     * {@code updateSphinxIndex}, intended for asynchronous execution.
     */
    class ExecuteUpdateSphinx implements Runnable {

        // Name of the Sphinx index to rebuild; set once at construction.
        private final String index;

        ExecuteUpdateSphinx(String index) {
            this.index = index;
        }

        @Override
        public void run() {
            updateSphinxIndex(index);
        }
    }

    /**
     * Restores a deleted sentence: moves it (with its vocabulary rows) back into the
     * analysis tables under a new id and re-inserts it into the matching cached redis
     * result lists.
     *
     * @param typeStr    sentence type as string
     * @param secondType second-level label, used to build 现病史 (HPI) cache key patterns
     * @param idStr      old (deleted-table) sentence id as string
     * @return success result
     */
    public ReturnData recoverAnalysis(String typeStr, String secondType, String idStr) {
        Long id = Long.parseLong(idStr);
        int type = Integer.parseInt(typeStr);
        // Load the deleted sentence and its vocabulary rows before restoring them.
        AnalysisSentence analysisSentence = analysisSentenceDao.retrieveDelObjectById(id);
        Map<String, Object> param = new HashMap<>();
        param.put("sid", id);
        List<AnalysisVocabulary> vocabularys = analysisVocabularyDao.retrieveDelList(param);
        // Move the sentence back into the analysis tables; a new id is generated.
        long newId = analysisSentenceDao.recoverAnalysis(id);
        analysisSentenceDao.recoverAnalysisVocab(id, newId);

        LOGGER.info("更新redis开始");
        // Pick the cache key pattern matching this sentence's type and label status.
        String keyPattern = "";
        int labelStatus = analysisSentence.getLabelStatus();
        if (labelStatus == 1) {
            if (type == AnalysisSentenceConstant.TYPE_CT) {
                keyPattern = RedisConstant.KEY_PATTERN_MEDICAL_LABELED;
            } else if (type == AnalysisSentenceConstant.TYPE_XBS) {
                keyPattern = RedisConstant.KEY_PATTERN_HPI_LABELED_NO_SECOND_TYPE + (char) 1 + secondType + (char) 1 + "*";
            } else if (type == AnalysisSentenceConstant.TYPE_ZS) {
                keyPattern = RedisConstant.KEY_PATTERN_COMPLAIN_LABELED;
            }
        } else if (labelStatus == 0) {
            if (type == AnalysisSentenceConstant.TYPE_CT) {
                keyPattern = RedisConstant.KEY_PATTERN_MEDICAL_UNLABELED;
            } else if (type == AnalysisSentenceConstant.TYPE_XBS) {
                keyPattern = RedisConstant.KEY_PATTERN_HPI_UNLABELED_NO_SECOND_TYPE + (char) 1 + secondType + (char) 1 + "*";
            } else if (type == AnalysisSentenceConstant.TYPE_ZS) {
                keyPattern = RedisConstant.KEY_PATTERN_COMPLAIN_UNLABELED;
            }
        }
        // The cached entry must carry the new id.
        analysisSentence.setId(newId);
        addAnalySentenceceInRedis(analysisSentence, keyPattern, vocabularys);

        LOGGER.info("更新redis完成");
        return ReturnUtil.success();
    }


    /**
     * Previews an edited word segmentation for a sentence. Validates the words, collects
     * pronoun data, and — only when the segmentation actually changed — persists the new
     * vocabulary/pronoun rows and regenerates the derived subdivision sentences.
     *
     * @param idStr          sentence id as string
     * @param vocabularyStrs parallel arrays describing the edited segmentation
     * @param characterStrs  see vocabularyStrs
     * @param propertyStrs   see vocabularyStrs
     * @param position1Strs  see vocabularyStrs
     * @param position2Strs  see vocabularyStrs
     * @param type           sentence type
     * @param isPronouns     pronoun markers per word
     * @param secondType     second-level label
     * @param user           current user
     * @param paramMap       shared parameter map, passed to the pronoun collector
     * @return success, or a validation failure when the words do not match the sentence
     */
    public ReturnData analysisPreview(String idStr, String[] vocabularyStrs,
                                      String[] characterStrs, String[] propertyStrs, String[] position1Strs, String[] position2Strs, int type, String[] isPronouns, String secondType, User user, Map<String, Object> paramMap) {
        Long id = Long.parseLong(idStr);
        // Load the sentence being previewed.
        AnalysisSentence analysisSentence = analysisSentenceDao.retrieveObjectById(id);

        // Reject the edit when the submitted words do not reassemble the sentence text.
        if (!checkVocabulary(vocabularyStrs, analysisSentence.getSentence())) {
            return ReturnUtil.fail(ReturnCode.ANALYSIS_VOCABULARYS_CHECK_ERROR);
        }

        // Assemble vocabulary entities from the parallel parameter arrays.
        List<AnalysisVocabulary> vocabularys = getVocabularysByParam(vocabularyStrs, characterStrs, propertyStrs,
                position1Strs, position2Strs, id, type);

        // Compare with the stored segmentation to decide whether anything changed.
        List<AnalysisVocabulary> rawVocabularys = analysisVocabularyDao.retriveListBySid(id);
        boolean unchanged = checkVocabularySame(rawVocabularys, vocabularys);
        // Collect pronoun-related data for this segmentation.
        getPronounCountList(vocabularys, isPronouns, paramMap);
        if (unchanged) {
            return ReturnUtil.success();
        }

        // Persist the new segmentation: replace the old vocabulary rows...
        analysisVocabularyDao.deleteVocabulaysBySid(analysisSentence.getId());
        analysisVocabularyDao.batchInsert(vocabularys);
        // ...and the pronoun extension rows.
        savePronoun(vocabularys, analysisSentence.getId());

        // Regenerate the subdivision sentences derived from the new segmentation
        // (position = 0 sentences fetched from the subdivision service).
        List<AnalysisSentence> subdivisionSentence = getSubdivisionByVocab(vocabularys);
        deleteSubdivisionBySid(analysisSentence.getId(), true);
        saveSubdivisionSentence(subdivisionSentence, id, true, type, secondType, user);

        return ReturnUtil.success();
    }

    /**
     * Previews an edited word segmentation for a subdivision sentence. Validates the
     * words, silently updates the dictionary, always replaces the stored vocabulary and
     * pronoun rows, and — when the segmentation changed — regenerates the derived
     * subdivision sentences.
     *
     * @param idStr          subdivision sentence id as string
     * @param vocabularyStrs parallel arrays describing the edited segmentation
     * @param characterStrs  see vocabularyStrs
     * @param propertyStrs   see vocabularyStrs
     * @param position1Strs  see vocabularyStrs
     * @param position2Strs  see vocabularyStrs
     * @param type           sentence type
     * @param isPronouns     pronoun markers per word
     * @param secondType     second-level label
     * @param user           current user
     * @param paramMap       shared parameter map, passed to the pronoun collector
     * @param req            current request, forwarded to the dictionary update
     * @return success, or a validation failure when the words do not match the sentence
     */
    public ReturnData subdivPreview(String idStr, String[] vocabularyStrs,
                                    String[] characterStrs, String[] propertyStrs, String[] position1Strs, String[] position2Strs, int type, String[] isPronouns, String secondType, User user, Map<String, Object> paramMap, HttpServletRequest req) {
        Long id = Long.parseLong(idStr);
        // Load the subdivision sentence being previewed.
        AnalysisSentenceSubdivision subSentence = analysisSentenceDao.retrieveSubdivisionObjectById(id);

        // Reject the edit when the submitted words do not reassemble the sentence text.
        if (!checkVocabulary(vocabularyStrs, subSentence.getSentence())) {
            return ReturnUtil.fail(ReturnCode.ANALYSIS_VOCABULARYS_CHECK_ERROR);
        }

        // Assemble vocabulary entities from the parallel parameter arrays.
        List<AnalysisVocabulary> vocabularys = getVocabularysByParam(vocabularyStrs, characterStrs, propertyStrs,
                position1Strs, position2Strs, id, type);

        // Previous segmentation, for the dictionary diff and the change check.
        List<AnalysisVocabulary> rawVocabularys = analysisVocabularyDao.retriveSubListBySid(id);

        // Wrap the subdivision row in a plain sentence object for the dictionary update.
        AnalysisSentence sentenceForDict = new AnalysisSentence();
        sentenceForDict.setSentence(subSentence.getSentence());
        sentenceForDict.setId(subSentence.getId());
        sentenceForDict.setType(subSentence.getType());

        // Collect pronoun-related data for this segmentation.
        getPronounCountList(vocabularys, isPronouns, paramMap);

        // Update the dictionary silently (no user prompt).
        updateDictionary(vocabularys, rawVocabularys, sentenceForDict, user, req, secondType);

        // Checked after the dictionary update, preserving the original call order.
        boolean unchanged = checkVocabularySame(rawVocabularys, vocabularys);

        // Replace the stored vocabulary rows with the edited segmentation.
        analysisVocabularyDao.deleteSubVocabulaysBySid(subSentence.getId());
        analysisVocabularyDao.batchInsertSubdivisionVocab(vocabularys, subSentence.getId(), type);

        // Refresh the pronoun rows in the subdivision vocabulary table.
        savePronounSubdiv(vocabularys, subSentence.getId());

        // When the segmentation changed, regenerate the derived subdivision sentences.
        if (!unchanged) {
            List<AnalysisSentence> subdivisionSentence = getSubdivisionByVocab(vocabularys);
            deleteSubdivisionBySid(subSentence.getId(), false);
            saveSubdivisionSentence(subdivisionSentence, id, false, type, secondType, user);
        }
        return ReturnUtil.success();
    }

}
