package com.huiquan.mark_sentence.service;

import com.alibaba.fastjson.JSON;
import com.huiquan.analysis.constants.AnalysisSentenceConstant;
import com.huiquan.analysis.constants.RedisConstant;
import com.huiquan.analysis.dao.AnalysisSentenceDao;
import com.huiquan.analysis.dao.AnalysisVocabularyDao;
import com.huiquan.analysis.domain.*;
import com.huiquan.analysis.service.AnalysisSentenceService;
import com.huiquan.foundation.util.BusinessUtil;
import com.huiquan.framework.base.BaseService;
import com.huiquan.framework.base.ReturnCode;
import com.huiquan.framework.base.ReturnData;
import com.huiquan.framework.utils.*;
import com.huiquan.mark_sentence.dao.MarkSentenceDao;
import com.huiquan.mark_sentence.domain.MarkGroup;
import com.huiquan.mark_sentence.domain.MarkSentence;
import com.huiquan.mark_sentence.domain.MarkVocabulary;
import com.witspring.analysis.LongSentenceProcess;
import com.witspring.analysis.SubSentence;
import net.sf.json.JSONObject;
import org.apache.commons.lang.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.ValueOperations;
import org.springframework.stereotype.Service;
import org.springframework.web.servlet.ModelAndView;

import javax.servlet.http.HttpServletRequest;
import java.io.*;
import java.util.*;

/**
 * @author lichao email:lichao@witspring.com
 * @Description: 语句标注 service
 * @Date 2018/4/25 11:51
 * @since Ver V0.0.1
 */
@Service
public class MarkSentenceService extends BaseService {

    @Autowired
    private MarkSentenceDao markSentenceDao;
    @Autowired
    private AnalysisSentenceDao analysisSentenceDao;
    @Autowired
    private AnalysisSentenceService analysisSentenceService;
    @Autowired
    private AnalysisVocabularyDao analysisVocabularyDao;
    @Autowired
    private RedisTemplate<String, Object> redisStrTemplate;
    @Autowired
    private RedisTemplate<String, List<AnalysisSentence>> redisTemplate;

    /**
     * Builds the sentence-list page model: applies keyword/flag/delFlag filters,
     * computes paging, and loads the current page plus the selectable type list.
     *
     * @param flag     sentence flag filter (ignored when delFlag is "1")
     * @param keyword  optional keyword filter; skipped when empty
     * @param startStr requested page number as a string; defaults to page 1 when null
     * @param delFlag  deletion flag; the value "1" forces the flag filter to "1"
     * @return the "mark_sentence/index" view with list, paging info, types and keyword
     */
    public ModelAndView getSentenceList(String flag, String keyword, String startStr, String delFlag) {
        int requestedPage = (startStr == null) ? 1 : Integer.parseInt(startStr);

        // Assemble the filter conditions for the DAO queries.
        Map<String, Object> filters = new HashMap<>();
        if (!StringUtils.isEmpty(keyword)) {
            filters.put("keyword", keyword);
        }
        // delFlag "1" overrides whatever flag value was passed in.
        filters.put("flag", StringUtils.equals(delFlag, "1") ? "1" : flag);
        filters.put("delFlag", delFlag);

        // Total row count drives the paging computation.
        int totalSize = markSentenceDao.selectMarkSentenceCount(filters);
        Map<String, Integer> paging = GetListUtil.getPageNoMap(totalSize, requestedPage);
        int startIndex = paging.get("startIndex");

        // Only query the page when the offset falls inside the result set.
        List<MarkSentence> rows = new ArrayList<>();
        if (startIndex < totalSize) {
            filters.put("startIndex", startIndex);
            rows = markSentenceDao.selectMarkSentenceList(filters);
        }

        Map<String, Object> model = GetListUtil.getReturnMap2(totalSize, paging.get("start"), startIndex,
                paging.get("endPage"), rows, null);
        model.put("typeList", markSentenceDao.selectMarkSentenceTypeList());
        model.put("keyword", keyword);
        return new ModelAndView("mark_sentence/index", model);
    }

    /**
     * Builds the model for the word-edit screen of one mark sentence: loads the
     * sentence, its word groups and each group's words, optionally substitutes
     * words from already-verified sub-sentences, and collects the selectable
     * characteristic/property/position option maps.
     *
     * @param idStr the mark-sentence id as a string
     * @return the "mark_sentence/sentence_edit" view with the edit model
     */
    public ModelAndView preEdit(String idStr) {
        // Load the sentence and its segmentation by id.
        Long id = Long.parseLong(idStr);
        MarkSentence analysisSentence = markSentenceDao.selectMarkSentenceById(id);
        // Load the word groups of this sentence.
        List<MarkGroup> groups = markSentenceDao.retriveGroupListBySid(id);
        // Option maps for characteristic and property; values found on the words
        // but missing from the constants are appended further below.
        Map<String, String> characterMap = new LinkedHashMap<>();
        characterMap.putAll(AnalysisSentenceConstant.getPropertyMapByType(AnalysisSentenceConstant.CHARACTER_LABEL));
        Map<String, String> propertyMap = new LinkedHashMap<>();
        propertyMap.putAll(AnalysisSentenceConstant.getPropertyMapByTypeAndSecondType(2,
                "ImageologyInfo"));
        List<MarkVocabulary> allVocabs = new ArrayList<>();
        // Collect every group's words, tagging each word with its group's type.
        for (MarkGroup group : groups) {

            List<MarkVocabulary> vocabularys = markSentenceDao.retriveVocabListByGroup(group.getId());

            for (MarkVocabulary term : vocabularys) {
                term.setType(group.getType());
            }
            allVocabs.addAll(vocabularys);
        }
        // For a still-unlabeled sentence, improve the pre-annotation by reusing
        // words from sentence segments that were already verified elsewhere.
        if (analysisSentence.getFlag() == 0) {
            // Replace any sub-segment that was already labeled with that segment's words.
            allVocabs = dealVocabsByLabeled(analysisSentence.getSentence(), allVocabs);
        }
        // Make sure every characteristic/property present on the words is selectable.
        for (MarkVocabulary term : allVocabs) {
            if (!characterMap.containsKey(term.getCharacteristic())) {
                characterMap.put(term.getCharacteristic(), term.getCharacteristic());
            }
            if (!propertyMap.containsKey(term.getProperty())) {
                propertyMap.put(term.getProperty(), term.getProperty());
            }
        }
        Map<String, Object> map = new HashMap<>();
        map.put("id", id);
        map.put("sentence", analysisSentence.getSentence());
        map.put("vocabularys", allVocabs);
        map.put("characterMap", characterMap);
        map.put("position1List", AnalysisSentenceConstant.getPositionByType(AnalysisSentenceConstant.POSITION1_LABEL));
        map.put("position2List", AnalysisSentenceConstant.getPositionByType(AnalysisSentenceConstant.POSITION2_LABEL));
        map.put("propertyMap", propertyMap);
        map.put("secondTypeMap", AnalysisSentenceConstant.secondTypeMap);

        return new ModelAndView("mark_sentence/sentence_edit", map);
    }

    /**
     * Checks the given sentence against all previously verified sentences; any
     * sub-segment that was already labeled gets its verified words substituted
     * into the word list in place of the pre-annotated ones.
     *
     * @param sentence  the full sentence being edited
     * @param allVocabs the current (pre-annotated) word list
     * @return the word list with verified segments substituted
     */
    private List<MarkVocabulary> dealVocabsByLabeled(String sentence, List<MarkVocabulary> allVocabs) {
        // Map each verified sentence (commas stripped from both ends) to its id.
        Map<String, Long> labeledSentenceAndId = new HashMap<>();
        for (Map<String, Object> label : analysisSentenceDao.retrieveLabeledSentence()) {
            String cleaned = trim(trim(label.get("sentence").toString(), ','), '，');
            labeledSentenceAndId.put(cleaned, (Long) label.get("id"));
        }

        // Locate verified sub-sentences inside the target sentence.
        List<SubSentence> matches =
                new LongSentenceProcess().getLabeledSubSentenceList(sentence, labeledSentenceAndId);
        List<Integer> startOffsets = new ArrayList<>();
        List<Long> matchedSids = new ArrayList<>();
        for (SubSentence match : matches) {
            startOffsets.add(match.getStartSequence());
            matchedSids.add(match.getSid());
        }
        return getReplacedList(startOffsets, matchedSids, allVocabs);
    }

    /**
     * Strips every leading and trailing occurrence of {@code trimFlag} from
     * {@code value}, mirroring {@link String#trim()} but for an arbitrary character.
     *
     * @param value    the string to strip (must not be null)
     * @param trimFlag the character to remove from both ends
     * @return the stripped string, or the original instance when nothing was removed
     */
    public String trim(String value, char trimFlag) {
        char[] chars = value.toCharArray();
        int begin = 0;
        int end = value.length();

        // Advance past leading flag characters.
        while (begin < end && chars[begin] == trimFlag) {
            begin++;
        }
        // Back off trailing flag characters.
        while (begin < end && chars[end - 1] == trimFlag) {
            end--;
        }
        // Reuse the original instance when no character was removed.
        if (begin == 0 && end == value.length()) {
            return value;
        }
        return value.substring(begin, end);
    }

    /**
     * Rebuilds the word list, substituting each segment that begins at one of
     * {@code startIndex} (a character offset within the sentence rebuilt so far)
     * with the verified words of the corresponding sid in {@code sids}.
     *
     * Two buffers track progress: {@code sentence} grows with the words actually
     * emitted, {@code tempSentence} grows with the original words consumed; while
     * their lengths differ, original words covered by a replacement are skipped.
     *
     * NOTE(review): assumes {@code startIndex} and {@code sids} are parallel and
     * matched in traversal order — confirm against dealVocabsByLabeled's caller.
     *
     * @param startIndex character offsets where verified segments start
     * @param sids       ids of the verified sentences, consumed in order
     * @param allVocabs  the original pre-annotated word list
     * @return the word list with verified segments substituted
     */
    private List<MarkVocabulary> getReplacedList(List<Integer> startIndex, List<Long> sids, List<MarkVocabulary> allVocabs) {
        List<MarkVocabulary> replacedVocab = new ArrayList<>();
        StringBuilder sentence = new StringBuilder();
        StringBuilder tempSentence = new StringBuilder();
        int indexSid = 0;
        for (MarkVocabulary vocab : allVocabs) {
            if (tempSentence.length() == sentence.length()) {
                // A verified segment starts at this offset: splice in its words.
                if (startIndex.contains(sentence.length())) {
                    // Load the verified words of the matched sentence.
                    List<AnalysisVocabulary> vocabs = analysisVocabularyDao.retriveListBySid(sids.get(indexSid));
                    indexSid++;
                    // Drop trailing comma words (both ASCII and full-width).
                    for (int i = vocabs.size() - 1; i >= 0; i--) {
                        if(",".equals(vocabs.get(i).getVocabulary()) || "，".equals(vocabs.get(i).getVocabulary())){
                            vocabs.remove(i);
                        }else{
                            break;
                        }
                    }
                    // Skip leading comma words, then emit the rest.
                    boolean startTemp = true;
                    for (AnalysisVocabulary analysisVocab : vocabs) {
                        if (!analysisVocab.getVocabulary().equals(",") && !analysisVocab.getVocabulary().equals("，") && startTemp) {
                            startTemp = false;
                        }
                        if(!startTemp){
                            MarkVocabulary markVocab = tranMarkVocab(analysisVocab, vocab.getType(), vocab.getSid());
                            replacedVocab.add(markVocab);
                            sentence.append(markVocab.getVocabulary());
                        }
                    }
                    tempSentence.append(vocab.getVocabulary());
                } else {
                    sentence.append(vocab.getVocabulary());
                    tempSentence.append(vocab.getVocabulary());
                    replacedVocab.add(vocab);
                }
            } else {
                // Lengths differ => a replacement happened; skip the original words it covers.
                tempSentence.append(vocab.getVocabulary());
            }
        }
        return replacedVocab;
    }


    /**
     * Converts an {@link AnalysisVocabulary} row into a {@link MarkVocabulary},
     * copying the word-level fields and attaching the given type and sentence id.
     *
     * @param vocab source vocabulary row
     * @param type  group type to assign
     * @param sid   owning sentence id
     * @return the populated MarkVocabulary
     */
    private MarkVocabulary tranMarkVocab(AnalysisVocabulary vocab, String type, Long sid) {
        MarkVocabulary target = new MarkVocabulary();
        target.setVocabulary(vocab.getVocabulary());
        target.setCharacteristic(vocab.getCharacteristic());
        target.setProperty(vocab.getProperty());
        target.setPosition1(vocab.getPosition1());
        target.setPosition2(vocab.getPosition2());
        target.setType(type);
        target.setSid(sid);
        return target;
    }

    /**
     * Updates a mark sentence's type. When the sentence is not yet linked to
     * bas_analysis_sentence it is inserted there (plus the XBS enlarge table) and
     * the new sid is stored back; otherwise only the existing enlarge row's
     * secondType is updated.
     *
     * @param id   bas_mark_sentence primary key
     * @param type the new (second) type
     * @return success result
     */
    public ReturnData updateType(long id, String type) {
        markSentenceDao.updateMarkSentenceType(id, type);
        MarkSentence markSentence = markSentenceDao.selectMarkSentenceById(id);
        Long linkedSid = markSentence.getSid();
        if (linkedSid != null && linkedSid != 0) {
            // Already linked: refresh secondType in bas_analysis_sentence_enlarge_xbs.
            markSentenceDao.updateAnalysisSentenceEnlargeXbs(linkedSid, type);
        } else {
            // Not linked yet: create the analysis sentence, its XBS enlarge row,
            // then store the generated sid back on the mark sentence.
            AnalysisSentence analysisSentence = new AnalysisSentence();
            analysisSentence.setSentence(markSentence.getSentence());
            analysisSentence.setLabelStatus(AnalysisSentenceConstant.LABEL_STATUS_UNLABELED);
            analysisSentence.setType(AnalysisSentenceConstant.TYPE_XBS);
            long sid = markSentenceDao.insertToAnalysisSentence(analysisSentence);
            markSentenceDao.insertToAnalysisSentenceEnlargeXbs(sid, type);
            markSentenceDao.updateMarkSentenceSid(id, sid);
        }
        return ReturnUtil.success();
    }


    @Value("${mark.sentence.export.path}")
    private String markedSentenceExportPath;

    @Value("${mark.sentence.export.script}")
    private String exportMarkedSentenceScript;

    /**
     * Exports all marked sentences to a tab-separated text file
     * ("&lt;sentence&gt;\t&lt;other_id&gt;" per line) and then runs the export
     * shell script to push the file to the 181 host.
     *
     * Fix: the PrintWriter is now opened with try-with-resources so the file
     * handle is released even when writing or the shell call fails (previously
     * it leaked on any exception between open and close).
     */
    public void exportMarkedSentence() {
        String outputPath = markedSentenceExportPath + "markedSentence.txt";
        List<Map<String, String>> list = markSentenceDao.selectMarkSentenceListForExport();
        try {
            try (PrintWriter pw = new PrintWriter(outputPath)) {
                for (Map<String, String> markSentence : list) {
                    pw.println(markSentence.get("sentence") + "\t" + markSentence.get("other_id"));
                }
                pw.flush();
            }
            BusinessUtil.excuteShell(new String[]{exportMarkedSentenceScript, outputPath});
            LOGGER.info("导出已标记语句到181成功");
        } catch (FileNotFoundException e) {
            e.printStackTrace();
            LOGGER.error("标记语句导出路径错误，路径为：{}", outputPath);
        } catch (IOException e) {
            e.printStackTrace();
            LOGGER.error("导出已标记语句到181失败【{}】", e.getMessage());
        }
    }

    @Value("${mark.sentence.import.script}")
    private String importUnmarkedSentenceScript;

    /**
     * Imports the unmarked sentences: copies the file from the 181 host and loads
     * it into a temp table via shell script, replaces the local unmarked
     * sentences from that temp table, then runs segmentation/BME pre-labeling
     * and repetition-rate weighting.
     *
     * NOTE(review): the step numbering below jumps from 1 to 3 — step 2 appears
     * to have been removed at some point.
     *
     * @throws Exception propagated from the pre-labeling / repetition-rate steps
     */
    public void importUnmarkSentence() throws Exception {
        // 1. Copy the file from 181 to this host and load it into the temp table.
        try {
            BusinessUtil.excuteShell(new String[]{importUnmarkedSentenceScript});
        } catch (IOException e) {
            e.printStackTrace();
            LOGGER.error("执行导入未标记数据到临时表脚本失败【{}】", e.getMessage());
            return;
        }
        LOGGER.info("执行导入未标记数据到临时表脚本成功");
        // 3. Delete the existing unmarked sentences.
        markSentenceDao.deleteUnmarkedSentence();
        LOGGER.info("删除未标注语句成功");
        // 4. Insert the temp-table rows into the unmarked-sentence table (join).
        markSentenceDao.insertUnmarkedSentenceWithTemp();

        // 5. Segment each sentence and run os/bme pre-labeling.
        dealMarkSentence();

        // 6. Weight the unverified HPI data via the repetition-rate algorithm.
        dealRepetitionRate();

        LOGGER.info("连表将临时表中数据插入到未标注语句成功");
    }

    @Value("${mark.init.sentence.path}")
    private String markPath;
    @Value("${init.sentence.execute.label.mark.script}")
    private String script;
    @Value("${init.xbs.repetition.rate.script}")
    private String ratescript;
    @Value("${export.xbs.unlabel.file.path}")
    private String outUnlabelFilePath;
    @Value("${export.xbs.label.file.path}")
    private String outLabelFilePath;

    /**
     * Assigns repetition-rate weights to the unverified HPI sentences: clears
     * the rate table, generates today's unlabeled and labeled word files, calls
     * the remote repetition-rate service, and on success imports the result into
     * MySQL via shell script.
     *
     * Fix: an empty/null HTTP response now logs and returns instead of being
     * passed into {@code JSON.parseObject} (which previously caused an NPE
     * right after the error was logged). Also removed the unused "data" object
     * and the dead "error" initializer.
     *
     * @throws Exception when file generation or the shell script fails
     */
    private void dealRepetitionRate() throws Exception {
        // Reset the repetition-rate table before recomputing.
        analysisSentenceDao.initRepetitionRate();
        // Generate today's unlabeled input file.
        initUnlabelFileBySecondType();
        // Generate today's labeled input file.
        initLabelFileBySecondType();

        // Call the repetition-rate service with the two file paths.
        Map<String, String> param = new HashMap<>();
        param.put("labeledVocabularyListFilePath", outLabelFilePath + DateUtils.convertTodayFormat() + ".txt");
        param.put("unlabelledSentenceListFilePath", outUnlabelFilePath + DateUtils.convertTodayFormat() + ".txt");
        String jsonStr = HttpRequest.sendPost("http://172.16.0.132:1099/repetitionRate/getRate.do", param);
        if (jsonStr == null || jsonStr.isEmpty()) {
            LOGGER.error("重复率接口调用失败");
            // Nothing to parse — bail out instead of NPE-ing on JSON.parseObject.
            return;
        }
        com.alibaba.fastjson.JSONObject jo = JSON.parseObject(jsonStr);
        if (jo.getIntValue("code") == 200) {
            // Import the computed rates into MySQL via the shell script.
            String result = BusinessUtil.excuteShell(new String[]{ratescript});
            LOGGER.info("执行重复率导入脚本成功! result=" + result);
        } else {
            LOGGER.error("重复率接口调用返回错误错误，{}", jo.toJSONString());
        }
    }

    /**
     * Writes all verified HPI vocabulary entries, one JSON object per line, to
     * today's labeled-data file.
     *
     * @throws Exception when the output file cannot be written
     */
    private void initLabelFileBySecondType() throws Exception {
        LOGGER.info("获取已校验数据");
        List<AnalysisVocabulary> labeledWords = analysisVocabularyDao.retrieveLabeledXbsVocabularys(null);
        StringBuilder content = new StringBuilder();
        for (AnalysisVocabulary word : labeledWords) {
            // Only the three word-level fields are exported.
            AnalysisVocabularyJson json = new AnalysisVocabularyJson();
            json.setVocabulary(word.getVocabulary());
            json.setProperty(word.getProperty());
            json.setCharacteristic(word.getCharacteristic());
            content.append(JSONObject.fromObject(json).toString()).append("\n");
        }
        LOGGER.info("开始写入文件");
        // Flush the assembled lines to today's file.
        FileUtils.generateFile(outLabelFilePath + DateUtils.convertTodayFormat() + ".txt", content.toString());
        LOGGER.info("写入文件完成");
    }

    /**
     * Writes every unverified sentence's word list as one JSON line
     * ({sid, vocabularyList}) to today's unlabeled-data file.
     *
     * The DAO returns words grouped by sentence id; rows are accumulated per sid
     * and flushed whenever the sid changes, plus once at the end.
     *
     * Fix: the duplicated word-JSON construction and the duplicated
     * sentence-flush code are factored into private helpers; behavior unchanged.
     *
     * @throws Exception when the output file cannot be written
     */
    private void initUnlabelFileBySecondType() throws Exception {
        List<AnalysisVocabulary> unlabelVocabs = markSentenceDao.retrieveUnbelVocabs();
        List<AnalysisVocabularyJson> vocabJsonList = new ArrayList<>();
        Long tempSid = 0L;
        StringBuilder outUnlabelFile = new StringBuilder();
        LOGGER.info("开始拼接未校验数据");
        for (AnalysisVocabulary unlabelVocab : unlabelVocabs) {
            if (!tempSid.equals(unlabelVocab.getSid()) && !tempSid.equals(0L)) {
                // Sid changed: flush the previous sentence's words as one JSON line.
                appendSentenceJson(outUnlabelFile, tempSid, vocabJsonList);
                vocabJsonList = new ArrayList<>();
            }
            tempSid = unlabelVocab.getSid();
            vocabJsonList.add(toVocabJson(unlabelVocab));
        }
        if (vocabJsonList.size() > 0) {
            // Flush the final sentence.
            appendSentenceJson(outUnlabelFile, tempSid, vocabJsonList);
        }
        LOGGER.info("拼接未校验数据结束");
        // 写文件
        LOGGER.info("开始写入文件");
        FileUtils.generateFile(outUnlabelFilePath + DateUtils.convertTodayFormat() + ".txt", outUnlabelFile.toString());
        LOGGER.info("写入文件完成");

    }

    /** Copies the three word-level fields of a vocabulary row into its JSON form. */
    private AnalysisVocabularyJson toVocabJson(AnalysisVocabulary vocab) {
        AnalysisVocabularyJson vocabularyJson = new AnalysisVocabularyJson();
        vocabularyJson.setCharacteristic(vocab.getCharacteristic());
        vocabularyJson.setProperty(vocab.getProperty());
        vocabularyJson.setVocabulary(vocab.getVocabulary());
        return vocabularyJson;
    }

    /** Appends one sentence's {sid, vocabularyList} JSON line to the output buffer. */
    private void appendSentenceJson(StringBuilder out, Long sid, List<AnalysisVocabularyJson> vocabs) {
        AnalysisSentenceJson sjson = new AnalysisSentenceJson();
        sjson.setSid(sid);
        sjson.setVocabularyList(vocabs);
        out.append(JSONObject.fromObject(sjson).toString()).append("\n");
    }

    /**
     * Runs word segmentation / BME pre-labeling for all unclassified sentences:
     * dumps them (one per line) to the pre-processing input file, executes the
     * labeling script, then post-processes the script's output.
     *
     * Fix: removed the dead {@code String result = "error";} initializer that
     * was immediately overwritten.
     *
     * @throws Exception when file generation or the shell script fails
     */
    private void dealMarkSentence() throws Exception {
        // Load every sentence still flagged as unclassified (flag = "0").
        Map<String, Object> param = new HashMap<>();
        param.put("flag", "0");
        List<MarkSentence> list = markSentenceDao.selectMarkSentenceList(param);

        // Write one sentence per line to the pre-processing input file.
        String ulFilePath = BusinessUtil.getCompletePath4Properties(markPath);
        StringBuilder ulSb = new StringBuilder();
        for (MarkSentence sentence : list) {
            ulSb.append(sentence.getSentence()).append("\n");
        }
        FileUtils.generateFile(ulFilePath, ulSb.toString());
        LOGGER.info("创建句子分类预处理输入文件成功!");

        // Run the segmentation / labeling script.
        String result = BusinessUtil.excuteShell(new String[]{script});
        LOGGER.info("执行预处理脚本成功! result=" + result);

        // Post-process the pre-labeling output.
        dealLabelResult(list);
        LOGGER.info("初始化数据完成");
    }

    @Value("${init.sentence.load.mark.script}")
    private String loadMarkScript;
    @Value("${init.sentence.bme.result.mark.path}")
    private String markResultPath;
    @Value("${init.sentence.bme.result.sid.mark.path}")
    private String markResultWithSidPath;

    /**
     * Post-processes the pre-labeling result file: prefixes each word line with
     * the owning sentence id and appends the block's reliability score, writes
     * the enriched file, loads it into a MySQL temp table via shell script, and
     * merges the temp table into bas_analysis_vocabulary.
     *
     * Result-file format (as consumed here): word lines grouped per sentence,
     * each group terminated by a "#\t&lt;reliability&gt;" line; blank lines
     * separate sentences and advance the index into {@code list}.
     *
     * Fix: reader and writer are now managed by try-with-resources — the output
     * stream previously leaked whenever the input file was missing (it was
     * opened before the existence check and only closed inside the if-branch)
     * or when reading threw.
     *
     * @param list the sentences that were exported, in input-file order
     * @throws IOException when reading or writing the result files fails
     */
    private void dealLabelResult(List<MarkSentence> list) throws IOException {
        String resultFilePath = BusinessUtil.getCompletePath4Properties(markResultPath);
        String resultWithSidPath = BusinessUtil.getCompletePath4Properties(markResultWithSidPath);
        int count = 0;
        File resultFile = new File(resultFilePath);
        try (OutputStreamWriter writer =
                     new OutputStreamWriter(new FileOutputStream(resultWithSidPath), "UTF-8")) {
            // Only process when the result file actually exists.
            if (resultFile.isFile() && resultFile.exists()) {
                LOGGER.info("对处理结果添加sid");
                // Read with an explicit charset to match how the file was produced.
                try (BufferedReader bufferedReader = new BufferedReader(
                        new InputStreamReader(new FileInputStream(resultFile), "UTF-8"))) {
                    String lineTxt;
                    ArrayList<String> lineList = new ArrayList<>();
                    while ((lineTxt = bufferedReader.readLine()) != null) {
                        if ("".equals(lineTxt)) {
                            // Blank line => advance to the next sentence in `list`.
                            count++;
                        } else if (lineTxt.startsWith("#") && lineTxt.split("\t").length < 3) {
                            // "#\t<reliability>" terminates the group: emit its buffered lines.
                            String reliability = lineTxt.substring(2);
                            for (String line : lineList) {
                                // sid, original line, reliability — tab separated.
                                writer.write(String.valueOf(list.get(count).getId()));
                                writer.write("\t");
                                writer.write(line);
                                writer.write("\t");
                                writer.write(reliability);
                                writer.write("\n");
                            }
                            lineList.clear();
                        } else {
                            lineList.add(lineTxt);
                        }
                    }
                }
                writer.flush();
                LOGGER.info("添加sid完成2,count=" + count);
            }
        }
        LOGGER.info("读取带sid的处理结果插入到临时表中");
        // Load the sid-enriched file into the MySQL temp table.
        String result = BusinessUtil.excuteShell(new String[]{loadMarkScript});
        LOGGER.info("读取带sid的处理结果插入到临时表中成功! result=" + result);
        // Merge the temp table into bas_analysis_vocabulary.
        LOGGER.info("初始化现病史分类BME数据开始");
        markSentenceDao.initMarkVocab();
        LOGGER.info("初始化现病史分类BME数据结束");
    }

    /**
     * Checks that the submitted word fragments, concatenated in order, rebuild
     * the original sentence exactly.
     *
     * @param vocabularyStrs the word fragments from the edit form
     * @param sentence       the full sentence text
     * @return true when the concatenation equals the sentence; false otherwise,
     *         including for a null or empty fragment array
     */
    public boolean checkVocabulary(String[] vocabularyStrs, String sentence) {
        if (vocabularyStrs == null || vocabularyStrs.length == 0) {
            return false;
        }
        // Null elements render as "null", matching the previous append behavior.
        String joined = String.join("", vocabularyStrs);
        return joined.equals(sentence);
    }

    /**
     * Assembles the index-parallel form arrays into {@link MarkVocabulary}
     * objects and partitions them into groups: a new group starts whenever
     * sources[i] differs from the previous non-blank source, with the word that
     * triggered the switch opening the new group.
     *
     * NOTE(review): the {@code type} parameter is never used in this method —
     * each word's type is taken from {@code sources} instead; confirm whether it
     * can be dropped at the call site.
     *
     * @param vocabularyStrs word texts (all arrays are index-parallel)
     * @param characterStrs  word characteristics
     * @param propertyStrs   word properties
     * @param position1Strs  position-1 values (trimmed here)
     * @param position2Strs  position-2 values (trimmed here)
     * @param sources        per-word group/source type
     * @param sid            owning sentence id stored on each word
     * @param type           unused (see note above)
     * @return the words partitioned into consecutive same-source groups
     */
    private List<List<MarkVocabulary>> getVocabularysByParam(String[] vocabularyStrs, String[] characterStrs,
                                                             String[] propertyStrs, String[] position1Strs, String[] position2Strs, String[] sources, Long sid, String type) {
        String tempSource = "";
        List<List<MarkVocabulary>> allGroup = new ArrayList<>();
        List<MarkVocabulary> tempVocab = new ArrayList<>();
        for (int i = 0; i < vocabularyStrs.length; i++) {
            MarkVocabulary vocabulary = new MarkVocabulary();
            vocabulary.setSid(sid);
            vocabulary.setVocabulary(vocabularyStrs[i]);
            vocabulary.setCharacteristic(characterStrs[i]);
            vocabulary.setProperty(propertyStrs[i]);
            vocabulary.setPosition1(position1Strs[i].trim());
            vocabulary.setPosition2(position2Strs[i].trim());
            if (StringUtils.isNotBlank(tempSource) && !StringUtils.equals(tempSource, sources[i])) {
                tempSource = sources[i];
                // Source changed: close the previous group; this word opens a new one.
                allGroup.add(tempVocab);
                vocabulary.setType(tempSource);
                tempVocab = new ArrayList<>();
                tempVocab.add(vocabulary);
            } else {
                tempSource = sources[i];
                vocabulary.setType(tempSource);
                tempVocab.add(vocabulary);
            }
        }
        // Close the final group, if any words were collected.
        if (tempVocab.size() > 0) {
            allGroup.add(tempVocab);
        }
        return allGroup;
    }


    /**
     * Saves the word classification for one mark sentence: validates that the
     * submitted words rebuild the sentence, regroups them by source, replaces
     * the sentence's previous groups/words and child analysis sentences, inserts
     * one new unverified HPI analysis sentence per group, refreshes the redis
     * caches and records the new ids as "unlabeled".
     *
     * @param idStr          mark-sentence id as a string
     * @param user           current user; its id is stored on the new sentences
     * @param vocabularyStrs word texts (parallel with the next five arrays)
     * @param characterStrs  word characteristics
     * @param propertyStrs   word properties
     * @param position1Strs  word position-1 values
     * @param position2Strs  word position-2 values
     * @param req            servlet request (not referenced in this method)
     * @param sourceStrs     per-word group/source types
     * @return success, or ANALYSIS_VOCABULARYS_CHECK_ERROR when the words do not
     *         rebuild the sentence
     */
    public ReturnData edit(String idStr, User user, String[] vocabularyStrs, String[] characterStrs, String[] propertyStrs, String[] position1Strs, String[] position2Strs, HttpServletRequest req, String[] sourceStrs) {
        Long id = Long.parseLong(idStr);
        // Load the sentence being edited and its current groups.
        MarkSentence markSentence = markSentenceDao.selectMarkSentenceById(id);
        List<MarkGroup> groups = markSentenceDao.retriveGroupListBySid(id);

        // Verify the submitted words concatenate back into the sentence.
        boolean vocabularyFlag = checkVocabulary(vocabularyStrs, markSentence.getSentence());
        if (!vocabularyFlag) {
            return ReturnUtil.fail(ReturnCode.ANALYSIS_VOCABULARYS_CHECK_ERROR);
        }

        // Assemble the parallel arrays into word groups (split on source change).
        List<List<MarkVocabulary>> vocabularys = getVocabularysByParam(vocabularyStrs, characterStrs, propertyStrs,
                position1Strs, position2Strs, sourceStrs, id, groups.get(0).getType());

        // Drop all existing groups and words of this sentence.
        markSentenceDao.deleteGroupVocabBySid(id);
        // Drop previously generated child sentences, evicting each from redis first.
        if (StringUtils.isNotBlank(markSentence.getChildIds())) {
            String[] sentenceIds = markSentence.getChildIds().split(",");
            for (String sentenceId : sentenceIds) {
                Map<String, String> sen = analysisSentenceDao.selectContextBySid(Long.parseLong(sentenceId));
                if (sen != null && sen.size() > 0) {
                    analysisSentenceService.removeAnalySentenceceOfRedis(Long.parseLong(sentenceId), RedisConstant.KEY_PATTERN_HPI_UNLABELED_NO_SECOND_TYPE + (char) 1 + sen.get("second_type") + (char) 1 + "*");
                    analysisSentenceService.removeAnalySentenceceOfRedis(Long.parseLong(sentenceId), RedisConstant.KEY_PATTERN_HPI_LABELED_NO_SECOND_TYPE + (char) 1 + sen.get("second_type") + (char) 1 + "*");
                }
            }
            analysisSentenceDao.deleteByIds(markSentence.getChildIds());
        }
        // Ids of the newly created (still unlabeled) child sentences.
        StringBuilder newIds = new StringBuilder();
        List<String> newIdList = new ArrayList<>();
        // Process each word group.
        for (List<MarkVocabulary> vocabs : vocabularys) {
            // Create a new group row.
            Long groupNewId = markSentenceDao.insertGroup(vocabs.get(0).getType(), id);

            // Concatenate the group's words into a new sub-sentence.
            StringBuilder sentence = new StringBuilder();
            for (MarkVocabulary vocab : vocabs) {
                vocab.setGroupId(groupNewId);
                sentence.append(vocab.getVocabulary());
            }
            // Persist the group's words.
            markSentenceDao.batchInsertVocab(vocabs);

            // Insert the sub-sentence as an unverified analysis sentence.
            AnalysisSentence newSentence = new AnalysisSentence();
            newSentence.setLabelStatus(0);
            // Type 2 = present illness history (现病史).
            newSentence.setType(2);
            newSentence.setSentence(sentence.toString());
            newSentence.setFrequency(markSentence.getFrequency());
            newSentence.setModifierId(user.getUserId());
            Long sentenceNewId = analysisSentenceDao.insert(newSentence);
            List<AnalysisVocabulary> newVocab = markTransVocab(vocabs, sentenceNewId);
            // Insert the HPI enlarge (secondType) row for the new sentence.
            Map<String, Object> param = new HashMap<>();
            param.put("id", sentenceNewId);
            param.put("secondType", vocabs.get(0).getType());
            analysisSentenceDao.insertEnlargeXbs(param);


            analysisVocabularyDao.batchInsert(newVocab);
            newIds.append(sentenceNewId).append(",");
            String spe = (char) 1 + "";
            newIdList.add(sentenceNewId + spe + vocabs.get(0).getType());
            newSentence.setId(sentenceNewId);
            newSentence.setGmtModified(DateUtils.convertDateFormat(new Date(), DateUtils.DateFormat.FULL));
            LOGGER.info("更新redis开始");
            // Refresh the cached unlabeled lists for this secondType with the new sentence.
            addAnalySentenceceInRedis(newSentence, RedisConstant.KEY_PATTERN_HPI_UNLABELED_NO_SECOND_TYPE + (char) 1 + vocabs.get(0).getType() + (char) 1 + "*", newVocab);

            LOGGER.info("更新redis完成");
        }
        // Store the new child-sentence ids (comma separated, trailing comma dropped).
        markSentenceDao.updateChildIds(id, newIds.toString().substring(0, newIds.length() - 1));

        // NOTE(review): newIds is never null here; only the length check matters.
        if (newIds != null && newIds.length() > 0) {
            // Remember in redis which new sentences still await labeling.
            ValueOperations<String, Object> valueopsOther = redisStrTemplate.opsForValue();
            Object unlabel = valueopsOther.get(RedisConstant.MARK_SENTENCE_UNLABEL);
            List<String> unlabelList = new ArrayList<>();
            if (unlabel != null) {
                unlabelList = (List<String>) unlabel;
            }
            for (String unlabelId : newIdList) {
                if (!unlabelList.contains(unlabelId)) {
                    unlabelList.add(unlabelId);
                }
            }
            valueopsOther.set(RedisConstant.MARK_SENTENCE_UNLABEL, unlabelList);
        }
        return ReturnUtil.success();
    }

    /**
     * Propagates a sentence change into every cached redis list matching the key
     * pattern: for each key the list is fetched, the sentence is inserted (or
     * not) depending on whether its words still satisfy the search criteria
     * encoded in the key, and the updated list is written back.
     *
     * @param analysisSentence the sentence to add
     * @param keyPattern       redis key pattern to scan
     * @param vocabularys      the sentence's (new) word list
     */
    private void addAnalySentenceceInRedis(AnalysisSentence analysisSentence, String keyPattern,
                                           List<AnalysisVocabulary> vocabularys) {
        ValueOperations<String, List<AnalysisSentence>> valueops = redisTemplate.opsForValue();
        // Every key currently cached under this pattern must be refreshed.
        for (String key : redisTemplate.keys(keyPattern)) {
            List<AnalysisSentence> cached = valueops.get(key);
            // Does the edited sentence still satisfy the search encoded in this key?
            boolean matches = canSelect(getSearchParamsByKey(key, analysisSentence.getType()), vocabularys);
            List<AnalysisSentence> updated = addElementsInList(cached, analysisSentence, matches);
            // Overwrite the cached list with the refreshed one.
            valueops.set(key, updated);
        }
    }

    /**
     * Reverse-parses a redis cache key back into the list of search parameters
     * that produced it.
     *
     * Key fields are joined by the (char)1 separator. A fixed-size prefix
     * (3 fields, or 4 for XBS-type keys) is skipped, then each search parameter
     * is read as 7 consecutive fields in this order: id, vocabulary, property,
     * position1, position2, oppositePropertyFlag, oppositePosition1Flag. Keys
     * not ending in "desc" first have their trailing separator-delimited
     * segment stripped.
     *
     * NOTE(review): the field layout above is inferred from the consumption
     * order in this method — confirm against the code that builds these keys.
     *
     * @param key  the redis key to parse
     * @param type sentence type; XBS keys carry one extra prefix field
     * @return the parsed search parameters, or null when the key encodes none
     */
    private List<AnalysisSearchParam> getSearchParamsByKey(String key, int type) {
        if (!key.endsWith("desc")) {
            // Strip everything after the last (char)1 separator.
            key = key.substring(0, key.lastIndexOf(1));
        }
        String[] s = key.split(String.valueOf((char) 1));
        int limit = 3;
        int i = 2;
        if (AnalysisSentenceConstant.TYPE_XBS == type) {
            limit = 4;
            i = 3;
        }
        // Only the fixed prefix present => no search parameters encoded.
        if (s.length <= limit) {
            return null;
        }
        List<AnalysisSearchParam> result = new ArrayList<>();
        AnalysisSearchParam param = null;
        // Consume 7 fields per parameter until near the end of the array.
        for (; i < s.length - 2; ) {
            param = new AnalysisSearchParam();
            param.setId(Long.valueOf(s[i++]));
            param.setVocabulary(s[i++]);
            param.setProperty(s[i++]);
            param.setPosition1(s[i++]);
            param.setPosition2(s[i++]);
            param.setOppositePropertyFlag(s[i++]);
            param.setOppositePosition1Flag(s[i++]);
            result.add(param);
        }
        return result;
    }

    /**
     * Checks whether a sentence's vocabularies satisfy every search condition.
     *
     * Each search param must be matched by at least one vocabulary with the
     * same word; additionally, any same-word vocabulary whose property /
     * position fields violate the param's (possibly negated) constraints makes
     * the whole check fail immediately.
     *
     * @param searchParams search conditions ({@code null} means "no filter")
     * @param vocabularys  the sentence's vocabularies
     * @return {@code true} when all conditions are satisfied, {@code false} otherwise
     */
    private boolean canSelect(List<AnalysisSearchParam> searchParams, List<AnalysisVocabulary> vocabularys) {
        if (searchParams == null) {
            return true; // no conditions: trivially satisfied
        }
        if (vocabularys == null) {
            return false; // conditions exist but there is nothing to match them
        }
        for (AnalysisSearchParam param : searchParams) {
            boolean matched = false; // did any vocabulary satisfy this param?
            for (AnalysisVocabulary vocab : vocabularys) {
                if (!param.getVocabulary().equals(vocab.getVocabulary())) {
                    continue; // different word: irrelevant to this param
                }
                // Property constraint: "0" flag = must equal, "1" flag = must differ.
                if (StringUtils.isNotEmpty(param.getProperty())) {
                    boolean sameProperty = param.getProperty().equals(vocab.getProperty());
                    if ("0".equals(param.getOppositePropertyFlag()) && !sameProperty) {
                        return false;
                    }
                    if ("1".equals(param.getOppositePropertyFlag()) && sameProperty) {
                        return false;
                    }
                }
                // Position1 constraint, with the same forward/negated semantics.
                if (StringUtils.isNotEmpty(param.getPosition1())) {
                    boolean samePosition1 = param.getPosition1().equals(vocab.getPosition1());
                    if ("0".equals(param.getOppositePosition1Flag()) && !samePosition1) {
                        return false;
                    }
                    if ("1".equals(param.getOppositePosition1Flag()) && samePosition1) {
                        return false;
                    }
                }
                // Position2 constraint: plain equality only, never negated.
                if (StringUtils.isNotEmpty(param.getPosition2())
                        && !param.getPosition2().equals(vocab.getPosition2())) {
                    return false;
                }
                matched = true; // every constraint of this param was satisfied
            }
            if (!matched) {
                return false; // no vocabulary satisfied this param
            }
        }
        return true;
    }

    /**
     * Upserts a sentence into a cached list: any entry with the same id is
     * removed first, then the new element is prepended when {@code flag} is true.
     *
     * @param list    cached sentence list; may be {@code null} (redis miss)
     * @param element the sentence to insert
     * @param flag    whether the sentence should (still) appear in the list
     * @return the updated list (never {@code null})
     */
    private List<AnalysisSentence> addElementsInList(List<AnalysisSentence> list, AnalysisSentence element,
                                                     boolean flag) {
        if (list == null) {
            // Redis miss: callers pass valueops.get(key) straight in, which
            // previously caused an NPE on the iterator() call below.
            list = new ArrayList<>();
        }
        Iterator<AnalysisSentence> iterator = list.iterator();
        Long id = element.getId();
        AnalysisSentence analysisSentence = null;
        /* Remove any existing entry with the same id so it is not duplicated. */
        while (iterator.hasNext()) {
            analysisSentence = iterator.next();
            if (id.equals(analysisSentence.getId())) {
                iterator.remove();
            }
        }
        /* Prepend the element so the freshest sentence comes first. */
        if (flag) {
            list.add(0, element);
        }
        return list;
    }

    /**
     * Converts mark-stage vocabulary rows into analysis vocabulary entities
     * bound to the given (newly created) analysis sentence.
     *
     * @param markVocabs    vocabularies produced by manual marking
     * @param sentenceNewId id of the analysis sentence the words belong to
     * @return the converted list, in the same order as the input
     */
    private List<AnalysisVocabulary> markTransVocab(List<MarkVocabulary> markVocabs, Long sentenceNewId) {
        List<AnalysisVocabulary> converted = new ArrayList<>(markVocabs.size());
        for (MarkVocabulary source : markVocabs) {
            AnalysisVocabulary target = new AnalysisVocabulary();
            String word = source.getVocabulary();
            target.setVocabulary(word);
            target.setCharacteristic(source.getCharacteristic());
            target.setPosition1(source.getPosition1());
            target.setPosition2(source.getPosition2());
            target.setProperty(source.getProperty());
            target.setSid(sentenceNewId);
            target.setInitial(word.substring(0, 1)); // index by the word's first character
            target.setType(2); // NOTE(review): 2 presumably marks manually-marked entries — confirm
            converted.add(target);
        }
        return converted;
    }

    /**
     * Returns the first unlabeled-sentence entry whose sentence still exists,
     * or an empty string when none remains.
     *
     * Entries are read from the redis unlabeled list; each entry is the
     * sentence id plus extra fields joined by {@code (char) 1}. Entries whose
     * sentence was deleted in the meantime are skipped.
     *
     * @return the raw unlabeled entry string, or {@code ""}
     */
    public String getUnlabelId() {
        // The redis value holds the user's not-yet-marked sentences; they must be marked first.
        ValueOperations<String, Object> valueopsOther = redisStrTemplate.opsForValue();
        Object unlabel = valueopsOther.get(RedisConstant.MARK_SENTENCE_UNLABEL);
        if (!(unlabel instanceof List)) {
            // Nothing cached (or an unexpected type): previously an unguarded
            // cast that could throw ClassCastException.
            return "";
        }
        @SuppressWarnings("unchecked")
        List<String> unlabelList = (List<String>) unlabel;
        for (String unlabelStr : unlabelList) {
            // Entry format: "<sentenceId>\u0001<...>" — only the id part is needed here.
            String[] unlabelId = unlabelStr.split((char) 1 + "");
            AnalysisSentence analysisSentence = analysisSentenceDao.retrieveObjectById(Long.parseLong(unlabelId[0]));
            if (analysisSentence != null) {
                return unlabelStr;
            }
        }
        // Every cached entry points at a sentence that no longer exists.
        // (Original had an if/else where both branches returned "".)
        return "";
    }

    /**
     * Toggles the deleted state of a long (mark) sentence.
     *
     * If the sentence is currently flagged deleted (delFlag == 1) it is
     * restored (delFlag and flag reset to 0) and its child analysis sentences
     * are evicted from the redis caches and removed from the database;
     * otherwise it is marked deleted and verified (both set to 1).
     *
     * NOTE(review): restoring the parent also DELETES its children from the
     * analysis tables — presumably so they are regenerated by the next marking
     * pass; confirm against the marking workflow.
     *
     * @param id primary key of the mark sentence to toggle
     */
    public void deleteSentence(Long id) {
        MarkSentence markSentence = markSentenceDao.selectMarkSentenceById(id);
        if (StringUtils.equals(markSentence.getDelFlag() + "", "1")) {
            // Currently deleted, not yet verified: restore it.
            markSentenceDao.updateDelFlagAndFlag(0, 0, id);
            String childIds = markSentence.getChildIds();
            // Remove the derived child sentences (comma-separated id list).
            if (StringUtils.isNotBlank(childIds)) {
                String[] sentenceIds = childIds.split(",");
                for (String sentenceId : sentenceIds) {
                    Map<String, String> sen = analysisSentenceDao.selectContextBySid(Long.parseLong(sentenceId));
                    if (sen != null && sen.size() > 0) {
                        // Evict the child from both the unlabeled and labeled redis caches,
                        // keyed by its second_type; (char) 1 is the cache-key field separator.
                        analysisSentenceService.removeAnalySentenceceOfRedis(Long.parseLong(sentenceId), RedisConstant.KEY_PATTERN_HPI_UNLABELED_NO_SECOND_TYPE + (char) 1 + sen.get("second_type") + (char) 1 + "*");
                        analysisSentenceService.removeAnalySentenceceOfRedis(Long.parseLong(sentenceId), RedisConstant.KEY_PATTERN_HPI_LABELED_NO_SECOND_TYPE + (char) 1 + sen.get("second_type") + (char) 1 + "*");
                    }
                }
                analysisSentenceDao.deleteByIds(markSentence.getChildIds());
            }
        } else {
            // Not deleted: mark as deleted and verified.
            markSentenceDao.updateDelFlagAndFlag(1, 1, id);
        }
    }
}
