package com.huiquan.analysis.service;

import com.huiquan.analysis.constants.AnalysisDictionaryConstant;
import com.huiquan.analysis.constants.AnalysisSentenceConstant;
import com.huiquan.analysis.dao.AnalysisDictionaryDao;
import com.huiquan.analysis.domain.AnalysisDictionary;
import com.huiquan.analysis.domain.AnalysisElementSplitAmbiguity;
import com.huiquan.analysis.domain.User;
import com.huiquan.foundation.util.BusinessUtil;
import com.huiquan.framework.base.BaseService;
import com.huiquan.framework.base.ReturnCode;
import com.huiquan.framework.base.ReturnData;
import com.huiquan.framework.utils.GetListUtil;
import com.huiquan.framework.utils.ReturnUtil;
import com.huiquan.vocab.dao.VocabElementDao;
import com.huiquan.vocab.domain.VocabElementDto;

import org.apache.commons.lang.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;

/**
 * 词典 service
 *
 * @author leo
 * @time 2017年4月11日 下午5:42:27
 */
@Service
public class AnalysisDictionaryService extends BaseService {

    @Value("${dict.file.path}")
    private String dictionaryFilePath;

    @Value("${dict.user.file.path}")
    private String dictionaryUserFilePath;

    @Value("${dict.file.export.path}")
    private String exportDicFilePath;

    @Value("${dict.user.file.export.path}")
    private String exportUserDicFilePath;

    @Value("${dict.init.script}")
    private String initDictionaryFileScript;

    @Value("${dict.synchronization.script}")
    private String synchronizationDictionaryFileScript;

    @Autowired
    private AnalysisDictionaryDao dao;

    /**
     * Initializes the dictionary tables from the .dic files on disk.
     * First runs a shell script that copies the latest dictionary files from
     * host 181 to the local machine, then loads each file into the database.
     *
     * @param user operator recorded as the modifier of every inserted row
     * @return success result
     * @throws IOException if reading a dictionary file fails
     */
    @Deprecated
    public ReturnData initData(User user) throws IOException {
        // Copy the latest dictionary files from host 181 to the local machine.
        LOGGER.info("initDictionaryFileScript begin!");
        BusinessUtil.excuteShell(new String[]{initDictionaryFileScript});
        LOGGER.info("initDictionaryFileScript end!");

        // ambiguity.dic lives under the main dictionary path; every other file
        // lives under the user dictionary path.
        initOneDicFile(dictionaryFilePath, "ambiguity.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY, user);
        initOneDicFile(dictionaryUserFilePath, "userLibrary.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY, user);
        initOneDicFile(dictionaryUserFilePath, "Tongyici_dic.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_TONGYICI_DIC, user);
        initOneDicFile(dictionaryUserFilePath, "Symptom_Independent.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_SYMPTOM_INDEPENDENT, user);
        initOneDicFile(dictionaryUserFilePath, "Organ.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_ORGAN, user);
        initOneDicFile(dictionaryUserFilePath, "Inspection.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_INSPECTION, user);
        initOneDicFile(dictionaryUserFilePath, "Disease.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_DISEASE, user);
        initOneDicFile(dictionaryUserFilePath, "Additional.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_ADDITIONAL, user);
        initOneDicFile(dictionaryUserFilePath, "userLibrary_di.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY_DI, user);
        initOneDicFile(dictionaryUserFilePath, "ambiguity_di.dic",
                AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY_DI, user);

        return ReturnUtil.success();
    }

    /**
     * Loads one .dic file into the database, logging the row count.
     *
     * @param basePath directory containing the file (trailing separator expected)
     * @param fileName dictionary file name
     * @param type     dictionary type constant stored on each row
     * @param user     operator recorded as the modifier
     * @throws IOException if reading the file fails
     */
    private void initOneDicFile(String basePath, String fileName, int type, User user) throws IOException {
        LOGGER.info("初始化" + fileName + "数据开始！");
        List<AnalysisDictionary> list = getDictionaryListFromDicFile(basePath + fileName, type, user);
        batchInsetData(list);
        LOGGER.info("初始化" + fileName + "数据完成！数据量为：" + list.size());
    }

    /**
     * Inserts the given dictionary entries in batches of 1000 rows to keep
     * each INSERT statement a manageable size.
     *
     * @param list entries to persist; may be empty, in which case nothing is inserted
     */
    private void batchInsetData(List<AnalysisDictionary> list) {
        final int batchSize = 1000;
        // Simple [from, to) windows replace the original index arithmetic and
        // its dead null-check (List.subList never returns null).
        for (int from = 0; from < list.size(); from += batchSize) {
            int to = Math.min(from + batchSize, list.size());
            dao.insertAnalysisDictionaryBatch(list.subList(from, to));
        }
    }

    /**
     * Synchronizes dictionary data: dumps the database contents to the
     * dictionary files, then runs a shell script that uploads them to host 181.
     *
     * @throws IOException if writing the dictionary files fails
     */
    public void synchronizationDictionary() throws IOException {
        // Dump every dictionary table to its .dic file first.
        exportDataToDicFile();
        // Then push the freshly written files to host 181.
        LOGGER.info("synchronizationDictionaryFileScript begin!");
        String[] command = {synchronizationDictionaryFileScript};
        BusinessUtil.excuteShell(command);
        LOGGER.info("synchronizationDictionaryFileScript end!");
    }

    /**
     * Exports every dictionary type's rows from the database to its
     * corresponding .dic file on disk.
     *
     * <p>Fix: the original called {@code list.clear()} after the loop, which
     * threw a NullPointerException when the type/file map was null or empty
     * (the list was never assigned); it also needlessly mutated the DAO result.
     *
     * @throws IOException if writing a dictionary file fails
     */
    private void exportDataToDicFile() throws IOException {
        LOGGER.info("导出词典开始开始！");
        Map<String, String> typeAndFile = AnalysisDictionaryConstant.typeAndOutFileName;
        if (typeAndFile != null && !typeAndFile.isEmpty()) {
            Map<String, Object> param = new HashMap<>();
            for (Map.Entry<String, String> entry : typeAndFile.entrySet()) {
                param.put("type", entry.getKey());
                List<AnalysisDictionary> list = dao.selectAnalysisDictionaryList(param);
                String fileName = entry.getValue();
                // Ambiguity dictionaries are written to a different directory
                // than the user dictionaries.
                if (StringUtils.isNotBlank(fileName) && fileName.contains("ambiguity")) {
                    exportFileByListAndPath(list, exportDicFilePath + fileName);
                } else {
                    exportFileByListAndPath(list, exportUserDicFilePath + fileName);
                }
                LOGGER.info("导出数据到" + fileName + "完成！数据量为：" + list.size());
            }
        }
        LOGGER.info("词典导出结束");
    }

    /**
     * Builds the paged dictionary-list view model.
     *
     * @param tabid           tab id encoding the dictionary family (see {@link #getDictType})
     * @param start           requested page number
     * @param keyword         optional search term; empty/null means no filter
     * @param secondType      dictionary sub-type within the tab
     * @param exactSearchFlag "1" for an exact match, anything else for a LIKE search
     * @return view model containing the page of rows plus search/tab metadata
     */
    public Map<String, Object> viewDictionaryList(String tabid, int start, String keyword, String secondType, String exactSearchFlag) {
        Map<String, Object> param = new HashMap<>();
        int type = getDictType(tabid, secondType);
        boolean exactFlag = "1".equals(exactSearchFlag);

        param.put("type", type);
        param.put("exactFlag", exactFlag);
        if (keyword != null && !keyword.isEmpty()) {
            // Exact search matches the keyword as-is; fuzzy search wraps it in
            // SQL LIKE wildcards.
            param.put("keyword", exactFlag ? keyword : "%" + keyword + "%");
        }

        int totalSize = dao.selectAnalysisDictionaryCount(param);
        Map<String, Integer> pageNo = GetListUtil.getPageNoMap(totalSize, start);
        int endPage = pageNo.get("endPage");
        start = pageNo.get("start");
        int startIndex = pageNo.get("startIndex");

        // Only hit the database for rows when the requested page is in range.
        List<AnalysisDictionary> rows = new ArrayList<>();
        if (startIndex < totalSize) {
            param.put("startIndex", startIndex);
            rows = dao.selectAnalysisDictionaryList(param);
        }

        Map<String, String> showSearch = new LinkedHashMap<>();
        showSearch.put("keyword", "词语");

        Map<String, Object> map = GetListUtil.getReturnMap2(totalSize, start, startIndex, endPage, rows, null);
        map.put("showSearch", showSearch);
        map.put("characterMap", AnalysisSentenceConstant.getPropertyMapByType(AnalysisSentenceConstant.CHARACTER_LABEL));
        map.put("tabid", tabid);
        map.put("secondType", secondType);
        map.put("secondTypeMap", AnalysisDictionaryConstant.dictionarySecondTypeMap);
        return map;
    }
    
    /**
     * Builds the paged element-split ambiguity list view model.
     * Mirrors {@link #viewDictionaryList} but queries the ambiguity tables.
     *
     * @param tabid           tab id encoding the dictionary family
     * @param start           requested page number
     * @param keyword         optional search term; empty/null means no filter
     * @param secondType      dictionary sub-type within the tab
     * @param exactSearchFlag "1" for an exact match, anything else for a LIKE search
     * @return view model containing the page of rows plus search/tab metadata
     */
    public Map<String, Object> viewElementSplitAmbiguityList(String tabid, int start, String keyword, String secondType, String exactSearchFlag) {
        Map<String, Object> param = new HashMap<>();
        int type = getDictType(tabid, secondType);
        boolean exactFlag = "1".equals(exactSearchFlag);

        param.put("type", type);
        param.put("exactFlag", exactFlag);
        if (keyword != null && !keyword.isEmpty()) {
            // Exact search matches the keyword as-is; fuzzy search wraps it in
            // SQL LIKE wildcards.
            param.put("keyword", exactFlag ? keyword : "%" + keyword + "%");
        }

        int totalSize = dao.selectAnalysisElementSplitAmbiguityCount(param);
        Map<String, Integer> pageNo = GetListUtil.getPageNoMap(totalSize, start);
        int endPage = pageNo.get("endPage");
        start = pageNo.get("start");
        int startIndex = pageNo.get("startIndex");

        // Only hit the database for rows when the requested page is in range.
        List<AnalysisElementSplitAmbiguity> rows = new ArrayList<>();
        if (startIndex < totalSize) {
            param.put("startIndex", startIndex);
            rows = dao.selectAnalysisElementSplitAmbiguityList(param);
        }

        Map<String, String> showSearch = new LinkedHashMap<>();
        showSearch.put("keyword", "词语");

        Map<String, Object> map = GetListUtil.getReturnMap2(totalSize, start, startIndex, endPage, rows, null);
        map.put("showSearch", showSearch);
        map.put("characterMap",
                AnalysisSentenceConstant.getPropertyMapByType(AnalysisSentenceConstant.CHARACTER_LABEL));
        map.put("tabid", tabid);
        map.put("secondType", secondType);
        map.put("secondTypeMap", AnalysisDictionaryConstant.dictionarySecondTypeMap);
        return map;
    }

    /**
     * Updates the vocabulary and/or result string of an existing dictionary
     * entry and records the operator as its modifier.
     *
     * @param param request parameters; "id" is required, "vocabulary" and
     *              "dictionary" are applied only when non-empty
     * @param user  operator recorded as the modifier
     * @return success result
     * @throws NumberFormatException if param "id" is not a valid integer
     */
    public ReturnData updateDictionary(Map<String, String> param, User user) {
        int id = Integer.parseInt(param.get("id"));
        AnalysisDictionary entity = dao.selectAnalysisDictionarById(id);
        // NOTE(review): entity is assumed to exist for the given id; a stale id
        // would NPE below — consider returning a failure code instead. Confirm
        // callers always pass ids taken from the current list view.
        if (StringUtils.isNotEmpty(param.get("vocabulary"))) {
            entity.setVocabulary(param.get("vocabulary"));
        }
        if (StringUtils.isNotEmpty(param.get("dictionary"))) {
            entity.setDictionary(param.get("dictionary"));
        }
        entity.setModifierId(user.getUserId());
        entity.setModifierName(user.getRealName());
        dao.updateAnalysisDictionary(entity);
        return ReturnUtil.success();
    }

    /**
     * Soft-deletes one dictionary entry by id, recording who removed it.
     *
     * @param id   primary key of the entry to delete
     * @param user operator recorded as the modifier
     * @return success result
     */
    public ReturnData deleteDictionaryById(int id, User user) {
        Map<String, Object> deleteParam = new HashMap<>();
        deleteParam.put("id", id);
        deleteParam.put("modifierId", user.getUserId());
        deleteParam.put("modifierName", user.getRealName());
        dao.deleteAnalysisDictionaryById(deleteParam);
        return ReturnUtil.success();
    }

    /**
     * Builds a dictionary entry from page parameters and adds it.
     *
     * <p>Fixes: iterating to the shorter of the two parallel arrays (the
     * original indexed {@code characterStrs[i]} without a bounds check) and
     * guarding the trailing-tab trim against an empty result (the original
     * threw StringIndexOutOfBoundsException when nothing was appended).
     *
     * @param param         page parameters: "vocabulary", "tabid", "secondType"
     * @param user          operator recorded as the modifier
     * @param dictionary    segment words, parallel to {@code characterStrs}
     * @param characterStrs POS tags, parallel to {@code dictionary}
     * @return result of {@link #add}
     */
    public ReturnData addDictionary(Map<String, String> param, User user, String[] dictionary, String[] characterStrs) {
        String vocabulary = param.get("vocabulary");
        int type = getDictType(param.get("tabid"), param.get("secondType"));

        StringBuilder dictionaryStr = new StringBuilder();
        if (isSplitDictionary(type)) {
            // Split (ambiguity) format starts with the complete word.
            dictionaryStr.append(vocabulary).append("\t");
        }
        // Only pair up entries present in both arrays; blanks are skipped.
        int pairCount = Math.min(dictionary.length, characterStrs.length);
        for (int i = 0; i < pairCount; i++) {
            if (StringUtils.isNotBlank(dictionary[i]) && StringUtils.isNotBlank(characterStrs[i])) {
                dictionaryStr.append(dictionary[i]).append("\t").append(characterStrs[i]).append("\t");
            }
        }
        if (isMergeDictionary(type)) {
            // Merge (user-library) format ends with a fixed frequency of 1000.
            dictionaryStr.append(1000).append("\t");
        }

        AnalysisDictionary entity = new AnalysisDictionary();
        entity.setVocabulary(vocabulary);
        // Drop the trailing tab; every append path above ends with one.
        entity.setDictionary(dictionaryStr.length() > 0
                ? dictionaryStr.substring(0, dictionaryStr.length() - 1)
                : "");
        entity.setType(type);
        entity.setDeleteFlag(AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_DELETE_FLAG_N);
        entity.setModifierId(user.getUserId());
        entity.setModifierName(user.getRealName());

        return add(entity);
    }

    /**
     * Resolves the dictionary type constant from a tab id and sub-type.
     *
     * @param tabid      "33_1" (ambiguity family), "33_2" (user-library family),
     *                   or "33_&lt;type&gt;" encoding the type directly
     * @param secondType sub-type key; "zs" selects the family's base type
     * @return the dictionary type constant
     */
    private int getDictType(String tabid, String secondType) {
        switch (tabid) {
            case "33_1":
                if (secondType.equals("zs")) {
                    return AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY;
                }
                // NOTE(review): an unknown secondType makes get() return null and
                // unboxing throws NPE — confirm callers validate secondType.
                return AnalysisDictionaryConstant.ANALYSISDictionaryAmbiguitySecondTypeMap.get(secondType);
            case "33_2":
                if (secondType.equals("zs")) {
                    return AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY;
                }
                return AnalysisDictionaryConstant.ANALYSISDictionaryUserLibrarySecondTypeMap.get(secondType);
            default:
                // Other tabs encode the dictionary type directly as "33_<type>";
                // parseInt avoids the needless boxing of Integer.valueOf.
                return Integer.parseInt(tabid.split("_")[1]);
        }
    }

    /**
     * Validates and inserts a dictionary entry, then retires any historical
     * entries for the same word.
     *
     * <p>Fix: the modifier-name default now uses the null-safe
     * {@code StringUtils.isEmpty} — the original called {@code isEmpty()}
     * directly and threw NPE when the name was null.
     *
     * @param dictionary entry to add; vocabulary and result are trimmed in place
     * @return success, or a failure code describing the format error/conflict
     */
    public ReturnData add(AnalysisDictionary dictionary) {
        dictionary.setVocabulary(dictionary.getVocabulary().trim());
        dictionary.setDictionary(dictionary.getDictionary().trim());

        // Validate the result string against the type's expected format.
        boolean formatFlag = checkFormat(dictionary);
        if (!formatFlag) {
            LOGGER.error("添加词典失败，参数错误！");
            return ReturnUtil.fail(ReturnCode.DICTIONARY_ADD_FORMAT_ERROR);
        }

        // Reject duplicates and conflicting existing entries.
        ReturnCode existsResult = checkExists(dictionary);
        if (!existsResult.getCode().equals(ReturnCode.SUCCESS.getCode())) {
            return ReturnUtil.fail(existsResult);
        }

        // Default the modifier to the system administrator when unset.
        dictionary.setDeleteFlag(AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_DELETE_FLAG_N);
        if (dictionary.getModifierId() == 0) {
            dictionary.setModifierId(1);
        }
        if (StringUtils.isEmpty(dictionary.getModifierName())) {
            dictionary.setModifierName("系统管理员");
        }
        dao.insertAnalysisDictionary(dictionary);

        // Retire identical words from the historical dictionaries of this type.
        dao.deleteUnavailableByVocabulary(dictionary, dictionary.getType());

        return ReturnUtil.success();
    }

    /**
     * Checks whether the dictionary entry already exists or conflicts with an
     * existing entry for the same word.
     *
     * @param dictionary candidate entry
     * @return SUCCESS when the entry is new; DICTIONARY_ADD_EXISTS when it
     *         duplicates an existing entry; a dynamic-description code when it
     *         conflicts with one
     */
    private ReturnCode checkExists(AnalysisDictionary dictionary) {
        int type = dictionary.getType();
        List<AnalysisDictionary> availableList = dao.retrieveAvailableList(dictionary.getVocabulary(), type);
        if (!availableList.isEmpty()) {
            boolean conflictFlag = false;
            boolean repeatFlag = false;
            // StringBuilder replaces the original String += in a loop.
            StringBuilder msg = new StringBuilder();

            for (AnalysisDictionary exist : availableList) {
                msg.append(exist.getDictionary()).append("；");
                if (isMergeDictionary(type) != isMergeDictionary(exist.getType())) {
                    // One entry is a merge dictionary, the other a split one.
                    conflictFlag = true;
                } else {
                    if (isSplitDictionary(type)) {
                        // Split (ambiguity) dictionaries: the same segmentation
                        // means a duplicate; a different one means a conflict.
                        if (!getVocabSub(exist.getDictionary()).equals(getVocabSub(dictionary.getDictionary()))) {
                            conflictFlag = true;
                            continue;
                        }
                    }
                    repeatFlag = true;
                }
            }

            if (conflictFlag) {
                LOGGER.info("词典有冲突，msg=" + msg);
                // NOTE(review): setDesc mutates what looks like a shared
                // ReturnCode instance — not thread-safe if
                // DYNAMIC_DESC_EXCEPTION is a singleton/enum; confirm.
                ReturnCode rc = ReturnCode.DYNAMIC_DESC_EXCEPTION;
                rc.setDesc("词典有冲突，已存在的词有：" + msg);
                return rc;
            } else if (repeatFlag) {
                LOGGER.info("词典已存在，msg=" + msg);
                return ReturnCode.DICTIONARY_ADD_EXISTS;
            }
        }
        return ReturnCode.SUCCESS;
    }

    /**
     * Extracts the segmentation part of an ambiguity-dictionary result string.
     * The result format is: word \t seg1 \t pos1 \t seg2 \t pos2 ...; this
     * returns "seg1\tseg2\t..." (each segment followed by a tab).
     *
     * @param result tab-separated ambiguity dictionary result string
     * @return the tab-terminated segment list; empty when there are no segments
     */
    private String getVocabSub(String result) {
        // StringBuilder replaces the needlessly synchronized StringBuffer, and
        // stepping by 2 from index 1 replaces the modulo test: segments sit at
        // the odd indices, POS tags at the even ones.
        StringBuilder vocabSub = new StringBuilder();
        String[] subResult = result.split("\t");
        for (int i = 1; i < subResult.length; i += 2) {
            vocabSub.append(subResult[i]).append("\t");
        }
        return vocabSub.toString();
    }

    /**
     * Validates a dictionary entry's result string against its type's format.
     *
     * <p>Split (ambiguity) format: word \t seg1 \t pos1 \t seg2 \t pos2 ...
     * — at least two segment/POS pairs, and the concatenated segments must
     * reproduce the word. Merge (user-library) format: word \t POS \t frequency.
     *
     * @param dictionary entry whose vocabulary/result/type are checked
     * @return true when the entry is well-formed for its type
     */
    private boolean checkFormat(AnalysisDictionary dictionary) {
        int type = dictionary.getType();
        String vocabulary = dictionary.getVocabulary();
        String result = dictionary.getDictionary();

        if (StringUtils.isEmpty(vocabulary) || StringUtils.isEmpty(result)) {
            LOGGER.error("词典参数有误，词语或结果不能为空！");
            return false;
        }

        String[] subResult = result.split("\t");
        // Every result string must start with the complete word.
        if (!vocabulary.equals(subResult[0])) {
            LOGGER.error("词典参数有误，结果中需要以词语开头！");
            return false;
        }
        if (isSplitDictionary(type)) {
            // At least two segments, and segment/POS come in pairs, so the
            // field count is odd and at least 5.
            if (subResult.length < 5 || subResult.length % 2 == 0) {
                LOGGER.error("词典参数有误，歧义词典结果分词个数有误！");
                return false;
            }
            // Concatenating the segments (odd indices) must give back the word;
            // StringBuilder replaces the original String += in a loop.
            StringBuilder resultVocabulary = new StringBuilder();
            for (int i = 1; i < subResult.length; i += 2) {
                resultVocabulary.append(subResult[i]);
            }
            if (!resultVocabulary.toString().equals(vocabulary)) {
                LOGGER.error("词典参数有误，歧义词典分词组合后不同与原词语！");
                return false;
            }
        } else if (isMergeDictionary(type)) {
            // Merge (user-library) format has exactly three fields.
            if (subResult.length != 3) {
                LOGGER.error("词典参数有误，用户词典结果格式错误！");
                return false;
            }
        } else {
            LOGGER.error("词典参数有误，类型超出范围！");
            return false;
        }

        return true;
    }

    /**
     * Returns true when the given type belongs to the merge (user-library)
     * dictionary family: the two base user-library types, or any type in the
     * XBS user-library range.
     *
     * @param type dictionary type constant
     * @return true for merge dictionaries
     */
    private boolean isMergeDictionary(int type) {
        boolean baseUserLibrary =
                type == AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY
                        || type == AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY_DI;
        boolean xbsUserLibrary =
                type >= AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY_XBS_EXTRACTED
                        && type <= AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY_XBS_SYMPTOMS_DESCRIBED;
        return baseUserLibrary || xbsUserLibrary;
    }

    /**
     * Returns true when the given type belongs to the split (ambiguity)
     * dictionary family: the two base ambiguity types, or any type in the
     * XBS ambiguity range.
     *
     * @param type dictionary type constant
     * @return true for split dictionaries
     */
    private boolean isSplitDictionary(int type) {
        boolean baseAmbiguity =
                type == AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY
                        || type == AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY_DI;
        boolean xbsAmbiguity =
                type >= AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY_XBS_EXTRACTED
                        && type <= AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY_XBS_SYMPTOMS_DESCRIBED;
        return baseAmbiguity || xbsAmbiguity;
    }

    /**
     * Reads a .dic file line by line and converts each line to an
     * {@link AnalysisDictionary} row of the given type.
     *
     * <p>Fix: the original closed only the inner InputStreamReader, and only on
     * the happy path — the BufferedReader leaked, and everything leaked when
     * readLine threw. try-with-resources closes the whole chain in all cases.
     *
     * @param filePath absolute path of the .dic file (UTF-8 encoded)
     * @param type     1-ambiguity,2-userLibrary,3-Tongyici_dic,4-Symptom_Independent,5-Organ,6-Inspection,7-Disease,8-Additional
     * @param user     operator recorded as the modifier of each row
     * @return one entry per accepted line
     * @throws IOException if reading the file fails
     */
    private List<AnalysisDictionary> getDictionaryListFromDicFile(String filePath, int type, User user)
            throws IOException {
        List<AnalysisDictionary> list = new ArrayList<>();
        try (BufferedReader bufferedReader = new BufferedReader(
                new InputStreamReader(new FileInputStream(filePath), StandardCharsets.UTF_8))) {
            String lineTxt;
            while ((lineTxt = bufferedReader.readLine()) != null) {
                String[] strArray = lineTxt.split("\t");
                // Ambiguity (split) lines need the word plus at least one
                // segment/POS pair and a second segment: minimum 4 fields.
                if ((type == AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY
                        || type == AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_AMBIGUITY_DI)
                        && strArray.length < 4) {
                    continue;
                }
                // User-library (merge) lines have exactly word/POS/frequency.
                if ((type == AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY
                        || type == AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_TYPE_USERLIBRARY_DI)
                        && strArray.length != 3) {
                    continue;
                }
                AnalysisDictionary entity = new AnalysisDictionary();
                if (strArray.length > 0) {
                    entity.setVocabulary(strArray[0]);
                    entity.setDictionary(lineTxt);
                }
                entity.setDeleteFlag(AnalysisDictionaryConstant.ANALYSIS_DICTIONARY_DELETE_FLAG_N);
                entity.setType(type);
                entity.setModifierId(user.getUserId());
                entity.setModifierName(user.getRealName());
                list.add(entity);
            }
        }
        return list;
    }

    /**
     * Writes each entry's result string to the given file, one per line, UTF-8.
     *
     * <p>Fix: try-with-resources guarantees the writer is flushed and closed
     * even when a write fails — the original leaked it on exception.
     *
     * @param dicList entries to write
     * @param path    destination file path (overwritten)
     * @throws IOException if opening or writing the file fails
     */
    private void exportFileByListAndPath(List<AnalysisDictionary> dicList, String path) throws IOException {
        try (Writer writer = new OutputStreamWriter(new FileOutputStream(path), StandardCharsets.UTF_8)) {
            for (AnalysisDictionary dic : dicList) {
                writer.write(dic.getDictionary());
                writer.write("\n");
            }
        }
    }

    // DAO for the vocabulary element table; used by initTimeDictionary() below.
    // NOTE(review): declared mid-class — conventionally this belongs with the
    // other @Autowired fields at the top of the class.
    @Autowired
    private VocabElementDao vocabElementDao;

    /**
     * Initializes the time-word dictionary from validated vocabulary elements.
     * Loads every element whose property is "t", stages the rows in a temp
     * table, then lets the database dedupe against the other dictionaries and
     * insert the survivors.
     */
    public void initTimeDictionary() {
        // Fetch elements with property "t" (time words) and flag 0, ordered by word.
        Map<String, Object> param = new HashMap<>();
        param.put("property", "t");
        param.put("flag", 0);
        param.put("orderStr", "a.vocabulary");
        List<VocabElementDto> elementList = vocabElementDao.retrieveDtoList(param);

        // Build rows in the user-dictionary format: word \t property \t frequency.
        List<AnalysisDictionary> dicList = new ArrayList<>(elementList.size());
        for (VocabElementDto element : elementList) {
            AnalysisDictionary dic = new AnalysisDictionary();
            dic.setVocabulary(element.getVocabulary());
            dic.setDictionary(element.getVocabulary() + "\tt\t1000");
            dicList.add(dic);
        }
        if (dicList.isEmpty()) {
            return;
        }
        // 1. Stage the rows in a temp table;
        dao.insertTimeDicToTemp(dicList);
        // 2. dedupe and insert the time words.
        dao.initTimeDic();
    }
}
