package com.nlp.visualization.core.discourse.impl;

import com.hankcs.hanlp.HanLP;
import com.nlp.visualization.common.CONSTANTS;
import com.nlp.visualization.core.discourse.IDiscourseService;
import com.nlp.visualization.core.discourse.ISensitiveService;
import com.nlp.visualization.core.discourse.IWord2VecService;
import com.nlp.visualization.core.lda.process.LDA4News;
import com.nlp.visualization.core.seg.ISegmentService;
import com.nlp.visualization.core.seg.SegmentType;
import com.nlp.visualization.core.seg.filter.SegFilter;
import com.nlp.visualization.core.sentence.ISentenceService;
import com.nlp.visualization.pojo.NLP.discourse.WordFrequencyTable;
import com.nlp.visualization.pojo.lda.LDATheme;
import com.nlp.visualization.service.news.INewsService;
import com.nlp.visualization.utils.PropertyUtil;
import org.apache.commons.collections.map.HashedMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.function.Predicate;
import java.util.regex.Pattern;


@Service
public class DiscourseServiceImpl implements IDiscourseService {

    @Autowired
    ISegmentService segmentService;

    @Autowired
    ISensitiveService sensitiveService;

    @Autowired
    IWord2VecService word2VecService;

    @Autowired
    INewsService newsService;

    private static final Logger logger = LoggerFactory.getLogger(DiscourseServiceImpl.class);

    /** Matches tokens made up entirely of CJK unified ideographs; compiled once and reused. */
    private static final Pattern CHINESE_WORD = Pattern.compile("[\\u4e00-\\u9fa5]+");

    /** Maximum number of historical LDA models consulted by {@link #getTheme(String)}. */
    private static final int MAX_MODELS = 20;

    // Segmentation filter. The original builder chain called enableStopwords(true) twice;
    // the duplicate was redundant and a single call is kept.
    // NOTE(review): the second call may have been intended to enable a different option —
    // confirm against SegFilter.FilterBuilder.
    private SegFilter.FilterBuilder filterBuilder = new SegFilter.FilterBuilder();
    private SegFilter segFilter = filterBuilder.enableStopwords(true).build();

    // Default segmentation algorithm used by countWordFrequency.
    private SegmentType defaultSegType = SegmentType.HANLP_CRF;

    private String basePath = PropertyUtil.getInstance("config").getProperty("lda.base");               // LDA base path
    private String modelPath = basePath + PropertyUtil.getInstance("config").getProperty("lda.model");  // LDA model directory

    /**
     * Counts word frequencies of the Chinese words in {@code text}.
     * <p>
     * The text is segmented into "word/pos" tokens; only all-Chinese words carrying a pos tag
     * are counted. The resulting table is sorted by pos (descending), then count (descending),
     * then word (descending) — the same order the original implementation produced.
     *
     * @param text raw input text
     * @return the populated frequency table
     */
    @Override
    public WordFrequencyTable countWordFrequency(String text) {

        WordFrequencyTable wordFrequencyTable = new WordFrequencyTable();
        List<String> wordList = segmentService.executeSeg(text, defaultSegType, segFilter);

        // word -> accumulated entry. A LinkedHashMap keeps first-seen order and replaces the
        // original O(n^2) list-scan-per-token accumulation with O(n) lookups.
        Map<String, WordFrequencyTable.sampleWord> counts = new LinkedHashMap<>();

        for (String term : wordList) {
            String[] parts = splitTerm(term);
            if (parts == null)
                continue;
            String word = parts[0];
            WordFrequencyTable.sampleWord entry = counts.get(word);
            if (entry == null) {
                // First occurrence: record with the pos tag's leading character and count 1.
                // (A word later seen with a different pos keeps its first pos, as before.)
                counts.put(word, wordFrequencyTable.new sampleWord(word, String.valueOf(parts[1].charAt(0)), 1));
            } else {
                entry.setCount(entry.getCount() + 1);
            }
        }

        List<WordFrequencyTable.sampleWord> allList = new LinkedList<>(counts.values());
        // Sort by pos desc, count desc, name desc. Integer.compare avoids the overflow risk of
        // the original "o2.getCount() - o1.getCount()" subtraction.
        allList.sort((a, b) -> {
            int byPos = b.getPos().compareTo(a.getPos());
            if (byPos != 0)
                return byPos;
            int byCount = Integer.compare(b.getCount(), a.getCount());
            if (byCount != 0)
                return byCount;
            return b.getName().compareTo(a.getName());
        });

        wordFrequencyTable.setTable(allList);
        return wordFrequencyTable;
    }

    /**
     * Extracts all named entities; currently supports Chinese/Japanese person names,
     * place names and organisation names.
     * <p>
     * Accepts pos tags that start with 'n' and are longer than one character
     * (HanLP proper-noun subtypes such as nr/ns/nt).
     *
     * @param text raw input text
     * @return list of {name, pos} maps, one per distinct entity
     */
    @Override
    public List getNamedEntity(String text) {
        return collectEntities(text, pos -> pos.charAt(0) == 'n' && pos.length() > 1);
    }

    /**
     * Extracts keywords via HanLP (TextRank-based).
     *
     * @param text  raw input text
     * @param count maximum number of keywords to return
     * @return the keyword list
     */
    @Override
    public List getKeywords(String text, int count) {
        return HanLP.extractKeyword(text, count);
    }

    /**
     * Segments the text and extracts the distinct place names (pos tag "ns") it mentions.
     *
     * @param text raw input text
     * @return list of {name, pos} maps, one per distinct place name
     * @throws Exception declared by the interface; this implementation does not throw
     */
    @Override
    public List mapRemark(String text) throws Exception {
        return collectEntities(text, "ns"::equals);
    }

    /**
     * Extracts a summary of the article via HanLP.
     *
     * @param text  raw input text
     * @param count maximum number of summary sentences
     * @return the summary sentence list
     * @throws Exception declared by the interface; this implementation does not throw
     */
    @Override
    public List getAbstract(String text, int count) throws Exception {
        return HanLP.extractSummary(text, count);
    }

    /**
     * Predicts the article's themes using historical LDA models.
     * <p>
     * The text is segmented and persisted to a temp file (LDA4News reads from disk), then
     * inference is run against up to {@value #MAX_MODELS} of the most recent model files.
     * Per model, duplicate theme names are merged (probabilities summed) and ranked.
     *
     * @param text raw input text
     * @return map with key "rank" -> list of {date, list:[{name, p}...]} per model;
     *         empty map when an error occurred before any result was produced
     */
    @Override
    public Map getTheme(String text) {

        Map map = new HashedMap();

        try {
            // Persist the segmented text under the temp directory so LDA4News can read it.
            File dir = new File(CONSTANTS.CONTEXT + CONSTANTS.TEXT_TEMP);
            File textFile = newsService.newsSeg2Text(text, dir, segFilter);

            // Collect model files and rank newest first (file names sort lexicographically).
            List<File> models = listModelFiles();
            models.sort(Comparator.comparing(File::getName).reversed());

            // NOTE(review): the original comment said "30 models of the last month" but the
            // code capped at 20; the code's cap (20) is preserved here.
            int length = Math.min(models.size(), MAX_MODELS);

            LDA4News lda4News = new LDA4News();
            List mapList = new LinkedList();
            for (int i = 0; i < length; i++) {
                File model = models.get(i);
                List<LDATheme> themes = lda4News.inference(textFile.getPath(), model.getPath());
                Map perModel = new HashedMap();
                // Model file name is "<date>.<ext>"; the date part labels the result.
                perModel.put("date", model.getName().split("\\.")[0]);
                perModel.put("list", rankThemes(themes));
                mapList.add(perModel);
            }
            map.put("rank", mapList);

        } catch (IOException e) {
            // Pass the throwable to the logger instead of printStackTrace() so the stack
            // trace lands in the log. Message text kept as-is (failed to store temp file).
            logger.error("存储临时文件异常", e);
        } catch (Exception e) {
            // Unexpected error.
            logger.error("未知异常", e);
        }

        return map;
    }

    /**
     * Sentiment analysis — not implemented yet.
     *
     * @param text raw input text
     * @return always {@code null}
     */
    @Override
    public Map getEmotion(String text) {
        return null;
    }

    /**
     * Word vectors — not implemented yet.
     *
     * @param text raw input text
     * @return always {@code null}
     */
    @Override
    public List word2Vec(String text) {
        return null;
    }

    /**
     * Detects sensitive words and produces a masked copy of the text.
     *
     * @param text      raw input text
     * @param matchType matching strategy, forwarded to the sensitive-word service
     * @return map with "words" -> detected word set, "replace" -> text with hits masked by '*'
     */
    @Override
    public Map sensitiveWord(String text, int matchType) {
        Map res = new HashedMap();
        Set set = sensitiveService.sensitiveWords(text, matchType);
        String replaceStr = sensitiveService.replaceSensitiveWords(text, matchType, "*");
        res.put("words", set);
        res.put("replace", replaceStr);
        return res;
    }

    /**
     * Computes text similarity between {@code text_A} and each of {@code texts}.
     *
     * @param text_A reference text
     * @param texts  candidate texts
     * @return map with "res" -> list of {text, cos} entries (cosine similarity per candidate)
     */
    @Override
    public Map textNearCalculate(String text_A, String... texts) {
        Map map = new HashedMap();
        List list = new ArrayList();
        for (String str : texts) {
            Map tempMap = new HashedMap();
            tempMap.put("text", str);
            tempMap.put("cos", word2VecService.similarCompare(text_A, str));
            list.add(tempMap);
        }
        map.put("res", list);
        return map;
    }

    /**
     * Splits a "word/pos" token and validates it.
     * <p>
     * Replaces the original try/catch-continue pattern with explicit checks: a token like "/"
     * splits to an empty array, and "a//b" yields an empty pos tag — both are rejected here
     * instead of relying on a swallowed exception.
     *
     * @param term one segmented token, expected as "word/pos"
     * @return {@code {word, pos, ...}} when the token is a well-formed all-Chinese word with a
     *         non-empty pos tag, otherwise {@code null}
     */
    private String[] splitTerm(String term) {
        if (term == null || term.trim().isEmpty())
            return null;
        String[] parts = term.split("/");
        if (parts.length < 2 || parts[1].isEmpty())
            return null;
        if (!CHINESE_WORD.matcher(parts[0]).matches())
            return null;
        return parts;
    }

    /**
     * Shared extraction loop for {@link #getNamedEntity(String)} and {@link #mapRemark(String)}:
     * segments with named-entity recognition and collects each distinct word whose pos tag
     * satisfies {@code posFilter}.
     *
     * @param text      raw input text
     * @param posFilter predicate over the full pos tag (e.g. "nr", "ns")
     * @return list of {name, pos} maps in first-seen order, duplicates removed
     */
    private List<Map> collectEntities(String text, Predicate<String> posFilter) {
        Set<String> seen = new HashSet<>();
        List<Map> res = new LinkedList<>();
        for (String term : segmentService.executeSegWithNamedEntity(text, segFilter)) {
            String[] parts = splitTerm(term);
            // seen.add returns false for duplicates, so each entity is emitted once.
            if (parts == null || !posFilter.test(parts[1]) || !seen.add(parts[0]))
                continue;
            Map map = new HashedMap();
            map.put("name", parts[0]);
            map.put("pos", parts[1]);
            res.add(map);
        }
        return res;
    }

    /**
     * Lists LDA model files under {@code modelPath}: names containing "model" but not ".txt"
     * (the ".txt" companions and "seq" sequence files are ignored — the original collected the
     * seq files into a list it never used, which has been dropped).
     *
     * @return the model files; empty when the directory is missing or unreadable
     *         (guards the NPE the original hit on a null listFiles() result)
     */
    private List<File> listModelFiles() {
        List<File> models = new LinkedList<>();
        File[] files = new File(modelPath).listFiles();
        if (files == null)
            return models;
        for (File file : files) {
            if (file.getName().contains("model") && !file.getName().contains(".txt"))
                models.add(file);
        }
        return models;
    }

    /**
     * Merges duplicate theme names (summing their probabilities) and returns {name, p} maps
     * sorted by probability, highest first.
     *
     * @param themes inference output for one model
     * @return ranked list of {name, p} maps
     */
    private List rankThemes(List<LDATheme> themes) {
        Map<String, Float> byName = new TreeMap<>();
        for (LDATheme theme : themes) {
            byName.merge(theme.getName(), theme.getProbability(), Float::sum);
        }
        List<Map.Entry<String, Float>> sorted = new ArrayList<>(byName.entrySet());
        // Descending probability; the stable sort preserves TreeMap's name order on ties,
        // matching the original Collections.sort behavior.
        sorted.sort(Map.Entry.<String, Float>comparingByValue().reversed());
        List ranked = new LinkedList();
        for (Map.Entry<String, Float> entry : sorted) {
            Map item = new HashedMap();
            item.put("name", entry.getKey());
            item.put("p", entry.getValue());
            ranked.add(item);
        }
        return ranked;
    }

}
