package com.txtTool.articleTxt.service.impl;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;
import com.kennycason.kumo.CollisionMode;
import com.kennycason.kumo.WordCloud;
import com.kennycason.kumo.WordFrequency;
import com.kennycason.kumo.bg.CircleBackground;
import com.kennycason.kumo.bg.PixelBoundryBackground;
import com.kennycason.kumo.font.KumoFont;
import com.kennycason.kumo.font.scale.SqrtFontScalar;
import com.kennycason.kumo.nlp.FrequencyAnalyzer;
import com.kennycason.kumo.palette.LinearGradientColorPalette;
import com.txtTool.articleTxt.pojo.WordLabe;
import com.txtTool.articleTxt.service.BaseService;
import com.txtTool.articleTxt.utils.DocUtil;
import com.txtTool.articleTxt.utils.TokenCache;
import org.apache.commons.collections.map.HashedMap;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;

import java.awt.*;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@Service
public class BaseServiceImpl implements BaseService {

    /** Path of the source document that is analyzed. */
    @Value("${txtPath}")
    private String txtPath;
    /** Path of the mask image used as the word-cloud background shape. */
    @Value("${baseImgPath}")
    private String baseImgPath;
    /** Output path the generated word-cloud image is written to. */
    @Value("${setImgPath}")
    private String setImgPath;

    /**
     * Reads the configured document and stores its full text in the token
     * cache under the key {@code "txtString"}, so later analysis calls do
     * not have to re-read the file.
     *
     * @throws IOException if the document cannot be read
     */
    @Override
    public void getTxtCach() throws IOException {
        File file = new File(txtPath);
        String txtString = DocUtil.doc2String(file);
        TokenCache.setKey("txtString", txtString);
    }

    /**
     * Returns the cached document text, reloading it from disk on a cache
     * miss. Shared by all analysis methods (previously this lookup/reload
     * boilerplate was duplicated in each of them).
     *
     * @return the cached content; may still be null/empty if the reload failed
     */
    private String getContent() {
        String content = TokenCache.getKey("txtString");
        if (StringUtils.isEmpty(content)) {
            try {
                this.getTxtCach();
            } catch (IOException e) {
                // Best-effort reload: keep the original behavior of logging
                // the failure and falling through with whatever the cache holds.
                e.printStackTrace();
            }
            content = TokenCache.getKey("txtString");
        }
        return content;
    }

    /**
     * Extracts a summary (abstract) of the cached text.
     *
     * @param length desired summary length
     * @return the generated summary, or the raw content unchanged when it is
     *         empty or already shorter than {@code length}
     */
    public String getAbstract(int length) {
        String content = getContent();
        if (StringUtils.isEmpty(content) || content.length() < length) {
            return content;
        }
        return HanLP.getSummary(content, length);
    }

    /**
     * Extracts key phrases from the cached text.
     *
     * @param num maximum number of phrases to return
     * @return the extracted phrases
     */
    public List<String> getPhrase(int num) {
        return HanLP.extractPhrase(getContent(), num);
    }

    /**
     * Extracts keywords from the cached text.
     *
     * @param num number of keywords requested
     * @return the extracted (Chinese-only) keywords
     */
    public List<String> getKeyWords(int num) {
        return getKeyWords(getContent(), num, num, 0);
    }

    /**
     * Segments the cached text and computes per-word frequencies.
     *
     * @param topNum maximum number of entries to return (highest frequency first)
     * @return at most {@code topNum} word/frequency pairs, sorted by
     *         descending frequency
     */
    public List<WordFrequency> wordDemoFun(int topNum) {
        String content = getContent();
        // Punctuation and symbols (ASCII and full-width) dropped from the token stream.
        String filterStr = "`~!@#$^&*()=|{}':;',\\[\\].<>/?~！@#￥……&*（）——|{}【】‘；：”“'。，、？ \n\n";

        // HanLP part-of-speech tags to exclude: pronouns (r*), prepositions (p*),
        // adverbs (d*), particles (u*), conjunctions (c*), interjections (e), ...
        List<String> natureList = Arrays.asList("rzv","rzt","rzs","rz","ryv","ryt","rys","ry","rr","Rg","rg","r","p","pba","pbei","dl","d","e","dg",
        "u","ud","ude1","ude2","ude3","udeng","udh","ug","uguo","uj","ul","ule","ulian","uls","usuo","uv","uyy","uz","uzhe","uzhi","c","cc"
        );

        // Segment with named-entity recognition enabled (people, translated
        // names, places, organizations).
        Segment segment = HanLP.newSegment()
                .enableNameRecognize(true)
                .enableTranslatedNameRecognize(true)
                .enablePlaceRecognize(true)
                .enableOrganizationRecognize(true);
        List<Term> termList = segment.seg(content);

        // Keep words whose POS tag is not excluded and that are not filter characters.
        // NOTE(review): filterStr.contains(word) also drops any multi-character
        // word that happens to be a substring of filterStr — behavior kept as-is.
        List<String> words = termList.stream()
                .filter(t -> !natureList.contains(t.nature.toString()))
                .map(t -> t.word)
                .filter(w -> !filterStr.contains(w))
                .collect(Collectors.toList());

        // Count occurrences in a single pass. (The previous implementation
        // called Collections.frequency inside the loop — O(n^2).)
        Map<String, Integer> counts = new HashMap<>(16);
        for (String word : words) {
            counts.merge(word, 1, Integer::sum);
        }
        List<WordFrequency> extractLabelList = new ArrayList<>(counts.size());
        for (Map.Entry<String, Integer> entry : counts.entrySet()) {
            extractLabelList.add(new WordFrequency(entry.getKey(), entry.getValue()));
        }

        // Descending frequency; Integer.compare avoids int-subtraction overflow.
        extractLabelList.sort((a, b) -> Integer.compare(b.getFrequency(), a.getFrequency()));

        // Truncate to the requested number of entries.
        return extractLabelList.size() > topNum ? extractLabelList.subList(0, topNum) : extractLabelList;
    }

    /**
     * Segments the cached text and groups recognized named entities.
     *
     * @return a map with three entries: person names (tag "nr"), place names
     *         (tag "ns") and organization names (tag "nt")
     */
    public Map<String, List> getSeg() {
        String content = getContent();
        List nameList = new ArrayList();
        List placeList = new ArrayList();
        List orgList = new ArrayList();
        // LinkedHashMap keeps the three result keys in insertion order.
        Map<String, List> map = new LinkedHashMap<>();
        Segment segment = HanLP.newSegment()
                .enableNameRecognize(true)
                .enableTranslatedNameRecognize(true)
                .enablePlaceRecognize(true)
                .enableOrganizationRecognize(true);
        List<Term> terms = segment.seg(content);
        for (int i = terms.size() - 1; i >= 0; i--) {
            String word = terms.get(i).word;
            String nature = terms.get(i).nature.toString();
            // FIX: compare tag values with equals(); the previous '==' checks
            // compared String identity and only matched by interning accident.
            if ("nr".equals(nature)) {
                nameList.add(word);
            } else if ("ns".equals(nature)) {
                placeList.add(word);
            } else if ("nt".equals(nature)) {
                orgList.add(word);
            }
        }
        map.put("***********人名***********", nameList);
        map.put("***********地名***********", placeList);
        map.put("***********组织机构***********", orgList);
        return map;
    }

    /**
     * Renders a word cloud from the top 100 word frequencies of the cached
     * text and writes it to {@code setImgPath}, using the image at
     * {@code baseImgPath} as the cloud's shape mask.
     */
    public void getImg() {
        // Word frequencies come from our own analysis; the FrequencyAnalyzer
        // that was previously created here was never used and has been removed.
        final List<WordFrequency> wordFrequencyList = this.wordDemoFun(100);

        // Output resolution of the generated image.
        Dimension dimension = new Dimension(1920, 1080);
        WordCloud wordCloud = new WordCloud(dimension, CollisionMode.PIXEL_PERFECT);
        wordCloud.setPadding(2);
        // Font.ITALIC == 2 (value the original passed as a magic number).
        java.awt.Font font = new java.awt.Font("STSong-Light", Font.ITALIC, 20);
        // Gradient palette: higher-frequency words get colors nearer the start.
        wordCloud.setColorPalette(new LinearGradientColorPalette(Color.RED, Color.BLUE, Color.GREEN, 30, 30));
        wordCloud.setKumoFont(new KumoFont(font));
        wordCloud.setBackgroundColor(new Color(255, 255, 255));
        // Use the configured image as the pixel-boundary mask for the cloud shape.
        try {
            PixelBoundryBackground pb = new PixelBoundryBackground(baseImgPath);
            wordCloud.setBackground(pb);
        } catch (IOException e) {
            // Best-effort: fall back to the default rectangular background.
            e.printStackTrace();
        }

        // Alternative circular background (kept from the original for reference):
//        wordCloud.setBackground(new CircleBackground(255));
//        wordCloud.setFontScalar(new SqrtFontScalar(12, 45));

        wordCloud.build(wordFrequencyList);
        wordCloud.writeToFile(setImgPath);
    }

    /**
     * Keyword-extraction helper that retries until enough Chinese-only
     * keywords are collected.
     *
     * @param content   text to analyze
     * @param num       number of keywords to request from HanLP this round
     * @param targetNum number of (filtered) keywords the caller wants
     * @param sum       recursion depth counter; extraction stops after 10 rounds
     * @return up to {@code targetNum}+ keywords consisting of two or more
     *         Chinese characters
     */
    private List<String> getKeyWords(String content, int num, int targetNum, int sum) {
        // Empty content yields an empty result.
        if (StringUtils.isEmpty(content)) {
            return new ArrayList<>();
        }
        // A request of 0 defaults to 5 keywords.
        if (num == 0) {
            num = 5;
            targetNum = 5;
        }
        List<String> keywordList = HanLP.extractKeyword(content, num);
        List<String> returnList = new ArrayList<>();
        // Keep only keywords made of two or more Chinese characters.
        for (String keyword : keywordList) {
            if (keyword.matches("[\\u4e00-\\u9fa5]{2,}")) {
                returnList.add(keyword);
            }
        }
        // Not enough keywords after filtering: ask HanLP for more, at most
        // 10 times, widening the request by the remaining shortfall each round.
        if (sum < 10) {
            if (returnList.size() >= targetNum) {
                return returnList;
            } else {
                num = num + (targetNum - returnList.size());
                sum++;
                return getKeyWords(content, num, targetNum, sum);
            }
        } else {
            return returnList;
        }
    }

}
