package com.example.demo.service;

import cn.hutool.core.io.FastByteArrayOutputStream;
import cn.hutool.core.io.IoUtil;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.example.demo.dao.BookMapper;
import com.example.demo.dao.RoleMapper;
import com.example.demo.model.Book;
import com.example.demo.model.Role;
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary;
import com.hankcs.hanlp.seg.NShort.NShortSegment;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.summary.TextRankKeyword;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;
import com.hankcs.hanlp.tokenizer.StandardTokenizer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.*;

/**
 * NLP service for web-novel analytics: protagonist detection, dictionary-based
 * sentiment analysis, and high-frequency keyword extraction over chapter files
 * downloaded to local disk (backed by HanLP).
 *
 * @Author: Zhanghf
 * @Date: 2021/12/16 10:19
 */
@Service
public class NLPService {
    @Autowired
    private RoleMapper roleMapper;
    @Autowired
    private BookMapper bookMapper;
    public void updateRoleTable() throws FileNotFoundException {
        List<Book> books = bookMapper.selectList(new QueryWrapper<Book>().isNotNull("latestnum").select("id", "latestnum"));
        for (Book book : books) {
            String leadingRole = getLeadingRole(book.getId());
            roleMapper.insert(new Role(leadingRole,book.getId()));
        }
    }

    /**
     * @param bookid 小说id
     * @Author Zhanghf
     * @Date 2021-12-17 20:31
     * @Description: 获取小说主角名，默认第一章的词频最高的词为主角名
     * @Return: 主角名
     */
    public String getLeadingRole(Integer bookid) throws FileNotFoundException {
        // CoreStopWordDictionary.add("<br>");

//      获取章节，默认只获取第一章，即从第一章开始到第十章结束
        int startsize = 1;
        int offsize = 10;
        StringBuilder stringBuilder = new StringBuilder();
        for (int i = startsize; i <= offsize; i++) {
            //File file = new File("C:\\HanLp\\booktest\\" + bookname + i  +".txt");
            File file = new File("C:\\Knight2.0\\Yunxs\\" + bookid + "\\" + i + ".txt");
            FileInputStream fileInputStream = new FileInputStream(file);
            FastByteArrayOutputStream read = IoUtil.read(fileInputStream);
            stringBuilder.append(read.toString());
        }
        FastByteArrayOutputStream read = IoUtil.read(new FileInputStream(new File("C:\\HanLp\\booktest\\武动乾坤1.txt")));
        String s = stringBuilder.toString();
        Segment segment1 = new NShortSegment().enableIndexMode(2);
        TextRankKeyword textRankKeyword = new TextRankKeyword(segment1);
        List<String> keywords = textRankKeyword.getKeywords(s, 1);
        System.out.println(keywords.toString());//输出第一章的词频Top1
        return keywords.get(0);
    }

    /**
     * @param sentence 文本
     * @Author Zhanghf
     * @Date 2021-12-05 15:29
     * @Description:针对一句话/一段话进行情感分析
     * @Return: Map<String, Integer>:存放章节中的情感波动，包含的情感词汇；1正面，0负面，-1无感情波动
     */

    public Map<String, Integer> emotionRecognition(String sentence) throws FileNotFoundException {
        File positiveWords = new File("C:\\HanLp\\data\\dictionary\\正面情感词语.txt");
        File negativeWords = new File("C:\\HanLp\\data\\dictionary\\负面情感词语.txt");
        ArrayList<String> positiveList = new ArrayList<>();
        ArrayList<String> negativeList = new ArrayList<>();
        IoUtil.readLines(new FileInputStream(positiveWords), StandardCharsets.UTF_8, positiveList);
        IoUtil.readLines(new FileInputStream(negativeWords), StandardCharsets.UTF_8, negativeList);
        //将句子进行分词并去除标点符号
        List<Term> segment = NLPTokenizer.segment(sentence);
        for (int i = 0; i < segment.size(); i++) {
            if (segment.get(i).nature.toString().equals("w")) {
                segment.remove(i);
            }
        }
        //       System.out.println("segment.toString()"+segment.toString());
//        是句子contains包含情感词汇还是句子分词后得到的词存在情感词汇

//        1、句子contains包含情感词汇
        HashMap<String, Integer> map = new HashMap<>(1);
        for (String s : positiveList) {
            if (sentence.contains(s)) {
//                map.put(s,1 );
                map.put(sentence, 1);

                // System.out.println(s+"**正面**"+sentence);
                return map;
            }
        }
        for (String s : negativeList) {
            if (sentence.contains(s)) {
//                map.put(s,0 );
                map.put(sentence, 0);

                //  System.out.println(s+"**负面**"+sentence);
                return map;
            }
        }
//        map.put("未分析到情感词汇", -1);
//        System.out.println("无感情波动"+sentence);

        map.put(sentence, -1);

        return map;
    }

    /**
     * @param txtPath  章节文本路径
     * @param roleName 角色名
     * @Author Zhanghf
     * @Date 2021-12-05 15:35
     * @Description:针对本地文件中的章节+角色进行感情分析
     * @Return:
     */
    public void analysisLocalFile(String txtPath, String roleName) throws FileNotFoundException {
        File file = new File(txtPath);
        if (!file.exists()) {
            System.out.println("文件不存在");
            return;
        } else {
            FastByteArrayOutputStream fastByteArrayOutputStream = IoUtil.read(new FileInputStream(file));
            String s = fastByteArrayOutputStream.toString();

            String[] split = s.split("\n");
            List<String> paragraphs = Arrays.asList(split);
            //章节的段落情感分析列表 1 0 -1
//            ArrayList<Integer> emotionList = new ArrayList<>();
            for (int i = 0; i < paragraphs.size(); i++) {
                //            i=0存在一个空串
                String paragraph_i = paragraphs.get(i);
//                System.out.println("段落"+i+paragraph_i);
                if (paragraph_i.contains(roleName)) {
                    Map<String, Integer> recognition = emotionRecognition(paragraph_i);
                    recognition.forEach((k, v) -> System.out.println("key:value = " + k + ":" + v));
                }
            }
        }
    }

    /**
     * @param s        网页上的章节文本
     * @param roleName 角色名
     * @Author Zhanghf
     * @Date 2021-12-05 15:35
     * @Description:针对网页上的某一章节+角色进行感情分析
     * @Return:
     */
    public Map<String, Integer> analysisPage(String s, String roleName) throws FileNotFoundException {

        String[] split = s.split("<br>,");
        List<String> paragraphs = Arrays.asList(split);

//        paragraphs.removeIf(x->x.equals("<br>"));
//        paragraphs.removeIf(x->x.equals("\n"));

        Map<String, Integer> recognitionChapter = new HashMap<String, Integer>();
        for (int i = 0; i < paragraphs.size(); i++) {
            //            i=0存在一个空串
            String paragraph_i = paragraphs.get(i);
            if (paragraph_i.contains(roleName)) {

                Map<String, Integer> recognitionparagraph = emotionRecognition(paragraph_i);
                // recognitionparagraph.forEach((k, v) -> System.out.println("key:value = " + k + ":" + v));
                recognitionChapter.putAll(recognitionparagraph);
            }
        }
        return recognitionChapter;
    }


    /**
     * @param
     * @Author Zhanghf
     * @Date 2021-12-16 10:40
     * @Description: 指定小说进行高频词探索
     * @Return:
     */
    public Map<String, Float> highFrequencyForNovel(Integer bookid, int startsize, int offsize, int size) throws IOException {
        CoreStopWordDictionary.add("会");
        CoreStopWordDictionary.add("中");
        CoreStopWordDictionary.add("便");
        CoreStopWordDictionary.add("出");
        CoreStopWordDictionary.add("之上");
        CoreStopWordDictionary.add("之中");
        CoreStopWordDictionary.add("说");
        CoreStopWordDictionary.add("没有");
        CoreStopWordDictionary.add("之后");
        CoreStopWordDictionary.add("能够");
        CoreStopWordDictionary.add("有着");
        CoreStopWordDictionary.add("见到");

        Map<String, Float> highFrequency = new TreeMap<String, Float>();
        StringBuilder stringBuilder = new StringBuilder();


        for (int i = startsize; i <= offsize; i++) {
            //File file = new File("C:\\HanLp\\booktest\\" + bookname + i  +".txt");
            File file = new File("C:\\Knight2.0\\Yunxs\\" + bookid + "\\" + i + ".txt");
            FileInputStream fileInputStream = new FileInputStream(file);
            FastByteArrayOutputStream read = IoUtil.read(fileInputStream);
            stringBuilder.append(read.toString());
        }
        Map<String, Float> stringFloatMap = highFrequencyForString(stringBuilder.toString(), size);

        //stringFloatMap.forEach((k, v) -> System.out.println("key:value = " + k + ":" + v));
        return stringFloatMap;
    }

    /**
     * @param txtpath 文本所在路径
     * @param size    取频率最高的前size个
     * @Author Zhanghf
     * @Date 2021-12-16 10:38
     * @Description:
     * @Return:
     */
    public Map<String, Float> highFrequencyForFile(String txtpath, int size) throws FileNotFoundException {
        File file = new File(txtpath);
        FileInputStream fileInputStream = new FileInputStream(file);
        FastByteArrayOutputStream read = IoUtil.read(fileInputStream);

        Segment segment = StandardTokenizer.SEGMENT.enableIndexMode(2);
        TextRankKeyword textRankKeyword = new TextRankKeyword(segment);
        Map<String, Float> termAndRank = textRankKeyword.getTermAndRank(read.toString(), size);

//        Set<String> keywords = termAndRank.keySet();
//        System.out.println(keywords.toString());
//
//        if (ifrank) {
//            termAndRank.forEach((k, v) -> System.out.println("key:value = " + k + ":" + v));
//        }
        return termAndRank;
    }

    /**
     * @param text 分析的文本
     * @param size 取频率最高的前size个
     *             //     * @param ifrank 是否展示频率
     * @Author Zhanghf
     * @Date 2021-12-16 10:35
     * @Description:
     * @Return:
     */

    public Map<String, Float> highFrequencyForString(String text, int size) throws FileNotFoundException {
        CoreStopWordDictionary.add("<br>");
        Segment segment = StandardTokenizer.SEGMENT.enableIndexMode(2);
        TextRankKeyword textRankKeyword = new TextRankKeyword(segment);
        Map<String, Float> termAndRank = textRankKeyword.getTermAndRank(text, size);

//        Set<String> keywords = termAndRank.keySet();
//        System.out.println(keywords.toString());
//
//        if (ifrank) {
//            termAndRank.forEach((k, v) -> System.out.println("key:value = " + k + ":" + v));
//        }
        return termAndRank;
    }
}
