package ltd.hxya.novel.analysis.service.impl;

import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.toolkit.IdWorker;
import lombok.extern.slf4j.Slf4j;
import ltd.hxya.novel.analysis.feign.BookServiceFeign;
import ltd.hxya.novel.analysis.mapper.HiveDataPartMapper;
import ltd.hxya.novel.analysis.mapper.HiveRowDataMapper;
import ltd.hxya.novel.common.bean.Result;
import ltd.hxya.novel.common.bean.Word;
import ltd.hxya.novel.common.config.ThreadPoolConfig;
import ltd.hxya.novel.common.utils.*;
import ltd.hxya.novel.entity.rowdata.DataPart;
import ltd.hxya.novel.entity.rowdata.NovelRowData;
import ltd.hxya.novel.entity.rowdata.UseLessInfo;
import ltd.hxya.novel.entity.rowdata.WordAnalysis;
import org.jetbrains.annotations.NotNull;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;

import java.io.IOException;
import java.sql.SQLException;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.stream.Collectors;

@Slf4j
@Service
public class HiveRowDataServiceImpl {

    // Reads raw crawled novel rows from Hive.
    @Autowired
    private HiveRowDataMapper hiveRowDataMapper;

    // Reads category-partitioned rows (data_part table) from Hive.
    @Autowired
    private HiveDataPartMapper hiveDataPartMapper;

    // Remote book service: category lists, keyword profiles, persistence of results.
    @Autowired
    private BookServiceFeign bookServiceFeign;

    // Created lazily in analysisData(); runs the async saveBook calls.
    private ThreadPoolExecutor threadPoolExecutor;


    // Configured category spec; parsed into a category list by BaseUtils.getCategory.
    @Value("${category}")
    private String category;


    // Cache of text -> segmented words shared by the similarity helpers.
    // NOTE(review): plain HashMap — not thread-safe if ever touched off one thread.
    private Map<String, List<Word>> wordMap = new HashMap<>();

    /**
     * Groups raw crawl rows by book, resolves each book's category from its
     * description, and loads each book's newline-separated JSON rows into a
     * Hive partition via HDFS.
     *
     * @return map of book name -> the JSON-lines payload written for that book
     */
    public Map<String, String> dataCategory() {
        List<NovelRowData> rowDataList = hiveRowDataMapper.getList();
        // Persist the highest chapter index per book so crawling can resume later.
        saveLastCrawlIndex(rowDataList);
        List<List<NovelRowData>> groupByBookList = groupByBook(rowDataList);

        Map<String, String> bookGroupJsonMap = new HashMap<>();
        groupByBookList.forEach(novelRowDataList -> {
            NovelRowData novelRowData = novelRowDataList.get(0);
            // Resolve the category once per book from the first row's description.
            // (Renamed from `category` — the original shadowed the @Value field.)
            String bookCategory = "";
            try {
                bookCategory = calculateMaxSimilarCategory(novelRowData.getBookDesc());
            } catch (IOException e) {
                log.error("failed to resolve category for book {}", novelRowData.getBookName(), e);
            }
            for (NovelRowData rowData : novelRowDataList) {
                rowData.setCatName(bookCategory);
                String json = bookGroupJsonMap.get(rowData.getBookName());
                if (json == null || "".equals(json)) {
                    // NOTE(review): seeding with the first row means that row appears
                    // twice in the payload (once here, once in the loop below) —
                    // confirm this duplication is intended.
                    json = JSON.toJSONString(novelRowData);
                }
                json = json + "\n" + JSON.toJSONString(rowData);
                bookGroupJsonMap.put(rowData.getBookName(), json);
            }
            String json = bookGroupJsonMap.get(novelRowData.getBookName());
            String idStr = IdWorker.getIdStr();
            try {
                HDFSUtils.createFile(idStr, json);
                // NOTE(review): HiveQL built by string concatenation — a quote in the
                // category or book name breaks (or injects into) the statement.
                HiveUtils.otherOperator("load data inpath '/test/" + idStr + ".txt' into table data_part partition(category = '" + bookCategory + "',name='" + novelRowData.getBookName() + "')");
            } catch (Exception e) {
                log.error("failed to load book {} into Hive", novelRowData.getBookName(), e);
            }

        });
        log.debug("dataCategory result: {}", JSON.toJSONString(bookGroupJsonMap));
        return bookGroupJsonMap;
    }

    /**
     * Picks, for every bookId, the row with the highest chapter index and
     * pushes that "last crawled index" snapshot to the book service.
     */
    private void saveLastCrawlIndex(List<NovelRowData> rowDataList) {
        Map<String, NovelRowData> lastIndexMap = new HashMap<>();
        for (NovelRowData candidate : rowDataList) {
            NovelRowData current = lastIndexMap.get(candidate.getBookId());
            // Keep the first row seen, or any later row with a strictly higher index.
            if (current == null || candidate.getIndexNum() > current.getIndexNum()) {
                lastIndexMap.put(candidate.getBookId(), candidate);
            }
        }
        List<NovelRowData> saveData = new ArrayList<>();
        for (NovelRowData winner : lastIndexMap.values()) {
            // Normalise a missing content field to the empty string before saving.
            if (StringUtils.isEmpty(winner.getContent())) {
                winner.setContent("");
            }
            saveData.add(winner);
        }
        bookServiceFeign.saveLastIndex(saveData);
    }

    /**
     * Entry point of the analysis pipeline: builds the worker pool, loads the
     * configured categories' rows from Hive, and de-duplicates them per book.
     */
    public List<List<DataPart>> analysisData() throws SQLException, ClassNotFoundException, IOException, InterruptedException {
        threadPoolExecutor = new ThreadPoolConfig().threadPoolExecutor();
        List<String> categorys = BaseUtils.getCategory(this.category);
        List<DataPart> rowDataList = hiveDataPartMapper.getListByCategory(categorys);
        filterImg();
        return removeDuplication(rowDataList);
    }

    // TODO(review): placeholder — image filtering is not implemented yet.
    private void filterImg() {

    }

    /**
     * De-duplicates chapters per book: groups rows by book name, keeps the
     * best-scoring copy of each chapter, and asynchronously persists every
     * book's result through the book service.
     *
     * @param rowDataList all chapter rows loaded from Hive
     * @return one list of de-duplicated chapters per book
     */
    public List<List<DataPart>> removeDuplication(List<DataPart> rowDataList) throws InterruptedException {
        long startMillis = System.currentTimeMillis();

        // One inner list per book.
        List<List<DataPart>> bookGroupList = groupBookName(rowDataList);

        List<List<DataPart>> mergeThreadBookGroupList = new ArrayList<>();
        for (List<DataPart> bookGroup : bookGroupList) {
            // Group identical chapters together, then keep the best copy of each.
            List<List<DataPart>> firstSummarizeData = repeatClassific(bookGroup);
            List<DataPart> firstScreenData = firstScreen(firstSummarizeData);
            mergeThreadBookGroupList.add(firstScreenData);
            // Persist to MySQL off the calling thread; failures are logged, not propagated.
            CompletableFuture
                    .runAsync(() -> bookServiceFeign.saveBook(firstScreenData), threadPoolExecutor)
                    .exceptionally(throwable -> {
                        log.error("saveBook failed", throwable);
                        return null;
                    });
        }
        log.info("执行时间：{}", (System.currentTimeMillis() - startMillis) / 1000);

        return mergeThreadBookGroupList;

    }

    /**
     * Groups chapter rows by book name.
     *
     * @param rowDataList flat list of chapter rows
     * @return one inner list per distinct book name
     */
    private List<List<DataPart>> groupBookName(List<DataPart> rowDataList) {
        Map<String, List<DataPart>> dataPartMap = new HashMap<>();
        for (DataPart dataPart : rowDataList) {
            // computeIfAbsent replaces the manual get/isEmpty/new/put dance.
            dataPartMap
                    .computeIfAbsent(dataPart.getBookName(), name -> new ArrayList<>())
                    .add(dataPart);
        }
        return new ArrayList<>(dataPartMap.values());
    }


    /**
     * Merges crawl rows that belong to the same novel (the same story crawled
     * from different sources under slightly different names and blurbs) into a
     * single group with one unified book name and description.
     *
     * <p>NOTE(review): the similarity threshold (80) assumes CosineSimilarity
     * returns a 0-100 score — confirm against its implementation.
     */
    public List<List<NovelRowData>> groupByBook(List<NovelRowData> rowDataList) {
        List<List<NovelRowData>> bookGroupList = new ArrayList<>();
        // First, bucket rows by their raw book name.
        Map<String, List<NovelRowData>> bookGroupMap = new HashMap<>();
        rowDataList.stream().forEach(rowData -> {
            List<NovelRowData> bookGroup = bookGroupMap.get(rowData.getBookName());
            if (CollectionUtils.isEmpty(bookGroup)) {
                bookGroup = new ArrayList<>();
            }
            bookGroup.add(rowData);
            bookGroupMap.put(rowData.getBookName(), bookGroup);
        });
        // One representative row per raw name, used for cross-name comparison.
        Map<String, NovelRowData> compareData = new HashMap<>();

        List<NovelRowData> compareList = new ArrayList<>();
        bookGroupMap.forEach((key, value) -> {
            compareData.put(key, value.get(0));
            compareList.add(value.get(0));

        });
        // Using average similarity, decide which book name and blurb to keep per cluster.
        compareData.forEach((key, value) -> {
            // A null value marks a name already absorbed into another cluster below.
            if (value == null) {
                return;
            }
            List<String> descList = new ArrayList<>();
            List<String> bookNameList = new ArrayList<>();
            // Representatives whose description is similar (>= 80) to this one, or
            // contained in it, are treated as the same novel.
            List<NovelRowData> similarData = compareList.stream().filter(item -> {
                double similarity = CosineSimilarity.getSimilarity(value.getBookDesc(), item.getBookDesc());
                if (similarity < 80 && value.getBookDesc().indexOf(item.getBookDesc()) == -1) {
                    return false;
                }
                bookNameList.add(item.getBookName());
                // Matched a close-enough description.
                return true;
            }).collect(Collectors.toList());
            // Canonical name = the one with the highest average similarity to the others.
            String maxSuitableName = CosineSimilarity.avgMaxSimilarityText(bookNameList);
            List<NovelRowData> totalNovelList = new ArrayList<>();
            similarData.forEach(item -> {
                totalNovelList.addAll(bookGroupMap.get(item.getBookName()));
                if (item.getBookName().equals(maxSuitableName)) {
                    return;
                }
                // Absorbed into the canonical group: null out the marker (replacing an
                // existing key's value is safe during HashMap.forEach — no structural
                // modification) and drop the old name's bucket.
                compareData.put(item.getBookName(), null);
                bookGroupMap.remove(item.getBookName());
            });
            Set<String> descSet = new HashSet<>();
            // Rename every absorbed row to the canonical name, collecting all blurbs.
            List<NovelRowData> indexList = totalNovelList.stream().map(novelRowData -> {
                descSet.add(novelRowData.getBookDesc());
                novelRowData.setBookName(maxSuitableName);
                return novelRowData;
            }).collect(Collectors.toList());
            // Give all rows the most representative blurb.
            String maxSuitableDesc = CosineSimilarity.avgMaxSimilarityText(descSet);
            indexList = indexList.stream().map(novelRowData -> {
                novelRowData.setBookDesc(maxSuitableDesc);
                return novelRowData;
            }).collect(Collectors.toList());
            bookGroupMap.put(maxSuitableName, indexList);

        });

        bookGroupMap.forEach((key, value) -> {
            bookGroupList.add(value);
        });
        return bookGroupList;
    }

    /**
     * Recomputes the category of every book group from its description.
     */
    private List<List<NovelRowData>> updateRowData(List<List<NovelRowData>> thirdScreenData) {
        List<List<NovelRowData>> finalData = new ArrayList<>();
        for (List<NovelRowData> chapterList : thirdScreenData) {
            // An IOException while classifying leaves a null entry for that book.
            List<NovelRowData> updateCategoryData = null;
            try {
                updateCategoryData = updateCategory(chapterList);
            } catch (IOException e) {
                e.printStackTrace();
            }
            finalData.add(updateCategoryData);
        }
        return finalData;
    }

    /**
     * Resolves the category from the first chapter's book description and
     * stamps it onto every chapter of the book.
     */
    private List<NovelRowData> updateCategory(List<NovelRowData> chapterList) throws IOException {
        String category = calculateMaxSimilarCategory(chapterList.get(0).getBookDesc());

        List<NovelRowData> novelRowDataList = new ArrayList<>();
        for (NovelRowData novelRowData : chapterList) {
            novelRowData.setCatName(category);
            novelRowDataList.add(novelRowData);
        }
        return novelRowDataList;
    }

    /**
     * Scores every chapter by how often its description (type "desc") or book
     * name (any other type) is similar (> 60) to the given reference texts,
     * then rewrites the whole list to use the field of the best-scoring row.
     *
     * @param chapterList rows to score and normalise (mutated in place)
     * @param contentList reference texts to compare against
     * @param type        "desc" to normalise descriptions, anything else for names
     * @return chapterList with the chosen field unified across all rows
     */
    @NotNull
    private List<DataPart> updateNameOrDesc(List<DataPart> chapterList, List<String> contentList, String type) {
        for (DataPart novelRowData : chapterList) {
            novelRowData.setScore(100.00);
            int similarityCount = 0;
            for (String item : contentList) {
                double similarity;
                // "desc" scores the blurb; any other type scores the book name.
                if ("desc".equals(type)) {
                    similarity = CosineSimilarity.getSimilarity(novelRowData.getBookDesc(), item);
                } else {
                    similarity = CosineSimilarity.getSimilarity(novelRowData.getBookName(), item);
                }
                // A similarity above 60 counts as a match.
                if (similarity > 60) {
                    similarityCount++;
                }
            }
            // BUG FIX: the original computed Math.pow(score, 1 / similarityCount) with
            // INTEGER division — the exponent was 0 for any count > 1 (score collapsed
            // to 1.0) and the division threw ArithmeticException when the count was 0.
            // Use a real-valued exponent and leave the score alone when nothing matched.
            if (similarityCount > 0) {
                novelRowData.setScore(Math.pow(novelRowData.getScore(), 1.0 / similarityCount));
            }
        }
        DataPart best = maxScore(chapterList);
        // Unify the chosen field across every row to match the best-scoring one.
        for (DataPart item : chapterList) {
            if ("desc".equals(type)) {
                item.setBookDesc(best.getBookDesc());
            } else {
                item.setBookName(best.getBookName());
            }
        }
        return chapterList;
    }

    /**
     * Groups rows by book name. (Placeholder for a future third screening pass.)
     */
    private List<List<NovelRowData>> thirdScreen(List<NovelRowData> novelRowDataList) {
        // TODO: implement the actual third-screen logic.
        Map<String, List<NovelRowData>> bookMap = new HashMap<>();
        for (NovelRowData novelRowData : novelRowDataList) {
            List<NovelRowData> bookList = bookMap.get(novelRowData.getBookName());
            if (bookList == null || bookList.isEmpty()) {
                bookList = new ArrayList<>();
                bookMap.put(novelRowData.getBookName(), bookList);
            }
            bookList.add(novelRowData);
        }
        // Flatten the map's buckets into the result list.
        return new ArrayList<>(bookMap.values());
    }

    /**
     * Second de-duplication pass: re-groups the already screened chapters by
     * chapter-name similarity, then keeps the best copy of each group.
     */
    private List<DataPart> secondRepeatClassific(List<DataPart> firstScreenData) {
        return secondDuplicateRemove(chapterClassifcation(firstScreenData));
    }

    /**
     * Picks the best copy out of each similarity group by reusing the
     * first-screen selection logic.
     */
    private List<DataPart> secondDuplicateRemove(List<List<DataPart>> secondChapterClassifaction) {
        return firstScreen(secondChapterClassifaction);
    }

    /**
     * Clusters chapters whose names are similar (>= 60) into groups, keyed by
     * the name of the outer chapter that seeded each cluster.
     *
     * <p>NOTE(review): this is an O(n^2) double scan, and descSimilarityIndexMap
     * is rebuilt for every outer row — so the "already grouped" guard only
     * applies within one outer iteration; confirm that is intended.
     */
    @NotNull
    private List<List<DataPart>> chapterClassifcation(List<DataPart> firstScreenData) {
        List<List<DataPart>> chapterClassification = new ArrayList<>();
        Map<String, List<DataPart>> descSimilarityNovel = new HashMap<>();
        BaseUtils.forEach(firstScreenData, (oneIndex, novelRowData) -> {
            Map<Integer, Integer> descSimilarityIndexMap = new HashMap<>();
            BaseUtils.forEach(firstScreenData, (index, item) -> {
                Integer parentIndex = descSimilarityIndexMap.get(index);
                // If this inner chapter was already assigned in this pass, skip it.
                if (parentIndex != null) {
                    return;
                }
                // Not similar enough: move on to the next candidate chapter.
                double similarity = CosineSimilarity.getSimilarity(novelRowData.getIndexName(), item.getIndexName());
                if (similarity < 60) {
                    return;
                }
                // Remember which outer chapter this inner chapter matched, so the next
                // inner iteration can skip it.
                descSimilarityIndexMap.put(index, oneIndex);
                // Append the matched chapter to the cluster keyed by the outer chapter's
                // name, creating the cluster on first use.
                List<DataPart> rowData = descSimilarityNovel.get(novelRowData.getIndexName());
                if (rowData == null) {
                    rowData = new ArrayList<>();
                }
                rowData.add(item);
                descSimilarityNovel.put(novelRowData.getIndexName(), rowData);
            });
        });

        descSimilarityNovel.forEach((key, value) -> {
            chapterClassification.add(value);
        });
        return chapterClassification;
    }

    /**
     * Groups chapters that share the same chapter name and keeps only the
     * groups that appear in at least half of the crawl sources.
     *
     * @param rowDataList all chapter rows of one book
     * @return one group per chapter name considered a "real" chapter
     */
    public List<List<DataPart>> repeatClassific(List<DataPart> rowDataList) {
        // How many distinct crawl sources contributed data.
        Set<Integer> sourceTypeSet = rowDataList.stream()
                .map(DataPart::getCrawlSourceId)
                .collect(Collectors.toSet());

        // Bucket chapters by chapter name and count each name's frequency.
        // merge/computeIfAbsent replace the manual null-check-and-put boilerplate.
        Map<String, List<DataPart>> bookMap = new HashMap<>();
        Map<String, Integer> indexNameFrequency = new HashMap<>();
        for (DataPart novelRowData : rowDataList) {
            indexNameFrequency.merge(novelRowData.getIndexName(), 1, Integer::sum);
            bookMap.computeIfAbsent(novelRowData.getIndexName(), name -> new ArrayList<>())
                    .add(novelRowData);
        }

        // Keep a chapter name only if it occurs in at least half of the sources.
        List<List<DataPart>> firstSummarizeData = new ArrayList<>();
        bookMap.forEach((key, value) -> {
            if (indexNameFrequency.get(key) >= (sourceTypeSet.size() / 2)) {
                firstSummarizeData.add(value);
            }
        });
        return firstSummarizeData;
    }

    /**
     * First screening pass: within each group of duplicate chapters, scores
     * every copy against the others' contents and keeps the highest-scoring one.
     */
    public List<DataPart> firstScreen(List<List<DataPart>> firstSummarizeData) {
        List<DataPart> firstScreenData = new ArrayList<>();
        for (List<DataPart> duplicates : firstSummarizeData) {
            // A lone copy wins by default.
            if (duplicates.size() == 1) {
                firstScreenData.add(duplicates.get(0));
                continue;
            }
            // Snapshot all contents before matchText starts mutating the rows.
            List<String> contentList = new ArrayList<>();
            for (DataPart copy : duplicates) {
                contentList.add(copy.getContent());
            }
            // Score (and clean) each copy against the others.
            for (DataPart copy : duplicates) {
                matchText(contentList, copy);
            }
            firstScreenData.add(maxScore(duplicates));
        }
        return firstScreenData;
    }

    /**
     * Returns the row with the highest score (ties go to the later row, per the
     * original {@code >=}); a fresh zero-score DataPart when the list is empty.
     */
    private DataPart maxScore(List<DataPart> firstCalculateRowData) {
        // The original threaded the running maximum through a HashMap (a leftover
        // from lambda capture rules); a plain local variable is all that's needed.
        DataPart max = new DataPart();
        max.setScore(0.00);
        for (DataPart candidate : firstCalculateRowData) {
            if (candidate.getScore() >= max.getScore()) {
                max = candidate;
            }
        }
        return max;
    }

    /**
     * Cleans one chapter's content against the same chapter from other sources
     * and scores it:
     * <ol>
     *   <li>drops every paragraph that no other source shares (likely ads/junk);</li>
     *   <li>diffs the cleaned content against each other source, counting the
     *       characters this copy is missing;</li>
     *   <li>scores 100 / sqrt(1 + length of text missing from &gt;= 60% of
     *       sources), so more-complete copies score higher.</li>
     * </ol>
     *
     * @param contentList  the same chapter's content from all sources
     * @param novelRowData the copy to clean and score (mutated in place)
     */
    public void matchText(List<String> contentList, DataPart novelRowData) {
        List<String> useLessList = new ArrayList<>();

        //String rowDataContent = novelRowData.getContent();
        // Split the content into its <p>…</p> paragraphs.
        List<String> textList = CrawlUtils.simpleCollectByPattern("(<p>[\\s\\S]*?</p>)", novelRowData.getContent());
        textList.forEach(textLine -> {
            Boolean flag = getUseLessInfo(textLine, contentList);
            if (!flag) {
                useLessList.add(textLine);
            }
        });
        // Remove the paragraphs judged useless.
        useLessList.forEach(useLessInfo -> {
            String content = novelRowData.getContent().replace(useLessInfo, "");
            novelRowData.setContent(content);
        });
        // Count how often each missing character appears across the other sources; a
        // character missing from >= 60% of them is treated as genuinely lost text.
        Map<String, Integer> lackKeyWordMap = new HashMap<>();
        contentList.forEach(content -> {
            deleteAllUsefulStr(novelRowData.getContent(), content, lackKeyWordMap);

        });
        // length starts at 1 so the sqrt below can never divide by zero.
        Map<String, Integer> lengthMap = new HashMap<>();
        lengthMap.put("length", 1);
        lackKeyWordMap.forEach((keyword, frequency) -> {
            Integer length = lengthMap.get("length");
            if (frequency >= contentList.size() * 0.6) {

                length = length + keyword.length();
            }
            lengthMap.put("length", length);
        });
        novelRowData.setScore(100 / Math.sqrt(lengthMap.get("length")));

        // Legacy steps kept for reference: explicit useless-string removal and
        // missing-info scoring.
        //removeUseLessStr(novelRowData, insertAllUseLessStr);
        //calculateScore(novelRowData, deleteAllUsefulStr);

    }

    /**
     * Decides whether a single paragraph is "useful": its best-match similarity,
     * averaged over all the other sources' contents, must exceed 60.
     *
     * @param value1 one {@code <p>…</p>} paragraph of the chapter under test
     * @param value2 the full contents of the other sources
     * @return true when the line is shared across sources (or there is nothing
     *         to compare against)
     */
    private Boolean getUseLessInfo(String value1, List<String> value2) {
        if (CollectionUtils.isEmpty(value2)) {
            return true;
        }
        // Plain accumulator replaces the original's single-entry HashMap holder.
        double total = 0.00;
        for (String content : value2) {
            // Split the other source into paragraphs and take the closest match.
            List<String> lines = CrawlUtils.simpleCollectByPattern("(<p>[\\s\\S]*?</p>)", content);
            total += getMaxSimilar(value1, lines);
        }
        double aveSimilar = total / value2.size();
        return aveSimilar > 60;
    }

    /**
     * Returns the highest similarity between {@code line} and any entry of
     * {@code lines}, short-circuiting as soon as a match above 90 is found.
     * Segmented word lists are cached in {@link #wordMap} to avoid re-tokenising.
     */
    private Double getMaxSimilar(String line, List<String> lines) {
        List<Word> lineWords = wordMap.get(line);
        if (CollectionUtils.isEmpty(lineWords)) {
            lineWords = Tokenizer.segment(line);
            wordMap.put(line, lineWords);
        }
        Double best = 0.00;
        for (String candidate : lines) {
            List<Word> candidateWords = wordMap.get(candidate);
            if (CollectionUtils.isEmpty(candidateWords)) {
                candidateWords = Tokenizer.segment(candidate);
                wordMap.put(candidate, candidateWords);
            }
            double similarity = CosineSimilarity.getSimilarityImpl(lineWords, candidateWords);
            // Good enough — stop scanning.
            if (similarity > 90) {
                return similarity;
            }
            if (similarity > best) {
                best = similarity;
            }
        }
        return best;
    }

    /**
     * Penalises a row's score by the square root of the total length of the
     * useful text that was removed from it.
     */
    private void calculateScore(NovelRowData novelRowData, List<String> deleteAllUsefulStr) {
        int length = 0;
        for (String item : deleteAllUsefulStr) {
            length += item.length();
        }
        // NOTE: an empty list yields sqrt(0) and an infinite score, as before.
        Double score = novelRowData.getScore() / Math.sqrt(length);
        novelRowData.setScore(score);
    }

    /**
     * Strips every string in {@code insertAllUseLessStr} from the row's content.
     *
     * <p>BUG FIX: the original called {@code content.replaceAll(...)} and
     * discarded the result — Strings are immutable, so the content was written
     * back unchanged. The replacement result is now carried forward.
     */
    private void removeUseLessStr(NovelRowData novelRowData, List<String> insertAllUseLessStr) {
        String content = novelRowData.getContent();
        for (String str : insertAllUseLessStr) {
            // replaceAll keeps the original's regex semantics. NOTE(review): if these
            // snippets are literal text, String.replace would be the safer call.
            content = content.replaceAll(str, "");
        }
        novelRowData.setContent(content);
    }


    /**
     * Diffs two contents and counts, per single character, how often it was
     * deleted from {@code content1} relative to {@code content2}.
     *
     * <p>BUG FIX: the original executed {@code return} on the first
     * previously-unseen keyword, abandoning all remaining matches; the loop now
     * counts every match via {@code Map.merge}.
     *
     * @param frequencyMap accumulator: deleted character -> occurrence count
     */
    private void deleteAllUsefulStr(String content1, String content2, Map<String, Integer> frequencyMap) {
        // Renamed the local — the original variable shadowed its own class name.
        diff_match_patch differ = new diff_match_patch();
        LinkedList<ltd.hxya.novel.common.utils.diff_match_patch.Diff> diffs = differ.diff_main(content1, content2);
        Matcher deleteMatcher = PatternUtils.pattern("Diff\\(DELETE,\"([\\s\\S])", diffs.toString());
        while (deleteMatcher.find()) {
            frequencyMap.merge(deleteMatcher.group(1), 1, Integer::sum);
        }
    }

    /**
     * Returns the category whose keyword profile is most similar to the given
     * text. Ties go to the later candidate, matching the original's {@code >=}
     * comparison (whose tie order was HashMap-iteration-dependent anyway).
     *
     * @param text book description to classify
     * @return best-matching category name, or null when the category list is empty
     */
    public String calculateMaxSimilarCategory(String text) throws IOException {
        List<String> categoryList = bookServiceFeign.categoryList().getData();
        // The original threaded the running maximum through the similarity map under
        // a sentinel "max" key; two locals express the same search directly.
        String bestCategory = null;
        double bestSimilar = 0.00;
        for (String category : categoryList) {
            double similar = calcularteSimilarCategory(text, category);
            if (similar >= bestSimilar) {
                bestSimilar = similar;
                bestCategory = category;
            }
        }
        return bestCategory;
    }

    /**
     * Computes the similarity between {@code text} and the keyword profile of
     * {@code category}, after dropping any keyword that also appears in the
     * generic ("通用") profile so common words do not dominate the score.
     * (Removed: an unused QueryWrapper the original built and never used, and a
     * commented-out weight-dampening variant.)
     */
    public double calcularteSimilarCategory(String text, String category) throws IOException {
        List<WordAnalysis> rowWordAnalyses = bookServiceFeign.wordCategoryList(category).getData();

        List<WordAnalysis> commonWordList = bookServiceFeign.wordCategoryList("通用").getData();
        List<String> commonKeyWord = commonWordList.stream()
                .map(WordAnalysis::getKeyword)
                .collect(Collectors.toList());
        // Keep only the keywords specific to this category.
        List<WordAnalysis> wordAnalyses = rowWordAnalyses.stream()
                .filter(wordAnalysis -> !commonKeyWord.contains(wordAnalysis.getKeyword()))
                .collect(Collectors.toList());
        double similarity = CosineSimilarity.getSimilarityImpl(wordAnalyses, text);
        log.info("相似度为：{}", similarity);
        return similarity;
    }


}
