package com.qf.index;

import cn.hutool.core.collection.CollUtil;
import cn.hutool.crypto.SecureUtil;
import cn.hutool.db.Db;
import cn.hutool.db.Entity;
import com.alibaba.fastjson.JSONObject;
import lombok.Data;

import java.io.Serializable;
import java.sql.SQLException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

public class Test3 {

    public static void main(String[] args) {
        extract2();
    }

    /**
     * Simple serializable holder for one article: segmented title / abstract /
     * keyword term lists, a per-term score map, and an article id.
     *
     * <p>Plain accessors replace the former Lombok {@code @Data} annotation so
     * the class needs no annotation processing; the generated
     * equals/hashCode/toString were never used in this file.
     */
    public static class Document implements Serializable {
        private static final long serialVersionUID = 1L;

        private List<String> tiList = new ArrayList<>();     // title terms
        private List<String> abList = new ArrayList<>();     // abstract terms
        private List<String> kwList = new ArrayList<>();     // author keywords
        private Map<String, Double> score = new HashMap<>(); // term -> weight
        private int aid = 0;                                 // article id

        public List<String> getTiList() { return tiList; }
        public void setTiList(List<String> tiList) { this.tiList = tiList; }
        public List<String> getAbList() { return abList; }
        public void setAbList(List<String> abList) { this.abList = abList; }
        public List<String> getKwList() { return kwList; }
        public void setKwList(List<String> kwList) { this.kwList = kwList; }
        public Map<String, Double> getScore() { return score; }
        public void setScore(Map<String, Double> score) { this.score = score; }
        public int getAid() { return aid; }
        public void setAid(int aid) { this.aid = aid; }
    }

    /**
     * Demo routine: builds a tiny in-memory corpus, computes a smoothed IDF
     * ({@code ln(N / (df + 1))}) for every term, and discards the
     * {@code stopwordCount} terms with the LOWEST IDF — i.e. the most common
     * terms, treated as stopwords.
     *
     * <p>Bug fix: the previous version sorted DESCENDING by IDF before
     * {@code skip(n)}, which threw away the rarest (highest-IDF) terms and kept
     * the stopwords — the opposite of the stated intent. The stream now sorts
     * ascending so the skipped prefix really is the low-IDF stopwords.
     *
     * @return the keywords that survive stopword removal (highest-IDF terms)
     */
    public static List<String> extract2() {

        // Sample Document fixtures (not consumed by the IDF demo below; kept as
        // illustrative data for the extraction experiments this class hosts).
        List<Document> documentList = new ArrayList<>();
        Document document1 = new Document();
        document1.setTiList(Arrays.asList("青年", "数字化阅读", "指尖阅读", "通过"));
        document1.setKwList(Arrays.asList("指尖", "作为", "开启", "自我", "指尖阅读", "提升"));
        document1.setAbList(Arrays.asList("指尖", "青年", "指尖阅读", "透视", "现象"));
        document1.setAid(1);
        documentList.add(document1);

        Document document2 = new Document();
        document2.setTiList(Arrays.asList("索网结构", "光伏支架", "马鞍形", "预张力优化"));
        document2.setKwList(Arrays.asList("索网结构", "关键刚度", "预张力优化", "刚度替换"));
        document2.setAbList(Arrays.asList("索网结构", "调整", "拉索", "变化量", "提出", "马鞍形", "建立", "构件", "截面面积", "通过", "定量评价"));
        document2.setAid(2);
        documentList.add(document2);

        Document document3 = new Document();
        document3.setTiList(Arrays.asList("对口支援", "院前急救", "民营医院管理", "医院营销", "院前急救"));
        document3.setKwList(Arrays.asList("民营医院", "hospitals, public", "对口支援", "院前急救", "pre-hospital first aid", "private hospitals", "建设", "公立医院"));
        document3.setAbList(Arrays.asList("民营医院", "流程", "作为", "选派", "美誉度", "结果", "结论", "探讨", "院前急救", "专家", "建设", "技术支持", "方法", "对口支援", "目的", "公立医院", "通过"));
        document3.setAid(3);
        documentList.add(document3);

        // Mock corpus: keyword lists of five documents.
        List<List<String>> allDocuments = Arrays.asList(
                Arrays.asList("研究", "方法", "实验", "结果"),
                Arrays.asList("算法", "模型", "方法", "数据"),
                Arrays.asList("研究", "分析", "方法", "测试"),
                Arrays.asList("频率", "分析", "文档", "测试"),
                Arrays.asList("统计", "分析", "文档", "排序")
        );

        // Document frequency (DF): number of documents containing each term.
        // Deduplicate per document so repeats within one document count once.
        Map<String, Integer> docFrequency = new HashMap<>();
        int totalDocuments = allDocuments.size();
        for (List<String> keywords : allDocuments) {
            for (String keyword : new HashSet<>(keywords)) {
                docFrequency.merge(keyword, 1, Integer::sum);
            }
        }

        // Smoothed inverse document frequency: idf = ln(N / (df + 1)).
        Map<String, Double> idfMap = new HashMap<>();
        for (Map.Entry<String, Integer> entry : docFrequency.entrySet()) {
            String term = entry.getKey();
            int df = entry.getValue();
            idfMap.put(term, Math.log(totalDocuments / (double) (df + 1)));
        }

        // Sort ASCENDING by IDF and skip the first N entries: the lowest-IDF
        // (most frequent) terms are the stopwords we want to discard.
        int stopwordCount = 10; // number of stopwords to drop
        List<String> validKeywords = idfMap.entrySet().stream()
                .sorted(Map.Entry.comparingByValue())
                .skip(stopwordCount)
                .map(Map.Entry::getKey)
                .toList();

        Map<String, Double> stringDoubleMap = mapSortValueIntMin(idfMap);
        System.out.println(stringDoubleMap);

        // Print the results.
        System.out.println("原始所有词汇: " + idfMap.keySet());
        System.out.println("有效关键词（排除停用词）: " + validKeywords);

        return validKeywords;
    }

    /**
     * Sorts a map by value in ASCENDING order (smallest value first) and returns
     * a new insertion-ordered map; the input map is not modified.
     *
     * @param mapName the map whose entries should be ordered by value
     * @return a new {@link LinkedHashMap} with entries in ascending-value order
     */
    public static Map<String, Double> mapSortValueIntMin(Map<String, Double> mapName) {
        Map<String, Double> result = new LinkedHashMap<>();
        mapName.entrySet().stream()
                .sorted(Map.Entry.comparingByValue())
                .forEachOrdered(e -> result.put(e.getKey(), e.getValue()));
        return result;
    }

}
