package org.example.util;


import cn.hutool.core.lang.Pair;
import cn.hutool.core.util.CharUtil;
import cn.hutool.core.util.ReUtil;
import cn.hutool.extra.tokenizer.TokenizerEngine;
import cn.hutool.extra.tokenizer.TokenizerUtil;
import cn.hutool.extra.tokenizer.Word;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;


public class KeywordExtractor {

    /** Sliding-window size used when building the co-occurrence graph. */
    private static final int CO_OCCURRENCE_WINDOW = 5;
    /** Damping factor {@code d} in the TextRank update: score = (1-d) + d * sum. */
    private static final double DAMPING_FACTOR = 0.85;
    /** Fixed number of TextRank power iterations (no convergence check). */
    private static final int TEXTRANK_ITERATIONS = 20;

    /** Chinese stop words removed before graph construction. */
    private static final Set<String> STOP_WORDS = new HashSet<>(Arrays.asList(
            "的","了","和","是","在","就","都","而","及","与","着","或","一个","没有","我们",
            "你","我","他","她","它","他们","以及","如果","因为","所以","而且","并且","但是",
            "通过","对于","关于","由于","作为","主要","进行","同时","这种","这些","然后","这里",
            "目前","需要","应该","可能","非常","可以","一般","比如","比如说","比如在",
            "以及其","各个","各类","一些","一种","这个","那个","其中"
    ));

    private KeywordExtractor() {
        // Utility class — not instantiable.
    }

    /**
     * Normalizes a raw token: trims surrounding whitespace and lower-cases it.
     * {@link Locale#ROOT} keeps the case mapping locale-independent (avoids
     * e.g. the Turkish dotless-i surprise under a Turkish default locale).
     *
     * @param token raw token, may be {@code null}
     * @return normalized token, or {@code null} if the input was {@code null}
     */
    private static String normalize(String token) {
        return token == null ? null : token.trim().toLowerCase(Locale.ROOT);
    }

    /**
     * Decides whether a (normalized) token should participate in the TextRank graph.
     * Rejects: null/blank tokens, single characters, tokens longer than five
     * characters, stop words, tokens with no CJK/Latin/digit content, and pure numbers.
     *
     * @param token candidate token
     * @return {@code true} if the token should be kept
     */
    private static boolean keepToken(String token) {
        token = normalize(token);
        if (token == null) return false;
        if (token.length() <= 1) return false;          // single characters carry little signal
        if (token.length() > 5) return false;           // over-long tokens are usually noise
        if (STOP_WORDS.contains(token)) return false;
        if (token.chars().allMatch(CharUtil::isBlankChar)) return false;
        if (!ReUtil.isMatch(".*[\\p{IsHan}a-z0-9].*", token)) return false; // must contain CJK/Latin/digit
        if (ReUtil.isMatch("^[0-9\\.]+$", token)) return false;             // drop pure numbers
        return true;
    }

    /**
     * Tokenizes the text. Prefers the Hutool tokenizer engine; if it yields
     * fewer than three usable tokens, retries with Jieba in SEARCH mode. If
     * tokenization fails entirely (e.g. tokenizer jars missing at runtime),
     * falls back to a rough regex split.
     *
     * <p>All returned tokens are normalized (trimmed, lower-cased) so the
     * co-occurrence graph does not split e.g. "AI"/"ai" into two nodes — the
     * original only normalized inside the filter, not the stored token.
     *
     * @param text input text, non-null
     * @return filtered, normalized tokens (possibly empty, never null)
     */
    private static List<String> tokenize(String text) {
        try {
            TokenizerEngine engine = TokenizerUtil.createEngine();
            List<String> list = new ArrayList<>();
            for (Word word : engine.parse(text)) {
                String w = normalize(word.getText());
                if (keepToken(w)) list.add(w);
            }

            // Too few tokens: the engine likely handled this text poorly.
            // Jieba's SEARCH mode over-segments aggressively, which helps here.
            if (list.size() < 3) {
                JiebaSegmenter segmenter = new JiebaSegmenter();
                List<SegToken> tokens = segmenter.process(text, JiebaSegmenter.SegMode.SEARCH);
                list = tokens.stream()
                        .map(t -> normalize(t.word))
                        .filter(KeywordExtractor::keepToken)
                        .collect(Collectors.toList());
            }

            return list;
        } catch (Throwable ignored) {
            // Deliberate best-effort fallback: split on anything that is not
            // a CJK character, Latin letter or digit.
            return Arrays.stream(text.split("[^\\p{IsHan}a-zA-Z0-9]+"))
                    .map(KeywordExtractor::normalize)
                    .filter(KeywordExtractor::keepToken)
                    .collect(Collectors.toList());
        }
    }

    /**
     * Builds an undirected, weighted co-occurrence graph: two tokens appearing
     * within {@code window} positions of each other get their shared edge
     * weight incremented (stored symmetrically in both adjacency maps).
     *
     * @param tokens ordered token list
     * @param window co-occurrence window size (in token positions)
     * @return adjacency map: token -> (neighbor -> co-occurrence count)
     */
    private static Map<String, Map<String, Integer>> buildCoOccurrenceGraph(List<String> tokens, int window) {
        Map<String, Map<String, Integer>> graph = new HashMap<>();
        int n = tokens.size();
        for (int i = 0; i < n; i++) {
            String wi = tokens.get(i);
            graph.putIfAbsent(wi, new HashMap<>());
            int maxJ = Math.min(i + window, n - 1);
            for (int j = i + 1; j <= maxJ; j++) {
                String wj = tokens.get(j);
                if (wi.equals(wj)) continue; // no self-loops
                graph.get(wi).merge(wj, 1, Integer::sum);
                graph.computeIfAbsent(wj, k -> new HashMap<>()).merge(wi, 1, Integer::sum);
            }
        }
        return graph;
    }

    /**
     * Runs weighted TextRank over the co-occurrence graph for a fixed number
     * of iterations and returns the final per-token scores.
     *
     * @param graph      adjacency map from {@link #buildCoOccurrenceGraph}
     * @param d          damping factor (typically 0.85)
     * @param iterations number of power iterations
     * @return token -> TextRank score
     */
    private static Map<String, Double> textRank(Map<String, Map<String, Integer>> graph, double d, int iterations) {
        // Total outgoing edge weight per node, computed once up front. The
        // original recomputed this sum for every edge on every iteration,
        // turning each pass into an accidental O(E * degree) scan.
        Map<String, Integer> outWeight = new HashMap<>(graph.size());
        for (Map.Entry<String, Map<String, Integer>> e : graph.entrySet()) {
            int sum = 0;
            for (int w : e.getValue().values()) sum += w;
            outWeight.put(e.getKey(), sum);
        }

        // Plain HashMaps suffice: everything here is single-threaded and local.
        Map<String, Double> score = new HashMap<>(graph.size());
        for (String k : graph.keySet()) {
            score.put(k, 1.0);
        }

        for (int it = 0; it < iterations; it++) {
            Map<String, Double> next = new HashMap<>(graph.size());
            for (Map.Entry<String, Map<String, Integer>> e : graph.entrySet()) {
                double sum = 0.0;
                for (Map.Entry<String, Integer> nb : e.getValue().entrySet()) {
                    String u = nb.getKey();
                    int total = outWeight.getOrDefault(u, 0);
                    if (total == 0) continue; // isolated neighbor contributes nothing
                    sum += (nb.getValue() / (double) total) * score.getOrDefault(u, 1.0);
                }
                next.put(e.getKey(), (1 - d) + d * sum);
            }
            score = next;
        }
        return score;
    }

    /**
     * Extracts the top-{@code topK} keywords from {@code text} using TextRank
     * over a token co-occurrence graph.
     *
     * @param text input text; null/blank yields an empty list
     * @param topK maximum number of keywords; non-positive yields an empty list
     *             (the original threw {@link IllegalArgumentException} from
     *             {@code Stream.limit} on negative values)
     * @return keyword/score pairs sorted by descending score, never null
     */
    public static List<Pair<String, Double>> extractKeywords(String text, int topK) {
        if (text == null || text.trim().isEmpty() || topK <= 0) return Collections.emptyList();

        List<String> tokens = tokenize(text);
        if (tokens.isEmpty()) return Collections.emptyList();

        Map<String, Map<String, Integer>> graph = buildCoOccurrenceGraph(tokens, CO_OCCURRENCE_WINDOW);
        Map<String, Double> scores = textRank(graph, DAMPING_FACTOR, TEXTRANK_ITERATIONS);

        return scores.entrySet().stream()
                .sorted(Map.Entry.<String, Double>comparingByValue(Comparator.reverseOrder()))
                .limit(topK)
                .map(e -> Pair.of(e.getKey(), e.getValue()))
                .collect(Collectors.toList());
    }

    /** Demo entry point: extracts keywords from a sample Chinese news sentence. */
    public static void main(String[] args) {
        String text = "华为正式发布新一代AI计算平台，面向大模型训练与推理场景，" +
                "并宣布与多家伙伴共同打造行业解决方案。该平台在算力、能效与生态上全面升级。";

        List<Pair<String, Double>> keywords = extractKeywords(text, 10);
        System.out.println("Top Keywords:");
        for (Pair<String, Double> p : keywords) {
            System.out.println(p.getKey() + " -> " + p.getValue());
        }
    }
}