package com.server.questionbank.snframe.service.OllamaService.KnowledgeBaseService;

import org.apache.commons.lang3.StringUtils;
import org.commonmark.node.*;
import org.commonmark.parser.Parser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * Static utility for preparing raw document text for embedding/retrieval:
 * format detection (Markdown vs. HTML-ish), content cleanup, lightweight
 * Chinese handling, and size-bounded chunking by headings or sentences.
 *
 * <p>All methods are stateless and thread-safe; the class is not instantiable.
 */
public class TextChunker {
    private static final Logger logger = LoggerFactory.getLogger(TextChunker.class);

    // Splits AFTER CJK/Latin sentence terminators, after newlines, or after an
    // opening bracket followed by a space (zero-width lookbehinds keep the delimiter).
    private static final Pattern SENTENCE_PATTERN = Pattern.compile(
            "(?<=[。！？!?]\\s?)|(?<=[\n])|(?<=\\p{Ps}\\s)");
    // Heuristic Markdown markers: ATX headings, images, links, code fences, bold, italics.
    private static final Pattern MD_PATTERN = Pattern.compile(
            "^#+ |!\\[.*?\\]\\(.*?\\)|\\[.*?\\]\\(.*?\\)|```|\\*\\*.*?\\*\\*|_.*?_",
            Pattern.MULTILINE);
    // Numbers worth protecting from whitespace cleanup: thousands-separated
    // integers ("1,234,567") and decimals, optionally percentages ("3.14", "99.9%").
    private static final Pattern SPECIAL_NUM_PATTERN = Pattern.compile(
            "\\b(\\d{1,3}(,\\d{3})+\\b|\\d+\\.\\d+%?)");
    // Compiled once: containsChinese() may run once per document and
    // Pattern.compile is comparatively expensive.
    private static final Pattern HAN_PATTERN = Pattern.compile("\\p{Script=Han}");
    // Matches both heading-marker forms: "[HEADING level=n]" emitted by the
    // CommonMark visitor and "[HEADING]" emitted by the regex fallback.
    private static final Pattern HEADING_MARKER_PATTERN = Pattern.compile(
            "\\[HEADING( level=\\d+)?\\]");
    // Shared prefix of both marker forms; used for the cheap contains() probe.
    private static final String HEADING_MARKER_PREFIX = "[HEADING";

    // CommonMark parser (thread-safe, reusable).
    private static final Parser mdParser = Parser.builder().build();

    // Sample Chinese stop words. Currently unused by the visible pipeline;
    // kept as a placeholder for a real segmentation/stop-word stage.
    private static final Set<String> CN_STOP_WORDS = new HashSet<>(Arrays.asList(
            "的", "是", "在", "了", "和", "有", "就", "也", "这个"));

    private TextChunker() {
        // Utility class: no instances.
    }

    /**
     * Full text-preprocessing pipeline: format normalization (Markdown or HTML),
     * content cleanup, and Chinese-specific handling.
     *
     * @param text raw input; may be {@code null} or blank
     * @return cleaned text, or {@code ""} for null/blank input
     */
    public static String preprocessContent(String text) {
        if (StringUtils.isBlank(text)) return "";

        // 1. Format normalization: strip/annotate Markdown, or strip HTML tags.
        String processed = isMarkdown(text) ? processMarkdown(text) : cleanHtml(text);

        // 2. Content cleanup (control chars, whitespace), preserving number formats.
        processed = cleanContent(processed);

        // 3. Chinese-specific punctuation spacing.
        if (containsChinese(processed)) {
            processed = processChinese(processed);
        }

        return processed;
    }

    /**
     * Preprocesses {@code text} and splits it into chunks of at most
     * {@code chunkSize} characters, preferring heading and sentence boundaries.
     *
     * @param text      raw input text
     * @param chunkSize maximum chunk length in characters; must be positive
     * @return list of non-blank chunks (empty for blank input)
     * @throws IllegalArgumentException if {@code chunkSize <= 0}
     */
    public static List<String> chunk(String text, int chunkSize) {
        String processed = preprocessContent(text);
        return chunkProcessor(processed, chunkSize);
    }

    // ================ Preprocessing helpers ================ //

    private static String cleanHtml(String text) {
        // Strip tags, then named ("&amp;") and numeric ("&#39;") character entities.
        return text.replaceAll("<[^>]+>", "")
                .replaceAll("&#?[a-zA-Z0-9]+;", "");
    }

    private static String cleanContent(String text) {
        // Shield formatted numbers (e.g. "1,234", "3.5%") behind placeholders so
        // the whitespace/control-character cleanup below cannot mangle them.
        // Sequence-numbered placeholders are guaranteed unique even when the same
        // number occurs repeatedly (the previous nanoTime-based scheme could
        // collide and re-scanned/re-replaced the whole string per match).
        Map<String, String> numPlaceholders = new HashMap<>();
        StringBuffer masked = new StringBuffer();
        Matcher numMatcher = SPECIAL_NUM_PATTERN.matcher(text);
        int seq = 0;
        while (numMatcher.find()) {
            String placeholder = "NUM_" + seq++ + "_";
            numPlaceholders.put(placeholder, numMatcher.group());
            numMatcher.appendReplacement(masked, Matcher.quoteReplacement(placeholder));
        }
        numMatcher.appendTail(masked);

        // Basic cleanup: drop control characters, collapse whitespace runs.
        String cleaned = masked.toString()
                .replaceAll("[\\x00-\\x1F\\x7F-\\x9F]", "")
                .replaceAll("\\s+", " ")
                .trim();

        // Restore the protected numbers.
        for (Map.Entry<String, String> entry : numPlaceholders.entrySet()) {
            cleaned = cleaned.replace(entry.getKey(), entry.getValue());
        }
        return cleaned;
    }

    private static String processChinese(String text) {
        try {
            // Placeholder for a real word-segmentation library: pads punctuation
            // (except '#') with spaces, then collapses whitespace runs.
            return text.replaceAll("([\\p{P}&&[^#]])", " $1 ")
                    .replaceAll("\\s+", " ");
        } catch (Exception e) {
            // Best effort: on any regex/processing failure, keep the input as-is.
            logger.error("中文处理异常", e);
            return text;
        }
    }

    // ================ Markdown handling ================ //

    private static boolean isMarkdown(String text) {
        return MD_PATTERN.matcher(text).find();
    }

    private static String processMarkdown(String text) {
        try {
            Node document = mdParser.parse(text);
            MarkdownVisitor visitor = new MarkdownVisitor();
            document.accept(visitor);
            return visitor.getCleanedText();
        } catch (Exception e) {
            // Degrade gracefully to the regex-based cleaner.
            logger.error("Markdown解析失败，使用降级方案", e);
            return fallbackMdClean(text);
        }
    }

    private static String fallbackMdClean(String text) {
        // (?s) lets the fence pattern cross newlines; (?m) anchors the heading
        // '#' at every line start. Without these flags multi-line fences never
        // matched and only a heading on the very first line was rewritten.
        return text.replaceAll("(?s)```(.*?)\\s*```", "$1")
                .replaceAll("\\*\\*|__", "")
                .replaceAll("(?m)^#{1,6}\\s*", "[HEADING] ")
                .replaceAll("\\[(.*?)\\]\\(.*?\\)", "$1");
    }

    /**
     * CommonMark visitor that flattens a Markdown AST into plain text with
     * structural markers ("[HEADING level=n]", "[CODE_BEGIN]/[CODE_END]").
     */
    private static class MarkdownVisitor extends AbstractVisitor {
        private final StringBuilder builder = new StringBuilder();

        @Override
        public void visit(Heading heading) {
            builder.append("\n[HEADING level=")
                    .append(heading.getLevel())
                    .append("] ");
            visitChildren(heading);
            builder.append("\n");
        }

        @Override
        public void visit(FencedCodeBlock codeBlock) {
            // Fenced-code content lives in the node's literal, NOT in child Text
            // nodes; the previous visitChildren() call silently dropped all code.
            builder.append("\n[CODE_BEGIN lang=")
                    .append(codeBlock.getInfo())
                    .append("]\n")
                    .append(codeBlock.getLiteral())
                    .append("\n[CODE_END]\n");
        }

        @Override
        public void visit(IndentedCodeBlock codeBlock) {
            // Indented code blocks are literal-based as well.
            builder.append("\n[CODE_BEGIN lang=]\n")
                    .append(codeBlock.getLiteral())
                    .append("\n[CODE_END]\n");
        }

        @Override
        public void visit(Code code) {
            // Inline code spans carry their text in the literal, not in children.
            builder.append(code.getLiteral());
        }

        @Override
        public void visit(Link link) {
            builder.append("[");
            visitChildren(link);
            builder.append("](").append(link.getDestination()).append(")");
        }

        @Override
        public void visit(Text text) {
            // Collapse intra-text whitespace runs outside code (code text never
            // reaches here — see the literal-based visits above).
            builder.append(text.getLiteral().replaceAll("\\s+", " "));
        }

        public String getCleanedText() {
            return builder.toString()
                    .replaceAll("\\n{3,}", "\n\n")
                    .trim();
        }
    }

    // ================ Chunking core ================ //

    private static List<String> chunkProcessor(String text, int chunkSize) {
        if (chunkSize <= 0) {
            throw new IllegalArgumentException("chunkSize must be positive: " + chunkSize);
        }
        if (text.isEmpty()) return Collections.emptyList();
        if (text.length() <= chunkSize) return Collections.singletonList(text);

        // Probe with the shared prefix so BOTH marker forms are detected.
        // The previous contains("[HEADING]") check never matched the visitor's
        // "[HEADING level=n]" output, making heading-based chunking unreachable.
        List<String> chunks = text.contains(HEADING_MARKER_PREFIX)
                ? chunkByHeadings(text, chunkSize)
                : chunkBySentences(text, chunkSize);

        // Post-process: coalesce undersized chunks.
        return mergeSmallChunks(chunks, chunkSize);
    }

    private static List<String> chunkByHeadings(String text, int chunkSize) {
        List<String> chunks = new ArrayList<>();
        String[] sections = HEADING_MARKER_PATTERN.split(text);

        for (String section : sections) {
            if (section.isBlank()) continue;

            if (section.length() > chunkSize) {
                // Section too large for one chunk: fall back to sentence chunking.
                chunks.addAll(chunkBySentences(section, chunkSize));
            } else {
                chunks.add(section.trim());
            }
        }
        return chunks;
    }

    private static List<String> chunkBySentences(String text, int chunkSize) {
        List<String> sentences = Arrays.stream(SENTENCE_PATTERN.split(text))
                .filter(s -> !s.isBlank())
                .collect(Collectors.toList());

        List<String> chunks = new ArrayList<>();
        StringBuilder current = new StringBuilder();

        for (String sentence : sentences) {
            if (current.length() + sentence.length() > chunkSize && current.length() > 0) {
                chunks.add(current.toString().trim());
                current.setLength(0);
            }
            if (sentence.length() > chunkSize) {
                // A single sentence larger than the budget: hard-split so no
                // emitted chunk ever exceeds chunkSize (previously such
                // sentences produced oversized chunks).
                for (int start = 0; start < sentence.length(); start += chunkSize) {
                    int end = Math.min(start + chunkSize, sentence.length());
                    String piece = sentence.substring(start, end).trim();
                    if (!piece.isEmpty()) chunks.add(piece);
                }
                continue;
            }
            current.append(sentence).append(" ");
        }
        if (current.length() > 0) chunks.add(current.toString().trim());

        return chunks;
    }

    private static List<String> mergeSmallChunks(List<String> chunks, int chunkSize) {
        List<String> merged = new ArrayList<>();
        StringBuilder current = new StringBuilder();

        for (String chunk : chunks) {
            // Greedily pack consecutive chunks up to 80% of the budget; the
            // slack leaves room for the joining newlines and downstream markup.
            if (current.length() + chunk.length() < chunkSize * 0.8) {
                current.append(chunk).append("\n");
            } else {
                if (current.length() > 0) merged.add(current.toString().trim());
                current.setLength(0);
                current.append(chunk).append("\n");
            }
        }
        if (current.length() > 0) merged.add(current.toString().trim());

        return merged;
    }

    // ================ Utilities ================ //

    private static boolean containsChinese(String text) {
        return HAN_PATTERN.matcher(text).find();
    }
}