package org.example;

import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.commons.lang3.StringUtils;
import org.apache.tika.exception.TikaException;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.microsoft.ooxml.OOXMLParser;
import org.apache.tika.sax.BodyContentHandler;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xml.sax.SAXException;

import java.io.*;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static java.lang.Math.min;

/**
 * RAG准备工具类
 * 用于处理文档文件，提取内容并生成向量嵌入，然后存储到数据库中
 */
public class RagPreparer {
    // Class-wide logger.
    private static final Logger logger = LoggerFactory.getLogger(RagPreparer.class);
    
    // MySQL connection settings, loaded from the external configuration.
    private static final String DB_URL = ConfigLoader.getProperty("database.url");
    private static final String DB_USER = ConfigLoader.getProperty("database.username");
    private static final String DB_PASSWORD = ConfigLoader.getProperty("database.password");
    
    // Manticore Search HTTP endpoint; defaults to the local instance.
    private static final String MANTICORE_URL = ConfigLoader.getProperty("manticore.url", "http://localhost:9308");
    
    // HikariCP pool, built in the static initializer below and closed in main().
    private static final HikariDataSource dataSource;

    // Static initializer: configure and build the HikariCP connection pool
    // once at class-load time. All timeout values are in milliseconds.
    static {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl(DB_URL);
        config.setUsername(DB_USER);
        config.setPassword(DB_PASSWORD);
        config.setMaximumPoolSize(10);           // at most 10 pooled connections
        config.setMinimumIdle(2);                // keep at least 2 idle connections ready
        config.setConnectionTimeout(30000);      // wait up to 30s to obtain a connection
        config.setIdleTimeout(600000);           // retire connections idle longer than 10 min
        config.setMaxLifetime(1800000);          // recycle connections after 30 min
        config.setLeakDetectionThreshold(60000); // warn when a connection is held > 60s
        
        dataSource = new HikariDataSource(config);
    }

    // Ollama embedding endpoint and model name, loaded from configuration.
    private static final String OLLAMA_URL = ConfigLoader.getProperty("ollama.url");
    private static final String MODEL_NAME = ConfigLoader.getProperty("ollama.model");

    // Retry policy for embedding requests: up to MAX_RETRIES attempts with a
    // linear back-off of RETRY_DELAY_MS * attempt between tries.
    private static final int MAX_RETRIES = 3;
    private static final int RETRY_DELAY_MS = 1000;

    // Embedding timing statistics. Wrapped in a synchronized list because
    // generateEmbedding() appends to it from multiple executor worker threads
    // (see the thread pool in processSingleDocxFile); a bare ArrayList here
    // was a data race.
    private static final List<EmbeddingStat> embeddingStats =
            Collections.synchronizedList(new ArrayList<>());
    private static final Logger statLogger = LoggerFactory.getLogger("EMBEDDING_STAT");
    // Cleared by the shutdown hook to request a cooperative stop.
    private static volatile boolean running = true;

    /**
     * Entry point. Resolves the target directory (configuration first, then
     * the first command-line argument), initializes storage, back-fills
     * historical records, and processes every docx file under the directory.
     *
     * @param args command line arguments; args[0] may name the directory to process
     */
    public static void main(String[] args) {
        // Shutdown hook: flip the cooperative stop flag and dump stats on Ctrl+C.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            running = false;
            logger.info("接收到关闭信号，正在停止...");
            printEmbeddingStats();
        }));

        // Configuration wins over the command line.
        String directoryPath = ConfigLoader.getProperty("processor.directory.path");
        if (args.length > 0 && StringUtils.isEmpty(directoryPath)) {
            directoryPath = args[0];
        }

        if (StringUtils.isEmpty(directoryPath)) {
            logger.error("请在配置文件中指定要处理的目录路径或通过命令行参数指定");
            System.exit(1);
        }

        try {
            // Ensure tables exist before anything touches the database.
            initializeDatabase();

            // Smoke-test the Ollama embedding endpoint up front.
            testOllamaEmbedding();

            // Back-fill metadata for documents stored by earlier versions.
            updateHistoricalDocuments();

            // Main work: walk the directory and ingest every docx file.
            processDocxFiles(directoryPath);

            logger.info("RAG准备工作完成");
            printEmbeddingStats();
        } catch (Exception e) {
            logger.error("处理过程中发生错误", e);
        } finally {
            // Release the connection pool on every exit path.
            if (dataSource != null && !dataSource.isClosed()) {
                dataSource.close();
            }
        }
    }

    /**
     * Creates the {@code documents} and {@code paragraphs} tables if they do
     * not exist yet.
     *
     * @throws SQLException if the DDL cannot be executed
     */
    private static void initializeDatabase() throws SQLException {
        try (Connection conn = dataSource.getConnection();
             Statement stmt = conn.createStatement()) {

            // documents: one row per processed file, keyed by content hash.
            String createDocumentsTable = "CREATE TABLE IF NOT EXISTS documents (" +
                    "id INT AUTO_INCREMENT PRIMARY KEY," +
                    "file_path VARCHAR(255) NOT NULL," +
                    "file_name VARCHAR(255) NOT NULL," +
                    "content_length INT DEFAULT 0," +                // extracted body length
                    "paragraph_count INT DEFAULT 0," +               // merged paragraph count
                    "processing_completed BOOLEAN DEFAULT FALSE," +  // set once ingestion finishes
                    "created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP" +
                    ")";

            // paragraphs: one row per paragraph with its JSON-encoded embedding.
            String createParagraphsTable = "CREATE TABLE IF NOT EXISTS paragraphs (" +
                    "id INT AUTO_INCREMENT PRIMARY KEY," +
                    "document_id INT NOT NULL," +
                    "content TEXT NOT NULL," +
                    "embedding JSON NOT NULL," +
                    "paragraph_number INT NOT NULL," +
                    "created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP," +
                    "FOREIGN KEY (document_id) REFERENCES documents(id)" +
                    ")";

            stmt.execute(createDocumentsTable);
            stmt.execute(createParagraphsTable);
            // Was System.out.println; use the class logger for consistency
            // with every other message in this file.
            logger.info("数据库表初始化完成");
        } catch (SQLException e) {
            logger.error("数据库初始化失败", e);
            throw e;
        }
    }

    /**
     * Walks the given directory recursively and processes every .docx file
     * found. A failure on one file is logged and the walk continues; the
     * cooperative {@code running} flag aborts the loop early on shutdown.
     *
     * @param directoryPath directory to scan
     * @throws IOException              if the directory walk fails
     * @throws SQLException             if database access fails
     * @throws IllegalArgumentException if the path is not a directory
     */
    private static void processDocxFiles(String directoryPath) throws IOException, SQLException {
        Path dirPath = Paths.get(directoryPath);

        if (!Files.isDirectory(dirPath)) {
            throw new IllegalArgumentException("指定的路径不是一个目录: " + directoryPath);
        }

        // Files.walk must be closed; try-with-resources handles that.
        try (Stream<Path> paths = Files.walk(dirPath)) {
            List<Path> docxFiles = paths.filter(Files::isRegularFile)
                    .filter(path -> path.getFileName().toString().toLowerCase().endsWith(".docx"))
                    .toList();

            for (Path path : docxFiles) {
                // Honor the shutdown signal between files.
                if (!running) {
                    logger.info("收到关闭信号，中断处理");
                    break;
                }

                logger.info("正在处理文件: {}", path);
                try {
                    processSingleDocxFile(path);
                } catch (Exception e) {
                    // Keep going: one bad file must not abort the batch.
                    // (A redundant trailing `continue` was removed here.)
                    logger.error("处理文件时出错: " + path + ", 错误: " + e.getMessage(), e);
                }
            }
        }
    }

    /**
     * Computes a stable 64-bit FarmHash fingerprint of the given string.
     * Used as a deterministic primary key for documents and paragraphs so
     * identical content always maps to the same row.
     *
     * @param input the text to fingerprint
     * @return the 64-bit fingerprint as a signed long
     */
    public static long farmHash64(String input) {
        return Hashing.farmHashFingerprint64()
                .hashUnencodedChars(input)
                .asLong();
    }
    
    /**
     * Checks whether a paragraph with the given id is already stored.
     *
     * @param paragraphId paragraph id (FarmHash64 of the content)
     * @return true if a row with that id exists, false otherwise
     * @throws SQLException if the lookup fails
     */
    private static boolean isParagraphExists(long paragraphId) throws SQLException {
        try (Connection conn = dataSource.getConnection();
             PreparedStatement pstmt = conn.prepareStatement(
                     "SELECT id FROM paragraphs WHERE id = ?")) {

            pstmt.setLong(1, paragraphId);
            try (ResultSet rs = pstmt.executeQuery()) {
                // A returned row means the paragraph exists. The previous code
                // compared the fetched id against 0, which would misreport a
                // legitimately stored id of 0 as absent.
                return rs.next();
            }
        } catch (SQLException e) {
            logger.error("检查段落存在性时发生错误", e);
            throw e;
        }
    }

    /**
     * Processes a single docx file end to end: extracts the text with Tika,
     * splits and merges it into paragraphs, generates an embedding for each
     * paragraph on a small thread pool, and finally marks the document as
     * processed.
     *
     * NOTE(review): the ParagraphEmbedding results collected from the futures
     * are currently discarded — both insertParagraph calls below are commented
     * out, so no paragraph rows are persisted by this method. Confirm whether
     * that is intentional.
     *
     * @param docxPath path of the docx file to process
     * @throws IOException   on file I/O failure
     * @throws TikaException on Tika parse failure
     * @throws SAXException  on XML parse failure
     * @throws SQLException  on database failure
     */
    private static void processSingleDocxFile(Path docxPath) throws IOException, TikaException, SAXException, SQLException {
        String filePath = docxPath.toAbsolutePath().toString();
        String fileName = docxPath.getFileName().toString();

        logger.info("开始处理文件: " + fileName);

        // Extract the full text; null/empty content means unreadable or empty file.
        String content = extractDocxContent(docxPath.toFile());
        if (StringUtils.isEmpty( content)){
            logger.info("文件 " + fileName + " 为空");
            return;
        }
        logger.info("文件内容: " + content.substring(0, min(100, content.length())));

        // Split into natural paragraphs.
        List<String> originalParagraphs = splitIntoParagraphs(content);

        // Merge short paragraphs into larger chunks (threshold logic in mergeShortParagraphs).
        List<String> mergedParagraphs = mergeShortParagraphs(originalParagraphs);

        logger.info("原始段落数量: " + originalParagraphs.size() + ", 合并后段落数量: " + mergedParagraphs.size());

        if (mergedParagraphs.isEmpty()) {
            logger.info("文件 " + fileName + " 中没有可处理的段落");
            return;
        }

        // The document id is the FarmHash64 fingerprint of the full content,
        // so re-processing identical content maps onto the same row.
        long documentId = farmHash64(content);
        documentId = insertDocument(filePath, fileName, documentId);

        // Skip documents already flagged processing_completed.
        if (isDocumentProcessed(documentId)) {
            logger.info("文档已处理完成，跳过处理");
            return;
        }

        // Two worker threads generate embeddings in parallel.
        ExecutorService executorService = Executors.newFixedThreadPool(2);
        List<Future<ParagraphEmbedding>> futures = new ArrayList<>();

        // Submit one embedding task per merged paragraph.
        for (int i = 0; i < mergedParagraphs.size(); i++) {
            String paragraph = mergedParagraphs.get(i).trim();
            if (paragraph.trim().isEmpty()) continue;
            if (paragraph.length() <= 2) continue;

            final int paragraphIndex = i;
            final String paragraphText = paragraph;
            final long finalDocumentId = documentId; // effectively-final copy for the lambda
            
            Future<ParagraphEmbedding> future = executorService.submit(() -> {
                logger.info("-----处理段落 " + (paragraphIndex + 1) + "/" + mergedParagraphs.size() + ":" + paragraphText.substring(0, min(100, paragraphText.length())));
                long paragraphId = farmHash64(paragraphText);
                
                // Skip paragraphs whose fingerprint is already stored.
                if (isParagraphExists(paragraphId)) {
                    logger.info("段落已存在，跳过处理");
                    return null;
                }
                
                // Generate the embedding with retries and linear back-off.
                float[] embedding = null;
                for (int retry = 0; retry < MAX_RETRIES; retry++) {
                    try {
                        embedding = generateEmbedding(paragraphText);
                        if (embedding != null) {
                            break;
                        }
                    } catch (IOException e) {
                        logger.warn("生成向量时发生错误（尝试 {}/{}）: {}", retry + 1, MAX_RETRIES, e.getMessage());
                        if (retry < MAX_RETRIES - 1) {
                            try {
                                TimeUnit.MILLISECONDS.sleep(RETRY_DELAY_MS * (retry + 1));
                            } catch (InterruptedException ie) {
                                Thread.currentThread().interrupt();
                                throw new IOException("重试被中断", ie);
                            }
                        }
                    }
                }
                
                if (embedding == null) {
                    logger.info("向量生成失败，跳过段落");
                    return null;
                }
                
                // NOTE(review): persistence is disabled — the insert below is
                // commented out, so the computed embedding is only returned.
//                try {
//                    insertParagraph(finalDocumentId, paragraphId, paragraphText, embedding, paragraphIndex + 1);
//                } catch (SQLException e) {
//                    logger.error("插入段落时发生错误", e);
//                }
                
                return new ParagraphEmbedding(paragraphId, paragraphText, embedding, paragraphIndex + 1);
            });
            
            futures.add(future);
        }

        // Collect results. NOTE(review): the results are not saved — the
        // insertParagraph call here is also commented out (see method javadoc).
        for (Future<ParagraphEmbedding> future : futures) {
            try {
                ParagraphEmbedding paragraphEmbedding = future.get();
                if (paragraphEmbedding != null) {
//                    insertParagraph(documentId, paragraphEmbedding.paragraphId,
//                                  paragraphEmbedding.content, paragraphEmbedding.embedding,
//                                  paragraphEmbedding.paragraphNumber);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                logger.warn("处理线程被中断", e);
                break;
            } catch (ExecutionException e) {
                logger.error("执行段落处理任务时出错", e);
            }
            
            // Stop collecting early on a shutdown request.
            if (!running) {
                logger.info("收到关闭信号，中断处理");
                break;
            }
        }

        // Orderly shutdown with a 60-second grace period.
        executorService.shutdown();
        try {
            if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
                executorService.shutdownNow();
            }
        } catch (InterruptedException e) {
            executorService.shutdownNow();
            Thread.currentThread().interrupt();
        }

        // Record completion only if processing was not interrupted.
        if (running) {
            updateDocumentProcessingStatus(documentId, content.length(), mergedParagraphs.size());
            logger.info("文件 " + fileName + " 处理完成，共处理 " + mergedParagraphs.size() + " 个段落");
        } else {
            logger.info("文件 " + fileName + " 处理被中断");
        }
    }

    /**
     * Merges short paragraphs: paragraphs are accumulated (joined with a
     * newline) until the combined length exceeds 100 characters, then emitted
     * as one merged paragraph. The trailing remainder is emitted even if it
     * is below the threshold.
     *
     * @param originalParagraphs the raw paragraph list
     * @return merged paragraph list; never null
     */
    private static List<String> mergeShortParagraphs(List<String> originalParagraphs) {
        return mergeShortParagraphs(originalParagraphs, 100);
    }

    /**
     * Threshold-parameterized variant of {@link #mergeShortParagraphs(List)},
     * generalizing the previously hard-coded 100-character limit.
     *
     * @param originalParagraphs the raw paragraph list
     * @param minLength          accumulate until the merged text is longer than this
     * @return merged paragraph list; never null
     */
    private static List<String> mergeShortParagraphs(List<String> originalParagraphs, int minLength) {
        List<String> merged = new ArrayList<>();
        StringBuilder buffer = new StringBuilder();

        for (String paragraph : originalParagraphs) {
            String trimmed = paragraph.trim();
            // Skip blank or near-empty fragments (covers the empty case too).
            if (trimmed.length() <= 2) {
                continue;
            }

            // Append to the running buffer, newline-separated.
            if (buffer.length() > 0) {
                buffer.append("\n");
            }
            buffer.append(trimmed);

            // Emit once the accumulated text is long enough.
            if (buffer.length() > minLength) {
                merged.add(buffer.toString());
                buffer.setLength(0);
            }
        }

        // Flush the trailing remainder even if it is below the threshold.
        if (buffer.length() > 0) {
            merged.add(buffer.toString());
        }

        return merged;
    }

    /**
     * Extracts the plain-text body of a docx file using Tika's OOXML parser.
     * Returns null (best-effort) when parsing fails; callers treat null as
     * "empty/unreadable file" and skip it.
     *
     * @param docxFile the docx file to read
     * @return the extracted text, or null if parsing failed
     * @throws IOException   declared for interface stability (failures are logged and mapped to null)
     * @throws TikaException declared for interface stability
     * @throws SAXException  declared for interface stability
     */
    private static String extractDocxContent(File docxFile) throws IOException, TikaException, SAXException {
        // try-with-resources replaces the manual finally/close bookkeeping
        // and the unused pre-initialized handler variable.
        try (InputStream inputStream = new FileInputStream(docxFile)) {
            BodyContentHandler handler = new BodyContentHandler(-1); // -1 = no content length limit
            Metadata metadata = new Metadata();
            ParseContext pcontext = new ParseContext();

            new OOXMLParser().parse(inputStream, handler, metadata, pcontext);
            return handler.toString();
        } catch (Exception e) {
            logger.error("解析文件时出错: " + docxFile.getAbsolutePath(), e);
            return null;
        }
    }

    /**
     * Splits extracted text into natural paragraphs. If the content parses as
     * HTML containing {@code <p>} elements, those element texts are used;
     * otherwise the text is split on blank lines with internal whitespace
     * collapsed. Errors are logged and yield whatever was collected so far.
     *
     * @param content the text to split
     * @return list of non-empty paragraph strings; never null
     */
    private static List<String> splitIntoParagraphs(String content) {
        List<String> result = new ArrayList<>();

        try {
            Document parsed = Jsoup.parse(content);
            Elements paragraphTags = parsed.select("p");

            if (paragraphTags.isEmpty()) {
                // Plain text: blank lines delimit paragraphs.
                for (String chunk : content.split("\\n\\s*\\n")) {
                    String text = chunk.trim().replaceAll("\\s+", " ");
                    if (!text.isEmpty()) {
                        result.add(text);
                    }
                }
            } else {
                // HTML-ish content: take each <p> element's text.
                for (Element tag : paragraphTags) {
                    String text = tag.text().trim();
                    if (!text.isEmpty()) {
                        result.add(text);
                    }
                }
            }
        } catch (Exception e) {
            logger.error("分割段落时出错", e);
        }

        return result;
    }

    /**
     * Requests an embedding vector for the given text from the Ollama HTTP
     * API. Certain error statuses (500, 405, and 400 with an
     * "invalid character" body) are treated as unrecoverable for this text
     * and yield null; other non-200 statuses throw so the caller's retry
     * loop can back off and try again.
     *
     * @param text the text to embed
     * @return the embedding vector, or null when Ollama rejects this text
     * @throws IOException on transport failure, interruption, or a retryable non-200 status
     */
    private static float[] generateEmbedding(String text) throws IOException {
        long startTime = System.currentTimeMillis();
        int textLength = text.length();

        // Escape the backslash FIRST, then characters whose escape sequences
        // introduce backslashes. The previous order never escaped backslashes
        // at all (and skipped \r and \t), producing invalid JSON payloads for
        // text containing those characters.
        String escaped = text.replace("\\", "\\\\")
                .replace("\"", "\\\"")
                .replace("\n", "\\n")
                .replace("\r", "\\r")
                .replace("\t", "\\t");
        String jsonInput = String.format(
                "{\"model\": \"%s\", \"input\": \"%s\"}",
                MODEL_NAME,
                escaped
        );

        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(OLLAMA_URL))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(jsonInput))
                .build();

        try {
            HttpResponse<String> response = client.send(
                    request, HttpResponse.BodyHandlers.ofString(StandardCharsets.UTF_8)
            );

            // Record timing stats regardless of outcome.
            long duration = System.currentTimeMillis() - startTime;
            embeddingStats.add(new EmbeddingStat(textLength, duration));

            int status = response.statusCode();
            if (status == 500) {
                logger.warn("Ollama返回 500");
                return null;
            }
            if (status == 405) {
                // (A second, unreachable 405 branch was removed here: it was
                // shadowed by this check.)
                logger.warn("Ollama返回405");
                return null;
            }
            if (status == 400 && response.body().contains("invalid character")) {
                logger.warn("Ollama返回无效字符错误，跳过该段落");
                return null;
            }
            if (status != 200) {
                logger.warn("Ollama请求失败，状态码: " + response.statusCode() + ", 响应: " + response.body());
                throw new IOException("Ollama请求失败，状态码: " + response.statusCode() + ", 响应: " + response.body());
            }

            // NOTE(review): logging every full response at WARN is noisy;
            // consider demoting to DEBUG.
            logger.warn("Ollama返回: " + response.body());
            return parseEmbeddingFromJson(response.body());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException("请求被中断", e);
        }
    }

    /**
     * Parses the embedding vector out of an Ollama /embeddings response of
     * the form {@code {"embedding": [...]}} using plain string slicing.
     *
     * NOTE(review): this appears unused — generateEmbedding calls
     * parseEmbeddingFromJson (defined elsewhere in this file) instead.
     *
     * @param responseBody the raw response body
     * @return the parsed vector, or null if the body cannot be parsed
     */
    private static float[] parseEmbeddingResponse(String responseBody) {
        try {
            // Guard against a missing "embedding" key: previously indexOf()
            // returning -1 silently became index 11 and could latch onto an
            // unrelated '[' elsewhere in the body.
            int keyIndex = responseBody.indexOf("\"embedding\":");
            if (keyIndex == -1) {
                logger.error("无法解析embedding数组: " + responseBody);
                return null;
            }

            int arrayStart = responseBody.indexOf("[", keyIndex + 12);
            int arrayEnd = responseBody.lastIndexOf("]");
            if (arrayStart == -1 || arrayEnd == -1 || arrayEnd <= arrayStart) {
                logger.error("无法解析embedding数组: " + responseBody);
                return null;
            }

            String inner = responseBody.substring(arrayStart + 1, arrayEnd).trim();
            if (inner.isEmpty()) {
                // An empty vector is unusable; treat like a parse failure.
                logger.error("无法解析embedding数组: " + responseBody);
                return null;
            }

            String[] parts = inner.split(",");
            float[] embedding = new float[parts.length];
            for (int i = 0; i < parts.length; i++) {
                embedding[i] = Float.parseFloat(parts[i].trim());
            }
            return embedding;
        } catch (Exception e) {
            logger.error("解析Ollama响应时出错: " + responseBody, e);
            return null;
        }
    }

    /**
     * Inserts a document row with an explicitly supplied id (the FarmHash64
     * of the file content). A duplicate-key error (MySQL code 1062) means the
     * document was already ingested and simply returns the same id.
     *
     * @param filePath document file path
     * @param fileName document file name
     * @param id       explicit document id (content fingerprint)
     * @return the document id (always the supplied fingerprint)
     * @throws SQLException if the insert fails for any reason other than a duplicate key
     */
    private static long insertDocument(String filePath, String fileName, long id) throws SQLException {
        try (Connection conn = dataSource.getConnection();
             PreparedStatement pstmt = conn.prepareStatement(
                     "INSERT INTO documents (file_path, file_name,id) VALUES (?, ?, ?)")) {

            pstmt.setString(1, filePath);
            pstmt.setString(2, fileName);
            pstmt.setLong(3, id);
            try {
                pstmt.executeUpdate();
            } catch (SQLException e) {
                if (e.getErrorCode() == 1062) { // MySQL duplicate-key
                    logger.info("文档已存在，跳过插入");
                    return id;
                }
                logger.info("插入文档失败: " + e.getErrorCode());
                logger.error("插入文档时发生错误", e);
                throw e;
            }

            // The id is supplied explicitly, so return it directly. The old
            // code read getGeneratedKeys(), which is empty for explicit-id
            // inserts on MySQL, making the success path throw "无法获取文档ID".
            return id;
        } catch (SQLException e) {
            logger.error("数据库操作失败", e);
            throw e;
        }
    }

    /**
     * Persists one paragraph row (content plus its embedding serialized as a
     * JSON array string) into MySQL, then mirrors it into Manticore Search.
     * A duplicate-key error (MySQL code 1062) is treated as "already stored"
     * and ignored; a Manticore failure is logged but does not fail the call.
     *
     * @param documentId      owning document id
     * @param paragraphId     paragraph id (FarmHash64 of the content)
     * @param content         paragraph text
     * @param embedding       embedding vector
     * @param paragraphNumber 1-based position within the document
     * @throws SQLException if the MySQL insert fails for any other reason
     */
    private static void insertParagraph(long documentId,  long paragraphId ,String content, float[] embedding, int paragraphNumber) throws SQLException {
        // Serialize the vector as a JSON array literal, e.g. [0.1,0.2].
        StringBuilder json = new StringBuilder("[");
        String separator = "";
        for (float component : embedding) {
            json.append(separator).append(component);
            separator = ",";
        }
        String embeddingJson = json.append("]").toString();

        try (Connection conn = dataSource.getConnection();
             PreparedStatement insert = conn.prepareStatement(
                     "INSERT INTO paragraphs (document_id, content, embedding, paragraph_number,id) " +
                             "VALUES (?, ?, ?, ?, ?)")) {

            insert.setLong(1, documentId);
            insert.setString(2, content);
            insert.setString(3, embeddingJson);
            insert.setInt(4, paragraphNumber);
            insert.setLong(5, paragraphId);
            try {
                insert.executeUpdate();
            } catch (SQLException e) {
                if (e.getErrorCode() == 1062) {
                    logger.info("段落已存在，跳过插入");
                    return;
                }
                logger.info("插入段落失败: " + e.getErrorCode());
                logger.error("插入段落时发生错误", e);
                throw e;
            }
        } catch (SQLException e) {
            logger.error("数据库操作失败", e);
            throw e;
        }

        // Best-effort mirror into Manticore Search; failures are only logged.
        try {
            insertParagraphToManticore(documentId, paragraphId, content, embedding, paragraphNumber);
        } catch (Exception e) {
            logger.error("存储段落到Manticore Search时出错", e);
        }
    }

    /**
     * Indexes one paragraph (content + embedding vector) into the Manticore
     * Search "paragraphs" index via its JSON HTTP API.
     *
     * @param documentId      owning document id
     * @param paragraphId     paragraph id (FarmHash64 of the content)
     * @param content         paragraph text
     * @param embedding       embedding vector
     * @param paragraphNumber 1-based position within the document
     * @throws IOException          if the HTTP request fails or Manticore returns non-200
     * @throws InterruptedException if the request is interrupted
     */
    private static void insertParagraphToManticore(long documentId, long paragraphId, String content, 
                                                   float[] embedding, int paragraphNumber) throws IOException, InterruptedException {
        StringBuilder jsonBuilder = new StringBuilder();
        jsonBuilder.append("{");
        jsonBuilder.append("\"index\": \"paragraphs\",");
        jsonBuilder.append("\"id\": ").append(paragraphId).append(",");
        jsonBuilder.append("\"doc\": {");
        jsonBuilder.append("\"document_id\": ").append(documentId).append(",");
        // Escape the content fully: the old code only escaped double quotes,
        // so a paragraph containing backslashes or newlines produced an
        // invalid JSON request body.
        jsonBuilder.append("\"content\": \"").append(escapeJsonString(content)).append("\",");
        jsonBuilder.append("\"paragraph_number\": ").append(paragraphNumber).append(",");
        
        // Embedding vector as a JSON array.
        jsonBuilder.append("\"embedding\": [");
        for (int i = 0; i < embedding.length; i++) {
            if (i > 0) jsonBuilder.append(",");
            jsonBuilder.append(embedding[i]);
        }
        jsonBuilder.append("]");
        
        jsonBuilder.append("}}");
        
        String jsonInput = jsonBuilder.toString();
        
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(MANTICORE_URL + "/json/index"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(jsonInput))
                .build();

        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString(StandardCharsets.UTF_8));
        
        if (response.statusCode() != 200) {
            logger.warn("Manticore Search请求失败，状态码: " + response.statusCode() + ", 响应: " + response.body());
            throw new IOException("Manticore Search请求失败，状态码: " + response.statusCode());
        }
        
        logger.info("段落数据成功存储到Manticore Search，段落ID: " + paragraphId);
    }

    /** Escapes backslashes, quotes, and common control characters for use inside a JSON string literal. */
    private static String escapeJsonString(String s) {
        return s.replace("\\", "\\\\")
                .replace("\"", "\\\"")
                .replace("\n", "\\n")
                .replace("\r", "\\r")
                .replace("\t", "\\t");
    }

    /**
     * Reports whether the given document has already been fully processed
     * (its {@code processing_completed} flag is set). Missing rows count as
     * not processed.
     *
     * @param documentId document id to look up
     * @return true if the document row exists and is marked completed
     * @throws SQLException if the lookup fails
     */
    private static boolean isDocumentProcessed(long documentId) throws SQLException {
        final String sql = "SELECT processing_completed FROM documents WHERE id = ?";
        try (Connection conn = dataSource.getConnection();
             PreparedStatement query = conn.prepareStatement(sql)) {

            query.setLong(1, documentId);
            try (ResultSet rs = query.executeQuery()) {
                // No row -> false; otherwise return the stored flag.
                return rs.next() && rs.getBoolean("processing_completed");
            }
        } catch (SQLException e) {
            logger.error("检查文档处理状态时发生错误", e);
            throw e;
        }
    }

    /**
     * Marks a document as fully processed and records its extracted content
     * length and merged paragraph count.
     *
     * @param documentId     id of the document row to update
     * @param contentLength  extracted body length in characters
     * @param paragraphCount number of merged paragraphs
     * @throws SQLException if the update statement fails
     */
    private static void updateDocumentProcessingStatus(long documentId, int contentLength, int paragraphCount) throws SQLException {
        final String sql =
                "UPDATE documents SET content_length = ?, paragraph_count = ?, processing_completed = ? WHERE id = ?";
        try (Connection conn = dataSource.getConnection();
             PreparedStatement update = conn.prepareStatement(sql)) {

            update.setInt(1, contentLength);
            update.setInt(2, paragraphCount);
            update.setBoolean(3, true);
            update.setLong(4, documentId);

            // Zero affected rows means the document id was never inserted.
            if (update.executeUpdate() > 0) {
                logger.info("文档处理状态更新成功: contentLength={}, paragraphCount={}", contentLength, paragraphCount);
            } else {
                logger.warn("文档处理状态更新失败，未找到文档ID: {}", documentId);
            }
        } catch (SQLException e) {
            logger.error("更新文档处理状态时发生错误", e);
            throw e;
        }
    }

    /**
     * Back-fills metadata (content length, paragraph count, completion flag)
     * for historical document rows whose columns are unset, and re-indexes
     * their stored paragraphs into Manticore Search.
     *
     * Note: files that are missing, unreadable, or not docx get a content
     * length of 0 but are still marked completed — confirm that is intended.
     *
     * @throws SQLException if the select/update statements fail
     */
    private static void updateHistoricalDocuments() throws SQLException {
        // (An unused plain Statement that was also opened here has been removed.)
        try (Connection conn = dataSource.getConnection();
             PreparedStatement selectPstmt = conn.prepareStatement(
                     "SELECT d.id, d.file_path, COUNT(p.id) as paragraph_count " +
                             "FROM documents d LEFT JOIN paragraphs p ON d.id = p.document_id " +
                             "WHERE d.content_length = 0 OR d.paragraph_count = 0 OR d.processing_completed = FALSE " +
                             "GROUP BY d.id, d.file_path");
             PreparedStatement updatePstmt = conn.prepareStatement(
                     "UPDATE documents SET content_length = ?, paragraph_count = ?, processing_completed = ? WHERE id = ?")) {

            try (ResultSet rs = selectPstmt.executeQuery()) {
                int updatedCount = 0;
                while (rs.next()) {
                    long documentId = rs.getLong("id");
                    String filePath = rs.getString("file_path");
                    int paragraphCount = rs.getInt("paragraph_count");

                    try {
                        // Re-read the source file (if still present) to recover the body length.
                        String content = "";
                        File file = new File(filePath);
                        if (file.exists() && file.canRead() && filePath.toLowerCase().endsWith(".docx")) {
                            content = extractDocxContent(file);
                        } else {
                            logger.warn("文件不存在、不可读或不是docx文件: {}", filePath);
                        }

                        updatePstmt.setInt(1, content != null ? content.length() : 0);
                        updatePstmt.setInt(2, paragraphCount);
                        updatePstmt.setBoolean(3, true);
                        updatePstmt.setLong(4, documentId);

                        updatePstmt.executeUpdate();
                        updatedCount++;

                        // Mirror this document's stored paragraphs into Manticore.
                        processHistoricalParagraphs(documentId);
                    } catch (Exception e) {
                        // One bad document must not abort the whole back-fill.
                        logger.warn("更新文档信息时出错，文档ID: " + documentId + ", 错误: " + e.getMessage(), e);
                    }
                }
                logger.info("成功更新 {} 条历史文档记录", updatedCount);
            }
        } catch (SQLException e) {
            logger.error("更新历史文档记录时发生错误", e);
            throw e;
        }
    }
    
    /**
     * Replays the persisted paragraphs of one document into Manticore Search.
     * Each paragraph row is read from the relational store, its JSON-encoded
     * embedding is decoded, and the pair is forwarded to the search index.
     * A failure on a single paragraph is logged and skipped so the remaining
     * paragraphs of the document are still processed.
     *
     * @param documentId id of the document whose paragraphs are replayed
     * @throws SQLException when the paragraph query itself fails
     */
    private static void processHistoricalParagraphs(long documentId) throws SQLException {
        final String selectSql =
                "SELECT id, content, embedding, paragraph_number FROM paragraphs WHERE document_id = ?";
        // try-with-resources releases the pooled connection and the statement.
        try (Connection connection = dataSource.getConnection();
             PreparedStatement select = connection.prepareStatement(selectSql)) {

            select.setLong(1, documentId);

            try (ResultSet row = select.executeQuery()) {
                int processedCount = 0;
                while (row.next()) {
                    long paragraphId = row.getLong("id");
                    String content = row.getString("content");
                    String embeddingJson = row.getString("embedding");
                    int paragraphNumber = row.getInt("paragraph_number");

                    try {
                        // Decode the stored JSON vector, then push it to Manticore.
                        float[] vector = parseEmbeddingFromJson(embeddingJson);
                        insertParagraphToManticore(documentId, paragraphId, content, vector, paragraphNumber);
                        processedCount++;
                    } catch (Exception e) {
                        // Per-paragraph errors are non-fatal: log and continue.
                        logger.warn("处理历史段落时出错，段落ID: " + paragraphId + ", 错误: " + e.getMessage(), e);
                    }
                }
                logger.info("成功处理 {} 条历史段落记录，文档ID: {}", processedCount, documentId);
            }
        } catch (SQLException e) {
            logger.error("查询历史段落记录时发生错误，文档ID: " + documentId, e);
            throw e;
        }
    }
    
    /**
     * Extracts the first embedding vector from a stored Ollama {@code /api/embed}
     * response, e.g. {@code {"model": "qwen3-embedding:0.6b", "embeddings": [[0.1, ...]]}}.
     * Parsing uses plain index scanning rather than a JSON library, so the input
     * is assumed to be the compact single-embedding response persisted by this
     * application.
     *
     * @param embeddingJson raw response text as stored in the database
     * @return the decoded vector, or an empty array when the input is missing or
     *         malformed — never {@code null}, so callers can index and iterate safely
     */
    private static float[] parseEmbeddingFromJson(String embeddingJson) {
        try {
            // Locate the "embeddings" field of the response.
            int embeddingsStart = embeddingJson.indexOf("\"embeddings\":");
            if (embeddingsStart == -1) {
                logger.error("无法在响应中找到embeddings字段: {}", embeddingJson);
                // Fix: fail consistently with an empty array instead of null so
                // downstream consumers never dereference a null vector.
                return new float[0];
            }

            // Outer array bracket; the first ']' after it closes the inner vector.
            int arrayStart = embeddingJson.indexOf("[", embeddingsStart);
            int arrayEnd = embeddingJson.indexOf("]", arrayStart);

            if (arrayStart == -1 || arrayEnd == -1 || arrayEnd <= arrayStart) {
                logger.error("无法解析embeddings数组: {}", embeddingJson);
                return new float[0];
            }

            // Only the first inner vector is extracted (a single text is embedded).
            int innerArrayStart = embeddingJson.indexOf("[", arrayStart + 1);
            int innerArrayEnd = embeddingJson.indexOf("]", innerArrayStart);

            if (innerArrayStart == -1 || innerArrayEnd == -1 || innerArrayEnd <= innerArrayStart) {
                logger.error("无法解析嵌入向量: {}", embeddingJson);
                return new float[0];
            }

            String[] parts = embeddingJson.substring(innerArrayStart + 1, innerArrayEnd).split(",");
            float[] embedding = new float[parts.length];

            for (int i = 0; i < parts.length; i++) {
                embedding[i] = Float.parseFloat(parts[i].trim());
            }

            return embedding;
        } catch (Exception e) {
            // Covers null input, empty vectors and malformed numbers alike.
            logger.error("解析向量数据时出错: {}", embeddingJson, e);
            return new float[0];
        }
    }

    /**
     * Smoke-tests the Ollama embedding endpoint by embedding a short sample text.
     * Fails fast with a {@link RuntimeException} so startup aborts when the
     * embedding backend is unreachable or returns an empty vector.
     *
     * Fix: the empty-vector {@code RuntimeException} was previously thrown inside
     * the same {@code try} that caught it, so it was logged twice and re-wrapped;
     * only failures of {@code generateEmbedding} itself are wrapped now.
     *
     * @throws RuntimeException when the embedding call fails or yields no data
     */
    private static void testOllamaEmbedding() {
        logger.info("开始测试Ollama的embedding功能");
        float[] embedding;
        try {
            embedding = generateEmbedding("测试文本");
        } catch (Exception e) {
            logger.error("Ollama的embedding功能测试失败: " + e.getMessage(), e);
            throw new RuntimeException("Ollama的embedding功能测试失败", e);
        }
        if (embedding != null && embedding.length > 0) {
            // Parameterized logging avoids eager string concatenation.
            logger.info("Ollama的embedding功能测试成功，向量维度: {}", embedding.length);
        } else {
            logger.error("Ollama的embedding功能测试失败，返回空的向量");
            throw new RuntimeException("Ollama的embedding功能测试失败");
        }
    }

    /**
     * Emits aggregate statistics about all recorded embedding requests (request
     * count, text lengths, latencies, throughput) to both the main logger and
     * the dedicated statistics logger.
     *
     * The report is rendered once and replayed to each logger, removing the
     * previous line-by-line duplication between {@code logger} and
     * {@code statLogger} while preserving the emitted content and order.
     */
    private static void printEmbeddingStats() {
        List<String> report = buildEmbeddingStatsReport();
        for (String line : report) {
            logger.info("{}", line);
        }
        for (String line : report) {
            statLogger.info("{}", line);
        }
    }

    /**
     * Renders the embedding statistics report as a list of lines so that every
     * destination logger receives exactly the same content.
     *
     * @return the report lines; when no statistics exist the report consists of
     *         the header, the zero count and a "no data" line (no trailer)
     */
    private static List<String> buildEmbeddingStatsReport() {
        List<String> lines = new ArrayList<>();
        lines.add("=== 嵌入统计信息 ===");
        lines.add("总请求数: " + embeddingStats.size());

        if (embeddingStats.isEmpty()) {
            lines.add("没有嵌入统计信息");
            return lines;
        }

        long totalTextLength = 0;
        long totalDuration = 0;
        long maxTextLength = 0;
        long minTextLength = Long.MAX_VALUE;
        long maxDuration = 0;
        long minDuration = Long.MAX_VALUE;

        for (EmbeddingStat stat : embeddingStats) {
            totalTextLength += stat.textLength;
            totalDuration += stat.duration;
            maxTextLength = Math.max(maxTextLength, stat.textLength);
            minTextLength = Math.min(minTextLength, stat.textLength);
            maxDuration = Math.max(maxDuration, stat.duration);
            minDuration = Math.min(minDuration, stat.duration);
        }

        double avgTextLength = (double) totalTextLength / embeddingStats.size();
        double avgDuration = (double) totalDuration / embeddingStats.size();
        // Throughput in characters per minute; durations are in milliseconds.
        double charsPerMinute = totalDuration > 0 ? (double) totalTextLength / totalDuration * 60000 : 0;

        lines.add("平均文本长度: " + String.format("%.2f", avgTextLength));
        lines.add("最大文本长度: " + maxTextLength);
        lines.add("最小文本长度: " + minTextLength);
        lines.add("总字符长度: " + totalTextLength);
        lines.add("平均请求耗时: " + String.format("%.2f", avgDuration) + " ms");
        lines.add("最大请求耗时: " + maxDuration + " ms");
        lines.add("最小请求耗时: " + minDuration + " ms");
        lines.add("总时长: " + totalDuration + " ms");
        lines.add("每分钟处理字符长度: " + String.format("%.2f", charsPerMinute));
        lines.add("==================");
        return lines;
    }

    /**
     * Immutable record of a single embedding request, kept for later
     * aggregation by {@code printEmbeddingStats}.
     */
    private static class EmbeddingStat {
        // Number of characters that were sent to the embedding endpoint.
        final int textLength;
        // Request duration, in milliseconds (assumed from how the stats
        // report labels it — confirm at the recording call site).
        final long duration;

        EmbeddingStat(int textLength, long duration) {
            this.duration = duration;
            this.textLength = textLength;
        }
    }
    
    /**
     * Immutable carrier pairing a paragraph with its embedding vector —
     * presumably used to pass results from the embedding step onward
     * (usage is not visible in this part of the file).
     */
    private static class ParagraphEmbedding {
        // Database id of the paragraph row.
        final long paragraphId;
        // Raw paragraph text that was embedded.
        final String content;
        // Embedding vector produced for the content.
        final float[] embedding;
        // Position of the paragraph within its document.
        final int paragraphNumber;

        ParagraphEmbedding(long paragraphId, String content, float[] embedding, int paragraphNumber) {
            this.paragraphNumber = paragraphNumber;
            this.embedding = embedding;
            this.content = content;
            this.paragraphId = paragraphId;
        }
    }
}