package com.sqlcheck.service.impl;

import com.sqlcheck.common.enums.DatabaseType;
import com.sqlcheck.dto.request.UnifiedAIAnalysisRequest;
import com.sqlcheck.dto.response.AIAnalysisResponse;
import com.sqlcheck.dto.response.AIStatementAnalysis;
import com.sqlcheck.entity.CompatibilityRule;
import com.sqlcheck.model.SqlStatement;
import com.sqlcheck.repository.SqlStatementRepository;
import com.sqlcheck.service.SqlBatchProcessorService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;

/**
 * SQL批处理服务实现
 */
@Service
@RequiredArgsConstructor
@Slf4j
public class SqlBatchProcessorServiceImpl implements SqlBatchProcessorService {

    private final SqlStatementRepository sqlStatementRepository;

    // Batch-processing configuration (bound from application properties).

    // Master switch: when false, shouldUseBatchProcessing always answers false.
    @Value("${llm.batching.enabled:true}")
    private boolean batchingEnabled;

    // Upper bound on the estimated token size of one batch (base prompt + statements).
    @Value("${llm.batching.max-tokens-per-batch:3000}")
    private int maxTokensPerBatch;

    // Minimum number of statements before batching is considered worthwhile.
    @Value("${llm.batching.min-statements-for-batching:50}")
    private int minStatementsForBatching;

    // Tokens reserved per batch as head-room for estimation error.
    @Value("${llm.batching.token-safety-margin:500}")
    private int tokenSafetyMargin;

    // Token-estimation heuristics: average characters consumed per LLM token
    // for CJK text and for everything else, respectively.
    private static final double CHINESE_CHARS_PER_TOKEN = 2.0;
    private static final double ENGLISH_CHARS_PER_TOKEN = 0.75;

    /**
     * Decides whether the given SQL text is large enough to warrant batched analysis.
     *
     * @param sqlContent raw SQL text, possibly containing many statements
     * @return {@code true} when batching is enabled and either the statement count
     *         or the estimated token count exceeds its configured threshold
     * @deprecated superseded by the {@code SqlStatement}-list based pipeline
     */
    @Override
    @Deprecated
    public boolean shouldUseBatchProcessing(String sqlContent) {
        boolean blankInput = sqlContent == null || sqlContent.trim().isEmpty();
        if (blankInput || !batchingEnabled) {
            return false;
        }

        // Count statements and estimate the token footprint of the whole input.
        List<String> statements = extractSqlStatements(sqlContent);
        int statementCount = statements.size();
        int estimatedTokens = estimateTokenCount(sqlContent);

        log.info("批处理检查 - 语句数量: {}, 估算tokens: {}, 阈值: {} 语句, {} tokens",
                statementCount, estimatedTokens, minStatementsForBatching, maxTokensPerBatch);

        boolean statementsOverThreshold = statementCount >= minStatementsForBatching;
        boolean tokensOverThreshold = estimatedTokens > maxTokensPerBatch;
        boolean shouldBatch = statementsOverThreshold || tokensOverThreshold;

        if (shouldBatch) {
            log.info("启用批处理模式 - 原因: {}语句 或 {}tokens 超过阈值",
                    statementsOverThreshold ? statementCount : "未达到",
                    tokensOverThreshold ? estimatedTokens : "未达到");
        }

        return shouldBatch;
    }

    /**
     * Splits raw SQL text into token-bounded batches (legacy string-based path).
     *
     * <p>Statements are greedily packed in order so that the estimated tokens of the
     * shared base prompt plus the batch's statements stay within
     * {@code maxTokensPerBatch}, keeping {@code tokenSafetyMargin} tokens of head-room.
     *
     * @deprecated use the {@code List<SqlStatement>}-based overload instead
     */
    @Override
    @Deprecated
    public List<SqlBatch> splitIntoBatches(String sqlContent, List<CompatibilityRule> rules,
            DatabaseType sourceDbType, DatabaseType targetDbType) {
        log.info("开始分批处理SQL - 源数据库: {}, 目标数据库: {}", sourceDbType, targetDbType);

        List<String> sqlStatements = extractSqlStatements(sqlContent);
        List<SqlBatch> batches = new ArrayList<>();

        if (sqlStatements.isEmpty()) {
            log.warn("未找到有效的SQL语句");
            return batches;
        }

        // Estimate the tokens consumed by the shared base prompt (rules + context).
        String basePrompt = generateBasePrompt(rules, sourceDbType, targetDbType);
        int basePromptTokens = estimateTokenCount(basePrompt);

        // Remaining per-batch token budget after the base prompt and safety margin.
        int availableTokensPerBatch = maxTokensPerBatch - basePromptTokens - tokenSafetyMargin;

        log.info("批处理配置 - 基础提示词tokens: {}, 每批可用tokens: {}, 总语句数: {}",
                basePromptTokens, availableTokensPerBatch, sqlStatements.size());

        List<String> currentBatch = new ArrayList<>();
        int currentBatchTokens = 0;
        int batchNumber = 1;

        for (int i = 0; i < sqlStatements.size(); i++) {
            String statement = sqlStatements.get(i);
            // NOTE(review): tokens are estimated with the statement's position in the
            // batch under construction; if the statement then opens a new batch its
            // header number becomes 1, so the estimate can be off by a few characters
            // — presumably acceptable given the safety margin; confirm.
            int statementTokens = estimateTokenCount(formatStatementForBatch(statement, currentBatch.size() + 1));

            log.debug("处理语句 {}/{} - tokens: {}, 累计batch tokens: {}",
                    i + 1, sqlStatements.size(), statementTokens, currentBatchTokens);

            // Flush the current batch when adding this statement would exceed the budget.
            if (!currentBatch.isEmpty() &&
                    (currentBatchTokens + statementTokens > availableTokensPerBatch)) {

                // Materialize the finished batch.
                String batchContent = createBatchContentFromStrings(currentBatch, rules, sourceDbType, targetDbType,
                        batchNumber);
                SqlBatch batch = new SqlBatch(batchNumber, batchContent,
                        basePromptTokens + currentBatchTokens, convertStringsToStatements(currentBatch, batchNumber));
                batches.add(batch);

                log.debug("创建批次 {} - 包含 {} 个语句, {} tokens",
                        batchNumber, currentBatch.size(), batch.getEstimatedTokens());

                // Start a fresh batch.
                currentBatch.clear();
                currentBatchTokens = 0;
                batchNumber++;
            }

            // Append the statement to the batch under construction.
            currentBatch.add(statement);
            currentBatchTokens += statementTokens;
        }

        // Flush the trailing, partially filled batch.
        if (!currentBatch.isEmpty()) {
            String batchContent = createBatchContentFromStrings(currentBatch, rules, sourceDbType, targetDbType,
                    batchNumber);
            SqlBatch batch = new SqlBatch(batchNumber, batchContent,
                    basePromptTokens + currentBatchTokens, convertStringsToStatements(currentBatch, batchNumber));
            batches.add(batch);

            log.debug("创建最终批次 {} - 包含 {} 个语句, {} tokens",
                    batchNumber, currentBatch.size(), batch.getEstimatedTokens());
        }

        log.info("SQL分批完成 - 总共创建 {} 个批次，平均每批次 {} 个语句",
                batches.size(), batches.isEmpty() ? 0.0 : (double) sqlStatements.size() / batches.size());
        return batches;
    }

    /**
     * Splits SQL statements into batches whose estimated prompt size stays within
     * {@code maxTokensPerBatch}, leaving {@code tokenSafetyMargin} tokens of head-room.
     *
     * <p>Fix: batch content is now built with {@link #createBatchContent}, which uses
     * the same per-statement formatting (including the statement's ID and number
     * header) as the token estimation below. Previously the content went through the
     * deprecated string-based path, so statement IDs were dropped from the prompt and
     * the token estimate did not match the content actually emitted.
     *
     * @param sqlStatements statements to analyse, in order
     * @param rules         compatibility rules included in every batch prompt
     * @param sourceDbType  migration source database
     * @param targetDbType  migration target database
     * @return ordered list of batches; empty when {@code sqlStatements} is empty
     */
    @Override
    public List<SqlBatch> splitIntoBatches(List<SqlStatement> sqlStatements, List<CompatibilityRule> rules,
            DatabaseType sourceDbType, DatabaseType targetDbType) {
        log.info("开始分批处理SQL - 源数据库: {}, 目标数据库: {}, 语句数: {}",
                sourceDbType, targetDbType, sqlStatements.size());

        List<SqlBatch> batches = new ArrayList<>();

        if (sqlStatements.isEmpty()) {
            log.warn("未找到有效的SQL语句");
            return batches;
        }

        // Token budget: total cap minus the shared base prompt and the safety margin.
        String basePrompt = generateBasePrompt(rules, sourceDbType, targetDbType);
        int basePromptTokens = estimateTokenCount(basePrompt);
        int availableTokensPerBatch = maxTokensPerBatch - basePromptTokens - tokenSafetyMargin;

        log.info("批处理配置 - 基础提示词tokens: {}, 每批可用tokens: {}, 总语句数: {}",
                basePromptTokens, availableTokensPerBatch, sqlStatements.size());

        List<SqlStatement> currentBatch = new ArrayList<>();
        int currentBatchTokens = 0;
        int batchNumber = 1;

        for (int i = 0; i < sqlStatements.size(); i++) {
            SqlStatement statement = sqlStatements.get(i);
            int statementTokens = estimateTokenCount(formatStatementForBatch(statement, currentBatch.size() + 1));

            log.debug("处理语句 {}/{} (ID:{}) - tokens: {}, 累计batch tokens: {}",
                    i + 1, sqlStatements.size(), statement.getId(), statementTokens, currentBatchTokens);

            // Flush the current batch when adding this statement would exceed the budget.
            if (!currentBatch.isEmpty()
                    && currentBatchTokens + statementTokens > availableTokensPerBatch) {
                // Build the prompt with the same formatter used for the estimate above.
                String batchContent = createBatchContent(currentBatch, rules, sourceDbType, targetDbType, batchNumber);
                SqlBatch batch = new SqlBatch(batchNumber, batchContent,
                        basePromptTokens + currentBatchTokens, new ArrayList<>(currentBatch));
                batches.add(batch);

                log.debug("创建批次 {} - 包含 {} 个语句, {} tokens",
                        batchNumber, currentBatch.size(), batch.getEstimatedTokens());

                currentBatch.clear();
                currentBatchTokens = 0;
                batchNumber++;
            }

            currentBatch.add(statement);
            currentBatchTokens += statementTokens;
        }

        // Flush the trailing, partially filled batch.
        if (!currentBatch.isEmpty()) {
            String batchContent = createBatchContent(currentBatch, rules, sourceDbType, targetDbType, batchNumber);
            SqlBatch batch = new SqlBatch(batchNumber, batchContent,
                    basePromptTokens + currentBatchTokens, new ArrayList<>(currentBatch));
            batches.add(batch);

            log.debug("创建最终批次 {} - 包含 {} 个语句, {} tokens",
                    batchNumber, currentBatch.size(), batch.getEstimatedTokens());
        }

        log.info("SQL分批完成 - 总共创建 {} 个批次，平均每批次 {} 个语句",
                batches.size(), batches.isEmpty() ? 0.0 : (double) sqlStatements.size() / batches.size());
        return batches;
    }

    /**
     * Batched AI analysis over an in-memory statement list: splits the statements
     * into token-bounded batches, runs each through {@code analysisFunction} and
     * merges the per-batch results. Falls back to a single regular analysis when no
     * batch could be created.
     *
     * <p>Refactor: the batch loop, progress reporting and result merging were
     * duplicated verbatim between the two overloads; both now delegate to
     * {@link #executeBatches} and {@link #failureResponse}.
     *
     * @param sqlStatements    statements to analyse
     * @param request          original analysis request (rules, db types, user)
     * @param progressCallback optional progress sink; may be null
     * @param analysisFunction performs the actual per-batch AI call
     * @return merged analysis response; never null
     */
    @Override
    public AIAnalysisResponse analyzeSqlInBatches(List<SqlStatement> sqlStatements, UnifiedAIAnalysisRequest request,
            Consumer<BatchProgress> progressCallback,
            Function<UnifiedAIAnalysisRequest, AIAnalysisResponse> analysisFunction) {
        log.info("开始基于SqlStatement列表的批量分析 - 语句数: {}, 用户ID: {}",
                sqlStatements.size(), request.getUserId());

        try {
            if (sqlStatements.isEmpty()) {
                log.warn("SQL语句列表为空，无法进行批量分析");
                return failureResponse("没有提供SQL语句");
            }

            log.info("使用SqlStatement列表进行批量分析 - 语句数: {}", sqlStatements.size());

            List<SqlBatch> batches = splitIntoBatches(
                    sqlStatements,
                    request.getRules(),
                    request.getSourceDbType(),
                    request.getTargetDbType());

            if (batches.isEmpty()) {
                log.warn("未能创建任何批次，回退到常规分析");
                return analysisFunction.apply(request);
            }

            return executeBatches(batches, progressCallback, analysisFunction,
                    batch -> createBatchRequestForStatements(request, batch),
                    "基于SqlStatement列表的批量分析完成");

        } catch (Exception e) {
            log.error("基于SqlStatement列表的批量分析过程中发生异常", e);
            return failureResponse("批量分析失败: " + e.getMessage());
        }
    }

    /**
     * Batched AI analysis driven by the persisted sql_statement records of a task:
     * loads the task's statements from the repository, converts them to model
     * objects and runs the same batched pipeline as the list-based overload.
     *
     * @param taskId           task whose statements are loaded via the repository
     * @param request          original analysis request (rules, db types, user)
     * @param progressCallback optional progress sink; may be null
     * @param analysisFunction performs the actual per-batch AI call
     * @return merged analysis response; never null
     */
    @Override
    public AIAnalysisResponse analyzeSqlInBatches(Long taskId, UnifiedAIAnalysisRequest request,
            Consumer<BatchProgress> progressCallback,
            Function<UnifiedAIAnalysisRequest, AIAnalysisResponse> analysisFunction) {
        log.info("开始基于sql_statement记录的批量分析 - 任务ID: {}, 用户ID: {}", taskId, request.getUserId());

        try {
            // Load the task's SQL statement records from the database.
            List<com.sqlcheck.entity.SqlStatement> entityStatements = sqlStatementRepository.selectByTaskId(taskId);
            if (entityStatements.isEmpty()) {
                log.warn("任务 {} 没有找到SQL语句记录，无法进行批量分析", taskId);
                return failureResponse("任务中没有找到SQL语句记录");
            }

            List<SqlStatement> sqlStatements = convertEntityToModelStatements(entityStatements);
            log.info("从数据库加载到 {} 条SQL语句记录", sqlStatements.size());

            List<SqlBatch> batches = splitIntoBatches(
                    sqlStatements,
                    request.getRules(),
                    request.getSourceDbType(),
                    request.getTargetDbType());

            if (batches.isEmpty()) {
                log.warn("未能创建任何批次，回退到常规分析");
                return analysisFunction.apply(request);
            }

            return executeBatches(batches, progressCallback, analysisFunction,
                    batch -> createBatchRequest(request, batch),
                    "批量分析完成");

        } catch (Exception e) {
            log.error("基于sql_statement记录的批量分析过程中发生异常", e);
            return failureResponse("批量分析失败: " + e.getMessage());
        }
    }

    /**
     * Runs every batch through {@code analysisFunction}, reporting progress and
     * merging the per-batch results. A failed batch is logged and skipped so one
     * failure does not abort the whole run.
     *
     * @param batches             batches to analyse, in order
     * @param progressCallback    optional progress sink; may be null
     * @param analysisFunction    performs the actual per-batch AI call
     * @param batchRequestFactory builds the per-batch request from the original one
     * @param completionLogLabel  label used in the final summary log line
     * @return merged response produced by {@link #mergeBatchResults}
     */
    private AIAnalysisResponse executeBatches(List<SqlBatch> batches,
            Consumer<BatchProgress> progressCallback,
            Function<UnifiedAIAnalysisRequest, AIAnalysisResponse> analysisFunction,
            Function<SqlBatch, UnifiedAIAnalysisRequest> batchRequestFactory,
            String completionLogLabel) {
        List<AIAnalysisResponse> batchResults = new ArrayList<>();
        int totalStatements = batches.stream().mapToInt(SqlBatch::getStatementCount).sum();
        int processedStatements = 0;

        log.info("开始执行批量分析 - 总批次: {}, 总语句: {}", batches.size(), totalStatements);

        for (SqlBatch batch : batches) {
            if (progressCallback != null) {
                progressCallback.accept(new BatchProgress(
                        batch.getBatchNumber(),
                        batches.size(),
                        processedStatements,
                        totalStatements,
                        String.format("正在分析批次 %d/%d", batch.getBatchNumber(), batches.size())));
            }

            log.debug("开始分析批次 {}/{} - 包含 {} 个语句, {} tokens",
                    batch.getBatchNumber(), batches.size(), batch.getStatementCount(), batch.getEstimatedTokens());

            long batchStartTime = System.currentTimeMillis();
            AIAnalysisResponse batchResult = analysisFunction.apply(batchRequestFactory.apply(batch));
            long batchDuration = System.currentTimeMillis() - batchStartTime;

            if (batchResult.isSuccess()) {
                batchResults.add(batchResult);
                processedStatements += batch.getStatementCount();
                log.debug("批次 {} 分析成功 - 返回 {} 个语句分析结果, 耗时 {}ms",
                        batch.getBatchNumber(),
                        batchResult.getStatementAnalysis() != null ? batchResult.getStatementAnalysis().size() : 0,
                        batchDuration);
            } else {
                // Keep going: record the failure and continue with the next batch.
                log.warn("批次 {} 分析失败: {} (耗时 {}ms)",
                        batch.getBatchNumber(), batchResult.getErrorMessage(), batchDuration);
            }
        }

        if (progressCallback != null) {
            progressCallback.accept(new BatchProgress(
                    batches.size(), batches.size(), processedStatements, totalStatements, "分析完成"));
        }

        AIAnalysisResponse mergedResult = mergeBatchResults(batchResults);

        log.info("{} - 成功批次: {}/{}, 总语句分析: {}, 成功率: {}%",
                completionLogLabel,
                batchResults.size(), batches.size(),
                mergedResult.getStatementAnalysis() != null ? mergedResult.getStatementAnalysis().size() : 0,
                batches.size() > 0 ? Math.round((double) batchResults.size() / batches.size() * 100) : 0);

        return mergedResult;
    }

    /**
     * Builds a failed AIAnalysisResponse with the given message and an empty
     * statement-analysis list.
     */
    private static AIAnalysisResponse failureResponse(String message) {
        AIAnalysisResponse response = new AIAnalysisResponse();
        response.setSuccess(false);
        response.setErrorMessage(message);
        response.setStatementAnalysis(new ArrayList<>());
        return response;
    }

    /**
     * Roughly estimates how many LLM tokens {@code text} will consume.
     *
     * <p>Heuristic: CJK ideographs average {@code 1/CHINESE_CHARS_PER_TOKEN} tokens
     * per character, everything else {@code 1/ENGLISH_CHARS_PER_TOKEN}. Fix:
     * punctuation and symbols ({@code ( ) , ; = ' "} etc.) now count towards the
     * estimate — previously only letters, digits and whitespace were counted, which
     * systematically undercounted punctuation-heavy SQL and could let batches exceed
     * the intended token budget.
     *
     * @param text text to estimate; null or empty yields 0
     * @return estimated token count, at least 1 for non-empty input
     */
    @Override
    public int estimateTokenCount(String text) {
        if (text == null || text.isEmpty()) {
            return 0;
        }

        int chineseChars = 0;
        int otherChars = 0;

        for (char ch : text.toCharArray()) {
            if (ch >= 0x4e00 && ch <= 0x9fff) { // CJK Unified Ideographs block
                chineseChars++;
            } else if (Character.isWhitespace(ch) || !Character.isISOControl(ch)) {
                otherChars++; // letters, digits, whitespace, punctuation, symbols
            }
        }

        int estimatedTokens = (int) Math.ceil(chineseChars / CHINESE_CHARS_PER_TOKEN +
                otherChars / ENGLISH_CHARS_PER_TOKEN);

        return Math.max(1, estimatedTokens);
    }

    /**
     * Merges per-batch analysis responses into a single response. The merge is
     * considered successful when at least one batch succeeded; error messages of
     * failed batches are concatenated into the merged error message.
     *
     * @param batchResults responses from the individual batches, in order
     * @return merged response containing all successful statement analyses
     */
    @Override
    public AIAnalysisResponse mergeBatchResults(List<AIAnalysisResponse> batchResults) {
        log.debug("开始合并 {} 个批次的分析结果", batchResults.size());

        List<AIStatementAnalysis> mergedStatements = new ArrayList<>();
        StringBuilder failures = new StringBuilder();
        boolean anySuccess = false;

        int batchIndex = 0;
        for (AIAnalysisResponse result : batchResults) {
            batchIndex++;
            if (result.isSuccess() && result.getStatementAnalysis() != null) {
                anySuccess = true;
                mergedStatements.addAll(result.getStatementAnalysis());
                log.debug("合并批次 {} - 添加 {} 个语句分析",
                        batchIndex, result.getStatementAnalysis().size());
            } else {
                String error = result.getErrorMessage() != null ? result.getErrorMessage() : "未知错误";
                failures.append("批次 ").append(batchIndex).append(": ").append(error).append("; ");
                log.debug("批次 {} 分析失败: {}", batchIndex, error);
            }
        }

        AIAnalysisResponse merged = new AIAnalysisResponse();
        merged.setSuccess(anySuccess);
        merged.setStatementAnalysis(mergedStatements);

        if (!anySuccess) {
            merged.setErrorMessage("所有批次分析都失败: " + failures.toString());
            log.error("批量分析完全失败 - 所有批次都失败");
        } else if (failures.length() > 0) {
            merged.setErrorMessage("部分批次分析失败: " + failures.toString());
            log.debug("批量分析部分成功 - 成功语句: {}, 失败信息: {}",
                    mergedStatements.size(), failures.toString());
        }

        log.debug("分析结果合并完成 - 总语句数: {}, 成功: {}",
                mergedStatements.size(), merged.isSuccess());

        return merged;
    }

    /**
     * Splits raw SQL text into individual statements.
     *
     * <p>Statements are separated on {@code ;}. Fix: leading {@code --} comment lines
     * are now stripped from each fragment before the emptiness check — previously a
     * fragment such as {@code "-- note\nSELECT 1"} was discarded wholesale because it
     * started with {@code --}, silently dropping the statement after the comment.
     * Fragments consisting only of comments are still skipped.
     *
     * <p>NOTE(review): splitting on {@code ;} still breaks statements that contain a
     * semicolon inside a string literal; fixing that would require a real SQL lexer.
     *
     * @param sqlContent raw SQL text; null or blank yields an empty list
     * @return trimmed, non-comment statements in source order
     */
    private List<String> extractSqlStatements(String sqlContent) {
        List<String> statements = new ArrayList<>();

        if (sqlContent == null || sqlContent.trim().isEmpty()) {
            return statements;
        }

        // Split on statement terminators.
        String[] parts = sqlContent.split(";");

        for (String part : parts) {
            String trimmed = stripLeadingLineComments(part.trim());
            if (!trimmed.isEmpty() && !trimmed.startsWith("/*")) {
                statements.add(trimmed);
            }
        }

        log.debug("从SQL内容中提取到 {} 个有效语句（跳过 {} 个注释/空行）",
                statements.size(), parts.length - statements.size());
        return statements;
    }

    /**
     * Removes leading full-line {@code --} comments from a statement fragment,
     * returning the remaining SQL (empty when the fragment was all comments).
     */
    private static String stripLeadingLineComments(String fragment) {
        String remaining = fragment;
        while (remaining.startsWith("--")) {
            int newline = remaining.indexOf('\n');
            if (newline < 0) {
                return ""; // the fragment was nothing but a comment
            }
            remaining = remaining.substring(newline + 1).trim();
        }
        return remaining;
    }

    /**
     * Builds the shared prompt header (role, migration context and numbered
     * compatibility rules) that is prepended to every batch.
     */
    private String generateBasePrompt(List<CompatibilityRule> rules, DatabaseType sourceDbType,
            DatabaseType targetDbType) {
        StringBuilder prompt = new StringBuilder();

        // Role / task description.
        prompt.append("# ROLE\n")
                .append("你是一名精通数据库迁移的资深DBA，专门负责从 ").append(sourceDbType)
                .append(" 迁移到 ").append(targetDbType).append("。\n\n");

        // Migration context.
        prompt.append("# CONTEXT\n")
                .append("- 源数据库: ").append(sourceDbType).append("\n")
                .append("- 目标数据库: ").append(targetDbType).append("\n\n");

        // Numbered rules, when supplied.
        if (rules != null && !rules.isEmpty()) {
            prompt.append("# MIGRATION RULES\n");
            int ruleNumber = 0;
            for (CompatibilityRule rule : rules) {
                ruleNumber++;
                prompt.append("规则 ").append(ruleNumber).append(": ").append(rule.getRuleName()).append("\n")
                        .append("- 描述: ").append(rule.getDescription()).append("\n");
                if (rule.getSuggestion() != null) {
                    prompt.append("- 建议: ").append(rule.getSuggestion()).append("\n");
                }
                prompt.append("\n");
            }
        }

        return prompt.toString();
    }

    /**
     * Renders one SqlStatement as a numbered prompt entry (with ID and number
     * header), appending a terminating ';' when the content lacks one.
     */
    private String formatStatementForBatch(SqlStatement statement, int statementNumber) {
        String content = statement.getContent();
        String terminator = content.trim().endsWith(";") ? "" : ";";
        return String.format("**语句#%d** (ID:%d, Number:%d):\n%s%s\n",
                statementNumber, statement.getId(), statement.getStatementNumber(), content, terminator);
    }

    /**
     * Legacy variant: renders a raw SQL string as a numbered prompt entry,
     * appending a terminating ';' when the text lacks one.
     *
     * @deprecated kept only for the string-based batching path
     */
    @Deprecated
    private String formatStatementForBatch(String statement, int statementNumber) {
        String terminator = statement.trim().endsWith(";") ? "" : ";";
        return String.format("**语句#%d**:\n%s%s\n", statementNumber, statement, terminator);
    }

    /**
     * Assembles the full prompt for one batch: batch header, shared base prompt,
     * the formatted statements, and the expected output format.
     */
    private String createBatchContent(List<SqlStatement> statements, List<CompatibilityRule> rules,
            DatabaseType sourceDbType, DatabaseType targetDbType, int batchNumber) {
        StringBuilder content = new StringBuilder()
                .append("-- BATCH SQL ANALYSIS REQUEST --\n")
                .append("-- 批次编号: ").append(batchNumber).append("\n")
                .append("-- 语句数量: ").append(statements.size()).append("\n\n")
                .append(generateBasePrompt(rules, sourceDbType, targetDbType))
                .append("# INPUT SQL STATEMENTS\n");

        int position = 0;
        for (SqlStatement statement : statements) {
            content.append(formatStatementForBatch(statement, ++position));
        }

        return content
                .append("\n# OUTPUT FORMAT\n")
                .append("请严格按照JSON格式返回分析结果，包含statementAnalysis数组。\n")
                .toString();
    }

    /**
     * Legacy variant of {@code createBatchContent} that works on raw SQL strings
     * instead of SqlStatement objects.
     *
     * @deprecated kept only for the string-based batching path
     */
    @Deprecated
    private String createBatchContentFromStrings(List<String> statements, List<CompatibilityRule> rules,
            DatabaseType sourceDbType, DatabaseType targetDbType, int batchNumber) {
        StringBuilder content = new StringBuilder()
                .append("-- BATCH SQL ANALYSIS REQUEST --\n")
                .append("-- 批次编号: ").append(batchNumber).append("\n")
                .append("-- 语句数量: ").append(statements.size()).append("\n\n")
                .append(generateBasePrompt(rules, sourceDbType, targetDbType))
                .append("# INPUT SQL STATEMENTS\n");

        int position = 0;
        for (String statement : statements) {
            content.append(formatStatementForBatch(statement, ++position));
        }

        return content
                .append("\n# OUTPUT FORMAT\n")
                .append("请严格按照JSON格式返回分析结果，包含statementAnalysis数组。\n")
                .toString();
    }

    /**
     * Wraps raw SQL strings in SqlStatement objects with synthetic IDs so the
     * legacy string-based path can reuse the statement-based pipeline.
     *
     * @deprecated only used by the deprecated string-based batching methods
     */
    @Deprecated
    private List<SqlStatement> convertStringsToStatements(List<String> sqlStrings, int batchNumber) {
        List<SqlStatement> statements = new ArrayList<>(sqlStrings.size());
        int index = 0;
        for (String sql : sqlStrings) {
            statements.add(SqlStatement.builder()
                    .id((long) (batchNumber * 1000 + index)) // synthetic ID derived from batch + position
                    .statementNumber(index + 1)
                    .content(sql)
                    .sqlType(com.sqlcheck.model.SqlStatement.SqlType.QUERY) // real type unknown here
                    .sourceType(com.sqlcheck.model.SqlStatement.SourceType.MANUAL_INPUT)
                    .build());
            index++;
        }
        return statements;
    }

    /**
     * Extracts the raw SQL text of each statement, preserving order.
     *
     * @deprecated only used by the deprecated string-based batching methods
     */
    @Deprecated
    private List<String> convertStatementsToStrings(List<SqlStatement> statements) {
        List<String> contents = new ArrayList<>(statements.size());
        for (SqlStatement statement : statements) {
            contents.add(statement.getContent());
        }
        return contents;
    }

    /**
     * Converts persistence-layer SqlStatement entities into their model-layer
     * counterparts, mapping the enum fields through the dedicated converters.
     */
    private List<SqlStatement> convertEntityToModelStatements(List<com.sqlcheck.entity.SqlStatement> entityStatements) {
        List<SqlStatement> result = new ArrayList<>(entityStatements.size());

        for (com.sqlcheck.entity.SqlStatement source : entityStatements) {
            result.add(SqlStatement.builder()
                    .id(source.getId())
                    .statementNumber(source.getStatementNumber())
                    .content(source.getContent())
                    .sqlType(convertEntitySqlTypeToModel(source.getSqlType()))
                    .sourceType(convertEntitySourceTypeToModel(source.getSourceType()))
                    .sourceFile(source.getSourceFile())
                    .lineNumber(source.getLineNumber())
                    .columnNumber(source.getColumnNumber())
                    .className(source.getClassName())
                    .methodName(source.getMethodName())
                    .primaryTable(source.getPrimaryTable())
                    .build());
        }

        return result;
    }

    /**
     * Maps an entity-layer SqlType to the model-layer enum by name, falling back
     * to QUERY when the value is null or has no model counterpart.
     */
    private com.sqlcheck.model.SqlStatement.SqlType convertEntitySqlTypeToModel(
            com.sqlcheck.entity.SqlStatement.SqlType entityType) {
        if (entityType != null) {
            try {
                return com.sqlcheck.model.SqlStatement.SqlType.valueOf(entityType.name());
            } catch (IllegalArgumentException e) {
                log.warn("无法转换SqlType: {}, 使用默认值QUERY", entityType.name());
            }
        }
        return com.sqlcheck.model.SqlStatement.SqlType.QUERY;
    }

    /**
     * Maps an entity-layer SourceType to the model-layer enum by name, falling
     * back to MANUAL_INPUT when the value is null or has no model counterpart.
     */
    private com.sqlcheck.model.SqlStatement.SourceType convertEntitySourceTypeToModel(
            com.sqlcheck.entity.SqlStatement.SourceType entityType) {
        if (entityType != null) {
            try {
                return com.sqlcheck.model.SqlStatement.SourceType.valueOf(entityType.name());
            } catch (IllegalArgumentException e) {
                log.warn("无法转换SourceType: {}, 使用默认值MANUAL_INPUT", entityType.name());
            }
        }
        return com.sqlcheck.model.SqlStatement.SourceType.MANUAL_INPUT;
    }

    /**
     * Creates the per-batch request for the statement-list pipeline.
     *
     * <p>Fix: the body was a byte-for-byte copy of {@link #createBatchRequest};
     * it now delegates to it so the field-copy logic lives in one place.
     */
    private UnifiedAIAnalysisRequest createBatchRequestForStatements(UnifiedAIAnalysisRequest originalRequest,
            SqlBatch batch) {
        return createBatchRequest(originalRequest, batch);
    }

    /**
     * Creates a per-batch copy of the original request with the batch prompt as
     * its SQL content; all other fields are carried over unchanged.
     */
    private UnifiedAIAnalysisRequest createBatchRequest(UnifiedAIAnalysisRequest originalRequest, SqlBatch batch) {
        UnifiedAIAnalysisRequest copy = new UnifiedAIAnalysisRequest();
        copy.setSqlContent(batch.getBatchContent());
        copy.setSourceDbType(originalRequest.getSourceDbType());
        copy.setTargetDbType(originalRequest.getTargetDbType());
        copy.setUserId(originalRequest.getUserId());
        copy.setRules(originalRequest.getRules());
        return copy;
    }
}