# result_compiler.py
# -*- coding: utf-8 -*-

import os
import logging
from typing import Dict, Any, List, Tuple, Set, Optional
from collections import defaultdict
import math # 用于处理可能的 NaN 或 Inf
import time

# 导入项目模块
import config
import utils
from plagiarism_core.core_engine import ( # 导入核心数据结构类型
    ProcessedDocData,
    CoreResultData,
    TaskInfo,
    TextStats,
    AggregateMetrics,
    FragmentInfo
)

from plagiarism_core.core_engine_types import HitDetail

# 导入需要的数据类型或函数
from plagiarism_core.similarity_calculator import MatchTuple
from plagiarism_core.semantic_comparer import SemanticMatchWithPosTuple
from datasketch import MinHash

# 获取日志记录器
logger = logging.getLogger(__name__)

class ResultCompiler:
    """
    Compile plagiarism-check results: compute aggregate metrics and format
    hit details, grouped by source document basename.
    """

    def __init__(self, corpus_data: Dict[str, ProcessedDocData]):
        """
        Initialize the result compiler.

        Args:
            corpus_data: Mapping of corpus document ID to its preprocessed data.
        """
        logger.info("初始化 ResultCompiler...")
        self.corpus_data = corpus_data
        logger.info("ResultCompiler 初始化完成。")

    def compile(self,
                task_id: str,
                input_file_path: str,
                input_data: ProcessedDocData,
                comparison_results: Dict[str, Any],
                recall_stats: Optional[Dict[str, int]] = None  # optional recall statistics
               ) -> CoreResultData:
        """
        Main entry point: compile all comparison results into a CoreResultData.

        Args:
            task_id: Identifier of the current check task (used in logs).
            input_file_path: Path of the checked input file (only the basename
                is stored in the result).
            input_data: Preprocessed data of the input document.
            comparison_results: Dict with keys 'literal_matches',
                'semantic_matches' and 'ngram_scores'.
            recall_stats: Optional per-stage candidate counts; their sum is
                used as a rough estimate of compared_doc_count.

        Returns:
            A fully populated CoreResultData structure.
        """
        task_id_str = f"[任务 {task_id}]"
        input_filename = os.path.basename(input_file_path)
        logger.info(f"{task_id_str} 开始编译查重结果: {input_filename}")
        compile_start_time = time.time()

        # --- Initialize the CoreResultData skeleton ---
        core_result = self._initialize_core_result_data(task_id, input_filename)

        # --- Fetch base data from the preprocessed input ---
        cleaned_input_text = input_data.get("cleaned_text", "")
        total_chars = input_data.get("stats", {}).get("total_chars", 0)
        paragraphs_with_pos = input_data.get("paragraphs_pos", [])
        input_chunks_with_pos = input_data.get(
            "sentences_pos" if config.SEMANTIC_CHUNK_TYPE == 'sentence' else "paragraphs_pos", []
        )
        input_minhash_sig = input_data.get("minhash_signature")  # precomputed MinHash, may be None

        literal_matches: List[MatchTuple] = comparison_results.get('literal_matches', [])
        semantic_matches: List[SemanticMatchWithPosTuple] = comparison_results.get('semantic_matches', [])
        ngram_scores: Dict[str, float] = comparison_results.get('ngram_scores', {})

        # Update text statistics.
        core_result["text_stats"]["total_words"] = input_data.get("stats", {}).get("total_words", 0)
        core_result["text_stats"]["total_chars"] = total_chars
        core_result["text_stats"]["total_paragraphs"] = input_data.get("stats", {}).get("total_paragraphs", 0)
        # compared_doc_count is normally determined by the core engine from the
        # candidate IDs; here only the recall stats are available, so the sum
        # is a rough estimate.
        core_result["text_stats"]["compared_doc_count"] = sum(recall_stats.values()) if recall_stats else 0

        # --- Compute and fill metrics/details ---
        metrics_agg = core_result["aggregate_metrics"]  # reference into core_result
        hit_details_by_source = defaultdict(list)  # HitDetails grouped by source basename

        # --- 1. MinHash metrics (if enabled and available) ---
        self._compile_minhash_metrics(metrics_agg, input_minhash_sig)

        # --- 2. Literal matches ---
        logger.debug(f"{task_id_str} 开始处理 {len(literal_matches)} 个字面匹配...")
        covered_indices, hit_source_ids, chars_per_source = self._process_literal_matches(
            literal_matches, total_chars, cleaned_input_text, hit_details_by_source
        )
        # BUGFIX: count per hit instead of checking only hits[0] — a source's
        # list may later mix literal and semantic hits.
        literal_hit_counter = sum(
            1 for hits in hit_details_by_source.values()
            for hit in hits if hit['match_type'] == 'literal'
        )
        logger.debug(f"{task_id_str} 字面匹配处理完成，生成了 {literal_hit_counter} 条命中详情。")

        # --- 3. Literal-match aggregate metrics ---
        self._compile_literal_metrics(
            metrics_agg, literal_matches, total_chars, covered_indices,
            hit_source_ids, chars_per_source, paragraphs_with_pos
        )

        # --- 4. Semantic matches ---
        logger.debug(f"{task_id_str} 开始处理 {len(semantic_matches)} 个语义匹配...")
        max_semantic_score, max_semantic_source_id, involved_semantic_input_indices = self._process_semantic_matches(
            semantic_matches, input_chunks_with_pos, hit_details_by_source
        )
        # BUGFIX: the previous `hits[0]['match_type'] == 'semantic'` check
        # skipped every source whose first hit was literal, undercounting the
        # semantic_hit_block_count metric. Count per hit instead.
        semantic_hit_counter = sum(
            1 for hits in hit_details_by_source.values()
            for hit in hits if hit['match_type'] == 'semantic'
        )
        logger.debug(f"{task_id_str} 语义匹配处理完成，生成了 {semantic_hit_counter} 条命中详情。")

        # --- 5. Semantic-match aggregate metrics ---
        self._compile_semantic_metrics(
            metrics_agg, semantic_hit_counter, max_semantic_score,
            max_semantic_source_id, involved_semantic_input_indices
        )

        # --- 6. N-gram metrics ---
        if config.USE_NGRAM_SIMILARITY:
            self._compile_ngram_metrics(metrics_agg, ngram_scores)

        # --- 7. Final ordering of hit details ---
        # Sort each source's hit list by the start position of the input fragment.
        for source_file in hit_details_by_source:
            hit_details_by_source[source_file].sort(
                key=lambda x: x.get("input_fragment", {}).get("start_char", float('inf'))
            )
        # Convert back to a plain dict before storing in the result.
        core_result["hit_details_by_source"] = dict(hit_details_by_source)

        compile_duration = time.time() - compile_start_time
        logger.info(f"{task_id_str} 结果编译完成。耗时: {compile_duration:.2f} 秒。")
        logger.debug(f"{task_id_str} 最终聚合指标: {metrics_agg}")

        # Return the fully populated CoreResultData.
        return core_result


    def _initialize_core_result_data(self, task_id: str, input_filename: str) -> CoreResultData:
        """(private) Build the CoreResultData skeleton with basic task info.

        All metrics start at 0 / "N/A"; timing fields are filled in later by
        the caller (core engine).
        """
        return CoreResultData(
            task_info=TaskInfo(
                task_id=task_id,
                check_time="编译中...",  # updated later
                original_filename=input_filename,
                processing_time_seconds=0.0  # updated later
            ),
            text_stats=TextStats(
                total_words=0, total_chars=0, total_paragraphs=0,
                corpus_doc_count=len(self.corpus_data),  # total corpus size
                compared_doc_count=0  # filled in by compile()
            ),
            aggregate_metrics=AggregateMetrics(  # all metrics initialized to 0 or N/A
                overall_similarity_ratio=0.0, duplicate_chars_count=0,
                max_similarity_ratio_single_source=0.0, max_duplicate_chars_single_source=0,
                max_similarity_source_name="N/A", literal_hit_source_count=0,
                suspected_paragraph_count=0, max_duplicate_chars_in_paragraph=0,
                min_duplicate_chars_in_paragraph=0, front_part_overlap_chars=0,
                rear_part_overlap_chars=0, max_minhash_jaccard=0.0,
                max_minhash_source_name="N/A", max_ngram_score=0.0,
                max_ngram_source_name="N/A", high_ngram_source_count_threshold_0_7=0,
                semantic_hit_block_count=0, semantic_involved_input_block_count=0,
                max_semantic_score=0.0, max_semantic_source_name="N/A"
            ),
            hit_details_by_source={},
            error_message=None
        )


    # === Metric computation and data-processing helpers ===

    def _compile_minhash_metrics(self, metrics_agg: AggregateMetrics, input_minhash_sig: Optional[MinHash]) -> None:
        """Compute MinHash metrics: the max estimated Jaccard and its source."""
        if not config.USE_MINHASH_PRESCREENING or not input_minhash_sig:
            logger.debug("MinHash 预筛选未启用或输入签名无效，跳过 MinHash 指标计算。")
            metrics_agg["max_minhash_jaccard"] = 0.0
            metrics_agg["max_minhash_source_name"] = "N/A"
            return

        logger.debug("开始计算 MinHash 指标...")

        # Collect precomputed signatures from the corpus data.
        corpus_minhashes_local: Dict[str, MinHash] = {}
        for doc_id, data in self.corpus_data.items():
            sig = data.get("minhash_signature")
            if sig and isinstance(sig, MinHash):  # ensure correct type
                corpus_minhashes_local[doc_id] = sig

        if not corpus_minhashes_local:
            # No precomputed signatures and no fallback source implemented yet.
            logger.warning("未在语料库数据中找到预计算的 MinHash 签名，尝试从其他来源获取（当前未实现）。")
            logger.warning("无法获取语料库 MinHash 签名，跳过 MinHash 指标计算。")
            metrics_agg["max_minhash_jaccard"] = 0.0
            metrics_agg["max_minhash_source_name"] = "N/A"
            return

        max_jaccard = 0.0
        max_jaccard_source_id = None

        # Estimate Jaccard similarity against every corpus signature.
        for cid, c_sig in corpus_minhashes_local.items():
            try:
                # Jaccard estimation can be costly with many signatures.
                j_sim = input_minhash_sig.jaccard(c_sig)
                if not math.isfinite(j_sim):  # guard against NaN / Inf
                    logger.warning(f"计算与文档 '{cid}' 的 Jaccard 相似度得到无效值: {j_sim}，已忽略。")
                    continue
                if j_sim > max_jaccard:
                    max_jaccard = j_sim
                    max_jaccard_source_id = cid
            except Exception as e:
                # datasketch may raise internally (e.g. incompatible signatures)
                logger.warning(f"计算 MinHash Jaccard 相似度时出错 (与文档 '{cid}'): {e}")

        metrics_agg["max_minhash_jaccard"] = round(max_jaccard, 4)
        metrics_agg["max_minhash_source_name"] = os.path.basename(max_jaccard_source_id) if max_jaccard_source_id else "N/A"
        logger.debug(f"MinHash 指标计算完成。最高 Jaccard: {metrics_agg['max_minhash_jaccard']:.4f} (来源: {metrics_agg['max_minhash_source_name']})")


    def _process_literal_matches(self,
                                 literal_matches: List[MatchTuple],
                                 total_chars: int,
                                 cleaned_input_text: str,
                                 hit_details_by_source: defaultdict  # mutated in place
                                ) -> Tuple[Set[int], Set[str], Dict[str, Set[int]]]:
        """Process literal matches: compute coverage, source IDs, and HitDetails.

        Appends one literal HitDetail per match to ``hit_details_by_source``.

        Returns:
            (covered input char indices, hit corpus IDs,
             {corpus_id: covered input char indices from that source})
        """
        covered_indices = set()
        hit_source_ids = set()
        chars_per_source = defaultdict(set)  # {corpus_id: {covered char indices}}
        literal_hit_counter = 0
        min_match_len = config.MIN_MATCH_LENGTH

        if not literal_matches:
            return covered_indices, hit_source_ids, chars_per_source

        for input_start, source_start, length, corpus_id in literal_matches:
            # Basic validation — the comparer should already have filtered these.
            if length < min_match_len: continue
            if corpus_id not in self.corpus_data:
                logger.warning(f"字面匹配结果包含未知的语料库 ID '{corpus_id}'，已跳过。")
                continue

            # Coverage bookkeeping (clamped to [0, total_chars)).
            match_indices = set(range(max(0, input_start), min(total_chars, input_start + length)))
            covered_indices.update(match_indices)
            hit_source_ids.add(corpus_id)
            chars_per_source[corpus_id].update(match_indices)

            # --- Build the HitDetail ---
            source_basename = os.path.basename(corpus_id)  # grouping key
            literal_hit_counter += 1
            hit_id = f"LIT_{literal_hit_counter:04d}"

            # Input fragment info.
            input_frag_text, input_len = self._safe_extract_fragment(cleaned_input_text, input_start, input_start + length)
            input_frag_info: FragmentInfo = {
                "text": input_frag_text,
                "start_char": input_start,
                "end_char": input_start + length,
                "length": input_len
            }

            # Source fragment info.
            source_frag_text, source_actual_start, source_actual_end, source_len = self._safe_extract_source_fragment(
                corpus_id, source_start, source_start + length
            )
            source_frag_info: FragmentInfo = {
                "text": source_frag_text,
                "start_char": source_actual_start,
                "end_char": source_actual_end,
                "length": source_len
            }

            hit_detail: HitDetail = {
                "hit_id": hit_id,
                "source_file": source_basename,
                "match_type": "literal",
                "similarity_score": 1.0,  # literal matches are exact
                "input_fragment": input_frag_info,
                "source_fragment": source_frag_info
            }
            hit_details_by_source[source_basename].append(hit_detail)

        return covered_indices, hit_source_ids, dict(chars_per_source)  # plain dict

    def _compile_literal_metrics(self,
                                metrics_agg: AggregateMetrics,
                                literal_matches: List[MatchTuple],  # raw matches (kept for interface stability)
                                total_chars: int,
                                covered_indices: Set[int],
                                hit_source_ids: Set[str],
                                chars_per_source: Dict[str, Set[int]],
                                paragraphs_with_pos: List[Tuple[str, int, int]]) -> None:
        """Compute all aggregate metrics derived from literal matches."""
        logger.debug("开始计算字面匹配聚合指标...")

        # --- Overall similarity ratio and duplicated character count ---
        duplicate_chars_count = len(covered_indices)
        metrics_agg["duplicate_chars_count"] = duplicate_chars_count
        if total_chars > 0:
            metrics_agg["overall_similarity_ratio"] = round((duplicate_chars_count / total_chars) * 100, 2)
        else:
            metrics_agg["overall_similarity_ratio"] = 0.0
        metrics_agg["literal_hit_source_count"] = len(hit_source_ids)

        # --- Single-source maxima ---
        max_single_chars = 0
        max_single_source_id = None
        if chars_per_source:
            source_char_counts = {cid: len(indices) for cid, indices in chars_per_source.items()}
            if source_char_counts:
                # Source contributing the most duplicated characters.
                max_single_source_id = max(source_char_counts, key=source_char_counts.get)
                max_single_chars = source_char_counts[max_single_source_id]

        metrics_agg["max_duplicate_chars_single_source"] = max_single_chars
        if total_chars > 0:
            metrics_agg["max_similarity_ratio_single_source"] = round((max_single_chars / total_chars) * 100, 2)
        else:
            metrics_agg["max_similarity_ratio_single_source"] = 0.0
        metrics_agg["max_similarity_source_name"] = os.path.basename(max_single_source_id) if max_single_source_id else "N/A"

        # --- Paragraph-level metrics ---
        suspected_para_indices, para_counts = self._map_indices_to_paragraphs(covered_indices, paragraphs_with_pos)
        metrics_agg["suspected_paragraph_count"] = len(suspected_para_indices)
        if para_counts:
            counts = list(para_counts.values())
            metrics_agg["max_duplicate_chars_in_paragraph"] = max(counts) if counts else 0
            metrics_agg["min_duplicate_chars_in_paragraph"] = min(counts) if counts else 0
        else:
            metrics_agg["max_duplicate_chars_in_paragraph"] = 0
            metrics_agg["min_duplicate_chars_in_paragraph"] = 0

        # --- Front/rear overlap character counts ---
        if covered_indices and total_chars > 0:
            try:
                # Front/rear regions are defined by configured character counts.
                front_limit = min(config.FRONT_PART_CHARS, total_chars)
                rear_start = max(0, total_chars - config.REAR_PART_CHARS)

                front_overlap = sum(1 for i in covered_indices if 0 <= i < front_limit)
                rear_overlap = sum(1 for i in covered_indices if rear_start <= i < total_chars)

                metrics_agg["front_part_overlap_chars"] = front_overlap
                metrics_agg["rear_part_overlap_chars"] = rear_overlap
            except Exception as e:
                logger.error(f"计算前后部重叠字符数时出错: {e}", exc_info=True)
                metrics_agg["front_part_overlap_chars"] = 0
                metrics_agg["rear_part_overlap_chars"] = 0
        else:
            metrics_agg["front_part_overlap_chars"] = 0
            metrics_agg["rear_part_overlap_chars"] = 0

        logger.debug(f"字面匹配指标计算完成。总重复字符: {duplicate_chars_count}, 总复制比: {metrics_agg['overall_similarity_ratio']:.2f}%")


    def _process_semantic_matches(self,
                                  semantic_matches: List[SemanticMatchWithPosTuple],
                                  input_chunks_with_pos: List[Tuple[str, int, int]],
                                  hit_details_by_source: defaultdict  # mutated in place
                                 ) -> Tuple[float, Optional[str], Set[int]]:
        """Process semantic matches: track the best score and build HitDetails.

        Returns:
            (max finite semantic score, its corpus ID or None,
             set of input chunk indices involved in any semantic match)
        """
        involved_semantic_input_indices = set()  # indices of input chunks that were hit
        max_semantic_score = 0.0
        max_semantic_source_id = None
        semantic_hit_counter = 0

        if not semantic_matches:
            return max_semantic_score, max_semantic_source_id, involved_semantic_input_indices

        for match_data in semantic_matches:
            try:
                # Unpack: (input_chunk_idx, corpus_id, src_chunk_idx_in_doc, score, src_start_char, src_end_char)
                in_idx, src_id, src_chunk_idx, score, src_start, src_end = match_data

                # Validate source ID.
                if src_id not in self.corpus_data:
                    logger.warning(f"语义匹配结果包含未知的语料库 ID '{src_id}'，已跳过。")
                    continue

                # Record the involved input chunk index.
                involved_semantic_input_indices.add(in_idx)

                # --- Input chunk info ---
                if not (0 <= in_idx < len(input_chunks_with_pos)):
                    logger.error(f"语义匹配结果包含无效的输入块索引: {in_idx} (总输入块数: {len(input_chunks_with_pos)})")
                    continue  # skip this invalid match
                input_chunk_tuple = input_chunks_with_pos[in_idx]
                input_frag_text, in_start_char, in_end_char = input_chunk_tuple
                input_len = len(input_frag_text)  # actual text length

                # --- Source chunk info ---
                if src_start < 0 or src_end < 0 or src_start >= src_end:
                    logger.warning(f"语义匹配结果包含无效的来源位置: start={src_start}, end={src_end} (来源: {src_id})，已跳过。")
                    continue
                source_frag_text, source_actual_start, source_actual_end, source_len = self._safe_extract_source_fragment(
                    src_id, src_start, src_end
                )
                # Extraction may fail (missing doc / out-of-range positions).
                if source_frag_text is None:
                    logger.warning(f"无法提取来源片段 (来源: {src_id}, 位置: {src_start}-{src_end})，跳过此语义匹配。")
                    continue

                # --- Track the best score (NaN compares False, so it never wins) ---
                if score > max_semantic_score:
                    if math.isfinite(score):
                        max_semantic_score = score
                        max_semantic_source_id = src_id
                    else:
                        logger.warning(f"语义匹配得分无效: {score} (来源: {src_id})，已忽略。")


                # --- Build the HitDetail ---
                semantic_hit_counter += 1
                hit_id = f"SEM_{semantic_hit_counter:04d}"
                source_basename = os.path.basename(src_id)

                input_frag_info: FragmentInfo = {
                    "text": input_frag_text, "start_char": in_start_char, "end_char": in_end_char, "length": input_len}
                source_frag_info: FragmentInfo = {
                    "text": source_frag_text, "start_char": source_actual_start, "end_char": source_actual_end, "length": source_len}

                hit_detail: HitDetail = {
                    "hit_id": hit_id,
                    "source_file": source_basename,
                    "match_type": "semantic",
                    "similarity_score": round(score, 4) if math.isfinite(score) else 0.0,  # rounded; invalid -> 0.0
                    "input_fragment": input_frag_info,
                    "source_fragment": source_frag_info
                }
                hit_details_by_source[source_basename].append(hit_detail)

            except Exception as se:
                # Give as much context as we can safely extract from the tuple.
                hit_info_str = f"输入块索引: {match_data[0]}, 来源ID: {match_data[1]}" if isinstance(match_data, tuple) and len(match_data) > 1 else "未知匹配数据"
                logger.error(f"处理语义命中详情时发生错误 ({hit_info_str}): {se}", exc_info=True)

        return max_semantic_score, max_semantic_source_id, involved_semantic_input_indices

    def _compile_semantic_metrics(self,
                                 metrics_agg: AggregateMetrics,
                                 semantic_hit_counter: int,
                                 max_semantic_score: float,
                                 max_semantic_source_id: Optional[str],
                                 involved_semantic_input_indices: Set[int]) -> None:
        """Fill in all aggregate metrics derived from semantic matches."""
        logger.debug("开始计算语义匹配聚合指标...")
        metrics_agg["semantic_hit_block_count"] = semantic_hit_counter
        metrics_agg["max_semantic_score"] = round(max_semantic_score, 4)
        metrics_agg["max_semantic_source_name"] = os.path.basename(max_semantic_source_id) if max_semantic_source_id else "N/A"
        metrics_agg["semantic_involved_input_block_count"] = len(involved_semantic_input_indices)
        logger.debug(f"语义匹配指标计算完成。命中块数: {semantic_hit_counter}, 最高分: {max_semantic_score:.4f}")

    def _compile_ngram_metrics(self,
                              metrics_agg: AggregateMetrics,
                              ngram_scores: Dict[str, float]) -> None:
        """Compute N-gram metrics: max score, its source, and the count of
        sources at or above the high-similarity threshold."""
        if not ngram_scores:
            logger.debug("N-gram 得分为空，跳过 N-gram 指标计算。")
            metrics_agg["max_ngram_score"] = 0.0
            metrics_agg["max_ngram_source_name"] = "N/A"
            metrics_agg["high_ngram_source_count_threshold_0_7"] = 0
            return

        logger.debug("开始计算 N-gram 指标...")
        max_ngram = 0.0
        max_ngram_source_id = None
        high_ngram_count = 0
        # High-similarity threshold from config, defaulting to 0.7.
        ngram_thresh = getattr(config, 'NGRAM_HIGH_SIMILARITY_THRESHOLD', 0.7)

        for doc_id, score in ngram_scores.items():
            # Guard against NaN / Inf scores.
            if not math.isfinite(score):
                logger.warning(f"N-gram 得分无效: {score} (文档: {doc_id})，已忽略。")
                continue
            # Validate doc_id (the comparer should already guarantee this).
            if doc_id not in self.corpus_data:
                logger.warning(f"N-gram 得分结果包含未知的语料库 ID '{doc_id}'，已跳过。")
                continue

            if score > max_ngram:
                max_ngram = score
                max_ngram_source_id = doc_id
            if score >= ngram_thresh:
                high_ngram_count += 1

        metrics_agg["max_ngram_score"] = round(max_ngram, 4)
        metrics_agg["max_ngram_source_name"] = os.path.basename(max_ngram_source_id) if max_ngram_source_id else "N/A"
        metrics_agg["high_ngram_source_count_threshold_0_7"] = high_ngram_count
        logger.debug(f"N-gram 指标计算完成。最高分: {max_ngram:.4f}, 高分(>={ngram_thresh:.2f})来源数: {high_ngram_count}")


    # === Fragment extraction and position-mapping helpers ===

    def _calculate_coverage(self, literal_matches: List[MatchTuple], text_length: int) -> Set[int]:
        """Return the set of unique input-char indices covered by literal matches.

        NOTE(review): not called from compile() (coverage is computed inline in
        _process_literal_matches); kept for external callers.
        """
        covered_indices = set()
        if not literal_matches or text_length <= 0:
            return covered_indices
        min_match_len = config.MIN_MATCH_LENGTH
        for i, _, n, _ in literal_matches:
            if n >= min_match_len:
                # Effective index range [start, end), clamped to the text.
                start_index = max(0, i)
                end_index = min(text_length, i + n)
                if end_index > start_index:  # only add valid ranges
                    covered_indices.update(range(start_index, end_index))
        return covered_indices

    def _map_indices_to_paragraphs(self,
                                   covered_indices: Set[int],
                                   paragraphs_with_pos: List[Tuple[str, int, int]]
                                   ) -> Tuple[Set[int], Dict[int, int]]:
        """
        Map covered character indices onto paragraphs and count duplicated
        characters per suspected paragraph.

        Returns:
            (set of paragraph indices containing duplicates,
             {paragraph_index: duplicated char count})
        """
        suspected_para_indices = set()  # indices of paragraphs containing duplicated chars
        para_duplicate_counts = defaultdict(int)  # {paragraph_index: count}

        if not paragraphs_with_pos or not covered_indices:
            return suspected_para_indices, dict(para_duplicate_counts)

        logger.debug(f"开始将 {len(covered_indices)} 个覆盖字符索引映射到 {len(paragraphs_with_pos)} 个段落...")
        map_start_time = time.time()

        # Linear scan per character. (The previous code also built a sorted
        # range list that was never used — removed as dead code.)
        # NOTE(review): for very large inputs a bisect over sorted paragraph
        # starts would be faster — confirm paragraphs are non-overlapping
        # before switching, since that would change which paragraph wins.
        not_found_count = 0
        for char_idx in covered_indices:
            for para_idx, (_, para_start, para_end) in enumerate(paragraphs_with_pos):
                if para_start <= char_idx < para_end:  # index falls in this paragraph
                    suspected_para_indices.add(para_idx)
                    para_duplicate_counts[para_idx] += 1
                    break  # a character belongs to at most one paragraph
            else:
                # No paragraph contained this index (filtering / offset issues).
                not_found_count += 1

        if not_found_count > 0:
            logger.warning(f"有 {not_found_count} 个覆盖字符索引未找到对应的段落（可能由段落过滤或索引错误引起）。")

        map_duration = time.time() - map_start_time
        logger.debug(f"索引到段落映射完成，找到 {len(suspected_para_indices)} 个疑似段落。耗时: {map_duration:.4f} 秒。")
        return suspected_para_indices, dict(para_duplicate_counts)


    def _safe_extract_fragment(self, text: Optional[str], start: int, end: int) -> Tuple[Optional[str], int]:
        """
        Safely slice ``text[start:end]``.

        Returns:
            (fragment or None on invalid input/indices, actual fragment length)
        """
        if text is None:
            return None, 0

        text_len = len(text)
        # Reject invalid or out-of-range start indices.
        if start < 0 or end < start or start >= text_len:
            return None, 0

        # Clamp the end index to the text length.
        safe_end = min(end, text_len)
        try:
            fragment_text = text[start:safe_end]
            fragment_len = len(fragment_text)  # actual extracted length
        except Exception as e:
            logger.warning(f"安全提取片段 ({start}-{safe_end}) 时发生未知错误: {e}")
            return None, 0

        return fragment_text, fragment_len

    def _safe_extract_source_fragment(self, corpus_id: str, start: int, end: int) -> Tuple[Optional[str], int, int, int]:
        """
        Safely extract a fragment from a corpus document's cleaned text.

        Returns:
            (fragment or None, actual start, actual end, actual length);
            positions are (-1, -1) when extraction fails.
        """
        fragment_text: Optional[str] = None
        actual_start, actual_end, length = -1, -1, 0

        # Look up the preprocessed corpus document.
        source_data = self.corpus_data.get(corpus_id)
        if source_data:
            source_text = source_data.get("cleaned_text")
            if source_text:
                fragment_text, length = self._safe_extract_fragment(source_text, start, end)
                if fragment_text is not None:
                    actual_start = start
                    actual_end = start + length  # end = start + actual length
                # else: failure already logged in _safe_extract_fragment
            else:
                logger.warning(f"提取来源片段时，语料库文档 '{corpus_id}' 的清理后文本为空。")
        else:
            logger.warning(f"提取来源片段时未找到语料库文档数据，ID: '{corpus_id}'")

        return fragment_text, actual_start, actual_end, length
