# core_engine.py
# -*- coding: utf-8 -*-

import math
import os
import datetime
import logging
import time
import json
from typing import Dict, Any, Optional

import numpy as np

# Import project base modules
import config
import utils
import plagiarism_core.text_extractor as text_extractor
import plagiarism_core.preprocessor as preprocessor

# Import core data-structure type definitions
from plagiarism_core.core_engine_types import (
    ProcessedDocData,
    CoreResultData,
    TaskInfo,
    TextStats,
    AggregateMetrics
)

from plagiarism_core.core_engine_types import FragmentInfo, HitDetail

# Import the refactored functional modules
from plagiarism_core.index_manager import IndexManager
from plagiarism_core.candidate_generator import CandidateGenerator
from plagiarism_core.detailed_comparer import DetailedComparer
from plagiarism_core.result_compiler import ResultCompiler
from plagiarism_core.semantic_comparer import SemanticComparer

# Module-level logger
logger = logging.getLogger(__name__)


class PlagiarismChecker:

    def __init__(self):
        """
        Initialize the plagiarism-check engine.

        Wires up the core components in dependency order:

        1. Load and preprocess the corpus (hard requirement).
        2. Create the optional SemanticComparer (semantic features).
        3. Create the IndexManager, and build the semantic index here if it
           is configured but was not loaded.
        4. Create CandidateGenerator, DetailedComparer and ResultCompiler.

        Raises:
            RuntimeError: if the corpus cannot be loaded / is empty, or if
                the IndexManager or any other core component fails to
                initialize.
        """
        logger.info("正在初始化查重引擎 PlagiarismChecker (重构版)...")
        self.start_init_time = time.time()
        self.current_task_id: Optional[str] = None  # id of the task currently in flight

        # --- 1. Load and preprocess the corpus ---
        self.corpus_data: Dict[str,
                               ProcessedDocData] = self._load_and_preprocess_corpus()
        if not self.corpus_data:
            logger.critical("语料库加载失败或为空，查重引擎无法正常工作！")
            raise RuntimeError("无法加载有效的语料库数据。")

        # --- 2. Initialize SemanticComparer (when configured) ---
        # SemanticComparer may be needed both by IndexManager (building the
        # semantic index) and by DetailedComparer (semantic comparison).
        self.semantic_comparer_instance: Optional[SemanticComparer] = None
        if config.USE_SEMANTIC_SIMILARITY or config.USE_SEMANTIC_INDEX:
            logger.info("正在初始化 SemanticComparer...")
            try:
                self.semantic_comparer_instance = SemanticComparer()
                if not self.semantic_comparer_instance.enabled:
                    logger.warning("SemanticComparer 初始化失败或被禁用。语义相关功能将受限。")
                    self.semantic_comparer_instance = None  # force the disabled state
                else:
                    logger.info("SemanticComparer 初始化成功。")
            except Exception as e:
                logger.exception(f"初始化 SemanticComparer 时发生严重错误: {e}")
                self.semantic_comparer_instance = None

        # --- 3. Initialize the index manager ---
        logger.info("正在初始化 IndexManager...")
        try:
            self.index_manager = IndexManager(self.corpus_data)
            # Optional: if a semantic index is required but was not loaded,
            # trigger a build here (needs a working SemanticComparer).
            if config.USE_SEMANTIC_INDEX and not self.index_manager.semantic_index_enabled:
                if self.semantic_comparer_instance:
                    logger.info("尝试构建语义索引...")
                    build_success = self.index_manager.build_semantic_index(
                        self.semantic_comparer_instance.model)
                    if build_success:
                        logger.info("语义索引构建成功。")
                    else:
                        logger.error("语义索引构建失败。语义索引功能将不可用。")
                else:
                    logger.warning("无法构建语义索引：SemanticComparer 不可用。")

            logger.info(f"IndexManager 初始化完成。状态: "
                        f"MinHash({'启用' if self.index_manager.minhash_enabled else '禁用'}), "
                        f"倒排({'启用' if self.index_manager.inverted_index_enabled else '禁用'}), "
                        f"语义索引({'启用' if self.index_manager.semantic_index_enabled else '禁用'})")
        except Exception as e:
            logger.exception(f"初始化 IndexManager 时发生严重错误: {e}")
            raise RuntimeError("索引管理器初始化失败，无法继续。") from e

        # --- 4. Initialize the remaining core components ---
        logger.info(
            "正在初始化其他核心组件 (CandidateGenerator, DetailedComparer, ResultCompiler)...")
        try:
            # CandidateGenerator depends on IndexManager.
            self.candidate_generator = CandidateGenerator(self.index_manager)
            # DetailedComparer depends on the corpus data and the (optional)
            # SemanticComparer instance.
            self.detailed_comparer = DetailedComparer(
                self.corpus_data, self.semantic_comparer_instance)
            # ResultCompiler depends on the corpus data.
            self.result_compiler = ResultCompiler(self.corpus_data)
            logger.info(
                "核心组件 (CandidateGenerator, DetailedComparer, ResultCompiler) 初始化完成。")
        except Exception as e:
            logger.exception(f"初始化核心组件时发生严重错误: {e}")
            raise RuntimeError("核心组件初始化失败，无法继续。") from e

        init_duration = time.time() - self.start_init_time
        logger.info(f"查重引擎 PlagiarismChecker 初始化完成，总耗时 {init_duration:.2f} 秒。")

    def _get_structured_cache_path(self, original_file_path: str) -> str:
        """
        Build the structured cache-file path (JSON format) for a source file.

        The cache file name is derived from the file's path relative to
        ``config.BASE_DIR`` with directory separators flattened to ``_``.
        When no relative path exists (e.g. a different drive on Windows) an
        MD5 hash of the absolute path is used instead. The name is then
        sanitised to alphanumerics/underscore/hyphen and, if over-long,
        truncated with a short disambiguating hash suffix.

        Args:
            original_file_path: Path of the source document.

        Returns:
            Full path of the cache file inside ``config.CACHE_DIR``.

        Raises:
            OSError: if the cache directory cannot be created.
        """
        import hashlib  # hoisted: was imported separately in two branches

        # makedirs(exist_ok=True) already tolerates an existing directory,
        # so the previous os.path.exists() pre-check was redundant (and racy).
        try:
            os.makedirs(config.CACHE_DIR, exist_ok=True)
        except OSError as e:
            logger.error(
                f"创建缓存目录失败: {config.CACHE_DIR}, 错误: {e}", exc_info=True)
            raise

        try:
            relative_path = os.path.relpath(
                original_file_path, config.BASE_DIR)
        except ValueError:
            # No common prefix with BASE_DIR -> fall back to hashing the path.
            path_hash = hashlib.md5(
                original_file_path.encode('utf-8')).hexdigest()
            cache_base_name = f"abs_{path_hash}"
            logger.warning(
                f"无法获取文件的相对路径，将使用哈希作为缓存文件名基础: {original_file_path} -> {cache_base_name}")
        else:
            dir_part, filename = os.path.split(relative_path)
            name_part, _ = os.path.splitext(filename)
            cache_filename_parts = [part.replace(
                os.sep, '_') for part in dir_part.split(os.sep) if part]
            cache_filename_parts.append(name_part)
            cache_base_name = "_".join(cache_filename_parts)

        # Keep only filesystem-safe characters.
        safe_base_name = "".join(
            c if c.isalnum() or c in ('_', '-') else '_' for c in cache_base_name)

        # Truncate over-long names, keeping uniqueness via an 8-char hash.
        max_len = 100
        if len(safe_base_name) > max_len:
            name_hash = hashlib.md5(
                safe_base_name.encode('utf-8')).hexdigest()[:8]
            safe_base_name = f"{safe_base_name[:max_len - 9]}_{name_hash}"

        cache_file_name = f"{safe_base_name}_cache.json"
        return os.path.join(config.CACHE_DIR, cache_file_name)

    def _load_or_process_document(self, file_path: str, is_corpus: bool = False) -> Optional[ProcessedDocData]:
        """
        Load a single document (input or corpus) from cache, or process it fully.

        A cache entry is reused only when (a) the source file has not been
        modified since the cache was written, (b) the critical config keys
        match the cached snapshot, and (c) all required keys are present in
        the entry. Any validation failure falls through to the full
        extract -> preprocess -> cache pipeline.

        Args:
            file_path: Path of the document to load/process.
            is_corpus: True for corpus documents (only changes log wording).

        Returns:
            ProcessedDocData on success (minhash_signature, chunk_embeddings
            and chunk_type_used_for_embedding are always None here — they
            are never cached and are computed later), or None when
            extraction/processing fails.
        """
        doc_type = "语料库文档" if is_corpus else "输入文档"
        task_id_str = f"[任务 {self.current_task_id or '初始化'}]"
        logger.debug(
            f"{task_id_str} 开始加载/处理 {doc_type}: {os.path.basename(file_path)}")
        cache_path = self._get_structured_cache_path(file_path)
        use_cache = False
        cached_data: Optional[Dict] = None  # raw dict first; converted to ProcessedDocData below

        # 1. Validate the cache entry (mtime + config snapshot + required keys).
        if os.path.exists(cache_path) and os.path.exists(file_path):
            try:
                logger.debug(f"{task_id_str} 发现缓存文件: {cache_path}")
                with open(cache_path, 'r', encoding=config.DEFAULT_ENCODING) as f_cache:
                    cached_data = json.load(f_cache)

                original_mtime = os.path.getmtime(file_path)
                cached_mtime = cached_data.get("source_file_mtime")
                cached_config = cached_data.get("config_snapshot", {})

                if cached_mtime and cached_mtime >= original_mtime:
                    critical_config_keys = [  # must match the keys snapshotted when writing the cache
                        'MIN_SEMANTIC_CHUNK_LENGTH', 'MAX_SEMANTIC_CHUNK_LENGTH', 'LTP_MODEL_NAME'
                    ]
                    config_match = True
                    for key in critical_config_keys:
                        current_value = getattr(config, key, None)
                        if cached_config.get(key) != current_value:
                            config_match = False
                            logger.info(
                                f"{task_id_str} 缓存失效 ({os.path.basename(file_path)}): 配置项 '{key}' 不匹配。")
                            break
                    if config_match:
                        required_keys = [
                            "cleaned_text", "paragraphs_pos", "sentences_pos", "stats"]
                        if all(key in cached_data for key in required_keys):
                            use_cache = True
                            logger.info(
                                f"{task_id_str} 使用有效的缓存文件: {os.path.basename(file_path)}")
                        else:
                            logger.info(f"{task_id_str} 缓存文件缺少必需键，将重新处理。")
                else:
                    logger.info(f"{task_id_str} 缓存失效: 文件已被修改。")
            except Exception as e:
                logger.warning(
                    f"验证缓存文件 {cache_path} 时出错: {e}。将重新处理。", exc_info=True)

        # 2. Cache is valid: rebuild the typed structure and return it.
        if use_cache and cached_data:
            try:
                # Convert back to ProcessedDocData (positions become tuples).
                # NOTE: the cache never contains precomputed signatures/embeddings.
                processed_doc = ProcessedDocData(
                    source_file_path=cached_data["source_file_path"],
                    source_file_mtime=cached_data["source_file_mtime"],
                    config_snapshot=cached_data["config_snapshot"],
                    cleaned_text=cached_data["cleaned_text"],
                    paragraphs_pos=[tuple(p) for p in cached_data.get(
                        "paragraphs_pos", [])],
                    sentences_pos=[tuple(s) for s in cached_data.get(
                        "sentences_pos", [])],
                    stats=cached_data["stats"],
                    minhash_signature=None,  # not stored in the cache
                    chunk_embeddings=None,  # not stored in the cache
                    chunk_type_used_for_embedding=None
                )
                return processed_doc
            except Exception as convert_err:
                logger.error(
                    f"转换缓存数据格式时出错 ({cache_path}): {convert_err}。将重新处理。")
                use_cache = False

        # 3. No valid cache: run the full processing pipeline.
        logger.info(
            f"{task_id_str} 处理 {doc_type} (无有效缓存): {os.path.basename(file_path)}")
        try:
            raw_text = text_extractor.extract_text(file_path)
            if raw_text is None:
                raise ValueError("无法提取文本内容。")

            cleaned_text, paragraphs_pos, sentences_pos, total_words, total_paragraphs_count = preprocessor.preprocess_text(
                raw_text)

            # Handle a document that is completely empty after cleaning.
            if not cleaned_text and total_words == 0:
                logger.warning(
                    f"{task_id_str} 文件 '{os.path.basename(file_path)}' 处理后内容为空。")
                # Return a structure representing an empty document.
                current_mtime = os.path.getmtime(
                    file_path) if os.path.exists(file_path) else time.time()
                critical_keys_snap = {key: getattr(config, key, None) for key in [
                    'MIN_SEMANTIC_CHUNK_LENGTH', 'MAX_SEMANTIC_CHUNK_LENGTH', 'LTP_MODEL_NAME']}
                return ProcessedDocData(
                    source_file_path=file_path, source_file_mtime=current_mtime,
                    config_snapshot=critical_keys_snap, cleaned_text="",
                    paragraphs_pos=[], sentences_pos=[],
                    stats={"total_words": 0,
                           "total_paragraphs": 0, "total_chars": 0},
                    minhash_signature=None, chunk_embeddings=None, chunk_type_used_for_embedding=None
                )

            # Build the ProcessedDocData for a non-empty document.
            current_mtime = os.path.getmtime(file_path)
            critical_keys_snap = {key: getattr(config, key, None) for key in [
                'MIN_SEMANTIC_CHUNK_LENGTH', 'MAX_SEMANTIC_CHUNK_LENGTH', 'LTP_MODEL_NAME']}
            processed_data = ProcessedDocData(
                source_file_path=file_path, source_file_mtime=current_mtime,
                config_snapshot=critical_keys_snap, cleaned_text=cleaned_text,
                paragraphs_pos=paragraphs_pos, sentences_pos=sentences_pos,
                stats={"total_words": total_words, "total_paragraphs":
                       total_paragraphs_count, "total_chars": len(cleaned_text)},
                minhash_signature=None, chunk_embeddings=None, chunk_type_used_for_embedding=None  # computed lazily later
            )

            # Write the cache (without precomputed signatures/embeddings;
            # position tuples are stored as lists for JSON).
            try:
                data_to_save = processed_data.copy()
                data_to_save.pop("minhash_signature", None)
                data_to_save.pop("chunk_embeddings", None)
                data_to_save.pop("chunk_type_used_for_embedding", None)
                data_to_save["paragraphs_pos"] = [
                    list(p) for p in paragraphs_pos]  # type: ignore
                data_to_save["sentences_pos"] = [
                    list(s) for s in sentences_pos]  # type: ignore

                os.makedirs(os.path.dirname(cache_path), exist_ok=True)
                with open(cache_path, 'w', encoding=config.DEFAULT_ENCODING) as f_cache:
                    json.dump(data_to_save, f_cache,
                              ensure_ascii=False, indent=4)
                logger.info(f"{task_id_str} 处理后的数据已缓存到: {cache_path}")
            except Exception as cache_write_err:
                # Cache write failure is non-fatal: the processed data is returned anyway.
                logger.error(
                    f"写入缓存文件 {cache_path} 失败: {cache_write_err}", exc_info=True)

            return processed_data

        except Exception as proc_err:
            logger.error(
                f"{task_id_str} 处理 {doc_type} 文件 {os.path.basename(file_path)} 时发生严重错误: {proc_err}", exc_info=True)
            return None

    def _load_and_preprocess_corpus(self) -> Dict[str, ProcessedDocData]:
        """Load and preprocess every corpus document (via _load_or_process_document)."""
        result: Dict[str, ProcessedDocData] = {}
        files = utils.get_corpus_files()
        if not files:
            logger.warning("语料库目录为空或无法访问，将使用空语料库。")
            return result
        total = len(files)
        logger.info(f"开始加载和预处理语料库 (共 {total} 个文件)...")
        ok_count = 0
        fail_count = 0
        # Temporarily tag log lines with a pseudo task id while loading.
        saved_task_id = self.current_task_id
        self.current_task_id = "语料库加载"
        for done, path in enumerate(files, start=1):
            doc_key = os.path.basename(path)
            try:
                doc = self._load_or_process_document(path, is_corpus=True)
            except Exception as e:
                logger.exception(f"处理语料库文件 {path} 时发生顶层意外错误: {e}")
                fail_count += 1
            else:
                if doc:
                    result[doc_key] = doc
                    ok_count += 1
                else:
                    fail_count += 1
            # Progress line every 100 files and on the final file.
            if done % 100 == 0 or done == total:
                logger.info(
                    f"  已处理 {done}/{total} 个语料库文件 (成功: {ok_count}, 失败: {fail_count})...")
        self.current_task_id = saved_task_id
        if result:
            logger.info(
                f"语料库加载和预处理完成。成功加载 {ok_count} 个文档，失败 {fail_count} 个。")
        else:
            logger.error("未能成功加载任何语料库文档！")
        return result

    # ========================================================================
    #                 Main check entry point: check() (final version)
    # ========================================================================

    def check(self, input_file_path: str) -> Dict[str, Any]:
        """
        Run a full plagiarism check on one input file.

        Pipeline: load/preprocess input -> generate candidate set ->
        detailed comparison -> compile results & metrics -> save reports.

        Args:
            input_file_path: Path of the document to check.

        Returns:
            A dict with keys task_id, metrics (CoreResultData or None),
            highlighted_html_path, metrics_path, cleaned_text_path,
            chunk_list_path, cache_path, recall_stats and error (None on
            success). Processing errors are captured into "error" instead
            of propagating.
        """
        self.current_task_id = utils.generate_task_id()  # fresh task id
        start_time = time.time()
        task_id_str = f"[任务 {self.current_task_id}]"
        logger.info(f"{task_id_str} ====== 开始处理新任务: {input_file_path} ======")

        # --- Result skeleton returned to the caller ---
        final_result: Dict[str, Any] = {
            "task_id": self.current_task_id,
            "metrics": None,  # produced by ResultCompiler
            "highlighted_html_path": None, "metrics_path": None,
            "cleaned_text_path": None, "chunk_list_path": None,
            "cache_path": None, "recall_stats": {}, "error": None
        }
        core_result_data: Optional[CoreResultData] = None  # compiled result
        # FIX: pre-bind input_data -- it is read in the except handler and in
        # stage 5 below, which raised UnboundLocalError whenever an error
        # occurred before stage 1 assigned it.
        input_data: Optional[ProcessedDocData] = None

        try:
            # --- Stage 1: load & preprocess the input document ---
            logger.info(f"{task_id_str} [阶段 1/5] 加载和预处理输入文档...")
            input_data = self._load_or_process_document(
                input_file_path, is_corpus=False)
            if input_data is None:
                raise ValueError("无法加载或处理输入文件。")
            # Reject documents that are empty after cleaning.
            if not input_data.get("cleaned_text") and input_data.get("stats", {}).get("total_chars", 0) == 0:
                raise ValueError("输入文件内容为空或无法提取。")
            final_result["cache_path"] = self._get_structured_cache_path(
                input_file_path)
            logger.info(
                f"{task_id_str} 输入文档预处理完成。字符数: {input_data['stats']['total_chars']}")

            # --- Stage 2: candidate generation ---
            logger.info(f"{task_id_str} [阶段 2/5] 生成候选比对文档...")
            # Inject the current task_id into index_manager so the
            # CandidateGenerator can use it. (Stop-gap; passing task_id to
            # generate() directly would be cleaner.)
            setattr(self.index_manager, 'current_task_id', self.current_task_id)
            candidate_ids, recall_stats = self.candidate_generator.generate(
                input_data)
            final_result["recall_stats"] = recall_stats
            logger.info(
                f"{task_id_str} 候选集生成完成，共找到 {len(candidate_ids)} 个候选文档。召回统计: {recall_stats}")

            # --- Stage 3: detailed comparison ---
            logger.info(f"{task_id_str} [阶段 3/5] 执行详细相似度比较...")
            if not candidate_ids:
                logger.info(f"{task_id_str} 候选集为空，跳过详细比较阶段。")
                comparison_results = {
                    'literal_matches': [], 'semantic_matches': [], 'ngram_scores': {}}
            else:
                # FIX: ProcessedDocData is dict-based (see the .get()/item
                # accesses above), so the previous setattr() on it raised
                # AttributeError; inject the task id via item assignment.
                input_data['task_id'] = self.current_task_id  # type: ignore
                comparison_results = self.detailed_comparer.compare(
                    input_data, candidate_ids)
                logger.info(f"{task_id_str} 详细比较完成。")
                logger.debug(f"{task_id_str} 比较结果摘要: "
                             f"字面匹配={len(comparison_results.get('literal_matches', []))}, "
                             f"语义匹配={len(comparison_results.get('semantic_matches', []))}, "
                             f"N-gram得分文档数={len(comparison_results.get('ngram_scores', {}))}")

            # --- Stage 4: result compilation & aggregate metrics ---
            logger.info(f"{task_id_str} [阶段 4/5] 编译结果并计算聚合指标...")
            core_result_data = self.result_compiler.compile(
                self.current_task_id, input_file_path, input_data, comparison_results, recall_stats
            )
            final_result["metrics"] = core_result_data
            logger.info(
                f"{task_id_str} 结果编译完成。总文字复制比: {core_result_data['aggregate_metrics']['overall_similarity_ratio']:.2f}%")

        except Exception as e:
            error_msg = f"处理文件 '{os.path.basename(input_file_path)}' 时发生严重错误: {e}"
            logger.exception(f"{task_id_str} {error_msg}")
            final_result["error"] = error_msg
            # Best-effort metrics: create a skeleton if compilation never
            # ran, then copy over whatever stats we already have.
            if core_result_data is None:
                core_result_data = self._initialize_core_result_data(
                    self.current_task_id, os.path.basename(input_file_path))
            core_result_data["error_message"] = error_msg
            if input_data:
                core_result_data["text_stats"]["total_words"] = input_data.get(
                    "stats", {}).get("total_words", 0)
                core_result_data["text_stats"]["total_chars"] = input_data.get(
                    "stats", {}).get("total_chars", 0)
                core_result_data["text_stats"]["total_paragraphs"] = input_data.get(
                    "stats", {}).get("total_paragraphs", 0)
            final_result["metrics"] = core_result_data

        # --- Stage 5: wrap-up & report generation ---
        logger.info(f"{task_id_str} [阶段 5/5] 完成任务并生成报告...")
        end_time = time.time()
        processing_time = round(end_time - start_time, 2)

        # Stamp the task info (metrics may legitimately be absent on failure).
        if final_result["metrics"]:
            final_result["metrics"]["task_info"]["check_time"] = \
                datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            final_result["metrics"]["task_info"]["processing_time_seconds"] = processing_time
        else:
            logger.error(f"{task_id_str} 无法更新任务时间戳和耗时，因为 metrics 数据为空。")

        # --- Persist intermediate files and reports ---
        if core_result_data:
            # Save the cleaned input text.
            if input_data and input_data.get("cleaned_text"):
                try:
                    cleaned_text_path = utils.build_result_path(
                        config.RESULT_DIR, self.current_task_id, "_cleaned.txt")
                    with open(cleaned_text_path, 'w', encoding=config.DEFAULT_ENCODING) as f:
                        f.write(input_data["cleaned_text"])
                    final_result["cleaned_text_path"] = cleaned_text_path
                    logger.info(
                        f"{task_id_str} 清洗后的输入文本已保存到: {cleaned_text_path}")
                except Exception as e:
                    logger.error(f"{task_id_str} 保存清洗文本失败: {e}")

            # Save the JSON metrics file.
            try:
                metrics_path = utils.build_result_path(
                    config.RESULT_DIR, self.current_task_id, "_metrics.json")
                # Make sure everything is JSON-serializable first.
                serializable_metrics = self._make_result_serializable(
                    core_result_data)
                with open(metrics_path, 'w', encoding=config.DEFAULT_ENCODING) as f:
                    json.dump(serializable_metrics, f,
                              ensure_ascii=False, indent=4)
                final_result["metrics_path"] = metrics_path
                logger.info(f"{task_id_str} 指标 JSON 文件已保存到: {metrics_path}")
            except Exception as e:
                logger.error(
                    f"{task_id_str} 保存指标 JSON 文件失败: {e}", exc_info=True)

        else:
            logger.error(f"{task_id_str} 无法保存文件和生成报告，因为 core_result_data 为空。")

        logger.info(
            f"{task_id_str} ====== 任务处理完成。总耗时: {processing_time:.2f} 秒 ======")
        return final_result

    def _initialize_core_result_data(self, task_id: str, input_filename: str) -> CoreResultData:
        """Build a zero-filled CoreResultData skeleton for a new task."""
        # NOTE: this mirrors the version inside ResultCompiler; a shared home
        # (e.g. core_engine_types) would avoid the duplication.
        empty_aggregates = AggregateMetrics(
            overall_similarity_ratio=0.0,
            duplicate_chars_count=0,
            max_similarity_ratio_single_source=0.0,
            max_duplicate_chars_single_source=0,
            max_similarity_source_name="N/A",
            literal_hit_source_count=0,
            suspected_paragraph_count=0,
            max_duplicate_chars_in_paragraph=0,
            min_duplicate_chars_in_paragraph=0,
            front_part_overlap_chars=0,
            rear_part_overlap_chars=0,
            max_minhash_jaccard=0.0,
            max_minhash_source_name="N/A",
            max_ngram_score=0.0,
            max_ngram_source_name="N/A",
            high_ngram_source_count_threshold_0_7=0,
            semantic_hit_block_count=0,
            semantic_involved_input_block_count=0,
            max_semantic_score=0.0,
            max_semantic_source_name="N/A",
        )
        empty_stats = TextStats(
            total_words=0,
            total_chars=0,
            total_paragraphs=0,
            corpus_doc_count=len(self.corpus_data),
            compared_doc_count=0,
        )
        initial_task_info = TaskInfo(
            task_id=task_id,
            check_time="初始化...",
            original_filename=input_filename,
            processing_time_seconds=0.0,
        )
        return CoreResultData(
            task_info=initial_task_info,
            text_stats=empty_stats,
            aggregate_metrics=empty_aggregates,
            hit_details_by_source={},
            error_message=None,
        )

    def _make_result_serializable(self, result_data: CoreResultData) -> Dict:
        """确保 CoreResultData 可 JSON 序列化。"""
        serializable_data = json.loads(json.dumps(
            result_data, default=str))  # 使用 str 作为默认转换器可能丢失信息，但简单
        # 尝试更精细的转换
        try:
            metrics = serializable_data.get("aggregate_metrics", {})
            for key, value in metrics.items():
                if isinstance(value, (np.integer, np.int_)):
                    metrics[key] = int(value)
                elif isinstance(value, (np.floating, np.float64)):
                    metrics[key] = float(value) if math.isfinite(
                        value) else 0.0  # 处理 NaN/Inf
            # 可以在这里添加对 hit_details_by_source 中 similarity_score 的处理
        except Exception as e:
            logger.warning(f"序列化结果时进行类型转换失败: {e}")
            # 返回简单序列化的结果
        return serializable_data
