# main_app.py
import json
import os
import logging
from flask import Blueprint, Flask, jsonify, request, send_from_directory
from flask_cors import CORS

import config  # 项目配置
import utils   # 项目通用工具

# --- 导入 Minerva OCR Blueprint ---
from minerva_ocr.api import minerva_bp

# --- 导入并初始化查重核心引擎 ---
from plagiarism_core.preprocessor import preprocess_text
from plagiarism_core.core_engine import PlagiarismChecker
import plagiarism_core.similarity_calculator

# --- Logging configuration ---
# Log level is driven by config.DEBUG_LOGGING; format includes module:lineno for tracing.
logging.basicConfig(level=logging.DEBUG if config.DEBUG_LOGGING else logging.INFO,
                    format='%(asctime)s - %(levelname)s - [%(module)s:%(lineno)d] - %(message)s')
logger = logging.getLogger(__name__)

app = Flask(__name__, static_folder=None) # disable default static folder; static routes are registered per module below
CORS(app)


# --- Register the Minerva OCR blueprint ---
app.register_blueprint(minerva_bp)
# Serve Minerva's static files (e.g. test_api.html placed under its module's static dir).
@app.route('/minerva_static/<path:filename>')
def minerva_static_files(filename):
    """Serve a file from minerva_ocr/static with verbose path diagnostics.

    Returns the file via send_from_directory, or a 404 JSON payload when the
    resolved path does not exist on disk.
    """
    base_dir = os.path.dirname(__file__)
    static_folder = os.path.join(base_dir, 'minerva_ocr', 'static')
    full_path = os.path.join(static_folder, filename)
    # BUGFIX: log the actual requested filename (previously logged a literal placeholder).
    logger.info(f"Serving minerva_static file: '{filename}'")
    logger.info(f"Calculated base_dir: '{base_dir}'")  # directory containing main_app.py
    logger.info(f"Calculated static_folder for minerva: '{static_folder}'")  # where Flask looks for the file
    logger.info(f"Attempting to send file from full_path: '{full_path}'")
    if not os.path.exists(full_path):
        logger.error(f"FLASK SAYS: FILE NOT FOUND AT: '{full_path}'")
        return jsonify({"error": "File not found on server by Flask", "path_checked": full_path}), 404
    logger.info(f"FLASK SAYS: File exists at '{full_path}'. Proceeding with send_from_directory.")
    # send_from_directory performs its own path-traversal safety checks.
    return send_from_directory(static_folder, filename)


# --- Initialize the plagiarism-check engine instance (global or on demand) ---
# For local verification a single global instance is sufficient; large-scale
# deployment may require more sophisticated lifecycle management.
try:
    logger.info("Initializing PlagiarismChecker instance...")
    plagiarism_checker_instance = PlagiarismChecker()
    logger.info("PlagiarismChecker instance initialized successfully.")
except Exception as e:
    # Initialization failure is non-fatal: endpoints check for None and answer 503.
    logger.critical(f"Failed to initialize PlagiarismChecker: {e}", exc_info=True)
    plagiarism_checker_instance = None # marked unavailable

# =======================================================
#         Plagiarism Service API
# =======================================================
plagiarism_bp = Blueprint('plagiarism_checker', __name__, url_prefix='/plagiarism')

@plagiarism_bp.route('/check_file', methods=['POST'])
def api_check_file():
    """Accept an uploaded file, run the full plagiarism check, return the report.

    Responds with JSON: on success, ``task_id`` and ``report_data`` (the
    engine's CoreResultData from the ``metrics`` field); on failure, an
    ``error`` message with an appropriate HTTP status (400/500/503).
    """
    if not plagiarism_checker_instance:
        return jsonify({"success": False, "error": "查重引擎未初始化或初始化失败。"}), 503

    if 'file' not in request.files:
        return jsonify({"success": False, "error": "请求中没有文件部分"}), 400
    file = request.files['file']
    if file.filename == '':
        return jsonify({"success": False, "error": "未选择文件"}), 400

    if file and utils.general_allowed_file(file.filename, config.MINERVA_ALLOWED_PDF_EXTENSIONS):
        try:
            # Save the upload to the check service's temp directory under a unique name.
            filename = utils.secure_filename(file.filename)  # sanitize via utils' secure_filename
            # BUGFIX: the sanitized filename was computed but never used in the temp path.
            temp_file_path = os.path.join(config.TEMP_DIR, f"{utils.generate_task_id()}_{filename}")
            file.save(temp_file_path)
            logger.info(f"查重API：文件 '{filename}' 已保存到: {temp_file_path}")

            # Invoke the core plagiarism engine.
            result_data_from_engine = plagiarism_checker_instance.check(temp_file_path)

            if result_data_from_engine.get("error"):  # engine reported a failure
                return jsonify({
                    "success": False,
                    "error": result_data_from_engine["error"],
                    "task_id": result_data_from_engine.get("task_id"),
                    "report_data": result_data_from_engine.get("metrics")  # partial metrics may still be present
                }), 500  # alternatively 200 with success:false

            if result_data_from_engine.get("metrics") and result_data_from_engine.get("task_id"):
                return jsonify({
                    "success": True,
                    "message": "查重完成",
                    "task_id": result_data_from_engine["task_id"],
                    "report_data": result_data_from_engine["metrics"]  # metrics field is the CoreResultData
                }), 200
            else:
                # Should not happen if check() honours its contract.
                logger.error(f"查重API：引擎返回结果结构不完整。 TaskID: {result_data_from_engine.get('task_id')}, Metrics_Present: {bool(result_data_from_engine.get('metrics'))}")
                return jsonify({
                    "success": False,
                    "error": "查重引擎返回结果结构不完整。",
                    "task_id": result_data_from_engine.get("task_id")
                }), 500

        except Exception as e:
            logger.error(f"查重API处理文件 '{file.filename}' 时发生错误: {e}", exc_info=True)
            return jsonify({"success": False, "error": f"处理文件时出错: {str(e)}"}), 500
    else:
        # BUGFIX: error message now names the extension list actually checked above
        # (MINERVA_ALLOWED_PDF_EXTENSIONS), not the unrelated ALLOWED_EXTENSIONS.
        return jsonify({"success": False, "error": f"文件类型不允许 (仅支持: {', '.join(config.MINERVA_ALLOWED_PDF_EXTENSIONS)})"}), 400


@plagiarism_bp.route('/get_report_data/<task_id>', methods=['GET'])
def api_get_report_data(task_id):
    """Load the stored metrics JSON (CoreResultData) for *task_id*.

    Also returns the cleaned input text, when present, so the frontend can
    highlight matches. Missing metrics file or empty data yields 404.
    """
    if not plagiarism_checker_instance:
        return jsonify({"success": False, "error": "查重引擎未初始化。"}), 503

    try:
        # The metrics file is a direct JSON dump of CoreResultData.
        metrics_path = utils.build_result_path(config.RESULT_DIR, task_id, "_metrics.json")
        if not os.path.exists(metrics_path):
            return jsonify({"success": False, "error": "未找到指定任务的报告数据。"}), 404

        with open(metrics_path, 'r', encoding=config.DEFAULT_ENCODING) as fh:
            report_data = json.load(fh)

        # Pick up the cleaned text companion file for highlighting, if available.
        cleaned_text_content = None
        cleaned_text_path = utils.build_result_path(config.RESULT_DIR, task_id, "_cleaned.txt")
        if os.path.exists(cleaned_text_path):
            with open(cleaned_text_path, 'r', encoding=config.DEFAULT_ENCODING) as fh:
                cleaned_text_content = fh.read()

        if not report_data:
            return jsonify({"success": False, "error": "无法加载报告数据或数据为空。"}), 404

        return jsonify({
            "success": True,
            "report_data": report_data,
            "cleaned_input_text": cleaned_text_content,  # ship the source text alongside
        }), 200
    except Exception as e:
        logger.error(f"获取报告数据 API (task_id: {task_id}) 出错: {e}", exc_info=True)
        return jsonify({"success": False, "error": f"获取报告数据失败: {str(e)}"}), 500

@plagiarism_bp.route('/preprocess_text', methods=['POST'])
def api_preprocess_text():
    """Preprocess an uploaded file or a raw text snippet and return a preview.

    Accepts either a 'file' multipart part (routed through the engine's cached
    document pipeline) or a 'text_content' form field (runs preprocess_text
    directly, no caching). Returns cleaned-text, paragraph and sentence
    previews plus basic stats for visualization.
    """
    if not plagiarism_checker_instance: # preprocessing does not need the full engine, but reuses its _load_or_process_document
        return jsonify({"success": False, "error": "查重引擎未初始化或初始化失败。"}), 503

    data_to_process = None
    source_filename = "uploaded_text_snippet.txt" # default name for raw-text input

    if 'file' in request.files and request.files['file'].filename != '':
        file = request.files['file']
        if utils.general_allowed_file(file.filename, allowed_extensions=config.ALLOWED_EXTENSIONS):
            try:
                # Save the upload to a temp path so _load_or_process_document can read it.
                source_filename = utils.secure_filename(file.filename)
                temp_file_path = os.path.join(config.TEMP_DIR, f"preprocess_{utils.generate_task_id()}_{source_filename}")
                file.save(temp_file_path)
                # Reuse the engine's document-processing logic (includes caching).
                processed_doc = plagiarism_checker_instance._load_or_process_document(temp_file_path, is_corpus=False)
                if processed_doc:
                    data_to_process = processed_doc
                # os.remove(temp_file_path) # cleanup (intentionally left disabled)
            except Exception as e:
                logger.error(f"预处理API：处理上传文件 '{file.filename}' 失败: {e}", exc_info=True)
                return jsonify({"success": False, "error": f"处理上传文件失败: {str(e)}"}), 500
        else:
            return jsonify({"success": False, "error": "上传的文件类型不允许"}), 400

    elif 'text_content' in request.form and request.form['text_content'].strip():
        raw_text = request.form['text_content']
        # Call the preprocessing function directly (no caching for ad-hoc text).
        cleaned_text, paragraphs_pos, sentences_pos, total_words, total_paragraphs_count = \
            preprocess_text(raw_text)
        data_to_process = {
            "source_file_path": source_filename,
            "cleaned_text": cleaned_text,
            "paragraphs_pos": paragraphs_pos,
            "sentences_pos": sentences_pos,
            "stats": {"total_words": total_words, "total_paragraphs": total_paragraphs_count, "total_chars": len(cleaned_text)}
        }
    else:
        return jsonify({"success": False, "error": "请提供文件或文本内容进行预处理。"}), 400

    if data_to_process:

        min_length_config = getattr(config, 'MIN_SEMANTIC_CHUNK_LENGTH', '未配置') # fallback when the setting is absent

        # Return only the parts needed for visualization.
        response_data = {
            "cleaned_text_preview": data_to_process["cleaned_text"][:500] + ("..." if len(data_to_process["cleaned_text"]) > 500 else ""), # truncated preview
            "paragraphs": [{"text": p[0], "start": p[1], "end": p[2]} for p in data_to_process["paragraphs_pos"][:20]], # first 20 paragraphs
            "sentences": [{"text": s[0], "start": s[1], "end": s[2]} for s in data_to_process["sentences_pos"][:50]],   # first 50 sentences
            "stats": data_to_process["stats"],
            "full_cleaned_text_available": bool(data_to_process["cleaned_text"]), # flags whether full text exists (may be too big to return inline)
            "config_min_chunk_length": min_length_config
        }
        return jsonify({"success": True, "data": response_data}), 200
    else:
        return jsonify({"success": False, "error": "未能处理提供的输入。"}), 500


@plagiarism_bp.route('/get_candidates', methods=['POST'])
def api_get_candidates():
    """Run candidate-set generation (recall stage) on an uploaded file.

    Returns up to 200 candidate corpus IDs, the total count, and the recall
    statistics reported by the candidate generator.
    """
    if not plagiarism_checker_instance or not plagiarism_checker_instance.candidate_generator:
        return jsonify({"success": False, "error": "候选集生成器未初始化。"}), 503

    # A file upload is mandatory for this endpoint.
    if 'file' not in request.files or request.files['file'].filename == '':
        return jsonify({"success": False, "error": "请上传文件以生成候选集。"}), 400
    file = request.files['file']

    try:
        filename = utils.secure_filename(file.filename)
        # BUGFIX: the sanitized filename was computed but never used in the temp path.
        temp_file_path = os.path.join(config.TEMP_DIR, f"candidate_{utils.generate_task_id()}_{filename}")
        file.save(temp_file_path)
        input_doc_data = plagiarism_checker_instance._load_or_process_document(temp_file_path, is_corpus=False)
        # os.remove(temp_file_path)  # cleanup (intentionally left disabled)

        if not input_doc_data or not input_doc_data.get("cleaned_text"):
             return jsonify({"success": False, "error": "无法处理输入文件或文件内容为空。"}), 400

        # Generate an ID for this API call so the frontend can correlate results.
        current_api_task_id = utils.generate_task_id()
        candidate_ids, recall_stats = plagiarism_checker_instance.candidate_generator.generate(input_doc_data)
        return jsonify({
            "success": True,
            "data": {
                "candidate_ids": sorted(list(candidate_ids))[:200],  # cap at 200 to keep the payload small
                "total_candidates": len(candidate_ids),
                "recall_stats": recall_stats,
                "api_call_task_id": current_api_task_id
            }
        }), 200
    except Exception as e:
        logger.error(f"生成候选集API出错: {e}", exc_info=True)
        return jsonify({"success": False, "error": f"生成候选集失败: {str(e)}"}), 500


@plagiarism_bp.route('/compare_two_texts', methods=['POST'])
def api_compare_two_texts():
    """Ad-hoc comparison of two texts: literal, semantic, and n-gram scores.

    Expects JSON {"text1": ..., "text2": ..., "options": {...}}; each of the
    three comparisons can be toggled via options (all enabled by default).
    Bypasses the full corpus/index entirely.
    """
    if not plagiarism_checker_instance:
        return jsonify({"success": False, "error": "比较器未初始化。"}), 503
    try:
        payload = request.get_json()
        text1 = payload.get('text1')
        text2 = payload.get('text2')
        options = payload.get('options', {})

        if not text1 or not text2:
            return jsonify({"success": False, "error": "请提供两段文本进行比较。"}), 400

        results = {}
        # NOTE: this comparison is temporary and does not involve the full corpus
        # or index; we call similarity_calculator / semantic_comparer directly.

        if options.get('compare_literal', True):
            # similarity_calculator.find_matches_sequencematcher expects a dict as
            # the corpus, so we fabricate one containing only text2.
            mock_corpus_for_compare = {"text2_source": text2}
            # find_matches_sequencematcher yields (input_start, corpus_start, length, corpus_id);
            # for frontend highlighting, input_start maps to text1 and corpus_start to text2.
            literal_matches_raw = plagiarism_core.similarity_calculator.find_matches_sequencematcher(text1, mock_corpus_for_compare)
            # Reshape for the frontend's highlightComparisonText, which expects
            # match[0] = start in text1 and match[1] = start in text2.
            results['literal_matches'] = [[m[0], m[1], m[2]] for m in literal_matches_raw if m[3] == "text2_source"]

        if options.get('compare_semantic', True) and plagiarism_checker_instance.semantic_comparer_instance:
            # semantic_comparer.test_similarity scores the two texts as wholes;
            # chunk-level comparison would require more elaborate logic.
            score = plagiarism_checker_instance.semantic_comparer_instance.test_similarity(text1, text2)
            results['semantic_score'] = score if score is not None else "计算失败"
        else:
            results['semantic_score'] = "未执行或比较器不可用"


        if options.get('compare_ngram', True):
            mock_corpus_for_ngram = {"text2_source": text2}
            ngram_scores = plagiarism_core.similarity_calculator.calculate_ngram_cosine_similarity(text1, mock_corpus_for_ngram)
            results['ngram_score_vs_text2'] = ngram_scores.get("text2_source", "N/A")


        return jsonify({"success": True, "data": results}), 200
    except Exception as e:
        logger.error(f"双文本比较API出错: {e}", exc_info=True)
        return jsonify({"success": False, "error": f"比较失败: {str(e)}"}), 500


@plagiarism_bp.route('/get_vector', methods=['POST'])
def api_get_vector():
    """Encode the posted text with the semantic model and return its embedding.

    Expects JSON {"text_content": ...}; responds with the text and its vector
    as a plain JSON-serializable list.
    """
    if not plagiarism_checker_instance or not plagiarism_checker_instance.semantic_comparer_instance:
        return jsonify({"success": False, "error": "语义模型未初始化。"}), 503
    try:
        payload = request.get_json()
        text_content = payload.get('text_content')
        if not text_content:
            return jsonify({"success": False, "error": "请提供文本内容。"}), 400

        # Encode directly with the semantic comparer's underlying model.
        model = plagiarism_checker_instance.semantic_comparer_instance.model
        if not model:
             return jsonify({"success": False, "error": "语义模型对象不存在。"}), 503

        # model.encode typically yields a numpy array or tensor; convert to a
        # list so it can be JSON-serialized.
        embedding = model.encode(text_content, convert_to_numpy=True, show_progress_bar=False)
        if hasattr(embedding, 'tolist'):
            vector_list = embedding.tolist()
        else:
            vector_list = list(embedding)

        response_body = {"text": text_content, "vector": vector_list}
        return jsonify({"success": True, "data": response_body}), 200
    except Exception as e:
        logger.error(f"获取向量API出错: {e}", exc_info=True)
        return jsonify({"success": False, "error": f"获取向量失败: {str(e)}"}), 500


# --- Register the plagiarism blueprint ---
app.register_blueprint(plagiarism_bp)
# Serve the plagiarism module's static files.
@app.route('/plagiarism_static/<path:filename>')
def plagiarism_static_files(filename):
    """Serve a file from plagiarism_core/static with verbose path diagnostics.

    Returns the file via send_from_directory, or a 404 JSON payload when the
    resolved path does not exist on disk.
    """
    base_dir = os.path.dirname(__file__)
    static_folder = os.path.join(base_dir, 'plagiarism_core', 'static')
    full_path = os.path.join(static_folder, filename)
    # BUGFIX: log the actual requested filename (previously logged a literal placeholder).
    logger.info(f"Serving plagiarism_static file: '{filename}'")
    logger.info(f"Calculated base_dir: '{base_dir}'")
    logger.info(f"Calculated static_folder for plagiarism: '{static_folder}'")
    logger.info(f"Attempting to send file from full_path: '{full_path}'")
    if not os.path.exists(full_path):
        logger.error(f"FLASK SAYS: FILE NOT FOUND AT: '{full_path}'")
        return jsonify({"error": "File not found on server by Flask", "path_checked": full_path}), 404
    logger.info(f"FLASK SAYS: File exists at '{full_path}'. Proceeding with send_from_directory.")
    # send_from_directory performs its own path-traversal safety checks.
    return send_from_directory(static_folder, filename)


# --- Home / navigation page (optional) ---
@app.route('/')
def index():
    """Simple navigation page linking to the two test-API HTML pages."""
    minerva_url = "/minerva_static/minerva_test_api.html"
    plagiarism_url = "/plagiarism_static/plagiarism_test_api.html"
    # BUGFIX: the heading previously closed with an invalid </h> tag.
    return f"""
    <h1>本地验证系统</h1>
    <h3>验证 PDF 内容提取，指标提取、术语提取，文件查重（数据清洗、分段分句、向量索引、召回测试）</h3>
    <p><a href="{minerva_url}">OCR 服务测试页面 (Minerva)</a></p>
    <p><a href="{plagiarism_url}">查重功能与过程验证页面</a></p>
    """

if __name__ == '__main__':
    HOST = '127.0.0.1'
    PORT = 5055  # keep consistent with the Minerva API, or pick a different port
    DEBUG_MODE = True  # convenient while debugging

    logger.info(f"主应用服务准备在 http://{HOST}:{PORT}/ 上运行 (调试模式: {DEBUG_MODE})")
    # The reloader is disabled in debug mode so the engine is not initialized twice.
    # (Simplified from `False if DEBUG_MODE else True` to the equivalent `not DEBUG_MODE`.)
    app.run(host=HOST, port=PORT, debug=DEBUG_MODE, use_reloader=not DEBUG_MODE)