import hashlib
import hmac
import json
import logging
import os
import threading
import time
import uuid
from datetime import datetime
from functools import wraps
from io import BytesIO
from logging.handlers import TimedRotatingFileHandler
from urllib.parse import urlparse
from zoneinfo import ZoneInfo

import pandas as pd
import requests
from flask import (
    Flask,
    render_template,
    request,
    jsonify,
    send_from_directory,
    send_file,
    session,
    redirect,
    url_for,
    flash,
)
from flask_apscheduler import APScheduler
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import secure_filename

import config
import utils

app = Flask(__name__)

# Ensure the database folder exists next to this file.
db_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "db")
if not os.path.exists(db_path):
    os.makedirs(db_path)

app.config.from_object(config)
app.secret_key = app.config["SECRET_KEY"]  # SECRET_KEY from config signs the session cookie

# --- Database configuration ---
# Build an absolute path to the SQLite database file.
db_path = os.path.join(db_path, "yolo.db")
app.config["SQLALCHEMY_DATABASE_URI"] = f"sqlite:///{db_path}"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)

# NOTE(review): this create_all() runs BEFORE any db.Model subclass is
# defined (DetectionRecord appears below), so at this point it creates no
# tables unless they already exist — confirm and consider moving it after
# the model definitions.
with app.app_context():
    db.create_all()


# --- Database models ---
class DetectionRecord(db.Model):
    """One image-detection run: stored file paths plus SKU/layer statistics."""

    id = db.Column(db.Integer, primary_key=True)
    # Stored as naive UTC; the views convert to Asia/Shanghai for display.
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    image_path = db.Column(db.String(255), nullable=False)
    annotated_image_path = db.Column(db.String(255), nullable=False)
    sku_count = db.Column(db.Integer)
    sku_ratio = db.Column(db.String(20))
    layer_count = db.Column(db.Integer)
    sku_details = db.Column(db.Text)  # Store as JSON string
    layers_details = db.Column(db.Text)  # Store as JSON string

    def __repr__(self):
        return f"<DetectionRecord {self.id}>"


# Bug fix: the db.create_all() near the top of the file executes before this
# model class exists, so it never created the detection_record table on a
# fresh database. Creating the tables here (the call is idempotent)
# guarantees the schema is present before the first request.
with app.app_context():
    db.create_all()


# --- In-memory store for batch-processing tasks ---
# Maps task_id -> {"status": "...", "results": [...]}. Entries exist only
# while a task is in flight; completed results are persisted to disk and the
# entry is dropped. Guarded by batch_tasks_lock because worker threads mutate
# it concurrently with request handlers.
batch_tasks = {}  # { "task_id": { "status": "...", "results": [...] } }
batch_tasks_lock = threading.Lock()


# --- Logging configuration ---
# Ensure the log directory exists before attaching the file handler.
log_file_path = app.config.get("LOG_FILE", "app.log")
log_dir = os.path.dirname(log_file_path)
if log_dir and not os.path.exists(log_dir):
    os.makedirs(log_dir)

# A single formatter shared by the file and console handlers.
formatter = logging.Formatter(
    "%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"
)

# 1. Log to a file, rotated daily.
log_level_str = app.config.get("LOG_LEVEL", "INFO").upper()
# Fall back to INFO if LOG_LEVEL names an unknown level.
log_level = getattr(logging, log_level_str, logging.INFO)

file_handler = TimedRotatingFileHandler(
    log_file_path,
    when="midnight",  # rotate once per day at midnight
    interval=1,
    backupCount=app.config.get("LOG_BACKUP_COUNT", 7),
    encoding="utf-8",
)
file_handler.setFormatter(formatter)
file_handler.setLevel(log_level)

# 2. Log to the console as well.
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(log_level)

# Attach both handlers to the root logger so all module loggers inherit them.
root_logger = logging.getLogger()
root_logger.addHandler(file_handler)
root_logger.addHandler(stream_handler)
root_logger.setLevel(log_level)

# Remove Flask's default handlers and propagate to the root logger instead,
# so each message is emitted exactly once.
app.logger.handlers.clear()
app.logger.propagate = True

# Silence Werkzeug's own handlers to avoid duplicated access-log lines.
werkzeug_logger = logging.getLogger("werkzeug")
werkzeug_logger.handlers = []

app.logger.info("应用启动，日志已配置输出到文件和控制台")

# Load the two YOLO models: one for bottle detection, one for SKU detection.
bottle_model = utils.get_yolo_model(app.config["BOTTLE_MODEL_PATH"])
sku_model = utils.get_yolo_model(app.config["SKU_MODEL_PATH"])

# Initialise the background job scheduler (started further below).
scheduler = APScheduler()
scheduler.init_app(app)


# Ensure the upload folder exists.
if not os.path.exists(app.config["UPLOAD_FOLDER"]):
    os.makedirs(app.config["UPLOAD_FOLDER"])

# Ensure the batch-results folder exists.
if not os.path.exists(app.config["BATCH_RESULTS_FOLDER"]):
    os.makedirs(app.config["BATCH_RESULTS_FOLDER"])


def cleanup_old_files():
    """Delete files in the upload folder older than CLEANUP_INTERVAL_DAYS.

    Runs as a scheduled job. Only regular files directly inside
    app.config["UPLOAD_FOLDER"] are considered; subdirectories are skipped.
    Failures on individual files are logged and do not abort the sweep.
    """
    folder = app.config["UPLOAD_FOLDER"]
    interval_days = app.config.get("CLEANUP_INTERVAL_DAYS", 3)  # default: 3 days
    now = time.time()
    cleanup_threshold = now - (interval_days * 24 * 60 * 60)

    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        if os.path.isfile(file_path):
            try:
                if os.path.getmtime(file_path) < cleanup_threshold:
                    os.remove(file_path)
                    # Bug fix: the original log messages contained no
                    # placeholder, so the affected path was never recorded.
                    app.logger.debug(f"已删除旧文件: {file_path}")
            except Exception as e:
                app.logger.error(f"删除文件 {file_path} 时出错: {e}")
    app.logger.info(f"定时清理任务完成于: {datetime.now()}")


# Schedule the cleanup job daily at minute 0 of the configured hour.
cleanup_hour = app.config.get("CLEANUP_HOUR", 0)
scheduler.add_job(
    id="CleanupTask",
    func=cleanup_old_files,
    trigger="cron",
    hour=cleanup_hour,
    minute=0,
)

# Start the scheduler (jobs run on background threads).
scheduler.start()


# --- API Key认证装饰器 ---
def require_api_key(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # 从请求头中获取API Key
        api_key = request.headers.get("X-API-KEY")
        # 获取配置中的API Keys字典
        api_keys_dict = app.config.get("API_KEYS", {})

        # 检查API Key是否存在于字典中
        if api_key and api_key in api_keys_dict:
            # 认证成功，继续
            return f(*args, **kwargs)
        else:
            app.logger.warning(
                f"认证失败: 无效的API Key '{api_key}' from IP: {request.remote_addr}"
            )
            # 认证失败
            return (
                jsonify(
                    {"success": False, "msg": "认证失败: 无效的API Key", "data": None}
                ),
                401,
            )

    return decorated_function


# --- 用户认证装饰器 ---
def login_required(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # 如果在config.py中禁用了登录，则直接跳过检查
        if not app.config.get("LOGIN_ENABLED", True):
            return f(*args, **kwargs)

        if "username" not in session:
            return redirect(url_for("login", next=request.url))
        return f(*args, **kwargs)

    return decorated_function


@app.route("/login", methods=["GET", "POST"])
def login():
    """Render the login form (GET) or validate submitted credentials (POST).

    Credentials are checked against the USERS mapping in config, which stores
    MD5 hex digests. NOTE: MD5 is cryptographically weak for passwords; the
    stored hashes would need migrating (e.g. to werkzeug's password helpers)
    before this scheme can be hardened further.
    """
    if request.method == "POST":
        username = request.form["username"]
        password = request.form["password"]
        hashed_password = hashlib.md5(password.encode()).hexdigest()

        users = app.config.get("USERS", {})
        # hmac.compare_digest gives a constant-time comparison so the check
        # does not leak how many leading characters of the hash matched.
        if username in users and hmac.compare_digest(
            users[username], hashed_password
        ):
            session["username"] = username
            flash("登录成功!", "success")
            next_page = request.args.get("next")
            return redirect(next_page or url_for("index"))
        else:
            flash("用户名或密码错误", "danger")
            return render_template("login.html", error="用户名或密码错误")
    return render_template("login.html")


@app.route("/logout")
def logout():
    """Log the current user out and return to the login page."""
    session.pop("username", None)
    flash("您已成功退出登录。", "info")
    login_page = url_for("login")
    return redirect(login_page)


@app.route("/")
@login_required
def index():
    """Serve the main upload/detection page."""
    page = render_template("index.html")
    return page


@app.route("/uploads/<path:filename>")
@login_required
def uploaded_file(filename):
    """Serve an uploaded image with caching disabled so refreshed results show."""
    response = send_from_directory(app.config["UPLOAD_FOLDER"], filename)
    no_cache_headers = {
        "Cache-Control": "no-cache, no-store, must-revalidate",
        "Pragma": "no-cache",
        "Expires": "0",
    }
    for header, value in no_cache_headers.items():
        response.headers[header] = value
    return response


@app.route("/history")
@login_required
def history():
    """Paginated list of past detection records, newest first.

    Supports ?page / ?per_page for pagination and ?search for a
    case-insensitive *prefix* match on the stored file name. Timestamps are
    converted from naive UTC to Asia/Shanghai purely for display.
    """
    page = request.args.get("page", 1, type=int)
    per_page = request.args.get("per_page", 10, type=int)
    search_query = request.args.get("search", type=str)

    query = DetectionRecord.query.order_by(DetectionRecord.timestamp.desc())

    if search_query:
        # image_path is stored as 'uploads/<filename>', so strip everything up
        # to (and including) the first '/' with SUBSTR/INSTR, then LIKE-match
        # the lowercased bare file name against 'search%' (right-fuzzy match).
        # NOTE(review): INSTR finds the FIRST '/', which equals the last one
        # only because stored paths have a single directory level — confirm.
        query = query.filter(
            db.func.lower(
                db.func.substr(
                    DetectionRecord.image_path,
                    db.func.instr(DetectionRecord.image_path, "/") + 1,
                )
            ).like(f"{search_query.lower()}%")
        )

    records = query.paginate(page=page, per_page=per_page)

    # Timestamps were written via datetime.utcnow() (naive UTC): tag them as
    # UTC and convert to Beijing time. The mutation is render-only; nothing
    # here is committed back to the database.
    beijing_tz = ZoneInfo("Asia/Shanghai")
    for record in records.items:
        if record.timestamp:
            record.timestamp = record.timestamp.replace(
                tzinfo=ZoneInfo("UTC")
            ).astimezone(beijing_tz)

    return render_template(
        "history.html", records=records, per_page=per_page, search_query=search_query
    )


@app.route("/history/delete", methods=["POST"])
@login_required
def delete_records():
    """Delete the detection records whose ids are POSTed as JSON {"ids": [...]}.

    Removes the database rows first, then best-effort deletes the associated
    original and annotated image files from the upload folder.
    """
    data = request.get_json()
    record_ids = data.get("ids", [])

    if not record_ids:
        return jsonify({"success": False, "message": "没有提供记录ID"}), 400

    try:
        # Collect file paths BEFORE deleting the rows, while they are readable.
        records_to_delete = DetectionRecord.query.filter(
            DetectionRecord.id.in_(record_ids)
        ).all()
        paths_to_delete = []
        for record in records_to_delete:
            # Both the original and annotated images live in UPLOAD_FOLDER;
            # basename() strips the stored 'uploads/' prefix.
            if record.image_path:
                paths_to_delete.append(
                    os.path.join(
                        app.config["UPLOAD_FOLDER"], os.path.basename(record.image_path)
                    )
                )
            if record.annotated_image_path:
                paths_to_delete.append(
                    os.path.join(
                        app.config["UPLOAD_FOLDER"],
                        os.path.basename(record.annotated_image_path),
                    )
                )

        # Delete the database rows in one bulk statement.
        db.session.query(DetectionRecord).filter(
            DetectionRecord.id.in_(record_ids)
        ).delete(synchronize_session=False)
        db.session.commit()

        # Best-effort removal of the image files; a failure here is logged
        # but does not undo the already-committed database deletion.
        for path in paths_to_delete:
            try:
                if os.path.exists(path):
                    os.remove(path)
                    app.logger.info(f"文件已成功删除: {path}")
            except Exception as e:
                app.logger.error(f"删除文件 {path} 时出错: {e}")

        # Bug fix: report how many records actually existed and were deleted,
        # not how many ids the client sent (they differ for stale/unknown ids).
        flash(f"成功删除 {len(records_to_delete)} 条记录。", "success")
        return jsonify({"success": True, "message": "记录已删除"})
    except Exception as e:
        db.session.rollback()
        app.logger.error(f"删除记录时出错: {e}")
        return jsonify({"success": False, "message": "删除记录时出错"}), 500


@app.route("/history/<int:record_id>")
@login_required
def history_detail(record_id):
    """Re-render the result page for a stored detection record (404 if absent)."""
    record = DetectionRecord.query.get_or_404(record_id)
    sku_details = json.loads(record.sku_details)
    layers_details = json.loads(record.layers_details)
    result_data = {
        "sku_count": record.sku_count,
        "sku_ratio": record.sku_ratio,
        "sku_details": sku_details,
        "layer_count": record.layer_count,
        "layers_details": layers_details,
        "image_path": f"/{record.annotated_image_path}",
    }
    return render_template("result.html", result=result_data, from_history=True)


@app.route("/export/history")
@login_required
def export_history():
    """Export every detection record as an Excel (.xlsx) download."""
    records = DetectionRecord.query.all()
    beijing_tz = ZoneInfo("Asia/Shanghai")

    rows = []
    for record in records:
        # Convert the stored naive-UTC timestamp to naive Beijing time.
        if record.timestamp:
            local_timestamp = (
                record.timestamp.replace(tzinfo=ZoneInfo("UTC"))
                .astimezone(beijing_tz)
                .replace(tzinfo=None)
            )
        else:
            local_timestamp = None

        rows.append(
            {
                "ID": record.id,
                "检测时间": local_timestamp,
                "图片名称": os.path.basename(record.image_path),
                "SKU图片识别URL": url_for(
                    "uploaded_file",
                    filename=os.path.basename(record.annotated_image_path),
                    _external=True,
                ),
                "SKU数量": record.sku_count,
                "层数": record.layer_count,
                "SKU详情": json.dumps(
                    json.loads(record.sku_details), ensure_ascii=False, indent=4
                ),
                "分层详情": json.dumps(
                    json.loads(record.layers_details), ensure_ascii=False, indent=4
                ),
            }
        )

    output = BytesIO()
    with pd.ExcelWriter(output, engine="openpyxl") as writer:
        pd.DataFrame(rows).to_excel(writer, index=False, sheet_name="历史记录")
    output.seek(0)

    return send_file(
        output,
        download_name="history.xlsx",
        as_attachment=True,
        mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    )


def process_detection(file_path):
    """Run the full detection pipeline on one image and record the result.

    The input file is renamed in place to '<base>_<unix-ts>.<ext>' inside the
    upload folder, an annotated copy '<base>_<unix-ts>_annotated.<ext>' is
    written next to it, and a DetectionRecord row is committed.

    Args:
        file_path: path to an image already saved under UPLOAD_FOLDER.

    Returns:
        dict with sku_count / sku_details / layer_count / layers_details /
        image_path on success, or {"error": <message>} on failure.
    """
    # Timestamp the filenames so repeated uploads of the same file coexist.
    timestamp = int(time.time())
    original_filename = os.path.basename(file_path)
    filename_base, file_extension = os.path.splitext(original_filename)

    new_filename_base = f"{filename_base}_{timestamp}"
    new_original_filename = f"{new_filename_base}{file_extension}"
    annotated_filename = f"{new_filename_base}_annotated{file_extension}"

    new_file_path = os.path.join(app.config["UPLOAD_FOLDER"], new_original_filename)
    # NOTE(review): os.rename assumes file_path is on the same filesystem as
    # UPLOAD_FOLDER — confirm for deployments with a mounted temp dir.
    os.rename(file_path, new_file_path)
    file_path = new_file_path

    # Relative paths (under static/) are what gets stored and rendered.
    relative_original_path = os.path.join("uploads", new_original_filename)
    relative_annotated_path = os.path.join("uploads", annotated_filename)
    annotated_file_path = os.path.join(app.config["UPLOAD_FOLDER"], annotated_filename)

    # Detection thresholds all come from config.
    bottle_conf = app.config["BOTTLE_CONFIDENCE_THRESHOLD"]
    bottle_iou = app.config["BOTTLE_IOU_THRESHOLD"]
    sku_conf = app.config["SKU_CONFIDENCE_THRESHOLD"]
    sku_iou = app.config["SKU_IOU_THRESHOLD"]
    slope_tolerance = app.config["SLOPE_TOLERANCE"]
    clarity_threshold = app.config["CLARITY_THRESHOLD"]

    # --- Timing: start ---
    start_time = time.time()

    result = utils.process_all_detections(
        bottle_model,
        sku_model,
        file_path,
        annotated_file_path,
        bottle_conf=bottle_conf,
        bottle_iou=bottle_iou,
        sku_conf=sku_conf,
        sku_iou=sku_iou,
        slope_tolerance=slope_tolerance,
        clarity_threshold=clarity_threshold,
    )

    # utils.process_all_detections signals failure with a 2-tuple
    # (None, "error message"); otherwise it returns a 4-tuple of data.
    if len(result) == 2 and result[0] is None:
        return {"error": result[1]}

    (
        sku_count,
        sku_details,
        layer_count,
        layers_details,
    ) = result

    # --- Timing: end ---
    end_time = time.time()
    processing_duration = end_time - start_time
    app.logger.debug(f"核心检测过程耗时: {processing_duration:.2f} 秒")

    # NOTE(review): sku_ratio is assigned but neither returned nor persisted
    # below — looks like dead code kept for historical reasons; confirm.
    sku_ratio = "N/A"  # SKU占比不再需要，设置为N/A

    result_data = {
        "sku_count": sku_count,
        "sku_details": sku_details,
        "layer_count": layer_count,
        "layers_details": layers_details,
        "image_path": f"/{relative_annotated_path}",
    }

    # --- Persist the run to the database ---
    # A database failure is logged but does not fail the request: the caller
    # still receives the in-memory detection result.
    try:
        record = DetectionRecord(
            image_path=relative_original_path,
            annotated_image_path=relative_annotated_path,
            sku_count=sku_count,
            layer_count=layer_count,
            sku_details=json.dumps(sku_details),
            layers_details=json.dumps(layers_details),
        )
        db.session.add(record)
        db.session.commit()
        app.logger.info(f"检测记录 {record.id} 已保存到数据库。")
    except Exception as e:
        db.session.rollback()
        app.logger.error(f"保存检测记录到数据库时出错: {e}")

    return result_data


@app.route("/predict/image", methods=["POST"])
@login_required
def predict_image():
    """Handle a browser image upload and render the detection result page."""
    if "file" not in request.files:
        return "没有文件部分"
    file = request.files["file"]
    if file.filename == "":
        return "未选择文件"
    if file:
        safe_name = secure_filename(file.filename)
        saved_path = os.path.join(app.config["UPLOAD_FOLDER"], safe_name)
        file.save(saved_path)

        result = process_detection(saved_path)
        if "error" in result:
            return render_template("result.html", error=result["error"])

        return render_template("result.html", result=result)


@app.route("/predict/camera", methods=["POST"])
@login_required
def predict_camera():
    """Handle a camera capture upload and render the detection result page.

    The capture is always saved as 'capture.jpg' (overwriting any previous
    one); process_detection() then renames it with a timestamp.
    """
    if "file" not in request.files:
        return "没有文件部分"
    file = request.files["file"]
    # Bug fix: the original fell through and returned None (an HTTP 500)
    # when the uploaded FileStorage was falsy; answer explicitly instead.
    if not file:
        return "未选择文件"

    filename = "capture.jpg"
    file_path = os.path.join(app.config["UPLOAD_FOLDER"], filename)
    file.save(file_path)

    result = process_detection(file_path)
    if "error" in result:
        return render_template("result.html", error=result["error"])

    return render_template("result.html", result=result)


@app.route("/api/predict", methods=["POST"])
@require_api_key
def api_predict():
    """API endpoint: run detection on an uploaded image file.

    Returns {"success", "msg", "data"}; on success, data carries the SKU
    count, layer count, per-layer details and an absolute URL for the
    annotated image.
    """
    if "file" not in request.files:
        return jsonify({"success": False, "msg": "没有文件部分", "data": None})
    file = request.files["file"]
    if file.filename == "":
        return jsonify({"success": False, "msg": "未选择文件", "data": None})
    if file:
        # Measure the upload size without consuming the stream.
        file.seek(0, os.SEEK_END)
        file_size_bytes = file.tell()
        file.seek(0)
        file_size_kb = file_size_bytes / 1024

        # Log which configured system issued the call.
        api_key = request.headers.get("X-API-KEY")
        system_name = app.config.get("API_KEYS", {}).get(api_key, "Unknown System")
        app.logger.debug(
            f"API call from '{system_name}' for file: '{file.filename}' (Size: {file_size_kb:.2f} KB)"
        )

        # Bug fix: secure_filename() strips non-ASCII characters and can
        # return '' (e.g. for purely Chinese filenames), which would make
        # file.save() target the upload directory itself. Fall back to a
        # generated unique name in that case.
        filename = secure_filename(file.filename) or f"upload_{uuid.uuid4().hex}.jpg"
        file_path = os.path.join(app.config["UPLOAD_FOLDER"], filename)
        file.save(file_path)

        result = process_detection(file_path)

        # Delete the original upload right after the API call.
        # NOTE(review): process_detection() renames the file, so this path
        # usually no longer exists and the remove fails into the log —
        # confirm whether the renamed original should be removed instead.
        try:
            os.remove(file_path)
        except Exception as e:
            app.logger.error(f"删除原始上传文件 {file_path} 时出错: {e}")

        if "error" in result:
            app.logger.error(
                f"API call from '{system_name}' for file: '{file.filename}' failed. Reason: {result['error']}"
            )
            return jsonify({"success": False, "msg": result["error"], "data": None})

        app.logger.debug(
            f"API call from '{system_name}' for file: '{file.filename}' processed successfully."
        )
        # Build an absolute download URL for the annotated image.
        image_url = request.host_url.rstrip("/") + "/" + result.get("image_path", "")

        response_data = {
            "success": True,
            "msg": "api.success",
            "data": {
                "sku_count": result.get("sku_count"),
                "layers": result.get("layer_count"),
                "objects": result.get("layers_details", []),
                "imagepath": image_url,
            },
        }

        return jsonify(response_data)


@app.route("/api/predict_url", methods=["POST"])
@require_api_key
def api_predict_url():
    """API endpoint: download an image from a URL and run detection on it.

    Expects JSON {"url": "..."}; returns the same payload shape as
    /api/predict. The temporary download is always removed afterwards.
    NOTE(review): the URL is fetched server-side without any allow-list,
    which is an SSRF surface if the API is exposed broadly — confirm callers
    are trusted.
    """
    json_data = request.get_json()
    if not json_data or "url" not in json_data:
        return jsonify({"success": False, "msg": "请求体中缺少'url'字段", "data": None})

    image_url = json_data["url"]
    system_name = app.config.get("API_KEYS", {}).get(
        request.headers.get("X-API-KEY"), "Unknown System"
    )
    app.logger.debug(f"API call from '{system_name}' for URL: '{image_url}'")

    # Safely create a temporary filename to avoid issues with special characters in URLs
    try:
        path = urlparse(image_url).path
        ext = os.path.splitext(path)[1]
        if not ext:  # If no extension, default to .jpg
            ext = ".jpg"
    except Exception:
        ext = ".jpg"  # Fallback in case of parsing error

    temp_filename = f"temp_{uuid.uuid4().hex}{ext}"
    temp_filepath = os.path.join(app.config["UPLOAD_FOLDER"], temp_filename)

    try:
        # Download the image (generous timeout for slow origins).
        response = requests.get(image_url, stream=True, timeout=15)
        response.raise_for_status()

        # Expected size from the Content-Length header, when present.
        expected_size = response.headers.get("Content-Length")
        if expected_size is not None:
            expected_size = int(expected_size)

        # Stream to disk while counting the bytes actually received.
        actual_size = 0
        with open(temp_filepath, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
                    actual_size += len(chunk)

        # Reject truncated downloads (fewer bytes than Content-Length).
        if expected_size is not None and actual_size < expected_size:
            os.remove(temp_filepath)  # discard the incomplete file
            app.logger.error(
                f"下载的文件不完整 from URL '{image_url}'. 预期大小: {expected_size}, 实际大小: {actual_size}"
            )
            return jsonify(
                {"success": False, "msg": "下载的文件不完整，请重试", "data": None}
            )

    except requests.exceptions.RequestException as e:
        app.logger.error(f"下载图片失败 from URL '{image_url}': {e}")
        return jsonify({"success": False, "msg": f"下载图片失败: {e}", "data": None})

    # Run the detection pipeline on the downloaded file.
    result = process_detection(temp_filepath)

    # Remove the temporary download (best effort).
    try:
        os.remove(temp_filepath)
    except Exception as e:
        app.logger.error(f"删除临时文件 {temp_filepath} 时出错: {e}")

    if "error" in result:
        app.logger.error(
            f"API call from '{system_name}' for URL: '{image_url}' failed. Reason: {result['error']}"
        )
        return jsonify({"success": False, "msg": result["error"], "data": None})

    # Build the success payload with an absolute annotated-image URL.
    app.logger.debug(
        f"API call from '{system_name}' for URL: '{image_url}' processed successfully."
    )
    annotated_image_url = (
        request.host_url.rstrip("/") + "/" + result.get("image_path", "")
    )

    response_data = {
        "success": True,
        "msg": "api.success",
        "data": {
            "sku_count": result.get("sku_count"),
            "layers": result.get("layer_count"),
            "objects": result.get("layers_details", []),
            "imagepath": annotated_image_url,
        },
    }

    return jsonify(response_data)


def process_batch_urls_async(task_id, urls, system_name, host_url):
    """Background worker: download and run detection on each URL in `urls`.

    One result entry (success or failure) is produced per URL; the full list
    is written to BATCH_RESULTS_FOLDER/<task_id>.json, after which the
    in-memory task entry is dropped.

    Args:
        task_id: identifier the API client polls with.
        urls: list of image URLs, processed sequentially.
        system_name: caller name (currently used for logging context only).
        host_url: request.host_url captured in the request thread, because no
            request context exists inside this worker thread.
    """
    results = []
    total_urls = len(urls)
    app.logger.info(f"后台任务 {task_id} 开始，处理 {total_urls} 个URL。")

    # Bug fix: this function runs in a plain thread, outside any Flask
    # application context, but process_detection() uses db.session
    # (Flask-SQLAlchemy), which requires an active app context. Push one
    # for the duration of the job.
    with app.app_context():
        for i, image_url in enumerate(urls):
            app.logger.debug(
                f"任务 {task_id}: 正在处理第 {i+1}/{total_urls} 个URL: {image_url}"
            )
            # --- Same download-and-detect flow as api_predict_url ---
            temp_filepath = None
            try:
                # Derive a safe temp filename; default to .jpg when the URL
                # path carries no extension.
                path = urlparse(image_url).path
                ext = os.path.splitext(path)[1] or ".jpg"
                temp_filename = f"temp_batch_{uuid.uuid4().hex}{ext}"
                temp_filepath = os.path.join(app.config["UPLOAD_FOLDER"], temp_filename)

                response = requests.get(image_url, stream=True, timeout=15)
                response.raise_for_status()
                with open(temp_filepath, "wb") as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)

                # Run the detection pipeline.
                result = process_detection(temp_filepath)

                if "error" in result:
                    raise Exception(result["error"])

                # Build the per-URL success entry.
                annotated_image_url = (
                    host_url.rstrip("/") + "/" + result.get("image_path", "")
                )
                results.append(
                    {
                        "source_url": image_url,
                        "success": True,
                        "data": {
                            "sku_count": result.get("sku_count"),
                            "layers": result.get("layer_count"),
                            "objects": result.get("layers_details", []),
                            "imagepath": annotated_image_url,
                        },
                    }
                )

            except Exception as e:
                app.logger.error(f"任务 {task_id} 处理URL {image_url} 失败: {e}")
                results.append(
                    {"source_url": image_url, "success": False, "error": str(e)}
                )
            finally:
                # Always clean up the temporary download, even on failure.
                if temp_filepath and os.path.exists(temp_filepath):
                    try:
                        os.remove(temp_filepath)
                    except Exception as e:
                        app.logger.error(
                            f"删除批量任务临时文件 {temp_filepath} 时出错: {e}"
                        )

    # Final result object served to pollers.
    final_result = {"status": "completed", "results": results}

    # Persist the result so it survives the in-memory entry being dropped.
    result_filepath = os.path.join(
        app.config["BATCH_RESULTS_FOLDER"], f"{task_id}.json"
    )
    try:
        with open(result_filepath, "w", encoding="utf-8") as f:
            json.dump(final_result, f, ensure_ascii=False, indent=4)
        app.logger.info(f"任务 {task_id} 的结果已保存到 {result_filepath}")
    except Exception as e:
        app.logger.error(f"保存任务 {task_id} 结果到文件时出错: {e}")

    # Completed tasks are served from disk; drop the in-memory entry.
    with batch_tasks_lock:
        if task_id in batch_tasks:
            del batch_tasks[task_id]
    app.logger.info(f"后台任务 {task_id} 完成，并已从内存中移除。")


@app.route("/api/predict_batch", methods=["POST"])
@require_api_key
def api_predict_batch():
    """API endpoint: accept a list of image URLs and process them asynchronously.

    Returns a task_id immediately; progress and results are polled via
    /api/batch_result/<task_id>.
    """
    json_data = request.get_json()
    if (
        not json_data
        or "urls" not in json_data
        or not isinstance(json_data["urls"], list)
    ):
        return (
            jsonify(
                {"success": False, "msg": "请求体必须包含一个'urls'列表", "data": None}
            ),
            400,
        )

    urls = json_data["urls"]
    if not urls:
        return (
            jsonify({"success": False, "msg": "'urls'列表不能为空", "data": None}),
            400,
        )

    task_id = f"batch_{uuid.uuid4().hex}"
    api_key = request.headers.get("X-API-KEY")
    system_name = app.config.get("API_KEYS", {}).get(api_key, "Unknown System")

    # Register the task as in-flight before the worker starts.
    with batch_tasks_lock:
        batch_tasks[task_id] = {"status": "processing", "results": []}

    # Capture host_url now: no request context exists inside the worker thread.
    host_url = request.host_url
    thread = threading.Thread(
        target=process_batch_urls_async, args=(task_id, urls, system_name, host_url)
    )
    thread.daemon = True  # daemon thread: do not block interpreter shutdown
    thread.start()

    app.logger.info(
        f"来自 '{system_name}' 的批量任务已提交，任务ID: {task_id}，包含 {len(urls)} 个URL。"
    )

    return jsonify({"success": True, "msg": "任务已提交", "data": {"task_id": task_id}})


@app.route("/api/batch_result/<task_id>", methods=["GET"])
@require_api_key
def get_batch_result(task_id):
    """Return the status/results of a batch task, from memory or from disk."""
    # In-flight tasks live in the in-memory dict.
    with batch_tasks_lock:
        task_in_memory = batch_tasks.get(task_id)
    if task_in_memory:
        return jsonify({"success": True, "msg": "查询成功", "data": task_in_memory})

    # Completed tasks are persisted as <task_id>.json; secure_filename guards
    # against path traversal in the client-supplied id.
    result_filepath = os.path.join(
        app.config["BATCH_RESULTS_FOLDER"], f"{secure_filename(task_id)}.json"
    )
    if not os.path.exists(result_filepath):
        # Neither in memory nor on disk: unknown task id.
        return (
            jsonify({"success": False, "msg": "未找到指定的任务ID", "data": None}),
            404,
        )

    try:
        with open(result_filepath, "r", encoding="utf-8") as f:
            task_from_file = json.load(f)
    except Exception as e:
        app.logger.error(f"读取任务 {task_id} 结果文件时出错: {e}")
        return (
            jsonify({"success": False, "msg": "读取结果文件失败", "data": None}),
            500,
        )
    return jsonify({"success": True, "msg": "查询成功", "data": task_from_file})


if __name__ == "__main__":
    # NOTE(review): debug=True enables the interactive Werkzeug debugger and
    # must not be combined with binding to 0.0.0.0 in production — confirm
    # deployments use a proper WSGI server instead of this entry point.
    app.run(host="0.0.0.0", port=8000, debug=True, use_reloader=True)
