import boto3
import botocore
import hmac, hashlib, time, os, json
from flask import Flask, Response, redirect, render_template, request, jsonify, Blueprint, current_app, stream_with_context, session, g
import base64
import requests
import zipfile  # 确保导入
import io  # 确保导入
import humanize
import mimetypes
import csv
import threading
import uuid

from s3_web_browser.s3 import generate_presigned_url, parse_responses, upload_object, list_objects, delete_object, \
    rename_object, get_paginated_bucket_contents, get_paginated_bucket_contents_with_versions, \
    initiate_multipart_upload, generate_presigned_urls_for_parts, complete_multipart_upload, abort_multipart_upload, \
    upload_part_from_stream, stream_from_url_to_s3, get_object_info,  _stream_rtsp_to_s3, _stream_http_to_s3, \
    get_bucket_logging, put_bucket_logging, delete_bucket_logging
# from s3_web_browser.s3 import list_objects, parse_responses
from requests_aws4auth import AWS4Auth
import xml.etree.ElementTree as ET
from botocore.exceptions import NoCredentialsError, ClientError
from datetime import datetime
from urllib.parse import urlparse, quote, unquote   # 添加这行
from werkzeug.http import http_date
from functools import wraps

# =========================================================================
# 1. Create a blueprint with a unified URL prefix.
# NOTE(review): this blueprint is never registered or referenced by the
# routes below (they all attach directly to `app`) — confirm it is
# registered elsewhere, otherwise it is dead code.
# =========================================================================
api_v1 = Blueprint('api_v1', __name__, url_prefix='/api/v1')

def register_routes(app: Flask) -> None:  # noqa:C901
    def s3_client_required(f):
        """Decorator: require session credentials and attach a per-request
        S3 client (built from those credentials plus the app-wide kwargs
        template) to ``g.s3_client`` before invoking the wrapped view."""
        @wraps(f)
        def decorated_function(*args, **kwargs):
            # Both keys must be present in the session to proceed.
            has_creds = ('aws_access_key_id' in session
                         and 'aws_secret_access_key' in session)
            if not has_creds:
                return jsonify({"error": "用户未登录或会话已过期"}), 401

            try:
                # Build the client dynamically from the session keys and the
                # template configured in __init__.py, and expose it on `g`
                # for the route function.
                g.s3_client = boto3.client(
                    "s3",
                    aws_access_key_id=session['aws_access_key_id'],
                    aws_secret_access_key=session['aws_secret_access_key'],
                    **app.config["AWS_KWARGS_TEMPLATE"],
                )
            except Exception as e:
                app.logger.error(f"动态创建S3客户端失败: {e}")
                return jsonify({"error": f"创建S3客户端失败: {e}"}), 500

            return f(*args, **kwargs)
        return decorated_function

    # ==================== 2. 新增：认证相关的API接口 ====================
    # 这些接口不需要登录，所以不加装饰器
    @app.route("/api/v1/auth/login", methods=["POST"])
    def login_api():
        """Validate the submitted AK/SK against the storage service and, on
        success, persist them in the session (no decorator: pre-login)."""
        payload = request.get_json()
        ak = payload.get('access_key_id')
        sk = payload.get('secret_access_key')

        if not (ak and sk):
            return jsonify({"error": "Access Key ID 和 Secret Access Key 不能为空"}), 400

        try:
            # Probe the credentials with a cheap list_buckets call.
            probe_client = boto3.client(
                "s3",
                aws_access_key_id=ak,
                aws_secret_access_key=sk,
                **app.config["AWS_KWARGS_TEMPLATE"]
            )
            probe_client.list_buckets()
        except ClientError as e:
            error_code = e.response['Error']['Code']
            if error_code in ['InvalidAccessKeyId', 'SignatureDoesNotMatch', 'AccessDenied']:
                app.logger.warning(f"登录尝试失败，凭证无效: {ak}")
                return jsonify({"error": "凭证无效，请检查您的Access Key ID和Secret Access Key"}), 401
            app.logger.error(f"登录时连接存储服务失败: {error_code}")
            return jsonify({"error": f"连接存储服务失败: {error_code}"}), 500
        except Exception as e:
            app.logger.error(f"登录时发生未知错误: {e}")
            return jsonify({"error": "登录失败，发生未知服务器错误"}), 500

        # Credentials verified — store them for subsequent requests.
        session['aws_access_key_id'] = ak
        session['aws_secret_access_key'] = sk
        session.permanent = True

        app.logger.info(f"用户 {ak} 登录成功")
        return jsonify({"message": "登录成功", "user": {"name": ak}})

    @app.route("/api/v1/auth/logout", methods=["POST"])
    def logout_api():
        """Clear the session, logging the current user out."""
        who = session.get('aws_access_key_id', '未知用户')
        session.clear()
        app.logger.info(f"用户 {who} 已登出")
        return jsonify({"message": "已成功登出"}), 200

    @app.route("/api/v1/auth/status", methods=["GET"])
    def auth_status_api():
        """Report whether the current session holds login credentials."""
        if 'aws_access_key_id' not in session:
            return jsonify({"isLoggedIn": False, "user": None})
        return jsonify({
            "isLoggedIn": True,
            "user": {"name": session['aws_access_key_id']}
        })
            
    # =========================================================================
    # == 这是 routes.py 中 get_buckets_api 的最终正确版本，请仔细核对 ==
    # =========================================================================
    @app.route("/api/v1/buckets", methods=["GET"])
    @s3_client_required
    def get_buckets_api():
        """List all buckets with in-memory pagination.

        Query params:
            page  (int): 1-based page number, defaults to 1.
            limit (int): page size, defaults to 10.

        Returns JSON ``{"items": [...], "total": N}`` where ``items`` holds
        the detailed info for the current page only and ``total`` is the
        overall bucket count.
        """
        app.logger.info("真实 API 被调用！正在从联通云获取存储桶列表...")

        # request.args.get(..., type=int) falls back to the default on a bad
        # value instead of raising, so no try/except is needed here.
        page = request.args.get("page", 1, type=int)
        limit = request.args.get("limit", 10, type=int)

        s3_client = g.s3_client

        try:
            response = s3_client.list_buckets()
            app.logger.debug("s3_client.list_buckets() raw response: %s", response)

            bucket_list_detailed = []
            # Initialize so the response stays well-formed even when the S3
            # reply has no 'Buckets' key (previously a NameError -> 500).
            total_items = 0

            if 'Buckets' in response:
                all_buckets_from_s3 = response['Buckets']
                total_items = len(all_buckets_from_s3)

                # Slice out the requested page.
                start_index = (page - 1) * limit
                paginated_buckets = all_buckets_from_s3[start_index:start_index + limit]

                for bucket in paginated_buckets:
                    bucket_name = bucket['Name']

                    # Placeholder values; refined by the lookups below.
                    bucket_info = {
                        "name": bucket_name,
                        "creation_date": bucket['CreationDate'].isoformat(),
                        "region": "查询中...",
                        "storage_class": "标准存储",
                        "redundancy_type": "同城冗余",
                        "size": "数据准备中",
                        "versioning": "查询中..."
                    }

                    # Real region; fall back to the endpoint hostname prefix
                    # when the provider reports a null LocationConstraint.
                    try:
                        loc_response = s3_client.get_bucket_location(Bucket=bucket_name)
                        region = loc_response.get('LocationConstraint')
                        if not region:
                            # NOTE(review): the client is now built from
                            # AWS_KWARGS_TEMPLATE — confirm app.config still
                            # carries "AWS_KWARGS" here, else KeyError.
                            endpoint_host = app.config["AWS_KWARGS"]["endpoint_url"].split("//")[-1]
                            region = endpoint_host.split('.')[0]
                        bucket_info['region'] = region
                    except ClientError as loc_error:
                        app.logger.warning(f"无法获取桶 {bucket_name} 的地域信息: {loc_error}")
                        bucket_info['region'] = "获取失败"

                    # Real versioning status.
                    try:
                        ver_response = s3_client.get_bucket_versioning(Bucket=bucket_name)
                        status = ver_response.get('Status', 'Disabled')
                        if status == 'Enabled':
                            bucket_info['versioning'] = '已开启'
                        elif status == 'Suspended':
                            bucket_info['versioning'] = '已暂停'
                        else:
                            bucket_info['versioning'] = '未开启'
                    except ClientError as ver_error:
                        app.logger.warning(f"无法获取桶 {bucket_name} 的版本控制信息: {ver_error}")
                        bucket_info['versioning'] = "获取失败"

                    # Bucket size — a full object walk, which can be slow;
                    # pagination bounds the cost to the current page's buckets.
                    try:
                        total_size_bytes = 0
                        paginator = s3_client.get_paginator('list_objects_v2')
                        for p in paginator.paginate(Bucket=bucket_name):
                            if 'Contents' in p:
                                total_size_bytes += sum(item['Size'] for item in p['Contents'])

                        bucket_info['size'] = humanize.naturalsize(total_size_bytes, binary=True, format='%.1f')
                    except ClientError:
                        bucket_info['size'] = "计算失败(权限?)"

                    bucket_list_detailed.append(bucket_info)

            response_data = {
                "items": bucket_list_detailed,  # current page only
                "total": total_items,           # overall bucket count
            }
            # json is imported at module top; the former in-function import
            # and raw print()s are replaced with a debug log.
            app.logger.debug("返回数据: %s", json.dumps(response_data, indent=2, ensure_ascii=False))

            return jsonify(response_data)

        except (NoCredentialsError, ClientError) as e:
            app.logger.error(f"连接 S3 服务时发生错误: {e}")
            return jsonify({"error": f"无法连接存储服务，请检查后端配置或网络：{e}"}), 500
        except Exception as e:
            app.logger.error(f"获取存储桶列表时发生未知错误: {e}")
            return jsonify({"error": "获取存储桶列表失败，发生未知错误"}), 500


    # 在 routes.py 中，升级 create_bucket_api
    @app.route("/api/v1/buckets", methods=["POST"])
    @s3_client_required
    def create_bucket_api():
        """
        API endpoint: create a new bucket, then apply the requested ACL and
        versioning configuration.
        """
        body = request.get_json()
        if not body:
            return jsonify({"error": "请求体不能为空且必须是 JSON 格式"}), 400

        # Pull every parameter the frontend may send.
        bucket_name = body.get('bucket_name')
        region = body.get('region')
        acl = body.get('acl', 'private')                    # default: private
        versioning_enabled = body.get('versioning', False)  # default: off

        # Validate required fields.
        if not bucket_name:
            return jsonify({"error": "缺少 'bucket_name' 参数"}), 400
        if not region:
            # The endpoint-scoped region is mandatory for this provider.
            return jsonify({"error": "缺少 'region' 参数"}), 400

        app.logger.info(
            f"收到创建存储桶请求: Name={bucket_name}, Region={region}, ACL={acl}, Versioning={versioning_enabled}"
        )
        s3_client = g.s3_client

        try:
            # Step 1: create the bucket. The ACL is applied afterwards via
            # put_bucket_acl, which is more flexible and widely compatible.
            # Per the S3 protocol, non-us-east-1 regions require a
            # LocationConstraint, and the frontend always sends one.
            create_kwargs = {
                'Bucket': bucket_name,
                'CreateBucketConfiguration': {'LocationConstraint': region},
            }
            app.logger.info(f"正在执行 s3_client.create_bucket，参数: {create_kwargs}")
            s3_client.create_bucket(**create_kwargs)
            app.logger.info(f"存储桶 '{bucket_name}' 创建成功。")

            # Step 2: apply the ACL right after creation succeeds.
            app.logger.info(f"正在为 '{bucket_name}' 设置 ACL 为 '{acl}'...")
            s3_client.put_bucket_acl(Bucket=bucket_name, ACL=acl)
            app.logger.info(f"为 '{bucket_name}' 设置 ACL 成功。")

            # Step 3: configure versioning according to the request toggle.
            # Boto3 requires the full structure even when turning it off,
            # and being explicit also covers pre-existing buckets.
            if versioning_enabled:
                app.logger.info(f"正在为 '{bucket_name}' 开启版本控制...")
                target_status = 'Enabled'
            else:
                app.logger.info(f"为 '{bucket_name}' 确认版本控制为关闭状态。")
                target_status = 'Suspended'
            s3_client.put_bucket_versioning(
                Bucket=bucket_name,
                VersioningConfiguration={'Status': target_status}
            )
            if versioning_enabled:
                app.logger.info(f"为 '{bucket_name}' 开启版本控制成功。")

            # Final success response.
            return jsonify({"message": f"存储桶 '{bucket_name}' 创建成功并完成配置"}), 201

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            error_message = e.response["Error"]["Message"]
            app.logger.error(f"创建或配置桶 '{bucket_name}' 时失败: {error_code} - {error_message}")
            return jsonify({"error": f"操作失败: {error_message}"}), 409 if error_code == 'BucketAlreadyOwnedByYou' else 500
        except Exception as e:
            app.logger.error(f"创建桶 '{bucket_name}' 时发生未知错误: {e}")
            return jsonify({"error": "服务器内部错误"}), 500

    @app.route("/api/v1/buckets/<bucket_name>/size", methods=["GET"])
    @s3_client_required
    def get_bucket_size_api(bucket_name):
        """
        API endpoint: compute and return one bucket's total size and object
        count. Warning: may be very slow for buckets with many objects.
        """
        app.logger.info(f"开始计算存储桶 '{bucket_name}' 的大小...")
        s3_client = g.s3_client

        size_bytes = 0
        count = 0

        try:
            # Walk every listing page in the bucket, accumulating totals.
            paginator = s3_client.get_paginator('list_objects_v2')
            for listing_page in paginator.paginate(Bucket=bucket_name):
                contents = listing_page.get('Contents')
                if contents:
                    size_bytes += sum(entry['Size'] for entry in contents)
                    count += len(contents)

            # binary=True formats with KiB/MiB units.
            pretty_size = humanize.naturalsize(size_bytes, binary=True)

            app.logger.info(f"'{bucket_name}' 大小计算完成: {pretty_size}, 对象数量: {count}")

            return jsonify({
                "bucket_name": bucket_name,
                "size_bytes": size_bytes,
                "size_human": pretty_size,
                "object_count": count
            })

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"计算桶 '{bucket_name}' 大小时出错 (权限不足?): {e}")
            return jsonify({"error": f"计算失败: {error_code}"}), 500
        except Exception as e:
            app.logger.error(f"计算桶 '{bucket_name}' 大小时发生未知错误: {e}")
            return jsonify({"error": "计算时发生未知错误"}), 500
    

    # 在 routes.py 中添加这个新的 API 路由

    @app.route("/api/v1/buckets/<bucket_name>/versioning", methods=["PUT"])
    @s3_client_required
    def update_bucket_versioning_api(bucket_name):
        """
        API endpoint: enable or suspend versioning on the given bucket.
        """
        payload = request.get_json()
        if not payload or 'status' not in payload:
            return jsonify({"error": "请求体中必须包含 'status' 字段"}), 400

        new_status = payload['status']
        if new_status not in ('Enabled', 'Suspended'):
            return jsonify({"error": "状态值只能是 'Enabled' 或 'Suspended'"}), 400

        app.logger.info(f"收到更新 {bucket_name} 版本控制的请求，新状态: {new_status}")
        s3_client = g.s3_client

        try:
            # Apply the new versioning state.
            s3_client.put_bucket_versioning(
                Bucket=bucket_name,
                VersioningConfiguration={'Status': new_status}
            )

            action = "开启" if new_status == 'Enabled' else "暂停"
            return jsonify({"message": f"存储桶 '{bucket_name}' 的版本控制已成功{action}"}), 200

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            error_message = e.response["Error"]["Message"]
            app.logger.error(f"更新 {bucket_name} 版本控制失败: {error_code} - {error_message}")
            return jsonify({"error": f"操作失败: {error_message}"}), 500
        except Exception as e:
            app.logger.error(f"更新 {bucket_name} 版本控制时发生未知错误: {e}")
            return jsonify({"error": "服务器内部错误"}), 500
    
    # 在 routes.py 中的这段代码保持不变，它已经很完美了
    @app.route("/api/v1/buckets/<bucket_name>", methods=["DELETE"])
    @s3_client_required
    def delete_bucket_api(bucket_name):
        """
        API endpoint: delete a bucket. Precondition: the bucket must be empty.
        """
        app.logger.info(f"收到删除存储桶 '{bucket_name}' 的请求...")
        s3_client = g.s3_client

        try:
            # Refuse to delete a non-empty bucket (cheap probe, MaxKeys=1).
            probe = s3_client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
            if probe.get('Contents'):
                app.logger.warning(f"删除失败：存储桶 '{bucket_name}' 不为空。")
                return jsonify({"error": "存储桶不为空，无法删除。请先清空存储桶内的所有对象。"}), 409

            # Bucket is empty — go ahead and delete it.
            s3_client.delete_bucket(Bucket=bucket_name)
            app.logger.info(f"存储桶 '{bucket_name}' 已成功删除。")
            return jsonify({"message": f"存储桶 '{bucket_name}' 删除成功"}), 200

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"删除存储桶 '{bucket_name}' 时出错 (ClientError): {e}")
            if error_code == 'NoSuchBucket':
                return jsonify({"error": "存储桶不存在"}), 404
            return jsonify({"error": f"删除失败: {error_code}"}), 500
        except Exception as e:
            app.logger.error(f"删除存储桶 '{bucket_name}' 时发生未知错误: {e}")
            return jsonify({"error": "服务器内部错误"}), 500
    
    @app.route("/api/v1/preview", methods=['GET'])
    @s3_client_required
    def preview_api():
        """
        API endpoint: stream an object's bytes back to the client for preview.

        Query params:
            bucket_name (str): bucket holding the object.
            path        (str): object key.

        The Content-Type comes from the S3 object metadata (never from the
        request). The body is streamed in chunks instead of being read fully
        into memory, so large objects no longer risk exhausting RAM.
        """
        bucket_name = request.args.get('bucket_name')
        path = request.args.get('path')

        if not bucket_name or not path:
            return "Missing bucket_name or path", 400

        app.logger.info(f"API v2.0: Attempting to preview: Bucket={bucket_name}, Path={path}")
        s3_client = g.s3_client

        try:
            obj = s3_client.get_object(Bucket=bucket_name, Key=path)
            content_type = obj.get('ContentType', 'application/octet-stream')
            size = obj.get('ContentLength')
            app.logger.info(f"Successfully got object. Content-Type: {content_type}, Size: {size}")

            # Stream the boto3 StreamingBody in 64 KiB chunks; previously
            # obj['Body'].read() buffered the whole object in memory.
            body = obj['Body']
            return Response(
                stream_with_context(body.iter_chunks(chunk_size=64 * 1024)),
                content_type=content_type,
            )

        except ClientError as e:
            error_code = e.response.get("Error", {}).get("Code")
            app.logger.error(f"ClientError during preview: {error_code} - {e}")
            # Plain-text errors with the appropriate status codes.
            if error_code == 'AccessDenied':
                return "无权限预览此文件。", 403
            elif error_code in ('NoSuchKey', 'NotFound'):
                return "文件不存在。", 404
            else:
                return f"预览失败：{error_code}", 500
        except Exception as e:
            app.logger.error(f"General error during preview: {e}")
            return "预览失败，发生未知错误。", 500

    @app.route("/api/v1/buckets/<bucket_name>/details", methods=["GET"])
    @s3_client_required
    def get_bucket_details_api(bucket_name):
        """
        API endpoint: return detail info for one bucket, including its
        versioning state.
        """
        app.logger.info(f"正在获取存储桶 '{bucket_name}' 的详细信息...")
        s3_client = g.s3_client
        try:
            # Region lookup.
            try:
                location = s3_client.get_bucket_location(Bucket=bucket_name)
                region = location.get('LocationConstraint')
                if not region:
                    # Some regions (e.g. us-east-1) report null; fall back to
                    # the endpoint hostname's first label.
                    host = app.config["AWS_KWARGS"]["endpoint_url"].split("//")[-1]
                    region = host.split('.')[0]
            except ClientError as loc_error:
                app.logger.warning(f"无法获取桶 {bucket_name} 的地域信息: {loc_error}")
                region = "获取失败"

            # Versioning state (the core of this endpoint). 'Status' may be
            # absent entirely when versioning was never configured.
            try:
                ver = s3_client.get_bucket_versioning(Bucket=bucket_name)
                state = ver.get('Status', 'Disabled')
                versioning_status = {
                    'Enabled': '已开启',
                    'Suspended': '已暂停',
                }.get(state, '未开启')
            except ClientError as ver_error:
                # Some S3 implementations raise for never-versioned buckets;
                # map those specific codes to "not enabled".
                if ver_error.response['Error']['Code'] in ['NoSuchVersioningConfiguration', 'VersioningNotEnabled']:
                    versioning_status = '未开启'
                else:
                    app.logger.warning(f"无法获取桶 {bucket_name} 的版本控制信息: {ver_error}")
                    versioning_status = "获取失败"

            # Assemble the payload (ACLs, policy, etc. could be added later).
            bucket_details = {
                "name": bucket_name,
                "region": region,
                "versioning": versioning_status,
            }

            app.logger.info(f"成功获取桶详情: {bucket_details}")
            return jsonify(bucket_details)

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"获取桶 '{bucket_name}' 详情时出错: {error_code}")
            if error_code == 'NoSuchBucket':
                return jsonify({"error": "存储桶不存在"}), 404
            return jsonify({"error": f"获取详情失败: {error_code}"}), 500
        except Exception as e:
            app.logger.error(f"获取桶 '{bucket_name}' 详情时发生未知错误: {e}")
            return jsonify({"error": "服务器内部错误"}), 500

    @app.route("/api/v1/buckets/<bucket_name>", defaults={"path": ""}, methods=["GET"])
    @app.route("/api/v1/buckets/<bucket_name>/", defaults={"path": ""}, methods=["GET"])
    @app.route("/api/v1/buckets/<bucket_name>/<path:path>", methods=["GET"])
    @s3_client_required
    def get_bucket_contents_api(bucket_name: str, path: str):
        """
        API endpoint: list the contents of a bucket "folder", paginated.

        Query params:
            page     (int): 1-based page number, default 1.
            limit    (int): page size, default 50.
            search   (str): optional name filter.
            versions (str): "true" to include object versions.
        """
        try:
            page = request.args.get("page", 1, type=int)
            items_per_page = request.args.get("limit", 50, type=int)
            search_param = request.args.get("search", "")
            show_versions = request.args.get("versions", "false").lower() == "true"

            s3_client = g.s3_client

            if show_versions:
                # Version listing already yields plain dicts.
                data = get_paginated_bucket_contents_with_versions(
                    s3_client, bucket_name, path, search_param, page, items_per_page
                )
                response_items = data["items"]
            else:
                # Plain listing yields S3Entry objects; convert to dicts.
                data = get_paginated_bucket_contents(
                    s3_client, bucket_name, path, search_param, page, items_per_page
                )
                response_items = [item.__dict__ for item in data["items"]]

            return jsonify({
                "items": response_items,
                "total": data["total"],
                "path": path,
                "bucket_name": bucket_name
            })

        # Use the ClientError name imported at module top (same class as
        # botocore.exceptions.ClientError) for consistency with the other
        # handlers in this file.
        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"API Error fetching bucket contents for {bucket_name}/{path}: {e}")
            return jsonify({"error": f"获取文件列表失败: {error_code}"}), 500
        except Exception as e:
            app.logger.error(f"API Generic Error fetching bucket contents for {bucket_name}/{path}: {e}")
            # Plain string: the former f-string had no placeholders.
            return jsonify({"error": "服务器内部错误"}), 500

    @app.route("/api/v1/buckets/<bucket_name>/objects/details", methods=["GET"])
    @s3_client_required
    def get_object_details_api(bucket_name):
        """Return detailed metadata for one object in *bucket_name*.

        Query params:
            key        (str): object key; required.
            expiration (int): lifetime in seconds for the presigned URL used
                when the bucket is not public. Defaults to 3600.

        The response bundles head_object metadata, bucket- and object-level
        ACLs summarized as canned-ACL-like strings, and an access URL
        (virtual-hosted public URL when the bucket is public, otherwise a
        presigned URL). Responds 404 when the object does not exist.
        """
        key = request.args.get('key')
        expiration = request.args.get('expiration', 3600, type=int)

        if not key:
            return jsonify({"error": "缺少 'key' 参数"}), 400

        app.logger.info(f"正在获取对象详情: Bucket='{bucket_name}', Key='{key}'")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        try:
            # Core object metadata (size, ETag, content type, ...).
            response = s3_client.head_object(Bucket=bucket_name, Key=key)

            # Summarize the *bucket* ACL by scanning for grants to the public
            # AllUsers group; default to 'private' when none is found.
            acl_response = s3_client.get_bucket_acl(Bucket=bucket_name)
            grants = acl_response.get('Grants', [])
            bucket_acl = 'private'
            for grant in grants:
                grantee = grant.get('Grantee', {})
                if grantee.get('URI') == 'http://acs.amazonaws.com/groups/global/AllUsers':
                    permission = grant.get('Permission')
                    if permission in ['FULL_CONTROL', 'WRITE']:
                        bucket_acl = 'public-read-write'
                        break
                    elif permission == 'READ':
                        bucket_acl = 'public-read'

            # Summarize the *object* ACL the same way; AuthenticatedUsers
            # grants map to 'authenticated-read' unless a public grant was
            # already recorded.
            object_acl_response = s3_client.get_object_acl(Bucket=bucket_name, Key=key)
            object_grants = object_acl_response.get('Grants', [])
            object_acl_string = 'private'
            for grant in object_grants:
                grantee = grant.get('Grantee', {})
                permission = grant.get('Permission')
                uri = grantee.get('URI', '')
                if 'AllUsers' in uri:
                    if permission in ['FULL_CONTROL', 'WRITE']:
                        object_acl_string = 'public-read-write'
                        break
                    elif permission == 'READ':
                        object_acl_string = 'public-read'
                elif 'AuthenticatedUsers' in uri:
                    if object_acl_string not in ['public-read-write', 'public-read']:
                        object_acl_string = 'authenticated-read'

            # Pick the access URL: a virtual-hosted-style public URL when the
            # bucket is publicly readable, otherwise a presigned URL.
            access_url = ""
            if bucket_acl in ['public-read', 'public-read-write']:
                # NOTE(review): the S3 client now comes from
                # AWS_KWARGS_TEMPLATE; confirm app.config still carries an
                # "AWS_KWARGS" entry with "endpoint_url".
                endpoint_url = app.config["AWS_KWARGS"].get("endpoint_url")
                if endpoint_url:
                    parsed_endpoint = urlparse(endpoint_url)
                    scheme = "https"
                    hostname = parsed_endpoint.hostname
                    encoded_key = quote(key)
                    access_url = f"{scheme}://{bucket_name}.{hostname}/{encoded_key}"
                else:
                    access_url = "无法生成公网URL (未配置Endpoint)"
            else:
                access_url = generate_presigned_url(s3_client, bucket_name, key, expiration=expiration)

            # Assemble the JSON payload from the head_object response.
            # NOTE(review): 'Expires' is assumed to be a datetime below; some
            # S3 implementations return it as a raw string — verify.
            details = {
                "name": key,
                "base_name": os.path.basename(key),
                "size_bytes": response.get('ContentLength'),
                "size_human": humanize.naturalsize(response.get('ContentLength'), binary=True) if response.get('ContentLength') is not None else 'N/A',
                "etag": response.get('ETag', '').strip('"'),
                "last_modified": response.get('LastModified').isoformat() if response.get('LastModified') else None,
                "content_type": response.get('ContentType', 'application/octet-stream'),
                "metadata": response.get('Metadata', {}),
                "version_id": response.get('VersionId'),
                "bucket_acl": bucket_acl,
                "access_url": access_url,
                "object_acl": object_acl_string, 
                "storage_class": response.get('StorageClass'),
                # Standard HTTP headers stored alongside the object.
                "http_headers": {
                    "ContentEncoding": response.get('ContentEncoding'),
                    "ContentLanguage": response.get('ContentLanguage'),
                    "ContentDisposition": response.get('ContentDisposition'),
                    "CacheControl": response.get('CacheControl'),
                    "Expires": response.get('Expires').isoformat() if response.get('Expires') else None,
                }
            }

            return jsonify(details)

        except ClientError as e:
            error_code = e.response.get("Error", {}).get("Code")
            if error_code in ['NoSuchKey', 'NotFound', '404']:
                # Routine polling case: the object simply is not there.
                app.logger.debug(f"轮询或查找：对象 {key} 不存在，返回 404。")
                return jsonify({"error": "Object not found"}), 404
            else:
                app.logger.error(f"获取对象详情时发生S3错误 (key: {key}): {error_code} - {e}")
                return jsonify({"error": f"获取详情失败: {error_code}"}), 500

        except Exception as e:
            app.logger.error(f"获取对象详情时发生未知错误 (key: {key}): {e}", exc_info=True)
            return jsonify({"error": "服务器内部发生未知错误"}), 500

    # =================================================================================
    # ==      【最终类型修复版】设置对象元数据                                       ==
    # =================================================================================
    @app.route("/api/v1/buckets/<bucket_name>/objects/metadata", methods=["PUT"])
    @s3_client_required
    def set_object_metadata_api(bucket_name):
        """
        API 接口：修改一个已存在对象的元数据。
        采用适应性策略，通过发送空值来明确删除一个自定义元数据键，以兼容非标准S3实现。
        """
        app.logger.info("--- [DEBUG] Running Final Type-Fix Version v5 ---") 
        data = request.get_json()
        key = data.get('key')
        new_headers_from_frontend = data.get('headers', {})
        new_metadata_from_frontend = data.get('metadata', {})

        if not key:
            return jsonify({"error": "请求中缺少 'key' 参数"}), 400

        app.logger.info(f"收到对 '{key}' 的元数据更新请求: headers={new_headers_from_frontend}, metadata={new_metadata_from_frontend}")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        try:
            current_object_info = s3_client.head_object(Bucket=bucket_name, Key=key)
            
            # 1. 从当前对象信息开始构建最终参数
            final_args_to_send = {
                'ContentType': current_object_info.get('ContentType'),
                'ContentLanguage': current_object_info.get('ContentLanguage'),
                'ContentEncoding': current_object_info.get('ContentEncoding'),
                'ContentDisposition': current_object_info.get('ContentDisposition'),
                'CacheControl': current_object_info.get('CacheControl'),
                'Expires': current_object_info.get('Expires'),
                'Metadata': current_object_info.get('Metadata', {}),
            }

            # 2. 合并前端发来的标准头修改
            if 'ContentType' in new_headers_from_frontend: final_args_to_send['ContentType'] = new_headers_from_frontend['ContentType']
            if 'ContentLanguage' in new_headers_from_frontend: final_args_to_send['ContentLanguage'] = new_headers_from_frontend['ContentLanguage']
            if 'ContentEncoding' in new_headers_from_frontend: final_args_to_send['ContentEncoding'] = new_headers_from_frontend['ContentEncoding']
            if 'ContentDisposition' in new_headers_from_frontend: final_args_to_send['ContentDisposition'] = new_headers_from_frontend['ContentDisposition']
            if 'CacheControl' in new_headers_from_frontend: final_args_to_send['CacheControl'] = new_headers_from_frontend['CacheControl']
            if 'Expires' in new_headers_from_frontend: final_args_to_send['Expires'] = new_headers_from_frontend['Expires']

            # 3. 构建最终的自定义元数据 (空值删除策略)
            final_metadata = new_metadata_from_frontend.copy()
            current_metadata_on_server = current_object_info.get('Metadata', {})
            for old_key in current_metadata_on_server:
                if old_key.lower() not in [k.lower() for k in final_metadata.keys()]:
                    final_metadata[old_key] = ''
                    app.logger.info(f"检测到元数据 '{old_key}' 已被删除，准备发送空值以清空。")
            final_args_to_send['Metadata'] = final_metadata

            # ======================= 【核心修复在这里】 =======================
            # 在传递给Boto3之前，移除所有值为 None 的键
            final_boto_params = {k: v for k, v in final_args_to_send.items() if v is not None}
            # ===============================================================
            
            app.logger.info(f"即将使用【最终参数】调用 copy_object: {final_boto_params}")
            
            # 4. 执行写入
            s3_client.copy_object(
                CopySource={'Bucket': bucket_name, 'Key': key},
                Bucket=bucket_name,
                Key=key,
                MetadataDirective='REPLACE',
                **final_boto_params # 使用清理过的参数
            )

            return jsonify({"message": f"文件 '{os.path.basename(key)}' 的元数据已成功更新"}), 200

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"设置元数据时发生未知错误 (key: {key}): {error_code} - {e}")
            return jsonify({"error": f"操作失败: {error_code}"}), 500
        except Exception as e:
            app.logger.error(f"设置元数据时发生未知错误: {e}")
            return jsonify({"error": "服务器内部发生未知错误"}), 500
    
    # =================================================================================
    # ==      【新增】批量设置对象元数据 API                                         ==
    # =================================================================================
    @app.route("/api/v1/buckets/<bucket_name>/objects/batch-metadata", methods=["PUT"])
    @s3_client_required
    def batch_set_object_metadata_api(bucket_name):
        """
        API 接口：批量修改多个对象的元数据 (仅限标准HTTP头)。
        """
        data = request.get_json()
        keys = data.get('keys', [])
        headers_to_update = data.get('headers', {})

        if not keys:
            return jsonify({"error": "请求中缺少 'keys' (文件列表)"}), 400
        if not headers_to_update:
            return jsonify({"error": "没有提供任何要更新的元数据"}), 400

        app.logger.info(f"收到对 {len(keys)} 个对象的批量元数据更新请求: {headers_to_update}")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        success_count = 0
        errors = []

        # 循环处理每个文件
        for key in keys:
            try:
                # 同样遵循 Read-Merge-Write 模式，保证健壮性
                # 1. 读取
                current_object_info = s3_client.head_object(Bucket=bucket_name, Key=key)
                
                # 2. 合并
                final_args = {
                    'ContentType': current_object_info.get('ContentType'),
                    'ContentLanguage': current_object_info.get('ContentLanguage'),
                    'ContentEncoding': current_object_info.get('ContentEncoding'),
                    'ContentDisposition': current_object_info.get('ContentDisposition'),
                    'CacheControl': current_object_info.get('CacheControl'),
                    'Expires': current_object_info.get('Expires'),
                    # 【注意】我们保留了原有的自定义元数据，因为批量操作不修改它
                    'Metadata': current_object_info.get('Metadata', {}),
                }
                # 用新的标准头覆盖
                final_args.update(headers_to_update)

                # 3. 清理并写入
                final_boto_params = {k: v for k, v in final_args.items() if v is not None}
                s3_client.copy_object(
                    CopySource={'Bucket': bucket_name, 'Key': key},
                    Bucket=bucket_name,
                    Key=key,
                    MetadataDirective='REPLACE',
                    **final_boto_params
                )
                success_count += 1
            except ClientError as e:
                error_message = e.response["Error"]["Code"]
                app.logger.error(f"批量更新元数据时，处理 '{key}' 失败: {error_message}")
                errors.append({"key": key, "error": error_message})
            except Exception as e:
                app.logger.error(f"批量更新元数据时，处理 '{key}' 发生未知错误: {e}")
                errors.append({"key": key, "error": "未知服务器错误"})

        if not errors:
            return jsonify({"message": f"成功更新了 {success_count} 个文件的元数据"}), 200
        else:
            return jsonify({
                "message": f"操作完成，{success_count} 个成功，{len(errors)} 个失败。",
                "errors": errors
            }), 207 # 207 Multi-Status
        
    @app.route("/api/v1/upload/buckets/<bucket_name>", methods=["POST"])
    @s3_client_required
    def upload_file_api_unified(bucket_name: str):
        """
        一个统一的文件上传API，它信任并使用前端提供的完整 'key'，
        并支持可选的 StorageClass 参数。
        """
        # 1. 检查文件部分 (逻辑不变)
        if 'file' not in request.files:
            return jsonify({"error": "请求中未找到 'file' 部分"}), 400
        
        uploaded_file = request.files['file']
        if uploaded_file.filename == '':
            return jsonify({"error": "未选择文件"}), 400

        # 2. 从表单获取 S3 Key 和【新的】Storage Class (逻辑微调)
        s3_key = request.form.get('key')
        if not s3_key:
            return jsonify({"error": "请求中缺少 'key' (文件存储路径)"}), 400
        
        # 因为我们是单一地域，所以直接从 app.config 创建客户端
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        try:
            # 3. 构建 ExtraArgs (逻辑微调)
            content_type, _ = mimetypes.guess_type(s3_key)
            if content_type is None:
                content_type = 'application/octet-stream'
            
            extra_args = {'ContentType': content_type}
            acl = request.form.get('acl')

            # 2. 如果获取到了 acl (即前端传了 'public-read' 等值)
            if acl:
                # 3. 将它添加到 ExtraArgs 字典中
                extra_args['ACL'] = acl
                # (可选) 添加日志，方便调试
                current_app.logger.info(f"Uploading '{s3_key}' with ACL set to: '{acl}'")
            app.logger.debug(f"Uploading with ExtraArgs: {extra_args}") # 增加调试日志

            # 4. 执行上传 (逻辑不变)
            s3_client.upload_fileobj(
                uploaded_file,
                bucket_name,
                s3_key,
                ExtraArgs=extra_args
            )
            return jsonify({"message": f"文件 '{s3_key}' 上传成功."}), 201

        except ClientError as e:
            app.logger.error(f"API Upload failed: {e}")
            return jsonify({"error": str(e)}), 500
        except Exception as e:
            app.logger.error(f"API Upload unexpected error: {e}")
            return jsonify({"error": "服务器发生未知错误"}), 500
        
    @app.route("/api/v1/create-folder/buckets/<bucket_name>/", defaults={'path': ''}, methods=["POST"])
    @app.route("/api/v1/create-folder/buckets/<bucket_name>/<path:path>", methods=["POST"])
    @s3_client_required
    def create_folder(bucket_name, path):
        folder_name = request.form.get("folder_name", "").strip()
        if not folder_name or '/' in folder_name:
            # 正确：返回 JSON 错误信息和 400 状态码
            return jsonify({"error": "文件夹名不能为空且不能包含'/'"}), 400
        
        # 拼接完整 key
        prefix = f"{path.rstrip('/')}/" if path else ""
        folder_key = f"{prefix}{folder_name.strip('/')}/"
        
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        
        try:
            s3_client.put_object(Bucket=bucket_name, Key=folder_key, Body=b"")
            # 正确：返回成功的 JSON 信息和 201 状态码 (201 Created)
            return jsonify({"message": "目录创建成功", "key": folder_key}), 201

        except ClientError as e:
            error_code = e.response.get("Error", {}).get("Code")
            # 正确：返回 JSON 错误信息和 500 状态码
            return jsonify({"error": f"创建文件夹失败: {error_code}"}), 500
        
    # 在 routes.py 文件中，找到其他 API 路由的位置，添加以下新路由

    # ======================================================================
    # ==      【接口A】普通批量删除 (创建删除标记)      ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/delete-items", methods=["POST"])
    @s3_client_required
    def delete_items_api(bucket_name):
        """
        API 接口：普通批量删除文件和文件夹（创建删除标记）。
        【终极健壮版】采用逐个删除，以保证对所有S3兼容存储的最佳兼容性。
        """
        data = request.get_json()
        items_to_delete = data.get('items', [])
        if not items_to_delete: 
            return jsonify({"error": "缺少 'items' 列表"}), 400

        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        deleted_items = []
        errors = []

        # 展开文件夹
        all_keys_to_process = set()
        for item_key in items_to_delete:
            if item_key.endswith('/'):
                paginator = s3_client.get_paginator('list_objects_v2')
                try:
                    for page in paginator.paginate(Bucket=bucket_name, Prefix=item_key):
                        if "Contents" in page:
                            for obj in page["Contents"]: 
                                all_keys_to_process.add(obj['Key'])
                    all_keys_to_process.add(item_key)
                except ClientError as e:
                    errors.append({"key": item_key, "error": f"列出文件夹内容失败: {e}"})
            else:
                all_keys_to_process.add(item_key)
        
        # --- 【核心修复】从批量API改为逐个API调用 ---
        for key in all_keys_to_process:
            try:
                s3_client.delete_object(Bucket=bucket_name, Key=key)
                deleted_items.append(key)
            except ClientError as e:
                errors.append({"key": key, "error": str(e)})

        # --- 统一的错误处理和返回 ---
        if not errors:
            return jsonify({"message": "所有选定项目已成功删除"}), 200
        else:
            return jsonify({"message": "操作完成，但有部分项目处理失败", "errors": errors}), 207
        
    # ======================================================================
    # ==      【接口B】彻底批量删除 (删除所有版本)      ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/delete-items-permanently", methods=["POST"])
    @s3_client_required
    def delete_items_permanently_api(bucket_name):
        """
        API 接口：【终极版】彻底批量删除文件和文件夹。
        它会删除所有历史版本、删除标记，以及文件夹本身的占位符对象。
        """
        data = request.get_json()
        items_to_delete = data.get('items', [])
        if not items_to_delete: 
            return jsonify({"error": "缺少 'items' 列表"}), 400

        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        
        # --- 【核心修复：新的、更全面的对象收集逻辑】 ---
        objects_to_delete_permanently = []
        try:
            versions_paginator = s3_client.get_paginator('list_object_versions')
            
            # 1. 遍历用户勾选的每一个项目
            for item_key in items_to_delete:
                # 2. 对每个项目（无论是文件还是文件夹），都去获取其名下所有的版本和删除标记
                for page in versions_paginator.paginate(Bucket=bucket_name, Prefix=item_key):
                    # a. 收集所有版本
                    if 'Versions' in page:
                        for v in page['Versions']:
                            # Prefix匹配会匹配到 'folder/' 和 'folder/file.txt'
                            # 这里的逻辑是正确的，因为我们要删除所有这些
                            objects_to_delete_permanently.append({'Key': v['Key'], 'VersionId': v['VersionId']})
                    
                    # b. 收集所有删除标记
                    if 'DeleteMarkers' in page:
                        for dm in page['DeleteMarkers']:
                            objects_to_delete_permanently.append({'Key': dm['Key'], 'VersionId': dm['VersionId']})
            
            if not objects_to_delete_permanently:
                return jsonify({"message": "没有找到可彻底删除的版本"}), 200

            # 3. 去重并执行批量彻底删除
            # 使用字典来去重，因为 Boto3 不喜欢重复的条目
            unique_objects = [dict(t) for t in {tuple(d.items()) for d in objects_to_delete_permanently}]
            
            app.logger.info(f"准备彻底删除 {len(unique_objects)} 个版本/标记...")
            
            # Boto3的delete_objects最多一次1000个
            for obj in unique_objects:
                s3_client.delete_object(
                    Bucket=bucket_name,
                    Key=obj['Key'],
                    VersionId=obj['VersionId']
                )
                
            return jsonify({"message": "所有选定项目及其历史版本已成功彻底删除"}), 200
            
        except ClientError as e:
            return jsonify({"error": f"彻底删除失败: {e}"}), 500
        
    @app.route("/api/v1/buckets/<bucket_name>/objects/delete-version", methods=["POST"])
    @s3_client_required
    def delete_object_version_api(bucket_name):
        """
        API 接口：彻底删除一个对象的指定版本 (包括删除标记)。
        """
        data = request.get_json()
        if not data or 'key' not in data or 'versionId' not in data:
            return jsonify({"error": "缺少 'key' 或 'versionId' 参数"}), 400
        
        key = data['key']
        version_id = data['versionId']
        
        app.logger.info(f"准备彻底删除对象 '{key}' 的版本 '{version_id}'...")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        try:
            s3_client.delete_object(
                Bucket=bucket_name,
                Key=key,
                VersionId=version_id
            )
            return jsonify({"message": f"版本 '{version_id}' 已被彻底删除"}), 200

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"彻底删除版本时出错: {error_code}")
            return jsonify({"error": f"操作失败: {error_code}"}), 500
        
    @app.route("/api/v1/buckets/<bucket_name>/objects/restore-version", methods=["POST"])
    @s3_client_required
    def restore_object_version_api(bucket_name):
        """
        API 接口：恢复一个对象的指定历史版本。
        【最终方案】使用两步复制法，规避S3兼容存储对同名复制的实现缺陷。
        """
        data = request.get_json()
        if not data or 'key' not in data or 'versionId' not in data:
            return jsonify({"error": "请求体中必须包含 'key' 和 'versionId' 参数"}), 400

        key = data.get('key')
        version_id = data.get('versionId')

        if not version_id:
            return jsonify({"error": "恢复历史版本时，'versionId' 不能为空"}), 400

        app.logger.info(f"【Plan E】准备恢复对象 '{key}' 的版本 '{version_id}'...")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        # 创建一个唯一的临时文件名
        temp_key = f"{key}.restore-temp-{int(time.time() * 1000)}"
        
        try:
            # --- 步骤 1: 将历史版本复制到一个临时文件 ---
            copy_source_with_version = {
                'Bucket': bucket_name,
                'Key': key,
                'VersionId': version_id
            }
            app.logger.info(f"步骤1：正在将历史版本复制到临时文件 '{temp_key}'...")
            s3_client.copy_object(
                Bucket=bucket_name,
                CopySource=copy_source_with_version,
                Key=temp_key
            )
            app.logger.info(f"步骤1成功。")

            # --- 步骤 2: 将临时文件复制回原文件，创建新版本 ---
            # 此时的源是临时文件，它没有VersionId，我们已经证明这个操作是可行的
            copy_source_from_temp = {
                'Bucket': bucket_name,
                'Key': temp_key
            }
            app.logger.info(f"步骤2：正在将临时文件覆盖回原文件 '{key}'...")
            s3_client.copy_object(
                Bucket=bucket_name,
                CopySource=copy_source_from_temp,
                Key=key
            )
            app.logger.info(f"步骤2成功。'{key}' 的最新版本已恢复。")
            
            return jsonify({"message": f"版本 '{version_id}' 已成功恢复"}), 200

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            error_message = e.response["Error"]["Message"]
            app.logger.error(f"恢复版本时出错: {error_code} - {error_message}")
            return jsonify({"error": f"操作失败: {error_message}"}), 500
        finally:
            # --- 步骤 3: 无论成功失败，都尝试删除临时文件 ---
            try:
                app.logger.info(f"步骤3：正在清理临时文件 '{temp_key}'...")
                
                # 3a. 获取临时文件的 VersionId
                # head_object 是获取单个对象元数据最高效的方式
                temp_object_meta = s3_client.head_object(Bucket=bucket_name, Key=temp_key)
                temp_version_id = temp_object_meta.get('VersionId')

                if not temp_version_id:
                    # 如果没有版本ID（比如桶意外关闭了版本控制），就执行普通删除
                    app.logger.warning(f"临时文件 '{temp_key}' 没有版本ID，执行普通删除。")
                    s3_client.delete_object(Bucket=bucket_name, Key=temp_key)
                else:
                    # 3b. 执行带版本ID的删除，即“彻底删除”
                    app.logger.info(f"获取到临时文件版本ID: {temp_version_id}。准备彻底删除...")
                    s3_client.delete_object(
                        Bucket=bucket_name, 
                        Key=temp_key,
                        VersionId=temp_version_id # <-- 关键在这里
                    )
                
                app.logger.info(f"步骤3清理成功。")
                
            except ClientError as cleanup_error:
                # 如果连 head_object 都失败了（比如临时文件创建失败），也只记录警告
                if cleanup_error.response['Error']['Code'] == 'NoSuchKey':
                    app.logger.warning(f"临时文件 '{temp_key}' 不存在，无需清理。")
                else:
                    app.logger.warning(f"清理临时文件 '{temp_key}' 失败: {cleanup_error}")
    
    @app.route('/api/v1/batch_download_items', methods=['POST'])
    @s3_client_required
    def batch_download_items():
        """
        Bundle the selected files/folders into a single in-memory zip and
        return it as an attachment.

        Form fields:
            bucket_name     -- bucket to read from
            selected_items  -- comma-separated keys; folder keys end in '/'
            current_path    -- optional, used only to name the zip

        NOTE(review): the whole archive is built in memory (io.BytesIO),
        so very large selections can exhaust RAM.
        """
        bucket_name = request.form.get('bucket_name')
        selected_items_str = request.form.get('selected_items')

        if not bucket_name or not selected_items_str:
            app.logger.warning("Missing bucket_name or selected_items for batch download.")
            return "缺少桶名或选定的文件/文件夹路径。", 400

        selected_items = [item.strip() for item in selected_items_str.split(',') if item.strip()]
        if not selected_items:
            app.logger.warning("No items selected for batch download.")
            return "没有选择任何文件或文件夹进行下载。", 400

        s3_client = g.s3_client

        zip_buffer = io.BytesIO()
        with zipfile.ZipFile(zip_buffer, 'a', zipfile.ZIP_DEFLATED, False) as zip_file:
            for item_path in selected_items:
                try:
                    if item_path.endswith('/'):  # folder: walk its contents
                        app.logger.info(f"Processing folder for batch download: {item_path}")
                        paginator = s3_client.get_paginator('list_objects_v2')
                        for page in paginator.paginate(Bucket=bucket_name, Prefix=item_path):
                            for obj in page.get("Contents", []):
                                key = obj["Key"]
                                # Skip zero-byte folder-marker objects.
                                if key.endswith('/') and obj.get('Size', 0) == 0:
                                    continue
                                try:
                                    file_obj = s3_client.get_object(Bucket=bucket_name, Key=key)
                                    file_contents = file_obj['Body'].read()
                                    # Use the full S3 key as the archive
                                    # path so the selected top-level
                                    # folder structure is preserved in
                                    # the zip.
                                    zip_file.writestr(key, file_contents)
                                    app.logger.info(f"Added {key} to zip from folder {item_path}.")
                                except ClientError as e:
                                    error_code = e.response.get("Error", {}).get("Code")
                                    app.logger.error(
                                        f"Error adding {key} from folder {item_path} to zip (ClientError: {error_code}): {e}")
                                except Exception as e:
                                    app.logger.error(
                                        f"General error adding {key} from folder {item_path} to zip: {e}")
                    else:  # plain file
                        app.logger.info(f"Processing file for batch download: {item_path}")
                        obj = s3_client.get_object(Bucket=bucket_name, Key=item_path)
                        file_contents = obj['Body'].read()
                        zip_file.writestr(item_path, file_contents)
                        app.logger.info(f"Added {item_path} to zip.")
                except ClientError as e:
                    error_code = e.response.get("Error", {}).get("Code")
                    app.logger.error(
                        f"Error processing {item_path} for batch download (ClientError: {error_code}): {e}")
                except Exception as e:
                    app.logger.error(f"General error processing {item_path} for batch download: {e}")

        zip_buffer.seek(0)

        # Name the archive after the current folder, else after the bucket.
        current_path_arg = request.form.get('current_path', '').rstrip('/')
        if current_path_arg:
            zip_filename = f"{os.path.basename(current_path_arg)}_batch_download.zip"
        else:
            zip_filename = f"{bucket_name}_batch_download.zip"

        encoded_zip_filename = quote(zip_filename, encoding='utf-8', safe='')

        zip_bytes = zip_buffer.getvalue()
        response = Response(zip_bytes, mimetype='application/zip')
        # Bug fix: the original used two adjacent string literals
        # (f'...UTF-8''{name}') — the second was NOT an f-string, so the
        # header was emitted with a literal "{encoded_zip_filename}" and
        # without the '' required by RFC 5987 (filename*=UTF-8''<pct-enc>).
        response.headers['Content-Disposition'] = f"attachment; filename*=UTF-8''{encoded_zip_filename}"
        response.headers['Content-Length'] = str(len(zip_bytes))

        app.logger.info(
            f"Successfully generated batch download zip for Bucket={bucket_name} with {len(selected_items)} top-level items.")
        return response

    def _is_bucket_publicly_readable(s3_client, bucket_name):
        """
        一个辅助函数，只判断一个【存储桶】本身是否为公共读。
        返回 True (公共) 或 False (私有)。
        """
        try:
            acl_response = s3_client.get_bucket_acl(Bucket=bucket_name)
            for grant in acl_response.get('Grants', []):
                grantee = grant.get('Grantee', {})
                permission = grant.get('Permission')
                if (grantee.get('URI') == 'http://acs.amazonaws.com/groups/global/AllUsers' and
                    permission in ['READ', 'FULL_CONTROL']):
                    return True
            return False
        except ClientError:
            # 出错时，安全起见，一律按私有处理
            return False
        
    # ======================================================================
    # ==      1. 终极版：一个能检查分层权限的ACL状态检查辅助函数      ==
    # ======================================================================
    def _is_object_publicly_readable(s3_client, bucket_name, key):
        """
        Decide whether a *specific object* can truly be fetched by an
        anonymous reader.  Both levels must allow it: the bucket ACL AND
        the object ACL each need an AllUsers READ/FULL_CONTROL grant.
        Any ACL-read failure defaults to private.
        """
        all_users_uri = 'http://acs.amazonaws.com/groups/global/AllUsers'

        def _grants_public_read(grants):
            # True when the AllUsers group holds READ or FULL_CONTROL.
            return any(
                grant.get('Grantee', {}).get('URI') == all_users_uri
                and grant.get('Permission') in ('READ', 'FULL_CONTROL')
                for grant in grants
            )

        try:
            # Level 1: the bucket ACL gates everything below it.
            bucket_acl_response = s3_client.get_bucket_acl(Bucket=bucket_name)
            if not _grants_public_read(bucket_acl_response.get('Grants', [])):
                current_app.logger.info(f"--- [Decision] Object '{key}' is PRIVATE because Bucket '{bucket_name}' is private. ---")
                return False

            # Level 2: the bucket is public — the object ACL decides.
            object_acl_response = s3_client.get_object_acl(Bucket=bucket_name, Key=key)
            if _grants_public_read(object_acl_response.get('Grants', [])):
                current_app.logger.info(f"--- [Decision] Object '{key}' is PUBLIC because BOTH Bucket and Object are public. ---")
                return True

            current_app.logger.info(f"--- [Decision] Object '{key}' is PRIVATE because Object itself is private. ---")
            return False

        except ClientError as e:
            current_app.logger.warning(f"--- [Decision] Failed to get ACL for '{bucket_name}/{key}', defaulting to PRIVATE. Error: {e} ---")
            return False

    # ======================================================================
    # ==      2. 单个文件/文件夹的智能链接生成器 (统一接口)      ==
    # ======================================================================
    @app.route('/api/v1/generate-download-link', methods=['POST'])
    @s3_client_required
    def generate_download_link():
        """
        Smart link generator for a single file or folder.

        Publicly readable items get a plain proxy URL; private items get
        an HMAC-SHA256-signed URL that expires after *expires_in* seconds
        (default 300).
        """
        data = request.get_json()
        bucket_name = data.get('bucket_name')
        item = data.get('item')  # {"name": ..., "type": ...}
        expires_in = data.get('expires_in', 300)

        if not all([bucket_name, item, item.get('name'), item.get('type')]):
            return jsonify({"error": "缺少参数"}), 400

        s3_client = g.s3_client
        key = item['name']
        item_type = item['type']

        # Folders are served by the zip-on-the-fly endpoint.
        if item_type == 'folder':
            download_endpoint = '/api/v1/download-folder'
        else:
            download_endpoint = '/api/v1/download'

        # Visibility decision:
        if item_type == 'folder':
            # Simplification: a folder's downloadability follows the
            # bucket's ACL (checking every contained object would be slow).
            is_public = _is_bucket_publicly_readable(s3_client, bucket_name)
        else:
            is_public = _is_object_publicly_readable(s3_client, bucket_name, key)

        if is_public:
            relative_url = f"{download_endpoint}?bucket_name={quote(bucket_name)}&path={quote(key)}"
        else:
            # Sign bucket + key + expiry so the proxy can verify later.
            expires = int(time.time()) + expires_in
            message_to_sign = f"{bucket_name}\n{key}\n{expires}"
            salt = current_app.config['URL_SIGNING_SALT'].encode('utf-8')
            signature = hmac.new(salt, message_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()
            relative_url = (f"{download_endpoint}?bucket_name={quote(bucket_name)}"
                            f"&path={quote(key)}"
                            f"&expires={expires}"
                            f"&signature={signature}")
        return jsonify({"download_url": relative_url})



    # ======================================================================
    # ==      3. 修改：最终的下载代理接口 (现在也调用辅助函数)      ==
    # ======================================================================
    @app.route('/api/v1/download')
    @s3_client_required
    def download_proxy_final():
        """
        Final download proxy.

        Query args: bucket_name, path, and — for private objects —
        expires + signature (HMAC-SHA256, produced by the link
        generator).  Streams the object through the server.
        """
        bucket_name = request.args.get('bucket_name')
        key = request.args.get('path')
        if not bucket_name or not key:
            return "请求缺少 bucket_name 或 path 参数", 400

        s3_client = g.s3_client

        # Single authoritative visibility check (bucket AND object ACL
        # must both allow anonymous READ).
        is_public = _is_object_publicly_readable(s3_client, bucket_name, key)

        # Core security gate: private objects require a valid signed URL.
        if not is_public:
            try:
                expires = int(request.args.get('expires'))
                signature_from_url = request.args.get('signature')
                if not expires or not signature_from_url: raise ValueError
            except (TypeError, ValueError):
                return "私有资源访问缺少有效的签名参数，禁止访问", 403
            if time.time() > expires: return "下载链接已过期", 410
            # Recompute the signature over the same message the link
            # generator signed and compare in constant time.
            message_to_verify = f"{bucket_name}\n{key}\n{expires}"
            salt = current_app.config['URL_SIGNING_SALT'].encode('utf-8')
            expected_signature = hmac.new(salt, message_to_verify.encode('utf-8'), hashlib.sha256).hexdigest()
            if not hmac.compare_digest(expected_signature, signature_from_url):
                return "签名无效，禁止访问", 403

        # All checks passed: proxy the object body to the client.
        try:
            s3_object = s3_client.get_object(Bucket=bucket_name, Key=key)
            # 64 KiB chunks keep memory bounded for large objects.
            def generate_chunks():
                for chunk in s3_object['Body'].iter_chunks(chunk_size=65536): yield chunk
            filename = os.path.basename(key)
            response = Response(generate_chunks(), content_type='application/octet-stream')
            response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{quote(filename, encoding='utf-8', safe='')}"
            if 'ContentLength' in s3_object:
                response.headers["Content-Length"] = str(s3_object['ContentLength'])
            return response
        except ClientError as e:
            return f"文件访问失败: {e.response['Error']['Code']}", 404
        
    # --- Endpoint: generate a download link for a folder (prefix) ---
    @app.route('/api/v1/generate-folder-download-link', methods=['POST'])
    @s3_client_required
    def generate_folder_download_link():
        """Generate a (possibly signed) link pointing at the folder-download endpoint.

        Body: bucket_name, path (the folder prefix), optional expires_in
        (seconds, default 3600 — packing a folder can take a while).
        Returns {"download_url": <relative URL>}.
        """
        data = request.get_json()
        bucket_name = data.get('bucket_name')
        key = data.get('path')  # here 'key' is the folder prefix
        expires_in = data.get('expires_in', 3600)

        if not bucket_name or not key:
            return jsonify({"error": "缺少参数"}), 400

        s3_client = g.s3_client
        # BUGFIX: _is_bucket_publicly_readable takes (client, bucket) and is
        # used as a boolean by the download-folder endpoint; the old code
        # passed an extra argument and compared the result to the string
        # 'public-read', which a boolean can never equal — so public buckets
        # always received signed links.
        is_public = _is_bucket_publicly_readable(s3_client, bucket_name)

        # Either way the URL targets the folder-download endpoint; only the
        # signature parameters differ.
        if is_public:
            relative_url = f"/api/v1/download-folder?bucket_name={quote(bucket_name)}&path={quote(key)}"
            return jsonify({"download_url": relative_url})
        else:  # private: attach an expiring HMAC signature
            expires = int(time.time()) + expires_in
            message_to_sign = f"{bucket_name}\n{key}\n{expires}"  # same payload format as the verifier
            salt = current_app.config['URL_SIGNING_SALT'].encode('utf-8')
            signature = hmac.new(salt, message_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()
            relative_url = (f"/api/v1/download-folder?bucket_name={quote(bucket_name)}"
                            f"&path={quote(key)}"
                            f"&expires={expires}"
                            f"&signature={signature}")
            return jsonify({"download_url": relative_url})


    # ======================================================================
    # ==      5. 最终的【文件夹】下载代理接口 (现在进行修复)      ==
    # ======================================================================
    @app.route('/api/v1/download-folder')
    @s3_client_required
    def download_folder_proxy():
        """Zip every object under a prefix and return the archive.

        Private buckets require the same HMAC-signature check as the
        single-file download endpoint; the folder's visibility follows
        the bucket's visibility.
        """
        bucket_name = request.args.get('bucket_name')
        prefix = request.args.get('path')
        if not bucket_name or not prefix:
            return "请求缺少参数", 400

        s3_client = g.s3_client

        # 1. Security check — identical in spirit to the single-file proxy.
        if not _is_bucket_publicly_readable(s3_client, bucket_name):
            # Private bucket: a valid, unexpired signature is required.
            try:
                expires = int(request.args.get('expires'))
                signature_from_url = request.args.get('signature')
                if not expires or not signature_from_url:
                    raise ValueError
            except (TypeError, ValueError):
                return "私有资源访问缺少有效的签名参数，禁止访问", 403
            if time.time() > expires:
                return "下载链接已过期", 410

            payload = f"{bucket_name}\n{prefix}\n{expires}"
            secret = current_app.config['URL_SIGNING_SALT'].encode('utf-8')
            expected = hmac.new(secret, payload.encode('utf-8'), hashlib.sha256).hexdigest()
            if not hmac.compare_digest(expected, signature_from_url):
                return "签名无效，禁止访问", 403

        # 2. Build the zip archive.
        # NOTE(review): the whole archive is buffered in RAM; very large
        # folders may exhaust memory — confirm expected folder sizes.
        zip_buffer = io.BytesIO()
        base_dir = os.path.dirname(prefix)
        with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as archive:
            pages = s3_client.get_paginator('list_objects_v2').paginate(
                Bucket=bucket_name, Prefix=prefix)
            for page in pages:
                for obj in page.get("Contents", []):
                    key = obj["Key"]
                    # Skip zero-byte "directory marker" objects.
                    if key.endswith('/') and obj.get('Size', 0) == 0:
                        continue
                    body = s3_client.get_object(Bucket=bucket_name, Key=key)['Body'].read()
                    # Keep each entry's path relative to the downloaded folder.
                    archive.writestr(os.path.relpath(key, base_dir), body)

        zip_buffer.seek(0)

        # 3. Send the archive back as an attachment.
        folder_name = os.path.basename(prefix.rstrip('/')) or bucket_name
        response = Response(zip_buffer.getvalue(), mimetype='application/zip')
        response.headers['Content-Disposition'] = f"attachment; filename*=UTF-8''{quote(f'{folder_name}.zip')}"
        response.headers['Content-Length'] = str(zip_buffer.getbuffer().nbytes)
        return response
    
    # ======== Batch / folder download-link generator ========
    @app.route('/api/v1/generate-batch-download-links', methods=['POST'])
    @s3_client_required
    def generate_batch_download_links():
        """Expand selected files/folders into a flat file list and return a
        secure download link for each file.

        Body: bucket_name, items (list of {"name": ..., "type": ...}),
        optional expires_in (seconds, default 3600).
        Returns {"links": [{"key": ..., "url": ...}, ...]}.
        """
        data = request.get_json()
        bucket_name = data.get('bucket_name')
        items = data.get('items', [])  # each item: {"name": ..., "type": ...}
        expires_in = data.get('expires_in', 3600)

        if not bucket_name or not items:
            return jsonify({"error": "缺少参数"}), 400

        s3_client = g.s3_client

        # Final list of {key, url} entries returned to the frontend.
        links_to_download = []

        try:
            # 1. Expand all folders into a flat list of file keys.
            all_file_keys = []
            for item in items:
                name = item.get('name')
                # Robustness fix (consistent with the export-urls endpoint):
                # skip entries without a name — a None key would otherwise
                # make sorted() below raise TypeError.
                if not name:
                    continue
                if item.get('type') == 'folder':
                    paginator = s3_client.get_paginator('list_objects_v2')
                    for page in paginator.paginate(Bucket=bucket_name, Prefix=name):
                        for obj in page.get("Contents", []):
                            key = obj["Key"]
                            if not key.endswith('/'):  # files only
                                all_file_keys.append(key)
                else:  # plain file
                    all_file_keys.append(name)

            # De-duplicate in case a folder and a file inside it were both selected.
            unique_file_keys = sorted(set(all_file_keys))

            # 2. Generate an independent secure link for every file.
            for key in unique_file_keys:
                if _is_object_publicly_readable(s3_client, bucket_name, key):
                    relative_url = f"/api/v1/download?bucket_name={quote(bucket_name)}&path={quote(key)}"
                else:  # private: append an expiring HMAC signature
                    expires = int(time.time()) + expires_in
                    message_to_sign = f"{bucket_name}\n{key}\n{expires}"
                    salt = current_app.config['URL_SIGNING_SALT'].encode('utf-8')
                    signature = hmac.new(salt, message_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()
                    relative_url = (f"/api/v1/download?bucket_name={quote(bucket_name)}"
                                    f"&path={quote(key)}"
                                    f"&expires={expires}"
                                    f"&signature={signature}")
                links_to_download.append({"key": key, "url": relative_url})

            return jsonify({"links": links_to_download})

        except ClientError as e:
            return jsonify({"error": f"操作失败: {e.response['Error']['Code']}"}), 500

    @app.route("/api/v1/buckets/<bucket_name>/pre-delete-check", methods=["GET"])
    @s3_client_required
    def pre_delete_check_api(bucket_name):
        """
        API 接口：执行删除存储桶前的预检查。
        返回一个包含待办事项的清单。
        """
        app.logger.info(f"✅ 正确的 pre_delete_check_api 被调用了！ Bucket: {bucket_name}")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        
        checklist = {
            "can_delete": True,
            "reasons": []
        }

        try:
            # 检查1：存储桶中是否有任何对象
            # response = s3_client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
            
            # 检查1：存储桶中是否有任何对象 (逻辑不变)
            obj_response = s3_client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
            if 'Contents' in obj_response and obj_response['Contents']:
                checklist["can_delete"] = False
                checklist["reasons"].append({
                    "id": "clear_objects",
                    "description": "存储桶中仍包含文件或对象。",
                    "action_text": "前往清空",
                    "action_type": "link_to_files", # 前端会用这个类型来生成链接
                    "status": "pending"
                })
            
            # 【新增】检查2：存储桶中是否有任何未完成的分片上传 (碎片)
            multipart_response = s3_client.list_multipart_uploads(Bucket=bucket_name, MaxUploads=1)
            if 'Uploads' in multipart_response and multipart_response['Uploads']:
                checklist["can_delete"] = False
                checklist["reasons"].append({
                    "id": "clear_fragments", # 给前端一个新的ID来识别
                    "description": "存储桶中包含未完成的分片上传任务 (碎片)。",
                    "action_text": "管理碎片",
                    "action_type": "link_to_fragments", # 前端会用这个类型来生成链接
                    "status": "pending"
                })
            
            # 未来可以在这里添加更多检查，比如分片上传、版本控制等...

            app.logger.info(f"返回给前端的 checklist: {json.dumps(checklist)}")
            return jsonify(checklist)

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"预检查存储桶 '{bucket_name}' 时出错 (ClientError): {e}")
            if error_code == 'NoSuchBucket':
                return jsonify({"error": "存储桶不存在"}), 404
            # 返回一个标准的错误响应
            return jsonify({"error": f"检查失败: {error_code}"}), 500
        except Exception as e:
            app.logger.error(f"预检查存储桶 '{bucket_name}' 时发生未知错误: {e}")
            return jsonify({"error": "服务器内部错误"}), 500

    # =================================================================================
    # ==             导出 URL 列表为 CSV 的 API                                     ==
    # =================================================================================
    @app.route("/api/v1/buckets/<bucket_name>/export-urls", methods=["POST"])
    @s3_client_required
    def export_urls_api(bucket_name):
        """
        API 接口：为选定的文件和文件夹生成访问URL，并导出为CSV文件。
        该接口的URL生成逻辑与 /generate-batch-download-links 完全一致。
        """
        data = request.get_json()
        # 【修改】现在我们接收 items 列表，而不是 keys
        items = data.get('items', []) 
        expires_in = data.get('expires_in', 3600)
        encode_filename = data.get('encode_filename', True)

        if not items:
            return jsonify({"error": "请求中缺少 'items' (文件/文件夹列表)"}), 400

        app.logger.info(f"收到为 {len(items)} 个顶层项目导出URL的请求。")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        
        output = io.StringIO()
        writer = csv.writer(output)
        writer.writerow(['object', 'url'])
        base_url = request.host_url.rstrip('/')

        try:
            # === 1. 【完全复用】展开所有文件夹，得到一个纯粹的文件列表 ===
            all_file_keys = []
            for item in items:
                # 前端发送的 items 列表不包含 type，所以我们通过 key 是否以 '/' 结尾来判断
                key = item.get('name')
                if not key: continue

                if key.endswith('/'): # 认为是文件夹
                    paginator = s3_client.get_paginator('list_objects_v2')
                    for page in paginator.paginate(Bucket=bucket_name, Prefix=key):
                        if "Contents" in page:
                            for obj in page["Contents"]:
                                obj_key = obj["Key"]
                                if not obj_key.endswith('/'):
                                    all_file_keys.append(obj_key)
                else: # 认为是文件
                    all_file_keys.append(key)
            
            unique_file_keys = sorted(list(set(all_file_keys)))
            app.logger.info(f"展开后共得到 {len(unique_file_keys)} 个唯一文件用于导出URL。")

            # === 2. 【完全复用】为每一个文件独立生成安全链接并写入CSV ===
            for key in unique_file_keys:
                # a. 逐个检查每个文件的公共可读性
                is_public = _is_object_publicly_readable(s3_client, bucket_name, key)
                
                # b. 根据权限生成相对URL
                relative_url = ""
                if is_public:
                    relative_url = f"/api/v1/download?bucket_name={quote(bucket_name)}&path={quote(key)}"
                else: # private
                    expires = int(time.time()) + expires_in
                    message_to_sign = f"{bucket_name}\n{key}\n{expires}"
                    salt = current_app.config['URL_SIGNING_SALT'].encode('utf-8')
                    signature = hmac.new(salt, message_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()
                    relative_url = (f"/api/v1/download?bucket_name={quote(bucket_name)}"
                                    f"&path={quote(key)}"
                                    f"&expires={expires}"
                                    f"&signature={signature}")

                # c. 拼接完整URL
                full_url = f"{base_url}{relative_url}"
                
                # d. 写入CSV行
                object_name_to_write = key if encode_filename else unquote(key)
                writer.writerow([object_name_to_write, full_url])

            # 3. 准备并返回HTTP响应
            csv_content = output.getvalue()
            output.close()
            response = Response(csv_content, mimetype='text/csv')
            response.headers['Content-Disposition'] = f'attachment; filename="exported_urls_{bucket_name}.csv"'
            return response

        except ClientError as e:
            # 返回一个包含错误的CSV，而不是JSON，这样用户能知道出了什么问题
            output.close() # 确保关闭
            error_output = io.StringIO()
            error_writer = csv.writer(error_output)
            error_writer.writerow(['error', 'message'])
            error_writer.writerow([e.response['Error']['Code'], e.response['Error']['Message']])
            error_csv = error_output.getvalue()
            error_output.close()
            return Response(error_csv, mimetype='text/csv', status=500)
        except Exception as e:
            app.logger.error(f"导出URL时发生未知错误: {e}")
            # 这里可以返回一个通用错误
            return jsonify({"error": "服务器内部发生未知错误"}), 500


    # ======================================================================
    # ==      阻止公共访问 (Public Access Block) - GET 接口 (健壮版)      ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/public-access-block", methods=["GET"])
    @s3_client_required
    def get_public_access_block_api(bucket_name):
        """
        API 接口：获取指定存储桶的“阻止公共访问”配置。
        【健壮版】能处理S3兼容存储不支持此API的情况。
        """
        app.logger.info(f"正在获取 {bucket_name} 的 PublicAccessBlock 配置...")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        try:
            response = s3_client.get_public_access_block(Bucket=bucket_name)
            config = response.get('PublicAccessBlockConfiguration', {})
            
            # 为了方便前端使用，我们计算一个总开关状态
            # 如果配置存在且所有选项都为True，则我们认为总开关是开启的
            is_enabled = all(config.values()) if config else False

            app.logger.info(f"{bucket_name} 的 PublicAccessBlock 状态为: {'开启' if is_enabled else '关闭'}")
            return jsonify({
                "is_enabled": is_enabled,
                "configuration": config
            })

        except ClientError as e:
            # 【核心容错逻辑】
            # 捕获所有客户端错误，这能覆盖 'NoSuchPublicAccessBlockConfiguration', 
            # 'NotImplemented', 'InvalidRequest' 等所有可能的兼容性问题。
            error_code = e.response["Error"]["Code"]
            app.logger.warning(
                f"无法获取 {bucket_name} 的 PublicAccessBlock 配置 (错误码: {error_code})。"
                f"这很可能是因为S3兼容存储不支持此API。将安全地返回“关闭”状态。"
            )
            # 无论是什么错误，都安全地返回默认的“关闭”状态，防止前端应用崩溃。
            return jsonify({
                "is_enabled": False,
                "configuration": {}
            })


    # ======================================================================
    # ==      阻止公共访问 (Public Access Block) - PUT 接口             ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/public-access-block", methods=["PUT"])
    @s3_client_required
    def put_public_access_block_api(bucket_name):
        """
        API 接口：设置指定存储桶的“阻止公共访问”配置。
        """
        data = request.get_json()
        if not data or 'is_enabled' not in data:
            return jsonify({"error": "请求体中必须包含 'is_enabled' (true/false) 字段"}), 400

        is_enabled = data.get('is_enabled', False)
        
        action = "开启" if is_enabled else "关闭"
        app.logger.info(f"正在为 {bucket_name} {action} PublicAccessBlock...")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        
        # 根据前端传来的总开关状态，构建S3 API需要的完整配置字典
        config = {
            'BlockPublicAcls': is_enabled,
            'IgnorePublicAcls': is_enabled,
            'BlockPublicPolicy': is_enabled,
            'RestrictPublicBuckets': is_enabled
        }

        try:
            s3_client.put_public_access_block(
                Bucket=bucket_name,
                PublicAccessBlockConfiguration=config
            )
            return jsonify({"message": f"“阻止公共访问”已成功{action}"}), 200

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            # 【容错逻辑】如果S3兼容存储不支持此API，它可能会在这里报错
            app.logger.error(f"设置 PublicAccessBlock 失败 (错误码: {error_code})")
            return jsonify({"error": f"设置配置失败: {error_code}. 您的存储服务可能不支持此功能。"}), 500

    @app.route("/api/v1/buckets/<bucket_name>/acl", methods=["GET"])
    @s3_client_required
    def get_bucket_acl_api(bucket_name):
        """
        API 接口：获取指定存储桶的ACL。
        Boto3返回的原始数据很复杂，这里进行简化处理。
        """
        app.logger.info(f"正在获取 {bucket_name} 的 Bucket ACL...")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        try:
            response = s3_client.get_bucket_acl(Bucket=bucket_name)
            grants = response.get('Grants', [])
            
            # S3 ACL 逻辑：
            # - 如果有给 AllUsers (http://acs.amazonaws.com/groups/global/AllUsers) 的 READ 权限 -> public-read
            # - 如果有给 AllUsers 的 FULL_CONTROL 或 WRITE 权限 -> public-read-write
            # - 否则 -> private
            
            acl = 'private'
            for grant in grants:
                grantee = grant.get('Grantee', {})
                if grantee.get('URI') == 'http://acs.amazonaws.com/groups/global/AllUsers':
                    permission = grant.get('Permission')
                    if permission in ['FULL_CONTROL', 'WRITE']:
                        acl = 'public-read-write'
                        break # 优先级最高
                    elif permission == 'READ':
                        acl = 'public-read'
            
            return jsonify({"acl": acl})

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"获取 Bucket ACL 失败: {error_code}")
            return jsonify({"error": f"获取ACL配置失败: {error_code}"}), 500


    @app.route("/api/v1/buckets/<bucket_name>/acl", methods=["PUT"])
    @s3_client_required
    def put_bucket_acl_api(bucket_name):
        """
        API 接口：设置指定存储桶的ACL。
        """
        data = request.get_json()
        new_acl = data.get('acl')
        if new_acl not in ['private', 'public-read', 'public-read-write']:
            return jsonify({"error": "无效的ACL值"}), 400

        app.logger.info(f"正在为 {bucket_name} 设置新的 Bucket ACL: {new_acl}")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        try:
            s3_client.put_bucket_acl(Bucket=bucket_name, ACL=new_acl)
            return jsonify({"message": f"存储桶ACL已成功设置为 {new_acl}"}), 200
        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"设置 Bucket ACL 失败: {error_code}")
            return jsonify({"error": f"设置ACL失败: {error_code}"}), 500
        
    @app.route('/api/v1/buckets/<bucket_name>/objects/acl', methods=['PUT'])
    @s3_client_required
    def set_object_acl(bucket_name):
        """Set a canned ACL on a single object.

        Body: {"key": <object key>, "acl": <canned ACL>}.
        """
        data = request.get_json()
        if not data:
            return jsonify({"error": "请求体不能为空且必须是 JSON 格式"}), 400

        key = data.get('key')
        acl = data.get('acl')
        if not key or not acl:
            return jsonify({"error": "请求中缺少 'key' 或 'acl' 参数"}), 400

        # Only the standard canned ACL values S3 supports are accepted.
        valid_acls = ['private', 'public-read', 'public-read-write', 'authenticated-read']
        if acl not in valid_acls:
            return jsonify({"error": f"无效的ACL值: {acl}. 只接受 {valid_acls}"}), 400

        app.logger.info(f"准备为对象 '{key}' (在桶 '{bucket_name}' 中) 设置ACL为 '{acl}'")
        s3_client = g.s3_client

        try:
            s3_client.put_object_acl(Bucket=bucket_name, Key=key, ACL=acl)
            return jsonify({"message": f"文件 '{key}' 的ACL已成功设置为 '{acl}'"}), 200

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"设置对象ACL失败 (key: {key}): {error_code}")
            # Give the frontend a precise error for the missing-object case.
            if error_code == 'NoSuchKey':
                return jsonify({"error": "操作失败：指定的文件不存在"}), 404
            return jsonify({"error": f"操作失败，存储服务返回错误: {error_code}"}), 500
        except Exception as e:
            app.logger.error(f"设置对象ACL时发生未知错误: {e}")
            return jsonify({"error": "服务器内部发生未知错误"}), 500
        
    # ======================================================================
    # ==      生命周期 (Lifecycle) - GET 接口 (获取配置)      ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/lifecycle", methods=["GET"])
    @s3_client_required
    def get_bucket_lifecycle_api(bucket_name):
        """
        API 接口：获取指定存储桶的生命周期配置。
        """
        app.logger.info(f"正在获取 {bucket_name} 的生命周期配置...")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        try:
            response = s3_client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
            # 如果调用成功，但没有 'Rules' 字段，也返回空列表
            rules = response.get('Rules', [])
            return jsonify(rules)
        except ClientError as e:
            # 【核心修改】在这里增加对多种错误码的判断
            error_code = e.response['Error']['Code']
            app.logger.error(f"get_bucket_lifecycle_api::error_code: {error_code}")
            # 检查错误码是否是表示“没有配置”的几种常见情况
            # 1. 'NoSuchLifecycleConfiguration' 是标准S3的返回码
            # 2. 'NoSuchBucket' 是某些S3兼容存储在这种情况下会返回的错误码
            if error_code in ['NoSuchLifecycleConfiguration', 'NoSuchBucket', 'NoSuchBucketLc']:
                app.logger.info(
                    f"桶 {bucket_name} 没有生命周期配置 (返回码: {error_code})，这被视为正常情况，返回空列表。"
                )
                return jsonify([])
            else:
                # 其他真正的 S3 错误 (如 AccessDenied)
                app.logger.error(f"获取生命周期配置时发生未知错误: {e}")
                return jsonify({"error": f"获取配置失败: {error_code}"}), 500

    # ======================================================================
    # ==      生命周期 (Lifecycle) - PUT 接口 (设置配置)      ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/lifecycle", methods=["PUT"])
    @s3_client_required
    def put_bucket_lifecycle_api(bucket_name):
        """
        API 接口：设置指定存储桶的生命周期配置。
        注意：此操作会【覆盖】所有现有的生命周期规则。
        """
        data = request.get_json()
        rules = data.get('rules')
        if rules is None: # 允许传入空列表来清空规则
            return jsonify({"error": "请求体中必须包含 'rules' 列表"}), 400

        app.logger.info(f"正在为 {bucket_name} 设置新的生命周期配置...")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        
        try:
            # 如果 rules 是空列表，表示要删除所有规则
            if not rules:
                 s3_client.delete_bucket_lifecycle(Bucket=bucket_name)
                 app.logger.info(f"已成功清空桶 {bucket_name} 的所有生命周期规则。")
                 return jsonify({"message": "生命周期配置已清空"}), 200

            # 构造 Boto3 需要的配置字典
            lifecycle_configuration = {'Rules': rules}
            
            # 【调试利器】在执行前打印出最终要发送给Boto3的结构
            import json
            app.logger.debug(f"即将发送给 Boto3 的配置: {json.dumps(lifecycle_configuration, indent=2)}")

            s3_client.put_bucket_lifecycle_configuration(
                Bucket=bucket_name,
                LifecycleConfiguration=lifecycle_configuration
            )
            return jsonify({"message": "生命周期配置已成功更新"}), 200
        except ClientError as e:
            app.logger.error(f"设置生命周期配置失败: {e}")
            return jsonify({"error": f"设置配置失败: {e.response['Error']['Code']}", "details": e.response['Error']['Message']}), 500
            
    # ======================================================================
    # ==      生命周期 (Lifecycle) - DELETE 接口 (清空配置)     ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/lifecycle", methods=["DELETE"])
    @s3_client_required
    def delete_bucket_lifecycle_api(bucket_name):
        """
        API 接口：清空指定存储桶的所有生命周期规则。
        """
        app.logger.info(f"正在清空 {bucket_name} 的所有生命周期规则...")
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client

        try:
            s3_client.delete_bucket_lifecycle(Bucket=bucket_name)
            return jsonify({"message": "生命周期配置已成功清空"}), 200
        except ClientError as e:
            # 即使没有配置就去删除，boto3也可能不会报错，但以防万一
            app.logger.error(f"清空生命周期配置失败: {e}")
            return jsonify({"error": f"操作失败: {e.response['Error']['Code']}"}), 500
            
    @app.route("/api/v1/buckets/<string:bucket_name>/multipart/initiate", methods=["POST"])
    @s3_client_required
    def initiate_multipart_upload_api(bucket_name):
        """API - 步骤1: 初始化分片上传 (此接口保持不变)"""
        data = request.get_json()
        key = data.get('key')
        acl = data.get('acl')
        if not key:
            return jsonify({"error": "请求中缺少 'key' (文件路径)"}), 400

        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        try:
            response = initiate_multipart_upload(s3_client, bucket_name, key, acl)
            return jsonify({"uploadId": response.get('UploadId')})
        except ClientError as e:
            return jsonify({"error": f"操作失败: {e.response['Error']['Code']}"}), 500

    # ============================================================================
    # == 【核心修正】确保路由路径唯一且明确，避免与其他规则冲突                ==
    # ============================================================================
    @app.route("/api/v1/buckets/<string:bucket_name>/multipart/upload-part", methods=["POST"])
    @s3_client_required
    def upload_part_proxy_api(bucket_name):
        """API - 步骤2 (代理模式): 接收浏览器发来的分片，并将其转发给S3"""
        current_app.logger.info(f"✅ Correct upload_part_proxy_api endpoint was hit for bucket: {bucket_name}")
        
        # 从表单数据中获取元数据
        key = request.form.get('key')
        upload_id = request.form.get('uploadId')
        part_number = request.form.get('partNumber', type=int)
        
        if not all([key, upload_id, part_number]):
            return jsonify({"error": "请求缺少 key, uploadId, 或 partNumber 参数"}), 400
            
        # 从文件部分获取分片数据
        if 'chunk' not in request.files:
            return jsonify({"error": "请求中未找到 'chunk' 文件部分"}), 400
        
        chunk_file = request.files['chunk']
        
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        try:
            response = upload_part_from_stream(s3_client, bucket_name, key, upload_id, part_number, chunk_file.stream)
            etag = response.get('ETag', '').strip('"')
            return jsonify({"etag": etag})
        except ClientError as e:
            current_app.logger.error(f"代理上传分片 {part_number} 失败: {e}")
            return jsonify({"error": f"分片上传失败: {e.response['Error']['Code']}"}), 500
    # ============================================================================
    # ============================================================================


    @app.route("/api/v1/buckets/<string:bucket_name>/multipart/complete", methods=["POST"])
    @s3_client_required
    def complete_multipart_upload_api(bucket_name):
        """API - 步骤3: 完成分片上传 (此接口保持不变)"""
        data = request.get_json()
        key = data.get('key')
        upload_id = data.get('uploadId')
        parts = data.get('parts')
        if not all([key, upload_id, parts]):
            return jsonify({"error": "请求缺少 key, uploadId, 或 parts 参数"}), 400
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        try:
            complete_multipart_upload(s3_client, bucket_name, key, upload_id, parts)
            return jsonify({"message": "文件上传成功并已合并"}), 200
        except ClientError as e:
            return jsonify({"error": f"文件合并失败: {e.response['Error']['Code']}", "details": e.response['Error'].get('Message')}), 500

    @app.route("/api/v1/buckets/<string:bucket_name>/multipart/abort", methods=["POST"])
    @s3_client_required
    def abort_multipart_upload_api(bucket_name):
        """API - 步骤4: 中止分片上传 (此接口保持不变)"""
        data = request.get_json()
        key = data.get('key')
        upload_id = data.get('uploadId')
        if not all([key, upload_id]):
            return jsonify({"error": "请求缺少 key 或 uploadId 参数"}), 400
        # s3_client = boto3.client("s3", **app.config["AWS_KWARGS"])
        s3_client = g.s3_client
        try:
            abort_multipart_upload(s3_client, bucket_name, key, upload_id)
            return jsonify({"message": "上传已中止，所有碎片已清理"}), 200
        except ClientError as e:
            return jsonify({"error": f"操作失败: {e.response['Error']['Code']}"}), 500

    # ======================================================================
    # ==                 碎片管理 - GET 接口 (获取列表)                    ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/multipart-uploads", methods=["GET"])
    @s3_client_required
    def list_multipart_uploads_api(bucket_name):
        """
        API: list every in-progress multipart upload (orphaned fragment)
        in the given bucket.
        """
        app.logger.info(f"正在获取桶 '{bucket_name}' 的碎片列表...")
        s3_client = g.s3_client

        try:
            # Walk every result page so the listing is complete.
            paginator = s3_client.get_paginator('list_multipart_uploads')
            raw_uploads = [
                item
                for result_page in paginator.paginate(Bucket=bucket_name)
                for item in result_page.get('Uploads', [])
            ]

            # Shape the raw boto3 records into a JSON-serialisable payload.
            payload = []
            for item in raw_uploads:
                initiated = item.get('Initiated')
                payload.append({
                    "key": item.get('Key'),
                    "uploadId": item.get('UploadId'),
                    # datetime objects must become ISO strings for JSON.
                    "initiated": initiated.isoformat() if initiated else None,
                })

            return jsonify(payload)

        except ClientError as e:
            error_code = e.response["Error"]["Code"]
            app.logger.error(f"获取碎片列表时出错: {error_code}")
            # Adjust here if the S3-compatible backend reports differently.
            return jsonify({"error": f"获取碎片列表失败: {error_code}"}), 500
    
    # ======================================================================
    # ==                  碎片管理 - DELETE 接口 (批量中止)                 ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/multipart-uploads", methods=["DELETE"])
    @s3_client_required
    def abort_multipart_uploads_api(bucket_name):
        """
        API: abort (delete) one or more multipart upload tasks in bulk.
        """
        payload = request.get_json()
        uploads_to_abort = payload.get('uploads', [])
        if not uploads_to_abort:
            return jsonify({"error": "请求中缺少 'uploads' 列表"}), 400

        app.logger.info(f"收到为桶 '{bucket_name}' 清理 {len(uploads_to_abort)} 个碎片的请求。")
        s3_client = g.s3_client

        errors = []
        success_count = 0

        # Abort one at a time so a single failure doesn't stop the rest.
        for target in uploads_to_abort:
            object_key = target.get('key')
            upload_id = target.get('uploadId')
            if not (object_key and upload_id):
                errors.append({"key": object_key or "未知", "error": "缺少 key 或 uploadId"})
                continue
            try:
                abort_multipart_upload(s3_client, bucket_name, object_key, upload_id)
            except ClientError as e:
                errors.append({"key": object_key, "uploadId": upload_id, "error": e.response["Error"]["Code"]})
            else:
                success_count += 1

        if errors:
            # 207 Multi-Status: partial success with per-item error detail.
            return jsonify({
                "message": f"操作完成：{success_count} 个成功，{len(errors)} 个失败。",
                "errors": errors
            }), 207
        return jsonify({"message": f"成功清理了 {success_count} 个碎片任务"}), 200
    
    # delete_items_api(bucket_name)
    # def get_bucket_contents_api(bucket_name: str, path: str):

    #     app.logger.info(f"模拟 API 被调用！正在为 Bucket '{bucket_name}' 的路径 '{path}' 返回假数据...")
        
    #     # --- 模拟代码（仿照阿里云OSS截图）---
    #     # 我们可以根据路径返回不同的数据，来模拟进入子文件夹
    #     if "测试目录1" in path:
    #         # 这是当用户点击“测试目录1/”后看到的内容
    #         fake_items = [
    #             {"name": path + "子文件1.txt", "type": "file", "size": "1.23MB", "date_modified": "2025-07-01 10:00:00"},
    #             {"name": path + "子目录/", "type": "folder", "size": "未统计", "date_modified": "2025-07-01 10:00:00"},
    #         ]
    #     else:
    #         # 这是根目录下的内容
    #         fake_items = [
    #             # 文件夹
    #             {"name": "测试目录1/", "type": "folder", "size": "未统计", "date_modified": "2025-06-27 10:36:53"},
    #             # 文件
    #             {"name": "cat.jpg", "type": "file", "size": "137.497 KB", "date_modified": "2025-06-27 10:36:53"},
    #             {"name": "微信图片_20250422105158.jpg", "type": "file", "size": "170.861 KB", "date_modified": "2025-06-27 10:36:53"},
    #             {"name": "悠米的星空冒险2.txt", "type": "file", "size": "4.099 KB", "date_modified": "2025-06-27 16:11:54"},
    #             {"name": "悠米的星空冒险.txt", "type": "file", "size": "3.45 KB", "date_modified": "2025-06-27 16:11:55"},
    #             {"name": "视觉桥梁.docx", "type": "file", "size": "4.037 MB", "date_modified": "2025-06-27 16:11:55"},
    #         ]
        
    #     # 模拟搜索功能
    #     search_param = request.args.get("search", "")
    #     if search_param:
    #         fake_items = [item for item in fake_items if search_param.lower() in item['name'].lower()]

    #     # 模拟分页功能
    #     page = request.args.get("page", 1, type=int)
    #     items_per_page = request.args.get("limit", 30, type=int)
    #     total_items = len(fake_items)
    #     start_idx = (page - 1) * items_per_page
    #     end_idx = start_idx + items_per_page
    #     paginated_items = fake_items[start_idx:end_idx]
        
    #     return jsonify({
    #         "items": paginated_items,
    #         "total": total_items,
    #         "path": path,
    #         "bucket_name": bucket_name
    #     })

    # -------------------------------------------------------------

    # @app.route("/", methods=["GET"])
    # def index() -> str:
    #     s3 = boto3.resource("s3", **app.config["AWS_KWARGS"])
    #     all_buckets = s3.buckets.all()
    #     return render_template("index.html", buckets=all_buckets)

    # @app.route("/buckets")
    # def buckets() -> str:
    #     s3 = boto3.resource("s3", **app.config["AWS_KWARGS"])
    #     all_buckets = s3.buckets.all()
    #     return render_template("index.html", buckets=all_buckets)

    @app.route("/search/buckets/<bucket_name>", defaults={"path": ""})
    @app.route("/search/buckets/<bucket_name>/<path:path>")
    @s3_client_required
    def search_bucket(bucket_name: str, path: str) -> str:
        """Search a bucket: recursively collect objects under *path*,
        filter by the 'search' query parameter, render a paginated page."""
        page = request.args.get("page", 1, type=int)
        items_per_page = app.config["PAGE_ITEMS"]
        s3_client = g.s3_client
        paginator = s3_client.get_paginator("list_objects_v2")
        file_entries = []
        folder_prefixes = []

        try:
            # Search must see everything below the prefix, so the first pass
            # walks recursively (no delimiter) and keeps real files only.
            for resp in paginator.paginate(Bucket=bucket_name, Prefix=path):
                for item in resp.get("Contents", []):
                    if item["Key"].endswith("/"):
                        continue
                    file_entries.append({
                        "Key": item["Key"],
                        "Size": item["Size"],
                        "LastModified": item["LastModified"],
                    })

            # A second pass with a delimiter yields the folder-style prefixes.
            for resp in paginator.paginate(Bucket=bucket_name, Prefix=path, Delimiter="/"):
                folder_prefixes.extend(resp.get("CommonPrefixes", []))

            # Feed files and folders through parse_responses for filtering.
            combined_raw_data = [{"Contents": file_entries, "CommonPrefixes": folder_prefixes}]
            search_param = request.args.get("search", "")
            contents = parse_responses(combined_raw_data, search_param)

            # Manual pagination over the filtered result set.
            total_items = len(contents)
            total_pages = (total_items + items_per_page - 1) // items_per_page
            start_idx = (page - 1) * items_per_page
            page_slice = contents[start_idx:start_idx + items_per_page]

            return render_template(
                "bucket_contents.html",
                contents=page_slice,
                bucket_name=bucket_name,
                path=path,
                search_param=search_param,
                current_page=page,
                total_pages=total_pages,
            )

        except botocore.exceptions.ClientError as e:
            error_code = e.response["Error"]["Code"]
            if error_code == "AccessDenied":
                return render_template(
                    "error.html",
                    error="You do not have permission to access this bucket.",
                )
            if error_code == "NoSuchBucket":
                return render_template("error.html", error="The specified bucket does not exist.")
            return render_template("error.html", error=f"An unknown error occurred: {e}")

    @app.route("/buckets/<bucket_name>", defaults={"path": ""})
    @app.route("/buckets/<bucket_name>/<path:path>")
    @s3_client_required
    def view_bucket(bucket_name: str, path: str) -> str:
        """Render one page of a bucket's contents at the given prefix.

        Pagination is continuation-token based: reaching page N requires
        walking N-1 tokens first, so deep pages cost N S3 list calls.
        """
        page = request.args.get("page", 1, type=int)
        items_per_page = app.config["PAGE_ITEMS"]  # page size from app config

        s3_client = g.s3_client

        # ---- Count items at this level to compute total_pages ----
        # list_objects_v2 with Delimiter='/' restricts results to the current
        # "folder" level instead of recursing into sub-prefixes.
        paginator = s3_client.get_paginator("list_objects_v2")
        total_objects = 0

        for page_iterator in paginator.paginate(Bucket=bucket_name, Prefix=path, Delimiter="/"):
            # Folders at this level appear as CommonPrefixes.
            if "CommonPrefixes" in page_iterator:
                total_objects += len(page_iterator["CommonPrefixes"])
            # With the delimiter set, Contents holds only this level's files;
            # skip zero-byte keys ending in '/' that merely mark a folder.
            if "Contents" in page_iterator:
                total_objects += sum(1 for obj in page_iterator["Contents"] if not obj["Key"].endswith("/"))

        total_pages = (total_objects + items_per_page - 1) // items_per_page

        try:
            # Walk continuation tokens until we reach the requested page.
            continuation_token = None
            if page > 1:
                # list_objects (wrapper around list_objects_v2) returns at
                # most items_per_page results plus a NextContinuationToken,
                # so page N needs N-1 preliminary requests. Inefficient for
                # deep pages, but correct.
                temp_response = None
                for _ in range(page - 1):  # (page - 1) hops to the right token
                    temp_response = list_objects(
                        s3_client, bucket_name, path, items_per_page, "/", continuation_token
                    )
                    if not temp_response.get("IsTruncated"):
                        # No more data: the requested page number exceeds the
                        # real page count; stop early with the last token seen.
                        break
                    continuation_token = temp_response.get("NextContinuationToken")

                # NOTE(review): this branch is a no-op (body is `pass`) and
                # its condition looks unreachable after the loop above; kept
                # byte-identical to preserve the original control flow.
                if page > 1 and not continuation_token and temp_response and temp_response.get("IsTruncated"):
                    pass

            # Fetch the page that will actually be rendered.
            response = list_objects(s3_client, bucket_name, path, items_per_page, "/", continuation_token)

            # parse_responses also applies the optional 'search' filter.
            contents = parse_responses([response], request.args.get("search", ""))

            return render_template(
                "bucket_contents.html",
                contents=contents,
                bucket_name=bucket_name,
                path=path,
                search_param=request.args.get("search", ""),
                current_page=page,
                total_pages=total_pages,
            )
        except botocore.exceptions.ClientError as e:
            error_code = e.response["Error"]["Code"]
            if error_code == "AccessDenied":
                return render_template(
                    "error.html",
                    error="You do not have permission to access this bucket.",
                )
            elif error_code == "NoSuchBucket":
                return render_template("error.html", error="The specified bucket does not exist.")
            else:
                return render_template("error.html", error=f"An unknown error occurred: {e}")

    # =============================================================
    # ==             “从URL上传”的API接口                         ==
    # =============================================================
    @app.route("/api/v1/buckets/<string:bucket_name>/upload-from-url", methods=["POST"])
    @s3_client_required
    def upload_from_url_api(bucket_name):
        """Transfer a remote resource (HTTP(S) file or RTSP live stream)
        into the bucket as a background task.

        Body: source_url, destination_key, optional chunk_size_mb (default 10).
        Returns 202 with task_info the frontend uses to poll progress.

        Fix vs. previous version: the task_id / running_tasks registration
        was duplicated (once before the HEAD probe and again inside each
        branch), which generated two uuids per request and leaked an
        orphaned 'starting' entry in running_tasks. It now happens once.
        """
        data = request.get_json()
        if not data: return jsonify({"error": "请求体不能为空"}), 400
        source_url = data.get('source_url')
        destination_key = data.get('destination_key')
        chunk_size_mb = data.get('chunk_size_mb', 10)
        if not source_url or not destination_key:
            return jsonify({"error": "缺少必要参数"}), 400

        current_app.logger.info(f"收到从URL上传请求: {source_url}")
        is_live_stream = source_url.lower().startswith('rtsp://')

        # Register the task exactly once.
        task_id = str(uuid.uuid4())
        if not hasattr(current_app, 'running_tasks'):
            current_app.running_tasks = {}
        current_app.running_tasks[task_id] = {'status': 'starting', 'progress': 0}

        aws_kwargs = current_app.config["AWS_KWARGS"]
        app_context = current_app._get_current_object()

        # Pre-fetch the size of HTTP sources so the frontend can show real
        # progress; -1 means unknown (RTSP streams, or no Content-Length).
        total_size = -1
        if not is_live_stream:
            try:
                head_response = requests.head(source_url, allow_redirects=True, timeout=5)
                head_response.raise_for_status()
                content_length = head_response.headers.get('Content-Length')
                if content_length:
                    total_size = int(content_length)
            except requests.exceptions.RequestException as e:
                current_app.logger.warning(f"无法预先获取 {source_url} 的大小: {e}")

        # Dispatch to the matching background worker; both take the same args.
        worker = _stream_rtsp_to_s3 if is_live_stream else _stream_http_to_s3
        thread = threading.Thread(
            target=worker,
            args=(app_context, task_id, aws_kwargs, bucket_name, source_url, destination_key, chunk_size_mb)
        )
        thread.start()

        message = "RTSP 直播流录制任务已创建。" if is_live_stream else "HTTP 文件后台传输任务已创建。"
        return jsonify({
            "message": message,
            "task_info": {
                "taskId": task_id,
                "id": task_id,
                "name": os.path.basename(destination_key),
                "size": total_size,  # always -1 for RTSP
                "bucketName": bucket_name,
                "basePath": os.path.dirname(destination_key) + '/',
                "isLiveStream": is_live_stream
            }
        }), 202
    
    # =============================================================
    # == 【核心新增】支持HTTP范围请求的视频流代理API             ==
    # =============================================================
    @app.route("/api/v1/stream-video/<string:bucket_name>/<path:key>")
    @s3_client_required
    def stream_video_api(bucket_name, key):
        """Video streaming proxy that honours HTTP Range Requests.

        Fixes vs. previous version: an `end` past the last byte is now
        clamped to file_size-1 (per RFC 7233) instead of returning 416,
        and suffix ranges like 'bytes=-500' are supported.
        """
        s3_client = g.s3_client

        # 1. Look up size / etag / content type first.
        object_info = get_object_info(s3_client, bucket_name, key)
        if not object_info:
            return "File not found", 404

        file_size = object_info['size']
        etag = object_info['etag']

        # 2. Parse the browser's Range header.
        range_header = request.headers.get('Range', None)
        if not range_header:
            # Scenario A: initial probe with no Range header — the client
            # just wants the size and to learn that ranges are supported.
            resp = Response(None, 200, mimetype=object_info['content_type'])
            resp.headers.add('Content-Length', str(file_size))
            resp.headers.add('Accept-Ranges', 'bytes')  # advertise range support
            return resp

        # Scenario B: a follow-up request carrying 'bytes=start-end'.
        try:
            byte_range = range_header.split('=')[1]
            start_str, end_str = byte_range.split('-')
            if start_str:
                start = int(start_str)
                end = int(end_str) if end_str else file_size - 1
            else:
                # Suffix range 'bytes=-N' means the final N bytes.
                start = max(file_size - int(end_str), 0)
                end = file_size - 1
        except (ValueError, IndexError):
            return "Invalid Range header", 400

        # RFC 7233: an end beyond the last byte is clamped, not rejected.
        end = min(end, file_size - 1)
        if start >= file_size or start > end:
            return "Requested range not satisfiable", 416

        # 3. Fetch only the requested byte range from S3.
        try:
            s3_resp = s3_client.get_object(
                Bucket=bucket_name, 
                Key=key, 
                Range=f'bytes={start}-{end}'
            )
        except botocore.exceptions.ClientError as e:
            return f"Failed to get object range from S3: {e}", 500

        # 4. Stream a spec-compliant 206 Partial Content response.
        def generate_chunks():
            for chunk in s3_resp['Body'].iter_chunks(chunk_size=65536):
                yield chunk

        resp = Response(generate_chunks(), 206, mimetype=object_info['content_type'])
        resp.headers.add('Content-Range', f'bytes {start}-{end}/{file_size}')
        resp.headers.add('ETag', etag)
        resp.headers.add('Content-Length', str(s3_resp['ContentLength']))
        resp.headers.add('Last-Modified', http_date(s3_resp['LastModified']))
        resp.headers.add('Accept-Ranges', 'bytes')

        return resp
    
    @app.route("/api/v1/tasks/<string:task_id>/stop", methods=["POST"])
    @s3_client_required
    def stop_live_stream_task(task_id):
        """Immediately stop a running recording task by asking its ffmpeg
        process to terminate gracefully."""
        app.logger.info(f"收到【立即】停止任务 {task_id} 的请求...")

        if not (hasattr(app, 'running_tasks') and task_id in app.running_tasks):
            app.logger.warning(f"尝试停止一个不存在或已完成的任务: {task_id}")
            return jsonify({"error": "任务不存在或已完成。"}), 404

        task_status = app.running_tasks[task_id]

        # 1. Raise the stop flag first so the worker thread knows the stop
        #    was requested deliberately (not a crash).
        task_status['stop_flag'] = True

        # 2. Grab the ffmpeg process handle, if any.
        process = task_status.get('process')

        if not (process and process.poll() is None):
            # No live process: either already stopping or already finished.
            app.logger.warning(f"任务 {task_id} 已经设置了停止标志，但未找到正在运行的 ffmpeg 进程。")
            return jsonify({"message": "任务已在停止过程中，或已结束。"}), 200

        try:
            # 3. SIGTERM asks ffmpeg to finish the current packet and exit
            #    cleanly, so the partial recording stays playable.
            process.terminate()
            app.logger.info(f"已向任务 {task_id} 的 ffmpeg 进程发送 terminate 信号。")
            # Optionally, process.wait(timeout=10) could confirm exit here.
            return jsonify({"message": "停止信号已发送，录制将立即停止并保存当前文件。"}), 200
        except Exception as e:
            app.logger.error(f"终止任务 {task_id} 进程时发生错误: {e}")
            return jsonify({"error": "终止进程时发生错误"}), 500

    @app.route("/api/v1/tasks/<string:task_id>/status", methods=["GET"])
    @s3_client_required
    def get_task_status(task_id):
        """Poll the status/progress of a background transfer task.

        Unknown (or finished) tasks report completed with 404 so the
        frontend stops polling.
        """
        # Fix: running_tasks is created lazily by the upload endpoints, so it
        # may not exist yet — the previous direct attribute access raised
        # AttributeError (HTTP 500) in that case.
        task_status = getattr(current_app, 'running_tasks', {}).get(task_id)
        if task_status:
            return jsonify({
                "status": task_status.get('status', 'running'),
                "progress": task_status.get('progress', 0)
            }), 200
        return jsonify({"status": "completed", "progress": 100}), 404

    @app.route("/api/v1/buckets/<bucket_name>/website", methods=["GET"])
    @s3_client_required
    def get_bucket_website_api(bucket_name):
        """Return the bucket's static-website configuration, or {} if none."""
        s3_client = g.s3_client
        try:
            response = s3_client.get_bucket_website(Bucket=bucket_name)
        except ClientError as e:
            # S3 signals "not configured" with this specific error code.
            if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
                return jsonify({})
            return jsonify({"error": str(e)}), 500
        # A successful boto3 response looks like:
        # {'IndexDocument': {'Suffix': 'index.html'}, 'ErrorDocument': {'Key': 'error.html'}}
        return jsonify({
            "index_document": response.get('IndexDocument', {}).get('Suffix'),
            "error_document": response.get('ErrorDocument', {}).get('Key'),
        })
            
    @app.route("/api/v1/buckets/<bucket_name>/website", methods=["PUT"])
    @s3_client_required
    def put_bucket_website_api(bucket_name):
        """Create or update the bucket's static-website configuration."""
        data = request.get_json()
        index_doc = data.get('index_document')
        error_doc = data.get('error_document')

        if not index_doc:
            return jsonify({"error": "索引页(index_document)是必需的"}), 400

        # The error document is optional; the index document is mandatory.
        config = {'IndexDocument': {'Suffix': index_doc}}
        if error_doc:
            config['ErrorDocument'] = {'Key': error_doc}

        try:
            g.s3_client.put_bucket_website(
                Bucket=bucket_name,
                WebsiteConfiguration=config,
            )
        except ClientError as e:
            return jsonify({"error": str(e)}), 500
        return jsonify({"message": "静态网站配置已更新"}), 200
        
    @app.route("/api/v1/buckets/<bucket_name>/website", methods=["DELETE"])
    @s3_client_required
    def delete_bucket_website_api(bucket_name):
        """Remove the bucket's static-website configuration."""
        try:
            g.s3_client.delete_bucket_website(Bucket=bucket_name)
        except ClientError as e:
            return jsonify({"error": str(e)}), 500
        return jsonify({"message": "静态网站配置已删除"}), 200

    # =============================================================
    # ==      【新增】静态网站开发代理路由                      ==
    # =============================================================
    # 这个路由的设计非常巧妙，它能捕获所有以 /dev-proxy/ 开头的请求
    # 并且能处理任意深度的路径
    @app.route('/dev-proxy/<string:bucket_name>/<path:path>', methods=['GET'])
    @app.route('/dev-proxy/<string:bucket_name>/', defaults={'path': ''}, methods=['GET'])
    @s3_client_required
    def static_website_proxy(bucket_name, path):
        """Debug-only proxy that serves a bucket's static website through
        this app, bypassing DNS by connecting straight to the storage IP
        while forcing the virtual-host style Host header.

        Improvement: the storage IP is now read from the optional
        DEV_PROXY_TARGET_IP config key; the previous hard-coded value is
        kept as the backward-compatible default.
        """
        if not current_app.debug:
            return "Proxy is only available in debug mode.", 404

        # --- 1. Base configuration ---
        endpoint_url = current_app.config["AWS_KWARGS"].get("endpoint_url", "")
        if not endpoint_url:
            return "Endpoint URL not configured in the backend.", 500

        # Hostname part only, e.g. obs-fujian-zhihcs.cucloud.cn
        endpoint_host = urlparse(endpoint_url).hostname

        # The storage node's IP address (bypasses /etc/hosts and DNS).
        target_ip = current_app.config.get("DEV_PROXY_TARGET_IP", "100.127.195.250")

        # --- 2. Build the target request info ---
        target_host = f"{bucket_name}.{endpoint_host}"  # virtual-host Host header
        target_url = f"http://{target_ip}/{path}"       # IP-based URL, skips DNS

        current_app.logger.info(f"DEV PROXY (NO-HOSTS):")
        current_app.logger.info(f"  - Target Host Header: {target_host}")
        current_app.logger.info(f"  - Requesting URL (IP-based): {target_url}")

        try:
            # --- 3. Forward the original headers, overriding Host so the
            # storage backend routes the request to the right bucket.
            headers = {key: value for (key, value) in request.headers}
            headers['Host'] = target_host

            resp = requests.get(target_url, headers=headers, stream=True, timeout=30)
            resp.raise_for_status()

            # --- 4. Stream the upstream body back, dropping hop-by-hop /
            # encoding headers the proxy must not forward verbatim.
            response_headers = [(k, v) for k, v in resp.raw.headers.items() if k.lower() not in ['content-encoding', 'transfer-encoding']]
            return Response(stream_with_context(resp.iter_content(chunk_size=8192)), resp.status_code, response_headers)

        except requests.exceptions.RequestException as e:
            current_app.logger.error(f"DEV PROXY (NO-HOSTS): Error fetching {target_url} with Host {target_host}: {e}")
            # Relay the upstream error body/status when available.
            if hasattr(e, 'response') and e.response is not None:
                return Response(e.response.content, e.response.status_code, e.response.headers.items())
            return f"Failed to proxy request: {e}", 502
        
    # ======================================================================
    # ==      日志管理 (Logging) - GET, PUT, DELETE 接口               ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/logging", methods=["GET"])
    @s3_client_required
    def get_bucket_logging_api(bucket_name):
        """API: read the bucket's access-logging configuration."""
        s3_client = g.s3_client
        try:
            # Delegate to the get_bucket_logging helper from s3.py.
            response = get_bucket_logging(s3_client, bucket_name)
        except ClientError as e:
            # Boto3 reports "no configuration" via this specific error code.
            if e.response['Error']['Code'] == 'NoSuchLoggingConfiguration':
                app.logger.info(f"桶 {bucket_name} 没有日志配置，返回 enabled: false")
                return jsonify({"enabled": False})
            # Any other failure (e.g. AccessDenied) is a real error.
            app.logger.error(f"获取日志配置失败: {e}")
            return jsonify({"error": f"获取配置失败: {e.response['Error']['Code']}"}), 500

        # Success responses carry a LoggingEnabled section when configured.
        if 'LoggingEnabled' in response:
            logging_config = response['LoggingEnabled']
            return jsonify({
                "enabled": True,
                "target_bucket": logging_config.get('TargetBucket'),
                "target_prefix": logging_config.get('TargetPrefix'),
            })
        # Defensive: boto3 normally raises instead, but handle this anyway.
        return jsonify({"enabled": False})

    @app.route("/api/v1/buckets/<bucket_name>/logging", methods=["PUT"])
    @s3_client_required
    def put_bucket_logging_api(bucket_name):
        """API: enable or update access logging for the bucket."""
        data = request.get_json()
        target_bucket = data.get('target_bucket')
        target_prefix = data.get('target_prefix', '')  # an empty prefix is allowed

        if not target_bucket:
            return jsonify({"error": "缺少 'target_bucket' 参数"}), 400

        try:
            # Delegate to the put_bucket_logging helper from s3.py.
            put_bucket_logging(g.s3_client, bucket_name, target_bucket, target_prefix)
        except ClientError as e:
            app.logger.error(f"设置日志配置失败: {e}")
            return jsonify({"error": f"设置配置失败: {e.response['Error']['Code']}"}), 500
        return jsonify({"message": "日志记录已成功配置"}), 200

    @app.route("/api/v1/buckets/<bucket_name>/logging", methods=["DELETE"])
    @s3_client_required
    def delete_bucket_logging_api(bucket_name):
        """API: turn off access logging for the bucket."""
        try:
            # Delegate to the delete_bucket_logging helper from s3.py.
            delete_bucket_logging(g.s3_client, bucket_name)
        except ClientError as e:
            app.logger.error(f"关闭日志配置失败: {e}")
            return jsonify({"error": f"关闭配置失败: {e.response['Error']['Code']}"}), 500
        return jsonify({"message": "日志记录已成功关闭"}), 200

    # --- 在 s3_web_browser/routes.py 文件中，找到合适的区域添加以下路由 ---
# 推荐放在日志管理配置API的下方

    # ======================================================================
    # ==      日志查询 - (A) 列出日志文件 API                          ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/logs", methods=["GET"])
    @s3_client_required
    def list_log_files_api(bucket_name):
        """API: page through the log files stored under a given prefix."""
        prefix = request.args.get('prefix', '')
        page = request.args.get('page', 1, type=int)
        limit = request.args.get('limit', 50, type=int)

        s3_client = g.s3_client

        try:
            # Walk every result page, keeping real objects (not folder keys).
            paginator = s3_client.get_paginator('list_objects_v2')
            log_objects = []
            for result_page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
                log_objects.extend(
                    obj for obj in result_page.get('Contents', [])
                    if not obj['Key'].endswith('/')
                )

            # Newest logs first.
            log_objects.sort(key=lambda obj: obj['LastModified'], reverse=True)

            # Manual pagination over the sorted list.
            total = len(log_objects)
            start = (page - 1) * limit
            window = log_objects[start:start + limit]

            # Shape for the frontend.
            formatted_items = [{
                "key": obj['Key'],
                "name": os.path.basename(obj['Key']),
                "date": obj['LastModified'].strftime('%Y-%m-%d %H:%M:%S'),
                "size": humanize.naturalsize(obj['Size'], binary=True)
            } for obj in window]

            return jsonify({"items": formatted_items, "total": total})

        except ClientError as e:
            app.logger.error(f"列出日志文件失败: {e}")
            return jsonify({"error": f"列出日志文件失败: {e.response['Error']['Code']}"}), 500

    # ======================================================================
    # ==      日志查询 - (B) 读取并解析日志内容 API                    ==
    # ======================================================================
    import re

    # Per-field patterns for one S3 server-access-log line, in order.
    # Joined below into LOG_REGEX_UNICOM (a fairly generic log variant).
    _LOG_FIELD_PATTERNS = (
        r'(\S+)\s',      # 1: Bucket Owner
        r'(\S+)\s',      # 2: Bucket Name
        r'\[(.+?)\]\s',  # 3: Timestamp
        r'(\S+)\s',      # 4: Remote IP
        r'(\S+)\s',      # 5: Requester ID
        r'(\S+)\s',      # 6: Request ID
        r'(\S+)\s',      # 7: Operation
        r'(\S+)\s',      # 8: Key
        r'"([^"]*)"\s',  # 9: Request-URI
        r'(\S+)\s',      # 10: HTTP status
        r'(\S+)\s',      # 11: Error Code
        r'(\S+)\s',      # 12: Bytes Sent
        r'(\S+)\s',      # 13: Object Size
        r'(\S+)\s',      # 14: Total Time
        r'(\S+)\s',      # 15: Turn-Around Time
        r'"([^"]*)"\s',  # 16: Referer
        r'"([^"]*)"\s',  # 17: User-Agent
        r'(\S+)\s',      # 18: Version ID
        r'(\S+)',        # 19: Host Header
    )
    LOG_REGEX_UNICOM = re.compile(''.join(_LOG_FIELD_PATTERNS))
    
    @app.route("/api/v1/buckets/<bucket_name>/logs/content", methods=["GET"])
    @s3_client_required
    def read_log_content_api(bucket_name):
        """API: fetch one log object, parse it line by line, return one page.

        Query params: key (required object key), page, limit.
        Response JSON: {"items": [parsed entries], "total": <int>}.
        """
        key = request.args.get('key')
        page = request.args.get('page', 1, type=int)
        limit = request.args.get('limit', 50, type=int)

        if not key:
            return jsonify({"error": "缺少 'key' 参数"}), 400

        s3_client = g.s3_client

        # Names for regex groups 1-18, in capture order.
        # Group 19 (Host Header) is intentionally not exposed to the frontend.
        field_names = (
            "owner", "bucket", "timestamp", "remote_ip", "requester",
            "request_id", "operation", "key", "request_uri", "http_status",
            "error_code", "bytes_sent", "object_size", "total_time_ms",
            "turnaround_time_ms", "referer", "user_agent", "version_id",
        )

        try:
            # Pull the whole log object into memory and decode it.
            body = s3_client.get_object(Bucket=bucket_name, Key=key)['Body']
            text = body.read().decode('utf-8')

            parsed_entries = []
            for line in text.strip().split('\n'):
                match = LOG_REGEX_UNICOM.match(line)
                if not match:
                    # Keep a trace of unparseable lines for debugging.
                    app.logger.warning(f"日志行无法解析: {line}")
                    continue
                # zip() stops at the 18 named fields, dropping group 19.
                entry = dict(zip(field_names, match.groups()))
                # A literal '-' means "no value" for these two fields.
                if entry["key"] == '-':
                    entry["key"] = ''
                if entry["error_code"] == '-':
                    entry["error_code"] = ''
                parsed_entries.append(entry)

            # Manual pagination over the parsed rows.
            total = len(parsed_entries)
            offset = (page - 1) * limit
            return jsonify({"items": parsed_entries[offset:offset + limit], "total": total})

        except ClientError as e:
            app.logger.error(f"读取日志内容失败: {e}")
            return jsonify({"error": f"读取日志内容失败: {e.response['Error']['Code']}"}), 500

    # ======================================================================
    # ==      日志查询 - (C) 下载并重命名日志文件 API                  ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/logs/download", methods=["GET"])
    @s3_client_required
    def download_log_file_api(bucket_name):
        """API: stream one log object to the client, renamed with a .log suffix."""
        key = request.args.get('key')
        if not key:
            return "Missing 'key' parameter", 400

        s3_client = g.s3_client
        try:
            s3_object = s3_client.get_object(Bucket=bucket_name, Key=key)

            # Stream the body in 64 KiB chunks instead of buffering it whole.
            def stream_body():
                yield from s3_object['Body'].iter_chunks(chunk_size=65536)

            # Serve the object under "<basename>.log".
            download_name = f"{os.path.basename(key)}.log"

            response = Response(stream_body(), content_type='text/plain')

            # RFC 5987 filename*=UTF-8''... form is robust for non-ASCII names.
            response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{quote(download_name)}"

            # Expose the size so browsers can show download progress.
            if 'ContentLength' in s3_object:
                response.headers["Content-Length"] = str(s3_object['ContentLength'])

            return response

        except ClientError as e:
            if e.response['Error']['Code'] == 'NoSuchKey':
                return "Log file not found", 404
            app.logger.error(f"下载日志文件失败: {e}")
            return f"Failed to download log file: {e.response['Error']['Code']}", 500
        
    # ======================================================================
    # ==      日志查询 - (D) 批量下载并重命名日志文件 API              ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/logs/batch-download", methods=["POST"])
    @s3_client_required
    def batch_download_log_files_api(bucket_name):
        """
        API: download several log objects as one server-built ZIP archive.

        Request body: JSON {"keys": ["<object key>", ...]}.
        Each object is stored in the archive as "<basename>.log"; objects
        that fail to download are skipped (with a warning) instead of
        aborting the whole archive.
        """
        # get_json() can raise or return None for a malformed/non-JSON body;
        # silent=True + "or {}" normalizes that to our own JSON 400 below
        # instead of an unhandled AttributeError on data.get().
        data = request.get_json(silent=True) or {}
        keys = data.get('keys', [])
        if not keys:
            return jsonify({"error": "Missing 'keys' list in request"}), 400

        s3_client = g.s3_client

        # Build the ZIP entirely in memory (ZIP_DEFLATED = standard compression).
        zip_buffer = io.BytesIO()
        with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zf:
            for key in keys:
                try:
                    s3_object = s3_client.get_object(Bucket=bucket_name, Key=key)
                    file_contents = s3_object['Body'].read()
                    # Store under "<basename>.log" so the entries open as text logs.
                    zf.writestr(f"{os.path.basename(key)}.log", file_contents)
                except ClientError as e:
                    # Best effort: skip unreachable objects, keep the rest.
                    app.logger.warning(f"批量下载日志时，跳过文件 {key}，原因: {e}")
                    continue

        # Read the finished archive once; reuse the bytes for both the
        # response body and the Content-Length header (no seek needed —
        # getvalue() is position-independent).
        zip_bytes = zip_buffer.getvalue()
        zip_filename = f"{bucket_name}-logs-batch-{int(time.time())}.zip"
        response = Response(zip_bytes, mimetype='application/zip')
        response.headers['Content-Disposition'] = f"attachment; filename*=UTF-8''{quote(zip_filename)}"
        response.headers['Content-Length'] = str(len(zip_bytes))

        return response
    
    @app.route("/api/v1/buckets/<bucket_name>/policy", methods=["GET"])
    @s3_client_required
    def get_bucket_policy_api(bucket_name):
        """API: return the bucket policy as a JSON object ({} when none is set)."""
        s3_client = g.s3_client
        try:
            result = s3_client.get_bucket_policy(Bucket=bucket_name)
            # Boto3 returns the policy as a JSON string; decode it for the frontend.
            return jsonify(json.loads(result['Policy']))
        except ClientError as e:
            # "No policy configured" is a normal state, not an error.
            if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
                return jsonify({})
            # Anything else (e.g. insufficient permissions) is a real failure.
            app.logger.error(f"获取 Bucket Policy 失败: {e}")
            return jsonify({"error": str(e)}), 500
            
    @app.route("/api/v1/buckets/<bucket_name>/policy", methods=["PUT"])
    @s3_client_required
    def put_bucket_policy_api(bucket_name):
        """API: replace the bucket policy with the JSON document in the request body.

        Returns 200 on success; 400 when the body is empty/invalid or when
        S3 rejects the policy (the S3 error message is forwarded to the client).
        """
        # silent=True: a malformed body yields None -> our own JSON 400 below,
        # instead of werkzeug's default HTML 400 page.
        policy_document = request.get_json(silent=True)
        if not policy_document:
            return jsonify({"error": "请求体不能为空且必须是 JSON 格式"}), 400

        # Diagnostic dump of the incoming document; DEBUG level so production
        # logs are not flooded with full policy documents.
        current_app.logger.debug("Received policy document from frontend: %s", policy_document)

        s3_client = g.s3_client
        try:
            # Boto3 expects the policy as a JSON string, not a dict.
            policy_string = json.dumps(policy_document)
            current_app.logger.debug("Policy string passed to boto3: %s", policy_string)

            s3_client.put_bucket_policy(
                Bucket=bucket_name,
                Policy=policy_string
            )
            return jsonify({"message": "存储桶策略已成功更新"}), 200

        except ClientError as e:
            # Keep the full boto3 error response available at DEBUG for
            # troubleshooting, and forward the human-readable S3 message.
            current_app.logger.debug("Boto3 ClientError details: %s", e.response)
            current_app.logger.error(f"设置 Bucket Policy 失败: {e}")
            return jsonify({"error": f"策略设置失败: {e.response['Error']['Message']}"}), 400

    @app.route("/api/v1/buckets/<bucket_name>/policy", methods=["DELETE"])
    @s3_client_required
    def delete_bucket_policy_api(bucket_name):
        """API: remove the bucket policy entirely."""
        s3_client = g.s3_client
        try:
            s3_client.delete_bucket_policy(Bucket=bucket_name)
        except ClientError as e:
            app.logger.error(f"删除 Bucket Policy 失败: {e}")
            return jsonify({"error": str(e)}), 500
        return jsonify({"message": "存储桶策略已成功删除"}), 200

    # ======================================================================
    # ==      跨域资源共享 (CORS) - GET, PUT, DELETE 接口                ==
    # ======================================================================
    @app.route("/api/v1/buckets/<bucket_name>/cors", methods=["GET"])
    @s3_client_required
    def get_bucket_cors_api(bucket_name):
        """API: return the bucket's CORS rules ([] when none are configured)."""
        app.logger.info(f"正在获取 {bucket_name} 的 CORS 配置...")
        s3_client = g.s3_client
        try:
            response = s3_client.get_bucket_cors(Bucket=bucket_name)
            return jsonify(response.get('CORSRules', []))
        except ClientError as e:
            code = e.response['Error']['Code']
            # "No CORS configured" is a normal state. AWS S3 reports it as
            # 'NoSuchCORSConfiguration'; some S3-compatible services use
            # 'NoSuchBucketCors'. Accept both so an unconfigured bucket
            # returns an empty list instead of a 500.
            if code in ('NoSuchBucketCors', 'NoSuchCORSConfiguration'):
                app.logger.info(f"桶 {bucket_name} 没有CORS配置(错误码: {code})，返回空列表。")
                return jsonify([])
            # Any other ClientError (e.g. AccessDenied) is a real failure.
            app.logger.error(f"获取CORS配置时发生预料之外的ClientError: {e}")
            return jsonify({"error": f"获取配置失败: {code}"}), 500
        except Exception as e:
            # Catch-all so an unexpected error cannot crash the server;
            # exc_info=True records the full traceback.
            app.logger.error(f"获取CORS配置时发生未处理的异常: {e}", exc_info=True)
            return jsonify({"error": f"服务器内部发生未知错误: {str(e)}"}), 500

    @app.route("/api/v1/buckets/<bucket_name>/cors", methods=["PUT"])
    @s3_client_required
    def put_bucket_cors_api(bucket_name):
        """API: replace the bucket's CORS configuration with the posted rules."""
        # The frontend posts the complete rule set as a JSON array.
        rules = request.get_json()
        if not isinstance(rules, list):
            return jsonify({"error": "请求体必须是一个规则数组"}), 400

        app.logger.info(f"正在为 {bucket_name} 设置 {len(rules)} 条 CORS 规则...")
        s3_client = g.s3_client
        try:
            s3_client.put_bucket_cors(
                Bucket=bucket_name,
                CORSConfiguration={'CORSRules': rules},
            )
        except ClientError as e:
            # Malformed rules and similar problems surface here as ClientError.
            app.logger.error(f"设置CORS配置失败: {e}")
            return jsonify({"error": f"配置失败: {e.response['Error']['Message']}"}), 400
        return jsonify({"message": "CORS 配置已成功更新"}), 200

    @app.route("/api/v1/buckets/<bucket_name>/cors", methods=["DELETE"])
    @s3_client_required
    def delete_bucket_cors_api(bucket_name):
        """API: drop every CORS rule from the bucket."""
        app.logger.info(f"正在删除 {bucket_name} 的所有 CORS 配置...")
        s3_client = g.s3_client
        try:
            s3_client.delete_bucket_cors(Bucket=bucket_name)
        except ClientError as e:
            app.logger.error(f"删除CORS配置失败: {e}")
            return jsonify({"error": f"删除配置失败: {e.response['Error']['Code']}"}), 500
        return jsonify({"message": "CORS 配置已成功删除"}), 200