import dataclasses
import datetime
import botocore
import humanize
import logging
import requests
import boto3
import base64
import time
from urllib.parse import urlparse, quote, unquote
import ffmpeg as ffmpeg_process
import threading
import re

from typing import List, Set, Optional, Dict, Any

@dataclasses.dataclass(eq=True, frozen=True)
class S3Entry:
    """Representation of S3 object."""
    # Object key (for files) or prefix (for folders).
    name: str
    # Entry kind: "file" or "folder".
    type: str
    # Human-readable size (e.g. "1.2 MiB"); empty for folders.
    size: str = ""
    # Last-modified value as reported by S3; empty for folders.
    date_modified: str = ""

def parse_responses(responses: list, search_param: str) -> List[S3Entry]:
    """Flatten list_objects_v2-style responses into a sorted, optionally filtered list of S3Entry."""
    entries: Set[S3Entry] = set()
    for resp in responses:
        if not isinstance(resp, dict):
            continue
        # Folders come back as CommonPrefixes when a delimiter was used.
        for prefix in resp.get("CommonPrefixes", []):
            entries.add(S3Entry(name=prefix["Prefix"], type="folder"))
        # Regular objects; keys ending in "/" are directory placeholders and skipped.
        for obj in resp.get("Contents", []):
            key = obj["Key"]
            if key.endswith("/"):
                continue
            entries.add(
                S3Entry(
                    name=key,
                    type="file",
                    size=humanize.naturalsize(obj["Size"], binary=True),
                    date_modified=obj["LastModified"],
                )
            )

    results = list(entries)
    if search_param:
        # TODO: use regex
        needle = search_param.lower()
        results = [entry for entry in results if needle in entry.name.lower()]
    # Folders first, then case-insensitive alphabetical within each group.
    return sorted(results, key=lambda entry: (entry.type == "file", entry.name.lower()))

def list_objects(
    s3_client: botocore.client.BaseClient,
    bucket_name: str,
    path: str,
    page_items: int,
    delimiter: str = "",
    page_token: Optional[str] = None,
) -> Dict[str, Any]:
    """
    List one page of objects using list_objects_v2.

    :param s3_client: Boto3 S3 client.
    :param bucket_name: Bucket name.
    :param path: Prefix to list under.
    :param page_items: Maximum number of keys per page (MaxKeys).
    :param delimiter: Optional delimiter for hierarchical listing; when empty,
        no Delimiter is sent and the listing is flat.
    :param page_token: Continuation token from a previous response, if any.
    :return: Raw list_objects_v2 response dict.
    """
    list_params: Dict[str, Any] = {"Bucket": bucket_name, "Prefix": path, "MaxKeys": page_items}
    if delimiter:
        # Bug fix: the caller-supplied delimiter was previously ignored and
        # "/" was always sent instead.
        list_params["Delimiter"] = delimiter
    if page_token:
        list_params["ContinuationToken"] = page_token

    return s3_client.list_objects_v2(**list_params)

def upload_object(s3_client, bucket_name: str, key: str, file_content: bytes):
    """
    Upload a file to S3.

    :param s3_client: S3 client.
    :param bucket_name: Bucket name.
    :param key: Full key inside S3 (e.g. 'folder1/file.txt').
    :param file_content: File content to upload, as bytes.
    """
    s3_client.put_object(Bucket=bucket_name, Key=key, Body=file_content)

def delete_object(s3_client, bucket_name: str, key: str):
    """
    Delete a single object from S3.

    :param s3_client: S3 client.
    :param bucket_name: Bucket name.
    :param key: Full key of the object inside S3.
    """
    s3_client.delete_object(Bucket=bucket_name, Key=key)

def rename_object(s3_client, bucket_name: str, old_key: str, new_key: str):
    """
    Rename an S3 object (implemented as copy-then-delete, since S3 has no
    native rename operation).
    """
    copy_source = {'Bucket': bucket_name, 'Key': old_key}
    # Copy the object to its new key first ...
    s3_client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=new_key)
    # ... then remove the original.
    s3_client.delete_object(Bucket=bucket_name, Key=old_key)

def generate_presigned_url(s3_client, bucket_name: str, key: str, filename: Optional[str] = None, expiration: int = 3600):
    """
    Generate a presigned URL for downloading an object directly.

    :param s3_client: S3 client instance.
    :param bucket_name: Bucket name.
    :param key: Object key (path).
    :param filename: Optional download filename. When given, a
        Content-Disposition header is attached so browsers download the file
        (with this name) instead of previewing it.
    :param expiration: URL lifetime in seconds (default: 1 hour).
    :return: The presigned URL string, or None if generation failed.
    """
    logger = logging.getLogger(__name__)
    params = {'Bucket': bucket_name, 'Key': key}

    if filename:
        # Bug fix: the header previously hardcoded "(unknown)" instead of
        # interpolating the requested filename. boto3 URL-encodes the value.
        params['ResponseContentDisposition'] = f'attachment; filename="{filename}"'

    try:
        # NOTE: the signed URL is no longer printed — presigned URLs grant
        # access and should not be written to stdout.
        return s3_client.generate_presigned_url(
            'get_object',
            Params=params,
            ExpiresIn=expiration
        )
    except Exception as e:
        # Best-effort: callers treat None as "could not generate a URL".
        logger.error(f"Error generating presigned URL for {key}: {e}")
        return None
    
# --- Functions below were appended at the end of s3.py ---

def get_paginated_bucket_contents_with_versions(
    s3_client: botocore.client.BaseClient,
    bucket_name: str,
    path: str,
    search_param: str = "",
    page: int = 1,
    items_per_page: int = 50,
) -> Dict[str, Any]:
    """
    Fetch, parse, search and paginate the contents of an S3 bucket,
    INCLUDING all historical object versions and delete markers.

    :param s3_client: Boto3 S3 client.
    :param bucket_name: Bucket name.
    :param path: Current prefix ("folder") to list.
    :param search_param: Case-insensitive substring filter on entry names.
    :param page: 1-based page number for manual pagination.
    :param items_per_page: Number of top-level entries per page.
    :return: {"items": [...], "total": int} where each item is the latest
             version of a key with older versions nested under "history".
    """
    paginator = s3_client.get_paginator("list_object_versions")
    
    # 1. Collect every version and delete marker under the prefix.
    all_versions_raw = []
    # Delimiter='/' restricts the listing to the current hierarchy level.
    for page_iterator in paginator.paginate(Bucket=bucket_name, Prefix=path, Delimiter="/"):
        # Folders (common prefixes) are synthesized as pseudo-entries.
        if "CommonPrefixes" in page_iterator:
            for prefix in page_iterator["CommonPrefixes"]:
                all_versions_raw.append({
                    "Key": prefix["Prefix"],
                    "Size": 0,
                    "LastModified": None, # folders have no modification time
                    "IsLatest": True,
                    "is_folder": True, # custom marker
                })

        # Object versions.
        if "Versions" in page_iterator:
            all_versions_raw.extend(page_iterator["Versions"])
            
        # Delete markers.
        if "DeleteMarkers" in page_iterator:
            # Give markers a 'Size' field so the formatting below works.
            for marker in page_iterator["DeleteMarkers"]:
                marker['Size'] = 0
                marker['is_delete_marker'] = True # custom marker
            all_versions_raw.extend(page_iterator["DeleteMarkers"])

    # 2. Group the flat version list into a per-key tree structure.
    grouped_objects = {}
    for item in all_versions_raw:
        key = item["Key"]
        
        # Build an S3Entry-like dict for this version.
        entry = {
            "name": key,
            "version_id": item.get("VersionId"),
            "is_latest": item.get("IsLatest", False),
            "size": humanize.naturalsize(item["Size"], binary=True) if "Size" in item and item["Size"] is not None else "-",
            "date_modified": item["LastModified"].isoformat() if "LastModified" in item and item["LastModified"] is not None else None,
            "type": "folder" if item.get("is_folder") else ("delete_marker" if item.get("is_delete_marker") else "file"),
            "history": [] # will hold older versions of the same key
        }

        if key not in grouped_objects:
            grouped_objects[key] = {"latest": None, "history": []}
        
        if entry["is_latest"]:
            grouped_objects[key]["latest"] = entry
        else:
            # Older versions and non-latest delete markers all go into history.
            grouped_objects[key]["history"].append(entry)

    # 3. Build the final list (latest version per key) and apply the search.
    final_list = []
    for key, group in grouped_objects.items():
        if group["latest"]:
            # Attach the (newest-first) history to the latest-version entry.
            group["latest"]["history"] = sorted(group["history"], key=lambda x: x["date_modified"] or '', reverse=True)
            final_list.append(group["latest"])
    
    # Sort the main list: folders first, then case-insensitive by name.
    final_list = sorted(final_list, key=lambda x: (x["type"] == "file", x["name"].lower()))
    
    # Apply the search filter (top-level list only, not history entries).
    if search_param:
        final_list = list(filter(lambda x: search_param.lower() in x["name"].lower(), final_list))
        
    # 4. Manual pagination.
    total_items = len(final_list)
    start_idx = (page - 1) * items_per_page
    end_idx = start_idx + items_per_page
    paginated_items = final_list[start_idx:end_idx]

    return {
        "items": paginated_items,
        "total": total_items,
    }

def initiate_multipart_upload(s3_client: botocore.client.BaseClient, bucket_name: str, key: str, acl: Optional[str] = None) -> Dict[str, Any]:
    """
    Step 1: initiate a multipart upload.

    :param s3_client: Boto3 S3 client.
    :param bucket_name: Bucket name.
    :param key: Destination object key.
    :param acl: Optional canned ACL (e.g. 'public-read') to apply to the object.
    :return: The create_multipart_upload response dict, containing the UploadId.
    """
    logger = logging.getLogger(__name__)
    logger.info(f"Initiating multipart upload for s3://{bucket_name}/{key}")
    # ContentType, Metadata, etc. could be added to these params later.
    params: Dict[str, Any] = {
        'Bucket': bucket_name,
        'Key': key
    }

    # Only include the ACL when one was explicitly requested.
    if acl:
        params['ACL'] = acl
        logger.info(f"  ... with ACL: {acl}")

    return s3_client.create_multipart_upload(**params)


def generate_presigned_urls_for_parts(
    s3_client: botocore.client.BaseClient,
    bucket_name: str,
    key: str,
    upload_id: str,
    part_numbers: List[int],
    expiration: int = 3600
) -> Dict[int, str]:
    """
    Step 2: batch-generate presigned upload URLs for the given part numbers.
    """
    logger = logging.getLogger(__name__)
    logger.info(f"Generating presigned URLs for {len(part_numbers)} parts. UploadId: {upload_id}")

    # One 'upload_part' URL per requested part number.
    common = {'Bucket': bucket_name, 'Key': key, 'UploadId': upload_id}
    return {
        number: s3_client.generate_presigned_url(
            'upload_part',
            Params={**common, 'PartNumber': number},
            ExpiresIn=expiration,
        )
        for number in part_numbers
    }


def complete_multipart_upload(
    s3_client: botocore.client.BaseClient,
    bucket_name: str,
    key: str,
    upload_id: str,
    parts: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """
    Step 3: complete the multipart upload, merging all parts into one object.

    :param parts: List of dicts shaped like [{'PartNumber': 1, 'ETag': '...'}, ...]
    """
    logger = logging.getLogger(__name__)
    logger.info(f"Completing multipart upload for s3://{bucket_name}/{key}. UploadId: {upload_id}")

    # Boto3 expects the part list wrapped as {'Parts': [...]}.
    return s3_client.complete_multipart_upload(
        Bucket=bucket_name,
        Key=key,
        UploadId=upload_id,
        MultipartUpload={'Parts': parts}
    )


def abort_multipart_upload(
    s3_client: botocore.client.BaseClient,
    bucket_name: str,
    key: str,
    upload_id: str
):
    """
    Step 4: abort a multipart upload so S3 discards the already-uploaded parts.
    """
    logger = logging.getLogger(__name__)
    logger.info(f"Aborting multipart upload for s3://{bucket_name}/{key}. UploadId: {upload_id}")

    return s3_client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id)

def upload_part_from_stream(
    s3_client: botocore.client.BaseClient,
    bucket_name: str,
    key: str,
    upload_id: str,
    part_number: int,
    file_stream
) -> Dict[str, Any]:
    """
    [Proxy mode] Upload a single part from a file stream.

    :param file_stream: Stream received from the request
        (e.g. request.stream or request.files['file']).
    :return: Dict containing only the ETag, for complete_multipart_upload.
    """
    logger = logging.getLogger(__name__)
    logger.info(f"Uploading part {part_number} for s3://{bucket_name}/{key} via proxy. UploadId: {upload_id}")

    result = s3_client.upload_part(
        Bucket=bucket_name,
        Key=key,
        UploadId=upload_id,
        PartNumber=part_number,
        Body=file_stream,
    )
    # Callers only need the ETag for the completion step.
    return {'ETag': result.get('ETag')}


def get_paginated_bucket_contents(
    s3_client: botocore.client.BaseClient,
    bucket_name: str,
    path: str,
    search_param: str = "",
    page: int = 1,
    items_per_page: int = 50,
) -> Dict[str, Any]:
    """
    One-stop helper: fetch, parse, search and paginate S3 bucket contents.

    :param s3_client: Boto3 S3 client.
    :param bucket_name: Bucket name.
    :param path: Current path/prefix.
    :param search_param: Search keyword.
    :param page: Current (1-based) page number.
    :param items_per_page: Items per page.
    :return: Dict with the paginated items and the total count.
    """
    paginator = s3_client.get_paginator("list_objects_v2")

    # Every entry at this level is still needed to support searching and
    # correct pagination: S3 list operations cannot paginate/filter a single
    # directory level server-side.
    raw_chunks = []

    # Delimiter='/' restricts results to the current hierarchy level; collect
    # folders (CommonPrefixes) and files (Contents) from each listing page.
    for listing in paginator.paginate(Bucket=bucket_name, Prefix=path, Delimiter="/"):
        chunk = {
            part: listing[part]
            for part in ("CommonPrefixes", "Contents")
            if part in listing
        }
        if chunk:
            raw_chunks.append(chunk)

    # Parse, sort and apply the search filter.
    parsed = parse_responses(raw_chunks, search_param)

    # Manual pagination over the parsed list.
    total = len(parsed)
    start = (page - 1) * items_per_page
    return {
        "items": parsed[start:start + items_per_page],
        "total": total,
    }


def _stream_rtsp_to_s3(app, task_id: str, aws_kwargs: Dict[str, Any], bucket_name: str, source_url: str, destination_key: str, chunk_size_mb: int):
    """
    Background worker: record an RTSP stream with ffmpeg and upload it to S3
    as timestamped MP4 segments via multipart upload. Loops forever (one
    multipart upload per iteration) until the task's 'stop_flag' is set.

    :param app: Application object exposing a `running_tasks` dict (task registry).
    :param task_id: This task's key in app.running_tasks.
    :param aws_kwargs: Keyword arguments forwarded to boto3.client("s3", ...).
    :param bucket_name: Destination bucket.
    :param source_url: RTSP source URL; embedded credentials are re-escaped.
    :param destination_key: Base object key; a timestamp is inserted per segment.
    :param chunk_size_mb: Multipart part size in MiB.
    """
    logger = logging.getLogger('s3_web_browser.background_worker')
    
    # 1. Fetch this task's shared status record from the global task manager.
    task_status = app.running_tasks.get(task_id)
    if not task_status:
        logger.error(f"RTSP 任务 {task_id} 在启动时未找到状态记录，线程退出。")
        return
        
    task_status['process'] = None # initialize the ffmpeg process handle

    try:
        # 2. Run indefinitely; the loop is controlled by stop_flag.
        while not task_status.get('stop_flag', False):
            s3_client: botocore.client.BaseClient = None
            upload_id: Optional[str] = None
            current_key = destination_key
            
            try:
                s3_client = boto3.client("s3", **aws_kwargs)
                
                # Each segment gets a timestamped key derived from destination_key.
                timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
                key_parts = destination_key.rsplit('.', 1)
                current_key = f"{key_parts[0]}_{timestamp}.{key_parts[1]}" if len(key_parts) == 2 else f"{destination_key}_{timestamp}"
                
                logger.info(f"RTSP 任务开始/重启: 从 {source_url} 上传到 s3://{bucket_name}/{current_key}")
                
                response = s3_client.create_multipart_upload(Bucket=bucket_name, Key=current_key)
                upload_id = response['UploadId']
                
                # Re-escape any credentials embedded in the URL so ffmpeg
                # receives a valid RTSP URI even with special characters.
                parsed_url = urlparse(source_url)
                username = unquote(parsed_url.username) if parsed_url.username else ''
                password = unquote(parsed_url.password) if parsed_url.password else ''
                safe_username = quote(username, safe='')
                safe_password = quote(password, safe='')
                hostname_part = parsed_url.hostname
                if parsed_url.port:
                    hostname_part += f":{parsed_url.port}"
                final_rtsp_url = f"{parsed_url.scheme}://"
                if safe_username or safe_password:
                    final_rtsp_url += f"{safe_username}:{safe_password}@"
                final_rtsp_url += f"{hostname_part}{parsed_url.path}"
                
                # Copy the video stream, transcode audio to AAC, and emit
                # fragmented MP4 to stdout so it can be piped without seeking.
                process = (
                    ffmpeg_process
                    .input(final_rtsp_url, rtsp_transport='tcp')
                    .output('pipe:1', format='mp4', vcodec='copy', acodec='aac', movflags='frag_keyframe+empty_moov')
                    .run_async(pipe_stdout=True, pipe_stderr=True)
                )
                
                # 3. Publish the process handle in the shared state so the
                #    stop endpoint can reach it.
                task_status['process'] = process

                completed_parts = []
                part_number = 1
                CHUNK_SIZE_BYTES = chunk_size_mb * 1024 * 1024

                def log_progress(pipe):
                    # Tail ffmpeg's stderr and surface its progress lines.
                    progress_re = re.compile(r"size=\s*(?P<size>\d+)kB\s+time=(?P<time>\S+)\s+bitrate=(?P<bitrate>\S+)")
                    while True:
                        line = pipe.readline()
                        if not line: break
                        line_str = line.decode('utf-8', errors='ignore').strip()
                        match = progress_re.search(line_str)
                        if match:
                            progress_info = match.groupdict()
                            logger.info(
                                f"FFmpeg 进度 for {current_key}: "
                                f"已处理大小={progress_info['size']}kB, "
                                f"时长={progress_info['time']}, "
                                f"码率={progress_info['bitrate']}"
                            )
                        else:
                            logger.debug(f"FFmpeg stderr: {line_str}")

                progress_thread = threading.Thread(target=log_progress, args=(process.stderr,), daemon=True)
                progress_thread.start()
                
                # Pump ffmpeg stdout into S3, one multipart part per chunk.
                while True:
                    chunk = process.stdout.read(CHUNK_SIZE_BYTES)
                    if not chunk: break
                    logger.info(f"S3 上传: 正在上传分片 {part_number} (大小: {humanize.naturalsize(len(chunk), binary=True)}) for {current_key}...")
                    part_response = s3_client.upload_part(
                        Bucket=bucket_name, Key=current_key, UploadId=upload_id,
                        PartNumber=part_number, Body=chunk
                    )
                    etag = part_response.get('ETag', '').strip('"')
                    completed_parts.append({'PartNumber': part_number, 'ETag': f'"{etag}"'})
                    part_number += 1
                
                process.wait()
                progress_thread.join(timeout=1)
                task_status['process'] = None # process has exited; clear the handle

                # 4. Check the exit code; a manual stop is not treated as an error.
                if process.returncode != 0 and not task_status.get('stop_flag', False):
                    raise RuntimeError("ffmpeg-python处理失败，请检查上面的 stderr 日志。")

                if completed_parts:
                    s3_client.complete_multipart_upload(
                        Bucket=bucket_name, Key=current_key, UploadId=upload_id,
                        MultipartUpload={'Parts': sorted(completed_parts, key=lambda i: i['PartNumber'])}
                    )
                    logger.info(f"✅ RTSP 分段成功: {current_key}")
                else:
                     if upload_id:
                        s3_client.abort_multipart_upload(Bucket=bucket_name, Key=current_key, UploadId=upload_id)
                        logger.warning(f"RTSP 任务 {current_key} 没有生成任何分片，已中止。")
                
                # 5. Break out of the endless loop when stopped manually.
                if task_status.get('stop_flag', False):
                    logger.info(f"RTSP 任务 {task_id} 检测到停止信号，退出循环。")
                    break

                time.sleep(1)

            except Exception as e:
                logger.error(f"❌ RTSP 任务在一个循环中失败: {e}", exc_info=False)
                if upload_id and s3_client:
                    try:
                        s3_client.abort_multipart_upload(Bucket=bucket_name, Key=current_key, UploadId=upload_id)
                        logger.warning(f"已中止并清理了失败任务的碎片, UploadId: {upload_id}")
                    except Exception: pass
                
                logger.info("RTSP 任务出错，将在60秒后重试...")
                time.sleep(60)
    finally:
        # 6. Whatever happens, remove this task from the manager on thread exit.
        if task_id in app.running_tasks:
            del app.running_tasks[task_id]
            logger.info(f"已从任务管理器中清理 RTSP 任务 {task_id}。")

# =========================================================================
# ==   [New function 2] Handles plain HTTP files (one-shot task)         ==
# =========================================================================
def _stream_http_to_s3(app, task_id: str, aws_kwargs: Dict[str, Any], bucket_name: str, source_url: str, destination_key: str, chunk_size_mb: int):
    """
    Background worker: download a plain HTTP(S) file and stream it into S3
    via multipart upload (a one-shot task, unlike the RTSP worker).

    :param app: Application object exposing a `running_tasks` dict (task registry).
    :param task_id: This task's key in app.running_tasks.
    :param aws_kwargs: Keyword arguments forwarded to boto3.client("s3", ...).
    :param bucket_name: Destination bucket.
    :param source_url: HTTP(S) URL of the source file.
    :param destination_key: Destination object key.
    :param chunk_size_mb: Multipart part size in MiB.
    """
    logger = logging.getLogger('s3_web_browser.background_worker')
    s3_client: botocore.client.BaseClient = None
    upload_id: Optional[str] = None
    
    # 1. Fetch this task's shared status record from the global task manager.
    task_status = app.running_tasks.get(task_id)
    if not task_status:
        logger.error(f"HTTP 任务 {task_id} 在启动时未找到状态记录，线程退出。")
        return

    try:
        s3_client = boto3.client("s3", **aws_kwargs)
        logger.info(f"HTTP 任务开始: {destination_key}")
        task_status['status'] = 'running'
        
        response = s3_client.create_multipart_upload(Bucket=bucket_name, Key=destination_key)
        upload_id = response['UploadId']
        
        part_number = 1
        completed_parts = []
        CHUNK_SIZE_BYTES = chunk_size_mb * 1024 * 1024
        
        # Stream the download; (10, 60) = (connect, read) timeouts in seconds.
        with requests.get(source_url, stream=True, timeout=(10, 60)) as r:
            r.raise_for_status()
            total_size = int(r.headers.get('content-length', 0))
            bytes_processed = 0
            
            for chunk in r.iter_content(chunk_size=CHUNK_SIZE_BYTES):
                if chunk:
                    part_response = s3_client.upload_part(
                        Bucket=bucket_name, Key=destination_key, UploadId=upload_id,
                        PartNumber=part_number, Body=chunk
                    )
                    etag = part_response.get('ETag', '').strip('"')
                    completed_parts.append({'PartNumber': part_number, 'ETag': f'"{etag}"'})
                    part_number += 1
                    
                    # 2. Report progress in real time (percentage, when the
                    #    server provided a Content-Length).
                    bytes_processed += len(chunk)
                    if total_size > 0:
                        task_status['progress'] = int((bytes_processed / total_size) * 100)

        if completed_parts:
            s3_client.complete_multipart_upload(
                Bucket=bucket_name, Key=destination_key, UploadId=upload_id,
                MultipartUpload={'Parts': sorted(completed_parts, key=lambda i: i['PartNumber'])}
            )
            logger.info(f"✅ HTTP 文件上传成功: {destination_key}")
            task_status['status'] = 'success'
            task_status['progress'] = 100
        else:
            # Nothing was uploaded (empty body): abort so S3 drops the upload.
            if upload_id:
                s3_client.abort_multipart_upload(Bucket=bucket_name, Key=destination_key, UploadId=upload_id)
            task_status['status'] = 'failed'

    except Exception as e:
        logger.error(f"❌ HTTP 任务失败: {e}", exc_info=True)
        if task_status: task_status['status'] = 'failed'
        if upload_id and s3_client:
            try: s3_client.abort_multipart_upload(Bucket=bucket_name, Key=destination_key, UploadId=upload_id)
            except Exception: pass
    finally:
        # 3. Remove this task from the manager when the job finishes.
        if task_id in app.running_tasks:
            del app.running_tasks[task_id]
            logger.info(f"已从任务管理器中清理 HTTP 任务 {task_id}。")

# =========================================================================
# ==   [Dispatcher] The only function routes.py needs to call            ==
# =========================================================================
def stream_from_url_to_s3(app, task_id: str, aws_kwargs: Dict[str, Any], bucket_name: str, source_url: str, destination_key: str, chunk_size_mb: int) -> None:
    """
    Dispatch to the appropriate background worker based on the URL scheme.

    :param app: Application object exposing a `running_tasks` dict.
    :param task_id: The task's key in app.running_tasks.
    :param aws_kwargs: Keyword arguments forwarded to boto3.client("s3", ...).
    :param bucket_name: Destination bucket.
    :param source_url: Source URL (rtsp:// goes to the RTSP worker, anything
        else to the HTTP worker).
    :param destination_key: Destination object key.
    :param chunk_size_mb: Multipart part size in MiB.
    """
    if source_url.lower().startswith('rtsp://'):
        _stream_rtsp_to_s3(app, task_id, aws_kwargs, bucket_name, source_url, destination_key, chunk_size_mb)
    else:
        # Bug fix: _stream_http_to_s3 also takes app and task_id; the old call
        # dropped them, shifting every argument one position and crashing the
        # HTTP worker on startup.
        _stream_http_to_s3(app, task_id, aws_kwargs, bucket_name, source_url, destination_key, chunk_size_mb)
    

def get_object_info(s3_client: botocore.client.BaseClient, bucket_name: str, key: str) -> Optional[Dict[str, Any]]:
    """
    Efficiently fetch a single S3 object's metadata via head_object.
    Returns None when the object does not exist.

    :param s3_client: Boto3 S3 client instance.
    :param bucket_name: Bucket name.
    :param key: Object key (full path).
    :return: Dict with the object's core metadata, or None when not found.
    """
    try:
        # head_object is lightweight: metadata only, no object body download.
        meta = s3_client.head_object(Bucket=bucket_name, Key=key)
    except botocore.exceptions.ClientError as e:
        # head_object signals a missing object via ClientError; the exact code
        # ('404', 'NotFound', 'NoSuchKey') varies between S3-compatible stores.
        if e.response['Error']['Code'] in ['404', 'NotFound', 'NoSuchKey']:
            return None  # explicit None: the object does not exist
        # Other errors (e.g. AccessDenied) propagate to the caller.
        raise

    # Return only the core fields callers rely on.
    return {
        "size": meta.get('ContentLength'),
        "content_type": meta.get('ContentType', 'application/octet-stream'),
        "etag": meta.get('ETag', '').strip('"'),
        "last_modified": meta.get('LastModified')  # also used by the stream-video endpoint
    }

def get_bucket_logging(s3_client: botocore.client.BaseClient, bucket_name: str) -> Dict[str, Any]:
    """
    Fetch the bucket's server-access-logging configuration.

    When logging is not configured this raises a ClientError
    (NoSuchLoggingConfiguration); callers are expected to handle it.
    """
    response = s3_client.get_bucket_logging(Bucket=bucket_name)
    return response


def put_bucket_logging(s3_client: botocore.client.BaseClient, bucket_name: str, target_bucket: str, target_prefix: str):
    """
    Enable or update the bucket's server-access-logging configuration.
    """
    status = {
        'LoggingEnabled': {
            'TargetBucket': target_bucket,
            'TargetPrefix': target_prefix,
        }
    }
    s3_client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)


def delete_bucket_logging(s3_client: botocore.client.BaseClient, bucket_name: str):
    """
    Disable bucket logging by submitting an empty BucketLoggingStatus.
    """
    # An empty status dict tells S3 to turn server access logging off.
    s3_client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={})