import os
import re
import time
import json
import mmap
import httpx
import aiofiles
import asyncio
import hashlib
import subprocess
from io import BytesIO
from app.supply import MAX_REDIRECTS, get_timestamp, log
from diskcache import FanoutCache
from typing import Any, Dict, Tuple, Optional, Deque, Union
from collections import deque, OrderedDict
from fastapi import Request, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from urllib.parse import parse_qs, urljoin, urlparse, urlunparse

from app.proxy import infer_type

# Cache configuration
# NOTE(review): "/path/to/caches" looks like a placeholder path — confirm before deploy.
disk_cache = FanoutCache("/path/to/caches", size_limit=8 * 1024**3)  # 8GB disk cache
memory_cache = OrderedDict()  # ordered in-memory cache; LRU via move_to_end/popitem
CHUNK_SIZE = 2 * 1024 * 1024  # 2MB default segment size
MAX_CHUNK_SIZE = 8 * 1024 * 1024  # 8MB hard upper bound on a segment
INITIAL_TEST_COUNT = 3  # number of initial speed-test segment loads
MEMORY_CACHE_LIMIT = 512 * 1024**2  # 512MB in-memory cache budget
current_memory_usage = 0  # current in-memory cache usage, in bytes
# Ramp-up chunk sizes used during the initial speed test
INITIAL_CHUNK_SIZE = 512 * 1024  # first segment: 512KB
MEDIUM_CHUNK_SIZE = 1024 * 1024  # second segment: 1MB
FINAL_CHUNK_SIZE = 2 * 1024 * 1024  # third segment onward: 2MB

# Current-song bookkeeping
current_song_cache_key = None  # cache_key of the song currently playing
current_song_segments = set()  # segment keys belonging to the current song


def free_memory_if_needed():
    """Evict memory-cache entries until usage drops back under the limit.

    Strategy: first drop segments that do not belong to the currently
    playing song; if usage is still above 1.5x the budget, force-evict
    entries in LRU order. Updates the module-level usage counter.
    """
    global current_memory_usage

    while current_memory_usage > MEMORY_CACHE_LIMIT * 1.5:
        # Pass 1: evict segments that belong to songs other than the
        # current one, stopping as soon as we are back under budget.
        for key in list(memory_cache.keys()):
            if key not in current_song_segments:
                current_memory_usage -= len(memory_cache[key])
                del memory_cache[key]
                log(f"Cleared non-current segment: {key}")
                if current_memory_usage <= MEMORY_CACHE_LIMIT:
                    return

        # Pass 2: still over budget — force-evict the least recently used.
        # Bug fix: the original read memory_cache[key] AFTER popping the
        # key (KeyError); account for the size using the popped value.
        if memory_cache:
            key, value = memory_cache.popitem(last=False)
            current_memory_usage -= len(value)
            log(f"Force cleared LRU segment: {key}")
        else:
            # Nothing left to evict; break instead of spinning forever if
            # the usage counter has drifted above the threshold.
            break


class MediaCacheManager:
    """Two-tier (memory + disk) segment cache for remote and local media.

    Tracks per-source dynamic chunk sizing and download-speed statistics:
    segment sizes ramp up from INITIAL_CHUNK_SIZE over the first
    INITIAL_TEST_COUNT loads, then adapt to the measured throughput and
    the known file size, bounded by [INITIAL_CHUNK_SIZE, MAX_CHUNK_SIZE].
    """

    # Per-cache_key segmentation state:
    # {"current_size", "loaded_count", "avg_speed" (KB/s), "file_size"}
    _segment_sizes = {}

    # Per-cache_key history of observed download speeds (KB/s)
    _segment_speeds = {}

    @classmethod
    def get_current_chunk_size(cls, cache_key: str) -> int:
        """Return the chunk size to use for the next segment of cache_key.

        The first INITIAL_TEST_COUNT loads ramp through
        INITIAL/MEDIUM/FINAL_CHUNK_SIZE; afterwards the size adapts to
        the measured average speed and the known file size.
        """
        if cache_key not in cls._segment_sizes:
            cls._segment_sizes[cache_key] = {
                "current_size": INITIAL_CHUNK_SIZE,
                "loaded_count": 1,
                "avg_speed": 1,  # KB/s
                "file_size": 0,  # filled in later by get_content_length()
            }
            return INITIAL_CHUNK_SIZE

        state = cls._segment_sizes[cache_key]

        if state["loaded_count"] >= INITIAL_TEST_COUNT:
            # Bug fix: default chunk_size up front so it is always bound.
            # The original left it undefined (UnboundLocalError) when
            # file_size > 0 but the estimate did not trigger resizing.
            chunk_size = CHUNK_SIZE
            if state["file_size"] > 0:
                # Rough segment-count estimate at the current average
                # speed (avg_speed is KB/s, so *1024 converts to B/s).
                estimated_segments = state["file_size"] / (
                    state["avg_speed"] * 1024
                )
                if estimated_segments < INITIAL_TEST_COUNT:
                    target_size = state["file_size"] // INITIAL_TEST_COUNT
                    chunk_size = min(
                        max(target_size, INITIAL_CHUNK_SIZE),  # not below initial
                        MAX_CHUNK_SIZE,  # not above the hard cap
                    )
        else:
            log("正在测试分段大小...")
            chunk_size = [INITIAL_CHUNK_SIZE, MEDIUM_CHUNK_SIZE, FINAL_CHUNK_SIZE][
                min(state["loaded_count"], 2)  # clamp index into the ramp
            ]
        log(f"当前分段大小：{chunk_size // 1024**2 }M")
        return chunk_size

    @classmethod
    def update_segment_speed(cls, cache_key: str, size: int, duration: float):
        """Record one download sample and refresh the moving-average speed."""
        if cache_key not in cls._segment_speeds:
            cls._segment_speeds[cache_key] = []

        # Guard against a zero/near-zero duration (e.g. instant cache hit).
        speed = size / max(duration, 1e-6) / 1024  # KB/s
        cls._segment_speeds[cache_key].append(speed)

        # Simple moving average over the most recent samples.
        if cache_key in cls._segment_sizes:
            speeds = cls._segment_speeds[cache_key][-INITIAL_TEST_COUNT:]
            cls._segment_sizes[cache_key]["avg_speed"] = sum(speeds) / len(speeds)
            # Bug fix: log only when state exists — the original logged
            # unconditionally and raised KeyError for unknown keys.
            log(f"平均下载速度：{cls._segment_sizes[cache_key]['avg_speed'] // 1024} MB/s")

    @classmethod
    def increment_loaded_count(cls, cache_key: str):
        """Bump the loaded-segment counter that drives the size ramp-up."""
        if cache_key in cls._segment_sizes:
            cls._segment_sizes[cache_key]["loaded_count"] += 1

    @staticmethod
    def generate_cache_key(source: str, params: dict = None) -> str:
        """Return a deterministic MD5 cache key for (source, params)."""
        key_data = {
            "source": source,
            "params": params or {},
            "chunk_strategy": "dynamic",  # marks the dynamic-chunking scheme
        }
        return hashlib.md5(json.dumps(key_data, sort_keys=True).encode()).hexdigest()

    @staticmethod
    def parse_range_header(range_header: str, file_size: int) -> Tuple[int, int]:
        """Parse an HTTP Range header into a clamped (start, end) pair.

        Raises:
            HTTPException(416) on any malformed or non-byte range.
        """
        try:
            unit, ranges = range_header.strip().split("=")
            if unit != "bytes":
                raise ValueError("Only byte ranges supported")

            start_str, end_str = ranges.split("-")
            start = int(start_str) if start_str else 0
            end = int(end_str) if end_str else file_size - 1

            return max(0, start), min(file_size - 1, end)
        except Exception as e:
            raise HTTPException(status_code=416, detail=f"Range header error: {str(e)}")

    @classmethod
    async def get_content_length(cls, request: Request, source: str) -> int:
        """Resolve the total byte length of a remote URL or a local path.

        Also records the size on the segmentation state if it exists.
        Returns 0 when the length cannot be determined.
        """
        if not isinstance(source, str):
            raise ValueError("source must be a string (URL or file path)")

        if source.startswith(("http://", "https://")):
            # Bug fix: the original called
            # _get_remote_content_length(request, source), passing the
            # Request where the URL belongs and the URL where the HTTP
            # client belongs — remote lookups always failed to 0.
            http_client = getattr(request.app.state, "client", None)
            length = await cls._get_remote_content_length(source, http_client)
        else:
            length = await cls._get_local_content_length(source)

        # Record the file size on existing segmentation state.
        cache_key = cls.generate_cache_key(source)
        if cache_key in cls._segment_sizes:
            cls._segment_sizes[cache_key]["file_size"] = length

        return length

    @staticmethod
    async def _get_remote_content_length(url: str, client: httpx.AsyncClient) -> int:
        """Return the remote file's exact length in bytes, or 0 on failure."""
        try:
            # Cheapest path: HEAD request with a Content-Length header.
            resp = await client.head(url)
            if content_length := resp.headers.get("content-length"):
                return int(content_length)

            # Fallback: a tiny ranged GET exposes the total size through
            # the Content-Range header ("bytes 0-1/<total>").
            async with client.stream(
                "GET", url, headers={"range": "bytes=0-1"}
            ) as resp:
                log(f"响应状态: {resp.status_code}")
                log(f"响应头: {resp.headers}")
                # Bug fix: the original logged resp.text here, which raises
                # httpx.ResponseNotRead on an unread stream and sent every
                # fallback request down the exception path.

                if content_range := resp.headers.get("content-range"):
                    log(f"Content-Range: {content_range}")
                    return int(content_range.split("/")[1])
            return 0
        except Exception as e:
            log(f"Failed to get remote content length: {str(e)}")
            return 0

    @staticmethod
    async def _get_local_content_length(file_path: str) -> int:
        """Return the local file's size in bytes, or 0 on failure."""
        try:
            return os.path.getsize(file_path)
        except Exception as e:
            log(f"Failed to get local file size: {str(e)}")
            return 0

    @staticmethod
    async def prefetch_current_song(
        cache_key: str,
        source: str,
        content_length: int,
        client: httpx.AsyncClient = None,
    ):
        """Promote already-cached disk segments of the song into memory.

        Only segments recorded in the song's metadata are loaded; nothing
        is downloaded here.
        """
        global current_memory_usage

        # Segment bookkeeping written by the streaming entry points.
        cache_meta = disk_cache.get(f"{cache_key}_meta")
        if not cache_meta:
            return

        for seg_key in cache_meta["segments"]:
            if seg_key in memory_cache:
                continue
            # Single disk read (the original fetched each segment twice).
            data = disk_cache.get(seg_key)
            if data:
                memory_cache[seg_key] = data
                current_memory_usage += len(data)
                memory_cache.move_to_end(seg_key)  # mark as recently used
                free_memory_if_needed()

    @staticmethod
    async def _fetch_and_cache_segment(
        client: httpx.AsyncClient,
        url: str,
        seg_start: int,
        seg_end: int,
        cache_seg_key: str,
    ) -> bytes:
        """Fetch exactly bytes [seg_start, seg_end] of url and cache them.

        Serves from the disk cache when possible; otherwise downloads the
        range, persists it atomically, optionally promotes it to memory,
        and records the download speed. Raises on download failure.
        """
        global current_memory_usage

        # Disk-cache hit: optionally promote to memory for the current song.
        if cached := disk_cache.get(cache_seg_key):
            if cache_seg_key in current_song_segments:
                memory_cache[cache_seg_key] = cached
                current_memory_usage += len(cached)
                memory_cache.move_to_end(cache_seg_key)
                free_memory_if_needed()
            return cached

        # Download exactly the required byte range.
        range_header = f"bytes={seg_start}-{seg_end}"
        try:
            start_time = time.time()
            async with client.stream(
                "GET", url, headers={"Range": range_header}
            ) as resp:
                resp.raise_for_status()
                expected = resp.headers.get("content-length")
                buffer = BytesIO()
                received = 0

                async for chunk in resp.aiter_bytes():
                    received += len(chunk)
                    buffer.write(chunk)

                # Bug fix: only verify completeness when the server sent a
                # Content-Length — the original compared against 0 when
                # the header was absent and rejected valid data.
                if expected is not None and received != int(expected):
                    raise ValueError(
                        f"Incomplete data: expected {int(expected)}, got {received}"
                    )

                data = buffer.getvalue()

                # Atomically persist the segment and update song metadata.
                with disk_cache.transact():
                    disk_cache.set(cache_seg_key, data)
                    # Bug fix: cache_seg_key is "{key}_seg_{i}_{size}", so
                    # THREE fields must be stripped to recover the base
                    # key (the original stripped one and missed the meta).
                    meta_key = f"{cache_seg_key.rsplit('_', 3)[0]}_meta"
                    if meta := disk_cache.get(meta_key):
                        meta["segments"].add(cache_seg_key)
                        disk_cache.set(meta_key, meta)

                # Promote to memory only for the current song and only
                # while within 1.5x of the memory budget.
                if (
                    cache_seg_key in current_song_segments
                    and current_memory_usage + len(data) <= MEMORY_CACHE_LIMIT * 1.5
                ):
                    memory_cache[cache_seg_key] = data
                    current_memory_usage += len(data)
                    memory_cache.move_to_end(cache_seg_key)
                    free_memory_if_needed()

                duration = time.time() - start_time
                MediaCacheManager.update_segment_speed(
                    cache_seg_key.rsplit("_", 3)[0],  # recover base cache_key
                    len(data),
                    duration,
                )

                return data
        except Exception as e:
            log(f"Failed to fetch segment {cache_seg_key}: {str(e)}")
            raise

    @staticmethod
    async def _read_and_cache_segment(
        file_path: str, seg_start: int, seg_end: int, cache_seg_key: str
    ) -> bytes:
        """Read exactly bytes [seg_start, seg_end] of a local file and cache them.

        Mirrors _fetch_and_cache_segment for local sources. Raises on I/O
        failure.
        """
        global current_memory_usage

        # Disk-cache hit: optionally promote to memory for the current song.
        if cached := disk_cache.get(cache_seg_key):
            if cache_seg_key in current_song_segments:
                memory_cache[cache_seg_key] = cached
                current_memory_usage += len(cached)
                memory_cache.move_to_end(cache_seg_key)
                free_memory_if_needed()
            return cached

        try:
            async with aiofiles.open(file_path, "rb") as f:
                await f.seek(seg_start)
                data = await f.read(seg_end - seg_start + 1)

                # Atomically persist the segment and update song metadata.
                with disk_cache.transact():
                    disk_cache.set(cache_seg_key, data)
                    # Bug fix: strip three '_'-separated fields (seg, index,
                    # chunk size) to recover the base key; the original
                    # stripped only one and never updated the metadata.
                    meta_key = f"{cache_seg_key.rsplit('_', 3)[0]}_meta"
                    if meta := disk_cache.get(meta_key):
                        meta["segments"].add(cache_seg_key)
                        disk_cache.set(meta_key, meta)

                # Promote to memory only for the current song within budget.
                if (
                    cache_seg_key in current_song_segments
                    and current_memory_usage + len(data) <= MEMORY_CACHE_LIMIT * 1.5
                ):
                    memory_cache[cache_seg_key] = data
                    current_memory_usage += len(data)
                    memory_cache.move_to_end(cache_seg_key)
                    free_memory_if_needed()

                return data
        except Exception as e:
            log(f"Failed to read segment {cache_seg_key}: {str(e)}")
            raise

    @staticmethod
    async def get_or_load_segment(
        cache_key: str,
        source: str,
        seg_start: int,
        seg_end: int,
        client: httpx.AsyncClient = None,
    ) -> bytes:
        """Return the bytes [seg_start, seg_end], loading/caching as needed.

        Aligns the request to the current dynamic chunk grid, serves from
        the memory cache when possible, otherwise fetches (remote) or
        reads (local) the whole aligned segment and slices out the
        requested sub-range. Returns b"" on out-of-bounds requests or
        load failures so the caller's stream is not interrupted.
        """
        global current_memory_usage

        # Align the request to the current dynamic chunk grid.
        chunk_size = MediaCacheManager.get_current_chunk_size(cache_key)
        seg_index = seg_start // chunk_size
        actual_seg_start = seg_index * chunk_size
        actual_seg_end = min((seg_index + 1) * chunk_size - 1, seg_end)

        # The segment key embeds the chunk size so differently-sized grids
        # never collide in the cache.
        cache_seg_key = f"{cache_key}_seg_{seg_index}_{chunk_size}"

        # Bounds check against the known total length.
        # Bug fix: skip the check when the length is unknown — the
        # original compared ints against None and raised TypeError.
        content_length = disk_cache.get(f"{cache_key}_length")
        if content_length:
            if actual_seg_end >= content_length:
                actual_seg_end = content_length - 1
            if actual_seg_start >= content_length:
                log(
                    f"Segment {cache_seg_key} is out of bounds (file size: {content_length})"
                )
                return b""  # empty payload keeps the stream alive

        try:
            # Memory-cache hit: refresh its LRU position and slice.
            if data := memory_cache.get(cache_seg_key):
                memory_cache.move_to_end(cache_seg_key)
                return data[
                    seg_start - actual_seg_start : seg_end - actual_seg_start + 1
                ]

            # Load the aligned segment from its source.
            if source.startswith(("http://", "https://")):
                segment_data = await MediaCacheManager._fetch_and_cache_segment(
                    client, source, actual_seg_start, actual_seg_end, cache_seg_key
                )
                MediaCacheManager.increment_loaded_count(cache_key)
            else:
                segment_data = await MediaCacheManager._read_and_cache_segment(
                    source, actual_seg_start, actual_seg_end, cache_seg_key
                )

            return segment_data[
                seg_start - actual_seg_start : seg_end - actual_seg_start + 1
            ]

        except Exception as e:
            log(f"Failed to load segment {cache_seg_key}: {str(e)}")
            return b""  # empty payload keeps the stream alive


# Compatibility entry point
async def stream_remote_file(request: Request, remote_url: str):
    """Stream a remote resource with HTTP Range support and two-tier caching.

    Derives a cache key from the URL, tracks "current song" state for the
    memory cache, resolves and disk-caches the total content length, then
    streams the requested byte range segment by segment with read-ahead
    prefetching and a limited retry loop. Returns 206 when a Range header
    is present, otherwise 200.
    """
    global current_song_cache_key, current_song_segments

    # Shared httpx.AsyncClient created at app startup (may be None).
    client = getattr(request.app.state, "client", None)

    # Build the cache key for this URL (no extra params used here).
    cache_params = {}
    cache_key = MediaCacheManager.generate_cache_key(remote_url, cache_params)

    # Song switch: reset per-song segment bookkeeping.
    if cache_key != current_song_cache_key:
        current_song_segments.clear()
        current_song_cache_key = cache_key
        log(f"Switched to new song: {cache_key}")

    # Resolve the total content length (cached on disk per cache_key).
    content_length = disk_cache.get(f"{cache_key}_length")
    if not content_length:
        content_length = await MediaCacheManager.get_content_length(request, remote_url)
        disk_cache.set(f"{cache_key}_length", content_length)

        # Initialize cache metadata for this song.
        # NOTE(review): these keys lack the chunk-size suffix that actual
        # segment keys carry ("{key}_seg_{i}_{size}"), so membership tests
        # against current_song_segments appear never to match — confirm.
        total_segments = (content_length + CHUNK_SIZE - 1) // CHUNK_SIZE
        current_song_segments.update(
            f"{cache_key}_seg_{i}" for i in range(total_segments)
        )
        disk_cache.set(
            f"{cache_key}_meta", {"segments": set(), "total_length": content_length}
        )

        # Make sure the segmentation state exists for this key.
        if cache_key not in MediaCacheManager._segment_sizes:
            MediaCacheManager._segment_sizes[cache_key] = {
                "current_size": INITIAL_CHUNK_SIZE,
                "loaded_count": 1,
                "avg_speed": 1,  # KB/s
                "file_size": content_length,  # total size in bytes
            }
            log(f"Initialized _segment_sizes for cache_key {cache_key}")

    # Parse the Range header; fall back to the full range on errors.
    range_header = request.headers.get("range")
    start, end = 0, content_length - 1
    if range_header:
        try:
            start, end = MediaCacheManager.parse_range_header(
                range_header, content_length
            )
        except HTTPException:
            pass

    # On first access, promote already-cached disk segments to memory.
    if f"{cache_key}_prefetched" not in disk_cache:
        disk_cache.set(f"{cache_key}_prefetched", True)
        await MediaCacheManager.prefetch_current_song(
            cache_key, remote_url, content_length, client
        )

    # Read-ahead prefetching of upcoming segments
    async def prefetch_next_segments(
        cache_key: str,
        source: str,
        current_pos: int,
        client: httpx.AsyncClient,
        prefetch_count: int = 1,  # default: prefetch 1 segment ahead
    ):
        """Prefetch up to prefetch_count segments following current_pos."""
        chunk_size = MediaCacheManager.get_current_chunk_size(cache_key)
        content_length = disk_cache.get(f"{cache_key}_length")

        tasks = []
        for i in range(1, prefetch_count + 1):
            seg_start = current_pos + i * chunk_size
            seg_end = min(seg_start + chunk_size - 1, content_length - 1)

            if seg_start >= content_length:
                break  # start position is past EOF: stop prefetching

            seg_index = seg_start // chunk_size
            cache_seg_key = f"{cache_key}_seg_{seg_index}_{chunk_size}"

            # Only fetch segments not already on disk.
            if not disk_cache.get(cache_seg_key):
                task = MediaCacheManager.get_or_load_segment(
                    cache_key, source, seg_start, seg_end, client
                )
                tasks.append(task)

        await asyncio.gather(*tasks, return_exceptions=True)

    # Streaming response generator
    async def generate_stream():
        """Yield the requested byte range [start, end] segment by segment."""
        pos = start
        retry_count = 0
        max_retries = 3

        while pos <= end:
            try:
                # Segment boundaries follow the current dynamic chunk grid.
                chunk_size = MediaCacheManager.get_current_chunk_size(cache_key)
                seg_start = (pos // chunk_size) * chunk_size
                seg_end = min(seg_start + chunk_size - 1, end)

                segment = await MediaCacheManager.get_or_load_segment(
                    cache_key, remote_url, pos, seg_end, client
                )

                yield segment

                # After each successful chunk, prefetch the following ones.
                await prefetch_next_segments(cache_key, remote_url, pos, client)

                pos = seg_end + 1
                retry_count = 0  # reset the retry counter on success

                # Ramp up the segment size as playback progresses.
                if pos - start > INITIAL_CHUNK_SIZE * 3:  # more than 1.5MB served
                    MediaCacheManager._segment_sizes[cache_key][
                        "current_size"
                    ] = MEDIUM_CHUNK_SIZE
                if pos - start > MEDIUM_CHUNK_SIZE * 3:  # more than 3MB served
                    MediaCacheManager._segment_sizes[cache_key][
                        "current_size"
                    ] = FINAL_CHUNK_SIZE

            except Exception as e:
                retry_count += 1
                if retry_count > max_retries:
                    log(f"Failed after {max_retries} retries at position {pos}")
                    raise HTTPException(500, "Streaming failed after multiple retries")

                # Back off before retrying the same position (linear:
                # 0.1s * attempt — the original comment said exponential).
                await asyncio.sleep(0.1 * retry_count)
                log(f"Retrying segment at {pos}, attempt {retry_count}")

    headers = {
        "Content-Type": "audio/mpeg",
        "Accept-Ranges": "bytes",
        "Content-Range": f"bytes {start}-{end}/{content_length}",
        "Content-Length": str(end - start + 1),
        "Cache-Control": "private, max-age=3600",
    }

    return StreamingResponse(
        generate_stream(), headers=headers, status_code=206 if range_header else 200
    )


# async def stream_local_file(request: Request, file_path: str):
#     def get_audio_duration(file_path: str) -> float:
#         try:
#             # 使用ffprobe获取准确时长
#             cmd = [
#                 "ffprobe",
#                 "-v",
#                 "error",
#                 "-show_entries",
#                 "format=duration",
#                 "-of",
#                 "default=noprint_wrappers=1:nokey=1",
#                 file_path,
#             ]
#             result = subprocess.run(
#                 cmd,
#                 stdout=subprocess.PIPE,
#                 stderr=subprocess.PIPE,
#                 text=True,
#                 timeout=10,
#             )

#             if result.returncode != 0:
#                 raise Exception(result.stderr.strip())

#             duration = float(result.stdout.strip())
#             if duration <= 0:
#                 raise Exception("无效的时长值")

#             return duration

#         except Exception as e:
#             log(f"ffprobe获取时长失败: {str(e)}，尝试备用方法")

#             # 备用方法：解析FFmpeg输出
#             try:
#                 cmd = ["ffmpeg", "-i", file_path]
#                 result = subprocess.run(
#                     cmd,
#                     stdout=subprocess.PIPE,
#                     stderr=subprocess.PIPE,
#                     text=True,
#                     timeout=10,
#                 )

#                 match = re.search(
#                     r"Duration: (\d{2}):(\d{2}):(\d{2})\.\d+", result.stderr
#                 )
#                 if match:
#                     h, m, s = map(int, match.groups())
#                     return h * 3600 + m * 60 + s

#                 raise Exception("无法解析时长信息")
#             except Exception as fallback_e:
#                 log(f"所有时长获取方法均失败: {str(fallback_e)}")
#                 return 0

#     try:
#         # 获取源文件时长
#         source_duration = get_audio_duration(file_path)
#         if not source_duration:
#             raise HTTPException(status_code=500, detail="无法获取音频时长")

#         log(f"源文件时长确认: {source_duration}秒")

#         # 固定比特率设置（320kbps）
#         FIXED_BITRATE = 320000
#         content_length = int(FIXED_BITRATE * source_duration / 8)
#         log(
#             f"固定比特率模式 | 比特率: {FIXED_BITRATE//1000}kbps | 计算大小: {content_length}字节"
#         )

#         # 处理 Range 请求头
#         range_header = request.headers.get("range")
#         start, end = 0, content_length - 1

#         if range_header:
#             try:
#                 unit, ranges = range_header.split("=")
#                 if unit != "bytes":
#                     raise ValueError

#                 start_str, end_str = ranges.split("-")
#                 start = int(start_str)
#                 end = int(end_str) if end_str else content_length - 1

#                 # 验证范围有效性
#                 if start >= content_length or end >= content_length or start > end:
#                     raise HTTPException(
#                         status_code=416,
#                         detail=f"请求范围无效，文件大小: {content_length}",
#                         headers={"Content-Range": f"bytes */{content_length}"},
#                     )

#             except (ValueError, IndexError):
#                 raise HTTPException(
#                     status_code=416,
#                     detail="无效的 Range 头格式",
#                     headers={"Content-Range": f"bytes */{content_length}"},
#                 )

#         # 配置 FFmpeg 命令（支持 seek）
#         seek_time = start / (FIXED_BITRATE / 8)
#         remaining_duration = source_duration - seek_time

#         ffmpeg_command = [
#             "ffmpeg",
#             "-ss",
#             str(seek_time),
#             "-i",
#             file_path,
#             "-f",
#             "mp3",
#             "-acodec",
#             "libmp3lame",
#             "-b:a",
#             f"{FIXED_BITRATE//1000}k",
#             "-vn",
#             "-t",
#             str(remaining_duration),
#             "-fflags",
#             "+genpts",
#             "-loglevel",
#             "error",
#             "-",
#         ]

#         log(f"FFmpeg命令: {' '.join(ffmpeg_command)}")

#         process = subprocess.Popen(
#             ffmpeg_command,
#             stdout=subprocess.PIPE,
#             stderr=subprocess.PIPE,
#             bufsize=10**6,
#         )

#         # 设置响应头
#         headers = {
#             "Content-Type": "audio/mpeg",
#             "Accept-Ranges": "bytes",
#             "Content-Duration": str(source_duration),
#             "X-Audio-Bitrate": str(FIXED_BITRATE),
#             "Cache-Control": "no-cache",
#         }

#         if range_header:
#             content_range = f"bytes {start}-{end}/{content_length}"
#             headers.update(
#                 {
#                     "Content-Range": content_range,
#                     "Content-Length": str(end - start + 1),
#                 }
#             )
#             status_code = 206
#             log(f"处理范围请求: {content_range}")
#         else:
#             headers["Content-Length"] = str(content_length)
#             status_code = 200

#         def audio_stream():
#             bytes_to_send = end - start + 1
#             bytes_sent = 0
#             chunk_size = 128 * 1024  # 128KB chunks

#             try:
#                 while bytes_sent < bytes_to_send and process.poll() is None:
#                     remaining = bytes_to_send - bytes_sent
#                     current_chunk_size = min(chunk_size, remaining)

#                     chunk = process.stdout.read(current_chunk_size)
#                     if not chunk:
#                         break

#                     bytes_sent += len(chunk)
#                     yield chunk

#                 # 检查是否完成传输
#                 if bytes_sent < bytes_to_send:
#                     log(f"警告: 流提前结束，已发送 {bytes_sent}/{bytes_to_send} 字节")
#                 else:
#                     log(f"流传输完成: 共发送 {bytes_sent} 字节")

#             except GeneratorExit:
#                 log("客户端断开连接，终止流传输")
#                 raise
#             except Exception as e:
#                 log(f"流传输错误: {str(e)}")
#                 raise
#             finally:
#                 # 确保进程被正确终止
#                 if process.poll() is None:
#                     try:
#                         process.terminate()
#                         try:
#                             process.wait(timeout=1.0)
#                         except subprocess.TimeoutExpired:
#                             process.kill()
#                             log("强制终止FFmpeg进程")
#                     except Exception as e:
#                         log(f"终止FFmpeg进程时出错: {str(e)}")

#                 # 记录FFmpeg错误输出（如果有）
#                 stderr_output = process.stderr.read()
#                 if stderr_output:
#                     log(
#                         f"FFmpeg错误输出: {stderr_output.decode('utf-8', errors='ignore')}"
#                     )

#         return StreamingResponse(
#             audio_stream(),
#             headers=headers,
#             media_type="audio/mpeg",
#             status_code=status_code,
#         )

#     except HTTPException:
#         raise
#     except Exception as e:
#         log(f"流创建失败: {str(e)}")
#         raise HTTPException(status_code=500, detail=f"音频流错误: {str(e)}")


async def stream_local_file(request: Request, file_path: str):
    """Stream a local file with HTTP Range support via a memory-mapped view.

    Returns 206 with the requested byte range when a valid Range header is
    present, otherwise 200 with the whole file. Raises HTTPException(404)
    when the file cannot be stat'ed.
    """
    try:
        file_size = os.path.getsize(file_path)
    except Exception as e:
        log(f"Failed to get local file size: {str(e)}")
        raise HTTPException(404, "File not found")

    # Parse the Range header; fall back to the full file on a malformed one.
    range_header = request.headers.get("range")
    start, end = 0, file_size - 1
    if range_header:
        try:
            start, end = MediaCacheManager.parse_range_header(range_header, file_size)
        except HTTPException:
            pass

    headers = {
        "Content-Type": "audio/mpeg",
        "Accept-Ranges": "bytes",
        "Content-Range": f"bytes {start}-{end}/{file_size}",
        "Content-Length": str(end - start + 1),
        "Cache-Control": "private, max-age=3600",
    }

    if file_size == 0:
        # Bug fix: mmap cannot map an empty file (the original crashed
        # with ValueError here); serve an empty body instead.
        async def empty_stream():
            return
            yield b""  # unreachable; makes this an async generator

        return StreamingResponse(empty_stream(), headers=headers, status_code=200)

    # Map the file; the mapping stays valid after the file object closes.
    with open(file_path, "rb") as f:
        mmapped_file = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)

    # Streaming response generator over the mapped region.
    # The original also spawned fire-and-forget asyncio tasks that sliced
    # the mmap ahead of the read position and discarded the result; that
    # was pure overhead (the OS already handles readahead) and could race
    # a closed mapping, so it has been removed.
    async def generate_stream():
        pos = start
        remaining = end - start + 1
        try:
            while remaining > 0:
                step = min(remaining, CHUNK_SIZE)
                data = mmapped_file[pos : pos + step]

                yield data
                remaining -= len(data)
                pos += len(data)
        finally:
            # Bug fix: the original assigned `response.on_close`, which
            # StreamingResponse never invokes, leaking the mapping; close
            # it when the stream finishes or the client disconnects.
            mmapped_file.close()

    return StreamingResponse(
        generate_stream(), headers=headers, status_code=206 if range_header else 200
    )