import os
import re
import json
import time
import math
import shutil
import subprocess
from urllib.parse import urljoin, urlparse
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List, Optional, Dict, Tuple, Set
import threading

import requests

try:
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
    from cryptography.hazmat.backends import default_backend
    CRYPTO_AVAILABLE = True
except Exception:
    CRYPTO_AVAILABLE = False

try:
    from tqdm import tqdm
    TQDM_AVAILABLE = True
except Exception:
    TQDM_AVAILABLE = False


class M3U8Downloader:
    """
    An m3u8 downloader with multi-threading, resumable downloads, progress
    reporting, AES-128 decryption, and support for both VOD and live streams.

    Design notes:
    - Concurrency: segments are fetched in parallel via ThreadPoolExecutor.
    - Resume: `.part` temp files plus HTTP Range appends (encrypted segments
      are always re-downloaded in full so CBC decryption stays correct).
    - Event callbacks: a unified hook protocol (on_download_start /
      on_segment_progress / on_error, ...) for external UI/log handling.
    - Pause/resume: the `_resume_event` threading event blocks at safe points
      (in-flight I/O is never forcibly interrupted).
    - Error handling: network and parsing errors are reported via callbacks
      instead of raising fatal exceptions.
    """
    def __init__(
        self,
        url: str,
        output_dir: str = "downloads",
        max_workers: int = 8,
        timeout: int = 15,
        retries: int = 3,
        headers: Optional[Dict[str, str]] = None,
        proxy: Optional[str] = None,
        live_poll_interval: int = 5,
        download_id: str = "",
        event_handler: Optional[object] = None,
    ):
        # Basic request configuration.
        self.url = url
        self.output_dir = output_dir
        self.max_workers = max_workers
        self.timeout = timeout
        self.retries = retries
        self.headers = headers or {}
        self.proxy = proxy

        # One shared HTTP session for every request this downloader makes.
        session = requests.Session()
        if self.headers:
            session.headers.update(self.headers)
        if self.proxy:
            session.proxies.update({"http": proxy, "https": proxy})
        self.session = session

        self.live_poll_interval = live_poll_interval
        self.download_id = str(download_id) if download_id is not None else ""
        self.event_handler = event_handler

        # Persisted resume state lives next to the downloaded segments.
        os.makedirs(self.output_dir, exist_ok=True)
        self.state_path = os.path.join(self.output_dir, "state.json")
        self.downloaded_segments: Set[str] = set()
        self._load_state()

        # Playlist metadata (filled in while parsing).
        self.base_url = None
        self.is_live = False
        self.target_duration = None
        self.media_sequence = 0

        # Encryption state (tracks the most recent #EXT-X-KEY seen).
        self.method = None
        self.key_uri = None
        self.key_bytes = None
        self.iv_map: Dict[int, bytes] = {}

        # Segment lists kept around for merging.
        self._vod_segments_cache: List[Dict] = []
        self._live_segments_accum: List[Dict] = []

        # Throughput accounting for overall-speed reporting.
        self._bytes_downloaded: int = 0
        self._bytes_lock = threading.Lock()
        self._download_start_ts: Optional[float] = None

        # Pause/resume gate: set = running, cleared = paused.
        self._resume_event = threading.Event()
        self._resume_event.set()

        # Cooperative stop flag (+ whether to merge what we have on stop).
        self._stop_event = threading.Event()
        self._stop_merge_mp4: bool = False

    # 事件触发工具
    def _emit(self, name: str, **kwargs) -> None:
        """触发事件回调（若提供了 `event_handler`）。

        约定：自动注入 `download_id`，其余参数由调用方传入；
        回调应为同名方法，例如 `on_segment_progress(...)`。
        """
        h = getattr(self, 'event_handler', None)
        if not h:
            return
        cb = getattr(h, name, None)
        if callable(cb):
            try:
                # 将 download_id 注入到回调参数中
                kwargs.setdefault('download_id', self.download_id)
                cb(**kwargs)
            except Exception:
                pass

    def _overall_speed_bps(self) -> float:
        """计算总体平均速度（B/s），用于整体进度展示。"""
        if not self._download_start_ts:
            return 0.0
        elapsed = max(1e-6, time.time() - self._download_start_ts)
        with self._bytes_lock:
            total = self._bytes_downloaded
        return total / elapsed

    # ------------------------ Public API ------------------------
    def download(self, live_duration_seconds: Optional[int] = None, segment_limit: Optional[int] = None,
                 merge_mp4: bool = False, mp4_name: str = "output.mp4", cleanup_ts: bool = False) -> str:
        """Download entry point. Returns the output directory.

        Args:
            live_duration_seconds: live mode only — capture at most this many seconds.
            segment_limit: live mode only — capture at most this many segments.
            merge_mp4: merge into an mp4 after downloading (requires `ffmpeg`).
            mp4_name: name of the merged mp4 file (inside the output directory).
            cleanup_ts: delete the TS segment files after a successful merge.

        Pause/resume: `_resume_event.wait()` is hit at several safe points in
        the pipeline; callers drive it via `pause()`/`resume()`.
        Error handling: parsing/network failures are reported through the
        `on_error` callback and abort the remaining steps instead of raising.
        """
        self._download_start_ts = time.time()
        with self._bytes_lock:
            self._bytes_downloaded = 0
        try:
            playlist_url, is_variant = self._resolve_playlist(self.url)
            self.base_url = self._compute_base_url(playlist_url)
            playlist_text = self._fetch_text(playlist_url)
            self._parse_playlist_header(playlist_text)
            segments = self._parse_segments(playlist_text)
        except Exception as e:
            # Network, parsing and key-fetch failures are all reported through
            # one callback; no fatal exception escapes.
            self._emit('on_error', context='resolve_or_fetch', message=str(e))
            return self.output_dir

        self._emit('on_download_start', is_live=self.is_live, total_segments=len(segments) if not self.is_live else None, output_dir=self.output_dir)

        if not self.is_live:
            self._vod_segments_cache = segments
            self._download_vod(segments)
            if self._stop_event.is_set():
                # Stop was requested: if an MP4 merge was asked for on stop,
                # concatenate only the segments that actually finished.
                if self._stop_merge_mp4:
                    try:
                        eff = [s for s in self._vod_segments_cache if s.get('filename') in self.downloaded_segments]
                        if eff:
                            out_mp4 = os.path.join(self.output_dir, 'stopped_merged.mp4')
                            self.merge_to_mp4(eff, out_mp4)
                    except Exception as e:
                        self._emit('on_error', context='merge', message=str(e))
                        print(f"[merge] 合并失败: {e}")
            elif merge_mp4:
                try:
                    self.merge_to_mp4(self._vod_segments_cache, os.path.join(self.output_dir, mp4_name))
                    if cleanup_ts:
                        self._cleanup_segments(self._vod_segments_cache)
                except Exception as e:
                    self._emit('on_error', context='merge', message=str(e))
                    print(f"[merge] 合并失败: {e}")
        else:
            self._download_live(playlist_url, initial_segments=segments, live_duration_seconds=live_duration_seconds, segment_limit=segment_limit)
            if self._stop_event.is_set():
                if self._stop_merge_mp4:
                    try:
                        eff = [s for s in self._live_segments_accum if s.get('filename') in self.downloaded_segments]
                        if eff:
                            out_mp4 = os.path.join(self.output_dir, 'stopped_merged.mp4')
                            self.merge_to_mp4(eff, out_mp4)
                    except Exception as e:
                        self._emit('on_error', context='merge', message=str(e))
                        print(f"[merge] 合并失败: {e}")
            elif merge_mp4:
                try:
                    self.merge_to_mp4(self._live_segments_accum, os.path.join(self.output_dir, mp4_name))
                    if cleanup_ts:
                        self._cleanup_segments(self._live_segments_accum)
                except Exception as e:
                    self._emit('on_error', context='merge', message=str(e))
                    print(f"[merge] 合并失败: {e}")

        self._emit('on_download_complete', output_dir=self.output_dir)
        return self.output_dir

    def stop(self, merge_mp4: bool = False) -> None:
        """请求协作式停止下载；可选择在停止后合并已下载的 MP4。"""
        self._stop_merge_mp4 = merge_mp4
        self._stop_event.set()

    # ------------------------ 解析相关 ------------------------
    def _resolve_playlist(self, url: str) -> Tuple[str, bool]:
        # 如果是主列表，选择带宽最高的子列表
        text = self._fetch_text(url)
        if "#EXT-X-STREAM-INF" in text:
            best_bw = -1
            best_uri = None
            base = url.rsplit('/', 1)[0] + '/'
            for line in text.splitlines():
                if line.startswith('#EXT-X-STREAM-INF'):
                    m = re.search(r'BANDWIDTH=(\d+)', line)
                    if m:
                        bw = int(m.group(1))
                        if bw > best_bw:
                            best_bw = bw
                            best_uri = None  # 下一行处理
                elif best_bw >= 0 and not line.startswith('#'):
                    best_uri = urljoin(base, line)
                    break
            if best_uri:
                return best_uri, True
        return url, False

    def _compute_base_url(self, url: str) -> str:
        """Return the directory URL that relative segment URIs resolve against.

        `urljoin(url, '.')` keeps the scheme/host/directory and drops the
        filename, query string, and fragment — unlike the previous
        string-splitting, which broke on URLs without a path component or
        with a '/' inside the query string.
        """
        return urljoin(url, '.')

    def _fetch_text(self, url: str) -> str:
        """GET `url` through the shared session and return the body as text.

        Raises `requests.HTTPError` on a non-2xx status.
        """
        response = self.session.get(url, timeout=self.timeout)
        response.raise_for_status()
        return response.text

    def _parse_playlist_header(self, text: str) -> None:
        """Extract playlist-level metadata from the raw m3u8 text.

        Updates `is_live` (no #EXT-X-ENDLIST tag means live), `target_duration`,
        and `media_sequence`. If any #EXT-X-KEY line is present, only the last
        one is kept and applied (segment parsing may later override the IV).
        """
        self.is_live = "#EXT-X-ENDLIST" not in text

        td = re.search(r"#EXT-X-TARGETDURATION:(\d+)", text)
        self.target_duration = int(td.group(1)) if td else None

        ms = re.search(r"#EXT-X-MEDIA-SEQUENCE:(\d+)", text)
        self.media_sequence = int(ms.group(1)) if ms else 0

        # Remember only the most recent #EXT-X-KEY line in the playlist.
        last_key_line = None
        for raw_line in text.splitlines():
            if raw_line.startswith("#EXT-X-KEY"):
                last_key_line = raw_line
        if last_key_line is not None:
            self._parse_key_line(last_key_line)

    def _attr_value(self, line: str, key: str) -> Optional[str]:
        """Return the raw value of attribute `key` in an m3u8 tag line.

        Quoted values may legally contain commas (e.g. key URIs with query
        parameters), so a quoted string is matched as a whole; otherwise the
        value runs up to the next comma. Quotes are NOT stripped here — use
        `_strip_quotes` for that. Returns None when the attribute is absent.
        """
        m = re.search(rf'{key}=("[^"]*"|[^,]+)', line)
        return m.group(1) if m else None

    def _strip_quotes(self, s: Optional[str]) -> Optional[str]:
        """Remove one matching pair of surrounding quotes from `s`, if any.

        None/empty input is returned unchanged; mismatched or absent quotes
        leave the string as-is.
        """
        if not s:
            return s
        for quote in ('"', "'"):
            if s.startswith(quote) and s.endswith(quote):
                return s[1:-1]
        return s

    def _parse_key_line(self, line: str) -> None:
        """Parse an #EXT-X-KEY line and update the current encryption state.

        Sets `method`/`key_uri`, fetches the key bytes over HTTP for AES-128,
        and records an explicit IV (when present) in `iv_map`, keyed by the
        current media sequence number.

        Raises:
            RuntimeError: AES-128 requested but `cryptography` is unavailable.
        """
        method = self._attr_value(line, 'METHOD')
        self.method = self._strip_quotes(method)
        uri = self._attr_value(line, 'URI')
        self.key_uri = self._strip_quotes(uri)
        iv = self._attr_value(line, 'IV')
        iv_hex = self._strip_quotes(iv)
        if self.method == 'AES-128' and self.key_uri:
            if not CRYPTO_AVAILABLE:
                raise RuntimeError("cryptography 不可用，无法解密 AES-128 分片")
            # Key URIs may be relative to the playlist's directory.
            key_url = urljoin(self.base_url, self.key_uri)
            self.key_bytes = self._fetch_key(key_url)
        if iv_hex:
            # Record in iv_map; segment parsing falls back to this when no
            # per-segment IV is available.
            try:
                iv_bytes = bytes.fromhex(iv_hex[2:] if iv_hex.startswith('0x') else iv_hex)
                self.iv_map[self.media_sequence] = iv_bytes
            except Exception:
                # Malformed IV hex is ignored; _derive_iv falls back to seq.
                pass

    def _fetch_key(self, url: str) -> bytes:
        """Download the raw AES key bytes from `url`.

        Raises `requests.HTTPError` on a non-2xx status.
        """
        response = self.session.get(url, timeout=self.timeout)
        response.raise_for_status()
        return response.content

    def _parse_segments(self, text: str) -> List[Dict]:
        """Parse the media segment entries out of an m3u8 playlist body.

        Returns a list of dicts carrying uri/filename/duration/seq plus the
        encryption parameters (method, key_uri, key_bytes, iv_hex) in effect
        for each segment. Sequence numbers start at `media_sequence`.

        Fix: an explicit IV from an #EXT-X-KEY line now stays in effect for
        every following segment until the next #EXT-X-KEY tag (RFC 8216
        §4.3.2.4); previously it was applied to the first segment only, and
        later segments silently fell back to the sequence-number IV.
        """
        segments: List[Dict] = []
        current_duration = None
        current_method = self.method
        current_key_uri = self.key_uri
        current_key_bytes = self.key_bytes
        current_iv_hex = None
        seq = self.media_sequence
        for line in text.splitlines():
            line = line.strip()
            if not line:
                continue
            if line.startswith('#EXT-X-KEY'):
                # New key configuration; also updates downloader-level state.
                self._parse_key_line(line)
                current_method = self.method
                current_key_uri = self.key_uri
                current_key_bytes = self.key_bytes
                current_iv_hex = self._strip_quotes(self._attr_value(line, 'IV'))
            elif line.startswith('#EXTINF:'):
                d_match = re.search(r"#EXTINF:([0-9.]+)", line)
                current_duration = float(d_match.group(1)) if d_match else None
                # The next non-comment line is this segment's URI.
            elif not line.startswith("#"):
                segments.append({
                    "uri": urljoin(self.base_url, line),
                    "filename": self._segment_filename(seq),
                    "duration": current_duration,
                    "seq": seq,
                    "method": current_method,
                    "key_uri": current_key_uri,
                    "key_bytes": current_key_bytes,
                    "iv_hex": current_iv_hex,
                })
                seq += 1
                # A duration applies to exactly one segment; the IV persists
                # until the next #EXT-X-KEY line.
                current_duration = None
        return segments

    def _segment_filename(self, seq: int) -> str:
        """Return the canonical on-disk name for segment number `seq`."""
        return "seg_{:08d}.ts".format(seq)

    # ------------------------ Download implementation ------------------------
    def _download_vod(self, segments: List[Dict]) -> None:
        """VOD download: fetch the given segments concurrently while keeping
        overall progress and the persisted resume state up to date.

        Honors pause (`_resume_event`) before each task submission, and the
        cooperative stop flag both while submitting and while collecting
        results; futures that have not started are cancelled on stop.
        """
        total = len(segments)
        progress = None
        if TQDM_AVAILABLE:
            progress = tqdm(total=total, desc="VOD下载", unit="seg")
        completed = 0
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            futures = []
            for seg in segments:
                # Pause check (before submitting each task).
                self._resume_event.wait()
                if self._stop_event.is_set():
                    break
                futures.append(executor.submit(self._download_one_segment, seg))
            # If the stop signal arrived during submission, cancel any tasks
            # that have not started yet.
            if self._stop_event.is_set():
                for f in futures:
                    try:
                        f.cancel()
                    except Exception:
                        pass
            else:
                for fut in as_completed(futures):
                    # _download_one_segment catches its own errors and
                    # returns a bool, so result() does not raise here.
                    ok = fut.result()
                    completed += 1
                    if progress:
                        progress.update(1)
                    else:
                        self._print_progress(completed, total)
                    self._emit('on_overall_progress', completed=completed, total=total, overall_speed_bps=self._overall_speed_bps())
                    if self._stop_event.is_set():
                        # After a stop signal, best-effort cancel of tasks
                        # that have not started yet.
                        for f in futures:
                            try:
                                f.cancel()
                            except Exception:
                                pass
                        break
        if progress:
            progress.close()
        self._save_state()

    def _download_live(self, playlist_url: str, initial_segments: List[Dict], live_duration_seconds: Optional[int], segment_limit: Optional[int]) -> None:
        """Live download: poll the latest playlist periodically, filter out
        already-seen segments and download the new ones incrementally.

        Exit conditions: `live_duration_seconds` elapsed, `segment_limit`
        reached, or a cooperative stop request.
        """
        downloaded_count = 0
        seen: Set[str] = set(s['uri'] for s in initial_segments)
        # Download the initial batch of segments first.
        self._live_segments_accum.extend(initial_segments)
        self._download_vod(initial_segments)
        downloaded_count += len(initial_segments)
        start_time = time.time()

        while True:
            # Pause check for the live polling loop.
            self._resume_event.wait()
            if self._stop_event.is_set():
                break
            if live_duration_seconds and (time.time() - start_time) >= live_duration_seconds:
                break
            if segment_limit and downloaded_count >= segment_limit:
                break
            time.sleep(self.live_poll_interval)
            try:
                text = self._fetch_text(playlist_url)
                self._parse_playlist_header(text)
                new_segments = self._parse_segments(text)
                to_download = [s for s in new_segments if s['uri'] not in seen]
                if not to_download:
                    continue
                self._live_segments_accum.extend(to_download)
                self._download_vod(to_download)
                for s in to_download:
                    seen.add(s['uri'])
                downloaded_count += len(to_download)
            except Exception as e:
                # Transient playlist/network failures: log and keep polling.
                print(f"[live] 拉取/下载失败: {e}")
                continue
        print(f"直播下载结束，共下载分片: {downloaded_count}")

    def _print_progress(self, completed: int, total: int) -> None:
        """Print a simple textual progress line (fallback when tqdm is absent)."""
        if total:
            percent = completed / total * 100.0
        else:
            # Avoid division by zero for an empty segment list.
            percent = 0.0
        print(f"进度: {completed}/{total} ({percent:.2f}%)")

    def _download_one_segment(self, seg: Dict) -> bool:
        """Download a single segment (with retries, optional decryption and
        resume support). Returns True on success, False on failure or stop.

        Strategy:
        - If the final file already exists, mark it complete immediately.
        - If a `.part` file exists and the segment is NOT encrypted, try a
          Range resume; for an encrypted segment the `.part` is deleted and
          the download restarts (CBC decryption must begin at byte 0).
        - The read loop periodically emits `on_segment_progress` and honors
          pause at safe points.
        - When all retries are exhausted, `on_segment_error` is emitted and
          False is returned.
        """
        filename = seg['filename']
        out_path = os.path.join(self.output_dir, filename)
        temp_path = out_path + ".part"

        # Resume policy: a finished file counts as done; an existing .part is
        # appended to via Range when possible, otherwise re-downloaded.
        if os.path.exists(out_path):
            self.downloaded_segments.add(filename)
            return True

        uri = seg['uri']
        method = seg.get('method')
        key_bytes = seg.get('key_bytes')
        iv_hex = seg.get('iv_hex')
        seq = seg.get('seq')
        iv_bytes = self._derive_iv(iv_hex, seq)

        self._emit('on_segment_start', seq=seq, filename=filename, uri=uri)

        for attempt in range(self.retries):
            try:
                # Check for pause before every attempt.
                self._resume_event.wait()
                if self._stop_event.is_set():
                    raise InterruptedError("stopped")
                headers = {}
                # Encrypted segments never use Range resume: CBC decryption
                # has to start from the very first byte to be correct.
                encrypted = (method == "AES-128" and key_bytes and CRYPTO_AVAILABLE)
                existing = 0
                if os.path.exists(temp_path):
                    existing = os.path.getsize(temp_path)
                    if existing > 0:
                        if encrypted:
                            # Drop the leftover .part and restart from byte 0.
                            try:
                                os.remove(temp_path)
                            except Exception:
                                pass
                            existing = 0
                        else:
                            headers['Range'] = f"bytes={existing}-"

                started = time.time()
                seg_bytes = 0
                last_emit = started

                with self.session.get(uri, timeout=self.timeout, stream=True, headers=headers) as resp:
                    # Servers without Range support answer 200; 206 = partial.
                    if resp.status_code not in (200, 206):
                        resp.raise_for_status()
                    # A 200 means a full restart, so discard the .part file.
                    if resp.status_code == 200 and os.path.exists(temp_path):
                        os.remove(temp_path)
                    # Append only on a granted Range resume; otherwise truncate.
                    mode = 'ab' if (headers.get('Range') and resp.status_code == 206) else 'wb'
                    with open(temp_path, mode) as f:
                        if encrypted:
                            decryptor = Cipher(
                                algorithms.AES(key_bytes),
                                modes.CBC(iv_bytes),
                                backend=default_backend()
                            ).decryptor()
                            for chunk in resp.iter_content(chunk_size=1024 * 64):
                                # Pause check at a safe point in the stream.
                                self._resume_event.wait()
                                if self._stop_event.is_set():
                                    try:
                                        resp.close()
                                    except Exception:
                                        pass
                                    raise InterruptedError("stopped")
                                if not chunk:
                                    continue
                                seg_bytes += len(chunk)
                                with self._bytes_lock:
                                    self._bytes_downloaded += len(chunk)
                                dec = decryptor.update(chunk)
                                if dec:
                                    f.write(dec)
                                now = time.time()
                                elapsed = max(1e-6, now - started)
                                # Throttle progress events to every 0.5s.
                                if now - last_emit >= 0.5:
                                    self._emit('on_segment_progress', seq=seq, filename=filename, bytes_downloaded=seg_bytes, elapsed=elapsed, speed_bps=seg_bytes/elapsed)
                                    last_emit = now
                            final = decryptor.finalize()
                            if final:
                                f.write(final)
                        else:
                            for chunk in resp.iter_content(chunk_size=1024 * 64):
                                # Pause check at a safe point in the stream.
                                self._resume_event.wait()
                                if self._stop_event.is_set():
                                    try:
                                        resp.close()
                                    except Exception:
                                        pass
                                    raise InterruptedError("stopped")
                                if not chunk:
                                    continue
                                seg_bytes += len(chunk)
                                with self._bytes_lock:
                                    self._bytes_downloaded += len(chunk)
                                f.write(chunk)
                                now = time.time()
                                elapsed = max(1e-6, now - started)
                                # Throttle progress events to every 0.5s.
                                if now - last_emit >= 0.5:
                                    self._emit('on_segment_progress', seq=seq, filename=filename, bytes_downloaded=seg_bytes, elapsed=elapsed, speed_bps=seg_bytes/elapsed)
                                    last_emit = now
                shutil.move(temp_path, out_path)
                self.downloaded_segments.add(filename)
                elapsed = max(1e-6, time.time() - started)
                self._emit('on_segment_complete', seq=seq, filename=filename, path=out_path, elapsed=elapsed, size_bytes=seg_bytes, speed_bps=seg_bytes/elapsed)
                return True
            except InterruptedError:
                # Stop requested: keep the `.part` for later resume or
                # inspection; this is not reported as an error.
                return False
            except Exception as e:
                if attempt + 1 >= self.retries:
                    self._emit('on_segment_error', seq=seq, filename=filename, error=str(e))
                    print(f"分片下载失败 seq={seq}: {e}")
                    try:
                        if os.path.exists(temp_path):
                            os.remove(temp_path)
                    except Exception:
                        pass
                    return False
                else:
                    # Linear backoff before the next retry.
                    time.sleep(1.0 + attempt * 0.5)
        return False

    def _derive_iv(self, iv_hex: Optional[str], seq: Optional[int]) -> bytes:
        """Return the AES-CBC IV for a segment.

        Preference order: the explicit hex IV (with or without a `0x` prefix),
        then the media sequence number as a big-endian 128-bit value, then an
        all-zero IV when neither is available.
        """
        if iv_hex:
            hex_digits = iv_hex[2:] if iv_hex.startswith('0x') else iv_hex
            try:
                return bytes.fromhex(hex_digits)
            except Exception:
                pass  # malformed hex: fall through to the sequence-number IV
        return bytes(16) if seq is None else seq.to_bytes(16, byteorder='big')

    # ------------------------ Merging ------------------------
    def merge_to_mp4(self, segments: List[Dict], output_path: str) -> None:
        """Merge TS segments into an MP4 via ffmpeg, emitting progress events.

        Writes a concat list into the output directory, runs
        `ffmpeg -f concat -c copy`, and parses ffmpeg's combined output to
        emit `on_merge_progress` (percent of total duration processed).

        Raises:
            RuntimeError: ffmpeg exited with a non-zero status.
            FileNotFoundError: the ffmpeg executable is not installed.
        """
        def to_seconds(match):
            # ffmpeg prints HH:MM:SS.ss timestamps.
            hours, minutes, secs = int(match.group(1)), int(match.group(2)), float(match.group(3))
            return hours * 3600 + minutes * 60 + secs

        list_file = os.path.join(self.output_dir, 'concat_list.txt')
        with open(list_file, 'w', encoding='utf-8') as f:
            for s in segments:
                ts_path = os.path.join(self.output_dir, s['filename'])
                f.write(f"file '{ts_path}'\n")

        self._emit('on_merge_start', total_units=len(segments), unit_label='files', output_path=output_path)

        cmd = [
            'ffmpeg', '-y', '-f', 'concat', '-safe', '0',
            '-i', list_file,
            '-c', 'copy',
            output_path
        ]
        # stderr is folded into stdout so ffmpeg's progress lines can be parsed.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1)
        total = None
        for line in proc.stdout:
            # The source duration appears once near the start of the output.
            if 'Duration:' in line:
                m = re.search(r"Duration: (\d+):(\d+):(\d+\.\d+)", line)
                if m:
                    total = to_seconds(m)
            # Progress lines carry a running `time=` timestamp.
            if 'time=' in line and total:
                m = re.search(r"time=(\d+):(\d+):(\d+\.\d+)", line)
                if m:
                    processed = to_seconds(m)
                    pct = min(100.0, processed / total * 100.0)
                    self._emit('on_merge_progress', processed=processed, total=total, percent=pct)
        proc.wait()
        if proc.returncode != 0:
            raise RuntimeError('ffmpeg 合并失败')

        self._emit('on_merge_complete', output_path=output_path)

    def merge_to_ts(self, segments: List[Dict], output_path: str) -> None:
        """Merge TS segments into a single TS file via ffmpeg, emitting
        `on_merge_start` / `on_merge_progress` / `on_merge_complete` events.

        Raises:
            RuntimeError: ffmpeg exited with a non-zero status.
        """
        # Build the concat demuxer input list.
        list_file = os.path.join(self.output_dir, 'concat_list.txt')
        with open(list_file, 'w', encoding='utf-8') as f:
            for s in segments:
                ts_path = os.path.join(self.output_dir, s['filename'])
                f.write(f"file '{ts_path}'\n")

        self._emit('on_merge_start', total_units=len(segments), unit_label='files', output_path=output_path)

        cmd = [
            'ffmpeg', '-y', '-f', 'concat', '-safe', '0',
            '-i', list_file,
            '-c', 'copy',
            '-f', 'mpegts',
            output_path
        ]
        try:
            # stderr is folded into stdout so progress lines can be parsed.
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1)
            total = None
            processed = 0.0
            for line in proc.stdout:
                # The source duration appears once near the start of the output.
                if 'Duration:' in line:
                    m = re.search(r"Duration: (\d+):(\d+):(\d+\.\d+)", line)
                    if m:
                        h, mi, s = int(m.group(1)), int(m.group(2)), float(m.group(3))
                        total = h * 3600 + mi * 60 + s
                # Progress lines carry a running `time=` timestamp.
                if 'time=' in line and total:
                    m = re.search(r"time=(\d+):(\d+):(\d+\.\d+)", line)
                    if m:
                        h, mi, s = int(m.group(1)), int(m.group(2)), float(m.group(3))
                        cur = h * 3600 + mi * 60 + s
                        processed = cur
                        pct = min(100.0, processed / total * 100.0)
                        self._emit('on_merge_progress', processed=processed, total=total, percent=pct)
            proc.wait()
            if proc.returncode != 0:
                raise RuntimeError('ffmpeg TS 合并失败')
        finally:
            pass

        self._emit('on_merge_complete', output_path=output_path)

    def _cleanup_segments(self, segments: List[Dict]) -> None:
        # 删除已合并的分片及辅助文件
        for s in segments:
            try:
                ts_path = os.path.join(self.output_dir, s['filename'])
                if os.path.exists(ts_path):
                    os.remove(ts_path)
                part_path = ts_path + '.part'
                if os.path.exists(part_path):
                    os.remove(part_path)
            except Exception:
                pass
        list_file = os.path.join(self.output_dir, 'concat_list.txt')
        try:
            if os.path.exists(list_file):
                os.remove(list_file)
        except Exception:
            pass

    # ------------------------ 状态管理 ------------------------
    def _load_state(self) -> None:
        """加载已下载分片集合，支持断点续传。"""
        if os.path.exists(self.state_path):
            try:
                with open(self.state_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                    self.downloaded_segments = set(data.get('downloaded_segments', []))
            except Exception:
                self.downloaded_segments = set()

    def _save_state(self) -> None:
        """保存当前已下载分片集合至状态文件。"""
        try:
            with open(self.state_path, 'w', encoding='utf-8') as f:
                json.dump({
                    'downloaded_segments': list(self.downloaded_segments)
                }, f, ensure_ascii=False, indent=2)
        except Exception:
            pass

    # ------------------------ Pause / resume API ------------------------
    def pause(self) -> None:
        """Pause downloading: workers block at safe points; in-flight I/O is
        not interrupted."""
        self._resume_event.clear()

    def resume(self) -> None:
        """Resume downloading: unblocks task scheduling and the read loops."""
        self._resume_event.set()


def download_batch(urls: List[str], base_output_dir: str = "downloads", **kwargs) -> None:
    """Download several m3u8 URLs one after another.

    Sequential on purpose: one downloader at a time keeps total concurrency
    bounded by a single worker pool. Each URL gets its own `task_NNN`
    subdirectory under `base_output_dir`; extra keyword arguments are
    forwarded to `M3U8Downloader`.
    """
    os.makedirs(base_output_dir, exist_ok=True)
    for idx, url in enumerate(urls, start=1):
        sub_dir = os.path.join(base_output_dir, "task_{:03d}".format(idx))
        print("开始批量第{}项: {}".format(idx, url))
        M3U8Downloader(url=url, output_dir=sub_dir, **kwargs).download()
        print("完成: {}".format(sub_dir))