import re
import os
import hashlib
import shutil
import subprocess
from urllib.parse import urljoin, urlparse
from concurrent.futures import ThreadPoolExecutor, as_completed

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from playwright.sync_api import sync_playwright

# ====== Configuration ======
PAGE_URL = "https://www.yinhuadm.xyz/p/4870-1-8.html"  # episode page opened in the browser
PAT = re.compile(r"\.m3u8($|\?|#)", re.I)  # URL ends in .m3u8, optionally followed by ?query or #fragment
MAX_WORKERS = 100  # thread-pool size for segment downloads
REQUEST_TIMEOUT = 20  # per-HTTP-request timeout, seconds
EXPECT_TIMEOUT = 30000  # Playwright expect_event timeout, milliseconds (30 s)

# ====== HTTP session (connection pool + retries) ======
def make_session():
    """Build a requests.Session with browser-like headers and a GET retry policy."""
    retry_policy = Retry(
        total=5,
        connect=3,
        read=3,
        backoff_factor=0.5,
        status_forcelist=(429, 500, 502, 503, 504),
        allowed_methods=frozenset(["GET"]),
    )
    session = requests.Session()
    session.headers.update({
        "User-Agent": ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                       "AppleWebKit/537.36 (KHTML, like Gecko) "
                       "Chrome/124.0.0.0 Safari/537.36"),
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
    })
    # One adapter per scheme, each with its own pool, matching retry behavior.
    for scheme in ("http://", "https://"):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy,
                                          pool_connections=50, pool_maxsize=50))
    return session

# ====== Download a single segment ======
def download_one(session: requests.Session, ts_url: str, save_path: str, overwrite=False):
    """Fetch one segment to save_path.

    Returns a tagged status string ("[OK]"/"[SKIP]"/"[ERR] ...") and never
    raises — errors are reported in the return value so the thread pool can
    keep going.
    """
    fname = os.path.basename(save_path)
    try:
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        # A non-empty file on disk counts as already downloaded.
        have_file = os.path.exists(save_path) and os.path.getsize(save_path) > 0
        if have_file and not overwrite:
            return f"[SKIP] {fname}"
        with session.get(ts_url, stream=True, timeout=REQUEST_TIMEOUT) as resp:
            resp.raise_for_status()
            with open(save_path, "wb") as out:
                # Stream in 16 KiB chunks to keep memory flat.
                for block in resp.iter_content(1 << 14):
                    if block:
                        out.write(block)
        return f"[OK]   {fname}"
    except Exception as exc:
        return f"[ERR]  {fname} -> {exc}"

# ====== Parse an m3u8 (return the segment URL list; also detect encryption) ======
def parse_m3u8(session: requests.Session, m3u8_url: str):
    """Fetch a media playlist and return (segment_urls, encrypted).

    Segment URIs are resolved to absolute URLs against m3u8_url; encrypted is
    True when any #EXT-X-KEY tag is present.
    """
    resp = session.get(m3u8_url, timeout=REQUEST_TIMEOUT)
    resp.raise_for_status()

    segments = []
    encrypted = False
    for raw in resp.text.strip().splitlines():
        entry = raw.strip()
        if not entry:
            continue
        if entry.startswith("#"):
            # Tags are skipped, but an #EXT-X-KEY tag flags encrypted content.
            encrypted = encrypted or entry.startswith("#EXT-X-KEY")
            continue
        segments.append(urljoin(m3u8_url, entry))
    return segments, encrypted

# ====== Merge segments with ffmpeg ======
def ffmpeg_concat(out_dir: str, basename: str = "output"):
    """Merge the .ts segments listed in out_dir/filelist.txt via ffmpeg.

    First attempts a lossless concat (-c copy); if that fails, falls back to
    re-encoding to H.264/AAC MP4. Progress and diagnostics are printed;
    returns None in every case. If ffmpeg is not on PATH, only prints the
    manual command to run.
    """
    ffmpeg_path = shutil.which("ffmpeg")
    list_path = os.path.join(out_dir, "filelist.txt")
    if not os.path.exists(list_path):
        print("[merge] 未找到 filelist.txt，跳过合并")
        return

    if not ffmpeg_path:
        print("[merge] 未检测到 ffmpeg，请安装后将其加入 PATH。")
        print("        Windows 可安装 ffmpeg 并把 bin 目录加入系统环境变量 PATH。")
        print(f"        手动合并命令：ffmpeg -f concat -safe 0 -i \"{list_path}\" -c copy \"{os.path.join(out_dir, basename + '.ts')}\"")
        return

    # 1) Try lossless concat first (fast, no re-encode).
    out_ts = os.path.join(out_dir, f"{basename}.ts")
    cmd_copy = [ffmpeg_path, "-y", "-f", "concat", "-safe", "0", "-i", list_path, "-c", "copy", out_ts]
    print("[merge] 尝试无损合并（-c copy）…")
    r = subprocess.run(cmd_copy, capture_output=True, text=True)
    if r.returncode == 0 and os.path.exists(out_ts) and os.path.getsize(out_ts) > 0:
        print(f"[merge] 合并成功（无损）：{out_ts}")
        return
    else:
        print("[merge] 无损合并失败，可能是分片跨清晰度/编解码不一致。回退到重编码…")
        # Surface ffmpeg's stderr to help diagnose the failure.
        if r.stderr:
            print("[ffmpeg stderr]\n", r.stderr[-2000:])  # tail only, to avoid flooding the console

    # 2) Fall back to re-encoding (slowest, but most broadly compatible).
    out_mp4 = os.path.join(out_dir, f"{basename}.mp4")
    cmd_reencode = [
        ffmpeg_path, "-y", "-f", "concat", "-safe", "0", "-i", list_path,
        "-c:v", "libx264", "-c:a", "aac", "-movflags", "+faststart", out_mp4
    ]
    r2 = subprocess.run(cmd_reencode, capture_output=True, text=True)
    if r2.returncode == 0 and os.path.exists(out_mp4) and os.path.getsize(out_mp4) > 0:
        print(f"[merge] 合并成功（重编码）：{out_mp4}")
    else:
        print("[merge] 重编码也失败了，请检查分片是否完整/加密，或手动执行以下命令调试：")
        print("  无损：", " ".join(cmd_copy))
        print("  重编：", " ".join(cmd_reencode))
        if r2.stderr:
            print("[ffmpeg stderr]\n", r2.stderr[-2000:])

# ====== Main flow ======
def run():
    """Open PAGE_URL in a real browser, sniff the first .m3u8 request, then
    download every segment concurrently and merge them with ffmpeg.

    Side effects: writes segments + filelist.txt under ./downloads/<id>/ and
    invokes ffmpeg. Raises on navigation timeout or HTTP errors on the index.
    """
    with sync_playwright() as pw:
        browser = pw.chromium.launch(headless=False)
        ctx = browser.new_context()
        page = ctx.new_page()

        try:
            # Wait for the first network request whose URL matches *.m3u8.
            with page.expect_event(
                "request",
                predicate=lambda r: PAT.search(r.url) is not None,
                timeout=EXPECT_TIMEOUT,
            ) as req_info:
                page.goto(PAGE_URL, wait_until="domcontentloaded")

            req = req_info.value
            index_url = str(req.url)
            print("[m3u8 index]", index_url)

            sess = make_session()

            # Master/variant handling: pick a child playlist if the index is a
            # master playlist. Match with PAT rather than .endswith(".m3u8") so
            # variant URIs carrying query strings ("hls/index.m3u8?sign=...")
            # are recognized too.
            index_resp = sess.get(index_url, timeout=REQUEST_TIMEOUT)
            index_resp.raise_for_status()  # fail fast instead of "no child m3u8" later
            master_lines = index_resp.text.strip().splitlines()
            candidates = [
                ln.strip() for ln in master_lines
                if ln.strip() and not ln.strip().startswith("#") and PAT.search(ln.strip())
            ]
            if candidates:
                # Simple strategy: take the last variant listed.
                media_m3u8 = urljoin(index_url, candidates[-1])
            else:
                # No child playlists: the sniffed index is already a media
                # playlist, so download it directly instead of erroring out.
                media_m3u8 = index_url
            print("[m3u8 media]", media_m3u8)

            # Parse the media playlist into absolute segment URLs.
            seg_urls, encrypted = parse_m3u8(sess, media_m3u8)
            print(f"[m3u8 segs] 分片数：{len(seg_urls)}")
            if encrypted:
                print("[warn] 检测到 #EXT-X-KEY（加密）。该脚本未实现解密，若分片加密将无法正常合并。")

            # Output directory: host+path plus a short URL hash so different
            # playlists never collide on disk.
            parsed = urlparse(media_m3u8)
            base_id = (parsed.netloc + parsed.path).replace("/", "_").strip("_")
            base_id = f"{base_id}_{hashlib.md5(media_m3u8.encode()).hexdigest()[:8]}"
            out_dir = os.path.join(os.getcwd(), "downloads", base_id)
            os.makedirs(out_dir, exist_ok=True)
            print("[save dir]", out_dir)

            # Concurrent download. NOTE(review): MAX_WORKERS (100) exceeds the
            # session's pool_maxsize (50), so excess connections are opened and
            # discarded — consider aligning the two.
            print(f"[download] workers={MAX_WORKERS}")
            results = []
            with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool:
                futs = [
                    pool.submit(download_one, sess, u,
                                os.path.join(out_dir, f"{i:05d}.ts"))
                    for i, u in enumerate(seg_urls)
                ]
                for fut in as_completed(futs):
                    msg = fut.result()
                    results.append(msg)
                    # Print OK/ERR as they finish; SKIPs stay quiet.
                    if msg.startswith("[ERR]") or msg.startswith("[OK]"):
                        print(msg)
            errs = [x for x in results if x.startswith("[ERR]")]
            print(f"[done] total={len(results)}  ok/skip={len(results)-len(errs)}  err={len(errs)}")

            # filelist.txt with paths relative to out_dir, for ffmpeg's concat demuxer.
            list_path = os.path.join(out_dir, "filelist.txt")
            with open(list_path, "w", encoding="utf-8") as f:
                for i in range(len(seg_urls)):
                    f.write(f"file '{i:05d}.ts'\n")
            print(f"[hint] 已生成: {list_path}")

            # Auto-merge the downloaded segments.
            ffmpeg_concat(out_dir, basename="output")

        finally:
            try:
                browser.close()
            except Exception:
                pass

# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    run()
