#!/usr/bin/env python3
"""
Simple MHT to HTML extractor.

Saves embedded binary parts (images/audio/video/other) into an `att` folder
and rewrites HTML references to point to that folder.

Usage:
    python mht_to_html.py <input.mht> [--out <out.html>] [--att <att_dir>]

This script uses the stdlib `email` parser and makes a best-effort to
guess file extensions from Content-Type or from magic bytes.
"""

# 说明（中文）:
# 本脚本用于将 QQ 导出的 .mht/.mhtml 文件解析为单独的 HTML 文件，
# 并将嵌入的二进制资源（图片、音频、视频等）提取到一个 `att/` 目录下。
# 主要步骤：
# 1) 手动解析 MHT 的 MIME multipart 部分（通过 boundary 分割），
# 2) 对每个 part 读取 headers（Content-Type, Content-Location, Content-ID 等），
# 3) 根据 Content-Transfer-Encoding 解码正文并猜测扩展名（或使用 Content-Type 映射），
# 4) 将非空的资源写入到 `att/`，并记录原始 key -> 相对路径 的映射，
# 5) 使用 BeautifulSoup 精确替换 HTML 中的 src/href 引用，指向提取出来的文件，
# 6) 按内容哈希去重并静默删除重复文件，更新引用；将无法识别的 part-*.dat 移到 archive/ 以便排查。

import argparse
import os
import hashlib
from pathlib import Path
# email parser not used in manual parsing

# MIME content-type -> preferred file extension for saved attachments.
# Consulted before falling back to magic-byte sniffing in guess_ext_from_bytes().
EXT_MAP = {
    "image/jpeg": ".jpg",
    "image/pjpeg": ".jpg",
    "image/png": ".png",
    "image/gif": ".gif",
    "image/bmp": ".bmp",
    "image/webp": ".webp",
    "audio/mpeg": ".mp3",
    "audio/mp3": ".mp3",
    "audio/wav": ".wav",
    "video/mp4": ".mp4",
    "video/mpeg": ".mpg",
}


def guess_ext_from_bytes(blob, ctype=None):
    """Guess a file extension for a binary payload.

    Preference order: the known Content-Type mapping (EXT_MAP) when
    ``ctype`` is given, then magic bytes at the start of ``blob``.
    Returns ".dat" when nothing matches or ``blob`` is empty.
    """
    # Prefer the explicit MIME-type -> extension mapping when available.
    if ctype and ctype in EXT_MAP:
        return EXT_MAP[ctype]
    if not blob:
        return ".dat"
    if blob.startswith(b"\xff\xd8"):  # JPEG SOI marker
        return ".jpg"
    if blob.startswith(b"\x89PNG"):
        return ".png"
    if blob[:3] == b"GIF":  # "GIF87a" / "GIF89a"
        return ".gif"
    if blob[:4] == b"RIFF" and b"WAVE" in blob[:12]:
        return ".wav"
    # MP4 (ISO BMFF): the "ftyp" brand normally starts at byte offset 4,
    # after the 4-byte box size, so the old offset-0 check missed real MP4s.
    # Scanning the first 12 bytes covers both layouts.  The bare
    # b"\x00\x00\x00" prefix test is a loose legacy heuristic kept for
    # compatibility (a small leading box size also begins with zero bytes).
    if b"ftyp" in blob[:12] or b"moov" in blob[:12] or blob[:3] == b"\x00\x00\x00":
        return ".mp4"
    return ".dat"


def safe_name(name: str) -> str:
    """Sanitize a filename for the attachments directory.

    Drops a leading "./" prefix and replaces path separators with
    underscores; braces and other characters are preserved.

    Fix: the previous ``.lstrip("./")`` ran AFTER the separators were
    replaced, so the intended "./" prefix was never removed (the slash had
    already become "_"), and because ``lstrip`` treats its argument as a
    character set it stripped arbitrary runs of leading dots instead.
    """
    if name.startswith("./"):
        name = name[2:]
    return name.replace("\\", "_").replace("/", "_")


def main() -> None:
    """Extract a QQ-exported .mht/.mhtml file into a standalone HTML file.

    Embedded binary parts (images/audio/video/other) are written into an
    attachments directory, duplicate content is de-duplicated by SHA-256
    hash, HTML src/href references are rewritten to point at the extracted
    files, and unidentifiable part-*.dat files are moved into an archive/
    folder for inspection.  Side effects: creates output directories,
    writes files, and prints progress/diagnostic messages.
    """
    p = argparse.ArgumentParser()
    p.add_argument("mht", help="input .mht/.mhtml file")
    p.add_argument("--out", help="output html file (default: same name .html)")
    p.add_argument("--att", help="attachments directory (default: <mht_parent>/att)")
    args = p.parse_args()

    mht_path = Path(args.mht).resolve()
    if not mht_path.exists():
        print("ERROR: input file not found:", mht_path)
        return

    # Default output: output/<mht-stem>/
    stem = mht_path.stem
    base_out_dir = Path("output") / stem
    out_html = Path(args.out) if args.out else (base_out_dir / f"{stem}.html")
    att_dir = Path(args.att) if args.att else (base_out_dir / "att")
    att_dir.mkdir(parents=True, exist_ok=True)
    archive_dir = base_out_dir / "archive"
    archive_dir.mkdir(parents=True, exist_ok=True)

    # Build a content-hash map of files already present in the att directory
    # so that identical content can be reused instead of written again.
    hash_map = {}
    try:
        for f in att_dir.iterdir():
            if not f.is_file():
                continue
            try:
                h = hashlib.sha256()
                with open(f, "rb") as fh:
                    for chunk in iter(lambda: fh.read(8192), b""):
                        h.update(chunk)
                digest = h.hexdigest()
                # Record only the first path seen per digest (the reuse target).
                if digest not in hash_map:
                    hash_map[digest] = f
            except Exception:
                continue
    except Exception:
        hash_map = {}

    # Read the whole MHT as raw bytes so the multipart boundaries and the
    # individual parts can be parsed manually.
    raw = mht_path.read_bytes()

    # Try to find the multipart "boundary" parameter near the file header
    # (it is used to split the individual MIME parts).
    import re
    import base64
    import quopri

    m = re.search(rb'boundary="([^"]+)"', raw)
    if not m:
        m = re.search(rb"boundary=([^;\r\n]+)", raw)
    if not m:
        print("Could not find MIME boundary in MHT; aborting.")
        return
    boundary = m.group(1)

    # html: the final HTML content to write out (preferably taken from the
    # MHT's text/html part)
    html = None
    # saved_map: original part key (Content-Location/Content-ID/filename/
    # part-N) -> relative path of the saved attachment
    saved_map = {}
    # counter: fallback part number (e.g. part-0, part-1)
    counter = 0

    # Split parts by the exact boundary marker
    parts = raw.split(b"--" + boundary)
    # Count the effective parts (excluding empties and the closing marker) to
    # decide whether periodic progress output should be shown.
    total_parts = sum(1 for chunk in parts if chunk and chunk.strip() not in (b"--", b""))
    # Enable periodic progress output when there are many parts; 50+ parts is
    # treated as a "large" file.
    show_progress = total_parts >= 50
    # Progress print interval: a fraction of the total when there are many
    # parts, with a minimum interval of 20.
    if show_progress:
        progress_interval = max(20, total_parts // 20)
    else:
        progress_interval = None

    parts_processed = 0
    files_written = 0
    bytes_written = 0
    for chunk in parts:
        if not chunk or chunk.strip() in (b"--", b""):
            continue
        parts_processed += 1
        # each chunk contains headers, a blank line, then body
        try:
            head, body = chunk.split(b"\r\n\r\n", 1)
        except ValueError:
            # try LF only
            try:
                head, body = chunk.split(b"\n\n", 1)
            except Exception:
                continue

    # Parse each part's headers with a simple line split (multi-line folded
    # headers are not handled).  Common fields: Content-Type,
    # Content-Location, Content-ID, Content-Transfer-Encoding,
    # Content-Disposition.  Decoding is lenient (errors ignored) to cope
    # with non-standard QQ-exported MHT files; email.parser would be
    # stricter, but hand parsing has proven more robust for QQ MHT.
    # parse headers
        headers = {}
        for line in head.splitlines():
            if b":" in line:
                k, v = line.split(b":", 1)
                headers[k.decode("utf-8", errors="ignore").strip()] = v.decode(
                    "utf-8", errors="ignore"
                ).strip()

        # Main Content-Type (without parameters); defaults to octet-stream
        ctype = (
            headers.get("Content-Type", "").split(";")[0].strip()
            or "application/octet-stream"
        )
        content_location = headers.get("Content-Location")
        content_id = headers.get("Content-ID")
        cte = headers.get("Content-Transfer-Encoding", "").lower()
        disp = headers.get("Content-Disposition", "")

        filename = None
        if "filename=" in disp:
            fn = disp.split("filename=")[1].strip()
            if fn.startswith(('"', "'")) and fn.endswith(('"', "'")):
                fn = fn[1:-1]
            filename = fn

        # Clean the body: drop a possible leading CRLF and truncate any
        # trailing boundary marker line (some MHT files include the
        # separator line inside the body, so it must be removed).
        if body.startswith(b"\r\n"):
            body = body[2:]
        if body.startswith(b"\n"):
            body = body[1:]

        # strip final boundary marker if present
        body = re.split(
            rb"\r?\n--" + re.escape(boundary) + rb".*$", body, flags=re.DOTALL
        )[0]

    # Decode the body according to Content-Transfer-Encoding; base64 and
    # quoted-printable are supported.
        payload_bytes = body
        if cte == "base64":
            try:
                payload_bytes = base64.b64decode(body, validate=False)
            except Exception:
                payload_bytes = base64.b64decode(body + b"==")
        elif cte in ("quoted-printable", "quotedprintable"):
            payload_bytes = quopri.decodestring(body)

    # If this is a text/html part, take it as the final HTML (only the first
    # text/html part is used).  The HTML may be utf-8 or gbk encoded: utf-8
    # is tried first, with gbk as the fallback.  Store it in `html` and skip
    # the file-writing logic below.
        if ctype == "text/html" and html is None:
            try:
                html = payload_bytes.decode("utf-8", errors="replace")
            except Exception:
                try:
                    html = payload_bytes.decode("gbk", errors="replace")
                except Exception:
                    html = ""
            continue

    # Choose the part's key/name, used both for the target filename and for
    # finding references in the HTML.  Preference: Content-Disposition
    # filename, then Content-Location, then Content-ID, finally part-N.
        cid_clean = None
        if content_id:
            cid_clean = content_id.strip()
            if cid_clean.startswith("<") and cid_clean.endswith(">"):
                cid_clean = cid_clean[1:-1]

        key = filename or content_location or cid_clean or f"part-{counter}"
        counter += 1

    # Determine the extension from Content-Type or from the first N bytes
    # (Content-Type mapping first, magic bytes as fallback).
        content_ext = None
        if ctype and ctype in EXT_MAP:
            content_ext = EXT_MAP[ctype]

        ext = content_ext or guess_ext_from_bytes(payload_bytes[:64], ctype)

        base = safe_name(Path(key).name)
        name_root, name_ext = os.path.splitext(base)
        # Use Content-Type extension when available (override .dat or any ext if user prefers strict mapping)
        if content_ext:
            dest_name = name_root + content_ext
        else:
            if name_ext:
                dest_name = base
            else:
                dest_name = base + ext

        dest = att_dir / dest_name

        # If a previous run left a same-stem .dat (no real extension) and a
        # better extension is now known, rename the .dat to the proper
        # extension; this avoids duplicates and keeps readable extensions.
        try:
            name_root = Path(dest_name).stem
        except Exception:
            name_root = Path(dest).stem

        if content_ext:
            dat_candidate = att_dir / (name_root + ".dat")
            alt_dest = att_dir / (name_root + content_ext)
            if dat_candidate.exists() and not alt_dest.exists():
                # rename existing .dat to the correct extension to avoid duplicates
                try:
                    dat_candidate.replace(alt_dest)
                    dest = alt_dest
                except Exception:
                    pass

        i = 1
        while dest.exists():
            dest = att_dir / f"{Path(dest_name).stem}_{i}{Path(dest_name).suffix}"
            i += 1

    # If the payload is empty, print diagnostics and skip both the write and
    # the mapping so no 0-byte files are produced.
        if not payload_bytes:
            info_items = [f"key={key!r}", f"ctype={ctype}", f"cte={cte}"]
            if filename:
                info_items.append(f"filename={filename!r}")
            if content_location:
                info_items.append(f"content-location={content_location!r}")
            if content_id:
                info_items.append(f"content-id={content_id!r}")
            print("[skip-empty] " + " ".join(info_items))
            # No empty file is created and no entry is added to saved_map.
        else:
            # Hash the payload; if a file with identical content already
            # exists, reuse it instead of writing a duplicate (e.g. one with
            # a _1 suffix).
            try:
                h = hashlib.sha256(payload_bytes).hexdigest()
            except Exception:
                h = None

            if h and h in hash_map:
                # A file with identical content exists: reuse it (no
                # duplicate write) and point saved_map at its relative path.
                existing = hash_map[h]
                rel = os.path.relpath(existing, out_html.parent)
                saved_map[str(key)] = rel.replace("\\", "/")
            else:
                # No identical content on disk yet: write a new file.
                with open(dest, "wb") as f:
                    f.write(payload_bytes)

                # Track write counts/sizes for the progress report.
                written = len(payload_bytes)
                files_written += 1
                bytes_written += written

                rel = os.path.relpath(dest, out_html.parent)
                saved_map[str(key)] = rel.replace("\\", "/")

                # Add the new file to hash_map so later parts can reuse it.
                if h:
                    try:
                        hash_map[h] = dest
                    except Exception:
                        pass

        # Periodically print progress (only enabled for large files), using a
        # compact one-line format for easy scanning.
        if show_progress and progress_interval and parts_processed % progress_interval == 0:
            print(f"[progress] parts {parts_processed}/{total_parts}, files {files_written}, bytes {bytes_written}")

    # fallback: if no html part found, try to extract HTML section from raw bytes
    if html is None:
        try:
            m = re.search(rb"(<html[\s\S]*?</html>)", raw, flags=re.IGNORECASE)
            if m:
                html = m.group(1).decode("utf-8", errors="replace")
            else:
                html = ""
        except Exception:
            html = ""

    # NOTE(review): unreachable — the fallback above always leaves html as a
    # str (either the matched section or ""), never None.
    if html is None:
        print("No HTML part found in MHT.")
        return

    # Use BeautifulSoup to update HTML references safely (only src and href
    # attributes are replaced), avoiding the accidental replacements a plain
    # string substitution could make elsewhere in the document.
    try:
        from bs4 import BeautifulSoup

        soup = BeautifulSoup(html, "html.parser")

        def map_value(val) -> str:
            if not val:
                return val
            new = val
            # check full URL/path and filename occurrences
            for orig, newrel in saved_map.items():
                # try variants
                variants = [orig]
                if orig.startswith("<") and orig.endswith(">"):
                    variants.append(orig[1:-1])
                variants.append("{" + orig + "}")
                variants.append(Path(orig).name)
                for v in variants:
                    if not v:
                        continue
                    if v in new:
                        new = new.replace(v, newrel)
            return new

        # Replace the src attribute on every tag that has one.
        for tag in soup.find_all(src=True):
            old = tag["src"]
            tag["src"] = map_value(old)
        # Replace the href attribute on every tag that has one.
        for tag in soup.find_all(href=True):
            old = tag["href"]
            tag["href"] = map_value(old)

        html = str(soup)
    except Exception:
        # fallback to safer string-based replacement if BeautifulSoup unavailable
        for orig, newrel in saved_map.items():
            variants = [orig]
            if orig.startswith("<") and orig.endswith(">"):
                variants.append(orig[1:-1])
            variants.append("{" + orig + "}")
            for v in variants:
                if v and isinstance(html, str) and v in html:
                    html = html.replace(v, newrel)

    out_html.parent.mkdir(parents=True, exist_ok=True)
    out_html.write_text(html, encoding="utf-8")

    print("Wrote HTML:", out_html)
    print("Saved attachments to:", att_dir)

    # Cleanup: remove .dat duplicates if a better-typed file exists for same stem
    # (i.e. delete leftover .dat files when a file with the same stem but a
    # more specific extension exists).
    try:
        for f in att_dir.iterdir():
            if f.suffix.lower() == ".dat":
                stem = f.stem
                # look for any other file with same stem but different suffix
                others = list(att_dir.glob(f"{stem}.*"))
                more = [o for o in others if o.suffix.lower() != ".dat"]
                if more:
                    try:
                        f.unlink()
                    except Exception:
                        pass
    except Exception:
        pass

    # Automatic cleanup: delete duplicate files by content hash, keeping one
    # preferred file per digest.
    try:
        digest_map = {}
        for f in att_dir.iterdir():
            if not f.is_file():
                continue
            try:
                h = hashlib.sha256()
                with open(f, "rb") as fh:
                    for chunk in iter(lambda: fh.read(8192), b""):
                        h.update(chunk)
                d = h.hexdigest()
                digest_map.setdefault(d, []).append(f)
            except Exception:
                continue

        preferred_exts = set(EXT_MAP.values())
        for d, files in digest_map.items():
            if len(files) <= 1:
                continue
            # Choose the file to keep: prefer one referenced by this run's
            # saved_map, then common/preferred extensions, finally a stable
            # name-based ordering.
            name_to_file = {f.name: f for f in files}
            keep = None
            for v in saved_map.values():
                nm = Path(v).name
                if nm in name_to_file:
                    keep = name_to_file[nm]
                    break
            if keep is None:
                files_sorted = sorted(
                    files,
                    key=lambda f: (
                        0 if f.suffix.lower() in preferred_exts else 1,
                        f.name.count("_"),
                        f.name,
                    ),
                )
                keep = files_sorted[0]

            # Delete the other duplicates and update references in saved_map.
            for f in files:
                if f == keep:
                    continue
                try:
                    f.unlink()
                except Exception:
                    pass

            # Repoint saved_map entries that referenced a deleted file.
            for k, v in list(saved_map.items()):
                if Path(v).name in [ff.name for ff in files] and Path(v).name != keep.name:
                    newrel = os.path.relpath(keep, out_html.parent).replace("\\", "/")
                    saved_map[k] = newrel
    except Exception:
        pass

    # Move any part-*.dat into archive (to avoid clutter); keep them there
    # for inspection, and report the original key/filename information.
    try:
        for p in list(att_dir.glob("part-*.dat")):
            # Reverse-lookup saved_map to report the original key.
            orig_keys = [
                k for k, v in saved_map.items() if os.path.basename(v) == p.name
            ]
            info = f"[archive] {p.name}"
            if orig_keys:
                info += f"  <- 原始名: {orig_keys} "
            print(info)
            try:
                p.replace(archive_dir / p.name)
            except Exception:
                pass
    except Exception:
        pass


if __name__ == "__main__":
    main()
