#!/usr/bin/env python3
import csv
import os
from pathlib import Path
import re
import zipfile
import tarfile

import re
from pathlib import Path
import zipfile
import tarfile

# --- Configuration ---
_MAX_ARCHIVE_READ_BYTES = 1024 * 64  # max bytes read from each file inside an archive (64 KB)
_MAX_SOURCE_SIZE = 2 * 1024 * 1024  # 2 MB; larger harness sources are only partially read

# Magic-number and keyword tables (extend as needed).
# _MAGIC_PATTERNS maps a file's leading magic bytes to a seed-type label.
_MAGIC_PATTERNS = {
    b"\x89PNG": "PNG",
    b"%PDF": "PDF",
    b"PK\x03\x04": "ZIP",
    b"GIF8": "GIF",
    b"\xFF\xD8\xFF": "JPEG",
    b"\x1A\x45\xDF\xA3": "MKV/WebM",
    b"\xD4\xC3\xB2\xA1": "PCAP",
    b"II*\x00": "TIFF",
    b"BM": "BMP",
    b"OggS": "OGG",
    b"ID3": "MP3",
}

# Keywords/library names searched for in harness source bodies (word-bounded).
_LIB_KEYWORDS = {
    # images
    "png": "PNG", "jpeg": "JPEG", "jpg": "JPEG", "gif": "GIF", "tiff": "TIFF", "bmp": "BMP", "webp": "WEBP",
    # documents
    "pdf": "PDF", "xml": "XML", "json": "JSON", "html": "HTML",
    # compression/packing
    "zlib": "ZIP", "inflate": "ZIP", "deflate": "ZIP", "lz4": "LZ4", "zstd": "ZSTD", "tar": "TAR",
    # media/network/protocols
    "mp3": "MP3", "ogg": "OGG", "wav": "WAV", "flac": "FLAC", "pcap": "PCAP", "http": "HTTP", "tls": "TLS", "mqtt": "MQTT",
    # data structures
    "protobuf": "PROTOBUF", "yaml": "YAML", "csv": "CSV",
    # fonts/rendering
    "freetype": "FONT", "harfbuzz": "FONT",
    # video
    "av1": "VIDEO", "hevc": "VIDEO", "h264": "VIDEO", "mkv": "MKV/WebM", "mp4": "VIDEO",
}

# File-name extension hints (checked with str.endswith, first match wins).
_EXT_HINTS = {
    ".png": "PNG", ".jpg": "JPEG", ".jpeg": "JPEG", ".gif": "GIF", ".pdf": "PDF", ".pcap": "PCAP",
    ".json": "JSON", ".xml": "XML", ".yaml": "YAML", ".yml": "YAML", ".mp3": "MP3", ".mp4": "VIDEO",
    ".zip": "ZIP", ".tar": "TAR", ".tgz": "TAR",
}

# --- Helper functions ---
def _target_name_from_harness(harness_path: Path) -> str:
    """
    从 harness 文件名推断目标名：
    - 去掉扩展名
    - 去掉常见后缀 _fuzz/_fuzzer/_harness/-fuzz/_test 等
    """
    base = harness_path.stem
    for suf in ("_fuzz", "_fuzzer", "_harness", "-fuzz", "_test"):
        if base.endswith(suf):
            return base[: -len(suf)]
    return base

def _type_from_magic_bytes(data: bytes):
    """Return the type label whose magic number appears at the start of *data*.

    Returns None when no known magic matches.

    Fix: the previous implementation used substring search (``magic in data``),
    but a file-format magic number is only meaningful at offset 0.  With the
    64 KB prefixes read from archives, two-byte magics such as b"BM" matched
    almost any binary content, producing spurious type labels; anchoring with
    ``startswith`` matches the documented "judge by the leading bytes" intent.
    """
    for magic, type_name in _MAGIC_PATTERNS.items():
        if data.startswith(magic):
            return type_name
    return None

def _guess_type_from_name(fname: str):
    """Guess a seed type from the file name's extension via _EXT_HINTS.

    Returns the type label of the first matching extension, or None.
    """
    name = fname.lower()
    return next(
        (hint for ext, hint in _EXT_HINTS.items() if name.endswith(ext)),
        None,
    )

def _analyze_dir_types(d: Path):
    """Classify every file under directory *d* by extension, then magic bytes.

    Returns a comma-joined, sorted string of type labels, or 'none' when
    nothing could be classified.
    """
    found = set()
    # Recursive walk; corpus directories are typically small, so no
    # depth/count limiting is needed.
    for entry in d.rglob("*"):
        if entry.is_dir():
            continue
        # Extension hint first; fall back to sniffing the leading bytes.
        hint = _guess_type_from_name(entry.name)
        if hint is None:
            try:
                with entry.open("rb") as fh:
                    # 256 bytes is plenty for every magic in the table.
                    hint = _type_from_magic_bytes(fh.read(256))
            except Exception:
                continue  # unreadable file: skip silently
        if hint:
            found.add(hint)
    return ",".join(sorted(found)) if found else "none"

def _analyze_zip_types(zip_path: Path, target_guess: str = None):
    """Classify file types inside a zip archive by entry name and leading bytes.

    When *target_guess* is given and some entry names contain it, only those
    entries are examined.  Returns comma-joined sorted type labels, or "none"
    on failure or when nothing matched.
    """
    found = set()
    try:
        with zipfile.ZipFile(zip_path, "r") as archive:
            entries = archive.namelist()
            # Narrow to entries whose name contains the target guess, if any.
            candidates = entries
            if target_guess:
                guess = target_guess.lower()
                hits = [name for name in entries if guess in name.lower()]
                if hits:
                    candidates = hits
            for name in candidates:
                # Extension hint first, then sniff a bounded prefix.
                hint = _guess_type_from_name(name.lower())
                if hint:
                    found.add(hint)
                    continue
                try:
                    with archive.open(name) as member:
                        magic_hint = _type_from_magic_bytes(
                            member.read(_MAX_ARCHIVE_READ_BYTES)
                        )
                    if magic_hint:
                        found.add(magic_hint)
                except Exception:
                    continue  # unreadable member: skip
    except Exception:
        return "none"
    return ",".join(sorted(found)) if found else "none"

def _analyze_tar_types(tar_path: Path, target_guess: str = None):
    """Classify file types inside a tar(.gz/.tgz) archive.

    When *target_guess* is given and some member names contain it, only those
    members are examined.  Returns comma-joined sorted type labels, or "none"
    on failure or when nothing matched.

    Fixes over the original:
    - filter the members directly instead of a per-member ``name in list``
      check, which was O(n^2) over the archive size;
    - close the file object returned by ``extractfile`` (it was leaked).
    """
    types = set()
    try:
        with tarfile.open(tar_path, "r:*") as tf:
            members = [m for m in tf.getmembers() if m.isreg()]
            candidates = members
            if target_guess:
                guess = target_guess.lower()
                matched = [m for m in members if guess in m.name.lower()]
                if matched:
                    candidates = matched
            for member in candidates:
                # Extension hint first.
                t = _guess_type_from_name(member.name)
                if t:
                    types.add(t)
                    continue
                # Fall back to sniffing a bounded prefix of the member.
                try:
                    fh = tf.extractfile(member)
                    if fh is None:  # extractfile returns None for non-files
                        continue
                    with fh:
                        head = fh.read(_MAX_ARCHIVE_READ_BYTES)
                    mt = _type_from_magic_bytes(head)
                    if mt:
                        types.add(mt)
                except Exception:
                    continue
    except Exception:
        return "none"
    return ",".join(sorted(types)) if types else "none"

def _extract_fuzzer_body(text: str):
    """
    找到第一个 LLVMFuzzerTestOneInput 出现并返回匹配的函数体（简单的括号计数匹配）。
    返回函数体字符串（不含外层花括号），找不到则返回 empty string。
    """
    m = re.search(r"LLVMFuzzerTestOneInput\s*\([^)]*\)\s*\{", text)
    if not m:
        return ""
    start = m.end()  # 指向 '{' 后
    # 向后平衡大括号
    depth = 1
    i = start
    n = len(text)
    while i < n and depth > 0:
        c = text[i]
        if c == "{":
            depth += 1
        elif c == "}":
            depth -= 1
        i += 1
    if depth != 0:
        # 未能匹配完整的大括号，回退到一个较小的窗口作为策略（避免无限失败）
        return text[start: start + 4096]
    # 返回内部内容（不含最终闭合的 '}'）
    return text[start: i-1]

def _analyze_body_for_types(body: str):
    """Scan a harness body (source text) for magic/keyword/extension clues.

    Returns a (possibly empty) set of type labels.

    Fix: magic numbers in C sources are usually written in mixed form, e.g.
    ``"\\x89PNG"`` -- printable bytes literal, the rest as hex escapes.  The
    original only matched the all-hex-escape form (lower-case only) and the
    raw latin-1 byte form, so that common spelling was never detected.  We
    now also try the mixed form and upper-case hex escapes.
    """
    types = set()
    lower = body.lower()

    # 1) Magic bytes in any of the spellings likely to appear in source code.
    for magic, t in _MAGIC_PATTERNS.items():
        esc_lower = "".join(f"\\x{b:02x}" for b in magic)
        esc_upper = "".join(f"\\x{b:02X}" for b in magic)
        # Mixed form: printable ASCII stays literal, the rest become \xNN.
        mixed = "".join(
            chr(b) if 0x20 <= b < 0x7F else f"\\x{b:02x}" for b in magic
        )
        # Raw form: magic bytes pasted directly into the source (latin-1
        # decoding never fails, so no try/except is needed).
        raw = magic.decode("latin1")
        if esc_lower in body or esc_upper in body or mixed in body or raw in body:
            types.add(t)

    # 2) Library / format keywords, word-bounded.
    for key, t in _LIB_KEYWORDS.items():
        if re.search(rf"\b{re.escape(key)}\b", lower):
            types.add(t)

    # 3) File-extension strings mentioned anywhere in the body.
    for ext, t in _EXT_HINTS.items():
        if ext in lower:
            types.add(t)

    return types

# --- Main per-harness analysis ---
def detect_seed_type_for_harness(project_dir: Path, harness_path: Path) -> str:
    """Detect the seed/input type for a single harness source file.

    Strategy, in order:
      1. Look under <project_dir>/seed_corpus and <project_dir>/corpus for a
         directory or archive matching the harness target name and classify
         its contents.
      2. Fall back to static analysis of the harness source body.

    Return formats:
      - "dir:<dirname>:TYPE1,TYPE2" or "dir:<dirname>:none" for a corpus dir
      - "zip:<name>:TYPE1" / "tar:<name>:TYPE1" for a matched archive
      - "TYPE1,TYPE2" or "none" from source analysis

    Fixes over the original:
    - deterministic output ordering (the old stable sort left types outside
      the priority list in arbitrary set-iteration order);
    - oversized sources are read only up to the cap instead of reading the
      whole file and then slicing.
    """
    # e.g. harness "png_fuzz.c" -> target_guess "png"
    target_guess = _target_name_from_harness(harness_path)

    # 1) Look for a matching directory or archive in the corpora folders.
    for folder in ("seed_corpus", "corpus"):
        d = project_dir / folder
        if not d.exists():
            continue

        # Exact-named directory first: seed_corpus/<target_guess>
        direct_dir = d / target_guess
        if direct_dir.is_dir():
            return f"dir:{direct_dir.name}:{_analyze_dir_types(direct_dir)}"

        # Otherwise any subdirectory whose name contains the target name.
        for child in d.iterdir():
            if child.is_dir() and target_guess.lower() in child.name.lower():
                return f"dir:{child.name}:{_analyze_dir_types(child)}"

        # Then archive files: .zip/.tar/.tgz plus explicit .tar.gz,
        # preferring archives whose name contains the target name.
        archives = [
            f for f in d.iterdir()
            if f.is_file() and f.suffix.lower() in (".zip", ".tar", ".tgz")
        ]
        archives += [
            f for f in d.iterdir()
            if f.is_file() and f.name.lower().endswith(".tar.gz")
        ]
        preferred = [a for a in archives if target_guess.lower() in a.name.lower()]
        for a in (preferred or archives):
            an = a.name.lower()
            if an.endswith(".zip"):
                return f"zip:{a.name}:{_analyze_zip_types(a, target_guess)}"
            if an.endswith((".tar", ".tgz", ".tar.gz")):
                return f"tar:{a.name}:{_analyze_tar_types(a, target_guess)}"

    # 2) No corpus hit: analyze the harness source itself (size-limited).
    try:
        if harness_path.stat().st_size > _MAX_SOURCE_SIZE:
            # Oversized source: read only a prefix rather than the whole file.
            with harness_path.open("r", errors="ignore") as fh:
                txt = fh.read(20000)
        else:
            txt = harness_path.read_text(errors="ignore")
    except Exception:
        return "none"

    # Prefer the fuzzer body; fall back to whole-file analysis.
    body = _extract_fuzzer_body(txt) or txt

    types = _analyze_body_for_types(body)
    if not types:
        return "none"
    # Rank common types first, then alphabetically so ties (and unranked
    # types) come out in a deterministic order.
    priority = ("PNG", "JPEG", "GIF", "PDF", "ZIP", "PCAP", "JSON", "XML", "FONT", "VIDEO")
    rank = {t: i for i, t in enumerate(priority)}
    return ",".join(sorted(types, key=lambda t: (rank.get(t, len(priority)), t)))


def find_harness_c_files(project_dir: Path):
    """Recursively find all source files containing LLVMFuzzerTestOneInput.

    Features:
      - recurses into subdirectories
      - skips build/out/.git and other tool/hidden directories
      - considers .c/.cc/.cpp files only, capped at 2 MB
      - tolerant of unreadable files (they are skipped)
      - returns paths sorted

    Fix: the original scanned in 8192-byte chunks, so a marker string that
    straddled a chunk boundary was missed.  Since candidate files are capped
    at 2 MB anyway, each one is now read in a single call.
    """
    needle = b"LLVMFuzzerTestOneInput"
    harnesses = []
    skip_dirs = {".git", "build", "out", "bin", "lib", "__pycache__", "dist", "target"}
    exts = {".c", ".cc", ".cpp"}
    max_size = 2 * 1024 * 1024  # 2 MB

    for root, dirs, files in os.walk(project_dir):
        # Prune skip/hidden directories in place so os.walk never enters them.
        dirs[:] = [d for d in dirs if d not in skip_dirs and not d.startswith(".")]

        for fname in files:
            fpath = Path(root) / fname
            if fpath.suffix.lower() not in exts:
                continue
            try:
                if fpath.stat().st_size > max_size:
                    continue  # skip oversized files
                if needle in fpath.read_bytes():
                    harnesses.append(fpath)
            except OSError:
                continue  # skip files that cannot be stat'ed/read

    harnesses.sort()
    return harnesses

def classify_oracle_from_c(cpath: Path):
    """Classify the crash oracle a harness source relies on.

    Heuristic substring search over the lower-cased source text.  Returns
    one of "assertion", "sanitizer", "abort", or "no_checking".

    Fix: the unreadable-file branch returned "no checking" (with a space)
    while the fall-through returned "no_checking" -- two different labels
    for the same category; unified to "no_checking".
    """
    try:
        txt = cpath.read_text(errors="ignore").lower()
    except Exception:
        return "no_checking"
    # NOTE: "assert" already covers "__assert_fail"; kept for clarity.
    if "assert" in txt or "__assert_fail" in txt:
        return "assertion"
    if any(k in txt for k in ("asan", "ubsan", "msan", "__sanitizer")):
        return "sanitizer"
    if any(k in txt for k in ("abort(", "exit(", "__builtin_trap(")):
        return "abort"
    return "no_checking"

def analyze_project(out_dir: Path):
    """Scan one project's out/ directory and build result rows for the CSV.

    When *out_dir* is missing, emits a placeholder row, appends the project
    name to the "failed_pro" log file, and returns early.
    """
    rows = []
    # Project name is taken from the parent directory of out_dir.
    proj_name = out_dir.parent.name if out_dir.parent else "unknown"

    if not out_dir.exists():
        print(f"[WARN] out_dir not found: {out_dir}")
        rows.append({
            "project": proj_name,
            "harness_c": "-",
            "seed_type": "none",
            "oracle_type": "-",
        })

        # Record the failed project name (append-only log).
        failed_log = Path("failed_pro")
        try:
            # Create the file (and any parent dirs) if needed.
            failed_log.parent.mkdir(parents=True, exist_ok=True)
            failed_log.touch(exist_ok=True)
            with failed_log.open("a", encoding="utf-8") as f:
                f.write(f"{proj_name}\n")
            print(f"  → recorded to {failed_log.resolve()}")
        except Exception as e:
            print(f"  [ERROR] Failed to record {proj_name} to {failed_log}: {e}")

        return rows

    # Find every source file containing LLVMFuzzerTestOneInput.
    harnesses = find_harness_c_files(out_dir)

    if not harnesses:
        rows.append({
            "project": proj_name,
            "harness_c": "-",
            "seed_type": "-",
            "oracle_type": "-",
        })
        return rows

    for harness in harnesses:
        oracle = classify_oracle_from_c(harness)
        seed_type = detect_seed_type_for_harness(out_dir, harness)
        rows.append({
            "project": proj_name,
            "harness_c": harness.name,
            "seed_type": seed_type,
            "oracle_type": oracle,
        })

    return rows

def main():
    """CLI entry point: analyze one project dir and append rows to a CSV."""
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument("--in", required=True, dest="input_dir", help="Path to the project's out directory")
    ap.add_argument("--out", default="ossfuzz_full_scan.csv", help="Output CSV file")
    opts = ap.parse_args()

    out_dir = Path(opts.input_dir).resolve()
    if not (out_dir.exists() and out_dir.is_dir()):
        print(f"❌ Input path {out_dir} does not exist or is not a directory")
        return

    rows = analyze_project(out_dir)

    csv_path = Path(opts.out)
    write_header = not csv_path.exists()  # header only when the file is new

    with csv_path.open("a", newline="", encoding="utf-8") as fh:
        writer = csv.DictWriter(
            fh, fieldnames=["project", "harness_c", "seed_type", "oracle_type"]
        )
        if write_header:
            writer.writeheader()
        writer.writerows(rows)

    print(f"\n✅ Analysis complete. Results written to {opts.out}")
