#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
url_info_harvester.py
-----------------------------------------
一个给入门者用的“小而稳”的 URL 初步体检脚本。

设计目标（很朴素）：
1) 只碰公开页面：取标题/描述/H1、提取站内外链接、粗略匹配邮箱/电话样式；
2) 记录基础取证信息：HTTP 响应头（前若干项）、robots.txt 情况、简单的 DNS 解析；
3) 生成 3 份产物：HTML 报告、CSV(链接清单)、JSON(元信息与计数)；
4) 默认“温和”：有可调延时，且默认尊重 robots.txt（--robots enforce）。

命令行参数里加了几个“人味儿”的字段：
--signature   署名/昵称，会写进报告（方便以后翻回来看谁跑的）
--run-id      这次运行的 ID，不写就自动生成（时间戳+随机）
--ua          在默认 UA 后面追加一段自己的标识（比如 BuoyIntel/1.0）
--delay       每次请求后的暂停秒数，入门建议 1~2 秒
--robots      enforce/ignore，默认 enforce（严格遵守 robots）

依赖：requests, beautifulsoup4, tldextract, lxml
Python 3.9+ 测试
"""

from __future__ import annotations

import argparse
import csv
import html
import json
import os
import random
import re
import socket
import string
import time
from typing import Dict, List, Tuple
from urllib import robotparser
from urllib.parse import urljoin, urlparse

import requests
import tldextract
from bs4 import BeautifulSoup


# —— 一些“顺手用”的常量 —— #

# 选择一个常见的桌面 UA 做底，后面可追加自己的标识（--ua）
DEFAULT_UA_BASE = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
    "AppleWebKit/537.36 (KHTML, like Gecko) "
    "Chrome/124.0.0.0 Safari/537.36"
)

# 很普通的邮箱/电话样式匹配（只是样式匹配，不是有效性验证）
EMAIL_RE = re.compile(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}")
PHONE_RE = re.compile(r"(?:\+?\d[\d\-\s()]{6,}\d)")

# 超时别设太大，新手阶段 10~15 秒够用
REQ_TIMEOUT = 15


# —— 小工具函数 —— #

def gen_run_id() -> str:
    """Build a run identifier: local timestamp plus a 5-char random suffix."""
    stamp = time.strftime("%Y%m%d%H%M%S")
    alphabet = string.ascii_lowercase + string.digits
    suffix = "".join(random.choice(alphabet) for _ in range(5))
    return "-".join((stamp, suffix))


def safe_filename(name: str) -> str:
    """Collapse every run of characters outside [A-Za-z0-9._-] into one "_",
    then cap the result at 80 characters (conservative, filesystem-safe)."""
    cleaned = re.sub(r"[^A-Za-z0-9._-]+", "_", name)
    return cleaned[:80]


def get_root_and_host(url: str) -> Tuple[str, str]:
    """Split *url* into (site root as scheme://host, host itself)."""
    parts = urlparse(url)
    host = parts.netloc
    root = "{}://{}".format(parts.scheme, host)
    return root, host


def build_ua(extra_tail: str | None, run_id: str | None) -> str:
    """Compose the User-Agent string.

    Layout: base UA, then the caller's own marker (if any), then a
    "run_id/<id>" token (if any) — a plain, identifiable UA rather than a
    disguised one.
    """
    pieces = [DEFAULT_UA_BASE]
    if extra_tail:
        pieces.append(extra_tail.strip())
    if run_id:
        pieces.append(f"run_id/{run_id}")
    return " ".join(pieces)


def http_get(url: str, ua: str) -> requests.Response:
    """Issue one plain GET carrying only a custom User-Agent header;
    follows redirects and applies the module-wide timeout."""
    request_headers = {"User-Agent": ua}
    return requests.get(
        url,
        headers=request_headers,
        timeout=REQ_TIMEOUT,
        allow_redirects=True,
    )


def dns_lookup(host: str) -> Dict[str, object]:
    """Lightweight DNS lookup: the currently visible A records for *host*.

    Returns ``{"host": canonical_name, "ips": [...]}`` on success, or
    ``{"host": host, "error": message}`` when resolution fails.

    Only resolver-level failures are caught: ``socket.gethostbyname_ex``
    raises ``OSError`` subclasses (``socket.gaierror`` / ``socket.herror``),
    so catching ``OSError`` keeps the best-effort behavior while letting
    genuine programming errors propagate (the old ``except Exception``
    swallowed those too).
    """
    try:
        name, _aliases, ips = socket.gethostbyname_ex(host)
    except OSError as e:
        return {"host": host, "error": str(e)}
    return {"host": name, "ips": ips}


def check_robots(root: str, ua: str, target_url: str) -> Dict[str, object]:
    """Fetch and evaluate robots.txt for the site root.

    Returns a dict with:
      - url:     the robots.txt URL that was checked
      - present: True when robots.txt was served with HTTP 200 and a
                 non-empty body
      - allowed: can_fetch() verdict for *target_url*; defaults to True
                 when the file is missing/unreadable ("not explicitly
                 forbidden")
      - raw:     first 20k chars of the file, for the report

    Fix vs. the original: robots.txt field names are case-insensitive, so
    the old substring test ("Disallow" in text or "Allow" in text) silently
    ignored valid files written as "disallow:"/"allow:" and treated them as
    absent/allowed. Any non-empty 200 body is now handed to robotparser,
    which does the (case-insensitive) directive matching itself.
    """
    robots_url = urljoin(root + "/", "robots.txt")
    info: Dict[str, object] = {"url": robots_url, "present": False, "allowed": True, "raw": ""}

    try:
        resp = http_get(robots_url, ua)
        text = resp.text if resp.status_code == 200 else ""
        if text.strip():
            rp = robotparser.RobotFileParser()
            rp.parse(text.splitlines())
            info["present"] = True
            info["raw"] = text[:20000]  # the report only shows the head of the file
            info["allowed"] = rp.can_fetch(ua, target_url)
        # else: no file / unreadable — keep the permissive defaults.
    except Exception:
        # Best-effort by design: network failure is treated as "no robots".
        pass

    return info


def extract_page_info(base_url: str, html: str) -> Dict[str, object]:
    """Pull the basics out of one page.

    Collected, deliberately simple and stable:
      - title / meta description / at most five H1 texts
      - every <a href>, resolved absolute against *base_url*
      - loose email/phone pattern matches over the visible text
        (format matching only — no validity checks)
    """
    soup = BeautifulSoup(html, "lxml")

    title = ""
    if soup.title:
        title = (soup.title.string or "").strip()

    desc = ""
    meta_desc = soup.find("meta", attrs={"name": "description"})
    if meta_desc and meta_desc.get("content"):
        desc = meta_desc["content"].strip()

    headings = [node.get_text(strip=True) for node in soup.find_all("h1")]

    links: List[str] = [
        urljoin(base_url, a["href"].strip())
        for a in soup.find_all("a", href=True)
    ]

    body_text = soup.get_text(separator=" ", strip=True)

    return {
        "title": title,
        "description": desc,
        "h1": headings[:5],
        "links": links,
        "emails": sorted(set(EMAIL_RE.findall(body_text))),
        "phones": sorted(set(PHONE_RE.findall(body_text))),
    }


def split_internal_external(base_url: str, links: List[str]) -> Tuple[List[str], List[str]]:
    """Partition *links* by registered domain: same registered domain as
    *base_url* goes to internal, everything else (or unparseable) external."""

    def registered(u: str) -> str:
        # Registered domain ("example.co.uk"); bare label when no suffix.
        parts = tldextract.extract(u)
        if parts.suffix:
            return f"{parts.domain}.{parts.suffix}"
        return parts.domain

    home = registered(base_url)
    internal: List[str] = []
    external: List[str] = []

    for link in links:
        try:
            same_site = registered(link) == home
        except Exception:
            same_site = False
        (internal if same_site else external).append(link)

    return internal, external


def render_html_report(
    meta: Dict[str, object],
    headers: Dict[str, str],
    robots: Dict[str, object],
    dnsinfo: Dict[str, object],
    internal: List[str],
    external: List[str],
    signature: str,
    run_id: str,
    ua: str,
    tag: str,
) -> str:
    """Assemble the standalone HTML report as one string.

    Two fixes over the original:
      1. Security: title/description/H1/links/robots body all come from an
         untrusted remote page; they are now passed through html.escape()
         before interpolation. Previously a page title like
         "<script>...</script>" would execute when the report was opened
         in a browser (stored XSS in the report file).
      2. Syntax: the original placed a backslash escape inside an f-string
         expression, which is a SyntaxError on Python < 3.12 (the file
         targets 3.9+). All fragments are now computed before the template
         f-string.
    """
    esc = html.escape

    def li(items: List[str], limit: int | None = None) -> str:
        # Render a (possibly truncated) link list; escape href and text.
        data = items if limit is None else items[:limit]
        return "".join(
            f"<li><a href='{esc(x, quote=True)}' target='_blank' rel='nofollow noopener'>{esc(x)}</a></li>"
            for x in data
        )

    page_title = esc(str(meta.get("title") or meta.get("url")))
    sig_html = esc(signature) or "-"
    tag_html = esc(tag) or "-"
    ua_html = esc(ua)
    run_id_html = esc(run_id)

    title_html = esc(str(meta.get("title") or "")) or "(无)"
    desc_html = esc(str(meta.get("description") or "")) or "(无)"
    h1_html = esc(", ".join(meta.get("h1", []))) or "(无)"
    emails_html = esc(", ".join(meta.get("emails", []))) or "(未发现)"
    phones_html = esc(", ".join(meta.get("phones", []))) or "(未发现)"

    headers_html = esc(json.dumps(dict(list(headers.items())[:20]), ensure_ascii=False, indent=2))
    dns_html = esc(json.dumps(dnsinfo, ensure_ascii=False, indent=2))

    robots_url_html = esc(str(robots.get("url") or ""), quote=True)
    robots_state = (
        "<span class='good'>允许抓取</span>"
        if robots.get("allowed")
        else "<span class='bad'>禁止抓取</span>"
    )
    robots_presence = "(检测到文件)" if robots.get("present") else "(未检测到文件)"
    raw = robots.get("raw") or ""
    # Report shows at most the first 2000 chars of robots.txt.
    truncated = raw[:2000] + ("\n…" if len(raw) > 2000 else "")
    robots_raw_html = esc(truncated) if raw else "(无)"

    return f"""<!doctype html>
<html lang="zh-CN"><head><meta charset="utf-8">
<title>URL 体检报告 · {page_title}</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
body{{font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica,Arial,sans-serif;max-width:980px;margin:32px auto;padding:0 16px;}}
h1{{font-size:22px}} h2{{font-size:18px;margin-top:28px}}
code,pre{{background:#f6f8fa;padding:2px 6px;border-radius:4px;overflow:auto}}
small{{color:#666}} .muted{{color:#777}}
section{{margin-bottom:20px}} ul{{line-height:1.55}}
.card{{border:1px solid #ddd;border-radius:12px;padding:16px;margin:12px 0;}}
.kv b{{display:inline-block;min-width:120px}}
.bad{{color:#b00020}} .good{{color:#0a7f2e}}
</style></head><body>

<h1>URL 初步体检报告</h1>

<div class="card kv">
  <div><b>签名</b>：{sig_html}　<b>Run ID</b>：{run_id_html}</div>
  <div><b>User-Agent</b>：<span class="muted">{ua_html}</span></div>
  <div><b>标签</b>：{tag_html}</div>
  <div><b>时间</b>：{time.strftime("%Y-%m-%d %H:%M:%S")}</div>
</div>

<div class="card">
  <b>标题</b>：{title_html}<br>
  <b>描述</b>：{desc_html}<br>
  <b>H1</b>：{h1_html}<br>
  <b>邮箱样式</b>：{emails_html}<br>
  <b>电话样式</b>：{phones_html}
</div>

<section class="card">
  <h2>响应头（前 20 项）</h2>
  <pre>{headers_html}</pre>
</section>

<section class="card">
  <h2>robots.txt</h2>
  <div><a href="{robots_url_html}" target="_blank" rel="nofollow noopener">{robots_url_html}</a></div>
  <div>状态：{robots_state}　{robots_presence}</div>
  <pre>{robots_raw_html}</pre>
</section>

<section class="card">
  <h2>DNS</h2>
  <pre>{dns_html}</pre>
</section>

<section class="card"><h2>站内链接（前 50）</h2><ul>{li(internal, 50)}</ul></section>
<section class="card"><h2>外部链接（前 50）</h2><ul>{li(external, 50)}</ul></section>

<footer class="muted"><small>仅针对公开信息做温和取证；频率可控，尊重站点规则。</small></footer>
</body></html>"""


# —— 主流程 —— #

def main() -> None:
    """CLI entry point: parse args, probe one URL, write HTML/CSV/JSON outputs."""
    ap = argparse.ArgumentParser(
        description="URL 初步体检（入门版）：温和抓取公开页面信息并生成报告"
    )
    ap.add_argument("url", help="起始 URL（含 http/https）")
    ap.add_argument("--out", default="./out", help="输出目录（默认 ./out）")
    ap.add_argument("--signature", default="", help="个性化签名/昵称，写入报告")
    ap.add_argument("--run-id", default="", help="自定义运行 ID（留空则自动生成）")
    ap.add_argument("--ua", default="", help="在默认 UA 后追加的标识（例如 BuoyIntel/1.0）")
    ap.add_argument("--delay", type=float, default=1.0, help="请求之间的延时（秒），建议 1~2")
    ap.add_argument("--robots", choices=["enforce", "ignore"], default="enforce",
                    help="是否严格遵守 robots（默认 enforce）")
    ap.add_argument("--tag", default="", help="给这次任务打个标签，写进报告")
    args = ap.parse_args()

    # Run metadata: honor a user-supplied run id, otherwise generate one.
    run_id = args.run_id or gen_run_id()
    ua = build_ua(args.ua, run_id)

    # Make sure the output directory exists.
    os.makedirs(args.out, exist_ok=True)

    # Site root (scheme://host) and the bare hostname.
    root, host = get_root_and_host(args.url)

    # Consult robots.txt first (strict by default).
    robots = check_robots(root, ua, args.url)

    # Page-body state; stays empty when we decide not to fetch.
    html_text = ""
    status = None
    headers: Dict[str, str] = {}

    if robots.get("allowed", True) or args.robots == "ignore":
        # Either robots allows the fetch, or the user chose to ignore robots.
        print("[-] 请求页面：", args.url)
        resp = http_get(args.url, ua)
        status = resp.status_code
        headers = dict(resp.headers)
        if resp.status_code == 200:
            html_text = resp.text
        else:
            print(f"[!] HTTP 状态码：{resp.status_code}，报告里会照实记录。")
        time.sleep(max(args.delay, 0.0))  # give the site a moment to breathe
    else:
        # Explicitly disallowed while enforcing: skip fetching the body.
        status = "blocked_by_robots"
        headers = {}
        html_text = ""
        print("[!] robots 显示禁止抓取，本次不请求正文（--robots ignore 可强制抓取）")

    # Even without a body, emit a structured record so later merges stay uniform.
    meta = {"title": "", "description": "", "h1": [], "links": [], "emails": [], "phones": [], "url": args.url}
    if html_text:
        meta = extract_page_info(args.url, html_text)
        meta["url"] = args.url

    internal, external = split_internal_external(args.url, meta["links"])
    dnsinfo = dns_lookup(host)

    # Output filename prefix: host_timestamp.
    stamp = time.strftime("%Y%m%d_%H%M%S")
    prefix = safe_filename(f"{host}_{stamp}")

    # -- Write the CSV (internal/external link list) -- #
    links_csv = os.path.join(args.out, f"{prefix}_links.csv")
    with open(links_csv, "w", newline="", encoding="utf-8") as f:
        w = csv.writer(f)
        w.writerow(["type", "url"])
        for u in internal:
            w.writerow(["internal", u])
        for u in external:
            w.writerow(["external", u])

    # -- Write the JSON (metadata and counts, including run info) -- #
    meta_json = os.path.join(args.out, f"{prefix}_meta.json")
    with open(meta_json, "w", encoding="utf-8") as f:
        json.dump(
            {
                "input_url": args.url,
                "http_status": status,
                "headers": headers,
                "title": meta["title"],
                "description": meta["description"],
                "h1": meta["h1"],
                "emails": meta["emails"],
                "phones": meta["phones"],
                "internal_count": len(internal),
                "external_count": len(external),
                "signature": args.signature,
                "run_id": run_id,
                "ua": ua,
                "tag": args.tag,
                "robots": {"present": robots.get("present"), "allowed": robots.get("allowed")},
                "timestamp": stamp,
            },
            f,
            ensure_ascii=False,
            indent=2,
        )

    # -- Write the HTML report -- #
    report_html = os.path.join(args.out, f"{prefix}_report.html")
    html = render_html_report(meta, headers, robots, dnsinfo, internal, external,
                              args.signature, run_id, ua, args.tag)
    with open(report_html, "w", encoding="utf-8") as f:
        f.write(html)

    # -- Also leave an about.json for long-term archiving -- #
    about_json = os.path.join(args.out, f"{prefix}_about.json")
    with open(about_json, "w", encoding="utf-8") as f:
        json.dump({"signature": args.signature, "run_id": run_id, "ua": ua}, f, ensure_ascii=False, indent=2)

    # Report progress on the console.
    print("[✓] 已完成")
    print("  报告：", report_html)
    print("  链接清单 CSV：", links_csv)
    print("  元信息 JSON：", meta_json)
    print("  运行信息：", about_json)


if __name__ == "__main__":
    main()
