#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
RokuHentai 站内关键词爬虫
用法:  python rokuhentai_spider.py  [关键词，默认 chunlieater]
"""

import os
import sys
import time
import datetime
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from urllib.parse import urljoin, urlparse

# ---------- 全局配置 ----------
BASE_URL = "https://rokuhentai.com"
SEARCH_TPL = f"{BASE_URL}/?q={{keyword}}"
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/123.0 Safari/537.36"
}
SAVE_ROOT = os.path.join("result", "Rihentai")
TIME_FMT = "%Y%m%d_%H%M%S"
# ------------------------------

def log(msg):
    """Print *msg* to stdout prefixed with a timestamp.

    Fix: the original used the ``%F %T`` strftime shorthands, which are
    C-library extensions and not supported on every platform (notably
    the Windows CRT, where they can raise or produce empty output).
    The explicit ``%Y-%m-%d %H:%M:%S`` form is equivalent and portable.
    """
    print(f"[{datetime.datetime.now():%Y-%m-%d %H:%M:%S}]  {msg}")

def ensure_dir(path):
    """Create directory *path* (including parents) if it is missing.

    Fix: ``os.makedirs(..., exist_ok=True)`` already tolerates an
    existing directory, so the previous ``os.path.exists()`` pre-check
    was redundant and racy (TOCTOU: the directory could appear between
    the check and the creation). Calling ``makedirs`` unconditionally
    is both simpler and race-free.
    """
    os.makedirs(path, exist_ok=True)

def get_soup(url, **kw):
    """Fetch *url* and return the response body parsed as a BeautifulSoup tree.

    Extra keyword arguments are forwarded to ``requests.get``; a
    non-2xx status raises via ``raise_for_status``.
    """
    log(f"GET  {url}")
    response = requests.get(url, headers=HEADERS, timeout=30, **kw)
    response.raise_for_status()
    html = response.text
    return BeautifulSoup(html, "lxml")

def list_search_pages(keyword, max_pages=20):
    """Yield search-result page URLs for *keyword*.

    Generalization (backward compatible): the previously hard-coded
    20-page limit is now the *max_pages* keyword with the same default.
    Page 0 is the bare search URL; later pages append ``&page=N``,
    exactly as before. The invariant ``format`` call is hoisted out of
    the loop.
    """
    base = SEARCH_TPL.format(keyword=keyword)
    for page in range(max_pages):
        yield base if page == 0 else f"{base}&page={page}"

def parse_gallery_links(soup):
    """Yield absolute gallery URLs for every manga card on a search page."""
    cards = soup.select("div.site-manga-card")
    log(f"本页发现 {len(cards)} 本漫画")
    for card in cards:
        # The cover link leads into the first chapter of the manga.
        anchor = card.select_one("a.site-manga-card__link")
        if not anchor:
            continue
        relative = anchor["href"]  # relative path like /wdl7go/0
        yield urljoin(BASE_URL, relative)

def parse_reader_images(soup):
    """Yield absolute URLs of every page image on a reader page.

    Fix: for lazy-loaded images the real URL conventionally lives in
    the ``data-src`` attribute while ``src`` may hold a tiny
    placeholder; the original preferred ``src`` and could therefore
    yield the placeholder. ``data-src`` is now checked first, with
    ``src`` as the fallback for eagerly-loaded images.
    NOTE(review): attribute priority assumed from the common
    lazy-loading convention — confirm against the site's live markup.
    """
    imgs = soup.select("img.site-reader__image")
    log(f"本话共 {len(imgs)} 张图")
    for img in imgs:
        src = img.get("data-src") or img.get("src")
        if src:
            yield urljoin(BASE_URL, src)

def download_one_image(img_url, save_dir):
    """Download a single image into *save_dir*, skipping existing files.

    Fixes over the original:
    * the streamed response is closed via ``with`` — previously the
      connection leaked if an exception fired before the body was
      fully consumed;
    * empty keep-alive chunks from ``iter_content`` are skipped;
    * a partially-written file is removed when the transfer fails, so
      a later run does not mistake the truncated file for a finished
      download and skip it.
    """
    fname = os.path.basename(urlparse(img_url).path)
    dst = os.path.join(save_dir, fname)
    if os.path.isfile(dst):
        log(f"跳过已存在  {fname}")
        return
    with requests.get(img_url, headers=HEADERS, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        try:
            with open(dst, "wb") as f:
                for chunk in resp.iter_content(1024 * 64):
                    if chunk:
                        f.write(chunk)
        except Exception:
            # Drop the partial file so the skip-if-exists check above
            # cannot be fooled on the next run.
            try:
                os.remove(dst)
            except OSError:
                pass
            raise
    log(f"保存  {dst}")

def rokuhentai_crawler(keyword="chunlieater"):
    """Crawl all search-result pages for *keyword* and download the images.

    Pipeline: search pages -> gallery (reader) pages -> image files,
    saved under ``result/Rihentai/<keyword>_<timestamp>/<gallery id>/``.
    Network failures at any level are logged and skipped so one bad
    page does not abort the whole run.
    """
    log(f"keyword = {keyword}")
    stamp = datetime.datetime.now().strftime(TIME_FMT)
    save_home = os.path.join(SAVE_ROOT, f"{keyword}_{stamp}")
    ensure_dir(save_home)

    # Step 1: walk every search-result page.
    for search_url in list_search_pages(keyword):
        try:
            search_soup = get_soup(search_url)
        except Exception as e:
            log(f"搜索页拉取失败: {e}")
            continue

        # Step 2: open each manga found on the page.
        for gallery_url in parse_gallery_links(search_soup):
            try:
                gallery_soup = get_soup(gallery_url)
            except Exception as e:
                log(f"  漫画页失败: {e}")
                continue

            # The first path segment of the gallery URL serves as its id
            # and names the per-gallery subdirectory.
            gid = urlparse(gallery_url).path.strip("/").split("/")[0]
            gallery_dir = os.path.join(save_home, gid)
            ensure_dir(gallery_dir)

            # Step 3: fetch every image of this chapter, with a progress bar.
            image_urls = list(parse_reader_images(gallery_soup))
            for u in tqdm(image_urls, desc=f"  {gid}", ncols=80):
                try:
                    download_one_image(u, gallery_dir)
                except Exception as e:
                    log(f"    图下载异常: {u}  -> {e}")
                    continue

            # Be polite to the server between galleries.
            time.sleep(0.5)

    log("全部任务完成！")

if __name__ == "__main__":
    # The keyword may be supplied as the first CLI argument; fall back
    # to the historical default otherwise.
    args = sys.argv[1:]
    rokuhentai_crawler(args[0] if args else "chunlieater")