import os
import time
from datetime import datetime
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

# Crawler configuration.
KEYWORD = "chunlieater"  # search term sent as the ?q= query parameter
BASE_URL = "https://rokuhentai.com"
SEARCH_URL = f"{BASE_URL}/?q={KEYWORD}"
# Per-run output directory, e.g. result/Rihentai/20240101.
SAVE_DIR = os.path.join("result", "Rihentai", datetime.now().strftime("%Y%m%d"))
# Browser-like User-Agent header sent with every request.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36'
}

def create_directory():
    """Create SAVE_DIR (and any missing parents) if it does not exist.

    Fix: pass exist_ok=True so the check-then-create race (TOCTOU) between
    os.path.exists() and os.makedirs() cannot raise FileExistsError when
    two runs start at the same moment.
    """
    if not os.path.exists(SAVE_DIR):
        os.makedirs(SAVE_DIR, exist_ok=True)
        print(f"[INFO] 创建目录: {SAVE_DIR}")

def download_image(img_url, comic_id, page_num):
    """Download one page image and save it under SAVE_DIR.

    Args:
        img_url: Absolute URL of the image to fetch.
        comic_id: Identifier used as the filename prefix.
        page_num: Page index used in the filename (caller passes 0-based).

    Fixes vs. the original:
    - The file suffix is no longer hard-coded to ".jpg": the real extension
      is taken from the URL path (query string stripped), falling back to
      ".jpg" only when the URL has none, so .png/.webp images are not
      mislabeled.
    - The broad ``except Exception`` is narrowed to the errors this body
      can actually raise (network errors and filesystem errors).
    """
    try:
        response = requests.get(img_url, headers=HEADERS, timeout=10)
        if response.status_code == 200:
            # Keep the server-side extension; strip any "?token=..." part first.
            ext = os.path.splitext(img_url.split('?', 1)[0])[1] or '.jpg'
            file_path = os.path.join(SAVE_DIR, f"{comic_id}_{page_num}{ext}")
            with open(file_path, 'wb') as f:
                f.write(response.content)
            print(f"[SUCCESS] 已保存: {file_path}")
        else:
            print(f"[WARNING] 下载失败: HTTP {response.status_code}")
    except (requests.RequestException, OSError) as e:
        print(f"[ERROR] 下载异常: {str(e)}")

def parse_subpage(subpage_url, comic_id):
    """Parse one reader page and download every image it lists.

    Args:
        subpage_url: Absolute URL of the reader page.
        comic_id: Identifier forwarded to download_image for filenames.

    Fixes vs. the original:
    - requests.get() now has a timeout (it was missing here while
      download_image already used timeout=10, so a stalled server could
      hang the crawl forever).
    - Image URLs are resolved with urljoin() before downloading, so
      relative or protocol-relative ``src``/``data-src`` values no longer
      produce broken requests.
    """
    print(f"[PROCESS] 正在处理子页面: {subpage_url}")
    try:
        response = requests.get(subpage_url, headers=HEADERS, timeout=10)
        soup = BeautifulSoup(response.text, 'lxml')

        # Every page image; lazy-loaded ones carry the real URL in data-src.
        img_tags = soup.find_all('img', class_='site-reader__image')
        print(f"[INFO] 发现 {len(img_tags)} 张图片")

        for idx, img in enumerate(img_tags):
            # Prefer data-src (lazy loading) over the plain src attribute.
            img_url = img.get('data-src') or img.get('src')
            if img_url:
                # Resolve relative / protocol-relative URLs against the page.
                img_url = urljoin(subpage_url, img_url)
                print(f"[DOWNLOAD] 下载第 {idx+1} 张: {img_url}")
                download_image(img_url, comic_id, idx)
                time.sleep(0.5)  # polite delay between image requests
    except Exception as e:
        print(f"[CRITICAL] 子页面解析失败: {str(e)}")

def rokuhentai_crawler():
    """Fetch the search results for KEYWORD and crawl every manga found.

    Fixes vs. the original:
    - requests.get() now has a timeout (it was missing).
    - A card without an ``id`` attribute no longer raises KeyError and
      aborts the entire run; it is logged and skipped instead.
    """
    create_directory()
    print(f"[START] 开始爬取关键字: {KEYWORD}")

    try:
        # Fetch the search results page.
        # NOTE(review): this uses 'html.parser' while parse_subpage uses
        # 'lxml' — kept as-is, but unifying the parser would be cleaner.
        search_res = requests.get(SEARCH_URL, headers=HEADERS, timeout=10)
        soup = BeautifulSoup(search_res.text, 'html.parser')

        # One card per manga in the result grid.
        manga_cards = soup.find_all('div', class_='mdc-layout-grid__cell site-manga-card')
        print(f"[FOUND] 发现 {len(manga_cards)} 部漫画")

        for card in manga_cards:
            card_id = card.get('id')
            if not card_id:
                # Defensive: markup change or ad card without an id.
                print("[WARNING] card missing id attribute, skipped")
                continue
            comic_id = card_id.split('-')[-1]  # e.g. "...-wdl7go" -> "wdl7go"
            subpage_url = f"{BASE_URL}/{comic_id}/0"
            print(f"[HANDLE] 处理漫画 ID: {comic_id}")
            parse_subpage(subpage_url, comic_id)

    except Exception as e:
        print(f"[FATAL] 主流程异常: {str(e)}")

# Script entry point: run the crawler only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    rokuhentai_crawler()
