import asyncio
import httpx
import json
import os
import logging
import random
from pathlib import Path
from typing import List, Optional, Tuple
import aiofiles
class Book:
    """Metadata for one book as scraped from its detail page.

    Every constructor argument arrives as a raw string; the constructor
    normalizes them: numeric id, trimmed author, single-line intro, and
    protagonist names joined with commas.
    """

    def __init__(
            self,
            book_id: str,
            book_name: str,
            author: str,
            book_intro: str,
            book_cover: str,
            main_figure: str
    ):
        self.book_id = int(book_id)
        self.book_name = book_name
        self.author = author.strip()
        # Collapse the introduction to a single line.
        self.book_intro = book_intro.replace('\n', '')
        self.book_cover = book_cover
        # Raw input has one name per line (with stray spaces); produce "a,b,c".
        names = [n for n in main_figure.strip().replace(' ', '').split('\n') if n]
        self.main_figure = ','.join(names)

    def __str__(self):
        return json.dumps(self.__dict__, indent=4, ensure_ascii=False)
class BookChapter:
    """One chapter entry from the table-of-contents API.

    All constructor arguments arrive as strings (raw API payload values)
    and are converted to their natural Python types here.

    Fix: the original annotated ``is_vip`` with the literal ``0`` instead
    of a type; it is a string flag where ``'0'`` means the chapter is free.
    """

    def __init__(
            self,
            id: str,
            title: str,
            is_vip: str,
            update_time: str,
            words: str,
            index: str,
            chapter_sort: str
    ):
        self.chapter_id = int(id)
        self.title = title
        # '0' -> free chapter; any other value -> VIP-only.
        self.is_vip = is_vip != '0'
        self.update_time = int(update_time)
        self.words = int(words)
        self.index = int(index)
        self.chapter_sort = int(chapter_sort)

    def __str__(self):
        return json.dumps(self.__dict__, indent=4, ensure_ascii=False)


# Logging configuration (timestamp - level - message)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Root directory where all scraped book data is stored
DATA_DIR = Path("qimao_books")
DATA_DIR.mkdir(exist_ok=True)

# Constants: site entry points and browser-like request headers
BASE_URL = "https://www.qimao.com/shuku/"
TOC_API = "https://www.qimao.com/qimaoapi/api/book/chapter-list?book_id="
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
    "Referer": "https://www.qimao.com/",
}

# Concurrency limit (10-20 is recommended to avoid getting banned)
SEMAPHORE = asyncio.Semaphore(15)

# Per-request timeout
TIMEOUT = httpx.Timeout(10.0)

# The Book and BookChapter classes are defined above (unchanged).

async def fetch_json(client: httpx.AsyncClient, url: str) -> Optional[dict]:
    """GET *url* and decode the body as JSON.

    Concurrency is bounded by the module-level SEMAPHORE. Any request,
    status, or decoding failure is logged and swallowed, returning None.
    """
    async with SEMAPHORE:
        try:
            response = await client.get(url, headers=HEADERS, timeout=TIMEOUT)
            response.raise_for_status()
            payload = response.json()
        except Exception as e:
            logger.error(f"JSON 请求失败: {url} - {e}")
            return None
        return payload

async def fetch_html(client: httpx.AsyncClient, url: str) -> Optional[str]:
    """GET *url* and return the response body as text.

    Concurrency is bounded by the module-level SEMAPHORE. Any request or
    status failure is logged and swallowed, returning None.
    """
    async with SEMAPHORE:
        try:
            response = await client.get(url, headers=HEADERS, timeout=TIMEOUT)
            response.raise_for_status()
            body = response.text
        except Exception as e:
            logger.error(f"HTML 请求失败: {url} - {e}")
            return None
        return body

async def get_book_toc(client: httpx.AsyncClient, book_id: int) -> List[BookChapter]:
    """Fetch and parse the chapter list (TOC) for *book_id*.

    Returns an empty list when the API response is missing or has no
    chapters; individual chapters that fail to parse are logged and skipped.
    """
    data = await fetch_json(client, f"{TOC_API}{book_id}")
    if not data:
        return []
    payload = data.get("data")
    if not payload or not payload.get("chapters"):
        return []

    parsed: List[BookChapter] = []
    for item in payload["chapters"]:
        try:
            parsed.append(BookChapter(**item))
        except Exception as e:
            logger.warning(f"章节解析失败: {item} - {e}")
    return parsed

async def get_chapter_content(client: httpx.AsyncClient, book_id: int, chapter_id: int) -> str:
    """Download one chapter page and return its paragraphs as <p>-wrapped HTML.

    Returns an empty string when the page could not be fetched.
    """
    html = await fetch_html(client, f"{BASE_URL}{book_id}-{chapter_id}/")
    if not html:
        return ""
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html, 'lxml')
    paragraphs = [
        f'<p>{p.get_text(strip=True)}</p>'
        for p in soup.select('.chapter-detail-wrap-content p')
    ]
    return '\n'.join(paragraphs)

async def get_book_meta(client: httpx.AsyncClient, book_id: int) -> Optional[Tuple[Book, List[BookChapter]]]:
    """Scrape the book detail page and return ``(Book, chapter list)``.

    Returns None when the page cannot be fetched or parsing raises; each
    missing field falls back to a placeholder value instead of aborting.
    """
    html = await fetch_html(client, f"{BASE_URL}{book_id}/")
    if not html:
        return None

    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html, 'lxml')

    def first_text(selector: str, fallback: str) -> str:
        # select_one + .text.strip(), with a placeholder when the node is absent
        node = soup.select_one(selector)
        return node.text.strip() if node else fallback

    try:
        title = first_text('.title .txt', '未知书名')
        author = first_text('.sub-title span:first-child em a', '未知作者')

        # Protagonist names are spread over several <em> tags; concatenate them.
        figure_nodes = soup.select('.sub-title span:last-child em')
        main_figure = ''.join(em.get_text(strip=True) for em in figure_nodes) or '未知主角'

        intro_node = soup.select_one('.intro')
        book_intro = intro_node.get_text(strip=True) if intro_node else '未知简介'

        cover_node = soup.select_one('.wrap-pic img')
        book_cover = cover_node.get('src', '') if cover_node else ''

        book = Book(
            book_id=str(book_id),
            book_name=title,
            author=author,
            book_intro=book_intro,
            book_cover=book_cover,
            main_figure=main_figure
        )
        chapters = await get_book_toc(client, book_id)
        return book, chapters

    except Exception as e:
        logger.error(f"解析书籍 {book_id} 元数据失败: {e}")
        return None

async def save_book_data(book: Book, chapters: List[BookChapter], client: httpx.AsyncClient):
    """Persist a book's metadata, chapter index, and free chapter bodies.

    Layout: ``<DATA_DIR>/<book_id>/meta.json``, ``chapters.json``, and one
    HTML file per free (non-VIP) chapter.
    """
    book_dir = DATA_DIR / str(book.book_id)
    book_dir.mkdir(exist_ok=True)

    # Metadata -> meta.json (written asynchronously)
    async with aiofiles.open(book_dir / "meta.json", 'w', encoding='utf-8') as f:
        await f.write(json.dumps(book.__dict__, ensure_ascii=False, indent=2))

    # Chapter index -> chapters.json
    serialized = [chap.__dict__ for chap in chapters]
    async with aiofiles.open(book_dir / "chapters.json", 'w', encoding='utf-8') as f:
        await f.write(json.dumps(serialized, ensure_ascii=False, indent=2))

    # Free chapters only: one download+write coroutine per chapter.
    pending = [
        save_chapter_content(book_dir, book.book_id, chap, client)
        for chap in chapters
        if not chap.is_vip
    ]

    # Run in batches of 10 to cap simultaneous open file handles.
    for start in range(0, len(pending), 10):
        await asyncio.gather(*pending[start:start + 10])
        await asyncio.sleep(0.1)  # gentle throttling between batches

async def save_chapter_content(book_dir: Path, book_id: int, chap: BookChapter, client: httpx.AsyncClient):
    """Fetch one chapter's body and write it to ``<index>_<title>.html``.

    Returns silently when the chapter content could not be retrieved.
    """
    content = await get_chapter_content(client, book_id, chap.chapter_id)
    if not content:
        return

    # Keep only alphanumerics / space / dash / underscore in the filename.
    allowed = (' ', '-', '_')
    safe_title = "".join(c for c in chap.title if c.isalnum() or c in allowed).rstrip()
    chap_path = book_dir / f"{chap.index:04d}_{safe_title}.html"

    async with aiofiles.open(chap_path, 'w', encoding='utf-8') as f:
        await f.write(content)

    # Random delay after the write, as a mild anti-ban measure.
    await asyncio.sleep(random.uniform(0.3, 0.8))

async def process_book(client: httpx.AsyncClient, book_id: int):
    """Fetch, parse, and persist a single book; all errors are logged, never raised."""
    try:
        result = await get_book_meta(client, book_id)
        if not result:
            logger.debug(f"书籍 {book_id} 不存在或无法访问")
            return
        book, chapters = result
        # A placeholder title means the page existed but carried no real book.
        if book.book_name == '未知书名':
            logger.info(f"书籍 {book_id} 不存在或无法访问")
            return
        logger.info(f"成功获取书籍: {book.book_id} - {book.book_name} ({len(chapters)} 章)")
        await save_book_data(book, chapters, client)
    except Exception as e:
        logger.error(f"处理书籍 {book_id} 时出错: {e}")

async def main():
    """Entry point: crawl book ids in ``[start_id, end_id]``.

    Skips any book whose output directory already exists, and processes
    books in small batches with a pause between batches to stay polite
    to the server.
    """
    start_id = 1790000
    end_id = 1799999  # test on a small range first; do not sweep the full 2e6 id space

    batch_size = 6  # max books processed concurrently per batch

    # One shared client gives HTTP/2 connection pooling across all requests.
    async with httpx.AsyncClient(http2=True, follow_redirects=True) as client:
        tasks = []
        for bid in range(start_id, end_id + 1):
            book_dir = DATA_DIR / str(bid)
            # Fixed: use pathlib's Path.exists() instead of os.path.exists().
            if book_dir.exists():
                logger.info(f"书籍 {bid} 已存在，跳过")
                continue
            logger.info(f"开始处理书籍 {bid}")
            tasks.append(process_book(client, bid))
            # Cap the number of in-flight books per batch.
            if len(tasks) >= batch_size:
                await asyncio.gather(*tasks)
                tasks = []
                await asyncio.sleep(1)  # rest between batches

        # Flush any remaining partial batch.
        if tasks:
            await asyncio.gather(*tasks)

if __name__ == "__main__":
    asyncio.run(main())