import asyncio
import io
import json
import re
from pathlib import Path
from typing import List, Tuple
import zipfile
import aiofiles
import httpx
import logging
from bs4 import BeautifulSoup

# === Configuration ===
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Local root directory for downloaded books; one sub-directory per book ID.
DATA_DIR = Path("D:/data/aixia_books")
DATA_DIR.mkdir(exist_ok=True)

# Browser-like headers to avoid trivial bot blocking.
HEADERS = {
    
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
}
TIMEOUT = httpx.Timeout(15.0)  # per-request timeout (seconds)
SEMAPHORE = asyncio.Semaphore(20)  # global cap on concurrent HTTP requests

BASE_URL = "https://ixdzs8.com/sort/"  # category listing pages
TOC_API = "https://ixdzs8.com/read/"  # book detail / TOC pages

# === Utility functions (process_book and fetch_html are defined below) ===
# === Utility: chapter splitting (adapted to this site's text format) ===
def split_chapters_smart(text: str) -> List[Tuple[str, str]]:
    """Split raw novel text into (title, body) chapter pairs.

    Skips a leading header block — everything up to the
    "------章节内容开始-------" marker, or the first 6 lines when the
    marker is absent — and stops at the first line beginning with '='.
    A flush-left line matching "第...章" starts a new chapter; any
    content before the first title is collected under the placeholder
    title "未知章节".
    """
    raw_lines = text.splitlines()

    # Locate the start-of-content marker; fall back to skipping the header.
    body_start = next(
        (i + 1 for i, ln in enumerate(raw_lines) if "------章节内容开始-------" in ln),
        min(6, len(raw_lines)),
    )
    body = raw_lines[body_start:]

    title_pat = re.compile(r'^第[零一二三四五六七八九十百千\d]+章.*')
    result: List[Tuple[str, str]] = []
    title = "未知章节"
    buf: List[str] = []

    for raw in body:
        trimmed = raw.rstrip()
        if raw.startswith('='):
            # End-of-content separator.
            break
        if not trimmed:
            # Blank lines are kept verbatim inside the current chapter body.
            buf.append(raw)
            continue

        # A chapter heading must sit at column 0 and match the title pattern.
        if raw == raw.lstrip() and title_pat.match(trimmed):
            if buf or result:
                result.append((title, "\n".join(buf)))
                buf = []
            title = trimmed
        else:
            buf.append(raw.strip())

    # Flush the trailing chapter (or emit a single fallback chapter).
    if buf or not result:
        result.append((title, "\n".join(buf)))

    return result

# === Process a single book ===
async def process_book(client: httpx.AsyncClient, book_id: str):
    """Fetch one book's detail page, download its ZIP, split chapters,
    and persist metadata (plus cover) under DATA_DIR/<book_id>/.

    Skips books whose directory already exists. All failures are logged
    and swallowed so one bad book never aborts the whole crawl.
    """
    bid = int(book_id)
    book_dir = DATA_DIR / str(bid)
    if book_dir.exists():
        logger.info(f"书籍 {bid} 已存在，跳过")
        return

    try:
        # 1. Fetch the book detail page.
        toc_url = f'{TOC_API}{bid}'
        async with SEMAPHORE:
            resp = await client.get(toc_url, headers=HEADERS, timeout=TIMEOUT)
            resp.raise_for_status()
        soup = BeautifulSoup(resp.text, 'lxml')

        # Title: prefer the dedicated node, fall back to any <h1>; guard
        # against pages with neither (the old code raised AttributeError here).
        title_elem = soup.select_one('.n-text h1') or soup.select_one('h1')
        book_name = (title_elem.get_text(strip=True) if title_elem else "") or "未知书名"
        author_elem = soup.select_one('.n-text p a')
        author = author_elem.get_text(strip=True) if author_elem else "未知作者"
        intro_elem = soup.select_one('.pintro')
        intro = intro_elem.get_text(strip=True) if intro_elem else ""
        intro = intro.replace('内容简介：', '')
        cover_img = soup.select_one('.n-img img')
        cover_url = cover_img.get('src') if cover_img else None

        # Download link (usually a .zip).
        download_link = None
        for a in soup.select('.n-btn a'):
            href = a.get('href', '')
            if 'down' in href or '.zip' in href:
                download_link = href
                break

        if not download_link:
            logger.warning(f"书籍 {bid} 未找到下载链接")
            return

        # Resolve relative links against the page URL (RFC 3986 join:
        # absolute links pass through unchanged; relative ones used to
        # make httpx raise on a base-less request).
        download_link = str(httpx.URL(toc_url).join(download_link))

        # 2. Download the ZIP.
        async with SEMAPHORE:
            zip_resp = await client.get(download_link, headers=HEADERS, timeout=TIMEOUT)
            zip_resp.raise_for_status()

        # Reject disguised HTML error pages served with a 200 status.
        if b'<html' in zip_resp.content[:500].lower():
            logger.error(f"书籍 {bid} 返回的是 HTML 页面，非 ZIP")
            return

        # 3. Extract the first file from the ZIP.
        zip_buffer = io.BytesIO(zip_resp.content)
        try:
            with zipfile.ZipFile(zip_buffer) as zf:
                file_infos = zf.infolist()
                if not file_infos:
                    logger.error(f"书籍 {bid} ZIP 为空")
                    return
                raw_data = zf.read(file_infos[0])
        except zipfile.BadZipFile:
            logger.error(f"书籍 {bid} ZIP 损坏")
            return

        # Try the usual Chinese-text encodings in order.
        txt_content = None
        for enc in ('utf-8', 'gbk', 'gb2312'):
            try:
                txt_content = raw_data.decode(enc)
                break
            except UnicodeDecodeError:
                continue
        if txt_content is None:
            logger.error(f"书籍 {bid} 无法解码文本")
            return

        # 4. Split into chapters; require at least two usable chapters.
        #    (The old code had two redundant checks with the same message —
        #    the splitter always returns at least one entry.)
        chapters = split_chapters_smart(txt_content)
        if len(chapters) <= 1:
            logger.warning(f"书籍 {bid} 未识别到任何章节")
            return

        # 5. Create the book directory only once we have usable content.
        book_dir.mkdir(parents=True, exist_ok=True)

        # 6. Persist metadata + chapters as one JSON document.
        meta = {
            "book_id": bid,
            "book_name": book_name,
            "author": author,
            "intro": intro,
            "cover_url": cover_url,
            "download_url": download_link,
            "chapter_count": len(chapters),
            "is_ended": True,  # NOTE(review): hard-coded; the page's real completion status is not parsed — confirm
            "chapters": [
                {"title": title, "content": content}
                for title, content in chapters
            ],
        }
        async with aiofiles.open(book_dir / "meta.json", 'w', encoding='utf-8') as f:
            await f.write(json.dumps(meta, ensure_ascii=False, indent=2))

        # 7. Download the cover (best effort — failure only logs a warning).
        if cover_url:
            try:
                async with SEMAPHORE:
                    img_resp = await client.get(cover_url, headers=HEADERS, timeout=TIMEOUT)
                    img_resp.raise_for_status()  # don't save an error page as cover.jpg
                async with aiofiles.open(book_dir / "cover.jpg", 'wb') as f:
                    await f.write(img_resp.content)
            except Exception as e:
                logger.warning(f"封面下载失败 {bid}: {e}")

        logger.info(f"✅ 书籍 {bid}「{book_name}」处理完成，共 {len(chapters)} 章")

    except Exception as e:
        logger.error(f"❌ 书籍 {bid} 处理失败: {e}")

async def fetch_html(client: httpx.AsyncClient, url: str) -> str | None:
    """GET *url* under the global request semaphore and return the body text.

    Any request or HTTP-status error is logged and turned into ``None``
    so callers can simply skip failed pages.
    """
    async with SEMAPHORE:
        try:
            response = await client.get(url, headers=HEADERS, timeout=TIMEOUT)
            response.raise_for_status()
            return response.text
        except Exception as exc:
            logger.error(f"HTML 请求失败: {url} - {exc}")
            return None

# === Producer: crawl book IDs and push them onto the queue ===
async def book_id_producer(client: httpx.AsyncClient, queue: asyncio.Queue, done_event: asyncio.Event):
    """Walk category listing pages (sort indices 0-10), extract book IDs,
    and feed them into *queue*.

    Sets *done_event* in all cases on exit so consumers know that no
    further IDs will arrive.
    """
    page_href_re = re.compile(r'/sort/\d+/index-0-2-0-(\d+)\.html')

    try:
        for category in range(11):
            try:
                html = await fetch_html(client, f"{BASE_URL}{category}/index-0-2-0-1.html")
                if not html:
                    continue

                # Read the last-page link on page 1 to learn the page count.
                first_soup = BeautifulSoup(html, 'lxml')
                tail_link = first_soup.select_one('.page .pagei a:last-child')
                page_total = 1
                if tail_link:
                    m = page_href_re.search(tail_link.get('href', ''))
                    if m:
                        page_total = int(m.group(1))

                logger.info(f"分类 {category} 共 {page_total} 页")
                pushed = []
                for page_no in range(1, page_total + 1):
                    page_html = await fetch_html(client, f"{BASE_URL}{category}/index-0-2-0-{page_no}.html")
                    if not page_html:
                        continue
                    page_soup = BeautifulSoup(page_html, 'lxml')
                    for entry in page_soup.select('.panel .u-list li'):
                        data_url = entry.get('data-url')
                        if not data_url:
                            continue
                        candidate = data_url.strip('/').split('/')[-1]
                        if candidate.isdigit():
                            await queue.put(candidate)  # hand off to consumers immediately
                            pushed.append(candidate)
                    # Cap each category at ~100 pushed IDs.
                    if len(pushed) > 100:
                        break
                    logger.info(f"分类 {category} 第 {page_no} 页，已推送 ID 到队列")

            except Exception as e:
                logger.error(f"分类 {category} 抓取失败: {e}")

    finally:
        done_event.set()  # tell consumers: no more new IDs

# === Consumer: process books from the queue ===
async def book_consumer(client: httpx.AsyncClient, queue: asyncio.Queue, done_event: asyncio.Event):
    """Pull book IDs off *queue* and process them until the producer is
    done and the queue has drained.

    Uses a short ``get()`` timeout so the loop periodically re-checks the
    shutdown condition instead of blocking forever on an empty queue.
    """
    while True:
        try:
            try:
                bid = await asyncio.wait_for(queue.get(), timeout=5.0)
            except asyncio.TimeoutError:
                if done_event.is_set() and queue.empty():
                    break
                continue
            try:
                await process_book(client, bid)
            finally:
                # Always mark the item done — the old code skipped
                # task_done() when processing raised after get(), which
                # could deadlock queue.join() in main().
                queue.task_done()
        except Exception as e:
            logger.error(f"消费者异常: {e}")
            if done_event.is_set() and queue.empty():
                break

# === Main: start one producer + multiple consumers ===
async def main():
    """Run one book-ID producer and ten concurrent consumers until every
    discovered book has been processed."""
    queue = asyncio.Queue(maxsize=100)  # bounded queue caps memory usage
    done_event = asyncio.Event()

    async with httpx.AsyncClient(http2=True, follow_redirects=True) as client:
        # One producer feeding N concurrent consumers.
        producer = asyncio.create_task(book_id_producer(client, queue, done_event))
        consumers = [
            asyncio.create_task(book_consumer(client, queue, done_event))
            for _ in range(10)  # 10 concurrent consumers
        ]

        await producer
        await queue.join()  # wait until every queued ID is processed
        for c in consumers:
            c.cancel()
        # Reap the cancelled tasks so their CancelledErrors are observed —
        # the old code left them dangling, risking "Task was destroyed but
        # it is pending!" warnings at interpreter shutdown.
        await asyncio.gather(*consumers, return_exceptions=True)

    logger.info("所有任务完成！")

# Script entry point: run the crawler's asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())