import logging
import re
from pathlib import Path
from typing import List, Optional

import requests
from bs4 import BeautifulSoup

# Configuration constants: site root, the novel's index page, and where
# downloaded chapters are written.
BASE_URL = 'http://www.wuxia.net.cn'
NOVEL_URL = f'{BASE_URL}/book/yitiantulongji.html'
OUTPUT_DIR = Path('output')

# Configure the root logger: timestamped INFO-and-above messages to stderr.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def setup_environment() -> None:
    """Create the output directory if it does not already exist.

    ``parents=True`` makes this robust if OUTPUT_DIR is ever changed to a
    nested path (e.g. ``Path('data/output')``); for the current single-segment
    path the behavior is identical to the original.
    """
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

def get_soup(url: str) -> Optional[BeautifulSoup]:
    """Fetch *url* and parse the response body into a BeautifulSoup tree.

    Returns None (after logging the error) on any network or HTTP failure,
    so callers can treat all fetch problems uniformly.
    """
    try:
        # A timeout prevents the script from hanging forever on a stalled
        # connection; the original call had none.
        response = requests.get(url, timeout=30)
        # Treat 4xx/5xx responses as failures instead of parsing the error
        # page as if it were chapter content.
        response.raise_for_status()
        # The site does not reliably declare its encoding; sniff it from
        # the body so Chinese text decodes correctly.
        response.encoding = response.apparent_encoding
        return BeautifulSoup(response.text, 'html.parser')
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP errors are
        # expected here; anything else should surface as a bug.
        logging.error("Failed to fetch URL %s: %s", url, e)
        return None

def get_chapter_links(soup: BeautifulSoup) -> List[dict]:
    """Extract the chapter list from the novel's index page.

    Returns a list of ``{'url': absolute_url, 'title': link_text}`` dicts,
    or an empty list when the expected page structure is missing.

    NOTE(review): the chapter anchors are assumed to live in the 8th child
    node of the ``div.book`` element — site-specific layout; verify if the
    site's HTML changes.
    """
    try:
        book_div = soup.find(class_="book")
        if not book_div:
            return []

        # .children includes whitespace text nodes, so index 7 is a
        # position in the raw node list, not the 8th tag.
        children = list(book_div.children)
        if len(children) <= 7:
            return []

        chapters: List[dict] = []
        for link in children[7].find_all('a'):
            href = link.get('href')
            if not href:
                # An <a> without href used to raise KeyError here, and the
                # broad except then discarded EVERY chapter; skip it instead.
                continue
            chapters.append({'url': f"{BASE_URL}{href}", 'title': link.text})
        return chapters
    except Exception as e:
        # Boundary handler: any unexpected parse problem degrades to
        # "no chapters" rather than crashing the whole run.
        logging.error("Failed to parse chapter links: %s", e)
        return []

def save_chapter(chapter: dict) -> bool:
    """Download one chapter and write its text to ``OUTPUT_DIR/<title>.txt``.

    *chapter* must carry ``'url'`` and ``'title'`` keys (as produced by
    ``get_chapter_links``). Returns True on success, False (after logging)
    on any fetch, parse, or write failure.
    """
    try:
        soup = get_soup(chapter['url'])
        if not soup:
            return False

        # The chapter body is split across elements with class "text".
        content = soup.find_all(class_="text")
        if not content:
            return False

        # Titles may contain characters that are illegal in file names or
        # act as path separators ('/', ':', '?', ...). Replacing them keeps
        # the write inside OUTPUT_DIR and avoids OSError on Windows.
        safe_title = re.sub(r'[\\/:*?"<>|]', '_', chapter['title'])
        filepath = OUTPUT_DIR / f"{safe_title}.txt"
        with filepath.open('w', encoding='utf-8') as f:
            f.writelines(section.text for section in content)

        logging.info("Successfully saved chapter: %s", chapter['title'])
        return True
    except Exception as e:
        # Boundary handler: one bad chapter must not abort the whole crawl.
        logging.error("Failed to save chapter %s: %s", chapter['title'], e)
        return False

def main() -> None:
    """Entry point: prepare the output directory, locate every chapter on
    the novel's index page, then download them one by one."""
    setup_environment()

    # Fetch and parse the novel's table-of-contents page.
    index_soup = get_soup(NOVEL_URL)
    if not index_soup:
        return

    # Collect {'url', 'title'} records for each chapter.
    chapters = get_chapter_links(index_soup)
    if not chapters:
        logging.error("No chapters found")
        return

    # Download sequentially; save_chapter logs its own failures.
    for entry in chapters:
        save_chapter(entry)


if __name__ == "__main__":
    main()