# -*- coding: utf-8 -*-
import os
import re
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

# Target site: full text of "Journey to the West" (西游记), one chapter per page.
base_url = "https://xiyouji.5000yan.com"

# Directory where each chapter is saved as an individual .txt file.
save_dir = "西游记"
os.makedirs(save_dir, exist_ok=True)

# Fetch the index page to collect the chapter list.
# timeout prevents the script from hanging forever on a stalled connection;
# raise_for_status surfaces HTTP errors (404/500) instead of parsing an error page.
response = requests.get(base_url, timeout=30)
response.raise_for_status()
response.encoding = 'utf-8'  # site serves UTF-8; set explicitly to avoid mojibake
soup = BeautifulSoup(response.text, "html.parser")

# Each chapter link sits inside an <li class="p-2"> element on the index page.
chapters = soup.select("li.p-2 a")

for chapter in chapters:
    chapter_title = chapter.text.strip()
    # .get() avoids a KeyError on anchors without href; urljoin resolves
    # relative links against the site root (also a no-op for absolute URLs).
    href = chapter.get("href")
    if not href:
        print(f"未找到章节链接，跳过: {chapter_title}")
        continue
    chapter_url = urljoin(base_url, href)

    print(f"正在访问章节: {chapter_title} - {chapter_url}")

    # Fetch the chapter page; timeout keeps a dead connection from stalling the run.
    chapter_response = requests.get(chapter_url, timeout=30)
    chapter_response.encoding = 'utf-8'
    chapter_soup = BeautifulSoup(chapter_response.text, "html.parser")

    # Prefer the on-page <h5> heading as the canonical chapter title.
    title_tag = chapter_soup.select_one("h5.text-center")
    if title_tag:
        chapter_title = title_tag.text.strip()

    # Chapter body: paragraphs are <div> children of div.grap.
    content_tag = chapter_soup.select_one("div.grap")
    if content_tag:
        content = "\n".join(p.text.strip() for p in content_tag.find_all("div") if p.text.strip())
    else:
        print(f"未能提取章节内容: {chapter_title}")
        continue  # skip this chapter

    # Titles become file names: replace characters that are illegal in
    # file names on Windows (and '/' on Unix) so open() cannot fail.
    safe_title = re.sub(r'[\\/:*?"<>|]', "_", chapter_title)
    file_path = os.path.join(save_dir, f"{safe_title}.txt")
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(content)

    print(f"已保存: {file_path}")

print("爬取完成！")
