import html
import json
import random
import re
import time

import requests

# 设置请求头，模拟浏览器访问
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0"
}

# 目标网站的URL
base_url = "https://xiyouji.5000yan.com/"


# 获取章节目录
def get_chapter_list(timeout=10):
    """Fetch the site's index page and return a chapter directory.

    Args:
        timeout: seconds to wait for the HTTP response; requests has NO
            default timeout, so without this a stalled connection hangs forever.

    Returns:
        dict mapping chapter title -> absolute chapter URL, in page order.

    Raises:
        requests.HTTPError: if the index page returns an error status.
        requests.RequestException: on network failure or timeout.
    """
    response = requests.get(base_url, headers=headers, timeout=timeout)
    # Fail fast on 4xx/5xx instead of silently regex-parsing an error page.
    response.raise_for_status()
    response.encoding = "UTF-8"
    content = response.text

    # Extract (href, title) pairs from the chapter list items.
    pattern = r'<li class="p-2"><a target=".*?"\s+href="(.*?)">(.*?)</a>'
    chapters = re.findall(pattern, content, re.DOTALL)

    # Keyed by title; later duplicates (if any) overwrite earlier ones.
    chapter_dict = {title.strip(): link.strip() for link, title in chapters}
    return chapter_dict


# 获取章节内容
def get_chapter_content(chapter_url, timeout=10):
    """Download one chapter page and return its plain-text body.

    Args:
        chapter_url: absolute URL of the chapter page.
        timeout: seconds to wait for the HTTP response; requests has NO
            default timeout, so without this a stalled connection hangs forever.

    Returns:
        The chapter text with HTML tags stripped and entities decoded,
        or None when the expected content container is not found.

    Raises:
        requests.HTTPError: if the page returns an error status.
        requests.RequestException: on network failure or timeout.
    """
    response = requests.get(chapter_url, headers=headers, timeout=timeout)
    # Fail fast on 4xx/5xx instead of regex-searching an error page.
    response.raise_for_status()
    response.encoding = "UTF-8"
    content = response.text

    # Chapter body lives in <div class="grap">...; the terminator below
    # mirrors the site's markup — NOTE(review): `<div></div>` (not `</div>`)
    # is assumed to match the real page structure; confirm against the site.
    pattern = r'<div class="grap">(.*?)<div></div>'
    match = re.search(pattern, content, re.DOTALL)
    if not match:
        return None

    chapter_content = match.group(1).strip()
    # Strip remaining inline HTML tags.
    chapter_content = re.sub(r'<.*?>', '', chapter_content)
    # Decode entities (&nbsp;, &amp;, ...) left behind after tag stripping.
    return html.unescape(chapter_content)


# 保存章节内容到文件
def save_chapters(chapter_dict):
    """Download every chapter in *chapter_dict* and write them to 西游记.txt.

    Args:
        chapter_dict: mapping of chapter title -> chapter URL, written in
            iteration order. Chapters whose download fails are reported on
            stdout and skipped; the file still receives all successful ones.

    Side effects:
        Overwrites 西游记.txt in the working directory and sleeps 3-5
        seconds between requests to avoid hammering the server.
    """
    with open("西游记.txt", mode="wt", encoding="utf-8") as out:
        for title, url in chapter_dict.items():
            print(f"正在爬取：{title}")
            text = get_chapter_content(url)
            if not text:
                # Download or parse failed; report and move on.
                print(f"未能获取到章节：{title}")
            else:
                out.write(title + "\n\n")
                out.write(text + "\n\n")
            # Random pause between requests to mimic human browsing.
            time.sleep(random.randint(3, 5))


if __name__ == "__main__":
    # 获取章节列表
    chapters = get_chapter_list()
    # 保存章节内容到文件
    save_chapters(chapters)
