# Crawls a web novel chapter-by-chapter from 23dsg.com into a local text file.
import requests
from bs4 import BeautifulSoup
import re

# Base URL of the target site.
base_url = "https://www.23dsg.com"
# Path of the first chapter page to crawl.
start_url = "/jietiandi/131448/43167495.html"
# Chapter heading that marks where crawling stops.
end_chapter = "第500章"
current_url = base_url + start_url  # mutable cursor: handler() advances this page by page
txt_name = "dest.txt"  # output file that accumulates all chapter text


def handler():
    """Crawl chapter pages starting at ``current_url``.

    Each page's ``<div id="booktxt">`` paragraphs are appended to the file
    named by ``txt_name`` (two newlines between chapters). The loop stops
    when ``end_chapter`` appears in a page, when there is no usable
    "next page" link, or on any HTTP failure.

    Side effects: advances the module-level ``current_url`` and appends
    to ``txt_name``.
    """
    global current_url
    # Compile once instead of re-evaluating the pattern every iteration.
    chapter_pattern = re.compile(r"第\d+章")
    while True:
        # A timeout keeps an unresponsive server from hanging the crawl
        # forever; network errors end the crawl instead of crashing it.
        try:
            response = requests.get(current_url, timeout=10)
        except requests.RequestException as exc:
            print(f"Request failed for {current_url}: {exc}")
            break
        if response.status_code != 200:
            print(f"Failed to retrieve page: {current_url}")
            break

        soup = BeautifulSoup(response.text, 'html.parser')

        # The chapter body lives in <div id="booktxt">.
        booktxt_div = soup.find('div', id='booktxt')
        if not booktxt_div:
            print("Could not find div with id='booktxt'")
            break

        # Join the <p> paragraphs with single newlines.
        paragraphs = booktxt_div.find_all('p')
        text_content = "\n".join(p.get_text(strip=True) for p in paragraphs)

        # Report any chapter headings ("第N章") found on this page.
        headings = chapter_pattern.findall(text_content)
        if headings:
            print("当前章节:", headings)

        # Append this chapter; two newlines separate chapters in the file.
        with open(txt_name, "a", encoding="utf-8") as file:
            file.write(text_content + "\n\n")

        # Stop once the configured final chapter has been written.
        if end_chapter in text_content:
            print(f"Reached chapter {end_chapter}. Stopping.")
            break

        # Follow the "next page" link, if any.
        next_link = soup.find('a', id='next_url')
        if not next_link:
            print("No more pages. Stopping.")
            break

        next_url = next_link.get('href')
        if not next_url:
            # Anchor present but no href attribute: nowhere to go.
            print("No more pages. Stopping.")
            break
        if not next_url.startswith('http'):
            # Relative path: resolve against the site root.
            next_url = base_url + next_url

        current_url = next_url


def clean_text():
    """Reset the output file so a fresh crawl starts from empty."""
    # Opening in write mode truncates the file; nothing needs writing.
    handle = open(txt_name, "w", encoding="utf-8")
    handle.close()
    print("txt 文件已清空。")

# Script entry point: wipe the output file, then crawl until finished.
if __name__ == '__main__':
    clean_text()
    handler()
    print("Crawling completed.")
