import requests
import time  # 导入 time 模块用于实现睡眠功能
# pip install lxml
# 我有两个python，想安装python3对应的，所以采取下面这种安装方式
# D:/Python/Python311/python.exe -m pip install lxml 
from lxml import etree

def clean_text(text):
    """Remove newlines, carriage returns, NBSP characters and the site's
    promo string from *text*, returning the cleaned string."""
    unwanted = ('\n', '\r', '推荐都市大神老施新书:', '\xa0')
    # More strings to strip can simply be appended to the tuple above.
    for fragment in unwanted:
        text = text.replace(fragment, '')
    return text

base_url = "http://www.tycqzw.net"
# First chapter to fetch.
url = "/3_3925/18342912.html"
# Stop BEFORE fetching this chapter URL (exclusive end of the crawl).
LAST_URL = '/3_3925/21262208.html'

# Request headers are loop-invariant, so build them once.
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36"
}

has_next = True
while has_next:
    # timeout keeps a stalled connection from hanging the crawl forever.
    response = requests.get(base_url + url, headers=headers, timeout=30)
    response.encoding = "utf-8"

    tree = etree.HTML(response.text)
    # Chapter title.
    title = tree.xpath("//h1/text()")[0]
    # Text nodes of the chapter body.
    info_list = tree.xpath('//div[@id="content"]/text()')
    # Clean every fragment and join them into one chapter body.
    info_text = "\n".join(clean_text(item) for item in info_list)

    # Append the chapter to the output file; newlines keep the title
    # and successive chapters from running together on one line.
    with open("白袍总管.txt", "a", encoding="utf-8") as f:
        f.write(title + "\n" + info_text + "\n")

    print(f'拉取完成章节：{title}')

    time.sleep(0.3)  # brief pause between requests to be polite to the server

    # Link to the next chapter (4th anchor in the bottom navigation bar).
    next_url_list = tree.xpath('//div[@class="bottem1"]/a[4]/@href')
    if next_url_list:
        url = next_url_list[0]
        print(f'下一页链接为：{url}')
        # Reached the configured last chapter — stop without fetching it.
        if url == LAST_URL:
            has_next = False
    else:
        # No next-page link found: stop instead of refetching the same
        # page forever (the original looped infinitely here).
        has_next = False