# #发送请求必须的库
# import requests
# from lxml import etree
# #向谁发送
# url = 'https://www.yourbiquge.com/read/12612/4374885.html'
# #伪装一下，假装自己是浏览器访问，而不是通过代码。
# while True:
    
#     headers = {
#         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0'
#     }
#     #如何获取
#     resp =  requests.get(url,headers=headers)
#     #如何响应
#     # print(resp.text)
#     e = etree.HTML(resp.text) 
#     title = e.xpath('string(//h1)')
#     info= e.xpath('string(//article)')
#     url=f'https://www.yourbiquge.com{e.xpath("//body/div[1]/div[1]/div[7]/a[3]/@href")[0]}'
#     # print(title)
#     # print(info)
#     #保存
#     with open('觅长生.txt', 'w', encoding='utf-8') as f:
#         f.write(title + '\n')
#         f.write(info)


import requests
from lxml import etree
import time

from urllib.parse import urljoin

# First chapter of the novel; each iteration follows the "next chapter" link.
START_URL = 'https://www.yourbiquge.com/read/12612/4374885.html'

# Browser-like User-Agent so the site serves the normal HTML page.
# Hoisted out of the loop: it never changes between requests.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0'
}

MAX_RETRIES = 5      # consecutive failed requests before giving up on a URL
RETRY_DELAY = 5      # seconds to wait between retries of a failed request
POLITE_DELAY = 0.5   # seconds to wait between chapters (be kind to the server)


def _fetch(session, url):
    """Return the HTML text of *url*, retrying up to MAX_RETRIES times.

    Returns None when the page could not be fetched after all retries,
    instead of retrying forever like an unbounded loop would.
    """
    for attempt in range(1, MAX_RETRIES + 1):
        try:
            resp = session.get(url, headers=HEADERS, timeout=10)
            resp.raise_for_status()  # turn HTTP error statuses into exceptions
            return resp.text
        except requests.RequestException as exc:
            print(f"请求错误: {exc}")
            time.sleep(RETRY_DELAY)
    return None


def _parse_chapter(html):
    """Extract (title, body, next_href) from one chapter page.

    title/body are stripped strings ('' when missing); next_href is the raw
    href of the "next chapter" link, or None when the link is absent.
    """
    tree = etree.HTML(html)
    title = tree.xpath('string(//h1)').strip()
    body = tree.xpath('string(//article)').strip()
    # NOTE(review): this positional XPath assumes the 3rd link in that div is
    # always "next chapter" — brittle; verify against the live page structure.
    hrefs = tree.xpath('//body/div[1]/div[1]/div[7]/a[3]/@href')
    return title, body, hrefs[0] if hrefs else None


def main():
    """Crawl the novel chapter by chapter and write each to 觅长生.txt."""
    url = START_URL
    # One Session reuses the TCP connection across chapters; the file is
    # opened once so all chapters land in a single document.
    with requests.Session() as session, \
            open('觅长生.txt', 'w', encoding='utf-8') as f:
        while url:
            html = _fetch(session, url)
            if html is None:
                print("多次请求失败，结束爬取。")
                break

            title, body, next_href = _parse_chapter(html)
            if not title or not body:
                print("无法提取章节标题或内容，可能是页面结构发生了变化。")
                break

            f.write(title + '\n')
            f.write(body + '\n\n')  # blank line separates chapters

            if next_href is None:
                print("未找到下一章链接，结束爬取。")
                break
            # urljoin handles absolute, relative and scheme-relative hrefs,
            # replacing the manual startswith('http') concatenation.
            url = urljoin(url, next_href)
            time.sleep(POLITE_DELAY)  # throttle between chapters


if __name__ == '__main__':
    main()