import requests
from bs4 import BeautifulSoup
from selenium import webdriver

# Replace with the URL of the novel's table-of-contents page to scrape
root_url = "https://www.shuqi.com"
novel_index_url = "https://www.shuqi.com/chapter?bid=8334232"

# Fetch the table-of-contents page with a browser driver so JS-rendered
# content is available.
# NOTE(review): PhantomJS support was removed from Selenium 4 — switch to a
# headless Chrome/Firefox driver (webdriver.Chrome with Options) to run on
# current Selenium releases.
driver = webdriver.PhantomJS()
# BUG FIX: Selenium's driver.get() returns None, not a response object.
# The rendered HTML must be read from driver.page_source.
driver.get(novel_index_url)
html_content = driver.page_source
driver.quit()  # release the browser process now that we have the HTML

# Parse the table-of-contents page
soup = BeautifulSoup(html_content, "html.parser")

# Extract each chapter's link (adjust tag/class to the actual page markup).
# Guard against a missing chapter table so the failure mode is a clear
# error instead of an AttributeError on None.
chapter_table = soup.find('table', class_="chapterul")
if chapter_table is None:
    raise RuntimeError(f"chapter list not found on {novel_index_url}")
chapter_links = [link['href'] for link in chapter_table.find_all('a')]




# 用于保存小说内容的文件
with open("novel.txt", "w", encoding="utf-8") as f:
    # 遍历每一章的链接
    for link in chapter_links[1:3]:
        # 获取章节页面的内容
        chapter_url = f"https://www.shuqi.com{link}"  # 根据实际情况拼接完整 URL
        response = requests.get(chapter_url)
        response.encoding = response.apparent_encoding
        html_content = response.text

        # 解析章节页面
        chapter_soup = BeautifulSoup(html_content, "html.parser")
        node = chapter_soup.find("div", class_="chapter-content")
        print(node)
        exit()
        # 提取章节标题和内容
        chapter_title = chapter_soup.find('h3', class_='chapter-title').get_text()  # 根据实际情况修改标签和属性
        content_node = chapter_soup.find('div', class_='chapter-p') # 判断节点是否有内容
        if content_node is not None:
            chapter_content = content_node.get_text()  # 根据实际情况修改标签和属性

        # 将章节标题和内容写入文件
        f.write(chapter_title + "\n")
        f.write(chapter_content + "\n\n")

print("小说已保存到 novel.txt")
