import requests
from bs4 import BeautifulSoup

# Fetch the novel's table-of-contents page.
url = 'https://fanqienovel.com/page/7253751028594510907?enter_from=search'  # replace with your actual URL
# A timeout is required: without one, requests.get can block forever on a
# stalled connection and the script never finishes.
response = requests.get(url, timeout=30)
response.raise_for_status()  # raises requests.HTTPError on a 4xx/5xx status

# Parse the HTML document.
soup = BeautifulSoup(response.text, 'html.parser')

# Collect the href attribute of every chapter-title link.
# NOTE(review): selector assumes the page markup uses .chapter-item /
# .chapter-item-title classes — verify if the site layout changes.
chapters = soup.select('.chapter-item a.chapter-item-title')
chapter_links = [chap['href'] for chap in chapters]

# NOTE(review): "title" is actually the site's base URL, not a page title;
# the name is kept unchanged in case later code in this file references it.
title = "https://fanqienovel.com"
suffix = "?enter_from=reader"

# Build one full reader URL per chapter, one per line. A single join is
# O(n) overall, unlike repeated "content +=" which is quadratic in the
# worst case; the trailing newline matches the original accumulation.
content = "".join(title + link + suffix + "\n" for link in chapter_links)

# Write all chapter URLs to a UTF-8 text file.
with open("网页所有链接.txt", "w", encoding="utf-8") as f:
	f.write(content)
