import requests
from bs4 import BeautifulSoup

# URL of the target page
# url = 'https://www.xiaoshubao.net/read/483951/1.html'
# url = 'https://www.xiaoshubao.net/read/483951/1_1.html'

# url = 'https://www.xiaoshubao.net/read/483951/274_1.html'

# Build the array of URL strings with a list comprehension.
# Each chapter N has a main page N.html and a continuation page N_1.html:
# https://www.xiaoshubao.net/read/483951/1.html
# 'https://www.xiaoshubao.net/read/483951/1_1.html'
# 'https://www.xiaoshubao.net/read/483951/2.html'
# 'https://www.xiaoshubao.net/read/483951/2_1.html'
# https://www.xiaoshubao.net/read/483951/731.html
# https://www.xiaoshubao.net/read/483951/590.html

# https://www.xiaoshubao.net/read/483951/580.html
# https://www.xiaoshubao.net/read/483951/633.html

# Main page of every chapter: 580.html through 632.html inclusive.
url_list = [f'https://www.xiaoshubao.net/read/483951/{i}.html' for i in range(580, 633)]

# Interleave each chapter's main page with its "_1" continuation page.
# Bug fix: the continuation suffix belongs *before* the ".html" extension
# (".../580_1.html", as the sample URLs above show), not appended after it
# (".../580.html_1"), which is what the old f'{name}_1' produced.
new_url_list = []
for name in url_list:
    new_url_list.append(name)
    new_url_list.append(name.replace('.html', '_1.html'))


# print(new_url_list)

# # 发送 GET 请求获取网页内容
# response = requests.get(url)
# response.encoding = 'utf-8'  # 设置编码
#
# # 使用 BeautifulSoup 解析网页内容
# soup = BeautifulSoup(response.text, 'html.parser')
#
# # 获取小说标题
# h1 = soup.find_all('h1')
# chapter_content = ''
# for p in h1:
#     print(p.get_text())
#     # chapter_title = p.get_text()
#     chapter_content = p.get_text() + '\n'
#
# # 保留换行符
# for br in soup.find_all('br'):
#     br.replace_with('\n')
#
# # 保留换行符
# for p in soup.find_all('p'):
#     p.replace_with('\n')
#
# content_div = soup.find_all('div', attrs={"id": "content"})
# for div in content_div:
#     print(div.get_text())
#     chapter_content = chapter_content + div.get_text() + '\n'
# chapter_content = content_div.getText()

# h1 = soup.find_all('h1')
# for p in h1:
#     print(p.get_text())

# paragraphs = soup.find_all('p')
# for p in paragraphs:
#     print(p.get_text())


# 定义要写入的字符串
# text = "这是要写入文件的内容。"

# 打开文件（如果文件不存在会自动创建）
# with open('截运道师.txt', 'w', encoding='utf-8') as file:
#     # 写入字符串到文件
#     file.write(chapter_content)
#
# print("内容已成功写入example.txt文件。")
#


def content_get(url, timeout=10):
    """Fetch one chapter page and return its text content.

    The returned string is the chapter title (text of the page's <h1>
    tags) followed by the text of the <div id="content"> element, with
    <br> and <p> tags converted to newlines.

    :param url: chapter page URL to download
    :param timeout: seconds to wait for the HTTP response; added so a
        stalled connection cannot hang the whole scrape forever
    :raises requests.exceptions.RequestException: on network failures or
        HTTP error statuses (the caller already catches this and moves on)
    """
    # Bug fix: the original call had no timeout, so one dead connection
    # would block the script indefinitely.
    response = requests.get(url, timeout=timeout)
    # Bug fix: fail fast on 404/5xx instead of scraping the error page
    # and silently writing it into the output file.  HTTPError is a
    # RequestException subclass, so the caller's except clause handles it.
    response.raise_for_status()
    response.encoding = 'utf-8'  # the site serves UTF-8 text

    soup = BeautifulSoup(response.text, 'html.parser')

    # Chapter title: if several <h1> tags exist, the last one wins
    # (each pass overwrites, matching the original behaviour).
    chapter_content = ''
    for h1 in soup.find_all('h1'):
        chapter_content = h1.get_text() + '\n'

    # Preserve line breaks: the site marks them with <br> tags.
    for br in soup.find_all('br'):
        br.replace_with('\n')

    # Replace <p> tags with newlines as well.
    for p in soup.find_all('p'):
        p.replace_with('\n')

    # The chapter body lives in <div id="content">.
    for div in soup.find_all('div', attrs={"id": "content"}):
        chapter_content = chapter_content + div.get_text() + '\n'

    return chapter_content


# Download every chapter page and append it to one output file.
# Naming the file once here fixes a defect: the final success message
# previously hard-coded a *different* filename than the one opened.
output_name = "截运道师_588_641_4.txt"
with open(output_name, "w", encoding="utf-8") as file:
    for index, url in enumerate(url_list):
        try:
            text = content_get(url)
            # Write the chapter text, separated by a blank line.
            file.write(text + "\n\n")
            # Bug fix: use index + 1 so the final chapter reports 100%
            # (index / len stopped just short of it).
            print(f"进度{(index + 1) / len(url_list) :.0%}")
        except requests.exceptions.RequestException as e:
            # Best effort: log the failure and continue with the next page.
            print(f"Error fetching {url}: {e}")

print(f"已成功写入 {output_name} 文件。")
