import requests
from bs4 import BeautifulSoup
import re


def get_urls(category_url):
    """Fetch the novel's table-of-contents page and return its chapter list.

    Scrapes every ``<a>`` tag on *category_url* and keeps those whose href
    looks like a chapter page of book 37583 (``/book/37583/...html``).
    The raw anchor list also contains "latest chapters" shortcuts at the top
    of the page, so the result is trimmed to start at the first real chapter
    ('第1章:奴仆').

    Args:
        category_url: URL of the book's table-of-contents page.

    Returns:
        A list of ``[title, absolute_url]`` pairs, in page order starting at
        chapter 1.  Returns an empty list when the request fails or no
        chapters were found (previously a sentinel string list was returned,
        which broke the caller's ``urls[i][1]`` indexing).
    """
    urls = []  # raw [title, relative_href] pairs scraped from the page
    try:
        # timeout prevents the scraper from hanging forever on a dead server
        response = requests.get(category_url, timeout=10)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            for link in soup.find_all('a'):
                title = link.get_text(strip=True)
                href = link.get('href')
                # keep only chapter links of this specific book
                if href and href.startswith('/book/37583/') and href.endswith('.html'):
                    urls.append([title, href])
        else:
            print(f"请求失败，状态码: {response.status_code}")
    except requests.RequestException as e:
        print(f"请求发生异常: {e}")

    # Drop the "latest chapter" shortcut links that precede the real
    # chapter 1 entry, and make every href absolute.
    new_urls = []
    started = False
    for title, href in urls:
        if not started and '第1章:奴仆' in title:
            started = True
        if started:
            new_urls.append([title, 'http://www.xsbiquge.la' + href])
    return new_urls


def fetch_novel(url):
    """Download one chapter page and return its cleaned body text.

    The chapter body is expected in a ``<div class="showtxt">`` element;
    all whitespace runs in it are collapsed to single newlines.

    Args:
        url: Absolute URL of a chapter page.

    Returns:
        The chapter text, or an empty string if the request failed or the
        content element was not found (an explanatory message is printed).
    """
    content = ''
    try:
        # timeout prevents the scraper from hanging forever on a dead server
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            novel_content_obj = soup.find('div', class_='showtxt')
            # BUG FIX: the original dereferenced .text BEFORE this None
            # check, raising AttributeError when the div was missing.
            if novel_content_obj is None:
                print("未找到小说内容所在的元素。")
            else:
                novel_content = novel_content_obj.text.strip()
                # collapse every whitespace run (spaces, newlines, &nbsp;
                # indentation) to a single newline; this subsumes the
                # original's separate '\n+' pass
                novel_content = re.sub(r'\s+', '\n', novel_content)
                content += novel_content
        else:
            print(f"请求失败，状态码: {response.status_code}")
    except requests.RequestException as e:
        print(f"请求发生异常: {e}")
    return content


if __name__ == "__main__":
    novel_name = 'fix'
    category_url = "http://www.xsbiquge.la/book/37583/"
    urls = get_urls(category_url)
    content = ''
    for i in range(len(urls)):
        print(f'{i + 1}/{len(urls)}:  {urls[i][0]}')
        content += f'\n\n{urls[i][0]}\n'
        content += fetch_novel(urls[i][1])
    open(f'{novel_name}.txt', 'w', encoding='utf-8').write(content)

"""
37  http://www.xsbiquge.la/book/37583/21678021.html
38  http://www.xsbiquge.la/book/37583/21678022.html
xx  http://www.xsbiquge.la/book/37583/23155609.html
"""
