import requests,re
import chardet,json
from bs4 import BeautifulSoup


def fetch_page_content(url):
    """
    Send an HTTP GET request and return the page content.

    :param url: URL of the page to crawl
    :return: decoded HTML text of the page, or None if the request failed
    """
    try:
        # Browser-like headers to avoid trivial bot blocking.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "Cache-Control": "max-age=0"
        }
        # timeout prevents the crawler from hanging forever on a stalled server.
        response = requests.get(url, headers=headers, timeout=10)
        # Check status first so we don't waste time sniffing the body of an error page.
        if response.status_code != 200:
            print(f"请求失败，状态码: {response.status_code}")
            return None
        # Detect the encoding from the raw bytes; chardet may return None for
        # an undetectable encoding, so fall back to utf-8 explicitly.
        encoding = chardet.detect(response.content)['encoding']
        response.encoding = encoding or 'utf-8'
        return response.text
    except requests.RequestException as e:
        print(f"请求发生异常: {e}")
        return None


def parse_page_chapter(html_content):
    """
    Parse the HTML content with BeautifulSoup and extract chapter links.

    Each extracted chapter is also appended to ``novelist.txt`` as
    ``"<title> : <url>"`` lines (one file open per call, not per link).

    :param html_content: HTML text of the chapter-index page (may be None)
    :return: list of [title, absolute_url] pairs; empty list if no content
    """
    if not html_content:
        return []
    soup = BeautifulSoup(html_content, 'html.parser')
    links = []
    for link in soup.select("dd > a"):
        # Check the raw href BEFORE concatenation: the original guarded the
        # already-prefixed URL (always truthy), and a missing href would have
        # raised TypeError on the string concatenation.
        raw_href = link.get('href')
        if not raw_href:
            continue
        title = link.text
        # Rebuild the title as "第<number><章...>"; assumes the anchor text
        # looks like "12.第十二章 ..." — TODO confirm against the site markup.
        title = "第" + title[:title.find(".")] + title[title.find("章"):]
        links.append([title, "https://www.sanjiangge.net" + raw_href])
    # Open the output file once for all links instead of once per link.
    if links:
        with open("novelist.txt", "a", encoding="utf-8") as f:
            for title, href in links:
                f.write(title + " : " + href + "\n")
    return links


def main():
    """Crawl chapter-index pages 1..39 of one novel and print every link found."""
    # URL template for this specific novel — change the book path to crawl another one.
    url_template = 'https://www.sanjiangge.net/book/32/32616_%d/index.html'
    for page in range(1, 40):
        page_url = url_template % page
        # Fetch the raw HTML, then extract the chapter links from it.
        html = fetch_page_content(page_url)
        chapter_links = parse_page_chapter(html)
        # An empty result simply prints nothing, same as the guarded loop.
        for entry in chapter_links:
            print(entry)



if __name__ == "__main__":
    main()