import requests,re
import chardet,json
from bs4 import BeautifulSoup


def fetch_page_content(url):
    """
    Send an HTTP GET request and return the page's decoded HTML.

    :param url: URL of the page to fetch.
    :return: HTML text on HTTP 200, otherwise None (failure is printed).
    """
    # Browser-like headers: many novel sites block the default requests UA.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Cache-Control": "max-age=0"
    }
    try:
        # timeout prevents the crawler from hanging forever on a stalled host
        response = requests.get(url, headers=headers, timeout=15)
        # Check the status first so we don't run charset detection on error pages.
        if response.status_code != 200:
            print(f"请求失败，状态码: {response.status_code}")
            return None
        # Detect the real charset (such sites often mislabel GBK pages as others);
        # chardet may return None for encoding, in which case keep requests' guess.
        encoding = chardet.detect(response.content)['encoding']
        if encoding:
            response.encoding = encoding
        return response.text
    except requests.RequestException as e:
        print(f"请求发生异常: {e}")
        return None


def parse_page_chapter(html_content, output_path="novel_content.txt"):
    """
    Parse one chapter page and append "title + body" to a text file.

    :param html_content: HTML of the chapter page, or None (then no-op).
    :param output_path: file the chapter text is appended to
                        (default kept for backward compatibility).
    :return: the text that was written, or None if the page had no
             usable title/content (original raised IndexError here).
    """
    if not html_content:
        return None
    soup = BeautifulSoup(html_content, 'html.parser')
    titles = soup.find_all("h1")
    bodies = soup.find_all(id="chaptercontent")
    # Guard: error/placeholder pages may lack <h1> or #chaptercontent.
    if not titles or not bodies:
        return None
    title = titles[0].text
    # Drop any literal <p> tags that survived into the text.
    content = re.sub(r'<p>|</p>', '', bodies[0].text)
    # Strip the site's boilerplate/advert lines injected into every chapter.
    content = re.sub(r'君落花提示您：看后求收藏（三江阁网www.sanjiangge.net），接着再看更方便。', '', content)
    content = re.sub(r'无弹窗在线阅读本站域名 Www.sanjiangge.Com 手机同步阅读请访问 M.sanjiangge.Com', '', content)
    content = re.sub(r'免责声明：本文小说内容为转载作品，内容章节均由网友上传，与三江阁（http://www.sanjiangge.com/）无关，转载至本站只是为了宣传本书让更多读者欣赏。其原创性以及文中陈述文字和内容未经本站证实，对本文以及其中全部或者部分内容、文字的真实性、完整性、及时性本站不作任何保证或承诺，请读者仅作参考，并请自行核实相关内容。 ', '', content)
    res = title + content + "\n"
    # Append mode so successive chapters accumulate in one file.
    with open(output_path, "a", encoding="utf-8") as f:
        f.write(res)
    return res


def main():
    """
    Read the chapter URL list from novelist.txt, then fetch and save
    chapters START_CHAPTER .. START_CHAPTER + BATCH_SIZE - 1.

    Each line of novelist.txt is expected to look like "<title> : <url>".
    """
    START_CHAPTER = 3401  # index of the first chapter to crawl in this run
    BATCH_SIZE = 170      # how many chapters to crawl in this run

    urllist = []
    with open("novelist.txt", "r", encoding="utf-8") as f:
        for line in f:
            # keep only the URL part after the " : " separator
            urllist.append(line.split(" : ")[1].strip())

    for offset, url in enumerate(urllist[START_CHAPTER:START_CHAPTER + BATCH_SIZE]):
        print("第%d章开始了" % (START_CHAPTER + offset))
        # Fetch the page, then parse and append the chapter to the output file.
        html_content = fetch_page_content(url)
        parse_page_chapter(html_content)



if __name__ == "__main__":
    main()