# -*- coding: utf-8 -*-
# @Time    : 2025/3/10 10:03
# @Author  : cx
# @File    : XiaoShuoPaQu.py
# @Software: PyCharm

from urllib import request
from urllib import parse  # 编码要用到
from wsgiref.validate import check_iterator

from PIL.PdfParser import pdf_repr
from lxml import etree  # 解析HTML代码
# from bs4 import BeautifulSoup # BeautifulSoup 是一个用于解析 HTML 和 XML 文档的 Python 库
import requests
from pymysql import connect
from requests.exceptions import RequestException  # 抛出网络请求异常
import re  # 正则表达式
from http.cookiejar import CookieJar  # Cookie专用
import time
import os
from GongJuKu import MYSQL_CZ

# 随机字符需要的2个库
import random
import string

# 字符编码处理
import chardet
import re

# 解码
import base64

"""
www.biqgg.cc 此网站内容有删减
"""


class Cs:
    """Chapter scraper for www.qu04.cc (book 172104).

    Reads the book index to find the highest chapter number, then fetches
    chapter pages 1..max, strips site boilerplate lines from the text, and
    appends "title + body" to the output file in batches.

    NOTE(review): a second class also named ``Cs`` is defined later in this
    file and shadows this one at import time — one of them should be renamed.
    """

    # https://www.qu04.cc/book/172104/
    # www.biqgg.cc
    __xiaYiZhang = None         # next-chapter link placeholder (unused here)
    __zhenWen = ""              # text accumulated since the last flush to disk
    __jieShu = True             # loop flag for the disabled link-follow mode
    __shuMing = "择日走红.txt"  # output file name

    def __init__(self):
        self.__jieShu = True

    def __del__(self):
        pass

    def bao_cun(self, neiRong, filename):
        """Append *neiRong* to *filename* (UTF-8); return True on success."""
        try:
            with open(filename, "a", encoding="utf-8") as f:
                f.write(neiRong + "\n\n")  # two newlines separate chapters
                return True
        except PermissionError:
            print(f"错误：文件 (unknown) 被其他程序占用")
            return False
        except Exception as e:
            print(f"未知错误：{str(e)}")
            return False

    def qing_qiu_tou(self, referer):
        """Return request headers carrying a browser user-agent and *referer*."""
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36",
            "referer": referer,
        }
        return headers

    def _jie_ma(self, content, show_encoding=False):
        """Decode raw response bytes and parse them into an lxml tree.

        Uses chardet to guess the encoding, falls back to gbk, and exits the
        process when both attempts fail (preserves the original behavior).
        """
        encoding = chardet.detect(content)["encoding"]
        if show_encoding:
            print(f"检测到编码: {encoding}")  # debug: detected encoding
        try:
            return etree.HTML(content.decode(encoding))
        except UnicodeDecodeError:
            try:
                tree = etree.HTML(content.decode("gbk"))
                print("强转编码为gbk")
                return tree
            except Exception as e:
                print(f"解码失败: {e}")
                exit()

    # 查找书名 (look up the book)
    def get_shu_ku(self, url01):
        """Scrape every chapter of the book whose index page is *url01*."""
        url = url01
        headers = self.qing_qiu_tou(url)
        res2 = requests.get(url, headers=headers)
        htmltext = self._jie_ma(res2.content, show_encoding=True)

        # Optional debug dump of the parsed page.
        daYin = False
        if daYin:
            html_str = etree.tostring(
                htmltext,
                encoding="utf-8",
                pretty_print=True,
                method="html",
            ).decode("utf-8")
            print(html_str)

        # The last <dd> entry's href carries the highest chapter number,
        # e.g. ".../172104/123.html" -> "123".
        maxZhangJie = 0
        try:
            last_chapter_href = htmltext.xpath(
                '//dl/dd[not(contains(@class, "more pc_none"))][last()]/a/@href'
            )[0]
            maxZhangJie = last_chapter_href.split("/")[-1].split(".")[0]
            print(maxZhangJie)
        except IndexError:
            print("未找到章节链接，可能页面结构变化或章节未展开")
        maxZhangJie_int = int(maxZhangJie)
        if maxZhangJie_int > 0:
            jishu = 0
            # https://www.qu04.cc/book/172104/1.html
            base_url = "https://www.qu04.cc/book/172104/{}.html"
            # BUG FIX: the upper bound was exclusive (range(1, max)), which
            # silently skipped the final chapter; it is now inclusive.
            for chapter in range(1, maxZhangJie_int + 1):
                url = base_url.format(chapter)
                headers = self.qing_qiu_tou(url)
                res2 = requests.get(url, headers=headers)
                htmltext = self._jie_ma(res2.content)

                # Chapter title.
                zhangJieMing = htmltext.xpath('//div[@class="content"]/h1/text()')
                print(f"抓取: {zhangJieMing[0]}")
                self.__zhenWen = self.__zhenWen + zhangJieMing[0] + "\n"

                # Chapter body: every text node, minus site boilerplate lines.
                full_text = htmltext.xpath(
                    '//div[@id="chaptercontent"]/descendant::text()'
                )
                filter_keywords = ["请收藏本站", "点此报错", "加入书签"]
                clean_text = "\n".join(
                    line
                    for line in full_text
                    if not any(keyword in line for keyword in filter_keywords)
                ).strip()
                self.__zhenWen = self.__zhenWen + clean_text + "\n"

                # BUG FIX: the counter was never incremented, so the
                # batch-save branch below could never fire and the entire
                # book accumulated in memory until the final flush.
                jishu += 1
                if jishu > 50:
                    pd = self.bao_cun(self.__zhenWen, self.__shuMing)
                    if not pd:
                        print("出错啦")
                        with open(self.__shuMing, "a", encoding="utf-8") as f:
                            f.write(f"{chapter}出错啦,目录写入失败\n\n")
                    else:
                        self.__zhenWen = ""
                        print("保存50章")
                    jishu = 0

            # Flush whatever remains after the loop.
            pd = self.bao_cun(self.__zhenWen, self.__shuMing)
            if not pd:
                print("出错啦")
                with open(self.__shuMing, "a", encoding="utf-8") as f:
                    f.write(f"出错啦,最后写入失败\n\n")
            else:
                print("爬取并保存完成")

        # Disabled alternative: follow the "next chapter" link instead of
        # generating numbered URLs.
        daYin = False
        if daYin:
            next_chapter_link = htmltext.xpath(
                '//div[@class="Readpage pc_none"]/a[text()="下一章"]/@href'
            )
            if next_chapter_link:
                next_chapter_link = next_chapter_link[0]
                url = "https://www.qu04.cc/" + next_chapter_link
            else:
                self.__jieShu = False
            print(next_chapter_link)


"""
笔趣阁_小说抓取
"""


class BiQuGe:
    """Chapter scraper for www.biqvdu.com that follows "下一章" links.

    Chapter text on this site arrives Base64-encoded inside
    ``qsbs.bb('...')`` script calls; each page is decoded, cleaned of <p>
    tags and appended to the output file in batches of 100 chapters.
    """

    __xiaYiZhang = None         # next-chapter link placeholder (unused here)
    __zhenWen = ""              # text accumulated since the last flush to disk
    __jieShu = True             # becomes False when no next-chapter link exists
    __shuMing = "择日走红.txt"  # output file name

    def __init__(self):
        self.__jieShu = True

    def __del__(self):
        pass

    def bao_cun(self, neiRong, filename):
        """Append *neiRong* to *filename* (UTF-8); return True on success."""
        try:
            with open(filename, "a", encoding="utf-8") as f:
                f.write(neiRong + "\n\n")  # two newlines separate chapters
                return True
        except PermissionError:
            print(f"错误：文件 (unknown) 被其他程序占用")
            return False
        except Exception as e:
            print(f"未知错误：{str(e)}")
            return False

    def qing_qiu_tou(self, referer):
        """Return request headers carrying a browser user-agent and *referer*."""
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36",
            "referer": referer,
        }
        return headers

    # 查找书名 (look up the book)
    def get_shu_ku(self, url01):
        """Scrape chapters starting at *url01* until no next-chapter link exists.

        BUG FIX: the original fetched the first page once only to test for a
        next-chapter link; when the link was absent the while-loop never ran
        and the first page's content was lost (and otherwise the first page
        was fetched twice).  The loop below always processes the current page
        first and then follows the link.
        """
        url = url01
        headers = self.qing_qiu_tou(url)
        jishu = 0
        # Most recently followed link, kept as a string so error messages
        # never try to concatenate an empty xpath result list.
        last_link = ""

        while self.__jieShu:
            res2 = requests.get(url, headers=headers)

            # Detect the encoding, decode and parse; fall back to gbk.
            encoding = chardet.detect(res2.content)["encoding"]
            print(f"检测到编码01: {encoding}")  # debug output
            try:
                htmltext = etree.HTML(res2.content.decode(encoding))
            except UnicodeDecodeError:
                try:
                    htmltext = etree.HTML(res2.content.decode("gbk"))
                    print("强转编码为gbk")
                except Exception as e:
                    print(f"解码失败: {e}")
                    exit()

            # Next-chapter link; clear the loop flag when absent.
            next_chapter_link = htmltext.xpath(
                '//div[@class="read_btn"]/a[text()="下一章"]/@href'
            )
            if next_chapter_link:
                last_link = next_chapter_link[0]
                url = "https://www.biqvdu.com/" + last_link
            else:
                self.__jieShu = False

            # Chapter title (fallback label when the node is missing).
            chapter_name = htmltext.xpath('//div[@class="word_read"]/h3/text()')
            if chapter_name:
                chapter_name = chapter_name[0].strip()
            else:
                chapter_name = "自定义"

            # Extract and decode the Base64 payloads from every <script>.
            script_tags = htmltext.xpath("//script/text()")
            base64_pattern = re.compile(r"qsbs\.bb\('([^']+)'\)")
            decoded_text = []
            for script in script_tags:
                for encoded_str in base64_pattern.findall(script):
                    try:
                        # Restore Base64 padding before decoding.
                        pad = len(encoded_str) % 4
                        if pad != 0:
                            encoded_str += "=" * (4 - pad)
                        decoded_byte = base64.b64decode(encoded_str)
                        decoded_text.append(decoded_byte.decode("utf-8"))
                    except Exception as e:
                        print(f"解码失败: {e}")

            # Concatenate the pieces and strip the <p> markup.
            full_content = "".join(decoded_text)
            clean_content = re.sub(r"<\/?p>", "", full_content)
            self.__zhenWen = self.__zhenWen + chapter_name + "\r\n"
            self.__zhenWen = self.__zhenWen + clean_content + "\r\n"
            print(f"爬取: {chapter_name}")
            print(f"下一章: {url}")
            print(self.__jieShu)

            jishu = jishu + 1
            if jishu > 100:
                pd = self.bao_cun(self.__zhenWen, self.__shuMing)
                if not pd:
                    print("出错啦")
                    with open(self.__shuMing, "a", encoding="utf-8") as f:
                        f.write(last_link + "出错啦,目录写入失败" + "\n\n")
                else:
                    self.__zhenWen = ""
                    print("保存100章")
                jishu = 0

        # Flush whatever remains after the loop.
        pd = self.bao_cun(self.__zhenWen, self.__shuMing)
        if not pd:
            print("出错啦")
            with open(self.__shuMing, "a", encoding="utf-8") as f:
                f.write(last_link + "出错啦,目录写入失败" + "\n\n")
        else:
            print("爬取并保存完成")


class XiaoShuo_3qxsa:
    """Scraper for a book hosted on 3qxsa.cc / meiyee.cc.

    ``get_zhang_jie`` walks the chapter list of an index page and appends
    "title + body" pairs to the output file ``__shuMing``.
    """

    __daMing = "https://l4l4.3qxsa.cc/xiaoshuo/629142/"  # book index URL
    __shuMing = "大明：开局请朱元璋退位.txt"  # output file name

    def __init__(self):
        # Default referer is the book's own index page.
        self.__referer = self.__daMing

    def qing_qiu_tou(self, referer):
        """Return request headers carrying a browser user-agent and *referer*."""
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36",
            "referer": referer,
        }
        return headers

    def bao_cun(self, neiRong, filename):
        """Append *neiRong* plus a blank separator line; True on success."""
        try:
            with open(filename, "a", encoding="utf-8") as f:
                f.write(neiRong + "\n\n")  # two newlines separate chapters
                return True
        except PermissionError:
            print(f"错误：文件 (unknown) 被其他程序占用")
            return False
        except Exception as e:
            print(f"未知错误：{str(e)}")
            return False

    def bao_cun_mu_lu(self, neiRong, filename):
        """Append *neiRong* verbatim (no separator); True on success."""
        try:
            with open(filename, "a", encoding="utf-8") as f:
                f.write(neiRong)
                return True
        except PermissionError:
            print(f"错误：文件 (unknown) 被其他程序占用")
            return False
        except Exception as e:
            print(f"未知错误：{str(e)}")
            return False

    def get_mu_lu(self, url):
        """Fetch the index page and print the chapter-group ids found in the
        ``#indexselect`` drop-down options."""
        headers = self.qing_qiu_tou(self.__referer)
        res2 = requests.get(url, headers=headers)

        # Detect the encoding, decode and parse; fall back to gbk.
        encoding = chardet.detect(res2.content)["encoding"]
        try:
            htmltext = etree.HTML(res2.content.decode(encoding))
        except UnicodeDecodeError:
            try:
                htmltext = etree.HTML(res2.content.decode("gbk"))
            except Exception as e:
                print(f"解码失败: {e}")
                exit()

        # Debug dump of the parsed document.
        html_str = etree.tostring(
            htmltext,
            encoding="utf-8",
            pretty_print=True,
            method="html",
        ).decode("utf-8")
        print(html_str)

        # Each option value looks like "/xx/<id>/..."; keep the third path
        # segment as the id.
        option_values = htmltext.xpath('//select[@id="indexselect"]/option/@value')
        extracted_ids = []
        for value in option_values:
            parts = value.split("/")
            if len(parts) >= 3:
                extracted_ids.append(parts[2])
        print("字符串分割结果:", extracted_ids)

    def get_nei_rong(self, url):
        """Download one chapter page and return its cleaned body text."""
        headers = self.qing_qiu_tou(self.__referer)
        res2 = requests.get(url, headers=headers)

        html = etree.HTML(res2.content.decode())

        # Locate the body container (div#nr > div#nr1).
        content_div = html.xpath('//div[@id="nr"]/div[@id="nr1"]')[0]

        # Merge every nested text node into one string.
        raw_text = content_div.xpath("string(.)")

        # Collapse whitespace runs to single newlines; drop full-width spaces.
        clean_text = re.sub(
            r"\s+",
            "\n",
            raw_text.replace("\u3000", ""),
        ).strip()

        # Keep non-empty paragraphs, skipping "……" filler lines.
        paragraphs = [
            p.strip()
            for p in clean_text.split("\n")
            if p.strip() and not p.startswith("……")
        ]

        return "\n".join(paragraphs)

    def get_zhang_jie(self, url):
        """Walk the chapter list at *url*: write each title, fetch its body
        via :meth:`get_nei_rong`, and append both to the output file."""
        headers = self.qing_qiu_tou(self.__referer)
        res2 = requests.get(url, headers=headers)

        # Detect the encoding, decode and parse; fall back to gbk.
        encoding = chardet.detect(res2.content)["encoding"]
        try:
            htmltext = etree.HTML(res2.content.decode(encoding))
        except UnicodeDecodeError:
            try:
                htmltext = etree.HTML(res2.content.decode("gbk"))
            except Exception as e:
                print(f"解码失败: {e}")
                exit()

        # Chapter links and their titles, in document order.
        option_values = htmltext.xpath('//ul[@class="chapter"]//li/a/@href')
        option_mz = htmltext.xpath('//ul[@class="chapter"]//li/a/@title')
        # BUG FIX: the original indexed titles with a counter that started
        # at 1, pairing every link with the NEXT chapter's title and raising
        # IndexError on the final link; zip keeps link and title aligned.
        for link, var in zip(option_values, option_mz):
            pd = self.bao_cun(var, self.__shuMing)
            if not pd:
                print("出错啦")
                with open(self.__shuMing, "a", encoding="utf-8") as f:
                    f.write(link + "出错啦,目录写入失败" + "\n\n")
                break
            url = "http://www.meiyee.cc" + link
            neiRong = self.get_nei_rong(url)
            pd = self.bao_cun(neiRong, self.__shuMing)
            if not pd:
                print("出错啦")
                with open(self.__shuMing, "a", encoding="utf-8") as f:
                    f.write(link + "出错啦,写入失败" + "\n\n")
                break
            print(link)
            print(f"抓取{var}中")


class Tianshuwx:
    """Single-page scraper for a qsbs-style novel site.

    Fetches one chapter page, decodes the Base64-packed body text found in
    ``qsbs.bb('...')`` script calls, and prints the chapter name together
    with the next-chapter link.
    """

    # Most recently seen next-chapter link (or None).
    __xiaYiZhang = None

    def __init__(self):
        pass

    def __del__(self):
        pass

    def qing_qiu_tou(self, referer):
        """Build request headers (browser user-agent plus *referer*)."""
        return {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36",
            "referer": referer,
        }

    # 查找书名 (look up the book)
    def get_shu_ku(self, url):
        """Fetch *url*, print its decoded body text and next-chapter link."""
        resp = requests.get(url, headers=self.qing_qiu_tou(url))
        raw = resp.content

        # Guess the page encoding; fall back to gbk, exiting when both fail.
        guessed = chardet.detect(raw)["encoding"]
        print(f"检测到编码: {guessed}")  # debug output
        try:
            tree = etree.HTML(raw.decode(guessed))
        except UnicodeDecodeError:
            try:
                tree = etree.HTML(raw.decode("gbk"))
                print("强转编码为gbk")
            except Exception as e:
                print(f"解码失败: {e}")
                exit()

        # Chapter title (None when the node is missing).
        title_nodes = tree.xpath('//div[@class="word_read"]/h3/text()')
        chapter_name = title_nodes[0].strip() if title_nodes else None

        # Next-chapter link (None when absent); remember it on the instance.
        link_nodes = tree.xpath(
            '//div[@class="read_btn"]/a[text()="下一章"]/@href'
        )
        next_chapter_link = link_nodes[0] if link_nodes else None
        self.__xiaYiZhang = next_chapter_link

        # The body text arrives Base64-encoded inside qsbs.bb('...') calls.
        pattern = re.compile(r"qsbs\.bb\('([^']+)'\)")
        pieces = []
        for script in tree.xpath("//script/text()"):
            for chunk in pattern.findall(script):
                try:
                    # Restore Base64 padding before decoding.
                    missing = len(chunk) % 4
                    if missing != 0:
                        chunk += "=" * (4 - missing)
                    pieces.append(base64.b64decode(chunk).decode("utf-8"))
                except Exception as e:
                    print(f"解码失败: {e}")

        # Join the pieces, strip <p> markup, and show everything.
        body = re.sub(r"<\/?p>", "", "".join(pieces))
        print(body)

        print("章节名字:", chapter_name)
        print("下一章链接:", next_chapter_link)

    def cs(self):
        """Smoke-test hook."""
        print("00")


class Cs:
    """Experimental scraper targeting www.xhytd.com (and qu04.cc in
    ``get_shu_ku``).

    ``cs01`` searches the site for a novel by name, ``ls`` locates the
    matching row and its latest-chapter link, and ``cs`` inspects a single
    chapter page.

    NOTE(review): this class shadows an earlier class of the same name in
    this file — one of them should be renamed.
    """

    __xiaYiZhang = None            # next-chapter link placeholder (unused)
    __zhenWen = ""                 # text accumulated since the last flush
    __jieShu = True                # loop/termination flag
    __shuMing = "我的末日列车.txt"  # output file name
    # __url = 'http://www.xhytd.com/131/131552/51808340.html'
    __url = "http://www.xhytd.com/153/153793/59210040.html"  # sample chapter

    def __init__(self):
        self.__jieShu = True

    def __del__(self):
        pass

    def bao_cun(self, neiRong, filename):
        """Append *neiRong* to *filename* (UTF-8); return True on success."""
        try:
            with open(filename, "a", encoding="utf-8") as f:
                f.write(neiRong + "\n\n")  # two newlines separate chapters
                return True
        except PermissionError:
            print(f"错误：文件 (unknown) 被其他程序占用")
            return False
        except Exception as e:
            print(f"未知错误：{str(e)}")
            return False

    def qing_qiu_tou(self, referer):
        """Return request headers carrying a browser user-agent and *referer*."""
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36",
            "referer": referer,
        }
        return headers

    def _jie_ma(self, content, show_encoding=False):
        """Decode raw response bytes and parse them into an lxml tree.

        Uses chardet to guess the encoding, falls back to gbk, and exits the
        process when both attempts fail (preserves the original behavior).
        """
        encoding = chardet.detect(content)["encoding"]
        if show_encoding:
            print(f"检测到编码: {encoding}")  # debug: detected encoding
        try:
            return etree.HTML(content.decode(encoding))
        except UnicodeDecodeError:
            try:
                tree = etree.HTML(content.decode("gbk"))
                print("强转编码为gbk")
                return tree
            except Exception as e:
                print(f"解码失败: {e}")
                exit()

    # 查找书名 (look up the book)
    def get_shu_ku(self, url01):
        """Scrape every chapter of the qu04.cc book whose index is *url01*."""
        url = url01
        headers = self.qing_qiu_tou(url)
        res2 = requests.get(url, headers=headers)
        htmltext = self._jie_ma(res2.content, show_encoding=True)

        # Optional debug dump of the parsed page.
        daYin = False
        if daYin:
            html_str = etree.tostring(
                htmltext,
                encoding="utf-8",
                pretty_print=True,
                method="html",
            ).decode("utf-8")
            print(html_str)

        # The last <dd> entry's href carries the highest chapter number.
        maxZhangJie = 0
        try:
            last_chapter_href = htmltext.xpath(
                '//dl/dd[not(contains(@class, "more pc_none"))][last()]/a/@href'
            )[0]
            maxZhangJie = last_chapter_href.split("/")[-1].split(".")[0]
            print(maxZhangJie)
        except IndexError:
            print("未找到章节链接，可能页面结构变化或章节未展开")
        maxZhangJie_int = int(maxZhangJie)
        if maxZhangJie_int > 0:
            jishu = 0
            # https://www.qu04.cc/book/172104/1.html
            base_url = "https://www.qu04.cc/book/172104/{}.html"
            # BUG FIX: the upper bound was exclusive (range(1, max)), which
            # silently skipped the final chapter; it is now inclusive.
            for chapter in range(1, maxZhangJie_int + 1):
                url = base_url.format(chapter)
                headers = self.qing_qiu_tou(url)
                res2 = requests.get(url, headers=headers)
                htmltext = self._jie_ma(res2.content)

                # Chapter title.
                zhangJieMing = htmltext.xpath('//div[@class="content"]/h1/text()')
                print(f"抓取: {zhangJieMing[0]}")
                self.__zhenWen = self.__zhenWen + zhangJieMing[0] + "\n"

                # Chapter body: every text node, minus site boilerplate lines.
                full_text = htmltext.xpath(
                    '//div[@id="chaptercontent"]/descendant::text()'
                )
                filter_keywords = ["请收藏本站", "点此报错", "加入书签"]
                clean_text = "\n".join(
                    line
                    for line in full_text
                    if not any(keyword in line for keyword in filter_keywords)
                ).strip()
                self.__zhenWen = self.__zhenWen + clean_text + "\n"

                # BUG FIX: the counter was never incremented, so the
                # batch-save branch below could never fire and the entire
                # book accumulated in memory until the final flush.
                jishu += 1
                if jishu > 50:
                    pd = self.bao_cun(self.__zhenWen, self.__shuMing)
                    if not pd:
                        print("出错啦")
                        with open(self.__shuMing, "a", encoding="utf-8") as f:
                            f.write(f"{chapter}出错啦,目录写入失败\n\n")
                    else:
                        self.__zhenWen = ""
                        print("保存50章")
                    jishu = 0

            # Flush whatever remains after the loop.
            pd = self.bao_cun(self.__zhenWen, self.__shuMing)
            if not pd:
                print("出错啦")
                with open(self.__shuMing, "a", encoding="utf-8") as f:
                    f.write(f"出错啦,最后写入失败\n\n")
            else:
                print("爬取并保存完成")

        # Disabled alternative: follow the "next chapter" link instead of
        # generating numbered URLs.
        daYin = False
        if daYin:
            next_chapter_link = htmltext.xpath(
                '//div[@class="Readpage pc_none"]/a[text()="下一章"]/@href'
            )
            if next_chapter_link:
                next_chapter_link = next_chapter_link[0]
                url = "https://www.qu04.cc/" + next_chapter_link
            else:
                self.__jieShu = False
            print(next_chapter_link)

    def ls(self, shuju, xsmz):
        """Search the HTML string *shuju* for the novel titled *xsmz* and
        print the latest-chapter link from its table row."""
        tree = etree.HTML(shuju)

        # Every <a> holding a novel name in the search results.
        novel_links = tree.xpath('//span[@class="s2 wid"]/a')

        for link in novel_links:
            text = link.text
            if text and text == xsmz:
                print(f'匹配到 : {xsmz}')
                # Climb to the enclosing <li>, then look for the
                # latest-chapter <a> inside it.
                li_element = link.getparent().getparent()
                chapter_link = li_element.xpath('.//span[@class="s3 wid3"]/a/@href')
                if chapter_link:
                    print(f'最新章节链接: {chapter_link[0]}')

    def cs(self):
        """Inspect the sample chapter page at ``__url`` (debug helper)."""
        url = self.__url

        headers = self.qing_qiu_tou(url)
        res2 = requests.get(url, headers=headers)
        htmltext = self._jie_ma(res2.content, show_encoding=True)

        # Debug dump of the parsed page.
        daYin = True
        if daYin:
            html_str = etree.tostring(
                htmltext,
                encoding="utf-8",
                pretty_print=True,
                method="html",
            ).decode("utf-8")
            print(html_str)

        # Next-chapter link from the bottom navigation bar.
        next_chapter_link = htmltext.xpath(
            '//div[@class="bottem1"]/a[text()="下一章"]/@href'
        )
        if next_chapter_link:
            next_chapter_link = next_chapter_link[0]
            url = "https://www.biqvdu.com/" + next_chapter_link
        else:
            self.__jieShu = False
        print(next_chapter_link)

        # Disabled: derive the maximum chapter number from the index list.
        xuanZe = False
        if xuanZe:
            try:
                last_chapter_href = htmltext.xpath(
                    '//dl/dd[not(contains(@class, "more pc_none"))][last()]/a/@href'
                )[0]
                maxZhangJie = last_chapter_href.split("/")[-1].split(".")[0]
                print(maxZhangJie)
            except IndexError:
                print("未找到章节链接，可能页面结构变化或章节未展开")

        # Chapter title.
        zhangJieMing = htmltext.xpath('//div[@class="bookname"]/h1/text()')
        print(zhangJieMing[0])

        # Chapter body text nodes.
        # NOTE(review): the fixed indexes below assume the page has at
        # least 86 text nodes and will raise IndexError otherwise — debug
        # code, confirm before reuse.
        full_text = htmltext.xpath('//div[@id="content"]/descendant::text()')
        print(full_text[3])
        print(full_text[85])
        for aa, isi in enumerate(full_text):
            if isi == "最新网址：www.xhytd.com":
                print(aa)

        print("full_text")

        # Locate the <p> holding the site watermark, then read its
        # following sibling, which carries the real text.
        nodes = htmltext.xpath(
            '//div[@id="content"]/p[contains(text(), "天才一秒记住本站地址")]'
        )[0]
        next_p = nodes.getnext()

        if next_p is not None and next_p.tag == "p":
            # Normalize nbsp and line endings, then trim.
            content = etree.tostring(next_p, method="text", encoding="unicode")
            content = content.replace("\u00a0", "")
            content = content.replace("\r\n", "\n")
            content = content.strip()
            print(content)
        else:
            print("未找到目标内容")

        print("ok_ok")

    def cs01(self, xiaoShuoMing):
        """Search the site for *xiaoShuoMing*; return the result page HTML.

        NOTE(review): the referer header is None here — requests drops
        None-valued headers, so the search request carries no referer.
        """
        url = "http://www.xhytd.com/search.html?name=" + xiaoShuoMing
        referer = None
        headers = self.qing_qiu_tou(referer)
        res2 = requests.get(url, headers=headers)
        htmltext = self._jie_ma(res2.content, show_encoding=True)

        # Serialize the parsed page back to a string for the caller.
        daYin = True
        html_str = ""
        if daYin:
            html_str = etree.tostring(
                htmltext,
                encoding="utf-8",
                pretty_print=True,
                method="html",
            ).decode("utf-8")
            print(html_str)
        return html_str



if __name__ == "__main__":
    # Entry point: search xhytd.com for the novel by name, then locate its
    # latest-chapter link inside the returned search-result HTML.
    print("ok")
    book_title = "我的末日列车"
    scraper = Cs()
    search_html = scraper.cs01(book_title)
    scraper.ls(search_html, book_title)
