# -*- coding: utf-8 -*-
# @Time    : 2025/4/4 21:26
# @Author  : cx
# @File    : xhytd.py
# @Software: PyCharm
# http://www.xhytd.com/search.html?name=%E6%9C%AB%E6%97%A5%3A%E6%88%91%E6%89%93%E9%80%A0%E6%97%A0%E9%99%90%E5%88%97%E8%BD%A6  我的末日列车
# http://www.xhytd.com/ 小说爬虫

# 爬虫数据抓取
from lxml import etree  # 解析HTML代码
import requests
#字符编码处理
import chardet
# 正则表达式
import re
#解码
import base64

class XHYTD:
    """Scraper for novels hosted on www.xhytd.com.

    Typical use: call ``setCanShu()`` with the URL of a chapter page and a
    novel title, then ``nei_rong()`` to crawl chapter-by-chapter, appending
    the text to ``xs/<title>.text``.  ``cha_zhao()`` searches the site for a
    novel by exact title and returns its latest-chapter URL.
    """

    # Unused; kept only so existing code poking at the attribute keeps working.
    __xiaYiZhang = None
    # Accumulated novel text not yet flushed to disk.
    __zhenWen = ""
    # Text of the chapter currently being assembled.
    __zhangJie = ""
    # Crawl-loop flag: True while there is another chapter to fetch.
    __jieShu = None
    # Output file path, set by setCanShu().
    __shuMing = ""
    # URL of the next chapter page to fetch.
    __url = ''

    def __init__(self):
        """Constructor: the crawl flag starts False until setCanShu() runs."""
        self.__jieShu = False

    def __del__(self):
        """Destructor (no cleanup needed)."""
        pass

    def setCanShu(self, url, shuMing):
        """Configure the crawl parameters.

        :param url: URL of the novel's first chapter; must belong to the
            "www.xhytd.com" site.
        :param shuMing: novel title, used to build the output filename
            ``xs/<shuMing>.text``.
        :return: True when the URL is accepted, False otherwise.
        """
        if "www.xhytd.com" not in url:
            return False
        self.__url = url
        self.__shuMing = "xs/" + shuMing + ".text"
        self.__jieShu = True
        return True

    def bao_cun(self, neiRong, filename):
        """Append text to *filename* (the file is created if missing).

        :param neiRong: text to append; two newlines are added as a
            chapter separator.
        :param filename: complete target filename including extension.
        :return: True on success, False on any error.
        """
        try:
            with open(filename, 'a', encoding='utf-8') as f:
                f.write(neiRong + '\n\n')  # two newlines separate chapters
            return True
        except PermissionError:
            # BUG FIX: the message previously did not interpolate the filename.
            print(f"错误：文件 {filename} 被其他程序占用")
            return False
        except Exception as e:
            # Broad catch is deliberate: saving must never kill the crawl loop.
            print(f"未知错误：{str(e)}")
            return False

    def qing_qiu_tou(self, referer):
        """Build the HTTP request headers.

        :param referer: referring page URL (may be None for the first request).
        :return: dict with a desktop-Chrome user-agent and the referer.
        """
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36",
            "referer": referer
        }
        return headers

    def __jie_ma(self, content):
        """Detect *content*'s encoding and parse it into an lxml tree.

        Falls back to gbk when the detected encoding fails — including when
        chardet returns None, in which case ``decode(None)`` raises TypeError
        rather than UnicodeDecodeError (BUG FIX: the original only caught
        UnicodeDecodeError).  Exits the process when both attempts fail,
        mirroring the original behaviour.

        :param content: raw response body bytes.
        :return: lxml root Element of the parsed page.
        """
        encoding = chardet.detect(content)["encoding"]
        print(f"检测到编码: {encoding}")  # debug: show detected encoding
        try:
            return etree.HTML(content.decode(encoding))
        except (UnicodeDecodeError, TypeError, LookupError):
            # Fall back to the most common Chinese legacy encoding.
            try:
                tree = etree.HTML(content.decode("gbk"))
                print('强转编码为gbk')
                return tree
            except Exception as e:
                print(f"解码失败: {e}")
                exit()

    def cha_zhao(self, xiaoShuoMing):
        """Search the site for *xiaoShuoMing* and find its latest chapter.

        :param xiaoShuoMing: exact novel title to match against search hits.
        :return: absolute URL of the latest chapter, or None when no exact
            match with a chapter link is found.  (BUG FIX: the original
            stored the URL in a dead local variable and returned nothing.)
        """
        url = "http://www.xhytd.com/search.html?name=" + xiaoShuoMing
        res2 = requests.get(url, headers=self.qing_qiu_tou(None))
        htmltext = self.__jie_ma(res2.content)

        # Each search hit's title is an <a> inside <span class="s2 wid">.
        for link in htmltext.xpath('//span[@class="s2 wid"]/a'):
            if link.text and link.text == xiaoShuoMing:
                print(f'匹配到 : {xiaoShuoMing}')
                # Climb to the enclosing result row, then locate the
                # latest-chapter link inside it.
                li_element = link.getparent().getparent()
                chapter_link = li_element.xpath('.//span[@class="s3 wid3"]/a/@href')
                if chapter_link:
                    print(f'最新章节链接: http://www.xhytd.com{chapter_link[0]}')
                    return "http://www.xhytd.com" + chapter_link[0]
        return None

    def nei_rong(self):
        """Crawl chapters starting at the configured URL.

        Follows the "下一章" (next chapter) link page after page, appending
        title + body text to the output file every 50 chapters and once more
        at the end.  Requires a prior successful ``setCanShu()`` call.
        :return: None
        """
        url = self.__url
        referer = None
        ci = 0  # chapters collected since the last flush to disk
        while self.__jieShu:
            res2 = requests.get(url, headers=self.qing_qiu_tou(referer))
            referer = url  # the next request claims to come from this page
            htmltext = self.__jie_ma(res2.content)

            # The "下一章" link both supplies the next URL and decides whether
            # the crawl continues: on the final chapter it points at the book
            # index (no ".html"), which terminates the loop.
            next_chapter_link = htmltext.xpath('//div[@class="bottem1"]/a[text()="下一章"]/@href')
            if next_chapter_link:
                next_chapter_link = next_chapter_link[0]
                url = "http://www.xhytd.com" + next_chapter_link
            print(next_chapter_link)
            # Works for both the str and empty-list cases ("html" in [] is False).
            self.__jieShu = "html" in next_chapter_link

            # Chapter title.
            zhangJieMing = htmltext.xpath('//div[@class="bookname"]/h1/text()')
            if zhangJieMing:
                print(zhangJieMing[0])
                self.__zhangJie = self.__zhangJie + zhangJieMing[0] + "\r\n"
            else:
                print("章节名错误")

            # Chapter body: all descendant text nodes of div#content.
            full_text = htmltext.xpath('//div[@id="content"]/descendant::text()')
            # The first 3 text nodes are site boilerplate; copy from the 4th
            # node onward, stopping at the trailing ad line.
            xuh = 3
            for aa, isi in enumerate(full_text):
                if isi == "最新网址：www.xhytd.com":
                    print(aa)
                    break
                # BUG FIX: guard against indexing past the end when the
                # ad-line marker is missing from the page.
                if xuh >= len(full_text):
                    break
                self.__zhangJie = self.__zhangJie + full_text[xuh] + "\n"
                xuh = xuh + 1
            self.__zhenWen = self.__zhenWen + self.__zhangJie
            self.__zhangJie = ""

            ci = ci + 1
            # Periodic flush so a crash mid-crawl loses at most ~50 chapters.
            # BUG FIX: was `ci == 50`, which never fired again after one
            # failed save left ci above the threshold.
            if ci >= 50:
                if self.bao_cun(self.__zhenWen, self.__shuMing):
                    self.__zhenWen = ""
                    ci = 0
                    print('保存50章')
                else:
                    print("出错啦")
                    with open(self.__shuMing, 'a', encoding='utf-8') as f:
                        f.write(f"{self.__shuMing}出错啦,目录写入失败\n\n")

        # Final flush of whatever is left after the loop ends.
        if self.bao_cun(self.__zhenWen, self.__shuMing):
            print("全部保存成功")
        else:
            print("出错啦")
            with open(self.__shuMing, 'a', encoding='utf-8') as f:
                f.write(f"{self.__shuMing}最后50章出错啦,目录写入失败\n\n")
