# 【一】导入模块
import os.path

import requests
from lxml import etree
from fake_useragent import UserAgent


# 【二】写爬虫程序
class SpiderBiQuGe(object):
    """Scrape a novel's index page and chapter pages from www.bqgie.cc.

    Raw page HTML is cached under ``novel/source/<novel_name>/``; parsed
    novel metadata/text is written under ``novel/novel/<novel_name>/``.
    Files already on disk are never overwritten, so the crawl is resumable.
    """

    def __init__(self):
        # Random browser User-Agent so requests look like a real client.
        self.headers = {
            "User-Agent": UserAgent().random
        }
        self.novel_name = ""
        self.area = "https://www.bqgie.cc"
        # Project root: the directory containing this file.
        self.BASE_DIR = os.path.dirname(__file__)
        # Raw HTML cache: one file per crawled page.
        self.SOURCE_DIR = os.path.join(self.BASE_DIR, "novel", "source")
        # Output directory for parsed novel content.
        self.NOVEL_DIR = os.path.join(self.BASE_DIR, "novel", "novel")

    # Reads and writes the per-novel cache files on disk.
    def handler(self, file_name, novel_name, data="", mode="a", encoding="utf8", tag="source"):
        """Write *data* to, or read it back from, a per-novel cache file.

        :param file_name: file stem (suffix is chosen from *tag*).
        :param novel_name: sub-directory the file lives in.
        :param data: text to write (write modes only).
        :param mode: "a"/"w" to write, anything else (e.g. "r") to read.
        :param encoding: text encoding of the file.
        :param tag: "source" -> SOURCE_DIR + ".html", "novel" -> NOVEL_DIR + ".text".
        :return: file content for read modes (None if the file does not
            exist yet); None for write modes.
        """
        if tag == "source":
            file_dir = os.path.join(self.SOURCE_DIR, novel_name)
            file_last = "html"
        elif tag == "novel":
            file_dir = os.path.join(self.NOVEL_DIR, novel_name)
            file_last = "text"
        else:
            file_dir = self.SOURCE_DIR
            file_last = ""
        os.makedirs(file_dir, exist_ok=True)
        file_path = os.path.join(file_dir, f"{file_name}.{file_last}")
        display_name = f"{file_name}.{file_last}"
        if mode in ("a", "w"):
            # BUG FIX: the original opened an existing file with the caller's
            # mode *before* reporting it as already present, so mode="w"
            # truncated (destroyed) the cached file it claimed to preserve.
            # Check existence first and never reopen existing files.
            if os.path.exists(file_path):
                print("当前文件 : ", display_name, "已存在!")
                return None
            with open(file=file_path, mode=mode, encoding=encoding) as fp:
                fp.write(data)
            print("当前文件 : ", display_name, "保存完成!")
            return None
        # Read path: return None instead of raising FileNotFoundError when
        # nothing has been cached yet (the original crashed here).
        if not os.path.exists(file_path):
            return None
        with open(file=file_path, mode=mode, encoding=encoding) as fp:
            return fp.read()

    def create_tree(self, target_url):
        """GET *target_url* and return ``(raw html text, parsed lxml tree)``.

        :raises requests.HTTPError: on a non-2xx response (fail fast instead
            of caching and parsing an error page).
        """
        response = requests.get(url=target_url, headers=self.headers)
        response.raise_for_status()
        # NOTE(review): requests may fall back to ISO-8859-1 when the server
        # omits a charset; if Chinese text comes back garbled, set
        # ``response.encoding = response.apparent_encoding`` here — confirm
        # against the live site.
        page_text = response.text
        return page_text, etree.HTML(page_text)

    # Crawl the novel's index (home) page.
    def spider_index(self, book_id="32286"):
        """Download the novel's index page, cache its raw HTML, and write a
        small metadata summary (title / author / status / update info).

        :param book_id: site-specific book id; defaults to the originally
            hard-coded novel so existing callers are unaffected.
        """
        target_url = f"{self.area}/book/{book_id}/"
        page_text, tree = self.create_tree(target_url=target_url)
        novel_name = tree.xpath('//div[@class="info"]/h1/text()')[0].strip()
        # The info <div> holds author / status / update-time / latest-chapter
        # spans, in that order (latest chapter is the last span).
        novel_info = tree.xpath('//div[@class="info"]/div[2]/span')
        author_name = novel_info[0].xpath("./text()")[0]
        is_close = novel_info[1].xpath("./text()")[0]
        update_time = novel_info[2].xpath("./text()")[0]
        last_update = f'最新：{novel_info[-1].xpath("./a/text()")[0]}({self.area + novel_info[-1].xpath("./a/@href")[0]})'
        data = f'''
                {novel_name}
        {author_name}
        {is_close}
        {update_time}
        {last_update}
        '''
        self.novel_name = novel_name
        # Cache the raw index HTML for spider_capter() to parse later.
        self.handler(
            file_name="index",
            novel_name=novel_name,
            data=page_text
        )
        # Write the human-readable metadata summary.
        self.handler(
            file_name=novel_name,
            novel_name=novel_name,
            data=data,
            tag="novel"
        )

    def spider_capter(self, novel_name="斗破苍穹"):
        """Crawl every chapter page listed on the cached index page.

        :param novel_name: novel whose cached index to read; defaults to the
            originally hard-coded title for backward compatibility.
        """
        self.novel_name = novel_name
        # Re-read the index HTML previously saved by spider_index().
        index_page_text = self.handler(file_name="index", novel_name=self.novel_name, mode="r")
        tree = etree.HTML(index_page_text)
        # Chapter links live in two places under the same <dl>: the visible
        # <dd> list and a hidden <span> block of extra <dd> entries.
        dd_list = tree.xpath("/html/body/div[5]/dl/dd")
        dd_list.extend(tree.xpath("/html/body/div[5]/dl/span/dd"))
        for dd in dd_list:
            detail_url = self.area + dd.xpath("./a/@href")[0]
            detail_title = dd.xpath("./a/text()")[0]
            # Skip the "show hidden chapters" placeholder link.
            if "dd_show" in detail_url:
                continue
            # NOTE: a separate name for the chapter tree — the original
            # rebound ``tree`` inside the loop, shadowing the index tree.
            page_text, _detail_tree = self.create_tree(target_url=detail_url)
            # handler() skips files that already exist, so reruns resume.
            self.handler(
                file_name=detail_title,
                novel_name=self.novel_name,
                data=page_text
            )

    def main(self):
        """Entry point: uncomment spider_index() for the first run to build
        the index cache, then spider_capter() downloads the chapters."""
        # self.spider_index()
        self.spider_capter()


if __name__ == '__main__':
    # Run the crawler only when executed as a script, not on import.
    SpiderBiQuGe().main()
