"""
爬虫思路：
（一）数据来源分析
    1、爬什么   章节名称、章节url
    2、去哪儿爬  URL = 'https://book.qidian.com/info/1033951330/'
        判断静态还是动态？ 源代码搜索
        静态的html ==> 文档搜索项

（二）爬虫代码实现
    1、发送请求
    2、获取数据
    3、解析数据
    4、保存数据
"""
import requests
from lxml import etree
import time
import 起点中文小说爬取 as my_func


def get_html(url, headers):
    """Fetch a page and return its body decoded as UTF-8.

    Args:
        url: Target page URL.
        headers: dict of HTTP request headers (e.g. User-Agent).

    Returns:
        The response text as str, or '' on any request failure.
    """
    try:
        # timeout keeps the crawler from hanging forever on a dead host.
        resp = requests.get(url=url, headers=headers, timeout=10)
        resp.raise_for_status()  # treat HTTP 4xx/5xx as failures, not content
        resp.encoding = 'utf-8'
        return resp.text
    except requests.RequestException:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate;
        # keep the original best-effort contract: empty string on failure.
        return ''


def parse_html(html):
    """Parse the book catalog page and extract chapter names and URLs.

    Args:
        html: Raw HTML text of the catalog page.

    Returns:
        A list of dicts like {'name': <chapter name>, 'url': 'https:...'};
        an empty list if the page cannot be parsed.
    """
    try:
        # Map the HTML text onto an element tree that supports XPath queries.
        tree = etree.HTML(html)
        # xpath() returns a list of matches: text() selects node text,
        # @href selects the attribute value.
        chapter_names = tree.xpath('//div[@class="volume"]/ul/li/h2/a/text()')
        chapter_urls = tree.xpath('//div[@class="volume"]/ul/li/h2/a/@href')
        # hrefs on the page are protocol-relative ('//...'), so prefix the scheme.
        return [
            {'name': name, 'url': 'https:' + url}
            for name, url in zip(chapter_names, chapter_urls)
        ]
    except Exception:
        # Narrowed from a bare `except:`; any parse failure yields an empty list.
        return []


# 调用另一个我写的Py文件“起点中文小说爬取”
def crawler_noval(chapters):
    """Download every chapter via the companion script, pausing between requests.

    Args:
        chapters: list of chapter dicts, each like {'name': ..., 'url': ...}.
    """
    delay_seconds = 2
    for chapter in chapters:
        # Delegate the per-chapter download to the helper module.
        my_func.main(chapter)
        # Polite crawling: rest between requests so we don't hammer the server.
        time.sleep(delay_seconds)


def main(book_url):
    """Crawl one book: fetch its catalog page, parse chapters, download each.

    Args:
        book_url: Book info page URL, with or without a trailing '#Catalog'
            fragment (previously the fragment was appended unconditionally,
            so callers that already included it produced '...#Catalog#Catalog').
    """
    # Append the catalog fragment only if the caller did not already include it.
    base_url = book_url if book_url.endswith('#Catalog') else book_url + '#Catalog'
    print(base_url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.48',
    }
    html = get_html(base_url, headers)
    chapters = parse_html(html)
    crawler_noval(chapters)



if __name__ == '__main__':
    # main() appends '#Catalog' itself, so pass the bare book info URL
    # (the old value ended in '#Catalog' and got the fragment duplicated).
    book_url = 'https://book.qidian.com/info/2750457/'
    main(book_url)
