'''
Scrape a complete novel (every chapter) from zanghaihua.org and save each
chapter as a separate text file.
'''

import asyncio
import os

import aiofiles
import aiohttp
import requests
from lxml import etree

def get_every_chapter_url(url):
    """Fetch the book's table-of-contents page and return every chapter href.

    Args:
        url: URL of the book's index page
             (e.g. 'https://www.zanghaihua.org/book/41120/').

    Returns:
        list[str]: relative chapter hrefs such as '3538328.html', in page order.

    Raises:
        requests.HTTPError: if the server answers with a 4xx/5xx status.
        requests.Timeout: if the server does not respond within 30 seconds.
    """
    headers = {
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
    }
    # timeout prevents the script from hanging forever on a dead connection;
    # verify=False may be added here if the request fails with SSL/443 errors
    resp = requests.get(url, headers=headers, timeout=30)
    resp.raise_for_status()  # fail fast instead of parsing an error page
    resp.encoding = 'gbk'  # the site serves GBK-encoded pages
    tree = etree.HTML(resp.text)
    # Example full chapter URL: 'https://www.zanghaihua.org/book/41120/3538328.html'
    # — the hrefs extracted here are the relative part ('3538328.html').
    href_list = tree.xpath("//ul[@id='section-list']/li/a/@href")
    return href_list

async def download_one(url, save_dir=r'D:\ui\reptile_next\novel_content\飞灰'):
    """Download a single chapter page and save its text to ``save_dir``.

    Args:
        url: absolute URL of one chapter page.
        save_dir: directory the chapter file is written into; defaults to the
            original hard-coded location so existing callers are unaffected.

    Raises:
        aiohttp.ClientResponseError: if the server answers with a 4xx/5xx status.
        IndexError: if the page has no title node (layout changed).
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            resp.raise_for_status()  # don't parse/save an error page
            # Decode as GBK explicitly, matching get_every_chapter_url();
            # relying on charset auto-detection here risks mojibake.
            page_source = await resp.text(encoding='gbk')
            tree = etree.HTML(page_source)
            # Chapter title (e.g. "Chapter N")
            title = tree.xpath("//div[@class='reader-main']/h1/text()")[0].strip()
            # Chapter body; '\xa0' (non-breaking space) is layout padding
            content = ''.join(tree.xpath("//div[@class='reader-main']/div[@class='content']/text()")).replace('\xa0','')
            # Write one file per chapter, named after the title
            async with aiofiles.open(os.path.join(save_dir, f'{title}.txt'),'w',encoding='utf-8') as f:
                await f.write(content)
    print('下载完毕！',url)

async def download(href_list, base_url='https://www.zanghaihua.org/book/41120/'):
    """Download all chapters concurrently.

    Args:
        href_list: relative chapter hrefs from get_every_chapter_url().
        base_url: prefix joined with each href to form the full chapter URL;
            defaults to the original hard-coded book URL.
    """
    tasks = [
        asyncio.create_task(download_one(base_url + href))
        for href in href_list
    ]
    # gather (unlike asyncio.wait) accepts an empty task list and propagates
    # exceptions from individual downloads instead of silently dropping them.
    await asyncio.gather(*tasks)

def main():
    """Entry point: collect every chapter URL, then download them concurrently."""
    # Index page of the book
    url = 'https://www.zanghaihua.org/book/41120/'
    # 1. Extract the URL of every chapter from the index page
    href_list = get_every_chapter_url(url)
    # 2. Run the async downloads. asyncio.run() creates and closes the event
    #    loop itself; get_event_loop()/run_until_complete is the deprecated
    #    pattern (get_event_loop warns outside a running loop since 3.10).
    asyncio.run(download(href_list))

if __name__ == '__main__':
    main()

