import asyncio
import os.path
import re
from urllib.parse import urljoin

import aiofiles
import aiohttp
import requests
from lxml import etree

# Browser-like User-Agent sent with every HTTP request made by this script.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/114.0'}


# 异步协程获取小说文章内容
async def down_one(url, dir_path):
    while True:
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as resp:
                    html = await resp.text()
                    et = etree.HTML(html)
                    title = et.xpath('//meta[@name="keywords"]/@content')[0].strip()
                    content = ''.join(et.xpath('//div[@id="content"]/text()')).strip()

                    # 把小说内容生成到本地
                    async with aiofiles.open(f'{dir_path}/{title}.txt', mode='w', encoding='utf-8') as f:
                        await f.write(content)
                    await asyncio.sleep(2)
        except:
            print('报错了 {}'.format(url))


    print('下载完成 {} '.format(url))


# Launch the batch of chapter downloads.
async def download(ret_list):
    """Download every chapter concurrently into a directory named after the book.

    :param ret_list: dict with keys ``'book_name'`` (output directory name)
                     and ``'urls'`` (list of chapter URLs).
    """
    dir_path = ret_list['book_name']
    # makedirs(exist_ok=True) avoids the exists()/mkdir() race of the original.
    os.makedirs(dir_path, exist_ok=True)

    if not ret_list['urls']:
        # asyncio.wait()/gather on an empty set would raise / be pointless.
        return

    tasks = [asyncio.create_task(down_one(url, dir_path)) for url in ret_list['urls']]
    # gather() propagates exceptions, unlike asyncio.wait() which silently
    # collects them in the returned task set.
    await asyncio.gather(*tasks)


def get_content_urls(url):
    """Fetch the novel's table-of-contents page and collect chapter links.

    :param url: URL of the novel's index (directory) page.
    :return: dict ``{'book_name': str, 'urls': list[str]}`` where ``urls``
             holds absolute chapter URLs.
    """
    response = requests.get(url, headers=headers)
    response.encoding = 'gbk'  # the site serves GBK-encoded pages
    et = etree.HTML(response.text)

    book_name = et.xpath('//meta[@property="og:novel:book_name"]/@content')[0]
    hrefs = et.xpath('//ul[@id="section-list"]/li/a/@href')

    # urljoin resolves both relative and root-absolute hrefs correctly;
    # the original ``url + r`` concatenation broke on hrefs like
    # "/book/40746/1.html" (producing ".../book/40746//book/40746/1.html").
    urls = [urljoin(url, r) for r in hrefs]

    return {
            'book_name': book_name,
            'urls': urls
    }


def main():
    """Entry point: scrape the chapter list, then download all chapters."""
    # Step 1: collect every chapter URL from the table-of-contents page.
    # Step 2: hand them to the asyncio downloader.

    # Novel table-of-contents URL.
    dir_url = 'https://www.zanghaihua.org/book/40746/'

    asyncio.run(download(get_content_urls(dir_url)))


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()