import re
import requests
import asyncio
import aiohttp
import aiofiles
from lxml import etree

'''
Example chapter-index URL: https://www.qbiqu.com/79_79354/
'''

async def aiodownload(url, lack_url, title):
    """Download one chapter page and write its cleaned text to a local file.

    Parameters:
        url: base URL of the book's chapter index, ending with '/'
             (e.g. 'https://www.qbiqu.com/79_79354/').
        lack_url: last path segment of the chapter page (e.g. '12345.html'),
                  appended to ``url`` to form the full chapter URL.
        title: chapter title; used as the output file name and as the
               heading written at the top of the file.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36"
    }
    chapter_complete_url = url + lack_url
    async with aiohttp.ClientSession() as session:
        async with session.get(chapter_complete_url, headers=headers) as resp:
            # The site serves GBK-encoded pages (get_chapter_name sets
            # res.encoding = 'gbk' for the same host); decode explicitly
            # instead of trusting aiohttp's charset detection, which can
            # mis-decode or raise UnicodeDecodeError here.
            text = await resp.text(encoding='gbk', errors='ignore')
            selector = etree.HTML(text)
            # Chapter body lives in the element with id="content".
            content = selector.xpath('//*[@id="content"]/text()')
            novel_content = ''.join(content)
            # Strip non-breaking spaces, regular spaces and newlines.
            novel_content_s = novel_content.replace(u'\xa0', '').replace(' ', '').replace('\n', '')
            # Remove characters Windows forbids in file names so
            # aiofiles.open() cannot fail on titles like '第1章 "xx"?'.
            safe_title = re.sub(r'[\\/:*?"<>|]', '', title)
            print(f'{title}--->正在下载...')
            async with aiofiles.open(r'D:\ui\reptile\coroutines\data\%s' % safe_title, 'w', encoding='utf-8') as f:
                await f.write(title + '\n\n' + novel_content_s)

async def get_chapter_name(url):
    """Fetch the chapter index page and download all chapters concurrently.

    Parameters:
        url: the book's chapter-index URL, e.g. 'https://www.qbiqu.com/79_79354/'.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36"
    }
    res = requests.get(url, headers=headers)
    # Site serves GBK; requests would otherwise guess ISO-8859-1 and garble it.
    res.encoding = 'gbk'
    page_content = res.text

    # Each chapter link on the index page looks like:
    #   <dd><a href="/79_79354/12345.html">第1章 ...</a></dd>
    obj = re.compile(r'<dd><a href="(?P<chapter_lack_url>.*?)">(?P<chapter_name>.*?)</a></dd>', re.S)
    tasks = []
    for match in obj.finditer(page_content):
        title = match.group('chapter_name')
        # href is '/<book_id>/<page>.html'; keep only the file-name segment.
        lack_url = match.group('chapter_lack_url').split('/')[2]
        # Schedule the chapter download as a concurrent task.
        tasks.append(asyncio.create_task(aiodownload(url, lack_url, title)))
    # asyncio.wait() raises ValueError on an empty set and silently collects
    # exceptions; gather() handles an empty list and propagates errors.
    if tasks:
        await asyncio.gather(*tasks)


if __name__ == '__main__':
    url = 'https://www.qbiqu.com/79_79354/'
    # asyncio.run() creates, runs and closes the event loop itself;
    # the get_event_loop()/run_until_complete() pattern is deprecated
    # (get_event_loop warns since Python 3.10 when no loop is running).
    asyncio.run(get_chapter_name(url))


'''
Example chapter-index URLs for this site:
https://www.qbiqu.com/79_79354/
https://www.qbiqu.com/7_7355/
'''