import requests
import asyncio
import aiohttp
import aiofiles
from bs4 import BeautifulSoup
import os

# Request headers that imitate a Chrome browser so the site serves normal pages.
# (The `global` statements in the original were no-ops at module level and
# have been removed; plain module-level assignment already creates globals.)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60'}
# Catalog URL of the novel "全职艺术家" (Full-Time Artist).
book = 'https://www.shuquge.com/txt/142974/'
# Directory where downloaded chapter files are stored.
save_path = 'D:/全职艺术家'
# exist_ok=True replaces the racy "exists() then makedirs()" check-then-create.
os.makedirs(save_path, exist_ok=True)

async def aiodownload(chapter, title):
    """Fetch one chapter page and write its text content to a file.

    chapter -- absolute URL of the chapter page.
    title   -- destination file path (caller passes the full path
               including save_path and the ".txt" suffix).
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(chapter) as resp:
            html_doc = await resp.text(encoding='utf-8')
            bf = BeautifulSoup(html_doc, 'html.parser')
            texts = bf.find_all('div', id="content")
            if texts:  # truthiness check instead of len() per PEP 8
                # The site pads each paragraph with four non-breaking
                # spaces; turn every such run into a newline.
                content = texts[0].text.replace('\xa0' * 4, '\n')
                async with aiofiles.open(title, mode="w", encoding="utf-8") as f:
                    await f.write(content)  # write the chapter body to disk
            else:
                # Best-effort logging: report chapters whose content div
                # was missing instead of aborting the whole download.
                print(title)


async def getCatalog(book):
    """Scrape the catalog page of *book* and download all chapters concurrently.

    book -- base URL of the novel's catalog page; chapter hrefs are
            relative to it.
    """
    # NOTE(review): requests.get is blocking and stalls the event loop;
    # tolerable here because it runs once, before any tasks are scheduled.
    res = requests.get(book, headers=headers)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'utf-8'.replace('utf-8', 'html.parser'))
    # Skip the first 12 anchors — presumably the duplicated "latest
    # chapters" box at the top of the listing; confirm against the page.
    a = soup.find('div', class_='listmain').find_all('a')[12:]
    print('总章节数: %d ' % len(a))
    tasks = [
        aiodownload(book + each.get('href'), save_path + "/" + each.text + ".txt")
        for each in a
    ]
    # asyncio.wait() with bare coroutines was deprecated in 3.8 and raises
    # TypeError on 3.11+ (and ValueError on an empty set); gather accepts
    # coroutines directly and handles an empty task list.
    await asyncio.gather(*tasks)

if __name__ == '__main__':
    # asyncio.run() creates, runs and closes the event loop itself;
    # get_event_loop()/run_until_complete is deprecated since Python 3.10.
    asyncio.run(getCatalog(book))
    print('end')




