import aiohttp
import asyncio
import aiofiles
from lxml import etree
import json


async def main():
    """Crawl all 70 list pages of the forum category concurrently.

    Builds one URL per page (1..70) and awaits one parse_html() task per
    page.  Each task fetches, parses and persists its own page.
    """
    url_temp = "http://bbs.itheima.com/forum-425-{}.html"
    url_list = [url_temp.format(i) for i in range(1, 71)]
    # asyncio.gather replaces the deprecated pattern of handing a list of
    # tasks to asyncio.wait(): gather propagates exceptions from the child
    # coroutines instead of silently discarding them, and coroutine support
    # in wait() was removed in Python 3.11.
    await asyncio.gather(*(parse_html(url) for url in url_list))


async def parse_html(url):
    """Fetch one forum list page, extract thread metadata, append it as JSON.

    Args:
        url: Absolute URL of one forum list page.

    Side effects:
        Appends one JSON array (title / link / author / publish time per
        thread) to "coroutine-heima.json" and prints progress messages.
    """
    print('正在请求：', url)
    headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; "
                             "Windows NT 6.1; Trident / 5.0;"}
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers) as resp:
            # errors='ignore' guards against mis-declared page encodings.
            html_str = await resp.text(errors='ignore')
    html = etree.HTML(html_str)
    node_list = html.xpath('//th[contains(@class,"new") and '
                           'contains(@class,"forumtit")]')
    content_list = []
    for node in node_list:
        item = dict()
        item['文章标题'] = node.xpath('./a[1]/text()')[0]
        item['文章链接'] = node.xpath('./a[1]/@href')[0]
        item['文章作者'] = node.xpath(
            './div[@class="foruminfo"]//a/span/text()')[0]
        # Default: the plain-text date shown for older threads.
        release_time = node.xpath(
            './div[2]/i/span[1]/text()')[0].strip().replace('@', '')
        # Recent threads display a relative time ("N 天前") and keep the
        # absolute date in the inner <span>'s title attribute.
        # BUG FIX: the original queried the absolute path '//div[2]/...',
        # which searches the whole document (not this node) and, combined
        # with a counter that was reset to 0 every iteration and never
        # incremented, assigned the first page-wide match to every thread.
        # Query relative to `node` instead.
        title_attr = node.xpath('./div[2]/i/span[1]/span/@title')
        if title_attr:
            release_time = title_attr[0]
        item['发布时间'] = release_time
        content_list.append(item)
    # NOTE(review): appending a full JSON array per page means the file
    # holds several concatenated arrays, not one valid JSON document;
    # kept as-is to preserve the existing output format.
    async with aiofiles.open("coroutine-heima.json", "a+",
                             encoding='utf-8') as fp:
        await fp.write(json.dumps(content_list, ensure_ascii=False,
                                  indent=2))
        print("数据写入成功")


if __name__ == '__main__':
    # asyncio.run() creates, runs and closes the event loop itself;
    # asyncio.get_event_loop() for this purpose has been deprecated
    # since Python 3.10.
    asyncio.run(main())
