# -*- coding: utf-8 -*-
# @File : 4. 多任务异步爬虫.py
# @Author : lidong
# @IDEA: PyCharm
# @Time :  2022/5/11 16:24
import aiohttp
import asyncio
import time
from lxml import etree
# Endpoints to crawl concurrently — a local Flask test server that
# (presumably) delays each response, so the async fetches can overlap.
# NOTE(review): assumes a server is listening on 127.0.0.1:5000 — confirm.
urls = [
    'http://127.0.0.1:5000/bobo',
    'http://127.0.0.1:5000/jay',
    'http://127.0.0.1:5000/tom'
]

async def get_request(url):
    """Fetch *url* asynchronously and return the response body as text.

    Parameters
    ----------
    url : str
        The URL to request.

    Returns
    -------
    str
        The decoded response body. (Use ``await response.read()``
        instead if raw bytes are needed.)
    """
    # A session per call keeps this function self-contained; for many
    # requests, sharing one session would reuse connections.
    async with aiohttp.ClientSession() as session:
        # ``session.get()`` returns an async context manager directly,
        # so the extra ``await`` the original had before it is redundant.
        async with session.get(url) as response:
            page_text = await response.text()
    return page_text

def get_result(t):
    """Done-callback for a fetch task: parse its HTML and print the <h1> text.

    Parameters
    ----------
    t : asyncio.Task
        A finished task whose result is an HTML page as a string.
    """
    document = etree.HTML(t.result())
    print(document.xpath('/html/body/h1/text()'))


if __name__ == '__main__':
    start = time.time()

    async def main():
        """Schedule one fetch task per URL and wait for all of them."""
        tasks = []
        for url in urls:
            # ensure_future() must run inside the event loop; creating
            # tasks before a loop exists relied on the deprecated
            # implicit-loop behavior of asyncio.get_event_loop().
            task = asyncio.ensure_future(get_request(url))
            # Parse and print each page as soon as its task completes.
            task.add_done_callback(get_result)
            tasks.append(task)
        await asyncio.wait(tasks)

    # asyncio.run() creates, runs, and closes the event loop for us —
    # the modern replacement for get_event_loop()/run_until_complete().
    asyncio.run(main())
    print('总耗时； ', time.time()-start)