#!/usr/bin/env python
# -*- coding:utf-8 -*-

"""
@author zyx
@since 2022/2/13 18:12
@file: c07_多任务异步爬虫案例.py
"""

import requests
import asyncio
import time
from lxml import etree
import aiohttp

start = time.time()  # wall-clock start; used at the bottom of the script to report total elapsed time
# Demo targets; assumes a local test server (e.g. Flask) listening on 127.0.0.1:5000
urls = [
    'http://127.0.0.1:5000/bobo',
    'http://127.0.0.1:5000/jay',
    'http://127.0.0.1:5000/tom'
]


# Task coroutine: request the given url and return the response body.
async def get_request(url):
    """Fetch *url* with aiohttp and return the response body as a string.

    `requests` is a blocking library and cannot be awaited, which is why
    aiohttp is used here instead.
    """
    # Create the session (request object) for this fetch.
    async with aiohttp.ClientSession() as sess:
        # sess.get() already returns an async context manager -- do NOT
        # `await` it first: `async with await sess.get(...)` relies on
        # deprecated behaviour and breaks on current aiohttp.
        # get()/post() take the usual url/headers/params|data arguments;
        # note: a proxy is passed as proxy='http://ip:port' (unlike the
        # requests-style `proxies` dict).
        async with sess.get(url=url) as response:
            # response.text(): body as str; response.read(): body as bytes.
            page_text = await response.text()
            return page_text


def parse(t):
    """Done-callback: parse the page fetched by a finished task.

    Extracts and prints the href of the anchor with id="feng".
    """
    # The task's result is the page source returned by get_request.
    html = t.result()
    root = etree.HTML(html)
    href = root.xpath('//a[@id="feng"]/@href')[0]
    print(href)


async def _crawl():
    """Schedule one fetch task per url, attach the parse callback, wait for all."""
    tasks = []
    for url in urls:
        # create_task schedules the coroutine on the running loop.
        task = asyncio.create_task(get_request(url))
        # parse runs as soon as this individual task finishes.
        task.add_done_callback(parse)
        tasks.append(task)
    await asyncio.wait(tasks)


# asyncio.run creates and closes the event loop itself; the old
# get_event_loop()/run_until_complete pair is deprecated on Python 3.10+.
asyncio.run(_crawl())

print('总耗时:', time.time() - start)
