# coding:utf-8

"""
测试基于协程的异步爬虫任务
以期望可以用于【迁移平台】批量“启动”任务
"""
import requests
import time
from lxml import etree
import aiohttp
import asyncio

# Target pages to scrape (AAAI'18 paper detail pages).
urls = [
    'https://aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/16488',
    'https://aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/16583',
    # ... the remaining 8 URLs are omitted
]
# NOTE(review): never populated — get_title() prints the titles instead of
# appending them here; confirm whether this list is dead.
titles = []
sem = asyncio.Semaphore(10)  # Semaphore capping concurrent coroutines at 10, so we don't crawl too fast
'''
提交请求获取AAAI网页,并解析HTML获取title
'''

# Dead code below: the original synchronous implementation, disabled by
# wrapping it in a bare string literal. Kept for reference only.
"""
def get_title(url,cnt):
    response = requests.get(url)  # 提交请求,获取响应内容
    html = response.content       # 获取网页内容(content返回的是bytes型数据,text()获取的是Unicode型数据)
    title = etree.HTML(html).xpath('//*[@id="title"]/text()') # 由xpath解析HTML
    print('第%d个title:%s' % (cnt,''.join(title)))
"""


async def get_title(url):
    """Fetch *url* and print the paper title extracted from the page.

    The title text is taken from the element with id="title" via XPath.
    Returns nothing; the joined title string is printed to stdout.
    """
    # `async with sem` is the supported way to acquire an asyncio.Semaphore;
    # the old `with (await sem)` form was deprecated in 3.8 and removed in 3.11.
    async with sem:
        # One session per request keeps this coroutine self-contained; for a
        # large crawl a single shared session would reuse connections.
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                # resp.read() yields the raw response bytes, which lxml
                # can parse directly (no manual encode/decode round-trip).
                html = await resp.read()
                title = etree.HTML(html).xpath('//*[@id="title"]/text()')
                print(''.join(title))


'''
Caller / entry point.
'''


def main():
    """Run one get_title() coroutine per URL concurrently and wait for all.

    Uses asyncio.run() + asyncio.gather() instead of the legacy
    get_event_loop()/run_until_complete(asyncio.wait(...)) pattern:
    passing bare coroutines to asyncio.wait() raises TypeError since
    Python 3.11, and get_event_loop() outside a running loop is deprecated.
    """
    async def _crawl_all():
        # gather() schedules every coroutine on the running loop and
        # propagates the first exception instead of silently collecting it.
        await asyncio.gather(*(get_title(url) for url in urls))

    asyncio.run(_crawl_all())


if __name__ == '__main__':
    # Time the whole crawl from the caller's side.
    t0 = time.time()
    main()
    elapsed = time.time() - t0
    print('总耗时：%.5f秒' % float(elapsed))


# import time
# from lxml import etree
# import requests
#
# urls = [
#     'https://aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/16488',
#     'https://aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/16583',
#     # 省略后面8个url...
# ]
# '''
# 提交请求获取AAAI网页,并解析HTML获取title
# '''
#
#
# def get_title(url, cnt):
#     response = requests.get(url)  # 提交请求,获取响应内容
#     html = response.content  # 获取网页内容(content返回的是bytes型数据,text()获取的是Unicode型数据)
#     title = etree.HTML(html).xpath('//*[@id="title"]/text()')  # 由xpath解析HTML
#     print('第%d个title:%s' % (cnt, ''.join(title)))
#
#
# if __name__ == '__main__':
#     start1 = time.time()
#     i = 0
#     for url in urls:
#         i = i + 1
#         start = time.time()
#         get_title(url, i)
#         print('第%d个title爬取耗时:%.5f秒' % (i, float(time.time() - start)))
#     print('爬取总耗时:%.5f秒' % float(time.time() - start1))
