import asyncio
import time
from functools import partial

import aiohttp
from lxml import etree

# Wall-clock start time; the script prints total elapsed time at the end.
start = time.time()

# Browser-like User-Agent so the target site does not reject the request
# as an obvious bot.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61"
                  " Safari/537.36"
}
# NOTE(review): `urls` and `base_url` are not referenced anywhere in this
# file — presumably left over from an earlier version or reserved for a
# pagination loop; confirm before removing.
urls = ['http://www.chamang.cn/article/1/']
# First article-list page; entry point used by func().
url = 'http://www.chamang.cn/article/'
# Template for building list-page URLs by page number (currently unused).
base_url = 'http://www.chamang.cn/article/%d/'


async def get_list_page(req_url):
    """Fetch one article-list page and extract article links from it.

    Parameters:
        req_url: URL of the list page to request.

    Returns:
        A two-element list ``[href_list, next_page_url]``:
        - ``href_list``: list of article detail-page URLs (empty list when
          the request fails or no matching anchors are found);
        - ``next_page_url``: URL of the next list page, or ``None`` when
          the response is not HTTP 200 or the page has no "next page"
          link (e.g. on the last page).

        The return shape is always a 2-element list so callers can index
        ``[0]`` and ``[1]`` without a ``None`` check.
    """
    async with aiohttp.ClientSession() as session:
        # session.get() already returns an async context manager; the
        # original `async with await session.get(...)` worked but the
        # extra `await` was redundant.
        async with session.get(req_url, headers=headers) as response:
            print("请求url:%s,响应码是：%d" % (req_url, response.status))
            if response.status != 200:
                # Original code fell through and implicitly returned
                # None here, crashing callers that index the result.
                return [[], None]
            html = await response.text()
            selector = etree.HTML(html)
            href_path = "//div[@class='list']/ul/li[@class='img']/a/@href"
            next_page_path = "//div[@id='pages']/a[@class='nextpage']/@href"
            # Article detail-page URLs on this list page.
            href_list = selector.xpath(href_path)
            # xpath() returns a list; on the last page there is no
            # "nextpage" anchor, so guard against IndexError instead of
            # blindly taking element [0].
            next_pages = selector.xpath(next_page_path)
            next_page_url = next_pages[0] if next_pages else None
            return [href_list, next_page_url]


async def func():
    """Fetch the first list page and print the extracted URLs.

    Prints the detail-page URL list and the next-page URL returned by
    ``get_list_page``. Exits quietly if the fetch yields no result
    (the original code raised ``TypeError`` by subscripting ``None``
    when the HTTP request did not return 200).
    """
    res = await get_list_page(url)
    if res is None:
        # Defensive: get_list_page historically returned None on a
        # non-200 response; don't crash on res[0] in that case.
        return
    # Detail-page URLs found on the list page.
    print(res[0])
    # URL of the next list page.
    print(res[1])


if __name__ == "__main__":
    # Guard the entry point so importing this module does not trigger a
    # network request; behavior when run as a script is unchanged.
    asyncio.run(func())

    end = time.time()

    # Report total wall-clock time for the whole run.
    print("总耗时：", end - start)
