import asyncio
import time
from functools import partial

import aiohttp
import arequests
from lxml import etree

# Wall-clock start time; paired with `end` at the bottom of the script to
# report the total crawl duration.
start = time.time()

# Desktop-Chrome User-Agent so the site serves normal pages instead of
# rejecting the default Python client UA.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61"
                  " Safari/537.36"
}
# NOTE(review): `urls` and `base_url` are never used in the visible code —
# likely leftovers from an earlier version; confirm before removing.
urls = ['http://www.chamang.cn/article/1/']
url = 'http://www.chamang.cn/article/'
base_url = 'http://www.chamang.cn/article/%d/'



async def get_detail_page(req_url):
    """Fetch one article detail page and print its title, category and body.

    Defined as a coroutine so that many detail pages can be downloaded
    concurrently. A page whose markup does not match the expected layout
    is skipped with a warning instead of crashing the whole crawl.

    :param req_url: absolute URL of the article detail page.
    """
    async with aiohttp.ClientSession() as session:
        # session.get(...) is itself an async context manager; the extra
        # `await` the original code used before `async with` is redundant.
        async with session.get(req_url, headers=headers) as response:
            if response.status != 200:
                return
            html = await response.text()
            selector = etree.HTML(html)
            titles = selector.xpath("//h1[@class='art-title']/text()")
            categories = selector.xpath(
                "//div[@class='art-meta']/span[@class='item'][2]/a/text()")
            bodies = selector.xpath("//div[@class='art-conts']/div[2]")
            # Guard the [0] lookups: an unexpected page layout previously
            # raised IndexError and aborted every concurrent task.
            if not (titles and categories and bodies):
                print("parse failed, skipping:", req_url)
                return
            # xpath("string(.)") collects all descendant text, which works
            # even where /text() on the element itself returns nothing;
            # strip() removes surrounding whitespace.
            content = bodies[0].xpath("string(.)").strip()
            print(titles[0], categories[0], content)


# Scheduling multiple tasks on the event loop — a coroutine object can be
# wrapped into a Task with either:
#   asyncio.create_task(coroutine_object)
#   task = asyncio.ensure_future(coroutine_object)

def get_list_page(req_url):
    """Crawl one article list page, fetch all of its detail pages
    concurrently, then recurse into the next list page.

    Recursion ends when the page has no "next page" link (i.e. the last
    page) or the list request does not return HTTP 200.

    :param req_url: absolute URL of the article list page.
    """
    # NOTE(review): `arequests` is used with a requests-style synchronous
    # API (.get / .status_code / .text) — this looks like a typo for the
    # `requests` package; confirm the intended dependency.
    response = arequests.get(req_url, headers=headers)
    print("请求url:%s,响应码是：%d" % (req_url, response.status_code))
    if response.status_code != 200:
        return

    selector = etree.HTML(response.text)
    # Links to the article detail pages in this list.
    href_list = selector.xpath("//div[@class='list']/ul/li[@class='img']/a/@href")

    if href_list:
        # Download all detail pages concurrently in one event loop.
        # asyncio.wait() on bare coroutine objects is deprecated since 3.8
        # and removed in 3.11; gather() inside a coroutine driven by
        # asyncio.run() is the supported form (and unlike wait(), it does
        # not raise on an empty task list).
        async def _crawl_all():
            await asyncio.gather(*(get_detail_page(href) for href in href_list))

        asyncio.run(_crawl_all())

    # Follow the "next page" link if present. On the last page the xpath
    # result is empty, so the recursion terminates cleanly instead of
    # raising IndexError as the original [0] lookup did.
    next_pages = selector.xpath("//div[@id='pages']/a[@class='nextpage']/@href")
    if next_pages:
        get_list_page(next_pages[0])

# Script entry point: start the crawl from the first list page and report
# total elapsed time. Guarded so importing this module does not trigger a
# full crawl.
if __name__ == "__main__":
    get_list_page(url)

    end = time.time()
    print("总耗时：", end - start)
