import aiohttp
import asyncio
from eroica import url_add
from eroica import asy_get
from eroica import linkMongo
import time

# Collection handle for the 'test' collection, created once at import time.
mycol = linkMongo('test')


def insert_data(data):
    """Insert a single document ``data`` into the MongoDB collection.

    ``data`` is expected to be a dict-like document accepted by pymongo.
    """
    # BUG fix: pymongo's API is Collection.insert_one; the original called
    # the nonexistent 'insert_aone', which would raise AttributeError.
    mycol.insert_one(data)

# Main logic
async def main():
    """Crawl the ssr1.scrape.center demo site.

    Fetches the index page, extracts the detail-page links, then requests
    every detail page concurrently and prints the extracted fields
    (title / author / content). Finally prints the elapsed wall time.
    """
    # perf_counter() is the recommended clock for measuring elapsed time;
    # time.time() can jump if the system clock is adjusted. Also fixes the
    # original's 'rignt_time' typo.
    start = time.perf_counter()
    url = 'https://ssr1.scrape.center/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
    }
    # XPath for the detail-page links on the index page.
    xpath_url = '//*[@id="index"]/div[1]//*/div/div[1]/a/@href'

    # One shared session for all requests (connection pooling); closed by
    # the async context manager.
    async with aiohttp.ClientSession() as session:
        # asy_get is a coroutine, so it must be awaited.
        sub_paths = await asy_get(session, url, headers, xpath_url)
        sub_urls = url_add(url, sub_paths)
        print(sub_urls)

        # XPath expressions for the fields scraped from each detail page.
        xpath_data = {
            'title': '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/a/h2/text()',
            'author': '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[3]/p[1]/text()',
            'content': '//*[@id="detail"]/div[2]/div/div/div/div/div/p/text()'
        }

        # Fire all detail-page requests concurrently and wait for them all.
        tasks = [
            asy_get(session, sub_url, headers, xpath_data)
            for sub_url in sub_urls
        ]
        results = await asyncio.gather(*tasks)
        # NOTE(review): results are only printed; insert_data() exists but
        # is never called here — confirm whether persistence was intended.
        for data in results:
            print(data)
    elapsed = time.perf_counter() - start
    print('耗时：', elapsed)

# Run the async entry point only when executed as a script, not on import.
# The original unconditionally called asyncio.run() at module level, which
# would start crawling as a side effect of importing this file.
if __name__ == '__main__':
    asyncio.run(main())
