import asyncio
from crawlee.crawlers import BeautifulSoupCrawler, BeautifulSoupCrawlingContext
from crawlee import Request


async def main(
    *,
    start_url: str = 'https://www.hao123.com',
    max_requests: int = 5,
    user_agent: str = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    ),
) -> None:
    """Run a small BeautifulSoup-based crawl starting from *start_url*.

    For every page visited, the handler stores the page URL and its
    ``<title>`` text via ``push_data`` and enqueues all discovered links.

    Args:
        start_url: URL the crawl begins from.
        max_requests: Upper bound on requests made in this crawl
            (passed through as ``max_requests_per_crawl``).
        user_agent: ``User-Agent`` header sent with the initial request.
    """
    # Initialize the crawler with a hard cap on total requests so the
    # demo crawl terminates quickly.
    crawler = BeautifulSoupCrawler(max_requests_per_crawl=max_requests)

    @crawler.router.default_handler
    async def request_handler(context: BeautifulSoupCrawlingContext) -> None:
        print(f'处理: {context.request.url}')

        data = {
            'url': context.request.url,
            # Fall back to a placeholder when the page has no <title> tag.
            'title': context.soup.title.string if context.soup.title else '无标题',
        }

        await context.push_data(data)
        # Follow every link found on the page (bounded by max_requests).
        await context.enqueue_links()

    # Build a Request object (rather than a bare URL string) so custom
    # headers can be attached to the initial request.
    request = Request.from_url(
        url=start_url,
        headers={'User-Agent': user_agent},
    )

    await crawler.run([request])

# Script entry point: start the asyncio event loop and run the crawl.
if __name__ == '__main__':
    asyncio.run(main())
