#!/usr/bin/env python
"""
开发环境爬虫触发脚本
直接调用爬虫任务，绕过认证
"""
import asyncio
import sys
import os
from datetime import datetime

# Add the spider source directory to the import path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from src.spiders.weibo_hot_spider import WeiboHotSpider
from src.spiders.zhihu_hot_spider import ZhihuHotSpider
from src.spiders.toutiao_hot_spider import ToutiaoHotSpider


async def trigger_crawlers():
    """Run every configured hot-list spider concurrently and report outcomes.

    Each spider is launched against its target URL as a separate asyncio
    task; ``return_exceptions=True`` means one failing spider never aborts
    the others — its exception is reported alongside the successes.
    """
    print(f"[{datetime.now()}] Starting crawlers...")

    # Spider instances paired with the page each one scrapes.
    crawl_targets = [
        (WeiboHotSpider(), "https://s.weibo.com/top/summary"),
        (ZhihuHotSpider(), "https://www.zhihu.com/hot"),
        (ToutiaoHotSpider(), "https://www.toutiao.com"),
    ]

    # Kick off every crawl as its own task so they all run concurrently.
    pending = []
    for crawler, target_url in crawl_targets:
        print(f"[{datetime.now()}] Triggering {crawler.__class__.__name__} with URL: {target_url}")
        pending.append((crawler, asyncio.create_task(crawler.crawl(target_url))))

    # Await everything; exceptions come back as result values, not raised.
    outcomes = await asyncio.gather(*(task for _, task in pending), return_exceptions=True)

    # Per-spider success/failure summary.
    for (crawler, _), outcome in zip(pending, outcomes):
        if isinstance(outcome, Exception):
            print(f"[{datetime.now()}] {crawler.__class__.__name__} failed: {outcome}")
        else:
            print(f"[{datetime.now()}] {crawler.__class__.__name__} completed successfully")

    print(f"[{datetime.now()}] All crawlers finished")


def _main() -> None:
    """Script entry point: drive all crawlers to completion on a fresh event loop."""
    asyncio.run(trigger_crawlers())


if __name__ == "__main__":
    _main()