import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

# CSS extraction schema for the CLS telegraph feed: each record is rooted
# at ``.telegraph-list`` and the fields pull out the timestamp, body text,
# related stocks, and tag labels.
schema = {
    "name": "Articles",
    "baseSelector": ".telegraph-list",
    "fields": [
        # Timestamp shown inside the content box.
        {"name": "time", "selector": ".telegraph-content-box .telegraph-time-box", "type": "text"},
        # Body text: the <span> immediately following the time box.
        {"name": "content", "selector": ".telegraph-content-box .telegraph-time-box + span", "type": "text"},
        # Related stock/industry names.
        {"name": "stock", "selector": ".telegraph-stock-plate-box .industry-stock", "type": "text"},
        # Tag labels; ``multiple`` collects every matching element, not just the first.
        {"name": "tag_labels", "selector": ".clearfix:has(> .label-item)", "type": "text", "multiple": True},
    ],
}
# JavaScript snippets executed in order on the page: scroll to the bottom,
# click the "load more" button if present, then pause so the newly requested
# content has time to render before extraction runs.
var_js_code = []

# Single pass for now; widen the range (e.g. range(1, 5)) to load more pages.
for num in range(1, 2):
    var_js_code.append("window.scrollTo(0, document.body.scrollHeight);")
    var_js_code.append("document.querySelector('.more-button')?.click();")
    # Wait longer on each successive pass (2s, 4s, ...) to let content load.
    timeout = 2000 * num
    var_js_code.append(f"await new Promise(resolve => setTimeout(resolve, {timeout}));")

# Crawler run configuration: scope extraction to the telegraph feed region,
# bypass the cache, run the scroll/click JS built above, and wait for the
# feed container before extracting.
run_config = CrawlerRunConfig(
    css_selector=".telegraph-list",          # restrict returned HTML to the feed region
    cache_mode=CacheMode.BYPASS,             # always fetch a fresh copy
    js_code=var_js_code,                     # scroll + "load more" click snippets
    wait_for="css:.telegraph-content-left",  # block until the feed column renders
    page_timeout=30000,                      # page load budget, in milliseconds
    extraction_strategy=JsonCssExtractionStrategy(schema, debug=True),
    magic=True,                              # enable crawl4ai's automatic page-handling heuristics
)
async def main():
    """Crawl the CLS telegraph page and print the extracted articles.

    Opens an ``AsyncWebCrawler``, runs it against https://www.cls.cn/telegraph
    with the module-level ``run_config`` (CSS-schema extraction), and prints
    the JSON extraction result to stdout.
    """
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun("https://www.cls.cn/telegraph", config=run_config)
        # Guard: on a failed crawl, extracted_content would be None.
        if result.success:
            print(result.extracted_content)
        else:
            print(f"Crawl failed: {result.error_message}")

def _entry() -> None:
    """Synchronous wrapper so the script can be executed directly."""
    asyncio.run(main())


if __name__ == "__main__":
    _entry()