import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai import JsonCssExtractionStrategy
import json 

async def extract_structured_data_using_css_extractor():
    """Crawl the KidoCode technology-degrees page and extract course cards.

    Uses JsonCssExtractionStrategy with a CSS schema (no LLM) for fast,
    structured output. Injected JavaScript first clicks through the page's
    tab menu and expands every course dropdown so the lazily revealed
    content is present in the DOM before extraction. Results are printed
    and saved to ``course_cards.json``.
    """
    print("\n--- Using JsonCssExtractionStrategy for Fast Structured Output ---")

    # Extraction schema: one record per element matching baseSelector,
    # with each field pulled from a child selector.
    schema = {
        'name': 'Course Cards', 
        'baseSelector': '.framework-collection-item', 
        'fields': [
            {'name': 'category', 'selector': '.heading-50.gradient-color', 'type': 'text'}, 
            {'name': 'description', 'selector': '.charge-content', 'type': 'text'}, 
            {'name': 'course_title', 'selector': '.div-block-215 .text-block-93', 'type': 'text'}, 
            {'name': 'course_icon', 'selector': '.div-block-215 img', 'type': 'attribute', 'attribute': 'src'}, 
            {'name': 'course_icon_alt', 'selector': '.div-block-215 img', 'type': 'attribute', 'attribute': 'alt'}, 
            {'name': 'detailed_description', 'selector': '.course-content-text p.paragraph', 'type': 'text'}
        ]
    }

    # headless=False shows the browser window; JS must be enabled because
    # the page content is revealed by the injected script below.
    browser_config = BrowserConfig(headless=False, java_script_enabled=True)

    js_click_tabs = """
    (async () => {
        // First, click each of the CREATE letter tabs
        const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");
        for(let tab of tabs) {
            tab.scrollIntoView();
            tab.click();
            await new Promise(r => setTimeout(r, 500));
        }
        
        // Then click the dropdown arrow button on every course card
        await new Promise(r => setTimeout(r, 1000));
        const dropdownButtons = document.querySelectorAll("div.course-dropdown .w-dropdown-toggle");
        for(let button of dropdownButtons) {
            button.scrollIntoView();
            button.click();
            await new Promise(r => setTimeout(r, 500));
        }
        
        // Extra wait to make sure all content has loaded
        await new Promise(r => setTimeout(r, 1000));
    })();
    """

    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,  # always fetch fresh content
        extraction_strategy=JsonCssExtractionStrategy(schema),
        js_code=[js_click_tabs],
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://www.kidocode.com/degrees/technology", config=crawler_config
        )

        # Guard: on a failed crawl extracted_content can be None, and
        # json.loads(None) would raise TypeError.
        if not result.extracted_content:
            print("No companies found. Raw extracted content:")
            print(result.extracted_content)
            return

        course_cards = json.loads(result.extracted_content)
        print(f"Successfully extracted {len(course_cards)} companies")

        if course_cards:
            for card in course_cards:
                print(json.dumps(card, indent=2))

            # Save the extracted results to a JSON file
            with open('course_cards.json', 'w', encoding='utf-8') as f:
                json.dump(course_cards, f, ensure_ascii=False, indent=2)
                print("Extracted data saved to course_cards.json")

        else:
            print("No companies found. Raw extracted content:")
            print(result.extracted_content)

async def main():
    """Script entry point: run the CSS-extraction demo."""
    await extract_structured_data_using_css_extractor()

if __name__ == "__main__":
    asyncio.run(main())
