#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import asyncio
import json
from pathlib import Path
from dotenv import load_dotenv
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.content_filter_strategy import PruningContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

# Load environment variables from a local .env file, if one is present.
load_dotenv()

async def simple_crawl(url):
    """Crawl a single page and save its filtered markdown to a local file.

    Args:
        url: The page URL to fetch.

    Returns:
        The crawl4ai result object on success, or ``None`` on failure.
    """
    print(f"开始爬取页面: {url}")

    # Headless browser with verbose logging for easier debugging.
    browser_config = BrowserConfig(
        headless=True,
        verbose=True,
    )

    # Bypass the cache so each run fetches fresh content, and prune
    # boilerplate with a fixed-threshold content filter before the
    # markdown is generated.
    run_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        markdown_generator=DefaultMarkdownGenerator(
            content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
        ),
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(url=url, config=run_config)

        if not result.success:
            print(f"爬取失败: {result.error_message}")
            return None

        print(f"爬取成功！内容长度: {len(result.markdown.fit_markdown)} 字符")
        # Build a filesystem-safe filename from the URL. The plain
        # replace('://')/replace('/') approach left characters such as
        # '?', ':' and '&' in the name, which are invalid on Windows;
        # strip any remaining unsafe characters as well. For typical
        # URLs the resulting name is unchanged.
        safe_name = "".join(
            c if c.isalnum() or c in "._-" else "_"
            for c in url.replace("://", "_").replace("/", "_")
        )
        save_path = Path(f"output_{safe_name}.md")
        save_path.write_text(result.markdown.fit_markdown, encoding="utf-8")
        print(f"内容已保存到: {save_path}")
        return result

async def extract_structured_data(url, schema):
    """Extract structured records from a page via a CSS-selector schema.

    Args:
        url: The page URL to crawl.
        schema: A ``JsonCssExtractionStrategy`` schema dict describing the
            base selector and the fields to pull out.

    Returns:
        The parsed extracted data (list/dict) on success, or ``None``
        when the crawl fails or nothing matches.
    """
    print(f"从页面提取结构化数据: {url}")

    browser_config = BrowserConfig(headless=True, verbose=True)

    # Bypass the cache so extraction always runs against fresh HTML,
    # driving the CSS-based strategy built from the caller's schema.
    run_config = CrawlerRunConfig(
        extraction_strategy=JsonCssExtractionStrategy(schema, verbose=True),
        cache_mode=CacheMode.BYPASS,
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(url=url, config=run_config)

        # Guard clause: bail out when the crawl failed or nothing matched.
        if not (result.success and result.extracted_content):
            print(f"数据提取失败: {result.error_message if result.error_message else '未找到匹配数据'}")
            return None

        data = json.loads(result.extracted_content)
        print(f"成功提取 {len(data)} 条数据")
        # Persist the extracted records as pretty-printed UTF-8 JSON.
        save_path = Path(f"data_{url.replace('://', '_').replace('/', '_')}.json")
        with open(save_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)
        print(f"数据已保存到: {save_path}")
        return data

async def deep_crawl_example(start_url, max_pages=5):
    """Deep-crawl several related pages breadth-first from a start URL.

    Args:
        start_url: URL to begin the crawl from.
        max_pages: Maximum number of pages to visit (default 5).

    Returns:
        A list of successful crawl result objects.
    """
    print(f"开始深度爬取，起始页面: {start_url}")

    browser_config = BrowserConfig(headless=True, verbose=True)
    run_config = CrawlerRunConfig(
        # Only follow links within the same domain.
        scope='domain',
        # Produce both markdown and the discovered links.
        output_formats=['markdown', 'links'],
        # Delay between requests, in seconds.
        delay_between_requests=0.5
    )

    # BUG FIX: browser_config was previously built but never passed to
    # AsyncWebCrawler, so the headless/verbose settings were silently
    # ignored; pass it explicitly alongside the concurrency limit.
    async with AsyncWebCrawler(config=browser_config, max_concurrent_tasks=3) as crawler:
        print(f"使用BFS策略开始深度爬取...")

        crawl_generator = await crawler.adeep_crawl(
            start_url=start_url,
            strategy="bfs",      # breadth-first traversal
            max_depth=2,         # maximum link depth from the start page
            max_pages=max_pages, # hard cap on pages visited
            config=run_config
        )

        crawled_count = 0
        results = []

        # Stream results as pages complete; failed pages are reported but
        # not retained.
        async for result in crawl_generator:
            crawled_count += 1
            if result.success:
                results.append(result)
                internal_links = len(result.links.get('internal', []))
                print(f"[{crawled_count:02d}] 深度: {result.depth}, URL: {result.url}, 内部链接数: {internal_links}")
            else:
                print(f"爬取失败: {result.url}, 错误: {result.error_message}")

        print(f"深度爬取完成。共爬取 {crawled_count} 个页面。")
        return results

async def main():
    """Entry point: walk through the different crawler demos."""
    print("Crawl4AI 爬虫演示程序")
    print("-" * 50)

    # Demo 1: simple single-page crawl.
    # (Alternative test target: https://docs.crawl4ai.com/)
    url = "https://www.xiaohongshu.com/explore"
    await simple_crawl(url)

    # Demo 2: structured data extraction driven by a CSS-selector schema.
    # The selectors below are placeholders and must be adapted to a real
    # news site before the call is enabled.
    news_schema = {
        "name": "新闻列表",
        "baseSelector": "article.news-item",  # placeholder selector
        "fields": [
            {"name": "title", "selector": "h2.title", "type": "text"},
            {"name": "summary", "selector": "p.summary", "type": "text"},
            {
                "name": "link",
                "selector": "a.read-more",
                "type": "attribute",
                "attribute": "href",
            },
        ],
    }

    # Replace with a URL whose markup actually matches the schema above
    # before uncommenting the extraction call.
    data_url = "https://news.example.com/"
    print("结构化数据提取被注释掉了，因为需要针对特定网站调整schema")
    # await extract_structured_data(data_url, news_schema)

    # Demo 3: deep (multi-page) crawl — disabled by default.
    print("深度爬取被注释掉了，请取消注释使用")
    # await deep_crawl_example("https://docs.crawl4ai.com/", max_pages=3)

    print("\n演示完成！")

# Script entry point: run the async demo driver on the default event loop.
if __name__ == "__main__":
    asyncio.run(main())