#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import sys
import asyncio
import json
import argparse
from pathlib import Path
from dotenv import load_dotenv
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.content_filter_strategy import PruningContentFilter, BM25ContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

# Load environment variables from a local .env file (e.g. API keys).
load_dotenv()

class Crawl4AIDemo:
    """Demo wrapper around crawl4ai's ``AsyncWebCrawler``.

    Provides three crawl modes — single page, parallel batch, and deep (BFS)
    crawl — each of which writes the filtered Markdown of every crawled page
    to a file on disk.
    """

    def __init__(self):
        """Build the shared headless-browser configuration used by all crawls."""
        self.browser_config = BrowserConfig(
            headless=True,
            verbose=True,
        )

    @staticmethod
    def _url_to_filename(url):
        """Flatten a URL into a filename stem ('://' and '/' become '_').

        Centralizes the sanitization that was previously duplicated in all
        three crawl methods.

        NOTE(review): characters such as '?', '*' or ':' (e.g. in query
        strings or ports) are not sanitized and may be invalid on some
        filesystems — confirm expected URL shapes.
        """
        return url.replace('://', '_').replace('/', '_')

    async def crawl_page(self, url, output_dir="output", use_bm25=False, query=None):
        """Crawl a single page and save its filtered Markdown.

        Args:
            url: Page URL to crawl.
            output_dir: Directory for the generated ``.md`` file (created if missing).
            use_bm25: When True (and ``query`` is given) use BM25 relevance filtering
                instead of the default pruning filter.
            query: Keyword query driving the BM25 filter.

        Returns:
            True if the crawl succeeded and the file was written, else False.
        """
        os.makedirs(output_dir, exist_ok=True)
        output_file = Path(output_dir) / f"{self._url_to_filename(url)}.md"

        # Choose the content-filter strategy: BM25 only when a query was
        # actually supplied, otherwise threshold-based pruning.
        if use_bm25 and query:
            content_filter = BM25ContentFilter(user_query=query, bm25_threshold=1.0)
            print(f"使用BM25内容过滤，关键词查询: '{query}'")
        else:
            content_filter = PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
            print("使用默认剪枝内容过滤")

        run_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,  # always re-fetch; never serve from cache
            markdown_generator=DefaultMarkdownGenerator(content_filter=content_filter),
        )

        print(f"开始爬取: {url}")
        async with AsyncWebCrawler(config=self.browser_config) as crawler:
            result = await crawler.arun(
                url=url,
                config=run_config
            )

            if not result.success:
                print(f"✗ 爬取失败: {result.error_message}")
                return False

            # Persist the filtered ("fit") markdown produced by the content filter.
            output_file.write_text(result.markdown.fit_markdown, encoding='utf-8')
            print(f"✓ 爬取成功! Markdown内容已保存到: {output_file}")
            print(f"  - 原始Markdown长度: {len(result.markdown.raw_markdown)} 字符")
            print(f"  - 过滤后Markdown长度: {len(result.markdown.fit_markdown)} 字符")
            return True

    async def crawl_multiple(self, urls, output_dir="output"):
        """Crawl several URLs in parallel, saving each page's Markdown.

        Args:
            urls: List of URLs to crawl.
            output_dir: Directory for the generated ``.md`` files.

        Returns:
            The number of successfully crawled URLs (previously returned
            ``None``; existing callers ignore the return value).
        """
        os.makedirs(output_dir, exist_ok=True)

        print(f"开始并行爬取 {len(urls)} 个URL...")
        # NOTE(review): 'output_formats' is passed through unchanged — confirm
        # it is a supported CrawlerRunConfig option in the installed crawl4ai.
        run_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            output_formats=['markdown', 'links']
        )

        # Reuse the shared browser configuration (previously omitted here,
        # inconsistent with crawl_page); 'max_concurrent_tasks' is kept as-is
        # — TODO confirm it is accepted by this crawl4ai version.
        async with AsyncWebCrawler(config=self.browser_config, max_concurrent_tasks=3) as crawler:
            results = await crawler.arun_many(urls=urls, config=run_config)

            success_count = 0
            for result in results:
                if result.success:
                    success_count += 1
                    output_file = Path(output_dir) / f"{self._url_to_filename(result.url)}.md"
                    output_file.write_text(result.markdown.fit_markdown, encoding='utf-8')
                    print(f"✓ [{success_count}/{len(urls)}] 成功爬取: {result.url}")
                else:
                    print(f"✗ 爬取失败: {result.url}, 错误: {result.error_message}")

            print(f"批量爬取完成。成功: {success_count}/{len(urls)}")
            return success_count

    async def deep_crawl(self, start_url, max_pages=5, max_depth=2, output_dir="output"):
        """Deep-crawl a site breadth-first from ``start_url``.

        Args:
            start_url: Seed URL; crawling is restricted to its domain.
            max_pages: Upper bound on pages to fetch.
            max_depth: Upper bound on link depth from the seed.
            output_dir: Directory for the generated ``.md`` files.

        Returns:
            The number of successfully crawled pages.
        """
        os.makedirs(output_dir, exist_ok=True)

        # NOTE(review): 'scope' and 'delay_between_requests' are passed through
        # unchanged — confirm they are valid CrawlerRunConfig options.
        run_config = CrawlerRunConfig(
            scope='domain',  # restrict crawling to the seed URL's domain
            output_formats=['markdown', 'links'],
            delay_between_requests=0.5  # politeness delay between requests
        )

        print(f"开始深度爬取: {start_url}")
        print(f"设置: 最大深度={max_depth}, 最大页面数={max_pages}")

        # Reuse the shared browser configuration (previously omitted here).
        # NOTE(review): 'adeep_crawl' must exist on the installed crawl4ai
        # version — verify before upgrading the dependency.
        async with AsyncWebCrawler(config=self.browser_config, max_concurrent_tasks=3) as crawler:
            crawl_generator = await crawler.adeep_crawl(
                start_url=start_url,
                strategy="bfs",  # breadth-first traversal
                max_depth=max_depth,
                max_pages=max_pages,
                config=run_config
            )

            crawled_count = 0
            success_count = 0

            async for result in crawl_generator:
                crawled_count += 1
                if result.success:
                    success_count += 1
                    # Encode the crawl depth into the filename for traceability.
                    filename = f"depth_{result.depth}_url_{self._url_to_filename(result.url)}.md"
                    output_file = Path(output_dir) / filename
                    output_file.write_text(result.markdown.fit_markdown, encoding='utf-8')

                    internal_links = len(result.links.get('internal', []))
                    print(f"✓ [{crawled_count}] 深度: {result.depth}, URL: {result.url}, 内部链接: {internal_links}")
                else:
                    print(f"✗ [{crawled_count}] 爬取失败: {result.url}, 错误: {result.error_message}")

            print(f"\n深度爬取完成。总尝试: {crawled_count}, 成功: {success_count}")
            return success_count

def parse_arguments():
    """Build the command-line interface and return the parsed arguments."""
    parser = argparse.ArgumentParser(description="Crawl4AI 爬虫演示程序")
    subparsers = parser.add_subparsers(dest="command", help="选择操作命令")

    # "single": crawl one URL, optionally with BM25 content filtering.
    single = subparsers.add_parser("single", help="爬取单个URL")
    single.add_argument("url", help="要爬取的URL")
    single.add_argument("--bm25", action="store_true", help="使用BM25内容过滤算法")
    single.add_argument("--query", help="BM25过滤的关键词查询")

    # "multi": crawl a list of URLs in parallel.
    multi = subparsers.add_parser("multi", help="批量爬取多个URL")
    multi.add_argument("urls", nargs="+", help="要爬取的URL列表")

    # "deep": breadth-first crawl starting from a single seed URL.
    deep = subparsers.add_parser("deep", help="深度爬取网站")
    deep.add_argument("start_url", help="起始URL")
    deep.add_argument("--max-pages", type=int, default=5, help="最大爬取页面数")
    deep.add_argument("--max-depth", type=int, default=2, help="最大爬取深度")

    # Every subcommand shares the same output-directory option; register it
    # last for each parser, matching the original help ordering.
    for sub in (single, multi, deep):
        sub.add_argument("--output", "-o", default="output", help="输出目录")

    return parser.parse_args()

async def main():
    """Entry point: parse the CLI and dispatch to the matching crawl mode."""
    args = parse_arguments()

    # argparse only validates a subcommand when one is supplied, so guard
    # against a bare invocation explicitly.
    if not args.command:
        print("错误: 需要指定命令。使用 -h 查看帮助。")
        return

    demo = Crawl4AIDemo()

    if args.command == "single":
        await demo.crawl_page(
            args.url,
            output_dir=args.output,
            use_bm25=args.bm25,
            query=args.query,
        )
    elif args.command == "multi":
        await demo.crawl_multiple(args.urls, output_dir=args.output)
    elif args.command == "deep":
        await demo.deep_crawl(
            args.start_url,
            max_pages=args.max_pages,
            max_depth=args.max_depth,
            output_dir=args.output,
        )

# Run the async entry point only when executed as a script (not on import).
if __name__ == "__main__":
    asyncio.run(main())