#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import asyncio
import json
import argparse
from pathlib import Path
from dotenv import load_dotenv
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

# Load environment variables from a local .env file (if present) into os.environ.
load_dotenv()

# Predefined extraction schemas for common page layouts, consumed by
# JsonCssExtractionStrategy. Each schema names a repeated container
# ("baseSelector") and the per-item fields to pull out of it; field type
# "text" extracts element text, "attribute" extracts the named attribute.
EXTRACTION_SCHEMAS = {
    # News/article listings: title, summary, detail link, publication date.
    "news": {
        "name": "新闻列表",
        "baseSelector": "article, div.news-item, .article-item, .news-card",
        "fields": [
            {
                "name": "title",
                "selector": "h1, h2, h3, .title, .headline", 
                "type": "text",
            },
            {
                "name": "summary",
                "selector": "p.summary, .description, .excerpt, .subtitle", 
                "type": "text",
            },
            {
                "name": "link",
                "selector": "a.read-more, a[href], .article-link", 
                "type": "attribute",
                "attribute": "href"
            },
            {
                "name": "date",
                "selector": ".date, .time, time, .published-date",
                "type": "text",
            }
        ]
    },
    # E-commerce product listings: name, price, image URL, product link.
    "products": {
        "name": "产品列表",
        "baseSelector": ".product, .product-item, .product-card, li.item",
        "fields": [
            {
                "name": "name",
                "selector": ".product-title, .product-name, h2, h3",
                "type": "text",
            },
            {
                "name": "price",
                "selector": ".price, .product-price, .amount",
                "type": "text",
            },
            {
                "name": "image",
                "selector": "img",
                "type": "attribute",
                "attribute": "src"
            },
            {
                "name": "link",
                "selector": "a",
                "type": "attribute",
                "attribute": "href"
            }
        ]
    },
    # HTML tables: one item per row (header row skipped), cells as a list.
    # NOTE(review): the nested "td" selector inside each "td" list item may
    # need to be relative to the cell — confirm against crawl4ai's
    # JsonCssExtractionStrategy documentation for nested "list" fields.
    "table": {
        "name": "表格数据",
        "baseSelector": "table tr:not(:first-child)",
        "fields": [
            {
                "name": "cells",
                "selector": "td",
                "type": "list",
                "fields": [
                    {
                        "name": "content",
                        "selector": "td",
                        "type": "text"
                    }
                ]
            }
        ]
    }
}

# Build a custom extraction schema from a base selector plus field selectors.
def create_custom_schema(base_selector, item_selectors):
    """Create a custom JsonCss extraction schema.

    Args:
        base_selector: CSS selector matching each repeated item on the page.
        item_selectors: Mapping of field name -> CSS selector. A key of the
            form ``"<field>_attr_<attribute>"`` (as produced by ``main()`` for
            ``--attr`` arguments) yields an attribute field extracting
            ``<attribute>``; a key ending in bare ``"_attr"`` yields an
            attribute field defaulting to ``href``; any other key yields a
            plain text field.

    Returns:
        A schema dict consumable by JsonCssExtractionStrategy.
    """
    schema = {
        "name": "自定义提取",
        "baseSelector": base_selector,
        "fields": []
    }

    for name, selector in item_selectors.items():
        if "_attr_" in name:
            # "<field>_attr_<attribute>" -> extract that attribute.
            # Fix: the previous endswith("_attr") test never matched these
            # keys, so --attr fields were silently emitted as text fields.
            field_name, attr_name = name.rsplit("_attr_", 1)
            schema["fields"].append({
                "name": field_name,
                "selector": selector,
                "type": "attribute",
                "attribute": attr_name
            })
        elif name.endswith("_attr"):
            # Bare "<field>_attr" -> attribute extraction, defaulting to href.
            # Fix: strip the "_attr" suffix from the field name (the old
            # rsplit("_attr_", ...) left it in place).
            schema["fields"].append({
                "name": name[:-len("_attr")],
                "selector": selector,
                "type": "attribute",
                "attribute": "href"
            })
        else:
            # Plain field name -> extract the element's text content.
            schema["fields"].append({
                "name": name,
                "selector": selector,
                "type": "text"
            })

    return schema

async def extract_data(url, schema_name=None, custom_schema=None, output_dir="output"):
    """Crawl *url* and extract structured data with a CSS extraction schema.

    Args:
        url: Page to crawl.
        schema_name: Key into EXTRACTION_SCHEMAS selecting a predefined
            schema; takes precedence over ``custom_schema``.
        custom_schema: Schema dict (e.g. from create_custom_schema()), used
            when no valid ``schema_name`` is given.
        output_dir: Directory receiving the JSON data and HTML source files.

    Returns:
        True when extraction and saving succeed, False otherwise.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Resolve the schema: a known predefined name wins over a custom dict.
    if schema_name and schema_name in EXTRACTION_SCHEMAS:
        schema = EXTRACTION_SCHEMAS[schema_name]
        print(f"使用预定义模式: {schema_name}")
    elif custom_schema:
        schema = custom_schema
        print("使用自定义模式")
    else:
        print("错误: 必须提供有效的模式名称或自定义模式")
        return False

    # Headless browser plus CSS extraction strategy; the cache is bypassed so
    # each run fetches a fresh copy of the page.
    browser_cfg = BrowserConfig(headless=True, verbose=True)
    strategy = JsonCssExtractionStrategy(schema, verbose=True)
    crawl_cfg = CrawlerRunConfig(
        extraction_strategy=strategy,
        cache_mode=CacheMode.BYPASS,
        output_formats=["extracted_content", "html"]
    )

    print(f"开始从 {url} 提取数据...")

    async with AsyncWebCrawler(config=browser_cfg) as crawler:
        result = await crawler.arun(url=url, config=crawl_cfg)

        if not (result.success and result.extracted_content):
            print(f"✗ 提取失败: {result.error_message if not result.success else '未找到匹配内容'}")
            return False

        try:
            data = json.loads(result.extracted_content)
        except json.JSONDecodeError as e:
            print(f"✗ 解析提取内容失败: {e}")
            return False

        item_count = len(data) if isinstance(data, list) else 1
        print(f"✓ 成功提取 {item_count} 个项目")

        # Derive a filesystem-friendly file name from the URL.
        safe_filename = url.replace("://", "_").replace("/", "_")

        # Persist the extracted items as pretty-printed UTF-8 JSON.
        output_file = Path(output_dir) / f"data_{safe_filename}.json"
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)
        print(f"数据已保存到: {output_file}")

        # Keep the raw HTML alongside the data for selector debugging.
        html_file = Path(output_dir) / f"source_{safe_filename}.html"
        with open(html_file, 'w', encoding='utf-8') as f:
            f.write(result.html)
        print(f"HTML源码已保存到: {html_file}")

        return True

def parse_arguments():
    """Build the command-line interface and return the parsed arguments."""
    parser = argparse.ArgumentParser(description="Crawl4AI 结构化数据提取示例")

    # Positional: the page to crawl.
    parser.add_argument("url", help="要爬取的URL")
    # Either a predefined schema name...
    parser.add_argument(
        "--schema", "-s",
        choices=list(EXTRACTION_SCHEMAS),
        help=f"预定义的提取模式: {', '.join(EXTRACTION_SCHEMAS.keys())}",
    )
    # ...or a custom schema assembled from --base plus --field/--attr pairs.
    parser.add_argument("--base", help="自定义基础选择器 (例如: 'div.product')")
    parser.add_argument(
        "--field", "-f",
        action="append", nargs=2, metavar=("NAME", "SELECTOR"),
        help="自定义字段选择器 (例如: title 'h2.title')",
    )
    parser.add_argument(
        "--attr", "-a",
        action="append", nargs=3, metavar=("NAME", "SELECTOR", "ATTRIBUTE"),
        help="自定义属性选择器 (例如: image 'img' src)",
    )
    parser.add_argument("--output", "-o", default="output", help="输出目录")

    return parser.parse_args()

async def main():
    """CLI entry point: assemble a schema from the arguments and run extraction."""
    args = parse_arguments()

    # A custom schema requires a base selector plus at least one field/attr.
    custom_schema = None
    if args.base and (args.field or args.attr):
        # Text fields map name -> selector directly; attribute fields are
        # encoded as "<name>_attr_<attribute>" keys for create_custom_schema.
        selectors = {name: selector for name, selector in (args.field or [])}
        for name, selector, attribute in (args.attr or []):
            selectors[f"{name}_attr_{attribute}"] = selector
        custom_schema = create_custom_schema(args.base, selectors)

    # Bail out when neither a predefined nor a complete custom schema exists.
    if not (args.schema or custom_schema):
        print("错误: 必须提供预定义模式名称或完整的自定义模式参数")
        print(f"可用的预定义模式: {', '.join(EXTRACTION_SCHEMAS.keys())}")
        return

    await extract_data(
        url=args.url,
        schema_name=args.schema,
        custom_schema=custom_schema,
        output_dir=args.output
    )

# Script entry point: drive the async main() on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())