#!/usr/bin/env python3
"""
多方式爬虫工具使用示例
"""

import logging
from crawler_manager import CrawlerManager

# 设置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

def example_1_simple_crawl():
    """Example 1: crawl a single page with the plain HTTP backend.

    Fetches one URL via ``CrawlerManager.crawl_single_url`` and prints the
    response time and content length on success.
    """
    print("=== 示例1: 简单网页爬取 ===")

    crawler = CrawlerManager()
    try:
        # Result is falsy when the crawl failed.
        result = crawler.crawl_single_url("https://httpbin.org/get", method='http')

        if result:
            print(f"成功爬取，响应时间: {result['response_time']:.2f}秒")
            print(f"内容长度: {len(result['content'])} 字符")
    finally:
        # Release crawler resources even if the crawl raised.
        crawler.close()

def example_2_multiple_urls():
    """Example 2: crawl a batch of URLs concurrently.

    Crawls three httpbin endpoints with up to 3 workers, saves the results
    as JSON and CSV, and prints the crawler's aggregate statistics.
    """
    print("\n=== 示例2: 批量URL爬取 ===")

    crawler = CrawlerManager()
    try:
        urls = [
            "https://httpbin.org/get",
            "https://httpbin.org/user-agent",
            "https://httpbin.org/headers"
        ]

        results = crawler.crawl_multiple_urls(urls, method='http', max_workers=3)

        print(f"成功爬取 {len(results)} 个URL")

        # Persist the results in both supported formats.
        crawler.save_results(results, format='json')
        crawler.save_results(results, format='csv')

        # Print run statistics; success_rate may be absent, default to 0.
        stats = crawler.get_statistics()
        print(f"总请求数: {stats['total_requests']}")
        print(f"成功率: {stats.get('success_rate', 0):.2%}")
    finally:
        # Release crawler resources even if any step above raised.
        crawler.close()

def example_3_with_selectors():
    """Example 3: parse a crawled page with CSS selectors.

    Passes a selector map to ``crawl_single_url`` and prints a summary of
    the parsed title, headings, and links.
    """
    print("\n=== 示例3: 数据解析示例 ===")

    crawler = CrawlerManager()
    try:
        # CSS selectors: result key -> selector expression.
        selectors = {
            'title': 'title',
            'headings': 'h1, h2, h3',
            'links': 'a[href]'
        }

        result = crawler.crawl_single_url(
            "https://example.com",
            method='http',
            selectors=selectors
        )

        # 'parsed_data' is only present when selector parsing succeeded.
        if result and 'parsed_data' in result:
            parsed = result['parsed_data']
            print(f"页面标题: {parsed.get('title', 'N/A')}")
            print(f"找到 {len(parsed.get('headings', []))} 个标题")
            print(f"找到 {len(parsed.get('links', []))} 个链接")
    finally:
        # Release crawler resources even if the crawl raised.
        crawler.close()

def example_4_selenium_crawl():
    """Example 4: crawl a dynamic page with the Selenium backend.

    Waits for the ``body`` element (up to 10s) before capturing content and
    reports any anti-bot detection signals found in the result.
    """
    print("\n=== 示例4: Selenium浏览器爬取 ===")

    crawler = CrawlerManager()
    try:
        result = crawler.crawl_single_url(
            "https://example.com",
            method='selenium',
            wait_element='body',  # wait for the body element to load
            wait_time=10
        )

        if result:
            print(f"Selenium爬取成功，内容长度: {len(result['content'])}")

            # Surface anti-crawling signals reported by the crawler.
            if result['detection_signals']['captcha_detected']:
                print("⚠️ 检测到验证码")
            if result['detection_signals']['rate_limited']:
                print("⚠️ 检测到限流")
    finally:
        # Critical for Selenium: close() must run even on failure,
        # otherwise the browser process is leaked.
        crawler.close()

def example_5_pagination_crawl():
    """Example 5: crawl paginated content.

    Uses ``crawl_with_pagination`` to fetch up to 3 pages, varying the
    ``page`` query parameter, and prints how many pages were retrieved.
    """
    print("\n=== 示例5: 分页爬取 ===")

    crawler = CrawlerManager()
    try:
        results = crawler.crawl_with_pagination(
            base_url="https://httpbin.org/get",
            page_param="page",
            max_pages=3,
            method='http'
        )

        print(f"分页爬取完成，共获取 {len(results)} 页")
    finally:
        # Release crawler resources even if pagination raised.
        crawler.close()

def example_6_with_proxy():
    """Example 6: crawl through a proxy pool.

    Loads a hard-coded proxy list into the crawler's proxy manager (when
    one is configured), tests the proxies, then performs a crawl.
    """
    print("\n=== 示例6: 代理爬取示例 ===")

    crawler = CrawlerManager()
    try:
        # Placeholder proxies — replace with real ones to see an effect.
        proxy_list = [
            "http://proxy1.example.com:8080",
            "http://proxy2.example.com:8080"
        ]

        # proxy_manager may be None when proxying is disabled in config.
        if crawler.proxy_manager:
            crawler.proxy_manager.load_proxies_from_list(proxy_list)
            crawler.proxy_manager.test_all_proxies()

            proxy_stats = crawler.proxy_manager.get_proxy_stats()
            print(f"代理统计: {proxy_stats}")

        result = crawler.crawl_single_url("https://httpbin.org/ip", method='http')

        if result:
            print("使用代理爬取成功")
    finally:
        # Release crawler resources even if proxy setup or crawl raised.
        crawler.close()

def example_7_data_storage():
    """Example 7: persist crawl results to files and a database.

    Crawls two URLs, saves the results as JSON and CSV, inserts truncated
    records into the storage database, then queries back the latest rows.
    """
    print("\n=== 示例7: 数据存储示例 ===")

    crawler = CrawlerManager()
    try:
        urls = ["https://httpbin.org/get", "https://httpbin.org/user-agent"]
        results = crawler.crawl_multiple_urls(urls, method='http')

        if results:
            # File formats.
            crawler.save_results(results, format='json', filename='demo_results.json')
            crawler.save_results(results, format='csv', filename='demo_results.csv')

            # Database: store a trimmed record per result.
            for result in results:
                data = {
                    'url': result['url'],
                    'title': 'Demo Page',
                    'content': result['content'][:1000],  # cap stored length
                    'metadata': {'method': result['method']}
                }
                crawler.storage.save_to_database(data)

            print("数据已保存为多种格式")

            # Read back the most recent rows to verify the writes.
            db_results = crawler.storage.query_database(
                "SELECT url, timestamp FROM crawl_data ORDER BY timestamp DESC LIMIT 5"
            )

            if db_results:
                print("数据库中的最新记录:")
                for record in db_results:
                    print(f"  {record['url']} - {record['timestamp']}")
    finally:
        # Release crawler/storage resources even if any step raised.
        crawler.close()

def example_8_custom_headers():
    """Example 8: crawl with custom HTTP request headers.

    Sends Accept, Authorization, and a custom header to an echo endpoint
    that reflects the request headers back.
    """
    print("\n=== 示例8: 自定义请求头 ===")

    crawler = CrawlerManager()
    try:
        custom_headers = {
            'Accept': 'application/json',
            'Authorization': 'Bearer your-token-here',  # placeholder token
            'Custom-Header': 'custom-value'
        }

        result = crawler.crawl_single_url(
            "https://httpbin.org/headers",
            method='http',
            headers=custom_headers
        )

        if result:
            print("使用自定义请求头爬取成功")
    finally:
        # Release crawler resources even if the crawl raised.
        crawler.close()

def main():
    """Run every enabled demo example in order."""
    print("🕷️  多方式爬虫工具演示")
    print("=" * 50)

    # Table of demos to run; environment-dependent ones stay disabled.
    demos = (
        example_1_simple_crawl,
        example_2_multiple_urls,
        example_3_with_selectors,
        # example_4_selenium_crawl,  # needs a browser driver
        example_5_pagination_crawl,
        # example_6_with_proxy,  # needs working proxies
        example_7_data_storage,
        example_8_custom_headers,
    )

    try:
        for demo in demos:
            demo()

        print("\n✅ 所有示例运行完成！")
        print("请查看 output/ 目录中的结果文件")

    except KeyboardInterrupt:
        print("\n⚠️ 用户中断操作")
    except Exception as e:
        # Top-level boundary: report and exit instead of crashing.
        print(f"\n❌ 运行出错: {str(e)}")

# Run the demo suite only when executed as a script, not on import.
if __name__ == "__main__":
    main()