#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
WebScraper 使用示例
展示爬虫类的各种功能和使用方法
"""

from scraper import WebScraper
import time


def basic_scraping_example(url="https://om0813.zw7gmc49.work/"):
    """Basic scraping example.

    Demonstrates the minimal WebScraper workflow: navigate to *url*,
    read page metadata, take a screenshot, download a couple of images,
    and save the collected metadata as JSON.

    Args:
        url: Target site to scrape (defaults to the demo site used by
            all examples in this module).
    """
    print("=" * 50)
    print("🔍 基础爬取示例")
    print("=" * 50)

    scraper = None
    try:
        # Headless mode: no visible browser window.
        scraper = WebScraper(headless=True, timeout=10)

        if scraper.navigate_to(url, wait_time=5):
            # Summarize the page (title, link count, image count).
            page_info = scraper.get_page_info()
            if page_info:
                print(f"📄 页面标题: {page_info['title']}")
                print(f"🔗 链接数量: {page_info['links_count']}")
                print(f"🖼️ 图片数量: {page_info['images_count']}")

            # Capture the rendered page.
            scraper.take_screenshot("basic_example.png")

            # Download a small sample of images.
            downloaded = scraper.download_images(max_count=2)
            print(f"📥 下载了 {len(downloaded)} 张图片")

            # Persist the collected metadata for later inspection.
            if page_info:
                scraper.save_data(page_info, "basic_example_data.json")

    except Exception as e:
        # Broad catch is deliberate: a demo should report the failure
        # and let the remaining examples run.
        print(f"❌ 基础示例执行失败: {e}")
    finally:
        # Always release the browser, even on failure.
        if scraper:
            scraper.close()


def advanced_scraping_example(url="https://om0813.zw7gmc49.work/"):
    """Advanced scraping example.

    Runs the browser in headed mode so the automation is visible, then
    queries elements by CSS selector, extracts page text, takes a
    screenshot, downloads more images, and saves a detailed page report.

    Args:
        url: Target site to scrape (defaults to the demo site used by
            all examples in this module).
    """
    print("\n" + "=" * 50)
    print("🚀 高级爬取示例")
    print("=" * 50)

    scraper = None
    try:
        # Headed mode: the browser window is shown so the user can
        # watch the automation steps.
        scraper = WebScraper(headless=False, timeout=15)

        if scraper.navigate_to(url, wait_time=3):
            # Give dynamically loaded elements a moment to appear.
            print("⏳ 等待页面元素加载...")
            time.sleep(2)

            # Collect all anchor hrefs on the page.
            links = scraper.find_elements_by_selector("a", "href")
            print(f"🔗 找到 {len(links)} 个链接")

            # Extract the full visible page text.
            body_text = scraper.get_text("body")
            print(f"📝 页面文本长度: {len(body_text)} 字符")

            scraper.take_screenshot("advanced_example.png")

            # Larger image batch with a longer per-download timeout.
            downloaded = scraper.download_images(max_count=5, timeout=15)
            print(f"📥 下载了 {len(downloaded)} 张图片")

            # Save a more detailed report than the basic example.
            page_info = scraper.get_page_info(max_links=20, max_images=15, text_length=1000)
            if page_info:
                scraper.save_data(page_info, "advanced_example_data.json")

    except Exception as e:
        # Broad catch is deliberate: report and continue with the run.
        print(f"❌ 高级示例执行失败: {e}")
    finally:
        # Always release the browser, even on failure.
        if scraper:
            scraper.close()


def custom_scraping_example(url="https://om0813.zw7gmc49.work/"):
    """Custom scraping example.

    Bypasses the WebScraper convenience methods and reads directly from
    the underlying ``scraper.driver`` (the raw WebDriver) to assemble a
    custom info dict, then saves it and takes a screenshot.

    Args:
        url: Target site to scrape (defaults to the demo site used by
            all examples in this module).
    """
    print("\n" + "=" * 50)
    print("🎯 自定义爬取示例")
    print("=" * 50)

    scraper = None
    try:
        scraper = WebScraper(headless=True, timeout=20)

        if scraper.navigate_to(url, wait_time=4):
            # Build a custom report straight from the raw driver —
            # useful when get_page_info() doesn't expose a field.
            custom_info = {
                "url": scraper.driver.current_url,
                "title": scraper.driver.title,
                "viewport_size": scraper.driver.get_window_size(),
                "page_source_length": len(scraper.driver.page_source),
                "cookies_count": len(scraper.driver.get_cookies()),
                "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            }

            print("📊 自定义页面信息:")
            for key, value in custom_info.items():
                print(f"   - {key}: {value}")

            # Persist the custom report.
            scraper.save_data(custom_info, "custom_example_data.json")

            scraper.take_screenshot("custom_example.png")

    except Exception as e:
        # Broad catch is deliberate: report and continue with the run.
        print(f"❌ 自定义示例执行失败: {e}")
    finally:
        # Always release the browser, even on failure.
        if scraper:
            scraper.close()


def main():
    """Entry point: run every example in sequence with a short pause between."""
    print("🚀 WebScraper 使用示例程序")
    print("本程序将展示爬虫类的各种功能")

    examples = (
        basic_scraping_example,
        advanced_scraping_example,
        custom_scraping_example,
    )
    try:
        for position, run_example in enumerate(examples):
            if position:
                # Brief pause between consecutive examples.
                time.sleep(2)
            run_example()

        print("\n🎉 所有示例执行完成！")
        print("请查看 downloads 目录中的结果文件")

    except KeyboardInterrupt:
        # Ctrl-C: stop gracefully instead of printing a traceback.
        print("\n⏹️ 用户中断了程序")
    except Exception as e:
        # Top-level safety net so the script exits with a readable message.
        print(f"\n❌ 程序执行出错: {e}")


if __name__ == "__main__":
    main()
