#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
微信公众号文章爬虫使用示例
"""

from wechat_crawler import WeChatArticleCrawler


def example_single_article():
    """Demonstrate crawling one article and saving it to disk."""
    print("=== 单篇文章爬取示例 ===")

    # Placeholder link — replace with a real WeChat article URL before running.
    url = "https://mp.weixin.qq.com/s/example_article_url"

    crawler = WeChatArticleCrawler(use_selenium=False)
    try:
        # Fetch the article and persist it as both Markdown and HTML.
        results = crawler.crawl_and_save(
            url=url,
            format_type="both",
            output_dir="output",
        )
        if not results:
            print("保存失败")
        else:
            print(f"成功保存 {len(results)} 个文件:")
            for path in results:
                print(f"  - {path}")
    except Exception as e:
        print(f"爬取失败: {e}")
    finally:
        # Always release crawler resources, even on failure.
        crawler.close()


def example_batch_crawl():
    """Demonstrate crawling a list of article URLs in one batch."""
    print("\n=== 批量爬取示例 ===")

    # Placeholder links — replace with real WeChat article URLs before running.
    urls = [
        "https://mp.weixin.qq.com/s/example_article_url_1",
        "https://mp.weixin.qq.com/s/example_article_url_2",
        "https://mp.weixin.qq.com/s/example_article_url_3",
    ]

    # Deferred import: the batch helper is only needed for this example.
    from batch_crawler import BatchWeChatCrawler

    # delay=2 waits two seconds between consecutive requests.
    batch_crawler = BatchWeChatCrawler(use_selenium=False, delay=2)
    try:
        batch_crawler.crawl_from_list(
            urls=urls,
            format_type="both",
            output_dir="output",
        )
    except Exception as e:
        print(f"批量爬取失败: {e}")
    finally:
        # Always release crawler resources, even on failure.
        batch_crawler.close()


def example_custom_usage():
    """Demonstrate extracting article data and saving it manually."""
    print("\n=== 自定义使用示例 ===")

    url = "https://mp.weixin.qq.com/s/example_article_url"

    # This example uses the Selenium-backed crawler variant.
    crawler = WeChatArticleCrawler(use_selenium=True)
    try:
        # Extract the article content only; nothing is written to disk yet.
        info = crawler.extract_article_content(url)
        if not info:
            print("提取文章内容失败")
        else:
            print(f"文章标题: {info['title']}")
            print(f"作者: {info['author']}")
            print(f"发布时间: {info['publish_time']}")
            print(f"内容长度: {len(info['text_content'])} 字符")

            # Custom persistence: save as Markdown only, into its own directory.
            md_path = crawler.save_as_markdown(info, "custom_output")
            if md_path:
                print(f"Markdown文件已保存: {md_path}")
    except Exception as e:
        print(f"处理失败: {e}")
    finally:
        # Always release crawler resources, even on failure.
        crawler.close()


if __name__ == "__main__":
    print("微信公众号文章爬虫使用示例")
    print("=" * 50)
    print("注意: 请将示例中的链接替换为实际的微信公众号文章链接")
    print()
    
    # 运行示例（注释掉避免实际执行）
    # example_single_article()
    # example_batch_crawl()
    # example_custom_usage()
    
    print("请查看代码中的示例，并替换为实际的微信文章链接后运行。")