#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
简单的API使用演示
展示如何通过RESTful API获取微信公众号文章内容
"""

import requests
import json


def demo_single_article(api_url="http://localhost:8080/api/crawl",
                        article_url="https://mp.weixin.qq.com/s/S9kSrZ7uavUD21G4ljyhOA"):
    """Demonstrate crawling a single WeChat article through the REST API.

    Args:
        api_url: Endpoint of the single-article crawl API (POST /api/crawl).
        article_url: WeChat article URL to crawl.

    Returns:
        dict: The article payload on success, or None on any failure
        (HTTP error, API-reported failure, timeout, connection error).
    """
    print("=== 单篇文章爬取演示 ===")

    # Request payload; "format" may be one of: text, html, markdown.
    request_data = {
        "url": article_url,
        "format": "text"
    }

    print(f"正在爬取文章: {article_url}")

    try:
        # requests sets "Content-Type: application/json" automatically when
        # json= is used, so no explicit header is needed.
        response = requests.post(api_url, json=request_data, timeout=30)

        # HTTP-level check first, then the API's own success flag.
        if response.status_code == 200:
            result = response.json()

            if result['success']:
                # Extract the article data.
                article = result['data']

                print("\n✓ 爬取成功！")
                print(f"标题: {article['title']}")
                print(f"作者: {article['author']}")
                print(f"发布时间: {article['publish_time']}")
                print(f"内容长度: {article['content_length']} 字符")
                print(f"内容格式: {article['content_type']}")
                print("\n--- 内容预览 ---")
                print(article['content'][:300] + "...")

                return article
            else:
                print(f"✗ 爬取失败: {result['error']}")
                return None
        else:
            print(f"✗ 请求失败: HTTP {response.status_code}")
            print(f"错误信息: {response.text}")
            return None

    except requests.exceptions.Timeout:
        print("✗ 请求超时")
        return None
    except requests.exceptions.ConnectionError:
        print("✗ 连接失败，请确保API服务器正在运行")
        return None
    except Exception as e:
        # Last-resort guard so the demo keeps running; report and move on.
        print(f"✗ 发生错误: {e}")
        return None


def demo_different_formats(api_url="http://localhost:8080/api/crawl",
                           article_url="https://mp.weixin.qq.com/s/S9kSrZ7uavUD21G4ljyhOA",
                           formats=('text', 'html', 'markdown')):
    """Demonstrate crawling the same article in each supported output format.

    Args:
        api_url: Endpoint of the single-article crawl API (POST /api/crawl).
        article_url: WeChat article URL to crawl.
        formats: Iterable of format names to request, one API call each.

    Returns:
        None. Results are printed; per-format failures are reported and
        do not stop the remaining formats.
    """
    print("\n=== 不同格式输出演示 ===")

    for format_type in formats:
        print(f"\n--- {format_type.upper()} 格式 ---")

        request_data = {
            "url": article_url,
            "format": format_type
        }

        try:
            response = requests.post(api_url, json=request_data, timeout=30)

            if response.status_code == 200:
                result = response.json()
                if result['success']:
                    article = result['data']
                    print(f"✓ 格式: {article['content_type']}")
                    print(f"✓ 长度: {article['content_length']} 字符")
                    print(f"✓ 预览: {article['content'][:150]}...")
                else:
                    print(f"✗ 失败: {result['error']}")
            else:
                print(f"✗ HTTP错误: {response.status_code}")

        except Exception as e:
            # Keep iterating over the remaining formats on any error.
            print(f"✗ 异常: {e}")


def demo_batch_crawl(api_url="http://localhost:8080/api/batch-crawl", urls=None):
    """Demonstrate crawling several articles in one batch API call.

    Args:
        api_url: Endpoint of the batch crawl API (POST /api/batch-crawl).
        urls: List of WeChat article URLs. Defaults to a one-item sample
            list (None sentinel avoids a shared mutable default).

    Returns:
        None. Summary counts and per-article results are printed.
    """
    print("\n=== 批量爬取演示 ===")

    if urls is None:
        # Sample article links (replace with real ones as needed).
        urls = [
            "https://mp.weixin.qq.com/s/S9kSrZ7uavUD21G4ljyhOA",
        ]

    request_data = {
        "urls": urls,
        "format": "text"
    }

    print(f"正在批量爬取 {len(urls)} 篇文章...")

    try:
        # Longer timeout than the single-article call: the server crawls
        # every URL before responding.
        response = requests.post(api_url, json=request_data, timeout=60)

        if response.status_code == 200:
            result = response.json()

            if result['success']:
                data = result['data']
                print(f"\n✓ 批量爬取完成")
                print(f"总计: {data['total']} 篇")
                print(f"成功: {data['success_count']} 篇")
                print(f"失败: {data['failed_count']} 篇")

                print("\n--- 详细结果 ---")
                # Each entry carries its own success flag and payload/error.
                for i, item in enumerate(data['results'], 1):
                    print(f"\n文章 {i}:")
                    if item['success']:
                        article_data = item['data']
                        print(f"  ✓ 标题: {article_data['title']}")
                        print(f"  ✓ 作者: {article_data['author']}")
                        print(f"  ✓ 长度: {article_data['content_length']} 字符")
                    else:
                        print(f"  ✗ 失败: {item['error']}")
            else:
                print(f"✗ 批量爬取失败: {result['error']}")
        else:
            print(f"✗ 请求失败: HTTP {response.status_code}")

    except Exception as e:
        print(f"✗ 发生错误: {e}")


def demo_health_check(health_url="http://localhost:8080/api/health"):
    """Demonstrate the API health-check endpoint.

    Args:
        health_url: Endpoint of the health check (GET /api/health).

    Returns:
        None. Service status fields are printed; errors are reported
        without raising.
    """
    print("\n=== 健康检查演示 ===")

    try:
        # Short timeout: a healthy server should answer almost instantly.
        response = requests.get(health_url, timeout=5)

        if response.status_code == 200:
            data = response.json()
            print("✓ API服务运行正常")
            print(f"  状态: {data['status']}")
            print(f"  时间: {data['timestamp']}")
            print(f"  服务: {data['service']}")
        else:
            print(f"✗ 健康检查失败: HTTP {response.status_code}")

    except Exception as e:
        print(f"✗ 健康检查异常: {e}")


def main():
    """Run every demo against a locally running API server, then print
    a short summary of the available endpoints."""
    banner = "=" * 60
    print("微信公众号文章爬虫 RESTful API 使用演示")
    print(banner)
    print("注意: 请确保API服务器正在运行 (python api_server.py)")
    print(banner)

    # Exercise each endpoint in turn; every demo handles its own errors.
    demos = (
        demo_health_check,
        demo_single_article,
        demo_different_formats,
        demo_batch_crawl,
    )
    for demo in demos:
        demo()

    print("\n" + banner)
    print("演示完成！")
    print("\nAPI接口说明:")
    print("- GET  /api/health     : 健康检查")
    print("- POST /api/crawl      : 单篇文章爬取")
    print("- POST /api/batch-crawl: 批量文章爬取")
    print("\n详细文档请查看: API_DOCUMENTATION.md")


# Run the full demo suite only when executed as a script, not on import.
if __name__ == "__main__":
    main()