import requests
import os
import json
import time
from urllib.parse import quote

def crawl_urls(urls):
    """
    Send a POST request for each URL to the crawl API and save each
    JSON response to disk.

    Args:
        urls: list of URL strings to submit to the crawl API.

    Side effects:
        Creates the ``result_json`` directory if it does not exist and
        writes one UTF-8 JSON file per successfully crawled URL.
        Failures are printed to stdout and the loop continues; this
        function never raises.
    """
    # Create the output directory on first use.
    result_dir = 'result_json'
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
        print(f"创建结果目录: {result_dir}")

    # Crawl API endpoint.
    api_url = 'http://47.99.144.80:6664/api/crawl'

    # Process each URL independently so one failure doesn't stop the run.
    for i, url in enumerate(urls, 1):
        try:
            # Request payload expected by the API.
            data = {"url": url}

            print(f"\n处理第 {i}/{len(urls)} 个URL:")
            print(f"URL: {url}")

            # Fix: requests has NO default timeout — a stalled server
            # would hang the whole run forever. Cap connect+read at 30s.
            response = requests.post(api_url, json=data, timeout=30)
            response.raise_for_status()  # raise on 4xx/5xx status codes

            # Parse the JSON response body.
            result = response.json()

            # Percent-encode the full URL so it is a filesystem-safe,
            # collision-free filename.
            filename = quote(url, safe='') + '.txt'
            filepath = os.path.join(result_dir, filename)

            # Persist the result as pretty-printed UTF-8 JSON.
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(result, f, ensure_ascii=False, indent=2)

            print(f"✅ 成功保存结果到: {filepath}")

            # Brief pause between requests to avoid hammering the API.
            time.sleep(1)

        except requests.RequestException as e:
            print(f"❌ 请求失败: {str(e)}")
        except Exception as e:
            print(f"❌ 处理出错: {str(e)}")

if __name__ == "__main__":
    # Fallback URL used when no input is provided on stdin.
    test_urls = [
        "https://www.xiaohongshu.com/explore/64fe5853000000001f036ceb?xsec_token=ABT2GgfP0cfFDC8Uk_K_OjKBilLIzj2p1zHhXyzF8AfYs=&xsec_source=pc_search&source=web_explore_feed"
    ]

    print("请输入URL列表（每行一个URL，输入完成后按Ctrl+D或Ctrl+Z）：")
    urls = []
    try:
        # Read until a blank line terminates input; whitespace-only
        # lines are skipped (but do not terminate).
        for line in iter(input, ''):
            if line.strip():
                urls.append(line.strip())
    except EOFError:
        # Bug fix: the old comprehension discarded every URL already
        # typed when the user ended input with Ctrl+D/Ctrl+Z (EOF
        # aborts the whole comprehension). Keep what was collected and
        # only fall back to the test list when nothing was entered.
        if not urls:
            urls = test_urls

    if not urls:
        print("没有输入URL！")
        exit(1)

    print(f"\n开始处理 {len(urls)} 个URL...")
    crawl_urls(urls)