# pull_data.py
import asyncio
from playwright.async_api import async_playwright
import json
from datetime import datetime
import argparse

async def monitor_website(url, output_file, headless=True, wait_time=10000):
    """
    Monitor all network traffic for a target page, capturing both the
    outgoing requests and their responses, then dump everything to JSON.

    Args:
        url (str): Target URL to monitor. A scheme is prepended
            automatically when missing (e.g. "www.baidu.com").
        output_file (str): Path of the JSON output file.
        headless (bool): Run the browser in headless mode.
        wait_time (int): How long to keep collecting traffic after page
            load, in milliseconds.
    """
    # Fix: page.goto() rejects scheme-less URLs ("Cannot navigate to
    # invalid URL"), so normalize before navigating.
    if '://' not in url:
        url = 'https://' + url

    all_requests = []

    async with async_playwright() as p:
        # Launch the browser and a context with a fixed viewport/UA.
        browser = await p.chromium.launch(headless=headless)
        context = await browser.new_context(
            viewport={'width': 1920, 'height': 1080},
            # Desktop Chrome UA; other context options could be set here.
            user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        )
        page = await context.new_page()

        def on_request(request):
            """Record request metadata and stash it on the request object
            so on_response can merge it with the response later."""
            request_data = {
                "timestamp": datetime.now().isoformat(),
                "url": request.url,
                "method": request.method,
                "resource_type": request.resource_type,
                "headers": dict(request.headers),
                "post_data": request.post_data,  # POST payload, if any
                "frame_url": request.frame.url if request.frame else None
            }
            # Attach to the request object so the response handler can
            # retrieve it without a separate lookup table.
            request._custom_data = request_data
            print(f"[请求] [{datetime.now().strftime('%H:%M:%S')}] {request.method} {request.url}")

        async def on_response(response):
            """Capture response metadata (and body when textual), merge it
            with the stored request data, and append to all_requests."""
            try:
                request = response.request

                response_data = {
                    "status": response.status,
                    "status_text": response.status_text,
                    "headers": dict(response.headers),
                    "url": response.url,
                    "request_url": request.url,
                    "method": request.method
                }

                # Not every response body can be retrieved (e.g. redirects),
                # so guard the body fetch separately.
                try:
                    response_body = await response.body()
                    # Single normalized lookup instead of the previous
                    # duplicated 'text'/'json' branches with identical bodies.
                    content_type = response.headers.get('content-type', '').lower()
                    if 'text' in content_type or 'json' in content_type:
                        # Textual payloads are saved in full.
                        response_data["body"] = response_body.decode('utf-8', errors='ignore')
                    elif len(response_body) < 1000:
                        # Small non-text payloads: keep a lossy preview.
                        response_data["body_preview"] = response_body.decode('utf-8', errors='ignore')
                    else:
                        # Large non-text payloads: record the size only.
                        response_data["body_size"] = len(response_body)
                except Exception as e:
                    response_data["body_error"] = str(e)

                # Merge with the request data stashed by on_request (may be
                # missing if the request fired before the listener attached).
                request_data = getattr(request, '_custom_data', {})
                complete_data = {
                    **request_data,
                    "response": response_data
                }

                all_requests.append(complete_data)
                print(f"[响应] [{datetime.now().strftime('%H:%M:%S')}] {response.status} {response.url}")

            except Exception as e:
                print(f"处理响应时出错: {e}")

        page.on("request", on_request)
        page.on("response", on_response)

        try:
            print(f"开始监控: {url}")
            # Navigate; DOMContentLoaded is enough since we keep listening.
            response = await page.goto(url, wait_until="domcontentloaded")
            print(f"页面加载状态: {response.status if response else '未知'}")

            # Give asynchronous (XHR/fetch) traffic time to arrive.
            print(f"等待 {wait_time/1000} 秒收集请求...")
            await asyncio.sleep(wait_time / 1000)

        except Exception as e:
            print(f"导航过程中发生错误: {e}")
        finally:
            # Persist whatever was captured, even if navigation failed.
            if all_requests:
                def serialize_data(obj):
                    """Fallback serializer for values json can't encode."""
                    if isinstance(obj, bytes):
                        return obj.decode('utf-8', errors='ignore')
                    elif isinstance(obj, Exception):
                        return str(obj)
                    raise TypeError(f"Object of type {type(obj)} is not JSON serializable")

                with open(output_file, 'w', encoding='utf-8') as f:
                    json.dump(all_requests, f, indent=2, ensure_ascii=False, default=serialize_data)
                print(f"\n捕获到 {len(all_requests)} 个请求/响应，已保存到 {output_file}")

                # Summarize what was captured.
                print("\n=== 统计信息 ===")
                status_codes = {}
                methods = {}
                resource_types = {}

                for req in all_requests:
                    # Tally status codes.
                    if 'response' in req and 'status' in req['response']:
                        status = req['response']['status']
                        status_codes[status] = status_codes.get(status, 0) + 1
                    # Tally HTTP methods.
                    if 'method' in req:
                        method = req['method']
                        methods[method] = methods.get(method, 0) + 1
                    # Tally resource types.
                    if 'resource_type' in req:
                        resource_type = req['resource_type']
                        resource_types[resource_type] = resource_types.get(resource_type, 0) + 1

                print(f"状态码分布: {status_codes}")
                print(f"请求方法分布: {methods}")
                print(f"资源类型分布: {resource_types}")
            else:
                print("没有捕获到任何请求")

            await browser.close()

def main():
    """CLI entry point: parse arguments and run the network monitor."""
    parser = argparse.ArgumentParser(description='网页请求监控工具（收集请求和响应数据）')
    # Fix: the previous default 'www.baidu.com' had no scheme, which makes
    # page.goto() fail with "Cannot navigate to invalid URL".
    parser.add_argument('--url', default='https://www.baidu.com', help='要监控的网址')
    parser.add_argument('--output', default='captured_requests.json', help='输出文件名')
    # --visible flips headless off; headless=True remains the default.
    parser.add_argument('--visible', action='store_false', dest='headless', help='显示浏览器界面（默认无头模式）')
    parser.add_argument('--wait', type=int, default=20000, help='等待时间（毫秒）')

    args = parser.parse_args()

    # Drive the async monitor from the synchronous CLI entry point.
    asyncio.run(monitor_website(
        url=args.url,
        output_file=args.output,
        headless=args.headless,
        wait_time=args.wait
    ))

if __name__ == "__main__":
    main()