# analyze_requests.py
import json
import re
from urllib.parse import urlparse, parse_qs

def analyze_captured_requests(json_file_path, target_url=None):
    """Load a JSON capture file and print an analysis of its requests.

    Args:
        json_file_path (str): Path to the JSON file of captured entries.
        target_url (str): Optional URL; when given, only entries related to
            it are analyzed.
    """
    try:
        with open(json_file_path, 'r', encoding='utf-8') as fh:
            records = json.load(fh)
    except FileNotFoundError:
        print(f"文件 {json_file_path} 不存在")
        return
    except json.JSONDecodeError as err:
        print(f"JSON解析错误: {err}")
        return

    print(f"总共捕获到 {len(records)} 个请求/响应")

    # Narrow down to entries related to the target URL, if one was given.
    if target_url:
        related = filter_requests_by_url(records, target_url)
        print(f"与 {target_url} 相关的请求有 {len(related)} 个")
        records = related

    analyze_requests(records, target_url)

def filter_requests_by_url(requests_data, target_url):
    """Return the captured entries whose URL relates to *target_url*.

    An entry matches when the target's domain and path are substrings of
    either the entry's own 'url' or the 'request_url' stored inside its
    response payload.
    """
    parsed_target = urlparse(target_url)
    domain = parsed_target.netloc
    path = parsed_target.path

    def related(candidate):
        # Substring comparison keeps sub-domains and deeper paths in scope.
        if not candidate:
            return False
        parsed = urlparse(candidate)
        return domain in parsed.netloc and path in parsed.path

    matched = []
    for entry in requests_data:
        own_url = entry.get('url', '')
        response_url = entry.get('response', {}).get('request_url', '')
        if related(own_url) or related(response_url):
            matched.append(entry)
    return matched

def analyze_requests(requests_data, target_url=None):
    """Print a per-entry report: URL, method, resource type and response.

    Args:
        requests_data (list[dict]): Captured request/response records.
        target_url (str): Unused here; kept for interface compatibility.
    """
    print("\n=== 请求分析 ===")

    for i, request in enumerate(requests_data):
        print(f"\n--- 请求 {i+1} ---")

        # Basic request information.
        url = request.get('url', 'N/A')
        method = request.get('method', 'N/A')
        resource_type = request.get('resource_type', 'N/A')

        print(f"URL: {url}")
        print(f"方法: {method}")
        print(f"资源类型: {resource_type}")

        # Response information; guard clause flattens the original nesting.
        response = request.get('response', {})
        if not response:
            print("无响应数据")
            continue

        status = response.get('status', 'N/A')
        status_text = response.get('status_text', 'N/A')
        print(f"响应状态: {status} {status_text}")

        body = response.get('body', '')
        if body:
            print("响应体内容:")
            # Pretty-print JSON-looking bodies; fall back to truncated text.
            # (Was a redundant re-import of json plus a bare `except:` that
            # swallowed every error, not just parse failures.)
            if isinstance(body, str) and body.strip().startswith(('{', '[')):
                try:
                    parsed_json = json.loads(body)
                    print(json.dumps(parsed_json, indent=2, ensure_ascii=False))
                except json.JSONDecodeError:
                    print_truncated(body, 500)
            else:
                print_truncated(body, 500)
        else:
            body_preview = response.get('body_preview', '')
            if body_preview:
                print("响应体预览:")
                print_truncated(body_preview, 200)

            body_size = response.get('body_size', '')
            if body_size:
                print(f"响应体大小: {body_size} 字节")

def print_truncated(text, max_length=500):
    """Print *text*, cutting it at *max_length* characters with a marker."""
    suffix = "...(内容过长已截断)" if len(text) > max_length else ""
    print(text[:max_length] + suffix)

def extract_specific_data(json_file_path, target_url):
    """Dump the response bodies of every entry matching *target_url*.

    Matching bodies are printed and saved to 'extracted_data.json'.
    """
    try:
        with open(json_file_path, 'r', encoding='utf-8') as fh:
            all_records = json.load(fh)
    except FileNotFoundError:
        print(f"文件 {json_file_path} 不存在")
        return
    except json.JSONDecodeError as err:
        print(f"JSON解析错误: {err}")
        return

    print(f"正在分析与 {target_url} 相关的请求...")

    related = filter_requests_by_url(all_records, target_url)
    if not related:
        print("未找到相关请求")
        return

    print(f"找到 {len(related)} 个相关请求")

    collected = []
    for idx, record in enumerate(related):
        resp = record.get('response', {})
        payload = resp.get('body', '')
        if not payload:
            print(f"请求 {idx+1} 无响应体数据")
            continue

        collected.append({
            'request_url': record.get('url', ''),
            'response_status': resp.get('status', 'N/A'),
            'body': payload,
        })
        print(f"\n--- 请求 {idx+1} 响应体 ---")
        print(f"URL: {record.get('url', '')}")
        print(f"状态: {resp.get('status', 'N/A')}")
        print("响应体:")

        # Render JSON-like payloads pretty-printed; anything else truncated.
        looks_like_json = isinstance(payload, str) and (
            payload.strip().startswith('{') or payload.strip().startswith('['))
        if looks_like_json:
            try:
                print(json.dumps(json.loads(payload), indent=2, ensure_ascii=False))
            except json.JSONDecodeError:
                print_truncated(payload, 1000)
        else:
            print_truncated(payload, 1000)

    # Persist everything that was extracted for later inspection.
    output_file = 'extracted_data.json'
    with open(output_file, 'w', encoding='utf-8') as fh:
        json.dump(collected, fh, indent=2, ensure_ascii=False)
    print(f"\n提取的数据已保存到 {output_file}")

def search_in_responses(json_file_path, search_term):
    """Scan every captured response body for *search_term*, printing context."""
    try:
        with open(json_file_path, 'r', encoding='utf-8') as fh:
            records = json.load(fh)
    except FileNotFoundError:
        print(f"文件 {json_file_path} 不存在")
        return
    except json.JSONDecodeError as err:
        print(f"JSON解析错误: {err}")
        return

    print(f"在所有响应体中搜索: {search_term}")

    matches = []
    for idx, record in enumerate(records):
        resp = record.get('response', {})
        body = resp.get('body', '')
        if not (body and isinstance(body, str) and search_term in body):
            continue

        matches.append({
            'index': idx + 1,
            'url': record.get('url', ''),
            'status': resp.get('status', 'N/A'),
            'matched_content': body,
        })
        print(f"\n--- 匹配 {len(matches)} ---")
        print(f"URL: {record.get('url', '')}")
        print(f"状态: {resp.get('status', 'N/A')}")
        # Show each hit with two lines of context on either side.
        lines = body.split('\n')
        for line_no, line in enumerate(lines):
            if search_term not in line:
                continue
            print("上下文:")
            for ctx in range(max(0, line_no - 2), min(len(lines), line_no + 3)):
                prefix = ">>> " if ctx == line_no else "    "
                print(f"{prefix}{lines[ctx]}")

    print(f"\n总共找到 {len(matches)} 个匹配项")

def extract_between_patterns(json_file_path, start_pattern, end_pattern, include_patterns=True):
    """Pull the first start/end-delimited span out of each response body.

    Args:
        json_file_path (str): Path to the JSON capture file.
        start_pattern (str): Marker that opens the span.
        end_pattern (str): Marker that closes the span.
        include_patterns (bool): Keep the markers themselves in the result.
    """
    try:
        with open(json_file_path, 'r', encoding='utf-8') as fh:
            records = json.load(fh)
    except FileNotFoundError:
        print(f"文件 {json_file_path} 不存在")
        return
    except json.JSONDecodeError as err:
        print(f"JSON解析错误: {err}")
        return

    print(f"查找 '{start_pattern}' 和 '{end_pattern}' 之间的内容")

    found = []
    for idx, record in enumerate(records):
        resp = record.get('response', {})
        body = resp.get('body', '')
        if not (body and isinstance(body, str)):
            continue

        # Locate the first start marker and the first end marker after it;
        # guard clauses skip bodies without a complete span.
        start_at = body.find(start_pattern)
        if start_at == -1:
            continue
        end_at = body.find(end_pattern, start_at + len(start_pattern))
        if end_at == -1:
            continue

        if include_patterns:
            snippet = body[start_at:end_at + len(end_pattern)]
        else:
            snippet = body[start_at + len(start_pattern):end_at]

        found.append({
            'request_index': idx + 1,
            'url': record.get('url', ''),
            'status': resp.get('status', 'N/A'),
            'extracted_content': snippet.strip(),
        })
        print(f"\n--- 提取内容 {len(found)} (来自请求 {idx+1}) ---")
        print(f"URL: {record.get('url', '')}")
        print(f"状态: {resp.get('status', 'N/A')}")
        print("提取的内容:")
        print(snippet.strip())

    if found:
        output_file = 'extracted_between_patterns.json'
        with open(output_file, 'w', encoding='utf-8') as fh:
            json.dump(found, fh, indent=2, ensure_ascii=False)
        print(f"\n提取的内容已保存到 {output_file}")
    else:
        print("未找到匹配的内容")

def extract_remaining_time_info(json_file_path, start_pattern="剩余时间", end_pattern="www.baidu.com"):
    """Extract the remaining-time snippet (markers included) from captured bodies.

    Generalized: the previously hard-coded markers are now parameters with
    the original values as defaults, so existing callers are unaffected.

    Args:
        json_file_path (str): Path to the JSON capture file.
        start_pattern (str): Opening marker; defaults to the preset "剩余时间".
        end_pattern (str): Closing marker; defaults to the preset "www.baidu.com".
    """
    extract_between_patterns(json_file_path, start_pattern, end_pattern, include_patterns=True)

def main():
    """Command-line entry point dispatching to the analysis helpers."""
    import argparse

    parser = argparse.ArgumentParser(description='分析捕获的请求数据')
    parser.add_argument('--file', default='captured_requests.json', help='JSON文件路径')
    parser.add_argument('--url', help='分析特定URL的请求')
    parser.add_argument('--search', help='在响应体中搜索特定内容')
    parser.add_argument('--extract', action='store_true', help='提取特定URL的响应体')
    parser.add_argument('--extract-between', nargs=2, metavar=('START', 'END'),
                       help='提取两个字符串之间的内容')
    parser.add_argument('--extract-time', action='store_true',
                       help='提取剩余时间信息（预设模式，包含开始和结束字符）')

    args = parser.parse_args()

    # Dispatch in priority order. Previously `--extract` without `--url`
    # was silently ignored and fell through to the default analysis; now
    # the misuse is reported explicitly.
    if args.search:
        search_in_responses(args.file, args.search)
    elif args.extract:
        if not args.url:
            parser.error('--extract 需要同时指定 --url')
        extract_specific_data(args.file, args.url)
    elif args.extract_between:
        extract_between_patterns(args.file, args.extract_between[0], args.extract_between[1], include_patterns=False)
    elif args.extract_time:
        extract_remaining_time_info(args.file)
    elif args.url:
        analyze_captured_requests(args.file, args.url)
    else:
        analyze_captured_requests(args.file)

# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()