#!/usr/bin/env python3
import re
from urllib.parse import urlparse

def extract_base_url(url):
    """Return the base form of *url*: scheme://host/path, with params,
    query string, and fragment all stripped."""
    parts = urlparse(url)
    scheme, host, path = parts.scheme, parts.netloc, parts.path
    return scheme + "://" + host + path

def filter_duplicate_urls(input_file, output_file):
    """Filter duplicate-URL interactions, keeping only the first occurrence.

    Reads a traffic dump from *input_file* that is split into sections by
    separator lines of the form "====...\\n交互 #N\\n", deduplicates the
    sections by base URL (scheme://host/path — query strings ignored),
    renumbers the survivors sequentially, and writes the result to
    *output_file*. Summary statistics are printed to stdout.
    """
    seen_urls = set()
    filtered_interactions = []

    with open(input_file, 'r', encoding='utf-8') as f:
        content = f.read()

    # Split the dump on interaction separators ("====\n交互 #N\n").
    interactions = re.split(r'=+\n交互 #\d+\n', content)

    # The first chunk is everything before the first separator (the file
    # header); keep it verbatim at the top of the output.
    if interactions:
        filtered_interactions.append(interactions[0])
        interactions = interactions[1:]

    interaction_count = 0
    for interaction in interactions:
        if not interaction.strip():
            continue

        # Locate the request URL inside this interaction. Interactions
        # without a "URL: http..." line are dropped entirely.
        url_match = re.search(r'URL: (https?://[^\s\n]+)', interaction)
        if not url_match:
            continue

        base_url = extract_base_url(url_match.group(1))

        # Keep only the first interaction seen for each base URL.
        if base_url not in seen_urls:
            seen_urls.add(base_url)
            interaction_count += 1

            # Re-number the kept interaction under a fresh separator header.
            interaction_header = f"=================================================================================\n交互 #{interaction_count}\n"
            filtered_interactions.append(interaction_header + interaction)

    # Write all surviving interactions in one batched call.
    with open(output_file, 'w', encoding='utf-8') as f:
        f.writelines(filtered_interactions)

    print("过滤完成！")
    print(f"原始交互数量: {len(interactions)}")
    print(f"过滤后交互数量: {interaction_count}")
    print(f"去重的URL数量: {len(seen_urls)}")
    print(f"输出文件: {output_file}")

if __name__ == "__main__":
    # Default input/output paths when run as a standalone script.
    filter_duplicate_urls(
        "mitmproxy_traffic_claude_filtered.txt",
        "claude_filtered.txt",
    )
