#!/usr/bin/env python3
"""
Claude Code & Copilot 请求结构分析脚本
解析mitmproxy导出的原始请求文件，生成结构化数据用于映射分析
"""

import glob
import json
import os
import re
import sys
from pathlib import Path
from typing import Dict, List, Any, Optional, Tuple


class RequestAnalyzer:
    """Parse raw mitmproxy capture files into structured request/response data.

    Expects two subdirectories under ``base_dir``: ``claudeApi/`` (Claude Code
    captures) and ``copilotApi/`` (Copilot captures), each holding ``*.txt``
    dumps in which the request and the response are separated by a blank line
    and the response begins with an ``HTTP/`` status line.  Exported lines may
    carry a ``NN→`` line-number prefix; it is stripped before parsing.
    """

    # Keys whose values are typically huge payloads; truncated aggressively.
    _LARGE_FIELDS = frozenset({'content', 'text', 'messages', 'tools', 'system'})

    # Strips the "NN→" line-number prefix; compiled once instead of per line.
    _LINE_NUM_RE = re.compile(r'^\s*\d+→')

    def __init__(self, base_dir: str):
        self.base_dir = Path(base_dir)
        self.claude_dir = self.base_dir / "claudeApi"
        self.copilot_dir = self.base_dir / "copilotApi"

    def parse_raw_file(self, file_path: Path) -> Dict[str, Any]:
        """Parse a single raw capture file.

        Returns a dict with ``file`` (the file name), ``request`` and
        ``response`` keys; ``response`` is ``{}`` when no ``HTTP/`` status
        line is present.
        """
        lines = file_path.read_text(encoding='utf-8').split('\n')

        # Locate the request/response boundary: the first blank line whose
        # preceding line is not a numbered ("→") line ends the request; the
        # first "HTTP/" line starts the response.
        request_end = -1
        response_start = -1
        for i, line in enumerate(lines):
            if not line.strip() and i > 0 and '→' not in lines[i - 1]:
                if request_end == -1:
                    request_end = i
            elif line.startswith('HTTP/'):
                response_start = i
                break

        # Bug fix: previously request_end stayed -1 and lines[:-1] silently
        # dropped the last request line.  Fall back to the response start (or
        # the whole file) instead.
        if request_end == -1:
            request_end = response_start if response_start > 0 else len(lines)

        request_data = self._parse_request_section(lines[:request_end])

        response_data: Dict[str, Any] = {}
        if response_start > 0:
            response_data = self._parse_response_section(lines[response_start:])

        return {
            'file': file_path.name,
            'request': request_data,
            'response': response_data,
        }

    def _split_message(self, lines: List[str]) -> Tuple[str, Dict[str, str], str]:
        """Split HTTP-style message lines into (start_line, headers, raw_body).

        The first blank line separates headers from the body, mirroring HTTP
        framing; ``NN→`` prefixes are removed from every line.
        """
        headers: Dict[str, str] = {}
        start_line = ""
        body = ""

        body_start = -1
        for i, line in enumerate(lines):
            if not line.strip():
                body_start = i + 1
                break

            clean_line = self._LINE_NUM_RE.sub('', line).strip()
            if i == 0:
                start_line = clean_line
            elif ':' in clean_line:
                key, value = clean_line.split(':', 1)
                headers[key.strip()] = value.strip()

        if 0 < body_start < len(lines):
            body = '\n'.join(
                self._LINE_NUM_RE.sub('', raw) for raw in lines[body_start:]
            )
        return start_line, headers, body

    def _parse_request_section(self, lines: List[str]) -> Dict[str, Any]:
        """Parse the request half: method, URL, headers and (JSON) body."""
        start_line, headers, body = self._split_message(lines)

        method = ""
        url = ""
        parts = start_line.split(' ', 2)
        if len(parts) >= 2:
            method, url = parts[0], parts[1]

        if body:
            try:
                # Keep the JSON structure but truncate oversized fields.
                body = self._clean_large_fields(json.loads(body))
            except json.JSONDecodeError:
                pass  # not JSON: keep the raw text

        return {
            'method': method,
            'url': url,
            'headers': headers,
            'body': body,
        }

    def _parse_response_section(self, lines: List[str]) -> Dict[str, Any]:
        """Parse the response half: status line, headers and body (JSON or SSE)."""
        status_line, headers, body = self._split_message(lines)

        if body.strip().startswith('{'):
            try:
                body = self._clean_large_fields(json.loads(body))
            except json.JSONDecodeError:
                pass  # malformed JSON: keep the raw text
        elif 'data:' in body:
            # Server-sent events: keep only the leading event structure.
            body = self._extract_sse_structure(body)

        return {
            'status': status_line,
            'headers': headers,
            'body': body,
        }

    def _clean_large_fields(self, data: Any, max_length: int = 200) -> Any:
        """Recursively replace oversized values with size placeholders.

        Known payload keys (``_LARGE_FIELDS``) are truncated at 5 list items;
        other lists are truncated at 10 items; strings longer than
        ``max_length`` become ``[TEXT:n chars]`` markers.
        """
        if isinstance(data, dict):
            result: Dict[str, Any] = {}
            for key, value in data.items():
                if key in self._LARGE_FIELDS:
                    if isinstance(value, str) and len(value) > max_length:
                        result[key] = f"[TRUNCATED:{len(value)} chars]"
                    elif isinstance(value, list) and len(value) > 5:
                        result[key] = f"[ARRAY:{len(value)} items]"
                    else:
                        # Bug fix: short lists under payload keys were kept
                        # verbatim before, so nested large strings escaped
                        # truncation; recurse like every other value.
                        result[key] = self._clean_large_fields(value, max_length)
                else:
                    result[key] = self._clean_large_fields(value, max_length)
            return result
        if isinstance(data, list):
            if len(data) > 10:
                return f"[ARRAY:{len(data)} items]"
            return [self._clean_large_fields(item, max_length) for item in data]
        if isinstance(data, str) and len(data) > max_length:
            return f"[TEXT:{len(data)} chars]"
        return data

    def _extract_sse_structure(self, sse_body: str) -> Dict[str, Any]:
        """Extract the leading events from an SSE response body.

        Returns ``{'format': 'sse', 'events': [...]}`` with at most the first
        five events; each event carries its ``event`` name (when present) and
        a size-cleaned ``data`` payload.
        """
        events: List[Dict[str, Any]] = []
        current_event: Dict[str, Any] = {}

        for raw_line in sse_body.split('\n'):
            line = raw_line.strip()
            if line.startswith('event:'):
                current_event['event'] = line[6:].strip()
            elif line.startswith('data:'):
                data_str = line[5:].strip()
                try:
                    current_event['data'] = self._clean_large_fields(json.loads(data_str))
                except json.JSONDecodeError:
                    current_event['data'] = data_str
                # A "data:" line completes one event.
                events.append(current_event.copy())
                current_event = {}

        return {
            'format': 'sse',
            'events': events[:5],  # keep only the first 5 events
        }

    def get_sorted_files(self, directory: Path, pattern: str = "*.txt") -> List[Path]:
        """Return files matching *pattern* sorted by the first number in the name.

        Files without any digits sort first (treated as 0).
        """
        def extract_number(filename: str) -> int:
            match = re.search(r'(\d+)', filename)
            return int(match.group(1)) if match else 0

        return sorted(directory.glob(pattern), key=lambda f: extract_number(f.name))

    def _parse_directory(self, directory: Path, label: str) -> List[Dict[str, Any]]:
        """Parse every capture file in *directory*, logging progress.

        Files that fail to parse are reported and skipped so one bad capture
        cannot abort the whole run.
        """
        files = self.get_sorted_files(directory)
        print(f"Found {len(files)} {label} files")

        parsed: List[Dict[str, Any]] = []
        for file_path in files:
            try:
                parsed.append(self.parse_raw_file(file_path))
                print(f"Parsed: {file_path.name}")
            except Exception as e:
                print(f"Error parsing {file_path.name}: {e}")
        return parsed

    def analyze_all_files(self) -> Dict[str, List[Dict]]:
        """Parse every capture in both API directories.

        Returns a dict with ``claude_requests`` and ``copilot_requests`` lists.
        """
        return {
            'claude_requests': self._parse_directory(self.claude_dir, "Claude Code"),
            'copilot_requests': self._parse_directory(self.copilot_dir, "Copilot"),
        }

    def save_results(self, results: Dict[str, List[Dict]],
                     output_file: str = "parsed_requests.json") -> Path:
        """Write *results* plus a count summary to JSON files under base_dir.

        Returns the path of the main results file; a companion
        ``analysis_stats.json`` with request counts is written alongside it.
        """
        output_path = self.base_dir / output_file
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2, ensure_ascii=False)
        print(f"Results saved to: {output_path}")

        claude_count = len(results['claude_requests'])
        copilot_count = len(results['copilot_requests'])
        stats = {
            'claude_requests': claude_count,
            'copilot_requests': copilot_count,
            'total_requests': claude_count + copilot_count,
        }
        stats_path = self.base_dir / "analysis_stats.json"
        with open(stats_path, 'w', encoding='utf-8') as f:
            json.dump(stats, f, indent=2, ensure_ascii=False)
        print(f"Statistics saved to: {stats_path}")

        return output_path


def main() -> None:
    """CLI entry point: analyze all capture files and save the results.

    The analysis root defaults to the original hard-coded path but can now be
    overridden with the first command-line argument, so the script is usable
    outside the author's machine.
    """
    base_dir = sys.argv[1] if len(sys.argv) > 1 else "/home/hxz/cc/miniCompose/tool/urlAnaly"
    analyzer = RequestAnalyzer(base_dir)

    print("开始分析请求文件...")
    results = analyzer.analyze_all_files()

    print("保存分析结果...")
    output_path = analyzer.save_results(results)

    print(f"\n分析完成!")
    print(f"Claude Code 请求: {len(results['claude_requests'])} 个")
    print(f"Copilot 请求: {len(results['copilot_requests'])} 个")
    print(f"结果文件: {output_path}")


if __name__ == "__main__":
    main()