#!/usr/bin/env python3
"""
C/C++ Function Call Tree Analysis MCP Server

A comprehensive MCP server that provides multiple tools for analyzing C/C++ code,
combining features from regex-based parsing, advanced flow analysis, and Clang AST parsing.
"""

import asyncio
import os
import json
import sys
import tempfile
import subprocess
from pathlib import Path
from typing import Dict, List, Optional, Any, Set, Tuple, Union
from dataclasses import dataclass, asdict
import re
from collections import defaultdict

from fastmcp import FastMCP

# Import our analysis modules
from src.cpp_analyzer import (
    CppCodeAnalyzer,
    AnalysisConfig,
    AnalysisResult,
    FunctionInfo,
    CallGraph
)

# Import validation modules
from src.validation import (
    validate_analysis,
    generate_validation_report,
    ValidationResult,
    CallTreeValidator
)

# Create MCP server instance
mcp = FastMCP("C++ Function Call Tree Analyzer")

@dataclass
class AnalysisRequest:
    """Request configuration for analysis.

    Bundles every option a caller can set when requesting a call-tree
    analysis. NOTE(review): not referenced elsewhere in this module's
    visible code — presumably consumed by external callers; confirm
    before removing.
    """
    paths: List[str]  # files and/or directories to analyze
    recursive: bool = True  # scan subdirectories recursively
    max_depth: int = 10  # maximum call-tree traversal depth
    include_standard_lib: bool = False  # include standard-library calls
    include_external: bool = False  # include calls into external code
    output_format: str = "tree"  # "tree", "json", "detailed"
    root_functions: Optional[List[str]] = None  # restrict analysis roots to these functions
    exclude_functions: Optional[List[str]] = None  # function names to skip
    include_line_counts: bool = True  # record per-function line counts
    detect_cycles: bool = True  # run cycle detection on the call graph
    filter_patterns: Optional[List[str]] = None  # file-name patterns to include


@mcp.tool
def analyze_file(
    file_path: str,
    max_depth: int = 10,
    output_format: str = "tree",
    include_line_counts: bool = True,
    exclude_functions: Optional[str] = None,
    root_function: Optional[str] = None
) -> str:
    """
    Analyze a single C/C++ file and generate function call tree.
    
    Args:
        file_path: Path to the C/C++ file to analyze
        max_depth: Maximum depth for call tree traversal
        output_format: Output format ("tree", "json", "detailed"); any
            other value falls back to "tree"
        include_line_counts: Whether to include line count information
        exclude_functions: Comma-separated list of functions to exclude
        root_function: Specific root function to analyze (optional)
    
    Returns:
        Analysis results in the specified format. Failures are reported
        as an "Error: ..." string rather than raised, per tool convention.
    """
    try:
        # Validate the target before doing any expensive work.
        if not os.path.exists(file_path):
            return f"Error: File '{file_path}' not found."
        
        if not file_path.endswith(('.c', '.cpp', '.cxx', '.cc', '.h', '.hpp', '.hxx')):
            return f"Error: '{file_path}' is not a C/C++ file."
        
        # Parse exclude functions, dropping empty entries so inputs like
        # "foo,,bar" or a trailing comma don't yield a bogus '' exclusion.
        exclude_list = []
        if exclude_functions:
            exclude_list = [func.strip() for func in exclude_functions.split(',') if func.strip()]
        
        # Create analysis configuration for a single, non-recursive target.
        config = AnalysisConfig(
            paths=[file_path],
            recursive=False,
            max_depth=max_depth,
            include_standard_lib=False,
            include_external=False,
            exclude_functions=exclude_list,
            root_functions=[root_function] if root_function else None,
            include_line_counts=include_line_counts,
            detect_cycles=True
        )
        
        # Perform analysis
        analyzer = CppCodeAnalyzer(config)
        result = analyzer.analyze()
        
        # Format output
        if output_format == "json":
            return json.dumps(asdict(result), indent=2, default=str)
        elif output_format == "detailed":
            return _format_detailed_analysis(result)
        else:  # tree format
            return _format_tree_analysis(result)
            
    except Exception as e:
        return f"Error analyzing file: {str(e)}"


@mcp.tool
def analyze_directory(
    directory_path: str,
    recursive: bool = True,
    max_depth: int = 10,
    output_format: str = "tree",
    include_line_counts: bool = True,
    exclude_functions: Optional[str] = None,
    file_patterns: Optional[str] = None
) -> str:
    """
    Analyze all C/C++ files in a directory and generate comprehensive call tree.
    
    Args:
        directory_path: Path to the directory containing C/C++ files
        recursive: Whether to scan subdirectories recursively
        max_depth: Maximum depth for call tree traversal
        output_format: Output format ("tree", "json", "detailed"); any
            other value falls back to "tree"
        include_line_counts: Whether to include line count information
        exclude_functions: Comma-separated list of functions to exclude
        file_patterns: Comma-separated file patterns to include (e.g., "*.cpp,*.h")
    
    Returns:
        Analysis results for all found functions. Failures are reported
        as an "Error: ..." string rather than raised, per tool convention.
    """
    try:
        # Validate the target before doing any expensive work.
        if not os.path.exists(directory_path):
            return f"Error: Directory '{directory_path}' not found."
        
        if not os.path.isdir(directory_path):
            return f"Error: '{directory_path}' is not a directory."
        
        # Parse exclude functions, dropping empty entries so inputs like
        # "foo,,bar" or a trailing comma don't yield a bogus '' exclusion.
        exclude_list = []
        if exclude_functions:
            exclude_list = [func.strip() for func in exclude_functions.split(',') if func.strip()]
        
        # Parse file patterns, likewise skipping empty fragments.
        patterns = []
        if file_patterns:
            patterns = [pattern.strip() for pattern in file_patterns.split(',') if pattern.strip()]
        
        # Create analysis configuration
        config = AnalysisConfig(
            paths=[directory_path],
            recursive=recursive,
            max_depth=max_depth,
            include_standard_lib=False,
            include_external=False,
            exclude_functions=exclude_list,
            include_line_counts=include_line_counts,
            detect_cycles=True,
            file_patterns=patterns
        )
        
        # Perform analysis
        analyzer = CppCodeAnalyzer(config)
        result = analyzer.analyze()
        
        # Format output
        if output_format == "json":
            return json.dumps(asdict(result), indent=2, default=str)
        elif output_format == "detailed":
            return _format_detailed_analysis(result)
        else:  # tree format
            return _format_tree_analysis(result)
            
    except Exception as e:
        return f"Error analyzing directory: {str(e)}"


@mcp.tool
def analyze_project(
    project_path: str,
    compile_commands_path: Optional[str] = None,
    max_depth: int = 15,
    output_format: str = "tree",
    include_line_counts: bool = True,
    exclude_functions: Optional[str] = None,
    use_clang_analysis: bool = True
) -> str:
    """
    Perform comprehensive project analysis with optional Clang-based parsing.
    
    Args:
        project_path: Path to the project root directory
        compile_commands_path: Optional path to compile_commands.json file;
            when omitted, common build-output locations are probed
        max_depth: Maximum depth for call tree traversal
        output_format: Output format ("tree", "json", "detailed"); any
            other value falls back to "tree"
        include_line_counts: Whether to include line count information
        exclude_functions: Comma-separated list of functions to exclude
        use_clang_analysis: Whether to use Clang AST for more accurate analysis
    
    Returns:
        Comprehensive project analysis results. Failures are reported
        as an "Error: ..." string rather than raised, per tool convention.
    """
    try:
        # Validate the target before doing any expensive work.
        if not os.path.exists(project_path):
            return f"Error: Project path '{project_path}' not found."
        
        if not os.path.isdir(project_path):
            return f"Error: '{project_path}' is not a directory."
        
        # Auto-discover compile_commands.json in common build layouts.
        if not compile_commands_path:
            potential_paths = [
                os.path.join(project_path, "compile_commands.json"),
                os.path.join(project_path, "build", "compile_commands.json"),
                os.path.join(project_path, "cmake-build-debug", "compile_commands.json")
            ]
            for path in potential_paths:
                if os.path.exists(path):
                    compile_commands_path = path
                    break
        
        # Parse exclude functions, dropping empty entries so inputs like
        # "foo,,bar" or a trailing comma don't yield a bogus '' exclusion.
        exclude_list = []
        if exclude_functions:
            exclude_list = [func.strip() for func in exclude_functions.split(',') if func.strip()]
        
        # Create analysis configuration
        config = AnalysisConfig(
            paths=[project_path],
            recursive=True,
            max_depth=max_depth,
            include_standard_lib=False,
            include_external=False,
            exclude_functions=exclude_list,
            include_line_counts=include_line_counts,
            detect_cycles=True,
            use_clang=use_clang_analysis,
            compile_commands_path=compile_commands_path
        )
        
        # Perform analysis
        analyzer = CppCodeAnalyzer(config)
        result = analyzer.analyze()
        
        # Format output
        if output_format == "json":
            return json.dumps(asdict(result), indent=2, default=str)
        elif output_format == "detailed":
            return _format_detailed_analysis(result)
        else:  # tree format
            return _format_tree_analysis(result)
            
    except Exception as e:
        return f"Error analyzing project: {str(e)}"


@mcp.tool
def generate_tree(
    analysis_data: str,
    root_function: str,
    max_depth: int = 10,
    show_parameters: bool = True,
    show_line_counts: bool = True,
    compact_format: bool = False
) -> str:
    """
    Generate a visual function call tree from analysis data.

    Rebuilds the function table and call graph from a JSON analysis dump,
    then renders a textual tree rooted at *root_function*.

    Args:
        analysis_data: JSON string containing analysis results
        root_function: Name of the root function to start the tree from
        max_depth: Maximum depth to display in the tree
        show_parameters: Whether to show function parameters in the tree
        show_line_counts: Whether to show line count information
        compact_format: Use compact formatting to save space

    Returns:
        Visual tree representation of function calls, or an error string.
    """
    try:
        payload = json.loads(analysis_data)

        # Rebuild the function lookup keyed by fully-qualified name.
        lookup: Dict[str, FunctionInfo] = {}
        for record in payload.get('functions', []):
            info = FunctionInfo(
                name=record['name'],
                namespace=record.get('namespace', ''),
                class_name=record.get('class_name'),
                return_type=record.get('return_type', ''),
                parameters=record.get('parameters', []),
                line_count=record.get('line_count', 0),
                source_file=record.get('source_file', ''),
                start_line=record.get('start_line', 0)
            )
            lookup[info.full_name] = info

        # Rebuild caller -> callee edges.
        graph = CallGraph()
        for edge in payload.get('call_graph', {}).get('edges', []):
            graph.add_call(edge['caller'], edge['callee'])

        return _generate_tree_visualization(
            graph, lookup, root_function, max_depth,
            show_parameters, show_line_counts, compact_format
        )

    except json.JSONDecodeError:
        return "Error: Invalid JSON data provided."
    except Exception as e:
        return f"Error generating tree: {str(e)}"


@mcp.tool
def generate_report(
    analysis_data: str,
    include_statistics: bool = True,
    include_cycle_analysis: bool = True,
    include_complexity_metrics: bool = True,
    output_file: Optional[str] = None
) -> str:
    """
    Generate a detailed analysis report from analysis data.

    Args:
        analysis_data: JSON string containing analysis results
        include_statistics: Whether to include statistical analysis
        include_cycle_analysis: Whether to include cycle detection results
        include_complexity_metrics: Whether to include complexity metrics.
            NOTE(review): currently unused by this implementation — confirm
            whether a complexity section was ever wired up.
        output_file: Optional file path to save the report

    Returns:
        Detailed analysis report (markdown-style text), or an error string.
    """
    try:
        data = json.loads(analysis_data)

        # Header and summary are unconditional.
        sections: List[str] = [
            "# C/C++ Function Call Tree Analysis Report",
            "=" * 60,
            "",
            "## Analysis Summary",
            f"- Total functions analyzed: {len(data.get('functions', []))}",
            f"- Total call relationships: {len(data.get('call_graph', {}).get('edges', []))}",
            f"- Analysis timestamp: {data.get('timestamp', 'Unknown')}",
            "",
        ]

        if include_statistics:
            sections.append("## Statistical Analysis")
            sections.extend(_generate_statistics(data))
            sections.append("")

        if include_cycle_analysis:
            sections.append("## Cycle Detection")
            cycles = data.get('cycles', [])
            if cycles:
                sections.append(f"Found {len(cycles)} cycles:")
                for idx, cycle in enumerate(cycles, 1):
                    sections.append(f"  {idx}. {' -> '.join(cycle)} -> {cycle[0]}")
            else:
                sections.append("No cycles detected in the call graph.")
            sections.append("")

        # Per-function details: emit only the attributes that are present.
        sections.append("## Function Details")
        for fn in data.get('functions', []):
            sections.append(f"### {fn['name']}")
            for key, label in (
                ('namespace', 'Namespace'),
                ('class_name', 'Class'),
                ('return_type', 'Return Type'),
                ('line_count', 'Line Count'),
                ('source_file', 'Source File'),
            ):
                if fn.get(key):
                    sections.append(f"- {label}: {fn[key]}")
            sections.append("")

        report = "\n".join(sections)

        # Best-effort save: a write failure downgrades to a warning in the report.
        if output_file:
            try:
                with open(output_file, 'w', encoding='utf-8') as handle:
                    handle.write(report)
                report += f"\n\nReport saved to: {output_file}"
            except Exception as e:
                report += f"\n\nWarning: Could not save to file: {str(e)}"

        return report

    except json.JSONDecodeError:
        return "Error: Invalid JSON data provided."
    except Exception as e:
        return f"Error generating report: {str(e)}"


@mcp.tool
def export_json(
    source_path: str,
    output_file: str,
    recursive: bool = True,
    include_source_code: bool = False,
    pretty_format: bool = True
) -> str:
    """
    Export analysis results as JSON for external processing.

    Runs a deep analysis of *source_path* and serializes the full result
    to *output_file*.

    Args:
        source_path: Path to analyze (file or directory)
        output_file: Path to save the JSON output
        recursive: Whether to scan directories recursively
        include_source_code: Whether to include function source code
        pretty_format: Whether to format JSON with indentation

    Returns:
        Status message about the export operation, or an error string.
    """
    try:
        if not os.path.exists(source_path):
            return f"Error: Source path '{source_path}' not found."

        # Configure a deep (depth-50) pass so the export is exhaustive.
        cfg = AnalysisConfig(
            paths=[source_path],
            recursive=recursive,
            max_depth=50,
            include_standard_lib=False,
            include_external=False,
            include_line_counts=True,
            detect_cycles=True,
            include_source_code=include_source_code
        )

        outcome = CppCodeAnalyzer(cfg).analyze()

        # Serialize the dataclass tree; unknown types fall back to str().
        with open(output_file, 'w', encoding='utf-8') as handle:
            json.dump(asdict(outcome), handle,
                      indent=2 if pretty_format else None, default=str)

        return f"Analysis results exported to: {output_file}\n" \
               f"Total functions: {len(outcome.functions)}\n" \
               f"Total call relationships: {len(outcome.call_graph.edges)}"

    except Exception as e:
        return f"Error exporting JSON: {str(e)}"


def _format_tree_analysis(result: AnalysisResult) -> str:
    """Render an AnalysisResult as the default tree-style text summary.

    Emits a short summary header, then up to three call trees rooted at
    detected entry points (or, failing that, the first few functions).
    """
    lines = [
        "🌳 C/C++ Function Call Tree Analysis",
        "=" * 50,
        "",
        "📊 Analysis Summary:",
        f"  - Total functions: {len(result.functions)}",
        f"  - Call relationships: {len(result.call_graph.edges)}",
    ]
    if result.cycles:
        lines.append(f"  - Cycles detected: {len(result.cycles)}")
    lines.append("")

    roots = result.call_graph.find_entry_points()
    if not roots:
        # No clear entry points: fall back to an arbitrary handful of functions.
        roots = [fn.full_name for fn in result.functions[:5]]

    by_name = {fn.full_name: fn for fn in result.functions}

    # Render at most three trees to keep the output manageable.
    for root in roots[:3]:
        if root not in by_name:
            continue
        lines.append(f"🌲 Call Tree starting from: {root}")
        lines.append("-" * 40)
        lines.append(_generate_tree_visualization(
            result.call_graph, by_name, root,
            max_depth=10, show_parameters=True, show_line_counts=True
        ))
        lines.append("")

    return "\n".join(lines)


def _format_detailed_analysis(result: AnalysisResult) -> str:
    """Render an AnalysisResult as a detailed, sectioned text report.

    Sections: comprehensive summary, cycle analysis (if any), functions
    grouped by namespace/class, and the top-10 most-called functions.
    """
    lines = [
        "📋 Detailed C/C++ Function Analysis Report",
        "=" * 60,
        "",
        "📊 Comprehensive Summary:",
        f"  - Total functions analyzed: {len(result.functions)}",
        f"  - Total call relationships: {len(result.call_graph.edges)}",
        f"  - Source files processed: {len({fn.source_file for fn in result.functions})}",
    ]

    # Aggregate line counts (functions without a count contribute nothing).
    loc = sum(fn.line_count for fn in result.functions if fn.line_count)
    mean_loc = loc / len(result.functions) if result.functions else 0
    lines.append(f"  - Total lines of code: {loc}")
    lines.append(f"  - Average function length: {mean_loc:.1f} lines")

    if result.cycles:
        lines.append(f"  - Cycles detected: {len(result.cycles)}")

    lines.append("")

    if result.cycles:
        lines.append("🔄 Cycle Analysis:")
        for idx, cycle in enumerate(result.cycles, 1):
            lines.append(f"  Cycle {idx}: {' -> '.join(cycle)} -> {cycle[0]}")
        lines.append("")

    # Group functions under namespace, then class, then a global bucket.
    grouped = defaultdict(list)
    for fn in result.functions:
        grouped[fn.namespace or fn.class_name or "<global>"].append(fn)

    lines.append("📁 Functions by Namespace/Class:")
    for scope, members in sorted(grouped.items()):
        lines.append(f"  {scope}: {len(members)} functions")
        for fn in sorted(members, key=lambda f: f.name):
            sig = f"({', '.join(fn.parameters)})" if fn.parameters else "()"
            size = f" [{fn.line_count} lines]" if fn.line_count else ""
            lines.append(f"    - {fn.name}{sig}{size}")
    lines.append("")

    # Inbound-call tally per callee across the whole graph.
    inbound = defaultdict(int)
    for _, callees in result.call_graph.edges.items():
        for callee in callees:
            inbound[callee] += 1

    if inbound:
        lines.append("📈 Most Called Functions:")
        for fn_name, hits in sorted(inbound.items(), key=lambda kv: kv[1], reverse=True)[:10]:
            lines.append(f"  - {fn_name}: called {hits} times")
        lines.append("")

    return "\n".join(lines)


def _generate_tree_visualization(
    call_graph: CallGraph, 
    functions: Dict[str, FunctionInfo], 
    root_function: str, 
    max_depth: int = 10,
    show_parameters: bool = True,
    show_line_counts: bool = True,
    compact_format: bool = False
) -> str:
    """Generate a visual tree representation of function calls.

    Performs a depth-first walk of ``call_graph`` starting at
    ``root_function`` and renders each call as one Unicode box-drawing
    tree line. Cycles are cut by tracking the current path in a visited
    set; recursion stops once ``max_depth`` is exceeded.

    Args:
        call_graph: Graph whose ``edges`` maps caller name -> callees.
        functions: Lookup of fully-qualified name -> FunctionInfo.
        root_function: Node to start the walk from.
        max_depth: Maximum recursion depth before truncation.
        show_parameters: Append up to three parameter names per node.
        show_line_counts: Append "[N lines]" for nodes with a line count.
        compact_format: Suppress the parameter list to save space.

    Returns:
        The rendered tree as a single newline-joined string.
    """
    
    def _format_function_display(func_name: str, depth: int = 0) -> str:
        # Build the one-line label for a node. ``depth`` is accepted but
        # currently unused. Unknown functions render as their bare name.
        func_info = functions.get(func_name)
        if not func_info:
            return func_name
        
        # Build display name
        display_parts = [func_info.name]
        
        if show_parameters and func_info.parameters and not compact_format:
            params = ', '.join(func_info.parameters[:3])  # Limit parameters shown
            if len(func_info.parameters) > 3:
                params += ', ...'
            display_parts.append(f"({params})")
        
        if show_line_counts and func_info.line_count:
            display_parts.append(f"[{func_info.line_count} lines]")
        
        return ' '.join(display_parts)
    
    def _build_tree(node: str, visited: Set[str], prefix: str = "", is_last: bool = True, depth: int = 0) -> List[str]:
        # Depth guard: emit a truncation marker instead of recursing forever.
        if depth > max_depth:
            return [f"{prefix}{'└── ' if is_last else '├── '}... (max depth reached)"]
        
        lines = []
        
        # Current node; the root (depth 0) gets no branch symbol.
        symbol = "└── " if is_last and depth > 0 else ("├── " if depth > 0 else "")
        display_name = _format_function_display(node, depth)
        
        # A node already on the current path is a cycle: label it and stop.
        if node in visited:
            lines.append(f"{prefix}{symbol}{display_name} (cycle detected)")
            return lines
        
        lines.append(f"{prefix}{symbol}{display_name}")
        
        # Get children
        children = list(call_graph.edges.get(node, []))
        if not children:
            return lines
        
        # Add to visited set for cycle detection. The set is per-path (a new
        # frozen copy per branch), so a function may legitimately reappear
        # under a different sibling.
        new_visited = visited | {node}
        
        # Generate children; the vertical rule continues only under a
        # non-last, non-root parent.
        next_prefix = prefix + ("    " if is_last or depth == 0 else "│   ")
        for i, child in enumerate(children):
            is_last_child = (i == len(children) - 1)
            child_lines = _build_tree(child, new_visited, next_prefix, is_last_child, depth + 1)
            lines.extend(child_lines)
        
        return lines
    
    # Start the tree generation
    tree_lines = _build_tree(root_function, set())
    return "\n".join(tree_lines)


def _generate_statistics(data: Dict[str, Any]) -> List[str]:
    """Generate statistical analysis from analysis data."""
    stats = []
    
    functions = data.get('functions', [])
    call_graph = data.get('call_graph', {})
    edges = call_graph.get('edges', [])
    
    if not functions:
        return ["No functions found for statistical analysis."]
    
    # Function count by namespace/class
    namespaces = defaultdict(int)
    for func in functions:
        key = func.get('namespace') or func.get('class_name') or '<global>'
        namespaces[key] += 1
    
    stats.append("Function distribution:")
    for namespace, count in sorted(namespaces.items()):
        stats.append(f"  - {namespace}: {count} functions")
    
    # Line count statistics
    line_counts = [f.get('line_count', 0) for f in functions if f.get('line_count')]
    if line_counts:
        stats.append("")
        stats.append("Line count statistics:")
        stats.append(f"  - Total lines: {sum(line_counts)}")
        stats.append(f"  - Average lines per function: {sum(line_counts) / len(line_counts):.1f}")
        stats.append(f"  - Largest function: {max(line_counts)} lines")
        stats.append(f"  - Smallest function: {min(line_counts)} lines")
    
    # Call relationship statistics
    call_counts = defaultdict(int)
    for edge in edges:
        call_counts[edge.get('callee', '')] += 1
    
    if call_counts:
        stats.append("")
        stats.append("Call relationship statistics:")
        stats.append(f"  - Most called function: {max(call_counts, key=call_counts.get)} ({max(call_counts.values())} calls)")
        stats.append(f"  - Average calls per function: {len(edges) / len(functions):.1f}")
    
    return stats


@mcp.tool()
def validate_analysis_data(
    analysis_data: str,
    validation_options: Optional[Dict[str, bool]] = None,
    runtime_data_file: Optional[str] = None
) -> str:
    """
    Validate function call tree analysis for correctness and identify potential issues.
    
    This tool performs comprehensive validation of the analysis results including:
    - Call relationship validation
    - Parameter compatibility checking
    - Cyclic dependency detection
    - Numerical relationship analysis
    - Optional runtime data cross-reference
    
    Args:
        analysis_data: JSON string containing the analysis results from previous analysis
        validation_options: Optional dict specifying which validations to run;
            defaults to all static checks, plus runtime validation when a
            runtime_data_file is supplied
        runtime_data_file: Optional path to runtime profiling data (gprof, JSON format)
    
    Returns:
        Comprehensive validation report with issues, warnings, and confidence
        scores, or an error string prefixed with "❌".
    """
    try:
        # Accept either a JSON string or an already-parsed mapping.
        if isinstance(analysis_data, str):
            analysis_dict = json.loads(analysis_data)
        else:
            analysis_dict = analysis_data
        
        # Set default validation options
        if validation_options is None:
            validation_options = {
                'call_relationships': True,
                'parameter_compatibility': True,
                'cyclic_dependencies': True,
                'numerical_relationships': True,
                'runtime_validation': bool(runtime_data_file)
            }
        
        # Run validation
        validation_results = validate_analysis(
            analysis_dict, 
            validation_options,
            runtime_data_file
        )
        
        # Generate report
        report = generate_validation_report(validation_results)
        
        # Summary statistics. Guard the average against an empty result set
        # (e.g. all validations disabled) so we never divide by zero.
        total_issues = sum(len(result.issues) for result in validation_results.values())
        total_warnings = sum(len(result.warnings) for result in validation_results.values())
        if validation_results:
            avg_confidence = sum(result.confidence for result in validation_results.values()) / len(validation_results)
        else:
            avg_confidence = 0.0
        
        summary = f"""
## Validation Summary
- **Total Issues**: {total_issues}
- **Total Warnings**: {total_warnings}
- **Average Confidence**: {avg_confidence:.2f}/1.0
- **Validations Run**: {len(validation_results)}

{report}
"""
        
        return summary.strip()
        
    except Exception as e:
        return f"❌ Error during validation: {str(e)}\n\nPlease check that the analysis_data is valid JSON from a previous analysis."


@mcp.tool()
def validate_with_runtime_data(
    analysis_data: str,
    runtime_profile_file: str,
    profile_format: str = "auto"
) -> str:
    """
    Cross-validate static analysis results with runtime profiling data.

    Compares the static call tree with observed runtime execution and
    reports accuracy metrics plus the call pairs each side is missing.

    Args:
        analysis_data: JSON string containing the static analysis results
        runtime_profile_file: Path to runtime profiling data file
        profile_format: Format of profiling data ("gprof", "json", "auto").
            NOTE(review): not read by this implementation — the underlying
            validator appears to determine the format itself; confirm.

    Returns:
        Validation report comparing static vs runtime call relationships,
        or an error string prefixed with "❌".
    """
    try:
        if not os.path.exists(runtime_profile_file):
            return f"❌ Error: Runtime profile file not found: {runtime_profile_file}"

        # Accept either a JSON string or an already-parsed mapping.
        analysis_dict = json.loads(analysis_data) if isinstance(analysis_data, str) else analysis_data

        # Run with every static check disabled: runtime validation only.
        validation_results = validate_analysis(
            analysis_dict,
            {
                'call_relationships': False,
                'parameter_compatibility': False,
                'cyclic_dependencies': False,
                'numerical_relationships': False,
                'runtime_validation': True
            },
            runtime_profile_file
        )

        if 'runtime_validation' not in validation_results:
            return "❌ Error: Runtime validation failed to execute"

        result = validation_results['runtime_validation']
        details = result.details

        def pair_line(pair) -> str:
            # (caller, callee) tuples render with an arrow; anything else as-is.
            if isinstance(pair, tuple) and len(pair) == 2:
                return f"- {pair[0]} → {pair[1]}"
            return f"- {pair}"

        lines = [
            "# Runtime Validation Report",
            "=" * 40,
            "",
            f"**Status**: {'✅ Valid' if result.is_valid else '❌ Issues Found'}",
            f"**Accuracy**: {result.confidence:.2f}/1.0",
            "",
        ]

        if 'accuracy' in details:
            lines.append("## Accuracy Metrics")
            lines.append(
                f"- Correctly identified calls: "
                f"{details.get('correctly_identified', 0)}/{details.get('total_runtime_calls', 0)}"
            )
            lines.append(f"- Accuracy percentage: {details['accuracy'] * 100:.1f}%")
            lines.append("")

        if result.warnings:
            lines.append("## Warnings")
            lines.extend(f"- ⚠️ {warning}" for warning in result.warnings)
            lines.append("")

        if result.issues:
            lines.append("## Issues")
            lines.extend(f"- ❌ {issue}" for issue in result.issues)
            lines.append("")

        # Discrepancy sections: calls each side saw that the other did not.
        if 'missing_in_static' in details:
            missing = details['missing_in_static']
            lines.append(f"## Calls Found at Runtime but Not in Static Analysis ({len(missing)} shown)")
            lines.extend(pair_line(pair) for pair in missing)
            lines.append("")

        if 'missing_in_runtime' in details:
            missing = details['missing_in_runtime']
            lines.append(f"## Static Calls Not Found at Runtime ({len(missing)} shown)")
            lines.extend(pair_line(pair) for pair in missing)

        return "\n".join(lines)

    except Exception as e:
        return f"❌ Error during runtime validation: {str(e)}"


def _complexity_section(details: Dict[str, Any]) -> List[str]:
    """Format the function-complexity report section (top 10 by complexity score)."""
    lines = ["## Function Complexity Analysis"]
    # Highest complexity score first; missing scores sort as 0.
    ranked = sorted(
        details['function_complexity'].items(),
        key=lambda item: item[1].get('complexity_score', 0),
        reverse=True,
    )
    lines.append("### Most Complex Functions")
    for func_name, metrics in ranked[:10]:  # Top 10
        score = metrics.get('complexity_score', 0)
        lines.append(f"- **{func_name}** (Score: {score:.1f})")
        lines.append(
            f"  - Lines: {metrics.get('line_count', 0)}, "
            f"Cyclomatic: {metrics.get('cyclomatic_complexity', 0)}, "
            f"Calls: {metrics.get('call_count', 0)}"
        )
    lines.append("")
    return lines


def _dataflow_section(details: Dict[str, Any]) -> List[str]:
    """Format the data-flow report section: sources, sinks, processors, bottlenecks."""
    flow = details['data_flow']
    sources = flow.get('data_sources', [])
    sinks = flow.get('data_sinks', [])
    processors = flow.get('data_processors', [])
    bottlenecks = flow.get('potential_bottlenecks', [])

    lines = ["## Data Flow Analysis"]
    lines.append(f"- **Data Sources** ({len(sources)}): Functions that don't call others")
    if sources:
        lines.append(f"  - Examples: {', '.join(sources[:5])}")
    lines.append(f"- **Data Sinks** ({len(sinks)}): Functions that aren't called by others")
    if sinks:
        lines.append(f"  - Examples: {', '.join(sinks[:5])}")
    lines.append(f"- **Data Processors** ({len(processors)}): Functions that both call and are called")
    if bottlenecks:
        lines.append("- **Potential Bottlenecks**:")
        for entry in bottlenecks:
            lines.append(
                f"  - {entry['function']}: {entry['callers']} callers, {entry['callees']} callees"
            )
    lines.append("")
    return lines


def _patterns_section(details: Dict[str, Any]) -> List[str]:
    """Format detected computational patterns, listing at most 5 functions each."""
    lines = ["## Computational Patterns"]
    for pattern_type, functions in details['computational_patterns'].items():
        if not functions:
            continue
        pattern_name = pattern_type.replace('_', ' ').title()
        lines.append(f"- **{pattern_name}** ({len(functions)}):")
        for func in functions[:5]:  # Show first 5
            lines.append(f"  - {func}")
        if len(functions) > 5:
            lines.append(f"  - ... and {len(functions) - 5} more")
    lines.append("")
    return lines


def _statistics_section(details: Dict[str, Any]) -> List[str]:
    """Format aggregate call-graph statistics, including the call-count distribution."""
    stats = details['statistics']
    lines = ["## Call Graph Statistics"]
    lines.append(f"- **Total Functions**: {stats.get('total_functions', 0)}")
    lines.append(f"- **Total Function Calls**: {stats.get('total_calls', 0)}")
    lines.append(f"- **Average Calls per Function**: {stats.get('avg_calls_per_function', 0):.1f}")
    lines.append(f"- **Maximum Calls from Single Function**: {stats.get('max_calls_from_function', 0)}")
    lines.append(f"- **Functions with No Outgoing Calls**: {stats.get('functions_with_no_calls', 0)}")
    if 'call_distribution' in stats:
        lines.append("- **Call Distribution**:")
        # NOTE(review): if the distribution keys arrive as JSON strings, this sort
        # is lexicographic ("10" < "2") — confirm the validator emits integer keys.
        for call_count, function_count in sorted(stats['call_distribution'].items()):
            lines.append(f"  - {function_count} functions make {call_count} calls")
    return lines


@mcp.tool()
def analyze_numerical_relationships(
    analysis_data: str,
    focus_area: str = "all"
) -> str:
    """
    Analyze numerical and computational relationships in the function call tree.

    This tool provides deep analysis of computational patterns, data flow,
    complexity metrics, and numerical relationships between functions.

    Args:
        analysis_data: JSON string containing the analysis results
        focus_area: Area to focus on ("complexity", "dataflow", "patterns",
            "statistics", "all")

    Returns:
        Markdown report of numerical relationships and computational patterns.
        On failure, an error string prefixed with "❌" is returned instead of
        raising, so the MCP client always receives a result.
    """
    try:
        # Accept either a JSON string or an already-parsed mapping.
        if isinstance(analysis_data, str):
            analysis_dict = json.loads(analysis_data)
        else:
            analysis_dict = analysis_data

        # Run numerical relationship validation over the parsed analysis.
        validator = CallTreeValidator(analysis_dict)
        result = validator.validate_numerical_relationships()
        details = result.details

        report: List[str] = [
            "# Numerical Relationships Analysis",
            "=" * 45,
            "",
        ]

        # Each section is emitted only when requested and present in the details.
        if focus_area in ["all", "complexity"] and 'function_complexity' in details:
            report.extend(_complexity_section(details))

        if focus_area in ["all", "dataflow"] and 'data_flow' in details:
            report.extend(_dataflow_section(details))

        if focus_area in ["all", "patterns"] and 'computational_patterns' in details:
            report.extend(_patterns_section(details))

        if focus_area in ["all", "statistics"] and 'statistics' in details:
            report.extend(_statistics_section(details))

        # Append validator warnings and issues last, matching the other tools.
        if result.warnings:
            report.append("## Warnings")
            report.extend(f"- ⚠️ {warning}" for warning in result.warnings)
            report.append("")

        if result.issues:
            report.append("## Issues")
            report.extend(f"- ❌ {issue}" for issue in result.issues)

        return "\n".join(report)

    except Exception as e:
        # Surface the failure as a tool result rather than raising through MCP.
        return f"❌ Error during numerical analysis: {str(e)}"


if __name__ == "__main__":
    # Launch the MCP server when this module is executed as a script.
    mcp.run()
