"""
Validation module for C++ function call tree analysis.

This module provides various validation techniques to verify the correctness
of the generated function call trees and identify potential issues.
"""

import re
import os
import json
import subprocess
import tempfile
from typing import Dict, List, Set, Tuple, Optional, Any
from dataclasses import dataclass
from collections import defaultdict, Counter
import logging

logger = logging.getLogger(__name__)

@dataclass
class ValidationResult:
    """Result of a validation check."""
    # True when the check found no blocking issues (warnings may still be present).
    is_valid: bool
    confidence: float  # 0.0 to 1.0
    # Blocking problems detected by the check.
    issues: List[str]
    # Non-blocking observations worth a human review.
    warnings: List[str]
    # Check-specific structured data (metrics, offending names, etc.).
    details: Dict[str, Any]

@dataclass
class FunctionSignature:
    """Function signature information for validation."""
    # Bare function name (the destructor pattern captures the name without '~').
    name: str
    # Declared return type as written in the source; defaults to 'void' when
    # the matched pattern (constructor/destructor) has no return-type group.
    return_type: str
    parameters: List[Tuple[str, str]]  # (type, name) pairs
    # True when the keyword 'template' appears before the signature match.
    is_template: bool = False
    # NOTE(review): namespace/class_name are never populated by
    # _parse_function_signature in this module — reserved for future use.
    namespace: str = ""
    class_name: str = ""

class CallTreeValidator:
    """Main validator for function call trees."""
    
    def __init__(self, analysis_data: Dict[str, Any]):
        self.analysis_data = analysis_data
        self.functions = analysis_data.get('functions', {})
        self.call_graph = analysis_data.get('call_graph', {})
        self.source_files = analysis_data.get('source_files', [])
        
        # Extract function signatures for validation
        self.signatures = self._extract_signatures()
        
    def _extract_signatures(self) -> Dict[str, FunctionSignature]:
        """Extract function signatures from the analysis data."""
        signatures = {}
        
        for func_name, func_data in self.functions.items():
            # Parse function signature from source code
            signature = self._parse_function_signature(func_name, func_data)
            if signature:
                signatures[func_name] = signature
                
        return signatures
    
    def _parse_function_signature(self, func_name: str, func_data: Dict) -> Optional[FunctionSignature]:
        """Parse function signature from function data."""
        source_code = func_data.get('source_code', '')
        if not source_code:
            return None
            
        # Enhanced regex to capture function signatures
        patterns = [
            # Standard function: return_type function_name(params)
            r'(?P<return_type>[\w:<>]+(?:\s*[*&])*)\s+(?P<name>\w+)\s*\((?P<params>[^)]*)\)',
            # Constructor: ClassName(params)
            r'(?P<name>\w+)\s*\((?P<params>[^)]*)\)\s*(?::\s*[\w\s,()]+)?\s*\{',
            # Destructor: ~ClassName()
            r'~(?P<name>\w+)\s*\((?P<params>[^)]*)\)',
        ]
        
        for pattern in patterns:
            match = re.search(pattern, source_code)
            if match:
                name = match.group('name')
                return_type = match.groupdict().get('return_type', 'void')
                params_str = match.group('params')
                
                # Parse parameters
                parameters = self._parse_parameters(params_str)
                
                # Check if it's a template function
                is_template = 'template' in source_code[:source_code.find(match.group(0))]
                
                return FunctionSignature(
                    name=name,
                    return_type=return_type.strip(),
                    parameters=parameters,
                    is_template=is_template
                )
        
        return None
    
    def _parse_parameters(self, params_str: str) -> List[Tuple[str, str]]:
        """Parse function parameters string."""
        if not params_str or params_str.strip() == 'void':
            return []
            
        parameters = []
        # Split by comma, but be careful about template parameters
        param_parts = []
        bracket_count = 0
        current_part = ""
        
        for char in params_str:
            if char == '<':
                bracket_count += 1
            elif char == '>':
                bracket_count -= 1
            elif char == ',' and bracket_count == 0:
                param_parts.append(current_part.strip())
                current_part = ""
                continue
            current_part += char
            
        if current_part.strip():
            param_parts.append(current_part.strip())
        
        for param in param_parts:
            param = param.strip()
            if param:
                # Extract type and name
                parts = param.split()
                if len(parts) >= 2:
                    param_type = ' '.join(parts[:-1])
                    param_name = parts[-1]
                    # Remove default values
                    if '=' in param_name:
                        param_name = param_name.split('=')[0].strip()
                    parameters.append((param_type, param_name))
                elif len(parts) == 1:
                    # Type without name
                    parameters.append((parts[0], ""))
        
        return parameters
    
    def validate_call_relationships(self) -> ValidationResult:
        """Validate that function call relationships make sense."""
        issues = []
        warnings = []
        details = {}
        
        # Check for calls to non-existent functions
        all_function_names = set(self.functions.keys())
        external_calls = set()
        
        for caller, callees in self.call_graph.items():
            if caller not in all_function_names:
                issues.append(f"Caller function '{caller}' not found in function definitions")
                continue
                
            for callee in callees:
                if callee not in all_function_names:
                    external_calls.add(callee)
        
        # External calls are not necessarily issues, but worth noting
        if external_calls:
            details['external_calls'] = list(external_calls)
            if len(external_calls) > 20:  # Many external calls might indicate parsing issues
                warnings.append(f"Found {len(external_calls)} external function calls - verify parsing accuracy")
        
        # Check for unreachable functions
        called_functions = set()
        for callees in self.call_graph.values():
            called_functions.update(callees)
        
        unreachable = all_function_names - called_functions
        # Remove main functions and constructors from unreachable list
        unreachable = {f for f in unreachable if not (
            f == 'main' or 
            f.endswith('::main') or 
            '::' not in f and f[0].isupper()  # Likely constructors
        )}
        
        if unreachable:
            details['unreachable_functions'] = list(unreachable)
            if len(unreachable) > len(all_function_names) * 0.3:  # More than 30% unreachable
                warnings.append(f"High percentage of unreachable functions ({len(unreachable)}/{len(all_function_names)}) - check for entry points")
        
        confidence = max(0.5, 1.0 - len(issues) * 0.2 - len(warnings) * 0.1)
        
        return ValidationResult(
            is_valid=len(issues) == 0,
            confidence=confidence,
            issues=issues,
            warnings=warnings,
            details=details
        )
    
    def validate_parameter_compatibility(self) -> ValidationResult:
        """Validate parameter type compatibility in function calls."""
        issues = []
        warnings = []
        details = {}
        
        incompatible_calls = []
        
        for caller, callees in self.call_graph.items():
            caller_sig = self.signatures.get(caller)
            if not caller_sig:
                continue
                
            for callee in callees:
                callee_sig = self.signatures.get(callee)
                if not callee_sig:
                    continue
                
                # For now, we can't validate actual parameter passing without 
                # parsing the call sites, but we can check for obvious mismatches
                
                # Check if function expects parameters but might be called without
                if len(callee_sig.parameters) > 0:
                    required_params = [p for p in callee_sig.parameters if '=' not in str(p)]
                    if len(required_params) > 5:  # Functions with many required parameters
                        warnings.append(f"Function '{callee}' has {len(required_params)} required parameters - verify call sites")
        
        details['parameter_analysis'] = {
            'functions_with_signatures': len(self.signatures),
            'total_functions': len(self.functions),
            'coverage': len(self.signatures) / max(1, len(self.functions))
        }
        
        confidence = 0.7 if len(self.signatures) > 0 else 0.3
        
        return ValidationResult(
            is_valid=len(issues) == 0,
            confidence=confidence,
            issues=issues,
            warnings=warnings,
            details=details
        )
    
    def validate_cyclic_dependencies(self) -> ValidationResult:
        """Detect and validate cyclic dependencies in the call graph."""
        issues = []
        warnings = []
        details = {}
        
        cycles = self._find_cycles()
        
        if cycles:
            details['cycles'] = cycles
            for cycle in cycles:
                if len(cycle) == 2:
                    warnings.append(f"Mutual recursion detected: {' <-> '.join(cycle)}")
                else:
                    issues.append(f"Cycle detected: {' -> '.join(cycle + [cycle[0]])}")
        
        # Check for deep recursion
        max_depth = self._calculate_max_call_depth()
        details['max_call_depth'] = max_depth
        
        if max_depth > 20:
            warnings.append(f"Very deep call chain detected (depth: {max_depth}) - potential stack overflow risk")
        
        confidence = 1.0 - len(issues) * 0.3 - len(warnings) * 0.1
        
        return ValidationResult(
            is_valid=len(issues) == 0,
            confidence=confidence,
            issues=issues,
            warnings=warnings,
            details=details
        )
    
    def _find_cycles(self) -> List[List[str]]:
        """Find cycles in the call graph using DFS."""
        visited = set()
        rec_stack = set()
        cycles = []
        
        def dfs(node, path):
            if node in rec_stack:
                # Found a cycle
                cycle_start = path.index(node)
                cycle = path[cycle_start:]
                cycles.append(cycle)
                return
            
            if node in visited:
                return
            
            visited.add(node)
            rec_stack.add(node)
            
            for neighbor in self.call_graph.get(node, []):
                dfs(neighbor, path + [neighbor])
            
            rec_stack.remove(node)
        
        for node in self.call_graph:
            if node not in visited:
                dfs(node, [node])
        
        return cycles
    
    def _calculate_max_call_depth(self) -> int:
        """Calculate the maximum call depth in the call graph."""
        def dfs_depth(node, visited):
            if node in visited:
                return 0  # Cycle detected
            
            visited.add(node)
            max_child_depth = 0
            
            for callee in self.call_graph.get(node, []):
                child_depth = dfs_depth(callee, visited.copy())
                max_child_depth = max(max_child_depth, child_depth)
            
            return 1 + max_child_depth
        
        max_depth = 0
        for root in self.call_graph:
            depth = dfs_depth(root, set())
            max_depth = max(max_depth, depth)
        
        return max_depth
    
    def validate_numerical_relationships(self) -> ValidationResult:
        """Validate numerical relationships and data flow patterns."""
        issues = []
        warnings = []
        details = {}
        
        # Analyze function complexity and relationships
        complexity_analysis = self._analyze_function_complexity()
        details.update(complexity_analysis)
        
        # Check for potential data flow issues
        data_flow_issues = self._analyze_data_flow_patterns()
        details['data_flow'] = data_flow_issues
        
        # Identify computational patterns
        computational_patterns = self._identify_computational_patterns()
        details['computational_patterns'] = computational_patterns
        
        # Statistical analysis
        stats = self._calculate_call_graph_statistics()
        details['statistics'] = stats
        
        # Generate warnings based on analysis
        if stats['avg_calls_per_function'] > 10:
            warnings.append(f"High average calls per function ({stats['avg_calls_per_function']:.1f}) - consider function decomposition")
        
        if stats['max_calls_from_function'] > 20:
            warnings.append(f"Function with {stats['max_calls_from_function']} calls detected - potential design issue")
        
        confidence = 0.8
        
        return ValidationResult(
            is_valid=len(issues) == 0,
            confidence=confidence,
            issues=issues,
            warnings=warnings,
            details=details
        )
    
    def _analyze_function_complexity(self) -> Dict[str, Any]:
        """Analyze function complexity metrics."""
        complexity_metrics = {}
        
        for func_name, func_data in self.functions.items():
            source_code = func_data.get('source_code', '')
            line_count = func_data.get('line_count', 0)
            
            # Calculate various complexity metrics
            cyclomatic_complexity = self._calculate_cyclomatic_complexity(source_code)
            call_count = len(self.call_graph.get(func_name, []))
            
            complexity_metrics[func_name] = {
                'line_count': line_count,
                'cyclomatic_complexity': cyclomatic_complexity,
                'call_count': call_count,
                'complexity_score': line_count * 0.1 + cyclomatic_complexity * 2 + call_count * 0.5
            }
        
        return {'function_complexity': complexity_metrics}
    
    def _calculate_cyclomatic_complexity(self, source_code: str) -> int:
        """Calculate McCabe cyclomatic complexity."""
        if not source_code:
            return 1
        
        # Count decision points
        decision_keywords = ['if', 'else', 'while', 'for', 'switch', 'case', '&&', '||', '?']
        complexity = 1  # Base complexity
        
        for keyword in decision_keywords:
            if keyword in ['&&', '||', '?']:
                complexity += source_code.count(keyword)
            else:
                complexity += len(re.findall(r'\b' + keyword + r'\b', source_code))
        
        return complexity
    
    def _analyze_data_flow_patterns(self) -> Dict[str, Any]:
        """Analyze data flow patterns in the call graph."""
        patterns = {
            'data_sources': [],  # Functions that don't call others (data sources)
            'data_sinks': [],    # Functions that aren't called by others (data sinks)
            'data_processors': [], # Functions that both call and are called
            'potential_bottlenecks': []
        }
        
        all_functions = set(self.functions.keys())
        called_functions = set()
        for callees in self.call_graph.values():
            called_functions.update(callees)
        
        for func_name in all_functions:
            calls_others = len(self.call_graph.get(func_name, [])) > 0
            is_called = func_name in called_functions
            
            if not calls_others and is_called:
                patterns['data_sinks'].append(func_name)
            elif calls_others and not is_called:
                patterns['data_sources'].append(func_name)
            elif calls_others and is_called:
                patterns['data_processors'].append(func_name)
                # Check if it's a potential bottleneck (called by many, calls many)
                callers = [c for c, callees in self.call_graph.items() if func_name in callees]
                if len(callers) > 3 and len(self.call_graph.get(func_name, [])) > 3:
                    patterns['potential_bottlenecks'].append({
                        'function': func_name,
                        'callers': len(callers),
                        'callees': len(self.call_graph.get(func_name, []))
                    })
        
        return patterns
    
    def _identify_computational_patterns(self) -> Dict[str, Any]:
        """Identify computational patterns in the code."""
        patterns = {
            'mathematical_functions': [],
            'io_functions': [],
            'memory_functions': [],
            'control_functions': []
        }
        
        # Keywords that indicate different types of computational patterns
        math_keywords = ['calculate', 'compute', 'add', 'multiply', 'divide', 'sqrt', 'pow', 'sin', 'cos', 'matrix', 'vector']
        io_keywords = ['read', 'write', 'print', 'input', 'output', 'file', 'stream']
        memory_keywords = ['malloc', 'free', 'new', 'delete', 'allocate', 'deallocate']
        control_keywords = ['init', 'setup', 'cleanup', 'main', 'start', 'stop', 'control']
        
        for func_name, func_data in self.functions.items():
            source_code = func_data.get('source_code', '').lower()
            func_name_lower = func_name.lower()
            
            # Check function name and source code for patterns
            if any(keyword in func_name_lower or keyword in source_code for keyword in math_keywords):
                patterns['mathematical_functions'].append(func_name)
            elif any(keyword in func_name_lower or keyword in source_code for keyword in io_keywords):
                patterns['io_functions'].append(func_name)
            elif any(keyword in func_name_lower or keyword in source_code for keyword in memory_keywords):
                patterns['memory_functions'].append(func_name)
            elif any(keyword in func_name_lower or keyword in source_code for keyword in control_keywords):
                patterns['control_functions'].append(func_name)
        
        return patterns
    
    def _calculate_call_graph_statistics(self) -> Dict[str, Any]:
        """Calculate statistical metrics for the call graph."""
        total_functions = len(self.functions)
        total_calls = sum(len(callees) for callees in self.call_graph.values())
        
        if total_functions == 0:
            return {'error': 'No functions found'}
        
        call_counts = [len(callees) for callees in self.call_graph.values()]
        
        stats = {
            'total_functions': total_functions,
            'total_calls': total_calls,
            'avg_calls_per_function': total_calls / total_functions,
            'max_calls_from_function': max(call_counts) if call_counts else 0,
            'functions_with_no_calls': sum(1 for count in call_counts if count == 0),
            'call_distribution': Counter(call_counts)
        }
        
        return stats


class RuntimeValidator:
    """Validator that can cross-reference static analysis with runtime data."""

    def __init__(self, static_analysis: Dict[str, Any], runtime_data: Optional[Dict[str, Any]] = None):
        """Store the static analysis results and optional pre-loaded runtime data."""
        self.static_analysis = static_analysis
        self.runtime_data = runtime_data or {}

    def validate_against_profiling_data(self, profiling_file: str) -> ValidationResult:
        """Validate call tree against profiling data (gprof, perf, etc.).

        Args:
            profiling_file: Path to a '.gprof' or '.json' profiling dump.

        Returns:
            ValidationResult whose confidence equals the fraction of runtime
            calls that static analysis also identified.
        """
        issues = []
        warnings = []
        details = {}

        try:
            # Dispatch on file extension; anything else is unsupported.
            if profiling_file.endswith('.gprof'):
                runtime_calls = self._parse_gprof_data(profiling_file)
            elif profiling_file.endswith('.json'):
                runtime_calls = self._parse_json_profiling_data(profiling_file)
            else:
                issues.append(f"Unsupported profiling file format: {profiling_file}")
                return ValidationResult(False, 0.0, issues, warnings, details)

            # Compare static analysis with runtime data
            static_calls = set()
            for caller, callees in self.static_analysis.get('call_graph', {}).items():
                for callee in callees:
                    static_calls.add((caller, callee))

            runtime_call_set = set(runtime_calls.keys())

            # An empty runtime set previously produced a silent 0.0
            # confidence; surface it so the user knows why.
            if not runtime_call_set:
                warnings.append("No call records parsed from profiling data - accuracy cannot be assessed")

            # Find discrepancies
            missing_in_static = runtime_call_set - static_calls
            missing_in_runtime = static_calls - runtime_call_set

            if missing_in_static:
                warnings.append(f"Found {len(missing_in_static)} calls in runtime that weren't detected statically")
                details['missing_in_static'] = list(missing_in_static)[:10]  # Limit output

            if missing_in_runtime:
                warnings.append(f"Found {len(missing_in_runtime)} static calls that didn't occur at runtime")
                details['missing_in_runtime'] = list(missing_in_runtime)[:10]

            # Calculate accuracy metrics
            total_runtime_calls = len(runtime_call_set)
            correctly_identified = len(static_calls & runtime_call_set)

            if total_runtime_calls > 0:
                accuracy = correctly_identified / total_runtime_calls
                details['accuracy'] = accuracy
                details['correctly_identified'] = correctly_identified
                details['total_runtime_calls'] = total_runtime_calls
            else:
                accuracy = 0.0

            confidence = accuracy

        except Exception as e:
            # Broad catch is deliberate: any parse failure becomes a
            # reported issue rather than a crash.
            issues.append(f"Error processing profiling data: {str(e)}")
            confidence = 0.0

        return ValidationResult(
            is_valid=len(issues) == 0,
            confidence=confidence,
            issues=issues,
            warnings=warnings,
            details=details
        )

    def _parse_gprof_data(self, filename: str) -> Dict[Tuple[str, str], int]:
        """Parse gprof output to extract call relationships.

        Returns:
            Mapping (caller, callee) -> call count.  Currently always empty:
            real gprof call-graph parsing is not implemented yet.
        """
        calls = {}
        # This is a simplified parser - real gprof parsing would be more complex
        try:
            with open(filename, 'r') as f:
                content = f.read()
                # Look for call graph section
                # This is a placeholder - would need proper gprof parsing
                pass
        except Exception as e:
            logger.error(f"Error parsing gprof data: {e}")

        return calls

    def _parse_json_profiling_data(self, filename: str) -> Dict[Tuple[str, str], int]:
        """Parse JSON profiling data.

        Expects a top-level 'calls' list of objects with 'caller', 'callee'
        and optional 'count' keys; entries missing caller or callee are
        skipped.  Parse errors are logged and yield an empty mapping.
        """
        calls = {}
        try:
            with open(filename, 'r') as f:
                data = json.load(f)
                # Extract call relationships from JSON structure
                # Format depends on the profiling tool used
                if 'calls' in data:
                    for call_data in data['calls']:
                        caller = call_data.get('caller')
                        callee = call_data.get('callee')
                        count = call_data.get('count', 1)
                        if caller and callee:
                            calls[(caller, callee)] = count
        except Exception as e:
            logger.error(f"Error parsing JSON profiling data: {e}")

        return calls


def validate_analysis(analysis_data: Dict[str, Any],
                     validation_options: Optional[Dict[str, bool]] = None,
                     runtime_data_file: Optional[str] = None) -> Dict[str, ValidationResult]:
    """
    Main validation function that runs all validation checks.

    Args:
        analysis_data: The static analysis results
        validation_options: Dict specifying which validations to run;
            when None, every static check is enabled and runtime
            validation is enabled iff a profiling file was supplied
        runtime_data_file: Optional runtime profiling data file

    Returns:
        Dictionary of validation results keyed by check name
    """
    # PEP 484: parameters defaulting to None are declared Optional above.
    if validation_options is None:
        validation_options = {
            'call_relationships': True,
            'parameter_compatibility': True,
            'cyclic_dependencies': True,
            'numerical_relationships': True,
            'runtime_validation': bool(runtime_data_file)
        }

    validator = CallTreeValidator(analysis_data)
    results = {}

    if validation_options.get('call_relationships', True):
        results['call_relationships'] = validator.validate_call_relationships()

    if validation_options.get('parameter_compatibility', True):
        results['parameter_compatibility'] = validator.validate_parameter_compatibility()

    if validation_options.get('cyclic_dependencies', True):
        results['cyclic_dependencies'] = validator.validate_cyclic_dependencies()

    if validation_options.get('numerical_relationships', True):
        results['numerical_relationships'] = validator.validate_numerical_relationships()

    # Runtime validation needs both the option flag and an actual file.
    if validation_options.get('runtime_validation', False) and runtime_data_file:
        runtime_validator = RuntimeValidator(analysis_data)
        results['runtime_validation'] = runtime_validator.validate_against_profiling_data(runtime_data_file)

    return results


def generate_validation_report(validation_results: Dict[str, ValidationResult]) -> str:
    """Generate a comprehensive validation report in Markdown.

    Args:
        validation_results: Mapping of check name -> ValidationResult.

    Returns:
        A Markdown-formatted report string; a short placeholder report when
        no results are supplied (previously this raised ZeroDivisionError).
    """
    report = []
    report.append("# Function Call Tree Validation Report")
    report.append("=" * 50)
    report.append("")

    # Guard: averaging confidence over an empty mapping divides by zero.
    if not validation_results:
        report.append("No validation results available.")
        return "\n".join(report)

    overall_confidence = sum(result.confidence for result in validation_results.values()) / len(validation_results)
    overall_valid = all(result.is_valid for result in validation_results.values())

    report.append(f"## Overall Assessment")
    report.append(f"- **Status**: {'✅ VALID' if overall_valid else '❌ ISSUES FOUND'}")
    report.append(f"- **Confidence**: {overall_confidence:.2f}/1.0")
    report.append("")

    for validation_name, result in validation_results.items():
        report.append(f"## {validation_name.replace('_', ' ').title()}")
        report.append(f"- **Status**: {'✅ Valid' if result.is_valid else '❌ Issues Found'}")
        report.append(f"- **Confidence**: {result.confidence:.2f}/1.0")

        if result.issues:
            report.append(f"- **Issues ({len(result.issues)}):**")
            for issue in result.issues:
                report.append(f"  - ❌ {issue}")

        if result.warnings:
            report.append(f"- **Warnings ({len(result.warnings)}):**")
            for warning in result.warnings:
                report.append(f"  - ⚠️ {warning}")

        if result.details:
            report.append(f"- **Details:**")
            for key, value in result.details.items():
                # Summarise bulky containers instead of dumping them inline.
                if isinstance(value, (list, dict)) and len(str(value)) > 100:
                    report.append(f"  - {key}: {type(value).__name__} with {len(value)} items")
                else:
                    report.append(f"  - {key}: {value}")

        report.append("")

    return "\n".join(report)
