"""Analysis Engine for Problem Correlation and Root Cause Analysis.

Provides advanced analysis capabilities including problem correlation,
pattern matching, and root cause analysis.
"""

import json
import logging
import time
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass, field

from .diagnostic_engine import DetectedProblem, AnalysisResult, ProblemType, SeverityLevel

logger = logging.getLogger(__name__)


@dataclass
class ProblemPattern:
    """Definition of a problem pattern.

    A pattern describes a known combination of problem types that, when
    observed together, suggests a specific failure scenario (used by
    pattern-matching analysis).
    """
    pattern_id: str  # Unique identifier for the pattern
    name: str  # Human-readable pattern name
    description: str  # Short explanation of the failure scenario
    required_problems: List[ProblemType]  # All of these types must be present for a match
    optional_problems: List[ProblemType] = field(default_factory=list)  # Raise the match score when present
    time_window: int = 300  # seconds  # Max span between problems before a time penalty applies
    component_correlation: bool = False  # Reward problems that share affected components
    sequence_sensitive: bool = False  # Reward multiple problems occurring over time
    confidence_boost: float = 0.2  # Added to confidence when the pattern matches
    root_cause_indicators: List[str] = field(default_factory=list)  # Component/symptom hints used for root-cause inference


@dataclass
class CorrelationGroup:
    """Group of correlated problems.

    Produced by the correlation phase; ``primary_problem`` and
    ``secondary_problems`` are only populated for causal correlations
    where a cause/effect split could be identified.
    """
    group_id: str  # Unique identifier for this correlation group
    problems: List[DetectedProblem]  # All problems in the group
    correlation_type: str  # time, component, causal
    correlation_score: float  # Strength of the correlation (0.0 - 1.0)
    primary_problem: Optional[DetectedProblem] = None  # Identified cause, if any
    secondary_problems: List[DetectedProblem] = field(default_factory=list)  # Effects attributed to the primary problem


class AnalysisEngine:
    """Analysis and correlation engine for detected problems."""
    
    def __init__(self, config: Dict[str, Any]):
        """Initialize analysis engine.

        Args:
            config: Engine configuration; only the ``analysis`` sub-section
                is used, with defaults applied for missing keys.
        """
        self.config = config.get("analysis", {})

        # Tunable analysis parameters (fall back to documented defaults)
        cfg = self.config
        self.correlation_window = cfg.get("correlation_window", 300)
        self.pattern_threshold = cfg.get("pattern_match_threshold", 0.7)
        self.root_cause_depth = cfg.get("root_cause_depth", 5)
        self.historical_weight = cfg.get("historical_weight", 0.3)

        # Library of known problem patterns, keyed by pattern id
        self.problem_patterns = self._load_problem_patterns()

        # Rolling history consumed by the historical-similarity analysis
        self.historical_problems: List[DetectedProblem] = []
        self.historical_correlations: List[CorrelationGroup] = []
        self.max_history = 1000

        # Counters exposed via get_analysis_statistics()
        self.statistics = dict.fromkeys(
            ("analyses_performed", "correlations_found",
             "patterns_matched", "root_causes_identified"), 0)

        logger.info("AnalysisEngine initialized")
    
    def _load_problem_patterns(self) -> Dict[str, ProblemPattern]:
        """Build the library of predefined problem patterns, keyed by id."""
        pattern_definitions = (
            # Network connectivity issues cascading into service failures
            ProblemPattern(
                pattern_id="network_cascade",
                name="Network Cascade Failure",
                description="Network connectivity issues leading to service failures",
                required_problems=[ProblemType.NETWORK_CONNECTIVITY, ProblemType.SERVICE_UNAVAILABLE],
                optional_problems=[ProblemType.PERFORMANCE_DEGRADATION],
                time_window=300,
                sequence_sensitive=True,
                confidence_boost=0.3,
                root_cause_indicators=["network_interface", "internet_gateway", "dns"],
            ),
            # Resource exhaustion spiraling into degraded performance
            ProblemPattern(
                pattern_id="resource_spiral",
                name="Resource Exhaustion Spiral",
                description="Cascading resource exhaustion leading to performance degradation",
                required_problems=[ProblemType.RESOURCE_EXHAUSTION, ProblemType.PERFORMANCE_DEGRADATION],
                optional_problems=[ProblemType.SOFTWARE_ERROR],
                component_correlation=True,
                confidence_boost=0.25,
                root_cause_indicators=["cpu", "memory", "disk", "system_performance"],
            ),
            # Configuration problems surfacing as software errors
            ProblemPattern(
                pattern_id="config_conflict",
                name="Configuration Conflict",
                description="Configuration issues causing software errors",
                required_problems=[ProblemType.CONFIGURATION_ISSUE, ProblemType.SOFTWARE_ERROR],
                optional_problems=[ProblemType.SERVICE_UNAVAILABLE],
                time_window=600,
                confidence_boost=0.2,
                root_cause_indicators=["configuration", "syntax", "file_permissions"],
            ),
            # Hardware faults destabilizing the wider system
            ProblemPattern(
                pattern_id="hardware_cascade",
                name="Hardware Failure Cascade",
                description="Hardware issues leading to system instability",
                required_problems=[ProblemType.HARDWARE_FAILURE],
                optional_problems=[ProblemType.PERFORMANCE_DEGRADATION, ProblemType.SOFTWARE_ERROR],
                component_correlation=True,
                confidence_boost=0.3,
                root_cause_indicators=["hardware", "temperature", "cpu", "memory", "disk"],
            ),
        )
        return {pattern.pattern_id: pattern for pattern in pattern_definitions}
    
    async def analyze_problems(self, problems: List[DetectedProblem], 
                             device_context: Dict[str, Any]) -> List[AnalysisResult]:
        """Analyze a collection of detected problems.
        
        Args:
            problems: List of detected problems
            device_context: Device context information
            
        Returns:
            List of analysis results
        """
        if not problems:
            return []
        
        start_time = time.time()
        results = []
        
        try:
            # Add problems to historical data
            self._add_to_history(problems)
            
            # Step 1: Problem correlation analysis
            correlations = await self._correlate_problems(problems)
            if correlations:
                correlation_results = self._create_correlation_results(correlations)
                results.extend(correlation_results)
            
            # Step 2: Pattern matching analysis
            pattern_results = await self._match_patterns(problems)
            results.extend(pattern_results)
            
            # Step 3: Root cause analysis
            root_cause_results = await self._analyze_root_causes(problems, correlations)
            results.extend(root_cause_results)
            
            # Step 4: Historical similarity analysis
            if self.historical_problems:
                historical_results = await self._analyze_historical_similarity(problems)
                results.extend(historical_results)
            
            # Update statistics
            self.statistics["analyses_performed"] += 1
            self.statistics["correlations_found"] += len(correlations)
            self.statistics["patterns_matched"] += len([r for r in results if "pattern" in r.analysis_type])
            self.statistics["root_causes_identified"] += len([r for r in results if r.root_cause])
            
            analysis_duration = time.time() - start_time
            logger.info(f"Analysis completed in {analysis_duration:.2f}s, {len(results)} results")
            
        except Exception as e:
            logger.error(f"Error during problem analysis: {e}")
            # Return error analysis result
            results.append(AnalysisResult(
                analysis_id=f"analysis_error_{int(time.time())}",
                analysis_type="error",
                root_cause=None,
                correlation_score=0.0,
                conclusion=f"Analysis failed: {str(e)}",
                confidence=0.5,
                analysis_duration=time.time() - start_time
            ))
        
        return results
    
    async def _correlate_problems(self, problems: List[DetectedProblem]) -> List[CorrelationGroup]:
        """Find time-, component- and causality-based correlations.

        Args:
            problems: Problems to correlate.

        Returns:
            All correlation groups discovered across the three strategies.
        """
        groups: List[CorrelationGroup] = []

        # Problems detected close together in time
        for cluster in self._group_by_time(problems):
            if len(cluster) > 1:
                groups.append(CorrelationGroup(
                    group_id=f"time_correlation_{int(time.time())}_{len(groups)}",
                    problems=cluster,
                    correlation_type="time",
                    correlation_score=self._calculate_time_correlation_score(cluster)
                ))

        # Problems that touch the same component
        for component, members in self._group_by_component(problems).items():
            if len(members) > 1:
                groups.append(CorrelationGroup(
                    group_id=f"component_correlation_{component}_{int(time.time())}",
                    problems=members,
                    correlation_type="component",
                    correlation_score=0.8  # same-component problems correlate strongly
                ))

        # Cause/effect relationships between problem types and severities
        groups += self._find_causal_relationships(problems)

        return groups
    
    def _group_by_time(self, problems: List[DetectedProblem]) -> List[List[DetectedProblem]]:
        """Group problems by detection time window."""
        if len(problems) < 2:
            return []
        
        # Sort problems by detection time
        sorted_problems = sorted(problems, key=lambda p: p.detected_at)
        groups = []
        current_group = [sorted_problems[0]]
        
        for problem in sorted_problems[1:]:
            time_diff = (problem.detected_at - current_group[0].detected_at).total_seconds()
            
            if time_diff <= self.correlation_window:
                current_group.append(problem)
            else:
                if len(current_group) > 1:
                    groups.append(current_group)
                current_group = [problem]
        
        # Add the last group if it has multiple problems
        if len(current_group) > 1:
            groups.append(current_group)
        
        return groups
    
    def _group_by_component(self, problems: List[DetectedProblem]) -> Dict[str, List[DetectedProblem]]:
        """Group problems by affected components."""
        component_groups = {}
        
        for problem in problems:
            for component in problem.affected_components:
                if component not in component_groups:
                    component_groups[component] = []
                component_groups[component].append(problem)
        
        # Only return components with multiple problems
        return {k: v for k, v in component_groups.items() if len(v) > 1}
    
    def _find_causal_relationships(self, problems: List[DetectedProblem]) -> List[CorrelationGroup]:
        """Find causal relationships between problems.

        Uses a static rule table of "cause type -> plausible effect types" and
        only accepts pairs where the effect was detected AFTER the cause,
        within the correlation window.

        Args:
            problems: Problems to inspect.

        Returns:
            One CorrelationGroup per distinct cause problem that has at least
            one matching effect.
        """
        causal_groups: List[CorrelationGroup] = []

        # Static domain knowledge: which problem types tend to trigger which
        causal_rules = {
            ProblemType.NETWORK_CONNECTIVITY: [ProblemType.SERVICE_UNAVAILABLE],
            ProblemType.RESOURCE_EXHAUSTION: [ProblemType.PERFORMANCE_DEGRADATION, ProblemType.SOFTWARE_ERROR],
            ProblemType.HARDWARE_FAILURE: [ProblemType.RESOURCE_EXHAUSTION, ProblemType.SOFTWARE_ERROR],
            ProblemType.CONFIGURATION_ISSUE: [ProblemType.SOFTWARE_ERROR, ProblemType.SERVICE_UNAVAILABLE]
        }

        # Index problems by type for quick rule lookups
        problems_by_type: Dict[Any, List[DetectedProblem]] = {}
        for problem in problems:
            problems_by_type.setdefault(problem.problem_type, []).append(problem)

        for cause_type, effect_types in causal_rules.items():
            cause_problems = problems_by_type.get(cause_type)
            if not cause_problems:
                continue
            for effect_type in effect_types:
                effect_problems = problems_by_type.get(effect_type)
                if not effect_problems:
                    continue

                # Keep only pairs where the cause precedes the effect within
                # the correlation window
                valid_pairs = []
                for cause in cause_problems:
                    for effect in effect_problems:
                        time_diff = (effect.detected_at - cause.detected_at).total_seconds()
                        if 0 <= time_diff <= self.correlation_window:
                            valid_pairs.append((cause, effect))

                if not valid_pairs:
                    continue

                # Aggregate effects under their primary cause
                cause_groups: Dict[str, Dict[str, Any]] = {}
                for cause, effect in valid_pairs:
                    entry = cause_groups.setdefault(cause.problem_id, {'cause': cause, 'effects': []})
                    entry['effects'].append(effect)

                for group_data in cause_groups.values():
                    cause = group_data['cause']
                    correlation = CorrelationGroup(
                        # BUG FIX: embed the cause's problem_id so that groups
                        # built for different cause problems of the same type
                        # (created in the same second) no longer collide on
                        # group_id — previously every group in this loop shared
                        # one id, which also duplicated downstream analysis_ids.
                        group_id=f"causal_{cause_type.value}_{effect_type.value}_{cause.problem_id}_{int(time.time())}",
                        problems=[cause] + group_data['effects'],
                        correlation_type="causal",
                        correlation_score=0.9,  # High confidence for causal relationships
                        primary_problem=cause,
                        secondary_problems=group_data['effects']
                    )
                    causal_groups.append(correlation)

        return causal_groups
    
    def _calculate_time_correlation_score(self, problems: List[DetectedProblem]) -> float:
        """Calculate correlation score based on time proximity."""
        if len(problems) < 2:
            return 0.0
        
        # Calculate time span of the problem group
        times = [p.detected_at for p in problems]
        time_span = (max(times) - min(times)).total_seconds()
        
        # Score based on how tightly clustered the problems are
        if time_span == 0:
            return 1.0
        elif time_span <= 60:  # Within 1 minute
            return 0.9
        elif time_span <= 300:  # Within 5 minutes
            return 0.8
        elif time_span <= 600:  # Within 10 minutes
            return 0.6
        else:
            return 0.3
    
    def _create_correlation_results(self, correlations: List[CorrelationGroup]) -> List[AnalysisResult]:
        """Turn correlation groups into AnalysisResult records.

        The root cause is the group's primary problem when one was
        identified; otherwise the highest-severity/most-confident problem
        is promoted and the rest become contributing factors.
        """
        def describe(problem):
            # "type: title" summary used for root cause / contributing factors
            return f"{problem.problem_type.value}: {problem.title}"

        results = []

        for group in correlations:
            root_cause = None
            contributing_factors: List[str] = []

            if group.primary_problem:
                root_cause = describe(group.primary_problem)
                contributing_factors = [describe(p) for p in group.secondary_problems]
            else:
                # No designated primary: rank by severity then confidence.
                # NOTE(review): this assumes severity.value sorts meaningfully
                # (e.g. numeric levels) — confirm against SeverityLevel.
                ranked = sorted(group.problems,
                                key=lambda p: (p.severity.value, p.confidence), reverse=True)
                if ranked:
                    root_cause = describe(ranked[0])
                    contributing_factors = [describe(p) for p in ranked[1:]]

            results.append(AnalysisResult(
                analysis_id=group.group_id,
                analysis_type=f"{group.correlation_type}_correlation",
                root_cause=root_cause,
                contributing_factors=contributing_factors,
                correlation_score=group.correlation_score,
                pattern_matches=[],
                conclusion=self._generate_correlation_conclusion(group),
                confidence=group.correlation_score,
                analysis_duration=0.0  # caller fills in the real duration
            ))

        return results
    
    def _generate_correlation_conclusion(self, correlation: CorrelationGroup) -> str:
        """Generate conclusion text for correlation."""
        problem_count = len(correlation.problems)
        correlation_type = correlation.correlation_type
        
        if correlation_type == "time":
            return f"Found {problem_count} problems occurring within a {self.correlation_window}s time window, suggesting a common trigger or cascading failure."
        elif correlation_type == "component":
            components = set()
            for problem in correlation.problems:
                components.update(problem.affected_components)
            return f"Found {problem_count} problems affecting common components: {', '.join(components)}, indicating component-specific issues."
        elif correlation_type == "causal":
            if correlation.primary_problem:
                return f"Identified causal relationship: '{correlation.primary_problem.title}' appears to be causing {len(correlation.secondary_problems)} related problem(s)."
            else:
                return f"Found {problem_count} problems with potential causal relationships."
        else:
            return f"Found correlation between {problem_count} problems."
    
    async def _match_patterns(self, problems: List[DetectedProblem]) -> List[AnalysisResult]:
        """Match problems against the known pattern library.

        Returns one result per pattern whose match score reaches the
        configured ``pattern_threshold``.
        """
        matched = []

        for pattern_id, pattern in self.problem_patterns.items():
            score = await self._calculate_pattern_match_score(problems, pattern)
            if score < self.pattern_threshold:
                continue

            # Recognizing a known pattern adds confidence on top of the score
            boosted_confidence = min(1.0, score + pattern.confidence_boost)

            matched.append(AnalysisResult(
                analysis_id=f"pattern_match_{pattern_id}_{int(time.time())}",
                analysis_type="pattern_matching",
                root_cause=self._infer_root_cause_from_pattern(problems, pattern),
                correlation_score=score,
                pattern_matches=[pattern.name],
                conclusion=f"Detected known problem pattern: {pattern.name} - {pattern.description}",
                confidence=boosted_confidence,
                analysis_duration=0.0
            ))

        return matched
    
    async def _calculate_pattern_match_score(self, problems: List[DetectedProblem], 
                                           pattern: ProblemPattern) -> float:
        """Calculate how well problems match a pattern."""
        if not problems:
            return 0.0
        
        problem_types = [p.problem_type for p in problems]
        
        # Check required problems
        required_matches = 0
        for required_type in pattern.required_problems:
            if required_type in problem_types:
                required_matches += 1
        
        if required_matches < len(pattern.required_problems):
            return 0.0  # Required problems not present
        
        # Check optional problems
        optional_matches = 0
        for optional_type in pattern.optional_problems:
            if optional_type in problem_types:
                optional_matches += 1
        
        # Base score from required matches
        base_score = required_matches / len(pattern.required_problems)
        
        # Bonus for optional matches
        optional_bonus = 0.0
        if pattern.optional_problems:
            optional_bonus = (optional_matches / len(pattern.optional_problems)) * 0.2
        
        # Time window check
        time_penalty = 0.0
        if pattern.time_window > 0:
            problem_times = [p.detected_at for p in problems]
            time_span = (max(problem_times) - min(problem_times)).total_seconds()
            if time_span > pattern.time_window:
                time_penalty = min(0.3, (time_span - pattern.time_window) / pattern.time_window * 0.1)
        
        # Component correlation check
        component_bonus = 0.0
        if pattern.component_correlation:
            # Check if problems share components
            all_components = set()
            for problem in problems:
                all_components.update(problem.affected_components)
            
            shared_components = 0
            for component in all_components:
                problem_count = sum(1 for p in problems if component in p.affected_components)
                if problem_count > 1:
                    shared_components += 1
            
            if shared_components > 0:
                component_bonus = min(0.2, shared_components * 0.1)
        
        # Sequence sensitivity check
        sequence_bonus = 0.0
        if pattern.sequence_sensitive and len(problems) > 1:
            # Check if problems occurred in a logical sequence
            problems_by_time = sorted(problems, key=lambda p: p.detected_at)
            
            # For now, just bonus for having multiple problems in time order
            sequence_bonus = 0.1
        
        final_score = base_score + optional_bonus + component_bonus + sequence_bonus - time_penalty
        return max(0.0, min(1.0, final_score))
    
    def _infer_root_cause_from_pattern(self, problems: List[DetectedProblem], 
                                     pattern: ProblemPattern) -> Optional[str]:
        """Infer root cause based on pattern and root cause indicators.

        Args:
            problems: Problems that matched the pattern.
            pattern: Matched pattern whose ``root_cause_indicators`` serve
                as component/symptom hints.

        Returns:
            A root-cause description string, or None when the pattern has no
            required problem types to fall back on.
        """
        # Find problems that match root cause indicators
        root_cause_problems = []
        
        for problem in problems:
            for indicator in pattern.root_cause_indicators:
                # NOTE(review): `indicator in problem.symptom_data.values()` is an
                # exact-equality membership test over the dict's values — it only
                # matches when some value IS the indicator string, never when a
                # value merely contains it (e.g. a list of URLs). Confirm intent.
                if indicator in problem.affected_components or indicator in problem.symptom_data.values():
                    root_cause_problems.append(problem)
                    break
        
        if root_cause_problems:
            # Select the most severe/confident problem as root cause
            # NOTE(review): ranking by severity.value assumes enum values compare
            # meaningfully (e.g. numeric levels) — verify SeverityLevel ordering.
            primary_problem = max(root_cause_problems, 
                                key=lambda p: (p.severity.value, p.confidence))
            return f"Pattern-based root cause: {primary_problem.problem_type.value} in {', '.join(primary_problem.affected_components)}"
        
        # Fallback to first required problem type
        if pattern.required_problems:
            return f"Pattern suggests root cause in: {pattern.required_problems[0].value}"
        
        return None
    
    async def _analyze_root_causes(self, problems: List[DetectedProblem], 
                                 correlations: List[CorrelationGroup]) -> List[AnalysisResult]:
        """Derive root-cause results from causal correlations and critical problems.

        Causal correlations with an identified primary problem become
        root-cause results directly; critical-severity problems not already
        explained that way are reported as independent potential root causes.
        """
        results = []

        # Causal correlations directly name a primary cause
        for correlation in correlations:
            if correlation.correlation_type != "causal" or not correlation.primary_problem:
                continue
            primary = correlation.primary_problem
            results.append(AnalysisResult(
                analysis_id=f"root_cause_{correlation.group_id}",
                analysis_type="root_cause_analysis",
                root_cause=f"Primary cause: {primary.title}",
                contributing_factors=[p.title for p in correlation.secondary_problems],
                correlation_score=correlation.correlation_score,
                conclusion=f"Root cause analysis identifies '{primary.title}' as the primary cause leading to {len(correlation.secondary_problems)} secondary problem(s).",
                confidence=0.8,
                analysis_duration=0.0
            ))

        # Critical problems not already flagged as a correlation's primary cause
        primary_ids = {c.primary_problem.problem_id for c in correlations if c.primary_problem}
        for problem in problems:
            if problem.severity != SeverityLevel.CRITICAL:
                continue
            if problem.problem_id in primary_ids:
                continue
            results.append(AnalysisResult(
                analysis_id=f"individual_root_cause_{problem.problem_id}",
                analysis_type="individual_root_cause",
                root_cause=f"Critical issue: {problem.title}",
                correlation_score=problem.confidence,
                conclusion=f"Critical severity problem '{problem.title}' identified as potential independent root cause.",
                confidence=problem.confidence * 0.8,  # standalone evidence is weaker
                analysis_duration=0.0
            ))

        return results
    
    async def _analyze_historical_similarity(self, problems: List[DetectedProblem]) -> List[AnalysisResult]:
        """Compare current problems with historical occurrences of the same type.

        For each problem type with at least 3 historical occurrences, the
        overlap between historically common affected components (>=30%
        frequency) and the current problems' components yields a similarity
        score; scores above 0.3 produce a result.

        Args:
            problems: Current problems to compare against history.

        Returns:
            Historical-similarity analysis results (possibly empty).
        """
        if not self.historical_problems:
            return []

        results = []

        # Bucket the current problems by type
        current_by_type: Dict[Any, List[DetectedProblem]] = {}
        for problem in problems:
            current_by_type.setdefault(problem.problem_type, []).append(problem)

        for problem_type, current_problems in current_by_type.items():
            history = [p for p in self.historical_problems
                       if p.problem_type == problem_type]
            if len(history) < 3:
                continue  # not enough historical data for a meaningful comparison

            # NOTE: the original also averaged historical/current confidences
            # here, but never used the values — that dead computation is removed.

            # Components appearing in at least 30% of historical occurrences
            component_counts: Dict[str, int] = {}
            for p in history:
                for component in p.affected_components:
                    component_counts[component] = component_counts.get(component, 0) + 1
            common_components = [comp for comp, count in component_counts.items()
                                 if count >= len(history) * 0.3]

            # Components affected by the current problems
            current_components = set()
            for p in current_problems:
                current_components.update(p.affected_components)

            if common_components:
                overlap = len(set(common_components) & current_components)
                similarity_score = overlap / max(1, len(common_components))
            else:
                similarity_score = 0.5  # neutral score when history shows no dominant components

            if similarity_score > 0.3:  # Significant similarity
                results.append(AnalysisResult(
                    analysis_id=f"historical_similarity_{problem_type.value}_{int(time.time())}",
                    analysis_type="historical_similarity",
                    root_cause=None,
                    correlation_score=similarity_score,
                    historical_similarity=similarity_score,
                    conclusion=f"Current {problem_type.value} problems show {similarity_score:.1%} similarity to {len(history)} historical occurrences. Common affected components: {', '.join(common_components[:3])}",
                    confidence=similarity_score * self.historical_weight,
                    analysis_duration=0.0
                ))

        return results
    
    def _add_to_history(self, problems: List[DetectedProblem]):
        """Add problems to historical data."""
        self.historical_problems.extend(problems)
        
        # Limit history size
        if len(self.historical_problems) > self.max_history:
            # Remove oldest problems
            self.historical_problems = self.historical_problems[-self.max_history:]
        
        # Clean up old historical data (older than 90 days)
        cutoff_date = datetime.now() - timedelta(days=90)
        self.historical_problems = [p for p in self.historical_problems if p.detected_at > cutoff_date]
    
    def get_analysis_statistics(self) -> Dict[str, Any]:
        """Get analysis engine statistics."""
        return {
            "analysis_statistics": self.statistics.copy(),
            "configuration": {
                "correlation_window": self.correlation_window,
                "pattern_threshold": self.pattern_threshold,
                "root_cause_depth": self.root_cause_depth,
                "historical_weight": self.historical_weight
            },
            "pattern_library": {
                "total_patterns": len(self.problem_patterns),
                "pattern_names": list(self.problem_patterns.keys())
            },
            "historical_data": {
                "total_historical_problems": len(self.historical_problems),
                "historical_correlations": len(self.historical_correlations)
            }
        }


# Example usage and testing
if __name__ == "__main__":
    # Note: the unused `unittest.mock.MagicMock` import was removed.
    import asyncio
    
    async def test_analysis_engine():
        """Exercise the analysis engine with two related test problems.

        Builds a network-outage problem followed by a dependent
        service-unavailable problem, runs the full analysis, and prints
        every result plus the engine statistics.
        """
        # Engine configuration mirroring the documented defaults
        config = {
            "analysis": {
                "correlation_window": 300,
                "pattern_match_threshold": 0.7,
                "root_cause_depth": 5,
                "historical_weight": 0.3
            }
        }
        
        engine = AnalysisEngine(config)
        
        # Two problems expected to correlate (network outage -> service down)
        problems = [
            DetectedProblem(
                problem_id="test_001",
                problem_type=ProblemType.NETWORK_CONNECTIVITY,
                severity=SeverityLevel.HIGH,
                title="Network connectivity failure",
                description="Cannot reach external servers",
                detector_name="network_detector",
                detected_at=datetime.now(),
                confidence=0.9,
                symptom_data={"failed_urls": ["google.com", "github.com"]},
                affected_components=["network_interface", "dns"]
            ),
            DetectedProblem(
                problem_id="test_002",
                problem_type=ProblemType.SERVICE_UNAVAILABLE,
                severity=SeverityLevel.MEDIUM,
                title="Web service unavailable",
                description="HTTP service not responding",
                detector_name="software_detector",
                detected_at=datetime.now(),
                confidence=0.8,
                symptom_data={"service": "httpd", "port": 80},
                affected_components=["web_server", "network_interface"]
            )
        ]
        
        print("Testing analysis engine...")
        
        # Run the full analysis pipeline
        device_context = {"device_id": "test_device_001"}
        results = await engine.analyze_problems(problems, device_context)
        
        print(f"Analysis completed: {len(results)} results")
        
        for result in results:
            print(f"\nAnalysis: {result.analysis_type}")
            print(f"  Confidence: {result.confidence:.2f}")
            print(f"  Correlation: {result.correlation_score:.2f}")
            if result.root_cause:
                print(f"  Root Cause: {result.root_cause}")
            print(f"  Conclusion: {result.conclusion}")
        
        # Get statistics
        stats = engine.get_analysis_statistics()
        print(f"\nEngine Statistics: {stats}")
    
    # Run test
    asyncio.run(test_analysis_engine())