#[cfg(test)]
mod performance_monitoring_tests {
    use codegraph_core::{
        PerformanceMonitor, MonitorConfig, BottleneckAnalyzer, AnalyzerConfig,
        MemoryMonitor, MemoryMonitorConfig, TimingProfiler, ProfilerConfig,
        PerformanceReport, MemorySnapshot, OperationTiming, OperationStats,
    };
    use std::time::{Duration, SystemTime};
    use tokio::time::sleep;

    #[tokio::test]
    async fn test_performance_monitor_basic_functionality() {
        println!("🧪 Testing basic performance monitor functionality...");

        // Bring up a fresh monitor and begin background collection.
        let mut perf_monitor = PerformanceMonitor::new();
        perf_monitor.start_monitoring().await.expect("Failed to start monitoring");

        // Give the collector a moment to gather initial samples.
        sleep(Duration::from_millis(100)).await;

        // Feed in one parse operation and one graph update so the report
        // has something to aggregate.
        perf_monitor
            .record_parse_operation(
                Duration::from_millis(50),
                1024,
                "rust".to_string(),
                true,
                100,
                None,
            )
            .await
            .expect("Failed to record parse operation");

        perf_monitor
            .record_graph_update_operation(Duration::from_millis(30), 50, 25, true, None)
            .await
            .expect("Failed to record graph update");

        // The report should reflect exactly the two operations recorded above.
        let report = perf_monitor
            .get_performance_report()
            .await
            .expect("Failed to get performance report");

        // NOTE(review): uptime is in whole seconds; if the monitor's clock
        // starts at start_monitoring() this could be flaky after ~100 ms of
        // wall time — confirm against the monitor implementation.
        assert!(report.uptime_seconds > 0);
        assert_eq!(report.parse_stats.total_operations, 1);
        assert_eq!(report.graph_update_stats.total_updates, 1);

        perf_monitor.stop_monitoring();

        println!("✅ Basic performance monitor functionality: PASSED");
    }

    #[tokio::test]
    async fn test_bottleneck_analyzer() {
        println!("🧪 Testing bottleneck analyzer...");

        let mut bottleneck_analyzer = BottleneckAnalyzer::new();

        // Seed the analyzer with a synthetic report containing deliberately
        // degraded metrics so there is something to detect.
        bottleneck_analyzer.add_performance_data(create_mock_performance_report_with_issues());

        let analysis = bottleneck_analyzer
            .analyze_performance()
            .expect("Failed to analyze performance");

        // A degraded report must score within bounds and yield at least one
        // actionable suggestion.
        assert!(analysis.performance_score <= 100.0);
        assert!(!analysis.optimization_suggestions.is_empty());

        println!("✅ Performance score: {}", analysis.performance_score);
        println!("✅ Bottlenecks detected: {}", analysis.bottlenecks.len());
        println!("✅ Optimization suggestions: {}", analysis.optimization_suggestions.len());

        println!("✅ Bottleneck analyzer: PASSED");
    }

    #[tokio::test]
    async fn test_memory_monitor() {
        println!("🧪 Testing memory monitor...");

        let mut mem_monitor = MemoryMonitor::new();
        mem_monitor
            .start_monitoring()
            .await
            .expect("Failed to start memory monitoring");

        // Register two tracked allocations in different categories.
        mem_monitor
            .record_allocation(
                "test_allocation_1".to_string(),
                1024 * 1024, // 1 MiB
                "test".to_string(),
                None,
            )
            .await
            .expect("Failed to record allocation");

        mem_monitor
            .record_allocation(
                "test_allocation_2".to_string(),
                512 * 1024, // 512 KiB
                "cache".to_string(),
                None,
            )
            .await
            .expect("Failed to record allocation");

        // Let the background sampler run briefly.
        sleep(Duration::from_millis(100)).await;

        let usage = mem_monitor
            .get_current_memory_usage()
            .await
            .expect("Failed to get memory usage");
        assert!(usage.total_heap > 0);

        // Releasing a previously tracked allocation must succeed.
        mem_monitor
            .record_deallocation("test_allocation_1")
            .await
            .expect("Failed to record deallocation");

        let suggestions = mem_monitor
            .generate_optimization_suggestions()
            .await
            .expect("Failed to generate suggestions");

        println!("✅ Memory usage: {} bytes", usage.used_heap);
        println!("✅ Optimization suggestions: {}", suggestions.len());

        mem_monitor.stop_monitoring();

        println!("✅ Memory monitor: PASSED");
    }

    #[tokio::test]
    async fn test_timing_profiler() {
        println!("🧪 Testing timing profiler...");

        let mut profiler = TimingProfiler::new();
        profiler.start_profiling().await.expect("Failed to start profiling");

        // Time a single operation wrapped around a simulated 50 ms workload.
        profiler
            .start_operation("test_op_1".to_string())
            .await
            .expect("Failed to start operation");

        sleep(Duration::from_millis(50)).await;

        let duration = profiler
            .end_operation(
                "test_op_1".to_string(),
                "test_operation".to_string(),
                "test".to_string(),
                true,
                None,
                None,
                Some(1024),
                Some(512),
            )
            .await
            .expect("Failed to end operation");

        // Allow some scheduler jitter around the nominal 50 ms.
        assert!(duration >= Duration::from_millis(45));
        assert!(duration <= Duration::from_millis(100));

        // Record a batch of ten timings (exactly one failure, at i == 0)
        // so the profiler can compute aggregate statistics.
        for i in 0..10 {
            let sample = OperationTiming {
                operation_name: "bulk_test".to_string(),
                category: "test".to_string(),
                start_time: SystemTime::now(),
                duration: Duration::from_millis(10 + i * 5),
                success: i % 10 != 0, // 90% success rate
                error_message: if i % 10 == 0 { Some("Test error".to_string()) } else { None },
                metadata: std::collections::HashMap::new(),
                input_size: Some(1024),
                output_size: Some(512),
            };
            profiler.record_timing(sample).await.expect("Failed to record timing");
        }

        // Give the profiler time to fold the samples into statistics.
        sleep(Duration::from_millis(100)).await;

        let stats = profiler
            .get_operation_stats(Some("bulk_test".to_string()))
            .await
            .expect("Failed to get operation stats");

        // If stats were produced, they must match the recorded batch.
        if let Some(stat) = stats.first() {
            assert_eq!(stat.operation_name, "bulk_test");
            assert_eq!(stat.total_executions, 10);
            assert_eq!(stat.successful_executions, 9);
            assert_eq!(stat.failed_executions, 1);
            assert!((stat.success_rate - 90.0).abs() < 0.1);
        }

        let recommendations = profiler
            .generate_optimization_recommendations()
            .await
            .expect("Failed to generate recommendations");

        println!("✅ Operation duration: {:?}", duration);
        println!("✅ Statistics generated: {}", stats.len());
        println!("✅ Recommendations: {}", recommendations.len());

        profiler.stop_profiling();

        println!("✅ Timing profiler: PASSED");
    }

    #[tokio::test]
    async fn test_integrated_performance_monitoring() {
        println!("🧪 Testing integrated performance monitoring system...");

        // Bring up every monitoring component side by side.
        let mut perf_monitor = PerformanceMonitor::new();
        let mut memory_monitor = MemoryMonitor::new();
        let mut timing_profiler = TimingProfiler::new();
        let mut bottleneck_analyzer = BottleneckAnalyzer::new();

        perf_monitor.start_monitoring().await.expect("Failed to start performance monitoring");
        memory_monitor.start_monitoring().await.expect("Failed to start memory monitoring");
        timing_profiler.start_profiling().await.expect("Failed to start timing profiling");

        // Drive five rounds of simulated activity through all three
        // collectors: parses (80% success), growing allocations, and
        // timed operations (67% success).
        for i in 0..5 {
            perf_monitor
                .record_parse_operation(
                    Duration::from_millis(20 + i * 10),
                    1024 * (i + 1) as u64,
                    "rust".to_string(),
                    i % 4 != 0, // every fourth parse fails
                    50 + i * 10,
                    if i % 4 == 0 { Some("Parse error".to_string()) } else { None },
                )
                .await
                .expect("Failed to record parse operation");

            memory_monitor
                .record_allocation(
                    format!("allocation_{}", i),
                    1024 * 1024 * (i + 1), // each round allocates 1 MiB more
                    "application".to_string(),
                    None,
                )
                .await
                .expect("Failed to record allocation");

            timing_profiler
                .start_operation(format!("operation_{}", i))
                .await
                .expect("Failed to start operation");

            sleep(Duration::from_millis(30)).await;

            timing_profiler
                .end_operation(
                    format!("operation_{}", i),
                    "test_operation".to_string(),
                    "integration_test".to_string(),
                    i % 3 != 0, // every third operation fails
                    if i % 3 == 0 { Some("Operation failed".to_string()) } else { None },
                    None,
                    Some(1024),
                    Some(512),
                )
                .await
                .expect("Failed to end operation");
        }

        // Let background collection catch up before reading results.
        sleep(Duration::from_millis(200)).await;

        let perf_report = perf_monitor
            .get_performance_report()
            .await
            .expect("Failed to get performance report");
        let memory_usage = memory_monitor
            .get_current_memory_usage()
            .await
            .expect("Failed to get memory usage");
        let timing_stats = timing_profiler
            .get_operation_stats(None)
            .await
            .expect("Failed to get timing stats");

        // Feed the collected report into the analyzer and cross-check that
        // every component produced data.
        bottleneck_analyzer.add_performance_data(perf_report.clone());
        let analysis = bottleneck_analyzer
            .analyze_performance()
            .expect("Failed to analyze performance");

        assert!(perf_report.parse_stats.total_operations > 0);
        assert!(memory_usage.total_heap > 0);
        assert!(!timing_stats.is_empty());
        assert!(analysis.performance_score <= 100.0);

        println!("✅ Parse operations: {}", perf_report.parse_stats.total_operations);
        println!("✅ Memory usage: {} bytes", memory_usage.used_heap);
        println!("✅ Timing operations: {}", timing_stats.len());
        println!("✅ Performance score: {}", analysis.performance_score);
        println!("✅ Bottlenecks detected: {}", analysis.bottlenecks.len());

        perf_monitor.stop_monitoring();
        memory_monitor.stop_monitoring();
        timing_profiler.stop_profiling();

        println!("✅ Integrated performance monitoring: PASSED");
    }

    #[tokio::test]
    async fn test_performance_optimization_suggestions() {
        println!("🧪 Testing performance optimization suggestions...");

        let mut analyzer = BottleneckAnalyzer::new();

        // Feed in a report crafted to trip several detectors at once.
        analyzer.add_performance_data(create_performance_report_with_multiple_issues());

        let analysis = analyzer
            .analyze_performance()
            .expect("Failed to analyze performance");

        assert!(!analysis.optimization_suggestions.is_empty());

        // A report this unhealthy should yield at least one high-priority
        // (>= 8) suggestion.
        let high_priority_suggestions = analysis
            .optimization_suggestions
            .iter()
            .filter(|s| s.priority >= 8)
            .count();
        assert!(high_priority_suggestions > 0);

        // Every suggestion must carry actionable implementation details.
        for suggestion in &analysis.optimization_suggestions {
            assert!(!suggestion.description.is_empty());
            assert!(!suggestion.expected_improvement.is_empty());
            assert!(!suggestion.implementation_complexity.is_empty());
            assert!(!suggestion.estimated_effort.is_empty());
        }

        println!("✅ Total suggestions: {}", analysis.optimization_suggestions.len());
        println!("✅ High priority suggestions: {}", high_priority_suggestions);

        println!("✅ Performance optimization suggestions: PASSED");
    }

    /// Helper: builds a synthetic `PerformanceReport` whose metrics are
    /// deliberately degraded (slow parsing, high CPU/memory usage, elevated
    /// error counts) so the `BottleneckAnalyzer` has concrete issues to flag.
    /// The numbers are arbitrary test fixtures; see the sibling
    /// `create_performance_report_with_multiple_issues` for a worse variant.
    fn create_mock_performance_report_with_issues() -> PerformanceReport {
        use codegraph_core::{ParseStats, GraphUpdateStats, MemoryStats, SystemStats, ErrorStats, OperationStats};
        
        PerformanceReport {
            uptime_seconds: 300,
            parse_stats: ParseStats {
                total_operations: 100,
                successful_operations: 85,
                failed_operations: 15, // 15% failure rate
                average_duration_ms: 150, // slow parsing — should trip latency checks
                total_files_processed: 50,
                total_nodes_created: 5000,
                average_file_size_bytes: 10240,
            },
            graph_update_stats: GraphUpdateStats {
                total_updates: 50,
                successful_updates: 45,
                failed_updates: 5,
                average_duration_ms: 80,
                nodes_added: 500,
                nodes_removed: 100,
                edges_added: 1000,
                edges_removed: 200,
            },
            memory_stats: MemoryStats {
                current_usage_bytes: 1024 * 1024 * 800, // 800 MiB — high usage
                peak_usage_bytes: 1024 * 1024 * 900, // 900 MiB peak
                average_usage_bytes: 1024 * 1024 * 600, // 600 MiB average
                allocation_count: 10000,
                deallocation_count: 8000, // 2000 allocations unreleased
            },
            system_stats: SystemStats {
                cpu_usage_percent: 85.0, // high CPU usage
                memory_usage_percent: 90.0, // high memory usage
                disk_usage_percent: 75.0,
                network_bytes_in: 1024 * 1024,
                network_bytes_out: 512 * 1024,
            },
            error_stats: ErrorStats {
                total_errors: 20,
                parse_errors: 15,
                graph_errors: 3,
                io_errors: 2,
                network_errors: 0,
            },
            operation_stats: OperationStats {
                operations_by_type: std::collections::HashMap::new(),
                total_operations: 200,
                operations_per_minute: 40.0,
            },
            generated_at: SystemTime::now(),
        }
    }

    /// Helper: builds a `PerformanceReport` with several simultaneous problems
    /// (high failure rates, slow operations, near-critical CPU/memory, an
    /// allocation/deallocation gap suggesting a leak) so the analyzer should
    /// emit multiple suggestions, including high-priority ones.
    fn create_performance_report_with_multiple_issues() -> PerformanceReport {
        use codegraph_core::{ParseStats, GraphUpdateStats, MemoryStats, SystemStats, ErrorStats, OperationStats};
        
        PerformanceReport {
            uptime_seconds: 600,
            parse_stats: ParseStats {
                total_operations: 200,
                successful_operations: 160,
                failed_operations: 40, // 20% failure rate — high
                average_duration_ms: 200, // very slow parsing
                total_files_processed: 100,
                total_nodes_created: 10000,
                average_file_size_bytes: 20480,
            },
            graph_update_stats: GraphUpdateStats {
                total_updates: 100,
                successful_updates: 80,
                failed_updates: 20, // 20% failure rate — high
                average_duration_ms: 120, // slow updates
                nodes_added: 1000,
                nodes_removed: 200,
                edges_added: 2000,
                edges_removed: 400,
            },
            memory_stats: MemoryStats {
                current_usage_bytes: 1024 * 1024 * 950, // 950 MiB — very high
                peak_usage_bytes: 1024 * 1024 * 1000, // 1 GiB peak
                average_usage_bytes: 1024 * 1024 * 800, // 800 MiB average
                allocation_count: 20000,
                deallocation_count: 15000, // 5000 unreleased — memory-leak indication
            },
            system_stats: SystemStats {
                cpu_usage_percent: 95.0, // critical CPU usage
                memory_usage_percent: 95.0, // critical memory usage
                disk_usage_percent: 85.0, // high disk usage
                network_bytes_in: 2 * 1024 * 1024,
                network_bytes_out: 1024 * 1024,
            },
            error_stats: ErrorStats {
                total_errors: 50,
                parse_errors: 30,
                graph_errors: 10,
                io_errors: 5,
                network_errors: 5,
            },
            operation_stats: OperationStats {
                operations_by_type: std::collections::HashMap::new(),
                total_operations: 400,
                operations_per_minute: 40.0,
            },
            generated_at: SystemTime::now(),
        }
    }

    #[tokio::test]
    async fn run_comprehensive_performance_test_suite() {
        println!("🎯 Running comprehensive performance monitoring test suite");
        println!("{}", "=".repeat(80));
        
        // Run all individual tests
        test_performance_monitor_basic_functionality().await;
        test_bottleneck_analyzer().await;
        test_memory_monitor().await;
        test_timing_profiler().await;
        test_integrated_performance_monitoring().await;
        test_performance_optimization_suggestions().await;
        
        println!("{}", "=".repeat(80));
        println!("🎉 All performance monitoring tests completed successfully!");
        println!("✅ Performance monitoring and optimization system is fully functional");
    }
}
