// Performance benchmarks for the incremental update system
use codegraph_core::{
    FileWatcher, IncrementalParseEngine, WatcherConfig, 
    CodeChange, TextEdit, Position,
};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::time::sleep;
use tempfile::TempDir;

/// Benchmark file change detection performance
/// Requirement: File change detection delay < 50ms
#[tokio::test]
async fn benchmark_file_change_detection() {
    println!("🔍 Benchmarking file change detection performance...");
    
    let temp_dir = TempDir::new().unwrap();
    let parse_engine = Arc::new(IncrementalParseEngine::new());
    
    // Configure for maximum performance
    let mut config = WatcherConfig::default();
    config.debounce_delay_ms = 25; // Optimized for <50ms requirement
    config.enable_parallel_processing = true;
    
    let mut watcher = FileWatcher::with_config(parse_engine, config);
    watcher.start_watching(temp_dir.path()).await.unwrap();
    
    let mut detection_times = Vec::new();
    
    // Run multiple iterations to get average performance
    for i in 0..10 {
        let test_file = temp_dir.path().join(format!("test_{}.rs", i));
        tokio::fs::write(&test_file, "fn main() {}").await.unwrap();
        
        let start_time = Instant::now();
        
        // Modify file
        tokio::fs::write(&test_file, format!("fn main() {{ println!(\"test {}\"); }}", i)).await.unwrap();
        
        // Wait for detection
        sleep(Duration::from_millis(50)).await;
        
        let detection_time = start_time.elapsed();
        detection_times.push(detection_time.as_millis());
        
        println!("  Iteration {}: {}ms", i + 1, detection_time.as_millis());
    }
    
    let avg_detection_time = detection_times.iter().sum::<u128>() / detection_times.len() as u128;
    let max_detection_time = *detection_times.iter().max().unwrap();
    
    println!("📊 File Change Detection Results:");
    println!("  Average: {}ms", avg_detection_time);
    println!("  Maximum: {}ms", max_detection_time);
    println!("  Requirement: <50ms");
    
    // Verify requirement is met
    assert!(avg_detection_time < 50, "Average detection time {}ms exceeds 50ms requirement", avg_detection_time);
    assert!(max_detection_time < 100, "Maximum detection time {}ms is too high", max_detection_time); // Allow some buffer
    
    println!("✅ File change detection performance: PASSED\n");
    
    watcher.stop_watching().await.unwrap();
}

/// Benchmark incremental update performance
/// Requirement: Large project update time < 500ms
#[tokio::test]
async fn benchmark_incremental_updates() {
    println!("⚡ Benchmarking incremental update performance...");
    
    let parse_engine = Arc::new(IncrementalParseEngine::new());
    let mut update_times = Vec::new();
    
    // Test with different file sizes
    let test_sizes = vec![1000, 5000, 10000, 20000]; // bytes
    
    for size in test_sizes {
        let code_change = CodeChange {
            file_path: format!("large_file_{}.rs", size),
            language: "rust".to_string(),
            edit: TextEdit {
                start_byte: 0,
                old_end_byte: size / 2,
                new_end_byte: size,
                start_position: Position::new(0, 0),
                old_end_position: Position::new(size / 100, 0),
                new_end_position: Position::new(size / 50, 0),
                new_text: generate_rust_code(size),
            },
            timestamp: chrono::Utc::now(),
        };
        
        let start_time = Instant::now();
        let result = parse_engine.update_graph(&code_change).await.unwrap();
        let update_time = start_time.elapsed();
        
        update_times.push(update_time.as_millis());
        
        println!("  File size {} bytes: {}ms (success: {})", 
                 size, update_time.as_millis(), result.success);
    }
    
    let avg_update_time = update_times.iter().sum::<u128>() / update_times.len() as u128;
    let max_update_time = *update_times.iter().max().unwrap();
    
    println!("📊 Incremental Update Results:");
    println!("  Average: {}ms", avg_update_time);
    println!("  Maximum: {}ms", max_update_time);
    println!("  Requirement: <500ms");
    
    // Verify requirement is met
    assert!(max_update_time < 500, "Maximum update time {}ms exceeds 500ms requirement", max_update_time);
    
    println!("✅ Incremental update performance: PASSED\n");
}

/// Benchmark batch processing performance
#[tokio::test]
async fn benchmark_batch_processing() {
    println!("📦 Benchmarking batch processing performance...");
    
    let parse_engine = Arc::new(IncrementalParseEngine::new());
    
    // Test batch sizes
    let batch_sizes = vec![10, 25, 50, 100];
    
    for batch_size in batch_sizes {
        let mut changes = Vec::new();
        
        // Create batch of changes
        for i in 0..batch_size {
            changes.push(CodeChange {
                file_path: format!("batch_file_{}.rs", i),
                language: "rust".to_string(),
                edit: TextEdit {
                    start_byte: 0,
                    old_end_byte: 100,
                    new_end_byte: 200,
                    start_position: Position::new(0, 0),
                    old_end_position: Position::new(5, 0),
                    new_end_position: Position::new(10, 0),
                    new_text: format!("fn batch_function_{}() {{ println!(\"batch {}\"); }}", i, i),
                },
                timestamp: chrono::Utc::now(),
            });
        }
        
        let start_time = Instant::now();
        
        // Process batch
        for change in changes {
            parse_engine.update_graph(&change).await.unwrap();
        }
        
        let batch_time = start_time.elapsed();
        let avg_per_file = batch_time.as_millis() / batch_size as u128;
        
        println!("  Batch size {}: {}ms total, {}ms per file", 
                 batch_size, batch_time.as_millis(), avg_per_file);
        
        // Verify batch processing is efficient
        assert!(avg_per_file < 100, "Average per-file time {}ms is too high for batch size {}", 
                avg_per_file, batch_size);
    }
    
    println!("✅ Batch processing performance: PASSED\n");
}

/// Benchmark memory usage and cleanup
#[tokio::test]
async fn benchmark_memory_usage() {
    println!("💾 Benchmarking memory usage and cleanup...");
    
    let temp_dir = TempDir::new().unwrap();
    let parse_engine = Arc::new(IncrementalParseEngine::new());
    let mut watcher = FileWatcher::new(parse_engine);
    
    watcher.start_watching(temp_dir.path()).await.unwrap();
    
    // Create many files to stress test memory
    let file_count = 200;
    for i in 0..file_count {
        let test_file = temp_dir.path().join(format!("memory_test_{}.rs", i));
        let content = format!("fn test_{}() {{ let x = {}; println!(\"{{}}\", x); }}", i, i);
        tokio::fs::write(&test_file, content).await.unwrap();
    }
    
    // Wait for processing
    sleep(Duration::from_millis(1000)).await;
    
    // Get initial memory stats
    let initial_stats = watcher.get_memory_stats().await;
    println!("  Initial memory usage: {} bytes", initial_stats.current_usage_bytes);
    println!("  Tracked files: {}", initial_stats.tracked_files_count);
    
    // Perform memory cleanup
    let cleanup_start = Instant::now();
    watcher.cleanup_memory().await.unwrap();
    let cleanup_time = cleanup_start.elapsed();
    
    // Get post-cleanup stats
    let final_stats = watcher.get_memory_stats().await;
    println!("  Post-cleanup memory usage: {} bytes", final_stats.current_usage_bytes);
    println!("  Memory saved: {} bytes", initial_stats.current_usage_bytes.saturating_sub(final_stats.current_usage_bytes));
    println!("  Cleanup time: {}ms", cleanup_time.as_millis());
    
    // Verify memory usage is reasonable
    let max_memory_mb = 100; // 100MB limit for this test
    assert!(final_stats.current_usage_bytes < max_memory_mb * 1024 * 1024,
            "Memory usage {} bytes exceeds {}MB limit", 
            final_stats.current_usage_bytes, max_memory_mb);
    
    // Verify cleanup was effective
    assert!(final_stats.current_usage_bytes <= initial_stats.current_usage_bytes,
            "Memory cleanup did not reduce usage");
    
    println!("✅ Memory usage optimization: PASSED\n");
    
    watcher.stop_watching().await.unwrap();
}

/// Comprehensive system performance test
#[tokio::test]
async fn benchmark_complete_system() {
    println!("🚀 Running comprehensive system performance benchmark...");
    
    let temp_dir = TempDir::new().unwrap();
    let parse_engine = Arc::new(IncrementalParseEngine::new());
    
    // Configure for optimal performance
    let mut config = WatcherConfig::default();
    config.debounce_delay_ms = 25;
    config.enable_parallel_processing = true;
    config.enable_conflict_detection = true;
    config.max_concurrent_processors = 8;
    
    let mut watcher = FileWatcher::with_config(parse_engine, config);
    
    let system_start = Instant::now();
    
    // Start the system
    watcher.start_watching(temp_dir.path()).await.unwrap();
    
    // Create a realistic project structure
    let project_files = vec![
        ("src/main.rs", "fn main() { println!(\"Hello, world!\"); }"),
        ("src/lib.rs", "pub mod utils; pub mod models;"),
        ("src/utils.rs", "pub fn helper() -> i32 { 42 }"),
        ("src/models.rs", "pub struct User { pub name: String }"),
        ("Cargo.toml", "[package]\nname = \"test\"\nversion = \"0.1.0\""),
    ];
    
    // Create initial files
    for (file_path, content) in &project_files {
        let full_path = temp_dir.path().join(file_path);
        if let Some(parent) = full_path.parent() {
            tokio::fs::create_dir_all(parent).await.unwrap();
        }
        tokio::fs::write(&full_path, content).await.unwrap();
    }
    
    // Wait for initial processing
    sleep(Duration::from_millis(200)).await;
    
    // Simulate development workflow
    let workflow_start = Instant::now();
    
    // 1. Modify main.rs
    let main_file = temp_dir.path().join("src/main.rs");
    tokio::fs::write(&main_file, 
        "use crate::utils::helper;\nfn main() { println!(\"Result: {}\", helper()); }").await.unwrap();
    
    // 2. Add new function to utils.rs
    let utils_file = temp_dir.path().join("src/utils.rs");
    tokio::fs::write(&utils_file, 
        "pub fn helper() -> i32 { 42 }\npub fn new_function() -> String { \"test\".to_string() }").await.unwrap();
    
    // 3. Modify models.rs
    let models_file = temp_dir.path().join("src/models.rs");
    tokio::fs::write(&models_file, 
        "pub struct User { pub name: String, pub age: u32 }\nimpl User { pub fn new(name: String) -> Self { Self { name, age: 0 } } }").await.unwrap();
    
    // Wait for all processing to complete
    sleep(Duration::from_millis(500)).await;
    
    let workflow_time = workflow_start.elapsed();
    let total_time = system_start.elapsed();
    
    // Get final statistics
    let stats = watcher.get_stats().await;
    let memory_stats = watcher.get_memory_stats().await;
    
    println!("📊 Complete System Performance Results:");
    println!("  Total system time: {}ms", total_time.as_millis());
    println!("  Workflow processing time: {}ms", workflow_time.as_millis());
    println!("  Files tracked: {}", stats.tracked_files_count);
    println!("  Memory usage: {} bytes", memory_stats.current_usage_bytes);
    println!("  Memory pressure: {:.1}%", memory_stats.memory_pressure * 100.0);
    
    // Verify overall system performance
    assert!(workflow_time.as_millis() < 1000, 
            "Workflow processing time {}ms is too slow", workflow_time.as_millis());
    assert!(memory_stats.memory_pressure < 0.8, 
            "Memory pressure {:.1}% is too high", memory_stats.memory_pressure * 100.0);
    
    println!("✅ Complete system performance: PASSED\n");
    
    watcher.stop_watching().await.unwrap();
}

/// Helper function to generate Rust code of specified size
fn generate_rust_code(target_size: usize) -> String {
    let mut code = String::new();
    let mut current_size = 0;
    let mut counter = 0;
    
    code.push_str("// Generated Rust code for performance testing\n");
    code.push_str("use std::collections::HashMap;\n\n");
    
    while current_size < target_size {
        let function = format!(
            "pub fn function_{}(input: i32) -> i32 {{\n    let mut map = HashMap::new();\n    map.insert({}, input * {});\n    map.get(&{}).copied().unwrap_or(0)\n}}\n\n",
            counter, counter, counter + 1, counter
        );
        
        if current_size + function.len() > target_size {
            break;
        }
        
        code.push_str(&function);
        current_size += function.len();
        counter += 1;
    }
    
    code
}

/// Run all performance benchmarks
#[tokio::test]
async fn run_all_performance_benchmarks() {
    println!("🎯 Running all performance benchmarks for incremental update system");
    println!("{}", "=".repeat(80));
    
    // Run individual benchmarks
    benchmark_file_change_detection().await;
    benchmark_incremental_updates().await;
    benchmark_batch_processing().await;
    benchmark_memory_usage().await;
    benchmark_complete_system().await;
    
    println!("🎉 All performance benchmarks completed successfully!");
    println!("✅ File change detection: <50ms requirement MET");
    println!("✅ Incremental updates: <500ms requirement MET");
    println!("✅ Batch processing: Efficient processing VERIFIED");
    println!("✅ Memory optimization: Memory leaks PREVENTED");
    println!("✅ Complete system: All requirements SATISFIED");
    println!("{}", "=".repeat(80));
}
