//! # 性能优化集成示例
//! 
//! 展示RustCloud框架中的各种性能优化功能

use rustcloud::prelude::*;
use rustcloud::performance::*;
use rustcloud::performance_monitor::*;
use std::sync::Arc;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Initialize logging.
    // FIX: `tracing_subscriber` has no crate-root `init()` function; the
    // default subscriber is installed via `tracing_subscriber::fmt::init()`.
    tracing_subscriber::fmt::init();

    println!("🚀 RustCloud 性能优化演示");
    println!("===========================");

    // 1. Connection-pool demo
    demo_connection_pool().await?;

    // 2. Memory-management demo
    demo_memory_management().await?;

    // 3. Async task-pool demo
    demo_async_task_pool().await?;

    // 4. Performance-monitoring demo
    demo_performance_monitoring().await?;

    // 5. End-to-end benchmark
    demo_performance_benchmark().await?;

    println!("\n✅ 性能优化演示完成");
    Ok(())
}

/// Demonstrates connection-pool usage: configuration, initialization,
/// acquire/release cycles, statistics reporting, and shutdown.
async fn demo_connection_pool() -> ServiceResult<()> {
    println!("\n🔗 1. 连接池演示");

    // Pool limits and timeout policy.
    let pool_config = ConnectionPoolConfig {
        max_connections: 10,
        min_connections: 2,
        connection_timeout: Duration::from_secs(5),
        idle_timeout: Duration::from_secs(60),
        acquire_timeout: Duration::from_secs(3),
        validation_interval: Duration::from_secs(30),
    };

    // Factory producing the (simulated) database connections.
    let conn_factory = Arc::new(DatabaseConnectionFactory::new(
        "postgresql://localhost:5432/test".to_string(),
    ));

    let pool = ConnectionPool::new(pool_config, conn_factory);

    // Warm the pool up before handing out connections.
    pool.initialize().await?;
    println!("   连接池初始化完成");

    let started = std::time::Instant::now();

    // Acquire a handful of connections, remembering their ids.
    let mut acquired_ids = Vec::new();
    for n in 1..=5 {
        let connection = pool.acquire().await?;
        println!("   获取连接 {}: {}", n, connection.id);
        acquired_ids.push(connection.id);
    }

    // Hand every connection back to the pool.
    for (i, id) in acquired_ids.iter().enumerate() {
        pool.release(id).await?;
        println!("   释放连接 {}: {}", i + 1, id);
    }

    println!("   连接池操作耗时: {:?}", started.elapsed());

    // Dump pool-wide statistics.
    let stats = pool.get_stats().await;
    println!("   连接池统计:");
    println!("     总连接数: {}", stats.total_connections);
    println!("     活跃连接数: {}", stats.active_connections);
    println!("     空闲连接数: {}", stats.idle_connections);
    println!("     获取次数: {}", stats.acquire_count);
    println!("     释放次数: {}", stats.release_count);

    // Tear the pool down.
    pool.shutdown().await?;
    println!("   ✅ 连接池演示完成");

    Ok(())
}

/// Demonstrates the memory manager: pool creation, auto-GC, allocation,
/// partial deallocation, manual GC, statistics, and final cleanup.
async fn demo_memory_management() -> ServiceResult<()> {
    println!("\n💾 2. 内存管理演示");

    // Garbage-collection policy for the manager.
    let gc_config = GcConfig {
        auto_gc_enabled: true,
        gc_threshold: 0.7,
        gc_interval: Duration::from_secs(5),
        pressure_threshold: 0.9,
    };

    let memory_manager = MemoryManager::new(gc_config);

    // One pool for small blocks, one for large blocks.
    memory_manager.create_pool("small_objects".to_string(), 1024, 100).await?;
    memory_manager.create_pool("large_objects".to_string(), 8192, 50).await?;
    println!("   创建了两个内存池");

    // Kick off the background collector.
    memory_manager.start_auto_gc().await;
    println!("   启动自动垃圾回收");

    let started = std::time::Instant::now();
    let mut live_blocks = Vec::new();

    // Grab ten small blocks...
    for n in 1..=10 {
        let block = memory_manager.allocate("small_objects").await?;
        println!("   分配小对象 {}: {} 字节", n, block.len());
        live_blocks.push(("small_objects", block));
    }

    // ...and five large ones.
    for n in 1..=5 {
        let block = memory_manager.allocate("large_objects").await?;
        println!("   分配大对象 {}: {} 字节", n, block.len());
        live_blocks.push(("large_objects", block));
    }

    // Return the first half of the blocks to their pools.
    let half = live_blocks.len() / 2;
    for (i, (pool_name, block)) in live_blocks.drain(..half).enumerate() {
        memory_manager.deallocate(pool_name, block).await?;
        println!("   释放对象 {}", i + 1);
    }

    // Force a collection pass and report how much it reclaimed.
    let freed_bytes = memory_manager.garbage_collect().await?;
    println!("   手动垃圾回收释放了 {} 字节", freed_bytes);

    println!("   内存管理操作耗时: {:?}", started.elapsed());

    // Dump manager-wide statistics.
    let stats = memory_manager.get_stats().await;
    println!("   内存管理统计:");
    println!("     总分配内存: {} 字节", stats.total_allocated);
    println!("     当前使用内存: {} 字节", stats.current_used);
    println!("     分配次数: {}", stats.allocation_count);
    println!("     释放次数: {}", stats.deallocation_count);
    println!("     GC次数: {}", stats.gc_count);
    println!("     内存池数量: {}", stats.pool_count);

    // Hand back everything that is still allocated.
    for (pool_name, block) in live_blocks {
        memory_manager.deallocate(pool_name, block).await?;
    }

    println!("   ✅ 内存管理演示完成");
    Ok(())
}

/// Demonstrates the async task pool: submits blocking-style jobs to worker
/// threads and waits for a shared counter to reach the expected total.
async fn demo_async_task_pool() -> ServiceResult<()> {
    println!("\n⚡ 3. 异步任务池演示");

    // Pool with four worker threads.
    let task_pool = AsyncTaskPool::new(4);
    println!("   创建了4个工作线程的异步任务池");

    // Shared completion counter updated by every task.
    let counter = Arc::new(tokio::sync::RwLock::new(0));
    let start_time = std::time::Instant::now();

    // FIX: capture the runtime handle HERE, while we are guaranteed to be
    // inside the tokio runtime. The original code called
    // `tokio::runtime::Handle::current()` inside the submitted closure —
    // pool worker threads are plain OS threads (note the std::thread::sleep
    // below) with no runtime context entered, so `Handle::current()` would
    // panic there. A captured handle's `block_on` is fine on such a thread.
    let runtime = tokio::runtime::Handle::current();

    // Submit the workload.
    let task_count = 20;
    for i in 0..task_count {
        let counter_clone = counter.clone();
        let runtime = runtime.clone();

        task_pool.submit(move || {
            // Simulate blocking work on the worker thread.
            std::thread::sleep(std::time::Duration::from_millis(100));

            // Update the shared counter via the captured runtime handle.
            runtime.block_on(async {
                let mut count = counter_clone.write().await;
                *count += 1;
                println!("   任务 {} 完成，当前计数: {}", i + 1, *count);
            });
        }).await?;
    }

    // Poll until every task has bumped the counter.
    let mut completed = false;
    while !completed {
        tokio::time::sleep(Duration::from_millis(50)).await;
        let count = *counter.read().await;
        if count == task_count {
            completed = true;
        }
    }

    let duration = start_time.elapsed();
    println!("   异步任务池处理 {} 个任务耗时: {:?}", task_count, duration);

    // Report submission/completion/failure counts.
    let stats = task_pool.get_stats().await;
    println!("   任务池统计:");
    println!("     提交任务数: {}", stats.submitted_tasks);
    println!("     完成任务数: {}", stats.completed_tasks);
    println!("     失败任务数: {}", stats.failed_tasks);

    println!("   ✅ 异步任务池演示完成");
    Ok(())
}

/// Demonstrates the performance monitor: starts collection, samples the
/// current metrics, then prints history, a summary, and recent alerts.
async fn demo_performance_monitoring() -> ServiceResult<()> {
    println!("\n📊 4. 性能监控演示");

    // Collection cadence, retention window, and alerting thresholds.
    let monitor_config = PerformanceMonitorConfig {
        collection_interval: Duration::from_millis(500),
        retention_period: Duration::from_secs(60),
        detailed_monitoring: true,
        thresholds: PerformanceThresholds {
            cpu_warning_threshold: 0.6,
            cpu_critical_threshold: 0.8,
            memory_warning_threshold: 0.7,
            memory_critical_threshold: 0.9,
            response_time_warning_ms: 500,
            response_time_critical_ms: 1000,
        },
    };

    let monitor = PerformanceMonitor::new(monitor_config);

    // Begin background metric collection.
    monitor.start().await?;
    println!("   性能监控器已启动");

    // Give the collector time to gather a few samples.
    tokio::time::sleep(Duration::from_secs(2)).await;

    // Print the most recent snapshot, if one exists yet.
    if let Some(snapshot) = monitor.get_current_metrics().await {
        println!("   当前系统指标:");
        println!("     CPU使用率: {:.1}%", snapshot.cpu_usage * 100.0);
        println!("     内存使用率: {:.1}%", snapshot.memory.usage_percent * 100.0);
        println!("     总内存: {} MB", snapshot.memory.total / 1024 / 1024);
        println!("     已用内存: {} MB", snapshot.memory.used / 1024 / 1024);
        println!("     活跃连接数: {}", snapshot.network.active_connections);
        println!("     进程线程数: {}", snapshot.process.thread_count);
    }

    // How many data points have been retained?
    let history = monitor.get_metrics_history(Some(5)).await;
    println!("   历史数据点数: {}", history.len());

    // Aggregate summary over the whole monitoring session.
    let summary = monitor.get_performance_summary().await;
    println!("   性能摘要:");
    println!("     运行时间: {} 秒", summary.uptime_seconds);
    println!("     平均CPU使用率: {:.1}%", summary.avg_cpu_usage * 100.0);
    println!("     最大CPU使用率: {:.1}%", summary.max_cpu_usage * 100.0);
    println!("     平均内存使用率: {:.1}%", summary.avg_memory_usage * 100.0);
    println!("     最大内存使用率: {:.1}%", summary.max_memory_usage * 100.0);
    println!("     总警报数: {}", summary.total_alerts);
    println!("     数据点数: {}", summary.data_points);

    // Show up to three of the most recent alerts, if any fired.
    let alerts = monitor.get_alert_history(Some(10)).await;
    if alerts.is_empty() {
        println!("   暂无警报");
    } else {
        println!("   最近警报:");
        for alert in alerts.iter().take(3) {
            println!("     [{:?}] {}: {}", alert.level, alert.alert_type, alert.message);
        }
    }

    // Shut the collector down.
    monitor.stop().await?;
    println!("   ✅ 性能监控演示完成");

    Ok(())
}

/// Runs the end-to-end benchmark and prints its report.
async fn demo_performance_benchmark() -> ServiceResult<()> {
    println!("\n🏆 5. 综合性能基准测试");

    // Workload shape for the benchmark run.
    let bench_config = BenchmarkConfig {
        concurrent_requests: 100,
        total_requests: 1000,
        request_size: 1024,
        warmup_requests: 50,
    };

    println!("   基准测试配置:");
    println!("     并发请求数: {}", bench_config.concurrent_requests);
    println!("     总请求数: {}", bench_config.total_requests);
    println!("     请求大小: {} 字节", bench_config.request_size);

    // Drive the benchmark to completion.
    let report = run_benchmark(bench_config).await?;

    println!("   基准测试结果:");
    println!("     总耗时: {:?}", report.total_duration);
    println!("     平均响应时间: {:?}", report.avg_response_time);
    println!("     最小响应时间: {:?}", report.min_response_time);
    println!("     最大响应时间: {:?}", report.max_response_time);
    println!("     QPS (每秒请求数): {:.0}", report.qps);
    println!("     吞吐量: {:.2} MB/s", report.throughput_mb_per_sec);
    println!("     成功请求数: {}", report.successful_requests);
    println!("     失败请求数: {}", report.failed_requests);
    println!("     成功率: {:.2}%", report.success_rate * 100.0);

    println!("   ✅ 综合性能基准测试完成");
    Ok(())
}

/// Simulated database connection factory used by the pool demo.
struct DatabaseConnectionFactory {
    // DSN stamped onto every connection this factory creates.
    connection_string: String,
}

impl DatabaseConnectionFactory {
    fn new(connection_string: String) -> Self {
        Self { connection_string }
    }
}

#[async_trait::async_trait]
impl ConnectionFactory<DatabaseConnection> for DatabaseConnectionFactory {
    /// Creates a new simulated connection after a short artificial delay.
    async fn create_connection(&self) -> ServiceResult<DatabaseConnection> {
        // Simulate connection-establishment latency.
        tokio::time::sleep(Duration::from_millis(10)).await;
        
        Ok(DatabaseConnection {
            id: uuid::Uuid::new_v4().to_string(),
            connection_string: self.connection_string.clone(),
            created_at: std::time::Instant::now(),
        })
    }

    /// Always reports the connection as healthy (no real check in this demo).
    async fn validate_connection(&self, _connection: &DatabaseConnection) -> bool {
        // Simulated validation: unconditionally healthy.
        true
    }

    /// Tears the connection down after a short artificial delay.
    async fn destroy_connection(&self, _connection: DatabaseConnection) -> ServiceResult<()> {
        // Simulate teardown latency.
        tokio::time::sleep(Duration::from_millis(5)).await;
        Ok(())
    }
}

/// Simulated database connection handed out by the pool demo.
#[derive(Debug, Clone)]
struct DatabaseConnection {
    // Unique identifier (UUID v4) assigned at creation time.
    id: String,
    // DSN this connection was created against; not read in this example.
    connection_string: String,
    // Creation timestamp; not read in this example.
    created_at: std::time::Instant,
}

/// Benchmark workload configuration.
struct BenchmarkConfig {
    // Maximum number of requests in flight at once.
    concurrent_requests: usize,
    // Total number of measured requests to issue.
    total_requests: usize,
    // Simulated payload size per request, in bytes.
    request_size: usize,
    // Requests issued (and discarded) before measurement starts.
    warmup_requests: usize,
}

/// Aggregated results of a benchmark run.
struct BenchmarkResults {
    // Wall-clock time for the whole measured phase.
    total_duration: Duration,
    // Mean per-request latency.
    avg_response_time: Duration,
    // Fastest observed request.
    min_response_time: Duration,
    // Slowest observed request.
    max_response_time: Duration,
    // Requests completed per second.
    qps: f64,
    // Payload throughput in MiB per second.
    throughput_mb_per_sec: f64,
    // Requests that returned Ok.
    successful_requests: usize,
    // Requests that returned Err or whose task panicked.
    failed_requests: usize,
    // successful_requests / total_requests, in [0, 1].
    success_rate: f64,
}

/// Executes the benchmark described by `config`.
///
/// Performs a warmup phase, then fires `total_requests` simulated requests
/// with at most `concurrent_requests` in flight, and aggregates latency and
/// throughput statistics into a [`BenchmarkResults`].
///
/// # Errors
/// Currently never returns `Err`; the `ServiceResult` signature allows
/// future fallible setup steps.
async fn run_benchmark(config: BenchmarkConfig) -> ServiceResult<BenchmarkResults> {
    let start_time = std::time::Instant::now();
    // Preallocate: we know exactly how many samples we will collect.
    let mut response_times = Vec::with_capacity(config.total_requests);
    let mut successful_requests = 0;
    let mut failed_requests = 0;

    // Warmup: results are intentionally discarded.
    for _ in 0..config.warmup_requests {
        let _ = simulate_request(config.request_size).await;
    }

    // Limit the number of in-flight requests with a semaphore.
    let semaphore = Arc::new(tokio::sync::Semaphore::new(config.concurrent_requests));
    let mut handles = Vec::with_capacity(config.total_requests);

    for _ in 0..config.total_requests {
        // `acquire_owned` only fails if the semaphore is closed, which this
        // function never does — state that invariant instead of a bare unwrap.
        let permit = semaphore
            .clone()
            .acquire_owned()
            .await
            .expect("benchmark semaphore is never closed");
        let request_size = config.request_size;

        let handle = tokio::spawn(async move {
            let _permit = permit; // Hold the permit until the task finishes.
            let request_start = std::time::Instant::now();
            let result = simulate_request(request_size).await;
            (result, request_start.elapsed())
        });

        handles.push(handle);
    }

    // Collect every outcome; a panicked/cancelled task counts as a failure.
    for handle in handles {
        match handle.await {
            Ok((result, response_time)) => {
                response_times.push(response_time);
                if result.is_ok() {
                    successful_requests += 1;
                } else {
                    failed_requests += 1;
                }
            }
            Err(_) => {
                failed_requests += 1;
            }
        }
    }

    let total_duration = start_time.elapsed();

    // Aggregate statistics.
    // FIX 1: guard the empty case (`total_requests == 0`, or every task
    // panicked) — the original divided by `response_times.len()`
    // unconditionally, panicking on zero.
    // FIX 2: divide in u128 BEFORE narrowing to u64 — the original cast the
    // nanosecond sum to u64 first, which can truncate for large runs.
    let avg_response_time = if response_times.is_empty() {
        Duration::default()
    } else {
        let total_nanos: u128 = response_times.iter().map(|d| d.as_nanos()).sum();
        Duration::from_nanos((total_nanos / response_times.len() as u128) as u64)
    };
    let min_response_time = response_times.iter().min().copied().unwrap_or_default();
    let max_response_time = response_times.iter().max().copied().unwrap_or_default();
    let qps = config.total_requests as f64 / total_duration.as_secs_f64();
    let throughput_mb_per_sec = (config.total_requests * config.request_size) as f64
        / (1024.0 * 1024.0)
        / total_duration.as_secs_f64();
    // Avoid a NaN success rate when zero requests were issued.
    let success_rate = if config.total_requests == 0 {
        0.0
    } else {
        successful_requests as f64 / config.total_requests as f64
    };

    Ok(BenchmarkResults {
        total_duration,
        avg_response_time,
        min_response_time,
        max_response_time,
        qps,
        throughput_mb_per_sec,
        successful_requests,
        failed_requests,
        success_rate,
    })
}

/// Simulates handling a single request of `size` bytes.
///
/// Sleeps for a size-dependent amount of time, fails ~1% of the time to
/// mimic real-world flakiness, and returns a zero-filled payload.
async fn simulate_request(size: usize) -> ServiceResult<Vec<u8>> {
    // Latency grows with payload size.
    let latency = Duration::from_micros(50 + (size / 100) as u64);
    tokio::time::sleep(latency).await;

    // Inject a 1% random failure rate.
    if rand::random::<f64>() < 0.01 {
        return Err(ServiceError::InternalError("模拟请求失败".to_string()));
    }

    // Zero-filled payload of the requested size.
    Ok(vec![0u8; size])
}