//! # 流处理性能基准测试
//! 
//! 使用 criterion 进行详细的性能基准测试

use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId};
use std::time::Duration;
use std::sync::Arc;
use tokio::runtime::Runtime;
use serde_json::json;

use rustcloud_stream::prelude::*;
use rustcloud_bus::memory::InMemoryMessageBus;
use rustcloud_bus::message::MessageBuilder;

/// 创建测试消息
/// Build one benchmark message whose payload is deterministically derived
/// from `id` (except `timestamp`, which is the wall clock at build time).
fn create_test_message(id: i32) -> Message {
    // Every 10th id is marked inactive so filter benchmarks see mixed data.
    let payload = json!({
        "id": id,
        "data": format!("benchmark_data_{}", id),
        "timestamp": chrono::Utc::now().timestamp(),
        "active": id % 10 != 0,
        "amount": id as f64 * 1.5
    });

    MessageBuilder::new()
        .topic("benchmark.test")
        .payload(payload)
        .build()
}

/// 创建测试消息批次
/// Build a batch of `size` sequential test messages (ids 0..size).
fn create_message_batch(size: usize) -> Vec<Message> {
    let mut batch = Vec::with_capacity(size);
    for i in 0..size {
        batch.push(create_test_message(i as i32));
    }
    batch
}

/// 基准测试：映射处理器性能
/// Benchmark: throughput of a `MapProcessor` that stamps each message with
/// `processed` / `processed_at` fields, across several batch sizes.
fn bench_map_processor(c: &mut Criterion) {
    let runtime = Runtime::new().unwrap();
    let mapper = MapProcessor::with_mapper(|mut msg: Message| {
        let mut payload = msg.payload().clone();
        payload["processed"] = json!(true);
        payload["processed_at"] = json!(chrono::Utc::now().timestamp());
        msg.set_payload(payload);
        msg
    });

    let mut group = c.benchmark_group("map_processor");

    for &size in &[1usize, 10, 100, 1000] {
        let batch = create_message_batch(size);

        group.bench_with_input(BenchmarkId::new("process_messages", size), &size, |b, _| {
            b.iter(|| {
                runtime.block_on(async {
                    // Each iteration pushes the whole batch through the mapper.
                    for message in &batch {
                        let _ = mapper.process(black_box(message.clone())).await.unwrap();
                    }
                })
            });
        });
    }

    group.finish();
}

/// 基准测试：过滤处理器性能
/// Benchmark: predicate-filter throughput on the payload's `active` flag.
fn bench_filter_processor(c: &mut Criterion) {
    let runtime = Runtime::new().unwrap();
    // Keeps only messages whose payload carries `active == true`.
    let filter = FilterProcessor::with_predicate(|msg: &Message| {
        msg.payload()["active"].as_bool().unwrap_or(false)
    });

    let mut group = c.benchmark_group("filter_processor");

    for &size in &[1usize, 10, 100, 1000] {
        let batch = create_message_batch(size);

        group.bench_with_input(BenchmarkId::new("filter_messages", size), &size, |b, _| {
            b.iter(|| {
                runtime.block_on(async {
                    for message in &batch {
                        let _ = filter.process(black_box(message.clone())).await.unwrap();
                    }
                })
            });
        });
    }

    group.finish();
}

/// 基准测试：聚合处理器性能
/// Benchmark: running-sum aggregation keyed by message id across batch sizes.
fn bench_aggregate_processor(c: &mut Criterion) {
    let runtime = Runtime::new().unwrap();
    // Key = stringified id, value = `amount`, fold = running sum from 0.0.
    let aggregator = AggregateProcessor::new(
        "benchmark_aggregator",
        |msg: &Message| -> StreamResult<String> {
            Ok(msg.payload()["id"].as_i64().unwrap_or(0).to_string())
        },
        |msg: &Message| -> StreamResult<f64> {
            Ok(msg.payload()["amount"].as_f64().unwrap_or(0.0))
        },
        |acc: &f64, value: &f64| acc + value,
        0.0f64,
    );

    let mut group = c.benchmark_group("aggregate_processor");

    for &size in &[1usize, 10, 100, 1000] {
        let batch = create_message_batch(size);

        group.bench_with_input(BenchmarkId::new("aggregate_messages", size), &size, |b, _| {
            b.iter(|| {
                runtime.block_on(async {
                    for message in &batch {
                        let _ = aggregator.process(black_box(message.clone())).await.unwrap();
                    }
                })
            });
        });
    }

    group.finish();
}

/// 基准测试：批处理器性能
/// Benchmark: batch processor throughput at several batch sizes; each
/// iteration feeds exactly `batch_size` messages through the processor.
fn bench_batch_processor(c: &mut Criterion) {
    let runtime = Runtime::new().unwrap();

    let mut group = c.benchmark_group("batch_processor");

    for &batch_size in &[5usize, 10, 50, 100] {
        // Third argument is a 1 s duration — presumably the flush timeout.
        let batcher = BatchProcessor::new("benchmark_batch", batch_size, Duration::from_secs(1));
        let batch = create_message_batch(batch_size);

        group.bench_with_input(
            BenchmarkId::new("batch_messages", batch_size),
            &batch_size,
            |b, _| {
                b.iter(|| {
                    runtime.block_on(async {
                        for message in &batch {
                            let _ = batcher.process(black_box(message.clone())).await.unwrap();
                        }
                    })
                });
            },
        );
    }

    group.finish();
}

/// 基准测试：处理器链性能
fn bench_processor_chain(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();
    
    let filter = Box::new(MapProcessor::new("chain_filter", |msg| {
        if msg.payload()["active"].as_bool().unwrap_or(false) {
            msg
        } else {
            let mut filtered_msg = msg;
            let mut payload = filtered_msg.payload().clone();
            payload["_filtered"] = json!(true);
            filtered_msg.set_payload(payload);
            filtered_msg
        }
    }));
    
    let mapper = Box::new(MapProcessor::new("chain_mapper", |mut msg| {
        if !msg.payload()["_filtered"].as_bool().unwrap_or(false) {
            let mut payload = msg.payload().clone();
            payload["processed"] = json!(true);
            msg.set_payload(payload);
        }
        msg
    }));
    
    let processor_chain = ProcessorChain::new("benchmark_chain")
        .add_processor(filter)
        .add_processor(mapper);

    let mut group = c.benchmark_group("processor_chain");
    
    for size in [1, 10, 100, 1000].iter() {
        let messages = create_message_batch(*size);
        
        group.bench_with_input(
            BenchmarkId::new("chain_process", size),
            size,
            |b, _| {
                b.iter(|| {
                    rt.block_on(async {
                        for msg in &messages {
                            let _ = processor_chain.process(black_box(msg.clone())).await.unwrap();
                        }
                    })
                });
            },
        );
    }
    
    group.finish();
}

/// 基准测试：并行处理器性能
/// Benchmark: parallel processor scaling at worker counts 1/2/4/8 over a
/// fixed 100-message workload.
///
/// NOTE(review): the inner mapper uses `std::thread::sleep`, which blocks the
/// executing thread. If `ParallelProcessor` schedules mappers onto tokio
/// worker threads, this stalls the runtime rather than simulating async
/// latency — confirm this is intentional for the benchmark.
fn bench_parallel_processor(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();

    let mut group = c.benchmark_group("parallel_processor");
    
    for parallelism in [1, 2, 4, 8].iter() {
        let slow_processor = Box::new(MapProcessor::new("slow_processor", |mut msg: Message| {
            // Simulate slightly expensive per-message work (~100 µs blocking sleep).
            std::thread::sleep(Duration::from_micros(100));
            let mut payload = msg.payload().clone();
            payload["processed"] = json!(true);
            msg.set_payload(payload);
            msg
        }));
        
        let parallel_processor = ParallelProcessor::new(
            "benchmark_parallel",
            slow_processor,
            *parallelism,
        );
        
        // Fixed workload of 100 messages so only `parallelism` varies.
        let messages = create_message_batch(100);
        
        group.bench_with_input(
            BenchmarkId::new("parallel_process", parallelism),
            parallelism,
            |b, _| {
                b.iter(|| {
                    rt.block_on(async {
                        // The whole batch is handed to the processor in one call.
                        let _ = parallel_processor.process(black_box(messages.clone())).await.unwrap();
                    })
                });
            },
        );
    }
    
    group.finish();
}

/// 基准测试：内存数据源性能
/// Benchmark: drain throughput of an in-memory stream source.
///
/// Source construction and message loading happen inside `b.iter`, so the
/// measured time includes setup, not just the drain loop.
fn bench_memory_source(c: &mut Criterion) {
    let runtime = Runtime::new().unwrap();

    let mut group = c.benchmark_group("memory_source");

    for size in [10usize, 100, 1000, 10000].iter() {
        group.bench_with_input(
            BenchmarkId::new("source_throughput", size),
            size,
            |b, &size| {
                b.iter(|| {
                    runtime.block_on(async {
                        let mut source = MemoryStreamSource::new("benchmark");
                        source.add_messages(create_message_batch(size));

                        // Drain until `size` messages were read, or the stream
                        // ends early (None or an Err item).
                        let mut drained = 0;
                        loop {
                            match source.next().await {
                                Some(Ok(_)) => {
                                    drained += 1;
                                    if drained >= size {
                                        break;
                                    }
                                }
                                _ => break,
                            }
                        }
                        black_box(drained);
                    })
                });
            },
        );
    }

    group.finish();
}

/// 基准测试：内存数据汇性能
/// Benchmark: write-then-flush throughput of an in-memory stream sink.
fn bench_memory_sink(c: &mut Criterion) {
    let runtime = Runtime::new().unwrap();

    let mut group = c.benchmark_group("memory_sink");

    for size in [10usize, 100, 1000, 10000].iter() {
        group.bench_with_input(
            BenchmarkId::new("sink_throughput", size),
            size,
            |b, &size| {
                b.iter(|| {
                    runtime.block_on(async {
                        let mut sink = MemoryStreamSink::new("benchmark");

                        // Push every message, then flush once at the end.
                        for message in create_message_batch(size) {
                            let _ = sink.send(black_box(message)).await.unwrap();
                        }
                        let _ = sink.flush().await.unwrap();
                    })
                });
            },
        );
    }

    group.finish();
}

/// 基准测试：消息总线数据源性能
/// Benchmark: end-to-end throughput of a bus-backed stream source.
///
/// NOTE(review): bus/source setup, publishing, and a fixed 10 ms sleep all
/// happen inside `b.iter`, so results measure round-trip latency including
/// setup, not pure source throughput.
///
/// NOTE(review): the source subscribes with pattern "bench.*" while
/// `create_test_message` stamps topic "benchmark.test" — this only matches if
/// the pattern is regex-like ("bench" + any chars). If it is glob/segment
/// based, the source receives nothing and the drain loop exits via `break` —
/// confirm the pattern semantics.
fn bench_bus_source(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();

    let mut group = c.benchmark_group("bus_source");
    group.measurement_time(Duration::from_secs(10)); // extended sampling window
    
    for size in [10, 100, 1000].iter() {
        group.bench_with_input(
            BenchmarkId::new("bus_source_throughput", size),
            size,
            |b, &size| {
                b.iter(|| {
                    rt.block_on(async {
                        // Fresh bus + subscribed source for every iteration.
                        let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
                        let mut source = BusStreamSource::with_defaults(bus.clone(), "bench.*").await.unwrap();
                        source.start().await.unwrap();
                        
                        let publisher = bus.create_publisher().unwrap();
                        
                        // Publish `size` messages.
                        for i in 0..size {
                            let msg = create_test_message(i as i32);
                            let _ = publisher.publish(msg).await.unwrap();
                        }
                        
                        // Give the bus a moment to deliver before draining.
                        tokio::time::sleep(Duration::from_millis(10)).await;
                        
                        // Drain up to `size` messages; stop early on None/Err.
                        let mut count = 0;
                        while count < size {
                            if let Some(Ok(_)) = source.next().await {
                                count += 1;
                            } else {
                                break;
                            }
                        }
                        
                        source.stop().await.unwrap();
                        black_box(count);
                    })
                });
            },
        );
    }
    
    group.finish();
}

/// 基准测试：容错机制性能
/// Benchmark: resilience-manager overhead for three outcome scenarios —
/// immediate success, success after one retry, and exhausted retries
/// (routed per the `RetryThenDeadLetter` strategy).
fn bench_resilience_manager(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();

    let mut group = c.benchmark_group("resilience_manager");
    
    // Up to 3 attempts with a near-zero fixed backoff so retry bookkeeping,
    // not sleeping, dominates; dead-lettering targets topic "bench.dlq".
    let config = StreamErrorHandlingConfig {
        retry: StreamRetryConfig {
            max_attempts: 3,
            backoff: BackoffStrategy::Fixed { delay: Duration::from_micros(1) },
            retry_conditions: vec!["PROCESSOR_ERROR".to_string()],
            timeout: Duration::from_secs(1),
        },
        dead_letter: StreamDeadLetterConfig {
            enabled: true,
            topic: "bench.dlq".to_string(),
            max_retries_before_dlq: 2,
            retention_duration: Duration::from_secs(3600),
            capacity: 1000,
        },
        error_strategy: ErrorHandlingStrategy::RetryThenDeadLetter,
        circuit_breaker: None,
    };
    
    for scenario in ["success", "retry_success", "failure"].iter() {
        group.bench_with_input(
            BenchmarkId::new("resilience_execute", scenario),
            scenario,
            |b, &scenario| {
                b.iter(|| {
                    rt.block_on(async {
                        // NOTE(review): the manager is constructed inside `b.iter`,
                        // so its cost is part of every measured iteration.
                        let manager = StreamResilienceManager::new(config.clone()).await.unwrap();
                        let msg = create_test_message(1);
                        
                        let result = match scenario {
                            "success" => {
                                // Operation succeeds on the first attempt.
                                manager.execute_with_resilience(
                                    || async { Ok::<String, StreamError>("success".to_string()) },
                                    &msg,
                                    None,
                                ).await
                            }
                            "retry_success" => {
                                // Fails on the first call (count == 0), then succeeds.
                                let counter = Arc::new(std::sync::atomic::AtomicU32::new(0));
                                let counter_clone = counter.clone();
                                manager.execute_with_resilience(
                                    move || {
                                        let count = counter_clone.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                                        async move {
                                            if count < 1 {
                                                Err(StreamError::processor_error("test", "Retry needed"))
                                            } else {
                                                Ok("retry_success".to_string())
                                            }
                                        }
                                    },
                                    &msg,
                                    None,
                                ).await
                            }
                            "failure" => {
                                // Always fails, exhausting every retry attempt.
                                manager.execute_with_resilience(
                                    || async { Err::<String, StreamError>(StreamError::processor_error("test", "Always fails")) },
                                    &msg,
                                    None,
                                ).await
                            }
                            _ => unreachable!(),
                        };
                        
                        black_box(result);
                    })
                });
            },
        );
    }
    
    group.finish();
}

/// 基准测试：端到端管道性能
/// Benchmark: full source → processor-chain → sink pipeline over the bus.
///
/// NOTE(review): `create_test_message` stamps topic "benchmark.test" while
/// this source subscribes to "pipeline.input" — confirm the publisher routes
/// messages to subscribers regardless of the message's own topic; otherwise
/// the source receives nothing and the drain loop exits immediately.
///
/// NOTE(review): per-iteration setup (bus, source, sink, chain) and the fixed
/// 20 ms delivery sleep are inside `b.iter`, so they are part of every
/// measured sample.
fn bench_end_to_end_pipeline(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();

    let mut group = c.benchmark_group("end_to_end_pipeline");
    group.measurement_time(Duration::from_secs(15)); // extended sampling window
    
    for size in [10, 100, 500].iter() {
        group.bench_with_input(
            BenchmarkId::new("pipeline_throughput", size),
            size,
            |b, &size| {
                b.iter(|| {
                    rt.block_on(async {
                        let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
                        
                        let mut source = BusStreamSource::with_defaults(bus.clone(), "pipeline.input").await.unwrap();
                        source.start().await.unwrap();
                        
                        let mut sink = BusStreamSink::with_defaults(bus.clone(), "pipeline.output").await.unwrap();
                        
                        // Build the processor chain: tag inactive messages, then enrich.
                        let filter = Box::new(MapProcessor::new("filter", |msg| {
                            if msg.payload()["active"].as_bool().unwrap_or(false) {
                                msg
                            } else {
                                let mut filtered_msg = msg;
                                let mut payload = filtered_msg.payload().clone();
                                payload["_filtered"] = json!(true);
                                filtered_msg.set_payload(payload);
                                filtered_msg
                            }
                        }));
                        
                        let enricher = Box::new(MapProcessor::new("enricher", |mut msg| {
                            if !msg.payload()["_filtered"].as_bool().unwrap_or(false) {
                                let mut payload = msg.payload().clone();
                                payload["processed_at"] = json!(chrono::Utc::now().timestamp());
                                msg.set_payload(payload);
                            }
                            msg
                        }));
                        
                        let processor_chain = ProcessorChain::new("benchmark_pipeline")
                            .add_processor(filter)
                            .add_processor(enricher);
                        
                        // Publish the workload.
                        let publisher = bus.create_publisher().unwrap();
                        for i in 0..size {
                            let msg = create_test_message(i as i32);
                            let _ = publisher.publish(msg).await.unwrap();
                        }
                        
                        // Allow delivery before draining.
                        tokio::time::sleep(Duration::from_millis(20)).await;
                        
                        // Drain, process, and forward non-filtered messages to the sink.
                        let mut processed = 0;
                        while processed < size {
                            if let Some(Ok(msg)) = source.next().await {
                                let processed_msg = processor_chain.process(msg).await.unwrap();
                                if !processed_msg.payload()["_filtered"].as_bool().unwrap_or(false) {
                                    let _ = sink.send(processed_msg).await.unwrap();
                                }
                                processed += 1;
                            } else {
                                break;
                            }
                        }
                        
                        let _ = sink.flush().await.unwrap();
                        
                        source.stop().await.unwrap();
                        sink.close().await.unwrap();
                        
                        black_box(processed);
                    })
                });
            },
        );
    }
    
    group.finish();
}

// Register every benchmark with criterion; `criterion_main!` generates the
// binary's `main` entry point that runs the group.
criterion_group!(
    benches,
    bench_map_processor,
    bench_filter_processor,
    bench_aggregate_processor,
    bench_batch_processor,
    bench_processor_chain,
    bench_parallel_processor,
    bench_memory_source,
    bench_memory_sink,
    bench_bus_source,
    bench_resilience_manager,
    bench_end_to_end_pipeline
);

criterion_main!(benches);