//! 性能基准测试
//!
//! 测试日志核心组件的性能，验证架构文档的性能目标：
//! - LogEvent 创建: < 100ns
//! - Level 比较: O(1)，零分配
//! - TextFormatter 格式化: < 500ns
//! - 内存占用: < 10MB (基础配置)
//! - 启动时间: < 50ms

use criterion::{criterion_group, criterion_main, Criterion};
use log4r::config::parse_config;
use log4r::formatter::{Formatter, TextFormatter};
use log4r::{Level, LogEvent};
use std::hint::black_box;
use std::time::Instant;

/// 基准测试：Level 比较
///
/// 目标：O(1)，零分配
/// Benchmark: `Level` comparison operations.
///
/// Target: O(1), zero allocations.
fn bench_level_comparison(c: &mut Criterion) {
    let mut group = c.benchmark_group("level");

    group.bench_function("comparison", |b| {
        let (lhs, rhs) = (Level::Info, Level::Debug);
        b.iter(|| black_box(lhs) >= black_box(rhs));
    });

    group.bench_function("should_log", |b| {
        let (level, threshold) = (Level::Info, Level::Debug);
        b.iter(|| black_box(level).should_log(black_box(threshold)));
    });

    group.bench_function("as_str", |b| {
        b.iter(|| black_box(Level::Info).as_str());
    });

    group.bench_function("display", |b| {
        let level = Level::Info;
        // `to_string()` goes through the `Display` impl and allocates.
        b.iter(|| black_box(level.to_string()));
    });

    group.finish();
}

/// 基准测试：LogEvent 创建
///
/// 目标：< 100ns
/// Benchmark: `LogEvent` construction.
///
/// Target: < 100ns per event.
fn bench_log_event_creation(c: &mut Criterion) {
    let mut group = c.benchmark_group("log_event");

    // Static &str inputs (zero-copy path).
    group.bench_function("new_static_strings", |b| {
        b.iter(|| {
            black_box(LogEvent::new(
                black_box(Level::Info),
                black_box("my_app::module"),
                black_box("This is a test message"),
            ))
        });
    });

    // Dynamically built strings (allocation required).
    group.bench_function("new_dynamic_strings", |b| {
        b.iter(|| {
            let target = format!("my_app::module_{}", 42);
            let message = format!("This is message number {}", 123);
            black_box(LogEvent::new(black_box(Level::Info), target, message))
        });
    });

    // FATAL level (additionally sets write_to_syslog).
    group.bench_function("new_fatal", |b| {
        b.iter(|| {
            black_box(LogEvent::new(
                black_box(Level::Fatal),
                black_box("my_app"),
                black_box("Fatal error"),
            ))
        });
    });

    // Accessor helpers.
    group.bench_function("is_fatal", |b| {
        let event = LogEvent::new(Level::Info, "app", "msg");
        b.iter(|| black_box(event.is_fatal()));
    });

    group.bench_function("should_write_to_syslog", |b| {
        let event = LogEvent::new(Level::Fatal, "app", "msg");
        b.iter(|| black_box(event.should_write_to_syslog()));
    });

    group.finish();
}

/// 基准测试：TextFormatter 格式化
///
/// 目标：< 500ns
fn bench_text_formatter(c: &mut Criterion) {
    let mut group = c.benchmark_group("text_formatter");

    let formatter = TextFormatter::new();

    // 短消息格式化
    group.bench_function("format_short_message", |b| {
        let event = LogEvent::new(Level::Info, "app", "Hello");
        b.iter(|| {
            let output = formatter.format(black_box(&event));
            black_box(output);
        });
    });

    // 长消息格式化
    group.bench_function("format_long_message", |b| {
        let long_message = "This is a much longer log message that contains more text and will require more processing time to format properly.";
        let event = LogEvent::new(Level::Debug, "my_app::module::submodule", long_message);
        b.iter(|| {
            let output = formatter.format(black_box(&event));
            black_box(output);
        });
    });

    // 所有级别格式化
    group.bench_function("format_all_levels", |b| {
        let levels = [
            Level::Trace,
            Level::Debug,
            Level::Info,
            Level::Warn,
            Level::Error,
            Level::Fatal,
        ];
        let events: Vec<_> = levels
            .iter()
            .map(|&level| LogEvent::new(level, "app", "message"))
            .collect();

        b.iter(|| {
            for event in &events {
                let output = formatter.format(black_box(event));
                black_box(output);
            }
        });
    });

    group.finish();
}

/// 基准测试：同步日志吞吐量（占位符，待 Epic 7 实现）
fn bench_sync_logging(c: &mut Criterion) {
    let mut group = c.benchmark_group("sync_logging");

    group.bench_function("noop_baseline", |b| {
        b.iter(|| {
            // 基准线：空操作
            black_box(());
        });
    });

    // TODO: Epic 7 实现 init() 后启用
    // group.bench_function("info_log", |b| {
    //     log4r::init().expect("初始化失败");
    //     b.iter(|| {
    //         log4r::info!("benchmark message");
    //     });
    // });

    group.finish();
}

/// 基准测试：异步日志吞吐量
///
/// Epic 6 性能目标：
/// - 异步吞吐量: > 5M msg/s
/// - 发送延迟 P99: < 100ns
/// Benchmark: asynchronous logging throughput.
///
/// Epic 6 performance targets:
/// - async throughput: > 5M msg/s
/// - send latency P99: < 100ns
///
/// Criterion runs far more iterations than the bounded queue can hold, so
/// every `try_send`-only benchmark drains the queue whenever a send fails.
/// Without that, the queue fills after the first ~100k sends and the steady
/// state measures the cheap "queue full" failure branch instead of an
/// actual enqueue.
fn bench_async_logging(c: &mut Criterion) {
    use log4r::async_rt::{LogChannel, DEFAULT_QUEUE_SIZE};
    use std::sync::Arc;
    use std::thread;

    let mut group = c.benchmark_group("async_logging");

    // Baseline: empty iteration.
    group.bench_function("noop_baseline", |b| {
        b.iter(|| {
            black_box(());
        });
    });

    // Single-threaded send into the channel (queue kept from staying full).
    group.bench_function("channel_send_single", |b| {
        let channel = LogChannel::new(100_000);
        b.iter(|| {
            let event = LogEvent::new(
                black_box(Level::Info),
                black_box("bench"),
                black_box("benchmark message"),
            );
            if channel.try_send(event).is_err() {
                // Queue saturated: empty it so subsequent iterations measure
                // a successful send, not the failure path.
                while channel.receiver().try_recv().is_ok() {}
            }
        });
        // Drain leftovers.
        while channel.receiver().try_recv().is_ok() {}
    });

    // Batch of 1000 messages per timed iteration.
    group.bench_function("channel_send_batch_1000", |b| {
        let channel = LogChannel::new(100_000);
        b.iter(|| {
            for i in 0..1000 {
                let event = LogEvent::new(
                    black_box(Level::Info),
                    black_box("bench"),
                    black_box("benchmark message"),
                );
                if channel.try_send(event).is_err() {
                    // Queue saturated: drain so the batch keeps hitting the
                    // successful-send path.
                    while channel.receiver().try_recv().is_ok() {}
                }
                black_box(i);
            }
        });
        // Drain leftovers.
        while channel.receiver().try_recv().is_ok() {}
    });

    // Concurrent send from 4 threads (250 messages each).
    group.bench_function("channel_send_concurrent_4threads", |b| {
        let channel = Arc::new(LogChannel::new(100_000));
        b.iter(|| {
            let mut handles = vec![];
            for _ in 0..4 {
                let ch = Arc::clone(&channel);
                let handle = thread::spawn(move || {
                    for _ in 0..250 {
                        let event = LogEvent::new(Level::Info, "bench", "concurrent message");
                        if ch.try_send(event).is_err() {
                            // Queue saturated: drain before continuing.
                            // NOTE(review): assumes the receiver supports
                            // concurrent `try_recv` from multiple threads
                            // (true for crossbeam-style channels) — confirm.
                            while ch.receiver().try_recv().is_ok() {}
                        }
                    }
                });
                handles.push(handle);
            }
            for handle in handles {
                let _ = handle.join();
            }
        });
        // Drain leftovers.
        while channel.receiver().try_recv().is_ok() {}
    });

    // Throughput measurement with a live consumer thread.
    group.bench_function("throughput_measurement", |b| {
        let channel = LogChannel::new(DEFAULT_QUEUE_SIZE);
        let receiver = channel.receiver().clone();

        // Consumer drains the queue until told to stop.
        let stop_flag = Arc::new(std::sync::atomic::AtomicBool::new(false));
        let stop_flag_clone = Arc::clone(&stop_flag);
        let consumer = thread::spawn(move || {
            while !stop_flag_clone.load(std::sync::atomic::Ordering::Relaxed) {
                while receiver.try_recv().is_ok() {}
                thread::yield_now();
            }
            // Drain whatever is left.
            while receiver.try_recv().is_ok() {}
        });

        b.iter(|| {
            for _ in 0..1000 {
                let event = LogEvent::new(Level::Info, "bench", "throughput test");
                let _ = channel.try_send(event);
            }
        });

        stop_flag.store(true, std::sync::atomic::Ordering::Relaxed);
        let _ = consumer.join();
    });

    // Static strings (best case: no string allocation on send).
    group.bench_function("channel_static_strings", |b| {
        let channel = LogChannel::new(100_000);

        b.iter(|| {
            let event = LogEvent::new(
                black_box(Level::Info),
                "static_target",
                "static message for benchmark",
            );
            if channel.try_send(event).is_err() {
                // Queue saturated: drain so later sends succeed again.
                while channel.receiver().try_recv().is_ok() {}
            }
        });
        // Drain leftovers.
        while channel.receiver().try_recv().is_ok() {}
    });

    // Large static batch (real throughput measurement).
    group.bench_function("channel_static_batch_10000", |b| {
        let channel = LogChannel::new(100_000);

        b.iter(|| {
            for _ in 0..10000 {
                let event = LogEvent::new(
                    Level::Info,
                    "static_target",
                    "static message for throughput test",
                );
                if channel.try_send(event).is_err() {
                    // Queue saturated: drain so the batch keeps measuring
                    // successful sends.
                    while channel.receiver().try_recv().is_ok() {}
                }
            }
        });
        // Drain leftovers.
        while channel.receiver().try_recv().is_ok() {}
    });

    group.finish();
}

/// 基准测试：配置解析
///
/// 建立基准线，用于后续优化参考
/// Benchmark: configuration parsing.
///
/// Establishes a baseline for future optimization work.
fn bench_parse_config(c: &mut Criterion) {
    let mut group = c.benchmark_group("config");

    // Smallest possible configuration.
    const MINIMAL_CONFIG: &str = r#"
[log4r]
level = "info"
"#;

    group.bench_function("parse_minimal", |b| {
        b.iter(|| {
            let _ = black_box(parse_config(black_box(MINIMAL_CONFIG)));
        });
    });

    // Configuration exercising every section.
    const FULL_CONFIG: &str = r#"
[log4r]
level = "debug"
app_name = "benchmark-app"

[log4r.modules]
"my_app::db" = "trace"
"my_app::http" = "warn"
"my_app::cache" = "info"

[log4r.appenders.console]
enabled = true
target = "stdout"
level = "info"

[log4r.appenders.file]
enabled = true
path = "logs/app.log"
level = "debug"

[log4r.format]
pattern = "{time} [{level}] {target}: {message}"
time_format = "[year]-[month]-[day] [hour]:[minute]:[second]"
time_zone = "local"

[log4r.async]
enabled = true
queue_size = 10000
"#;

    group.bench_function("parse_full", |b| {
        b.iter(|| {
            let _ = black_box(parse_config(black_box(FULL_CONFIG)));
        });
    });

    group.finish();
}

/// 基准测试：内存占用
///
/// PRD 目标：< 10MB (基础配置运行时)
/// Benchmark: memory footprint.
///
/// PRD target: < 10MB at runtime with the basic configuration.
fn bench_memory_usage(c: &mut Criterion) {
    use log4r::async_rt::LogChannel;

    let mut group = c.benchmark_group("memory");

    // Size of a LogEvent.
    group.bench_function("logevent_size", |b| {
        b.iter(|| black_box(std::mem::size_of::<LogEvent>()));
    });

    // Size of a LogChannel handle.
    group.bench_function("logchannel_size", |b| {
        b.iter(|| {
            let channel = LogChannel::new(1000);
            black_box(std::mem::size_of_val(&channel));
            black_box(channel);
        });
    });

    // Bulk allocation of 1000 LogEvents.
    group.bench_function("batch_allocation_1000", |b| {
        b.iter(|| {
            let events: Vec<LogEvent> = (0..1000)
                .map(|_| LogEvent::new(Level::Info, "memory_bench", "测试消息用于内存基准测试"))
                .collect();
            black_box(events);
        });
    });

    // Size of a TextFormatter.
    group.bench_function("formatter_size", |b| {
        b.iter(|| {
            let formatter = TextFormatter::new();
            black_box(std::mem::size_of_val(&formatter));
            black_box(formatter);
        });
    });

    // Size of the parsed configuration struct.
    group.bench_function("config_size", |b| {
        b.iter(|| {
            let config = parse_config("[log4r]\nlevel = \"info\"").expect("配置解析失败");
            black_box(std::mem::size_of_val(&config));
            black_box(config);
        });
    });

    group.finish();

    // Print a memory summary after the benchmarks complete.
    println!("\n📊 内存占用摘要:");
    println!("  LogEvent 大小: {} bytes", std::mem::size_of::<LogEvent>());
    println!("  Level 大小: {} bytes", std::mem::size_of::<Level>());
    println!(
        "  TextFormatter 大小: {} bytes",
        std::mem::size_of::<TextFormatter>()
    );
}

/// 基准测试：启动时间
///
/// PRD 目标：< 50ms (配置加载到就绪)
/// Benchmark: startup time.
///
/// PRD target: < 50ms from configuration load to ready.
fn bench_startup_time(c: &mut Criterion) {
    let mut group = c.benchmark_group("startup");

    // Time spent parsing the configuration at startup.
    group.bench_function("config_parse_startup", |b| {
        let config_content = r#"
[log4r]
level = "info"
app_name = "startup-bench"

[log4r.appenders.console]
enabled = true
target = "stdout"

[log4r.appenders.file]
enabled = true
path = "logs/bench.log"

[log4r.async]
enabled = true
queue_size = 10000
"#;
        b.iter(|| {
            let started = Instant::now();
            let config = parse_config(black_box(config_content)).expect("配置解析失败");
            let took = started.elapsed();
            black_box(config);
            black_box(took);
        });
    });

    // Time spent creating the LogChannel at startup.
    group.bench_function("channel_create_startup", |b| {
        use log4r::async_rt::LogChannel;
        b.iter(|| {
            let started = Instant::now();
            let channel = LogChannel::new(10000);
            let took = started.elapsed();
            black_box(channel);
            black_box(took);
        });
    });

    // Time spent creating the TextFormatter at startup.
    group.bench_function("formatter_create_startup", |b| {
        b.iter(|| {
            let started = Instant::now();
            let formatter = TextFormatter::new();
            let took = started.elapsed();
            black_box(formatter);
            black_box(took);
        });
    });

    // Simulated full component initialization.
    group.bench_function("full_components_startup", |b| {
        use log4r::async_rt::LogChannel;
        let config_content = r#"
[log4r]
level = "info"
app_name = "startup-bench"
"#;
        b.iter(|| {
            let started = Instant::now();

            // 1. Parse the configuration.
            let _config = parse_config(black_box(config_content)).unwrap();

            // 2. Create the formatter.
            let _formatter = TextFormatter::new();

            // 3. Create the async channel.
            let _channel = LogChannel::new(10000);

            let elapsed = started.elapsed();

            // Enforce the < 50ms startup budget.
            assert!(elapsed.as_millis() < 50, "启动时间超过 50ms: {:?}", elapsed);

            black_box(elapsed);
        });
    });

    group.finish();
}

/// 基准测试：吞吐量验证
///
/// PRD 目标：
/// - 同步日志吞吐量: > 1M msg/s
/// - 异步日志吞吐量: > 5M msg/s
/// Benchmark: throughput target validation.
///
/// PRD targets:
/// - sync logging throughput: > 1M msg/s
/// - async logging throughput: > 5M msg/s
fn bench_throughput_validation(c: &mut Criterion) {
    use log4r::async_rt::LogChannel;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    let mut group = c.benchmark_group("throughput_validation");

    // Longer measurement window for more stable numbers.
    group.measurement_time(Duration::from_secs(5));

    // Async throughput validation: target > 5M msg/s.
    group.bench_function("async_throughput_5m_target", |b| {
        let channel = Arc::new(LogChannel::new(100_000));
        let receiver = channel.receiver().clone();

        // Consumer thread keeps draining while the producer is timed.
        let stop_flag = Arc::new(std::sync::atomic::AtomicBool::new(false));
        let stop_flag_clone = Arc::clone(&stop_flag);
        let consumer = thread::spawn(move || {
            while !stop_flag_clone.load(std::sync::atomic::Ordering::Relaxed) {
                while receiver.try_recv().is_ok() {}
                thread::yield_now();
            }
            while receiver.try_recv().is_ok() {}
        });

        b.iter(|| {
            // Send 10000 messages per timed iteration.
            for _ in 0..10000 {
                let event =
                    LogEvent::new(Level::Info, "throughput_validation", "验证吞吐量目标消息");
                let _ = channel.try_send(event);
            }
        });

        stop_flag.store(true, std::sync::atomic::Ordering::Relaxed);
        let _ = consumer.join();
    });

    // Single-thread send throughput.
    //
    // There is no consumer here, so the bounded queue fills after the first
    // ~100 timed iterations; once full, `try_send` would only exercise the
    // cheap failure branch. Drain the queue whenever a send fails so the
    // benchmark keeps measuring successful sends.
    group.bench_function("single_thread_throughput", |b| {
        let channel = LogChannel::new(100_000);

        b.iter(|| {
            for _ in 0..1000 {
                let event = LogEvent::new(Level::Info, "single_thread", "单线程吞吐量测试");
                if channel.try_send(event).is_err() {
                    while channel.receiver().try_recv().is_ok() {}
                }
            }
        });

        // Drain leftovers.
        while channel.receiver().try_recv().is_ok() {}
    });

    group.finish();
}

/// 基准测试：批量处理性能 (Epic 14)
///
/// 目标：
/// - 批量写入吞吐量 >= 2x 单条写入
/// - 批量缓冲区内存占用 < batch_size * LogEvent 大小
fn bench_batch_processing(c: &mut Criterion) {
    use log4r::appender::Appender;
    use log4r::async_rt::{LogChannel, Worker, WorkerConfig};
    use log4r::error::Log4rError;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    let mut group = c.benchmark_group("batch_processing");
    group.measurement_time(Duration::from_secs(5));

    // No-op Appender used to measure pure batching overhead; it only counts
    // how many times write/flush were invoked.
    struct NoopAppender {
        write_count: Arc<AtomicUsize>,
        flush_count: Arc<AtomicUsize>,
    }

    impl NoopAppender {
        fn new() -> Self {
            Self {
                write_count: Arc::new(AtomicUsize::new(0)),
                flush_count: Arc::new(AtomicUsize::new(0)),
            }
        }
    }

    impl Appender for NoopAppender {
        fn write(&self, _event: &LogEvent) -> Result<(), Log4rError> {
            self.write_count.fetch_add(1, Ordering::Relaxed);
            Ok(())
        }
        fn flush(&self) -> Result<(), Log4rError> {
            self.flush_count.fetch_add(1, Ordering::Relaxed);
            Ok(())
        }
        fn close(&self) -> Result<(), Log4rError> {
            Ok(())
        }
    }

    // Baseline: single-write mode (batch_size=1).
    group.bench_function("single_write_mode", |b| {
        let channel = LogChannel::new(10000);
        let sender = channel.sender();
        let appender = NoopAppender::new();
        let write_count = Arc::clone(&appender.write_count);
        let appenders: Vec<Arc<dyn Appender>> = vec![Arc::new(appender)];
        let fallback: Arc<dyn Appender> = Arc::new(NoopAppender::new());

        let config = WorkerConfig {
            batch_size: 1, // write one event at a time
            batch_timeout_ms: 1000,
        };

        let worker = Worker::start(
            channel.receiver().clone(),
            appenders,
            fallback,
            config,
            "bench_single".to_string(),
        );

        b.iter(|| {
            for _ in 0..1000 {
                let event = LogEvent::new(Level::Info, "bench", "single write benchmark");
                let _ = sender.send(event);
            }
        });

        // Give the worker time to finish processing before shutdown.
        // NOTE(review): 100ms is assumed sufficient for the queue to empty —
        // confirm against Worker's drain behavior on shutdown.
        thread::sleep(Duration::from_millis(100));
        drop(sender);
        let _ = worker.shutdown();

        println!("单条模式写入次数: {}", write_count.load(Ordering::Relaxed));
    });

    // Batched write mode (batch_size=100).
    group.bench_function("batch_write_mode_100", |b| {
        let channel = LogChannel::new(10000);
        let sender = channel.sender();
        let appender = NoopAppender::new();
        let write_count = Arc::clone(&appender.write_count);
        let flush_count = Arc::clone(&appender.flush_count);
        let appenders: Vec<Arc<dyn Appender>> = vec![Arc::new(appender)];
        let fallback: Arc<dyn Appender> = Arc::new(NoopAppender::new());

        let config = WorkerConfig {
            batch_size: 100, // write events in batches of 100
            batch_timeout_ms: 1000,
        };

        let worker = Worker::start(
            channel.receiver().clone(),
            appenders,
            fallback,
            config,
            "bench_batch".to_string(),
        );

        b.iter(|| {
            for _ in 0..1000 {
                let event = LogEvent::new(Level::Info, "bench", "batch write benchmark");
                let _ = sender.send(event);
            }
        });

        // Give the worker time to finish processing before shutdown.
        thread::sleep(Duration::from_millis(100));
        drop(sender);
        let _ = worker.shutdown();

        println!(
            "批量模式写入次数: {}, flush 次数: {}",
            write_count.load(Ordering::Relaxed),
            flush_count.load(Ordering::Relaxed)
        );
    });

    // Large-batch write mode (batch_size=500).
    group.bench_function("batch_write_mode_500", |b| {
        let channel = LogChannel::new(10000);
        let sender = channel.sender();
        let appender = NoopAppender::new();
        let appenders: Vec<Arc<dyn Appender>> = vec![Arc::new(appender)];
        let fallback: Arc<dyn Appender> = Arc::new(NoopAppender::new());

        let config = WorkerConfig {
            batch_size: 500,
            batch_timeout_ms: 1000,
        };

        let worker = Worker::start(
            channel.receiver().clone(),
            appenders,
            fallback,
            config,
            "bench_batch_large".to_string(),
        );

        b.iter(|| {
            for _ in 0..1000 {
                let event = LogEvent::new(Level::Info, "bench", "large batch benchmark");
                let _ = sender.send(event);
            }
        });

        thread::sleep(Duration::from_millis(100));
        drop(sender);
        let _ = worker.shutdown();
    });

    // Batch-buffer memory footprint test.
    group.bench_function("batch_buffer_memory", |b| {
        b.iter(|| {
            // Simulate allocating a batch buffer.
            let batch_size = 100;
            let mut buffer: Vec<LogEvent> = Vec::with_capacity(batch_size);

            // Fill the buffer.
            for _ in 0..batch_size {
                buffer.push(LogEvent::new(Level::Info, "memory", "memory test"));
            }

            // Compute the footprint from capacity * element size.
            let event_size = std::mem::size_of::<LogEvent>();
            let buffer_capacity_bytes = buffer.capacity() * event_size;

            black_box(buffer);
            black_box(buffer_capacity_bytes);
        });
    });

    group.finish();

    // Print a batching memory summary after the benchmarks complete.
    println!("\n📊 批量处理内存摘要:");
    println!("  LogEvent 大小: {} bytes", std::mem::size_of::<LogEvent>());
    println!(
        "  batch_size=100 缓冲区: {} bytes",
        100 * std::mem::size_of::<LogEvent>()
    );
    println!(
        "  batch_size=1000 缓冲区: {} bytes",
        1000 * std::mem::size_of::<LogEvent>()
    );
}

// Register every benchmark function with criterion and generate `main`.
criterion_group!(
    benches,
    bench_level_comparison,
    bench_log_event_creation,
    bench_text_formatter,
    bench_sync_logging,
    bench_async_logging,
    bench_parse_config,
    bench_memory_usage,
    bench_startup_time,
    bench_throughput_validation,
    bench_batch_processing,
);
criterion_main!(benches);
