//! # Kafka 消息总线适配器
//!
//! 基于 Apache Kafka 的消息总线实现，支持高吞吐量和可靠的消息传递
//!
//! ## 功能特性
//!
//! - **高吞吐量**：基于 Kafka 的高性能消息传递
//! - **分区支持**：自动分区和负载均衡
//! - **消费者组**：支持消费者组和并行处理
//! - **消息持久化**：Kafka 的可靠持久化存储
//! - **事务支持**：支持 Kafka 事务功能
//! - **压缩支持**：消息压缩和批量处理

use crate::{
    bus::{MessageBus, MessageBusStats, Subscriber, SubscriberStats},
    message::Message,
    subscriber::SubscriberConfig,
    error::{MessageBusError, MessageBusResult},
    router::MessageRouter,
};
use async_trait::async_trait;
use rdkafka::{
    ClientConfig, ClientContext,
    consumer::{Consumer, ConsumerContext, Rebalance, StreamConsumer},
    producer::{FutureProducer, FutureRecord},
    message::{OwnedHeaders, Header},
    util::get_rdkafka_version,
    error::KafkaError,
    Message as KafkaMessage, TopicPartitionList,
};
use serde::{Deserialize, Serialize};
use std::{
    collections::HashMap,
    sync::{Arc, atomic::{AtomicU64, AtomicUsize, Ordering}},
    time::{Duration, SystemTime, UNIX_EPOCH},
};
use tokio::{
    sync::{RwLock, mpsc},
    task::JoinHandle,
    time::{sleep, timeout},
};
use tracing::{debug, error, info, warn};
use uuid::Uuid;

/// Configuration for the Kafka message-bus adapter.
///
/// The [`Default`] values target a local, unauthenticated single-broker
/// development setup (`localhost:9092`, PLAINTEXT, no SASL).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KafkaAdapterConfig {
    /// Comma-separated list of Kafka broker addresses (`host:port`).
    pub bootstrap_servers: String,
    /// Security protocol (e.g. "PLAINTEXT", "SASL_SSL").
    pub security_protocol: String,
    /// SASL mechanism (e.g. "PLAIN"); `None` leaves SASL unconfigured.
    pub sasl_mechanism: Option<String>,
    /// SASL username; only applied when present.
    pub sasl_username: Option<String>,
    /// SASL password; only applied when present.
    pub sasl_password: Option<String>,
    /// Client ID reported to the brokers.
    pub client_id: String,
    /// Consumer group ID shared by all consumers created by this adapter.
    pub group_id: String,
    /// Consumer session timeout in milliseconds.
    pub session_timeout_ms: u32,
    /// Consumer heartbeat interval in milliseconds.
    pub heartbeat_interval_ms: u32,
    /// Whether consumer offsets are committed automatically.
    pub enable_auto_commit: bool,
    /// Auto-commit interval in milliseconds.
    pub auto_commit_interval_ms: u32,
    /// Offset reset policy ("earliest" / "latest").
    pub auto_offset_reset: String,
    /// Partition count (NOTE(review): not referenced anywhere in this file —
    /// topics are never created explicitly; confirm intended use).
    pub num_partitions: i32,
    /// Replication factor (NOTE(review): likewise unused in this file).
    pub replication_factor: i16,
    /// Producer batch size in bytes.
    pub batch_size: u32,
    /// Message compression type ("none", "gzip", ...).
    pub compression_type: String,
    /// Request timeout in milliseconds.
    pub request_timeout_ms: u32,
    /// Retry backoff in milliseconds.
    pub retry_backoff_ms: u32,
}

impl Default for KafkaAdapterConfig {
    /// Defaults suitable for a local, unauthenticated single-broker setup.
    /// The client id gets a fresh UUID suffix so concurrent instances are
    /// distinguishable on the broker side.
    fn default() -> Self {
        let client_id = format!("rustcloud_bus_{}", Uuid::new_v4());
        Self {
            bootstrap_servers: String::from("localhost:9092"),
            security_protocol: String::from("PLAINTEXT"),
            sasl_mechanism: None,
            sasl_username: None,
            sasl_password: None,
            client_id,
            group_id: String::from("rustcloud_bus_group"),
            session_timeout_ms: 10_000,
            heartbeat_interval_ms: 3_000,
            enable_auto_commit: true,
            auto_commit_interval_ms: 5_000,
            auto_offset_reset: String::from("latest"),
            num_partitions: 3,
            replication_factor: 1,
            batch_size: 16_384,
            compression_type: String::from("none"),
            request_timeout_ms: 30_000,
            retry_backoff_ms: 100,
        }
    }
}

/// Internal counters for the Kafka message bus.
///
/// Counters are lock-free atomics so the hot publish/receive paths never
/// block; `last_message_time` uses a std (not tokio) `RwLock` because it is
/// only touched for brief, non-async updates.
#[derive(Debug)]
struct KafkaMessageBusStats {
    /// Total messages successfully published.
    total_published: AtomicU64,
    /// Total messages successfully received and forwarded to subscribers.
    total_received: AtomicU64,
    /// Total publish/receive failures.
    total_failed: AtomicU64,
    /// Number of currently active subscribers.
    active_subscribers: AtomicUsize,
    /// Creation time; basis for uptime and throughput figures.
    start_time: std::time::SystemTime,
    /// Timestamp of the most recent published or received message.
    last_message_time: std::sync::RwLock<Option<std::time::SystemTime>>,
    /// Cumulative payload bytes across published and received messages.
    total_message_size: AtomicU64,
    /// Message count backing the messages-per-second estimate.
    message_count_for_rate: AtomicU64,
}

impl Default for KafkaMessageBusStats {
    /// All counters start at zero; uptime is measured from construction.
    fn default() -> Self {
        Self {
            total_published: AtomicU64::default(),
            total_received: AtomicU64::default(),
            total_failed: AtomicU64::default(),
            active_subscribers: AtomicUsize::default(),
            start_time: std::time::SystemTime::now(),
            last_message_time: std::sync::RwLock::default(),
            total_message_size: AtomicU64::default(),
            message_count_for_rate: AtomicU64::default(),
        }
    }
}

impl KafkaMessageBusStats {
    /// Records a successfully published message of `size` bytes.
    fn record_published_message(&self, size: usize) {
        self.total_published.fetch_add(1, Ordering::Relaxed);
        self.track_message(size);
    }

    /// Records a successfully received message of `size` bytes.
    fn record_received_message(&self, size: usize) {
        self.total_received.fetch_add(1, Ordering::Relaxed);
        self.track_message(size);
    }

    /// Bookkeeping shared by the publish and receive paths: accumulate the
    /// payload size, bump the rate counter, and stamp the last-message time.
    fn track_message(&self, size: usize) {
        self.total_message_size.fetch_add(size as u64, Ordering::Relaxed);
        self.message_count_for_rate.fetch_add(1, Ordering::Relaxed);
        *self.last_message_time.write().unwrap() = Some(std::time::SystemTime::now());
    }

    /// Records one failed publish or receive.
    fn record_failed_message(&self) {
        self.total_failed.fetch_add(1, Ordering::Relaxed);
    }

    /// Seconds elapsed since this stats object was created
    /// (0 if the system clock went backwards).
    fn get_uptime_seconds(&self) -> u64 {
        self.start_time.elapsed().unwrap_or_default().as_secs()
    }

    /// Average throughput over the whole uptime (published + received).
    fn get_messages_per_second(&self) -> f64 {
        match self.get_uptime_seconds() {
            0 => 0.0,
            uptime => self.message_count_for_rate.load(Ordering::Relaxed) as f64 / uptime as f64,
        }
    }

    /// Mean payload size in bytes across all published and received messages.
    fn get_average_message_size(&self) -> u64 {
        let published = self.total_published.load(Ordering::Relaxed);
        let received = self.total_received.load(Ordering::Relaxed);
        let total = published + received;
        if total == 0 {
            0
        } else {
            self.total_message_size.load(Ordering::Relaxed) / total
        }
    }
}

/// Kafka-backed implementation of [`MessageBus`].
pub struct KafkaMessageBus {
    /// Adapter configuration shared with every consumer this bus creates.
    config: KafkaAdapterConfig,
    /// Single shared producer used by all `publish` calls.
    producer: Arc<FutureProducer>,
    /// Message router (NOTE(review): never referenced in this file —
    /// confirm whether routing happens elsewhere or this is vestigial).
    router: Arc<MessageRouter>,
    /// Shared statistics counters.
    stats: Arc<KafkaMessageBusStats>,
    /// Active subscribers keyed by subscription id.
    subscribers: Arc<RwLock<HashMap<String, Arc<KafkaSubscriber>>>>,
    /// Set once by `close`; checked by `publish`/`subscribe`/`health_check`.
    is_closed: Arc<std::sync::atomic::AtomicBool>,
    /// Background task handles aborted on `close`
    /// (NOTE(review): nothing in this file ever pushes into this vec).
    tasks: Arc<RwLock<Vec<JoinHandle<()>>>>,
}

/// Consumer context supplying rebalance-event logging hooks to rdkafka.
#[derive(Clone)]
struct KafkaConsumerContext;

// Default client-level callbacks are sufficient; only consumer hooks are customized.
impl ClientContext for KafkaConsumerContext {}

impl ConsumerContext for KafkaConsumerContext {
    /// Logs partition assignments/revocations before a rebalance takes effect.
    fn pre_rebalance(&self, rebalance: &Rebalance) {
        match rebalance {
            Rebalance::Assign(partitions) => {
                info!("Kafka 分区分配: {:?}", partitions);
            }
            Rebalance::Revoke => {
                info!("Kafka 分区撤销");
            }
            Rebalance::Error(e) => {
                error!("Kafka 重平衡错误: {:?}", e);
            }
        }
    }

    /// Logs the outcome after a rebalance has completed.
    fn post_rebalance(&self, rebalance: &Rebalance) {
        match rebalance {
            Rebalance::Assign(partitions) => {
                info!("Kafka 分区分配完成: {:?}", partitions);
            }
            Rebalance::Revoke => {
                info!("Kafka 分区撤销完成");
            }
            Rebalance::Error(e) => {
                error!("Kafka 重平衡后错误: {:?}", e);
            }
        }
    }
}

impl KafkaMessageBus {
    /// 创建新的 Kafka 消息总线
    pub async fn new() -> MessageBusResult<Self> {
        let config = KafkaAdapterConfig::default();
        Self::with_config(config).await
    }

    /// 使用配置创建 Kafka 消息总线
    pub async fn with_config(config: KafkaAdapterConfig) -> MessageBusResult<Self> {
        // 创建生产者
        let producer = Self::create_producer(&config)?;

        let bus = Self {
            config,
            producer: Arc::new(producer),
            router: Arc::new(MessageRouter::new()),
            stats: Arc::new(KafkaMessageBusStats::default()),
            subscribers: Arc::new(RwLock::new(HashMap::new())),
            is_closed: Arc::new(std::sync::atomic::AtomicBool::new(false)),
            tasks: Arc::new(RwLock::new(Vec::new())),
        };

        info!("Kafka 消息总线初始化完成，版本: {:?}", get_rdkafka_version());
        Ok(bus)
    }

    /// 创建生产者
    fn create_producer(config: &KafkaAdapterConfig) -> MessageBusResult<FutureProducer> {
        let mut client_config = ClientConfig::new();
        
        client_config
            .set("bootstrap.servers", &config.bootstrap_servers)
            .set("security.protocol", &config.security_protocol)
            .set("client.id", &config.client_id)
            .set("batch.size", config.batch_size.to_string())
            .set("compression.type", &config.compression_type)
            .set("request.timeout.ms", config.request_timeout_ms.to_string())
            .set("retry.backoff.ms", config.retry_backoff_ms.to_string());

        // 配置 SASL（如果启用）
        if let Some(mechanism) = &config.sasl_mechanism {
            client_config.set("sasl.mechanism", mechanism);
        }
        if let Some(username) = &config.sasl_username {
            client_config.set("sasl.username", username);
        }
        if let Some(password) = &config.sasl_password {
            client_config.set("sasl.password", password);
        }

        client_config
            .create()
            .map_err(|e| MessageBusError::connection_error(format!("创建 Kafka 生产者失败: {}", e)))
    }

    /// 创建消费者
    fn create_consumer(config: &KafkaAdapterConfig) -> MessageBusResult<StreamConsumer<KafkaConsumerContext>> {
        let context = KafkaConsumerContext;
        let mut client_config = ClientConfig::new();
        
        client_config
            .set("bootstrap.servers", &config.bootstrap_servers)
            .set("security.protocol", &config.security_protocol)
            .set("group.id", &config.group_id)
            .set("client.id", &config.client_id)
            .set("session.timeout.ms", config.session_timeout_ms.to_string())
            .set("heartbeat.interval.ms", config.heartbeat_interval_ms.to_string())
            .set("enable.auto.commit", config.enable_auto_commit.to_string())
            .set("auto.commit.interval.ms", config.auto_commit_interval_ms.to_string())
            .set("auto.offset.reset", &config.auto_offset_reset);

        // 配置 SASL（如果启用）
        if let Some(mechanism) = &config.sasl_mechanism {
            client_config.set("sasl.mechanism", mechanism);
        }
        if let Some(username) = &config.sasl_username {
            client_config.set("sasl.username", username);
        }
        if let Some(password) = &config.sasl_password {
            client_config.set("sasl.password", password);
        }

        client_config
            .create_with_context(context)
            .map_err(|e| MessageBusError::connection_error(format!("创建 Kafka 消费者失败: {}", e)))
    }

    /// 序列化消息
    fn serialize_message(&self, message: &Message) -> MessageBusResult<String> {
        serde_json::to_string(message)
            .map_err(|e| MessageBusError::serialization_error(format!("消息序列化失败: {}", e)))
    }

    /// 反序列化消息
    fn deserialize_message(&self, data: &str) -> MessageBusResult<Message> {
        serde_json::from_str(data)
            .map_err(|e| MessageBusError::deserialization_error(format!("消息反序列化失败: {}", e)))
    }

    /// 获取主题名称（将消息主题转换为 Kafka 主题）
    fn get_kafka_topic(&self, topic: &str) -> String {
        format!("rustcloud_{}", topic.replace('.', "_"))
    }
    
    /// 创建 AdminClient
    fn create_admin_client(&self) -> MessageBusResult<rdkafka::admin::AdminClient<rdkafka::client::DefaultClientContext>> {
        use rdkafka::admin::AdminClient;
        use rdkafka::client::DefaultClientContext;
        
        let mut client_config = ClientConfig::new();
        
        client_config
            .set("bootstrap.servers", &self.config.bootstrap_servers)
            .set("security.protocol", &self.config.security_protocol)
            .set("client.id", &format!("{}_admin", self.config.client_id));

        // 配置 SASL（如果启用）
        if let Some(mechanism) = &self.config.sasl_mechanism {
            client_config.set("sasl.mechanism", mechanism);
        }
        if let Some(username) = &self.config.sasl_username {
            client_config.set("sasl.username", username);
        }
        if let Some(password) = &self.config.sasl_password {
            client_config.set("sasl.password", password);
        }

        client_config
            .create::<AdminClient<DefaultClientContext>>()
            .map_err(|e| MessageBusError::connection_error(format!("创建 Kafka AdminClient 失败: {}", e)))
    }
}

#[async_trait]
impl MessageBus for KafkaMessageBus {
    async fn publish(&self, message: Message) -> MessageBusResult<()> {
        if self.is_closed.load(Ordering::Relaxed) {
            return Err(MessageBusError::internal_error("消息总线已关闭"));
        }

        let serialized = self.serialize_message(&message)?;
        let kafka_topic = self.get_kafka_topic(message.topic());
        
        // 创建消息头
        let mut headers = OwnedHeaders::new();
        let message_id_str = message.id().to_string();
        let correlation_id_str = message.metadata().correlation_id.map(|id| id.to_string());
        
        headers = headers.insert(Header { key: "source", value: Some("rustcloud_bus") });
        headers = headers.insert(Header { key: "message_id", value: Some(&message_id_str) });
        
        if let Some(ref correlation_id) = correlation_id_str {
            headers = headers.insert(Header { key: "correlation_id", value: Some(correlation_id) });
        }

        // 创建 Kafka 记录
        let message_key = message.id().to_string();
        let record = FutureRecord::to(&kafka_topic)
            .payload(&serialized)
            .key(&message_key)
            .headers(headers);

        // 发送消息
        let serialized_size = serialized.len();
        match self.producer.send(record, Duration::from_secs(5)).await {
            Ok((partition, offset)) => {
                debug!("消息发布成功: topic={}, partition={}, offset={}", kafka_topic, partition, offset);
                self.stats.record_published_message(serialized_size);
                Ok(())
            }
            Err((e, _)) => {
                self.stats.record_failed_message();
                Err(MessageBusError::publish_error(kafka_topic, format!("Kafka 发布失败: {}", e)))
            }
        }
    }

    async fn subscribe(&self, pattern: &str) -> MessageBusResult<Box<dyn Subscriber>> {
        if self.is_closed.load(Ordering::Relaxed) {
            return Err(MessageBusError::internal_error("消息总线已关闭"));
        }

        let subscription_id = Uuid::new_v4().to_string();
        let kafka_topic = self.get_kafka_topic(pattern);
        let kafka_topic_for_log = kafka_topic.clone();
        
        let config = SubscriberConfig {
            buffer_size: 1000,
            auto_ack: true,
            max_unacked: 100,
            message_timeout_ms: 5000,
            prefetch_count: 10,
        };

        let subscriber = KafkaSubscriber::new(
            subscription_id.clone(),
            pattern.to_string(),
            kafka_topic,
            config,
            self.config.clone(),
            self.stats.clone(),
        ).await?;

        let subscriber_arc = Arc::new(subscriber);
        
        {
            let mut subs = self.subscribers.write().await;
            subs.insert(subscription_id.clone(), subscriber_arc.clone());
        }

        self.stats.active_subscribers.fetch_add(1, Ordering::Relaxed);
        
        info!("创建 Kafka 订阅者: pattern={}, topic={}, id={}", pattern, kafka_topic_for_log, subscription_id);
        Ok(Box::new(subscriber_arc.as_ref().clone()))
    }

    async fn unsubscribe(&self, subscription_id: &str) -> MessageBusResult<()> {
        let mut subs = self.subscribers.write().await;
        if let Some(subscriber) = subs.remove(subscription_id) {
            subscriber.close().await?;
            self.stats.active_subscribers.fetch_sub(1, Ordering::Relaxed);
            info!("取消 Kafka 订阅: id={}", subscription_id);
        }
        Ok(())
    }

    async fn list_topics(&self) -> MessageBusResult<Vec<String>> {
        // 使用 AdminClient 获取主题列表
        use rdkafka::admin::{AdminClient, AdminOptions};
        use rdkafka::client::DefaultClientContext;
        
        let admin_client: AdminClient<DefaultClientContext> = self.create_admin_client()?;
        
        // 设置操作选项
        let _opts = AdminOptions::new().operation_timeout(Some(Duration::from_secs(10)));
        
        match admin_client.inner().fetch_metadata(None, Some(Duration::from_secs(10))) {
            Ok(metadata) => {
                let topics: Vec<String> = metadata
                    .topics()
                    .iter()
                    .map(|topic| topic.name().to_string())
                    .filter(|name| name.starts_with("rustcloud_"))
                    .collect();
                Ok(topics)
            }
            Err(e) => {
                warn!("获取 Kafka 主题列表失败: {}", e);
                Ok(vec![]) // 返回空列表而不是错误
            }
        }
    }

    async fn stats(&self) -> MessageBusResult<MessageBusStats> {
        let active_topics = self.subscribers.read().await.len();
        
        // 尝试获取队列长度（Kafka 中这个概念不直接适用，返回 0）
        let queue_length = 0;
        
        // 估算内存使用量（简单估算）
        let subscribers_count = self.stats.active_subscribers.load(Ordering::Relaxed);
        let estimated_memory = subscribers_count * 1024 * 1024; // 每个订阅者估算 1MB
        
        Ok(MessageBusStats {
            total_published: self.stats.total_published.load(Ordering::Relaxed),
            total_received: self.stats.total_received.load(Ordering::Relaxed),
            total_failed: self.stats.total_failed.load(Ordering::Relaxed),
            active_subscribers: self.stats.active_subscribers.load(Ordering::Relaxed),
            active_topics,
            queue_length,
            uptime_seconds: self.stats.get_uptime_seconds(),
            messages_per_second: self.stats.get_messages_per_second(),
            average_message_size: self.stats.get_average_message_size() as usize,
            memory_usage: estimated_memory,
        })
    }

    async fn health_check(&self) -> MessageBusResult<bool> {
        if self.is_closed.load(Ordering::Relaxed) {
            return Ok(false);
        }

        // 尝试获取集群元数据来验证连接
        use rdkafka::admin::{AdminClient, AdminOptions};
        use rdkafka::client::DefaultClientContext;
        
        match self.create_admin_client() {
            Ok(admin_client) => {
                let _opts = AdminOptions::new().operation_timeout(Some(Duration::from_secs(5)));
                match admin_client.inner().fetch_metadata(None, Some(Duration::from_secs(5))) {
                    Ok(_) => {
                        debug!("Kafka 健康检查通过");
                        Ok(true)
                    }
                    Err(e) => {
                        warn!("Kafka 健康检查失败: {}", e);
                        Ok(false)
                    }
                }
            }
            Err(e) => {
                error!("创建 Kafka AdminClient 失败: {}", e);
                Ok(false)
            }
        }
    }

    async fn close(&self) -> MessageBusResult<()> {
        if self.is_closed.swap(true, Ordering::Relaxed) {
            return Ok(()); // 已经关闭
        }

        info!("正在关闭 Kafka 消息总线...");

        // 关闭所有订阅者
        let mut subs = self.subscribers.write().await;
        for (_, subscriber) in subs.drain() {
            let _ = subscriber.close().await;
        }

        // 停止所有后台任务
        let mut tasks = self.tasks.write().await;
        for task in tasks.drain(..) {
            task.abort();
        }

        info!("Kafka 消息总线已关闭");
        Ok(())
    }

    async fn subscriber_count(&self, topic: &str) -> MessageBusResult<usize> {
        let subs = self.subscribers.read().await;
        let mut count = 0;
        for subscriber in subs.values() {
            if subscriber.is_active() && subscriber.pattern() == topic {
                count += 1;
            }
        }
        Ok(count)
    }

    async fn has_subscribers(&self, topic: &str) -> MessageBusResult<bool> {
        let count = self.subscriber_count(topic).await?;
        Ok(count > 0)
    }
}

/// Subscriber backed by a dedicated Kafka `StreamConsumer`.
///
/// A background task (spawned in `start_receiving_task`) pulls records from
/// Kafka, deserializes them, and feeds an in-process channel that the
/// [`Subscriber`] trait methods drain. Cloning shares all Arc-held state.
#[derive(Clone)]
pub struct KafkaSubscriber {
    /// Unique id assigned by the bus at subscribe time.
    subscription_id: String,
    /// Logical topic pattern this subscriber was created for.
    pattern: String,
    /// Concrete Kafka topic name derived from `pattern`.
    kafka_topic: String,
    /// Subscriber-level configuration
    /// (NOTE(review): stored but never consulted in this file — confirm).
    config: SubscriberConfig,
    /// Kafka connection settings used to build this subscriber's consumer.
    kafka_config: KafkaAdapterConfig,
    /// Bus-wide statistics counters (shared with the owning bus).
    stats: Arc<KafkaMessageBusStats>,
    /// Receiving end of the channel fed by the background consumer task;
    /// set to `None` once closed.
    receiver: Arc<RwLock<Option<mpsc::UnboundedReceiver<Message>>>>,
    /// Set once by `close`; checked by every receive path.
    is_closed: Arc<std::sync::atomic::AtomicBool>,
    /// Handle of the background consumer task; aborted on `close`.
    task_handle: Arc<RwLock<Option<JoinHandle<()>>>>,
}

impl KafkaSubscriber {
    /// Creates a subscriber and spawns its background consumer task.
    ///
    /// # Errors
    /// Fails if the underlying Kafka consumer cannot be created or the
    /// topic subscription is rejected.
    async fn new(
        subscription_id: String,
        pattern: String,
        kafka_topic: String,
        config: SubscriberConfig,
        kafka_config: KafkaAdapterConfig,
        bus_stats: Arc<KafkaMessageBusStats>,
    ) -> MessageBusResult<Self> {
        let (tx, rx) = mpsc::unbounded_channel();

        let subscriber = Self {
            subscription_id,
            pattern,
            kafka_topic,
            config,
            kafka_config: kafka_config.clone(),
            stats: bus_stats,
            receiver: Arc::new(RwLock::new(Some(rx))),
            is_closed: Arc::new(std::sync::atomic::AtomicBool::new(false)),
            task_handle: Arc::new(RwLock::new(None)),
        };

        // Spawn the consumer loop and retain its handle so `close` can abort it.
        let task = subscriber.start_receiving_task(tx).await?;
        *subscriber.task_handle.write().await = Some(task);

        Ok(subscriber)
    }

    /// Spawns the loop that pulls records from Kafka, deserializes them,
    /// and forwards them into `tx` until the subscriber is closed.
    async fn start_receiving_task(&self, tx: mpsc::UnboundedSender<Message>) -> MessageBusResult<JoinHandle<()>> {
        let consumer = KafkaMessageBus::create_consumer(&self.kafka_config)?;
        let kafka_topic = self.kafka_topic.clone();
        let stats = self.stats.clone();
        let is_closed = self.is_closed.clone();

        // Subscribe before spawning so subscription errors surface to the caller.
        consumer
            .subscribe(&[&kafka_topic])
            .map_err(|e| MessageBusError::subscribe_error(&kafka_topic, format!("Kafka 订阅失败: {}", e)))?;

        let task = tokio::spawn(async move {
            while !is_closed.load(Ordering::Relaxed) {
                match consumer.recv().await {
                    Ok(borrowed_message) => {
                        // Records with no payload (e.g. tombstones) are skipped.
                        if let Some(payload) = borrowed_message.payload() {
                            // Fix: invalid UTF-8 payloads were previously dropped
                            // silently; they are now logged and counted as
                            // failures, same as deserialization errors.
                            match std::str::from_utf8(payload) {
                                Ok(payload_str) => match serde_json::from_str::<Message>(payload_str) {
                                    Ok(message) => {
                                        if tx.send(message).is_err() {
                                            warn!("发送消息到通道失败，接收者可能已关闭");
                                            break;
                                        }
                                        stats.record_received_message(payload.len());
                                    }
                                    Err(_) => {
                                        warn!("消息反序列化失败");
                                        stats.record_failed_message();
                                    }
                                },
                                Err(_) => {
                                    warn!("消息负载不是有效的 UTF-8");
                                    stats.record_failed_message();
                                }
                            }
                        }
                    }
                    Err(e) => {
                        if !is_closed.load(Ordering::Relaxed) {
                            error!("从 Kafka 接收消息失败: {}", e);
                            stats.record_failed_message();
                            // Back off briefly so a broken connection does not spin.
                            sleep(Duration::from_millis(1000)).await;
                        }
                    }
                }
            }
        });

        Ok(task)
    }
}

#[async_trait]
impl Subscriber for KafkaSubscriber {
    /// Returns the unique subscription id assigned at subscribe time.
    fn subscription_id(&self) -> &str {
        &self.subscription_id
    }

    /// Returns the logical topic pattern this subscriber was created for.
    fn pattern(&self) -> &str {
        &self.pattern
    }

    /// A subscriber is active until `close` has been called.
    fn is_active(&self) -> bool {
        !self.is_closed.load(Ordering::Relaxed)
    }

    /// Receives the next message, waiting up to 5 seconds
    /// (delegates to `receive_timeout`).
    async fn receive(&self) -> MessageBusResult<Option<Message>> {
        self.receive_timeout(5000).await
    }

    /// Non-blocking receive: `Ok(None)` when no message is buffered.
    ///
    /// # Errors
    /// Fails if the subscription has been closed (flag set or channel gone).
    async fn try_receive(&self) -> MessageBusResult<Option<Message>> {
        if self.is_closed.load(Ordering::Relaxed) {
            return Err(MessageBusError::internal_error("订阅已关闭"));
        }

        let mut receiver_guard = self.receiver.write().await;
        if let Some(ref mut receiver) = receiver_guard.as_mut() {
            match receiver.try_recv() {
                Ok(message) => Ok(Some(message)),
                Err(mpsc::error::TryRecvError::Empty) => Ok(None),
                Err(mpsc::error::TryRecvError::Disconnected) => {
                    Err(MessageBusError::internal_error("订阅已关闭"))
                }
            }
        } else {
            Err(MessageBusError::internal_error("订阅已关闭"))
        }
    }

    /// Receives the next message, waiting at most `timeout_ms` milliseconds;
    /// `Ok(None)` on timeout.
    ///
    /// NOTE(review): holds the receiver write lock across the await, so
    /// concurrent receive calls on clones serialize behind each other.
    ///
    /// # Errors
    /// Fails if the subscription has been closed.
    async fn receive_timeout(&self, timeout_ms: u64) -> MessageBusResult<Option<Message>> {
        if self.is_closed.load(Ordering::Relaxed) {
            return Err(MessageBusError::internal_error("订阅已关闭"));
        }

        let mut receiver_guard = self.receiver.write().await;
        if let Some(ref mut receiver) = receiver_guard.as_mut() {
            match timeout(Duration::from_millis(timeout_ms), receiver.recv()).await {
                Ok(Some(message)) => Ok(Some(message)),
                Ok(None) => Err(MessageBusError::internal_error("订阅已关闭")),
                Err(_) => Ok(None), // timed out
            }
        } else {
            Err(MessageBusError::internal_error("订阅已关闭"))
        }
    }

    /// No-op: offsets are committed by Kafka auto-commit (when enabled in
    /// the adapter config), so there is nothing to acknowledge here.
    async fn ack(&self, _message: &Message) -> MessageBusResult<()> {
        // Kafka auto-commit handles acknowledgement
        Ok(())
    }

    /// No-op. NOTE(review): requeue semantics are not implemented for Kafka;
    /// a nacked message is not redelivered — confirm this is acceptable.
    async fn nack(&self, _message: &Message, _requeue: bool) -> MessageBusResult<()> {
        // Kafka NACK handling
        Ok(())
    }

    /// Returns per-subscriber statistics.
    ///
    /// NOTE(review): placeholder — per-subscriber counters are not tracked,
    /// so every numeric field is reported as zero.
    async fn stats(&self) -> MessageBusResult<SubscriberStats> {
        Ok(SubscriberStats {
            total_received: 0,
            total_acked: 0,
            total_nacked: 0,
            queue_length: 0,
            average_processing_time_ms: 0.0,
            last_received_at: None,
            subscribed_at: chrono::Utc::now(),
        })
    }

    /// Closes the subscriber: aborts the consumer task and drops the channel
    /// receiver. Idempotent.
    async fn close(&self) -> MessageBusResult<()> {
        // `swap` returns the previous value; a second close is a no-op.
        if self.is_closed.swap(true, Ordering::Relaxed) {
            return Ok(());
        }

        // Stop the background consumer task.
        if let Some(task) = self.task_handle.write().await.take() {
            task.abort();
        }

        // Drop the receiver so pending senders observe disconnection.
        *self.receiver.write().await = None;

        debug!("Kafka 订阅者已关闭: {}", self.subscription_id);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    /// Builds a bus pointed at a local broker for the ignored integration tests.
    async fn create_test_bus() -> KafkaMessageBus {
        let config = KafkaAdapterConfig {
            bootstrap_servers: "localhost:9092".to_string(),
            group_id: "test_group".to_string(),
            ..Default::default()
        };
        
        KafkaMessageBus::with_config(config).await.unwrap()
    }

    #[tokio::test]
    #[ignore] // requires a running Kafka broker
    async fn test_kafka_bus_creation() {
        let bus = create_test_bus().await;
        assert!(bus.health_check().await.unwrap());
    }

    #[tokio::test]
    #[ignore] // requires a running Kafka broker
    async fn test_kafka_publish_subscribe() {
        let bus = create_test_bus().await;
        
        let subscriber = bus.subscribe("test.topic").await.unwrap();
        
        let message = Message::new("test.topic", json!({"data": "test"}));
        bus.publish(message).await.unwrap();
        
        let received = subscriber.receive_timeout(5000).await.unwrap();
        assert!(received.is_some());
        
        let received_msg = received.unwrap();
        assert_eq!(received_msg.topic(), "test.topic");
        assert_eq!(received_msg.payload()["data"], "test");
    }

    #[tokio::test]
    #[ignore] // requires a running Kafka broker
    async fn test_kafka_multiple_subscribers() {
        let bus = create_test_bus().await;
        
        let sub1 = bus.subscribe("multi.test").await.unwrap();
        let sub2 = bus.subscribe("multi.test").await.unwrap();
        
        let message = Message::new("multi.test", json!({"id": 1}));
        bus.publish(message).await.unwrap();
        
        // In Kafka, consumers in the same group split partitions between them,
        // so possibly only one of the two subscribers receives the message.
        let msg1 = sub1.receive_timeout(5000).await.unwrap();
        let msg2 = sub2.receive_timeout(1000).await.unwrap();
        
        // At least one subscriber must have received the message.
        assert!(msg1.is_some() || msg2.is_some());
    }

    #[tokio::test]
    #[ignore] // requires a running Kafka broker
    async fn test_kafka_stats() {
        let bus = create_test_bus().await;
        
        let message = Message::new("stats.test", json!({"test": true}));
        bus.publish(message).await.unwrap();
        
        let stats = bus.stats().await.unwrap();
        assert!(stats.total_published > 0);
    }
}