//! Kafka生产者实现
//! 
//! 提供高性能、可靠的Kafka消息发送功能

use std::time::Duration;
use std::collections::HashMap;
use rdkafka::{
    config::ClientConfig,
    producer::{FutureProducer, FutureRecord, DeliveryResult, Producer},
    error::KafkaError,
    message::OwnedHeaders,
    util::Timeout,
};
use anyhow::{Result, anyhow};
use tracing::{info, error, warn, debug};
use tokio::time::timeout;

use crate::kafka::config::{KafkaConfig, ProducerConfig};
use crate::kafka::messages::KafkaMessage;

/// Kafka生产者
/// Kafka producer: a thin wrapper around rdkafka's `FutureProducer`
/// that keeps the application-level `KafkaConfig` alongside it so
/// convenience senders can resolve topic names and timeouts.
pub struct KafkaProducer {
    /// Underlying rdkafka async producer instance.
    producer: FutureProducer,
    /// Full Kafka configuration (brokers, topics, producer tuning).
    config: KafkaConfig,
}

/// Kafka生产者构建器
/// Builder for [`KafkaProducer`]: accumulates rdkafka client settings
/// derived from a `KafkaConfig` before creating the producer.
pub struct KafkaProducerBuilder {
    /// Application-level configuration, retained for the built producer.
    config: KafkaConfig,
    /// rdkafka client configuration being assembled.
    client_config: ClientConfig,
}

impl KafkaProducerBuilder {
    /// Create a new builder, pre-populating the rdkafka client
    /// configuration from `config`: broker list, security/SASL
    /// settings, producer tuning, and any extra properties.
    pub fn new(config: KafkaConfig) -> Self {
        let mut client_config = ClientConfig::new();

        // Broker list.
        client_config.set("bootstrap.servers", config.brokers.join(","));

        // Security settings; optional SASL fields are applied only when present.
        client_config.set("security.protocol", &config.security.protocol);
        let sasl_options = [
            ("sasl.mechanism", &config.security.sasl_mechanism),
            ("sasl.username", &config.security.sasl_username),
            ("sasl.password", &config.security.sasl_password),
        ];
        for (key, value) in sasl_options {
            if let Some(v) = value {
                client_config.set(key, v);
            }
        }

        // Producer tuning.
        client_config
            .set("acks", &config.producer.acks)
            .set("retries", config.producer.retries.to_string())
            .set("batch.size", config.producer.batch_size.to_string())
            .set("compression.type", &config.producer.compression_type)
            .set("request.timeout.ms", config.producer.request_timeout_ms.to_string());

        // Free-form additional properties are applied last, so they can
        // override any of the settings above.
        for (key, value) in &config.producer.additional_properties {
            client_config.set(key, value);
        }

        Self {
            config,
            client_config,
        }
    }

    /// Set the `client.id` reported to the brokers.
    pub fn client_id(mut self, client_id: &str) -> Self {
        self.client_config.set("client.id", client_id);
        self
    }

    /// Toggle idempotent produce (`enable.idempotence`).
    pub fn enable_idempotence(mut self, enable: bool) -> Self {
        self.client_config.set("enable.idempotence", enable.to_string());
        self
    }

    /// Cap the number of unacknowledged requests per broker connection.
    pub fn max_in_flight_requests_per_connection(mut self, max: u32) -> Self {
        self.client_config.set("max.in.flight.requests.per.connection", max.to_string());
        self
    }

    /// Build the producer from the accumulated configuration.
    pub fn build(self) -> Result<KafkaProducer> {
        info!("创建Kafka生产者，brokers: {:?}", self.config.brokers);

        match self.client_config.create::<FutureProducer>() {
            Ok(producer) => Ok(KafkaProducer {
                producer,
                config: self.config,
            }),
            Err(e) => Err(anyhow!("创建Kafka生产者失败: {}", e)),
        }
    }
}

impl KafkaProducer {
    /// 发送消息
    pub async fn send_message(
        &self,
        topic: &str,
        message: &KafkaMessage,
        key: Option<&str>,
    ) -> Result<()> {
        let json = message.to_json()
            .map_err(|e| anyhow!("序列化消息失败: {}", e))?;
        
        self.send_raw(topic, key, &json, Some(&message.header().message_type)).await
    }
    
    /// 发送原始消息
    pub async fn send_raw(
        &self,
        topic: &str,
        key: Option<&str>,
        payload: &str,
        message_type: Option<&str>,
    ) -> Result<()> {
        debug!("发送Kafka消息到主题: {}, key: {:?}", topic, key);
        
        let mut record = FutureRecord::to(topic).payload(payload);
        
        if let Some(k) = key {
            record = record.key(k);
        }
        
        // 添加消息头
        let mut headers = OwnedHeaders::new();
        if let Some(msg_type) = message_type {
            headers = headers.insert(rdkafka::message::Header {
                key: "message_type",
                value: Some(msg_type),
            });
        }
        headers = headers.insert(rdkafka::message::Header {
            key: "sender",
            value: Some("koda-sandbox-manager"),
        });
        record = record.headers(headers);
        
        // 发送消息，带超时
        let timeout_duration = Duration::from_millis(self.config.producer.request_timeout_ms as u64);
        let delivery_result = timeout(
            timeout_duration,
            self.producer.send(record, Timeout::Never)
        ).await;
        
        match delivery_result {
            Ok(Ok((partition, offset))) => {
                debug!("消息发送成功: partition={}, offset={}", partition, offset);
                Ok(())
            }
            Ok(Err((error, _))) => {
                error!("Kafka发送失败: {}", error);
                Err(anyhow!("Kafka发送失败: {}", error))
            }
            Err(_) => {
                error!("Kafka发送超时");
                Err(anyhow!("Kafka发送超时"))
            }
        }
    }
    
    /// 发送沙箱事件
    pub async fn send_sandbox_event(
        &self,
        message: &crate::kafka::messages::SandboxEventMessage,
    ) -> Result<()> {
        let kafka_message = KafkaMessage::SandboxEvent(message.clone());
        let key = message.sandbox_id.to_string();
        
        self.send_message(
            &self.config.topics.sandbox_events,
            &kafka_message,
            Some(&key)
        ).await
    }
    
    /// 发送执行任务
    pub async fn send_execution_task(
        &self,
        message: &crate::kafka::messages::ExecutionTaskMessage,
    ) -> Result<()> {
        let kafka_message = KafkaMessage::ExecutionTask(message.clone());
        let key = message.task_id.clone();
        
        self.send_message(
            &self.config.topics.execution_tasks,
            &kafka_message,
            Some(&key)
        ).await
    }
    
    /// 发送系统指标
    pub async fn send_system_metrics(
        &self,
        message: &crate::kafka::messages::SystemMetricsMessage,
    ) -> Result<()> {
        let kafka_message = KafkaMessage::SystemMetrics(message.clone());
        
        self.send_message(
            &self.config.topics.system_metrics,
            &kafka_message,
            None
        ).await
    }
    
    /// 发送错误报告
    pub async fn send_error_report(
        &self,
        message: &crate::kafka::messages::ErrorReportMessage,
    ) -> Result<()> {
        let kafka_message = KafkaMessage::ErrorReport(message.clone());
        let key = message.sandbox_id.as_ref().map(|id| id.to_string());
        
        self.send_message(
            &self.config.topics.error_reports,
            &kafka_message,
            key.as_deref()
        ).await
    }
    
    /// 发送健康检查结果
    pub async fn send_health_check(
        &self,
        message: &crate::kafka::messages::HealthCheckMessage,
    ) -> Result<()> {
        let kafka_message = KafkaMessage::HealthCheck(message.clone());
        let key = message.component.clone();
        
        self.send_message(
            &self.config.topics.health_checks,
            &kafka_message,
            Some(&key)
        ).await
    }
    
    /// 批量发送消息
    pub async fn send_batch(
        &self,
        messages: Vec<(String, KafkaMessage, Option<String>)>
    ) -> Result<Vec<Result<()>>> {
        let mut results = Vec::new();
        
        for (topic, message, key) in messages {
            let result = self.send_message(
                &topic,
                &message,
                key.as_deref()
            ).await;
            results.push(result);
        }
        
        Ok(results)
    }
    
    /// 刷新并等待所有消息发送完成
    pub async fn flush(&self, timeout_ms: u64) -> Result<()> {
        let timeout_duration = Duration::from_millis(timeout_ms);
        
        match timeout(timeout_duration, async {
            self.producer.flush(Timeout::Never)
        }).await {
            Ok(Ok(())) => {
                info!("Kafka生产者刷新完成");
                Ok(())
            }
            Ok(Err(e)) => {
                error!("Kafka生产者刷新失败: {}", e);
                Err(anyhow!("Kafka生产者刷新失败: {}", e))
            }
            Err(_) => {
                warn!("Kafka生产者刷新超时");
                Err(anyhow!("Kafka生产者刷新超时"))
            }
        }
    }
    
    /// 获取当前配置
    pub fn config(&self) -> &KafkaConfig {
        &self.config
    }
}

impl Drop for KafkaProducer {
    /// Best-effort flush of any buffered messages before the producer is
    /// torn down; waits at most five seconds, then only logs on failure.
    fn drop(&mut self) {
        let outcome = self.producer.flush(Timeout::After(Duration::from_secs(5)));
        if let Err(e) = outcome {
            warn!("Kafka生产者关闭时刷新失败: {}", e);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::kafka::messages::*;
    use chrono::Utc;
    use uuid::Uuid;

    #[tokio::test]
    async fn test_kafka_producer_builder() {
        let config = KafkaConfig::default();
        let _builder = KafkaProducerBuilder::new(config)
            .client_id("test-producer")
            .enable_idempotence(true);
        
        // NOTE: actually building requires a live Kafka instance, so only
        // builder construction is exercised here.
        // let producer = _builder.build().unwrap();
        // assert!(!producer.config().brokers.is_empty());
    }

    #[test]
    fn test_message_serialization() {
        let event = SandboxEventMessage {
            header: MessageHeader::new("test", "test-sender"),
            sandbox_id: Uuid::new_v4(),
            event_type: SandboxEventType::Created,
            sandbox_state: crate::types::SandboxState::Creating,
            language: crate::types::Language::Python,
            metadata: std::collections::HashMap::new(),
        };
        
        // Round-trip through JSON and confirm the message type survives.
        let original = KafkaMessage::SandboxEvent(event);
        let json = original.to_json().unwrap();
        let roundtripped = KafkaMessage::from_json(&json).unwrap();
        
        assert_eq!(original.message_type(), roundtripped.message_type());
    }
}
}