//! Kafka流数据源Flight适配器
//! 
//! 提供Apache Kafka流数据的Flight SQL支持，包括：
//! - 实时数据流消费和生产
//! - 多种数据格式支持（JSON、Avro、Protobuf）
//! - 流式SQL查询和窗口函数
//! - 高性能批量数据处理
//! - 消费者组管理和偏移量控制

use anyhow::{Context, Result};
use arrow_array::{RecordBatch, Array, StringArray, Int64Array, TimestampMillisecondArray};
use arrow_schema::{Schema, Field, DataType, SchemaRef};
use async_trait::async_trait;
use futures::StreamExt;
use rdkafka::{
    ClientConfig, Message,
    consumer::{Consumer, StreamConsumer, CommitMode},
    producer::{FutureProducer, FutureRecord},
    util::Timeout,
};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::{RwLock, mpsc};
use tokio_stream::wrappers::ReceiverStream;
use tracing::{info, debug, warn, error};

#[cfg(test)]
mod tests;

/// Supported message payload formats.
///
/// Determines how a raw Kafka payload is decoded into an Arrow `RecordBatch`
/// (see `KafkaStreamAdapter::parse_message_payload`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum MessageFormat {
    /// JSON text; validated and stored as the raw JSON string
    Json,
    /// Avro (only compiled in with the `avro` feature)
    #[cfg(feature = "avro")]
    Avro,
    /// Protobuf (only compiled in with the `protobuf` feature)
    #[cfg(feature = "protobuf")]
    Protobuf,
    /// Plain UTF-8 text
    Text,
    /// Arbitrary bytes; stored hex-encoded
    Binary,
}

/// Kafka connection configuration shared by the consumer and producer.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KafkaConfig {
    /// Bootstrap servers, e.g. "host1:9092,host2:9092"
    pub bootstrap_servers: String,
    /// Security protocol; anything other than "PLAINTEXT" enables the
    /// SASL/SSL settings below
    pub security_protocol: String,
    /// SASL mechanism (e.g. "PLAIN", "SCRAM-SHA-256")
    pub sasl_mechanism: Option<String>,
    /// SASL username
    pub sasl_username: Option<String>,
    /// SASL password
    pub sasl_password: Option<String>,
    /// Path to the SSL CA certificate file
    pub ssl_ca_location: Option<String>,
    /// Consumer group id
    pub group_id: String,
    /// Whether the consumer auto-commits offsets
    pub enable_auto_commit: bool,
    /// Consumer session timeout in milliseconds
    pub session_timeout_ms: u32,
    /// Consumer heartbeat interval in milliseconds
    pub heartbeat_interval_ms: u32,
}

impl Default for KafkaConfig {
    /// Local-development defaults: plaintext broker on localhost:9092,
    /// auto-commit enabled, 30s session timeout / 3s heartbeat.
    fn default() -> Self {
        Self {
            bootstrap_servers: "localhost:9092".to_string(),
            security_protocol: "PLAINTEXT".to_string(),
            sasl_mechanism: None,
            sasl_username: None,
            sasl_password: None,
            ssl_ca_location: None,
            group_id: "data-gateway-group".to_string(),
            enable_auto_commit: true,
            session_timeout_ms: 30000,
            heartbeat_interval_ms: 3000,
        }
    }
}

/// Per-topic configuration, registered via `subscribe_topic` and consulted
/// by the consume loop to decode payloads.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TopicConfig {
    /// Topic name
    pub name: String,
    /// Payload format of messages on this topic
    pub format: MessageFormat,
    /// Optional Arrow schema for the payload
    // NOTE(review): `SchemaRef` is `Arc<Schema>`; the Serialize/Deserialize
    // derive on this struct only compiles if arrow-schema's "serde" feature
    // is enabled — TODO confirm against Cargo.toml.
    pub schema: Option<SchemaRef>,
    /// Partition count, if known
    pub partitions: Option<i32>,
    /// Replication factor, if known
    pub replication_factor: Option<i16>,
}

/// Runtime statistics for the stream adapter.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct StreamStats {
    /// Total messages consumed
    pub messages_consumed: u64,
    /// Total messages produced
    pub messages_produced: u64,
    /// Total payload bytes processed (both consumed and produced count here)
    pub bytes_processed: u64,
    /// Running mean of per-message processing latency, in milliseconds
    pub avg_latency_ms: f64,
    /// Wall-clock time of the most recently processed message
    pub last_processed_time: Option<chrono::DateTime<chrono::Utc>>,
    /// Latest observed offset per "topic:partition" key
    pub current_offset: HashMap<String, i64>,
}

/// Flight adapter over a Kafka consumer/producer pair.
///
/// Consumed messages are decoded into Arrow `RecordBatch`es and pushed
/// through an internal channel; `get_message_stream` hands the receiving
/// end to exactly one caller.
pub struct KafkaStreamAdapter {
    /// Connection configuration
    config: KafkaConfig,
    /// Kafka stream consumer
    consumer: StreamConsumer,
    /// Kafka async producer
    producer: FutureProducer,
    /// Subscribed topic configurations, keyed by topic name
    topics: Arc<RwLock<HashMap<String, TopicConfig>>>,
    /// Shared runtime statistics
    stats: Arc<RwLock<StreamStats>>,
    /// Sending half of the decoded-batch channel
    // NOTE(review): unbounded channel — a slow downstream consumer lets
    // decoded batches accumulate with no backpressure.
    message_sender: mpsc::UnboundedSender<RecordBatch>,
    /// Receiving half; taken at most once by `get_message_stream`
    message_receiver: Arc<RwLock<Option<mpsc::UnboundedReceiver<RecordBatch>>>>,
}

impl KafkaStreamAdapter {
    /// 创建新的Kafka流适配器
    pub async fn new(config: KafkaConfig) -> Result<Self> {
        info!("创建Kafka流适配器，服务器: {}", config.bootstrap_servers);
        
        // 创建消费者配置
        let mut consumer_config = ClientConfig::new();
        consumer_config
            .set("bootstrap.servers", &config.bootstrap_servers)
            .set("group.id", &config.group_id)
            .set("enable.auto.commit", config.enable_auto_commit.to_string())
            .set("session.timeout.ms", config.session_timeout_ms.to_string())
            .set("heartbeat.interval.ms", config.heartbeat_interval_ms.to_string())
            .set("auto.offset.reset", "earliest");
        
        // 添加安全配置
        if config.security_protocol != "PLAINTEXT" {
            consumer_config.set("security.protocol", &config.security_protocol);
            
            if let Some(mechanism) = &config.sasl_mechanism {
                consumer_config.set("sasl.mechanism", mechanism);
            }
            
            if let Some(username) = &config.sasl_username {
                consumer_config.set("sasl.username", username);
            }
            
            if let Some(password) = &config.sasl_password {
                consumer_config.set("sasl.password", password);
            }
            
            if let Some(ca_location) = &config.ssl_ca_location {
                consumer_config.set("ssl.ca.location", ca_location);
            }
        }
        
        // 创建消费者
        let consumer: StreamConsumer = consumer_config
            .create()
            .context("创建Kafka消费者失败")?;
        
        // 创建生产者配置
        let mut producer_config = ClientConfig::new();
        producer_config
            .set("bootstrap.servers", &config.bootstrap_servers)
            .set("message.timeout.ms", "5000");
        
        // 添加安全配置到生产者
        if config.security_protocol != "PLAINTEXT" {
            producer_config.set("security.protocol", &config.security_protocol);
            
            if let Some(mechanism) = &config.sasl_mechanism {
                producer_config.set("sasl.mechanism", mechanism);
            }
            
            if let Some(username) = &config.sasl_username {
                producer_config.set("sasl.username", username);
            }
            
            if let Some(password) = &config.sasl_password {
                producer_config.set("sasl.password", password);
            }
            
            if let Some(ca_location) = &config.ssl_ca_location {
                producer_config.set("ssl.ca.location", ca_location);
            }
        }
        
        // 创建生产者
        let producer: FutureProducer = producer_config
            .create()
            .context("创建Kafka生产者失败")?;
        
        // 创建消息通道
        let (message_sender, message_receiver) = mpsc::unbounded_channel();
        
        Ok(Self {
            config,
            consumer,
            producer,
            topics: Arc::new(RwLock::new(HashMap::new())),
            stats: Arc::new(RwLock::new(StreamStats::default())),
            message_sender,
            message_receiver: Arc::new(RwLock::new(Some(message_receiver))),
        })
    }

    /// Subscribes the consumer to `topic_config.name` and records the topic
    /// configuration so the consume loop knows how to decode its payloads.
    ///
    /// # Errors
    /// Fails if the underlying Kafka subscribe call fails.
    pub async fn subscribe_topic(&self, topic_config: TopicConfig) -> Result<()> {
        info!("订阅Kafka主题: {}", topic_config.name);

        let topic_name = topic_config.name.clone();
        self.consumer
            .subscribe(&[topic_name.as_str()])
            .context("订阅Kafka主题失败")?;

        // Register the format/schema for payload decoding.
        self.topics.write().await.insert(topic_name, topic_config);

        Ok(())
    }

    /// Spawns a detached background task that receives Kafka messages in an
    /// endless loop and forwards decoded batches through the internal channel.
    ///
    /// Receive errors are logged and retried after a one-second backoff;
    /// per-message decode errors are logged and skipped.
    pub async fn start_consuming(&self) -> Result<()> {
        info!("开始消费Kafka消息");
        
        // NOTE(review): rdkafka's `StreamConsumer` does not implement `Clone`,
        // so this clone likely fails to compile — storing the consumer as
        // `Arc<StreamConsumer>` would be the usual fix. TODO confirm against
        // the pinned rdkafka version.
        let consumer = self.consumer.clone();
        let topics = self.topics.clone();
        let stats = self.stats.clone();
        let sender = self.message_sender.clone();
        
        // Detached task: runs until the process exits; there is no shutdown signal.
        tokio::spawn(async move {
            loop {
                match consumer.recv().await {
                    Ok(message) => {
                        if let Err(e) = Self::process_message(&message, &topics, &stats, &sender).await {
                            error!("处理Kafka消息失败: {}", e);
                        }
                    }
                    Err(e) => {
                        error!("接收Kafka消息失败: {}", e);
                        // Brief backoff before retrying the receive.
                        tokio::time::sleep(Duration::from_millis(1000)).await;
                    }
                }
            }
        });
        
        Ok(())
    }

    /// Decodes one Kafka message into a `RecordBatch`, forwards it through
    /// `sender`, and updates the shared statistics.
    ///
    /// Messages for topics with no registered `TopicConfig` are skipped with
    /// a warning, and a closed channel is also treated as non-fatal — both
    /// return `Ok(())`.
    ///
    /// # Errors
    /// Propagates payload decoding failures from `parse_message_payload`.
    async fn process_message(
        message: &rdkafka::message::BorrowedMessage<'_>,
        topics: &Arc<RwLock<HashMap<String, TopicConfig>>>,
        stats: &Arc<RwLock<StreamStats>>,
        sender: &mpsc::UnboundedSender<RecordBatch>,
    ) -> Result<()> {
        let start_time = std::time::Instant::now();
        
        // Message metadata; fall back to "now" when the broker supplied no timestamp.
        let topic = message.topic();
        let partition = message.partition();
        let offset = message.offset();
        let timestamp = message.timestamp().to_millis().unwrap_or(chrono::Utc::now().timestamp_millis());
        
        // Look up the decoding configuration registered by `subscribe_topic`.
        let topic_config = {
            let topics_guard = topics.read().await;
            topics_guard.get(topic).cloned()
        };
        
        let topic_config = match topic_config {
            Some(config) => config,
            None => {
                warn!("未找到主题配置: {}", topic);
                return Ok(());
            }
        };
        
        // Decode the payload according to the topic's configured format;
        // a missing payload is treated as an empty byte slice.
        let payload = message.payload().unwrap_or(&[]);
        let record_batch = Self::parse_message_payload(payload, &topic_config, timestamp)?;
        
        // Forward the batch; a send error means the receiver was dropped.
        if let Err(_) = sender.send(record_batch) {
            warn!("消息通道已关闭");
            return Ok(());
        }
        
        // Update all statistics under a single write lock.
        let elapsed = start_time.elapsed();
        {
            let mut stats_guard = stats.write().await;
            stats_guard.messages_consumed += 1;
            stats_guard.bytes_processed += payload.len() as u64;
            stats_guard.last_processed_time = Some(chrono::Utc::now());
            
            // Track the latest offset per "topic:partition" key.
            stats_guard.current_offset.insert(
                format!("{}:{}", topic, partition),
                offset,
            );
            
            // Incremental running mean: previous total latency plus this
            // message's latency, divided by the new (already incremented) count.
            let total_latency = stats_guard.avg_latency_ms * (stats_guard.messages_consumed - 1) as f64 + elapsed.as_millis() as f64;
            stats_guard.avg_latency_ms = total_latency / stats_guard.messages_consumed as f64;
        }
        
        debug!("处理消息完成: topic={}, partition={}, offset={}", topic, partition, offset);
        Ok(())
    }

    /// Dispatches payload decoding based on the topic's configured format.
    ///
    /// # Errors
    /// Propagates errors from the format-specific parser.
    // NOTE(review): `parse_avro_message` / `parse_protobuf_message` are not
    // defined in this file — confirm they exist behind the matching feature
    // gates elsewhere, otherwise builds with those features will not compile.
    fn parse_message_payload(
        payload: &[u8],
        topic_config: &TopicConfig,
        timestamp: i64,
    ) -> Result<RecordBatch> {
        match topic_config.format {
            MessageFormat::Json => Self::parse_json_message(payload, timestamp),
            MessageFormat::Text => Self::parse_text_message(payload, timestamp),
            MessageFormat::Binary => Self::parse_binary_message(payload, timestamp),
            #[cfg(feature = "avro")]
            MessageFormat::Avro => Self::parse_avro_message(payload, timestamp),
            #[cfg(feature = "protobuf")]
            MessageFormat::Protobuf => Self::parse_protobuf_message(payload, timestamp),
        }
    }

    /// Decodes a JSON payload into a single-row batch with columns
    /// `timestamp` (event time, ms) and `data` (the raw JSON text).
    ///
    /// # Errors
    /// Fails if the payload is not valid UTF-8 or not valid JSON, or if the
    /// batch cannot be assembled.
    fn parse_json_message(payload: &[u8], timestamp: i64) -> Result<RecordBatch> {
        let json_str = std::str::from_utf8(payload).context("无效的UTF-8字符串")?;
        // Parse purely to validate the JSON; the raw text is what gets stored.
        // (The parsed value was previously bound to an unused variable.)
        serde_json::from_str::<serde_json::Value>(json_str).context("JSON解析失败")?;
        
        // One-row schema: event timestamp + raw JSON text.
        let schema = Arc::new(Schema::new(vec![
            Field::new("timestamp", DataType::Timestamp(arrow_schema::TimeUnit::Millisecond, None), false),
            Field::new("data", DataType::Utf8, true),
        ]));
        
        let timestamp_array = TimestampMillisecondArray::from(vec![timestamp]);
        let data_array = StringArray::from(vec![json_str]);
        
        let arrays: Vec<Arc<dyn Array>> = vec![
            Arc::new(timestamp_array),
            Arc::new(data_array),
        ];
        
        RecordBatch::try_new(schema, arrays).context("创建RecordBatch失败")
    }

    /// Decodes a UTF-8 text payload into a single-row batch with columns
    /// `timestamp` (event time, ms) and `message`.
    ///
    /// # Errors
    /// Fails if the payload is not valid UTF-8 or the batch cannot be built.
    fn parse_text_message(payload: &[u8], timestamp: i64) -> Result<RecordBatch> {
        let text = std::str::from_utf8(payload).context("无效的UTF-8字符串")?;
        
        // One-row schema: event timestamp + message text.
        let fields = vec![
            Field::new("timestamp", DataType::Timestamp(arrow_schema::TimeUnit::Millisecond, None), false),
            Field::new("message", DataType::Utf8, true),
        ];
        
        let columns: Vec<Arc<dyn Array>> = vec![
            Arc::new(TimestampMillisecondArray::from(vec![timestamp])),
            Arc::new(StringArray::from(vec![text])),
        ];
        
        RecordBatch::try_new(Arc::new(Schema::new(fields)), columns).context("创建RecordBatch失败")
    }

    /// Decodes an arbitrary binary payload into a single-row batch with
    /// columns `timestamp` (event time, ms), `size` (payload byte count)
    /// and `data_hex` (the payload, hex-encoded).
    ///
    /// # Errors
    /// Fails only if the batch cannot be assembled.
    fn parse_binary_message(payload: &[u8], timestamp: i64) -> Result<RecordBatch> {
        // Hex-encode so the bytes can be carried in a Utf8 column.
        let encoded = hex::encode(payload);
        
        let fields = vec![
            Field::new("timestamp", DataType::Timestamp(arrow_schema::TimeUnit::Millisecond, None), false),
            Field::new("size", DataType::Int64, false),
            Field::new("data_hex", DataType::Utf8, true),
        ];
        
        let columns: Vec<Arc<dyn Array>> = vec![
            Arc::new(TimestampMillisecondArray::from(vec![timestamp])),
            Arc::new(Int64Array::from(vec![payload.len() as i64])),
            Arc::new(StringArray::from(vec![encoded.as_str()])),
        ];
        
        RecordBatch::try_new(Arc::new(Schema::new(fields)), columns).context("创建RecordBatch失败")
    }

    /// Produces `payload` (with optional `key`) to `topic`, waiting up to
    /// five seconds for broker acknowledgement, and counts the message in
    /// the shared statistics on success.
    ///
    /// # Errors
    /// Fails when the broker does not acknowledge delivery in time.
    pub async fn send_message(&self, topic: &str, key: Option<&str>, payload: &[u8]) -> Result<()> {
        debug!("发送消息到主题: {}", topic);
        
        // Attach the key only when one was supplied.
        let record = match key {
            Some(k) => FutureRecord::to(topic).payload(payload).key(k),
            None => FutureRecord::to(topic).payload(payload),
        };
        
        match self.producer
            .send(record, Timeout::After(Duration::from_secs(5)))
            .await
        {
            Ok(_) => {
                // Count the successful produce, then release the lock before logging.
                let mut stats = self.stats.write().await;
                stats.messages_produced += 1;
                stats.bytes_processed += payload.len() as u64;
                drop(stats);
                info!("消息发送成功: topic={}", topic);
                Ok(())
            }
            Err((e, _)) => {
                error!("消息发送失败: {}", e);
                Err(anyhow::anyhow!("消息发送失败: {}", e))
            }
        }
    }

    /// Takes the receiving end of the decoded-batch channel, wrapped as a
    /// stream. Succeeds at most once; subsequent calls fail.
    ///
    /// # Errors
    /// Fails if the stream was already taken by an earlier call.
    // NOTE(review): `ReceiverStream::new` takes a bounded `mpsc::Receiver`,
    // but the stored field is an `UnboundedReceiver` — the matching wrapper
    // is `tokio_stream::wrappers::UnboundedReceiverStream`. This likely does
    // not compile as written; fixing it changes the public return type, so
    // flagging only. TODO confirm.
    pub async fn get_message_stream(&self) -> Result<ReceiverStream<RecordBatch>> {
        let mut receiver_guard = self.message_receiver.write().await;
        if let Some(receiver) = receiver_guard.take() {
            Ok(ReceiverStream::new(receiver))
        } else {
            Err(anyhow::anyhow!("消息流已被获取"))
        }
    }

    /// Returns a point-in-time snapshot of the stream statistics.
    pub async fn get_stats(&self) -> StreamStats {
        let guard = self.stats.read().await;
        guard.clone()
    }

    /// Checks broker connectivity by fetching cluster metadata with a
    /// five-second timeout.
    ///
    /// Returns `Ok(true)` on success and `Ok(false)` on failure; it never
    /// returns an `Err`, so callers can treat the bool as the health state.
    pub async fn health_check(&self) -> Result<bool> {
        debug!("执行Kafka健康检查");
        
        if let Err(e) = self.consumer.fetch_metadata(None, Duration::from_secs(5)) {
            warn!("Kafka健康检查失败: {}", e);
            Ok(false)
        } else {
            info!("Kafka健康检查通过");
            Ok(true)
        }
    }

    /// Asynchronously commits the consumer's current offsets.
    ///
    /// # Errors
    /// Fails when the commit request cannot be issued.
    pub async fn commit_offsets(&self) -> Result<()> {
        debug!("提交Kafka偏移量");
        
        self.consumer
            .commit_consumer_state(CommitMode::Async)
            .context("提交偏移量失败")
    }

    /// Lists the names of all topics known to the cluster.
    ///
    /// # Errors
    /// Fails when cluster metadata cannot be fetched within five seconds.
    pub async fn get_topics(&self) -> Result<Vec<String>> {
        debug!("获取Kafka主题列表");
        
        let metadata = self.consumer
            .fetch_metadata(None, Duration::from_secs(5))
            .context("获取元数据失败")?;
        
        Ok(metadata
            .topics()
            .iter()
            .map(|t| t.name().to_owned())
            .collect())
    }
}
