//! S3对象存储Flight适配器
//! 
//! 提供S3兼容对象存储的Flight SQL支持，包括：
//! - 多种文件格式支持（Parquet、CSV、JSON）
//! - S3/Azure/GCS/本地文件系统统一访问
//! - 分区表和虚拟表支持
//! - 高性能流式数据处理
//! - 智能Schema推断

use anyhow::{Context, Result};
use arrow_array::RecordBatch;
use arrow_schema::{Schema, Field, DataType, SchemaRef};
use async_trait::async_trait;
use datafusion::prelude::*;
use object_store::{ObjectStore, path::Path as ObjectPath};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{info, debug, warn, error};
use url::Url;

#[cfg(test)]
mod tests;

/// Supported file formats for object-store reads.
// Fieldless enum that was previously cloned everywhere; deriving `Copy`
// (plus `Eq`/`Hash`) is backward compatible and makes copies free.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum FileFormat {
    /// Apache Parquet columnar format.
    Parquet,
    /// Comma-separated values.
    Csv,
    /// JSON / newline-delimited JSON.
    Json,
    /// Detect from the file extension; unknown extensions fall back to
    /// trying each reader in turn (see `read_file`).
    Auto,
}

/// Supported object-storage backends.
// `PartialEq`/`Eq` added (all fields are `String`/`Option<String>`) so
// configurations can be compared, consistent with `FileFormat`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum StorageType {
    /// Amazon S3 (or S3-compatible). Credentials are optional; when absent
    /// the builder's default credential resolution applies.
    S3 {
        bucket: String,
        region: String,
        access_key_id: Option<String>,
        secret_access_key: Option<String>,
    },
    /// Azure Blob Storage.
    Azure {
        container: String,
        account: String,
        access_key: Option<String>,
    },
    /// Google Cloud Storage.
    Gcs {
        bucket: String,
        service_account_key: Option<String>,
    },
    /// Local filesystem rooted at `root_path`.
    Local {
        root_path: String,
    },
}

/// Configuration for the object-storage Flight adapter.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct S3StorageConfig {
    /// Which backing store to use (S3, Azure, GCS, or local filesystem).
    pub storage_type: StorageType,
    /// Format assumed for files whose extension is not recognized.
    pub default_format: FileFormat,
    /// Whether compression is enabled.
    // NOTE(review): not read anywhere in this file — confirm it is consumed elsewhere.
    pub enable_compression: bool,
    /// Target rows per record batch.
    // NOTE(review): not currently wired into any of the readers in this file.
    pub batch_size: usize,
    /// Maximum number of concurrent connections to the store.
    pub max_connections: usize,
    /// Connect timeout, in seconds.
    pub connect_timeout: u64,
    /// Read timeout, in seconds.
    pub read_timeout: u64,
}

/// Defaults: local `./data` directory, auto-detected format, compression on,
/// 8192-row batches, 10 connections, 30 s connect / 300 s read timeouts.
impl Default for S3StorageConfig {
    fn default() -> Self {
        let storage_type = StorageType::Local {
            root_path: String::from("./data"),
        };
        Self {
            storage_type,
            default_format: FileFormat::Auto,
            enable_compression: true,
            batch_size: 8192,
            max_connections: 10,
            connect_timeout: 30,
            read_timeout: 300,
        }
    }
}

/// Metadata for a single object discovered in the store.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileInfo {
    /// Object path within the store.
    pub path: String,
    /// Object size in bytes, as reported by the store listing.
    pub size: u64,
    /// Detected (or defaulted) file format.
    pub format: FileFormat,
    /// Last-modified timestamp reported by the store.
    pub last_modified: chrono::DateTime<chrono::Utc>,
    /// Arrow schema; `None` until filled in lazily by `get_file_schema`.
    // NOTE(review): (de)serializing `SchemaRef` requires arrow-schema's
    // `serde` feature — confirm it is enabled in Cargo.toml.
    pub schema: Option<SchemaRef>,
}

/// Aggregate usage/IO statistics for the adapter.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct StorageStats {
    /// Number of objects found by the most recent `list_files` call.
    pub total_files: u64,
    /// Total size in bytes of the most recently listed objects.
    pub total_size: u64,
    /// Number of read attempts (failed reads are counted too — see `read_file`).
    pub files_read: u64,
    /// Bytes read, measured as in-memory Arrow array size, not on-disk size.
    pub bytes_read: u64,
    /// Running average read latency in milliseconds.
    pub avg_read_time_ms: f64,
    /// Timestamp of the most recent read attempt.
    pub last_access_time: Option<chrono::DateTime<chrono::Utc>>,
}

/// Flight adapter exposing object-store files (S3/Azure/GCS/local) through
/// DataFusion for listing, streaming reads, and SQL queries.
pub struct S3StorageAdapter {
    /// Adapter configuration.
    config: S3StorageConfig,
    /// Backend-specific object store instance.
    store: Arc<dyn ObjectStore>,
    /// DataFusion session used for SQL execution and table registration.
    ctx: SessionContext,
    /// Per-file metadata cache, keyed by object path.
    file_cache: Arc<RwLock<HashMap<String, FileInfo>>>,
    /// Aggregate usage statistics.
    stats: Arc<RwLock<StorageStats>>,
}

impl S3StorageAdapter {
    /// Creates a new adapter for the configured storage backend.
    ///
    /// Builds the underlying `ObjectStore`, a fresh DataFusion session
    /// context, and empty file-info cache / statistics.
    ///
    /// # Errors
    /// Returns an error if the object store cannot be constructed
    /// (e.g. invalid bucket/credentials configuration).
    pub async fn new(config: S3StorageConfig) -> Result<Self> {
        info!("创建S3存储适配器，类型: {:?}", config.storage_type);
        
        // Build the backend-specific object store.
        let store = Self::create_object_store(&config.storage_type).await?;
        
        // Fresh session; tables are (re)registered lazily by `execute_sql`.
        let ctx = SessionContext::new();
        
        Ok(Self {
            config,
            store,
            ctx,
            file_cache: Arc::new(RwLock::new(HashMap::new())),
            stats: Arc::new(RwLock::new(StorageStats::default())),
        })
    }

    /// Builds the concrete `ObjectStore` implementation for `storage_type`.
    ///
    /// Cloud credentials are optional; when absent, each builder falls back
    /// to its own default credential resolution.
    ///
    /// # Errors
    /// Returns an error when a builder rejects the configuration, or when the
    /// local root path cannot be used as a filesystem prefix.
    async fn create_object_store(storage_type: &StorageType) -> Result<Arc<dyn ObjectStore>> {
        match storage_type {
            StorageType::S3 { bucket, region, access_key_id, secret_access_key } => {
                // `AmazonS3` itself was imported but never used; only the
                // builder is needed here.
                use object_store::aws::AmazonS3Builder;
                
                let mut builder = AmazonS3Builder::new()
                    .with_bucket_name(bucket)
                    .with_region(region);
                
                if let Some(key_id) = access_key_id {
                    builder = builder.with_access_key_id(key_id);
                }
                
                if let Some(secret_key) = secret_access_key {
                    builder = builder.with_secret_access_key(secret_key);
                }
                
                let s3 = builder.build().context("创建S3存储失败")?;
                Ok(Arc::new(s3))
            }
            StorageType::Azure { container, account, access_key } => {
                use object_store::azure::MicrosoftAzureBuilder;
                
                let mut builder = MicrosoftAzureBuilder::new()
                    .with_container_name(container)
                    .with_account(account);
                
                if let Some(key) = access_key {
                    builder = builder.with_access_key(key);
                }
                
                let azure = builder.build().context("创建Azure存储失败")?;
                Ok(Arc::new(azure))
            }
            StorageType::Gcs { bucket, service_account_key } => {
                use object_store::gcp::GoogleCloudStorageBuilder;
                
                let mut builder = GoogleCloudStorageBuilder::new()
                    .with_bucket_name(bucket);
                
                if let Some(key) = service_account_key {
                    builder = builder.with_service_account_key(key);
                }
                
                let gcs = builder.build().context("创建GCS存储失败")?;
                Ok(Arc::new(gcs))
            }
            StorageType::Local { root_path } => {
                use object_store::local::LocalFileSystem;
                
                let local = LocalFileSystem::new_with_prefix(root_path)
                    .context("创建本地文件系统失败")?;
                Ok(Arc::new(local))
            }
        }
    }

    /// 列出文件
    pub async fn list_files(&self, prefix: Option<&str>) -> Result<Vec<FileInfo>> {
        debug!("列出文件，前缀: {:?}", prefix);
        
        let prefix_path = prefix.map(ObjectPath::from).unwrap_or_else(|| ObjectPath::from(""));
        
        let mut files = Vec::new();
        let mut list_stream = self.store.list(Some(&prefix_path));
        
        while let Some(meta) = list_stream.next().await {
            let meta = meta.context("获取文件元数据失败")?;
            
            let format = self.detect_file_format(&meta.location.to_string());
            let file_info = FileInfo {
                path: meta.location.to_string(),
                size: meta.size,
                format,
                last_modified: meta.last_modified,
                schema: None, // 延迟加载
            };
            
            files.push(file_info);
        }
        
        // 更新统计信息
        {
            let mut stats = self.stats.write().await;
            stats.total_files = files.len() as u64;
            stats.total_size = files.iter().map(|f| f.size).sum();
        }
        
        Ok(files)
    }

    /// Infers the file format from the path's extension, falling back to
    /// `config.default_format` when the extension is not recognized.
    fn detect_file_format(&self, path: &str) -> FileFormat {
        // Compare case-insensitively so e.g. `DATA.PARQUET` is detected too
        // (previously only lowercase extensions matched).
        let lower = path.to_ascii_lowercase();
        if lower.ends_with(".parquet") {
            FileFormat::Parquet
        } else if lower.ends_with(".csv") {
            FileFormat::Csv
        } else if lower.ends_with(".json") || lower.ends_with(".jsonl") {
            FileFormat::Json
        } else {
            self.config.default_format.clone()
        }
    }

    /// Reads one object and decodes it into Arrow record batches.
    ///
    /// Format selection is by extension; `Auto` tries Parquet, then CSV,
    /// then JSON, returning the first success. If all three fail, only the
    /// JSON reader's error is surfaced — the earlier errors are discarded.
    ///
    /// Side effects on `stats`: `files_read` and `last_access_time` are
    /// updated before the read (so failed reads are counted too);
    /// `bytes_read` (in-memory Arrow size, not on-disk bytes) and the
    /// running average read time are updated afterwards.
    ///
    /// # Errors
    /// Returns an error when the object cannot be fetched or decoded.
    pub async fn read_file(&self, path: &str) -> Result<Vec<RecordBatch>> {
        let start_time = std::time::Instant::now();
        debug!("读取文件: {}", path);
        
        // Count the attempt up front; this guarantees files_read >= 1 for
        // the average computation below.
        {
            let mut stats = self.stats.write().await;
            stats.files_read += 1;
            stats.last_access_time = Some(chrono::Utc::now());
        }
        
        let file_path = ObjectPath::from(path);
        let format = self.detect_file_format(path);
        
        let result = match format {
            FileFormat::Parquet => self.read_parquet_file(&file_path).await,
            FileFormat::Csv => self.read_csv_file(&file_path).await,
            FileFormat::Json => self.read_json_file(&file_path).await,
            FileFormat::Auto => {
                // Probe the formats in a fixed order; a failed Parquet or
                // CSV parse silently falls through to the next reader.
                if let Ok(batches) = self.read_parquet_file(&file_path).await {
                    Ok(batches)
                } else if let Ok(batches) = self.read_csv_file(&file_path).await {
                    Ok(batches)
                } else {
                    self.read_json_file(&file_path).await
                }
            }
        };
        
        // Update read statistics (also on failure: elapsed time still
        // feeds the running average).
        let elapsed = start_time.elapsed();
        {
            let mut stats = self.stats.write().await;
            if let Ok(batches) = &result {
                let bytes_read: usize = batches.iter().map(|b| {
                    b.get_array_memory_size()
                }).sum();
                stats.bytes_read += bytes_read as u64;
            }
            
            // Incremental mean; `files_read - 1` cannot underflow because
            // this call incremented it above.
            // NOTE(review): the two lock scopes are not atomic, so with
            // concurrent readers the average can drift slightly — confirm
            // whether that approximation is acceptable.
            let total_time = stats.avg_read_time_ms * (stats.files_read - 1) as f64 + elapsed.as_millis() as f64;
            stats.avg_read_time_ms = total_time / stats.files_read as f64;
        }
        
        result
    }

    /// 读取Parquet文件
    async fn read_parquet_file(&self, path: &ObjectPath) -> Result<Vec<RecordBatch>> {
        use arrow::ipc::reader::StreamReader;
        use parquet::arrow::async_reader::ParquetObjectReader;
        use parquet::arrow::ParquetRecordBatchStreamBuilder;
        
        let meta = self.store.head(path).await.context("获取Parquet文件元数据失败")?;
        let reader = ParquetObjectReader::new(self.store.clone(), meta);
        
        let builder = ParquetRecordBatchStreamBuilder::new(reader)
            .await
            .context("创建Parquet读取器失败")?;
        
        let mut stream = builder.build().context("构建Parquet流失败")?;
        let mut batches = Vec::new();
        
        while let Some(batch) = stream.next().await {
            let batch = batch.context("读取Parquet批次失败")?;
            batches.push(batch);
        }
        
        Ok(batches)
    }

    /// Reads a CSV object (header row expected) into record batches,
    /// buffering the whole object in memory before decoding.
    ///
    /// # Errors
    /// Returns an error when the object cannot be fetched or parsed as CSV.
    async fn read_csv_file(&self, path: &ObjectPath) -> Result<Vec<RecordBatch>> {
        // `Reader` was imported but never named; only the builder is needed.
        use arrow_csv::ReaderBuilder;
        use std::io::Cursor;
        
        let data = self.store.get(path).await.context("获取CSV文件失败")?;
        let bytes = data.bytes().await.context("读取CSV文件内容失败")?;
        
        let cursor = Cursor::new(bytes);
        // NOTE(review): in recent arrow-csv versions `ReaderBuilder::new`
        // takes a schema and `has_header` became `with_header` — confirm
        // the pinned arrow version matches this API.
        let reader = ReaderBuilder::new()
            .has_header(true)
            .build(cursor)
            .context("创建CSV读取器失败")?;
        
        // The CSV reader is an iterator of `Result<RecordBatch>`.
        let mut batches = Vec::new();
        for batch in reader {
            batches.push(batch.context("读取CSV批次失败")?);
        }
        
        Ok(batches)
    }

    /// Reads a JSON / NDJSON object into record batches, buffering the whole
    /// object in memory; no explicit schema is supplied to the reader.
    ///
    /// NOTE(review): recent arrow-json versions require a schema argument in
    /// `ReaderBuilder::new(schema)` — confirm the pinned arrow version
    /// supports this schema-less builder form.
    ///
    /// # Errors
    /// Returns an error when the object cannot be fetched or parsed as JSON.
    async fn read_json_file(&self, path: &ObjectPath) -> Result<Vec<RecordBatch>> {
        use arrow_json::ReaderBuilder;
        use std::io::Cursor;
        
        let data = self.store.get(path).await.context("获取JSON文件失败")?;
        let bytes = data.bytes().await.context("读取JSON文件内容失败")?;
        
        let cursor = Cursor::new(bytes);
        let mut reader = ReaderBuilder::new()
            .build(cursor)
            .context("创建JSON读取器失败")?;
        
        // Drain the reader batch by batch.
        let mut batches = Vec::new();
        while let Some(batch) = reader.next() {
            let batch = batch.context("读取JSON批次失败")?;
            batches.push(batch);
        }
        
        Ok(batches)
    }

    /// Runs a SQL statement against the file-backed tables and collects the
    /// full result set into memory.
    ///
    /// Every call first (re)registers all discovered files as tables on the
    /// session context.
    ///
    /// # Errors
    /// Returns an error if table registration, SQL parsing, or query
    /// execution fails.
    pub async fn execute_sql(&self, sql: &str) -> Result<Vec<RecordBatch>> {
        debug!("执行SQL查询: {}", sql);
        
        // Expose every discovered file as a table before planning.
        self.register_tables().await?;
        
        let dataframe = self.ctx.sql(sql).await.context("SQL解析失败")?;
        dataframe.collect().await.context("查询执行失败")
    }

    /// Registers every listed file as a DataFusion table named after its
    /// file stem.
    ///
    /// NOTE(review): the `file://{path}` URLs only make sense for the
    /// `Local` storage type — for S3/Azure/GCS the listed paths are object
    /// keys, not local files, so these registrations will point at
    /// non-existent local paths. The object store should be registered on
    /// the session context and a scheme-appropriate URL used instead —
    /// confirm the intended behavior.
    ///
    /// NOTE(review): files in different directories sharing a stem collide
    /// on table name; the later registration wins.
    ///
    /// Files whose format resolved to `Auto` (unknown extension while
    /// `default_format == Auto`) are skipped.
    ///
    /// # Errors
    /// Returns an error if listing or any individual registration fails.
    async fn register_tables(&self) -> Result<()> {
        let files = self.list_files(None).await?;
        
        for file in files {
            let table_name = self.extract_table_name(&file.path);
            
            match file.format {
                FileFormat::Parquet => {
                    let url = format!("file://{}", file.path);
                    self.ctx.register_parquet(&table_name, &url, Default::default())
                        .await
                        .context("注册Parquet表失败")?;
                }
                FileFormat::Csv => {
                    let url = format!("file://{}", file.path);
                    self.ctx.register_csv(&table_name, &url, Default::default())
                        .await
                        .context("注册CSV表失败")?;
                }
                FileFormat::Json => {
                    let url = format!("file://{}", file.path);
                    self.ctx.register_json(&table_name, &url, Default::default())
                        .await
                        .context("注册JSON表失败")?;
                }
                FileFormat::Auto => {
                    // No concrete format to register; skip this file.
                    continue;
                }
            }
        }
        
        Ok(())
    }

    /// Derives a table name from a file path: the file stem (final path
    /// component without its extension), or `"unknown"` when the path has
    /// no usable stem.
    fn extract_table_name(&self, path: &str) -> String {
        match std::path::Path::new(path).file_stem().and_then(|stem| stem.to_str()) {
            Some(stem) => stem.to_owned(),
            None => String::from("unknown"),
        }
    }

    /// 获取文件Schema
    pub async fn get_file_schema(&self, path: &str) -> Result<SchemaRef> {
        debug!("获取文件Schema: {}", path);
        
        // 检查缓存
        {
            let cache = self.file_cache.read().await;
            if let Some(file_info) = cache.get(path) {
                if let Some(schema) = &file_info.schema {
                    return Ok(schema.clone());
                }
            }
        }
        
        // 读取文件获取Schema
        let batches = self.read_file(path).await?;
        if let Some(batch) = batches.first() {
            let schema = batch.schema();
            
            // 更新缓存
            {
                let mut cache = self.file_cache.write().await;
                if let Some(file_info) = cache.get_mut(path) {
                    file_info.schema = Some(schema.clone());
                }
            }
            
            Ok(schema)
        } else {
            Err(anyhow::anyhow!("文件为空或无法读取: {}", path))
        }
    }

    /// Returns a point-in-time snapshot (clone) of the storage statistics.
    pub async fn get_stats(&self) -> StorageStats {
        let guard = self.stats.read().await;
        guard.clone()
    }

    /// Liveness probe: attempts a root listing against the store.
    ///
    /// Never returns `Err`; a listing failure is logged at `warn` level and
    /// reported as `Ok(false)`, success as `Ok(true)`.
    pub async fn health_check(&self) -> Result<bool> {
        debug!("执行存储健康检查");
        
        if let Err(e) = self.list_files(None).await {
            warn!("存储健康检查失败: {}", e);
            return Ok(false);
        }
        
        info!("存储健康检查通过");
        Ok(true)
    }
}
