//! Batch-processing optimization module.

use std::collections::HashMap;
use std::time::Instant;
use serde_json::Value;
use crate::error::Result;

/// Strategy for splitting input rows into batches.
#[derive(Debug, Clone, PartialEq)]
pub enum BatchStrategy {
    /// Fixed number of rows per batch.
    FixedSize(usize),
    /// Batch size derived from an estimated per-row memory footprint,
    /// bounded by this budget in bytes.
    MemoryBased(usize), // byte budget
    /// Batch size tuned from observed throughput, starting at
    /// `initial_size` and never growing past `max_size`.
    Adaptive { initial_size: usize, max_size: usize },
}

impl Default for BatchStrategy {
    fn default() -> Self {
        BatchStrategy::FixedSize(1000)
    }
}

/// Splits row data into batches, feeds them to a caller-supplied
/// callback, and tracks per-batch throughput.
pub struct BatchProcessor {
    /// Strategy used to size batches.
    strategy: BatchStrategy,
    /// Batch size currently in effect (mutated by the adaptive strategy).
    current_batch_size: usize,
    /// Rolling window of per-batch throughput samples.
    performance_history: Vec<BatchPerformance>,
}

/// Throughput measurement for a single processed batch.
#[derive(Debug, Clone)]
struct BatchPerformance {
    /// Number of rows in the batch.
    batch_size: usize,
    /// Wall-clock time spent in the processing callback, in milliseconds.
    processing_time_ms: u64,
    /// Derived throughput; recorded as 0.0 when the batch finished in
    /// under one millisecond.
    rows_per_second: f64,
}

impl BatchProcessor {
    /// Creates a processor whose starting batch size is derived from
    /// `strategy`.
    pub fn new(strategy: BatchStrategy) -> Self {
        let initial_size = match &strategy {
            BatchStrategy::FixedSize(size) => *size,
            // Memory-based sizing needs a data sample, so start from a
            // reasonable default until the first `process_batches` call.
            BatchStrategy::MemoryBased(_) => 1000,
            BatchStrategy::Adaptive { initial_size, .. } => *initial_size,
        };

        Self {
            strategy,
            current_batch_size: initial_size,
            performance_history: Vec::new(),
        }
    }

    /// Splits `data` into batches and invokes `processor` once per batch.
    ///
    /// Per-batch timing is recorded, and under the adaptive strategy the
    /// batch size used on subsequent calls is tuned from that history.
    ///
    /// # Errors
    /// Propagates the first error returned by `processor`; remaining
    /// batches are not processed.
    pub fn process_batches<F>(&mut self, data: Vec<HashMap<String, Value>>, mut processor: F) -> Result<()>
    where
        F: FnMut(&[HashMap<String, Value>]) -> Result<()>,
    {
        if data.is_empty() {
            return Ok(());
        }

        let batch_size = self.resolve_batch_size(&data)?;

        // Borrow fixed-size windows of the input instead of deep-cloning
        // rows into intermediate Vecs; the last chunk holds the remainder.
        for batch in data.chunks(batch_size) {
            let start_time = Instant::now();

            processor(batch)?;

            let processing_time = start_time.elapsed().as_millis() as u64;
            self.record_performance(batch.len(), processing_time);

            // Adaptive strategy: retune the size used for the next call.
            if matches!(self.strategy, BatchStrategy::Adaptive { .. }) {
                self.adjust_batch_size();
            }
        }

        Ok(())
    }

    /// Determines the batch size for this run according to the strategy.
    fn resolve_batch_size(&self, data: &[HashMap<String, Value>]) -> Result<usize> {
        let size = match &self.strategy {
            BatchStrategy::FixedSize(size) => *size,
            BatchStrategy::MemoryBased(max_bytes) => {
                self.calculate_memory_based_batch_size(data, *max_bytes)?
            },
            BatchStrategy::Adaptive { .. } => self.current_batch_size,
        };

        // `chunks` panics on 0; a zero-sized strategy degrades to
        // single-row batches, matching the previous accumulate-and-flush
        // behavior.
        Ok(size.max(1))
    }

    /// Computes how many rows fit into a `max_bytes` memory budget,
    /// based on an estimate taken from the first row.
    fn calculate_memory_based_batch_size(&self, data: &[HashMap<String, Value>], max_bytes: usize) -> Result<usize> {
        if data.is_empty() {
            return Ok(1000);
        }

        // Assume the first row is representative of the whole data set.
        let estimated_row_size = self.estimate_row_memory_size(&data[0]);

        if estimated_row_size == 0 {
            // Avoid division by zero for rows of empty/null values.
            return Ok(1000);
        }

        // Keep the result within a sane 1..=10_000 window.
        Ok((max_bytes / estimated_row_size).clamp(1, 10_000))
    }

    /// Roughly estimates the in-memory footprint of one row, in bytes.
    ///
    /// These are heuristics, not exact measurements: numbers count as
    /// 8 bytes, and nested arrays/objects use flat per-element guesses.
    fn estimate_row_memory_size(&self, row: &HashMap<String, Value>) -> usize {
        row.iter()
            .map(|(key, value)| {
                key.len()
                    + match value {
                        Value::String(s) => s.len(),
                        Value::Number(_) => 8,
                        Value::Bool(_) => 1,
                        Value::Array(arr) => arr.len() * 50,
                        Value::Object(obj) => obj.len() * 100,
                        Value::Null => 0,
                    }
            })
            .sum()
    }

    /// Appends a throughput sample to the bounded history window.
    fn record_performance(&mut self, batch_size: usize, processing_time_ms: u64) {
        // Sub-millisecond batches are recorded as 0 rows/s rather than
        // dividing by zero.
        let rows_per_second = if processing_time_ms > 0 {
            (batch_size as f64 * 1000.0) / processing_time_ms as f64
        } else {
            0.0
        };

        self.performance_history.push(BatchPerformance {
            batch_size,
            processing_time_ms,
            rows_per_second,
        });

        // Cap the history at 100 entries, dropping the oldest first.
        if self.performance_history.len() > 100 {
            self.performance_history.remove(0);
        }
    }

    /// Adjusts `current_batch_size` from the last three throughput
    /// samples (adaptive strategy only).
    fn adjust_batch_size(&mut self) {
        if let BatchStrategy::Adaptive { max_size, .. } = &self.strategy {
            if self.performance_history.len() < 3 {
                return;
            }

            let recent = &self.performance_history[self.performance_history.len() - 3..];

            // Throughput monotonically improving: grow by 10%, capped at
            // the strategy's `max_size`.
            if recent.windows(2).all(|w| w[1].rows_per_second >= w[0].rows_per_second) {
                self.current_batch_size = (self.current_batch_size * 110 / 100).min(*max_size);
            }
            // Throughput monotonically degrading: shrink by 10%, with a
            // floor of 100 rows.
            else if recent.windows(2).all(|w| w[1].rows_per_second < w[0].rows_per_second) {
                self.current_batch_size = (self.current_batch_size * 90 / 100).max(100);
            }
        }
    }

    /// Batch size currently in effect.
    pub fn current_batch_size(&self) -> usize {
        self.current_batch_size
    }

    /// Aggregates the recorded history into summary statistics.
    pub fn get_performance_stats(&self) -> BatchPerformanceStats {
        if self.performance_history.is_empty() {
            return BatchPerformanceStats::default();
        }

        let total_rows: usize = self.performance_history.iter().map(|p| p.batch_size).sum();
        let total_time: u64 = self.performance_history.iter().map(|p| p.processing_time_ms).sum();
        let avg_rows_per_second = self.performance_history.iter()
            .map(|p| p.rows_per_second)
            .sum::<f64>() / self.performance_history.len() as f64;

        let max_rows_per_second = self.performance_history.iter()
            .map(|p| p.rows_per_second)
            .fold(0.0, f64::max);

        let min_rows_per_second = self.performance_history.iter()
            .map(|p| p.rows_per_second)
            .fold(f64::INFINITY, f64::min);

        BatchPerformanceStats {
            total_batches: self.performance_history.len(),
            total_rows,
            total_processing_time_ms: total_time,
            avg_rows_per_second,
            max_rows_per_second,
            min_rows_per_second,
            current_batch_size: self.current_batch_size,
        }
    }
}

/// Summary statistics aggregated over the recorded batch history.
#[derive(Debug, Clone, Default)]
pub struct BatchPerformanceStats {
    /// Number of batches represented in the history.
    pub total_batches: usize,
    /// Total rows across all recorded batches.
    pub total_rows: usize,
    /// Sum of per-batch processing times, in milliseconds.
    pub total_processing_time_ms: u64,
    /// Mean of the per-batch throughput values.
    pub avg_rows_per_second: f64,
    /// Highest per-batch throughput observed.
    pub max_rows_per_second: f64,
    /// Lowest per-batch throughput observed.
    pub min_rows_per_second: f64,
    /// Batch size in effect when the stats were taken.
    pub current_batch_size: usize,
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Builds `count` rows, each with a numeric `id` and a `name` string.
    fn create_test_data(count: usize) -> Vec<HashMap<String, Value>> {
        let mut rows = Vec::with_capacity(count);
        for i in 0..count {
            let mut row = HashMap::new();
            row.insert("id".to_string(), Value::Number(serde_json::Number::from(i)));
            row.insert("name".to_string(), Value::String(format!("User{}", i)));
            rows.push(row);
        }
        rows
    }

    #[test]
    fn test_fixed_size_batch_processor() {
        let mut processor = BatchProcessor::new(BatchStrategy::FixedSize(3));

        let mut batch_sizes = Vec::new();
        let outcome = processor.process_batches(create_test_data(10), |batch| {
            batch_sizes.push(batch.len());
            Ok(())
        });

        assert!(outcome.is_ok());
        // Three full batches plus one remainder row.
        assert_eq!(batch_sizes, vec![3, 3, 3, 1]);
    }

    #[test]
    fn test_memory_based_batch_processor() {
        let mut processor = BatchProcessor::new(BatchStrategy::MemoryBased(1024));

        let mut invocations = 0;
        let outcome = processor.process_batches(create_test_data(5), |_batch| {
            invocations += 1;
            Ok(())
        });

        assert!(outcome.is_ok());
        assert!(invocations > 0);
    }

    #[test]
    fn test_adaptive_batch_processor() {
        let mut processor = BatchProcessor::new(BatchStrategy::Adaptive {
            initial_size: 2,
            max_size: 10,
        });

        // The starting size comes straight from `initial_size`.
        assert_eq!(processor.current_batch_size(), 2);

        let outcome = processor.process_batches(create_test_data(20), |_batch| {
            // Simulate measurable work so timings are non-zero.
            std::thread::sleep(std::time::Duration::from_millis(1));
            Ok(())
        });
        assert!(outcome.is_ok());

        let stats = processor.get_performance_stats();
        assert!(stats.total_batches > 0);
        assert!(stats.total_rows > 0);
    }

    #[test]
    fn test_estimate_row_memory_size() {
        let processor = BatchProcessor::new(BatchStrategy::default());

        let mut row = HashMap::new();
        row.insert("id".to_string(), Value::Number(serde_json::Number::from(1)));
        row.insert("name".to_string(), Value::String("test".to_string()));

        assert!(processor.estimate_row_memory_size(&row) > 0);
    }

    #[test]
    fn test_performance_stats() {
        let mut processor = BatchProcessor::new(BatchStrategy::FixedSize(2));

        let outcome = processor.process_batches(create_test_data(5), |_batch| {
            std::thread::sleep(std::time::Duration::from_millis(10));
            Ok(())
        });
        assert!(outcome.is_ok());

        let stats = processor.get_performance_stats();
        assert!(stats.total_batches > 0);
        assert_eq!(stats.total_rows, 5);
        assert!(stats.avg_rows_per_second > 0.0);
    }
}