//! 核心引擎模块
//!
//! 提供基于Rayon的并行流水线，生成策略控制等核心功能

use crate::error::Result;
use crate::generators::Generator;
use rayon::prelude::*;
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Instant;

/// Strategy controlling how a batch of records is produced.
#[derive(Debug, Clone, Copy)]
pub enum GenerationStrategy {
    /// Generate records one at a time on the calling thread.
    Sequential,
    /// Generate records in parallel chunks via Rayon ("random" refers to
    /// the data content, not the ordering of the output).
    Random,
    /// Generate half of the batch in parallel and the remainder sequentially.
    Mixed,
}

/// Configuration for the generation engine.
#[derive(Debug, Clone)]
pub struct GenConfig {
    /// Number of records per parallel work chunk.
    pub batch_size: usize,
    /// Locale used for generated data.
    pub locale: crate::Language,
    /// Probability (0.0-1.0) that a record is emitted as JSON null.
    pub null_probability: f32,
    /// Strategy controlling sequential/parallel generation.
    pub strategy: GenerationStrategy,
    /// Worker-thread count; 0 means auto-detect.
    pub parallelism: usize,
}

impl Default for GenConfig {
    fn default() -> Self {
        Self {
            batch_size: 1000,
            locale: crate::Language::ZhCN,
            null_probability: 0.05,
            strategy: GenerationStrategy::Random,
            parallelism: 0, // 自动检测
        }
    }
}

/// Core generation engine: holds the configuration, registered generators,
/// and shared runtime metrics.
pub struct CoreEngine {
    /// Active generation configuration.
    config: GenConfig,
    /// Named generators added via `register_generator`.
    /// NOTE(review): not consulted by `generate_single_record` yet — confirm intent.
    generators: HashMap<String, Arc<dyn Generator>>,
    /// Shared counters/timing, also handed out via `metrics()`.
    metrics: Arc<GeneratorMetrics>,
}

/// Runtime statistics for the generation engine.
///
/// Counters use relaxed atomics; the accumulated duration sits behind a
/// `Mutex` because `Duration` has no atomic representation.
#[derive(Default)]
pub struct GeneratorMetrics {
    /// Total records generated successfully.
    generated_count: std::sync::atomic::AtomicUsize,
    /// Total failed batch generations.
    error_count: std::sync::atomic::AtomicUsize,
    /// Sum of batch durations passed to `record_success`.
    total_duration: std::sync::Mutex<std::time::Duration>,
}

impl GeneratorMetrics {
    /// Record a successful generation of `count` records that took `duration`.
    pub fn record_success(&self, count: usize, duration: std::time::Duration) {
        self.generated_count
            .fetch_add(count, std::sync::atomic::Ordering::Relaxed);
        // Panics only if another thread panicked while holding the lock.
        let mut total = self.total_duration.lock().unwrap();
        *total += duration;
    }

    /// Record one failed batch generation.
    pub fn record_error(&self) {
        self.error_count
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    /// Total number of records generated so far.
    pub fn generated_count(&self) -> usize {
        self.generated_count
            .load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Total number of failed batch generations so far.
    pub fn error_count(&self) -> usize {
        self.error_count.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Average time per generated record (accumulated duration / record count).
    ///
    /// Returns `Duration::ZERO` when nothing has been generated yet.
    pub fn average_latency(&self) -> std::time::Duration {
        let total = *self.total_duration.lock().unwrap();
        let count = self.generated_count();
        if count > 0 {
            // BUGFIX: the previous `count as u32` cast silently truncated
            // counts above u32::MAX, yielding a wildly wrong average.
            // Saturate instead; for such huge counts the result is ~zero
            // anyway, which is the honest answer.
            total / u32::try_from(count).unwrap_or(u32::MAX)
        } else {
            std::time::Duration::ZERO
        }
    }

    /// Export the metrics in Prometheus text exposition format.
    pub fn export_prometheus(&self) -> String {
        format!(
            r#"# HELP dataforge_generated_total Total number of generated records
# TYPE dataforge_generated_total counter
dataforge_generated_total {}

# HELP dataforge_errors_total Total number of generation errors
# TYPE dataforge_errors_total counter
dataforge_errors_total {}

# HELP dataforge_average_latency_seconds Average generation latency in seconds
# TYPE dataforge_average_latency_seconds gauge
dataforge_average_latency_seconds {:.6}
"#,
            self.generated_count(),
            self.error_count(),
            self.average_latency().as_secs_f64()
        )
    }
}

impl CoreEngine {
    /// Create a new engine from `config`.
    ///
    /// When `config.parallelism > 0`, attempts to size the global Rayon
    /// thread pool accordingly. The global pool can only be configured once
    /// per process; later attempts merely log a warning.
    pub fn new(config: GenConfig) -> Self {
        // FIX: previously `build_global` was called even for
        // `parallelism == 0` (auto-detect), which pointlessly locked in the
        // default pool and made any later *explicit* configuration fail.
        // Rayon auto-detects on its own, so only configure on explicit request.
        if config.parallelism > 0 {
            if let Err(e) = rayon::ThreadPoolBuilder::new()
                .num_threads(config.parallelism)
                .build_global()
            {
                log::warn!("Failed to configure Rayon thread pool: {}", e);
            }
        }

        Self {
            config,
            generators: HashMap::new(),
            metrics: Arc::new(GeneratorMetrics::default()),
        }
    }

    /// Register a named generator, replacing any previous one under `name`.
    pub fn register_generator<T: Generator + 'static>(&mut self, name: String, generator: T) {
        self.generators.insert(name, Arc::new(generator));
    }

    /// Generate `count` records using the configured strategy, recording
    /// success/error metrics and the total elapsed time.
    pub fn generate_batch(&self, count: usize) -> Result<Vec<Value>> {
        let start = Instant::now();

        let result = match self.config.strategy {
            GenerationStrategy::Sequential => self.generate_sequential(count),
            GenerationStrategy::Random => self.generate_parallel(count),
            GenerationStrategy::Mixed => self.generate_mixed(count),
        };

        let duration = start.elapsed();

        match &result {
            Ok(data) => {
                self.metrics.record_success(data.len(), duration);
                log::debug!("Generated {} records in {:?}", data.len(), duration);
            }
            Err(_) => {
                self.metrics.record_error();
                log::error!("Failed to generate batch of {} records", count);
            }
        }

        result
    }

    /// Generate `count` records one at a time on the calling thread,
    /// stopping at the first error.
    fn generate_sequential(&self, count: usize) -> Result<Vec<Value>> {
        (0..count)
            .map(|i| self.generate_single_record(i))
            .collect()
    }

    /// Generate `count` records in parallel, chunked by `batch_size`.
    fn generate_parallel(&self, count: usize) -> Result<Vec<Value>> {
        // FIX: the old code passed `batch_size.min(count)` to `chunks()`,
        // which panics on a chunk size of 0 — so `count == 0` (reachable via
        // `generate_mixed(1)`) or a zero `batch_size` crashed the engine.
        if count == 0 {
            return Ok(Vec::new());
        }
        let batch_size = self.config.batch_size.clamp(1, count);

        // `par_chunks` borrows the index slice directly, avoiding the old
        // per-chunk `to_vec()` copies.
        let indices: Vec<usize> = (0..count).collect();
        let results: Result<Vec<Vec<Value>>> = indices
            .par_chunks(batch_size)
            .map(|chunk| {
                chunk
                    .iter()
                    .map(|&i| self.generate_single_record(i))
                    .collect::<Result<Vec<_>>>()
            })
            .collect();

        Ok(results?.into_iter().flatten().collect())
    }

    /// Generate half of the batch in parallel and the remainder sequentially.
    fn generate_mixed(&self, count: usize) -> Result<Vec<Value>> {
        let parallel_count = count / 2;
        let sequential_count = count - parallel_count;

        let mut parallel_results = self.generate_parallel(parallel_count)?;
        let mut sequential_results = self.generate_sequential(sequential_count)?;

        parallel_results.append(&mut sequential_results);
        Ok(parallel_results)
    }

    /// Generate one record for `index`.
    ///
    /// NOTE(review): the generators registered in `self.generators` are not
    /// consulted yet; this emits a fixed example structure — confirm intent.
    fn generate_single_record(&self, index: usize) -> Result<Value> {
        use crate::generators::*;

        // Honor the configured null probability.
        if self.should_generate_null() {
            return Ok(Value::Null);
        }

        Ok(serde_json::json!({
            "id": index,
            "uuid": uuid_v4(),
            "name": name::zh_cn_fullname(),
            "email": internet::email(),
            "created_at": datetime::iso8601(),
            "metadata": {
                "generated_by": "dataforge",
                "strategy": format!("{:?}", self.config.strategy),
                "locale": format!("{:?}", self.config.locale)
            }
        }))
    }

    /// True with probability `config.null_probability`, using the
    /// thread-local RNG.
    fn should_generate_null(&self) -> bool {
        use rand::Rng;
        let mut rng = rand::thread_rng();
        rng.gen::<f32>() < self.config.null_probability
    }

    /// Shared handle to the engine's metrics.
    pub fn metrics(&self) -> Arc<GeneratorMetrics> {
        Arc::clone(&self.metrics)
    }

    /// Current configuration.
    pub fn config(&self) -> &GenConfig {
        &self.config
    }

    /// Replace the configuration. Note: an already-built Rayon pool is not
    /// resized; only generation parameters take effect.
    pub fn update_config(&mut self, config: GenConfig) {
        self.config = config;
    }

    /// Reset all counters and the accumulated duration to zero.
    pub fn reset_metrics(&self) {
        self.metrics
            .generated_count
            .store(0, std::sync::atomic::Ordering::Relaxed);
        self.metrics
            .error_count
            .store(0, std::sync::atomic::Ordering::Relaxed);
        *self.metrics.total_duration.lock().unwrap() = std::time::Duration::ZERO;
    }
}

/// Factory for building generator collections.
pub struct GeneratorFactory;

impl GeneratorFactory {
    /// Build the default set of built-in generators (currently empty).
    pub fn create_default_generators() -> HashMap<String, Arc<dyn Generator>> {
        // Built-in generators will be registered here, e.g.:
        // generators.insert("uuid".to_string(), Arc::new(UuidGenerator));
        // generators.insert("name".to_string(), Arc::new(NameGenerator));
        HashMap::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// A new engine exposes its config and starts with zeroed metrics.
    #[test]
    fn test_core_engine_creation() {
        let config = GenConfig::default();
        let engine = CoreEngine::new(config);

        assert_eq!(engine.config().batch_size, 1000);
        assert_eq!(engine.metrics().generated_count(), 0);
    }

    /// A small batch yields exactly `count` records and updates the counter.
    #[test]
    fn test_batch_generation() {
        let config = GenConfig {
            batch_size: 10,
            ..Default::default()
        };
        let engine = CoreEngine::new(config);

        let result = engine.generate_batch(5);
        assert!(result.is_ok());

        let data = result.unwrap();
        assert_eq!(data.len(), 5);
        assert_eq!(engine.metrics().generated_count(), 5);
    }

    /// Parallel ("Random") strategy produces the full requested count.
    #[test]
    fn test_parallel_generation() {
        let config = GenConfig {
            strategy: GenerationStrategy::Random,
            batch_size: 100,
            ..Default::default()
        };
        let engine = CoreEngine::new(config);

        let result = engine.generate_batch(1000);
        assert!(result.is_ok());

        let data = result.unwrap();
        assert_eq!(data.len(), 1000);
    }

    /// Metrics count successes (nulls included) and export in Prometheus format.
    #[test]
    fn test_metrics() {
        let engine = CoreEngine::new(GenConfig::default());

        // Generate some data.
        let _ = engine.generate_batch(100);

        let metrics = engine.metrics();
        assert_eq!(metrics.generated_count(), 100);
        assert_eq!(metrics.error_count(), 0);

        // Check the Prometheus text export.
        let prometheus_output = metrics.export_prometheus();
        assert!(prometheus_output.contains("dataforge_generated_total 100"));
    }

    /// With probability 1.0 every draw must be null.
    #[test]
    fn test_null_probability() {
        let config = GenConfig {
            null_probability: 1.0, // always generate null
            ..Default::default()
        };
        let engine = CoreEngine::new(config);

        // Sample repeatedly; count how many draws come back null.
        let mut null_count = 0;
        for _ in 0..100 {
            if engine.should_generate_null() {
                null_count += 1;
            }
        }

        // gen::<f32>() yields values in [0, 1), always < 1.0, so all 100 are null.
        assert_eq!(null_count, 100);
    }
}
