//! 资源调度器核心实现
//! 
//! 提供智能的沙箱分配和调度功能

use std::sync::Arc;
use std::time::{Duration, Instant};
use std::collections::HashMap;
use tokio::sync::RwLock;
use anyhow::{Result, anyhow};
use tracing::{info, warn, error, debug};
use uuid::Uuid;
use serde::{Serialize, Deserialize};

use crate::types::{Language, Priority, ResourceRequirements, SandboxState, PoolType};
use crate::sandbox_manager::pool::PoolManager;
use crate::scheduler::policies::{SchedulingPolicy, SchedulingContext, PolicyDecision};
use crate::scheduler::metrics::SchedulerMetrics;
use crate::scheduler::placement::{PlacementEngine, PlacementStrategy};
use crate::scheduler::affinity::{AffinityConstraints, AffinityRule};

/// A request asking the scheduler to allocate a sandbox.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SchedulingRequest {
    /// Unique request ID.
    pub request_id: Uuid,
    /// Target language runtime.
    pub language: Language,
    /// Language version string (e.g. "3.9").
    pub version: String,
    /// Resources the sandbox must provide.
    pub resource_requirements: ResourceRequirements,
    /// Scheduling priority.
    pub priority: Priority,
    /// Request timeout in seconds.
    pub timeout_secs: u64,
    /// Optional (anti-)affinity constraints.
    pub affinity_constraints: Option<AffinityConstraints>,
    /// Preferred scheduling-policy name, if the caller has one.
    pub policy_preference: Option<String>,
    /// Arbitrary caller-supplied metadata.
    pub metadata: HashMap<String, String>,
    /// Submission timestamp. `Instant` is not serializable, so it is
    /// skipped on serialization and reset to "now" on deserialization.
    #[serde(skip, default = "std::time::Instant::now")]
    pub request_time: Instant,
}

/// Outcome of one scheduling attempt.
#[derive(Debug, Clone)]
pub struct SchedulingResult {
    /// ID of the request this result belongs to.
    pub request_id: Uuid,
    /// Whether scheduling succeeded.
    pub success: bool,
    /// ID of the allocated sandbox (present on success).
    pub sandbox_id: Option<Uuid>,
    /// Pool the sandbox was taken from (present on success).
    pub pool_type: Option<PoolType>,
    /// Name of the policy that made the decision.
    pub used_policy: String,
    /// Wall-clock time spent scheduling this request.
    pub scheduling_time: Duration,
    /// Human-readable failure reason (present on failure).
    pub error_message: Option<String>,
    /// Information about the node chosen, when available.
    pub node_info: Option<NodeInfo>,
    /// Full decision returned by the policy.
    pub decision_details: PolicyDecision,
}

/// Resource and load information for a single node.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeInfo {
    /// Node identifier.
    pub node_id: String,
    /// Network address of the node.
    pub address: String,
    /// Resources still available on the node.
    pub available_resources: ResourceRequirements,
    /// Resources already in use on the node.
    pub used_resources: ResourceRequirements,
    /// Load as a percentage (e.g. 25.0 = 25%).
    pub load_percentage: f64,
    /// Free-form node labels (zone, instance type, ...).
    pub labels: HashMap<String, String>,
}

/// The resource scheduler: accepts scheduling requests, runs them through
/// pluggable policies in a background loop, and records results, history
/// and metrics.
pub struct ResourceScheduler {
    /// Registered scheduling policies, searched by name.
    // NOTE(review): `start_scheduling_loop` lends this Vec to a spawned
    // task via an unsound `'static` transmute; storing it as
    // `Arc<Vec<Box<dyn SchedulingPolicy>>>` would make that sound.
    policies: Vec<Box<dyn SchedulingPolicy>>,
    /// Policy name used when a request has no preference.
    default_policy: String,
    /// Sandbox pool manager (allocation backend).
    pool_manager: Arc<PoolManager>,
    /// Placement engine deciding where sandboxes land.
    placement_engine: Arc<PlacementEngine>,
    /// Metrics collector.
    metrics: Arc<SchedulerMetrics>,
    /// Queue of requests awaiting scheduling.
    pending_requests: Arc<RwLock<Vec<SchedulingRequest>>>,
    /// Completed scheduling results keyed by request ID (size-capped).
    scheduling_history: Arc<RwLock<HashMap<Uuid, SchedulingResult>>>,
    /// Cached per-node resource/load information.
    nodes_info: Arc<RwLock<HashMap<String, NodeInfo>>>,
    /// Scheduler configuration.
    config: SchedulerConfig,
}

/// Tunable scheduler settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SchedulerConfig {
    /// Scheduling-loop tick interval in milliseconds.
    pub scheduling_interval_ms: u64,
    /// Maximum number of requests allowed in the pending queue.
    pub max_queue_size: usize,
    /// Per-request scheduling timeout in seconds.
    pub scheduling_timeout_secs: u64,
    /// Enable predictive scheduling.
    pub enable_predictive_scheduling: bool,
    /// Enable anti-affinity constraints.
    pub enable_anti_affinity: bool,
    /// Maximum retry attempts per request.
    pub max_retries: u32,
    /// Resource overcommit ratio (1.2 = 20% overcommit).
    pub resource_overcommit_ratio: f64,
    /// Load threshold above which balancing kicks in (0.0-1.0).
    pub load_balance_threshold: f64,
}

impl Default for SchedulerConfig {
    fn default() -> Self {
        Self {
            scheduling_interval_ms: 100,
            max_queue_size: 10000,
            scheduling_timeout_secs: 30,
            enable_predictive_scheduling: true,
            enable_anti_affinity: false,
            max_retries: 3,
            resource_overcommit_ratio: 1.2,
            load_balance_threshold: 0.8,
        }
    }
}

/// Builder for [`ResourceScheduler`].
pub struct SchedulerBuilder {
    // Policies to register with the scheduler.
    policies: Vec<Box<dyn SchedulingPolicy>>,
    // Name of the fallback policy.
    default_policy: String,
    // Scheduler configuration.
    config: SchedulerConfig,
    // Strategy handed to the placement engine.
    placement_strategy: PlacementStrategy,
}

impl SchedulerBuilder {
    /// 创建新的构建器
    pub fn new() -> Self {
        Self {
            policies: Vec::new(),
            default_policy: "resource_aware".to_string(),
            config: SchedulerConfig::default(),
            placement_strategy: PlacementStrategy::BestFit,
        }
    }

    /// 添加调度策略
    pub fn add_policy(mut self, policy: Box<dyn SchedulingPolicy>) -> Self {
        self.policies.push(policy);
        self
    }

    /// 设置默认策略
    pub fn default_policy(mut self, policy: &str) -> Self {
        self.default_policy = policy.to_string();
        self
    }

    /// 设置配置
    pub fn config(mut self, config: SchedulerConfig) -> Self {
        self.config = config;
        self
    }

    /// 设置放置策略
    pub fn placement_strategy(mut self, strategy: PlacementStrategy) -> Self {
        self.placement_strategy = strategy;
        self
    }

    /// 构建调度器
    pub async fn build(self, pool_manager: Arc<PoolManager>) -> Result<ResourceScheduler> {
        let placement_engine = Arc::new(PlacementEngine::new(self.placement_strategy));
        let metrics = Arc::new(SchedulerMetrics::new());

        info!("创建资源调度器，策略数量: {}", self.policies.len());

        Ok(ResourceScheduler {
            policies: self.policies,
            default_policy: self.default_policy,
            pool_manager,
            placement_engine,
            metrics,
            pending_requests: Arc::new(RwLock::new(Vec::new())),
            scheduling_history: Arc::new(RwLock::new(HashMap::new())),
            nodes_info: Arc::new(RwLock::new(HashMap::new())),
            config: self.config,
        })
    }
}

impl Default for SchedulerBuilder {
    fn default() -> Self {
        Self::new()
    }
}

impl ResourceScheduler {
    /// Submit a scheduling request to the pending queue.
    ///
    /// Returns the request ID on success, or an error when the queue is
    /// already at `max_queue_size`.
    pub async fn submit_request(&self, request: SchedulingRequest) -> Result<Uuid> {
        let request_id = request.request_id;

        // BUG FIX: the capacity check and the push must happen under ONE
        // write lock. The original checked the size under a read lock and
        // then re-acquired a write lock to push, leaving a TOCTOU window
        // in which concurrent submitters could grow the queue past
        // `max_queue_size`.
        {
            let mut queue = self.pending_requests.write().await;
            if queue.len() >= self.config.max_queue_size {
                return Err(anyhow!("调度队列已满，无法接受新请求"));
            }
            queue.push(request);
        }

        // Count the submission for statistics.
        self.metrics.increment_submitted_requests().await;

        debug!("调度请求已提交: {}", request_id);
        Ok(request_id)
    }

    /// Start the scheduler: spawn the scheduling loop, start metrics
    /// collection, and spawn the periodic node-info updater.
    pub async fn start(&self) -> Result<()> {
        info!("启动资源调度器");

        // Spawn the background scheduling-loop task.
        self.start_scheduling_loop().await;

        // Start the metrics collector; propagates its error if any.
        self.metrics.start().await?;

        // Spawn the periodic node-information refresher.
        self.start_node_info_updater().await;

        info!("资源调度器启动完成");
        Ok(())
    }

    /// Stop the scheduler by shutting down metrics collection.
    ///
    /// NOTE(review): the scheduling-loop and node-info tasks spawned by
    /// `start` are detached (their `JoinHandle`s are not kept), so they
    /// keep running after this call — confirm whether that is intended.
    pub async fn stop(&self) -> Result<()> {
        info!("停止资源调度器");
        self.metrics.stop().await?;
        Ok(())
    }

    /// Spawn the background scheduling loop: every `scheduling_interval_ms`
    /// it drains up to 10 pending requests, schedules each one, records the
    /// result in the size-capped history map, and updates metrics.
    async fn start_scheduling_loop(&self) {
        let pending_requests = self.pending_requests.clone();
        let scheduling_history = self.scheduling_history.clone();
        let pool_manager = self.pool_manager.clone();
        let placement_engine = self.placement_engine.clone();
        let metrics = self.metrics.clone();
        // NOTE(review): this transmute forges a `'static` reference to
        // `self.policies` so the spawned task can capture it. That is
        // unsound: if the `ResourceScheduler` is dropped while the task is
        // still running, the task reads through a dangling reference
        // (undefined behavior). The sound fix is to store the policies in
        // an `Arc<Vec<_>>` and clone the handle here — flagged for a
        // follow-up change since it requires altering the struct field.
        let policies = unsafe { std::mem::transmute::<&Vec<Box<dyn SchedulingPolicy>>, &'static Vec<Box<dyn SchedulingPolicy>>>(&self.policies) };
        let default_policy = self.default_policy.clone();
        let config = self.config.clone();

        tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_millis(config.scheduling_interval_ms));
            
            loop {
                interval.tick().await;

                // Drain a batch of at most 10 pending requests.
                let requests = {
                    let mut queue = pending_requests.write().await;
                    let batch_size = queue.len().min(10);
                    let batch = queue.drain(..batch_size).collect::<Vec<_>>();
                    batch
                };

                if requests.is_empty() {
                    continue;
                }

                // Schedule the batch sequentially, timing each request.
                for request in requests {
                    let start_time = Instant::now();
                    
                    let result = Self::schedule_request_internal(
                        request.clone(),
                        &pool_manager,
                        &placement_engine,
                        &policies,
                        &default_policy,
                        &config,
                    ).await;

                    let scheduling_time = start_time.elapsed();
                    
                    // Stamp the measured scheduling time onto the result.
                    let final_result = SchedulingResult {
                        request_id: request.request_id,
                        scheduling_time,
                        ..result
                    };

                    // Record the outcome in the scheduling history.
                    {
                        let mut history = scheduling_history.write().await;
                        history.insert(request.request_id, final_result.clone());
                        
                        // Cap the history at 10k entries by evicting 1000.
                        // NOTE(review): HashMap iteration order is arbitrary,
                        // so the evicted entries are NOT actually the oldest
                        // ones despite the variable name.
                        if history.len() > 10000 {
                            let oldest_keys: Vec<_> = history.keys().take(1000).cloned().collect();
                            for key in oldest_keys {
                                history.remove(&key);
                            }
                        }
                    }

                    // Update success/failure counters and timing metrics.
                    if final_result.success {
                        metrics.increment_successful_schedulings().await;
                        metrics.record_scheduling_time(scheduling_time).await;
                    } else {
                        metrics.increment_failed_schedulings().await;
                    }

                    debug!(
                        "调度请求完成: {} -> 成功: {}, 用时: {:?}",
                        request.request_id,
                        final_result.success,
                        scheduling_time
                    );
                }
            }
        });
    }

    /// Core scheduling routine: resolves a policy (explicit preference →
    /// configured default → first registered), builds a scheduling context,
    /// asks the policy for a decision, and translates that decision into a
    /// `SchedulingResult`.
    async fn schedule_request_internal(
        request: SchedulingRequest,
        pool_manager: &Arc<PoolManager>,
        placement_engine: &Arc<PlacementEngine>,
        policies: &[Box<dyn SchedulingPolicy>],
        default_policy: &str,
        config: &SchedulerConfig,
    ) -> SchedulingResult {
        use crate::scheduler::policies::SchedulingAction;

        // Resolve which policy to use, falling back along the chain.
        let wanted = request.policy_preference.as_deref().unwrap_or(default_policy);
        let selected = policies
            .iter()
            .find(|p| p.name() == wanted)
            .or_else(|| policies.iter().find(|p| p.name() == default_policy))
            .or_else(|| policies.first());

        let policy = match selected {
            Some(p) => p,
            None => {
                // No policy registered at all — fail the request outright.
                return SchedulingResult {
                    request_id: request.request_id,
                    success: false,
                    sandbox_id: None,
                    pool_type: None,
                    used_policy: "none".to_string(),
                    scheduling_time: Duration::from_millis(0),
                    error_message: Some("没有可用的调度策略".to_string()),
                    node_info: None,
                    decision_details: PolicyDecision::default(),
                };
            }
        };

        // Build the context handed to the policy for its decision.
        let context = SchedulingContext {
            request: request.clone(),
            available_resources: HashMap::new(), // TODO: obtain from pool_manager
            current_load: 0.0,                   // TODO: compute the real load
            node_constraints: Vec::new(),
        };

        let decision = policy.make_decision(&context).await;

        // Map the policy's action onto result fields; destructuring into a
        // tuple here avoids holding a borrow of `decision` when it is moved
        // into the result below.
        let (success, sandbox_id, pool_type, error_message) = match &decision.action {
            SchedulingAction::Allocate { pool_type, node_preference: _ } => {
                (true, Some(uuid::Uuid::new_v4()), Some(*pool_type), None)
            }
            SchedulingAction::Queue { estimated_wait_time } => (
                false,
                None,
                None,
                Some(format!("请求排队，预计等待时间: {:?}", estimated_wait_time)),
            ),
            SchedulingAction::Reject { reason } => {
                (false, None, None, Some(reason.to_string()))
            }
        };

        SchedulingResult {
            request_id: request.request_id,
            success,
            sandbox_id,
            pool_type,
            used_policy: policy.name().to_string(),
            scheduling_time: Duration::from_millis(0),
            error_message,
            node_info: None, // not populated yet (matches prior behavior)
            decision_details: decision,
        }
    }

    /// 尝试其他池类型
    async fn try_alternative_pools(
        request: &SchedulingRequest,
        _pool_manager: &Arc<PoolManager>, // 暂时不使用
        policy_name: &str,
        decision: PolicyDecision,
    ) -> SchedulingResult {
        // 按优先级尝试不同池类型 - 简化实现
        let pool_type = match request.priority {
            Priority::Critical => PoolType::Hot,
            Priority::High => PoolType::Hot,
            Priority::Normal => PoolType::Warm,
            Priority::Low => PoolType::Cold,
        };

        // 模拟成功分配
        SchedulingResult {
            request_id: request.request_id,
            success: true,
            sandbox_id: Some(uuid::Uuid::new_v4()),
            pool_type: Some(pool_type),
            used_policy: policy_name.to_string(),
            scheduling_time: Duration::from_millis(0),
            error_message: None,
            node_info: None,
            decision_details: decision,
        }
    }

    /// Spawn a background task that refreshes the node-info cache every
    /// 10 seconds. Currently inserts a single hard-coded mock node; real
    /// data is meant to come from the pool manager eventually.
    async fn start_node_info_updater(&self) {
        let nodes_info = self.nodes_info.clone();
        // Cloned for the eventual real implementation; unused for now.
        let pool_manager = self.pool_manager.clone();

        tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(10));
            
            loop {
                interval.tick().await;
                
                // TODO: fetch real node information from pool_manager.
                // Everything below is mock data for a single node.
                let mut nodes = nodes_info.write().await;
                nodes.insert("node1".to_string(), NodeInfo {
                    node_id: "node1".to_string(),
                    address: "127.0.0.1:8080".to_string(),
                    available_resources: ResourceRequirements {
                        memory_mb: 4096,
                        cpu_cores: 2,
                        disk_mb: 10240,
                        network_enabled: true,
                        network_bandwidth_mbps: 100,
                    },
                    used_resources: ResourceRequirements {
                        memory_mb: 1024,
                        cpu_cores: 0,
                        disk_mb: 2048,
                        network_enabled: true,
                        network_bandwidth_mbps: 10,
                    },
                    load_percentage: 25.0,
                    labels: HashMap::from([
                        ("zone".to_string(), "us-west-1a".to_string()),
                        ("instance-type".to_string(), "m5.large".to_string()),
                    ]),
                });
            }
        });
    }

    /// Look up the recorded result of a previously scheduled request.
    /// Returns `None` when the request is unknown or has been evicted
    /// from the history.
    pub async fn get_scheduling_result(&self, request_id: Uuid) -> Option<SchedulingResult> {
        self.scheduling_history
            .read()
            .await
            .get(&request_id)
            .cloned()
    }

    /// Assemble a statistics snapshot from the metrics collector plus the
    /// current queue and history sizes.
    pub async fn get_statistics(&self) -> SchedulingStatistics {
        let snapshot = self.metrics.get_snapshot().await;
        let current_queue_size = self.pending_requests.read().await.len();
        let history_size = self.scheduling_history.read().await.len();

        SchedulingStatistics {
            total_requests: snapshot.total_requests,
            successful_schedulings: snapshot.successful_schedulings,
            failed_schedulings: snapshot.failed_schedulings,
            average_scheduling_time: snapshot.average_scheduling_time,
            success_rate: snapshot.success_rate,
            current_queue_size,
            history_size,
        }
    }

    /// Hand out a shared handle to the scheduler's metrics collector.
    pub fn metrics(&self) -> Arc<SchedulerMetrics> {
        Arc::clone(&self.metrics)
    }
}

/// Point-in-time scheduler statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SchedulingStatistics {
    /// Total number of requests submitted.
    pub total_requests: u64,
    /// Number of requests scheduled successfully.
    pub successful_schedulings: u64,
    /// Number of requests that failed to schedule.
    pub failed_schedulings: u64,
    /// Mean time spent scheduling a request.
    pub average_scheduling_time: Duration,
    /// Requests currently waiting in the pending queue.
    pub current_queue_size: usize,
    /// Number of entries in the scheduling history.
    pub history_size: usize,
    /// Fraction of schedulings that succeeded.
    pub success_rate: f64,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::sandbox_manager::pool::PoolManager;
    use crate::Config;

    /// The builder's default policy name must be "resource_aware".
    #[tokio::test]
    async fn test_scheduler_creation() {
        let config = Config::default();
        let pool_manager = Arc::new(PoolManager::new(config).await.unwrap());

        let scheduler = SchedulerBuilder::new()
            .build(pool_manager)
            .await
            .unwrap();

        assert_eq!(scheduler.default_policy, "resource_aware");
    }

    /// Submitting a well-formed request returns its (non-nil) ID.
    #[tokio::test]
    async fn test_submit_request() {
        let config = Config::default();
        let pool_manager = Arc::new(PoolManager::new(config).await.unwrap());

        let scheduler = SchedulerBuilder::new()
            .build(pool_manager)
            .await
            .unwrap();

        let request = SchedulingRequest {
            request_id: Uuid::new_v4(),
            language: Language::Python,
            version: "3.9".to_string(),
            // BUG FIX: the literal previously omitted `network_enabled`,
            // which `ResourceRequirements` requires (the node-info updater
            // in this file populates it), so this test failed to compile.
            resource_requirements: ResourceRequirements {
                memory_mb: 512,
                cpu_cores: 1,
                disk_mb: 1024,
                network_enabled: true,
                network_bandwidth_mbps: 10,
            },
            priority: Priority::Normal,
            timeout_secs: 300,
            affinity_constraints: None,
            policy_preference: None,
            metadata: HashMap::new(),
            request_time: Instant::now(),
        };

        let request_id = scheduler.submit_request(request).await.unwrap();
        assert!(!request_id.is_nil());
    }
}