// NOTE(review): The statements below were free-floating at module scope
// (ahead of the `use` declarations), which is not valid Rust — they appear
// to be a stray paste of the per-worker Lua construction already performed
// inside `OpsPanelCore::new`. Preserved as a comment for reference; delete
// once confirmed redundant.
//
// // Pass configuration at initialization time
// let lua = if config.enable_cross_platform_compat {
//     Lua::new_with(LuaOptions::new().thread_safe(true))
// } else {
//     Lua::new()
// };
use std::{
    collections::HashMap,
    sync::Arc,
    time::{Duration, SystemTime, UNIX_EPOCH},
};

use anyhow::{Context, Result};
use dashmap::DashMap;
use mlua::{Lua, LuaOptions, LuaSerdeExt};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use sysinfo::{System, SystemExt};
use tokio::{
    sync::{broadcast, mpsc, RwLock, Semaphore},
    time::timeout,
};
use tracing::{debug, error, info, instrument, warn};
use uuid::Uuid;

use crate::config::FrameworkConfig;
use crate::core::lua_ext::setup_lua_apis;

/// Events broadcast on the core's event bus (see `subscribe_events`).
/// Serializable so they can be forwarded outside the process.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SystemEvent {
    /// A task was dispatched to a Lua worker runtime.
    TaskStarted { id: String, worker_id: usize },
    /// A task finished; `duration_ms` is wall-clock time since dispatch.
    TaskCompleted { id: String, duration_ms: u64 },
    /// A resource crossed an alert threshold; `usage` is percent of its limit.
    ResourceAlert { resource: String, usage: f64 },
    // NOTE(review): not emitted anywhere in this file — presumably sent by
    // the script-loading path elsewhere; confirm against other modules.
    ScriptLoaded { script_name: String },
    // NOTE(review): not emitted anywhere in this file — presumably part of
    // the cross-platform compatibility feature; confirm against callers.
    CrossPlatformRequest { lang: String, payload: Value },
}

/// A unit of Lua work submitted to `execute_script` / `execute_batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Task {
    /// Caller-supplied identifier, echoed in events and error messages.
    pub id: String,
    /// Lua source code to evaluate.
    pub script: String,
    /// Values injected into the script's global scope, one global per key.
    pub params: HashMap<String, Value>,
    // NOTE(review): `priority` is never consulted in this file — confirm a
    // scheduler elsewhere uses it.
    pub priority: u8,
    /// Per-task execution timeout in milliseconds.
    pub timeout_ms: u64,
}

/// Point-in-time resource snapshot produced by `get_resource_stats`.
#[derive(Debug, Serialize, Deserialize)]
pub struct ResourceStats {
    /// Tasks currently holding a concurrency permit.
    pub active_tasks: usize,
    /// Host memory in use, in megabytes.
    pub memory_usage: f64,
    /// Global CPU usage percentage as reported by sysinfo.
    pub cpu_usage: f64,
    /// Total Lua memory tracked across all worker runtimes (bytes).
    pub lua_memory_usage: usize,
    /// Number of Lua runtimes currently registered.
    pub thread_pool_active: usize,
}

/// Shared core of the ops panel: owns the Lua worker pool, concurrency
/// limiter, event bus, and cross-task state store. `Clone` is cheap — the
/// heavy state sits behind `Arc`s (`config` and `start_time` are cloned).
#[derive(Clone)]
pub struct OpsPanelCore {
    /// One Lua runtime per worker, keyed by worker id (0..worker_threads).
    lua_runtimes: Arc<DashMap<u64, RwLock<Lua>>>,
    /// Bounds concurrently executing tasks to `config.max_concurrent_tasks`.
    resource_limiter: Arc<Semaphore>,
    /// Broadcast sender for `SystemEvent`s; receivers via `subscribe_events`.
    event_bus: Arc<broadcast::Sender<SystemEvent>>,
    /// Cross-task key/value store of JSON values.
    state_store: Arc<DashMap<String, Value>>,
    config: FrameworkConfig,
    /// Construction time, used by `uptime`.
    start_time: SystemTime,
    /// Tracks per-worker Lua memory against `config.lua_memory_limit`.
    lua_memory_tracker: Arc<monitor::LuaMemoryTracker>,
}

impl OpsPanelCore {
    /// Build the core: one Lua runtime per `config.worker_threads`, a
    /// semaphore capped at `config.max_concurrent_tasks`, and a broadcast
    /// event bus.
    ///
    /// # Errors
    /// Propagates any failure from `setup_lua_apis` while wiring a runtime.
    pub async fn new(config: FrameworkConfig) -> Result<Self> {
        let lua_runtimes = Arc::new(DashMap::new());
        // Single tracker shared by all runtimes so they report into one budget.
        let memory_tracker = Arc::new(monitor::LuaMemoryTracker::new(config.lua_memory_limit));

        // Initialize the Lua runtimes, keyed by worker id.
        for i in 0..config.worker_threads {
            // NOTE(review): confirm `LuaOptions::new().thread_safe(true)`
            // exists in the mlua version pinned by Cargo.toml — this builder
            // method is version-specific.
            let lua = if config.enable_cross_platform_compat {
                Lua::new_with(LuaOptions::new().thread_safe(true))
            } else {
                Lua::new()
            };
            let worker_id = i as u64;

            // Install framework APIs (and memory hooks) into this runtime.
            setup_lua_apis(&lua, memory_tracker.clone(), worker_id).await?;
            lua_runtimes.insert(worker_id, RwLock::new(lua));
        }

        // Capacity 1000: subscribers lagging further than this drop events.
        let (event_tx, _) = broadcast::channel(1000);

        Ok(Self {
            lua_runtimes,
            resource_limiter: Arc::new(Semaphore::new(config.max_concurrent_tasks)),
            event_bus: Arc::new(event_tx),
            state_store: Arc::new(DashMap::new()),
            config,
            start_time: SystemTime::now(),
            lua_memory_tracker: memory_tracker,
        })
    }

    #[instrument(skip(self))]
    pub async fn execute_script(&self, task: Task) -> Result<Value> {
        let _permit = self
            .resource_limiter
            .acquire()
            .await
            .context("Failed to acquire resource permit")?;
        
        let start_time = SystemTime::now();

        // 获取一个Lua运行时实例
        let worker_id = Uuid::new_v4().as_u128() as u64 % self.config.worker_threads as u64;
        let runtime = self.lua_runtimes.get(&worker_id)
            .ok_or_else(|| anyhow::anyhow!("Worker {} not found", worker_id))?;

        // 广播任务开始事件
        let _ = self.event_bus.send(SystemEvent::TaskStarted {
            id: task.id.clone(),
            worker_id: worker_id as usize,
        });

        // 设置超时
        let script_future = async {
            let result = {
                let lua_guard = runtime.value().read().await;
                let script = task.script.clone();
                let params = task.params.clone();

                tokio::task::spawn_blocking(move || {
                    let lua = &lua_guard;
                    // 设置脚本参数到Lua全局作用域
                    let globals = lua.globals();
                    for (key, value) in params {
                        globals.set(key, lua.to_value(&value)?)?;
                    }

                    // 执行脚本
                    let result: mlua::Value = lua.load(&script).eval()?;
                    lua.from_value(result)
                })
                .await
                .map_err(|e| anyhow::anyhow!("Task execution failed: {}", e))?
                .context("Failed to execute script")?
            };
            
            self.check_memory_usage(worker_id).await?;
            result
        };

        let duration = Duration::from_millis(task.timeout_ms);
        let result = match timeout(duration, script_future).await {
            Ok(res) => res,
            Err(_) => {
                error!(
                    task_id = task.id,
                    worker_id,
                    timeout_ms = task.timeout_ms,
                    "Task timed out after {}ms", 
                    task.timeout_ms
                );
                return Err(anyhow::anyhow!(
                    "Task {} timed out after {}ms (worker: {})",
                    task.id,
                    task.timeout_ms,
                    worker_id
                ));
            }
        };

        // 广播任务完成事件
        let duration = start_time.elapsed().as_millis() as u64;
        let _ = self.event_bus.send(SystemEvent::TaskCompleted {
            id: task.id,
            duration_ms: duration,
        });

        result
    }

    /// Verify that `worker_id`'s Lua memory consumption stays below 90% of
    /// the configured limit.
    ///
    /// On breach: logs a warning, broadcasts a `ResourceAlert` (usage as a
    /// percentage of the limit), and returns an error that fails the task.
    async fn check_memory_usage(&self, worker_id: u64) -> Result<()> {
        let mem_usage = self.lua_memory_tracker.get_memory_usage(worker_id);
        let threshold = self.config.lua_memory_limit as f32 * 0.9;

        // Fast path: consumption is within bounds.
        if !(mem_usage > threshold) {
            return Ok(());
        }

        warn!(
            worker_id,
            "Lua memory usage high: {} bytes (limit: {})",
            mem_usage,
            self.config.lua_memory_limit
        );
        let usage_percent = (mem_usage as f64 / self.config.lua_memory_limit as f64) * 100.0;
        let _ = self.event_bus.send(SystemEvent::ResourceAlert {
            resource: "LuaMemory".to_string(),
            usage: usage_percent,
        });
        Err(anyhow::anyhow!(
            "Memory usage exceeded 90% of limit ({} bytes)",
            mem_usage
        ))
    }

        let duration = Duration::from_millis(task.timeout_ms);
        let result = match timeout(duration, script_future).await {
            Ok(res) => res,
            Err(_) => {
                error!(task_id = task.id, "Task timed out after {}ms", task.timeout_ms);
                return Err(anyhow::anyhow!("Task timed out"));
            }
        };

        // 广播任务完成事件
        let duration = start_time.elapsed().as_millis() as u64;
        let _ = self.event_bus.send(SystemEvent::TaskCompleted {
            id: task.id,
            duration_ms: duration,
        });

        result
    }

    #[instrument(skip(self))]
    pub async fn execute_batch(&self, tasks: Vec<Task>) -> Vec<Result<Value>> {
        if tasks.is_empty() {
            return Vec::new();
        }

        const CHUNK_SIZE: usize = 10;
        let semaphore = Arc::new(Semaphore::new(self.config.max_concurrent_tasks));
        let mut results = Vec::with_capacity(tasks.len());
        let total_tasks = tasks.len();

        info!("Starting batch execution of {} tasks", total_tasks);

        for (chunk_idx, chunk) in tasks.chunks(CHUNK_SIZE).enumerate() {
            let (tx, mut rx) = mpsc::channel(CHUNK_SIZE);
            let mut handles = Vec::with_capacity(chunk.len());

            for task in chunk {
                let tx = tx.clone();
                let core = self.clone();
                let permit = semaphore.clone().acquire_owned().await
                    .context("Failed to acquire semaphore permit")?;

                let handle = tokio::spawn(async move {
                    let result = core.execute_script(task).await;
                    let _ = tx.send(result).await;
                    drop(permit);
                });
                handles.push(handle);
            }

            drop(tx); // 关闭发送端

            // 收集当前chunk的结果
            let mut chunk_results = Vec::with_capacity(chunk.len());
            while let Some(result) = rx.recv().await {
                chunk_results.push(result);
            }

            // 等待当前chunk的所有任务完成
            for handle in handles {
                if let Err(e) = handle.await {
                    error!("Batch task failed: {}", e);
                }
            }

            results.extend(chunk_results);
            
            debug!(
                "Batch progress: {}/{} tasks completed (chunk {}/{})",
                results.len(),
                total_tasks,
                chunk_idx + 1,
                (total_tasks + CHUNK_SIZE - 1) / CHUNK_SIZE
            );
        }

        info!(
            "Batch execution completed: {}/{} tasks succeeded",
            results.iter().filter(|r| r.is_ok()).count(),
            total_tasks
        );

        results
    }

    /// Store `value` under `key` in the shared state map, silently replacing
    /// any previous entry.
    pub fn set_state(&self, key: String, value: Value) {
        // Previous value (if any) is discarded.
        let _ = self.state_store.insert(key, value);
    }

    /// Fetch a clone of the value stored under `key`, or `None` if absent.
    pub fn get_state(&self, key: &str) -> Option<Value> {
        let entry = self.state_store.get(key)?;
        Some(entry.value().clone())
    }

    /// Collect a point-in-time snapshot of host and framework resource usage.
    ///
    /// NOTE(review): a fresh `System::new()` holds no data, and CPU usage
    /// generally needs two refreshes separated by an interval — the first
    /// `refresh_cpu` reading is typically 0; confirm against the pinned
    /// sysinfo version.
    /// NOTE(review): `SystemExt` (pre-0.30 sysinfo) is imported while
    /// `global_cpu_usage()` is a 0.30+ method — verify these compile together.
    pub async fn get_resource_stats(&self) -> ResourceStats {
        let mut system = System::new();
        system.refresh_memory();
        system.refresh_cpu();

        ResourceStats {
            // Outstanding permits = configured max minus currently available.
            active_tasks: self.config.max_concurrent_tasks - self.resource_limiter.available_permits(),
            memory_usage: system.used_memory() as f64 / 1024.0 / 1024.0, // MB
            cpu_usage: system.global_cpu_usage() as f64,
            lua_memory_usage: self.lua_memory_tracker.total_usage(),
            thread_pool_active: self.lua_runtimes.len(),
        }
    }

    /// Open a new receiver on the core's broadcast event bus; each
    /// subscriber gets an independent cursor over subsequent events.
    pub fn subscribe_events(&self) -> broadcast::Receiver<SystemEvent> {
        let bus = self.event_bus.as_ref();
        bus.subscribe()
    }

    /// Whole seconds elapsed since this core was constructed; returns 0 if
    /// the system clock has moved backwards past the start time.
    pub fn uptime(&self) -> u64 {
        match SystemTime::now().duration_since(self.start_time) {
            Ok(elapsed) => elapsed.as_secs(),
            Err(_) => 0,
        }
    }
}