use std::sync::{Arc, Mutex};
use std::collections::{HashMap, VecDeque};
use std::time::{Duration, Instant};
use std::thread;
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinHandle;
use rayon::prelude::*;

/// Task scheduling priority; the discriminant doubles as the ordering key
/// (`Low < Normal < High < Critical`).
///
/// `Hash` is derived because `TaskScheduler` keys its queue map by priority.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum TaskPriority {
    Low = 0,
    Normal = 1,
    High = 2,
    Critical = 3,
}

impl Default for TaskPriority {
    /// `Normal` is the default priority.
    fn default() -> Self {
        TaskPriority::Normal
    }
}

/// Lifecycle state of a task.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskStatus {
    /// Queued, not yet picked up by a worker.
    Pending,
    /// Currently executing.
    Running,
    /// Finished successfully.
    Completed,
    /// Finished with an error.
    Failed,
    /// Cancelled before completion.
    Cancelled,
}

/// Broad category of work a task performs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskType {
    /// Computational work.
    Computation,
    /// I/O work.
    IO,
    /// Network work.
    Network,
    /// UI-related work.
    UI,
}

impl Default for TaskType {
    /// `Computation` is the default category.
    fn default() -> Self {
        Self::Computation
    }
}

/// Per-task configuration supplied at submission time.
#[derive(Debug, Clone)]
pub struct TaskConfig {
    /// Scheduling priority; higher priorities are dispatched first.
    pub priority: TaskPriority,
    /// Broad category of the work.
    pub task_type: TaskType,
    /// Timeout in milliseconds; `None` means no timeout.
    /// NOTE(review): not enforced anywhere in this file — confirm elsewhere.
    pub timeout_ms: Option<u64>,
    /// Maximum number of retries on failure.
    /// NOTE(review): retry logic is not implemented in this file — confirm elsewhere.
    pub max_retries: u32,
    /// Whether the task may be cancelled after submission.
    pub cancellable: bool,
}

impl Default for TaskConfig {
    fn default() -> Self {
        Self {
            priority: TaskPriority::Normal,
            task_type: TaskType::Computation,
            timeout_ms: None,
            max_retries: 0,
            cancellable: true,
        }
    }
}

/// Bookkeeping data attached to every task.
#[derive(Debug, Clone)]
pub struct TaskMetadata {
    /// Unique task id.
    pub id: String,
    /// Human-readable task name.
    pub name: String,
    /// When the task object was created.
    pub created_at: Instant,
    /// When execution began; `None` until the task starts.
    pub started_at: Option<Instant>,
    /// When execution finished; `None` until completion.
    pub completed_at: Option<Instant>,
    /// Current lifecycle state.
    pub status: TaskStatus,
    /// Number of retries performed so far.
    pub retry_count: u32,
    /// Error message from the last failure, if any.
    pub error: Option<String>,
}

/// A unit of work: metadata, configuration, and a one-shot handler.
pub struct Task<T> {
    /// Bookkeeping data (id, timestamps, status, ...).
    pub metadata: TaskMetadata,
    /// Scheduling configuration.
    pub config: TaskConfig,
    /// The work itself; consumed on execution, yields `Ok(T)` or an error string.
    handler: Box<dyn FnOnce() -> Result<T, String> + Send + 'static>,
    /// Cancellation signal sender; installed by `TaskScheduler::submit`.
    /// NOTE(review): never fired anywhere in this file — confirm intended use.
    cancel_tx: Option<oneshot::Sender<()>>,
}

impl<T: Send + 'static> Task<T> {
    /// Builds a pending task from an id, display name, config, and handler.
    ///
    /// The task starts in `TaskStatus::Pending` with a zero retry count and
    /// no timestamps other than `created_at`; no cancellation sender is
    /// attached yet.
    pub fn new<F>(id: String, name: String, config: TaskConfig, handler: F) -> Self
    where
        F: FnOnce() -> Result<T, String> + Send + 'static,
    {
        Self {
            metadata: TaskMetadata {
                id,
                name,
                created_at: Instant::now(),
                started_at: None,
                completed_at: None,
                status: TaskStatus::Pending,
                retry_count: 0,
                error: None,
            },
            config,
            handler: Box::new(handler),
            cancel_tx: None,
        }
    }
}

/// Outcome of a finished task, paired with its final metadata.
pub struct TaskResult<T> {
    /// Metadata snapshot at completion time.
    pub metadata: TaskMetadata,
    /// `Ok` payload or error string produced by the handler.
    pub result: Result<T, String>,
}

/// 任务调度器
pub struct TaskScheduler {
    /// 任务队列
    task_queues: Mutex<HashMap<TaskPriority, VecDeque<Arc<Mutex<Box<dyn Any + Send>>>>>>,
    /// 工作线程数
    worker_count: usize,
    /// 任务发送通道
    task_tx: mpsc::Sender<Arc<Mutex<Box<dyn Any + Send>>>>,
    /// 任务接收通道
    task_rx: Mutex<Option<mpsc::Receiver<Arc<Mutex<Box<dyn Any + Send>>>>>>,
    /// 工作线程句柄
    worker_handles: Mutex<Vec<JoinHandle<()>>>,
    /// 运行状态
    running: Arc<Mutex<bool>>,
    /// 任务计数器
    task_counter: Mutex<usize>,
    /// 已完成任务计数器
    completed_counter: Mutex<usize>,
    /// 失败任务计数器
    failed_counter: Mutex<usize>,
}

use std::any::Any;

impl TaskScheduler {
    /// 创建新的任务调度器
    pub fn new(worker_count: usize) -> Self {
        let (task_tx, task_rx) = mpsc::channel(100);
        
        Self {
            task_queues: Mutex::new(HashMap::new()),
            worker_count,
            task_tx,
            task_rx: Mutex::new(Some(task_rx)),
            worker_handles: Mutex::new(Vec::new()),
            running: Arc::new(Mutex::new(false)),
            task_counter: Mutex::new(0),
            completed_counter: Mutex::new(0),
            failed_counter: Mutex::new(0),
        }
    }
    
    /// 启动调度器
    pub fn start(&self) {
        let mut running = self.running.lock().unwrap();
        if *running {
            return;
        }
        *running = true;
        drop(running);
        
        let mut worker_handles = self.worker_handles.lock().unwrap();
        let mut task_rx = self.task_rx.lock().unwrap().take().unwrap();
        
        // 初始化任务队列
        let mut queues = self.task_queues.lock().unwrap();
        queues.insert(TaskPriority::Low, VecDeque::new());
        queues.insert(TaskPriority::Normal, VecDeque::new());
        queues.insert(TaskPriority::High, VecDeque::new());
        queues.insert(TaskPriority::Critical, VecDeque::new());
        drop(queues);
        
        // 启动工作线程
        let running = self.running.clone();
        for i in 0..self.worker_count {
            let task_tx = self.task_tx.clone();
            let worker_running = running.clone();
            
            let handle = tokio::spawn(async move {
                println!("Worker {} started", i);
                
                while *worker_running.lock().unwrap() {
                    match task_rx.recv().await {
                        Some(task) => {
                            // 处理任务
                            println!("Worker {} processing task", i);
                            
                            // 这里我们需要使用动态分发来处理不同类型的任务
                            // 实际实现中，每个任务类型需要有自己的处理逻辑
                            let mut task_guard = task.lock().unwrap();
                            let task_ref = &mut *task_guard;
                            
                            // 使用downcast_mut尝试将任务转换为具体类型
                            // 这里只是一个示例，实际实现需要根据任务类型进行处理
                            if let Some(_) = task_ref.downcast_mut::<Task<()>>() {
                                println!("Processing void task");
                            } else if let Some(_) = task_ref.downcast_mut::<Task<i32>>() {
                                println!("Processing i32 task");
                            } else if let Some(_) = task_ref.downcast_mut::<Task<String>>() {
                                println!("Processing String task");
                            } else {
                                println!("Unknown task type");
                            }
                        }
                        None => {
                            // 通道已关闭，退出循环
                            break;
                        }
                    }
                }
                
                println!("Worker {} stopped", i);
            });
            
            worker_handles.push(handle);
        }
        
        // 启动调度线程
        let task_tx = self.task_tx.clone();
        let task_queues = self.task_queues.clone();
        let scheduler_running = running.clone();
        
        let scheduler_handle = tokio::spawn(async move {
            println!("Scheduler started");
            
            while *scheduler_running.lock().unwrap() {
                // 从队列中获取任务并调度
                let mut queues = task_queues.lock().unwrap();
                
                // 按优先级从高到低检查队列
                let priorities = [TaskPriority::Critical, TaskPriority::High, TaskPriority::Normal, TaskPriority::Low];
                let mut task_to_schedule = None;
                
                for &priority in &priorities {
                    if let Some(queue) = queues.get_mut(&priority) {
                        if let Some(task) = queue.pop_front() {
                            task_to_schedule = Some(task);
                            break;
                        }
                    }
                }
                
                drop(queues);
                
                // 如果有任务要调度，发送到工作线程
                if let Some(task) = task_to_schedule {
                    if let Err(e) = task_tx.send(task).await {
                        println!("Failed to send task: {}", e);
                    }
                } else {
                    // 没有任务，等待一段时间
                    tokio::time::sleep(Duration::from_millis(10)).await;
                }
            }
            
            println!("Scheduler stopped");
        });
        
        worker_handles.push(scheduler_handle);
    }
    
    /// 停止调度器
    pub async fn stop(&self) {
        let mut running = self.running.lock().unwrap();
        if !*running {
            return;
        }
        *running = false;
        drop(running);
        
        // 等待所有工作线程完成
        let mut handles = self.worker_handles.lock().unwrap();
        for handle in handles.drain(..) {
            let _ = handle.await;
        }
        
        println!("Task scheduler stopped");
    }
    
    /// 提交任务
    pub async fn submit<T: Send + 'static>(&self, task: Task<T>) -> oneshot::Receiver<TaskResult<T>> {
        let (result_tx, result_rx) = oneshot::channel();
        
        // 创建任务的可取消版本
        let (cancel_tx, cancel_rx) = oneshot::channel();
        let mut task = task;
        task.cancel_tx = Some(cancel_tx);
        
        // 更新任务计数
        let mut counter = self.task_counter.lock().unwrap();
        *counter += 1;
        drop(counter);
        
        // 将任务放入对应优先级的队列
        let mut queues = self.task_queues.lock().unwrap();
        let queue = queues.entry(task.config.priority).or_insert_with(VecDeque::new);
        
        // 将任务包装为Box<dyn Any>
        let task_box: Box<dyn Any + Send> = Box::new(task);
        let task_arc = Arc::new(Mutex::new(task_box));
        
        queue.push_back(task_arc);
        drop(queues);
        
        // 返回结果接收器
        result_rx
    }
    
    /// 获取调度器统计信息
    pub fn get_stats(&self) -> SchedulerStats {
        let task_count = *self.task_counter.lock().unwrap();
        let completed_count = *self.completed_counter.lock().unwrap();
        let failed_count = *self.failed_counter.lock().unwrap();
        
        let queues = self.task_queues.lock().unwrap();
        let mut queue_sizes = HashMap::new();
        
        for (&priority, queue) in queues.iter() {
            queue_sizes.insert(priority, queue.len());
        }
        
        SchedulerStats {
            worker_count: self.worker_count,
            task_count,
            completed_count,
            failed_count,
            pending_count: task_count - completed_count - failed_count,
            queue_sizes,
        }
    }
}

/// Point-in-time snapshot of scheduler counters.
#[derive(Debug, Clone)]
pub struct SchedulerStats {
    /// Number of worker tasks.
    pub worker_count: usize,
    /// Total tasks ever submitted.
    pub task_count: usize,
    /// Tasks that completed successfully.
    pub completed_count: usize,
    /// Tasks that failed.
    pub failed_count: usize,
    /// Tasks submitted but not yet completed or failed.
    pub pending_count: usize,
    /// Queue length per priority at snapshot time.
    pub queue_sizes: HashMap<TaskPriority, usize>,
}

/// Global task scheduler, lazily created on first access with one worker per
/// logical CPU.
///
/// NOTE(review): `start()` uses `tokio::spawn`, which panics when no tokio
/// runtime is active — the first access to this static must happen from
/// within a runtime. Confirm call sites.
lazy_static::lazy_static! {
    pub static ref GLOBAL_TASK_SCHEDULER: TaskScheduler = {
        let scheduler = TaskScheduler::new(num_cpus::get());
        scheduler.start();
        scheduler
    };
}

/// 批处理任务执行器
pub struct BatchExecutor<T, R> {
    /// 输入数据
    data: Vec<T>,
    /// 批处理大小
    batch_size: usize,
    /// 处理函数
    processor: Box<dyn Fn(&[T]) -> Vec<R> + Send + Sync>,
}

impl<T: Send + Sync + 'static, R: Send + 'static> BatchExecutor<T, R> {
    /// 创建新的批处理执行器
    pub fn new<F>(data: Vec<T>, batch_size: usize, processor: F) -> Self
    where
        F: Fn(&[T]) -> Vec<R> + Send + Sync + 'static,
    {
        Self {
            data,
            batch_size,
            processor: Box::new(processor),
        }
    }
    
    /// 并行执行批处理
    pub fn execute_parallel(&self) -> Vec<R> {
        // 将数据分成批次
        let chunks: Vec<&[T]> = self.data.chunks(self.batch_size).collect();
        
        // 并行处理每个批次
        let results: Vec<Vec<R>> = chunks.par_iter()
            .map(|chunk| (self.processor)(*chunk))
            .collect();
        
        // 合并结果
        results.into_iter().flatten().collect()
    }
    
    /// 异步执行批处理
    pub async fn execute_async(&self) -> Vec<R> {
        // 将数据分成批次
        let chunks: Vec<&[T]> = self.data.chunks(self.batch_size).collect();
        let chunk_count = chunks.len();
        
        // 创建结果通道
        let (tx, mut rx) = mpsc::channel(chunk_count);
        
        // 为每个批次创建任务
        for (i, chunk) in chunks.into_iter().enumerate() {
            let processor = self.processor.clone();
            let chunk_data = chunk.to_vec();
            let tx = tx.clone();
            
            tokio::spawn(async move {
                // 处理批次
                let result = processor(&chunk_data);
                
                // 发送结果
                if let Err(e) = tx.send((i, result)).await {
                    eprintln!("Failed to send batch result: {}", e);
                }
            });
        }
        
        // 丢弃发送端，以便接收端可以正确关闭
        drop(tx);
        
        // 收集结果
        let mut batch_results = vec![Vec::new(); chunk_count];
        while let Some((idx, result)) = rx.recv().await {
            batch_results[idx] = result;
        }
        
        // 合并结果
        batch_results.into_iter().flatten().collect()
    }
}

/// A named group of related task ids with a completion flag.
pub struct TaskGroup {
    /// Unique group identifier.
    id: String,
    /// Human-readable group name.
    name: String,
    /// Ids of the tasks belonging to this group.
    tasks: Mutex<Vec<String>>,
    /// Whether the whole group has been marked finished.
    completed: Mutex<bool>,
}

impl TaskGroup {
    /// Creates an empty, not-yet-completed group.
    pub fn new(id: String, name: String) -> Self {
        TaskGroup {
            id,
            name,
            tasks: Mutex::new(Vec::new()),
            completed: Mutex::new(false),
        }
    }

    /// Registers a task id as part of this group.
    pub fn add_task(&self, task_id: String) {
        self.tasks.lock().unwrap().push(task_id);
    }

    /// Flags the group as finished.
    pub fn mark_completed(&self) {
        *self.completed.lock().unwrap() = true;
    }

    /// Returns `true` once `mark_completed` has been called.
    pub fn is_completed(&self) -> bool {
        *self.completed.lock().unwrap()
    }

    /// Returns a snapshot of the task ids currently in the group.
    pub fn get_task_ids(&self) -> Vec<String> {
        self.tasks.lock().unwrap().clone()
    }

    /// Returns the group id.
    pub fn get_id(&self) -> &str {
        self.id.as_str()
    }

    /// Returns the group name.
    pub fn get_name(&self) -> &str {
        self.name.as_str()
    }
}

/// Registry of task groups, keyed by group id.
pub struct TaskGroupManager {
    /// Map from group id to shared group handle.
    groups: Mutex<HashMap<String, Arc<TaskGroup>>>,
}

impl TaskGroupManager {
    /// Creates an empty manager.
    pub fn new() -> Self {
        Self {
            groups: Mutex::new(HashMap::new()),
        }
    }

    /// Creates and registers a new group, returning a shared handle to it.
    /// An existing group with the same id is silently replaced.
    pub fn create_group(&self, id: String, name: String) -> Arc<TaskGroup> {
        let group = Arc::new(TaskGroup::new(id.clone(), name));
        self.groups.lock().unwrap().insert(id, Arc::clone(&group));
        group
    }

    /// Looks up a group by id.
    pub fn get_group(&self, id: &str) -> Option<Arc<TaskGroup>> {
        self.groups.lock().unwrap().get(id).cloned()
    }

    /// Unregisters a group; returns `true` if it existed.
    pub fn remove_group(&self, id: &str) -> bool {
        self.groups.lock().unwrap().remove(id).is_some()
    }

    /// Returns handles to all registered groups (unspecified order).
    pub fn get_all_groups(&self) -> Vec<Arc<TaskGroup>> {
        self.groups.lock().unwrap().values().cloned().collect()
    }
}

impl Default for TaskGroupManager {
    /// Equivalent to [`TaskGroupManager::new`]; provided because the type
    /// has a no-argument constructor (clippy `new_without_default`).
    fn default() -> Self {
        Self::new()
    }
}

/// Global task group manager, lazily created on first access.
lazy_static::lazy_static! {
    pub static ref GLOBAL_TASK_GROUP_MANAGER: TaskGroupManager = TaskGroupManager::new();
}

#[cfg(test)]
mod tests {
    use super::*;
    
    #[tokio::test]
    async fn test_batch_executor() {
        // Input: the integers 0..100.
        let data: Vec<i32> = (0..100).collect();
        
        // Executor that doubles every element, 10 items per chunk.
        let executor = BatchExecutor::new(data, 10, |batch| {
            batch.iter().map(|&x| x * 2).collect()
        });
        
        // Run the rayon-backed parallel path.
        let results = executor.execute_parallel();
        
        // Ordering must be preserved: results[i] == 2 * i.
        assert_eq!(results.len(), 100);
        for (i, &result) in results.iter().enumerate() {
            assert_eq!(result, i as i32 * 2);
        }
    }
}