use std::sync::{Arc, Mutex};
use std::thread;
use std::collections::VecDeque;
use std::sync::mpsc::{channel, Sender, Receiver};

/// Task priority levels. The explicit discriminants give a total order
/// (`Low` < `Normal` < `High` < `Critical`) via the derived `Ord`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TaskPriority {
    Low = 0,
    Normal = 1,
    High = 2,
    Critical = 3,
}

/// Lifecycle states of a task.
///
/// NOTE(review): only `Pending` is ever assigned in this file; the other
/// variants are currently unused — confirm intended status tracking before
/// relying on them.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskStatus {
    Pending,
    Running,
    Completed,
    Failed,
    Cancelled,
}

/// Result of running a task; errors are reported as plain strings.
pub type TaskResult<T> = Result<T, String>;

/// A unit of schedulable work. `Send` is required so tasks can be moved
/// to worker threads.
pub trait Task: Send {
    /// Value produced by a successful execution.
    type Output: Send;
    
    /// Runs the task, returning its output or an error message.
    fn execute(&self) -> TaskResult<Self::Output>;
    
    /// Scheduling priority; defaults to `Normal`.
    fn priority(&self) -> TaskPriority {
        TaskPriority::Normal
    }
    
    /// Implementor-defined task identifier.
    fn id(&self) -> u64;
    
    /// Human-readable description; defaults to `"Task {id}"`.
    fn description(&self) -> String {
        format!("Task {}", self.id())
    }
}

/// Internal queue entry: a type-erased task plus scheduling metadata.
///
/// `id` and `priority` are cached copies of the task's own values so the
/// priority heap can order entries without going through the trait object.
struct TaskWrapper {
    id: u64,
    priority: TaskPriority,
    // Output boxed as `Any` so tasks with different output types share one queue.
    task: Box<dyn Task<Output = Box<dyn std::any::Any + Send>> + Send>,
    // NOTE(review): set to `Pending` at submission and never updated afterwards
    // anywhere in this file — appears to be dead state tracking; confirm.
    status: TaskStatus,
}

/// Equality for heap ordering.
///
/// Compares priority in addition to id so that `eq` agrees with `Ord::cmp`
/// below: the `Ord` contract requires `a == b` ⇔ `a.cmp(&b) == Equal`, and
/// the previous id-only comparison violated it for wrappers sharing an id
/// but differing in priority. Scheduler-assigned ids are unique, so
/// observable behavior is unchanged.
impl PartialEq for TaskWrapper {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id && self.priority == other.priority
    }
}

impl Eq for TaskWrapper {}

/// Delegates to the total order defined by [`Ord`], as the `Ord`/`PartialOrd`
/// contract requires.
impl PartialOrd for TaskWrapper {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(Ord::cmp(self, other))
    }
}

impl Ord for TaskWrapper {
    /// Total order used by the scheduler's heap.
    ///
    /// The priority comparison is deliberately *inverted*: a higher-priority
    /// wrapper compares as `Less`. Combined with the `Reverse` wrapper used
    /// in `worker_thread`'s `BinaryHeap`, the heap therefore pops the
    /// highest-priority task first, breaking ties by submission order
    /// (lower id first).
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        other
            .priority
            .cmp(&self.priority)
            .then_with(|| self.id.cmp(&other.id))
    }
}

/// Control messages sent from the scheduler handle to worker threads.
/// Each message is consumed by exactly one worker (mpsc semantics).
enum SchedulerMessage {
    NewTask(TaskWrapper),
    CancelTask(u64),
    Shutdown,
}

/// Handle to a pool of worker threads that execute submitted tasks.
pub struct ParallelTaskScheduler {
    // Producer side of the shared work/control channel.
    task_sender: Sender<SchedulerMessage>,
    // Monotonic counter used to hand out unique task ids.
    next_task_id: Arc<Mutex<u64>>,
    // Number of spawned workers.
    thread_count: usize,
}

impl ParallelTaskScheduler {
    /// Creates a scheduler backed by `thread_count` worker threads.
    ///
    /// All workers share a single mpsc receiver behind a mutex, so each
    /// message is delivered to exactly one worker.
    pub fn new(thread_count: usize) -> Self {
        let (task_sender, task_receiver) = channel();
        let task_receiver = Arc::new(Mutex::new(task_receiver));
        let next_task_id = Arc::new(Mutex::new(0));
        
        // Spawn the worker pool. Handles are detached; workers exit when they
        // receive a Shutdown message or the channel disconnects.
        for i in 0..thread_count {
            let receiver = Arc::clone(&task_receiver);
            thread::spawn(move || {
                Self::worker_thread(i, receiver);
            });
        }
        
        ParallelTaskScheduler {
            task_sender,
            next_task_id,
            thread_count,
        }
    }
    
    /// Submits a task for execution and returns its scheduler-assigned id.
    pub fn submit<T: Task + 'static>(&self, task: T) -> u64 {
        // Allocate a unique, monotonically increasing id; the scope block
        // releases the lock before the (potentially slower) send below.
        let task_id = {
            let mut id_guard = self.next_task_id.lock().unwrap();
            let id = *id_guard;
            *id_guard += 1;
            id
        };
        
        // Type-erase the task so heterogeneous output types fit in one queue.
        let wrapper = TaskWrapper {
            id: task_id,
            priority: task.priority(),
            task: Box::new(TaskAdapter::new(task)),
            status: TaskStatus::Pending,
        };
        
        // Hand the task to whichever worker receives it next.
        self.task_sender.send(SchedulerMessage::NewTask(wrapper)).unwrap();
        
        task_id
    }
    
    /// Requests cancellation of a pending task.
    ///
    /// NOTE(review): the cancel message is consumed by a single worker, so it
    /// only removes the task if that worker happens to hold it in its local
    /// queue; a task queued on a different worker is unaffected.
    pub fn cancel_task(&self, task_id: u64) {
        self.task_sender.send(SchedulerMessage::CancelTask(task_id)).unwrap();
    }
    
    /// Shuts down all worker threads.
    ///
    /// Bug fix: each mpsc message reaches exactly one worker, so a single
    /// `Shutdown` used to stop only one of the `thread_count` threads while
    /// the rest kept running. Send one per worker so every thread terminates.
    pub fn shutdown(&self) {
        for _ in 0..self.thread_count {
            // Ignore send errors: workers may already have exited.
            let _ = self.task_sender.send(SchedulerMessage::Shutdown);
        }
    }
    
    /// Returns the number of worker threads.
    pub fn thread_count(&self) -> usize {
        self.thread_count
    }
    
    /// Worker loop: receive one message, then run at most one queued task.
    ///
    /// Each worker keeps its own priority queue, so prioritization applies
    /// only among tasks held by the same worker; in practice the queue rarely
    /// holds more than one entry because exactly one task is drained per
    /// received message.
    fn worker_thread(id: usize, receiver: Arc<Mutex<Receiver<SchedulerMessage>>>) {
        println!("Worker thread {} started", id);
        
        // Reverse + TaskWrapper's inverted Ord => highest priority pops first.
        let mut pending_tasks: std::collections::BinaryHeap<std::cmp::Reverse<TaskWrapper>> =
            std::collections::BinaryHeap::new();
        let mut should_shutdown = false;
        
        while !should_shutdown {
            // Hold the receiver lock only while waiting for one message so
            // other workers can receive as soon as we start executing.
            let msg = {
                let receiver = receiver.lock().unwrap();
                match receiver.recv() {
                    Ok(msg) => msg,
                    // All senders dropped: no more work will ever arrive.
                    Err(_) => break,
                }
            };
            
            match msg {
                SchedulerMessage::NewTask(task) => {
                    pending_tasks.push(std::cmp::Reverse(task));
                }
                SchedulerMessage::CancelTask(task_id) => {
                    // Drop the task if this worker is the one holding it.
                    pending_tasks.retain(|task| task.0.id != task_id);
                }
                SchedulerMessage::Shutdown => {
                    should_shutdown = true;
                }
            }
            
            // Execute the highest-priority queued task, if any.
            if let Some(std::cmp::Reverse(task)) = pending_tasks.pop() {
                println!("Thread {} executing task {}: {}", id, task.id, task.task.description());
                
                match task.task.execute() {
                    // The boxed output is discarded; this design does not
                    // report results back to the submitter.
                    Ok(_) => println!("Task {} completed successfully", task.id),
                    Err(err) => println!("Task {} failed: {}", task.id, err),
                }
            }
        }
        
        // Bug fix: run any tasks still queued locally instead of silently
        // dropping them when a Shutdown message arrives.
        while let Some(std::cmp::Reverse(task)) = pending_tasks.pop() {
            println!("Thread {} executing task {}: {}", id, task.id, task.task.description());
            match task.task.execute() {
                Ok(_) => println!("Task {} completed successfully", task.id),
                Err(err) => println!("Task {} failed: {}", task.id, err),
            }
        }
        
        println!("Worker thread {} shutting down", id);
    }
}

/// Adapter that erases a concrete task's output type by boxing it as `Any`,
/// allowing tasks with different `Output` types to share one queue.
struct TaskAdapter<T: Task> {
    task: T,
}

impl<T: Task + 'static> TaskAdapter<T> {
    /// Wraps a concrete task for type erasure.
    fn new(task: T) -> Self {
        Self { task }
    }
}

impl<T: Task + 'static> Task for TaskAdapter<T> {
    type Output = Box<dyn std::any::Any + Send>;
    
    /// Runs the inner task and boxes its output as `Any`; callers can
    /// downcast to recover the concrete type.
    fn execute(&self) -> TaskResult<Self::Output> {
        // Idiomatic `Result::map` replaces the manual Ok/Err re-wrap;
        // errors pass through unchanged.
        self.task
            .execute()
            .map(|result| Box::new(result) as Box<dyn std::any::Any + Send>)
    }
    
    /// Forwards the inner task's priority.
    fn priority(&self) -> TaskPriority {
        self.task.priority()
    }
    
    /// Forwards the inner task's id.
    fn id(&self) -> u64 {
        self.task.id()
    }
    
    /// Forwards the inner task's description.
    fn description(&self) -> String {
        self.task.description()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::time::{Duration, Instant};
    
    /// Task that sleeps briefly and bumps a shared counter.
    struct TestTask {
        id: u64,
        priority: TaskPriority,
        work_time_ms: u64,
        counter: Arc<AtomicUsize>,
    }
    
    impl Task for TestTask {
        type Output = usize;
        
        fn execute(&self) -> TaskResult<Self::Output> {
            // Simulate some work.
            thread::sleep(Duration::from_millis(self.work_time_ms));
            
            // Record completion; return the post-increment count.
            let prev = self.counter.fetch_add(1, Ordering::SeqCst);
            
            Ok(prev + 1)
        }
        
        fn priority(&self) -> TaskPriority {
            self.priority
        }
        
        fn id(&self) -> u64 {
            self.id
        }
        
        fn description(&self) -> String {
            format!("Test Task {} (priority: {:?}, work: {}ms)", self.id, self.priority, self.work_time_ms)
        }
    }
    
    #[test]
    fn test_task_scheduler() {
        let scheduler = ParallelTaskScheduler::new(4);
        let counter = Arc::new(AtomicUsize::new(0));
        
        // Submit ten tasks with a mix of priorities.
        for i in 0..10 {
            let priority = match i % 3 {
                0 => TaskPriority::Low,
                1 => TaskPriority::Normal,
                _ => TaskPriority::High,
            };
            
            scheduler.submit(TestTask {
                id: i,
                priority,
                work_time_ms: 10,
                counter: Arc::clone(&counter),
            });
        }
        
        // Bug fix: poll for completion with a generous deadline instead of a
        // single fixed 500 ms sleep, which was slow in the common case and
        // flaky on a loaded machine.
        let deadline = Instant::now() + Duration::from_secs(5);
        while counter.load(Ordering::SeqCst) < 10 && Instant::now() < deadline {
            thread::sleep(Duration::from_millis(5));
        }
        
        // Every task must have executed exactly once.
        assert_eq!(counter.load(Ordering::SeqCst), 10);
        
        scheduler.shutdown();
    }
}