use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::ptr;
use std::thread;
use std::time::Duration;
use std::hash::Hasher;
use std::collections::hash_map::DefaultHasher;
use std::collections::VecDeque;

/// Pipit native concurrency engine: a fixed-size thread pool whose workers
/// pull boxed closures from a shared FIFO queue until shut down.
pub struct ConcurrentEngine {
    /// Pending tasks; a std `Mutex<VecDeque<..>>` used in place of a lock-free queue.
    task_queue: Arc<Mutex<VecDeque<Box<dyn Fn() + Send + 'static>>>>,
    /// Join handles for the worker threads spawned in `new`.
    worker_threads: Vec<thread::JoinHandle<()>>,
    /// Shared shutdown flag polled by every worker loop.
    running: Arc<AtomicBool>,
    /// Number of worker threads (exposed via `thread_count()`).
    thread_count: usize,
}

/// Shared data cell backed by a raw heap pointer.
///
/// NOTE(review): `update` mutates through this pointer from `&self` with no
/// mutual exclusion, and no `Drop` impl is visible in this file — concurrent
/// use can data-race and the boxed value appears to be leaked. Confirm before
/// relying on this type across threads.
pub struct ConcurrentData<T> {
    /// Pointer to the boxed value created by `Box::into_raw` in `new`.
    data: AtomicPtr<T>,
    /// Incremented after every `update`.
    version: AtomicUsize,
}

/// Lock-free, prepend-only concurrent hash map with a fixed 16-bucket table.
///
/// Entries are never removed or rehashed; `insert` prepends to a bucket's
/// list, so a duplicate key shadows older entries rather than replacing them.
pub struct ConcurrentHashMap<K, V> {
    /// Heads of the per-bucket singly linked lists (null = empty bucket).
    buckets: Vec<AtomicPtr<HashEntry<K, V>>>,
    /// Count of successful inserts (shadowed duplicates are still counted).
    len: AtomicUsize,
}

/// One node in a bucket's singly linked list.
struct HashEntry<K, V> {
    /// Key, set at insert time and never changed.
    key: K,
    /// Value associated with `key`.
    value: V,
    /// Next node in this bucket, or null at the tail.
    next: AtomicPtr<HashEntry<K, V>>,
}

/// Thread-safe counter backed by an `AtomicUsize`.
pub struct ConcurrentCounter {
    /// Current count, modified with atomic read-modify-write operations.
    value: AtomicUsize,
}

/// Thread-safe boolean flag backed by an `AtomicBool`.
pub struct ConcurrentFlag {
    /// Current state; written with release ordering, read with acquire.
    flag: AtomicBool,
}

impl ConcurrentEngine {
    /// Creates an engine and immediately spawns `thread_count` worker threads.
    ///
    /// Each worker repeatedly pops one task from the shared queue and runs it,
    /// catching any panic so a failing task cannot kill the pool.
    pub fn new(thread_count: usize) -> Self {
        let running = Arc::new(AtomicBool::new(true));
        let task_queue = Arc::new(Mutex::new(VecDeque::<Box<dyn Fn() + Send + 'static>>::new()));
        let mut worker_threads = Vec::with_capacity(thread_count);

        // Spawn the worker threads.
        for _ in 0..thread_count {
            let queue = task_queue.clone();
            let running_ref = running.clone();

            let handle = thread::spawn(move || {
                while running_ref.load(Ordering::Relaxed) {
                    // Pop under the lock, then run the task with the lock released.
                    let task = {
                        let mut queue = queue.lock().unwrap();
                        queue.pop_front()
                    };

                    if let Some(task) = task {
                        // Catch panics so one bad task does not take down the worker.
                        if let Err(panic_info) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
                            task();
                        })) {
                            // Report diagnostics on stderr (not stdout) and keep the thread alive.
                            if let Some(msg) = panic_info.downcast_ref::<&str>() {
                                eprintln!("Task panicked with message: {}", msg);
                            } else if let Some(msg) = panic_info.downcast_ref::<String>() {
                                eprintln!("Task panicked with message: {}", msg);
                            } else {
                                eprintln!("Task panicked with unknown error");
                            }
                        }
                    } else {
                        // Queue empty: spin politely until work or shutdown arrives.
                        thread::yield_now();
                    }
                }
            });

            worker_threads.push(handle);
        }

        ConcurrentEngine {
            task_queue,
            worker_threads,
            running,
            thread_count, // field shorthand; kept for introspection/statistics
        }
    }

    /// Enqueues a task for execution by some worker thread (FIFO order).
    pub fn submit_task<F>(&self, task: F)
    where
        F: Fn() + Send + 'static,
    {
        let mut queue = self.task_queue.lock().unwrap();
        queue.push_back(Box::new(task));
    }

    /// Blocks until the task queue is observed empty.
    ///
    /// NOTE: this only checks that the queue has drained; a task that was
    /// already popped may still be executing when this returns.
    pub fn wait_for_completion(&self) {
        loop {
            let queue = self.task_queue.lock().unwrap();
            if queue.is_empty() {
                break;
            }
            drop(queue);
            thread::sleep(Duration::from_millis(1));
        }
    }

    /// Signals the workers to stop and joins them.
    ///
    /// Workers exit as soon as they observe the flag, so tasks still queued at
    /// shutdown time may never run; call `wait_for_completion` first if needed.
    pub fn shutdown(self) {
        self.running.store(false, Ordering::Relaxed);
        for handle in self.worker_threads {
            handle.join().unwrap();
        }
    }

    /// Number of worker threads this engine was created with.
    pub fn thread_count(&self) -> usize {
        self.thread_count
    }
}

impl<T> ConcurrentData<T> {
    /// Boxes `value` and stores the raw pointer; version starts at 0.
    ///
    /// NOTE(review): no `Drop` impl is visible in this file, so the boxed
    /// value appears to be leaked when the container is dropped — confirm.
    pub fn new(value: T) -> Self {
        let boxed = Box::into_raw(Box::new(value));
        ConcurrentData {
            data: AtomicPtr::new(boxed),
            version: AtomicUsize::new(0),
        }
    }
    
    /// Returns a clone of the stored value.
    ///
    /// SAFETY(review): dereferences the raw pointer without synchronizing
    /// against `update`; a `get` racing an `update` on the same allocation is
    /// a data race — the atomic pointer load alone does not prevent it.
    pub fn get(&self) -> T
    where
        T: Clone,
    {
        let ptr = self.data.load(Ordering::Acquire);
        unsafe { &*ptr }.clone()
    }
    
    /// Mutates the stored value in place via `f`, then bumps the version.
    ///
    /// SAFETY(review): creates `&mut T` from a shared `&self` with no lock —
    /// two concurrent `update` calls (or an `update` racing a `get`) are
    /// undefined behavior. The version counter provides no exclusion.
    pub fn update<F>(&self, f: F)
    where
        F: FnOnce(&mut T),
    {
        let ptr = self.data.load(Ordering::Acquire);
        unsafe {
            f(&mut *ptr);
        }
        self.version.fetch_add(1, Ordering::Release);
    }
}

impl<T> Clone for ConcurrentData<T> {
    /// WARNING(review): SHALLOW clone — the new instance copies the raw
    /// pointer, so both instances alias the same heap allocation: updates
    /// through either are visible in both, and any future `Drop` impl would
    /// double-free. A deep clone would require a `T: Clone` bound.
    fn clone(&self) -> Self {
        ConcurrentData {
            data: AtomicPtr::new(self.data.load(Ordering::Acquire)),
            version: AtomicUsize::new(self.version.load(Ordering::Acquire)),
        }
    }
}

impl<K, V> ConcurrentHashMap<K, V> {
    /// Creates a map with a fixed table of 16 buckets (never resized).
    pub fn new() -> Self {
        let buckets = (0..16).map(|_| AtomicPtr::new(ptr::null_mut())).collect();

        ConcurrentHashMap {
            buckets,
            len: AtomicUsize::new(0),
        }
    }

    /// Inserts by prepending a new entry to the key's bucket list via CAS.
    ///
    /// An existing entry with the same key is shadowed, not replaced: `get`
    /// returns the newest value, but the old node stays in the list and `len`
    /// still counts it. Entries are never freed (no removal path is provided).
    pub fn insert(&self, key: K, value: V)
    where
        K: std::hash::Hash + Eq,
    {
        // Hash before moving `key` into the node; no `K: Clone` bound needed.
        let bucket = self.get_bucket(&key);
        let new_entry = Box::into_raw(Box::new(HashEntry {
            key,
            value,
            next: AtomicPtr::new(ptr::null_mut()),
        }));

        loop {
            let head = self.buckets[bucket].load(Ordering::Acquire);
            // SAFETY: `new_entry` is not yet published, so we have exclusive access.
            unsafe {
                (*new_entry).next.store(head, Ordering::Release);
            }

            // Publish the node; on CAS failure, retry against the fresh head.
            if self.buckets[bucket].compare_exchange(
                head,
                new_entry,
                Ordering::Release,
                Ordering::Relaxed,
            ).is_ok() {
                self.len.fetch_add(1, Ordering::Relaxed);
                break;
            }
        }
    }

    /// Looks up `key`, returning a reference to the newest matching value.
    pub fn get(&self, key: &K) -> Option<&V>
    where
        K: std::hash::Hash + Eq,
    {
        let bucket = self.get_bucket(key);
        let mut current = self.buckets[bucket].load(Ordering::Acquire);

        while !current.is_null() {
            // SAFETY: nodes are never removed or freed, so a published pointer
            // remains valid for the lifetime of the map (and thus of `&self`).
            unsafe {
                let entry = &*current;
                if &entry.key == key {
                    return Some(&entry.value);
                }
                current = entry.next.load(Ordering::Acquire);
            }
        }

        None
    }

    /// Maps a key to its bucket index using the std `DefaultHasher`.
    fn get_bucket(&self, key: &K) -> usize
    where
        K: std::hash::Hash,
    {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        (hasher.finish() as usize) % self.buckets.len()
    }
}

impl<K, V> Clone for ConcurrentHashMap<K, V> {
    /// BUG(review): this "clone" returns a brand-new EMPTY map — none of the
    /// entries are copied. A real deep copy would require `K: Clone + Hash +
    /// Eq` and `V: Clone` bounds, which would change where this impl applies,
    /// so it is flagged rather than fixed here. Callers must not rely on
    /// `clone()` preserving the map's contents.
    fn clone(&self) -> Self {
        ConcurrentHashMap::new()
    }
}

impl ConcurrentCounter {
    /// Creates a counter starting at `initial`.
    pub fn new(initial: usize) -> Self {
        ConcurrentCounter {
            value: AtomicUsize::new(initial),
        }
    }

    /// Atomically adds one to the counter.
    pub fn increment(&self) {
        self.value.fetch_add(1, Ordering::Relaxed);
    }

    /// Atomically subtracts one, saturating at zero.
    ///
    /// A plain `fetch_sub` would wrap to `usize::MAX` when the counter is
    /// already 0; `fetch_update` with `saturating_sub` avoids that underflow.
    pub fn decrement(&self) {
        // The closure always returns Some, so the Result is always Ok.
        let _ = self
            .value
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| {
                Some(v.saturating_sub(1))
            });
    }

    /// Returns the current count.
    pub fn get(&self) -> usize {
        self.value.load(Ordering::Acquire)
    }
}

impl Clone for ConcurrentCounter {
    fn clone(&self) -> Self {
        ConcurrentCounter {
            value: AtomicUsize::new(self.value.load(Ordering::Acquire)),
        }
    }
}

impl ConcurrentFlag {
    pub fn new(initial: bool) -> Self {
        ConcurrentFlag {
            flag: AtomicBool::new(initial),
        }
    }
    
    pub fn set(&self, value: bool) {
        self.flag.store(value, Ordering::Release);
    }
    
    pub fn get(&self) -> bool {
        self.flag.load(Ordering::Acquire)
    }
}

impl Clone for ConcurrentFlag {
    fn clone(&self) -> Self {
        ConcurrentFlag {
            flag: AtomicBool::new(self.flag.load(Ordering::Acquire)),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;

    #[test]
    fn test_concurrent_engine() {
        let engine = ConcurrentEngine::new(4);
        let counter = Arc::new(ConcurrentCounter::new(0));

        // Submit 1000 tasks, each bumping the shared counter once.
        // (`_` instead of an unused `i` avoids a compiler warning.)
        for _ in 0..1000 {
            let counter = Arc::clone(&counter);
            engine.submit_task(move || {
                counter.increment();
            });
        }

        // Wait until the queue drains AND the counter reaches the expected
        // value: an empty queue alone does not prove in-flight tasks finished.
        let start_time = std::time::Instant::now();
        let timeout = std::time::Duration::from_secs(5);

        while {
            let queue = engine.task_queue.lock().unwrap();
            !queue.is_empty()
        } || counter.get() < 1000 {
            thread::sleep(Duration::from_millis(10));
            if start_time.elapsed() > timeout {
                let queue_size = {
                    let queue = engine.task_queue.lock().unwrap();
                    queue.len()
                };
                panic!("Test timed out. Tasks remaining: {}, Current count: {}",
                       queue_size,
                       counter.get());
            }
        }

        // The loop condition already guarantees the count reached 1000,
        // so no extra grace sleep is needed before asserting.
        assert_eq!(counter.get(), 1000);
        engine.shutdown();
    }

    #[test]
    fn test_concurrent_data() {
        let data = ConcurrentData::new(42);
        assert_eq!(data.get(), 42);

        data.update(|x| *x += 10);
        assert_eq!(data.get(), 52);
    }

    #[test]
    fn test_concurrent_hashmap() {
        let map = ConcurrentHashMap::new();
        map.insert(1, "hello");
        map.insert(2, "world");

        assert_eq!(map.get(&1), Some(&"hello"));
        assert_eq!(map.get(&2), Some(&"world"));
        assert_eq!(map.get(&3), None);
    }

    #[test]
    fn test_concurrent_counter() {
        let counter = ConcurrentCounter::new(0);
        counter.increment();
        counter.increment();
        counter.decrement();

        assert_eq!(counter.get(), 1);
    }

    #[test]
    fn test_concurrent_flag() {
        let flag = ConcurrentFlag::new(false);
        flag.set(true);

        assert!(flag.get());
    }
}