pub mod queue {
    // Ultra-fast bounded lock-free queue using ring buffer.
    // Based on "A Wait-free Queue as Fast as Fetch-and-Add" by Chaoran Yang and John Mellor-Crummey

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::cell::UnsafeCell;
    use std::mem::MaybeUninit;

    const CACHE_LINE: usize = 128;

    #[repr(align(128))]
    struct Cell<T> {
        data: UnsafeCell<MaybeUninit<T>>,
        sequence: AtomicUsize,
        _pad: [u8; CACHE_LINE - 16], // Approximate padding
    }

    unsafe impl<T: Send> Sync for Cell<T> {}

    #[repr(align(128))]
    pub struct Queue<T> {
        head: AtomicUsize,
        _pad1: [u8; CACHE_LINE - 8],
        tail: AtomicUsize,
        _pad2: [u8; CACHE_LINE - 8],
        buffer: *mut Cell<T>,
        capacity: usize,
        mask: usize,
    }
    impl<T> Queue<T> {
        pub fn new(capacity: usize) -> Self {
            debug_assert!(capacity.is_power_of_two(), "Capacity must be power of 2");

            let buffer = {
                let mut v = Vec::with_capacity(capacity);
                for i in 0..capacity {
                    v.push(Cell {
                        data: UnsafeCell::new(MaybeUninit::uninit()),
                        sequence: AtomicUsize::new(i),
                        _pad: [0; CACHE_LINE - 16],
                    });
                }
                let ptr = v.as_mut_ptr();
                std::mem::forget(v);
                ptr
            };

            Queue {
                head: AtomicUsize::new(0),
                _pad1: [0; CACHE_LINE - 8],
                tail: AtomicUsize::new(0),
                _pad2: [0; CACHE_LINE - 8],
                buffer,
                capacity,
                mask: capacity - 1,
            }
        }

        pub fn enqueue(&self, item: T) -> bool {
            let mut pos = self.tail.load(Ordering::Relaxed);

            loop {
                let cell = unsafe { &*self.buffer.add(pos & self.mask) };
                let seq = cell.sequence.load(Ordering::Acquire);
                let dif = seq as isize - pos as isize;

                if dif == 0 {
                    match self.tail.compare_exchange_weak(
                        pos,
                        pos + 1,
                        Ordering::Relaxed,
                        Ordering::Relaxed
                    ) {
                        Ok(_) => {
                            unsafe {
                                (*cell.data.get()).write(item);
                            }
                            cell.sequence.store(pos + 1, Ordering::Release);
                            return true;
                        }
                        Err(actual) => pos = actual,
                    }
                } else if dif < 0 {
                    return false; // Full
                } else {
                    pos = self.tail.load(Ordering::Relaxed);
                }
            }
        }

        pub fn dequeue(&self) -> Option<T> {
            let mut pos = self.head.load(Ordering::Relaxed);

            loop {
                let cell = unsafe { &*self.buffer.add(pos & self.mask) };
                let seq = cell.sequence.load(Ordering::Acquire);
                let dif = seq as isize - (pos + 1) as isize;

                if dif == 0 {
                    match self.head.compare_exchange_weak(
                        pos,
                        pos + 1,
                        Ordering::Relaxed,
                        Ordering::Relaxed
                    ) {
                        Ok(_) => {
                            let data = unsafe {
                                (*cell.data.get()).assume_init_read()
                            };
                            cell.sequence.store(pos + self.mask + 1, Ordering::Release);
                            return Some(data);
                        }
                        Err(actual) => pos = actual,
                    }
                } else if dif < 0 {
                    return None; // Empty
                } else {
                    pos = self.head.load(Ordering::Relaxed);
                }
            }
        }

        pub fn is_empty(&self) -> bool {
            let head = self.head.load(Ordering::Relaxed);
            let tail = self.tail.load(Ordering::Relaxed);
            head >= tail
        }

        pub fn is_full(&self) -> bool {
            let head = self.head.load(Ordering::Relaxed);
            let tail = self.tail.load(Ordering::Relaxed);
            (tail - head) >= self.capacity
        }
    }
    impl<T> Drop for Queue<T> {
        fn drop(&mut self) {
            while self.dequeue().is_some() {}
            unsafe {
                Vec::from_raw_parts(self.buffer, self.capacity, self.capacity);
            }
        }
    }
    unsafe impl<T: Send> Send for Queue<T> {}
    unsafe impl<T: Send> Sync for Queue<T> {}
}

pub mod hashmap {
    //! Bucketized open-addressing hash map with Robin Hood probing.
    //!
    //! NOTE(review): despite the atomic state/hash words, `insert` and
    //! `remove` write keys and values through plain `MaybeUninit` slots with
    //! no synchronization, and the Robin Hood displacement path moves
    //! entries non-atomically. Concurrent mutation is therefore a data race;
    //! safe usage requires external synchronization (e.g. a single writer) —
    //! TODO confirm the intended usage with callers before relying on the
    //! `Sync` impl at the bottom of this module.

    use std::sync::atomic::{AtomicU64, AtomicU32, Ordering};
    use std::hash::{Hash, Hasher};
    use std::collections::hash_map::DefaultHasher;
    use std::marker::PhantomData;
    use std::mem::MaybeUninit;
    use std::ptr;
    use std::alloc::{alloc_zeroed, dealloc, handle_alloc_error, Layout};

    /// Cache line size; documents the `align(64)` on `Bucket`.
    #[allow(dead_code)]
    const CACHE_LINE: usize = 64;

    /// Entries per bucket (must be a power of 2).
    const BUCKET_SIZE: usize = 8;

    /// Number of entries `new()` sizes the table for.
    const DEFAULT_CAPACITY: usize = 1024;

    /// Per-entry states, packed 2 bits each into `Bucket::states`.
    const EMPTY: u8 = 0;
    const OCCUPIED: u8 = 1;
    /// Tombstone: the slot is dead but keeps probe chains alive.
    const DELETED: u8 = 2;

    /// One bucket of `BUCKET_SIZE` entries, aligned to a cache line.
    #[repr(C, align(64))]
    struct Bucket<K, V> {
        /// Packed state bits (2 bits per entry).
        states: AtomicU32,
        /// Cached 32-bit hashes for fast comparison without touching keys.
        hashes: [AtomicU32; BUCKET_SIZE],
        /// Inline storage; slot `i` is initialized iff its state is
        /// `OCCUPIED`.
        keys: [MaybeUninit<K>; BUCKET_SIZE],
        values: [MaybeUninit<V>; BUCKET_SIZE],
    }
    impl<K, V> Bucket<K, V> {
        /// Creates a bucket with all slots `EMPTY`; keys/values stay
        /// uninitialized (sound, they are `MaybeUninit`).
        fn new() -> Self {
            unsafe {
                let mut bucket = MaybeUninit::<Self>::uninit();
                let bucket_ptr = bucket.as_mut_ptr();

                // Initialize the atomic fields in place. `addr_of_mut!`
                // avoids creating `&mut` references into uninitialized
                // memory (which the original did).
                ptr::addr_of_mut!((*bucket_ptr).states).write(AtomicU32::new(0));
                for i in 0..BUCKET_SIZE {
                    ptr::addr_of_mut!((*bucket_ptr).hashes[i]).write(AtomicU32::new(0));
                }

                // SAFETY: every non-MaybeUninit field is now initialized.
                bucket.assume_init()
            }
        }

        /// Returns the 2-bit state of slot `idx`.
        #[inline] fn get_state(&self, idx: usize) -> u8 {
            let states = self.states.load(Ordering::Acquire);
            ((states >> (idx * 2)) & 0b11) as u8
        }

        /// CAS-loops the packed state word to set slot `idx` to `state`.
        #[inline] fn set_state(&self, idx: usize, state: u8) -> bool {
            let shift = idx * 2;
            let mask = 0b11 << shift;
            let new_bits = (state as u32) << shift;

            loop {
                let old = self.states.load(Ordering::Acquire);
                let new = (old & !mask) | new_bits;

                match self.states.compare_exchange_weak(
                    old, new,
                    Ordering::Release,
                    Ordering::Acquire
                ) {
                    Ok(_) => return true,
                    Err(_) => continue,
                }
            }
        }

        /// Finds the first OCCUPIED slot whose cached hash equals `hash`.
        ///
        /// Filtering on the state is essential: cleared slots keep a stored
        /// hash of 0 (see `remove`), so matching on the hash alone would let
        /// a key that legitimately hashes to 0 match an empty/deleted slot
        /// first and shadow the real entry.
        #[inline] fn find_entry(&self, hash: u32) -> Option<usize> {
            let states = self.states.load(Ordering::Acquire);
            for i in 0..BUCKET_SIZE {
                if ((states >> (i * 2)) & 0b11) == OCCUPIED as u32
                    && self.hashes[i].load(Ordering::Acquire) == hash
                {
                    return Some(i);
                }
            }
            None
        }

        /// Finds the first EMPTY slot. DELETED (tombstone) slots are never
        /// reused so probe chains stay intact; the trade-off is that
        /// tombstones accumulate forever (FIXME: no compaction/resize path).
        #[inline] fn find_empty(&self) -> Option<usize> {
            let states = self.states.load(Ordering::Acquire);

            for i in 0..BUCKET_SIZE {
                if ((states >> (i * 2)) & 0b11) == EMPTY as u32 {
                    return Some(i);
                }
            }
            None
        }

        /// Probe statistics for the Robin Hood early-exit heuristic:
        /// the largest wrap-aware probe distance among OCCUPIED slots, and
        /// whether the bucket contains a tombstone. A tombstone means a
        /// chain may continue past this bucket even if the current
        /// occupants all look "close to home".
        #[inline] fn probe_stats(&self, bucket_idx: usize, table_mask: usize) -> (usize, bool) {
            let mut max_distance = 0;
            let mut has_tombstone = false;
            for i in 0..BUCKET_SIZE {
                match self.get_state(i) {
                    OCCUPIED => {
                        let home = (self.hashes[i].load(Ordering::Acquire) as usize) & table_mask;
                        // Masking makes the distance wrap-aware; a bare
                        // wrapping_sub yields garbage once a probe wraps
                        // around the end of the table.
                        let d = bucket_idx.wrapping_sub(home) & table_mask;
                        max_distance = max_distance.max(d);
                    }
                    DELETED => has_tombstone = true,
                    _ => {}
                }
            }
            (max_distance, has_tombstone)
        }
    }

    /// Fixed-size open-addressing hash map (no resize path).
    pub struct UltraHashMap<K, V> {
        /// Heap array of `bucket_count` buckets.
        buckets: *mut Bucket<K, V>,
        /// Number of buckets (power of 2, at least 16).
        bucket_count: usize,
        /// `bucket_count - 1`, used to reduce hashes to bucket indices.
        mask: usize,
        /// Approximate element count (relaxed updates).
        size: AtomicU64,
        /// Layout used for the `buckets` allocation; kept for `dealloc`.
        layout: Layout,
        /// Ties K/V ownership to this type for drop-check purposes.
        _phantom: PhantomData<(K, V)>,
    }
    impl<K: Hash + Eq + Clone, V: Clone> UltraHashMap<K, V> {
        /// Creates a map sized for `DEFAULT_CAPACITY` entries.
        pub fn new() -> Self {
            Self::with_capacity(DEFAULT_CAPACITY)
        }

        /// Creates a map with room for roughly `capacity` entries.
        pub fn with_capacity(capacity: usize) -> Self {
            let bucket_count = (capacity / BUCKET_SIZE).next_power_of_two().max(16);
            let layout = Layout::array::<Bucket<K, V>>(bucket_count).unwrap();

            let buckets = unsafe {
                let ptr = alloc_zeroed(layout) as *mut Bucket<K, V>;
                // Abort via the global handler instead of dereferencing
                // null when the allocator fails.
                if ptr.is_null() {
                    handle_alloc_error(layout);
                }

                for i in 0..bucket_count {
                    ptr::write(ptr.add(i), Bucket::new());
                }

                ptr
            };

            Self {
                buckets,
                bucket_count,
                mask: bucket_count - 1,
                size: AtomicU64::new(0),
                layout,
                _phantom: PhantomData,
            }
        }

        /// Hashes a key down to 32 bits (upper half of the 64-bit hash, so
        /// the bits differ from the ones `mask` consumes).
        #[inline] fn hash_key(key: &K) -> u32 {
            let mut hasher = DefaultHasher::new();
            key.hash(&mut hasher);
            (hasher.finish() >> 32) as u32
        }

        /// Inserts `key -> value`; returns the previous value if the key was
        /// present, `None` for a fresh insert.
        ///
        /// FIXME: when the table is full this also returns `None` and the
        /// entry being carried (possibly a *displaced resident*) is silently
        /// dropped — a resize path is needed to make this loss-free.
        pub fn insert(&self, key: K, value: V) -> Option<V> {
            let hash = Self::hash_key(&key);
            let mut bucket_idx = (hash as usize) & self.mask;
            let mut distance = 0;

            // The entry currently being placed; Robin Hood displacement may
            // swap it for a "richer" resident as we probe.
            let mut curr_key = key;
            let mut curr_value = value;
            let mut curr_hash = hash;

            loop {
                let bucket = unsafe { &*self.buckets.add(bucket_idx) };

                // Update in place if the key already lives in this bucket.
                if let Some(idx) = bucket.find_entry(curr_hash) {
                    if bucket.get_state(idx) == OCCUPIED {
                        let stored_key = unsafe { &*bucket.keys[idx].as_ptr() };
                        if stored_key == &curr_key {
                            // Move the old value out, then overwrite.
                            let old_value = unsafe {
                                ptr::read(bucket.values[idx].as_ptr())
                            };
                            unsafe {
                                let bucket_ptr = self.buckets.add(bucket_idx);
                                ptr::write((*bucket_ptr).values[idx].as_mut_ptr(), curr_value);
                            }
                            return Some(old_value);
                        }
                    }
                }

                // Claim the first empty slot in this bucket, if any.
                if let Some(idx) = bucket.find_empty() {
                    bucket.hashes[idx].store(curr_hash, Ordering::Release);
                    unsafe {
                        let bucket_ptr = self.buckets.add(bucket_idx);
                        ptr::write((*bucket_ptr).keys[idx].as_mut_ptr(), curr_key);
                        ptr::write((*bucket_ptr).values[idx].as_mut_ptr(), curr_value);
                    }
                    // Publish the slot only after key/value are written.
                    bucket.set_state(idx, OCCUPIED);
                    self.size.fetch_add(1, Ordering::Relaxed);
                    return None;
                }

                // Robin Hood: displace a resident that is closer to its home
                // bucket than we currently are to ours.
                let mut should_displace = false;
                let mut displace_idx = 0;

                for i in 0..BUCKET_SIZE {
                    if bucket.get_state(i) == OCCUPIED {
                        let other_hash = bucket.hashes[i].load(Ordering::Acquire);
                        // Wrap-aware distance (masking required, see
                        // `probe_stats`).
                        let other_distance =
                            bucket_idx.wrapping_sub((other_hash as usize) & self.mask) & self.mask;

                        if distance > other_distance {
                            should_displace = true;
                            displace_idx = i;
                            break;
                        }
                    }
                }

                if should_displace {
                    // NOTE(review): this three-part swap is not atomic;
                    // concurrent readers can observe a torn hash/key/value
                    // triple. See the module-level warning.
                    let old_hash = bucket.hashes[displace_idx].swap(curr_hash, Ordering::AcqRel);
                    let old_key = unsafe {
                        let bucket_ptr = self.buckets.add(bucket_idx);
                        ptr::replace((*bucket_ptr).keys[displace_idx].as_mut_ptr(), curr_key)
                    };
                    let old_value = unsafe {
                        let bucket_ptr = self.buckets.add(bucket_idx);
                        ptr::replace((*bucket_ptr).values[displace_idx].as_mut_ptr(), curr_value)
                    };

                    curr_hash = old_hash;
                    curr_key = old_key;
                    curr_value = old_value;
                    distance =
                        bucket_idx.wrapping_sub((curr_hash as usize) & self.mask) & self.mask;
                }

                // Keep probing in the next bucket.
                bucket_idx = (bucket_idx + 1) & self.mask;
                distance += 1;

                // Backstop against an endless probe when the table is full.
                if distance > self.bucket_count {
                    return None;
                }
            }
        }

        /// Looks up `key`, returning a clone of its value.
        pub fn get(&self, key: &K) -> Option<V> {
            let hash = Self::hash_key(key);
            let mut bucket_idx = (hash as usize) & self.mask;
            let mut distance = 0;

            loop {
                let bucket = unsafe { &*self.buckets.add(bucket_idx) };

                // Fast path: occupied slot with a matching hash.
                if let Some(idx) = bucket.find_entry(hash) {
                    if bucket.get_state(idx) == OCCUPIED {
                        let stored_key = unsafe { &*bucket.keys[idx].as_ptr() };
                        if stored_key == key {
                            let value = unsafe { &*bucket.values[idx].as_ptr() };
                            return Some(value.clone());
                        }
                    }
                }

                // Robin Hood early exit: if every resident here is closer to
                // home than we are, the key cannot be further along — unless
                // a tombstone indicates an entry may once have probed past.
                let (max_distance, has_tombstone) = bucket.probe_stats(bucket_idx, self.mask);
                if !has_tombstone && distance > max_distance {
                    return None; // Key not found
                }

                bucket_idx = (bucket_idx + 1) & self.mask;
                distance += 1;

                if distance > self.bucket_count {
                    return None;
                }
            }
        }

        /// Removes `key`, returning its value. The slot becomes a tombstone
        /// (`DELETED`) so longer probe chains keep resolving.
        pub fn remove(&self, key: &K) -> Option<V> {
            let hash = Self::hash_key(key);
            let mut bucket_idx = (hash as usize) & self.mask;
            let mut distance = 0;

            loop {
                let bucket = unsafe { &*self.buckets.add(bucket_idx) };

                if let Some(idx) = bucket.find_entry(hash) {
                    if bucket.get_state(idx) == OCCUPIED {
                        let stored_key = unsafe { &*bucket.keys[idx].as_ptr() };
                        if stored_key == key {
                            // Tombstone the slot and clear its cached hash.
                            bucket.set_state(idx, DELETED);
                            bucket.hashes[idx].store(0, Ordering::Release);

                            let value = unsafe {
                                let bucket_ptr = self.buckets.add(bucket_idx);
                                // Drop the key here: `Drop for UltraHashMap`
                                // only visits OCCUPIED slots, so without
                                // this the key would leak.
                                ptr::drop_in_place((*bucket_ptr).keys[idx].as_mut_ptr());
                                ptr::read(bucket.values[idx].as_ptr())
                            };

                            self.size.fetch_sub(1, Ordering::Relaxed);
                            return Some(value);
                        }
                    }
                }

                // Same early-exit rule as `get`.
                let (max_distance, has_tombstone) = bucket.probe_stats(bucket_idx, self.mask);
                if !has_tombstone && distance > max_distance {
                    return None;
                }

                bucket_idx = (bucket_idx + 1) & self.mask;
                distance += 1;

                if distance > self.bucket_count {
                    return None;
                }
            }
        }

        /// Returns whether `key` is present.
        #[inline] pub fn contains_key(&self, key: &K) -> bool {
            self.get(key).is_some()
        }

        /// Approximate element count (relaxed counter).
        #[inline] pub fn len(&self) -> usize {
            self.size.load(Ordering::Relaxed) as usize
        }

        /// Returns whether the map is (approximately) empty.
        #[inline] pub fn is_empty(&self) -> bool {
            self.len() == 0
        }
    }
    impl<K, V> Drop for UltraHashMap<K, V> {
        fn drop(&mut self) {
            unsafe {
                // Drop every live entry; tombstoned slots were already
                // cleaned up by `remove`.
                for i in 0..self.bucket_count {
                    let bucket = self.buckets.add(i);
                    for j in 0..BUCKET_SIZE {
                        if (*bucket).get_state(j) == OCCUPIED {
                            ptr::drop_in_place((*bucket).keys[j].as_mut_ptr());
                            ptr::drop_in_place((*bucket).values[j].as_mut_ptr());
                        }
                    }
                }

                // SAFETY: `buckets` was allocated in `with_capacity` with
                // exactly `self.layout`.
                dealloc(self.buckets as *mut u8, self.layout);
            }
        }
    }
    // SAFETY(review): see the module-level warning — these impls assume
    // callers externally serialize mutation; verify before concurrent use.
    unsafe impl<K: Send, V: Send> Send for UltraHashMap<K, V> {}
    unsafe impl<K: Send + Sync, V: Send + Sync> Sync for UltraHashMap<K, V> {}
}

pub mod list {
    //! O(1) list-like containers:
    //! 1. `IndexedList`  - O(1) get/set/remove by index, fixed capacity
    //! 2. `UnorderedSet` - O(1) average insert/remove/contains (chained hash table)
    use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
    use std::ptr::{self, null_mut};
    use std::marker::PhantomData;
    use std::alloc::{alloc_zeroed, dealloc, handle_alloc_error, Layout};
    use std::hash::{Hash, Hasher};
    use std::collections::hash_map::DefaultHasher;

    /// O(1) Fixed-capacity indexed list.
    /// Each slot is an `AtomicPtr` to a boxed element (null = empty).
    pub struct IndexedList<T> {
        /// Heap array of `capacity` atomic slots.
        data: *mut AtomicPtr<T>,
        capacity: usize,
        /// Count of non-null slots (relaxed, approximate under contention).
        size: AtomicUsize,
        /// Ties T ownership to this type for drop-check purposes.
        _phantom: PhantomData<T>,
    }
    impl<T> IndexedList<T> {
        /// Creates a list with `capacity` empty slots.
        ///
        /// # Panics
        /// Panics if `capacity` is zero: allocating a zero-sized layout via
        /// the global allocator is undefined behavior, so it is rejected
        /// up front.
        pub fn new(capacity: usize) -> Self {
            assert!(capacity > 0, "IndexedList capacity must be non-zero");
            let layout = Layout::array::<AtomicPtr<T>>(capacity).unwrap();
            let data = unsafe {
                let ptr = alloc_zeroed(layout) as *mut AtomicPtr<T>;
                // Abort via the global handler instead of writing through
                // null when the allocator fails.
                if ptr.is_null() {
                    handle_alloc_error(layout);
                }
                for i in 0..capacity {
                    ptr::write(ptr.add(i), AtomicPtr::new(null_mut()));
                }
                ptr
            };

            Self {
                data,
                capacity,
                size: AtomicUsize::new(0),
                _phantom: PhantomData,
            }
        }

        /// Sets the value at `index` — O(1).
        /// Returns `Ok(previous)` on success, or `Err(value)` (giving the
        /// value back) when `index` is out of bounds.
        pub fn set(&self, index: usize, value: T) -> Result<Option<T>, T> {
            if index >= self.capacity {
                return Err(value);
            }

            let ptr = Box::into_raw(Box::new(value));
            let slot = unsafe { &*self.data.add(index) };
            let old = slot.swap(ptr, Ordering::AcqRel);

            if old.is_null() {
                self.size.fetch_add(1, Ordering::Relaxed);
                Ok(None)
            } else {
                // Take back ownership of the displaced element.
                Ok(Some(unsafe { *Box::from_raw(old) }))
            }
        }

        /// Returns a raw pointer to the value at `index` — O(1).
        /// The pointer is only valid until the slot is overwritten/removed.
        pub fn get(&self, index: usize) -> Option<*const T> {
            if index >= self.capacity {
                return None;
            }

            let slot = unsafe { &*self.data.add(index) };
            let ptr = slot.load(Ordering::Acquire);

            if ptr.is_null() {
                None
            } else {
                Some(ptr as *const T)
            }
        }

        /// Returns a shared reference to the value at `index`.
        ///
        /// NOTE(review): the advertised `'static` lifetime is unsound — the
        /// referent is freed by `remove`/`set`/`Drop`, so holding this
        /// reference past those calls is use-after-free. Kept only for
        /// signature compatibility; callers should be audited.
        pub fn get_ref(&self, index: usize) -> Option<&'static T> {
            if index < self.capacity {
                let slot = unsafe { &*self.data.add(index) };
                let ptr = slot.load(Ordering::Acquire);
                if ptr.is_null() {
                    None
                } else {
                    Some(unsafe { &*(ptr) })
                }
            } else {
                None
            }
        }

        /// Returns a mutable reference to the value at `index`.
        ///
        /// NOTE(review): unsound for the same lifetime reason as `get_ref`,
        /// and additionally hands out aliasable `&mut` from `&self` with no
        /// synchronization. Kept only for signature compatibility.
        pub fn get_mut(&self, index: usize) -> Option<&'static mut T> {
            if index < self.capacity {
                let slot = unsafe { &*self.data.add(index) };
                let ptr = slot.load(Ordering::Acquire);
                if ptr.is_null() {
                    None
                } else {
                    Some(unsafe { &mut *(ptr) })
                }
            } else {
                None
            }
        }

        /// Removes and returns the value at `index` — O(1).
        pub fn remove(&self, index: usize) -> Option<T> {
            if index >= self.capacity {
                return None;
            }

            let slot = unsafe { &*self.data.add(index) };
            let ptr = slot.swap(null_mut(), Ordering::AcqRel);

            if ptr.is_null() {
                None
            } else {
                self.size.fetch_sub(1, Ordering::Relaxed);
                Some(unsafe { *Box::from_raw(ptr) })
            }
        }

        /// Total number of slots.
        pub fn capacity(&self) -> usize {
            self.capacity
        }

        /// Number of occupied slots (approximate under contention).
        pub fn len(&self) -> usize {
            self.size.load(Ordering::Relaxed)
        }
    }
    impl<T> Drop for IndexedList<T> {
        fn drop(&mut self) {
            // Free every remaining boxed element.
            for i in 0..self.capacity {
                let slot = unsafe { &*self.data.add(i) };
                let ptr = slot.load(Ordering::Acquire);
                if !ptr.is_null() {
                    unsafe { let _ = Box::from_raw(ptr); }
                }
            }

            // SAFETY: `data` was allocated in `new` with this exact layout
            // (capacity is immutable, and `new` rejects capacity == 0).
            let layout = Layout::array::<AtomicPtr<T>>(self.capacity).unwrap();
            unsafe {
                dealloc(self.data as *mut u8, layout);
            }
        }
    }
    unsafe impl<T: Send> Send for IndexedList<T> {}
    unsafe impl<T: Send + Sync> Sync for IndexedList<T> {}

    /// One hash bucket: head of an atomic singly linked chain.
    #[repr(align(64))]
    struct Bucket<T> {
        head: AtomicPtr<SetNode<T>>,
    }

    /// Chain node; `hash` is cached to skip `Eq` checks on mismatches.
    struct SetNode<T> {
        data: T,
        hash: u64,
        next: AtomicPtr<SetNode<T>>,
    }

    /// O(1)-average unordered set backed by a chained hash table.
    ///
    /// NOTE(review): only `insert`'s head push is atomic; the pre-insert
    /// duplicate scan and `remove`'s unlink are racy under concurrent
    /// mutation (duplicates possible, and a removed node is freed while
    /// other threads may still traverse it). Safe under a single writer.
    pub struct UnorderedSet<T: Hash + Eq> {
        /// Heap array of `bucket_count` chain heads.
        buckets: *mut Bucket<T>,
        /// Number of buckets (power of two).
        bucket_count: usize,
        /// `bucket_count - 1`, reduces hashes to bucket indices.
        mask: usize,
        /// Element count (relaxed).
        size: AtomicUsize,
        _phantom: PhantomData<T>,
    }
    impl<T: Hash + Eq> UnorderedSet<T> {
        /// Creates a set with a default bucket count.
        pub fn new() -> Self {
            Self::with_capacity(1024)
        }

        /// Creates a set with at least `capacity` buckets (rounded up to a
        /// power of two; `0` rounds up to one bucket).
        pub fn with_capacity(capacity: usize) -> Self {
            let bucket_count = capacity.next_power_of_two();
            let layout = Layout::array::<Bucket<T>>(bucket_count).unwrap();

            let buckets = unsafe {
                let ptr = alloc_zeroed(layout) as *mut Bucket<T>;
                // Abort via the global handler instead of writing through
                // null when the allocator fails.
                if ptr.is_null() {
                    handle_alloc_error(layout);
                }
                for i in 0..bucket_count {
                    ptr::write(ptr.add(i), Bucket {
                        head: AtomicPtr::new(null_mut()),
                    });
                }
                ptr
            };

            Self {
                buckets,
                bucket_count,
                mask: bucket_count - 1,
                size: AtomicUsize::new(0),
                _phantom: PhantomData,
            }
        }

        /// Full 64-bit hash of an item.
        fn hash(item: &T) -> u64 {
            let mut hasher = DefaultHasher::new();
            item.hash(&mut hasher);
            hasher.finish()
        }

        /// Inserts `item`; returns `false` if it was already present.
        /// O(1) average.
        pub fn insert(&self, item: T) -> bool {
            let hash = Self::hash(&item);
            let bucket_idx = (hash as usize) & self.mask;
            let bucket = unsafe { &*self.buckets.add(bucket_idx) };

            // Duplicate scan. NOTE(review): not atomic with the push below,
            // so two racing inserts of the same item can both succeed.
            let mut current = bucket.head.load(Ordering::Acquire);
            while !current.is_null() {
                let node = unsafe { &*current };
                if node.hash == hash && node.data == item {
                    return false; // Already exists
                }
                current = node.next.load(Ordering::Acquire);
            }

            // Push a new node onto the chain head (CAS loop).
            let new_node = Box::into_raw(Box::new(SetNode {
                data: item,
                hash,
                next: AtomicPtr::new(null_mut()),
            }));

            loop {
                let head = bucket.head.load(Ordering::Acquire);
                unsafe {
                    (*new_node).next.store(head, Ordering::Relaxed);
                }

                match bucket.head.compare_exchange(
                    head,
                    new_node,
                    Ordering::Release,
                    Ordering::Acquire
                ) {
                    Ok(_) => {
                        self.size.fetch_add(1, Ordering::Relaxed);
                        return true;
                    }
                    Err(_) => continue,
                }
            }
        }

        /// Returns whether `item` is present. O(1) average.
        pub fn contains(&self, item: &T) -> bool {
            let hash = Self::hash(item);
            let bucket_idx = (hash as usize) & self.mask;
            let bucket = unsafe { &*self.buckets.add(bucket_idx) };

            let mut current = bucket.head.load(Ordering::Acquire);
            while !current.is_null() {
                let node = unsafe { &*current };
                if node.hash == hash && node.data == *item {
                    return true;
                }
                current = node.next.load(Ordering::Acquire);
            }
            false
        }

        /// Removes `item`; returns `false` if absent (or if a concurrent
        /// modification makes the unlink CAS fail). O(1) average.
        ///
        /// NOTE(review): the node is freed immediately after unlinking, so
        /// a concurrent traverser can read freed memory; only safe with a
        /// single mutating thread.
        pub fn remove(&self, item: &T) -> bool {
            let hash = Self::hash(item);
            let bucket_idx = (hash as usize) & self.mask;
            let bucket = unsafe { &*self.buckets.add(bucket_idx) };

            // Walk the chain keeping a pointer to the link we came through.
            let mut prev = &bucket.head as *const AtomicPtr<SetNode<T>>;
            let mut current = bucket.head.load(Ordering::Acquire);

            while !current.is_null() {
                let node = unsafe { &*current };
                if node.hash == hash && node.data == *item {
                    let next = node.next.load(Ordering::Acquire);

                    let prev_atomic = unsafe { &*prev };
                    match prev_atomic.compare_exchange(
                        current,
                        next,
                        Ordering::Release,
                        Ordering::Acquire
                    ) {
                        Ok(_) => {
                            self.size.fetch_sub(1, Ordering::Relaxed);
                            unsafe { let _ = Box::from_raw(current); }
                            return true;
                        }
                        Err(_) => return false, // Concurrent modification
                    }
                }

                prev = &node.next as *const AtomicPtr<SetNode<T>>;
                current = node.next.load(Ordering::Acquire);
            }
            false
        }

        /// Element count (approximate under contention).
        pub fn len(&self) -> usize {
            self.size.load(Ordering::Relaxed)
        }
    }
    impl<T: Hash + Eq> Drop for UnorderedSet<T> {
        fn drop(&mut self) {
            // Free every chain node, then the bucket array itself.
            for i in 0..self.bucket_count {
                let bucket = unsafe { &*self.buckets.add(i) };
                let mut current = bucket.head.load(Ordering::Acquire);

                while !current.is_null() {
                    let next = unsafe {
                        let node = Box::from_raw(current);
                        node.next.load(Ordering::Acquire)
                    };
                    current = next;
                }
            }

            // SAFETY: `buckets` was allocated in `with_capacity` with this
            // exact layout.
            let layout = Layout::array::<Bucket<T>>(self.bucket_count).unwrap();
            unsafe {
                dealloc(self.buckets as *mut u8, layout);
            }
        }
    }
    unsafe impl<T: Send + Hash + Eq> Send for UnorderedSet<T> {}
    unsafe impl<T: Send + Sync + Hash + Eq> Sync for UnorderedSet<T> {}

}