#include "bw_tree.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>

// Atomic memory ordering for all CAS operations
#define MEMORY_ORDER_ACQUIRE memory_order_acquire
#define MEMORY_ORDER_RELEASE memory_order_release
#define MEMORY_ORDER_ACQ_REL memory_order_acq_rel
#define MEMORY_ORDER_RELAXED memory_order_relaxed

// Utility macros
#define BW_ALIGN_SIZE(size, alignment) (((size) + (alignment) - 1) & ~((alignment) - 1))
#define BW_MIN(a, b) ((a) < (b) ? (a) : (b))
#define BW_MAX(a, b) ((a) > (b) ? (a) : (b))

// Global epoch counter for garbage collection
static atomic_ulong global_epoch = ATOMIC_VAR_INIT(0);

// =============================================================================
// Key and Value Utilities
// =============================================================================

int bw_key_compare(const bw_key_t* key1, const bw_key_t* key2) {
    if (!key1 || !key2) return 0;
    return bw_key_compare_raw(key1->data, key1->size, key2->data, key2->size);
}

/*
 * Lexicographic comparison of two raw byte keys.
 * NULL ordering: NULL == NULL, and NULL sorts before any real key.
 * When one key is a prefix of the other, the shorter key sorts first.
 */
int bw_key_compare_raw(const void* key1, size_t key1_size, 
                      const void* key2, size_t key2_size) {
    if (!key1) {
        return key2 ? -1 : 0;
    }
    if (!key2) {
        return 1;
    }

    size_t common = (key1_size < key2_size) ? key1_size : key2_size;
    int cmp = memcmp(key1, key2, common);
    if (cmp != 0) {
        return cmp;
    }

    /* Common prefix is equal: order by length. */
    if (key1_size == key2_size) {
        return 0;
    }
    return (key1_size < key2_size) ? -1 : 1;
}

/*
 * Allocate a boxed key (header plus inline payload) from the memory manager.
 * Returns NULL on bad arguments, oversized keys, or allocation failure.
 * Ownership passes to the caller; release with bw_key_destroy().
 */
bw_key_t* bw_key_create(memory_manager_t* memory_mgr, const void* key, size_t key_size) {
    if (!memory_mgr || !key) return NULL;
    if (key_size == 0 || key_size > BW_TREE_MAX_KEY_SIZE) return NULL;

    bw_key_t* boxed = mm_alloc(memory_mgr, sizeof(bw_key_t) + key_size, MEM_TYPE_INDEX_BUFFER);
    if (boxed) {
        boxed->size = key_size;
        memcpy(boxed->data, key, key_size);
    }
    return boxed;
}

/*
 * Allocate a boxed value (header plus inline payload) from the memory manager.
 * Returns NULL on bad arguments, oversized values, or allocation failure.
 * Ownership passes to the caller; release with bw_value_destroy().
 */
bw_value_t* bw_value_create(memory_manager_t* memory_mgr, const void* value, size_t value_size) {
    if (!memory_mgr || !value) return NULL;
    if (value_size == 0 || value_size > BW_TREE_MAX_VALUE_SIZE) return NULL;

    bw_value_t* boxed = mm_alloc(memory_mgr, sizeof(bw_value_t) + value_size, MEM_TYPE_INDEX_BUFFER);
    if (boxed) {
        boxed->size = value_size;
        memcpy(boxed->data, value, value_size);
    }
    return boxed;
}

/* Release a boxed key; NULL-safe in both arguments. */
void bw_key_destroy(bw_key_t* key, memory_manager_t* memory_mgr) {
    if (!key || !memory_mgr) return;
    mm_free(memory_mgr, key);
}

/* Release a boxed value; NULL-safe in both arguments. */
void bw_value_destroy(bw_value_t* value, memory_manager_t* memory_mgr) {
    if (!value || !memory_mgr) return;
    mm_free(memory_mgr, value);
}

// =============================================================================
// Mapping Table Implementation
// =============================================================================

/*
 * Create a page-ID -> physical-page mapping table.
 * A zero initial_capacity selects the compile-time default.
 * Page ID 0 is reserved as the invalid sentinel, so allocation starts at 1.
 */
mapping_table_t* mapping_table_create(size_t initial_capacity) {
    size_t capacity = initial_capacity ? initial_capacity : BW_TREE_MAPPING_TABLE_SIZE;

    mapping_table_t* table = malloc(sizeof *table);
    if (!table) return NULL;

    /* calloc zero-fills, so every entry starts as {page = NULL, version = 0}. */
    table->entries = calloc(capacity, sizeof *table->entries);
    if (!table->entries) goto fail_entries;

    table->capacity = capacity;
    table->size = 0;
    atomic_init(&table->next_page_id, 1); /* 0 is the invalid page ID */

    if (pthread_rwlock_init(&table->resize_lock, NULL) != 0) goto fail_lock;

    return table;

fail_lock:
    free(table->entries);
fail_entries:
    free(table);
    return NULL;
}

/*
 * Tear down a mapping table and free every physical page it still holds.
 * Must only be called once no other thread can touch the table.
 *
 * NOTE(review): only the bw_page_t shells are freed here; any bw_node_t
 * (or delta chain) still reachable through page->node_ptr is NOT released,
 * presumably because nodes belong to the memory manager / GC — confirm that
 * callers reclaim nodes before destroying the table.
 */
void mapping_table_destroy(mapping_table_t* table) {
    if (!table) return;
    
    pthread_rwlock_destroy(&table->resize_lock);
    
    // Clean up all pages
    for (size_t i = 0; i < table->capacity; i++) {
        bw_page_t* page = atomic_load(&table->entries[i].page);
        if (page) {
            pthread_mutex_destroy(&page->mutex);
            free(page);
        }
    }
    
    free(table->entries);
    free(table);
}

/*
 * Reserve a fresh page ID, grow the table if needed, and install a new
 * physical page at that slot.  Returns 0 (the invalid ID) on failure.
 *
 * BUG FIX: the old code stored into entries[page_id] even when the resize
 * failed (or when one doubling was not enough to cover page_id), writing
 * past the end of the entries array.  The store is now bounds-checked and
 * the capacity is doubled until it covers page_id.
 */
page_id_t mapping_table_allocate_page(mapping_table_t* table) {
    if (!table) return 0;
    
    page_id_t page_id = atomic_fetch_add(&table->next_page_id, 1);
    
    pthread_rwlock_rdlock(&table->resize_lock);
    
    if (page_id >= table->capacity) {
        /* rwlocks cannot be upgraded in place: drop the read lock, take the
         * write lock, and re-check under it. */
        pthread_rwlock_unlock(&table->resize_lock);
        pthread_rwlock_wrlock(&table->resize_lock);
        
        if (page_id >= table->capacity) {
            /* Double until the new capacity covers page_id (many concurrent
             * allocators may have advanced next_page_id well past capacity). */
            size_t new_capacity = table->capacity;
            while (new_capacity <= page_id) {
                new_capacity *= 2;
            }
            mapping_entry_t* new_entries = realloc(table->entries, 
                                                  new_capacity * sizeof(mapping_entry_t));
            if (new_entries) {
                /* Zero-fill the newly exposed slots. */
                memset(new_entries + table->capacity, 0, 
                      (new_capacity - table->capacity) * sizeof(mapping_entry_t));
                table->entries = new_entries;
                table->capacity = new_capacity;
            }
        }
        
        pthread_rwlock_unlock(&table->resize_lock);
        pthread_rwlock_rdlock(&table->resize_lock);
    }
    
    /* If the resize failed we must not touch entries[page_id]. */
    if (page_id >= table->capacity) {
        pthread_rwlock_unlock(&table->resize_lock);
        return 0;
    }
    
    // Create physical page
    bw_page_t* page = malloc(sizeof(bw_page_t));
    if (page) {
        page->page_id = page_id;
        atomic_init(&page->node_ptr, NULL);
        atomic_init(&page->ref_count, 1);
        atomic_init(&page->is_deleted, false);
        page->last_access_epoch = atomic_load(&global_epoch);
        
        if (pthread_mutex_init(&page->mutex, NULL) == 0) {
            atomic_store(&table->entries[page_id].page, page);
            atomic_store(&table->entries[page_id].version, 0);
            table->size++;
        } else {
            free(page);
            page_id = 0;
        }
    } else {
        page_id = 0;
    }
    
    pthread_rwlock_unlock(&table->resize_lock);
    return page_id;
}

/*
 * Look up the physical page for a page ID, bumping its reference count.
 * Returns NULL for the invalid ID, out-of-range IDs, or empty slots.
 *
 * BUG FIX: the capacity bounds check used to run before taking resize_lock,
 * racing with concurrent table growth (capacity and entries are only stable
 * under the lock).  The check now happens inside the read lock.
 */
bw_page_t* mapping_table_get_page(mapping_table_t* table, page_id_t page_id) {
    if (!table || page_id == 0) {
        return NULL;
    }
    
    bw_page_t* page = NULL;
    pthread_rwlock_rdlock(&table->resize_lock);
    if (page_id < table->capacity) {
        page = atomic_load(&table->entries[page_id].page);
        if (page) {
            atomic_fetch_add(&page->ref_count, 1);
            page->last_access_epoch = atomic_load(&global_epoch);
        }
    }
    pthread_rwlock_unlock(&table->resize_lock);
    
    return page;
}

/*
 * Atomically swing a page's node pointer from `expected` to `desired`.
 * On success the entry's version is bumped so readers can detect the change.
 *
 * BUG FIX: as in mapping_table_get_page, the capacity check used to run
 * outside resize_lock and could race with a concurrent resize; it is now
 * performed under the read lock.
 */
bool mapping_table_cas_page(mapping_table_t* table, page_id_t page_id,
                           bw_node_t* expected, bw_node_t* desired) {
    if (!table || page_id == 0) {
        return false;
    }
    
    bool success = false;
    pthread_rwlock_rdlock(&table->resize_lock);
    if (page_id < table->capacity) {
        bw_page_t* page = atomic_load(&table->entries[page_id].page);
        if (page) {
            success = atomic_compare_exchange_strong(&page->node_ptr, &expected, desired);
            if (success) {
                atomic_fetch_add(&table->entries[page_id].version, 1);
                page->last_access_epoch = atomic_load(&global_epoch);
            }
        }
    }
    pthread_rwlock_unlock(&table->resize_lock);
    return success;
}

// =============================================================================
// Garbage Collection Implementation
// =============================================================================

/*
 * Create an epoch-based garbage-collection context.  The GC thread is NOT
 * started here; callers set gc_running and spawn gc_thread_func themselves.
 *
 * BUG FIX: the old code initialized all three sync primitives in one
 * condition and leaked the ones that had already succeeded when a later
 * init failed.  They are now initialized stepwise with unwinding cleanup.
 */
gc_context_t* gc_context_create(void) {
    gc_context_t* gc_context = malloc(sizeof(gc_context_t));
    if (!gc_context) return NULL;
    
    gc_context->current_epoch = atomic_load(&global_epoch);
    atomic_init(&gc_context->active_threads, 0);
    gc_context->gc_running = false;
    gc_context->gc_queue_head = NULL;
    gc_context->gc_queue_tail = NULL;
    
    if (pthread_mutex_init(&gc_context->epoch_mutex, NULL) != 0) {
        goto fail_ctx;
    }
    if (pthread_cond_init(&gc_context->epoch_cond, NULL) != 0) {
        goto fail_epoch_mutex;
    }
    if (pthread_mutex_init(&gc_context->gc_queue_mutex, NULL) != 0) {
        goto fail_epoch_cond;
    }
    
    return gc_context;

fail_epoch_cond:
    pthread_cond_destroy(&gc_context->epoch_cond);
fail_epoch_mutex:
    pthread_mutex_destroy(&gc_context->epoch_mutex);
fail_ctx:
    free(gc_context);
    return NULL;
}

/*
 * Stop the GC thread (if running), drain the deferred-delete queue, and
 * release the context.  Must be the last reference to the context.
 *
 * BUG FIX: gc_running was cleared and the condvar signaled without holding
 * epoch_mutex, so the GC thread could observe gc_running == true, then miss
 * the signal before entering pthread_cond_timedwait (lost wakeup).  The
 * flag flip and signal now happen under the mutex.
 */
void gc_context_destroy(gc_context_t* gc_context) {
    if (!gc_context) return;
    
    // Stop GC thread if running
    if (gc_context->gc_running) {
        pthread_mutex_lock(&gc_context->epoch_mutex);
        gc_context->gc_running = false;
        pthread_cond_signal(&gc_context->epoch_cond);
        pthread_mutex_unlock(&gc_context->epoch_mutex);
        pthread_join(gc_context->gc_thread, NULL);
    }
    
    // Free every node still queued for deferred deletion
    pthread_mutex_lock(&gc_context->gc_queue_mutex);
    struct gc_node* current = gc_context->gc_queue_head;
    while (current) {
        struct gc_node* next = current->next;
        free(current->node);
        free(current);
        current = next;
    }
    pthread_mutex_unlock(&gc_context->gc_queue_mutex);
    
    pthread_mutex_destroy(&gc_context->epoch_mutex);
    pthread_cond_destroy(&gc_context->epoch_cond);
    pthread_mutex_destroy(&gc_context->gc_queue_mutex);
    
    free(gc_context);
}

/*
 * Enter a protected epoch: register this thread as active and return the
 * epoch it observed.  Pair every call with gc_exit_epoch().
 */
epoch_t gc_enter_epoch(gc_context_t* gc_context) {
    if (gc_context == NULL) {
        return 0;
    }
    atomic_fetch_add(&gc_context->active_threads, 1);
    return atomic_load(&global_epoch);
}

/*
 * Leave a protected epoch.  The last thread out advances the global epoch
 * and wakes the GC thread so it can reclaim deferred nodes.
 */
void gc_exit_epoch(gc_context_t* gc_context) {
    if (gc_context == NULL) {
        return;
    }
    
    /* atomic_fetch_sub returns the pre-decrement value, so a result of 1
     * means this was the final active thread. */
    if (atomic_fetch_sub(&gc_context->active_threads, 1) == 1) {
        pthread_mutex_lock(&gc_context->epoch_mutex);
        gc_context->current_epoch = atomic_fetch_add(&global_epoch, 1) + 1;
        pthread_cond_signal(&gc_context->epoch_cond);
        pthread_mutex_unlock(&gc_context->epoch_mutex);
    }
}

/*
 * Queue a node for epoch-deferred deletion; it will be freed once the GC
 * thread deems its delete epoch safely in the past.
 * NOTE(review): if the bookkeeping allocation fails the node is silently
 * leaked — presumably preferable to freeing under concurrent readers.
 */
void gc_defer_delete(gc_context_t* gc_context, bw_node_t* node) {
    if (!gc_context || !node) return;
    
    struct gc_node* entry = malloc(sizeof *entry);
    if (!entry) return;
    
    entry->node = node;
    entry->delete_epoch = atomic_load(&global_epoch);
    entry->next = NULL;
    
    /* Append at the tail of the singly-linked queue. */
    pthread_mutex_lock(&gc_context->gc_queue_mutex);
    if (gc_context->gc_queue_head == NULL) {
        gc_context->gc_queue_head = entry;
    } else {
        gc_context->gc_queue_tail->next = entry;
    }
    gc_context->gc_queue_tail = entry;
    pthread_mutex_unlock(&gc_context->gc_queue_mutex);
}

/*
 * Background GC thread: wakes on epoch advancement (or a timeout) and frees
 * queue entries whose delete epoch is at least two epochs in the past.
 *
 * BUG FIXES:
 *  - timeout.tv_nsec could exceed 999999999 after the addition, which makes
 *    pthread_cond_timedwait fail with EINVAL; it is now normalized.
 *  - `current_epoch - 2` wrapped around for unsigned epochs below 2, making
 *    every queued node look immediately reclaimable right after startup;
 *    the subtraction is now clamped at 0 (nothing is freed until epoch 2).
 */
void* gc_thread_func(void* arg) {
    gc_context_t* gc_context = (gc_context_t*)arg;
    if (!gc_context) return NULL;
    
    while (gc_context->gc_running) {
        // Wait for epoch advancement or timeout
        pthread_mutex_lock(&gc_context->epoch_mutex);
        struct timespec timeout;
        clock_gettime(CLOCK_REALTIME, &timeout);
        timeout.tv_sec += BW_TREE_GC_EPOCH_INTERVAL / 1000;
        timeout.tv_nsec += (long)(BW_TREE_GC_EPOCH_INTERVAL % 1000) * 1000000L;
        if (timeout.tv_nsec >= 1000000000L) {
            timeout.tv_sec += 1;
            timeout.tv_nsec -= 1000000000L;
        }
        
        pthread_cond_timedwait(&gc_context->epoch_cond, &gc_context->epoch_mutex, &timeout);
        pthread_mutex_unlock(&gc_context->epoch_mutex);
        
        if (!gc_context->gc_running) break;
        
        // Only reclaim nodes at least two epochs old (conservative margin)
        epoch_t safe_epoch = (gc_context->current_epoch >= 2)
                                 ? gc_context->current_epoch - 2
                                 : 0;
        
        pthread_mutex_lock(&gc_context->gc_queue_mutex);
        struct gc_node* current = gc_context->gc_queue_head;
        struct gc_node* prev = NULL;
        
        while (current) {
            if (current->delete_epoch < safe_epoch) {
                // Unlink and free this entry
                if (prev) {
                    prev->next = current->next;
                } else {
                    gc_context->gc_queue_head = current->next;
                }
                
                if (current == gc_context->gc_queue_tail) {
                    gc_context->gc_queue_tail = prev;
                }
                
                struct gc_node* to_delete = current;
                current = current->next;
                
                free(to_delete->node);
                free(to_delete);
            } else {
                prev = current;
                current = current->next;
            }
        }
        pthread_mutex_unlock(&gc_context->gc_queue_mutex);
    }
    
    return NULL;
}

// =============================================================================
// Node Management
// =============================================================================

/*
 * Allocate and zero-initialize a leaf node of `node_size` bytes.
 *
 * BUG FIX: the old code accepted any node_size; a value smaller than the
 * node header meant the field writes below ran past the allocation.
 */
bw_node_t* bw_node_create_leaf(memory_manager_t* memory_mgr, uint32_t node_size) {
    if (!memory_mgr || node_size < sizeof(bw_node_t)) return NULL;
    
    bw_node_t* node = mm_alloc(memory_mgr, node_size, MEM_TYPE_INDEX_BUFFER);
    if (!node) return NULL;
    
    memset(node, 0, node_size);
    node->type = BW_NODE_LEAF;
    node->size = node_size;
    node->count = 0;
    node->level = 0;              /* leaves sit at level 0 */
    node->create_epoch = atomic_load(&global_epoch);
    atomic_init(&node->next, NULL);
    
    return node;
}

/*
 * Allocate and zero-initialize an internal node of `node_size` bytes.
 *
 * BUG FIX: as with leaf creation, a node_size smaller than the header
 * previously caused out-of-bounds field writes; it is now rejected.
 */
bw_node_t* bw_node_create_internal(memory_manager_t* memory_mgr, uint32_t node_size) {
    if (!memory_mgr || node_size < sizeof(bw_node_t)) return NULL;
    
    bw_node_t* node = mm_alloc(memory_mgr, node_size, MEM_TYPE_INDEX_BUFFER);
    if (!node) return NULL;
    
    memset(node, 0, node_size);
    node->type = BW_NODE_INTERNAL;
    node->size = node_size;
    node->count = 0;
    node->level = 1; // Will be set properly when inserted into tree
    node->create_epoch = atomic_load(&global_epoch);
    atomic_init(&node->next, NULL);
    
    return node;
}

/*
 * Build an INSERT delta record carrying boxed copies of key and value.
 * Returns NULL on bad arguments or any allocation failure (partially built
 * records are torn down; the destroy helpers are NULL-safe).
 */
delta_record_t* bw_delta_create_insert(memory_manager_t* memory_mgr, 
                                      const void* key, size_t key_size,
                                      const void* value, size_t value_size) {
    if (!memory_mgr || !key || !value) return NULL;
    
    delta_record_t* delta = mm_alloc(memory_mgr, sizeof(delta_record_t), MEM_TYPE_INDEX_BUFFER);
    if (!delta) return NULL;
    
    delta->type = BW_NODE_DELTA_INSERT;
    delta->size = sizeof(delta_record_t);
    delta->create_epoch = atomic_load(&global_epoch);
    atomic_init(&delta->next, NULL);
    
    delta->data.insert.key = bw_key_create(memory_mgr, key, key_size);
    delta->data.insert.value = bw_value_create(memory_mgr, value, value_size);
    if (delta->data.insert.key && delta->data.insert.value) {
        return delta;
    }
    
    /* Partial allocation: unwind whatever was built. */
    bw_key_destroy(delta->data.insert.key, memory_mgr);
    bw_value_destroy(delta->data.insert.value, memory_mgr);
    mm_free(memory_mgr, delta);
    return NULL;
}

/*
 * Build a DELETE delta record carrying a boxed copy of the key.
 * Returns NULL on bad arguments or allocation failure.
 */
delta_record_t* bw_delta_create_delete(memory_manager_t* memory_mgr,
                                      const void* key, size_t key_size) {
    if (!memory_mgr || !key) return NULL;
    
    delta_record_t* delta = mm_alloc(memory_mgr, sizeof(delta_record_t), MEM_TYPE_INDEX_BUFFER);
    if (!delta) return NULL;
    
    delta->type = BW_NODE_DELTA_DELETE;
    delta->size = sizeof(delta_record_t);
    delta->create_epoch = atomic_load(&global_epoch);
    atomic_init(&delta->next, NULL);
    
    delta->data.delete.key = bw_key_create(memory_mgr, key, key_size);
    if (delta->data.delete.key == NULL) {
        mm_free(memory_mgr, delta);
        return NULL;
    }
    
    return delta;
}

/*
 * Build an UPDATE delta record carrying boxed copies of the key, the prior
 * value, and the replacement value.  All three must be non-NULL.
 * Returns NULL on bad arguments or any allocation failure (partially built
 * records are torn down; the destroy helpers are NULL-safe).
 */
delta_record_t* bw_delta_create_update(memory_manager_t* memory_mgr,
                                      const void* key, size_t key_size,
                                      const void* old_value, size_t old_value_size,
                                      const void* new_value, size_t new_value_size) {
    if (!memory_mgr || !key || !old_value || !new_value) return NULL;
    
    delta_record_t* delta = mm_alloc(memory_mgr, sizeof(delta_record_t), MEM_TYPE_INDEX_BUFFER);
    if (!delta) return NULL;
    
    delta->type = BW_NODE_DELTA_UPDATE;
    delta->size = sizeof(delta_record_t);
    delta->create_epoch = atomic_load(&global_epoch);
    atomic_init(&delta->next, NULL);
    
    delta->data.update.key = bw_key_create(memory_mgr, key, key_size);
    delta->data.update.old_value = bw_value_create(memory_mgr, old_value, old_value_size);
    delta->data.update.new_value = bw_value_create(memory_mgr, new_value, new_value_size);
    if (delta->data.update.key && delta->data.update.old_value && delta->data.update.new_value) {
        return delta;
    }
    
    /* Partial allocation: unwind whatever was built. */
    bw_key_destroy(delta->data.update.key, memory_mgr);
    bw_value_destroy(delta->data.update.old_value, memory_mgr);
    bw_value_destroy(delta->data.update.new_value, memory_mgr);
    mm_free(memory_mgr, delta);
    return NULL;
}

/*
 * Release a node or delta record.  Delta records own boxed keys/values,
 * which must be freed before the record itself; the destroy helpers are
 * NULL-safe, so no per-field guards are needed.
 */
void bw_node_destroy(bw_node_t* node, memory_manager_t* memory_mgr) {
    if (!node || !memory_mgr) return;
    
    if (node->type >= BW_NODE_DELTA_INSERT) {
        delta_record_t* delta = (delta_record_t*)node;
        
        switch (delta->type) {
            case BW_NODE_DELTA_INSERT:
                bw_key_destroy(delta->data.insert.key, memory_mgr);
                bw_value_destroy(delta->data.insert.value, memory_mgr);
                break;
                
            case BW_NODE_DELTA_DELETE:
                bw_key_destroy(delta->data.delete.key, memory_mgr);
                break;
                
            case BW_NODE_DELTA_UPDATE:
                bw_key_destroy(delta->data.update.key, memory_mgr);
                bw_value_destroy(delta->data.update.old_value, memory_mgr);
                bw_value_destroy(delta->data.update.new_value, memory_mgr);
                break;
                
            case BW_NODE_DELTA_SPLIT:
                bw_key_destroy(delta->data.split.split_key, memory_mgr);
                break;
                
            case BW_NODE_DELTA_MERGE:
                bw_key_destroy(delta->data.merge.merge_key, memory_mgr);
                break;
                
            default:
                break;
        }
    }
    
    mm_free(memory_mgr, node);
}

// =============================================================================
// Tree Management
// =============================================================================

/*
 * Build a complete Bw-Tree: mapping table, GC context, an initial leaf
 * root installed at a freshly allocated page, and a running GC thread.
 * Returns NULL on any failure, unwinding everything created so far.
 *
 * NOTE(review): mapping_table_get_page() bumps root_page->ref_count and
 * that reference is never released here — looks like a leaked reference;
 * confirm whether a release API exists elsewhere.
 */
bw_tree_t* bw_tree_create(memory_manager_t* memory_mgr) {
    if (!memory_mgr) return NULL;
    
    bw_tree_t* tree = mm_alloc(memory_mgr, sizeof(bw_tree_t), MEM_TYPE_INDEX_BUFFER);
    if (!tree) return NULL;
    
    // Initialize mapping table
    tree->mapping_table = mapping_table_create(BW_TREE_MAPPING_TABLE_SIZE);
    if (!tree->mapping_table) {
        mm_free(memory_mgr, tree);
        return NULL;
    }
    
    // Initialize garbage collection
    tree->gc_context = gc_context_create();
    if (!tree->gc_context) {
        mapping_table_destroy(tree->mapping_table);
        mm_free(memory_mgr, tree);
        return NULL;
    }
    
    // Create root node
    page_id_t root_page_id = mapping_table_allocate_page(tree->mapping_table);
    if (root_page_id == 0) {
        gc_context_destroy(tree->gc_context);
        mapping_table_destroy(tree->mapping_table);
        mm_free(memory_mgr, tree);
        return NULL;
    }
    
    // The tree starts as a single empty leaf acting as the root
    bw_node_t* root_node = bw_node_create_leaf(memory_mgr, BW_TREE_NODE_SIZE);
    if (!root_node) {
        gc_context_destroy(tree->gc_context);
        mapping_table_destroy(tree->mapping_table);
        mm_free(memory_mgr, tree);
        return NULL;
    }
    
    bw_page_t* root_page = mapping_table_get_page(tree->mapping_table, root_page_id);
    if (!root_page) {
        bw_node_destroy(root_node, memory_mgr);
        gc_context_destroy(tree->gc_context);
        mapping_table_destroy(tree->mapping_table);
        mm_free(memory_mgr, tree);
        return NULL;
    }
    
    atomic_store(&root_page->node_ptr, root_node);
    
    // Initialize tree fields
    tree->root_page_id = root_page_id;
    tree->memory_mgr = memory_mgr;
    
    // Initialize statistics
    atomic_init(&tree->total_nodes, 1);
    atomic_init(&tree->total_operations, 0);
    atomic_init(&tree->consolidations, 0);
    atomic_init(&tree->splits, 0);
    atomic_init(&tree->merges, 0);
    
    // Initialize configuration
    tree->node_size = BW_TREE_NODE_SIZE;
    tree->min_occupancy = BW_TREE_MIN_OCCUPANCY;
    tree->max_delta_chain = BW_TREE_MAX_DELTA_CHAIN;
    
    // Initialize thread safety
    atomic_init(&tree->is_destroyed, false);
    atomic_init(&tree->active_operations, 0);
    
    // Start GC thread (gc_running must be set before the thread launches,
    // since gc_thread_func exits as soon as it observes it false)
    tree->gc_context->gc_running = true;
    if (pthread_create(&tree->gc_context->gc_thread, NULL, gc_thread_func, tree->gc_context) != 0) {
        atomic_store(&tree->is_destroyed, true);
        // root_node was stored in the page but pages don't free their nodes,
        // so destroying it here does not double-free
        bw_node_destroy(root_node, memory_mgr);
        gc_context_destroy(tree->gc_context);
        mapping_table_destroy(tree->mapping_table);
        mm_free(memory_mgr, tree);
        return NULL;
    }
    
    return tree;
}

/*
 * Destroy the tree: block new operations, wait for in-flight ones to drain,
 * then tear down GC, the mapping table, and the tree object itself.
 */
void bw_tree_destroy(bw_tree_t* tree) {
    if (tree == NULL) return;
    
    /* Flag destruction first so new operations bail out, then spin-wait
     * (1 ms backoff) until every active operation has finished. */
    atomic_store(&tree->is_destroyed, true);
    struct timespec backoff = {0, 1000000}; // 1ms
    while (atomic_load(&tree->active_operations) > 0) {
        nanosleep(&backoff, NULL);
    }
    
    gc_context_destroy(tree->gc_context);
    mapping_table_destroy(tree->mapping_table);
    mm_free(tree->memory_mgr, tree);
}

/*
 * Replace the root with a fresh empty leaf, deferring the old root's
 * deletion to the GC, and reset the operation counters.
 * Returns 0 on success, -1 on failure or if the tree is being destroyed.
 *
 * NOTE(review): mapping_table_get_page() bumps root_page->ref_count and no
 * release happens here — presumably a leaked reference; verify against the
 * page release convention used elsewhere in the project.
 */
int bw_tree_clear(bw_tree_t* tree) {
    if (!tree || atomic_load(&tree->is_destroyed)) return -1;
    
    // Create new empty root
    bw_node_t* new_root = bw_node_create_leaf(tree->memory_mgr, tree->node_size);
    if (!new_root) return -1;
    
    bw_page_t* root_page = mapping_table_get_page(tree->mapping_table, tree->root_page_id);
    if (!root_page) {
        bw_node_destroy(new_root, tree->memory_mgr);
        return -1;
    }
    
    // Atomically swing the root; readers still on the old root are protected
    // by the epoch scheme, so the old chain goes to deferred deletion
    bw_node_t* old_root = atomic_exchange(&root_page->node_ptr, new_root);
    if (old_root) {
        gc_defer_delete(tree->gc_context, old_root);
    }
    
    // Reset statistics
    atomic_store(&tree->total_operations, 0);
    atomic_store(&tree->consolidations, 0);
    atomic_store(&tree->splits, 0);
    atomic_store(&tree->merges, 0);
    
    return 0;
}

// =============================================================================
// Core Bw-Tree Operations
// =============================================================================

// Helper function to traverse the tree and find the leaf node for a key
// Helper function to traverse the tree and find the leaf node for a key.
// NOTE(review): this is an acknowledged placeholder — it ignores `key` when
// descending and hard-codes child page 1, so it only behaves sensibly while
// the tree is a single leaf root.  A real implementation must binary-search
// the internal node's separator keys.  Left as-is to avoid changing
// behavior the callers currently depend on.
static bw_node_t* bw_tree_find_leaf(bw_tree_t* tree, const void* key, size_t key_size, 
                                    page_id_t* leaf_page_id) {
    if (!tree || !key || !leaf_page_id) return NULL;
    
    page_id_t current_page_id = tree->root_page_id;
    
    while (true) {
        bw_page_t* page = mapping_table_get_page(tree->mapping_table, current_page_id);
        if (!page) return NULL;
        
        bw_node_t* node = atomic_load(&page->node_ptr);
        if (!node) return NULL;
        
        // If it's a leaf node, we found our target
        if (node->type == BW_NODE_LEAF) {
            *leaf_page_id = current_page_id;
            return node;
        }
        
        // For internal nodes, find the child to descend to
        // Simplified implementation - in a real Bw-Tree, this would involve
        // more complex key comparison and range finding
        if (node->type == BW_NODE_INTERNAL) {
            // For now, just go to the first child (simplified)
            // In a real implementation, this would use key comparison
            // to find the correct child page
            current_page_id = 1; // Placeholder
            if (current_page_id == tree->root_page_id) {
                // Avoid infinite loop - return the current node as leaf
                *leaf_page_id = current_page_id;
                return node;
            }
        } else {
            break;
        }
    }
    
    return NULL;
}

// Insert operation
/*
 * Insert a key/value pair by prepending an INSERT delta to the target
 * leaf's delta chain, then consolidating if the chain grew too long.
 *
 * BUG FIX: the old code issued a single atomic_compare_exchange_weak —
 * which may fail spuriously — and reported any failure as
 * BW_ERROR_CONCURRENT_UPDATE.  It also passed the delta's own _Atomic
 * `next` field as the CAS `expected` argument.  Prepending a delta is
 * always retryable, so we now loop with a local expected value.
 */
bw_result_t bw_tree_insert(bw_tree_t* tree, const void* key, size_t key_size,
                          const void* value, size_t value_size) {
    if (!tree || !key || !value) return BW_ERROR_INVALID_PARAM;
    if (atomic_load(&tree->is_destroyed)) return BW_ERROR_TREE_DESTROYED;
    if (key_size > BW_TREE_MAX_KEY_SIZE || value_size > BW_TREE_MAX_VALUE_SIZE) {
        return BW_ERROR_INVALID_PARAM;
    }
    
    // Track this operation so destroy can wait for it
    atomic_fetch_add(&tree->active_operations, 1);
    atomic_fetch_add(&tree->total_operations, 1);
    
    bw_result_t result = BW_SUCCESS;
    
    // Find the target leaf page
    page_id_t leaf_page_id;
    bw_node_t* leaf_node = bw_tree_find_leaf(tree, key, key_size, &leaf_page_id);
    
    if (!leaf_node) {
        result = BW_ERROR_NODE_FULL;
        goto cleanup;
    }
    
    // Create insert delta record
    delta_record_t* insert_delta = bw_delta_create_insert(tree->memory_mgr, key, key_size, value, value_size);
    if (!insert_delta) {
        result = BW_ERROR_OUT_OF_MEMORY;
        goto cleanup;
    }
    
    // Validate the page still exists before publishing the delta
    bw_page_t* leaf_page = mapping_table_get_page(tree->mapping_table, leaf_page_id);
    if (!leaf_page) {
        bw_node_destroy((bw_node_t*)insert_delta, tree->memory_mgr);
        result = BW_ERROR_CONCURRENT_UPDATE;
        goto cleanup;
    }
    
    // Prepend the delta: retry the CAS until it lands (each failure reloads
    // the current chain head into `expected`)
    bw_node_t* expected = atomic_load(&leaf_node->next);
    do {
        atomic_store(&insert_delta->next, expected);
    } while (!atomic_compare_exchange_weak(&leaf_node->next, &expected,
                                           (struct bw_node*)insert_delta));
    
    // Check if consolidation is needed
    if (bw_node_needs_consolidation(leaf_node, tree->max_delta_chain)) {
        bw_node_consolidate(tree, leaf_page_id);
    }
    
cleanup:
    atomic_fetch_sub(&tree->active_operations, 1);
    return result;
}

// Search operation
/*
 * Look up a key: walk the leaf's delta chain newest-first (so the latest
 * write wins), then fall back to the base node.  On a hit, `result->value`
 * is a fresh copy the caller must free with bw_value_destroy().
 *
 * BUG FIX: UPDATE deltas were read through the `insert` view of the data
 * union, so the value returned came from the wrong union slot; updates are
 * now handled in their own branch using update.new_value.
 */
bw_result_t bw_tree_search(bw_tree_t* tree, const void* key, size_t key_size,
                          bw_search_result_t* result) {
    if (!tree || !key || !result) return BW_ERROR_INVALID_PARAM;
    if (atomic_load(&tree->is_destroyed)) return BW_ERROR_TREE_DESTROYED;
    
    // Initialize result
    result->found = false;
    result->value = NULL;
    result->page_id = 0;
    result->node = NULL;
    result->index = 0;
    
    // Track this operation so destroy can wait for it
    atomic_fetch_add(&tree->active_operations, 1);
    atomic_fetch_add(&tree->total_operations, 1);
    
    bw_result_t ret = BW_SUCCESS;
    
    // Find the target leaf page
    page_id_t leaf_page_id;
    bw_node_t* leaf_node = bw_tree_find_leaf(tree, key, key_size, &leaf_page_id);
    
    if (!leaf_node) {
        ret = BW_ERROR_KEY_NOT_FOUND;
        goto cleanup;
    }
    
    result->page_id = leaf_page_id;
    result->node = leaf_node;
    
    // The delta chain is newest-first, so the first match decides the outcome
    bw_node_t* current = atomic_load(&leaf_node->next);
    while (current && current->type >= BW_NODE_DELTA_INSERT) {
        delta_record_t* delta = (delta_record_t*)current;
        
        if (delta->type == BW_NODE_DELTA_INSERT) {
            if (bw_key_compare_raw(key, key_size, delta->data.insert.key->data, 
                                  delta->data.insert.key->size) == 0) {
                result->found = true;
                result->value = bw_value_create(tree->memory_mgr, 
                                              delta->data.insert.value->data,
                                              delta->data.insert.value->size);
                goto cleanup;
            }
        } else if (delta->type == BW_NODE_DELTA_UPDATE) {
            if (bw_key_compare_raw(key, key_size, delta->data.update.key->data,
                                  delta->data.update.key->size) == 0) {
                result->found = true;
                result->value = bw_value_create(tree->memory_mgr,
                                              delta->data.update.new_value->data,
                                              delta->data.update.new_value->size);
                goto cleanup;
            }
        } else if (delta->type == BW_NODE_DELTA_DELETE) {
            if (bw_key_compare_raw(key, key_size, delta->data.delete.key->data,
                                  delta->data.delete.key->size) == 0) {
                // A delete delta shadows any older version of the key
                ret = BW_ERROR_KEY_NOT_FOUND;
                goto cleanup;
            }
        }
        
        current = atomic_load(&current->next);
    }
    
    // Search in base node (simplified implementation)
    // In a real Bw-Tree, this would involve parsing the node's data structure
    // For now, we'll assume the key is not found in the base node
    ret = BW_ERROR_KEY_NOT_FOUND;
    
cleanup:
    atomic_fetch_sub(&tree->active_operations, 1);
    return ret;
}

// Delete operation
/*
 * Delete a key by prepending a DELETE delta to the target leaf's chain.
 *
 * BUG FIX: same as bw_tree_insert — a single weak CAS can fail spuriously
 * and was misreported as a real conflict; the prepend is now retried in a
 * loop with a local expected value.
 */
bw_result_t bw_tree_delete(bw_tree_t* tree, const void* key, size_t key_size) {
    if (!tree || !key) return BW_ERROR_INVALID_PARAM;
    if (atomic_load(&tree->is_destroyed)) return BW_ERROR_TREE_DESTROYED;
    
    // Track this operation so destroy can wait for it
    atomic_fetch_add(&tree->active_operations, 1);
    atomic_fetch_add(&tree->total_operations, 1);
    
    bw_result_t result = BW_SUCCESS;
    
    // Find the target leaf page
    page_id_t leaf_page_id;
    bw_node_t* leaf_node = bw_tree_find_leaf(tree, key, key_size, &leaf_page_id);
    
    if (!leaf_node) {
        result = BW_ERROR_KEY_NOT_FOUND;
        goto cleanup;
    }
    
    // Create delete delta record
    delta_record_t* delete_delta = bw_delta_create_delete(tree->memory_mgr, key, key_size);
    if (!delete_delta) {
        result = BW_ERROR_OUT_OF_MEMORY;
        goto cleanup;
    }
    
    // Validate the page still exists before publishing the delta
    bw_page_t* leaf_page = mapping_table_get_page(tree->mapping_table, leaf_page_id);
    if (!leaf_page) {
        bw_node_destroy((bw_node_t*)delete_delta, tree->memory_mgr);
        result = BW_ERROR_CONCURRENT_UPDATE;
        goto cleanup;
    }
    
    // Prepend the delta: retry the CAS until it lands
    bw_node_t* expected = atomic_load(&leaf_node->next);
    do {
        atomic_store(&delete_delta->next, expected);
    } while (!atomic_compare_exchange_weak(&leaf_node->next, &expected,
                                           (struct bw_node*)delete_delta));
    
    // Check if consolidation is needed
    if (bw_node_needs_consolidation(leaf_node, tree->max_delta_chain)) {
        bw_node_consolidate(tree, leaf_page_id);
    }
    
cleanup:
    atomic_fetch_sub(&tree->active_operations, 1);
    return result;
}

// Update operation
/*
 * Update a key's value by prepending an UPDATE delta to the leaf's chain.
 *
 * BUG FIXES:
 *  - bw_delta_create_update rejects a NULL old_value (see its validation),
 *    so the old call with (NULL, 0) *always* failed and every update
 *    returned BW_ERROR_OUT_OF_MEMORY.  The prior value is unknown at this
 *    layer, so the new value is recorded in both slots; readers then see
 *    the correct bytes regardless of which slot they consult.
 *  - same single-weak-CAS defect as insert/delete; now retried in a loop.
 */
bw_result_t bw_tree_update(bw_tree_t* tree, const void* key, size_t key_size,
                          const void* value, size_t value_size) {
    if (!tree || !key || !value) return BW_ERROR_INVALID_PARAM;
    if (atomic_load(&tree->is_destroyed)) return BW_ERROR_TREE_DESTROYED;
    
    // Track this operation so destroy can wait for it
    atomic_fetch_add(&tree->active_operations, 1);
    atomic_fetch_add(&tree->total_operations, 1);
    
    bw_result_t result = BW_SUCCESS;
    
    // Find the target leaf page
    page_id_t leaf_page_id;
    bw_node_t* leaf_node = bw_tree_find_leaf(tree, key, key_size, &leaf_page_id);
    
    if (!leaf_node) {
        result = BW_ERROR_KEY_NOT_FOUND;
        goto cleanup;
    }
    
    // Create update delta record (new value stored in both value slots)
    delta_record_t* update_delta = bw_delta_create_update(tree->memory_mgr, key, key_size,
                                                         value, value_size,
                                                         value, value_size);
    if (!update_delta) {
        result = BW_ERROR_OUT_OF_MEMORY;
        goto cleanup;
    }
    
    // Validate the page still exists before publishing the delta
    bw_page_t* leaf_page = mapping_table_get_page(tree->mapping_table, leaf_page_id);
    if (!leaf_page) {
        bw_node_destroy((bw_node_t*)update_delta, tree->memory_mgr);
        result = BW_ERROR_CONCURRENT_UPDATE;
        goto cleanup;
    }
    
    // Prepend the delta: retry the CAS until it lands
    bw_node_t* expected = atomic_load(&leaf_node->next);
    do {
        atomic_store(&update_delta->next, expected);
    } while (!atomic_compare_exchange_weak(&leaf_node->next, &expected,
                                           (struct bw_node*)update_delta));
    
    // Check if consolidation is needed
    if (bw_node_needs_consolidation(leaf_node, tree->max_delta_chain)) {
        bw_node_consolidate(tree, leaf_page_id);
    }
    
cleanup:
    atomic_fetch_sub(&tree->active_operations, 1);
    return result;
}

// Conditional operations
/*
 * Insert only when the key is absent (check-then-act; not atomic with
 * respect to concurrent writers).  Returns BW_ERROR_KEY_EXISTS if present.
 */
bw_result_t bw_tree_insert_if_not_exists(bw_tree_t* tree, const void* key, size_t key_size,
                                        const void* value, size_t value_size) {
    bw_search_result_t probe;
    bw_result_t probe_ret = bw_tree_search(tree, key, key_size, &probe);
    
    bool exists = (probe_ret == BW_SUCCESS) && probe.found;
    if (exists) {
        /* The search handed us a value copy we own — release it. */
        if (probe.value) {
            bw_value_destroy(probe.value, tree->memory_mgr);
        }
        return BW_ERROR_KEY_EXISTS;
    }
    
    return bw_tree_insert(tree, key, key_size, value, value_size);
}

/*
 * Update only when the key is present (check-then-act; not atomic with
 * respect to concurrent writers).  Returns BW_ERROR_KEY_NOT_FOUND if absent.
 */
bw_result_t bw_tree_update_if_exists(bw_tree_t* tree, const void* key, size_t key_size,
                                    const void* value, size_t value_size) {
    bw_search_result_t probe;
    bw_result_t probe_ret = bw_tree_search(tree, key, key_size, &probe);
    
    if (probe_ret != BW_SUCCESS || !probe.found) {
        return BW_ERROR_KEY_NOT_FOUND;
    }
    
    /* The search handed us a value copy we own — release it. */
    if (probe.value) {
        bw_value_destroy(probe.value, tree->memory_mgr);
    }
    
    return bw_tree_update(tree, key, key_size, value, value_size);
}

/*
 * Delete only when the key is present (check-then-act; not atomic with
 * respect to concurrent writers).  Returns BW_ERROR_KEY_NOT_FOUND if absent.
 */
bw_result_t bw_tree_delete_if_exists(bw_tree_t* tree, const void* key, size_t key_size) {
    bw_search_result_t probe;
    bw_result_t probe_ret = bw_tree_search(tree, key, key_size, &probe);
    
    if (probe_ret != BW_SUCCESS || !probe.found) {
        return BW_ERROR_KEY_NOT_FOUND;
    }
    
    /* The search handed us a value copy we own — release it. */
    if (probe.value) {
        bw_value_destroy(probe.value, tree->memory_mgr);
    }
    
    return bw_tree_delete(tree, key, key_size);
}