"""
存储引擎 - 提供统一的存储访问接口
"""

import os
import sys
import time
import threading
from typing import Dict, List, Optional, Union, Tuple, Any
from enum import Enum

# 添加当前目录到Python路径，支持直接运行
current_dir = os.path.dirname(os.path.abspath(__file__))
if current_dir not in sys.path:
    sys.path.insert(0, current_dir)

from page_manager import PageManager
from cache_manager import CacheManager, LRUCache, FIFOCache, ClockCache
from constants import *
from statistics import StorageStats, CacheStats, Logger
from table_schema import TableSchema, Column, ColumnType, Record
from index_manager import IndexManager


class CachePolicy(Enum):
    """Supported cache replacement policies for the storage engine."""
    LRU = "lru"      # least-recently-used eviction
    FIFO = "fifo"    # first-in-first-out eviction
    CLOCK = "clock"  # clock / second-chance eviction


class StorageEngine:
    """
    存储引擎 - 提供统一的存储访问接口
    
    整合页管理器和缓存管理器，为上层数据库系统提供：
    1. 页级读写操作
    2. 缓存管理
    3. 统计信息
    4. 日志记录
    """
    
    def __init__(self, 
                 db_path: str,
                 cache_size: int = DEFAULT_CACHE_SIZE,
                 cache_policy: CachePolicy = CachePolicy.LRU,
                 enable_locking: bool = True,
                 enable_readahead: bool = True):
        """
        Initialize the storage engine.

        Args:
            db_path: Path of the database file.
            cache_size: Cache capacity in pages.
            cache_policy: Cache replacement policy.
            enable_locking: Whether page-level locking is enabled.
            enable_readahead: Whether read-ahead is enabled.

        Raises:
            ValueError: If cache_policy is not a supported policy.
        """
        self.db_path = db_path
        self.cache_policy = cache_policy
        self.enable_locking = enable_locking
        self.enable_readahead = enable_readahead
        
        # Page manager (with lock and read-ahead support)
        self.page_manager = PageManager(db_path, 
                                       enable_locking=enable_locking,
                                       enable_readahead=enable_readahead)
        
        # Select the cache implementation by the policy's *value* rather than
        # enum identity, so enum instances imported through different module
        # paths still match.
        cache_classes = {"lru": LRUCache, "fifo": FIFOCache, "clock": ClockCache}
        policy_value = getattr(cache_policy, "value", cache_policy)
        cache_cls = cache_classes.get(policy_value)
        if cache_cls is None:
            raise ValueError(f"Unsupported cache policy: {cache_policy}")
        self.cache_manager = cache_cls(self.page_manager, cache_size, enable_locking=enable_locking)
        
        # Logger
        self.logger = Logger()
        
        # WAL manager - initialized lazily so creating the system database
        # itself cannot fail on WAL setup.
        self.wal_manager = None
        self.wal_enabled = False
        self.wal_initialization_attempted = False
        
        # Background thread control
        self.running = True
        self.background_threads = []
        
        self.logger.info("INIT", 0, f"Storage engine initialized with {cache_policy.value} cache (WAL will be initialized on first use)")
        
        # Thread-synchronization locks (threading is imported at module level;
        # the previous redundant local import was removed)
        self._table_lock = threading.RLock()        # table operations
        self._schema_lock = threading.RLock()       # table schemas
        self._mapping_lock = threading.RLock()      # table-to-page mappings
        
        # Table mapping info (table name -> list of page IDs)
        self.table_pages: Dict[str, List[int]] = {}
        # Table schema info (table name -> TableSchema)
        self.table_schemas: Dict[str, TableSchema] = {}
        
        # Index manager
        self.index_manager = IndexManager(self.page_manager, self.logger)
        
        # Load persisted table mappings and schemas
        self._load_table_mappings()
        self._load_table_schemas()
        
        # Make sure primary-key indexes exist for pre-existing tables
        self._ensure_primary_key_indexes()
    
    def _load_table_mappings(self):
        """Load the table-to-page mapping from the catalog page (page 1)."""
        try:
            import json
            import struct
            
            # Page 1 is reserved for the table catalog.
            if not self.page_manager.is_page_allocated(1):
                # No catalog yet - allocate one and persist the metadata.
                if self.page_manager.next_page_id <= 1:
                    self.page_manager.next_page_id = 2
                self.page_manager.allocated_pages.add(1)
                self.page_manager.stats.total_pages = max(self.page_manager.stats.total_pages, 2)
                self.page_manager._save_metadata()
                self.logger.info("LOAD_MAPPINGS", 1, "Created new table directory page")
                return
            
            raw = self.page_manager.read_page(1)
            
            # Page layout: 4-byte length prefix followed by a JSON payload.
            if len(raw) < 4:
                self.logger.info("LOAD_MAPPINGS", 1, "Empty catalog page")
                return
            
            (payload_len,) = struct.unpack('I', raw[:4])
            if payload_len == 0 or payload_len > PAGE_SIZE - 4:
                self.logger.info("LOAD_MAPPINGS", 1, "No valid catalog data")
                return
            
            self.table_pages = json.loads(raw[4:4 + payload_len].decode('utf-8'))
            
            self.logger.info("LOAD_MAPPINGS", 1, f"Loaded {len(self.table_pages)} table mappings")
            
        except Exception as e:
            self.logger.error("LOAD_MAPPINGS", 1, f"Failed to load table mappings: {e}")
            self.table_pages = {}
    
    def _save_table_mappings(self):
        """Persist the table-to-page mapping to the catalog page (page 1)."""
        try:
            import json
            import struct
            
            # Make sure the catalog page is allocated before writing.
            if not self.page_manager.is_page_allocated(1):
                if self.page_manager.next_page_id <= 1:
                    self.page_manager.next_page_id = 2
                self.page_manager.allocated_pages.add(1)
                self.page_manager.stats.total_pages = max(self.page_manager.stats.total_pages, 2)
            
            payload = json.dumps(self.table_pages).encode('utf-8')
            
            if len(payload) > PAGE_SIZE - 4:
                self.logger.error("SAVE_MAPPINGS", 1, "Table mappings too large for single page")
                return
            
            # Page layout: 4-byte length prefix + JSON payload, zero-padded
            # to a full page.
            body = struct.pack('I', len(payload)) + payload
            body = body + bytes(PAGE_SIZE - len(body))
            
            if self.page_manager.write_page(1, body):
                self.logger.info("SAVE_MAPPINGS", 1, f"Saved {len(self.table_pages)} table mappings")
            else:
                self.logger.error("SAVE_MAPPINGS", 1, "Failed to write table mappings")
                
        except Exception as e:
            self.logger.error("SAVE_MAPPINGS", 1, f"Failed to save table mappings: {e}")
    
    def _load_table_schemas(self):
        """Load table schema definitions from the schema page (page 2)."""
        try:
            import json
            import struct
            
            # Page 2 is reserved for table schemas.
            if not self.page_manager.is_page_allocated(2):
                # No schema page yet - allocate one and persist the metadata.
                if self.page_manager.next_page_id <= 2:
                    self.page_manager.next_page_id = 3
                self.page_manager.allocated_pages.add(2)
                self.page_manager.stats.total_pages = max(self.page_manager.stats.total_pages, 3)
                self.page_manager._save_metadata()
                self.logger.info("LOAD_SCHEMAS", 2, "Created new table schema page")
                return
            
            raw = self.page_manager.read_page(2)
            
            # Page layout: 4-byte length prefix followed by a JSON payload.
            if len(raw) < 4:
                self.logger.info("LOAD_SCHEMAS", 2, "Empty schema page")
                return
            
            (payload_len,) = struct.unpack('I', raw[:4])
            if payload_len == 0 or payload_len > PAGE_SIZE - 4:
                self.logger.info("LOAD_SCHEMAS", 2, "No valid schema data")
                return
            
            decoded = json.loads(raw[4:4 + payload_len].decode('utf-8'))
            
            # Rehydrate TableSchema objects from their serialized dict form.
            for table_name, schema_dict in decoded.items():
                self.table_schemas[table_name] = TableSchema.from_dict(schema_dict)
            
            self.logger.info("LOAD_SCHEMAS", 2, f"Loaded {len(self.table_schemas)} table schemas")
            
        except Exception as e:
            self.logger.error("LOAD_SCHEMAS", 2, f"Failed to load table schemas: {e}")
            self.table_schemas = {}
    
    def _save_table_schemas(self):
        """Persist all table schema definitions to the schema page (page 2)."""
        try:
            import json
            import struct
            
            # Make sure the schema page is allocated before writing.
            if not self.page_manager.is_page_allocated(2):
                if self.page_manager.next_page_id <= 2:
                    self.page_manager.next_page_id = 3
                self.page_manager.allocated_pages.add(2)
                self.page_manager.stats.total_pages = max(self.page_manager.stats.total_pages, 3)
            
            serializable = {name: schema.to_dict() for name, schema in self.table_schemas.items()}
            payload = json.dumps(serializable).encode('utf-8')
            
            if len(payload) > PAGE_SIZE - 4:
                self.logger.error("SAVE_SCHEMAS", 2, "Table schemas too large for single page")
                return
            
            # Page layout: 4-byte length prefix + JSON payload, zero-padded
            # to a full page.
            body = struct.pack('I', len(payload)) + payload
            body = body + bytes(PAGE_SIZE - len(body))
            
            if self.page_manager.write_page(2, body):
                self.logger.info("SAVE_SCHEMAS", 2, f"Saved {len(self.table_schemas)} table schemas")
            else:
                self.logger.error("SAVE_SCHEMAS", 2, "Failed to write table schemas")
                
        except Exception as e:
            self.logger.error("SAVE_SCHEMAS", 2, f"Failed to save table schemas: {e}")
    
    def _ensure_primary_key_indexes(self):
        """Create a primary-key index for every table that declares a PK but has no index yet."""
        for table_name, schema in self.table_schemas.items():
            if not schema.has_primary_key() or schema.has_primary_key_index:
                continue
            
            pk_column = schema.get_primary_key()
            created = self.index_manager.create_primary_key_index(
                table_name, pk_column.name, schema.get_primary_key_type()
            )
            
            if created:
                schema.has_primary_key_index = True
                self.logger.info("INDEX", 0, f"Created primary key index for table {table_name}")
            else:
                self.logger.warn("INDEX", 0, f"Failed to create primary key index for table {table_name}")
        
        # Persist any schema flags updated above.
        self._save_table_schemas()
    
    # ==================== 基本页操作接口 ====================
    
    def allocate_page(self) -> int:
        """
        Allocate a fresh page via the page manager.

        Returns:
            int: ID of the newly allocated page.
        """
        new_id = self.page_manager.allocate_page()
        self.logger.info("ALLOC_PAGE", new_id, "Page allocated")
        return new_id
    
    def deallocate_page(self, page_id: int) -> bool:
        """
        Release a page, evicting any cached copy first.

        Args:
            page_id: ID of the page to release.

        Returns:
            bool: True when the page manager released the page.
        """
        # Drop the cached copy before freeing the page on disk.
        self.cache_manager.invalidate_page(page_id)
        
        ok = self.page_manager.deallocate_page(page_id)
        
        if ok:
            self.logger.info("DEALLOC_PAGE", page_id, "Page deallocated")
        else:
            self.logger.error("DEALLOC_PAGE", page_id, "Failed to deallocate page")
        
        return ok
    
    def read_page(self, page_id: int) -> bytes:
        """
        Read a page straight from disk, bypassing the cache.

        Args:
            page_id: ID of the page to read.

        Returns:
            bytes: Raw page contents.
        """
        return self.page_manager.read_page(page_id)
    
    def write_page(self, page_id: int, data: bytes) -> bool:
        """
        Write a page straight to disk, bypassing the cache.

        Args:
            page_id: ID of the page to write.
            data: Bytes to store in the page.

        Returns:
            bool: True when the write succeeded.
        """
        return self.page_manager.write_page(page_id, data)
    
    # ==================== 缓存操作接口 ====================
    
    def get_page(self, page_id: int) -> bytes:
        """
        Fetch a page through the cache layer.

        Args:
            page_id: ID of the page to fetch.

        Returns:
            bytes: Page contents, served from cache when possible.
        """
        return self.cache_manager.get_page(page_id)
    
    def put_page(self, page_id: int, data: bytes) -> bool:
        """
        Write page data through the cache layer.

        Args:
            page_id: ID of the page to write.
            data: Page contents.

        Returns:
            bool: True when the write succeeded.
        """
        return self.cache_manager.put_page(page_id, data)
    
    def flush_page(self, page_id: int) -> bool:
        """
        Flush one cached page out to disk.

        Args:
            page_id: ID of the page to flush.

        Returns:
            bool: True when the flush succeeded.
        """
        return self.cache_manager.flush_page(page_id)
    
    def flush_all_pages(self) -> bool:
        """
        Flush every cached page out to disk.

        Returns:
            bool: True when all pages were flushed.
        """
        return self.cache_manager.flush_all_pages()
    
    # ==================== 表结构管理接口 ====================
    
    def create_table(self, table_name: str, columns: List[Column]) -> bool:
        """
        Create a table, including its schema definition and initial data page.

        Args:
            table_name: Name of the table.
            columns: Column definitions.

        Returns:
            bool: True on success; False if the table exists or creation failed.
        """
        if table_name in self.table_schemas:
            self.logger.warn("CREATE_TABLE", 0, f"Table {table_name} already exists")
            return False
        
        try:
            schema = TableSchema(table_name, columns)
            
            # Allocate the initial data page(s) and write their headers.
            page_ids = []
            for _ in range(1):
                new_page = self.allocate_page()
                page_ids.append(new_page)
                self._initialize_data_page(new_page)
            
            # Register the schema and page mapping in memory.
            schema.page_ids = page_ids
            self.table_schemas[table_name] = schema
            self.table_pages[table_name] = page_ids
            
            # Build the primary-key index when the schema declares one.
            if schema.has_primary_key():
                pk_column = schema.get_primary_key()
                created = self.index_manager.create_primary_key_index(
                    table_name, pk_column.name, schema.get_primary_key_type()
                )
                
                if created:
                    schema.has_primary_key_index = True
                    self.logger.info("CREATE_TABLE", 0, f"Created primary key index for {table_name}.{pk_column.name}")
                else:
                    self.logger.warning("CREATE_TABLE", 0, f"Failed to create primary key index for {table_name}")
            
            # Persist the catalog state.
            self._save_table_schemas()
            self._save_table_mappings()
            
            self.logger.info("CREATE_TABLE", 0, 
                           f"Created table {table_name} with {len(columns)} columns, {len(page_ids)} pages")
            
            return True
            
        except Exception as e:
            self.logger.error("CREATE_TABLE", 0, f"Failed to create table {table_name}: {e}")
            return False
    
    def drop_table(self, table_name: str) -> bool:
        """
        Drop a table, freeing its data pages and removing its metadata.

        Args:
            table_name: Name of the table.

        Returns:
            bool: True only if the table existed and every page was freed.
        """
        if table_name not in self.table_schemas:
            self.logger.warn("DROP_TABLE", 0, f"Table {table_name} does not exist")
            return False
        
        try:
            page_ids = self.table_pages[table_name]
            
            # Free each data page, counting how many released cleanly.
            freed = sum(1 for pid in page_ids if self.deallocate_page(pid))
            
            # Remove the in-memory metadata, then persist the catalog.
            del self.table_schemas[table_name]
            del self.table_pages[table_name]
            
            self._save_table_schemas()
            self._save_table_mappings()
            
            self.logger.info("DROP_TABLE", 0, 
                           f"Dropped table {table_name}, freed {freed}/{len(page_ids)} pages")
            
            return freed == len(page_ids)
    
        except Exception as e:
            self.logger.error("DROP_TABLE", 0, f"Failed to drop table {table_name}: {e}")
            return False
    
    def get_table_schema(self, table_name: str) -> Optional[TableSchema]:
        """
        Look up the schema for a table.

        Args:
            table_name: Name of the table.

        Returns:
            TableSchema: The schema, or None when the table is unknown.
        """
        try:
            return self.table_schemas[table_name]
        except KeyError:
            return None
    
    def table_exists(self, table_name: str) -> bool:
        """
        Report whether a table is known to this engine.

        Args:
            table_name: Name of the table.

        Returns:
            bool: True when the table has a registered schema.
        """
        return table_name in self.table_schemas
    
    def get_table_list(self) -> List[str]:
        """
        List the names of all known tables.

        Returns:
            List[str]: Table names.
        """
        return [name for name in self.table_schemas]
    
    def _initialize_data_page(self, page_id: int):
        """
        Write a fresh header into a newly allocated data page.

        Header layout: record count (4 bytes) + free-space offset (4 bytes).
        The free-space offset starts at 8 so it lines up with the scan logic.

        Args:
            page_id: ID of the page to initialize.
        """
        import struct
        
        header = struct.pack('II', 0, 8)
        self.put_page(page_id, header + bytes(PAGE_SIZE - len(header)))
    
    # ==================== 记录级操作接口 ====================
    
    def insert_record(self, table_name: str, record_data: Dict[str, Any], 
                     transaction_id: str = None) -> bool:
        """
        Insert a record (thread-safe entry point).

        Args:
            table_name: Name of the target table.
            record_data: Column name -> value mapping for the new record.
            transaction_id: Transaction ID, used for WAL logging.

        Returns:
            bool: True when the record was inserted.
        """
        if table_name not in self.table_schemas:
            self.logger.error("INSERT", 0, f"Table {table_name} does not exist")
            return False
        
        # Log the change to WAL before touching the data pages.
        if self._ensure_wal_initialized() and transaction_id:
            try:
                import json
                after_image = json.dumps(record_data, ensure_ascii=False).encode('utf-8')
                
                self.wal_manager.write_data_change(
                    transaction_id=transaction_id,
                    operation="INSERT",
                    table_name=table_name,
                    before_image=None,  # inserts have no before image
                    after_image=after_image
                )
            except Exception as e:
                # A WAL failure is logged but does not abort the insert.
                self.logger.error("WAL_INSERT", 0, f"Failed to write WAL for insert: {e}")
        
        # Delegate to the internal path, which takes minimal-granularity locks.
        return self._insert_record_internal(table_name, record_data, transaction_id)
    
    def _insert_record_internal(self, table_name: str, record_data: Dict[str, Any], 
                               transaction_id: str = None, lock_manager = None) -> bool:
        """
        Internal insert path: primary-key duplicate check, page selection,
        slot write, and index maintenance.

        Args:
            table_name: Name of the target table (assumed to exist).
            record_data: Column name -> value mapping for the new record.
            transaction_id: Transaction ID (not used here; WAL is written by the caller).
            lock_manager: NOTE(review): this parameter is immediately overwritten
                below and therefore has no effect — confirm whether any caller
                relies on passing it.

        Returns:
            bool: True when the record was inserted.
        """
        # NOTE(review): clobbers the lock_manager parameter; the page lock is
        # always fetched via self.get_lock_manager() further down.
        lock_manager = None
        acquired_lock = False
        temp_tx = None
        
        # Table-level lock guards the duplicate-primary-key check.
        # NOTE(review): the lock is released before the actual insert below,
        # so the check-then-insert sequence is not atomic across threads —
        # confirm this window is acceptable.
        with self._table_lock:
            schema = self.table_schemas[table_name]
            
            # Enforce the primary-key constraint (under the table lock).
            if schema.has_primary_key():
                pk_column = schema.get_primary_key()
                if pk_column.name in record_data:
                    pk_value = record_data[pk_column.name]
                    # First consult the primary-key index.
                    try:
                        existing_pointer = self.index_manager.search_by_primary_key(table_name, pk_value)
                        if existing_pointer:
                            self.logger.error("INSERT", 0, f"Duplicate primary key value: {pk_value}")
                            return False
                    except Exception:
                        # Index lookup failures are ignored (the index may not exist yet).
                        pass
                    
                    # Belt-and-braces: also check the stored records directly.
                    existing_records = self.select_records(table_name, {pk_column.name: pk_value})
                    if existing_records:
                        self.logger.error("INSERT", 0, f"Duplicate primary key detected in records: {pk_value}")
                        return False
        
        try:
            # Build the record object from the input dict.
            record = Record(schema)
            for column_name, value in record_data.items():
                record.set_value(column_name, value)
            
            # Serialize the record for storage.
            record_bytes = record.serialize()
            
            # Page selection: under higher concurrency, prefer allocating new
            # pages to reduce lock contention on existing ones.
            page_id = None
            current_pages = len(self.table_pages.get(table_name, []))
            
            # NOTE: threading is already imported at module level; this local
            # import is redundant but harmless.
            import threading
            active_threads = threading.active_count()
            
            if current_pages < max(2, active_threads):
                # Many active threads: allocate a fresh page to avoid contention.
                page_id = self._add_table_page(table_name)
            else:
                # Otherwise reuse an existing page with enough free space.
                page_id = self._find_page_with_space(table_name, len(record_bytes))
                if page_id is None:
                    # No page has room - allocate a new one.
                    page_id = self._add_table_page(table_name)
            
            # Protect the page write with a lightweight page lock.
            lock_manager = self.get_lock_manager()
            
            if lock_manager:
                # Single attempt at the page write lock; no retry loop.
                temp_tx = f"write_{page_id}_{threading.get_ident()}"
                lock_manager.begin_transaction(temp_tx)
                acquired_lock = lock_manager.acquire_write_lock(page_id, temp_tx)
                
                if not acquired_lock:
                    # Abort on lock failure rather than risk a duplicate insert.
                    lock_manager.rollback_transaction(temp_tx)
                    self.logger.warning("INSERT", 0, f"Failed to acquire write lock for page {page_id}, aborting to prevent duplicates")
                    return False
            
            # Write the serialized record into the chosen page.
            slot_id = self._insert_record_to_page(page_id, record_bytes)
            
            if slot_id is not None:
                # Index maintenance; failures are logged but non-fatal.
                # NOTE(review): an earlier comment claimed indexing was
                # "temporarily skipped for debugging", but the code below does
                # run — the stale comment has been corrected here.
                try:
                    # Build a pointer to the new record's physical location.
                    from index_types import RecordPointer
                    record_pointer = RecordPointer(page_id=page_id, slot_id=slot_id)
                    
                    # Register the record in all indexes for this table.
                    index_success = self.index_manager.insert_to_indexes(
                        table_name, record_data, record_pointer
                    )
                    
                    if not index_success:
                        self.logger.warning("INSERT", page_id, f"Index insertion failed but continuing...")
                    
                except Exception as e:
                    self.logger.warning("INSERT", page_id, f"Index insertion failed: {e}, but continuing...")
                
                self.logger.info("INSERT", page_id, f"Inserted record into table {table_name}")
                result = True
            else:
                self.logger.error("INSERT", page_id, f"Failed to insert record into table {table_name}")
                result = False
            
            return result
            
        except Exception as e:
            self.logger.error("INSERT", 0, f"Failed to insert record into table {table_name}: {e}")
            return False
        finally:
            # Always release the page lock if we acquired one.
            if acquired_lock and lock_manager and temp_tx:
                try:
                    lock_manager.commit_transaction(temp_tx)
                except Exception:
                    pass
    
    def select_records(self, table_name: str, condition=None) -> List[Dict[str, Any]]:
        """
        Query records from a table (thread-safe entry point).

        Args:
            table_name: Name of the table.
            condition: Optional filter (simple implementation; complex
                conditions are not supported).

        Returns:
            List[Dict]: Matching records as dictionaries.
        """
        if table_name not in self.table_schemas:
            self.logger.error("SELECT", 0, f"Table {table_name} does not exist")
            return []
        
        # Reads take no long-lived locks; delegate straight to the scan path.
        return self._select_records_internal(table_name, condition)
    
    def _select_records_internal(self, table_name: str, condition=None) -> List[Dict[str, Any]]:
        """
        Internal query path: primary-key index lookup when possible,
        otherwise a full table scan with condition filtering.

        Args:
            table_name: Name of the table (assumed to exist).
            condition: Optional filter dict; a primary-key entry may be a
                plain value or an operator dict such as {"$eq": value}.

        Returns:
            List[Dict]: Matching records as dictionaries.
        """
        try:
            schema = self.table_schemas[table_name]
            
            # Fast path: equality lookup on the primary key via the index.
            if condition and schema.has_primary_key():
                pk_column = schema.get_primary_key()
                if pk_column.name in condition:
                    pk_value = condition[pk_column.name]
                    if isinstance(pk_value, dict):
                        # Extract the comparison value from an operator dict.
                        # Bug fix: read the "$eq" entry explicitly instead of
                        # taking the first dict value, which could belong to a
                        # different operator when several are present.
                        if "$eq" in pk_value:
                            pk_value = pk_value["$eq"]
                        elif len(pk_value) == 1 and not any(op.startswith("$") for op in pk_value.keys()):
                            pk_value = next(iter(pk_value.values()))
                        else:
                            # Non-equality operators cannot use the index.
                            pk_value = None
                    
                    if pk_value is not None:
                        # Look up the record location through the PK index.
                        record_pointer = self.index_manager.search_by_primary_key(table_name, pk_value)
                        if record_pointer:
                            # Read the record from the indicated page/slot.
                            record = self._get_record_by_pointer(record_pointer, schema)
                            if record and self._match_condition(record, condition):
                                self.logger.info("SELECT", 0, f"Used primary key index for table {table_name}")
                                return [record.to_dict()]
                        
                        self.logger.info("SELECT", 0, f"Primary key {pk_value} not found in table {table_name}")
                        return []
            
            # Slow path: scan every data page of the table.
            page_ids = self.table_pages[table_name]
            unique_page_ids = list(set(page_ids))
            records = []
            
            for page_id in unique_page_ids:
                records.extend(self._scan_page_records(page_id, schema))
            
            # Apply the (simple) condition filter.
            if condition:
                records = [r for r in records if self._match_condition(r, condition)]
            
            self.logger.info("SELECT", 0, f"Selected {len(records)} records from table {table_name}")
            
            # Convert to plain dictionaries.
            return [record.to_dict() for record in records]
            
        except Exception as e:
            self.logger.error("SELECT", 0, f"Failed to select records from table {table_name}: {e}")
            return []
    
    def update_records(self, table_name: str, condition: Dict[str, Any], updates: Dict[str, Any],
                      transaction_id: str = None) -> int:
        """
        Update all records matching a condition.

        Args:
            table_name: Name of the table.
            condition: Filter selecting the records to update.
            updates: Column name -> new value mapping.
            transaction_id: Transaction ID, used for WAL logging.

        Returns:
            int: Number of records updated.
        """
        if table_name not in self.table_schemas:
            self.logger.error("UPDATE", 0, f"Table {table_name} does not exist")
            return 0
        
        try:
            schema = self.table_schemas[table_name]
            page_ids = self.table_pages[table_name]
            unique_page_ids = list(set(page_ids))
            updated_count = 0
            
            # Full scan: visit every data page of the table.
            for page_id in unique_page_ids:
                page_records = self._scan_page_records(page_id, schema)
                updated_in_page = 0
                
                for i, record in enumerate(page_records):
                    # Only touch records matching the condition.
                    if self._match_condition(record, condition):
                        # WAL: capture before/after images prior to mutating.
                        if self._ensure_wal_initialized() and transaction_id:
                            try:
                                import json
                                # Snapshot of the record before the update.
                                before_data = record.to_dict()
                                before_image = json.dumps(before_data, ensure_ascii=False).encode('utf-8')
                                
                                # Projected record after applying the updates.
                                after_data = before_data.copy()
                                after_data.update(updates)
                                after_image = json.dumps(after_data, ensure_ascii=False).encode('utf-8')
                                
                                # Append the change to the WAL.
                                self.wal_manager.write_data_change(
                                    transaction_id=transaction_id,
                                    operation="UPDATE",
                                    table_name=table_name,
                                    page_id=page_id,
                                    before_image=before_image,
                                    after_image=after_image
                                )
                            except Exception as e:
                                self.logger.error("WAL_UPDATE", 0, f"Failed to write WAL for update: {e}")
                        
                        # Apply the updates; fields absent from the schema are
                        # silently skipped.
                        for field, value in updates.items():
                            if schema.get_column(field) is not None:
                                record.set_value(field, value)
                        
                        # Re-serialize the record back into the page.
                        # NOTE(review): the page is also rewritten wholesale
                        # below via _rewrite_page_records, so this per-record
                        # write may be redundant — confirm against those helpers.
                        if self._update_record_in_page(page_id, i, record, schema):
                            updated_in_page += 1
                
                updated_count += updated_in_page
                
                # Flush the page and repair index pointers if anything changed.
                if updated_in_page > 0:
                    # Rewrite the whole page from the in-memory records.
                    self._rewrite_page_records(page_id, page_records, schema)
                    # Rebuild this page's primary-key pointers (slot IDs may
                    # have shifted during the rewrite).
                    if schema.has_primary_key():
                        pk_col = schema.get_primary_key()
                        try:
                            self.index_manager.rebuild_primary_key_for_page(
                                table_name, pk_col.name, page_id, page_records
                            )
                        except Exception as e:
                            self.logger.warning("INDEX", page_id, f"Rebuild PK after update failed: {e}")
            
            self.logger.info("UPDATE", 0, f"Updated {updated_count} records in table {table_name}")
            return updated_count
            
        except Exception as e:
            self.logger.error("UPDATE", 0, f"Failed to update records in table {table_name}: {e}")
            return 0
    
    def delete_records(self, table_name: str, condition: Dict[str, Any], 
                      transaction_id: str = None) -> int:
        """
        Delete records from a table.

        Fast path: for an equality condition on the primary key, the PK index
        locates the containing page so only that page is rewritten; otherwise
        every page of the table is scanned and rewritten as needed.

        Args:
            table_name: Table name
            condition: Delete condition (equality dict or operator dict)
            transaction_id: Transaction id (used for WAL logging)
            
        Returns:
            int: Number of records successfully deleted (0 on error)
        """
        if table_name not in self.table_schemas:
            self.logger.error("DELETE", 0, f"Table {table_name} does not exist")
            return 0
        
        try:
            schema = self.table_schemas[table_name]
            page_ids = self.table_pages[table_name]
            unique_page_ids = list(set(page_ids))
            deleted_count = 0

            # Fast path: primary-key equality delete (use the index to locate
            # the page) to avoid a full table scan.
            if isinstance(condition, dict) and schema.has_primary_key():
                pk_col = schema.get_primary_key()
                if pk_col and pk_col.name in condition and not isinstance(condition[pk_col.name], dict):
                    pk_value = condition[pk_col.name]
                    try:
                        record_pointer = self.index_manager.search_by_primary_key(table_name, pk_value)
                    except Exception:
                        record_pointer = None
                    if record_pointer:
                        # Rewrite only this page, dropping records matching the PK.
                        page_id = record_pointer.page_id
                        page_records = self._scan_page_records(page_id, schema)
                        
                        # WAL: capture a before-image of each record to be deleted.
                        if self._ensure_wal_initialized() and transaction_id:
                            for record in page_records:
                                if record.get_value(pk_col.name) == pk_value:
                                    try:
                                        import json
                                        before_data = record.to_dict()
                                        before_image = json.dumps(before_data, ensure_ascii=False).encode('utf-8')
                                        
                                        # Write the WAL record
                                        self.wal_manager.write_data_change(
                                            transaction_id=transaction_id,
                                            operation="DELETE",
                                            table_name=table_name,
                                            page_id=page_id,
                                            before_image=before_image,
                                            after_image=None  # DELETE has no after-image
                                        )
                                    except Exception as e:
                                        self.logger.error("WAL_DELETE", 0, f"Failed to write WAL for delete: {e}")
                        
                        records_to_keep = [r for r in page_records if r.get_value(pk_col.name) != pk_value]
                        deleted_in_page = max(0, len(page_records) - len(records_to_keep))
                        if deleted_in_page > 0:
                            # Transactional delete: atomic operation keeping index
                            # and data consistent; take an exclusive page lock.
                            # NOTE(review): this reassigns the `transaction_id`
                            # parameter (the WAL txn id) with a lock-scoped id —
                            # confirm the caller's id should not be reused here.
                            transaction_id = f"delete_{threading.current_thread().ident}_{pk_value}"
                            lock_acquired = False
                            
                            try:
                                # Acquire the page write lock for consistency
                                lock_acquired = self.lock_manager.acquire_write_lock(page_id, transaction_id)
                                if not lock_acquired:
                                    self.logger.warning("DELETE", page_id, f"Failed to acquire write lock for page {page_id}")
                                
                                # 1. Remove the index entry first (without flushing
                                #    it to disk yet)
                                index_delete_success = False
                                for retry in range(3):  # retry up to 3 times
                                    try:
                                        index_delete_success = self.index_manager.delete_from_primary_key(table_name, pk_value)
                                        if index_delete_success:
                                            break
                                        time.sleep(0.001)  # brief wait before retrying
                                    except Exception as e:
                                        self.logger.warning("DELETE_INDEX", page_id, f"PK index delete attempt {retry+1} failed: {e}")
                                        if retry == 2:  # last retry failed
                                            # Force-correct the index state for this record
                                            try:
                                                # Ensure the key no longer exists in the index
                                                pk_index = self.index_manager.get_primary_key_index(table_name)
                                                if pk_index:
                                                    # Find and delete directly in the B+ tree
                                                    from index_types import IndexKey
                                                    index_key = IndexKey(value=pk_value, key_type=pk_index.metadata.key_type)
                                                    leaf_page = pk_index._find_leaf(index_key)
                                                    if leaf_page.delete_record(index_key):
                                                        pk_index.dirty_pages.add(leaf_page.page_id)
                                                        pk_index.metadata.entry_count -= 1
                                                        index_delete_success = True
                                            except Exception as rebuild_e:
                                                self.logger.error("DELETE_INDEX", page_id, f"Index rebuild failed: {rebuild_e}")
                                
                                if index_delete_success:
                                    # 2. Index entry removed; now delete the data
                                    #    (preserving atomicity)
                                    self._rewrite_page_records(page_id, records_to_keep, schema)
                                    
                                    # 3. Flush indexes to disk immediately for consistency
                                    if hasattr(self, 'index_manager') and self.index_manager:
                                        self.index_manager.flush_all_indexes()
                                    
                                    self.logger.info("DELETE", page_id, f"Deleted {deleted_in_page} records by primary key fast-path")
                                    return deleted_in_page
                                else:
                                    self.logger.error("DELETE_INDEX", page_id, f"Failed to delete index entry for key {pk_value} after all retries")
                                    # Index delete failed: keep the data to stay consistent
                                    return 0
                                        
                            except Exception as lock_e:
                                self.logger.error("DELETE", page_id, f"Failed to perform atomic delete: {lock_e}")
                                # Atomic path failed; fall through to the non-index delete path
                                pass
                            finally:
                                # Release the page lock
                                if lock_acquired:
                                    try:
                                        self.lock_manager.release_lock(page_id, transaction_id)
                                    except Exception as e:
                                        self.logger.warning("DELETE", page_id, f"Failed to release lock: {e}")
                        else:
                            # No match on that page: fall back to a generic PK scan across pages
                            total_deleted = 0
                            for pid in unique_page_ids:
                                page_records = self._scan_page_records(pid, schema)
                                keep = [r for r in page_records if r.get_value(pk_col.name) != pk_value]
                                d = max(0, len(page_records) - len(keep))
                                if d > 0:
                                    # Update the index (best effort)
                                    try:
                                        self.index_manager.delete_from_indexes(table_name, {pk_col.name: pk_value})
                                    except Exception as e:
                                        self.logger.warning("DELETE_INDEX", pid, f"PK index delete (fallback) failed: {e}")
                                    self._rewrite_page_records(pid, keep, schema)
                                    total_deleted += d
                            if total_deleted > 0:
                                self.logger.info("DELETE", 0, f"Deleted {total_deleted} records by primary key fallback")
                                return total_deleted
            
            for page_id in unique_page_ids:
                page_records = self._scan_page_records(page_id, schema)
                # For simple equality conditions (all values non-dict) prefer direct
                # comparison to avoid matcher edge cases.
                is_simple_eq = isinstance(condition, dict) and all(not isinstance(v, dict) for v in condition.values())
                if is_simple_eq:
                    def eq_match(r):
                        for k, v in condition.items():
                            if r.get_value(k) != v:
                                return False
                        return True
                    should_delete_flags = [eq_match(r) for r in page_records]
                else:
                    should_delete_flags = [self._match_condition(r, condition) for r in page_records]
                records_to_keep = [r for r, d in zip(page_records, should_delete_flags) if not d]
                deleted_in_page = sum(1 for d in should_delete_flags if d)
                
                # Keep the PK index in sync (best effort; failures are ignored)
                if deleted_in_page > 0 and schema.has_primary_key():
                    for record, d in zip(page_records, should_delete_flags):
                        if d:
                            try:
                                pk_column = schema.get_primary_key()
                                pk_value = record.get_value(pk_column.name)
                                self.index_manager.delete_from_primary_key(table_name, pk_value)
                            except Exception as e:
                                self.logger.warning("DELETE_INDEX", 0, f"Delete from primary index skipped: {e}")
                
                # Rewrite the page, keeping only the surviving records
                if deleted_in_page > 0:
                    # Rewrite the page
                    self._rewrite_page_records(page_id, records_to_keep, schema)
                    # Rebuild this page's PK index pointers (slot ids were reshuffled)
                    if schema.has_primary_key():
                        pk_col = schema.get_primary_key()
                        try:
                            self.index_manager.rebuild_primary_key_for_page(
                                table_name, pk_col.name, page_id, records_to_keep
                            )
                        except Exception as e:
                            self.logger.warning("INDEX", page_id, f"Rebuild PK after delete failed: {e}")
                
                deleted_count += deleted_in_page
            
            # Force an index flush after a batch delete for consistency
            if deleted_count > 0:
                try:
                    if hasattr(self, 'index_manager') and self.index_manager:
                        self.index_manager.flush_all_indexes()
                except Exception as flush_e:
                    self.logger.warning("DELETE", 0, f"Batch delete index flush failed: {flush_e}")
            
            self.logger.info("DELETE", 0, f"Deleted {deleted_count} records from table {table_name}")
            return deleted_count
            
        except Exception as e:
            self.logger.error("DELETE", 0, f"Failed to delete records from table {table_name}: {e}")
            return 0
    
    def _update_record_in_page(self, page_id: int, record_index: int, record, schema) -> bool:
        """更新页面中的单个记录"""
        try:
            # 这是一个简化实现，实际中应该就地更新
            # 这里我们依赖 _rewrite_page_records 来处理
            return True
        except Exception as e:
            self.logger.error("UPDATE_RECORD", page_id, f"Failed to update record {record_index}: {e}")
            return False
    
    def _rewrite_page_records(self, page_id: int, records: List, schema) -> bool:
        """Serialize *records* into a fresh page image and write it back.

        Returns True on success, False when the records overflow the page
        or any error occurs.
        """
        try:
            import struct
            
            buf = bytearray(PAGE_SIZE)
            write_pos = 8  # payload starts right after the 8-byte page header
            
            for rec in records:
                payload = rec.serialize()
                end = write_pos + len(payload)
                
                # Bail out when the page cannot hold the next record.
                if end > PAGE_SIZE:
                    self.logger.error("REWRITE_PAGE", page_id, "Page size exceeded")
                    return False
                
                buf[write_pos:end] = payload
                write_pos = end
            
            # Header layout: record count followed by the free-space offset.
            struct.pack_into('II', buf, 0, len(records), write_pos)
            
            # Push the rebuilt image back through the cache layer.
            return self.put_page(page_id, bytes(buf))
            
        except Exception as e:
            self.logger.error("REWRITE_PAGE", page_id, f"Failed to rewrite page: {e}")
            return False
    
    def _find_page_with_space(self, table_name: str, required_space: int) -> Optional[int]:
        """
        Find a page of *table_name* with enough free space.

        Args:
            table_name: Table name
            required_space: Bytes needed
            
        Returns:
            Optional[int]: Id of the first page with at least *required_space*
            free bytes, or None when no page qualifies.
        """
        import struct
        
        for pid in self.table_pages[table_name]:
            data = self.get_page(pid)
            
            # Page header: (record_count, free_space_offset) as two uint32s.
            _, free_offset = struct.unpack('II', data[:8])
            
            if PAGE_SIZE - free_offset >= required_space:
                return pid
        
        return None
    
    def _add_table_page(self, table_name: str) -> int:
        """
        为表添加新页面（线程安全）
        
        Args:
            table_name: 表名
            
        Returns:
            int: 新分配的页ID
        """
        with self._mapping_lock:
            page_id = self.allocate_page()
            self.table_pages[table_name].append(page_id)
            self.table_schemas[table_name].page_ids.append(page_id)
            
            # 初始化新页面
            self._initialize_data_page(page_id)
            
            # 保存映射信息
            self._save_table_mappings()
            self._save_table_schemas()
            
            self.logger.info("ADD_PAGE", page_id, f"Added page to table {table_name}")
            return page_id
    
    def _insert_record_to_page(self, page_id: int, record_bytes: bytes) -> Optional[int]:
        """
        Append a serialized record to a page's free area.

        Args:
            page_id: Page id
            record_bytes: Serialized record bytes
            
        Returns:
            Optional[int]: Slot id of the new record, or None when the page
            lacks space or the write fails.
        """
        import struct
        
        try:
            buf = bytearray(self.get_page(page_id))
            
            # Header: (record_count, free_space_offset) as two uint32s.
            count, free_off = struct.unpack('II', buf[:8])
            
            # Reject when the remaining free area cannot hold the record.
            if PAGE_SIZE - free_off < len(record_bytes):
                return None
            
            # Append the record at the free-space frontier.
            buf[free_off:free_off + len(record_bytes)] = record_bytes
            
            # The new record occupies the next slot (slots are append-only).
            slot = count
            
            # Bump the record count and advance the free-space offset.
            struct.pack_into('II', buf, 0, count + 1, free_off + len(record_bytes))
            
            # Write the modified image back through the cache.
            self.put_page(page_id, bytes(buf))
            
            return slot
            
        except Exception as e:
            self.logger.error("INSERT_TO_PAGE", page_id, f"Failed to insert record: {e}")
            return None
    
    def _delete_record_from_page(self, page_id: int, slot_id: int) -> bool:
        """
        Delete the record at a given slot from a page.

        The page is re-parsed record by record (each record is length-prefixed),
        then rebuilt without the target record, which compacts the page and
        renumbers the slots that follow.

        Args:
            page_id: Page id
            slot_id: Record slot id (0-based index)
            
        Returns:
            bool: Whether the delete succeeded
        """
        import struct
        
        try:
            page_data = bytearray(self.get_page(page_id))
            
            # Read the page header: (record_count, free_space_offset)
            record_count, free_space_offset = struct.unpack('II', page_data[:8])
            
            # Validate slot_id against the stored record count
            if slot_id < 0 or slot_id >= record_count:
                self.logger.error("DELETE_RECORD", page_id, f"Invalid slot_id {slot_id}, record_count={record_count}")
                return False
            
            # Walk the records to locate each record's span
            records_info = []  # [(start_offset, end_offset, record_data)]
            offset = 8  # skip the page header
            
            for i in range(record_count):
                if offset >= free_space_offset:
                    self.logger.error("DELETE_RECORD", page_id, f"Corrupted page: offset {offset} >= free_space {free_space_offset}")
                    return False
                
                # Read the record's 4-byte length prefix
                if offset + 4 > free_space_offset:
                    self.logger.error("DELETE_RECORD", page_id, f"Cannot read record {i} length")
                    return False
                
                record_length = struct.unpack('I', page_data[offset:offset+4])[0]
                
                # Validate the length: must cover its own prefix and stay
                # inside the used area
                if record_length <= 4 or offset + record_length > free_space_offset:
                    self.logger.error("DELETE_RECORD", page_id, f"Invalid record {i} length: {record_length}")
                    return False
                
                # Remember this record's span and raw bytes
                record_start = offset
                record_end = offset + record_length
                record_data = page_data[record_start:record_end]
                
                records_info.append((record_start, record_end, record_data))
                offset += record_length
            
            # Sanity check: exactly record_count records must have been found
            if len(records_info) != record_count:
                self.logger.error("DELETE_RECORD", page_id, f"Record count mismatch: found {len(records_info)}, expected {record_count}")
                return False
            
            # Rebuild the page image, skipping the record being deleted
            new_page_data = bytearray(PAGE_SIZE)
            new_offset = 8  # skip the page header
            new_record_count = record_count - 1
            
            for i, (start, end, data) in enumerate(records_info):
                if i != slot_id:  # skip the record being deleted
                    # Copy the record bytes to their new, compacted position
                    record_size = len(data)
                    new_page_data[new_offset:new_offset + record_size] = data
                    new_offset += record_size
            
            # Update the page header
            struct.pack_into('II', new_page_data, 0, new_record_count, new_offset)
            
            # Write the page back
            success = self.put_page(page_id, bytes(new_page_data))
            
            if success:
                self.logger.info("DELETE_RECORD", page_id, f"Successfully deleted record slot {slot_id}")
            else:
                self.logger.error("DELETE_RECORD", page_id, f"Failed to write page after deleting slot {slot_id}")
            
            return success
            
        except Exception as e:
            self.logger.error("DELETE_RECORD", page_id, f"Failed to delete record slot {slot_id}: {e}")
            return False
    
    def _get_record_by_pointer(self, record_pointer, schema: TableSchema):
        """
        Fetch a record via its index record pointer.

        Args:
            record_pointer: RecordPointer-like object exposing page_id and slot_id
            schema: Table schema
            
        Returns:
            Record: The record, or None when the slot is out of range or the
            page scan fails.
        """
        try:
            # Fix: removed the unused local `from index_types import RecordPointer`
            # — the name was never referenced, and the dead import could itself
            # trigger the except path if the module were missing.
            
            # Simplified implementation: scan the page and pick the record at the
            # pointer's slot (slot_id is assumed to be the index within the page).
            page_records = self._scan_page_records(record_pointer.page_id, schema)
            
            if 0 <= record_pointer.slot_id < len(page_records):
                return page_records[record_pointer.slot_id]
            
            return None
            
        except Exception as e:
            self.logger.error("GET_RECORD", record_pointer.page_id, f"Failed to get record by pointer: {e}")
            return None
    
    def _scan_page_records(self, page_id: int, schema: TableSchema) -> List[Record]:
        """
        Parse every record stored in a page.

        Page layout: an 8-byte header (record_count, free_space_offset as two
        uint32s) followed by length-prefixed serialized records. Parsing stops
        early on any corruption and returns what was read so far.

        Args:
            page_id: Page id
            schema: Table schema
            
        Returns:
            List[Record]: The parsed records
        """
        import struct
        
        try:
            data = self.get_page(page_id)
            
            # Read the page header
            count, free_off = struct.unpack('II', data[:8])
            
            parsed = []
            cursor = 8  # first record begins right after the header
            
            for idx in range(count):
                if cursor >= free_off:
                    break
                
                # Each record starts with a 4-byte length field.
                if cursor + 4 > free_off:
                    self.logger.error("SCAN_PAGE", page_id, f"Invalid record {idx}: cannot read length")
                    break
                
                rec_len = struct.unpack('I', data[cursor:cursor+4])[0]
                
                # Sanity-check the declared length against the page bounds.
                if rec_len <= 4 or cursor + rec_len > free_off:
                    self.logger.error("SCAN_PAGE", page_id, f"Invalid record {idx}: bad length {rec_len}")
                    break
                
                try:
                    # Deserialize starting at `cursor` (length prefix included).
                    parsed.append(Record.deserialize(schema, data, cursor))
                    
                    # Advance to the next record
                    cursor += rec_len
                    
                except Exception as e:
                    self.logger.error("SCAN_PAGE", page_id, f"Failed to deserialize record {idx}: {e}")
                    break
            
            return parsed
            
        except Exception as e:
            self.logger.error("SCAN_PAGE", page_id, f"Failed to scan page records: {e}")
            return []
    
    def _match_condition(self, record: Record, condition) -> bool:
        """
        Check whether *record* satisfies *condition*.

        Supported condition forms (dicts only):
          - plain equality: {"id": 1, "name": "Alice"}
          - operator tests: {"age": {"$gt": 18}, "name": {"$like": "A%"}}
          - logical combos: {"$and": [cond1, cond2]}

        A None condition matches everything.

        Raises:
            ValueError: for non-dict, non-None condition types
        """
        if condition is None:
            return True
        
        if not isinstance(condition, dict):
            raise ValueError(f"Unsupported condition type: {type(condition)}")
        
        return self._evaluate_dict_condition(record, condition)
    
    def _evaluate_dict_condition(self, record: Record, condition: Dict) -> bool:
        """AND together every entry of a dict-form condition."""
        for key, expected in condition.items():
            # Keys beginning with '$' are logical operators; anything else
            # names a field whose value is tested.
            if key.startswith('$'):
                matched = self._evaluate_logical_operator(record, key, expected)
            else:
                matched = self._evaluate_field_condition(record, key, expected)
            if not matched:
                return False
        return True
    
    def _evaluate_logical_operator(self, record: Record, operator: str, operands) -> bool:
        """Evaluate a $and / $or / $not combinator against *record*."""
        if operator == '$and':
            if not isinstance(operands, list):
                raise ValueError("$and requires a list of conditions")
            return all(self._match_condition(record, sub) for sub in operands)
        
        if operator == '$or':
            if not isinstance(operands, list):
                raise ValueError("$or requires a list of conditions")
            return any(self._match_condition(record, sub) for sub in operands)
        
        if operator == '$not':
            return not self._match_condition(record, operands)
        
        raise ValueError(f"Unsupported logical operator: {operator}")
    
    def _evaluate_field_condition(self, record: Record, field_name: str, condition_value) -> bool:
        """Evaluate a single field's condition (operator dict or bare value)."""
        actual = record.get_value(field_name)
        
        if not isinstance(condition_value, dict):
            # Bare value: plain equality test.
            return self._evaluate_comparison_operator(actual, '$eq', condition_value)
        
        # Operator dict, e.g. {"$gt": 18, "$lt": 65}: every operator must hold.
        return all(
            self._evaluate_comparison_operator(actual, op, expected)
            for op, expected in condition_value.items()
        )
    
    def _evaluate_comparison_operator(self, actual_value, operator: str, expected_value) -> bool:
        """评估比较操作符"""
        try:
            if operator == '$eq' or operator == '=':
                return actual_value == expected_value
            
            elif operator == '$ne' or operator == '!=':
                return actual_value != expected_value
            
            elif operator == '$gt' or operator == '>':
                return actual_value is not None and expected_value is not None and actual_value > expected_value
            
            elif operator == '$gte' or operator == '>=':
                return actual_value is not None and expected_value is not None and actual_value >= expected_value
            
            elif operator == '$lt' or operator == '<':
                return actual_value is not None and expected_value is not None and actual_value < expected_value
            
            elif operator == '$lte' or operator == '<=':
                return actual_value is not None and expected_value is not None and actual_value <= expected_value
            
            elif operator == '$in':
                if not isinstance(expected_value, (list, tuple, set)):
                    raise ValueError("$in requires a list, tuple, or set")
                return actual_value in expected_value
            
            elif operator == '$nin':
                if not isinstance(expected_value, (list, tuple, set)):
                    raise ValueError("$nin requires a list, tuple, or set")
                return actual_value not in expected_value
            
            elif operator == '$like':
                if not isinstance(expected_value, str) or not isinstance(actual_value, str):
                    return False
                # 简单的LIKE实现：%表示任意字符，_表示单个字符
                return self._match_like_pattern(actual_value, expected_value)
            
            elif operator == '$null':
                if expected_value:
                    return actual_value is None
                else:
                    return actual_value is not None
            
            elif operator == '$regex':
                import re
                if not isinstance(expected_value, str) or not isinstance(actual_value, str):
                    return False
                return bool(re.search(expected_value, actual_value))
            
            else:
                raise ValueError(f"Unsupported comparison operator: {operator}")
                
        except (TypeError, ValueError) as e:
            # 类型不兼容的比较（如数字与字符串），返回False
            self.logger.warn("CONDITION", 0, f"Comparison error: {e}")
            return False
    
    def _match_like_pattern(self, text: str, pattern: str) -> bool:
        """匹配LIKE模式"""
        import re
        
        # 简化的LIKE实现
        # 将SQL LIKE模式转换为正则表达式
        # % -> .*（任意字符）
        # _ -> .（单个字符）
        
        # 转义正则表达式中的特殊字符，但保留%和_
        regex_pattern = ""
        i = 0
        while i < len(pattern):
            char = pattern[i]
            if char == '%':
                regex_pattern += '.*'
            elif char == '_':
                regex_pattern += '.'
            elif char == '\\' and i + 1 < len(pattern):
                # 处理转义字符
                next_char = pattern[i + 1]
                if next_char in '%_':
                    regex_pattern += re.escape(next_char)
                    i += 1  # 跳过下一个字符
                else:
                    regex_pattern += re.escape(char)
            else:
                # 转义其他正则表达式特殊字符
                regex_pattern += re.escape(char)
            i += 1
        
        regex_pattern = f'^{regex_pattern}$'
        
        try:
            return bool(re.match(regex_pattern, text))
        except re.error:
            return False
    
    # ==================== 兼容性接口 ====================
    
    def create_table_pages(self, table_name: str, initial_pages: int = 1) -> List[int]:
        """兼容旧接口：仅创建页面，不定义结构"""
        self.logger.warn("DEPRECATED", 0, "create_table_pages is deprecated, use create_table instead")
        
        # 如果表不存在，创建页面集合
        if table_name not in self.table_pages:
            self.table_pages[table_name] = []
        
            # 分配指定数量的页面
            for _ in range(initial_pages):
                page_id = self.allocate_page()
                self.table_pages[table_name].append(page_id)
                # 初始化页面
                self._initialize_data_page(page_id)
            
            # 保存映射
            self._save_table_mappings()
            
            return self.get_table_pages(table_name)
    
    def get_table_pages(self, table_name: str) -> List[int]:
        """获取表的所有页面ID"""
        return self.table_pages.get(table_name, [])
    
    def add_table_page(self, table_name: str) -> int:
        """
        为表添加新页面（向后兼容接口）
        
        Args:
            table_name: 表名
            
        Returns:
            int: 新分配的页ID，如果表不存在返回-1
        """
        if table_name not in self.table_pages:
            return -1
        return self._add_table_page(table_name)
    
    def drop_table_pages(self, table_name: str) -> bool:
        """
        删除表的所有页面（向后兼容接口）
        
        Args:
            table_name: 表名
            
        Returns:
            bool: 是否成功删除
        """
        if table_name not in self.table_pages:
            return False
        
        # 释放所有页面
        for page_id in self.table_pages[table_name]:
            self.deallocate_page(page_id)
        
        # 从映射中删除
        del self.table_pages[table_name]
        
        # 如果在增强结构中也存在，删除它
        if table_name in self.table_schemas:
            del self.table_schemas[table_name]
        
        # 保存更改
        self._save_table_mappings()
        self._save_table_schemas()
        
        return True
    
    # ==================== 统计和状态接口 ====================
    
    def get_storage_stats(self) -> StorageStats:
        """Expose the page manager's storage statistics."""
        stats = self.page_manager.get_stats()
        return stats
    
    def get_cache_stats(self) -> CacheStats:
        """Expose the cache manager's statistics."""
        stats = self.cache_manager.get_stats()
        return stats
    
    def get_system_info(self) -> Dict:
        """获取系统信息"""
        storage_stats = self.get_storage_stats()
        cache_stats = self.get_cache_stats()
        
        return {
            "database_path": self.db_path,
            "cache_policy": self.cache_policy.value,
            "storage_stats": {
                "total_pages": storage_stats.total_pages,
                "used_pages": storage_stats.used_pages,
                "free_pages": storage_stats.free_pages,
                "page_utilization": storage_stats.page_utilization,
                "pages_allocated": storage_stats.pages_allocated,
                "pages_read": storage_stats.pages_read,
                "pages_written": storage_stats.pages_written,
                "disk_reads": storage_stats.disk_reads,
                "disk_writes": storage_stats.disk_writes
            },
            "cache_stats": {
                "cache_size": cache_stats.cache_size,
                "max_cache_size": cache_stats.max_cache_size,
                "cache_hits": cache_stats.cache_hits,
                "cache_misses": cache_stats.cache_misses,
                "hit_rate": cache_stats.hit_rate,
                "replacement_count": cache_stats.replacement_count
            },
            "tables": {
                table_name: len(page_ids) 
                for table_name, page_ids in self.table_pages.items()
            }
        }
    
    def print_system_status(self):
        """Print a human-readable status report (storage, cache, tables) to stdout."""
        info = self.get_system_info()
        
        print(f"\n{'='*50}")
        print(f"存储引擎系统状态")
        print(f"{'='*50}")
        print(f"数据库文件: {info['database_path']}")
        print(f"缓存策略: {info['cache_policy'].upper()}")
        print()
        
        # Storage statistics section
        storage = info['storage_stats']
        print(f"存储统计:")
        print(f"  总页数: {storage['total_pages']}")
        print(f"  已用页数: {storage['used_pages']}")
        print(f"  空闲页数: {storage['free_pages']}")
        print(f"  页利用率: {storage['page_utilization']:.2%}")
        print(f"  页分配数: {storage['pages_allocated']}")
        print(f"  页读取数: {storage['pages_read']}")
        print(f"  页写入数: {storage['pages_written']}")
        print(f"  磁盘读取: {storage['disk_reads']}")
        print(f"  磁盘写入: {storage['disk_writes']}")
        print()
        
        # Cache statistics section
        cache = info['cache_stats']
        print(f"缓存统计:")
        print(f"  缓存大小: {cache['cache_size']}/{cache['max_cache_size']}")
        print(f"  缓存命中: {cache['cache_hits']}")
        print(f"  缓存失效: {cache['cache_misses']}")
        print(f"  命中率: {cache['hit_rate']:.2%}")
        print(f"  替换次数: {cache['replacement_count']}")
        print()
        
        # Table section: one line per table with its page count
        if info['tables']:
            print(f"表信息:")
            for table_name, page_count in info['tables'].items():
                print(f"  {table_name}: {page_count} 页")
        else:
            print("表信息: 无表")
        
        print(f"{'='*50}")
    
    # ==================== 锁管理接口 ====================
    
    def get_lock_manager(self):
        """获取锁管理器"""
        if self.enable_locking and hasattr(self.page_manager, 'lock_manager'):
            return self.page_manager.lock_manager
        return None
    
    def acquire_table_lock(self, table_name: str, lock_type: str = "shared") -> bool:
        """
        获取表级锁（锁定表的所有页面）
        
        Args:
            table_name: 表名
            lock_type: 锁类型，"shared" 或 "exclusive"
        
        Returns:
            bool: 是否成功获取锁
        """
        if not self.enable_locking or table_name not in self.table_pages:
            return False
        
        lock_manager = self.get_lock_manager()
        if not lock_manager:
            return False
        
        page_ids = self.table_pages[table_name]
        
        try:
            for page_id in page_ids:
                if lock_type == "shared":
                    if not lock_manager.acquire_read_lock(page_id):
                        # 如果获取失败，释放已获取的锁
                        self.release_table_lock(table_name)
                        return False
                elif lock_type == "exclusive":
                    if not lock_manager.acquire_write_lock(page_id):
                        # 如果获取失败，释放已获取的锁
                        self.release_table_lock(table_name)
                        return False
                else:
                    self.logger.error("LOCK", 0, f"Invalid lock type: {lock_type}")
                    return False
            
            self.logger.info("TABLE_LOCK", 0, f"Acquired {lock_type} lock on table {table_name}")
            return True
            
        except Exception as e:
            self.logger.error("TABLE_LOCK", 0, f"Failed to acquire table lock: {e}")
            self.release_table_lock(table_name)
            return False
    
    def release_table_lock(self, table_name: str) -> bool:
        """
        释放表级锁
        
        Args:
            table_name: 表名
        
        Returns:
            bool: 是否成功释放锁
        """
        if not self.enable_locking or table_name not in self.table_pages:
            return False
        
        lock_manager = self.get_lock_manager()
        if not lock_manager:
            return False
        
        page_ids = self.table_pages[table_name]
        released_count = 0
        
        for page_id in page_ids:
            if lock_manager.release_lock(page_id):
                released_count += 1
        
        self.logger.info("TABLE_LOCK", 0, f"Released {released_count} locks on table {table_name}")
        return released_count == len(page_ids)
    
    def release_all_locks(self) -> int:
        """
        释放当前事务的所有锁
        
        Returns:
            int: 释放的锁数量
        """
        if not self.enable_locking:
            return 0
        
        lock_manager = self.get_lock_manager()
        if not lock_manager:
            return 0
        
        released_count = lock_manager.release_all_locks()
        self.logger.info("RELEASE_ALL", 0, f"Released {released_count} locks")
        return released_count
    
    def get_lock_info(self, page_id: int = None) -> Dict:
        """
        获取锁信息
        
        Args:
            page_id: 页面ID，如果为None则返回锁管理器统计信息
        
        Returns:
            Dict: 锁信息
        """
        if not self.enable_locking:
            return {"locking_enabled": False}
        
        lock_manager = self.get_lock_manager()
        if not lock_manager:
            return {"locking_enabled": False}
        
        if page_id is not None:
            return lock_manager.page_lock_manager.get_lock_info(page_id)
        else:
            return {
                "locking_enabled": True,
                "stats": lock_manager.page_lock_manager.get_stats()
            }
    
    def print_lock_status(self):
        """打印锁状态"""
        if not self.enable_locking:
            print("锁机制未启用")
            return
        
        lock_manager = self.get_lock_manager()
        if not lock_manager:
            print("锁管理器不可用")
            return
        
        lock_manager.page_lock_manager.print_lock_status()
    
    # ==================== 生命周期管理 ====================
    
    def close(self):
        """Shut the engine down in order: release locks, flush cache and indexes,
        persist catalogs, stop background threads, checkpoint and close the WAL,
        then close the page manager."""
        self.logger.info("CLOSE", 0, "Closing storage engine")
        
        # Release every lock still held before tearing anything down.
        if self.enable_locking:
            released_count = self.release_all_locks()
            self.logger.info("CLOSE", 0, f"Released {released_count} locks during shutdown")
        
        # Write all cached (dirty) pages out to disk.
        self.flush_all_pages()
        
        # Flush all indexes; the index manager may be absent on some instances.
        if hasattr(self, 'index_manager'):
            self.index_manager.flush_all_indexes()
            self.logger.info("CLOSE", 0, "Flushed all indexes")
        
        # Persist the table->pages mapping and the table schemas.
        self._save_table_mappings()
        self._save_table_schemas()
        
        # Signal background threads to stop and wait briefly for them to exit.
        self.running = False
        for thread in self.background_threads:
            if thread.is_alive():
                thread.join(timeout=2.0)  # wait at most 2 seconds per thread
        self.logger.info("CLOSE", 0, "Background threads stopped")
        
        # Run one final checkpoint before closing the WAL manager.
        if self.wal_manager:
            try:
                # Force a checkpoint so all WAL data is applied to the main database.
                self.logger.info("CLOSE", 0, "Performing final checkpoint before closing...")
                checkpoint_lsn = self.wal_manager.perform_checkpoint()
                self.logger.info("CLOSE", 0, f"Final checkpoint completed at LSN {checkpoint_lsn}")
                
                # Flush again: the checkpoint may have dirtied pages.
                self.flush_all_pages()
                
            except Exception as e:
                self.logger.error("CLOSE", 0, f"Error during final checkpoint: {e}")
            
            self.wal_manager.close()
            self.logger.info("CLOSE", 0, "WAL manager closed")
        
        # Finally close the underlying page manager.
        self.page_manager.close()
        
        self.logger.info("CLOSE", 0, "Storage engine closed")
    
    def __enter__(self):
        """支持with语句"""
        return self
    
    def __exit__(self, exc_type, exc_val, exc_tb):
        """支持with语句"""
        self.close()
    
    # ==================== 调试和测试接口 ====================
    
    def print_recent_logs(self, count: int = 20):
        """打印最近的日志"""
        print("\n=== 页管理器日志 ===")
        self.page_manager.logger.print_recent_logs(count)
        
        print("\n=== 缓存管理器日志 ===")
        self.cache_manager.logger.print_recent_logs(count)
        
        print("\n=== 存储引擎日志 ===")
        self.logger.print_recent_logs(count)
    
    def get_cached_pages(self) -> List[int]:
        """获取当前缓存中的页面"""
        return self.cache_manager.get_cached_pages()
    
    def get_dirty_pages(self) -> List[int]:
        """获取当前缓存中的脏页"""
        return self.cache_manager.get_dirty_pages()
    
    def clear_cache(self):
        """清空缓存"""
        self.cache_manager.clear_cache()
        self.logger.info("CLEAR_CACHE", 0, "Cache cleared")
    
    # ==================== 预读机制接口 ====================
    
    def get_readahead_statistics(self) -> dict:
        """获取预读统计信息"""
        stats = {
            'readahead_enabled': self.enable_readahead,
            'storage_engine': {
                'cache_policy': self.cache_policy.value,
                'enable_locking': self.enable_locking
            }
        }
        
        # 页管理器预读统计
        if hasattr(self.page_manager, 'get_readahead_statistics'):
            page_stats = self.page_manager.get_readahead_statistics()
            stats['page_manager'] = page_stats
        
        # 缓存管理器预读统计
        if hasattr(self.cache_manager, 'get_readahead_statistics'):
            cache_stats = self.cache_manager.get_readahead_statistics()
            stats['cache_manager'] = cache_stats
        
        return stats
    
    def configure_readahead(self, **kwargs):
        """配置预读参数"""
        if self.enable_readahead:
            if hasattr(self.page_manager, 'configure_readahead'):
                self.page_manager.configure_readahead(**kwargs)
                self.logger.info("CONFIG", 0, f"Readahead configuration updated: {kwargs}")
        else:
            self.logger.warn("CONFIG", 0, "Readahead is disabled, configuration ignored")
    
    def enable_readahead_mode(self, enable: bool = True):
        """动态启用/禁用预读模式"""
        if hasattr(self.page_manager, 'readahead_engine') and self.page_manager.readahead_engine:
            self.page_manager.readahead_engine.configure(enable_readahead=enable)
            self.enable_readahead = enable
            self.logger.info("CONFIG", 0, f"Readahead mode {'enabled' if enable else 'disabled'}")
        else:
            self.logger.warn("CONFIG", 0, "Readahead engine not available")
    
    def print_readahead_status(self):
        """Print the current read-ahead status and statistics to stdout."""
        print(f"\n=== 预读机制状态 ===")
        print(f"预读启用: {self.enable_readahead}")
        
        if self.enable_readahead:
            readahead_stats = self.get_readahead_statistics()
            
            # Page-manager read-ahead details (key present only when supported)
            if 'page_manager' in readahead_stats:
                pm_stats = readahead_stats['page_manager']
                print(f"预读引擎状态: {pm_stats.get('enabled', False)}")
                print(f"当前访问模式: {pm_stats.get('current_pattern', 'unknown')}")
                print(f"模式置信度: {pm_stats.get('pattern_confidence', 0):.2f}")
                
                if 'stats' in pm_stats:
                    stats = pm_stats['stats']
                    print(f"预读请求数: {stats.get('requests_generated', 0)}")
                    print(f"预读页面数: {stats.get('pages_prefetched', 0)}")
                    print(f"预读命中率: {stats.get('hit_rate', 0):.2%}")
            
            # Cache-manager read-ahead details (key present only when supported)
            if 'cache_manager' in readahead_stats:
                cm_stats = readahead_stats['cache_manager']
                print(f"缓存利用率: {cm_stats.get('cache_utilization', 0):.2%}")
                print(f"缓存命中率: {cm_stats.get('hit_rate', 0):.2%}")
        
        print("=" * 30)
    
    # ==================== WAL后台线程管理 ====================
    
    def _start_background_threads(self):
        """启动后台线程"""
        import threading
        
        # 后台页面写入线程
        background_writer = threading.Thread(
            target=self._background_page_writer, 
            name="BackgroundPageWriter",
            daemon=True
        )
        background_writer.start()
        self.background_threads.append(background_writer)
        
        # 检查点线程
        checkpoint_thread = threading.Thread(
            target=self._checkpoint_thread,
            name="CheckpointThread", 
            daemon=True
        )
        checkpoint_thread.start()
        self.background_threads.append(checkpoint_thread)
        
        self.logger.info("BACKGROUND", 0, "Started background threads for WAL")
    
    def _background_page_writer(self):
        """后台线程：将脏页异步写入磁盘"""
        import time
        
        while self.running:
            try:
                if not self._ensure_wal_initialized():
                    time.sleep(5)
                    continue
                
                # 获取已刷新的WAL LSN
                flush_lsn = self.wal_manager.get_flush_lsn()
                
                # 获取可以安全刷新的脏页（LSN小于等于已刷新LSN的页面）
                safe_pages = self._get_safe_dirty_pages(flush_lsn)
                
                if safe_pages:
                    for page_id in safe_pages:
                        try:
                            # 刷新页面到磁盘
                            self.page_manager.flush_page_to_disk(page_id)
                            self.logger.info("BACKGROUND_FLUSH", page_id, f"Flushed page {page_id} to disk")
                        except Exception as e:
                            self.logger.error("BACKGROUND_FLUSH", page_id, f"Failed to flush page {page_id}: {e}")
                
                # 每秒检查一次
                time.sleep(1)
                
            except Exception as e:
                self.logger.error("BACKGROUND", 0, f"Background page writer error: {e}")
                time.sleep(5)  # 发生错误时等待更长时间
    
    def _checkpoint_thread(self):
        """Background loop: run a WAL checkpoint once per configured interval."""
        import time
        from wal_types import WALConstants

        while self.running:
            try:
                # Sleep through the whole interval before each attempt.
                time.sleep(WALConstants.CHECKPOINT_INTERVAL_SECONDS)

                if not self.running:
                    break

                if not self._ensure_wal_initialized():
                    continue

                # Tell the WAL manager which pages are already on disk,
                # then checkpoint.
                flushed_pages = self._get_flushed_pages_info()
                checkpoint_lsn = self.wal_manager.perform_checkpoint(flushed_pages)
                self.logger.info("CHECKPOINT", 0, f"Checkpoint completed at LSN {checkpoint_lsn}")

            except Exception as e:
                self.logger.error("CHECKPOINT", 0, f"Checkpoint error: {e}")
                time.sleep(60)  # wait a minute before retrying after an error
    
    def _get_safe_dirty_pages(self, flush_lsn: int) -> List[int]:
        """
        获取可以安全刷新的脏页
        
        Args:
            flush_lsn: 已刷新的WAL LSN
            
        Returns:
            可以安全刷新的页面ID列表
        """
        safe_pages = []
        
        try:
            # 简化实现：获取所有脏页，实际应该检查页面LSN
            if hasattr(self.cache_manager, 'get_dirty_pages'):
                dirty_pages = self.cache_manager.get_dirty_pages()
                # 简化处理：假设所有脏页都可以安全刷新
                # 实际应该检查每个页面的LSN是否小于等于flush_lsn
                safe_pages = dirty_pages[:10]  # 限制每次最多刷新10个页面
            
        except Exception as e:
            self.logger.error("SAFE_PAGES", 0, f"Failed to get safe dirty pages: {e}")
        
        return safe_pages
    
    def _get_flushed_pages_info(self) -> List[Dict[str, Any]]:
        """获取已刷新的页面信息"""
        flushed_pages = []
        
        try:
            # 简化实现：返回一些基本的页面信息
            # 实际应该维护页面的LSN信息
            if hasattr(self, 'table_pages'):
                for table_name, page_ids in self.table_pages.items():
                    for page_id in page_ids[:5]:  # 限制数量
                        flushed_pages.append({
                            'page_id': page_id,
                            'table_name': table_name,
                            'lsn': self.wal_manager.get_current_lsn() - 1 if self.wal_manager else 0
                        })
            
        except Exception as e:
            self.logger.error("FLUSHED_PAGES", 0, f"Failed to get flushed pages info: {e}")
        
        return flushed_pages
    
    def _ensure_wal_initialized(self):
        """确保WAL管理器已初始化"""
        if not self.wal_initialization_attempted:
            self.wal_initialization_attempted = True
            try:
                from wal_manager import WALManager
                
                # WAL管理器需要数据库目录，而不是.db文件路径
                wal_db_path = os.path.dirname(self.db_path) if self.db_path.endswith('.db') else self.db_path
                self.wal_manager = WALManager(wal_db_path)
                self.wal_enabled = True
                
                # 启动后台线程
                self._start_background_threads()
                
                self.logger.info("WAL_INIT", 0, f"WAL manager initialized successfully at {wal_db_path}")
            except Exception as e:
                self.wal_manager = None
                self.wal_enabled = False
                self.logger.warn("WAL_INIT", 0, f"WAL initialization failed: {e}")
        
        return self.wal_enabled
