from __future__ import annotations

from dataclasses import dataclass
from pathlib import Path
import os
import re
import threading
import logging
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import multiprocessing
from typing import List, Optional, Dict, Any, Tuple

from .config import MigratorConfig
from .parser import iter_parse_sql_text, iter_parse_sql_file
from .splitter import split_ddl_dml
from .transformer import transform_statements, transform_statements_split
from .transformer import _collect_table_schemas, _extract_table_schema_from_create  # reuse DDL schema extractor
from .transformer import transform_expression, _extract_inline_constraints, _format_index_column_token, _norm_name
from sqlglot import exp

# Pre-compiled regex patterns for performance optimization.
# Compiled once at import time so the hot INSERT-processing paths avoid
# per-statement re.compile overhead.
#
# _RE_INSERT_PATTERN groups: (1) table name (optionally backtick/double-quoted),
# (2)/(3) optional parenthesized column list, (4) VALUES or SELECT keyword,
# (5) the remainder of the statement. DOTALL so multi-line INSERTs match.
_RE_INSERT_PATTERN = re.compile(
    r"^\s*INSERT\s+INTO\s+([`\"]?[\w\.]+[`\"]?)\s*(\((.*?)\))?\s+(VALUES|SELECT)\s*(.*)$",
    flags=re.IGNORECASE | re.DOTALL
)
# Same shape as _RE_INSERT_PATTERN but restricted to VALUES-form INSERTs:
# (1) table, (2)/(3) optional column list, (4) the VALUES payload.
_RE_INSERT_VALUES_PATTERN = re.compile(
    r"^\s*INSERT\s+INTO\s+([`\"]?[\w\.]+[`\"]?)\s*(\((.*?)\))?\s+VALUES\s*(.*)$",
    flags=re.IGNORECASE | re.DOTALL
)
# Matches an optionally signed integer literal (whole string).
_RE_INTEGER_PATTERN = re.compile(r"^[+-]?\d+$")

# Logger for pipeline operations
logger = logging.getLogger(__name__)


# Module-level worker functions for multiprocessing (must be picklable)
def _process_batch_worker_mp(args: Tuple[List[str], Dict, Optional[Dict]]) -> str:
    """Worker function for multiprocessing: process a batch of INSERTs using ultra-fast processing.
    This function must be at module level to be picklable.
    Note: Ultra-fast mode processes in main thread, so this is only used as fallback.
    """
    try:
        from .pipeline_optimized import process_insert_batch_ultra_fast
        batch, schemas, json_status = args
        return process_insert_batch_ultra_fast(batch, schemas, json_status)
    except ImportError:
        # Fallback if optimized module not available
        return '\n'.join(batch)


def _process_single_worker_mp(args: Tuple[str, Dict, Optional[Dict]]) -> str:
    """Worker function for multiprocessing: process a single INSERT statement.
    This function must be at module level to be picklable.

    Args:
        args: Tuple of (raw INSERT SQL, table schemas, JSON fix status).

    Returns:
        The transformed INSERT statement.
    """
    statement, schemas, fix_status = args
    # Each worker process owns its own copy of the state, so no lock is
    # needed here — pass None where the thread-pool path passes json_fix_lock.
    return _process_insert_statement(statement, schemas, fix_status, None)


@dataclass
class PipelineResult:
    """Result bundle for one pipeline run.

    The transformed SQL is split into DDL, DML, and constraints sections.
    For large inputs processed in streaming mode, the corresponding
    ``*_file_path`` fields point at temporary files holding that section
    instead of (or in addition to) the in-memory strings.
    """
    output_sql: str
    ddl_sql: str
    dml_sql: str
    constraints_sql: str
    errors: list[str]
    warnings: list[str]
    dml_file_path: Optional[Path] = None  # Optional temporary file path for DML (for streaming processing)
    ddl_file_path: Optional[Path] = None  # Optional temporary file path for DDL (for streaming processing)
    constraints_file_path: Optional[Path] = None  # Optional temporary file path for constraints (for streaming processing)


def run_pipeline(cfg: MigratorConfig) -> PipelineResult:
    """
    Run the SQL migration pipeline with streaming transformation:
    - Transform DDL statements immediately as they are parsed
    - Extract table schemas immediately after CREATE TABLE transformation
    - Process INSERT statements immediately when table schema is available (multi-threaded)
    """
    logger.info("Starting SQL migration pipeline")
    input_path = Path(cfg.input_sql_path)
    logger.info(f"Input file: {input_path} (size: {input_path.stat().st_size / (1024*1024):.2f} MB)")
    
    errors: List[str] = []
    warnings: List[str] = []
    
    # Get file size first
    import tempfile
    file_size = input_path.stat().st_size
    
    # Output collections for transformed SQL
    # For large files, use file-based streaming for all outputs
    ddl_lines: List[str] = []
    dml_lines: List[str] = []
    cons_lines: List[str] = []
    fk_lines: List[str] = []
    
    # File handles for streaming output (for very large files)
    # Lower threshold to use file streaming more aggressively to avoid memory issues
    ddl_file = None
    ddl_file_path = None
    cons_file = None
    cons_file_path = None
    use_file_for_all = file_size > 50 * 1024 * 1024  # 50MB threshold for full streaming (lowered to prevent memory issues)
    
    if use_file_for_all:
        logger.info("Using file-based streaming for all outputs (very large file detected)")
        ddl_fd, ddl_file_path = tempfile.mkstemp(suffix='.sql', prefix='ddl_', text=True)
        ddl_file = os.fdopen(ddl_fd, 'w', encoding='utf-8')
        cons_fd, cons_file_path = tempfile.mkstemp(suffix='.sql', prefix='constraints_', text=True)
        cons_file = os.fdopen(cons_fd, 'w', encoding='utf-8')
        logger.debug(f"Created temporary files: DDL={ddl_file_path}, Constraints={cons_file_path}")
    
    # Table schemas: updated as CREATE TABLE statements are processed
    table_schemas: Dict[str, Dict[str, Any]] = {}
    
    # INSERT statement queues: grouped by table name (lowercase)
    # When a table's schema becomes available, process its INSERT queue
    insert_queues: Dict[str, List[str]] = {}  # {table_name_lower: [insert_stmt, ...]}
    
    # Track JSON fix status per table (thread-safe for parallel processing)
    json_fix_status: Dict[str, bool] = {}  # {table_name_lower: needs_fix}
    json_fix_lock = threading.Lock()
    
    # Thread pool for processing INSERT statements
    max_workers = 12  # Limit to 6 workers
    # Use ProcessPoolExecutor for large files (CPU-bound task, better CPU utilization)
    use_multiprocessing = file_size > 100 * 1024 * 1024  # Use multiprocessing for large files (>100MB)
    
    if use_multiprocessing:
        logger.info(f"Initializing process pool with {max_workers} workers for INSERT processing (multiprocessing for CPU-bound tasks)")
        insert_executor = ProcessPoolExecutor(max_workers=max_workers)
    else:
        logger.info(f"Initializing thread pool with {max_workers} workers for INSERT processing")
        insert_executor = ThreadPoolExecutor(max_workers=max_workers)
    
    # Collect raw DROP DATABASE statements
    dropdb_passthrough: List[str] = []
    
    # Determine if we should use file-based streaming for processed INSERTs
    # Lower threshold to use file streaming more aggressively to avoid memory issues
    use_file_for_inserts = file_size > 50 * 1024 * 1024  # 50MB threshold (lowered to prevent memory issues)
    
    if use_file_for_inserts:
        logger.info("Using file-based streaming for INSERT statements (large file detected)")
    else:
        logger.info("Using in-memory processing for INSERT statements")
    
    processed_inserts_file = None
    processed_inserts_path = None
    if use_file_for_inserts:
        processed_inserts_fd, processed_inserts_path = tempfile.mkstemp(suffix='.sql', prefix='processed_inserts_', text=True)
        processed_inserts_file = os.fdopen(processed_inserts_fd, 'w', encoding='utf-8')
        logger.debug(f"Created temporary file for processed INSERTs: {processed_inserts_path}")
    
    # Helper functions for writing output (handles both file and memory modes)
    def write_ddl_line(line: str):
        """Write DDL line to file or memory.

        In file-streaming mode a trailing newline is appended to each write;
        in memory mode the bare line is buffered in ``ddl_lines``.
        """
        if use_file_for_all and ddl_file:
            ddl_file.write(line + "\n")
        else:
            ddl_lines.append(line)
    
    def write_cons_line(line: str):
        """Write constraint line to file or memory.

        In file-streaming mode writes to the constraints temp file with a
        trailing newline; otherwise buffers the bare line in ``cons_lines``.
        """
        if use_file_for_all and cons_file:
            cons_file.write(line + "\n")
        else:
            cons_lines.append(line)
    
    def write_fk_line(line: str):
        """Write foreign key line to file or memory.

        NOTE: in file-streaming mode foreign-key lines share the constraints
        temp file (there is no separate FK file); ``fk_lines`` is only used
        in the in-memory path.
        """
        if use_file_for_all and cons_file:
            cons_file.write(line + "\n")
        else:
            fk_lines.append(line)
    
    # Track parallel INSERT processing
    parallel_insert_processing_started = False
    parallel_insert_start_time = None
    parallel_insert_batch_count = 0
    parallel_insert_total_count = 0
    
    # Disable batch merging - process each INSERT individually
    USE_ULTRA_FAST_BATCH = False  # Disabled: no merging, process each INSERT separately
    
    # Helper function to process a batch of INSERT statements (no merging)
    def process_table_inserts_batch(table_name_lower: str, inserts: List[str]):
        """Process a batch of INSERT statements for a table individually (no merging).

        Each statement is mapped over the shared executor (processes for very
        large files, threads otherwise) and the transformed SQL is either
        streamed to the processed-INSERTs temp file or buffered in
        ``dml_lines``. Updates the enclosing run's progress counters.

        Args:
            table_name_lower: Lower-cased table name this batch belongs to
                (used for logging only).
            inserts: Raw INSERT statements to transform; empty list is a no-op.
        """
        nonlocal parallel_insert_processing_started, parallel_insert_start_time, parallel_insert_batch_count, parallel_insert_total_count, insert_processed_count
        
        if not inserts:
            return
        
        # Track parallel processing start (first batch only): records the
        # start time and logs the executor configuration once.
        if not parallel_insert_processing_started:
            parallel_insert_processing_started = True
            parallel_insert_start_time = time.time()
            logger.info("=" * 60)
            logger.info("Starting parallel INSERT statement processing...")
            executor_type = "processes" if use_multiprocessing else "threads"
            logger.info(f"{executor_type.capitalize()}: {max_workers} workers available")
            logger.info("=" * 60)
        
        parallel_insert_batch_count += 1
        parallel_insert_total_count += len(inserts)
        
        batch_start_time = time.time()
        
        # Process each INSERT individually (no merging)
        if use_multiprocessing:
            # For multiprocessing, use module-level worker function (must be picklable);
            # a lambda cannot be pickled across process boundaries.
            # Prepare arguments for each INSERT
            insert_args = [(stmt, table_schemas, json_fix_status) for stmt in inserts]
            processed = list(insert_executor.map(_process_single_worker_mp, insert_args))
        else:
            # Threads share interpreter state, so pass the lock guarding
            # json_fix_status alongside the shared schema dict.
            processed = list(insert_executor.map(
                lambda s: _process_insert_statement(s, table_schemas, json_fix_status, json_fix_lock),
                inserts
            ))
        
        batch_end_time = time.time()
        batch_duration = batch_end_time - batch_start_time
        throughput = len(inserts) / batch_duration if batch_duration > 0 else 0
        
        executor_type = "processes" if use_multiprocessing else "threads"
        logger.info(f"[Parallel INSERT] Completed batch #{parallel_insert_batch_count} for table '{table_name_lower}': "
                   f"{len(processed)} INSERT statements processed in {batch_duration:.2f} seconds "
                   f"({throughput:.0f} statements/sec, using {max_workers} {executor_type}, no merging)")
        
        # Write processed INSERTs to output (temp file in streaming mode,
        # in-memory DML buffer otherwise).
        for p in processed:
            if p:  # Skip empty results
                if use_file_for_inserts:
                    processed_inserts_file.write(p + "\n")
                else:
                    dml_lines.append(p)
        
        insert_processed_count += len(inserts)  # Count original inserts, not merged results
    
    # Helper function to process INSERT queue for a table when schema becomes available
    def process_table_inserts(table_name_lower: str):
        """Process all queued INSERT statements for a table using multi-threading.

        Pops the table's pending queue from ``insert_queues`` and delegates
        to process_table_inserts_batch. No-op when the table has no queue
        or the queue is empty.
        """
        if table_name_lower not in insert_queues:
            return
        
        inserts = insert_queues.pop(table_name_lower)
        if not inserts:
            return
        
        process_table_inserts_batch(table_name_lower, inserts)
    
    # Helper function to periodically process all queued INSERTs (for continuous processing during parsing)
    def process_queued_inserts_periodically():
        """Process all queued INSERTs that have schemas available, regardless of batch size.
        This ensures continuous processing during parsing (process-while-parsing).
        """
        # NOTE(review): last_periodic_process_count is declared nonlocal but is
        # never updated in this body — confirm whether the counter update was
        # intended here.
        nonlocal last_periodic_process_count
        
        # Snapshot the queues first: process_table_inserts_batch mutates
        # insert_queues, so iterate over a list copy of the items.
        tables_to_process = [
            (table_name, inserts) 
            for table_name, inserts in list(insert_queues.items())
            if table_name in table_schemas and len(inserts) > 0
        ]
        
        for table_name_lower, inserts in tables_to_process:
            # Process all queued INSERTs for this table (even if less than batch threshold)
            if table_name_lower in insert_queues:
                queued_for_table = insert_queues.pop(table_name_lower)
                if queued_for_table:
                    process_table_inserts_batch(table_name_lower, queued_for_table)
    
    # Always use file-based streaming parser
    # Optimize chunk size based on file size for better I/O performance
    # Larger files benefit from larger chunks (fewer system calls)
    if file_size > 500 * 1024 * 1024:  # > 500MB
        chunk_size = 100 * 1024 * 1024  # 100MB chunks
        logger.info("Using 100MB chunks for file parsing (very large file)")
    elif file_size > 100 * 1024 * 1024:  # > 100MB
        chunk_size = 50 * 1024 * 1024  # 50MB chunks
        logger.info("Using 50MB chunks for file parsing (large file)")
    elif file_size > 10 * 1024 * 1024:  # > 10MB
        chunk_size = 10 * 1024 * 1024  # 10MB chunks
        logger.info("Using 10MB chunks for file parsing (medium file)")
    else:
        chunk_size = 1024 * 1024  # 1MB chunks (default)
        logger.info("Using 1MB chunks for file parsing (small file)")
    
    logger.info("Starting SQL file parsing and transformation")
    logger.info(f"Chunk size: {chunk_size / (1024*1024):.1f} MB, estimated I/O operations: {int(file_size / chunk_size) + 1}")
    
    import time
    parse_start_time = time.time()
    parse_iter = iter_parse_sql_file(input_path, read_dialect=cfg.dialect_read, chunk_size=chunk_size)
    
    # Counters for statistics
    ddl_count = 0
    create_table_count = 0
    insert_count = 0
    insert_queued_count = 0
    insert_processed_count = 0
    
    # Track DDL transformation timing
    ddl_transformation_started = False
    ddl_transformation_start_time = None
    first_ddl_time = None
    last_ddl_time = None
    
    # Periodic processing trigger: process queued INSERTs periodically even if batch threshold not reached
    # This ensures continuous processing during parsing (边解析边处理)
    # Lower interval to process more frequently and prevent memory buildup
    periodic_process_interval = 20  # Process every 20 INSERTs encountered (regardless of table)
    last_periodic_process_count = 0
    
    # Process statements: transform DDL immediately, queue INSERTs until table schema is available
    for part in parse_iter:
        if part.error:
            msg = f"Parse error: {part.error}\nOffending statement:\n{part.raw_sql.strip()}\n"
            logger.warning(f"Parse error encountered: {part.error}")
            if cfg.on_error == "stop":
                raise SystemExit(msg)
            errors.append(msg)
            continue
        
        if part.ast is not None:
            # DDL or DML statement with AST
            ast = part.ast
            
            # Check if it's a DDL statement
            is_ddl = isinstance(ast, (exp.Create, exp.Alter, exp.Drop, exp.Comment))
            
            # Track DDL transformation start
            if is_ddl and not ddl_transformation_started:
                ddl_transformation_started = True
                ddl_transformation_start_time = time.time()
                first_ddl_time = ddl_transformation_start_time
                logger.info("=" * 60)
                logger.info("Starting DDL syntax transformation...")
                logger.info("=" * 60)
            
            # Check if it's a CREATE TABLE statement
            is_create_table = isinstance(ast, exp.Create) and ast.args.get("kind") and ast.args["kind"].upper() == "TABLE"
            
            if is_create_table:
                ddl_count += 1
                create_table_count += 1
                last_ddl_time = time.time()
                
                # Extract table name before transformation
                table_name_raw = ast.this.this if hasattr(ast.this, "this") else None
                table_name = _norm_name(table_name_raw)
                table_name_lower = table_name.lower() if table_name else None
                
                logger.info(f"[DDL Transform] Processing CREATE TABLE statement for table '{table_name}'")
                ddl_stmt_start = time.time()
                
                # Extract table schema BEFORE transformation to preserve TINYINT(1) detection
                # This is critical: we need the original AST to detect TINYINT(1) correctly
                schema = _extract_table_schema_from_create(ast)
                if schema and table_name_lower:
                    table_schemas[table_name_lower] = schema
                    bool_cols_count = len(schema.get('bool_cols', set()))
                    json_cols_count = len(schema.get('json_cols', set()))
                    logger.info(f"Extracted schema for table '{table_name}': {len(schema.get('order', []))} columns, "
                              f"{bool_cols_count} BOOL columns, {json_cols_count} JSON columns")
                
                # Transform CREATE TABLE immediately
                # Extract inline constraints first (before transformation mutates AST)
                constraints = _extract_inline_constraints(ast)
                
                # Transform the statement
                res = transform_expression(ast, table_schemas=table_schemas)
                ddl_stmt_end = time.time()
                logger.debug(f"[DDL Transform] Completed transformation for table '{table_name}' in {ddl_stmt_end - ddl_stmt_start:.3f} seconds")
                if not res:
                    try:
                        sql = ast.sql(dialect="postgres")
                    except Exception:
                        sql = ast.sql(dialect="mysql")
                    w = []
                    extra = []
                else:
                    sql, w, extra = res
                
                warnings.extend(w)
                
                # Log constraint extraction
                unique_count = len(constraints.get("unique", []))
                index_count = len(constraints.get("indexes", []))
                fk_count = len(constraints.get("foreign_keys", []))
                check_count = len(constraints.get("checks", []))
                if unique_count > 0 or index_count > 0 or fk_count > 0 or check_count > 0:
                    logger.debug(f"Extracted constraints for table '{table_name}': "
                               f"{unique_count} unique, {index_count} indexes, {fk_count} foreign keys, {check_count} checks")
                
                # Process any queued INSERTs for this table (now that schema is available)
                if table_name_lower:
                    queued_count = len(insert_queues.get(table_name_lower, []))
                    if queued_count > 0:
                        logger.info(f"Table '{table_name}' schema ready, processing {queued_count} queued INSERT statements")
                    process_table_inserts(table_name_lower)
                    if queued_count > 0:
                        insert_processed_count += queued_count
                
                # Apply regex fallback to extract lingering inline indexes/constraints
                # Also extract FULLTEXT INDEX from original SQL if available
                try:
                    import re
                    if table_name:
                        # Extract FULLTEXT INDEX from original SQL (if available)
                        # This is needed because FULLTEXT INDEX is removed during preprocessing
                        original_sql = getattr(part, 'original_sql', None)
                        if original_sql and "FULLTEXT" in original_sql.upper():
                            def _emit_fulltext_index_from_original(m):
                                # Strip both double quotes and backticks from index name
                                name = (m.group(1) or "").strip('"`')
                                cols_raw = m.group(2)
                                raw_tokens = [c for c in cols_raw.split(',') if c.strip()]
                                # Remove NULLS FIRST/NULLS LAST and other PostgreSQL-specific syntax
                                cols_fmt = []
                                for tok in raw_tokens:
                                    # Extract column name, ignoring NULLS FIRST/LAST and other modifiers
                                    col_parts = tok.strip().split()
                                    col_name = col_parts[0].strip('"`')
                                    cols_fmt.append(f'"{col_name}"')
                                if cols_fmt:
                                    tn = f'"{_norm_name(table_name) or table_name}"'
                                    # Extract column names without quotes for index name generation
                                    col_names = [c.strip('"') for c in cols_fmt]
                                    idx_name = name or f"idx_{_norm_name(table_name) or table_name}_{'_'.join(col_names)}"
                                    write_cons_line(f'CREATE INDEX {idx_name} ON {tn} (' + ", ".join(cols_fmt) + ");")
                                return ""  # Already extracted, no need to process again
                            
                            # Pattern matches: FULLTEXT INDEX "name" or FULLTEXT INDEX `name` or FULLTEXT INDEX name
                            fulltext_idx_pat = re.compile(r",\s*FULLTEXT\s+(?:INDEX|KEY)\s+[`\"]?([^`\"(\s]+)[`\"]?\s*\(([^)]*)\)", re.IGNORECASE)
                            original_sql = fulltext_idx_pat.sub(_emit_fulltext_index_from_original, original_sql)
                        
                        # Extract UNIQUE/INDEX/KEY definitions
                        def _emit_index(m):
                            unique_kw = m.group(1) or ""
                            name = (m.group(2) or m.group(5) or "").strip('"')
                            cols_raw = m.group(3) or m.group(6)
                            raw_tokens = [c for c in cols_raw.split(',') if c.strip()]
                            cols_fmt = [_format_index_column_token(tok) for tok in raw_tokens]
                            if cols_fmt:
                                tn = f'"{_norm_name(table_name) or table_name}"'
                                bare = [t.split()[0].strip('"') for t in cols_fmt]
                                if unique_kw.strip().upper().startswith("UNIQUE"):
                                    idx_name = name or f"uq_{_norm_name(table_name) or table_name}_{'_'.join(bare)}"
                                    write_cons_line(f'CREATE UNIQUE INDEX {idx_name} ON {tn} (' + ", ".join(cols_fmt) + ");")
                                else:
                                    idx_name = name or f"idx_{_norm_name(table_name) or table_name}_{'_'.join(bare)}"
                                    write_cons_line(f'CREATE INDEX {idx_name} ON {tn} (' + ", ".join(cols_fmt) + ");")
                            return ""
                        
                        idx_pat = re.compile(r"\s*,\s*(UNIQUE\s+)?(?:KEY|INDEX)\s+\"?([^\"(\s]+)?\"?\s*\(([^)]*)\)|\s*,\s*(UNIQUE\s+)?INDEX\s+\"?([^\"(\s]+)?\"?\s*\(([^)]*)\)", re.IGNORECASE)
                        sql = re.sub(idx_pat, _emit_index, sql)
                        
                        # Extract FULLTEXT INDEX: treat as regular index (extract from DDL, emit as CREATE INDEX)
                        def _emit_fulltext_index(m):
                            # Strip both double quotes and backticks from index name
                            name = (m.group(1) or "").strip('"`')
                            cols_raw = m.group(2)
                            raw_tokens = [c for c in cols_raw.split(',') if c.strip()]
                            # Remove NULLS FIRST/NULLS LAST and other PostgreSQL-specific syntax
                            cols_fmt = []
                            for tok in raw_tokens:
                                # Extract column name, ignoring NULLS FIRST/LAST and other modifiers
                                col_parts = tok.strip().split()
                                col_name = col_parts[0].strip('"`')
                                cols_fmt.append(f'"{col_name}"')
                            if cols_fmt:
                                tn = f'"{_norm_name(table_name) or table_name}"'
                                # Extract column names without quotes for index name generation
                                col_names = [c.strip('"') for c in cols_fmt]
                                idx_name = name or f"idx_{_norm_name(table_name) or table_name}_{'_'.join(col_names)}"
                                write_cons_line(f'CREATE INDEX {idx_name} ON {tn} (' + ", ".join(cols_fmt) + ");")
                            return ""  # remove from CREATE TABLE
                        
                        fulltext_idx_pat = re.compile(r"\s*,\s*FULLTEXT\s+(?:INDEX|KEY)\s+\"?([^\"(\s]+)?\"?\s*\(([^)]*)\)", re.IGNORECASE)
                        sql = re.sub(fulltext_idx_pat, _emit_fulltext_index, sql)
                        
                        # Extract plain UNIQUE
                        def _emit_plain_unique(m):
                            name = (m.group(1) or '').strip('"')
                            cols_raw = m.group(2)
                            raw_tokens = [c for c in cols_raw.split(',') if c.strip()]
                            cols_fmt = [_format_index_column_token(tok) for tok in raw_tokens]
                            if cols_fmt:
                                tn = f'"{_norm_name(table_name) or table_name}"'
                                bare = [t.split()[0].strip('"') for t in cols_fmt]
                                idx_name = name or f"uq_{_norm_name(table_name) or table_name}_{'_'.join(bare)}"
                                write_cons_line(f'CREATE UNIQUE INDEX {idx_name} ON {tn} (' + ", ".join(cols_fmt) + ");")
                            return ""
                        plain_unique_pat = re.compile(r"\s*,\s*UNIQUE\s+\"?([^\"(\s]+)?\"?\s*\(([^)]*)\)", re.IGNORECASE)
                        sql = re.sub(plain_unique_pat, _emit_plain_unique, sql)
                        
                        # Extract FOREIGN KEY
                        def _emit_fk(m):
                            cname = (m.group(1) or "").strip('"')
                            local_cols = [c.strip().strip('"') for c in m.group(2).split(',')]
                            ref_table = m.group(3).strip('"')
                            ref_cols = [c.strip().strip('"') for c in m.group(4).split(',')]
                            fk_name = cname or f"fk_{table_name}_{'_'.join(local_cols)}_to_{ref_table}"
                            write_fk_line(
                                f'ALTER TABLE {table_name} ADD CONSTRAINT {fk_name} FOREIGN KEY (' + ", ".join(f'"{c}"' for c in local_cols) + f') REFERENCES "{ref_table}" (' + ", ".join(f'"{c}"' for c in ref_cols) + ");"
                            )
                            return ""
                        fk_pat = re.compile(r"\s*,\s*CONSTRAINT\s+\"?([^\"\s]+)?\"?\s+FOREIGN\s+KEY\s*\(([^)]*)\)\s+REFERENCES\s+\"?([^\"\s]+)\"?\s*\(([^)]*)\)", re.IGNORECASE)
                        sql = re.sub(fk_pat, _emit_fk, sql)
                        
                        # Extract CHECK constraints
                        def _emit_check(m):
                            cname = (m.group(1) or "").strip('"')
                            expr = m.group(2)
                            chk_name = cname or f"chk_{table_name}_{len(cons_lines)+1}"
                            write_cons_line(f'ALTER TABLE {table_name} ADD CONSTRAINT {chk_name} CHECK ({expr});')
                            return ""
                        chk_pat = re.compile(r"\s*,\s*CONSTRAINT\s+\"?([^\"\s]+)?\"?\s+CHECK\s*\((.*?)\)", re.IGNORECASE | re.DOTALL)
                        sql = re.sub(chk_pat, _emit_check, sql)
                except Exception:
                    pass
                
                # Separate SEQUENCE, COMMENT ON, CHECK constraints, and TRIGGERS
                sequences = [x for x in extra if x.strip().upper().startswith("CREATE SEQUENCE")]
                comments = [x for x in extra if x.strip().upper().startswith("COMMENT ON")]
                checks = [x for x in extra if x.strip().upper().startswith("ALTER TABLE") and "CHECK" in x.upper()]
                triggers = [x for x in extra if "CREATE TRIGGER" in x.upper() or (x.strip().upper().startswith("DELIMITER") and "CREATE TRIGGER" in x.upper())]
                
                # Output: SEQUENCE first, then CREATE TABLE, then COMMENT ON
                for seq in sequences:
                    write_ddl_line(seq.rstrip("; ") + ";")
                write_ddl_line(sql.rstrip("; ") + ";")
                for comment in comments:
                    write_ddl_line(comment.rstrip("; ") + ";")
                
                # CHECK constraints and TRIGGERS go to constraints bucket
                for check in checks:
                    write_cons_line(check.rstrip("; ") + ";")
                for trigger in triggers:
                    if "DELIMITER" in trigger.upper():
                        write_cons_line(trigger.rstrip())
                    else:
                        write_cons_line(trigger.rstrip("; ") + ";")
                
                # Emit extracted constraints
                for u in constraints.get("unique", []):
                    if u.get("columns"):
                        name = u.get("name") or f"uq_{table_name}_{'_'.join(u['columns'])}"
                        cols = ", ".join(f'"{c}"' for c in u["columns"])
                        write_cons_line(f'CREATE UNIQUE INDEX {name} ON {table_name} ({cols});')
                
                for idx in constraints.get("indexes", []):
                    if idx.get("columns"):
                        name = idx.get("name") or f"idx_{table_name}_{'_'.join(idx['columns'])}"
                        cols = ", ".join(f'"{c}"' for c in idx["columns"])
                        write_cons_line(f'CREATE INDEX {name} ON {table_name} ({cols});')
                
                for chk in constraints.get("checks", []):
                    if chk.get("expression_sql"):
                        name = chk.get("name") or f"chk_{table_name}_{len(cons_lines)+1}"
                        expr_sql = chk["expression_sql"]
                        expr = expr_sql
                        up = expr_sql.strip().upper()
                        if up.startswith("CHECK"):
                            import re
                            m = re.search(r"CHECK\s*\((.*)\)\s*$", expr_sql, flags=re.IGNORECASE | re.DOTALL)
                            expr = m.group(1) if m else expr_sql
                        write_cons_line(f'ALTER TABLE {table_name} ADD CONSTRAINT {name} CHECK ({expr});')
                
                for fk in constraints.get("foreign_keys", []):
                    if fk.get("columns") and fk.get("ref_table") and fk.get("ref_columns"):
                        name = fk.get("name") or f"fk_{table_name}_{'_'.join(fk['columns'])}_to_{fk['ref_table']}"
                        cols = ", ".join(f'"{c}"' for c in fk["columns"])
                        ref_cols = ", ".join(f'"{c}"' for c in fk["ref_columns"])
                        write_fk_line(
                            f'ALTER TABLE {table_name} ADD CONSTRAINT {name} FOREIGN KEY ({cols}) REFERENCES "{fk["ref_table"]}" ({ref_cols});'
                        )
            
            elif isinstance(ast, exp.Create):
                ddl_count += 1
                last_ddl_time = time.time()
                # Other CREATE statements (DATABASE, SEQUENCE, INDEX, etc.)
                kind = ast.args.get("kind")
                if kind and kind.upper() == "INDEX":
                    logger.debug("[DDL Transform] Processing CREATE INDEX statement")
                    res = transform_expression(ast, table_schemas=table_schemas)
                    if not res:
                        try:
                            sql = ast.sql(dialect="postgres")
                        except Exception:
                            sql = ast.sql(dialect="mysql")
                        w = []
                        extra = []
                    else:
                        sql, w, extra = res
                    write_cons_line(sql.rstrip("; ") + ";")
                    for x in extra:
                        write_cons_line(x.rstrip("; ") + ";")
                    warnings.extend(w)
                else:
                    # CREATE DATABASE, CREATE SEQUENCE, etc.
                    kind_str = kind.upper() if kind else "UNKNOWN"
                    logger.debug(f"Processing CREATE {kind_str} statement")
                    res = transform_expression(ast, table_schemas=table_schemas)
                    if not res:
                        try:
                            sql = ast.sql(dialect="postgres")
                        except Exception:
                            sql = ast.sql(dialect="mysql")
                        w = []
                        extra = []
                    else:
                        sql, w, extra = res
                    write_ddl_line(sql.rstrip("; ") + ";")
                    for x in extra:
                        write_ddl_line(x.rstrip("; ") + ";")
                    warnings.extend(w)
            
            elif isinstance(ast, exp.Alter):
                ddl_count += 1
                last_ddl_time = time.time()
                logger.debug("[DDL Transform] Processing ALTER statement")
                # ALTER statements
                res = transform_expression(ast, table_schemas=table_schemas)
                if not res:
                    try:
                        sql = ast.sql(dialect="postgres")
                    except Exception:
                        sql = ast.sql(dialect="mysql")
                    w = []
                    extra = []
                else:
                    sql, w, extra = res
                if "FOREIGN KEY" in sql.upper():
                    write_fk_line(sql.rstrip("; ") + ";")
                    for x in extra:
                        write_fk_line(x.rstrip("; ") + ";")
                else:
                    write_cons_line(sql.rstrip("; ") + ";")
                    for x in extra:
                        write_cons_line(x.rstrip("; ") + ";")
                warnings.extend(w)
            
            elif isinstance(ast, exp.Insert):
                # INSERT statements with AST (shouldn't happen normally, but handle it)
                insert_count += 1
                logger.debug("Processing INSERT statement with AST")
                res = transform_expression(ast, table_schemas=table_schemas)
                if not res:
                    try:
                        sql = ast.sql(dialect="postgres")
                    except Exception:
                        sql = ast.sql(dialect="mysql")
                else:
                    sql, w, extra = res
                dml_lines.append(sql.rstrip("; ") + ";")
                warnings.extend(w)
            
            else:
                ddl_count += 1
                last_ddl_time = time.time()
                # Other DDL statements (DROP, COMMENT, etc.)
                logger.debug(f"[DDL Transform] Processing DDL statement: {type(ast).__name__}")
                res = transform_expression(ast, table_schemas=table_schemas)
                if not res:
                    try:
                        sql = ast.sql(dialect="postgres")
                    except Exception:
                        sql = ast.sql(dialect="mysql")
                    w = []
                    extra = []
                else:
                    sql, w, extra = res
                write_ddl_line(sql.rstrip("; ") + ";")
                for x in extra:
                    write_ddl_line(x.rstrip("; ") + ";")
                warnings.extend(w)
        
        else:
            # Raw SQL statement (INSERT or DROP DATABASE)
            raw = (part.raw_sql or "").strip()
            if raw:
                up = raw.lstrip().upper()
                stmt = raw if raw.endswith(";") else raw + ";"
                
                if up.startswith("INSERT"):
                    insert_count += 1
                    # Extract table name from INSERT statement
                    m = _RE_INSERT_PATTERN.match(stmt)
                    if m:
                        table_token = m.group(1).strip()
                        base = table_token.strip('`"')
                        original_table_name = base.split('.')[-1]
                        table_name_lower = original_table_name.lower()
                        
                        # Check if table schema is available
                        if table_name_lower in table_schemas:
                            # Queue for batch parallel processing (even if schema is available)
                            # This ensures all INSERTs are processed in parallel batches for better performance
                            if table_name_lower not in insert_queues:
                                insert_queues[table_name_lower] = []
                            insert_queues[table_name_lower].append(stmt)
                            insert_queued_count += 1
                            
                            # Process in batches: when queue reaches a threshold, process immediately
                            # Use smaller batches for more responsive processing during parsing
                            # This allows parallel processing while still processing as we go (边解析边处理)
                            # No merging, so use standard batch size
                            # Lower threshold to prevent memory buildup
                            batch_threshold = 50  # Smaller batch size to prevent memory issues
                            if len(insert_queues[table_name_lower]) >= batch_threshold:
                                # Process this batch immediately (边解析边并行处理)
                                queued_for_table = insert_queues.pop(table_name_lower)
                                process_table_inserts_batch(table_name_lower, queued_for_table)
                        else:
                            # Queue for later processing (schema not available yet)
                            insert_queued_count += 1
                            if insert_queued_count % 10000 == 0:
                                logger.debug(f"Queued {insert_queued_count} INSERT statements (waiting for table schemas)...")
                            if table_name_lower not in insert_queues:
                                insert_queues[table_name_lower] = []
                            insert_queues[table_name_lower].append(stmt)
                            
                            # Prevent memory buildup: if queue gets too large, process even without schema
                            # (will just normalize table name, no bool/json conversion)
                            max_queue_size = 1000  # Maximum INSERTs to queue per table before forcing processing
                            if len(insert_queues[table_name_lower]) >= max_queue_size:
                                logger.warning(f"Queue for table '{table_name_lower}' reached {max_queue_size} INSERTs, processing without schema to prevent memory issues")
                                queued_for_table = insert_queues.pop(table_name_lower)
                                process_table_inserts_batch(table_name_lower, queued_for_table)
                        
                        # Periodic processing: process queued INSERTs every N INSERTs encountered
                        # This ensures continuous processing during parsing (边解析边处理)
                        if insert_count - last_periodic_process_count >= periodic_process_interval:
                            last_periodic_process_count = insert_count
                            process_queued_inserts_periodically()
                
                elif up.startswith("DROP DATABASE"):
                    dropdb_passthrough.append(stmt)
    

    parse_end_time = time.time()
    parse_duration = parse_end_time - parse_start_time
    parse_throughput = file_size / parse_duration / (1024 * 1024) if parse_duration > 0 else 0
    
    # Log DDL transformation completion
    if ddl_transformation_started and last_ddl_time:
        ddl_duration = last_ddl_time - ddl_transformation_start_time
        logger.info("=" * 60)
        logger.info("DDL syntax transformation completed")
        logger.info(f"  - Started at: {time.strftime('%H:%M:%S', time.localtime(ddl_transformation_start_time))}")
        logger.info(f"  - Completed at: {time.strftime('%H:%M:%S', time.localtime(last_ddl_time))}")
        logger.info(f"  - Duration: {ddl_duration:.2f} seconds")
        logger.info(f"  - Total DDL statements: {ddl_count}")
        logger.info(f"  - CREATE TABLE statements: {create_table_count}")
        logger.info(f"  - Average time per DDL: {ddl_duration / ddl_count:.3f} seconds" if ddl_count > 0 else "")
        logger.info("=" * 60)
    
    logger.info("Finished parsing and transforming SQL statements")
    logger.info(f"Parse performance: {parse_duration:.2f} seconds, throughput: {parse_throughput:.2f} MB/s")
    logger.info(f"Statistics: {ddl_count} DDL statements, {create_table_count} CREATE TABLE statements, "
              f"{insert_count} INSERT statements")
    
    # Log memory usage estimates
    total_queued = sum(len(inserts) for inserts in insert_queues.values())
    if total_queued > 0:
        logger.warning(f"Warning: {total_queued} INSERT statements still queued in memory (tables without schemas)")
    
    # Estimate memory usage
    ddl_memory = sum(len(line) for line in ddl_lines) if ddl_lines else 0
    cons_memory = sum(len(line) for line in cons_lines) + sum(len(line) for line in fk_lines) if (cons_lines or fk_lines) else 0
    queued_memory = sum(len(stmt) for inserts in insert_queues.values() for stmt in inserts) if insert_queues else 0
    
    logger.info(f"Memory usage estimates:")
    logger.info(f"  - DDL in memory: {ddl_memory / (1024*1024):.2f} MB")
    logger.info(f"  - Constraints in memory: {cons_memory / (1024*1024):.2f} MB")
    logger.info(f"  - Queued INSERTs in memory: {queued_memory / (1024*1024):.2f} MB")
    logger.info(f"  - Total estimated memory: {(ddl_memory + cons_memory + queued_memory) / (1024*1024):.2f} MB")
    
    # Process any remaining queued INSERTs (including those that didn't reach batch threshold)
    remaining_queued = sum(len(inserts) for inserts in insert_queues.values())
    if remaining_queued > 0:
        logger.info(f"Processing {remaining_queued} remaining queued INSERT statements")
        for table_name_lower, inserts in list(insert_queues.items()):
            # Process all remaining INSERTs for each table
            process_table_inserts_batch(table_name_lower, inserts)
            # Remove from queue after processing
            insert_queues.pop(table_name_lower, None)
    
    # Close processed INSERTs file if used (after all INSERTs are processed)
    if processed_inserts_file:
        processed_inserts_file.close()
        processed_inserts_path = Path(processed_inserts_path) if processed_inserts_path else None
        logger.debug("Closed temporary file for processed INSERTs")
    else:
        processed_inserts_path = None
    
    # Log parallel INSERT processing completion
    if parallel_insert_processing_started and parallel_insert_start_time:
        parallel_insert_end_time = time.time()
        parallel_insert_duration = parallel_insert_end_time - parallel_insert_start_time
        avg_throughput = parallel_insert_total_count / parallel_insert_duration if parallel_insert_duration > 0 else 0
        
        logger.info("=" * 60)
        logger.info("Parallel INSERT statement processing completed")
        logger.info(f"  - Started at: {time.strftime('%H:%M:%S', time.localtime(parallel_insert_start_time))}")
        logger.info(f"  - Completed at: {time.strftime('%H:%M:%S', time.localtime(parallel_insert_end_time))}")
        logger.info(f"  - Duration: {parallel_insert_duration:.2f} seconds")
        logger.info(f"  - Total batches processed: {parallel_insert_batch_count}")
        logger.info(f"  - Total INSERT statements processed: {parallel_insert_total_count}")
        logger.info(f"  - Average throughput: {avg_throughput:.0f} statements/sec")
        executor_type = "processes" if use_multiprocessing else "threads"
        logger.info(f"  - {executor_type.capitalize()}: {max_workers} workers")
        logger.info(f"  - Actual {executor_type} used: {max_workers} (all workers utilized)")
        logger.info("=" * 60)
    
    # Shutdown executor
    executor_type = "process pool" if use_multiprocessing else "thread pool"
    logger.info(f"Shutting down {executor_type}...")
    insert_executor.shutdown(wait=True)
    logger.info(f"{executor_type.capitalize()} shutdown complete ({max_workers} workers)")
    
    # Close file handles if using file-based streaming and read content
    if use_file_for_all:
        if ddl_file:
            ddl_file.close()
            logger.debug(f"Closed DDL file: {ddl_file_path}")
        if cons_file:
            cons_file.close()
            logger.debug(f"Closed constraints file: {cons_file_path}")
        
        # Always read DDL and constraints content from files (they are usually small)
        # DDL and constraints files are typically small (table structures and indexes),
        # so we can always read them regardless of file size or output_sql_path setting
        ddl_content = ""
        if ddl_file_path and os.path.exists(ddl_file_path):
            ddl_file_size = os.path.getsize(ddl_file_path)
            logger.debug(f"Reading DDL file: {ddl_file_path} (size: {ddl_file_size} bytes)")
            with open(ddl_file_path, 'r', encoding='utf-8') as f:
                ddl_content = f.read()
            logger.debug(f"Read DDL content: {len(ddl_content)} characters")
        else:
            logger.warning(f"DDL file path not found or does not exist: {ddl_file_path}")
        
        constraints_content = ""
        if cons_file_path and os.path.exists(cons_file_path):
            cons_file_size = os.path.getsize(cons_file_path)
            logger.debug(f"Reading constraints file: {cons_file_path} (size: {cons_file_size} bytes)")
            with open(cons_file_path, 'r', encoding='utf-8') as f:
                constraints_content = f.read()
            logger.debug(f"Read constraints content: {len(constraints_content)} characters")
        else:
            logger.warning(f"Constraints file path not found or does not exist: {cons_file_path}")
        
        # For output_sql construction, check if we need it and if files are too large
        need_output_sql = cfg.output_sql_path is None
        if need_output_sql:
            # Need to build output_sql, but for very large files this may cause memory issues
            # Check file sizes before reading DML (INSERT statements can be huge)
            dml_file_size = os.path.getsize(processed_inserts_path) if processed_inserts_path and os.path.exists(processed_inserts_path) else 0
            
            # If DML file is too large (>500MB), warn and skip output_sql construction
            # DDL and constraints are already read above (they're usually small)
            if dml_file_size > 500 * 1024 * 1024:
                logger.warning(f"DML file is too large ({dml_file_size / (1024*1024):.2f} MB), "
                             f"skipping output_sql construction to prevent memory issues. "
                             f"Use separate output files instead.")
    else:
        # Use in-memory content
        ddl_content = "\n".join(ddl_lines) if ddl_lines else ""
        constraints_content = "\n".join(cons_lines + fk_lines) if (cons_lines or fk_lines) else ""
    
    # Prepend DROP DATABASE statements
    header = ""
    if dropdb_passthrough:
        header = "\n".join(dropdb_passthrough) + "\n"
    
    # Build output
    ddl_sql = header + ddl_content + ("\n" if ddl_content else "")
    constraints_sql = constraints_content + ("\n" if constraints_content else "")
    
    # Handle DML output
    if processed_inserts_path and processed_inserts_path.exists():
        dml_file_path = processed_inserts_path
        dml_sql_for_result = ""  # Empty to indicate file-based
    else:
        dml_sql_for_result = "\n".join(dml_lines) + ("\n" if dml_lines else "")
        dml_file_path = None
    
    # Build output_sql if needed
    need_output_sql = cfg.output_sql_path is None
    if need_output_sql:
        if dml_sql_for_result:
            output_sql = ddl_sql + "\n" + dml_sql_for_result + "\n" + constraints_sql
        else:
            output_sql = ddl_sql + "\n" + constraints_sql
    else:
        output_sql = ddl_sql + "\n" + constraints_sql
    
    # Final statistics
    logger.info("Pipeline processing complete")
    logger.info(f"Final statistics:")
    logger.info(f"  - DDL statements: {ddl_count}")
    logger.info(f"  - CREATE TABLE statements: {create_table_count}")
    logger.info(f"  - Tables with schemas extracted: {len(table_schemas)}")
    logger.info(f"  - INSERT statements total: {insert_count}")
    logger.info(f"  - INSERT statements processed immediately: {insert_processed_count}")
    logger.info(f"  - INSERT statements queued: {insert_queued_count}")
    logger.info(f"  - DDL SQL lines: {len(ddl_lines)}")
    logger.info(f"  - DML SQL lines: {len(dml_lines)}")
    logger.info(f"  - Constraint SQL lines: {len(cons_lines) + len(fk_lines)}")
    logger.info(f"  - Errors: {len(errors)}")
    logger.info(f"  - Warnings: {len(warnings)}")
    if dml_file_path:
        logger.info(f"  - DML output file: {dml_file_path}")
    
    # Convert file paths to Path objects if they exist
    ddl_file_path_result = Path(ddl_file_path) if ddl_file_path and os.path.exists(ddl_file_path) else None
    constraints_file_path_result = Path(cons_file_path) if cons_file_path and os.path.exists(cons_file_path) else None
    
    return PipelineResult(
        output_sql=output_sql,
        ddl_sql=ddl_sql,
        dml_sql=dml_sql_for_result,
        constraints_sql=constraints_sql,
        errors=errors,
        warnings=warnings,
        dml_file_path=dml_file_path,
        ddl_file_path=ddl_file_path_result,
        constraints_file_path=constraints_file_path_result,
    )


def _process_insert_statement(raw_sql: str, table_schemas: dict, json_fix_status: Dict[str, bool] = None, json_fix_lock: threading.Lock = None) -> str:
    """
    Optimized merged function that processes INSERT statements in a single pass:
    1. Normalizes table name (quotes to preserve case)
    2. Converts 0/1 to FALSE/TRUE for BOOL columns
    3. Fixes over-escaped JSON values (\" -> ") for JSON columns
    
    JSON fix status is tracked per table: each table checks its first INSERT statement's first row,
    and if any JSON column needs fixing, all rows of that table will be fixed.
    
    This replaces the previous two-pass approach (_rewrite_insert_bool_literals_text + _normalize_insert_head)
    for 30-50% performance improvement.
    """
    try:
        s = raw_sql.strip().rstrip(';')
        up = s.upper()
        if not up.startswith('INSERT'):
            return raw_sql
        
        # Use pre-compiled regex for better performance
        m = _RE_INSERT_PATTERN.match(s)
        if not m:
            return raw_sql
        
        table_token = m.group(1).strip()
        cols_wrap = m.group(2) or ""
        verb = m.group(4)
        tail = m.group(5)
        
        # Extract and normalize table name (always quote to preserve case)
        base = table_token.strip('`"')
        original_table_name = base.split('.')[-1]
        quoted_table = f'"{original_table_name}"'
        
        # Fast path: If not VALUES or no table schema, just normalize table name
        if verb != 'VALUES':
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} {verb}"
            return head + ' ' + tail + ';'
        
        # Check if table needs bool or json conversion (fast skip if not needed)
        table_name_lower = original_table_name.lower()
        schema = table_schemas.get(table_name_lower)
        if not schema:
            # No schema info, just normalize table name
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        bool_cols = schema.get('bool_cols', set())
        json_cols = schema.get('json_cols', set())
        if not bool_cols and not json_cols:
            # No bool or json columns, just normalize table name
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        # Need to process VALUES for bool and/or json conversion
        cols_group = m.group(3)
        values_tail = tail
        
        # Determine listed columns or default order
        if cols_group and cols_group.strip():
            listed_cols = [c.strip().strip('`"').lower() for c in cols_group.split(',')]
        else:
            listed_cols = list(schema.get('order', []))
        
        if not listed_cols:
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        bool_indexes = {i for i, c in enumerate(listed_cols) if c in bool_cols}
        json_indexes = {i for i, c in enumerate(listed_cols) if c in json_cols}
        
        if not bool_indexes and not json_indexes:
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        # Extract and rewrite tuples with optimized parsing
        tuples = _extract_top_level_tuples_optimized(values_tail)
        if not tuples:
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        # Get or determine JSON fix status for this table (per-table, thread-safe)
        needs_json_fix = False
        if json_indexes and json_fix_status is not None:
            # Check if this table's JSON fix status has been determined
            if table_name_lower not in json_fix_status:
                # First time seeing this table - check first row of this INSERT statement
                # This is the table's first INSERT statement (or at least the first one we process)
                import json
                if tuples:
                    first_row_cells = _split_tuple_cells_optimized(tuples[0])
                    if first_row_cells:
                        # Check all JSON columns in first row to determine if fix is needed
                        for i in json_indexes:
                            if i >= len(first_row_cells):
                                continue
                            v = first_row_cells[i].strip()
                            if v.upper() == 'NULL':
                                continue
                            # Remove outer quotes for checking
                            json_check = v
                            if v.startswith("'") and v.endswith("'"):
                                json_check = v[1:-1]
                            elif v.startswith('"') and v.endswith('"'):
                                json_check = v[1:-1]
                            
                            # Try to parse as JSON
                            try:
                                json.loads(json_check)
                                # Valid JSON, no fix needed for this column
                            except (json.JSONDecodeError, ValueError):
                                # Check if it's over-escaped (has \" sequences)
                                # The string may contain literal backslash+quote characters
                                # Check for both patterns: \" (backslash followed by quote) and \\" (double backslash)
                                # In Python string, '\\"' represents a backslash followed by a quote character
                                has_escaped_quote = '\\"' in json_check
                                if has_escaped_quote:
                                    # Try fixing it by replacing all \" with "
                                    # Use replace to handle all occurrences
                                    test_fixed = json_check.replace('\\"', '"')
                                    try:
                                        json.loads(test_fixed)
                                        # After fixing, it becomes valid JSON, so mark as needing fix
                                        needs_json_fix = True
                                        break  # Found one that needs fixing, no need to check others
                                    except (json.JSONDecodeError, ValueError):
                                        # Even after fixing, still invalid - might be a different issue
                                        # Don't mark as needing fix, keep original
                                        pass
                
                # Store the status for this table (thread-safe)
                if json_fix_lock:
                    with json_fix_lock:
                        # Double-check after acquiring lock (another thread might have set it)
                        if table_name_lower not in json_fix_status:
                            json_fix_status[table_name_lower] = needs_json_fix
                        else:
                            needs_json_fix = json_fix_status[table_name_lower]
                else:
                    if json_fix_status is not None:
                        json_fix_status[table_name_lower] = needs_json_fix
            else:
                # Table's JSON fix status already determined, use cached value
                if json_fix_status is not None:
                    needs_json_fix = json_fix_status[table_name_lower]
        
        import json
        new_tuples: List[str] = []
        for row_idx, tup in enumerate(tuples):
            cells = _split_tuple_cells_optimized(tup)
            if not cells:
                new_tuples.append(tup)
                continue
            
            # Process each cell in a single pass (both BOOL and JSON)
            for i in range(len(cells)):
                v = cells[i].strip()
                
                # Skip NULL
                if v.upper() == 'NULL':
                    continue
                
                # Process BOOL columns
                if i in bool_indexes:
                    # Quoted numeric string
                    if (v.startswith("'") and v.endswith("'")) or (v.startswith('"') and v.endswith('"')):
                        sv = v[1:-1]
                        if sv.isdigit():
                            cells[i] = 'TRUE' if int(sv) != 0 else 'FALSE'
                            continue
                    # Unquoted integer - use pre-compiled regex
                    if _RE_INTEGER_PATTERN.match(v):
                        cells[i] = 'TRUE' if int(v) != 0 else 'FALSE'
                        continue
                
                # Process JSON columns (apply fix if needed)
                # Only fix if table-level detection (first row) determined it's needed
                if i in json_indexes and needs_json_fix:
                    # Fix over-escaped JSON: remove all backslashes before quotes
                    # Original MySQL format: '{\"theme\": \"theme2\"}' -> '{"theme": "theme2"}'
                    original_v = v
                    if v.startswith("'") and v.endswith("'"):
                        # Single-quoted JSON string
                        json_content = v[1:-1]
                        # Replace all \" with " (remove backslashes before quotes)
                        fixed_content = json_content.replace('\\"', '"')
                        # Verify the fixed content is valid JSON
                        try:
                            json.loads(fixed_content)
                            # Write back with single quotes - the fixed_content now has proper JSON format
                            cells[i] = f"'{fixed_content}'"
                        except (json.JSONDecodeError, ValueError):
                            # If still invalid after fixing, keep original
                            cells[i] = original_v
                    elif v.startswith('"') and v.endswith('"'):
                        # Double-quoted JSON string
                        json_content = v[1:-1]
                        # Replace all \" with "
                        fixed_content = json_content.replace('\\"', '"')
                        try:
                            json.loads(fixed_content)
                            # For double-quoted SQL strings, escape double quotes in JSON for SQL
                            escaped_content = fixed_content.replace('"', '\\"')
                            cells[i] = f'"{escaped_content}"'
                        except (json.JSONDecodeError, ValueError):
                            cells[i] = original_v
                    else:
                        # No outer quotes, fix directly
                        fixed_content = v.replace('\\"', '"')
                        try:
                            json.loads(fixed_content)
                            cells[i] = fixed_content
                        except (json.JSONDecodeError, ValueError):
                            cells[i] = original_v
            
            new_tuples.append('(' + ', '.join(cells) + ')')
        
        # Rebuild statement with normalized table name
        cols_txt = f"({cols_group})" if cols_group and cols_group.strip() else ""
        head = f"INSERT INTO {quoted_table}{(' ' + cols_txt) if cols_txt else ''} VALUES"
        return head + ' ' + ', '.join(new_tuples) + ';'
    except Exception:
        return raw_sql


# Keep old functions for backward compatibility (deprecated, use _process_insert_statement instead)
def _normalize_insert_head(raw_sql: str) -> str:
    """Deprecated shim kept for backward compatibility.

    Delegates to :func:`_process_insert_statement` with no schema information,
    so only the table-name normalization part of the merged pass applies.
    """
    return _process_insert_statement(raw_sql, {}, json_fix_status=None, json_fix_lock=None)


def _rewrite_insert_bool_literals_text(raw_sql: str, table_schemas: dict) -> str:
    """Text-based rewrite for INSERT ... VALUES ..., converting 0/1 to FALSE/TRUE
    in columns that will become BOOL (collected from DDL TINYINT(1)).

    Safe no-op when the statement is not an INSERT ... VALUES, the table or
    its boolean columns are unknown, or any parsing step fails (best-effort).

    Args:
        raw_sql: Full INSERT statement text.
        table_schemas: Mapping of lowercase table name -> schema dict with
            'bool_cols' (set of lowercase column names) and 'order'
            (DDL column order, used when no column list is given).

    Returns:
        The rewritten statement (table name quoted, bool literals fixed),
        or raw_sql unchanged when no rewrite applies.
    """
    try:
        s = raw_sql.strip().rstrip(';')
        if not s.upper().startswith('INSERT'):
            return raw_sql
        # Use the module-level precompiled pattern instead of re-compiling
        # the same regex on every call.
        m = _RE_INSERT_VALUES_PATTERN.match(s)
        if not m:
            return raw_sql
        table_token = m.group(1).strip()
        base = table_token.strip('`"')
        # Support optional schema/database prefix: db.table -> table.
        # Preserve original case for quoting; lowercase only for the lookup.
        original_table_name = base.split('.')[-1]
        schema = table_schemas.get(original_table_name.lower())
        if not schema:
            return raw_sql
        bool_cols = set(schema.get('bool_cols', set()))
        if not bool_cols:
            return raw_sql
        cols_group = m.group(3)
        values_tail = m.group(4)
        # Determine listed columns, or fall back to DDL column order.
        if cols_group and cols_group.strip():
            listed_cols = [c.strip().strip('`"').lower() for c in cols_group.split(',')]
        else:
            listed_cols = list(schema.get('order', []))
        if not listed_cols:
            return raw_sql
        bool_indexes = {i for i, c in enumerate(listed_cols) if c in bool_cols}
        if not bool_indexes:
            return raw_sql
        # Split the VALUES tail into top-level tuples and rewrite each cell.
        tuples = _extract_top_level_tuples(values_tail)
        if not tuples:
            return raw_sql
        new_tuples: List[str] = []
        for tup in tuples:
            cells = _split_tuple_cells(tup)
            if not cells:
                new_tuples.append(tup)
                continue
            for i in bool_indexes:
                if i >= len(cells):
                    continue
                v = cells[i].strip()
                # Keep NULLs untouched.
                if v.upper() == 'NULL':
                    continue
                # Quoted numeric string, e.g. '1' or "0".
                if (v.startswith("'") and v.endswith("'")) or (v.startswith('"') and v.endswith('"')):
                    sv = v[1:-1]
                    if sv.isdigit():
                        cells[i] = 'TRUE' if int(sv) != 0 else 'FALSE'
                    continue
                # Unquoted integer literal (precompiled module-level pattern).
                if _RE_INTEGER_PATTERN.match(v):
                    cells[i] = 'TRUE' if int(v) != 0 else 'FALSE'
                    continue
            new_tuples.append('(' + ', '.join(cells) + ')')
        # Rebuild canonical head; quote the table name to preserve case
        # (PostgreSQL/KWDB folds unquoted identifiers to lowercase).
        cols_txt = f"({cols_group})" if cols_group and cols_group.strip() else ""
        quoted_table = f'"{original_table_name}"'
        head = f"INSERT INTO {quoted_table}{(' ' + cols_txt) if cols_txt else ''} VALUES"
        return head + ' ' + ', '.join(new_tuples) + ';'
    except Exception:
        # Best-effort: never fail the pipeline over a bool-literal rewrite.
        return raw_sql


def _extract_top_level_tuples_optimized(values_tail: str) -> List[str]:
    """
    Optimized version with fast path for simple cases (no nested structures, no special chars).
    Falls back to full state machine for complex cases.
    """
    # Fast path: Check if simple case (no quotes, no comments, no nested parens)
    # Simple heuristic: if no quotes and balanced parens at top level only
    has_quotes = "'" in values_tail or '"' in values_tail or '`' in values_tail
    has_comments = '--' in values_tail or '/*' in values_tail
    
    if not has_quotes and not has_comments:
        # Fast path: simple comma-separated tuples like (1,2,3), (4,5,6)
        # Split by '),' pattern and handle last tuple
        if values_tail.strip().startswith('('):
            # Quick split by '),' pattern
            parts = values_tail.split('),')
            tuples = []
            for i, part in enumerate(parts):
                part = part.strip()
                if not part:
                    continue
                if not part.startswith('('):
                    part = '(' + part
                if i < len(parts) - 1:
                    part = part + ')'
                elif not part.endswith(')'):
                    # Last part might not have closing paren if it's the last tuple
                    if part.count('(') > part.count(')'):
                        part = part + ')'
                tuples.append(part)
            if tuples:
                return tuples
    
    # Fall back to full state machine for complex cases
    return _extract_top_level_tuples(values_tail)


def _extract_top_level_tuples(values_tail: str) -> List[str]:
    """Extract the top-level '(...)' tuples from the text after VALUES.

    Quote- (', ", `), comment- (--, /* */) and paren-depth-aware scanner:
    parens and commas inside strings or comments are not structural. Each
    returned tuple keeps its surrounding parentheses; if no tuple ever
    closes, the leftover text is returned as a single entry.
    """
    tuples: List[str] = []
    buf: List[str] = []  # characters of the tuple being accumulated
    depth = 0  # paren nesting depth; a tuple ends when depth returns to 0
    in_single = in_double = in_backtick = False  # quote states
    in_ml = in_sl = False  # inside /* */ or -- comment
    i = 0
    n = len(values_tail)
    while i < n:
        ch = values_tail[i]
        nxt = values_tail[i+1] if i+1 < n else ''
        # Single-line comment: copy through to end of line.
        if in_sl:
            buf.append(ch)
            if ch == '\n': in_sl = False
            i += 1; continue
        # Multi-line comment: copy through to the closing */.
        if in_ml:
            buf.append(ch)
            if ch == '*' and nxt == '/': buf.append(nxt); i += 2; in_ml = False; continue
            i += 1; continue
        # Comment openers only count outside quoted strings.
        if not (in_single or in_double or in_backtick):
            if ch == '-' and nxt == '-': in_sl = True; buf.append(ch); buf.append(nxt); i += 2; continue
            if ch == '/' and nxt == '*': in_ml = True; buf.append(ch); buf.append(nxt); i += 2; continue
        # Quote toggles (each kind only toggles outside the other two).
        if ch == "'" and not (in_double or in_backtick): in_single = not in_single; buf.append(ch); i += 1; continue
        if ch == '"' and not (in_single or in_backtick): in_double = not in_double; buf.append(ch); i += 1; continue
        if ch == '`' and not (in_single or in_double): in_backtick = not in_backtick; buf.append(ch); i += 1; continue
        # Structural parens only count outside quotes.
        if ch == '(' and not (in_single or in_double or in_backtick): depth += 1
        if ch == ')' and not (in_single or in_double or in_backtick):
            depth = max(0, depth - 1)
            buf.append(ch)
            if depth == 0:
                # Tuple closed: emit it, then skip the separator.
                tuples.append(''.join(buf).strip())
                buf = []
                i += 1
                # skip comma and spaces
                while i < n and values_tail[i].isspace(): i += 1
                if i < n and values_tail[i] == ',':
                    i += 1
                    while i < n and values_tail[i].isspace(): i += 1
                continue
            i += 1
            continue
        buf.append(ch)
        i += 1
    # No tuple ever closed: return the remaining text as one entry.
    if not tuples and buf:
        rem = ''.join(buf).strip()
        if rem:
            tuples.append(rem)
    return tuples


def _split_tuple_cells_optimized(tuple_text: str) -> List[str]:
    """
    Optimized version with fast path for simple cases (no quotes, no nested structures).
    Falls back to full state machine for complex cases.
    """
    s = tuple_text.strip()
    inner = s[1:-1] if s.startswith('(') and s.endswith(')') else s
    
    # Fast path: Check if simple case (no quotes, no nested parens, no comments)
    has_quotes = "'" in inner or '"' in inner or '`' in inner
    has_nested = '(' in inner or ')' in inner
    has_comments = '--' in inner or '/*' in inner
    
    if not has_quotes and not has_nested and not has_comments:
        # Fast path: simple comma-separated values like 1, 2, 3
        cells = [cell.strip() for cell in inner.split(',')]
        return cells
    
    # Fall back to full state machine for complex cases
    return _split_tuple_cells(tuple_text)


def _split_tuple_cells(tuple_text: str) -> List[str]:
    """Split one '(...)' tuple into its top-level comma-separated cells.

    Quote- (', ", `), comment- (--, /* */) and paren-aware, so commas inside
    strings, comments or nested calls do not split a cell. Cells are
    returned stripped; an empty tuple yields [].
    """
    s = tuple_text.strip()
    # Drop the surrounding parens when present; otherwise scan the text as-is.
    inner = s[1:-1] if s.startswith('(') and s.endswith(')') else s
    cells: List[str] = []
    buf: List[str] = []  # characters of the cell being accumulated
    in_single = in_double = in_backtick = False  # quote states
    in_ml = in_sl = False  # inside /* */ or -- comment
    i = 0
    n = len(inner)
    depth = 0  # paren nesting depth; only depth-0 commas split cells
    while i < n:
        ch = inner[i]
        nxt = inner[i+1] if i+1 < n else ''
        # Single-line comment: copy through to end of line.
        if in_sl:
            buf.append(ch)
            if ch == '\n': in_sl = False
            i += 1; continue
        # Multi-line comment: copy through to the closing */.
        if in_ml:
            buf.append(ch)
            if ch == '*' and nxt == '/': buf.append(nxt); i += 2; in_ml = False; continue
            i += 1; continue
        # Comment openers only count outside quoted strings.
        if not (in_single or in_double or in_backtick):
            if ch == '-' and nxt == '-': in_sl = True; buf.append(ch); buf.append(nxt); i += 2; continue
            if ch == '/' and nxt == '*': in_ml = True; buf.append(ch); buf.append(nxt); i += 2; continue
        # Quote toggles (each kind only toggles outside the other two).
        if ch == "'" and not (in_double or in_backtick): in_single = not in_single; buf.append(ch); i += 1; continue
        if ch == '"' and not (in_single or in_backtick): in_double = not in_double; buf.append(ch); i += 1; continue
        if ch == '`' and not (in_single or in_double): in_backtick = not in_backtick; buf.append(ch); i += 1; continue
        # Track nesting so commas inside e.g. function calls don't split.
        if ch == '(' and not (in_single or in_double or in_backtick): depth += 1
        if ch == ')' and not (in_single or in_double or in_backtick): depth = max(0, depth-1)
        # A top-level comma ends the current cell.
        if ch == ',' and depth == 0 and not (in_single or in_double or in_backtick or in_ml or in_sl):
            cells.append(''.join(buf).strip())
            buf = []
            i += 1
            continue
        buf.append(ch)
        i += 1
    # Flush the final cell (a bare trailing comma leaves buf empty -> dropped).
    if buf:
        cells.append(''.join(buf).strip())
    return cells




