from __future__ import annotations

import json
import os
import re
import shutil
import tempfile
import threading
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional

from .config import MigratorConfig
from .parser import iter_parse_sql_text, iter_parse_sql_file
from .splitter import split_ddl_dml
from .transformer import transform_statements, transform_statements_split
from .transformer import _collect_table_schemas  # reuse DDL schema extractor

# Pre-compiled module-level regexes: these are matched against every
# passthrough INSERT statement, so compiling once avoids per-call work.

# Head of any INSERT: table token (optionally backtick/double-quoted and/or
# schema-qualified), optional parenthesized column list, then VALUES or
# SELECT.  Groups: 1=table token, 2=column list incl. parens, 3=column list
# body, 4=verb (VALUES/SELECT, original case), 5=rest of the statement.
_RE_INSERT_PATTERN = re.compile(
    r"^\s*INSERT\s+INTO\s+([`\"]?[\w\.]+[`\"]?)\s*(\((.*?)\))?\s+(VALUES|SELECT)\s*(.*)$",
    flags=re.IGNORECASE | re.DOTALL
)
# Same shape restricted to VALUES-form INSERTs.
# Groups: 1=table token, 2=column list incl. parens, 3=column list body,
# 4=rest of the statement (the VALUES tail).
_RE_INSERT_VALUES_PATTERN = re.compile(
    r"^\s*INSERT\s+INTO\s+([`\"]?[\w\.]+[`\"]?)\s*(\((.*?)\))?\s+VALUES\s*(.*)$",
    flags=re.IGNORECASE | re.DOTALL
)
# Optionally signed decimal integer literal (used for the 0/1 -> bool rewrite).
_RE_INTEGER_PATTERN = re.compile(r"^[+-]?\d+$")


@dataclass
class PipelineResult:
    """Result bundle produced by run_pipeline for the CLI layer."""

    # Combined SQL output; when the DML was spooled to dml_file_path this
    # may contain only DDL + constraints (the CLI streams the DML itself).
    output_sql: str
    # DDL statements (with any passthrough DROP DATABASE statements prepended).
    ddl_sql: str
    # DML statements; empty string when the DML lives in dml_file_path.
    dml_sql: str
    # Constraint statements produced by the split transformation.
    constraints_sql: str
    # Parse errors collected when cfg.on_error != "stop".
    errors: list[str]
    # Warnings produced by the transformation step.
    warnings: list[str]
    dml_file_path: Optional[Path] = None  # Optional temporary file path for DML (for streaming processing)


def _map_inserts(
    statements: List[str],
    table_schemas: dict,
    json_fix_status: Dict[str, bool],
    json_fix_lock: threading.Lock,
) -> List[str]:
    """Rewrite a batch of passthrough INSERT statements in parallel.

    Thin wrapper around _process_insert_statement so the call sites in
    run_pipeline share one thread-pool setup.
    """
    max_workers = max(1, (os.cpu_count() or 1))
    with ThreadPoolExecutor(max_workers=max_workers) as ex:
        return list(ex.map(
            lambda stmt: _process_insert_statement(stmt, table_schemas, json_fix_status, json_fix_lock),
            statements,
        ))


def run_pipeline(cfg: MigratorConfig) -> PipelineResult:
    """
    Run the SQL migration pipeline.

    The input is always read with the streaming parser so the whole dump is
    never held in memory at once.  Raw INSERT statements the parser produced
    no AST for are passed through: for inputs over 100MB they are spooled to
    a temporary file, otherwise kept in memory.

    Returns a PipelineResult; its dml_sql is empty when the DML was written
    to dml_file_path for streaming consumption by the CLI.

    Raises SystemExit on the first parse error when cfg.on_error == "stop".
    """
    input_path = Path(cfg.input_sql_path)

    errors: List[str] = []
    asts: list = []

    # Passthrough INSERTs (statements the parser produced no AST for).
    passthrough_inserts_file = None
    passthrough_inserts_path = None
    passthrough_inserts: List[str] = []  # only used for small inputs
    # Raw DROP DATABASE statements (in case the parser produced no AST).
    dropdb_passthrough: List[str] = []

    # Spool INSERTs to disk for large inputs to bound memory usage.
    file_size = input_path.stat().st_size
    use_file_for_inserts = file_size > 100 * 1024 * 1024  # 100MB threshold

    if use_file_for_inserts:
        fd, passthrough_inserts_path = tempfile.mkstemp(suffix='.sql', prefix='inserts_', text=True)
        passthrough_inserts_file = os.fdopen(fd, 'w', encoding='utf-8')

    try:
        for part in iter_parse_sql_file(input_path, read_dialect=cfg.dialect_read):
            if part.error:
                msg = f"Parse error: {part.error}\nOffending statement:\n{part.raw_sql.strip()}\n"
                if cfg.on_error == "stop":
                    raise SystemExit(msg)
                errors.append(msg)
                continue
            if part.ast is not None:
                asts.append(part.ast)
                continue
            raw = (part.raw_sql or "").strip()
            if not raw:
                continue
            up = raw.lstrip().upper()
            stmt = raw if raw.endswith(";") else raw + ";"
            if up.startswith("INSERT"):
                if use_file_for_inserts:
                    # Write directly to the spool to avoid memory accumulation.
                    passthrough_inserts_file.write(stmt + "\n")
                else:
                    passthrough_inserts.append(stmt)
            elif up.startswith("DROP DATABASE"):
                dropdb_passthrough.append(stmt)
    finally:
        # Close the spool file even if parsing aborts (on_error == "stop").
        if passthrough_inserts_file:
            passthrough_inserts_file.close()

    passthrough_inserts_file_path = Path(passthrough_inserts_path) if use_file_for_inserts else None

    # Split for potential parallel execution layers (not executed here).
    queues = split_ddl_dml(asts)

    # Collect schema info BEFORE any transformation mutates the AST
    # (preserves TINYINT(1) detection); shared by both transformations.
    table_schemas = _collect_table_schemas(queues.ddl)

    # Serialize back in order: DDL first, then DML, to keep dependencies sane.
    ordered = [*queues.ddl, *queues.dml]

    # transform_statements (combined output) is only needed when the caller
    # wants output_sql, i.e. when no output path was given.
    transformed_all = None
    if cfg.output_sql_path is None:
        transformed_all = transform_statements(ordered, table_schemas=table_schemas)

    # Always needed: split DDL / DML / constraints.
    transformed_split = transform_statements_split(ordered, table_schemas=table_schemas)

    # Per-table over-escaped-JSON status shared by all worker threads.
    json_fix_status: Dict[str, bool] = {}
    json_fix_lock = threading.Lock()

    batch_size = 10000  # number of INSERTs rewritten per parallel batch

    if passthrough_inserts_file_path:
        # Large input: stream INSERTs from the spool file, rewrite them in
        # batches, and write the result to a fresh temporary file.
        processed_fd, processed_path = tempfile.mkstemp(suffix='.sql', prefix='processed_inserts_', text=True)
        with os.fdopen(processed_fd, 'w', encoding='utf-8') as processed_file:
            with open(passthrough_inserts_file_path, 'r', encoding='utf-8') as src:
                batch: List[str] = []
                for line in src:
                    stmt = line.strip()
                    if stmt and stmt.upper().startswith('INSERT'):
                        batch.append(stmt)
                        if len(batch) >= batch_size:
                            processed_file.writelines(
                                p + "\n"
                                for p in _map_inserts(batch, table_schemas, json_fix_status, json_fix_lock)
                            )
                            batch = []
                if batch:
                    processed_file.writelines(
                        p + "\n"
                        for p in _map_inserts(batch, table_schemas, json_fix_status, json_fix_lock)
                    )
        # Swap the raw spool file for the processed one.
        try:
            passthrough_inserts_file_path.unlink()
        except Exception:
            pass  # best-effort cleanup of a temporary file
        passthrough_inserts_file_path = Path(processed_path)
    elif passthrough_inserts:
        # Small input: rewrite in memory, batched to cap peak usage.
        if len(passthrough_inserts) > batch_size:
            processed_inserts: List[str] = []
            for i in range(0, len(passthrough_inserts), batch_size):
                processed_inserts.extend(
                    _map_inserts(passthrough_inserts[i:i + batch_size],
                                 table_schemas, json_fix_status, json_fix_lock)
                )
            passthrough_inserts = processed_inserts
        else:
            passthrough_inserts = _map_inserts(passthrough_inserts, table_schemas,
                                               json_fix_status, json_fix_lock)

    # Prepend collected DROP DATABASE statements to the DDL output exactly
    # once, preserving their original order.  (Previously the header was
    # prepended a second time further down, duplicating the statements.)
    if dropdb_passthrough:
        transformed_split.ddl_sql = "\n".join(dropdb_passthrough) + "\n" + transformed_split.ddl_sql

    # Attach passthrough INSERTs to the DML output.
    dml_file_path = None
    if passthrough_inserts_file_path:
        # Large input: INSERTs stay on disk and are merged/streamed below.
        dml_file_path = passthrough_inserts_file_path
    elif passthrough_inserts:
        transformed_split.dml_sql += "\n".join(passthrough_inserts) + "\n"

    if cfg.output_sql_path is None:
        # migrate/kafka-migrate (or convert to stdout): merge all DML into a
        # single temporary file for downstream streaming.
        if dml_file_path or transformed_split.dml_sql:
            try:
                temp_fd, temp_path = tempfile.mkstemp(suffix='.sql', prefix='dml_', text=True)
                with os.fdopen(temp_fd, 'w', encoding='utf-8') as f:
                    # Transformed DML first, then the passthrough INSERTs.
                    if transformed_split.dml_sql:
                        f.write(transformed_split.dml_sql)
                    if dml_file_path and dml_file_path.exists():
                        with open(dml_file_path, 'r', encoding='utf-8') as src:
                            shutil.copyfileobj(src, f, 1024 * 1024)  # 1MB chunks
                        try:
                            dml_file_path.unlink()
                        except Exception:
                            pass  # best-effort cleanup of a temporary file
                dml_file_path = Path(temp_path)
                # Empty string signals "DML lives in dml_file_path".
                dml_sql_for_result = ""
            except Exception:
                # Temp-file creation failed: fall back to in-memory DML.
                dml_sql_for_result = transformed_split.dml_sql
        else:
            dml_sql_for_result = ""
    else:
        # convert with --out: the CLI streams dml_file_path itself when set.
        if dml_file_path and dml_file_path.exists():
            dml_sql_for_result = ""
        else:
            dml_sql_for_result = transformed_split.dml_sql

    # Assemble output_sql.  When the DML is file-based it is intentionally
    # omitted here; the CLI streams it from dml_file_path.
    if transformed_all:
        output_sql = transformed_all.sql
    elif dml_sql_for_result:
        output_sql = transformed_split.ddl_sql + "\n" + dml_sql_for_result + "\n" + transformed_split.constraints_sql
    else:
        output_sql = transformed_split.ddl_sql + "\n" + transformed_split.constraints_sql

    return PipelineResult(
        output_sql=output_sql,
        ddl_sql=transformed_split.ddl_sql,
        dml_sql=dml_sql_for_result,  # empty when the DML is in dml_file_path
        constraints_sql=transformed_split.constraints_sql,
        errors=errors,
        warnings=transformed_split.warnings,
        dml_file_path=dml_file_path if (dml_file_path and dml_file_path.exists()) else None,
    )


def _process_insert_statement(raw_sql: str, table_schemas: dict, json_fix_status: Dict[str, bool] = None, json_fix_lock: threading.Lock = None) -> str:
    """
    Optimized merged function that processes INSERT statements in a single pass:
    1. Normalizes table name (quotes to preserve case)
    2. Converts 0/1 to FALSE/TRUE for BOOL columns
    3. Fixes over-escaped JSON values (\" -> ") for JSON columns
    
    JSON fix status is tracked per table: each table checks its first INSERT statement's first row,
    and if any JSON column needs fixing, all rows of that table will be fixed.
    
    This replaces the previous two-pass approach (_rewrite_insert_bool_literals_text + _normalize_insert_head)
    for 30-50% performance improvement.
    """
    try:
        s = raw_sql.strip().rstrip(';')
        up = s.upper()
        if not up.startswith('INSERT'):
            return raw_sql
        
        # Use pre-compiled regex for better performance
        m = _RE_INSERT_PATTERN.match(s)
        if not m:
            return raw_sql
        
        table_token = m.group(1).strip()
        cols_wrap = m.group(2) or ""
        verb = m.group(4)
        tail = m.group(5)
        
        # Extract and normalize table name (always quote to preserve case)
        base = table_token.strip('`"')
        original_table_name = base.split('.')[-1]
        quoted_table = f'"{original_table_name}"'
        
        # Fast path: If not VALUES or no table schema, just normalize table name
        if verb != 'VALUES':
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} {verb}"
            return head + ' ' + tail + ';'
        
        # Check if table needs bool or json conversion (fast skip if not needed)
        table_name_lower = original_table_name.lower()
        schema = table_schemas.get(table_name_lower)
        if not schema:
            # No schema info, just normalize table name
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        bool_cols = schema.get('bool_cols', set())
        json_cols = schema.get('json_cols', set())
        if not bool_cols and not json_cols:
            # No bool or json columns, just normalize table name
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        # Need to process VALUES for bool and/or json conversion
        cols_group = m.group(3)
        values_tail = tail
        
        # Determine listed columns or default order
        if cols_group and cols_group.strip():
            listed_cols = [c.strip().strip('`"').lower() for c in cols_group.split(',')]
        else:
            listed_cols = list(schema.get('order', []))
        
        if not listed_cols:
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        bool_indexes = {i for i, c in enumerate(listed_cols) if c in bool_cols}
        json_indexes = {i for i, c in enumerate(listed_cols) if c in json_cols}
        
        if not bool_indexes and not json_indexes:
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        # Extract and rewrite tuples with optimized parsing
        tuples = _extract_top_level_tuples_optimized(values_tail)
        if not tuples:
            head = f"INSERT INTO {quoted_table}{(' ' + cols_wrap) if cols_wrap else ''} VALUES"
            return head + ' ' + tail + ';'
        
        # Get or determine JSON fix status for this table (per-table, thread-safe)
        needs_json_fix = False
        if json_indexes and json_fix_status is not None:
            # Check if this table's JSON fix status has been determined
            if table_name_lower not in json_fix_status:
                # First time seeing this table - check first row of this INSERT statement
                # This is the table's first INSERT statement (or at least the first one we process)
                import json
                if tuples:
                    first_row_cells = _split_tuple_cells_optimized(tuples[0])
                    if first_row_cells:
                        # Check all JSON columns in first row to determine if fix is needed
                        for i in json_indexes:
                            if i >= len(first_row_cells):
                                continue
                            v = first_row_cells[i].strip()
                            if v.upper() == 'NULL':
                                continue
                            # Remove outer quotes for checking
                            json_check = v
                            if v.startswith("'") and v.endswith("'"):
                                json_check = v[1:-1]
                            elif v.startswith('"') and v.endswith('"'):
                                json_check = v[1:-1]
                            
                            # Try to parse as JSON
                            try:
                                json.loads(json_check)
                                # Valid JSON, no fix needed for this column
                            except (json.JSONDecodeError, ValueError):
                                # Check if it's over-escaped (has \" sequences)
                                # The string may contain literal backslash+quote characters
                                # Check for both patterns: \" (backslash followed by quote) and \\" (double backslash)
                                # In Python string, '\\"' represents a backslash followed by a quote character
                                has_escaped_quote = '\\"' in json_check
                                if has_escaped_quote:
                                    # Try fixing it by replacing all \" with "
                                    # Use replace to handle all occurrences
                                    test_fixed = json_check.replace('\\"', '"')
                                    try:
                                        json.loads(test_fixed)
                                        # After fixing, it becomes valid JSON, so mark as needing fix
                                        needs_json_fix = True
                                        break  # Found one that needs fixing, no need to check others
                                    except (json.JSONDecodeError, ValueError):
                                        # Even after fixing, still invalid - might be a different issue
                                        # Don't mark as needing fix, keep original
                                        pass
                
                # Store the status for this table (thread-safe)
                if json_fix_lock:
                    with json_fix_lock:
                        # Double-check after acquiring lock (another thread might have set it)
                        if table_name_lower not in json_fix_status:
                            json_fix_status[table_name_lower] = needs_json_fix
                        else:
                            needs_json_fix = json_fix_status[table_name_lower]
                else:
                    if json_fix_status is not None:
                        json_fix_status[table_name_lower] = needs_json_fix
            else:
                # Table's JSON fix status already determined, use cached value
                if json_fix_status is not None:
                    needs_json_fix = json_fix_status[table_name_lower]
        
        import json
        new_tuples: List[str] = []
        for row_idx, tup in enumerate(tuples):
            cells = _split_tuple_cells_optimized(tup)
            if not cells:
                new_tuples.append(tup)
                continue
            
            # Process each cell in a single pass (both BOOL and JSON)
            for i in range(len(cells)):
                v = cells[i].strip()
                
                # Skip NULL
                if v.upper() == 'NULL':
                    continue
                
                # Process BOOL columns
                if i in bool_indexes:
                    # Quoted numeric string
                    if (v.startswith("'") and v.endswith("'")) or (v.startswith('"') and v.endswith('"')):
                        sv = v[1:-1]
                        if sv.isdigit():
                            cells[i] = 'TRUE' if int(sv) != 0 else 'FALSE'
                            continue
                    # Unquoted integer - use pre-compiled regex
                    if _RE_INTEGER_PATTERN.match(v):
                        cells[i] = 'TRUE' if int(v) != 0 else 'FALSE'
                        continue
                
                # Process JSON columns (apply fix if needed)
                # Only fix if table-level detection (first row) determined it's needed
                if i in json_indexes and needs_json_fix:
                    # Fix over-escaped JSON: remove all backslashes before quotes
                    # Original MySQL format: '{\"theme\": \"theme2\"}' -> '{"theme": "theme2"}'
                    original_v = v
                    if v.startswith("'") and v.endswith("'"):
                        # Single-quoted JSON string
                        json_content = v[1:-1]
                        # Replace all \" with " (remove backslashes before quotes)
                        fixed_content = json_content.replace('\\"', '"')
                        # Verify the fixed content is valid JSON
                        try:
                            json.loads(fixed_content)
                            # Write back with single quotes - the fixed_content now has proper JSON format
                            cells[i] = f"'{fixed_content}'"
                        except (json.JSONDecodeError, ValueError):
                            # If still invalid after fixing, keep original
                            cells[i] = original_v
                    elif v.startswith('"') and v.endswith('"'):
                        # Double-quoted JSON string
                        json_content = v[1:-1]
                        # Replace all \" with "
                        fixed_content = json_content.replace('\\"', '"')
                        try:
                            json.loads(fixed_content)
                            # For double-quoted SQL strings, escape double quotes in JSON for SQL
                            escaped_content = fixed_content.replace('"', '\\"')
                            cells[i] = f'"{escaped_content}"'
                        except (json.JSONDecodeError, ValueError):
                            cells[i] = original_v
                    else:
                        # No outer quotes, fix directly
                        fixed_content = v.replace('\\"', '"')
                        try:
                            json.loads(fixed_content)
                            cells[i] = fixed_content
                        except (json.JSONDecodeError, ValueError):
                            cells[i] = original_v
            
            new_tuples.append('(' + ', '.join(cells) + ')')
        
        # Rebuild statement with normalized table name
        cols_txt = f"({cols_group})" if cols_group and cols_group.strip() else ""
        head = f"INSERT INTO {quoted_table}{(' ' + cols_txt) if cols_txt else ''} VALUES"
        return head + ' ' + ', '.join(new_tuples) + ';'
    except Exception:
        return raw_sql


# Keep old functions for backward compatibility (deprecated, use _process_insert_statement instead)
def _normalize_insert_head(raw_sql: str) -> str:
    """Deprecated shim kept for backward compatibility.

    Delegates to _process_insert_statement with no schema information, so
    only the INSERT head (quoted table name) is normalized.
    """
    return _process_insert_statement(raw_sql, {}, json_fix_status=None, json_fix_lock=None)


def _rewrite_insert_bool_literals_text(raw_sql: str, table_schemas: dict) -> str:
    """Text-based rewrite for INSERT ... VALUES ..., converting 0/1 to FALSE/TRUE
    in columns that will become BOOL (collected from DDL TINYINT(1)).
    Safe no-op if table or columns are not known.
    """
    try:
        s = raw_sql.strip().rstrip(';')
        up = s.upper()
        if not up.startswith('INSERT'):
            return raw_sql
        # Extract table and optional columns list
        import re
        m = re.match(r"^\s*INSERT\s+INTO\s+([`\"]?[\w\.]+[`\"]?)\s*(\((.*?)\))?\s+VALUES\s*(.*)$",
                     s, flags=re.IGNORECASE | re.DOTALL)
        if not m:
            return raw_sql
        table_token = m.group(1).strip()
        base = table_token.strip('`"')
        # Support optional schema/database prefix: db.table -> table
        # Preserve original table name for quoting, but use lowercase for schema lookup
        original_table_name = base.split('.')[-1]
        table_name_lower = original_table_name.lower()
        cols_group = m.group(3)
        values_tail = m.group(4)
        schema = table_schemas.get(table_name_lower)
        if not schema:
            return raw_sql
        bool_cols = set(schema.get('bool_cols', set()))
        if not bool_cols:
            return raw_sql
        # Determine listed columns or default order
        if cols_group and cols_group.strip():
            listed_cols = [c.strip().strip('`"').lower() for c in cols_group.split(',')]
        else:
            listed_cols = list(schema.get('order', []))
        if not listed_cols:
            return raw_sql
        bool_indexes = {i for i, c in enumerate(listed_cols) if c in bool_cols}
        if not bool_indexes:
            return raw_sql
        # Split tail into top-level tuples and rewrite
        tuples = _extract_top_level_tuples(values_tail)
        if not tuples:
            return raw_sql
        new_tuples: List[str] = []
        for tup in tuples:
            cells = _split_tuple_cells(tup)
            if not cells:
                new_tuples.append(tup)
                continue
            for i in bool_indexes:
                if i >= len(cells):
                    continue
                v = cells[i].strip()
                # Skip NULL
                if v.upper() == 'NULL':
                    continue
                # Quoted numeric string
                if (v.startswith("'") and v.endswith("'")) or (v.startswith('"') and v.endswith('"')):
                    sv = v[1:-1]
                    if sv.isdigit():
                        cells[i] = 'TRUE' if int(sv) != 0 else 'FALSE'
                    continue
                # Unquoted integer
                if re.match(r"^[+-]?\d+$", v):
                    cells[i] = 'TRUE' if int(v) != 0 else 'FALSE'
                    continue
            new_tuples.append('(' + ', '.join(cells) + ')')
        # Rebuild canonical head: quote table name to preserve case
        cols_txt = f"({cols_group})" if cols_group and cols_group.strip() else ""
        # Use original table name (preserving case) and quote it
        # Quote table name to preserve case (PostgreSQL/KWDB converts unquoted identifiers to lowercase)
        quoted_table = f'"{original_table_name}"'
        head = f"INSERT INTO {quoted_table}{(' ' + cols_txt) if cols_txt else ''} VALUES"
        return head + ' ' + ', '.join(new_tuples) + ';'
    except Exception:
        return raw_sql


def _extract_top_level_tuples_optimized(values_tail: str) -> List[str]:
    """
    Optimized version with fast path for simple cases (no nested structures, no special chars).
    Falls back to full state machine for complex cases.
    """
    # Fast path: Check if simple case (no quotes, no comments, no nested parens)
    # Simple heuristic: if no quotes and balanced parens at top level only
    has_quotes = "'" in values_tail or '"' in values_tail or '`' in values_tail
    has_comments = '--' in values_tail or '/*' in values_tail
    
    if not has_quotes and not has_comments:
        # Fast path: simple comma-separated tuples like (1,2,3), (4,5,6)
        # Split by '),' pattern and handle last tuple
        if values_tail.strip().startswith('('):
            # Quick split by '),' pattern
            parts = values_tail.split('),')
            tuples = []
            for i, part in enumerate(parts):
                part = part.strip()
                if not part:
                    continue
                if not part.startswith('('):
                    part = '(' + part
                if i < len(parts) - 1:
                    part = part + ')'
                elif not part.endswith(')'):
                    # Last part might not have closing paren if it's the last tuple
                    if part.count('(') > part.count(')'):
                        part = part + ')'
                tuples.append(part)
            if tuples:
                return tuples
    
    # Fall back to full state machine for complex cases
    return _extract_top_level_tuples(values_tail)


def _extract_top_level_tuples(values_tail: str) -> List[str]:
    """
    Split the VALUES tail of an INSERT into top-level '(...)' tuple strings.

    Character-level state machine tracking:
      - paren nesting depth (a tuple is complete when depth returns to 0),
      - single-/double-/backtick-quote state (parens and commas inside a
        quoted string are copied as literal text),
      - '--' line comments and '/* */' block comments (copied verbatim).

    The comma and whitespace between consecutive tuples are consumed and not
    included in the output.  If no complete tuple was found, any non-empty
    remainder is returned as a single item; otherwise trailing residue after
    the last complete tuple is discarded.
    """
    tuples: List[str] = []
    # buf accumulates the characters of the tuple currently being scanned.
    buf: List[str] = []
    depth = 0
    in_single = in_double = in_backtick = False
    # in_ml: inside '/* */' block comment; in_sl: inside '--' line comment.
    in_ml = in_sl = False
    i = 0
    n = len(values_tail)
    while i < n:
        ch = values_tail[i]
        nxt = values_tail[i+1] if i+1 < n else ''
        # Inside a line comment: copy verbatim; newline ends it.
        if in_sl:
            buf.append(ch)
            if ch == '\n': in_sl = False
            i += 1; continue
        # Inside a block comment: copy verbatim until the closing '*/'.
        if in_ml:
            buf.append(ch)
            if ch == '*' and nxt == '/': buf.append(nxt); i += 2; in_ml = False; continue
            i += 1; continue
        # Comment openers are only meaningful outside quoted strings.
        if not (in_single or in_double or in_backtick):
            if ch == '-' and nxt == '-': in_sl = True; buf.append(ch); buf.append(nxt); i += 2; continue
            if ch == '/' and nxt == '*': in_ml = True; buf.append(ch); buf.append(nxt); i += 2; continue
        # Quote toggles. A doubled quote ('') simply toggles twice, which
        # keeps the state correct for SQL-style escaped quotes.
        if ch == "'" and not (in_double or in_backtick): in_single = not in_single; buf.append(ch); i += 1; continue
        if ch == '"' and not (in_single or in_backtick): in_double = not in_double; buf.append(ch); i += 1; continue
        if ch == '`' and not (in_single or in_double): in_backtick = not in_backtick; buf.append(ch); i += 1; continue
        # Parens count toward depth only when outside all quote kinds.
        if ch == '(' and not (in_single or in_double or in_backtick): depth += 1
        if ch == ')' and not (in_single or in_double or in_backtick):
            # Clamp at 0 so stray closing parens can't drive depth negative.
            depth = max(0, depth - 1)
            buf.append(ch)
            if depth == 0:
                # Top-level tuple complete: emit it and reset the buffer.
                tuples.append(''.join(buf).strip())
                buf = []
                i += 1
                # skip comma and spaces
                while i < n and values_tail[i].isspace(): i += 1
                if i < n and values_tail[i] == ',':
                    i += 1
                    while i < n and values_tail[i].isspace(): i += 1
                continue
            i += 1
            continue
        buf.append(ch)
        i += 1
    # No complete tuple found: return the stripped remainder, if any,
    # so malformed-but-present content is not silently dropped.
    if not tuples and buf:
        rem = ''.join(buf).strip()
        if rem:
            tuples.append(rem)
    return tuples


def _split_tuple_cells_optimized(tuple_text: str) -> List[str]:
    """
    Optimized version with fast path for simple cases (no quotes, no nested structures).
    Falls back to full state machine for complex cases.
    """
    s = tuple_text.strip()
    inner = s[1:-1] if s.startswith('(') and s.endswith(')') else s
    
    # Fast path: Check if simple case (no quotes, no nested parens, no comments)
    has_quotes = "'" in inner or '"' in inner or '`' in inner
    has_nested = '(' in inner or ')' in inner
    has_comments = '--' in inner or '/*' in inner
    
    if not has_quotes and not has_nested and not has_comments:
        # Fast path: simple comma-separated values like 1, 2, 3
        cells = [cell.strip() for cell in inner.split(',')]
        return cells
    
    # Fall back to full state machine for complex cases
    return _split_tuple_cells(tuple_text)


def _split_tuple_cells(tuple_text: str) -> List[str]:
    """
    Split a tuple's text into its top-level comma-separated cells.

    Character-level state machine over the tuple's inner text (outer parens
    stripped when present) that tracks:
      - paren nesting depth (commas inside nested parens do not split),
      - single-/double-/backtick-quote state (commas inside quoted strings
        are literal text),
      - '--' line comments and '/* */' block comments (copied verbatim).

    Each emitted cell is stripped of surrounding whitespace.  An empty or
    all-whitespace inner text yields no cells (empty buffer is dropped).
    """
    s = tuple_text.strip()
    # Remove the outer parentheses only when both are present.
    inner = s[1:-1] if s.startswith('(') and s.endswith(')') else s
    cells: List[str] = []
    # buf accumulates the characters of the current cell.
    buf: List[str] = []
    in_single = in_double = in_backtick = False
    # in_ml: inside '/* */' block comment; in_sl: inside '--' line comment.
    in_ml = in_sl = False
    i = 0
    n = len(inner)
    depth = 0
    while i < n:
        ch = inner[i]
        nxt = inner[i+1] if i+1 < n else ''
        # Inside a line comment: copy verbatim; newline ends it.
        if in_sl:
            buf.append(ch)
            if ch == '\n': in_sl = False
            i += 1; continue
        # Inside a block comment: copy verbatim until the closing '*/'.
        if in_ml:
            buf.append(ch)
            if ch == '*' and nxt == '/': buf.append(nxt); i += 2; in_ml = False; continue
            i += 1; continue
        # Comment openers are only meaningful outside quoted strings.
        if not (in_single or in_double or in_backtick):
            if ch == '-' and nxt == '-': in_sl = True; buf.append(ch); buf.append(nxt); i += 2; continue
            if ch == '/' and nxt == '*': in_ml = True; buf.append(ch); buf.append(nxt); i += 2; continue
        # Quote toggles. A doubled quote ('') simply toggles twice, which
        # keeps the state correct for SQL-style escaped quotes.
        if ch == "'" and not (in_double or in_backtick): in_single = not in_single; buf.append(ch); i += 1; continue
        if ch == '"' and not (in_single or in_backtick): in_double = not in_double; buf.append(ch); i += 1; continue
        if ch == '`' and not (in_single or in_double): in_backtick = not in_backtick; buf.append(ch); i += 1; continue
        # Track nesting so commas inside e.g. function calls don't split.
        if ch == '(' and not (in_single or in_double or in_backtick): depth += 1
        if ch == ')' and not (in_single or in_double or in_backtick): depth = max(0, depth-1)
        # A comma splits only at depth 0 and outside quotes/comments.
        if ch == ',' and depth == 0 and not (in_single or in_double or in_backtick or in_ml or in_sl):
            cells.append(''.join(buf).strip())
            buf = []
            i += 1
            continue
        buf.append(ch)
        i += 1
    # Flush the final cell (there is no trailing comma to trigger it).
    if buf:
        cells.append(''.join(buf).strip())
    return cells




