"""
Coordinator controller for orchestrating DDL → INSERT → Constraints execution
with Kafka-based parallel insertion.
"""
from __future__ import annotations

import logging
import queue
import signal
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Any, Dict, List, Optional

from .db_executor import KWDBExecutor
from .kafka_consumer import KafkaInsertConsumer
from .kafka_producer import KafkaInsertProducer

logger = logging.getLogger(__name__)


class KafkaMigrationController:
    """Controller for orchestrating migration with Kafka-based parallel inserts."""
    
    def __init__(
        self,
        # Database connection
        db_connection_string: Optional[str] = None,
        db_host: Optional[str] = None,
        db_port: Optional[int] = None,
        db_user: Optional[str] = None,
        db_password: Optional[str] = None,
        db_database: Optional[str] = None,
        # Kafka connection
        kafka_servers: str | List[str] = "localhost:9092",
        kafka_topic_prefix: str = "sql_migration_insert",
        # Processing config
        max_message_size: int = 134217728,  # 128MB safer default; align with broker/topic message.max.bytes
        max_parallel_consumers: Optional[int] = None,
        max_retries: int = 3,
        # Database connection pool config
        db_min_connections: int = 2,
        db_max_connections: int = 20,  # Increased for better parallelism
        # Kafka topic config
        topic_partitions: int = 4,  # Number of partitions per topic
        consumers_per_table: int = 2,  # Number of consumer instances per table (for parallelism)
    ):
        """
        Initialize migration controller.
        
        Args:
            db_connection_string: PostgreSQL connection string (takes precedence over individual params)
                Format: postgresql://user:password@host:port/database?sslmode=disable
            db_host: KWDB host (ignored if db_connection_string provided)
            db_port: KWDB port (ignored if db_connection_string provided)
            db_user: KWDB user (ignored if db_connection_string provided)
            db_password: KWDB password (ignored if db_connection_string provided)
            db_database: KWDB database name (ignored if db_connection_string provided)
            kafka_servers: Kafka broker addresses
            kafka_topic_prefix: Prefix for Kafka topic names
            max_message_size: Maximum message size in bytes (default: 128MB; should match
                the Kafka broker/topic ``message.max.bytes`` configuration)
            max_parallel_consumers: Maximum parallel consumers (None = auto)
            max_retries: Maximum retries for failed inserts
            db_min_connections: Minimum number of connections kept in the DB pool
            db_max_connections: Maximum number of connections allowed in the DB pool
            topic_partitions: Number of partitions created per Kafka topic
            consumers_per_table: Number of consumer instances per table (for parallelism)
        """
        self.db_executor = KWDBExecutor(
            connection_string=db_connection_string,
            host=db_host,
            port=db_port,
            user=db_user,
            password=db_password,
            database=db_database,
            min_connections=db_min_connections,
            max_connections=db_max_connections,
        )
        
        self.producer = KafkaInsertProducer(
            bootstrap_servers=kafka_servers,
            topic_prefix=kafka_topic_prefix,
            max_message_size=max_message_size,
            topic_partitions=topic_partitions,
        )
        
        self.consumers_per_table = consumers_per_table
        
        # Generate unique consumer group ID suffix to avoid consuming old messages.
        # This ensures each migration run uses a fresh consumer group.
        # (Uses the module-level `time` import; the previous local re-import was redundant.)
        unique_suffix = f"{int(time.time() * 1000)}"  # Timestamp in milliseconds
        
        self.consumer = KafkaInsertConsumer(
            bootstrap_servers=kafka_servers,
            topic_prefix=kafka_topic_prefix,
            unique_group_id_suffix=unique_suffix,
        )
        
        self.max_parallel_consumers = max_parallel_consumers
        self.max_retries = max_retries
        
        # Track if cleanup is needed
        self._cleanup_needed = False
        self._closed = False  # Track if already closed
        self._kafka_cleaned = False  # Track if Kafka resources have been cleaned
        self._setup_signal_handlers()
        self._tune_third_party_loggers()
        
        logger.info("Initialized KafkaMigrationController")
    
    def _setup_signal_handlers(self):
        """Setup signal handlers for graceful shutdown and cleanup."""
        def signal_handler(signum, frame):
            logger.warning(f"Received signal {signum}, initiating cleanup...")
            self._cleanup_resources()
            sys.exit(1)
        
        # Register handlers for common interrupt signals
        signal.signal(signal.SIGINT, signal_handler)  # Ctrl+C
        signal.signal(signal.SIGTERM, signal_handler)  # Termination signal
    
    def _create_checkpoint(self) -> Dict[str, set]:
        """
        Snapshot the current database state.

        Records the names of every object visible in the current schema
        (tables, sequences, views, functions, composite types, schemas) so a
        later rollback can identify anything created after this point.
        Failures are logged and a partial/empty snapshot is returned rather
        than raising.

        Returns:
            Dictionary with sets of object names by type, plus a
            'current_schema' entry holding the active schema name.
        """
        snapshot: Dict[str, set] = {
            'tables': set(),
            'sequences': set(),
            'views': set(),
            'functions': set(),
            'types': set(),
            'schemas': set(),
        }
        
        connection = None
        try:
            connection = self.db_executor._get_valid_connection()
            with connection.cursor() as cursor:
                # Remember which schema this snapshot was taken in.
                cursor.execute("SELECT current_schema()")
                snapshot['current_schema'] = cursor.fetchone()[0]
                
                # Base tables in the current schema
                cursor.execute("""
                    SELECT table_name
                    FROM information_schema.tables
                    WHERE table_schema = current_schema() 
                    AND table_type = 'BASE TABLE'
                """)
                snapshot['tables'] = {row[0] for row in cursor.fetchall()}
                
                # Sequences
                cursor.execute("""
                    SELECT sequence_name
                    FROM information_schema.sequences
                    WHERE sequence_schema = current_schema()
                """)
                snapshot['sequences'] = {row[0] for row in cursor.fetchall()}
                
                # Views
                cursor.execute("""
                    SELECT table_name
                    FROM information_schema.views
                    WHERE table_schema = current_schema()
                """)
                snapshot['views'] = {row[0] for row in cursor.fetchall()}
                
                # Functions (procedures are not tracked)
                cursor.execute("""
                    SELECT routine_name
                    FROM information_schema.routines
                    WHERE routine_schema = current_schema()
                    AND routine_type = 'FUNCTION'
                """)
                snapshot['functions'] = {row[0] for row in cursor.fetchall()}
                
                # Custom composite types only (typtype = 'c')
                cursor.execute("""
                    SELECT typname
                    FROM pg_type t
                    JOIN pg_namespace n ON n.oid = t.typnamespace
                    WHERE n.nspname = current_schema()
                    AND t.typtype = 'c'  -- composite types
                """)
                snapshot['types'] = {row[0] for row in cursor.fetchall()}
                
                # All schemas except the built-in system ones
                cursor.execute("""
                    SELECT schema_name
                    FROM information_schema.schemata
                    WHERE schema_name NOT IN ('information_schema', 'pg_catalog', 'pg_toast', 'pg_temp_1', 'pg_toast_temp_1')
                """)
                snapshot['schemas'] = {row[0] for row in cursor.fetchall()}
                
                logger.info(f"✓ Checkpoint created: {len(snapshot['tables'])} tables, "
                          f"{len(snapshot['sequences'])} sequences, "
                          f"{len(snapshot['views'])} views, "
                          f"{len(snapshot['functions'])} functions, "
                          f"{len(snapshot['types'])} types")
        except Exception as e:
            logger.warning(f"Failed to create checkpoint: {e}")
        finally:
            # Return the connection to the pool; fall back to closing it outright.
            if connection is not None:
                try:
                    self.db_executor.pool.putconn(connection)
                except Exception:
                    try:
                        connection.close()
                    except Exception:
                        pass
        
        return snapshot
    
    def _rollback_to_checkpoint(self, checkpoint: Dict[str, set], ddl_sql: str = ""):
        """
        Rollback database to checkpoint state by dropping all objects created after checkpoint.

        Diffs the current contents of the active schema against the snapshot
        taken by _create_checkpoint and drops every view, table, sequence,
        function and composite type that did not exist at checkpoint time.
        If ddl_sql contains a CREATE DATABASE statement, that database is
        also dropped (via a temporary connection to a different database).

        Best-effort: every failure is logged and swallowed so rollback never
        raises into the caller.

        Args:
            checkpoint: Checkpoint dictionary created by _create_checkpoint
            ddl_sql: Optional DDL SQL to extract object names (for fallback)
        """
        import re
        
        try:
            conn = self.db_executor._get_valid_connection()
            try:
                with conn.cursor() as cursor:
                    current_schema = checkpoint.get('current_schema', 'public')
                    
                    # Get current state
                    cursor.execute("SELECT current_schema()")
                    actual_schema = cursor.fetchone()[0]
                    
                    # A mismatch means the diff below may compare against the
                    # wrong schema; we warn but proceed anyway.
                    if actual_schema != current_schema:
                        logger.warning(f"Schema mismatch: checkpoint was in '{current_schema}', current is '{actual_schema}'")
                    
                    # Get current tables
                    cursor.execute("""
                        SELECT table_name
                        FROM information_schema.tables
                        WHERE table_schema = current_schema() 
                        AND table_type = 'BASE TABLE'
                    """)
                    current_tables = {row[0] for row in cursor.fetchall()}
                    
                    # Get current sequences
                    cursor.execute("""
                        SELECT sequence_name
                        FROM information_schema.sequences
                        WHERE sequence_schema = current_schema()
                    """)
                    current_sequences = {row[0] for row in cursor.fetchall()}
                    
                    # Get current views
                    cursor.execute("""
                        SELECT table_name
                        FROM information_schema.views
                        WHERE table_schema = current_schema()
                    """)
                    current_views = {row[0] for row in cursor.fetchall()}
                    
                    # Get current functions
                    cursor.execute("""
                        SELECT routine_name
                        FROM information_schema.routines
                        WHERE routine_schema = current_schema()
                        AND routine_type = 'FUNCTION'
                    """)
                    current_functions = {row[0] for row in cursor.fetchall()}
                    
                    # Get current types (composite types only, matching _create_checkpoint)
                    cursor.execute("""
                        SELECT typname
                        FROM pg_type t
                        JOIN pg_namespace n ON n.oid = t.typnamespace
                        WHERE n.nspname = current_schema()
                        AND t.typtype = 'c'
                    """)
                    current_types = {row[0] for row in cursor.fetchall()}
                    
                    # Find objects to drop (objects that exist now but not in checkpoint)
                    tables_to_drop = current_tables - checkpoint.get('tables', set())
                    sequences_to_drop = current_sequences - checkpoint.get('sequences', set())
                    views_to_drop = current_views - checkpoint.get('views', set())
                    functions_to_drop = current_functions - checkpoint.get('functions', set())
                    types_to_drop = current_types - checkpoint.get('types', set())
                    
                    dropped_count = 0
                    
                    # Drop views first (they may depend on tables)
                    if views_to_drop:
                        logger.info(f"  Dropping {len(views_to_drop)} views...")
                        for view_name in views_to_drop:
                            try:
                                cursor.execute(f'DROP VIEW IF EXISTS "{view_name}" CASCADE')
                                logger.info(f"    Dropped view: {view_name}")
                                dropped_count += 1
                            except Exception as e:
                                logger.warning(f"    Failed to drop view {view_name}: {e}")
                    
                    # Drop tables (CASCADE will handle dependencies)
                    if tables_to_drop:
                        logger.info(f"  Dropping {len(tables_to_drop)} tables...")
                        # Drop in reverse order to handle foreign keys
                        # NOTE(review): reverse-alphabetical order only helps FK
                        # chains by coincidence; CASCADE is the real safety net.
                        for table_name in reversed(sorted(tables_to_drop)):
                            try:
                                cursor.execute(f'DROP TABLE IF EXISTS "{table_name}" CASCADE')
                                logger.info(f"    Dropped table: {table_name}")
                                dropped_count += 1
                            except Exception as e:
                                logger.warning(f"    Failed to drop table {table_name}: {e}")
                    
                    # Drop sequences
                    if sequences_to_drop:
                        logger.info(f"  Dropping {len(sequences_to_drop)} sequences...")
                        for seq_name in sequences_to_drop:
                            try:
                                cursor.execute(f'DROP SEQUENCE IF EXISTS "{seq_name}" CASCADE')
                                logger.info(f"    Dropped sequence: {seq_name}")
                                dropped_count += 1
                            except Exception as e:
                                logger.warning(f"    Failed to drop sequence {seq_name}: {e}")
                    
                    # Drop functions
                    if functions_to_drop:
                        logger.info(f"  Dropping {len(functions_to_drop)} functions...")
                        for func_name in functions_to_drop:
                            try:
                                # Functions need signature, but we'll try simple drop first
                                # (an unqualified DROP FUNCTION fails for overloaded names).
                                cursor.execute(f'DROP FUNCTION IF EXISTS "{func_name}" CASCADE')
                                logger.info(f"    Dropped function: {func_name}")
                                dropped_count += 1
                            except Exception as e:
                                logger.warning(f"    Failed to drop function {func_name}: {e}")
                    
                    # Drop types
                    if types_to_drop:
                        logger.info(f"  Dropping {len(types_to_drop)} types...")
                        for type_name in types_to_drop:
                            try:
                                cursor.execute(f'DROP TYPE IF EXISTS "{type_name}" CASCADE')
                                logger.info(f"    Dropped type: {type_name}")
                                dropped_count += 1
                            except Exception as e:
                                logger.warning(f"    Failed to drop type {type_name}: {e}")
                    
                    # Persist all drops in one commit.
                    conn.commit()
                    
                    if dropped_count > 0:
                        logger.info(f"✓ Rollback completed: dropped {dropped_count} objects")
                    else:
                        logger.info("✓ Rollback completed: no objects to drop")
                    
                    # If database was created, drop it
                    if ddl_sql:
                        db_pattern = r'CREATE\s+DATABASE\s+(?:IF\s+NOT\s+EXISTS\s+)?(?:["`])?(\w+)(?:["`])?'
                        db_match = re.search(db_pattern, ddl_sql, re.IGNORECASE)
                        if db_match:
                            created_database = db_match.group(1)
                            logger.info(f"  Dropping database: {created_database}")
                            try:
                                from .db_executor import parse_connection_string
                                conn_params = parse_connection_string(self.db_executor.connection_string)
                                current_db = conn_params.get('database', 'defaultdb')
                                
                                # A database cannot be dropped from a connection to
                                # itself, so pick an alternate database to connect to.
                                # NOTE(review): the final else assumes 'postgres'
                                # always exists — confirm for KWDB deployments.
                                if current_db == 'postgres':
                                    default_db = 'defaultdb'
                                elif current_db == 'defaultdb':
                                    default_db = 'postgres'
                                else:
                                    default_db = 'postgres'
                                
                                import psycopg2
                                temp_conn = psycopg2.connect(
                                    host=conn_params['host'],
                                    port=conn_params['port'],
                                    user=conn_params['user'],
                                    password=conn_params.get('password', ''),
                                    database=default_db,
                                    sslmode=conn_params.get('sslmode', 'disable')
                                )
                                try:
                                    # DROP DATABASE cannot run inside a transaction.
                                    temp_conn.autocommit = True
                                    with temp_conn.cursor() as temp_cursor:
                                        temp_cursor.execute(f'DROP DATABASE IF EXISTS "{created_database}"')
                                    logger.info(f"  ✓ Dropped database: {created_database}")
                                finally:
                                    temp_conn.close()
                            except Exception as db_drop_err:
                                logger.warning(f"  Failed to drop database {created_database}: {db_drop_err}")
            finally:
                # Return the connection to the pool; close outright as a fallback.
                try:
                    self.db_executor.pool.putconn(conn)
                except Exception:
                    try:
                        conn.close()
                    except Exception:
                        pass
        except Exception as e:
            logger.error(f"Error during rollback to checkpoint: {e}", exc_info=True)
    
    def _cleanup_kafka_resources(self):
        """Cleanup only Kafka resources (topics, producer, consumer) without closing DB connections."""
        if self._kafka_cleaned:
            return  # Already cleaned
        
        try:
            # Cleanup producer topics
            if self.producer:
                try:
                    self.producer.cleanup_topics()
                except Exception as e:
                    logger.debug(f"Error cleaning up topics: {e}")
            
            # Note: We don't close producer/consumer connections here explicitly
            # as they will be cleaned up by Python's garbage collector
            # The important part is cleaning up Kafka topics to free up broker resources
            self._kafka_cleaned = True
        except Exception as e:
            logger.warning(f"Error cleaning up Kafka resources: {e}")
    
    def _tune_third_party_loggers(self):
        """Reduce noisy logs from kafka-python; configurable via KWDB_KAFKA_LOG_LEVEL."""
        try:
            import os as _os
            import logging as _logging
            level_name = (_os.environ.get('KWDB_KAFKA_LOG_LEVEL') or 'WARNING').upper()
            level = getattr(_logging, level_name, _logging.WARNING)
            for name in (
                'kafka',
                'kafka.client',
                'kafka.conn',
                'kafka.producer',
                'kafka.consumer',
                'kafka.coordinator',
                'kafka.cluster',
                'kafka.protocol',
            ):
                _logging.getLogger(name).setLevel(level)
        except Exception:
            pass
    
    def _cleanup_resources(self):
        """Cleanup Kafka topics and connections."""
        if self._closed:
            return  # Already cleaned up
        
        try:
            logger.info("Cleaning up Kafka resources...")
            
            # Cleanup producer topics first (before closing connections)
            if self.producer:
                try:
                    self.producer.cleanup_topics()
                except Exception as e:
                    logger.warning(f"Error cleaning up topics: {e}")
            
            # Close connections
            self._close_internal()
            
            logger.info("Cleanup completed")
        except Exception as e:
            logger.warning(f"Error during cleanup: {e}", exc_info=True)
        finally:
            self._closed = True
    
    def execute_migration(
        self,
        ddl_file: Path | str,
        insert_file: Path | str,
        constraints_file: Path | str,
    ) -> Dict[str, any]:
        """
        Execute complete migration: DDL → INSERT (via Kafka) → Constraints.
        
        Args:
            ddl_file: Path to DDL SQL file
            insert_file: Path to INSERT SQL file
            constraints_file: Path to constraints/indexes SQL file
            
        Returns:
            Dictionary with execution results and statistics
        """
        ddl_path = Path(ddl_file)
        insert_path = Path(insert_file)
        constraints_path = Path(constraints_file)
        
        # Read SQL from files using streaming to avoid memory issues
        if not ddl_path.exists():
            raise FileNotFoundError(f"DDL file not found: {ddl_path}")
        if not insert_path.exists():
            raise FileNotFoundError(f"INSERT file not found: {insert_path}")
        if not constraints_path.exists():
            logger.warning(f"Constraints file not found: {constraints_path}, will skip")
            constraints_sql = ""
        else:
            # Read file using streaming to avoid loading entire file into memory
            chunks = []
            with open(constraints_path, 'r', encoding='utf-8') as f:
                chunk_size = 1024 * 1024  # 1MB chunks
                while True:
                    chunk = f.read(chunk_size)
                    if not chunk:
                        break
                    chunks.append(chunk)
            constraints_sql = ''.join(chunks)
        
        # Read DDL file using streaming to avoid loading entire file into memory
        chunks = []
        with open(ddl_path, 'r', encoding='utf-8') as f:
            chunk_size = 1024 * 1024  # 1MB chunks
            while True:
                chunk = f.read(chunk_size)
                if not chunk:
                    break
                chunks.append(chunk)
        ddl_sql = ''.join(chunks)
        
        # For INSERT file, pass file path for streaming processing instead of reading entire file
        # This avoids loading large files into memory
        return self.execute_migration_from_sql(
            ddl_sql=ddl_sql,
            insert_sql="",  # Empty string, will use insert_file_path instead
            constraints_sql=constraints_sql,
            insert_file_path=insert_path,  # Pass file path for streaming
        )
    
    def execute_migration_from_sql(
        self,
        ddl_sql: str,
        insert_sql: str = "",
        constraints_sql: str = "",
        use_transaction: bool = False,
        insert_statements: Optional[List[str]] = None,
        insert_file_path: Optional[Path] = None,
    ) -> Dict[str, any]:
        """
        Execute complete migration from SQL text: DDL → INSERT (via Kafka) → Constraints.
        
        Args:
            ddl_sql: DDL SQL text
            insert_sql: INSERT SQL text (used only if insert_statements and insert_file_path are None)
            constraints_sql: Constraints/indexes SQL text
            use_transaction: If True, use transaction mode (DDL + Constraints in transaction, INSERT with independent transactions)
            insert_statements: Optional list of INSERT statements (avoids string splitting for large files)
            insert_file_path: Optional path to INSERT SQL file for streaming processing (avoids loading entire file into memory)
            
        Returns:
            Dictionary with execution results and statistics
        """
        results: Dict[str, any] = {
            "ddl_executed": False,
            "ddl_time": 0.0,
            "inserts_sent": False,
            "inserts_completed": False,
            "insert_exec_time": 0.0,  # Total time from DDL end to constraints start
            "constraints_executed": False,
            "constraints_time": 0.0,
            "producer_stats": {},
            "consumer_stats": {},
            "errors": [],
        }
        
        # Track tables discovered during streaming (for topic creation)
        self._streaming_tables: Dict[str, int] = {}  # table_name -> message_count
        
        # Clear diagnostic errors from previous runs
        if hasattr(self.db_executor, '_diagnostic_errors'):
            self.db_executor._diagnostic_errors = []
        
        self._cleanup_needed = True
        
        import time
        
        # Transaction management for lightweight transaction mode
        main_conn = None
        transaction_active = False
        
        try:
            # Check if DDL contains CREATE DATABASE
            import re
            ddl_statements_preview = [s.strip() for s in ddl_sql.split(';') if s.strip()]
            db_pattern = r'CREATE\s+DATABASE\s+(?:IF\s+NOT\s+EXISTS\s+)?(?:["`])?(\w+)(?:["`])?'
            will_create_database = None
            for stmt in ddl_statements_preview:
                db_match = re.search(db_pattern, stmt, re.IGNORECASE)
                if db_match:
                    will_create_database = db_match.group(1)
                    break
            
            # Create checkpoint before migration starts (if using transaction mode)
            # If database will be created, create it first, then create checkpoint
            checkpoint = None
            if use_transaction:
                if will_create_database:
                    logger.info(f"Database '{will_create_database}' will be created - creating it first, then checkpoint...")
                    # Create database first
                    from .db_executor import parse_connection_string
                    conn_params = parse_connection_string(self.db_executor.connection_string)
                    current_db = conn_params.get('database', 'defaultdb')
                    
                    # Determine default database to connect to
                    if current_db == 'postgres':
                        default_db = 'defaultdb'
                    elif current_db == 'defaultdb':
                        default_db = 'postgres'
                    else:
                        default_db = 'postgres'
                    
                    # Create a temporary connection to default database
                    import psycopg2
                    temp_conn = psycopg2.connect(
                        host=conn_params['host'],
                        port=conn_params['port'],
                        user=conn_params['user'],
                        password=conn_params.get('password', ''),
                        database=default_db,
                        sslmode=conn_params.get('sslmode', 'disable')
                    )
                    try:
                        temp_conn.autocommit = True
                        with temp_conn.cursor() as temp_cursor:
                            # Find CREATE DATABASE statement
                            create_db_stmt = None
                            for stmt in ddl_statements_preview:
                                db_match = re.search(db_pattern, stmt, re.IGNORECASE)
                                if db_match:
                                    create_db_stmt = stmt
                                    break
                            
                            if create_db_stmt:
                                # Transform CREATE DATABASE to PostgreSQL syntax if needed
                                create_db_sql = create_db_stmt
                                if not create_db_sql.upper().startswith('CREATE DATABASE IF NOT EXISTS'):
                                    create_db_sql = re.sub(
                                        r'CREATE\s+DATABASE\s+',
                                        'CREATE DATABASE IF NOT EXISTS ',
                                        create_db_sql,
                                        flags=re.IGNORECASE,
                                        count=1
                                    )
                                # Quote database name
                                if '"' not in create_db_sql and '`' not in create_db_sql:
                                    create_db_sql = re.sub(
                                        r'CREATE\s+DATABASE\s+IF\s+NOT\s+EXISTS\s+(\w+)',
                                        r'CREATE DATABASE IF NOT EXISTS "\1"',
                                        create_db_sql,
                                        flags=re.IGNORECASE,
                                        count=1
                                    )
                                temp_cursor.execute(create_db_sql)
                                logger.info(f"✓ Created database: {will_create_database}")
                    finally:
                        temp_conn.close()
                    
                    # Now reconnect to the new database and create checkpoint
                    # Close all existing connections in the pool and reconnect
                    try:
                        # Close the pool and recreate it with new database
                        self.db_executor.pool.closeall()
                    except Exception:
                        pass
                    
                    # Update connection string to point to new database
                    new_conn_string = self.db_executor.connection_string.replace(
                        f"/{current_db}",
                        f"/{will_create_database}"
                    )
                    # Recreate pool with new database
                    from .db_executor import KWDBExecutor
                    self.db_executor = KWDBExecutor(
                        connection_string=new_conn_string,
                        min_connections=self.db_executor.min_connections,
                        max_connections=self.db_executor.max_connections
                    )
                    logger.info(f"✓ Reconnected to database: {will_create_database}")
                
                logger.info("Creating database checkpoint before migration...")
                checkpoint = self._create_checkpoint()
            
            # Stage 1: Execute DDL (table creation)
            logger.info("="*60)
            logger.info("Stage 1: Executing DDL (table creation)")
            if use_transaction:
                logger.info("Transaction mode: DDL + Constraints in transaction, INSERT with independent transactions")
            logger.info("="*60)
            
            ddl_start_time = time.time()
            # Pre-initialize transaction bookkeeping so later stages (e.g. the
            # constraints-stage rollback guard, which tests
            # `use_transaction and transaction_active and main_conn`) can safely
            # reference these names even when the DDL branch below is skipped.
            # Without this, an empty ddl_sql in transaction mode raises
            # NameError at that guard instead of performing the rollback.
            transaction_active = False
            main_conn = None
            if not ddl_sql.strip():
                logger.warning("No DDL SQL provided, skipping")
                results["ddl_executed"] = False
                results["ddl_time"] = 0.0
            else:
                if use_transaction:
                    # Execute DDL in transaction
                    # Get a connection and use explicit transaction management
                    # Avoid modifying autocommit attribute which can fail if connection is in transaction
                    main_conn = self.db_executor._get_valid_connection()
                    
                    # Use explicit transaction management with BEGIN/COMMIT/ROLLBACK
                    # First, ensure connection is not in a transaction by ending any existing one
                    try:
                        # Try to end any existing transaction
                        # If connection is in autocommit mode, these will be no-ops
                        # If connection is in transaction, these will end it
                        with main_conn.cursor() as cleanup_cursor:
                            try:
                                cleanup_cursor.execute("COMMIT")
                            except Exception:
                                try:
                                    cleanup_cursor.execute("ROLLBACK")
                                except Exception:
                                    # If both fail, connection might be in autocommit mode
                                    # or already clean - this is OK
                                    pass
                        
                        # Now start a new transaction
                        # Use a separate cursor to avoid issues with the cleanup cursor
                        with main_conn.cursor() as begin_cursor:
                            begin_cursor.execute("BEGIN")
                        
                        transaction_active = True
                    except Exception as e:
                        # If transaction setup fails, return connection and get a fresh one
                        logger.warning(f"Failed to start transaction: {e}, getting fresh connection")
                        try:
                            self.db_executor.pool.putconn(main_conn)
                        except Exception:
                            # Pool refused the connection (e.g. pool closed) —
                            # close it directly so it is not leaked.
                            try:
                                main_conn.close()
                            except Exception:
                                pass
                        
                        # Get fresh connection and try again
                        main_conn = self.db_executor._get_valid_connection()
                        try:
                            with main_conn.cursor() as cleanup_cursor:
                                try:
                                    cleanup_cursor.execute("COMMIT")
                                except Exception:
                                    try:
                                        cleanup_cursor.execute("ROLLBACK")
                                    except Exception:
                                        pass
                            with main_conn.cursor() as begin_cursor:
                                begin_cursor.execute("BEGIN")
                            transaction_active = True
                        except Exception as cleanup_err:
                            logger.error(f"Failed to start transaction even on fresh connection: {cleanup_err}")
                            raise RuntimeError(f"Cannot start transaction: {cleanup_err}")
                    
                    # Transactions here ensure atomic commit and visibility ordering for DDL batch.
                    # Note: rollback is NOT handled by transactions anymore; rollback is unified via checkpoint.
                    
                    try:
                        with main_conn.cursor() as cursor:
                            # NOTE(review): naive split on ';' breaks on
                            # semicolons inside string literals/comments; assumes
                            # generated DDL never contains them — verify upstream.
                            ddl_statements = [s.strip() for s in ddl_sql.split(';') if s.strip()]
                            
                            # Execute DDL statements (skip CREATE DATABASE and USE as they're not supported in transaction)
                            executed_count = 0
                            for stmt in ddl_statements:
                                if stmt:
                                    stmt_upper = stmt.upper().strip()
                                    # Skip database-level statements that shouldn't be executed in transaction
                                    if (stmt_upper.startswith('DROP DATABASE') or 
                                        stmt_upper.startswith('CREATE DATABASE') or
                                        stmt_upper.startswith('USE ') or
                                        (stmt_upper.startswith('/*') and 'DATABASE' in stmt_upper)):
                                        logger.debug(f"Skipping database-level statement: {stmt[:100]}...")
                                        continue
                                    try:
                                        cursor.execute(stmt + ';')
                                        executed_count += 1
                                    except Exception as stmt_err:
                                        # Log which statement failed
                                        logger.error(f"Failed to execute DDL statement {executed_count + 1}/{len(ddl_statements)}: {stmt_err}")
                                        logger.error(f"Statement: {stmt[:200]}...")
                                        raise
                            
                            logger.info(f"✓ DDL executed in transaction: {executed_count} statements")
                        
                        # Commit DDL immediately so that Consumer connections can see the tables
                        # This is necessary because Consumers use different connections from the pool
                        main_conn.commit()
                        logger.info("✓ DDL committed - tables are now visible to Consumer connections")
                        # Note: We'll start a new transaction for Constraints later if needed
                        transaction_active = False  # DDL transaction is committed
                    except Exception as e:
                        if main_conn:
                            main_conn.rollback()
                            transaction_active = False
                        logger.error("✗ DDL stage failed - transaction rolled back")
                        # Rollback to checkpoint if checkpoint exists
                        if checkpoint:
                            logger.error("✗ DDL stage failed - rolling back to checkpoint")
                            try:
                                self._rollback_to_checkpoint(checkpoint, ddl_sql)
                            except Exception as rollback_err:
                                logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
                        raise RuntimeError(f"DDL stage failed: {e}")
                else:
                    # Non-transactional mode (original behavior)
                    try:
                        self.db_executor.execute_ddl(ddl_sql)
                    except Exception as e:
                        logger.error("✗ DDL stage failed")
                        # Rollback to checkpoint if checkpoint exists
                        if checkpoint:
                            logger.error("✗ DDL stage failed - rolling back to checkpoint")
                            try:
                                self._rollback_to_checkpoint(checkpoint, ddl_sql)
                            except Exception as rollback_err:
                                logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
                        raise RuntimeError(f"DDL stage failed: {e}")
                
                results["ddl_executed"] = True
                results["ddl_time"] = time.time() - ddl_start_time
                logger.info(f"✓ DDL execution completed in {results['ddl_time']:.2f}s")
            # Record start time for INSERT stage (includes Kafka config, producer, consumer)
            insert_start_time = time.time()
            
            # Stage 2: Process and send INSERT statements to Kafka
            logger.info("\n" + "="*60)
            logger.info("Stage 2: Processing and sending INSERT statements to Kafka")
            logger.info("="*60)
            # Fast preflight: ensure Kafka broker is reachable to avoid wasted parsing time
            try:
                self._ensure_kafka_available()
            except Exception as e:
                logger.error(f"Kafka not available: {e}")
                raise
            
            # Use provided insert_statements list if available, otherwise split from string or file
            if insert_statements is None:
                # Priority: insert_file_path > insert_statements > insert_sql
                if insert_file_path and insert_file_path.exists():
                    # Stream processing from file to avoid loading entire file into memory
                    file_size = insert_file_path.stat().st_size
                    logger.info(f"Streaming INSERT statements from file: {insert_file_path} ({file_size / 1024 / 1024:.1f}MB)")
                    # Record start time for INSERT stage
                    # (re-assigned here for the file path; the assignment above
                    # covers the non-file code paths)
                    insert_start_time = time.time()
                    results["insert_start_time"] = insert_start_time
                    # Use streaming producer that processes file in chunks
                    try:
                        self._send_inserts_from_file_streaming(insert_file_path, results, checkpoint, use_transaction, ddl_sql)
                        # Note: inserts_sent and inserts_completed are set inside _send_inserts_from_file_streaming
                    except RuntimeError as insert_err:
                        # INSERT stage failed - rollback should have been handled inside _send_inserts_from_file_streaming
                        # But ensure it happens even if there was an exception
                        # NOTE(review): if the helper already rolled back, this
                        # rolls back a second time — confirm _rollback_to_checkpoint
                        # is idempotent.
                        if checkpoint:
                            logger.error("✗ INSERT stage failed - ensuring rollback to checkpoint")
                            try:
                                self._rollback_to_checkpoint(checkpoint, ddl_sql)
                            except Exception as rollback_err:
                                logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
                        raise
                elif not insert_sql.strip():
                    logger.warning("No INSERT SQL provided, skipping")
                    results["inserts_sent"] = True
                    results["inserts_completed"] = True
                    insert_statements = []
                else:
                    # For large insert_sql strings, warn and suggest using insert_file_path instead
                    # Split by ');' to better align with multi-row INSERT endings
                    # Note: This still loads all statements into memory, so for very large files,
                    # it's better to use insert_file_path for true streaming
                    if len(insert_sql) > 100 * 1024 * 1024:  # 100MB
                        logger.warning(
                            f"insert_sql is large ({len(insert_sql) / 1024 / 1024:.1f}MB). "
                            "Consider using insert_file_path for better memory efficiency."
                        )
                    insert_statements = []
                    # NOTE(review): this split misparses statements containing
                    # ');' inside quoted values, and unconditionally re-appends
                    # ');' — verify the SQL generator never emits such data.
                    for part in insert_sql.split(');'):
                        stmt = part.strip()
                        if not stmt:
                            continue
                        s = stmt + ');'
                        if s.strip().upper().startswith('INSERT'):
                            insert_statements.append(s)
            
            # Process insert_statements if we have them (not streaming from file)
            if insert_statements is not None and len(insert_statements) == 0:
                logger.warning("No INSERT statements found")
                results["inserts_sent"] = True
                results["inserts_completed"] = True
            elif insert_statements is not None and len(insert_statements) > 0:
                    # Split by table
                    # (This suite is indented one extra level; Python accepts it
                    # because the whole suite is consistent.)
                    table_inserts = self.producer.split_inserts_by_table(insert_statements)
                    
                    # Note: We don't delete existing topics - Kafka handles this gracefully
                    # Instead, we use unique consumer group IDs for each run to avoid consuming old data
                    table_names_list = list(table_inserts.keys())
                    
                    # Create all topics upfront with intelligent partitions per table based on statement counts
                    # Limit maximum partitions to 6 to avoid memory issues
                    MAX_PARTITIONS = 6
                    table_to_partitions: Dict[str, int] = {}
                    for t in table_names_list:
                        msg_count = len(table_inserts.get(t, []))
                        # Roughly one partition per 50 statements (ceiling division)
                        desired_partitions = (msg_count + 49) // 50 if msg_count > 0 else 1
                        if desired_partitions < 1:
                            desired_partitions = 1
                        # Cap partitions at MAX_PARTITIONS to prevent excessive memory usage
                        desired_partitions = min(desired_partitions, MAX_PARTITIONS)
                        table_to_partitions[t] = desired_partitions
                        logger.info(f"Planned topic: {self.producer.topic_prefix}_{t} partitions={desired_partitions} (messages={msg_count})")
                    # Bulk create topics in one admin call
                    try:
                        self.producer.create_kafka_topics_bulk(table_to_partitions)
                    except Exception as e:
                        logger.warning(f"Bulk topic creation had issues: {e}")
                    # Wait longer to ensure all topics are fully available and metadata is synced
                    # This helps avoid "Failed to update metadata" errors when sending first message
                    # NOTE(review): unconditional 10s blocking sleep; polling topic
                    # metadata would be faster and more reliable.
                    import time as _t
                    _t.sleep(10)  # Increased to 10s to allow metadata synchronization across cluster
                    
                    # Pre-warm metadata for all topics to avoid timeout on first send
                    # Use the producer's ensure_metadata_ready method with retries
                    # Note: Each attempt may take up to 5s, so total time can be significant
                    logger.info("Pre-warming metadata for all topics (this may take a moment)...")
                    successful_warmups = 0
                    failed_topics = []
                    total_topics = len(table_to_partitions)
                    
                    for table_name in table_to_partitions.keys():
                        topic_name = f"{self.producer.topic_prefix}_{table_name}"
                        # Use more retries and longer delay to handle slow metadata sync
                        if self.producer._ensure_metadata_ready(topic_name, max_retries=6, retry_delay=2.0):
                            successful_warmups += 1
                        else:
                            failed_topics.append(table_name)
                    
                    # Log results
                    if successful_warmups == total_topics:
                        logger.info(
                            f"✓ Metadata pre-warming completed successfully: all {total_topics} topics ready"
                        )
                    else:
                        logger.warning(
                            f"Metadata pre-warming completed with failures: {successful_warmups}/{total_topics} topics ready. "
                            f"Failed topics: {failed_topics}"
                        )
                    # Expose mapping for per-table pipelines
                    try:
                        self._table_to_partitions = table_to_partitions
                    except Exception:
                        pass
                    
                    # Auto-scale DB connection pool to match total consumers across tables
                    try:
                        total_consumers_planned = sum(max(1, int(p or 1)) for p in table_to_partitions.values())
                        # Reserve a small headroom (20%) and at least +2 for misc operations
                        desired_max_pool = max(self.db_executor.max_connections, int(total_consumers_planned * 1.2) + 2)
                        desired_min_pool = max(self.db_executor.min_connections, min(2, desired_max_pool))
                        if desired_max_pool > self.db_executor.max_connections:
                            logger.info(
                                f"Scaling DB connection pool for INSERT stage: "
                                f"planned_consumers={total_consumers_planned}, "
                                f"old_pool={self.db_executor.min_connections}-{self.db_executor.max_connections} -> "
                                f"new_pool={desired_min_pool}-{desired_max_pool}"
                            )
                            # Recreate pool with larger max using existing connection parameters
                            # NOTE(review): this close-and-recreate sequence is
                            # duplicated in the constraints stage; a shared helper
                            # on KWDBExecutor would avoid drift.
                            if self.db_executor.pool:
                                self.db_executor.pool.closeall()
                            from psycopg2.pool import ThreadedConnectionPool
                            from .db_executor import parse_connection_string
                            conn_params = parse_connection_string(self.db_executor.connection_string)
                            self.db_executor.pool = ThreadedConnectionPool(
                                desired_min_pool,
                                desired_max_pool,
                                host=conn_params['host'],
                                port=conn_params['port'],
                                user=conn_params['user'],
                                password=conn_params['password'],
                                database=conn_params['database'],
                                sslmode=conn_params.get('sslmode', 'disable'),
                            )
                            self.db_executor.min_connections = desired_min_pool
                            self.db_executor.max_connections = desired_max_pool
                            logger.info("✓ Connection pool scaled up for INSERT stage")
                    except Exception as e:
                        logger.warning(f"Failed to auto-scale DB connection pool: {e}")
                    
                    # Stage 3: Parallel producer-consumer pipeline for each table
                    logger.info("\n" + "="*60)
                    logger.info("Stage 3: Streaming producer-consumer pipeline (parallel)")
                    logger.info("="*60)
                    logger.info("Starting streaming mode: producers send while consumers consume in parallel")
                    
                    # Execute SQL callback - attach db_executor for batch execution
                    def execute_insert_sql(sql: str):
                        """Execute one INSERT statement via the pooled executor."""
                        self.db_executor.execute_sql(sql)
                    
                    # Attach db_executor to callback so consumer can use execute_sql_batch
                    # HACK(review): setting __self__ on a plain function mimics a
                    # bound method so the consumer can discover execute_sql_batch.
                    # Both attributes are kept until the consumer's lookup strategy
                    # (__self__ vs db_executor) is confirmed.
                    execute_insert_sql.__self__ = type('obj', (object,), {'execute_sql_batch': self.db_executor.execute_sql_batch})()
                    # Alternative: attach directly
                    execute_insert_sql.db_executor = self.db_executor
                    
                    # Start streaming producer-consumer pipeline for all tables in parallel
                    # Use a shared error flag to detect errors immediately.
                    # Deliberately no local "import threading" here: the module
                    # already imports threading, and a function-local import would
                    # bind the name as a local for this ENTIRE method, risking
                    # UnboundLocalError at any earlier threading.* reference.
                    error_detected = threading.Event()
                    error_lock = threading.Lock()
                    
                    def handle_insert_error():
                        """Handle INSERT error: immediately cleanup and rollback.

                        Idempotent: double-checked locking on ``error_detected``
                        guarantees the cleanup/rollback path runs at most once
                        even when several workers fail concurrently. Closes over
                        ``results``, ``checkpoint`` and ``ddl_sql`` from the
                        enclosing migration method.
                        """
                        if error_detected.is_set():
                            return  # Already handling error
                        
                        with error_lock:
                            # Re-check under the lock so only one caller proceeds.
                            if error_detected.is_set():
                                return
                            error_detected.set()
                        
                        logger.error("✗ INSERT error detected - immediately stopping and cleaning up...")
                        results["inserts_completed"] = False
                        
                        # Immediately cleanup Kafka resources
                        try:
                            logger.info("Cleaning up Kafka resources immediately...")
                            self._cleanup_kafka_resources()
                            logger.info("✓ Kafka resources cleaned up")
                        except Exception as kafka_err:
                            # Best-effort cleanup; rollback below must still run.
                            logger.warning(f"Error cleaning up Kafka: {kafka_err}")
                        
                        # Rollback to checkpoint if checkpoint exists (regardless of transaction mode)
                        if checkpoint:
                            logger.error("✗ INSERT stage failed - rolling back to checkpoint")
                            try:
                                self._rollback_to_checkpoint(checkpoint, ddl_sql)
                            except Exception as rollback_err:
                                logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
                    
                    # Run pipeline with error detection
                    try:
                        producer_stats, consumer_stats = self._run_streaming_pipeline(
                            table_inserts=table_inserts,
                            execute_sql=execute_insert_sql,
                            error_event=error_detected,
                            on_error=handle_insert_error,
                        )
                    except Exception as pipeline_err:
                        # Pipeline error - trigger cleanup
                        handle_insert_error()
                        raise RuntimeError(f"INSERT pipeline failed: {pipeline_err}")
                    
                    results["producer_stats"] = producer_stats
                    # Flatten per-consumer stats objects into plain dicts for the
                    # results payload.
                    results["consumer_stats"] = {
                        name: {
                            "batches_processed": stats.batches_processed,
                            "rows_inserted": stats.rows_inserted,
                            "errors": stats.errors,
                            "duration": stats.duration,
                        }
                        for name, stats in consumer_stats.items()
                    }
                    
                    # Check for errors after pipeline completes
                    if error_detected.is_set() or any(v.get("errors", 0) for v in results["consumer_stats"].values()):
                        # Ensure rollback happens even if handle_insert_error wasn't called
                        # NOTE(review): if handle_insert_error already rolled back,
                        # this repeats the rollback — confirm idempotency.
                        if checkpoint:
                            logger.error("✗ INSERT stage failed - rolling back to checkpoint")
                            try:
                                self._rollback_to_checkpoint(checkpoint, ddl_sql)
                            except Exception as rollback_err:
                                logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
                        raise RuntimeError("Consumer errors detected, aborting before constraints")
                    
                    results["inserts_sent"] = True
                    results["inserts_completed"] = True
                    logger.info("✓ All INSERT statements executed")
            
            # Calculate total INSERT execution time (from DDL end to constraints start)
            insert_end_time = time.time()
            results["insert_exec_time"] = insert_end_time - insert_start_time
            
            # Skip post-insert count validations; proceed directly

            # Clean up Kafka resources immediately after INSERT stage completes
            # This frees up resources for the constraints/index creation stage
            # Only clean up if we actually used Kafka (created topics)
            producer_stats = results.get("producer_stats", {})
            if results.get("inserts_completed") and producer_stats:
                logger.info("\n" + "="*60)
                logger.info("Cleaning up Kafka resources (INSERT stage complete)")
                logger.info("="*60)
                try:
                    self._cleanup_kafka_resources()
                    logger.info("✓ Kafka resources cleaned up")
                    
                    # Force garbage collection to free up memory before constraints
                    import gc
                    collected = gc.collect()
                    logger.debug(f"Garbage collected {collected} objects to free memory")
                except Exception as e:
                    logger.warning(f"Error cleaning up Kafka resources: {e}")
            
            # Stage 4: Execute constraints and indexes
            # Reduce connection pool size before constraints to free up memory for index creation
            # Index creation is memory-intensive and needs more resources
            if results.get("inserts_completed"):
                logger.info("Reducing database connection pool to free memory for index creation...")
                original_min = self.db_executor.min_connections
                original_max = self.db_executor.max_connections
                # Reduce to minimal connections needed (index creation typically uses fewer connections)
                reduced_min = min(original_min, 2)
                reduced_max = min(original_max, 5)  # Much smaller pool for constraints
                
                # Close current pool and create new smaller one
                # NOTE(review): if ThreadedConnectionPool() raises AFTER
                # closeall() succeeded, the executor is left without a usable
                # pool even though we log "continuing with existing pool".
                try:
                    if self.db_executor.pool:
                        self.db_executor.pool.closeall()
                    
                    # Recreate with smaller pool using existing connection parameters
                    from psycopg2.pool import ThreadedConnectionPool
                    from .db_executor import parse_connection_string
                    
                    # Parse connection string to get parameters
                    conn_params = parse_connection_string(self.db_executor.connection_string)
                    
                    self.db_executor.pool = ThreadedConnectionPool(
                        reduced_min,
                        reduced_max,
                        host=conn_params['host'],
                        port=conn_params['port'],
                        user=conn_params['user'],
                        password=conn_params['password'],
                        database=conn_params['database'],
                        sslmode=conn_params.get('sslmode', 'disable'),
                    )
                    self.db_executor.min_connections = reduced_min
                    self.db_executor.max_connections = reduced_max
                    logger.info(f"✓ Connection pool reduced from {original_min}-{original_max} to {reduced_min}-{reduced_max}")
                    
                    # Force garbage collection after pool reduction
                    import gc
                    collected = gc.collect()
                    logger.debug(f"Garbage collected {collected} objects after pool reduction")
                except Exception as e:
                    logger.warning(f"Could not reduce connection pool: {e}, continuing with existing pool")
            
            logger.info("\n" + "="*60)
            logger.info("Stage 4: Executing constraints and indexes")
            logger.info("="*60)
            
            constraints_start_time = time.time()
            if not constraints_sql.strip():
                logger.warning("No constraints SQL provided, skipping")
                results["constraints_executed"] = False
                results["constraints_time"] = 0.0
            else:
                # Pre-validate foreign key integrity before adding constraints
                logger.info("Pre-validating foreign key integrity...")
                try:
                    self._validate_foreign_key_integrity(constraints_sql)
                    logger.info("✓ Foreign key integrity validation passed")
                except Exception as e:
                    logger.error(f"✗ Foreign key integrity validation failed: {e}")
                    logger.error("This indicates missing or inconsistent data in the database.")
                    logger.error("Aborting constraints execution to prevent database corruption.")
                    results["constraints_executed"] = False
                    results["constraints_time"] = 0.0
                    # If using transaction mode, rollback transaction
                    if use_transaction and transaction_active and main_conn:
                        try:
                            main_conn.rollback()
                            logger.error("✗ Constraints stage failed - transaction rolled back (DDL rolled back, INSERT data remains)")
                            transaction_active = False
                        except Exception as rollback_err:
                            logger.warning(f"Error during rollback: {rollback_err}")
                    # Rollback to checkpoint if checkpoint exists
                    if checkpoint:
                        logger.error("✗ Constraints stage failed - rolling back to checkpoint")
                        try:
                            self._rollback_to_checkpoint(checkpoint, ddl_sql)
                        except Exception as rollback_err:
                            logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
                    raise
                
                if use_transaction:
                    # Execute Constraints in two phases:
                    # Phase 1: Create all indexes and non-FK constraints, then commit
                    # Phase 2: Create all foreign keys, then commit
                    # This ensures indexes are committed and visible before FK creation
                    constraints_conn = self.db_executor._get_valid_connection()
                    
                    try:
                        # Ensure connection is clean before starting transaction
                        with constraints_conn.cursor() as cleanup_cursor:
                            try:
                                cleanup_cursor.execute("COMMIT")
                            except Exception:
                                try:
                                    cleanup_cursor.execute("ROLLBACK")
                                except Exception:
                                    pass
                        
                        import re
                        import time
                        
                        # Split constraints into indexes/other constraints and foreign keys
                        # Handle DELIMITER blocks specially - they should not be split by semicolon
                        # First, extract all DELIMITER blocks (triggers) from the SQL
                        delimiter_blocks = []
                        remaining_sql = constraints_sql
                        
                        # Find all DELIMITER blocks using regex
                        # Pattern: DELIMITER \\ ... DELIMITER;
                        # In SQL text, "DELIMITER \\" may contain one or two backslashes depending on how it's stored
                        # Try to match both cases: DELIMITER \ and DELIMITER \\
                        # Use non-greedy match to capture the entire block until DELIMITER;
                        delimiter_pattern = r'(DELIMITER\s+\\+[\s\S]*?DELIMITER;)'
                        matches = re.finditer(delimiter_pattern, constraints_sql, re.IGNORECASE | re.MULTILINE)
                        for match in matches:
                            block = match.group(0).strip()
                            delimiter_blocks.append(block)
                            # Remove the DELIMITER block from remaining SQL
                            remaining_sql = remaining_sql.replace(match.group(0), '', 1)
                            logger.debug(f"Extracted DELIMITER block (length: {len(block)}): {block[:50]}...{block[-50:]}")
                        
                        # Now split remaining SQL by semicolon (regular statements)
                        constraint_statements = [s.strip() for s in remaining_sql.split(';') if s.strip()]
                        
                        # Add DELIMITER blocks back to constraint_statements
                        constraint_statements.extend(delimiter_blocks)
                        
                        index_and_other_statements = []
                        foreign_key_statements = []
                        
                        for stmt in constraint_statements:
                            if stmt:
                                # Check if it's a DELIMITER block (trigger)
                                if stmt.upper().startswith('DELIMITER'):
                                    # DELIMITER blocks go to index_and_other_statements
                                    index_and_other_statements.append(stmt)
                                # Check if it's a foreign key constraint
                                elif re.search(r'ALTER\s+TABLE\s+.*\s+ADD\s+CONSTRAINT\s+.*\s+FOREIGN\s+KEY', stmt, re.IGNORECASE):
                                    foreign_key_statements.append(stmt)
                                else:
                                    # Index, CHECK, or other constraints
                                    index_and_other_statements.append(stmt)
                        
                        # Phase 1: Create indexes and other constraints
                        if index_and_other_statements:
                            logger.info(f"Phase 1: Creating {len(index_and_other_statements)} indexes and other constraints...")
                            
                            # Start transaction for indexes/other constraints
                            with constraints_conn.cursor() as begin_cursor:
                                begin_cursor.execute("BEGIN")
                            
                            try:
                                with constraints_conn.cursor() as cursor:
                                    for idx, stmt in enumerate(index_and_other_statements, 1):
                                        if stmt:
                                            # DELIMITER blocks should be executed as-is without adding semicolon
                                            if stmt.upper().startswith('DELIMITER'):
                                                # Log the DELIMITER block being executed (first and last 100 chars)
                                                logger.debug(f"Executing DELIMITER block {idx}/{len(index_and_other_statements)} (length: {len(stmt)})")
                                                logger.debug(f"  First 100 chars: {stmt[:100]}")
                                                logger.debug(f"  Last 100 chars: {stmt[-100:]}")
                                                # Extract the CREATE TRIGGER statement from DELIMITER block
                                                # Remove DELIMITER \\ at the start and DELIMITER; at the end
                                                # Find the CREATE TRIGGER part (from CREATE TRIGGER to END \\)
                                                # In the text, "END \\" contains two backslash characters
                                                # In regex, \\ matches one backslash, so \\\\ matches two backslashes
                                                trigger_match = re.search(r'(CREATE\s+TRIGGER[\s\S]*?END\s+\\\\)', stmt, re.IGNORECASE)
                                                if trigger_match:
                                                    # Extract the trigger statement
                                                    trigger_stmt = trigger_match.group(1).strip()
                                                    # Replace END \\ (two backslashes) with END;
                                                    # In the text, "END \\" is two characters: backslash + backslash
                                                    # In regex replacement, we need to match the literal text
                                                    trigger_stmt = re.sub(r'END\s+\\+', 'END;', trigger_stmt, flags=re.IGNORECASE)
                                                    logger.debug(f"Extracted trigger statement: {trigger_stmt[:100]}...")
                                                    cursor.execute(trigger_stmt)
                                                else:
                                                    # Fallback: try to execute the whole block
                                                    logger.warning(f"Could not extract CREATE TRIGGER from DELIMITER block, executing as-is")
                                                    cursor.execute(stmt)
                                            else:
                                                cursor.execute(stmt + ';')
                                            logger.debug(f"Created constraint/index {idx}/{len(index_and_other_statements)}")
                                
                                # Commit indexes and other constraints
                                constraints_conn.commit()
                                logger.info(f"✓ Phase 1 completed: {len(index_and_other_statements)} indexes/constraints committed")
                                
                                # Small delay to ensure indexes are fully available
                                time.sleep(1.0)
                                
                            except Exception as e:
                                try:
                                    constraints_conn.rollback()
                                    logger.error("✗ Phase 1 failed - transaction rolled back")
                                except Exception:
                                    pass
                                # Generate diagnostic error message
                                try:
                                    from .diagnostics import diagnose_sql_error
                                    # Find the failing statement
                                    failing_stmt = ""
                                    error_str = str(e)
                                    # Try to find which statement failed
                                    for stmt in index_and_other_statements:
                                        if stmt and (stmt[:50] in error_str or any(word in error_str for word in stmt.split()[:3])):
                                            failing_stmt = stmt
                                            break
                                    if not failing_stmt and index_and_other_statements:
                                        failing_stmt = index_and_other_statements[-1]  # Use last statement as fallback
                                    diag = diagnose_sql_error(failing_stmt, error_str, phase='CONSTRAINTS')
                                    # Collect diagnostic error
                                    if not hasattr(self.db_executor, '_diagnostic_errors'):
                                        self.db_executor._diagnostic_errors = []
                                    self.db_executor._diagnostic_errors.append(diag)
                                    # Still log a brief error message
                                    logger.error(f"Phase 1 (indexes/constraints) execution failed: {e}")
                                except Exception as diag_err:
                                    # If diagnosis fails, just use original error
                                    logger.warning(f"Failed to generate diagnostic: {diag_err}")
                                
                                # Rollback to checkpoint if checkpoint exists
                                if checkpoint:
                                    logger.error("✗ Constraints Phase 1 failed - rolling back to checkpoint")
                                    try:
                                        self._rollback_to_checkpoint(checkpoint, ddl_sql)
                                    except Exception as rollback_err:
                                        logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
                                
                                raise RuntimeError(f"Phase 1 (indexes/constraints) failed: {e}")
                        
                        # Phase 2: Create foreign keys
                        if foreign_key_statements:
                            logger.info(f"Phase 2: Creating {len(foreign_key_statements)} foreign key constraints...")
                            
                            # Clean up connection state before starting new transaction
                            # Phase 1 committed, but connection might still be in transaction state
                            with constraints_conn.cursor() as cleanup_cursor:
                                try:
                                    cleanup_cursor.execute("COMMIT")
                                except Exception:
                                    try:
                                        cleanup_cursor.execute("ROLLBACK")
                                    except Exception:
                                        pass
                            
                            # Start a new transaction for foreign keys
                            with constraints_conn.cursor() as begin_cursor:
                                begin_cursor.execute("BEGIN")
                            
                            try:
                                with constraints_conn.cursor() as cursor:
                                    for idx, stmt in enumerate(foreign_key_statements, 1):
                                        if stmt:
                                            cursor.execute(stmt + ';')
                                            logger.debug(f"Created foreign key {idx}/{len(foreign_key_statements)}")
                                
                                # Commit foreign keys
                                constraints_conn.commit()
                                logger.info(f"✓ Phase 2 completed: {len(foreign_key_statements)} foreign keys committed")
                                
                            except Exception as e:
                                try:
                                    constraints_conn.rollback()
                                    logger.error("✗ Phase 2 failed - transaction rolled back (indexes/constraints remain)")
                                except Exception:
                                    pass
                                # Generate diagnostic error message
                                try:
                                    from .diagnostics import diagnose_sql_error
                                    # Find the failing statement
                                    failing_stmt = ""
                                    error_str = str(e)
                                    # Try to find which statement failed
                                    for stmt in foreign_key_statements:
                                        if stmt and (stmt[:50] in error_str or any(word in error_str for word in stmt.split()[:3])):
                                            failing_stmt = stmt
                                            break
                                    if not failing_stmt and foreign_key_statements:
                                        failing_stmt = foreign_key_statements[-1]  # Use last statement as fallback
                                    diag = diagnose_sql_error(failing_stmt, error_str, phase='CONSTRAINTS')
                                    # Collect diagnostic error
                                    if not hasattr(self.db_executor, '_diagnostic_errors'):
                                        self.db_executor._diagnostic_errors = []
                                    self.db_executor._diagnostic_errors.append(diag)
                                    # Still log a brief error message
                                    logger.error(f"Phase 2 (foreign keys) execution failed: {e}")
                                except Exception as diag_err:
                                    # If diagnosis fails, just use original error
                                    logger.warning(f"Failed to generate diagnostic: {diag_err}")
                                
                                # Rollback to checkpoint if checkpoint exists
                                if checkpoint:
                                    logger.error("✗ Constraints Phase 2 failed - rolling back to checkpoint")
                                    try:
                                        self._rollback_to_checkpoint(checkpoint, ddl_sql)
                                    except Exception as rollback_err:
                                        logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
                                
                                raise RuntimeError(f"Phase 2 (foreign keys) failed: {e}")
                        
                        logger.info("✓ All constraints executed and committed successfully")
                        
                    except Exception as setup_err:
                        logger.error(f"Failed to execute constraints: {setup_err}")
                        # Rollback to checkpoint if checkpoint exists
                        if checkpoint:
                            logger.error("✗ Constraints stage failed - rolling back to checkpoint")
                            try:
                                self._rollback_to_checkpoint(checkpoint, ddl_sql)
                            except Exception as rollback_err:
                                logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
                        raise RuntimeError(f"Constraints stage failed: {setup_err}")
                    finally:
                        # Return the constraints connection to pool
                        try:
                            self.db_executor.pool.putconn(constraints_conn)
                        except Exception:
                            try:
                                constraints_conn.close()
                            except Exception:
                                pass
                else:
                    # Non-transactional mode (original behavior)
                    self.db_executor.execute_constraints(constraints_sql)
                
                results["constraints_executed"] = True
                results["constraints_time"] = time.time() - constraints_start_time
                logger.info(f"✓ Constraints execution completed in {results['constraints_time']:.2f}s")
            # Stage 5: Extended validation based on CLI options (if available via env)
            try:
                # Pull options from environment set by CLI (if present)
                import os
                validate_opts = os.environ.get('KWDB_MIGRATE_VALIDATE')  # e.g., counts,structure
                report_flag = os.environ.get('KWDB_MIGRATE_REPORT')
                if validate_opts:
                    from .validation import (
                        validate_counts, validate_structure, render_report_text, ValidationReport,
                        parse_mysql_source, validate_structure_against_source, validate_counts_against_source,
                    )
                    # Try to load source metadata for source-vs-target validation
                    source_meta = None
                    try:
                        import os
                        src_path = os.environ.get('KWDB_MIGRATE_SOURCE_SQL_PATH')
                        if src_path and os.path.exists(src_path):
                            # Try to parse as MySQL file (using streaming to avoid loading entire file into memory)
                            chunks = []
                            with open(src_path, 'r', encoding='utf-8', errors='ignore') as f:
                                chunk_size = 1024 * 1024  # 1MB chunks
                                while True:
                                    chunk = f.read(chunk_size)
                                    if not chunk:
                                        break
                                    chunks.append(chunk)
                            source_sql_text = ''.join(chunks)
                            source_meta = parse_mysql_source(source_sql_text)
                        else:
                            # Try to parse from KWDB SQL files (if available via environment)
                            # These are set when using --ddl-file, --insert-file, --constraints-file
                            from .validation import parse_kwdb_source
                            ddl_path = os.environ.get('KWDB_MIGRATE_DDL_FILE')
                            insert_path = os.environ.get('KWDB_MIGRATE_INSERT_FILE')
                            constraints_path = os.environ.get('KWDB_MIGRATE_CONSTRAINTS_FILE')
                            if ddl_path and insert_path and os.path.exists(ddl_path) and os.path.exists(insert_path):
                                # Read files using streaming to avoid loading entire files into memory
                                def read_file_streaming(file_path: str, errors: str = 'strict') -> str:
                                    """Read a text file and return its full content as a string.

                                    Args:
                                        file_path: Path of the UTF-8 text file to read.
                                        errors: Codec error handling passed to open()
                                            (e.g. 'strict', 'ignore').

                                    Returns:
                                        The complete file content.

                                    Note: the previous chunked read-and-join still
                                    materialized the whole file in memory, so it offered
                                    no memory benefit over a single read; this is the
                                    equivalent, simpler form.
                                    """
                                    with open(file_path, 'r', encoding='utf-8', errors=errors) as f:
                                        return f.read()
                                
                                ddl_sql = read_file_streaming(ddl_path, errors='ignore')
                                insert_sql = read_file_streaming(insert_path, errors='ignore')
                                constraints_sql = ""
                                if constraints_path and os.path.exists(constraints_path):
                                    constraints_sql = read_file_streaming(constraints_path, errors='ignore')
                                source_meta = parse_kwdb_source(ddl_sql, insert_sql, constraints_sql)
                    except Exception as e:
                        logger.debug(f"Failed to parse source metadata: {e}")
                        source_meta = None
                    reports = []
                    # Always run counts + structure when validation is enabled
                    expected = {}
                    if source_meta is not None:
                        reports.append(validate_counts_against_source(self.db_executor, source_meta))
                        reports.append(validate_structure_against_source(self.db_executor, source_meta))
                    else:
                        # Without source dump, counts cannot be computed; run basic structure only
                        reports.append(validate_structure(self.db_executor))
                    # Fail pipeline if any report failed
                    if any(not r.ok for r in reports):
                        results['errors'].append('Validation failed')
                        # Write TXT report if requested
                        try:
                            conv_env = os.environ.get('KWDB_MIGRATE_CONVERSION_TIME')
                            conversion_time = float(conv_env) if conv_env else None
                            total_time = (
                                (conversion_time or 0.0)
                                + float(results.get('ddl_time', 0.0) or 0.0)
                                + float(results.get('insert_exec_time', 0.0) or 0.0)
                                + float(results.get('constraints_time', 0.0) or 0.0)
                            )
                            timings = {
                                'ddl_time': results.get('ddl_time', 0.0),
                                'insert_exec_time': results.get('insert_exec_time', 0.0),
                                'constraints_time': results.get('constraints_time', 0.0),
                                'total_time': total_time,
                            }
                            if conversion_time is not None:
                                timings['conversion_time'] = conversion_time
                            txt = render_report_text(reports, timings)
                            if report_flag:
                                out_dir = os.path.join(os.getcwd(), 'Migration_Report')
                                os.makedirs(out_dir, exist_ok=True)
                                try:
                                    from .db_executor import parse_connection_string
                                    db_name = parse_connection_string(self.db_executor.connection_string).get('database', 'database')
                                except Exception:
                                    db_name = 'database'
                                import time as _t
                                ts = _t.strftime('%Y%m%d_%H%M%S', _t.localtime())
                                fname = f"{db_name}_{ts}.txt"
                                # Use streaming write (though report files are typically small, this ensures consistency)
                                with open(os.path.join(out_dir, fname), 'w', encoding='utf-8') as f:
                                    # For small files, write directly is fine, but we use chunked write for consistency
                                    chunk_size = 1024 * 1024  # 1MB chunks
                                    for i in range(0, len(txt), chunk_size):
                                        f.write(txt[i:i + chunk_size])
                        except Exception:
                            pass
                        raise RuntimeError('Validation failed; see logs/report')
                    # Write TXT report on success if requested
                    try:
                        conv_env = os.environ.get('KWDB_MIGRATE_CONVERSION_TIME')
                        conversion_time = float(conv_env) if conv_env else None
                        total_time = (
                            (conversion_time or 0.0)
                            + float(results.get('ddl_time', 0.0) or 0.0)
                            + float(results.get('insert_exec_time', 0.0) or 0.0)
                            + float(results.get('constraints_time', 0.0) or 0.0)
                        )
                        timings = {
                            'ddl_time': results.get('ddl_time', 0.0),
                            'insert_exec_time': results.get('insert_exec_time', 0.0),
                            'constraints_time': results.get('constraints_time', 0.0),
                            'total_time': total_time,
                        }
                        if conversion_time is not None:
                            timings['conversion_time'] = conversion_time
                        txt = render_report_text(reports, timings)
                        if report_flag:
                            out_dir = os.path.join(os.getcwd(), 'Migration_Report')
                            os.makedirs(out_dir, exist_ok=True)
                            try:
                                from .db_executor import parse_connection_string
                                db_name = parse_connection_string(self.db_executor.connection_string).get('database', 'database')
                            except Exception:
                                db_name = 'database'
                            import time as _t
                            ts = _t.strftime('%Y%m%d_%H%M%S', _t.localtime())
                            fname = f"{db_name}_{ts}.txt"
                            # Use streaming write (though report files are typically small, this ensures consistency)
                            with open(os.path.join(out_dir, fname), 'w', encoding='utf-8') as f:
                                # For small files, write directly is fine, but we use chunked write for consistency
                                chunk_size = 1024 * 1024  # 1MB chunks
                                for i in range(0, len(txt), chunk_size):
                                    f.write(txt[i:i + chunk_size])
                            logger.info(f"Validation TXT report written to {out_dir}/{fname}")
                    except Exception as we:
                        logger.warning(f"Could not write TXT report: {we}")
            except Exception as e:
                logger.warning(f"Validation stage error: {e}")
            
            logger.info("\n" + "="*60)
            logger.info("Migration completed successfully!")
            logger.info("="*60)
            
        except (KeyboardInterrupt, SystemExit):
            # Handle manual interruption
            logger.warning("Migration interrupted by user")
            results["errors"].append("Migration interrupted by user")
            # Rollback transaction if active
            if use_transaction and transaction_active and main_conn:
                try:
                    main_conn.rollback()
                    transaction_active = False
                except Exception:
                    pass
            raise
        except Exception as e:
            logger.error(f"Migration failed: {e}", exc_info=True)
            results["errors"].append(str(e))
            # Rollback transaction if active and not already handled
            if use_transaction and transaction_active and main_conn:
                try:
                    main_conn.rollback()
                    transaction_active = False
                except Exception:
                    pass
            # Output diagnostic errors before cleanup
            if hasattr(self.db_executor, '_diagnostic_errors') and self.db_executor._diagnostic_errors:
                logger.info("\n" + "="*60)
                logger.info("Diagnostic Errors Summary")
                logger.info("="*60)
                for idx, diag in enumerate(self.db_executor._diagnostic_errors, 1):
                    logger.error(f"\n[{idx}] {diag}")
                logger.info("="*60 + "\n")
                # Clear diagnostic errors after output
                self.db_executor._diagnostic_errors = []
            # Don't raise here - let cleanup happen in finally
        finally:
            # Ensure transaction connection is properly closed
            if main_conn and transaction_active:
                try:
                    main_conn.rollback()
                    logger.warning("Transaction was still active in finally block - rolled back")
                except Exception:
                    pass
            if main_conn:
                try:
                    # Connection was used with explicit BEGIN, so we don't need to reset autocommit
                    # Just return it to pool
                    self.db_executor.pool.putconn(main_conn)
                except Exception:
                    try:
                        main_conn.close()
                    except Exception:
                        pass
            # Only cleanup resources if not already done (Kafka resources cleaned after INSERT stage)
            # This ensures DB connection is closed even if there was an error
            if self._cleanup_needed:
                try:
                    # If Kafka already cleaned, just close DB connection
                    # Otherwise do full cleanup (including Kafka)
                    if self._kafka_cleaned:
                        # Kafka already cleaned, just close DB
                        self._close_internal()
                    else:
                        # Full cleanup (including Kafka)
                        self._cleanup_resources()
                except Exception as e:
                    logger.warning(f"Error during final cleanup: {e}")
                finally:
                    self._cleanup_needed = False
        
        # Output all diagnostic errors at the end
        if hasattr(self.db_executor, '_diagnostic_errors') and self.db_executor._diagnostic_errors:
            logger.info("\n" + "="*60)
            logger.info("Diagnostic Errors Summary")
            logger.info("="*60)
            for idx, diag in enumerate(self.db_executor._diagnostic_errors, 1):
                logger.error(f"\n[{idx}] {diag}")
            logger.info("="*60 + "\n")
            # Clear diagnostic errors after output
            self.db_executor._diagnostic_errors = []
        
        return results
    
    def _close_internal(self):
        """Internal method to close connections."""
        try:
            if self.db_executor:
                self.db_executor.close()
        except Exception as e:
            logger.warning(f"Error closing db_executor: {e}")
        
        try:
            if self.producer:
                self.producer.close()
        except Exception as e:
            logger.warning(f"Error closing producer: {e}")
        
        # Consumer doesn't need explicit close (handled per table)
    
    def close(self):
        """Close all connections."""
        if not self._closed:
            self._close_internal()
            self._closed = True
    
    def cleanup_topics(self):
        """Manually cleanup Kafka topics (can be called externally)."""
        if self.producer:
            self.producer.cleanup_topics()
    
    def _run_streaming_pipeline(
        self,
        table_inserts: Dict[str, List[str]],
        execute_sql,
        error_event=None,
        on_error=None,
    ) -> tuple[Dict[str, int], Dict[str, any]]:
        """
        Run streaming producer-consumer pipeline: each table has a producer thread
        that sends messages while consumer threads consume them in parallel.

        Args:
            table_inserts: Dictionary mapping table names to their INSERT statements
            execute_sql: Function to execute SQL statements
            error_event: Optional threading.Event set when any table fails; used to
                cancel work that has not started yet
            on_error: Optional callback invoked when an error is detected
                (e.g. to trigger immediate Kafka cleanup)

        Returns:
            Tuple of (producer_stats, consumer_stats), both keyed by table name
        """
        table_names = list(table_inserts.keys())
        # max(1, ...) guards ThreadPoolExecutor against max_workers == 0 when
        # table_inserts is empty (ThreadPoolExecutor rejects 0)
        max_workers = self.max_parallel_consumers or max(1, min(len(table_names), 10))

        producer_stats: Dict[str, int] = {}
        consumer_stats: Dict[str, any] = {}

        # Function to run producer-consumer pipeline for a single table
        def run_table_pipeline(table_name: str, insert_statements: List[str]):
            """Run producer and multiple consumers for a single table in parallel threads."""
            # Start consumers first (they will wait for messages)
            # Use multiple consumers per table for better parallelism
            consumer_threads = []
            consumer_results = []

            def run_consumer(consumer_id: int):
                """Consumer thread: consume and execute messages."""
                consumer_stats = None
                try:
                    # Check if error already detected before starting
                    if error_event and error_event.is_set():
                        logger.warning(f"Consumer {consumer_id} for table '{table_name}' cancelled due to error")
                        from .kafka_consumer import ConsumerStats
                        # Record the cancellation as an error; the finally block
                        # performs the single append (appending here as well would
                        # double-count this consumer in the aggregation).
                        consumer_stats = ConsumerStats(
                            table_name=table_name,
                            errors=1,
                            end_time=time.time(),
                        )
                        return

                    # Create a unique consumer group per consumer instance for this table
                    # This allows multiple consumers to consume from different partitions
                    stats = self.consumer.process_table_messages(
                        table_name=table_name,
                        execute_sql=execute_sql,
                        max_retries=self.max_retries,
                        batch_commit_size=10,
                        error_event=error_event,  # Pass error event to consumer
                    )
                    consumer_stats = stats

                    # Check for errors after processing
                    if consumer_stats and consumer_stats.errors > 0:
                        logger.error(f"Consumer {consumer_id} for table '{table_name}' reported {consumer_stats.errors} errors")
                        if error_event and on_error:
                            on_error()  # Trigger immediate cleanup
                except Exception as e:
                    logger.error(f"Consumer {consumer_id} error for table '{table_name}': {e}")
                    from .kafka_consumer import ConsumerStats
                    consumer_stats = ConsumerStats(
                        table_name=table_name,
                        errors=1,
                        end_time=time.time(),
                    )
                    if error_event and on_error:
                        on_error()  # Trigger immediate cleanup
                finally:
                    # Single collection point for every exit path above
                    consumer_results.append(consumer_stats)

            # Start multiple consumer threads if configured, based on precomputed partitions
            desired_partitions = 1
            # Read precomputed partitions mapping from controller if available
            try:
                mapping = getattr(self, "_table_to_partitions", None)
                if isinstance(mapping, dict):
                    desired_partitions = max(1, int(mapping.get(table_name, 1)))
            except Exception:
                desired_partitions = 1
            num_consumers = desired_partitions
            if num_consumers > 1:
                logger.info(f"Starting {num_consumers} consumers for table '{table_name}' (partitions: {desired_partitions})")
            else:
                logger.info(f"Starting 1 consumer for table '{table_name}' (partitions: {desired_partitions})")

            for i in range(num_consumers):
                consumer_thread = threading.Thread(
                    target=run_consumer,
                    args=(i,),
                    daemon=False,
                    name=f"Consumer-{table_name}-{i}"
                )
                consumer_thread.start()
                consumer_threads.append(consumer_thread)

            # Wait a bit for consumers to initialize
            time.sleep(0.5)

            # Run producer (in this worker thread)
            try:
                sent_count = self.producer.send_table_messages_async(
                    table_name=table_name,
                    insert_statements=insert_statements,
                    wait_for_send=False,  # Async mode for better throughput
                    create_topic=False,  # Topics already created
                )
                logger.info(f"✓ Producer completed for table '{table_name}': {sent_count} batches sent")
            except Exception as e:
                logger.error(f"Producer error for table '{table_name}': {e}")
                sent_count = 0

            # Wait for all consumers to finish; 4-minute cap per consumer so a
            # stuck consumer cannot hang the whole pipeline
            for consumer_thread in consumer_threads:
                consumer_thread.join(timeout=4 * 60)

            # Aggregate consumer stats from all consumers
            from .kafka_consumer import ConsumerStats
            if consumer_results:
                # Combine stats from all consumers (skip None entries from
                # consumers that died before producing stats)
                total_batches = sum(cs.batches_processed for cs in consumer_results if cs)
                total_rows = sum(cs.rows_inserted for cs in consumer_results if cs)
                total_errors = sum(cs.errors for cs in consumer_results if cs)

                aggregated_stats = ConsumerStats(
                    table_name=table_name,
                    batches_processed=total_batches,
                    rows_inserted=total_rows,
                    errors=total_errors,
                    start_time=min((cs.start_time for cs in consumer_results if cs and cs.start_time), default=None),
                    end_time=max((cs.end_time for cs in consumer_results if cs and cs.end_time), default=None),
                )
            else:
                logger.error(f"No consumers completed for table '{table_name}'")
                aggregated_stats = ConsumerStats(
                    table_name=table_name,
                    errors=1,
                    end_time=time.time(),
                )

            return table_name, sent_count, aggregated_stats

        # Run all tables in parallel using ThreadPoolExecutor
        # Prioritize heavy tables first (more statements) so large topics start earlier
        logger.info(f"Starting streaming pipeline for {len(table_names)} tables with {max_workers} workers")

        # Sort tables by descending number of statements to prioritize heavy tables
        ordered_tables = sorted(
            table_inserts.items(),
            key=lambda kv: len(kv[1]),
            reverse=True,
        )
        logger.debug(
            "Producer start order (heavy first): " +
            ", ".join(f"{t}({len(stmts)})" for t, stmts in ordered_tables)
        )

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = {
                executor.submit(run_table_pipeline, table_name, insert_statements): table_name
                for table_name, insert_statements in ordered_tables
            }

            for future in as_completed(futures):
                # Check if error detected, cancel remaining futures
                if error_event and error_event.is_set():
                    logger.warning("Error detected, cancelling remaining table pipelines...")
                    for remaining_future in futures:
                        if not remaining_future.done():
                            remaining_future.cancel()
                    break

                table_name = futures[future]
                try:
                    result_table_name, prod_count, cons_stats = future.result()
                    producer_stats[result_table_name] = prod_count
                    consumer_stats[result_table_name] = cons_stats

                    # Check for errors in completed table
                    if cons_stats and cons_stats.errors > 0:
                        logger.error(f"Table '{result_table_name}' has {cons_stats.errors} errors")
                        if error_event and on_error:
                            on_error()  # Trigger immediate cleanup
                            break
                except Exception as e:
                    logger.error(f"Error in streaming pipeline for table '{table_name}': {e}")
                    producer_stats[table_name] = 0
                    from .kafka_consumer import ConsumerStats
                    consumer_stats[table_name] = ConsumerStats(
                        table_name=table_name,
                        errors=1,
                        end_time=time.time(),
                    )
                    if error_event and on_error:
                        on_error()  # Trigger immediate cleanup
                        break

        # Flush producer to ensure all messages are sent (blocking to ensure completion)
        flush_duration = self.producer.flush_producer(timeout=60.0, blocking=True)
        if flush_duration > 0:
            logger.info(f"Final flush completed in {flush_duration:.2f}s")

        # Log producer completion statistics. This block was previously nested
        # under the flush-duration check, so stats were silently skipped when
        # the flush returned 0; it now runs unconditionally.
        total_statements = sum(producer_stats.values()) if producer_stats else 0
        logger.info("=" * 60)
        logger.info("✓ Producer completed: All data sent to Kafka")
        logger.info(f"  Total statements sent: {total_statements}")
        logger.info(f"  Flush duration: {flush_duration:.2f}s")
        if producer_stats:
            logger.info("  Per-table statistics:")
            for table_name in sorted(producer_stats.keys()):
                statements = producer_stats.get(table_name, 0)
                logger.info(f"    - {table_name}: {statements} statements")
        logger.info("=" * 60)

        return producer_stats, consumer_stats
    
    def _validate_foreign_key_integrity(self, constraints_sql: str) -> None:
        """
        Validate foreign key integrity before adding constraints.
        Checks if all foreign key references exist in the referenced tables.
        
        Args:
            constraints_sql: Constraints SQL statements
            
        Raises:
            RuntimeError: If foreign key violations are found
        """
        import re
        
        # Parse foreign key constraints from SQL
        fk_pattern = r'ALTER\s+TABLE\s+["\']?(\w+)["\']?\s+ADD\s+CONSTRAINT\s+\w+\s+FOREIGN\s+KEY\s+\(([^)]+)\)\s+REFERENCES\s+["\']?(\w+)["\']?\s+\(([^)]+)\)'
        
        violations = []
        
        for match in re.finditer(fk_pattern, constraints_sql, re.IGNORECASE):
            child_table = match.group(1)
            child_columns = [col.strip().strip('"\'') for col in match.group(2).split(',')]
            parent_table = match.group(3)
            parent_columns = [col.strip().strip('"\'') for col in match.group(4).split(',')]
            
            if len(child_columns) != len(parent_columns):
                logger.warning(f"Skipping FK validation for {child_table}->{parent_table}: column count mismatch")
                continue
            
            # Build validation query
            # Check for orphaned foreign keys (child references that don't exist in parent)
            child_cols_quoted = ', '.join(f'"{col}"' for col in child_columns)
            parent_cols_quoted = ', '.join(f'"{col}"' for col in parent_columns)
            
            # For multi-column foreign keys, we need to check all combinations
            join_conditions = ' AND '.join(
                f'child."{child_columns[i]}" = parent."{parent_columns[i]}"'
                for i in range(len(child_columns))
            )
            
            # For single column FK, use the column directly; for multi-column, use concatenation
            if len(child_columns) == 1:
                value_expr = f'child."{child_columns[0]}"::text'
            else:
                value_expr = "CONCAT_WS('|', " + ', '.join(f'child."{col}"::text' for col in child_columns) + ")"
            
            validation_query = f'''
                SELECT COUNT(*) as orphan_count
                FROM "{child_table}" child
                WHERE NOT EXISTS (
                    SELECT 1 FROM "{parent_table}" parent
                    WHERE {join_conditions}
                )
            '''
            
            try:
                conn = self.db_executor.pool.getconn()
                try:
                    with conn.cursor() as cur:
                        cur.execute(validation_query)
                        result = cur.fetchone()
                        orphan_count = result[0] if result else 0
                        
                        if orphan_count > 0:
                            violations.append(
                                f'Table "{child_table}" has {orphan_count} orphaned foreign key(s) '
                                f'referencing "{parent_table}".'
                            )
                finally:
                    self.db_executor.pool.putconn(conn)
            except Exception as e:
                logger.warning(f"Error validating FK {child_table}->{parent_table}: {e}")
                # Don't fail on validation errors, just log them
        
        if violations:
            error_msg = "\n".join(f"  - {v}" for v in violations)
            raise RuntimeError(f"Foreign key integrity violations found:\n{error_msg}")

    def _ensure_kafka_available(self, retries: int = 3, delay_sec: float = 1.0) -> None:
        """
        Best-effort check that Kafka is reachable before heavy processing.
        Raises RuntimeError if brokers are not available.
        """
        last_err = None
        for _ in range(max(1, retries)):
            try:
                # Use admin client to list topics as a lightweight health check
                if self.producer and getattr(self.producer, 'admin_client', None):
                    # kafka-python AdminClient may raise if no brokers available
                    # list_topics() in kafka-python doesn't accept a timeout kwarg
                    _ = self.producer.admin_client.list_topics()
                    return
            except Exception as e:
                last_err = e
                import time as _t
                _t.sleep(max(0.1, delay_sec))
        raise RuntimeError(
            "No Kafka brokers available at configured servers. "
            "Please ensure the broker is running and advertised.listeners points to this host:port."
            + (f" Last error: {last_err}" if last_err else "")
        )
    
    def _send_inserts_from_file_streaming(
        self, 
        insert_file_path: Path, 
        results: Dict[str, any],
        checkpoint: Optional[Dict[str, set]] = None,
        use_transaction: bool = False,
        ddl_sql: str = ""
    ) -> None:
        """
        Stream INSERT statements from file and send to Kafka without loading entire file into memory.
        
        Two passes over the file:
          1. Count INSERT statements per table (to size Kafka topics/partitions
             and the DB connection pool).
          2. Stream statements through the producer-consumer pipeline
             (delegated to _run_streaming_pipeline_from_file).
        
        Args:
            insert_file_path: Path to INSERT SQL file
            results: Results dictionary to update (mutated in place)
            checkpoint: Optional checkpoint for rollback
            use_transaction: Whether using transaction mode
            ddl_sql: DDL SQL for rollback context
        
        Raises:
            RuntimeError: If the pipeline fails or any consumer reports errors.
        """
        from collections import defaultdict
        
        # First pass: stream through file to count statements per table and create topics
        logger.info("First pass: Counting INSERT statements per table...")
        table_counts: Dict[str, int] = defaultdict(int)
        canon_map: Dict[str, str] = {}  # lowercase -> canonical case
        
        chunk_size = 1024 * 1024  # 1MB chunks
        buffer = ""
        
        # NOTE(review): statements are split on the literal ');' terminator —
        # assumes ');' never appears inside a quoted value in the generated
        # INSERT files; confirm against the file generator.
        with open(insert_file_path, 'r', encoding='utf-8') as f:
            while True:
                chunk = f.read(chunk_size)
                if not chunk:
                    break
                
                buffer += chunk
                
                # Process complete statements from buffer
                while True:
                    end_pos = buffer.find(');')
                    if end_pos == -1:
                        # No complete statement, break to read more
                        if len(buffer) > 10 * 1024 * 1024:  # 10MB buffer limit
                            # Find last safe split
                            last_split = buffer.rfind(');')
                            if last_split > 0:
                                buffer = buffer[last_split + 2:]
                            else:
                                # NOTE(review): this discards buffered text, so a
                                # single statement larger than 10MB would be lost
                                # from the count — confirm statements stay smaller.
                                buffer = ""  # Clear buffer if too large
                        break
                    
                    # Found end of statement
                    stmt = buffer[:end_pos + 2].strip()
                    buffer = buffer[end_pos + 2:]
                    
                    if stmt and stmt.upper().startswith('INSERT'):
                        # Extract table name
                        table_name = self.producer._parse_table_name(stmt)
                        if table_name:
                            # Fold case variants of the same table into one
                            # canonical spelling (the first one seen)
                            key = table_name.lower()
                            if key not in canon_map:
                                canon_map[key] = table_name
                            table_counts[canon_map[key]] += 1
        
        # Process remaining buffer: a trailing statement at EOF that is
        # missing its ');' terminator
        if buffer.strip():
            stmt = buffer.strip()
            if stmt.upper().startswith('INSERT'):
                if not stmt.endswith(');'):
                    stmt += ');'
                table_name = self.producer._parse_table_name(stmt)
                if table_name:
                    key = table_name.lower()
                    if key not in canon_map:
                        canon_map[key] = table_name
                    table_counts[canon_map[key]] += 1
        
        if not table_counts:
            # Nothing to do — mark the stage complete and bail out early
            logger.warning("No INSERT statements found in file")
            results["inserts_sent"] = True
            results["inserts_completed"] = True
            return
        
        logger.info(f"Found {sum(table_counts.values())} INSERT statements in {len(table_counts)} tables")
        for table, count in table_counts.items():
            logger.info(f"  Table '{table}': {count} statements")
        
        # Create topics based on counts: roughly one partition per 50
        # statements, capped at MAX_PARTITIONS
        MAX_PARTITIONS = 6
        table_to_partitions: Dict[str, int] = {}
        for table, count in table_counts.items():
            desired_partitions = (count + 49) // 50 if count > 0 else 1
            desired_partitions = min(max(desired_partitions, 1), MAX_PARTITIONS)
            table_to_partitions[table] = desired_partitions
            logger.info(f"Planned topic: {self.producer.topic_prefix}_{table} partitions={desired_partitions} (messages={count})")
        
        # Bulk create topics
        try:
            self.producer.create_kafka_topics_bulk(table_to_partitions)
        except Exception as e:
            logger.warning(f"Bulk topic creation had issues: {e}")
        import time as _t
        # Give the broker a moment to propagate topic metadata
        _t.sleep(2)
        
        # Auto-scale DB connection pool so every planned consumer can hold a
        # connection (20% headroom plus two spares). Best-effort: a failure
        # here only logs a warning and keeps the old pool.
        try:
            total_consumers_planned = sum(max(1, int(p or 1)) for p in table_to_partitions.values())
            desired_max_pool = max(self.db_executor.max_connections, int(total_consumers_planned * 1.2) + 2)
            desired_min_pool = max(self.db_executor.min_connections, min(2, desired_max_pool))
            if desired_max_pool > self.db_executor.max_connections:
                logger.info(
                    f"Scaling DB connection pool for INSERT stage: "
                    f"planned_consumers={total_consumers_planned}, "
                    f"old_pool={self.db_executor.min_connections}-{self.db_executor.max_connections} -> "
                    f"new_pool={desired_min_pool}-{desired_max_pool}"
                )
                # Replace the pool wholesale: close existing connections and
                # rebuild with the larger bounds
                if self.db_executor.pool:
                    self.db_executor.pool.closeall()
                from psycopg2.pool import ThreadedConnectionPool
                from .db_executor import parse_connection_string
                conn_params = parse_connection_string(self.db_executor.connection_string)
                self.db_executor.pool = ThreadedConnectionPool(
                    desired_min_pool,
                    desired_max_pool,
                    host=conn_params['host'],
                    port=conn_params['port'],
                    user=conn_params['user'],
                    password=conn_params['password'],
                    database=conn_params['database'],
                    sslmode=conn_params.get('sslmode', 'disable'),
                )
                self.db_executor.min_connections = desired_min_pool
                self.db_executor.max_connections = desired_max_pool
                logger.info("✓ Connection pool scaled up for INSERT stage")
        except Exception as e:
            logger.warning(f"Failed to auto-scale DB connection pool: {e}")
        
        # Store partition mapping for consumer threads
        self._table_to_partitions = table_to_partitions
        
        # Stage 3: Parallel producer-consumer pipeline for each table (same structure as memory mode)
        logger.info("\n" + "="*60)
        logger.info("Stage 3: Streaming producer-consumer pipeline (parallel)")
        logger.info("="*60)
        logger.info("Starting streaming mode: producers send while consumers consume in parallel")
        
        # Execute SQL callback - attach db_executor for batch execution
        def execute_insert_sql(sql: str):
            self.db_executor.execute_sql(sql)
        
        # Attach db_executor to callback so consumer can use execute_sql_batch
        execute_insert_sql.__self__ = type('obj', (object,), {'execute_sql_batch': self.db_executor.execute_sql_batch})()
        # Alternative: attach directly
        execute_insert_sql.db_executor = self.db_executor
        
        # Start streaming producer-consumer pipeline for all tables in parallel
        # Use a shared error flag to detect errors immediately
        import threading
        error_detected = threading.Event()
        error_lock = threading.Lock()
        
        def handle_insert_error():
            """Handle INSERT error: immediately cleanup and rollback (runs at most once)."""
            # Fast path: another thread already handled the error
            if error_detected.is_set():
                return  # Already handling error
            
            # Lock + re-check so exactly one thread performs the cleanup
            with error_lock:
                if error_detected.is_set():
                    return
                error_detected.set()
            
            logger.error("✗ INSERT error detected - immediately stopping and cleaning up...")
            results["inserts_completed"] = False
            
            # Immediately cleanup Kafka resources
            try:
                logger.info("Cleaning up Kafka resources immediately...")
                self._cleanup_kafka_resources()
                logger.info("✓ Kafka resources cleaned up")
            except Exception as kafka_err:
                logger.warning(f"Error cleaning up Kafka: {kafka_err}")
            
            # If using transaction mode, DDL was already committed, so we need to rollback to checkpoint
            if use_transaction and checkpoint:
                logger.error("✗ INSERT stage failed - rolling back to checkpoint")
                try:
                    self._rollback_to_checkpoint(checkpoint, ddl_sql)
                except Exception as rollback_err:
                    logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
        
        # Run streaming pipeline with file-based producer (same structure as _run_streaming_pipeline)
        try:
            producer_stats, consumer_stats = self._run_streaming_pipeline_from_file(
                insert_file_path=insert_file_path,
                table_counts=table_counts,
                canon_map=canon_map,
                table_to_partitions=table_to_partitions,
                execute_sql=execute_insert_sql,
                error_event=error_detected,
                on_error=handle_insert_error,
            )
        except Exception as pipeline_err:
            # Pipeline error - trigger cleanup
            handle_insert_error()
            raise RuntimeError(f"INSERT pipeline failed: {pipeline_err}")
        
        results["producer_stats"] = producer_stats
        results["consumer_stats"] = {
            name: {
                "batches_processed": stats.batches_processed,
                "rows_inserted": stats.rows_inserted,
                "errors": stats.errors,
                "duration": stats.duration,
            }
            for name, stats in consumer_stats.items()
        }
        
        # Check for errors after pipeline completes
        if error_detected.is_set() or any(v.get("errors", 0) for v in results["consumer_stats"].values()):
            # Ensure rollback happens even if handle_insert_error wasn't called
            if use_transaction and checkpoint:
                logger.error("✗ INSERT stage failed - rolling back to checkpoint")
                try:
                    self._rollback_to_checkpoint(checkpoint, ddl_sql)
                except Exception as rollback_err:
                    logger.error(f"Error during rollback to checkpoint: {rollback_err}", exc_info=True)
            raise RuntimeError("Consumer errors detected, aborting before constraints")
        
        results["inserts_sent"] = True
        results["inserts_completed"] = True
        logger.info("✓ All INSERT statements executed")
    
    def _run_streaming_pipeline_from_file(
        self,
        insert_file_path: Path,
        table_counts: Dict[str, int],
        canon_map: Dict[str, str],
        table_to_partitions: Dict[str, int],
        execute_sql,
        error_event=None,
        on_error=None,
    ) -> tuple[Dict[str, int], Dict[str, any]]:
        """
        Run streaming producer-consumer pipeline from file (same structure as _run_streaming_pipeline).
        Streams INSERT statements from file and sends to Kafka without loading entire file into memory.
        
        Args:
            insert_file_path: Path to INSERT SQL file
            table_counts: Dictionary mapping table names to their statement counts
            canon_map: Dictionary mapping lowercase table names to canonical case
            table_to_partitions: Dictionary mapping table names to partition counts
            execute_sql: Function to execute SQL statements
            error_event: Optional threading.Event to signal errors
            on_error: Optional callback function for error handling
            
        Returns:
            Tuple of (producer_stats, consumer_stats)
        """
        from collections import defaultdict
        
        table_names = list(table_counts.keys())
        max_workers = self.max_parallel_consumers or min(len(table_names), 10)
        
        producer_stats: Dict[str, int] = {}
        consumer_stats: Dict[str, any] = {}
        
        # Function to run producer-consumer pipeline for a single table (from file)
        # Same structure as _run_streaming_pipeline, but consumers are started separately
        def run_table_pipeline_from_file(table_name: str):
            """Run producer and multiple consumers for a single table in parallel threads (same structure as _run_streaming_pipeline)."""
            import threading
            
            # Start consumers first (they will wait for messages)
            # Use multiple consumers per table for better parallelism
            consumer_threads = []
            consumer_results = []
            
            def run_consumer(consumer_id: int):
                """Consumer thread: consume and execute messages."""
                consumer_stats = None
                try:
                    # Check if error already detected before starting
                    if error_event and error_event.is_set():
                        logger.warning(f"Consumer {consumer_id} for table '{table_name}' cancelled due to error")
                        from .kafka_consumer import ConsumerStats
                        consumer_stats = ConsumerStats(
                            table_name=table_name,
                            errors=1,
                            end_time=time.time(),
                        )
                        consumer_results.append(consumer_stats)
                        return
                    
                    # Create a unique consumer group per consumer instance for this table
                    # This allows multiple consumers to consume from different partitions
                    stats = self.consumer.process_table_messages(
                        table_name=table_name,
                        execute_sql=execute_sql,
                        max_retries=self.max_retries,
                        batch_commit_size=10,
                        error_event=error_event,  # Pass error event to consumer
                    )
                    consumer_stats = stats
                    
                    # Check for errors after processing
                    if consumer_stats and consumer_stats.errors > 0:
                        logger.error(f"Consumer {consumer_id} for table '{table_name}' reported {consumer_stats.errors} errors")
                        if error_event and on_error:
                            on_error()  # Trigger immediate cleanup
                except Exception as e:
                    logger.error(f"Consumer {consumer_id} error for table '{table_name}': {e}")
                    from .kafka_consumer import ConsumerStats
                    consumer_stats = ConsumerStats(
                        table_name=table_name,
                        errors=1,
                        end_time=time.time(),
                    )
                    if error_event and on_error:
                        on_error()  # Trigger immediate cleanup
                finally:
                    consumer_results.append(consumer_stats)
            
            # Start multiple consumer threads if configured, based on precomputed partitions
            desired_partitions = table_to_partitions.get(table_name, 1)
            num_consumers = desired_partitions
            if num_consumers > 1:
                logger.info(f"Starting {num_consumers} consumers for table '{table_name}' (partitions: {desired_partitions})")
            else:
                logger.info(f"Starting 1 consumer for table '{table_name}' (partitions: {desired_partitions})")
            
            for i in range(num_consumers):
                consumer_thread = threading.Thread(
                    target=run_consumer,
                    args=(i,),
                    daemon=False,
                    name=f"Consumer-{table_name}-{i}"
                )
                consumer_thread.start()
                consumer_threads.append(consumer_thread)
            
            # Return consumer threads and results for later aggregation
            # Producer will be handled separately - all tables share one file stream
            return consumer_threads, consumer_results
        
        # Start all consumers first (same structure as _run_streaming_pipeline)
        # Store consumer threads and results for each table
        table_consumer_data = {}
        
        for table_name in table_names:
            if error_event and error_event.is_set():
                break
            consumer_threads, consumer_results = run_table_pipeline_from_file(table_name)
            table_consumer_data[table_name] = {
                'threads': consumer_threads,
                'results': consumer_results,
            }
        
        # Wait a bit for all consumers to initialize (same as _run_streaming_pipeline)
        time.sleep(0.5)
        
        # Direct producer architecture (no queues, no worker threads):
        # - Main thread: reads file, parses statements, and directly sends to Kafka producer
        # - Each table has its own buffer that accumulates statements
        # - When buffer reaches batch size, send immediately to Kafka
        chunk_size = 1024 * 1024  # 1MB chunks
        batch_size = 50  # Send in batches of 50 statements per table
        
        # Per-table buffers for accumulating statements before sending
        table_buffers: Dict[str, List[str]] = {table_name: [] for table_name in table_names}
        table_stats: Dict[str, int] = {table_name: 0 for table_name in table_names}  # Track statements sent per table
        table_batch_counts: Dict[str, int] = {table_name: 0 for table_name in table_names}  # Track batches sent per table
        
        def send_table_batch(table_name: str, force: bool = False):
            """Flush a table's accumulated statements to Kafka.

            Sends when the buffer has reached ``batch_size``, or whenever
            ``force`` is True and at least one statement is pending.
            Unknown table names are silently ignored.
            """
            if table_name not in table_buffers:
                return

            buffer = table_buffers[table_name]
            buffer_size = len(buffer)
            ready = buffer_size >= batch_size
            if not ready and not (force and buffer_size > 0):
                # Buffer not full and no forced partial send requested.
                return

            try:
                # Time the send so slow (possibly blocking) calls are visible in logs.
                started_at = time.time()
                self.producer.send_table_messages_async(
                    table_name,
                    buffer,
                    wait_for_send=False,
                    create_topic=False,
                    flush_immediately=False,  # We'll flush periodically (non-blocking)
                )
                send_duration = time.time() - started_at
                if send_duration > 1.0:
                    logger.warning(
                        f"Send operation for table '{table_name}' took {send_duration:.2f}s "
                        f"(batch size: {buffer_size}). This may indicate buffer blocking."
                    )
                table_stats[table_name] += buffer_size
                table_batch_counts[table_name] += 1

                # Every 10th batch, kick off a background (non-blocking) flush so the
                # producer's internal buffer cannot fill up and stall file reading.
                if table_batch_counts[table_name] % 10 == 0:
                    try:
                        self.producer.flush_producer(timeout=10.0, blocking=False)
                        logger.debug(f"Flush started in background after batch {table_batch_counts[table_name]} for table '{table_name}'")
                    except Exception as e:
                        logger.warning(f"Error starting flush for table '{table_name}': {e}")

                logger.debug(f"Sent batch of {buffer_size} statements for table '{table_name}' (total: {table_stats[table_name]}, batches: {table_batch_counts[table_name]})")
                table_buffers[table_name] = []  # Start a fresh buffer for this table
            except Exception as e:
                logger.error(f"Error sending batch for table '{table_name}': {e}", exc_info=True)
                if error_event:
                    error_event.set()
                            
        # Main thread: read file and send directly to producer (no queues, no worker threads)
        logger.info("Streaming INSERT statements from file and sending directly to Kafka producer...")
        file_buffer = ""
        statements_parsed = 0
        statements_distributed = {table: 0 for table in table_names}
        
        try:
            file_finished = False
            with open(insert_file_path, 'r', encoding='utf-8') as f:
                while True:
                    if error_event and error_event.is_set():
                        logger.warning("File reading stopped due to error event")
                        break
                    
                    chunk = f.read(chunk_size)
                    if not chunk:
                        logger.info(f"Finished reading file. Total statements parsed: {statements_parsed}")
                        file_finished = True
                        # Log current state of all tables
                        for table_name in table_buffers.keys():
                            buffer_size = len(table_buffers[table_name])
                            processed = statements_distributed.get(table_name, 0)
                            total = table_counts.get(table_name, 0)
                            if buffer_size > 0 or processed < total:
                                logger.info(f"  Table '{table_name}': processed {processed}/{total}, buffer size: {buffer_size}")
                    
                    if chunk:
                        file_buffer += chunk
                    
                    # Process complete statements
                    while True:
                        if error_event and error_event.is_set():
                            logger.warning("Statement processing stopped due to error event")
                            break
                        
                        end_pos = file_buffer.find(');')
                        if end_pos == -1:
                            # No more complete statements in buffer
                            if file_finished:
                                # File is finished, process any remaining partial statement
                                if file_buffer.strip():
                                    # Try to process remaining buffer as final statement
                                    stmt = file_buffer.strip()
                                    if stmt.endswith(';'):
                                        statements_parsed += 1
                                        parsed_table = self.producer._parse_table_name(stmt)
                                        if parsed_table:
                                            parsed_key = parsed_table.lower()
                                            canonical_table = canon_map.get(parsed_key, parsed_table)
                                            
                                            if canonical_table not in table_buffers:
                                                logger.warning(f"Table '{canonical_table}' not found in pre-scanned tables, creating buffer on-the-fly")
                                                table_buffers[canonical_table] = []
                                                table_stats[canonical_table] = 0
                                                table_batch_counts[canonical_table] = 0
                                                statements_distributed[canonical_table] = 0
                                            
                                            table_buffers[canonical_table].append(stmt)
                                            statements_distributed[canonical_table] = statements_distributed.get(canonical_table, 0) + 1
                                            logger.debug(f"Added final statement {statements_distributed[canonical_table]} to buffer for table '{canonical_table}' (buffer size: {len(table_buffers[canonical_table])})")
                                            # Force send final batch for this table
                                            send_table_batch(canonical_table, force=True)
                                        file_buffer = ""
                                # File finished, send all remaining buffers immediately
                                logger.info("File reading finished. Sending all remaining buffers immediately...")
                                for table_name in table_buffers.keys():
                                    if table_buffers[table_name]:
                                        logger.debug(f"Sending remaining {len(table_buffers[table_name])} statements for table '{table_name}' (file finished)")
                                        send_table_batch(table_name, force=True)
                            else:
                                # File not finished yet, check buffer size limit
                                if len(file_buffer) > 10 * 1024 * 1024:  # 10MB buffer limit
                                    last_split = file_buffer.rfind(');')
                                    if last_split > 0:
                                        file_buffer = file_buffer[last_split + 2:]
                                    else:
                                        file_buffer = ""
                            break
                        
                        stmt = file_buffer[:end_pos + 2].strip()
                        file_buffer = file_buffer[end_pos + 2:]
                        statements_parsed += 1
                        
                        # Parse table name directly (insert file contains only INSERT statements)
                        parsed_table = self.producer._parse_table_name(stmt)
                        if parsed_table:
                            parsed_key = parsed_table.lower()
                            canonical_table = canon_map.get(parsed_key, parsed_table)
                            
                            # Handle case where table might not be in pre-scanned list
                            if canonical_table not in table_buffers:
                                logger.warning(f"Table '{canonical_table}' not found in pre-scanned tables, creating buffer on-the-fly")
                                table_buffers[canonical_table] = []
                                table_stats[canonical_table] = 0
                                table_batch_counts[canonical_table] = 0
                                statements_distributed[canonical_table] = 0
                            
                            # Add statement to table's buffer
                            table_buffers[canonical_table].append(stmt)
                            statements_distributed[canonical_table] = statements_distributed.get(canonical_table, 0) + 1
                            
                            # Log first few statements for each table
                            if statements_distributed[canonical_table] <= 3:
                                logger.debug(f"Added statement {statements_distributed[canonical_table]} to buffer for table '{canonical_table}' (buffer size: {len(table_buffers[canonical_table])})")
                            
                            # Send batch if buffer is full
                            send_table_batch(canonical_table, force=False)
                            
                            # Check if this table's data is complete: if processed count equals total count, send immediately
                            total_count = table_counts.get(canonical_table, 0)
                            processed_count = statements_distributed[canonical_table]
                            if total_count > 0 and processed_count >= total_count:
                                # This table's data is complete, send remaining buffer immediately
                                if table_buffers[canonical_table]:
                                    logger.debug(f"Table '{canonical_table}' data complete ({processed_count}/{total_count}), sending remaining {len(table_buffers[canonical_table])} statements immediately")
                                    send_table_batch(canonical_table, force=True)
                                    # Immediately flush to prevent buffer blocking for other tables
                                    # Use non-blocking mode to avoid blocking file reading
                                    try:
                                        flush_duration = self.producer.flush_producer(timeout=15.0, blocking=False)  # Non-blocking: flush in background
                                        logger.debug(f"Flush started in background after completing table '{canonical_table}'")
                                    except Exception as e:
                                        logger.warning(f"Error starting flush after table '{canonical_table}' completion: {e}")
                            
                            # If file is finished and this is the last statement, force send immediately
                            if file_finished and not file_buffer.strip():
                                logger.debug(f"File finished, forcing send for table '{canonical_table}' with {len(table_buffers[canonical_table])} statements in buffer")
                                send_table_batch(canonical_table, force=True)
                            
                            # Periodic flush every 200 statements to prevent buffer blocking
                            # Use non-blocking mode to avoid blocking file reading
                            if statements_parsed % 200 == 0:
                                try:
                                    self.producer.flush_producer(timeout=10.0, blocking=False)  # Non-blocking: flush in background
                                except Exception as e:
                                    logger.warning(f"Error starting periodic flush: {e}")
                            
                            # Log progress every 1000 statements per table
                            if statements_distributed[canonical_table] % 1000 == 0:
                                logger.info(f"Processed {statements_distributed[canonical_table]} statements for table '{canonical_table}' (sent: {table_stats[canonical_table]}, batches: {table_batch_counts[canonical_table]})")
                        else:
                            # Log warning if table name cannot be parsed
                            if statements_parsed % 10000 == 0:
                                logger.debug(f"Could not parse table name from statement (parsed {statements_parsed} statements so far)")
                            else:
                                logger.warning(f"Could not parse table name from statement (first 100 chars): {stmt[:100]}")
                    
                    # If file is finished, break outer loop
                    if file_finished:
                        break
            
            # Log final distribution statistics
            logger.info(f"File reading completed. Statements processed by table:")
            for table_name, count in statements_distributed.items():
                logger.info(f"  Table '{table_name}': {count} statements processed, {table_stats.get(table_name, 0)} sent, {table_batch_counts.get(table_name, 0)} batches")
            
            # Final check: send any remaining buffers (should be empty if logic above worked correctly)
            remaining_count = sum(len(buf) for buf in table_buffers.values())
            if remaining_count > 0:
                logger.info(f"Found {remaining_count} remaining statements in buffers, sending now...")
                for table_name in table_buffers.keys():
                    if table_buffers[table_name]:
                        logger.debug(f"Sending final remaining {len(table_buffers[table_name])} statements for table '{table_name}'")
                        send_table_batch(table_name, force=True)
            else:
                logger.debug("All buffers are empty, no additional sending needed")
            
            # Final flush to ensure all messages are sent
            # Use blocking mode with timeout to ensure all messages are sent before continuing
            logger.info("Performing final flush to ensure all messages are sent...")
            try:
                flush_duration = self.producer.flush_producer(timeout=60.0, blocking=True)  # Blocking: wait for completion
                logger.info(f"Final flush completed in {flush_duration:.2f}s")
                
                # Log producer completion statistics
                total_statements = sum(table_stats.values())
                total_batches = sum(table_batch_counts.values())
                logger.info("=" * 60)
                logger.info("✓ Producer completed: All data sent to Kafka")
                logger.info(f"  Total statements sent: {total_statements}")
                logger.info(f"  Total batches sent: {total_batches}")
                logger.info(f"  Flush duration: {flush_duration:.2f}s")
                logger.info("  Per-table statistics:")
                for table_name in sorted(table_stats.keys()):
                    statements = table_stats.get(table_name, 0)
                    batches = table_batch_counts.get(table_name, 0)
                    logger.info(f"    - {table_name}: {statements} statements, {batches} batches")
                logger.info("=" * 60)
            except Exception as e:
                logger.warning(f"Error during final flush: {e}")
        except Exception as e:
            logger.error(f"Error reading file: {e}", exc_info=True)
            if error_event:
                error_event.set()
        finally:
            # Send any remaining buffers in case of error
            for table_name in table_buffers.keys():
                if table_buffers[table_name]:
                    logger.info(f"Sending remaining {len(table_buffers[table_name])} statements for table '{table_name}' in finally block")
                    send_table_batch(table_name, force=True)
            
            # Final flush (blocking to ensure all messages are sent)
            try:
                flush_duration = self.producer.flush_producer(timeout=60.0, blocking=True)  # Blocking: wait for completion
                if flush_duration > 0:
                    logger.info(f"Final flush in finally block completed in {flush_duration:.2f}s")
                    # Log producer completion statistics if available
                    if 'table_stats' in locals() and table_stats:
                        total_statements = sum(table_stats.values())
                        total_batches = sum(table_batch_counts.values()) if 'table_batch_counts' in locals() else 0
                        logger.info("=" * 60)
                        logger.info("✓ Producer completed: All data sent to Kafka (finally block)")
                        logger.info(f"  Total statements sent: {total_statements}")
                        if total_batches > 0:
                            logger.info(f"  Total batches sent: {total_batches}")
                        logger.info(f"  Flush duration: {flush_duration:.2f}s")
                        logger.info("=" * 60)
            except Exception as e:
                logger.warning(f"Error during final flush in finally block: {e}")
        
        # Wait for all consumers to finish (same as _run_streaming_pipeline)
        for table_name, data in table_consumer_data.items():
            for consumer_thread in data['threads']:
                consumer_thread.join(timeout=3600)  # 1 hour timeout
            
            # Aggregate consumer stats from all consumers (same structure as _run_streaming_pipeline)
            consumer_results = data['results']
            if consumer_results:
                from .kafka_consumer import ConsumerStats
                total_batches = sum(cs.batches_processed for cs in consumer_results if cs)
                total_rows = sum(cs.rows_inserted for cs in consumer_results if cs)
                total_errors = sum(cs.errors for cs in consumer_results if cs)
                
                aggregated_stats = ConsumerStats(
                    table_name=table_name,
                    batches_processed=total_batches,
                    rows_inserted=total_rows,
                    errors=total_errors,
                    start_time=min((cs.start_time for cs in consumer_results if cs and cs.start_time), default=None),
                    end_time=max((cs.end_time for cs in consumer_results if cs and cs.end_time), default=None),
                )
            else:
                from .kafka_consumer import ConsumerStats
                aggregated_stats = ConsumerStats(
                    table_name=table_name,
                    errors=1,
                    end_time=time.time(),
                )
            
            consumer_stats[table_name] = aggregated_stats
        
        # Update producer_stats with table_stats
        producer_stats = table_stats.copy()
        
        # Flush producer to ensure all messages are sent (blocking to ensure completion)
        flush_duration = self.producer.flush_producer(timeout=60.0, blocking=True)  # Blocking: wait for completion
        if flush_duration > 0:
            logger.info(f"Final flush completed in {flush_duration:.2f}s")
            
            # Log producer completion statistics
            total_statements = sum(producer_stats.values()) if producer_stats else 0
            logger.info("=" * 60)
            logger.info("✓ Producer completed: All data sent to Kafka")
            logger.info(f"  Total statements sent: {total_statements}")
            logger.info(f"  Flush duration: {flush_duration:.2f}s")
            if producer_stats:
                logger.info("  Per-table statistics:")
                for table_name in sorted(producer_stats.keys()):
                    statements = producer_stats.get(table_name, 0)
                    logger.info(f"    - {table_name}: {statements} statements")
            logger.info("=" * 60)
        
        return producer_stats, consumer_stats

    def _validate_foreign_key_integrity(self, constraints_sql: str) -> None:
        """
        Validate foreign key integrity before adding constraints.
        Checks if all foreign key references exist in the referenced tables.
        
        Args:
            constraints_sql: Constraints SQL statements
            
        Raises:
            RuntimeError: If foreign key violations are found
        """
        import re
        
        # Parse foreign key constraints from SQL
        fk_pattern = r'ALTER\s+TABLE\s+["\']?(\w+)["\']?\s+ADD\s+CONSTRAINT\s+\w+\s+FOREIGN\s+KEY\s+\(([^)]+)\)\s+REFERENCES\s+["\']?(\w+)["\']?\s+\(([^)]+)\)'
        
        violations = []
        
        for match in re.finditer(fk_pattern, constraints_sql, re.IGNORECASE):
            child_table = match.group(1)
            child_columns = [col.strip().strip('"\'') for col in match.group(2).split(',')]
            parent_table = match.group(3)
            parent_columns = [col.strip().strip('"\'') for col in match.group(4).split(',')]
            
            if len(child_columns) != len(parent_columns):
                logger.warning(f"Skipping FK validation for {child_table}->{parent_table}: column count mismatch")
                continue
            
            # Build validation query
            # Check for orphaned foreign keys (child references that don't exist in parent)
            child_cols_quoted = ', '.join(f'"{col}"' for col in child_columns)
            parent_cols_quoted = ', '.join(f'"{col}"' for col in parent_columns)
            
            # For multi-column foreign keys, we need to check all combinations
            join_conditions = ' AND '.join(
                f'child."{child_columns[i]}" = parent."{parent_columns[i]}"'
                for i in range(len(child_columns))
            )
            
            # For single column FK, use the column directly; for multi-column, use concatenation
            if len(child_columns) == 1:
                value_expr = f'child."{child_columns[0]}"::text'
            else:
                value_expr = "CONCAT_WS('|', " + ', '.join(f'child."{col}"::text' for col in child_columns) + ")"
            
            validation_query = f'''
                SELECT COUNT(*) as orphan_count
                FROM "{child_table}" child
                WHERE NOT EXISTS (
                    SELECT 1 FROM "{parent_table}" parent
                    WHERE {join_conditions}
                )
            '''
            
            try:
                conn = self.db_executor.pool.getconn()
                try:
                    with conn.cursor() as cur:
                        cur.execute(validation_query)
                        result = cur.fetchone()
                        orphan_count = result[0] if result else 0
                        
                        if orphan_count > 0:
                            violations.append(
                                f'Table "{child_table}" has {orphan_count} orphaned foreign key(s) '
                                f'referencing "{parent_table}".'
                            )
                finally:
                    self.db_executor.pool.putconn(conn)
            except Exception as e:
                logger.warning(f"Error validating FK {child_table}->{parent_table}: {e}")
                # Don't fail on validation errors, just log them
        
        if violations:
            error_msg = "\n".join(f"  - {v}" for v in violations)
            raise RuntimeError(f"Foreign key integrity violations found:\n{error_msg}")

