"""
Kafka Producer for parallel table insertion.
Handles topic creationbuffer_memory: int = 1073741824 and batch message sending.
"""
from __future__ import annotations

import json
import re
import logging
from dataclasses import dataclass, asdict
from typing import Dict, List, Optional, Set
from collections import defaultdict

from kafka import KafkaProducer, KafkaAdminClient
from kafka.admin import NewTopic
from kafka.errors import TopicAlreadyExistsError, KafkaError

# Module-level logger following the standard per-module logging convention.
logger = logging.getLogger(__name__)

# Prefer high-performance JSON if available; orjson serializes directly to
# UTF-8 bytes, avoiding a separate encode step in the producer serializer.
try:
    import orjson as _fastjson
except Exception:  # pragma: no cover
    _fastjson = None


@dataclass
class InsertBatch:
    """Represents a batch of INSERT statements for a table.

    Instances are serialized (via to_dict) into Kafka message payloads.
    """
    table_name: str            # Target table the statements insert into
    batch_id: int              # Index of this batch within the table's batches
    total_batches: int         # Total number of batches for the table
    sql_statements: List[str]  # Raw INSERT SQL statements in this batch
    row_count: int             # Number of rows covered by sql_statements
    
    def to_dict(self) -> dict:
        """Return a plain-dict form suitable for JSON serialization."""
        return asdict(self)


class KafkaInsertProducer:
    """Producer for sending INSERT statements to Kafka topics."""
    
    def __init__(
        self,
        bootstrap_servers: str | List[str] = "localhost:9092",
        topic_prefix: str = "sql_migration_insert",
        max_message_size: int = 134217728,  # 128MB; align with broker/topic message.max.bytes
        topic_partitions: int = 4,  # Number of partitions per topic (enables parallelism)
        # Kafka producer tuning
        compression_type: str = "lz4",
        acks: str | int = 1,
        linger_ms: int = 0,  # 0ms for fully streaming: send immediately without artificial delay
        kafka_batch_bytes: int = 1048576,  # 1MB producer batch buffer per partition
        buffer_memory: int = 1610612736,  # 1.5GiB total producer buffer
        max_in_flight_requests_per_connection: int = 5,
    ):
        """
        Initialize the Kafka producer and admin client.

        Args:
            bootstrap_servers: Kafka broker address(es).
            topic_prefix: Prefix for topic names (table names will be appended).
            max_message_size: Maximum producer request size in bytes; must not
                exceed the broker/topic ``message.max.bytes`` setting.
            topic_partitions: Default number of partitions for created topics
                (higher values enable more consumer-side parallelism).
            compression_type: Compression codec ('lz4', 'snappy', 'gzip' or
                'none'); automatically falls back if the codec's Python
                library is not installed.
            acks: Producer acknowledgement policy (0, 1 or 'all').
            linger_ms: Time the producer may wait to fill a batch before
                sending; 0 sends immediately (streaming mode).
            kafka_batch_bytes: Per-partition producer batch buffer in bytes.
            buffer_memory: Total memory the producer may use to buffer
                unsent records.
            max_in_flight_requests_per_connection: Maximum unacknowledged
                requests allowed per broker connection.
        """
        self.bootstrap_servers = bootstrap_servers
        self.topic_prefix = topic_prefix
        self.max_message_size = max_message_size
        self.topic_partitions = topic_partitions
        self.compression_type = compression_type
        self.acks = acks
        self.linger_ms = linger_ms
        self.kafka_batch_bytes = kafka_batch_bytes
        self.buffer_memory = buffer_memory
        self.max_in_flight_requests_per_connection = max_in_flight_requests_per_connection
        # Track rows sent per table for consistency checks
        self.rows_sent_per_table: dict[str, int] = {}
        # Track which topics have had metadata warmed up
        self._metadata_warmed_topics: set[str] = set()

        # Validate and auto-fallback compression type if library not available
        actual_compression = self._validate_compression_type(self.compression_type)
        if actual_compression != self.compression_type:
            logger.info(
                f"Compression type '{self.compression_type}' not available, "
                f"falling back to '{actual_compression}'"
            )
            self.compression_type = actual_compression

        # Track created topics for cleanup
        self._created_topics: Set[str] = set()

        # Create producer with compression and batch settings.
        # Defaults aim for high throughput; can be overridden via constructor.
        self.producer = KafkaProducer(
            bootstrap_servers=bootstrap_servers,
            # Use fastest available serializer; orjson returns UTF-8 bytes directly
            value_serializer=(_fastjson.dumps if _fastjson else (lambda v: json.dumps(v, ensure_ascii=False).encode('utf-8'))),
            compression_type=self.compression_type,
            batch_size=self.kafka_batch_bytes,
            linger_ms=self.linger_ms,
            max_request_size=max_message_size,
            retries=3,
            acks=self.acks,
            # Generous timeouts to prevent message expiration while delivering
            # large messages for big tables under high load.
            delivery_timeout_ms=900000,  # 15 minutes - maximum time for message delivery
            request_timeout_ms=600000,  # 10 minutes - time to wait for server response
            max_block_ms=30000,  # 30 seconds - max block when buffer is full or metadata update is needed
            buffer_memory=self.buffer_memory,
            max_in_flight_requests_per_connection=self.max_in_flight_requests_per_connection,
        )

        self.admin_client = KafkaAdminClient(
            bootstrap_servers=bootstrap_servers,
            client_id='sql_migration_admin'
        )
    
    def _validate_compression_type(self, compression_type: str) -> str:
        """
        Validate compression type and fallback to available option if needed.
        Checks for required Python libraries without connecting to Kafka.
        
        Args:
            compression_type: Requested compression type
            
        Returns:
            Valid compression type that's available in the environment
        """
        # Map compression types to their required Python modules
        compression_modules = {
            'lz4': 'lz4',
            'snappy': 'snappy',
            'gzip': None,  # Built-in to Python
            'none': None,  # No module needed
        }
        
        # List of compression types to try in order of preference
        preferred_order = ['lz4', 'snappy', 'gzip', 'none']
        
        # If requested type is in preferred list, try it first
        if compression_type in preferred_order:
            preferred_order = [compression_type] + [c for c in preferred_order if c != compression_type]
        
        for comp_type in preferred_order:
            module_name = compression_modules.get(comp_type)
            
            # Check if module is needed and available
            if module_name is None:
                # Built-in or no compression - always available
                return comp_type
            
            # Check if the required module can be imported
            try:
                __import__(module_name)
                return comp_type  # Module is available
            except ImportError:
                # Module not available, try next compression type
                continue
        
        # Fallback to 'none' if all else fails (shouldn't happen, but safe)
        logger.warning("No compression libraries available, using 'none' (no compression)")
        return 'none'
    
    def _ensure_metadata_ready(self, topic_name: str, max_retries: int = 3, retry_delay: float = 1.0) -> bool:
        """
        Ensure metadata is ready for a topic by calling partitions_for() with retries.
        This helps avoid metadata update timeouts when sending messages.
        
        Note: partitions_for() may block up to max_block_ms (30s) if metadata is not ready.
        We use retries with delays to give Kafka broker time to sync metadata.
        On success the topic is recorded in self._metadata_warmed_topics.
        
        Args:
            topic_name: Name of the topic
            max_retries: Maximum number of retry attempts
            retry_delay: Delay between retries in seconds (should be > 0 to allow broker to sync)
            
        Returns:
            True if metadata is ready, False otherwise
        """
        import time
        import threading
        
        # Use a timeout wrapper to prevent partitions_for() from blocking too long
        # Since partitions_for() itself can block up to max_block_ms (30s), we need to handle this carefully
        def get_partitions_with_timeout(timeout_seconds: float):
            """Get partitions with a timeout to prevent indefinite blocking."""
            # Single-element lists let the worker thread hand its
            # result/exception back to this closure.
            result = [None]
            exception = [None]
            
            def _get_partitions():
                try:
                    result[0] = self.producer.partitions_for(topic_name)
                except Exception as e:
                    exception[0] = e
            
            # Daemon thread so a permanently blocked partitions_for() call
            # cannot prevent interpreter shutdown.
            # NOTE(review): if join() times out, the worker keeps running in
            # the background until partitions_for() eventually returns.
            thread = threading.Thread(target=_get_partitions, daemon=True)
            thread.start()
            thread.join(timeout=timeout_seconds)
            
            if thread.is_alive():
                # Thread is still running, meaning partitions_for() is still blocking
                # This indicates metadata is not ready yet
                return None, TimeoutError(f"partitions_for() blocked for more than {timeout_seconds}s")
            
            if exception[0]:
                return None, exception[0]
            
            return result[0], None
        
        for attempt in range(max_retries):
            try:
                # Use a shorter timeout (5s) to quickly detect if metadata is ready
                # If it takes longer, we'll retry with a delay
                partitions, error = get_partitions_with_timeout(5.0)
                
                if error:
                    # If it's a timeout, metadata is likely not ready yet
                    if isinstance(error, TimeoutError):
                        raise TimeoutError(f"Metadata not ready (blocked for 5s)")
                    else:
                        raise error
                
                if partitions is not None:
                    # Record the warm-up so callers can skip this step later.
                    self._metadata_warmed_topics.add(topic_name)
                    if attempt > 0:
                        logger.info(f"Metadata ready for topic '{topic_name}' after {attempt + 1} attempts")
                    else:
                        logger.info(f"Metadata ready for topic '{topic_name}'")
                    return True
                # NOTE(review): if partitions is None without an error (unknown
                # topic), this retries immediately with no delay — confirm
                # whether a sleep is intended here as well.
            except Exception as e:
                if attempt < max_retries - 1:
                    # Increase retry delay to give broker more time to sync metadata
                    # Since each attempt can take up to 5s, we wait longer between retries
                    actual_delay = retry_delay * (attempt + 1)  # Linear backoff: 1s, 2s, 3s...
                    logger.info(
                        f"Metadata not ready for topic '{topic_name}' (attempt {attempt + 1}/{max_retries}): {e}. "
                        f"Retrying in {actual_delay:.1f}s..."
                    )
                    time.sleep(actual_delay)
                else:
                    logger.warning(
                        f"Failed to ensure metadata ready for topic '{topic_name}' after {max_retries} attempts: {e}"
                    )
        
        return False
    
    def _parse_table_name(self, insert_sql: str) -> Optional[str]:
        """Extract table name from INSERT statement, preserving original case (without quotes).
        Supports both `INSERT INTO t (..)` and `INSERT INTO t(..)` (no space before column list).
        """
        pattern = r'INSERT\s+INTO\s+[`\"]?([A-Za-z0-9_\.]+)[`\"]?(?:\s|\()'
        match = re.match(pattern, insert_sql.strip(), re.IGNORECASE)
        return match.group(1) if match else None

    def _canonicalize_table_names(self, insert_statements: List[str]) -> Dict[str, List[str]]:
        """Group INSERTs by table using first-seen original case as canonical name."""
        canon_map: Dict[str, str] = {}
        grouped: Dict[str, List[str]] = defaultdict(list)
        for stmt in insert_statements:
            t = self._parse_table_name(stmt)
            if not t:
                logger.warning(f"Could not parse table name from: {stmt[:100]}")
                continue
            key = t.lower()
            if key not in canon_map:
                canon_map[key] = t
            grouped[canon_map[key]].append(stmt)
        return dict(grouped)

    def _build_insert_prefix(self, first_stmt: str, canonical_table: str) -> str:
        """Build INSERT INTO prefix with quoted canonical table name and optional column list."""
        m = re.search(r'INSERT\s+INTO\s+[`\"]?[A-Za-z0-9_\.]+[`\"]?\s*(\([^)]+\))?\s+VALUES', first_stmt or '', re.IGNORECASE)
        cols = m.group(1) if m else None
        return f'INSERT INTO "{canonical_table}" {cols or ""} VALUES'.replace('  ', ' ')
    
    def _extract_values_from_insert(self, insert_sql: str) -> List[str]:
        """Extract individual row tuples from INSERT statement.
        
        Handles both single and multi-row INSERTs:
        - INSERT INTO table VALUES (1, 2);
        - INSERT INTO table VALUES (1, 2), (3, 4), (5, 6);
        
        Returns:
            List of tuple strings including parentheses, e.g. ["(1, 2)", "(3, 4)"].
            Empty list if no VALUES keyword is present.
        """
        # Remove trailing semicolon
        sql = insert_sql.rstrip(';').strip()
        
        # Find VALUES keyword (case-insensitive via upper())
        values_idx = sql.upper().find('VALUES')
        if values_idx == -1:
            return []
        
        # len('VALUES') == 6: everything after the keyword is the tuple list.
        values_part = sql[values_idx + 6:].strip()
        
        # Extract top-level tuples (handling nested parentheses in values)
        tuples: List[str] = []
        buf: List[str] = []    # characters of the tuple currently being read
        depth = 0              # parenthesis nesting depth (0 = between tuples)
        in_string = False      # currently inside a quoted SQL literal?
        string_char = None     # the quote character that opened the literal
        
        i = 0
        while i < len(values_part):
            ch = values_part[i]
            
            if not in_string:
                if ch in ("'", '"'):
                    in_string = True
                    string_char = ch
                    buf.append(ch)
                elif ch == '(':
                    depth += 1
                    buf.append(ch)
                elif ch == ')':
                    depth -= 1
                    buf.append(ch)
                    if depth == 0 and buf:
                        # Complete tuple: flush the buffer as one row tuple.
                        tuple_str = ''.join(buf).strip()
                        if tuple_str.startswith('(') and tuple_str.endswith(')'):
                            tuples.append(tuple_str)
                        buf = []
                elif ch == ',' and depth == 0:
                    # Skip comma between tuples
                    pass
                else:
                    buf.append(ch)
            else:
                # Inside string literal
                if ch == string_char:
                    # Handle SQL-standard escaped quotes by doubling the quote: '' or ""
                    next_char = values_part[i+1] if i + 1 < len(values_part) else None
                    if next_char == string_char:
                        # It's an escaped quote, include one quote and skip the next
                        buf.append(ch)
                        i += 1  # skip the second quote
                    elif values_part[i-1] != '\\':
                        # End of string (not backslash-escaped)
                        # NOTE(review): a literal backslash right before the
                        # closing quote (e.g. '...\\') is misread as an escape
                        # and keeps the scanner inside the string — confirm
                        # whether inputs can contain trailing backslashes.
                        buf.append(ch)
                        in_string = False
                        string_char = None
                    else:
                        # Backslash-escaped quote, keep inside string
                        buf.append(ch)
                else:
                    buf.append(ch)
            
            i += 1
        
        # Defensive flush of any residual top-level text (normally empty here,
        # since complete tuples are flushed when their ')' closes).
        if buf and depth == 0:
            tuple_str = ''.join(buf).strip()
            if tuple_str.startswith('('):
                tuples.append(tuple_str)
        
        return tuples
    
    def split_inserts_by_table(self, insert_statements: List[str]) -> Dict[str, List[str]]:
        """
        Split INSERT statements by table name.

        Grouping is case-insensitive on the table name; the first-seen
        spelling becomes the dictionary key. Per-table counts are logged.

        Args:
            insert_statements: List of INSERT SQL statements

        Returns:
            Dictionary mapping table names to their INSERT statements
        """
        per_table = self._canonicalize_table_names(insert_statements)
        logger.info(f"Split {len(insert_statements)} INSERT statements into {len(per_table)} tables")
        for name, statements in per_table.items():
            logger.info(f"  Table '{name}': {len(statements)} statements")
        return dict(per_table)
    
    def delete_topics(self, topic_names: List[str], ignore_errors: bool = True) -> None:
        """
        Delete Kafka topics.
        
        After issuing the delete request, polls list_topics() for up to ~20s
        until the topics disappear from broker metadata, since Kafka may only
        mark topics for deletion and remove them asynchronously.
        
        Args:
            topic_names: List of topic names to delete
            ignore_errors: If True, only log errors at debug level (topics may not exist)
        """
        if not topic_names:
            return
        
        try:
            # kafka-python's delete_topics takes a list of topic names directly
            futures = self.admin_client.delete_topics(topics=topic_names)
            
            # Wait for deletion to complete; the return shape differs across
            # kafka-python versions, so detect it by attribute.
            if hasattr(futures, 'items'):
                # Older API: futures is a dict
                for topic_name, future in futures.items():
                    try:
                        future.result(timeout=30)
                        logger.debug(f"Deleted topic: {topic_name}")
                    except Exception as e:
                        if ignore_errors:
                            logger.debug(f"Topic {topic_name} may not exist (error ignored): {e}")
                        else:
                            logger.warning(f"Error deleting topic {topic_name}: {e}")
            elif hasattr(futures, 'topic_errors'):
                # Newer API: futures is a response object with (topic, code[, msg]) tuples
                for topic_error in futures.topic_errors:
                    topic_name = topic_error[0]
                    error_code = topic_error[1]
                    error_message = topic_error[2] if len(topic_error) > 2 else None
                    
                    if error_code == 0:  # Success
                        logger.debug(f"Deleted topic: {topic_name}")
                    else:
                        # Error code 3 = UnknownTopicOrPartitionError (topic doesn't exist)
                        if ignore_errors or error_code == 3:
                            logger.debug(f"Topic {topic_name} does not exist (error ignored)")
                        else:
                            logger.warning(f"Error deleting topic {topic_name}: {error_message or f'Error code {error_code}'}")
            else:
                # Fallback: assume deletion succeeded
                for topic_name in topic_names:
                    logger.debug(f"Deleted topic: {topic_name}")
                    
        except Exception as e:
            if ignore_errors:
                logger.debug(f"Error deleting topics (ignored): {e}")
            else:
                logger.warning(f"Error deleting topics: {e}")
        
        # Always wait for deletion to complete (even if ignore_errors=True, we want to ensure deletion)
        # Poll to check if topics are actually deleted (faster than fixed wait)
        # Kafka may mark topics for deletion but they need time to actually be removed
        import time
        max_wait_time = 20  # Maximum 20 seconds total (increased to handle slow deletions)
        poll_interval = 0.5  # Check every 500ms
        max_polls = int(max_wait_time / poll_interval)
        
        for poll_count in range(max_polls):
            try:
                # Check if topics still exist
                existing_topics = set(self.admin_client.list_topics())
                remaining_topics = [t for t in topic_names if t in existing_topics]
                
                if not remaining_topics:
                    # All topics deleted
                    if poll_count > 0:
                        logger.info(f"All topics deleted after {poll_count * poll_interval:.1f}s")
                    break
                
                # Some topics still exist or are being deleted, wait a bit more
                if poll_count == 0:
                    logger.info(f"Waiting for topics to be deleted: {remaining_topics}")
                elif poll_count % 10 == 0:  # Log every 5 seconds
                    logger.debug(f"Still waiting for topics deletion ({poll_count * poll_interval:.1f}s): {remaining_topics}")
                time.sleep(poll_interval)
            except Exception as e:
                # If we can't check, assume deletion is in progress and wait a bit
                logger.debug(f"Error checking topic deletion status: {e}, waiting...")
                if poll_count < max_polls - 1:
                    time.sleep(poll_interval)
                else:
                    logger.warning(f"Could not verify topic deletion after {max_wait_time}s, proceeding anyway")
                    break
        
        # Final check: if topics still exist after max wait, log warning but continue
        try:
            existing_topics = set(self.admin_client.list_topics())
            remaining_topics = [t for t in topic_names if t in existing_topics]
            if remaining_topics:
                logger.warning(
                    f"Topics still exist after {max_wait_time}s wait: {remaining_topics}. "
                    f"Kafka may still be deleting them. Proceeding anyway..."
                )
        except Exception:
            # Ignore errors in final check
            pass
    
    def cleanup_topics(self) -> None:
        """Cleanup all created topics."""
        if self._created_topics:
            logger.info(f"Cleaning up {len(self._created_topics)} Kafka topics...")
            # Use ignore_errors=True to avoid excessive warnings - topics may already be deleted
            self.delete_topics(list(self._created_topics), ignore_errors=True)
            self._created_topics.clear()
    
    def create_kafka_topics(self, tables: List[str], partitions: Optional[int] = None, replication_factor: int = 1) -> None:
        """
        Create Kafka topics for each table.
        Existing topics are deleted and recreated so they start empty; every
        topic (new or pre-existing) is tracked in self._created_topics for
        later cleanup. Sleeps 2s after creation before returning.
        
        Args:
            tables: List of table names
            partitions: Number of partitions per topic (allows parallelism, uses self.topic_partitions if None)
            replication_factor: Kafka replication factor
        """
        if partitions is None:
            partitions = self.topic_partitions
        topics_to_create = []
        # Discover existing topics once to avoid per-topic network calls
        try:
            existing = set(self.admin_client.list_topics())
        except Exception:
            # If listing fails, proceed as if nothing exists; creation errors
            # are handled below.
            existing = set()
        for table in tables:
            topic_name = f"{self.topic_prefix}_{table}"
            # Always track the topic (whether creating or already exists)
            self._created_topics.add(topic_name)
            # If topic exists, delete it first and wait for deletion to complete
            if topic_name in existing:
                logger.info(f"Topic exists, deleting before recreate: {topic_name}")
                try:
                    self.delete_topics([topic_name], ignore_errors=False)
                    # Wait a bit more to ensure topic is fully removed from metadata
                    import time
                    time.sleep(0.5)
                except Exception as e:
                    logger.warning(f"Error deleting existing topic {topic_name}: {e}")
            # Queue create
            topics_to_create.append(NewTopic(name=topic_name, num_partitions=partitions, replication_factor=replication_factor))
        
        if not topics_to_create:
            return
        
        try:
            futures = self.admin_client.create_topics(topics_to_create)
            
            # Handle response - kafka-python throws exception for already existing topics
            # So we need to catch that separately. The return shape also differs
            # across kafka-python versions, so detect it by attribute.
            if hasattr(futures, 'topic_errors'):
                # Newer API: futures is a response object with (topic, code[, msg]) tuples
                for topic_error in futures.topic_errors:
                    topic_name = topic_error[0]
                    error_code = topic_error[1]
                    error_message = topic_error[2] if len(topic_error) > 2 else None
                    
                    if error_code == 0:  # Success
                        logger.info(f"Created topic: {topic_name}")
                    elif error_code == 36:  # TopicAlreadyExistsError
                        logger.info(f"Topic already exists: {topic_name} (will be reused)")
                    else:
                        logger.warning(f"Topic creation issue for {topic_name}: {error_message or f'Error code {error_code}'}")
            elif hasattr(futures, 'items'):
                # Older API: futures is a dict
                for topic, future in futures.items():
                    try:
                        future.result()  # Wait for creation
                        logger.info(f"Created topic: {topic}")
                    except TopicAlreadyExistsError:
                        logger.info(f"Topic already exists: {topic} (will be reused)")
                    except Exception as e:
                        logger.warning(f"Topic creation issue for {topic}: {e}")
            else:
                # Fallback: assume topics were created or already exist
                for topic_obj in topics_to_create:
                    logger.info(f"Topic ready: {topic_obj.name}")
                    
        except TopicAlreadyExistsError as e:
            # All topics already exist - this is fine, just log and continue
            logger.info(f"Topics already exist, will reuse them: {e}")
        except Exception as e:
            # Check if it's a TopicAlreadyExistsError in the exception message
            if "TopicAlreadyExistsError" in str(e) or "already exists" in str(e).lower():
                logger.info("Some topics already exist, will reuse them")
            else:
                logger.error(f"Error creating topics: {e}")
                raise
        
        # Wait a bit and verify topics are actually available
        # This ensures consumers can connect immediately
        import time
        time.sleep(2)
        logger.debug("Topics created, verifying availability...")

    def create_kafka_topics_bulk(self, table_to_partitions: Dict[str, int], replication_factor: int = 1) -> None:
        """
        Create multiple Kafka topics in one admin call, each with its own partition count.
        Existing topics are deleted and recreated first; topics reported as
        "marked for deletion" get one retry after a 5s wait.
        This method does not sleep after creation; callers may choose to wait once.
        
        Args:
            table_to_partitions: Mapping of table name -> partition count
                (values are clamped to a minimum of 1).
            replication_factor: Kafka replication factor for every topic.
        """
        if not table_to_partitions:
            return
        # Discover existing topics once
        try:
            existing = set(self.admin_client.list_topics())
        except Exception:
            # If listing fails, proceed as if nothing exists; creation errors
            # are handled below.
            existing = set()
        topics_to_create = []
        for table, parts in table_to_partitions.items():
            # Clamp to at least one partition; None/0 become 1.
            parts = max(1, int(parts or 1))
            topic_name = f"{self.topic_prefix}_{table}"
            self._created_topics.add(topic_name)
            if topic_name in existing:
                logger.info(f"Topic exists, deleting before recreate: {topic_name}")
                try:
                    self.delete_topics([topic_name], ignore_errors=False)
                    # Wait a bit more to ensure topic is fully removed from metadata
                    import time
                    time.sleep(0.5)
                except Exception as e:
                    logger.warning(f"Error deleting existing topic {topic_name}: {e}")
            topics_to_create.append(NewTopic(name=topic_name, num_partitions=parts, replication_factor=replication_factor))
        if not topics_to_create:
            return
        try:
            futures = self.admin_client.create_topics(topics_to_create)
            if hasattr(futures, 'topic_errors'):
                # Newer API: response object with (topic, code[, msg]) tuples.
                # Check for topics marked for deletion and retry if needed
                topics_to_retry = []
                for topic_error in futures.topic_errors:
                    topic_name = topic_error[0]
                    error_code = topic_error[1]
                    error_message = topic_error[2] if len(topic_error) > 2 else None
                    if error_code == 0:
                        logger.info(f"Created topic: {topic_name}")
                    elif error_code == 36:  # TopicAlreadyExistsError
                        # Check if it's marked for deletion
                        if error_message and "marked for deletion" in error_message.lower():
                            logger.warning(f"Topic {topic_name} is marked for deletion, will wait and retry")
                            topics_to_retry.append(topic_name)
                        else:
                            logger.info(f"Topic already exists: {topic_name} (will be reused)")
                    else:
                        logger.warning(f"Topic creation issue for {topic_name}: {error_message or f'Error code {error_code}'}")
                
                # Retry creation for topics marked for deletion
                if topics_to_retry:
                    import time
                    logger.info(f"Waiting 5 seconds for topics to finish deletion: {topics_to_retry}")
                    time.sleep(5)
                    # Retry creating these topics - extract table name from topic name to get partition count
                    retry_topics = []
                    for topic_name in topics_to_retry:
                        # Extract table name from topic name (remove prefix)
                        # NOTE(review): str.replace removes the prefix anywhere
                        # in the name, and looks up the raw (un-clamped)
                        # partition value — confirm table names cannot contain
                        # the prefix substring.
                        table_name = topic_name.replace(f"{self.topic_prefix}_", "")
                        partitions = table_to_partitions.get(table_name, 1)
                        retry_topics.append(NewTopic(name=topic_name, num_partitions=partitions, replication_factor=replication_factor))
                    
                    try:
                        retry_futures = self.admin_client.create_topics(retry_topics)
                        if hasattr(retry_futures, 'topic_errors'):
                            for topic_error in retry_futures.topic_errors:
                                topic_name = topic_error[0]
                                error_code = topic_error[1]
                                error_message = topic_error[2] if len(topic_error) > 2 else None
                                if error_code == 0:
                                    logger.info(f"Created topic after retry: {topic_name}")
                                elif error_code == 36:
                                    logger.info(f"Topic still exists after retry: {topic_name} (will be reused)")
                                else:
                                    logger.warning(f"Topic creation still failed for {topic_name}: {error_message or f'Error code {error_code}'}")
                    except Exception as retry_e:
                        logger.warning(f"Error retrying topic creation: {retry_e}")
            elif hasattr(futures, 'items'):
                # Older API: futures is a dict of per-topic futures.
                for topic, future in futures.items():
                    try:
                        future.result()
                        logger.info(f"Created topic: {topic}")
                    except TopicAlreadyExistsError:
                        logger.info(f"Topic already exists: {topic} (will be reused)")
                    except Exception as e:
                        logger.warning(f"Topic creation issue for {topic}: {e}")
            else:
                # Fallback: assume topics were created or already exist.
                for topic_obj in topics_to_create:
                    logger.info(f"Topic ready: {topic_obj.name}")
        except TopicAlreadyExistsError as e:
            logger.info(f"Topics already exist, will reuse them: {e}")
        except Exception as e:
            if "TopicAlreadyExistsError" in str(e) or "already exists" in str(e).lower():
                logger.info("Some topics already exist, will reuse them")
            else:
                logger.error(f"Error creating topics: {e}")
                raise
    
    def send_table_messages_async(
        self,
        table_name: str,
        insert_statements: List[str],
        wait_for_send: bool = False,
        create_topic: bool = True,
        flush_immediately: bool = True,  # For streaming: set to False to reduce flush frequency
    ) -> int:
        """
        Send INSERT batches for a single table asynchronously (for per-table pipeline mode).

        Each INSERT statement becomes one Kafka message (no re-batching), preserving
        the original statement structure. Statements whose serialized message would
        exceed ``max_message_size`` are split into smaller batches via
        ``_split_large_batch``. Messages are sent with ``key=None`` so the partitioner
        distributes them round-robin across partitions for parallel consumption.

        Args:
            table_name: Table name
            insert_statements: INSERT statements for this table
            wait_for_send: If True, wait for each send to complete
            create_topic: If True, create topic for this table
            flush_immediately: If True, flush after sending (default). If False, skip flush for streaming mode.

        Returns:
            Number of batches sent
        """
        topic_name = f"{self.topic_prefix}_{table_name}"

        # Create topic if needed, then pre-warm metadata with retry so the first
        # send() does not stall on a metadata update.
        if create_topic:
            self.create_kafka_topics([table_name])
            self._created_topics.add(topic_name)
            self._ensure_metadata_ready(topic_name, max_retries=3, retry_delay=1.0)

        futures = []          # (future, batch_id) pairs collected in async mode
        sent_count = 0        # number of messages handed to the producer
        failed_batches = []   # batch ids whose send raised
        split_count = 0       # statements that had to be split due to size
        total_rows_sent = 0   # estimated row total across all sent messages
        batch_id = 0          # per-table monotonically increasing message id

        # True stream processing: send each INSERT immediately so consumers can
        # start working while the producer is still iterating remaining statements.
        for stmt in insert_statements:
            # Default row estimate for small messages. Bound BEFORE the try block
            # so the MessageSizeTooLarge handler below can always reference it.
            row_count = 1
            try:
                # Quick size check first (avoid creating objects if not needed).
                stmt_size = len(stmt.encode('utf-8'))

                # Beyond 90% of max_message_size we pay for an accurate row-count
                # estimate and an exact serialized-size measurement; below it a
                # cheap estimate suffices.
                is_large = stmt_size > self.max_message_size * 0.9

                if is_large:
                    # Estimate rows by counting value tuples after the VALUES keyword.
                    values_idx = stmt.upper().find('VALUES')
                    if values_idx != -1:
                        row_count = stmt[values_idx + 6:].count('(') or 1
                    # else: keep the default estimate of 1 row

                    # Build the payload dict directly (avoid InsertBatch object
                    # creation) and serialize to measure the real message size.
                    batch_dict = {
                        'table_name': table_name,
                        'batch_id': batch_id,
                        'total_batches': 0,
                        'sql_statements': [stmt],
                        'row_count': row_count,
                    }
                    batch_id += 1
                    serialized = json.dumps(batch_dict, ensure_ascii=False).encode('utf-8')
                    message_size = len(serialized)
                else:
                    # Small message: estimate size without serialization
                    # (statement bytes + JSON structure/metadata overhead).
                    message_size = stmt_size + 200
                    batch_dict = None  # built on demand below

                if message_size > self.max_message_size:
                    # Too large for a single Kafka message: split before sending.
                    split_count += 1
                    logger.warning(
                        f"INSERT statement {batch_id} for table '{table_name}' is too large: "
                        f"{message_size} bytes ({message_size / 1024 / 1024:.2f} MB) > "
                        f"{self.max_message_size} bytes ({self.max_message_size / 1024 / 1024:.2f} MB). "
                        f"Splitting into smaller batches..."
                    )

                    # Target rows per batch sized to ~90% of the limit, floored at 10.
                    rows_per_batch = max(1, int(row_count * self.max_message_size / message_size * 0.9))
                    if rows_per_batch < 10:
                        rows_per_batch = 10

                    split_batches = self._split_large_batch(
                        table_name,
                        stmt,
                        row_count,
                        rows_per_batch,
                        batch_id_start=batch_id
                    )

                    batch_id += 1

                    # Send the split batches; failures are recorded per sub-batch.
                    for sub_batch in split_batches:
                        try:
                            # Ensure metadata is ready for first batch (helps avoid metadata update timeout)
                            if topic_name not in self._metadata_warmed_topics:
                                self._ensure_metadata_ready(topic_name, max_retries=3, retry_delay=1.0)

                            future = self.producer.send(
                                topic_name,
                                key=None,  # No key - enables round-robin partition distribution
                                value=sub_batch.to_dict(),
                            )
                            if wait_for_send:
                                future.get(timeout=30)
                                sent_count += 1
                            else:
                                futures.append((future, sub_batch.batch_id))
                                sent_count += 1
                            total_rows_sent += sub_batch.row_count
                        except Exception as e:
                            logger.error(
                                f"Error sending split batch for table '{table_name}': {e}"
                            )
                            failed_batches.append(sub_batch.batch_id)
                            if wait_for_send:
                                raise
                else:
                    # Message size is OK - send immediately (streaming mode).
                    if batch_dict is None:
                        batch_dict = {
                            'table_name': table_name,
                            'batch_id': batch_id,
                            'total_batches': 0,
                            'sql_statements': [stmt],
                            'row_count': row_count,
                        }

                    try:
                        # Ensure metadata is ready before sending the first message
                        # to each topic; critical to avoid metadata update timeouts.
                        if topic_name not in self._metadata_warmed_topics:
                            metadata_ready = self._ensure_metadata_ready(topic_name, max_retries=3, retry_delay=1.0)
                            if not metadata_ready:
                                # Still not ready after retries: warn but continue.
                                # send() will attempt its own metadata update.
                                logger.warning(
                                    f"Metadata not ready for topic '{topic_name}' after retries. "
                                    f"Sending anyway, but may timeout if metadata update is needed."
                                )

                        future = self.producer.send(
                            topic_name,
                            key=None,  # No key - enables round-robin partition distribution
                            value=batch_dict,  # Directly use dictionary
                        )
                        if wait_for_send:
                            future.get(timeout=30)
                            sent_count += 1
                        else:
                            futures.append((future, batch_id))
                            sent_count += 1
                        total_rows_sent += row_count
                    except Exception as e:
                        logger.error(f"Error sending message {batch_id} for table '{table_name}': {e}")
                        failed_batches.append(batch_id)
                        if wait_for_send:
                            raise

                    batch_id += 1

            except Exception as e:
                error_msg = str(e)
                if "MessageSizeTooLarge" in error_msg or "Message too large" in error_msg:
                    # Broker rejected the message for size: split and retry.
                    split_count += 1
                    logger.error(
                        f"MessageSizeTooLargeError for INSERT statement {batch_id} of table '{table_name}'. "
                        f"Trying smaller batch..."
                    )
                    try:
                        # Compute the row estimate if the fast path skipped it.
                        if row_count <= 1:
                            values_idx = stmt.upper().find('VALUES')
                            row_count = stmt[values_idx + 6:].count('(') if values_idx != -1 else 1
                            if row_count == 0:
                                row_count = 1

                        split_batches = self._split_large_batch(
                            table_name,
                            stmt,
                            row_count,
                            max(10, row_count // 10),
                            batch_id_start=batch_id
                        )

                        batch_id += 1
                        for sub_batch in split_batches:
                            # Ensure metadata is ready for first batch (helps avoid metadata update timeout)
                            if topic_name not in self._metadata_warmed_topics:
                                self._ensure_metadata_ready(topic_name, max_retries=3, retry_delay=1.0)

                            future = self.producer.send(
                                topic_name,
                                key=None,  # No key - enables round-robin partition distribution
                                value=sub_batch.to_dict(),
                            )
                            if wait_for_send:
                                future.get(timeout=30)
                                sent_count += 1
                            else:
                                futures.append((future, sub_batch.batch_id))
                                sent_count += 1
                            total_rows_sent += sub_batch.row_count
                    except Exception as split_error:
                        logger.error(
                            f"Failed to send even split batches for table '{table_name}': {split_error}"
                        )
                        failed_batches.append(batch_id)
                        if wait_for_send:
                            raise
                else:
                    # Any other error: record the failure and keep streaming.
                    logger.error(f"Error sending INSERT statement {batch_id} for table {table_name}: {e}")
                    failed_batches.append(batch_id)
                    if wait_for_send:
                        raise
                    # Increment batch_id even on error to maintain consistency
                    batch_id += 1

        if not wait_for_send and futures:
            # Async mode: optionally flush, then verify deliveries in the background.
            if flush_immediately:
                # Non-blocking flush: messages drain in the background while the
                # caller continues reading/parsing input.
                try:
                    self.flush_producer(timeout=3.0, blocking=False)  # Non-blocking flush
                except Exception as e:
                    logger.debug(f"Non-blocking flush started in background: {e}")

            if not flush_immediately:
                # Streaming mode: skip the background delivery check to avoid
                # false timeout warnings. Messages are already in the producer
                # buffer and consumers are processing, so delivery is in flight.
                pass
            else:
                # Non-streaming mode: check futures in a background thread with a
                # fixed per-future timeout. Pure error reporting - never blocks
                # the caller and is never joined.
                import threading

                def check_futures():
                    failed_count = 0
                    # Fixed 2-minute timeout per future: avoids timeouts that
                    # scale with batch count on large tables.
                    timeout_per_future = 120
                    for future, fut_batch_id in futures:
                        try:
                            future.get(timeout=timeout_per_future)
                        except Exception as e:
                            error_msg = str(e)
                            if "Timeout" in error_msg:
                                # future.get may time out even though the producer
                                # already delivered the message; log quietly.
                                logger.debug(
                                    f"Timeout checking batch {fut_batch_id} for table '{table_name}' "
                                    f"(message may still be delivered). Error: {e}"
                                )
                            else:
                                logger.error(f"Failed to send batch {fut_batch_id} for table '{table_name}': {e}")
                            failed_count += 1
                            if fut_batch_id not in failed_batches:
                                failed_batches.append(fut_batch_id)
                    if failed_count > 0 and failed_count < len(futures):
                        # Partial failures are the concerning case worth a warning.
                        logger.warning(f"Failed to send {failed_count} out of {len(futures)} batches for table '{table_name}'")

                error_check_thread = threading.Thread(target=check_futures, daemon=True)
                error_check_thread.start()
        elif wait_for_send:
            # Sync mode: wait for any collected futures, then flush synchronously.
            failed_count = 0
            timeout_per_future = 120  # 2 minutes per future
            for future, fut_batch_id in futures:
                try:
                    future.get(timeout=timeout_per_future)
                except Exception as e:
                    error_msg = str(e)
                    if "Timeout" in error_msg:
                        logger.warning(
                            f"Timeout waiting for batch {fut_batch_id} for table '{table_name}' "
                            f"(message may still be delivered). Error: {e}"
                        )
                    else:
                        logger.error(f"Failed to send batch {fut_batch_id} for table '{table_name}': {e}")
                    failed_count += 1
                    if fut_batch_id not in failed_batches:
                        failed_batches.append(fut_batch_id)
            # Flush for sync mode too
            self.producer.flush()

        if failed_batches:
            logger.warning(f"Failed to send {len(failed_batches)} batches for table '{table_name}': {failed_batches}")

        if split_count > 0:
            logger.info(
                f"Sent {sent_count} batches for table '{table_name}' to topic '{topic_name}' "
                f"({split_count} batches were split due to size limits)"
            )
        else:
            logger.info(f"Sent {sent_count} batches for table '{table_name}' to topic '{topic_name}' (stream mode)")

        # Record cumulative rows sent for this table.
        prev = self.rows_sent_per_table.get(table_name, 0)
        self.rows_sent_per_table[table_name] = prev + total_rows_sent

        return sent_count
    
    # Batch/pipeline mode APIs removed; streaming mode (send_table_messages_async) is the only supported mode.
    
    def flush_producer(self, timeout: float = 3.0, blocking: bool = True):
        """
        Flush all pending messages (call after async sends).

        kafka-python's ``KafkaProducer.flush()`` has no timeout parameter, so the
        flush runs in a daemon thread and the wait is bounded here instead.

        Args:
            timeout: Maximum time to wait for flush to complete (seconds). Default 3.0s.
                     Only used if blocking=True. If timeout is reached, log warning but continue.
            blocking: If True, wait for flush to complete (with timeout). If False, start flush
                     in background and return immediately without waiting (fully non-blocking).

        Returns:
            float: Flush duration in seconds (0.0 if non-blocking, actual duration if blocking)
        """
        import time
        import threading

        flush_start_time = time.time()

        try:
            flush_completed = threading.Event()
            flush_error = [None]     # exception raised inside the flush thread, if any
            flush_duration = [0.0]   # actual time spent inside producer.flush()

            def do_flush():
                try:
                    flush_start = time.time()
                    self.producer.flush()
                    flush_duration[0] = time.time() - flush_start
                except Exception as e:
                    flush_error[0] = e
                    # Also log here: in non-blocking mode nobody ever inspects
                    # flush_error, so without this the failure would be silent.
                    logger.warning(f"Background flush failed: {e}")
                finally:
                    # Always signal completion so a blocking waiter never hangs.
                    flush_completed.set()

            flush_thread = threading.Thread(target=do_flush, daemon=True)
            flush_thread.start()

            if not blocking:
                # Non-blocking mode: return immediately, flush continues in background
                logger.debug("Flush started in background (non-blocking mode)")
                return 0.0

            # Blocking mode: wait for flush with timeout
            if flush_completed.wait(timeout=timeout):
                actual_duration = flush_duration[0] if flush_duration[0] > 0 else (time.time() - flush_start_time)
                if flush_error[0]:
                    # Surface the background failure; downgraded to a warning by
                    # the outer handler so a flush error never aborts the caller.
                    raise flush_error[0]
                # Log flush duration for network monitoring
                if actual_duration > 1.0:  # Log if flush took more than 1 second
                    logger.warning(
                        f"Flush operation took {actual_duration:.2f}s (timeout: {timeout}s). "
                        "This may indicate network latency or high message volume."
                    )
                else:
                    logger.debug(f"Flush completed in {actual_duration:.2f}s")
                return actual_duration
            else:
                # Timeout - log warning but don't block
                elapsed = time.time() - flush_start_time
                logger.warning(
                    f"Flush operation timed out after {timeout}s (elapsed: {elapsed:.2f}s). "
                    "Messages may still be in producer buffer. This may indicate network issues. Continuing..."
                )
                return elapsed
        except Exception as e:
            # Flush failures are non-fatal: messages may still be delivered later.
            elapsed = time.time() - flush_start_time
            logger.warning(f"Error during flush (elapsed: {elapsed:.2f}s): {e}. Continuing...")
            return elapsed
       
    
    def close(self):
        """Close producer connections."""
        if self.producer:
            self.producer.close()
        if self.admin_client:
            self.admin_client.close()

