"""
Kafka Consumer for parallel table insertion.
Handles consuming messages and executing batch inserts.
"""
from __future__ import annotations

import json
import logging
import time
import re
from dataclasses import dataclass
from typing import Dict, Optional, Callable, List
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock, Event

from kafka import KafkaConsumer
from kafka.errors import KafkaError

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)

# Prefer orjson's C-accelerated parser when installed; fall back to the stdlib
# json module otherwise. Consumers feature-test by checking `_fastjson` for None.
try:
    import orjson as _fastjson
except Exception:  # pragma: no cover - orjson is an optional dependency
    _fastjson = None


@dataclass
class ConsumerStats:
    """Statistics for a single table's consumer.

    Attributes:
        table_name: Table whose INSERT batches this consumer processed.
        batches_processed: Number of Kafka messages (batches) executed.
        rows_inserted: Total rows inserted across all batches.
        errors: Number of failed messages/batches.
        start_time: Epoch seconds when the first message was received
            (None until processing actually starts).
        end_time: Epoch seconds when processing finished (None while running).
    """
    table_name: str
    batches_processed: int = 0
    rows_inserted: int = 0
    errors: int = 0
    start_time: Optional[float] = None
    end_time: Optional[float] = None

    @property
    def duration(self) -> float:
        """Elapsed processing time in seconds, or 0.0 if timing is incomplete.

        Uses explicit ``is not None`` checks (rather than truthiness) so a
        legitimate timestamp of 0.0 is not mistaken for "unset".
        """
        if self.start_time is not None and self.end_time is not None:
            return self.end_time - self.start_time
        return 0.0


class KafkaInsertConsumer:
    """Consumer for processing INSERT batches from Kafka.

    One topic per table (``{topic_prefix}_{table_name}``); each table is
    drained under its own consumer group so tables can be processed in
    parallel. Per-table ConsumerStats are collected in ``self.stats``,
    guarded by ``self.stats_lock`` because consumers run on worker threads.
    """

    def __init__(
        self,
        bootstrap_servers: str | List[str] = "localhost:9092",
        topic_prefix: str = "sql_migration_insert",
        group_id: str = "sql_migration_consumers",
        auto_offset_reset: str = "earliest",  # Read all messages in fresh topics created for this run
        enable_auto_commit: bool = False,  # Manual commit for transaction control
        max_poll_records: int = 1000,  # More messages per poll for better throughput
        unique_group_id_suffix: Optional[str] = None,  # Optional unique suffix for consumer group
        consume_only_new: bool = False,  # True: seek to partition end first, skipping pre-existing messages
    ):
        """
        Initialize Kafka consumer.

        Args:
            bootstrap_servers: Kafka broker address(es)
            topic_prefix: Prefix for topic names (topic = f"{prefix}_{table}")
            group_id: Base consumer group ID; the table name (and optional
                suffix) is appended per table in ``_create_consumer``
            auto_offset_reset: Offset reset policy ('earliest' / 'latest')
            enable_auto_commit: Whether to auto-commit offsets. False here so
                offsets are committed only after a batch executes successfully.
            max_poll_records: Maximum records returned per poll
            unique_group_id_suffix: Optional suffix for the per-table consumer
                group id, so re-runs do not resume a previous run's offsets
            consume_only_new: When True, assigned partitions are sought to
                their end before consuming, skipping older messages
        """
        self.bootstrap_servers = bootstrap_servers
        self.topic_prefix = topic_prefix
        self.group_id = group_id
        self.auto_offset_reset = auto_offset_reset
        self.enable_auto_commit = enable_auto_commit
        self.max_poll_records = max_poll_records
        self.unique_group_id_suffix = unique_group_id_suffix
        self.consume_only_new = consume_only_new

        # stats is written from multiple consumer threads; guard with a lock.
        self.stats_lock = Lock()
        self.stats: Dict[str, ConsumerStats] = {}

    def _create_consumer(self, table_name: str) -> KafkaConsumer:
        """Create and configure a KafkaConsumer for a single table's topic."""
        topic_name = f"{self.topic_prefix}_{table_name}"

        # Use unique consumer group ID to avoid consuming old messages from previous runs.
        # If unique_group_id_suffix is provided, append it to ensure uniqueness.
        # IMPORTANT: All consumers for the same table MUST use the SAME consumer group ID
        # to enable partition sharing. Each consumer will consume from different partitions,
        # avoiding duplicate message processing.
        if self.unique_group_id_suffix:
            consumer_group_id = f"{self.group_id}_{table_name}_{self.unique_group_id_suffix}"
        else:
            consumer_group_id = f"{self.group_id}_{table_name}"

        consumer = KafkaConsumer(
            topic_name,
            bootstrap_servers=self.bootstrap_servers,
            group_id=consumer_group_id,
            auto_offset_reset=self.auto_offset_reset,
            enable_auto_commit=self.enable_auto_commit,
            # Use fastest available deserializer; orjson.loads accepts bytes
            # directly, avoiding the extra UTF-8 decode the stdlib path needs
            value_deserializer=(_fastjson.loads if _fastjson else (lambda m: json.loads(m.decode('utf-8')))),
            # We don't use the key in consumers; skip decoding to save CPU
            key_deserializer=None,
            max_poll_records=self.max_poll_records,
            consumer_timeout_ms=300000,  # 5 minutes timeout (allows for long-running migrations)
            # Optimized fetch settings for better batching and throughput
            fetch_min_bytes=32768,  # Wait for at least 32KB of data (better batching)
            fetch_max_wait_ms=200,  # Max 200ms wait for more data (allows better batching without too much latency)
        )
        # If configured, move offsets to the end so we only consume messages sent after this run starts
        if self.consume_only_new:
            try:
                # Partition assignment only happens after polling; poll briefly
                # (up to ~50 * 100ms = 5s) until partitions are assigned.
                attempts = 0
                while not consumer.assignment() and attempts < 50:
                    consumer.poll(timeout_ms=100)
                    attempts += 1
                # Seek to end for all assigned partitions
                if consumer.assignment():
                    consumer.seek_to_end()
            except Exception:
                # Best-effort; continue even if seeking fails
                pass
        return consumer

    def process_table_messages(
        self,
        table_name: str,
        execute_sql: Callable[[str], None],
        max_retries: int = 3,
        batch_commit_size: int = 10,
        producer_done_event: Optional[Event] = None,
        error_event: Optional[Event] = None,
        on_error: Optional[Callable[[], None]] = None,
    ) -> ConsumerStats:
        """
        Process messages for a single table until the topic is drained.

        Args:
            table_name: Name of the table to process
            execute_sql: Function to execute SQL statements (single SQL string).
                If the callable carries a ``db_executor`` attribute exposing
                ``execute_sql_batch``, whole batches are run through it instead.
            max_retries: Maximum number of retries on error.
                NOTE(review): accepted but currently unused in this method.
            batch_commit_size: Number of batches to process before committing
                offset. NOTE(review): accepted but currently unused; a fixed
                in-method threshold (batch_size_for_commit = 15) is used instead.
            producer_done_event: Event signalling the producer finished.
                NOTE(review): accepted but currently unused.
            error_event: Optional shared event; set on failure (when no
                ``on_error`` callback is given) and checked every loop
                iteration so sibling consumers stop quickly.
            on_error: Optional failure callback invoked instead of setting
                ``error_event`` directly (expected to set it internally).

        Returns:
            ConsumerStats with processing statistics
        """
        # Start time will be set on first received message to avoid counting idle wait
        stats = ConsumerStats(table_name=table_name, start_time=None)

        consumer = None
        try:
            consumer = self._create_consumer(table_name)
            logger.info(f"Starting consumer for table '{table_name}'")

            batch_count = 0
            consecutive_empty_polls = 0
            # Wait times for streaming mode:
            # - Before receiving the first message: wait up to 10 minutes
            #   (producer startup, file reading, and message delivery). This is
            #   important for large files where the producer may take time to
            #   start sending messages.
            # - After receiving messages: smart exit — check whether we have
            #   reached the end of all assigned partitions. Either way the
            #   grace period is 2 seconds (both constants below are 20 polls);
            #   the at-end/not-at-end split only changes the log message.
            max_empty_polls_before_message = 6000  # 10 minutes (6000 * 100ms = 600s = 10min)
            max_empty_polls_after_message_at_end = 20  # 2 seconds when at partition end (20 * 100ms = 2s)
            max_empty_polls_after_message_not_at_end = 20  # 2 seconds when not at end (20 * 100ms = 2s)
            received_at_least_one = False
            last_poll_at_end = False  # Track if last poll indicated we're at end of partition

            while True:
                # Check if error detected, exit immediately
                if error_event and error_event.is_set():
                    logger.warning(f"Error detected, stopping consumer for table '{table_name}'")
                    break

                try:
                    # Optimized poll timeout: 100ms for better batching while maintaining responsiveness
                    # This allows more messages to accumulate per poll, reducing overhead
                    message_pack = consumer.poll(timeout_ms=100)

                    # On an empty poll (after we have seen data), determine
                    # whether our position equals the end offset on every
                    # assigned partition — i.e. the topic is fully drained.
                    at_end = False
                    if received_at_least_one and not message_pack:
                        try:
                            # Check if we've reached the end of all assigned partitions
                            assignments = consumer.assignment()
                            if assignments:
                                # Get end offsets for all assigned partitions
                                # NOTE(review): TopicPartition is imported but unused here.
                                from kafka import TopicPartition
                                end_offsets = consumer.end_offsets(assignments)
                                positions = {tp: consumer.position(tp) for tp in assignments}
                                # Check if all partitions are at their end
                                at_end = all(
                                    positions.get(tp, 0) >= end_offsets.get(tp, 0)
                                    for tp in assignments
                                )
                                last_poll_at_end = at_end
                        except Exception:
                            # If we can't check, assume we're not at end (safer)
                            at_end = False

                    if not message_pack:
                        consecutive_empty_polls += 1
                        if received_at_least_one:
                            # After receiving messages, use smart exit based on partition end status
                            actual_wait_seconds = consecutive_empty_polls * 0.1  # 100ms per poll
                            if at_end or last_poll_at_end:
                                # At end of partition: wait only 2 seconds to ensure no more messages
                                if consecutive_empty_polls >= max_empty_polls_after_message_at_end:
                                    logger.debug(f"No more messages for table '{table_name}' (at partition end) after {actual_wait_seconds:.1f}s, exiting")
                                    break
                            else:
                                # Not at end: wait up to 2 seconds for more messages
                                if consecutive_empty_polls >= max_empty_polls_after_message_not_at_end:
                                    logger.debug(f"No more messages for table '{table_name}' (not at end) after {actual_wait_seconds:.1f}s, exiting")
                                    break
                        else:
                            # Before receiving first message, wait up to 10 minutes
                            actual_wait_seconds = consecutive_empty_polls * 0.1  # 100ms per poll
                            if consecutive_empty_polls >= max_empty_polls_before_message:
                                # When multiple consumers are used, some may not receive messages
                                # if partitions are already assigned to other consumers - this is normal
                                # Also, producer may take time to start sending messages for large files
                                logger.debug(
                                    f"No messages received for table '{table_name}' after "
                                    f"{actual_wait_seconds:.1f}s (this is normal when using multiple consumers per table or when producer is still reading large files)"
                                )
                                break
                        continue

                    # Reset counter when we get messages
                    consecutive_empty_polls = 0
                    received_at_least_one = True
                    # Record actual processing start time at first receipt
                    if stats.start_time is None:
                        stats.start_time = time.time()

                    # Hybrid batching: aggregate 10–20 messages (~15k–40k rows) per transaction for better throughput
                    batch_sql_list = []   # Collected SQL statements
                    batch_row_counts = [] # Row counts per message
                    batch_messages = []   # Messages in current batch
                    batch_size_for_commit = 15

                    for topic_partition, messages in message_pack.items():
                        for message in messages:
                            try:
                                # message.value is the deserialized JSON payload;
                                # expected keys: 'sql_statements' and 'row_count'.
                                batch_data = message.value
                                # Extract batch information
                                sql_statements = batch_data.get('sql_statements', [])
                                row_count = int(batch_data.get('row_count', 0) or 0)
                                # Collect for batch
                                batch_sql_list.extend(sql_statements)
                                batch_row_counts.append(row_count)
                                batch_messages.append(message)
                                # Execute when reaching threshold
                                if len(batch_messages) >= batch_size_for_commit:
                                    try:
                                        # Prefer a batch-capable executor attached to the
                                        # callable, if the caller provided one.
                                        db_executor = getattr(execute_sql, 'db_executor', None)
                                        if db_executor and hasattr(db_executor, 'execute_sql_batch'):
                                            db_executor.execute_sql_batch(batch_sql_list)
                                        else:
                                            for sql_stmt in batch_sql_list:
                                                execute_sql(sql_stmt)
                                        # Commit offsets only after the batch executed successfully
                                        consumer.commit()
                                        total_rows = sum(batch_row_counts)
                                        stats.rows_inserted += total_rows
                                        stats.batches_processed += len(batch_row_counts)
                                        batch_count += len(batch_row_counts)
                                        # Clear buffers
                                        batch_sql_list = []
                                        batch_row_counts = []
                                        batch_messages = []
                                    except Exception as e:
                                        msg = str(e)
                                        # Skip re-logging errors already detailed elsewhere;
                                        # the Chinese markers ("suggested fix" / "failing
                                        # statement snippet") presumably come from the DB
                                        # executor's own error messages — TODO confirm.
                                        if ("建议修改" not in msg) and ("失败语句片段" not in msg):
                                            logger.error(f"Failed to execute batch for table {table_name}: {msg}")
                                        stats.errors += len(batch_row_counts) if batch_row_counts else 1
                                        if on_error:
                                            on_error()
                                        elif error_event:
                                            error_event.set()
                                        # Clear buffers and re-raise.
                                        # NOTE(review): this raise is caught by the
                                        # per-message handler just below, which logs and
                                        # counts the error again and then CONTINUES with
                                        # the remaining messages — it does not break the
                                        # outer loops. Likely unintended; verify.
                                        batch_sql_list = []
                                        batch_row_counts = []
                                        batch_messages = []
                                        raise
                            except Exception as e:
                                logger.error(f"Error processing message for table {table_name}: {e}")
                                stats.errors += 1
                                if on_error:
                                    on_error()
                                elif error_event:
                                    error_event.set()
                                # Continue with other messages

                    # Execute any remaining batched SQL statements
                    if batch_messages:
                        try:
                            db_executor = getattr(execute_sql, 'db_executor', None)
                            if db_executor and hasattr(db_executor, 'execute_sql_batch'):
                                db_executor.execute_sql_batch(batch_sql_list)
                            else:
                                for sql_stmt in batch_sql_list:
                                    execute_sql(sql_stmt)
                            consumer.commit()
                            total_rows = sum(batch_row_counts)
                            stats.rows_inserted += total_rows
                            stats.batches_processed += len(batch_row_counts)
                            batch_count += len(batch_row_counts)
                        except Exception as e:
                            msg = str(e)
                            # Same suppression of already-detailed errors as above.
                            if ("建议修改" not in msg) and ("失败语句片段" not in msg):
                                logger.error(f"Failed to execute remaining batch for table {table_name}: {msg}")
                            stats.errors += len(batch_row_counts)
                            if on_error:
                                on_error()
                            elif error_event:
                                error_event.set()

                except Exception as e:
                    logger.error(f"Error polling messages for table {table_name}: {e}")
                    stats.errors += 1
                    # Trigger immediate cleanup first, then set error event
                    if on_error:
                        on_error()  # Trigger immediate cleanup (this will set error_event inside)
                    elif error_event:
                        error_event.set()
                    break

            stats.end_time = time.time()

            with self.stats_lock:
                self.stats[table_name] = stats

            logger.info(
                f"Completed processing table '{table_name}': "
                f"{stats.batches_processed} batches, {stats.rows_inserted} rows, "
                f"{stats.errors} errors, duration: {stats.duration:.2f}s"
            )

        except Exception as e:
            logger.error(f"Fatal error processing table '{table_name}': {e}")
            stats.errors += 1
            stats.end_time = time.time()
            # Trigger immediate cleanup first, then set error event
            if on_error:
                on_error()  # Trigger immediate cleanup (this will set error_event inside)
            elif error_event:
                error_event.set()
        finally:
            # Always release the network connection, even on fatal errors.
            if consumer:
                consumer.close()

        return stats

    def process_messages_parallel(
        self,
        table_names: List[str],
        execute_sql: Callable[[str], None],
        max_workers: Optional[int] = None,
        max_retries: int = 3,
        batch_commit_size: int = 10,
    ) -> Dict[str, ConsumerStats]:
        """
        Process messages for multiple tables in parallel.

        Args:
            table_names: List of table names to process
            execute_sql: Function to execute SQL statements; shared by all
                worker threads, so it must be thread-safe — TODO confirm callers
            max_workers: Maximum number of parallel consumers
                (default: number of tables, capped at 10)
            max_retries: Maximum number of retries on error (passed through;
                see note on process_table_messages — currently unused there)
            batch_commit_size: Number of batches before committing offset
                (passed through; currently unused in process_table_messages)

        Returns:
            Dictionary mapping table names to their ConsumerStats
        """
        if max_workers is None:
            max_workers = min(len(table_names), 10)  # Cap at 10 parallel consumers

        logger.info(f"Starting parallel consumers for {len(table_names)} tables with {max_workers} workers")

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # One future per table; map back to the table name for reporting.
            futures = {
                executor.submit(
                    self.process_table_messages,
                    table_name,
                    execute_sql,
                    max_retries,
                    batch_commit_size,
                ): table_name
                for table_name in table_names
            }

            results: Dict[str, ConsumerStats] = {}

            for future in as_completed(futures):
                table_name = futures[future]
                try:
                    stats = future.result()
                    results[table_name] = stats
                except Exception as e:
                    # A worker that raised still gets a stats entry so the
                    # summary below accounts for every requested table.
                    logger.error(f"Error in parallel consumer for table '{table_name}': {e}")
                    results[table_name] = ConsumerStats(
                        table_name=table_name,
                        errors=1,
                        end_time=time.time(),
                    )

        # Print summary (duration is the max over tables since they ran in parallel)
        total_batches = sum(s.batches_processed for s in results.values())
        total_rows = sum(s.rows_inserted for s in results.values())
        total_errors = sum(s.errors for s in results.values())
        total_duration = max((s.duration for s in results.values()), default=0.0)

        logger.info(
            f"\n{'='*60}\n"
            f"Parallel Insert Summary:\n"
            f"  Tables processed: {len(results)}\n"
            f"  Total batches: {total_batches}\n"
            f"  Total rows: {total_rows}\n"
            f"  Total errors: {total_errors}\n"
            f"  Total duration: {total_duration:.2f}s\n"
            f"  Average throughput: {total_rows/total_duration if total_duration > 0 else 0:.0f} rows/s\n"
            f"{'='*60}"
        )

        return results


