from typing import Optional
import argparse
import sys
import os
import json
from pathlib import Path

from .config import build_config_from_sources
from .pipeline import run_pipeline
from .kafka_controller import KafkaMigrationController
from .direct_executor import DirectMigrationExecutor
import logging

logger = logging.getLogger(__name__)


def _build_arg_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        prog="kwdb-migrate",
        description="MySQL dump (.sql) to KWDB relational engine converter",
    )
    
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    
    # Convert command
    convert_parser = subparsers.add_parser("convert", help="Convert MySQL dump to KWDB SQL")
    convert_parser.add_argument("-f", "--file", required=True, help="Path to MySQL dump .sql")
    convert_parser.add_argument("--host", default=None)
    convert_parser.add_argument("--port", type=int, default=None)
    convert_parser.add_argument("--user", default=None)
    convert_parser.add_argument("--password", default=None)
    convert_parser.add_argument("--database", default=None, help="Target database override")
    convert_parser.add_argument("--on-error", choices=["stop", "skip"], default="stop")
    convert_parser.add_argument(
        "--config", default=None, help="Optional path to YAML/JSON config file"
    )
    convert_parser.add_argument(
        "--out", default=None, help="Write transformed KWDB SQL to this file"
    )
    convert_parser.add_argument(
        "--dialect-read", default="mysql", help="Input dialect (default: mysql)"
    )
    
    # Kafka migration command
    kafka_parser = subparsers.add_parser(
        "kafka-migrate",
        help="Execute migration with Kafka-based parallel INSERT (from MySQL file or converted SQL files)"
    )
    # MySQL file mode (new)
    kafka_parser.add_argument("--mysql-file", help="Path to MySQL dump file (converts and executes in memory)")
    kafka_parser.add_argument("--on-error", choices=["stop", "skip"], default="stop", help="Error handling mode for conversion")
    kafka_parser.add_argument("--dialect-read", default="mysql", help="Input dialect (default: mysql)")
    # Converted files mode (old)
    kafka_parser.add_argument("--ddl-file", help="Path to DDL SQL file (schema) - required if --mysql-file not provided")
    kafka_parser.add_argument("--insert-file", help="Path to INSERT SQL file (data) - required if --mysql-file not provided")
    kafka_parser.add_argument("--constraints-file", help="Path to constraints SQL file - required if --mysql-file not provided")
    kafka_parser.add_argument(
        "--db-connection-string",
        default=None,
        help="PostgreSQL connection string (takes precedence over individual db params). "
             "Format: postgresql://user:password@host:port/database?sslmode=disable"
    )
    kafka_parser.add_argument("--db-host", default=None, help="KWDB host (ignored if --db-connection-string provided)")
    kafka_parser.add_argument("--db-port", type=int, default=None, help="KWDB port (ignored if --db-connection-string provided)")
    kafka_parser.add_argument("--db-user", default=None, help="KWDB user (ignored if --db-connection-string provided)")
    kafka_parser.add_argument("--db-password", default=None, help="KWDB password (ignored if --db-connection-string provided)")
    kafka_parser.add_argument("--db-database", default=None, help="KWDB database (ignored if --db-connection-string provided)")
    kafka_parser.add_argument("--kafka-servers", default="127.0.0.1:9092", help="Kafka broker addresses (comma-separated, default: 127.0.0.1:9092)")
    kafka_parser.add_argument("--kafka-topic-prefix", default="sql_migration_insert", help="Kafka topic prefix")
    # Validation option (kept as controller internal validation, not standalone CLI command)
    kafka_parser.add_argument(
        "--validate",
        action="store_true",
        help="Run validation (structure + counts) after migration and write TXT report"
    )
    kafka_parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
    kafka_parser.add_argument(
        "--no-transaction",
        action="store_true",
        help="Disable transaction mode (execute stages separately without rollback on error). Default: use transaction (DDL + Constraints in transaction, INSERT with independent transactions)"
    )
    
    # Direct migration command (MySQL file → Convert → Execute)
    migrate_parser = subparsers.add_parser(
        "migrate",
        help="Convert MySQL file to KWDB and execute directly (no intermediate files)"
    )
    # Accept new --mysql-file for consistency with kafka-migrate; keep -f/--file as aliases
    # Also support pre-converted files mode: --ddl-file/--insert-file/--constraints-file
    migrate_parser.add_argument("-f", "--file", "--mysql-file", dest="mysql_file", required=False, help="Path to MySQL dump file")
    migrate_parser.add_argument("--ddl-file", help="Path to DDL SQL file (schema) - required if --mysql-file not provided")
    migrate_parser.add_argument("--insert-file", help="Path to INSERT SQL file (data) - required if --mysql-file not provided")
    migrate_parser.add_argument("--constraints-file", help="Path to constraints SQL file - optional in some cases")
    migrate_parser.add_argument(
        "--db-connection-string",
        default=None,
        help="PostgreSQL connection string (takes precedence over individual db params). "
             "Format: postgresql://user:password@host:port/database?sslmode=disable"
    )
    migrate_parser.add_argument("--db-host", default=None, help="KWDB host (ignored if --db-connection-string provided)")
    migrate_parser.add_argument("--db-port", type=int, default=None, help="KWDB port (ignored if --db-connection-string provided)")
    migrate_parser.add_argument("--db-user", default=None, help="KWDB user (ignored if --db-connection-string provided)")
    migrate_parser.add_argument("--db-password", default=None, help="KWDB password (ignored if --db-connection-string provided)")
    migrate_parser.add_argument("--db-database", default=None, help="KWDB database (ignored if --db-connection-string provided)")
    migrate_parser.add_argument("--on-error", choices=["stop", "skip"], default="stop", help="Error handling mode")
    migrate_parser.add_argument("--dialect-read", default="mysql", help="Input dialect (default: mysql)")
    migrate_parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
    migrate_parser.add_argument(
        "--validate",
        action="store_true",
        help="Run validation (structure + counts) after migration and print report"
    )
    
    return parser


def main(argv: Optional[list[str]] = None) -> int:
    """CLI entry point: parse arguments and dispatch to the subcommand handler.

    Args:
        argv: Argument list for testing; defaults to ``sys.argv[1:]``.

    Returns:
        Process exit code (0 on success, 1 on failure or missing command).
    """
    parser = _build_arg_parser()
    args = parser.parse_args(argv)

    # Table-driven dispatch keeps command wiring in one place.
    handlers = {
        "convert": _handle_convert_command,
        "kafka-migrate": _handle_kafka_migrate_command,
        "migrate": _handle_migrate_command,
    }
    handler = handlers.get(args.command)
    if handler is None:
        parser.print_help()
        return 1
    return handler(args)


def _handle_convert_command(args) -> int:
    """Handle the convert command: transform a MySQL dump into KWDB SQL.

    With ``--out``, writes three sibling files (<base>_schema.sql,
    <base>_data.sql, <base>_constraints.sql); otherwise streams the combined
    SQL to stdout. Prints a timing summary plus any pipeline errors/warnings.

    Returns:
        0 on success, 1 when the input file is missing or the pipeline
        reported errors.
    """
    import time
    import shutil

    # Record total start time
    total_start_time = time.time()

    input_path = Path(args.file)
    if not input_path.exists() or not input_path.is_file():
        print(f"Error: Input file not found: {input_path}")
        return 1

    print(f"Starting conversion of {input_path.name}...")
    print(f"Input file size: {input_path.stat().st_size / (1024 * 1024):.2f} MB")

    cfg_start_time = time.time()
    # Merge CLI args, the process environment and the optional config file.
    # (The original guarded os.environ with hasattr(sys, "__dict__"), which is
    # always true; pass the environment directly.)
    cfg = build_config_from_sources(
        cli_args=vars(args),
        env=os.environ,
        config_path=args.config,
    )
    cfg_time = time.time() - cfg_start_time

    conversion_start_time = time.time()
    result = run_pipeline(cfg)
    conversion_time = time.time() - conversion_start_time

    write_start_time = time.time()
    if args.out:
        out_path = Path(args.out)
        out_dir = out_path.parent
        base = out_path.stem
        ddl_path = out_dir / f"{base}_schema.sql"
        dml_path = out_dir / f"{base}_data.sql"
        cons_path = out_dir / f"{base}_constraints.sql"

        with open(ddl_path, 'w', encoding='utf-8') as f:
            f.write(result.ddl_sql)
        # DML may be file-backed (streaming mode for large dumps) or in-memory.
        if result.dml_file_path and result.dml_file_path.exists():
            # Chunked copy from the temporary file avoids loading it all at once.
            with open(result.dml_file_path, 'r', encoding='utf-8') as src, \
                 open(dml_path, 'w', encoding='utf-8') as dst:
                shutil.copyfileobj(src, dst, length=1024 * 1024)  # 1MB chunks
        else:
            # Write string content directly
            with open(dml_path, 'w', encoding='utf-8') as f:
                f.write(result.dml_sql)
        with open(cons_path, 'w', encoding='utf-8') as f:
            f.write(result.constraints_sql)

        print(f"\nConverted SQL written to:")
        print(f"  - Schema (DDL): {ddl_path}")
        print(f"  - Data (DML):   {dml_path}")
        print(f"  - Constraints:  {cons_path}")
    else:
        # If no out file specified, print combined SQL to stdout
        if result.dml_file_path and result.dml_file_path.exists():
            # Stream DML from file to avoid memory issues
            sys.stdout.write(result.ddl_sql + "\n")
            with open(result.dml_file_path, 'r', encoding='utf-8') as f:
                shutil.copyfileobj(f, sys.stdout, length=1024 * 1024)  # 1MB chunks
            sys.stdout.write("\n" + result.constraints_sql)
        else:
            # For small files, use output_sql directly
            sys.stdout.write(result.output_sql)
    write_time = time.time() - write_start_time

    total_time = time.time() - total_start_time

    # Print timing summary
    print("\n" + "="*60)
    print("Conversion Summary:")
    print("="*60)
    print(f"Configuration:  {cfg_time:.2f}s")
    print(f"Conversion:     {conversion_time:.2f}s")
    print(f"File writing:   {write_time:.2f}s")
    print("-"*60)
    print(f"Total time:     {total_time:.2f}s")
    print("="*60)

    if result.errors:
        print(f"\nErrors encountered: {len(result.errors)}")
        for error in result.errors:
            print(f"  - {error}")

    if result.warnings:
        print(f"\nWarnings: {len(result.warnings)}")
        for warning in result.warnings:
            print(f"  - {warning}")

    return 0 if not result.errors else 1


def _handle_kafka_migrate_command(args) -> int:
    """Handle the kafka-migrate command.

    Two input modes:
      * ``--mysql-file``: run the conversion pipeline in memory, then execute.
      * ``--ddl-file``/``--insert-file``/``--constraints-file``: execute
        pre-converted KWDB SQL files directly (no conversion).

    INSERT data is fed to KafkaMigrationController via a file path whenever
    possible so large DML is streamed rather than held in memory. Validation
    and report options are communicated to the controller through environment
    variables (consumed inside the controller).

    Returns:
        0 on success, 1 on any error (missing files, conversion failure,
        migration errors, or user interrupt).
    """
    # Setup logging
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(
        level=log_level,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    # Suppress verbose Kafka library logs - only show ERROR
    # Heartbeat warnings during rebalancing are normal and not actual errors
    kafka_loggers = [
        'kafka',
        'kafka.conn',
        'kafka.coordinator',
        'kafka.coordinator.consumer',
        'kafka.coordinator.heartbeat',  # Rebalancing heartbeats are normal
        'kafka.consumer',
        'kafka.consumer.fetcher',
        'kafka.consumer.subscription_state',
        'kafka.producer',
        'kafka.producer.record_accumulator',
        'kafka.cluster',
        'kafka.admin',
    ]
    for logger_name in kafka_loggers:
        kafka_logger = logging.getLogger(logger_name)
        if not args.verbose:
            # In non-verbose mode, suppress WARNING (rebalancing is normal) and only show ERROR
            kafka_logger.setLevel(logging.ERROR)
        else:
            # In verbose mode, show INFO but still reduce some noise
            kafka_logger.setLevel(logging.INFO)

    import time

    # Determine mode: MySQL file or converted files
    mysql_file = args.mysql_file
    ddl_file = args.ddl_file
    insert_file = args.insert_file
    constraints_file = args.constraints_file

    # Initialize temp_insert_file to None - will be set if using mysql_file mode
    temp_insert_file = None

    if mysql_file:
        # New mode: Convert from MySQL file
        mysql_path = Path(mysql_file)
        if not mysql_path.exists():
            print(f"Error: MySQL file not found: {mysql_path}")
            return 1

        print("Starting conversion from MySQL to KWDB SQL...")
        conversion_start = time.time()

        # Import conversion dependencies
        from .config import MigratorConfig
        from .pipeline import run_pipeline

        # Create config for conversion
        cfg = MigratorConfig(
            input_sql_path=mysql_path,
            output_sql_path=None,  # No output file
            on_error=args.on_error,
            dialect_read=args.dialect_read,
        )

        # Run conversion pipeline
        pipeline_result = run_pipeline(cfg)
        conversion_time = time.time() - conversion_start

        if pipeline_result.errors:
            print(f"Conversion errors: {pipeline_result.errors}")
            if args.on_error == "stop":
                return 1

        print(f"✓ Conversion completed in {conversion_time:.2f}s")
        # Expose conversion time and source path so controller/validation can include them
        os.environ['KWDB_MIGRATE_CONVERSION_TIME'] = f"{conversion_time:.2f}"
        os.environ['KWDB_MIGRATE_SOURCE_SQL_PATH'] = str(mysql_path)

        # Extract SQL from pipeline result
        ddl_sql = pipeline_result.ddl_sql
        insert_sql = pipeline_result.dml_sql
        constraints_sql = pipeline_result.constraints_sql

        # Check if pipeline already created a temporary file for INSERT statements
        # (happens when file is large > 100MB)
        if pipeline_result.dml_file_path and pipeline_result.dml_file_path.exists():
            # Pipeline already created a temporary file for streaming
            temp_insert_file = pipeline_result.dml_file_path
            print(f"Using pipeline-created INSERT file: {temp_insert_file} ({temp_insert_file.stat().st_size / 1024 / 1024:.1f}MB)")
        elif insert_sql and insert_sql.strip():
            # Write insert_sql to temporary file for streaming processing
            # This avoids loading large INSERT statements into memory
            import tempfile
            temp_insert_fd, temp_insert_path = tempfile.mkstemp(suffix='.sql', prefix='inserts_', text=True)
            temp_insert_file = Path(temp_insert_path)
            try:
                # open() adopts the fd and closes it when the with-block exits
                with open(temp_insert_fd, 'w', encoding='utf-8') as f:
                    f.write(insert_sql)
                print(f"Created temporary INSERT file: {temp_insert_file} ({len(insert_sql) / 1024 / 1024:.1f}MB)")
            except Exception as e:
                print(f"Error: Failed to create temporary INSERT file: {e}")
                if temp_insert_file and temp_insert_file.exists():
                    try:
                        temp_insert_file.unlink()
                    except Exception:
                        pass
                temp_insert_file = None

    else:
        # Old mode: Use converted files
        if not ddl_file or not insert_file or not constraints_file:
            print("Error: Either --mysql-file must be provided, or all three --ddl-file, --insert-file, --constraints-file must be provided")
            return 1

        ddl_path = Path(ddl_file)
        insert_path = Path(insert_file)
        constraints_path = Path(constraints_file)

        # Validate files exist
        for file_path, name in [(ddl_path, "DDL"), (insert_path, "INSERT"), (constraints_path, "Constraints")]:
            if not file_path.exists():
                print(f"Error: {name} file not found: {file_path}")
                return 1

        # Read DDL and constraints fully (usually small). The INSERT file is
        # NOT read here - its path is handed to the controller for streaming.
        # NOTE: the previous chunked reader joined all chunks anyway, so
        # read_text() is the simpler equivalent.
        ddl_sql = ddl_path.read_text(encoding='utf-8')
        insert_sql = ""  # Empty, will use insert_file_path for streaming
        constraints_sql = constraints_path.read_text(encoding='utf-8') if constraints_path.exists() else ""
        conversion_time = 0.0  # No conversion in this mode
        # Ensure previous runs don't leak env into report
        if 'KWDB_MIGRATE_CONVERSION_TIME' in os.environ:
            del os.environ['KWDB_MIGRATE_CONVERSION_TIME']
        if 'KWDB_MIGRATE_SOURCE_SQL_PATH' in os.environ:
            del os.environ['KWDB_MIGRATE_SOURCE_SQL_PATH']

    # Parse Kafka servers (default: 127.0.0.1:9092)
    kafka_servers = [s.strip() for s in args.kafka_servers.split(',')]

    try:
        controller = KafkaMigrationController(
            db_connection_string=args.db_connection_string,
            db_host=args.db_host,
            db_port=args.db_port,
            db_user=args.db_user,
            db_password=args.db_password,
            db_database=args.db_database,
            kafka_servers=kafka_servers,
            kafka_topic_prefix=args.kafka_topic_prefix,
            # Use default values for smart partitioning and consumer management:
            # max_message_size=134217728 (128MB)
            # max_parallel_consumers=None (auto)
            # max_retries=3
            # topic_partitions=4 (smart partitioning)
            # consumers_per_table=2 (smart consumer management)
            # db_min_connections=2, db_max_connections=20 (auto-scaled based on consumers)
        )
        # Pass validation options via environment (consumed inside controller)
        if bool(getattr(args, 'validate', False)):
            # Always run counts+structure and always write TXT report
            os.environ['KWDB_MIGRATE_VALIDATE'] = 'counts,structure'
            os.environ['KWDB_MIGRATE_REPORT'] = '1'
            # If using separate KWDB SQL files, pass file paths for validation
            if not mysql_file:
                os.environ['KWDB_MIGRATE_DDL_FILE'] = str(ddl_path)
                os.environ['KWDB_MIGRATE_INSERT_FILE'] = str(insert_path)
                if constraints_path.exists():
                    os.environ['KWDB_MIGRATE_CONSTRAINTS_FILE'] = str(constraints_path)
        else:
            # Ensure no stale env affects run
            os.environ.pop('KWDB_MIGRATE_VALIDATE', None)
            os.environ.pop('KWDB_MIGRATE_REPORT', None)
            os.environ.pop('KWDB_MIGRATE_DDL_FILE', None)
            os.environ.pop('KWDB_MIGRATE_INSERT_FILE', None)
            os.environ.pop('KWDB_MIGRATE_CONSTRAINTS_FILE', None)

        print("Starting Kafka-based migration...")
        # Pass insert_file_path for streaming
        insert_file_path_for_streaming = None
        if mysql_file and temp_insert_file:
            # When using --mysql-file, use temporary file for streaming
            insert_file_path_for_streaming = temp_insert_file
            insert_sql = ""  # Clear insert_sql to force file mode
        elif insert_file and not mysql_file:
            # When using --insert-file (not --mysql-file), pass file path for streaming
            insert_file_path_for_streaming = insert_path

        try:
            results = controller.execute_migration_from_sql(
                ddl_sql=ddl_sql,
                insert_sql=insert_sql,
                constraints_sql=constraints_sql,
                use_transaction=not getattr(args, 'no_transaction', False),  # Default: True (use transaction)
                insert_file_path=insert_file_path_for_streaming,  # Pass file path for streaming
            )
        finally:
            # Clean up temporary INSERT file if created
            if temp_insert_file and temp_insert_file.exists():
                try:
                    temp_insert_file.unlink()
                    print(f"Deleted temporary INSERT file: {temp_insert_file}")
                except Exception as e:
                    print(f"Warning: Failed to delete temporary INSERT file {temp_insert_file}: {e}")

        # Compute derived total time = conversion + DDL + INSERT + constraints (exclude validation/report)
        insert_exec_time = results.get('insert_exec_time', 0.0)
        ddl_time = results.get('ddl_time', 0.0)
        constraints_time = results.get('constraints_time', 0.0)
        derived_total_time = float(conversion_time or 0.0) + float(ddl_time or 0.0) + float(insert_exec_time or 0.0) + float(constraints_time or 0.0)

        # Print summary
        print("\n" + "="*60)
        print("Migration Summary:")
        print("="*60)

        if conversion_time > 0:
            print(f"Conversion:")
            print(f"  - Time: {conversion_time:.2f}s")
            print()

        print(f"DDL Execution:")
        print(f"  - Status: {'✓' if results['ddl_executed'] else '✗'}")
        ddl_stmt_count = len([s for s in ddl_sql.split(';') if s.strip()])
        print(f"  - Statements: {ddl_stmt_count}")
        print(f"  - Time: {ddl_time:.2f}s")
        print()

        print(f"Data (INSERT) Execution:")
        print(f"  - Sent to Kafka: {'✓' if results['inserts_sent'] else '✗'}")
        print(f"  - Completed: {'✓' if results['inserts_completed'] else '✗'}")
        if results.get('consumer_stats'):
            total_rows = sum(stats['rows_inserted'] for stats in results['consumer_stats'].values())
            total_batches = sum(stats['batches_processed'] for stats in results['consumer_stats'].values())
            print(f"  - Rows inserted: {total_rows}")
            print(f"  - Batches processed: {total_batches}")
            print(f"  - Execution time: {insert_exec_time:.2f}s")
        if results.get('producer_stats'):
            print(f"  - Tables: {len(results['producer_stats'])}")
            for table, batches in results['producer_stats'].items():
                print(f"    - {table}: {batches} batches sent")
        print()

        # Show message size splitting information if available in logs
        # This would be logged by kafka_producer during execution

        print(f"Constraints Execution:")
        print(f"  - Status: {'✓' if results['constraints_executed'] else '✗'}")
        constraints_stmt_count = len([s for s in constraints_sql.split(';') if s.strip()])
        print(f"  - Statements: {constraints_stmt_count}")
        print(f"  - Time: {constraints_time:.2f}s")
        print()

        print(f"{'-'*60}")
        print(f"Total Time: {derived_total_time:.2f}s")
        print("="*60)

        if results.get('errors'):
            print(f"\nErrors:")
            for error in results['errors']:
                print(f"  - {error}")
            return 1

        # Cleanup is already handled in execute_migration's finally block
        # controller.close() is idempotent, but not needed since cleanup already happened
        return 0

    except KeyboardInterrupt:
        print("\nMigration interrupted by user")
        # Cleanup is handled by signal handler and finally block
        return 1
    except Exception as e:
        print(f"Migration failed: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
        # Cleanup is handled in execute_migration's finally block
        return 1
    finally:
        # Ensure cleanup happens even if controller creation failed or exception occurred
        # Note: execute_migration's finally block already handles cleanup, but we ensure it here too
        if 'controller' in locals():
            try:
                # close() is idempotent - safe to call multiple times
                controller.close()
            except Exception:
                # Ignore cleanup errors - they're just warnings
                pass


def _handle_migrate_command(args) -> int:
    """Handle the migrate command (MySQL → Convert → Execute in memory).

    Two input modes are supported:
      1) --mysql-file: convert the MySQL dump and execute it in memory.
      2) --ddl-file/--insert-file (optionally --constraints-file): execute
         already-converted KWDB SQL files directly, without conversion.

    Returns:
        0 on success; 1 on missing input, execution errors, failed
        validation, or user interruption.
    """
    # Setup logging
    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )

    mysql_file_path = getattr(args, 'mysql_file', None)
    use_converted_files = (not mysql_file_path) and (args.ddl_file and args.insert_file)

    if not mysql_file_path and not use_converted_files:
        print("Error: Either --mysql-file must be provided, or --ddl-file and --insert-file must be provided")
        return 1

    executor = None
    try:
        executor = DirectMigrationExecutor(
            connection_string=args.db_connection_string,
            host=args.db_host,
            port=args.db_port,
            user=args.db_user,
            password=args.db_password,
            database=args.db_database,
            on_error=args.on_error,
        )

        if mysql_file_path:
            mysql_file = Path(mysql_file_path)
            if not mysql_file.exists():
                print(f"Error: MySQL file not found: {mysql_file}")
                return 1
            print("Starting direct migration (MySQL → KWDB conversion → execution)...")
            results = executor.execute_from_mysql_file(
                mysql_file=mysql_file,
                dialect_read=args.dialect_read,
            )
        else:
            results = _execute_converted_files(executor, args)

        _print_migration_summary(results, include_conversion=bool(mysql_file_path))

        # Optional post-migration validation (counts + structure) against source
        if args.validate:
            rc = _run_post_migration_validation(args, results, mysql_file_path)
            if rc != 0:
                return rc

        if results.get('errors'):
            print(f"\nErrors:")
            for error in results['errors']:
                print(f"  - {error}")
            return 1

        executor.close()
        return 0

    except KeyboardInterrupt:
        print("\nMigration interrupted by user")
        return 1
    except Exception as e:
        print(f"Migration failed: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
        return 1
    finally:
        # close() is assumed idempotent; a second call after the success path
        # above is harmless, and this guarantees cleanup on every exit path.
        if executor:
            try:
                executor.close()
            except Exception:
                # Best-effort cleanup; a close() failure is not actionable here.
                pass


def _execute_converted_files(executor, args) -> dict:
    """Execute pre-converted DDL/INSERT/constraints SQL files directly.

    Files larger than 100MB are executed via the executor's streaming path
    to avoid loading the whole file into memory; smaller files are read
    into memory and executed as text (backward-compatible path).

    Returns the results dict consumed by `_print_migration_summary`.
    """
    from time import time as _now

    print("Starting direct migration from converted SQL files (DDL/INSERT/Constraints)...")
    results = {
        "conversion_time": 0.0,
        "schema_time": 0.0,
        "data_time": 0.0,
        "constraints_time": 0.0,
        "total_time": 0.0,
        "schema_statements": 0,
        "data_statements": 0,
        "constraints_statements": 0,
        "conversion_warnings": [],
        "errors": [],
    }
    total_start = _now()

    ddl_path = Path(args.ddl_file)
    insert_path = Path(args.insert_file)
    constraints_path = Path(args.constraints_file) if args.constraints_file else None

    ddl_size = ddl_path.stat().st_size if ddl_path.exists() else 0
    insert_size = insert_path.stat().st_size if insert_path.exists() else 0
    constraints_size = constraints_path.stat().st_size if constraints_path and constraints_path.exists() else 0

    # Above this size, stream the file instead of reading it whole (avoid OOM).
    use_streaming_threshold = 100 * 1024 * 1024  # 100MB

    # Execute DDL (usually small, but use streaming if large)
    if ddl_size > 0:
        if ddl_size > use_streaming_threshold:
            st, sc = executor.execute_sql_file_streaming(ddl_path, "Schema (DDL)")
        else:
            with open(ddl_path, 'r', encoding='utf-8') as f:
                ddl_sql = f.read()
            st, sc = executor.execute_sql_text(ddl_sql, "Schema (DDL)")
        results["schema_time"] = st
        results["schema_statements"] = sc

    # Execute INSERT (often large, use streaming for files > 100MB)
    if insert_size > 0:
        if insert_size > use_streaming_threshold:
            st, sc = executor.execute_sql_file_streaming(insert_path, "Data (INSERT)")
            results["data_time"] = st
            results["data_statements"] = sc  # count returned by streaming executor
        else:
            with open(insert_path, 'r', encoding='utf-8') as f:
                insert_sql = f.read()
            st, sc = executor.execute_sql_text(insert_sql, "Data (INSERT)")
            results["data_time"] = st
            # Count only INSERT statements (split by ');' to match actual
            # statement endings in the converted output).
            results["data_statements"] = len(
                [s for s in insert_sql.split(');') if s.strip() and s.strip().upper().startswith('INSERT')]
            )

    # Execute constraints/indexes (usually small, but use streaming if large)
    if constraints_path and constraints_size > 0:
        if constraints_size > use_streaming_threshold:
            st, sc = executor.execute_sql_file_streaming(constraints_path, "Constraints (Indexes)")
        else:
            with open(constraints_path, 'r', encoding='utf-8') as f:
                constraints_sql = f.read()
            st, sc = executor.execute_sql_text(constraints_sql, "Constraints (Indexes)")
        results["constraints_time"] = st
        results["constraints_statements"] = sc

    results["total_time"] = _now() - total_start
    return results


def _print_migration_summary(results: dict, include_conversion: bool) -> None:
    """Print the per-phase timing and statement-count summary of a run.

    Args:
        results: results dict produced by the executor or
            `_execute_converted_files`.
        include_conversion: show the conversion section (only meaningful
            when migrating from a raw MySQL dump).
    """
    print("\n" + "="*60)
    print("Migration Summary:")
    print("="*60)
    if include_conversion:
        print(f"Conversion:")
        print(f"  - Time: {results['conversion_time']:.2f}s")
        if results.get('conversion_warnings'):
            print(f"  - Warnings: {len(results['conversion_warnings'])}")

    print(f"\nSchema (DDL):")
    print(f"  - Time: {results['schema_time']:.2f}s")
    print(f"  - Statements: {results['schema_statements']}")

    print(f"\nData (INSERT):")
    print(f"  - Time: {results['data_time']:.2f}s")
    print(f"  - Statements: {results['data_statements']}")

    print(f"\nConstraints:")
    print(f"  - Time: {results['constraints_time']:.2f}s")
    print(f"  - Statements: {results['constraints_statements']}")

    print(f"\n{'-'*60}")
    print(f"Total Time: {results['total_time']:.2f}s")
    print(f"="*60)


def _read_text_file(file_path: Path, errors: str = 'strict') -> str:
    """Read a text file in 1MB chunks and return its full content.

    NOTE(review): the result is still fully materialized in memory; chunked
    reading only avoids one single large read() call, not peak memory usage.
    """
    chunks = []
    with open(file_path, 'r', encoding='utf-8', errors=errors) as f:
        chunk_size = 1024 * 1024  # 1MB chunks
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            chunks.append(chunk)
    return ''.join(chunks)


def _run_post_migration_validation(args, results: dict, mysql_file_path) -> int:
    """Validate row counts and table structure against the migration source.

    Parses source metadata either from the raw MySQL dump (when
    `mysql_file_path` is given) or from the converted KWDB SQL files, runs
    count and structure validation against the live database, prints the
    report, and writes it to Migration_Report/ on a best-effort basis.

    Returns 0 when validation passes, 1 on failure or internal error.
    """
    try:
        from sqlmigration.db_executor import KWDBExecutor
        from sqlmigration.validation import (
            parse_mysql_source,
            parse_kwdb_source,
            validate_counts_against_source,
            validate_structure_against_source,
            render_report_text,
        )
        vexec = KWDBExecutor(
            connection_string=args.db_connection_string,
            min_connections=1,
            max_connections=2,
        )

        # Parse source metadata based on input type
        if mysql_file_path:
            # Tolerate mixed encodings in raw dumps.
            src_text = _read_text_file(Path(mysql_file_path), errors='ignore')
            src_meta = parse_mysql_source(src_text)
        else:
            ddl_sql = _read_text_file(Path(args.ddl_file))
            insert_sql = _read_text_file(Path(args.insert_file))
            constraints_sql = _read_text_file(Path(args.constraints_file)) if args.constraints_file else ""
            src_meta = parse_kwdb_source(ddl_sql, insert_sql, constraints_sql)

        rpt_counts = validate_counts_against_source(vexec, src_meta)
        rpt_struct = validate_structure_against_source(vexec, src_meta)
        vexec.close()
        timings = {
            'conversion_time': results.get('conversion_time', 0.0),
            'ddl_time': results.get('schema_time', 0.0),
            'insert_exec_time': results.get('data_time', 0.0),
            'constraints_time': results.get('constraints_time', 0.0),
            'total_time': results.get('total_time', 0.0),
        }
        report = render_report_text([rpt_counts, rpt_struct], timings=timings)
        print("\n[VALIDATION] Report (counts + structure):\n" + report)
        _write_validation_report(args, report)
        if not (rpt_counts.ok and rpt_struct.ok):
            return 1
        return 0
    except Exception as e:
        print(f"Validation failed: {e}")
        import traceback
        traceback.print_exc()
        return 1


def _write_validation_report(args, report: str) -> None:
    """Write the validation report to Migration_Report/<db>_<timestamp>.txt.

    Best-effort: any failure here is swallowed so a report-file problem
    never fails an otherwise successful migration (aligns with
    kafka-migrate behavior).
    """
    try:
        os.makedirs('Migration_Report', exist_ok=True)
        from .db_executor import parse_connection_string
        try:
            db_name = parse_connection_string(args.db_connection_string or "").get('database', 'database')
        except Exception:
            db_name = 'database'
        import time as _t
        ts = _t.strftime('%Y%m%d_%H%M%S', _t.localtime())
        fname = f"{db_name}_{ts}.txt"
        out_path = os.path.join(os.getcwd(), 'Migration_Report', fname)
        # Reports are small; a single write is sufficient.
        with open(out_path, 'w', encoding='utf-8') as f:
            f.write(report)
        print(f"[VALIDATION] TXT report written to {out_path}")
    except Exception:
        pass


if __name__ == "__main__":  # pragma: no cover
    # sys.exit raises SystemExit with main()'s return code, matching
    # `raise SystemExit(main())` exactly.
    sys.exit(main())



