#!/usr/bin/env python3
"""
AI Historical Simulation Platform - Complete Integration Main Entry Point

This is the main application entry point for the production-ready AI Historical
Simulation Platform, supporting multiple operation modes with complete system
integration including all platform components.

让有趣的灵魂跨越时空在当下聚首 - Let fascinating souls gather across time in the present.
"""

import argparse
import asyncio
import json
import logging
import sys
import signal
import os
from pathlib import Path
from typing import Optional, Dict, Any
from datetime import datetime

# Add src directory to Python path
sys.path.insert(0, str(Path(__file__).parent / "src"))

# Import platform components
from platform.historical_simulation_platform import HistoricalSimulationPlatform, managed_platform
from platform.session_manager import SessionManager, get_or_create_session
from platform.api_server import APIServer, create_development_server, create_production_server
from platform.web_interface import WebInterface
from platform.database_manager import DatabaseManager, DatabaseConfig
from platform.performance_monitor import PerformanceMonitor, managed_performance_monitor
from config.settings import Config

# Import legacy components for backward compatibility
try:
    from app.historical_platform import AIHistoricalSimulationPlatform
    from app.session_controller import SessionController
    from app.figure_manager import HistoricalFigureManager
    LEGACY_AVAILABLE = True
except ImportError:
    LEGACY_AVAILABLE = False

logger = logging.getLogger(__name__)


def setup_logging(level: str = "INFO", log_file: str = "platform.log") -> None:
    """Configure comprehensive logging for the platform.

    Installs three handlers on the root logger: stdout, a fixed-name log
    file, and a date-stamped log file, all under ./logs (created if needed).

    Args:
        level: Logging level name (e.g. "DEBUG", "INFO"); case-insensitive.
        log_file: Name of the fixed log file inside the logs directory.
    """
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    
    # Create logs directory
    log_path = Path("logs")
    log_path.mkdir(exist_ok=True)
    
    # Setup logging configuration.  File handlers get an explicit UTF-8
    # encoding: log messages include emoji and Chinese text, which would
    # raise UnicodeEncodeError under a non-UTF-8 platform default
    # encoding (e.g. cp1252 on Windows).
    logging.basicConfig(
        level=getattr(logging, level.upper()),
        format=log_format,
        handlers=[
            logging.StreamHandler(sys.stdout),
            logging.FileHandler(log_path / log_file, encoding="utf-8"),
            logging.FileHandler(
                log_path / f"platform_{datetime.now().strftime('%Y%m%d')}.log",
                encoding="utf-8",
            ),
        ]
    )
    
    # Quieten chatty third-party loggers so platform output stays readable.
    logging.getLogger('uvicorn').setLevel(logging.WARNING)
    logging.getLogger('fastapi').setLevel(logging.WARNING)
    logging.getLogger('asyncio').setLevel(logging.WARNING)


def create_parser() -> argparse.ArgumentParser:
    """Create enhanced command-line argument parser.

    Returns:
        An ``argparse.ArgumentParser`` covering every operation mode plus
        server, platform, monitoring, interactive/batch/demo, and
        maintenance options.
    """
    parser = argparse.ArgumentParser(
        description="AI Historical Simulation Platform - Complete Production System",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Operation Modes:
  interactive    Interactive chat with historical figures
  batch          Batch processing of historical datasets  
  web            Web interface with dashboard
  api            API server only
  platform       Complete platform (API + Web + Monitoring)
  demo           Demonstration scenarios
  config         Configuration management
  status         System status and health check

Examples:
  %(prog)s platform --port 8080 --max-sessions 1000
  %(prog)s interactive --figure "Napoleon Bonaparte"
  %(prog)s batch --dataset historical_conversations.json
  %(prog)s web --port 3000
  %(prog)s api --port 8000 --production
  %(prog)s demo --scenario cross_temporal_debate
  %(prog)s status --detailed

Advanced Options:
  --production           Use production configuration
  --max-sessions         Maximum concurrent sessions (default: 1000)
  --hdc-dimension        HDC vector dimension (default: 10000)
  --db-type              Database type: sqlite, faiss, chroma (default: faiss)
  --enable-monitoring    Enable performance monitoring
  --backup-interval      Backup interval in hours (default: 24)
        """
    )
    
    # Main operation mode
    parser.add_argument(
        "mode",
        choices=["interactive", "batch", "web", "api", "platform", "demo", "config", "status"],
        help="Operation mode"
    )
    
    # Server configuration
    parser.add_argument(
        "--port", "-p",
        type=int,
        default=8080,
        help="Server port (default: 8080)"
    )
    
    parser.add_argument(
        "--host",
        default="127.0.0.1",
        help="Server host (default: 127.0.0.1)"
    )
    
    parser.add_argument(
        "--production",
        action="store_true",
        help="Use production configuration"
    )
    
    # Platform configuration
    parser.add_argument(
        "--max-sessions",
        type=int,
        default=1000,
        help="Maximum concurrent sessions (default: 1000)"
    )
    
    parser.add_argument(
        "--hdc-dimension",
        type=int,
        default=10000,
        help="HDC vector dimension (default: 10000)"
    )
    
    parser.add_argument(
        "--db-type",
        choices=["sqlite", "faiss", "chroma"],
        default="faiss",
        help="Vector database type (default: faiss)"
    )
    
    parser.add_argument(
        "--db-path",
        default="./data",
        help="Database storage path (default: ./data)"
    )
    
    # Monitoring and performance
    # BUG FIX: this was `action="store_true"` with `default=True`, which
    # made the flag a no-op — monitoring could never be switched off.
    # BooleanOptionalAction keeps --enable-monitoring working as before
    # and adds --no-enable-monitoring to actually disable it.
    parser.add_argument(
        "--enable-monitoring",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Enable performance monitoring (default: True)"
    )
    
    parser.add_argument(
        "--response-threshold",
        type=float,
        default=200.0,
        help="Response time threshold in ms (default: 200)"
    )
    
    parser.add_argument(
        "--backup-interval",
        type=int,
        default=24,
        help="Database backup interval in hours (default: 24)"
    )
    
    # Interactive mode options
    parser.add_argument(
        "--figure",
        help="Historical figure name for interactive mode"
    )
    
    # Batch processing options
    parser.add_argument(
        "--dataset",
        help="Dataset file for batch processing"
    )
    
    parser.add_argument(
        "--output",
        help="Output file for batch processing results"
    )
    
    # Demo options
    parser.add_argument(
        "--scenario",
        help="Demo scenario name"
    )
    
    parser.add_argument(
        "--figures",
        nargs="+",
        help="Historical figures for cross-temporal conversations"
    )
    
    parser.add_argument(
        "--topic",
        help="Discussion topic for cross-temporal conversations"
    )
    
    # Configuration options
    parser.add_argument(
        "--config",
        help="Configuration file path"
    )
    
    parser.add_argument(
        "--log-level",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        default="INFO",
        help="Logging level (default: INFO)"
    )
    
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Enable verbose output"
    )
    
    parser.add_argument(
        "--quiet", "-q",
        action="store_true",
        help="Suppress non-error output"
    )
    
    # Status and maintenance
    parser.add_argument(
        "--detailed",
        action="store_true",
        help="Show detailed status information"
    )
    
    parser.add_argument(
        "--health-check",
        action="store_true",
        help="Perform comprehensive health check"
    )
    
    return parser


async def platform_mode(args) -> None:
    """
    Run the complete platform with API server, web interface, and monitoring.
    This is the main production mode.

    Brings up a performance monitor and the simulation platform as nested
    async context managers, builds an API server (production or development
    flavour per --production) plus the web interface, then blocks in
    api_server.start() until interrupted.
    """
    print("\n🏛️  AI Historical Simulation Platform - Complete System")
    print("=" * 70)
    print("让有趣的灵魂跨越时空在当下聚首")
    print("Let fascinating souls gather across time in the present")
    print("=" * 70)
    
    # Create database configuration
    # NOTE(review): db_config is constructed here but never passed to any
    # component below — presumably managed_platform or the API server
    # should receive it; confirm against their signatures.
    db_config = DatabaseConfig(
        db_path=args.db_path,
        vector_db_type=args.db_type,
        vector_dimension=args.hdc_dimension,
        backup_interval_hours=args.backup_interval
    )
    
    try:
        # Initialize performance monitoring
        async with managed_performance_monitor(
            collection_interval=1.0,
            response_time_threshold_ms=args.response_threshold
        ) as performance_monitor:
            
            # Initialize complete platform
            async with managed_platform(
                hdc_dimension=args.hdc_dimension,
                max_concurrent_sessions=args.max_sessions
            ) as platform:
                
                print(f"✓ Platform initialized with {args.hdc_dimension}D HDC vectors")
                print(f"✓ Supporting up to {args.max_sessions} concurrent sessions")
                print(f"✓ Using {args.db_type} vector database")
                print(f"✓ Performance monitoring active (threshold: {args.response_threshold}ms)")
                
                # Create API server (flavour chosen by the --production flag)
                if args.production:
                    api_server = create_production_server(platform, host=args.host, port=args.port)
                    print(f"✓ Production API server ready on {args.host}:{args.port}")
                else:
                    api_server = create_development_server(platform, host=args.host, port=args.port)
                    print(f"✓ Development API server ready on {args.host}:{args.port}")
                
                # Create web interface
                web_interface = WebInterface(platform, api_server)
                print(f"✓ Web interface ready")
                
                # Display access information
                print(f"\n📱 Access Points:")
                print(f"   Web Interface: http://{args.host}:{args.port}/")
                print(f"   API Docs:      http://{args.host}:{args.port}/docs")
                print(f"   Dashboard:     http://{args.host}:{args.port}/dashboard")
                print(f"   Admin API Key: {api_server.get_admin_api_key()}")
                
                print(f"\n🚀 Platform is running. Press Ctrl+C to stop.")
                
                # Start API server (blocks until shutdown/interrupt).
                await api_server.start()
        
    except KeyboardInterrupt:
        print("\n\n⏹️  Platform stopped by user")
    except Exception as e:
        logger.error(f"Platform error: {e}")
        raise


async def api_mode(args) -> None:
    """Run API server only (no web interface, no performance monitor)."""
    print("\n🔌 AI Historical Simulation Platform - API Server")
    print("=" * 60)
    
    try:
        async with managed_platform(
            hdc_dimension=args.hdc_dimension,
            max_concurrent_sessions=args.max_sessions
        ) as platform:
            
            # Pick the server factory matching the deployment flavour.
            if args.production:
                build_server, flavour = create_production_server, "Production"
            else:
                build_server, flavour = create_development_server, "Development"
            
            api_server = build_server(platform, host=args.host, port=args.port)
            print(f"✓ {flavour} API server starting on {args.host}:{args.port}")
            
            print(f"📚 API Documentation: http://{args.host}:{args.port}/docs")
            print(f"🔑 Admin API Key: {api_server.get_admin_api_key()}")
            print("Press Ctrl+C to stop.")
            
            # Blocks here until the server stops or Ctrl+C.
            await api_server.start()
            
    except KeyboardInterrupt:
        print("\n⏹️  API server stopped")
    except Exception as e:
        logger.error(f"API server error: {e}")
        raise


async def web_mode(args) -> None:
    """Run web interface only (backed by a development API server)."""
    print("\n🌐 AI Historical Simulation Platform - Web Interface")
    print("=" * 60)
    
    # Backend API runs one port above the web UI port.
    backend_port = args.port + 1
    
    try:
        async with managed_platform(
            hdc_dimension=args.hdc_dimension,
            max_concurrent_sessions=args.max_sessions
        ) as platform:
            
            # Minimal API server for the web interface to talk to.
            api_server = create_development_server(platform, host=args.host, port=backend_port)
            web_interface = WebInterface(platform, api_server)
            
            print(f"✓ Web interface ready on http://{args.host}:{args.port}/")
            print(f"✓ Backend API on http://{args.host}:{backend_port}/")
            print("Press Ctrl+C to stop.")
            
            # Only the API task exists today; the web interface is served
            # through it, so gather currently wraps a single coroutine.
            await asyncio.gather(
                api_server.start(),
            )
            
    except KeyboardInterrupt:
        print("\n⏹️  Web interface stopped")
    except Exception as e:
        logger.error(f"Web interface error: {e}")
        raise


async def interactive_mode(args) -> None:
    """Enhanced interactive mode with platform integration.

    Loads a small default set of figures, lets the user pick one (via
    --figure or an interactive prompt), then runs a REPL-style chat loop
    supporting the commands `quit`, `help`, `switch <figure>`, and `stats`.
    """
    print("\n💬 AI Historical Simulation Platform - Interactive Mode")
    print("=" * 60)
    
    try:
        async with managed_platform(
            hdc_dimension=args.hdc_dimension,
            max_concurrent_sessions=10  # Lower limit for interactive
        ) as platform:
            
            # Load default figures; failures are reported but non-fatal.
            print("Loading historical figures...")
            default_figures = ["Napoleon Bonaparte", "William Shakespeare", "Albert Einstein"]
            for figure_name in default_figures:
                try:
                    await platform.load_historical_figure(figure_name)
                    print(f"✓ Loaded {figure_name}")
                except Exception as e:
                    print(f"⚠️  Failed to load {figure_name}: {e}")
            
            # Select figure: prefer --figure, otherwise prompt the user.
            figure_name = args.figure
            if not figure_name:
                available_figures = list(platform.historical_figures.keys())
                print(f"\nAvailable figures: {', '.join(available_figures)}")
                figure_name = input("\nEnter figure name (or 'quit' to exit): ").strip()
                if figure_name.lower() == 'quit':
                    return
            
            # Load selected figure (fails gracefully if the name is unknown).
            try:
                figure = await platform.load_historical_figure(figure_name)
                print(f"\n✓ Now conversing with {figure.name}")
                print(f"Historical Period: {figure.historical_period}")
            except Exception as e:
                print(f"✗ Failed to load {figure_name}: {e}")
                return
            
            # Create session
            # NOTE(review): the metadata records the initially selected
            # figure only; a later `switch` does not update it — confirm
            # whether session analytics rely on this field.
            session_manager = platform.session_manager
            session_id = await session_manager.create_session(
                user_id="interactive_user",
                session_metadata={"mode": "interactive", "figure": figure_name}
            )
            
            print(f"\n💭 Chat with {figure.name} (type 'quit' to exit, 'help' for commands)")
            print("-" * 60)
            
            # Chat loop: each iteration reads one line and either runs a
            # command or sends the text to the active figure.
            while True:
                try:
                    user_input = input(f"\nYou: ").strip()
                    
                    if user_input.lower() == 'quit':
                        break
                    elif user_input.lower() == 'help':
                        print_interactive_help()
                        continue
                    elif user_input.lower().startswith('switch '):
                        # Swap the active figure for the rest of the session.
                        new_figure = user_input[7:].strip()
                        try:
                            figure = await platform.load_historical_figure(new_figure)
                            print(f"✓ Switched to {figure.name}")
                            continue
                        except Exception as e:
                            print(f"✗ Failed to switch to {new_figure}: {e}")
                            continue
                    elif user_input.lower() == 'stats':
                        await show_session_stats(platform, session_id)
                        continue
                    elif not user_input:
                        continue
                    
                    # Generate response, timing it so slow replies can be
                    # flagged against the 200ms responsiveness target.
                    import time
                    start_time = time.time()
                    
                    response = await platform.generate_response(
                        session_id=session_id,
                        figure_name=figure.name,
                        user_input=user_input
                    )
                    
                    response_time = (time.time() - start_time) * 1000
                    
                    print(f"\n{figure.name}: {response}")
                    
                    if response_time > 200:
                        print(f"⚠️  Response time: {response_time:.1f}ms (slow)")
                    elif args.verbose:
                        print(f"ℹ️  Response time: {response_time:.1f}ms")
                    
                except KeyboardInterrupt:
                    print("\n\nGoodbye! 👋")
                    break
                except Exception as e:
                    # Per-message errors are shown but do not end the chat.
                    print(f"✗ Error: {e}")
            
            # Cleanup
            await session_manager.remove_session(session_id)
            
    except Exception as e:
        logger.error(f"Interactive mode error: {e}")
        raise


async def demo_mode(args) -> None:
    """Enhanced demo mode with complete platform features."""
    print("\n🎭 AI Historical Simulation Platform - Demo Mode")
    print("=" * 60)
    
    # Map scenario names to their demo coroutines; anything unrecognised
    # (including no scenario at all) falls back to the default showcase.
    scenarios = {
        "cross_temporal_debate": run_cross_temporal_demo,
        "educational_session": run_educational_demo,
        "performance_test": run_performance_demo,
    }
    
    try:
        async with managed_platform(
            hdc_dimension=args.hdc_dimension,
            max_concurrent_sessions=50
        ) as platform:
            
            runner = scenarios.get(args.scenario, run_default_demo)
            await runner(platform, args)
                
    except Exception as e:
        logger.error(f"Demo mode error: {e}")
        raise


async def batch_mode(args) -> None:
    """Enhanced batch processing mode.

    Loads a JSON dataset (a list of entries), processes it in batches of
    10 concurrent requests, and writes the results plus run metadata to a
    JSON output file (``--output`` or a timestamped default name).

    Args:
        args: Parsed CLI namespace; requires ``dataset``, optionally
            ``output``, plus the platform sizing options.
    """
    print("\n📊 AI Historical Simulation Platform - Batch Processing")
    print("=" * 60)
    
    if not args.dataset:
        print("✗ Dataset file required. Use --dataset option.")
        return
    
    try:
        async with managed_platform(
            hdc_dimension=args.hdc_dimension,
            max_concurrent_sessions=100
        ) as platform:
            
            # Load dataset.  Explicit UTF-8: JSON files are UTF-8 by
            # convention, and relying on the locale default encoding can
            # corrupt non-ASCII content on some platforms.
            with open(args.dataset, 'r', encoding='utf-8') as f:
                dataset = json.load(f)
            
            print(f"✓ Loaded dataset with {len(dataset)} entries")
            
            results = []
            session_manager = platform.session_manager
            
            # Process in fixed-size batches so at most `batch_size`
            # requests are in flight at once.
            batch_size = 10
            for i in range(0, len(dataset), batch_size):
                batch = dataset[i:i + batch_size]
                batch_results = await process_batch(platform, session_manager, batch, i)
                results.extend(batch_results)
                
                print(f"Processed {min(i + batch_size, len(dataset))}/{len(dataset)} entries")
            
            # Save results alongside run metadata for reproducibility.
            output_file = args.output or f"batch_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
            
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump({
                    'metadata': {
                        'total_entries': len(dataset),
                        'processed_entries': len(results),
                        'timestamp': datetime.now().isoformat(),
                        'platform_config': {
                            'hdc_dimension': args.hdc_dimension,
                            'db_type': args.db_type
                        }
                    },
                    'results': results
                }, f, indent=2)
            
            print(f"✓ Batch processing complete. Results saved to: {output_file}")
            
            # Show statistics: entries carrying an 'error' key count as failed.
            successful = len([r for r in results if 'error' not in r])
            failed = len(results) - successful
            print(f"📈 Statistics: {successful} successful, {failed} failed")
            
    except FileNotFoundError:
        print(f"✗ Dataset file not found: {args.dataset}")
    except Exception as e:
        logger.error(f"Batch processing error: {e}")
        raise


async def status_mode(args) -> None:
    """System status and health check mode.

    Probes a (possibly) running platform instance over HTTP, and with
    --health-check additionally inspects local database/log directories
    and (if psutil is installed) system resource usage.
    """
    print("\n📋 AI Historical Simulation Platform - System Status")
    print("=" * 60)
    
    try:
        # Try to connect to running platform.
        # NOTE(review): `json` is already imported at module level; this
        # local re-import is redundant.
        import aiohttp
        import json
        
        try:
            async with aiohttp.ClientSession() as session:
                # Try to get health status
                async with session.get(f"http://{args.host}:{args.port}/health") as response:
                    if response.status == 200:
                        health_data = await response.json()
                        print("✅ Platform is running")
                        print(f"   Status: {health_data.get('status', 'unknown')}")
                        
                        if args.detailed:
                            health = health_data.get('health', {})
                            print(f"   Uptime: {health.get('uptime_seconds', 0):.0f} seconds")
                            print(f"   Active Sessions: {health.get('active_sessions', 0)}")
                            print(f"   Memory Usage: {health.get('memory_usage_mb', 0):.1f} MB")
                            print(f"   Avg Response Time: {health.get('avg_response_time_ms', 0):.1f} ms")
                    else:
                        print(f"⚠️  Platform responding with status {response.status}")
        except aiohttp.ClientError:
            # Connection refused / DNS / timeout — platform likely not up.
            print("🔴 Platform not accessible")
            print(f"   Tried to connect to: {args.host}:{args.port}")
        
        if args.health_check:
            print("\n🔍 Performing comprehensive health check...")
            
            # Check database files
            db_path = Path(args.db_path)
            if db_path.exists():
                print(f"✅ Database directory exists: {db_path}")
                
                # Check for database files (the recursive glob also counts
                # subdirectories, not just files).
                db_files = list(db_path.glob("**/*"))
                print(f"   Found {len(db_files)} database files")
            else:
                print(f"⚠️  Database directory not found: {db_path}")
            
            # Check log files
            log_path = Path("logs")
            if log_path.exists():
                log_files = list(log_path.glob("*.log"))
                print(f"✅ Log directory exists with {len(log_files)} files")
            else:
                print("⚠️  Log directory not found")
            
            # Check system resources; psutil is an optional dependency.
            try:
                import psutil
                print(f"✅ System Resources:")
                print(f"   CPU Usage: {psutil.cpu_percent()}%")
                print(f"   Memory Usage: {psutil.virtual_memory().percent}%")
                # NOTE(review): '/' assumes a POSIX-style filesystem root;
                # verify behaviour on Windows deployments.
                print(f"   Disk Usage: {psutil.disk_usage('/').percent}%")
            except ImportError:
                print("⚠️  System resource monitoring not available (install psutil)")
            
            print("\n✅ Health check complete")
    
    except Exception as e:
        logger.error(f"Status check error: {e}")
        print(f"✗ Error checking system status: {e}")


def config_mode(args) -> None:
    """Configuration management mode.

    If the configuration file (``--config`` or ``platform_config.yaml``)
    is missing, writes a default YAML config assembled from the parsed
    CLI arguments; in either case prints a summary of the active values.

    Args:
        args: Parsed CLI namespace providing config path and option values.
    """
    print("\n⚙️  AI Historical Simulation Platform - Configuration")
    print("=" * 60)
    
    config_file = args.config or "platform_config.yaml"
    
    try:
        if Path(config_file).exists():
            print(f"✅ Configuration file exists: {config_file}")
        else:
            print(f"⚠️  Configuration file not found: {config_file}")
            print("Creating default configuration...")
            
            # Build the default config purely from CLI arguments so the
            # written file mirrors exactly what this invocation would use.
            default_config = {
                'platform': {
                    'hdc_dimension': args.hdc_dimension,
                    'max_concurrent_sessions': args.max_sessions,
                    'response_threshold_ms': args.response_threshold
                },
                'database': {
                    'type': args.db_type,
                    'path': args.db_path,
                    'backup_interval_hours': args.backup_interval
                },
                'server': {
                    'host': args.host,
                    'port': args.port,
                    'production': args.production
                },
                'monitoring': {
                    'enabled': args.enable_monitoring,
                    'collection_interval': 1.0
                }
            }
            
            # Imported lazily: PyYAML is only needed when writing the file.
            import yaml
            # Explicit UTF-8 so the written file is portable regardless of
            # the platform's locale default encoding.
            with open(config_file, 'w', encoding='utf-8') as f:
                yaml.dump(default_config, f, default_flow_style=False)
            
            print(f"✅ Created default configuration: {config_file}")
        
        print(f"\nCurrent configuration:")
        print(f"  HDC Dimension: {args.hdc_dimension}")
        print(f"  Max Sessions: {args.max_sessions}")
        print(f"  Database Type: {args.db_type}")
        print(f"  Server Port: {args.port}")
        print(f"  Production Mode: {args.production}")
    
    except Exception as e:
        logger.error(f"Configuration error: {e}")
        print(f"✗ Configuration error: {e}")


# Helper functions

def print_interactive_help():
    """Print the command reference for interactive mode."""
    commands = [
        ("quit", "Exit the conversation"),
        ("help", "Show this help message"),
        ("switch <figure>", "Switch to different historical figure"),
        ("stats", "Show session statistics"),
    ]
    # Command names are padded to a 24-character column so descriptions align.
    lines = ["\n📖 Interactive Mode Commands:"]
    lines.extend(f"  {name.ljust(24)}- {desc}" for name, desc in commands)
    lines.append("  Type any message to chat with the current figure")
    print("\n".join(lines))


async def show_session_stats(platform, session_id: str):
    """Display statistics for the given session, if it exists."""
    try:
        session = await platform.session_manager.get_session(session_id)
        if not session:
            print("⚠️  Session not found")
            return
        # Shared timestamp format for the created/last-active fields.
        ts_fmt = '%Y-%m-%d %H:%M:%S'
        print(f"\n📊 Session Statistics:")
        print(f"   Session ID: {session_id}")
        print(f"   Created: {session.created_at.strftime(ts_fmt)}")
        print(f"   Total Messages: {session.total_messages}")
        print(f"   Active Figure: {session.active_figure}")
        print(f"   Last Active: {session.last_active.strftime(ts_fmt)}")
    except Exception as e:
        # Errors are reported, never raised — stats are informational only.
        print(f"✗ Error getting session stats: {e}")


async def run_cross_temporal_demo(platform, args):
    """Run cross-temporal conversation demo."""
    print("🌍 Cross-Temporal Conversation Demo")
    
    # Fall back to a stock debate when the user gave no participants/topic.
    participants = args.figures or ["Napoleon Bonaparte", "Albert Einstein"]
    discussion_topic = args.topic or "The nature of leadership and innovation"
    
    print(f"Participants: {', '.join(participants)}")
    print(f"Topic: {discussion_topic}")
    print("-" * 40)
    
    try:
        demo_session = await platform.session_manager.create_session(
            user_id="demo_user",
            session_metadata={"mode": "cross_temporal_demo"}
        )
        
        exchanges = await platform.create_cross_temporal_conversation(
            session_id=demo_session,
            figure_names=participants,
            topic=discussion_topic,
            rounds=2
        )
        
        # Print each turn as "figure (period):" followed by the reply.
        for turn in exchanges:
            print(f"\n{turn['figure']} ({turn['historical_period']}):")
            print(f"  {turn['response']}")
        
        print("\n✅ Cross-temporal demo completed")
        
    except Exception as e:
        print(f"✗ Demo error: {e}")


async def run_educational_demo(platform, args):
    """Run educational scenario demo."""
    # Placeholder: the classroom simulation itself is not implemented yet,
    # so this only prints the scenario banner and completion message.
    for line in (
        "🎓 Educational Scenario Demo",
        "Simulating a classroom discussion about the Renaissance",
        "✅ Educational demo completed",
    ):
        print(line)


async def run_performance_demo(platform, args):
    """Run performance testing demo."""
    print("⚡ Performance Testing Demo")
    
    import time
    
    sessions = platform.session_manager
    target_figure = "Napoleon Bonaparte"
    
    print(f"Testing with {target_figure}...")
    
    # Make sure the figure is available before timing starts.
    await platform.load_historical_figure(target_figure)
    
    request_count = 10
    started = time.time()
    
    # Each request gets its own session, then all responses are awaited
    # concurrently; exceptions come back as result values, not raises.
    pending = []
    for idx in range(request_count):
        sid = await sessions.create_session(
            user_id=f"perf_test_{idx}",
            session_metadata={"mode": "performance_test"}
        )
        pending.append(platform.generate_response(
            session_id=sid,
            figure_name=target_figure,
            user_input=f"What do you think about strategy? (request {idx+1})"
        ))
    
    outcomes = await asyncio.gather(*pending, return_exceptions=True)
    
    elapsed = time.time() - started
    
    # Wall-clock average per request (requests ran concurrently).
    ok_count = sum(1 for r in outcomes if not isinstance(r, Exception))
    per_request_ms = (elapsed / request_count) * 1000
    
    print(f"✅ Performance test completed")
    print(f"   Requests: {request_count}")
    print(f"   Successful: {ok_count}")
    print(f"   Total Time: {elapsed:.2f}s")
    print(f"   Avg Time per Request: {per_request_ms:.1f}ms")
    print(f"   Requests per Second: {request_count/elapsed:.1f}")


async def run_default_demo(platform, args):
    """Run default demonstration."""
    print("🎪 Default Platform Demo")
    print("Showcasing key platform features...")
    
    # Bring up the standard trio of figures, tolerating individual failures.
    for name in ("Napoleon Bonaparte", "William Shakespeare", "Albert Einstein"):
        try:
            await platform.load_historical_figure(name)
            print(f"✅ Loaded {name}")
        except Exception as e:
            print(f"⚠️  Failed to load {name}: {e}")
    
    demo_session = await platform.session_manager.create_session(
        user_id="demo_user",
        session_metadata={"mode": "default_demo"}
    )
    
    # One signature question per figure.
    sample_questions = (
        ("Napoleon Bonaparte", "What made you a great military leader?"),
        ("William Shakespeare", "How do you approach writing a new play?"),
        ("Albert Einstein", "Can you explain relativity in simple terms?"),
    )
    
    for name, question in sample_questions:
        try:
            print(f"\n❓ Question to {name}: {question}")
            answer = await platform.generate_response(
                session_id=demo_session,
                figure_name=name,
                user_input=question
            )
            print(f"💬 {name}: {answer}")
        except Exception as e:
            # A failing figure does not stop the remaining questions.
            print(f"✗ Error with {name}: {e}")
    
    print("\n✅ Default demo completed")


async def process_batch(platform, session_manager, batch, batch_start_index):
    """Process a batch of entries concurrently; return one record each."""
    # One session and one task per entry; tasks run concurrently below.
    tasks = []
    for offset, entry in enumerate(batch):
        absolute_index = batch_start_index + offset
        sid = await session_manager.create_session(
            user_id=f"batch_user_{absolute_index}",
            session_metadata={"mode": "batch", "entry_index": absolute_index}
        )
        tasks.append(process_single_entry(platform, entry, sid, absolute_index))
    
    outcomes = await asyncio.gather(*tasks, return_exceptions=True)
    
    # Normalise exceptions into error records so callers get one flat list.
    return [
        {'error': str(r), 'timestamp': datetime.now().isoformat()}
        if isinstance(r, Exception) else r
        for r in outcomes
    ]


async def process_single_entry(platform, entry, session_id, entry_index):
    """Process a single batch entry; always returns a result dict."""
    import time
    
    try:
        # Entries may omit figure/prompt; fall back to sensible defaults.
        figure_name = entry.get('figure', 'Napoleon Bonaparte')
        prompt = entry.get('prompt', 'Tell me about yourself')
        
        # Ensure the figure is loaded before asking it anything.
        await platform.load_historical_figure(figure_name)
        
        started = time.time()
        response = await platform.generate_response(
            session_id=session_id,
            figure_name=figure_name,
            user_input=prompt
        )
        elapsed = time.time() - started
        
        return {
            'entry_index': entry_index,
            'input': entry,
            'figure': figure_name,
            'response': response,
            'response_time_seconds': elapsed,
            'timestamp': datetime.now().isoformat()
        }
        
    except Exception as e:
        # Failures are reported as data, not raised, so one bad entry
        # cannot abort the whole batch.
        return {
            'entry_index': entry_index,
            'input': entry,
            'error': str(e),
            'timestamp': datetime.now().isoformat()
        }


def setup_signal_handlers():
    """Setup signal handlers for graceful shutdown."""
    def _on_signal(signum, frame):
        print(f"\n🛑 Received signal {signum}. Shutting down gracefully...")
        # Convert the OS signal into KeyboardInterrupt so the async
        # context managers unwind and clean up normally.
        raise KeyboardInterrupt()
    
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, _on_signal)


async def main():
    """Enhanced main application entry point."""
    args = create_parser().parse_args()
    
    # Quiet mode overrides whatever log level was requested.
    setup_logging("ERROR" if args.quiet else args.log_level)
    setup_signal_handlers()
    
    logger.info(f"Starting AI Historical Simulation Platform in {args.mode} mode")
    
    # Dispatch table: every mode handler is async except config_mode,
    # which is plain synchronous and handled separately.
    async_handlers = {
        "platform": platform_mode,
        "api": api_mode,
        "web": web_mode,
        "interactive": interactive_mode,
        "batch": batch_mode,
        "demo": demo_mode,
        "status": status_mode,
    }
    
    try:
        if args.mode == "config":
            config_mode(args)
        else:
            handler = async_handlers.get(args.mode)
            if handler is not None:
                await handler(args)
        
    except KeyboardInterrupt:
        if not args.quiet:
            print("\n👋 Platform stopped by user")
    except Exception as e:
        logger.error(f"Application error: {e}")
        if not args.quiet:
            print(f"✗ Error: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
        sys.exit(1)
    
    if not args.quiet:
        print("🎯 Platform shutdown complete")


# Script entry point: drive the async main() under asyncio's event loop.
if __name__ == "__main__":
    asyncio.run(main())