#!/usr/bin/env python3
"""
AI Historical Simulation Platform - Main Application Entry Point

This module provides the command-line interface for the AI Historical Simulation Platform,
supporting various operation modes including interactive conversations with historical
figures, batch processing, and system management.
"""

import argparse
import asyncio
import sys
import os
import logging
from pathlib import Path
from typing import Optional, Dict, Any

# Add src directory to Python path
sys.path.insert(0, str(Path(__file__).parent))

from .app.historical_platform import AIHistoricalSimulationPlatform
from .app.session_controller import SessionController
from .app.figure_manager import HistoricalFigureManager
from .config.settings import Config
from .hdc.core import HDCOperations


def setup_logging(level: str = "INFO") -> None:
    """Configure root logging to stream to stdout and a log file.

    Args:
        level: Logging level name (case-insensitive), e.g. "debug", "INFO".

    Raises:
        ValueError: If *level* is not a recognized logging level name
            (previously this surfaced as an opaque AttributeError).
    """
    numeric_level = getattr(logging, level.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError(f"Invalid log level: {level}")
    logging.basicConfig(
        level=numeric_level,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[
            logging.StreamHandler(sys.stdout),
            # Explicit encoding so the log file does not depend on locale.
            logging.FileHandler('historical_platform.log', encoding='utf-8')
        ],
        # Without force=True, basicConfig silently does nothing when the root
        # logger already has handlers (e.g. installed by a test runner).
        force=True,
    )


def create_parser() -> argparse.ArgumentParser:
    """Construct the command-line argument parser for the platform.

    Returns:
        An ArgumentParser exposing the operation mode plus its options.
    """
    parser = argparse.ArgumentParser(
        description="AI Historical Simulation Platform",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s interactive --figure napoleon
  %(prog)s batch --dataset historical_figures.json
  %(prog)s web --port 8080
  %(prog)s demo --conversation napoleon_caesar
        """
    )

    # Positional argument: which subsystem to run.
    parser.add_argument(
        "mode",
        choices=["interactive", "batch", "web", "demo", "config"],
        help="Operation mode"
    )

    # Optional flags, declared as (flag names, add_argument kwargs) pairs.
    option_specs = [
        (("--figure",),
         dict(help="Historical figure name for interactive mode")),
        (("--dataset",),
         dict(help="Dataset file for batch processing")),
        (("--port",),
         dict(type=int, default=8080,
              help="Port for web interface (default: 8080)")),
        (("--conversation",),
         dict(help="Demo conversation scenario")),
        (("--config",),
         dict(help="Configuration file path")),
        (("--dimension",),
         dict(type=int, default=10000,
              help="HDC vector dimension (default: 10000)")),
        (("--log-level",),
         dict(choices=["DEBUG", "INFO", "WARNING", "ERROR"], default="INFO",
              help="Logging level")),
        (("--output",),
         dict(help="Output file for batch processing results")),
        (("--verbose", "-v"),
         dict(action="store_true", help="Enable verbose output")),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)

    return parser


async def _load_figure_or_report(platform: AIHistoricalSimulationPlatform,
                                 figure_name: str):
    """Try to load a historical figure, reporting the outcome to stdout.

    Returns:
        The loaded figure object, or None if loading failed.
    """
    try:
        figure = await platform.load_historical_figure(figure_name)
        print(f"✓ Loaded historical figure: {figure.name}")
        return figure
    except Exception as e:
        print(f"✗ Failed to load figure '{figure_name}': {e}")
        return None


async def interactive_mode(platform: AIHistoricalSimulationPlatform, 
                          figure_name: Optional[str] = None) -> None:
    """
    Run the interactive mode for real-time historical figure conversations.

    Args:
        platform: The AI Historical Simulation Platform instance
        figure_name: Optional specific figure to load; if omitted, the user
            is shown the available figures and prompted to pick one.
    """
    print("\n🏛️  AI Historical Simulation Platform - Interactive Mode")
    print("=" * 60)
    
    session_controller = SessionController(platform)
    
    if not figure_name:
        # No figure supplied: show the catalogue and ask the user.
        available_figures = platform.figure_manager.list_available_figures()
        print(f"\nAvailable historical figures: {', '.join(available_figures)}")
        
        figure_name = input("\nEnter figure name (or 'quit' to exit): ").strip()
        if figure_name.lower() == 'quit':
            return
    
    # Previously this load-and-report logic was duplicated verbatim for the
    # CLI-supplied and the interactively-entered figure name.
    figure = await _load_figure_or_report(platform, figure_name)
    if figure is None:
        return
    
    print(f"\n💭 You are now conversing with {figure.name}")
    print("Type 'quit' to exit, 'help' for commands, or start chatting!")
    print("-" * 60)
    
    while True:
        try:
            user_input = input("\nYou: ").strip()
            
            if user_input.lower() == 'quit':
                break
            elif user_input.lower() == 'help':
                print_help_commands()
                continue
            elif user_input.lower().startswith('switch '):
                new_figure = user_input[7:].strip()
                try:
                    figure = await platform.load_historical_figure(new_figure)
                    print(f"✓ Switched to {figure.name}")
                    continue
                except Exception as e:
                    print(f"✗ Failed to switch to '{new_figure}': {e}")
                    continue
            elif not user_input:
                continue
            
            # Generate the figure's reply within the current session context.
            response = await platform.generate_response(
                figure.personality_id, 
                user_input, 
                session_controller.get_current_session().session_id
            )
            
            print(f"{figure.name}: {response}")
            
        except KeyboardInterrupt:
            print("\n\nGoodbye! 👋")
            break
        except Exception as e:
            print(f"Error: {e}")

def print_help_commands():
    """Display the commands recognized inside an interactive session."""
    command_lines = (
        "  quit - Exit the conversation",
        "  help - Show this help message",
        "  switch <figure> - Switch to a different historical figure",
    )
    print("\nAvailable commands:")
    for line in command_lines:
        print(line)


async def batch_mode(platform: AIHistoricalSimulationPlatform, 
                    dataset_path: str,
                    output_path: Optional[str] = None) -> None:
    """
    Run batch processing mode for processing historical datasets.

    Each dataset entry is expected to be a mapping with 'figure' and 'prompt'
    keys. Per-entry failures are recorded in the results list rather than
    aborting the whole run.

    Args:
        platform: The AI Historical Simulation Platform instance
        dataset_path: Path to the JSON dataset file (a list of entries)
        output_path: Optional output file for results; defaults to a
            timestamped file in the working directory.
    """
    print("\n📊 AI Historical Simulation Platform - Batch Mode")
    print("=" * 60)
    
    try:
        import json
        
        # Load dataset (explicit encoding so results don't depend on locale).
        with open(dataset_path, 'r', encoding='utf-8') as f:
            dataset = json.load(f)
        
        print(f"✓ Loaded dataset with {len(dataset)} entries")
        
        results = []
        
        for i, entry in enumerate(dataset, 1):
            print(f"Processing entry {i}/{len(dataset)}: {entry.get('figure', 'Unknown')}")
            
            try:
                figure = await platform.load_historical_figure(entry['figure'])
                response = await platform.generate_response(
                    figure.personality_id,
                    entry['prompt'],
                    f"batch_session_{i}"
                )
                
                result = {
                    'input': entry,
                    'response': response,
                    'figure': figure.name,
                    'timestamp': platform.get_timestamp()
                }
                results.append(result)
                
            except Exception as e:
                # Record the failure but keep processing remaining entries.
                print(f"✗ Error processing entry {i}: {e}")
                results.append({
                    'input': entry,
                    'error': str(e),
                    'timestamp': platform.get_timestamp()
                })
        
        # Save results
        output_file = output_path or f"batch_results_{platform.get_timestamp()}.json"
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2)
        
        print(f"✓ Batch processing complete. Results saved to: {output_file}")
        
    except FileNotFoundError:
        print(f"✗ Dataset file not found: {dataset_path}")
    except json.JSONDecodeError as e:
        # Malformed dataset previously fell into the generic failure branch.
        print(f"✗ Dataset file is not valid JSON: {e}")
    except Exception as e:
        print(f"✗ Batch processing failed: {e}")


async def web_mode(platform: AIHistoricalSimulationPlatform, port: int) -> None:
    """
    Run the web interface mode.

    Args:
        platform: The AI Historical Simulation Platform instance
        port: Port number for the web server
    """
    print("\n🌐 AI Historical Simulation Platform - Web Mode")
    print("=" * 60)
    
    try:
        from web.app import create_web_app
        
        app = create_web_app(platform)
        
        print(f"🚀 Starting web server on port {port}")
        print(f"📱 Access the interface at: http://localhost:{port}")
        print("Press Ctrl+C to stop the server")
        
        # BUG FIX: uvicorn.run() is a blocking function that spins up its own
        # event loop; it is not a coroutine and 'await uvicorn.run(...)' raises
        # a TypeError. Inside an already-running loop the supported pattern is
        # to build a Server from a Config and await its serve() coroutine.
        import uvicorn
        server = uvicorn.Server(uvicorn.Config(app, host="0.0.0.0", port=port))
        await server.serve()
        
    except ImportError as e:
        print(f"✗ Web dependencies not available: {e}")
        print("Install with: pip install fastapi uvicorn")
    except Exception as e:
        print(f"✗ Failed to start web server: {e}")


async def demo_mode(platform: AIHistoricalSimulationPlatform, 
                   conversation: Optional[str] = None) -> None:
    """
    Run demonstration scenarios.

    Args:
        platform: The AI Historical Simulation Platform instance
        conversation: Optional specific conversation scenario; when omitted,
            the user is prompted to choose one (or all) interactively.
    """
    print("\n🎭 AI Historical Simulation Platform - Demo Mode")
    print("=" * 60)
    
    from examples.demo_conversations import DemoConversations
    
    demo = DemoConversations(platform)
    
    if not conversation:
        # No scenario supplied: list the catalogue and let the user pick.
        available_demos = demo.list_available_demos()
        print(f"\nAvailable demonstrations: {', '.join(available_demos)}")
        
        choice = input("\nSelect demo (or 'all' for all demos): ").strip()
        
        if choice.lower() == 'all':
            await demo.run_all_demos()
            return
        conversation = choice
    
    await demo.run_specific_conversation(conversation)


def config_mode(config_path: Optional[str] = None) -> None:
    """
    Inspect the application settings and report where to edit them.

    Args:
        config_path: Optional path to a configuration file; built-in
            defaults are used when it is missing or not provided.
    """
    print("\n⚙️  AI Historical Simulation Platform - Configuration")
    print("=" * 60)
    
    try:
        if config_path and os.path.exists(config_path):
            config = Config(config_path)
            print(f"✓ Loaded configuration from: {config_path}")
        else:
            config = Config()
            print("✓ Using default configuration")
        
        settings_report = (
            f"  HDC Dimension: {config.hdc.dimension}",
            f"  Similarity Threshold: {config.hdc.similarity_threshold}",
            f"  Memory Cache Size: {config.memory.cache_size}",
            f"  Personality Accuracy: {config.personality.encoding_accuracy}",
        )
        print(f"\nCurrent configuration:")
        for line in settings_report:
            print(line)
        
        # Changes are applied by editing the file on disk, not in-process.
        print(f"\nConfiguration file location: {config.config_path}")
        print("Edit the configuration file and restart the application to apply changes.")
        
    except Exception as e:
        print(f"✗ Configuration error: {e}")


async def main():
    """Main application entry point.

    Parses CLI arguments, configures logging, initializes the platform, and
    dispatches to the selected operation mode. Platform cleanup is always
    attempted on exit when initialization got far enough to create one.
    """
    parser = create_parser()
    args = parser.parse_args()
    
    # Setup logging
    setup_logging(args.log_level)
    logger = logging.getLogger(__name__)
    
    # Track the platform explicitly so cleanup doesn't rely on the fragile
    # "'platform' in locals()" probe the original used.
    platform = None
    try:
        # Load configuration
        config = Config(args.config) if args.config else Config()
        
        if args.mode == "config":
            config_mode(args.config)
            return
        
        # Initialize the platform
        logger.info("Initializing AI Historical Simulation Platform")
        platform = AIHistoricalSimulationPlatform(
            hdc_dimension=args.dimension,
            config=config
        )
        
        await platform.initialize()
        logger.info("Platform initialized successfully")
        
        # Route to appropriate mode
        if args.mode == "interactive":
            await interactive_mode(platform, args.figure)
        elif args.mode == "batch":
            if not args.dataset:
                print("✗ Dataset file required for batch mode. Use --dataset option.")
                return
            await batch_mode(platform, args.dataset, args.output)
        elif args.mode == "web":
            await web_mode(platform, args.port)
        elif args.mode == "demo":
            await demo_mode(platform, args.conversation)
        
    except KeyboardInterrupt:
        print("\n\nApplication interrupted by user")
    except Exception as e:
        logger.error(f"Application error: {e}")
        print(f"✗ Error: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
    finally:
        # The original bare 'except: pass' swallowed everything, including
        # KeyboardInterrupt/SystemExit; narrow it and surface the failure.
        if platform is not None:
            try:
                await platform.cleanup()
            except Exception as cleanup_error:
                logger.warning(f"Platform cleanup failed: {cleanup_error}")


if __name__ == "__main__":
    # Script entry point: drive the async main() to completion on a fresh
    # asyncio event loop (asyncio.run handles loop setup and teardown).
    asyncio.run(main())