#!/usr/bin/env python3
"""
Startup script for AI-Powered SSH Terminal
"""

import os
import sys
import subprocess
import time
import platform
from pathlib import Path

def check_python_version():
    """Verify the running interpreter is at least Python 3.8.

    Prints a status line either way and returns True when the version is
    acceptable, False otherwise.
    """
    version_ok = sys.version_info >= (3, 8)
    if not version_ok:
        print("❌ Python 3.8 or higher is required")
        print(f"Current version: {sys.version}")
        return False
    print(f"✓ Python version: {sys.version.split()[0]}")
    return True

def check_dependencies():
    """Check that every required third-party package is importable.

    Prints one status line per package; returns True only when all of them
    import cleanly, otherwise lists the missing ones and returns False.
    """
    print("\n🔍 Checking dependencies...")

    required_packages = [
        'fastapi', 'uvicorn', 'paramiko', 'pydantic',
        'websockets', 'aiofiles', 'structlog'
    ]

    def _importable(name):
        # A successful import-by-name means the dependency is installed.
        try:
            __import__(name)
            return True
        except ImportError:
            return False

    missing_packages = []
    for package in required_packages:
        if _importable(package):
            print(f"✓ {package}")
        else:
            missing_packages.append(package)
            print(f"❌ {package}")

    if not missing_packages:
        print("✓ All required dependencies are installed")
        return True

    print(f"\n⚠️  Missing packages: {', '.join(missing_packages)}")
    print("Install with: pip install -r requirements.txt")
    return False

def check_ollama_service():
    """Probe the local Ollama HTTP API and report whether it is reachable.

    Best-effort check: any failure (requests missing, connection refused,
    timeout) is reported with install instructions and returns False.
    """
    try:
        import requests
        response = requests.get("http://localhost:11434/api/tags", timeout=2)
        if response.status_code != 200:
            print("⚠️  Ollama service not responding")
            return False
        models = response.json().get('models', [])
        print(f"✓ Ollama service running with {len(models)} models")
        # Show at most the first three model names as a quick sanity check.
        for model in models[:3]:
            print(f"   - {model.get('name', 'Unknown')}")
        return True
    except Exception as e:
        print(f"⚠️  Ollama service not available: {e}")
        print("   Install Ollama from: https://ollama.ai/")
        print("   Then run: ollama pull llama2:7b-chat && ollama serve")
        return False

def setup_environment():
    """Populate default environment variables without overriding existing ones."""
    defaults = {
        'PYTHONPATH': str(Path(__file__).parent),
        'AI_TERMINAL_ENV': 'production',
    }
    for key, value in defaults.items():
        # setdefault keeps any value the user already exported.
        os.environ.setdefault(key, value)
    print("✓ Environment variables set")

def start_server(port=8000, host="0.0.0.0"):
    """Run the FastAPI app ("app:app") under uvicorn, blocking until shutdown.

    Args:
        port: TCP port to listen on (default 8000).
        host: Interface to bind (default "0.0.0.0", all interfaces).

    Returns:
        True on a clean shutdown (including Ctrl-C), False if startup failed.
    """
    print(f"\n🚀 Starting AI Terminal Server on {host}:{port}")
    print(f"📱 Web interface: http://localhost:{port}")
    # BUG FIX: this line was missing the f-string prefix, so the literal text
    # "{port}" was printed instead of the actual port number. The doubled
    # braces keep "{client_id}" as a literal placeholder in the output.
    print(f"🔗 WebSocket endpoint: ws://localhost:{port}/ws/{{client_id}}")
    print("\n" + "="*50)

    try:
        # Imported lazily so the dependency check can run (and fail politely)
        # before this import would raise.
        import uvicorn

        uvicorn.run(
            "app:app",
            host=host,
            port=port,
            reload=False,  # keep auto-reload off in production
            log_level="info",
            access_log=True
        )
    except KeyboardInterrupt:
        print("\n\n🛑 Server stopped by user")
    except Exception as e:
        print(f"\n❌ Server failed to start: {e}")
        return False

    return True

def open_browser(url):
    """Open *url* in the user's default web browser.

    Uses the stdlib ``webbrowser`` module, which already dispatches correctly
    on Windows, macOS and Linux — replacing the hand-rolled
    os.startfile / open / xdg-open branching. If no browser can be launched,
    falls back to printing the URL for the user to open manually.
    """
    import webbrowser
    try:
        if not webbrowser.open(url):
            print(f"📱 Please open your browser to: {url}")
    except Exception:
        print(f"📱 Please open your browser to: {url}")

def main():
    """Entry point: run preflight checks, then launch the server.

    Returns a process exit code: 0 on normal shutdown, 1 when a preflight
    check fails or startup is cancelled.
    """
    print("🤖 AI-Powered SSH Terminal with Ollama")
    print("Enterprise SSH client with Ollama AI assistance")
    print("="*50)

    # Preflight: interpreter version first, then third-party packages.
    if not check_python_version():
        return 1

    if not check_dependencies():
        print("\n💡 To install dependencies:")
        print("   pip install -r requirements.txt")
        return 1

    # AI assistance is optional; record availability for the summary below.
    ollama_available = check_ollama_service()

    setup_environment()

    print("\n📋 System Information:")
    print(f"   Python: {sys.version.split()[0]}")
    print(f"   Platform: {platform.system()} {platform.release()}")
    print(f"   AI Service: {'Ollama Available' if ollama_available else 'Ollama Not Available'}")
    print(f"   Working Directory: {os.getcwd()}")

    print("\n❓ Open browser automatically? (y/n): ", end="")
    try:
        answer = input().strip().lower()
    except KeyboardInterrupt:
        print("\n🛑 Startup cancelled")
        return 1
    auto_open = answer in ['y', 'yes', '']

    if not auto_open:
        # Foreground server; blocks until shutdown.
        start_server()
        return 0

    # Run the server on a daemon thread so the browser can be opened after
    # giving the server a moment to come up, then wait on the thread.
    import threading
    server_thread = threading.Thread(target=start_server, daemon=True)
    server_thread.start()

    time.sleep(2)
    open_browser("http://localhost:8000")

    try:
        server_thread.join()
    except KeyboardInterrupt:
        print("\n🛑 Server stopped")

    return 0

if __name__ == "__main__":
    sys.exit(main())