"""
Coordinator Server
Support instance change events, and provide health check, startup probe, readiness check and metrics collection functions
Integrate inference API service, rate limiting, timeout control and SSL support, and provide API key verification functionality
"""

import json
import ssl
import asyncio
import threading
import os
from typing import Dict, List, Optional, Any
from datetime import datetime, timezone
from functools import wraps

from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
import uvicorn
from contextlib import asynccontextmanager

from motor.resources.http_msg_spec import InsEventMsg, EventType
from motor.resources.instance import Instance
from motor.utils.logger import get_logger
from motor.utils.cert_util import CoordinatorCertUtil
from motor.coordinator.core.instance_manager import InstanceManager
from motor.coordinator.metrics.metrics_listener import MetricsListener
from motor.coordinator.core.instance_healthchecker import InstanceHealthChecker, ControllerClient

from motor.coordinator.middleware.fastapi_middleware import (
    SimpleRateLimitMiddleware, 
    create_simple_rate_limit_middleware, 
    SimpleRateLimitConfig
)
from motor.config.coordinator import CoordinatorConfig, RateLimitConfig

from motor.coordinator.models.request import (
    RequestType, DeploymentMode,
    OpenAICompletionRequest, OpenAIChatCompletionRequest,
    RequestResponse, StreamResponse
)

from motor.coordinator.router.router import handle_request

logger = get_logger(__name__)



class SSLConfig:
    """Mutable holder for the coordinator's TLS settings.

    Starts out disabled with empty file paths; `_load_ssl_config()` on the
    server fills these fields in from the coordinator configuration.
    """

    def __init__(self):
        # TLS is off until configuration explicitly enables it.
        self.enabled = False
        # Certificate material paths / passphrase, empty until loaded.
        for attr in ("cert_file", "key_file", "ca_file", "password"):
            setattr(self, attr, "")
        # Strict defaults: require peer certificates and verify hostnames.
        self.verify_mode = ssl.CERT_REQUIRED
        self.check_hostname = True

class CoordinatorServer:
    """Coordinator HTTP server hosting two FastAPI applications.

    The management app serves health/readiness/metrics and instance-refresh
    endpoints; the inference app serves OpenAI-compatible completion
    endpoints. They can run on two ports or be merged into a single unified
    app (see create_unified_app / run).
    """

    def __init__(
        self,
        coordinator_config: Optional[CoordinatorConfig] = None
    ):
        """Build both FastAPI apps and load timeout/API-key/SSL settings.

        Args:
            coordinator_config: Pre-initialized configuration. When None, a
                fresh CoordinatorConfig is created and initialized here.

        Raises:
            RuntimeError: If the self-created CoordinatorConfig fails to
                initialize (init() returns -1).
        """
        if coordinator_config is None:
            coordinator_config = CoordinatorConfig()
            # init() uses -1 as its error code rather than raising.
            if coordinator_config.init() == -1:
                logger.error("Failed to initialize CoordinatorConfig")
                raise RuntimeError("Failed to initialize CoordinatorConfig")
        
        # Cache frequently-used sub-configs for direct attribute access.
        self.coordinator_config = coordinator_config
        self.timeout_config = coordinator_config.timeout_config
        self.api_key_config = coordinator_config.api_key_config
        self.ssl_config = SSLConfig()
        
        # Load SSL configuration from coordinator_config
        self._load_ssl_config()
        
        logger.info(f"Timeout configuration: request={self.timeout_config.request_timeout}s, "
                   f"connection={self.timeout_config.connection_timeout}s, read={self.timeout_config.read_timeout}s, "
                   f"write={self.timeout_config.write_timeout}s, keep_alive={self.timeout_config.keep_alive_timeout}s")
        
        # Misconfiguration guard: key checking on with nothing to match
        # would reject every request.
        if self.api_key_config.enabled and not self.api_key_config.valid_keys:
            logger.warning("API Key validation enabled but no valid keys configured!")
        
        logger.info(f"API Key validation enabled: {self.api_key_config.enabled}, valid keys count: {len(self.api_key_config.valid_keys)}, "
                   f"header: {self.api_key_config.header_name}, prefix: {self.api_key_config.key_prefix}, skip paths: {len(self.api_key_config.skip_paths)}")
        
        if self.ssl_config.enabled:
            logger.info(f"SSL configuration enabled: cert_file={self.ssl_config.cert_file}, "
                       f"key_file={self.ssl_config.key_file}, ca_file={self.ssl_config.ca_file}")
        else:
            logger.info("SSL configuration disabled")
        
        self.instance_manager = InstanceManager()
        
        # Two separate apps so management and inference traffic can be served
        # on different ports (see run()); both share the same lifespan hook.
        self.management_app = FastAPI(
            title="Motor Coordinator Management Server",
            description="Management plane: health, readiness, metrics, instance refresh",
            version="1.0.0",
            lifespan=self._lifespan
        )
        
        self.inference_app = FastAPI(
            title="Motor Coordinator Inference Server",
            description="Inference API endpoints (OpenAI-compatible and more)",
            version="1.0.0",
            lifespan=self._lifespan
        )
        
        # Permissive CORS on both apps.
        self.management_app.add_middleware(
            CORSMiddleware, 
            allow_origins=["*"], 
            allow_credentials=True, 
            allow_methods=["*"], 
            allow_headers=["*"]
        )
        self.inference_app.add_middleware(
            CORSMiddleware, 
            allow_origins=["*"], 
            allow_credentials=True, 
            allow_methods=["*"], 
            allow_headers=["*"]
        )
        
        self._register_routes()
    
    def _load_ssl_config(self):
        """Populate self.ssl_config from the request-server TLS settings.

        Reads cert/key/CA paths and the key passphrase from the
        `request_server_tls` items when TLS is enabled; otherwise marks the
        SSL config as disabled. verify_mode/check_hostname keep the defaults
        set by SSLConfig.__init__.
        """
        # Use request_server_tls for coordinator server SSL configuration
        tls = self.coordinator_config.request_server_tls
        if not tls.tls_enable:
            self.ssl_config.enabled = False
            return

        items = tls.items
        self.ssl_config.enabled = True
        self.ssl_config.cert_file = items.get("tls_cert", "")
        self.ssl_config.key_file = items.get("tls_key", "")
        self.ssl_config.ca_file = items.get("ca_cert", "")
        self.ssl_config.password = items.get("tls_passwd", "")
    
    def verify_api_key(self, request: Request) -> bool:
        """Validate the request's API key against the configured key set.

        Returns True when validation is disabled, the path is in the skip
        list, or the presented key is valid. An optional prefix (e.g.
        "Bearer ") is stripped from the header value before matching.

        Raises:
            HTTPException: 401 when the auth header is missing, 403 when the
                presented key is not in the configured set.
        """
        cfg = self.api_key_config
        # Validation disabled, or path explicitly exempted: allow through.
        if not cfg.enabled or request.url.path in cfg.skip_paths:
            return True

        auth_value = request.headers.get(cfg.header_name)
        if not auth_value:
            logger.warning(f"API Key validation failed: missing Authorization header")
            raise HTTPException(
                status_code=401,
                detail="Missing Authorization header",
                headers={"WWW-Authenticate": "Bearer"}
            )

        # Strip the configured prefix only when it is actually present;
        # otherwise match the raw header value.
        prefix = cfg.key_prefix
        candidate = auth_value[len(prefix):] if prefix and auth_value.startswith(prefix) else auth_value

        if candidate not in cfg.valid_keys:
            logger.warning(f"API Key validation failed: invalid key")
            raise HTTPException(
                status_code=403,
                detail="Invalid API Key"
            )

        logger.debug(f"API Key validation successful")
        return True
    
    def _timeout_handler(self, timeout_seconds: Optional[float] = None):
        """Decorator factory that enforces a timeout on async endpoints.

        Args:
            timeout_seconds: Per-endpoint override; falls back to the
                configured request_timeout when None.

        The wrapped coroutine is cancelled on timeout and a 504 is raised;
        HTTPExceptions pass through unchanged, any other error is logged and
        re-raised.
        """
        def decorator(func):
            @wraps(func)
            async def wrapper(*args, **kwargs):
                # Resolve the effective limit at call time so config is live.
                limit = self.timeout_config.request_timeout if timeout_seconds is None else timeout_seconds
                try:
                    return await asyncio.wait_for(func(*args, **kwargs), timeout=limit)
                except asyncio.TimeoutError:
                    logger.warning(f"Request timeout after {limit}s: {func.__name__}")
                    raise HTTPException(
                        status_code=504,
                        detail=f"Request timed out after {limit} seconds"
                    )
                except HTTPException:
                    # Already a proper HTTP error — propagate untouched.
                    raise
                except Exception as e:
                    logger.error(f"Unexpected error in {func.__name__}: {e}")
                    raise

            return wrapper

        return decorator
    
    @asynccontextmanager
    async def _lifespan(self, app: FastAPI):
        """FastAPI lifespan hook shared by all apps this server creates.

        Logs startup, yields control for the app's lifetime, and logs
        shutdown; errors on either side are logged (and re-raised for
        startup failures) rather than silently dropped.
        """
        logger.info("Coordinator server is starting...")
        try:
            yield
        except asyncio.CancelledError:
            # Cancellation during startup/serving is a normal shutdown path.
            logger.info("Coordinator server startup was cancelled")
        except Exception as e:
            logger.error(f"Coordinator server startup failed: {e}")
            raise
        finally:
            logger.info("Coordinator server is shutting down...")
            try:
                # Brief grace period so in-flight work can settle.
                await asyncio.sleep(0.1)
            except asyncio.CancelledError:
                logger.info("Coordinator server shutdown was cancelled")
            except Exception as e:
                logger.warning(f"Error occurred during coordinator server shutdown: {e}")
    
    def _register_routes(self):
        """Register all HTTP routes on the inference and management apps.

        Inference app: OpenAI-compatible completion endpoints, guarded by
        API-key verification and the request timeout decorator.
        Management app: probes (startup/health/readiness), metrics, the
        instance-refresh hook, and a root endpoint describing the API.
        """
        # Inference API routes
        @self.inference_app.post("/v1/completions")
        @self._timeout_handler()
        async def openai_completions(request: Request):
            """OpenAI Completions API"""
            # Raises 401/403 on failure; returns True otherwise.
            self.verify_api_key(request)
            return await self._handle_openai_request(request, RequestType.OPENAI)
        
        @self.inference_app.post("/v1/chat/completions")
        @self._timeout_handler()
        async def openai_chat_completions(request: Request):
            """OpenAI Chat Completions API"""
            self.verify_api_key(request)
            return await self._handle_openai_request(request, RequestType.OPENAI)
        
        # Management routes
        @self.management_app.get("/startup")
        async def startup_probe():
            # Unconditionally OK: reaching this handler means the app is up.
            logger.debug("Received startup probe request")
            return {"status": "ok", "message": "Coordinator is starting up"}
        
        @self.management_app.get("/health")
        async def health_check():
            logger.debug("Received health check request, Coordinator is healthy")
            return {"status": "ok", "message": "Coordinator is healthy"}
        
        @self.management_app.get("/readiness")
        async def readiness_check():
            # NOTE(review): queries the InstanceManager() singleton directly
            # rather than self.instance_manager — presumably the same object.
            if not InstanceManager().is_available():
                raise HTTPException(status_code=503, detail="Service is not ready")
            return {"status": "ok", "message": "Coordinator is ready"}
        
        @self.management_app.get("/metrics")
        async def get_metrics():
            # Placeholder metrics: values are hard-coded zeros, not yet
            # wired to a real collector.
            return {
                "status": "ok",
                "metrics": {
                    "total_requests": 0,
                    "active_requests": 0,
                    "error_count": 0,
                    "uptime": "0s"
                }
            }
        
        @self.management_app.post("/v1/instances/refresh", response_model=RequestResponse)
        @self._timeout_handler()
        async def refresh_instances(request: Request) -> RequestResponse:
            return await self._handle_refresh_instances(request)
        
        @self.management_app.get("/")
        async def root():
            # Human-readable API index for the management plane.
            return {
                "service": "Motor Coordinator Server",
                "version": "1.0.0",
                "description": "coordinator server, management and inference APIs",
                "endpoints": {
                    "# Inference API": {
                        "POST /v1/completions": "OpenAI Completion API",
                        "POST /v1/chat/completions": "OpenAI Chat Completion API"
                    },
                    "# monitoring and health check": {
                        "GET /health": "health check",
                        "GET /startup": "startup probe",
                        "GET /readiness": "readiness check",
                        "GET /metrics": "get metrics"                    },
                    "# instance refresh": {
                        "POST /v1/instances/refresh": "refresh instances"
                    }
                }
            }
    
    async def _handle_openai_request(self, request: Request, request_type: RequestType):
        """Parse, validate, and echo back an OpenAI-style completion request.

        Extracts the input (serialized messages or raw prompt) and the
        stream flag, and returns a success payload describing the request.

        Raises:
            HTTPException: 400 on validation failure, 503 when no instances
                are available, 500 on any unexpected processing error.
        """
        try:
            raw = await request.body()
            payload = json.loads(raw.decode('utf-8'))

            self._validate_openai_request(payload, request_type)
            if not self.instance_manager.is_available():
                raise HTTPException(status_code=503, detail="Service is not available")

            # Chat requests carry "messages"; classic completions carry "prompt".
            if "messages" in payload:
                extracted_input = json.dumps(payload["messages"])
            elif "prompt" in payload:
                extracted_input = payload["prompt"]
            else:
                extracted_input = ""

            return {
                "request_id": "req-openai",
                "status": "success",
                "data": {
                    "input_data": extracted_input,
                    "is_stream": self._openai_is_stream(payload),
                    "request_type": request_type.value
                }
            }
        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Failed to process OpenAI request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    
    def _validate_openai_request(self, body_json: Dict[str, Any], request_type: RequestType):
        """Validate required fields of an OpenAI-style request body.

        Requires "model" always; for OPENAI requests also requires either
        "prompt" or "messages", and when "messages" is present it must be a
        non-empty list of {role, content} objects with a recognized role.

        Raises:
            HTTPException: 400 describing the first violation found.
        """
        if "model" not in body_json:
            raise HTTPException(status_code=400, detail="Missing required field: model")

        if request_type != RequestType.OPENAI:
            return

        if "prompt" not in body_json and "messages" not in body_json:
            raise HTTPException(status_code=400, detail="Missing required field: prompt or messages")

        if "messages" not in body_json:
            return

        messages = body_json["messages"]
        if not isinstance(messages, list) or len(messages) == 0:
            raise HTTPException(status_code=400, detail="Invalid messages field: must be a non-empty array")

        for i, message in enumerate(messages):
            if not isinstance(message, dict):
                raise HTTPException(status_code=400, detail=f"Invalid message format at index {i}: must be an object")
            if "role" not in message or "content" not in message:
                raise HTTPException(status_code=400, detail=f"Invalid message at index {i}: missing role or content")
            if message["role"] not in ["system", "user", "assistant"]:
                raise HTTPException(status_code=400, detail=f"Invalid role '{message['role']}' at index {i}: must be system, user, or assistant")
    
    def _openai_is_stream(self, body_json: Dict[str, Any]) -> bool:
        """Return True when the request body asks for streamed output.

        Accepts boolean-ish values: the strings "true"/"1"/"yes"
        (case-insensitive) count as True; anything else is coerced with
        bool(); a missing "stream" key means False.
        """
        flag = body_json.get("stream", False)
        if isinstance(flag, str):
            # Clients sometimes send the flag as a string literal.
            return flag.lower() in ("true", "1", "yes")
        return bool(flag)
    
    async def _handle_refresh_instances(self, request: Request) -> RequestResponse:
        """Handle an instance refresh event posted to the management app.

        Parses the JSON body, normalizes endpoint ids (JSON object keys are
        always strings but the model expects ints), applies the event to the
        instance manager, and triggers a health-state check.

        Args:
            request: Incoming request whose body is an InsEventMsg as JSON.

        Returns:
            RequestResponse summarizing the refresh outcome.

        Raises:
            HTTPException: 400 for an empty/invalid body or a body that does
                not match the InsEventMsg schema.
        """
        body = await self._parse_refresh_body(request)

        logger.info(f"Received instance refresh request, body keys: {list(body.keys()) if isinstance(body, dict) else 'not a dict'}")

        # Convert per-pod endpoint-id keys from str to int before model
        # validation (JSON serialization stringifies dict keys).
        if isinstance(body, dict) and "instances" in body:
            for instance in body.get("instances", []):
                if isinstance(instance, dict) and "endpoints" in instance:
                    endpoints = instance["endpoints"]
                    if isinstance(endpoints, dict):
                        instance["endpoints"] = self._convert_endpoint_ids(endpoints)

        try:
            event_msg = InsEventMsg(**body)
        except Exception as e:
            logger.error(f"Failed to parse InsEventMsg: {e}, body keys: {list(body.keys()) if isinstance(body, dict) else 'not a dict'}")
            raise HTTPException(status_code=400, detail=f"Invalid request format: {str(e)}")

        # NOTE(review): uses the InstanceManager() singleton directly rather
        # than self.instance_manager — presumably the same object; confirm.
        InstanceManager().refresh_instances(event_msg.event, event_msg.instances)
        is_ready = InstanceHealthChecker(ControllerClient()).check_state_alarm()

        return RequestResponse(
            request_id="refresh_request",
            status="success",
            message="Instance refresh completed",
            data={
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "event_type": event_msg.event.value,
                "instance_count": len(event_msg.instances),
                "is_ready": is_ready
            }
        )

    async def _parse_refresh_body(self, request: Request) -> Any:
        """Read and JSON-decode the refresh request body.

        Raises:
            HTTPException: 400 when the body is empty, not valid JSON, or
                unreadable for any other reason.
        """
        try:
            raw_body = await request.body()
            if not raw_body:
                logger.error("Request body is empty")
                raise HTTPException(status_code=400, detail="Request body cannot be empty")

            body = json.loads(raw_body.decode('utf-8'))
            if not body:
                logger.error("Parsed JSON body is empty")
                raise HTTPException(status_code=400, detail="Request body cannot be empty")

            logger.debug(f"Request body keys: {list(body.keys()) if isinstance(body, dict) else 'not a dict'}")
            return body
        except HTTPException:
            raise
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse request body as JSON: {e}")
            logger.error(f"Request body (first 200 chars): {raw_body.decode('utf-8', errors='ignore')[:200] if raw_body else 'empty'}")
            raise HTTPException(
                status_code=400, 
                detail=f"Invalid JSON format: {str(e)}"
            )
        except Exception as e:
            logger.error(f"Failed to parse request body: {e}, type: {type(e)}")
            raise HTTPException(
                status_code=400, 
                detail=f"Failed to parse request body: {str(e)}"
            )

    @staticmethod
    def _convert_endpoint_ids(endpoints: Dict[str, Any]) -> Dict[str, Any]:
        """Return a copy of `endpoints` with inner endpoint-id keys as ints.

        Non-dict per-pod values are copied through unchanged; ids that cannot
        be converted are kept as strings with a warning.
        """
        converted_endpoints = {}
        for pod_ip, endpoint_dict in endpoints.items():
            if not isinstance(endpoint_dict, dict):
                converted_endpoints[pod_ip] = endpoint_dict
                continue
            converted_endpoints[pod_ip] = {}
            for endpoint_id_str, endpoint_data in endpoint_dict.items():
                try:
                    converted_endpoints[pod_ip][int(endpoint_id_str)] = endpoint_data
                except (ValueError, TypeError) as e:
                    logger.warning(f"Failed to convert endpoint_id '{endpoint_id_str}' to int: {e}, keeping as string")
                    converted_endpoints[pod_ip][endpoint_id_str] = endpoint_data
        return converted_endpoints
    
    def setup_rate_limiting(
        self,
        rate_limit_config: Optional[RateLimitConfig] = None
    ):
        """Attach the rate-limiting middleware to the inference app.

        Args:
            rate_limit_config: Optional override; defaults to the coordinator
                configuration's rate-limit section. A no-op when rate
                limiting is disabled. Setup failures are logged, not raised.
        """
        try:
            effective = self.coordinator_config.rate_limit_config if rate_limit_config is None else rate_limit_config

            if not effective.enabled:
                logger.info("Rate limiting is disabled in configuration")
                return

            # The factory builds the shared limiter; the middleware class is
            # then registered with that limiter instance.
            holder = create_simple_rate_limit_middleware(
                app=self.inference_app,
                max_requests=effective.max_requests,
                window_size=effective.window_size
            )
            self.inference_app.add_middleware(
                SimpleRateLimitMiddleware,
                rate_limiter=holder.rate_limiter,
                skip_paths=effective.skip_paths,
                error_message=effective.error_message,
                error_status_code=effective.error_status_code
            )

            logger.info(f"Rate limiting middleware enabled (Inference): max_requests={effective.max_requests}/{effective.window_size}s")
        except Exception as e:
            logger.error(f"Failed to setup rate limiting middleware (Inference): {e}")
    
    def create_unified_app(
        self,
        rate_limit_config: Optional[RateLimitConfig] = None
    ):
        """Build one FastAPI app exposing management and inference routes.

        Used in combined mode so both planes share a single port. Rate
        limiting is applied when enabled; routes are copied from both
        per-plane apps, skipping FastAPI's reserved documentation paths.

        Returns:
            The newly constructed unified FastAPI application.
        """
        unified_app = FastAPI(
            title="Motor Coordinator Server",
            description="Management and Inference APIs served on a single port",
            version="1.0.0",
            lifespan=self._lifespan
        )

        unified_app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"]
        )

        try:
            effective = self.coordinator_config.rate_limit_config if rate_limit_config is None else rate_limit_config

            if not effective.enabled:
                logger.info("Rate limiting is disabled in configuration")
            else:
                holder = create_simple_rate_limit_middleware(
                    app=unified_app,
                    max_requests=effective.max_requests,
                    window_size=effective.window_size
                )
                unified_app.add_middleware(
                    SimpleRateLimitMiddleware,
                    rate_limiter=holder.rate_limiter,
                    skip_paths=effective.skip_paths,
                    error_message=effective.error_message,
                    error_status_code=effective.error_status_code
                )
                logger.info(f"Rate limiting middleware enabled (Unified): max_requests={effective.max_requests}/{effective.window_size}s")
        except Exception as e:
            logger.error(f"Failed to setup rate limiting middleware (Unified): {e}")

        # Management routes first, then inference routes.
        for source_app in (self.management_app, self.inference_app):
            self._copy_routes(source_app, unified_app)

        return unified_app
    
    def _copy_routes(self, src_app: FastAPI, dst_app: FastAPI, skip_paths: Optional[List[str]] = None):
        """Append src_app's routes onto dst_app, skipping reserved paths.

        FastAPI's auto-generated doc endpoints are always skipped; callers
        can exclude more paths via skip_paths. A path is skipped on an exact
        match or when it falls under a reserved path segment ("<path>/...").
        Routes that cannot be inspected are silently ignored (best effort).
        """
        blocked = {"/docs", "/redoc", "/openapi.json", "/favicon.ico"}
        blocked |= set(skip_paths or [])

        for route in src_app.router.routes:
            try:
                path = getattr(route, "path", None)
                if not path:
                    continue
                if any(path == reserved or path.startswith(reserved + "/") for reserved in blocked):
                    continue
                dst_app.router.routes.append(route)
            except Exception:
                continue
    
    async def run(self):
        """Start the coordinator's uvicorn server(s) and block until exit.

        In combined mode, a single unified app serves both planes on one
        port. Otherwise the management and inference apps run concurrently
        on their own hosts/ports. TLS, keep-alive, and graceful-shutdown
        settings are applied from configuration. On cancellation, keyboard
        interrupt, or failure, all live servers are flagged to exit and the
        triggering exception is re-raised.

        Raises:
            asyncio.CancelledError, KeyboardInterrupt, Exception: whatever
                terminated the serve loop, after best-effort shutdown.
        """
        combined_mode = self.coordinator_config.combined_mode
        rate_limit_config = self.coordinator_config.rate_limit_config

        mgmt_server = None
        inference_server = None
        unified_server = None
        try:
            if combined_mode:
                unified_app = self.create_unified_app(
                    rate_limit_config=rate_limit_config
                )

                logger.info(f"Starting Unified server {self.coordinator_config.combined_host}:{self.coordinator_config.combined_port}")

                unified_config_kwargs = {
                    "app": unified_app,
                    "host": self.coordinator_config.combined_host,
                    "port": self.coordinator_config.combined_port,
                    "log_level": "info",
                    "access_log": True,
                    "lifespan": "on"
                }
                self._apply_timeouts(unified_config_kwargs)

                if self.ssl_config and self.ssl_config.enabled:
                    # NOTE(review): the context is built only to validate the
                    # cert material; uvicorn is given file paths, not the
                    # context itself.
                    ssl_context = CoordinatorCertUtil.create_ssl_context(
                        cert_file=self.ssl_config.cert_file,
                        key_file=self.ssl_config.key_file,
                        ca_file=self.ssl_config.ca_file,
                        password=self.ssl_config.password
                    )
                    if ssl_context:
                        unified_config_kwargs["ssl_keyfile"] = self.ssl_config.key_file
                        unified_config_kwargs["ssl_certfile"] = self.ssl_config.cert_file
                        unified_config_kwargs["ssl_ca_certs"] = self.ssl_config.ca_file
                        if self.ssl_config.password:
                            # Fix: without the passphrase, uvicorn cannot load
                            # an encrypted private key.
                            unified_config_kwargs["ssl_keyfile_password"] = self.ssl_config.password
                        logger.info("HTTPS support enabled for unified server")
                    else:
                        logger.warning("SSL configuration failed, using HTTP mode")

                unified_server = uvicorn.Server(uvicorn.Config(**unified_config_kwargs))
                await unified_server.serve()

            else:
                self.setup_rate_limiting(
                    rate_limit_config=rate_limit_config
                )

                logger.info(f"Starting Management server {self.coordinator_config.mgmt_host}:{self.coordinator_config.mgmt_port}")
                logger.info(f"Starting Inference server {self.coordinator_config.inference_host}:{self.coordinator_config.inference_port}")

                mgmt_config_kwargs = {
                    "app": self.management_app,
                    "host": self.coordinator_config.mgmt_host,
                    "port": self.coordinator_config.mgmt_port,
                    "log_level": "info",
                    "access_log": True,
                    "lifespan": "on"
                }

                inference_config_kwargs = {
                    "app": self.inference_app,
                    "host": self.coordinator_config.inference_host,
                    "port": self.coordinator_config.inference_port,
                    "log_level": "info",
                    "access_log": True,
                    "lifespan": "on"
                }

                self._apply_timeouts(mgmt_config_kwargs)
                self._apply_timeouts(inference_config_kwargs)

                if self.ssl_config and self.ssl_config.enabled:
                    # Management plane: no client-certificate verification;
                    # inference plane: full context including the CA bundle.
                    mgmt_ssl_context = CoordinatorCertUtil.create_ssl_context_no_client_cert(
                        cert_file=self.ssl_config.cert_file,
                        key_file=self.ssl_config.key_file,
                        ca_file=self.ssl_config.ca_file,
                        password=self.ssl_config.password
                    )
                    inference_ssl_context = CoordinatorCertUtil.create_ssl_context(
                        cert_file=self.ssl_config.cert_file,
                        key_file=self.ssl_config.key_file,
                        ca_file=self.ssl_config.ca_file,
                        password=self.ssl_config.password
                    )

                    if mgmt_ssl_context:
                        mgmt_config_kwargs["ssl_keyfile"] = self.ssl_config.key_file
                        mgmt_config_kwargs["ssl_certfile"] = self.ssl_config.cert_file
                        if self.ssl_config.password:
                            mgmt_config_kwargs["ssl_keyfile_password"] = self.ssl_config.password
                        logger.info("HTTPS support enabled for management server (no client cert verification)")
                    else:
                        logger.warning("SSL configuration failed for management server, using HTTP mode")

                    if inference_ssl_context:
                        inference_config_kwargs["ssl_keyfile"] = self.ssl_config.key_file
                        inference_config_kwargs["ssl_certfile"] = self.ssl_config.cert_file
                        inference_config_kwargs["ssl_ca_certs"] = self.ssl_config.ca_file
                        if self.ssl_config.password:
                            inference_config_kwargs["ssl_keyfile_password"] = self.ssl_config.password
                        logger.info("HTTPS support enabled for inference server")
                    else:
                        logger.warning("SSL configuration failed for inference server, using HTTP mode")

                mgmt_server = uvicorn.Server(uvicorn.Config(**mgmt_config_kwargs))
                inference_server = uvicorn.Server(uvicorn.Config(**inference_config_kwargs))

                await asyncio.gather(
                    mgmt_server.serve(),
                    inference_server.serve(),
                )

        except asyncio.CancelledError:
            logger.info("Server tasks were cancelled")
            await self._request_servers_exit(mgmt_server, inference_server, unified_server)
            raise
        except KeyboardInterrupt:
            logger.info("Received keyboard interrupt signal")
            await self._request_servers_exit(mgmt_server, inference_server, unified_server)
            raise
        except Exception as e:
            logger.error(f"Server run failed: {e}")
            await self._request_servers_exit(mgmt_server, inference_server, unified_server)
            raise

    def _apply_timeouts(self, config_kwargs: Dict[str, Any]) -> None:
        """Merge keep-alive and graceful-shutdown timeouts into uvicorn kwargs."""
        if self.timeout_config:
            config_kwargs["timeout_keep_alive"] = self.timeout_config.keep_alive_timeout
            config_kwargs["timeout_graceful_shutdown"] = 30

    @staticmethod
    async def _request_servers_exit(*servers) -> None:
        """Best-effort: flag each live uvicorn server to exit, then yield briefly."""
        for srv in servers:
            if srv:
                try:
                    srv.should_exit = True
                except Exception:
                    pass
        await asyncio.sleep(0.1)

