"""
EngineServer - mocks D/P instances and the metaserver for the inference flow.
Supports both streaming and non-streaming requests.
Includes a fix for the "'NoneType' object is not subscriptable" issue.
"""

import sys
import time
import random
import asyncio
import httpx
from fastapi import FastAPI, HTTPException, Request, status, Header, BackgroundTasks
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from typing import Optional, Dict, Any, AsyncGenerator, List
from enum import Enum
import uuid
import json

# Mock the missing dependencies
class PDRole(Enum):
    """Role of a mock engine instance: D = decode, P = prefill."""
    # NOTE(review): defined for parity with the real system's enum; not
    # referenced elsewhere in this file.
    ROLE_D = "D"
    ROLE_P = "P"

class ReqState(Enum):
    """Lifecycle states a mock inference request moves through."""
    PREFILL_END = "prefill_end"
    DECODE_END = "decode_end"
    INIT = "init"
    PROCESSING = "processing"
    PREFILL_START = "prefill_start"

class RequestInfo:
    """Per-request record: payload, lifecycle state and the simulated KV cache."""

    def __init__(self, req_id, req_data):
        self.req_id = req_id
        self.req_data = req_data
        self.created_time = time.time()
        self.state = ReqState.INIT
        # Placeholder handle for the simulated KV cache (filled during prefill).
        self.kv_cache = ""

    def update_state(self, state: ReqState):
        """Transition to *state*, logging the old -> new value."""
        previous = self.state
        self.state = state
        logger.info(f"Request {self.req_id} state: {previous.value} -> {state.value}")

class RequestManager:
    """Process-wide singleton registry of in-flight requests keyed by id."""

    _instance = None

    def __init__(self):
        self.requests = {}

    @classmethod
    def get_instance(cls):
        """Return the shared manager, creating it lazily on first use."""
        if cls._instance is None:
            cls._instance = RequestManager()
        return cls._instance

    def addReq(self, req_info: RequestInfo):
        """Register *req_info* under its request id."""
        self.requests[req_info.req_id] = req_info
        logger.info(f"Request {req_info.req_id} added to RequestManager")

    def delReq(self, req_id: str):
        """Remove *req_id* from the registry; silently ignore unknown ids."""
        if req_id in self.requests:
            self.requests.pop(req_id)
            logger.info(f"Request {req_id} removed from RequestManager")

    def getReq(self, req_id: str) -> Optional[RequestInfo]:
        """Look up a request by id, returning None when absent."""
        return self.requests.get(req_id)

# Initialize request manager
# Eagerly creates the singleton at import time; get_instance() would also
# create it lazily, so this is belt-and-braces initialization.
RequestManager._instance = RequestManager()

# Mock logger
class MockLogger:
    """Minimal stand-in for a real logger: prints timestamped lines to stdout.

    Supports lazy %-style argument interpolation like the stdlib logging API.
    """

    @staticmethod
    def _emit(level, msg, args):
        # %-format only when args are supplied, mirroring logging semantics.
        text = msg % args if args else msg
        print(f"[{level}] {time.strftime('%H:%M:%S')} - {text}")

    def info(self, msg, *args, **kwargs):
        self._emit("INFO", msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit("ERROR", msg, args)

    def debug(self, msg, *args, **kwargs):
        self._emit("DEBUG", msg, args)

    def warning(self, msg, *args, **kwargs):
        self._emit("WARNING", msg, args)

logger = MockLogger()

# Request models
class CompletionRequest(BaseModel):
    """Body of POST /v1/completions (OpenAI-style text completion)."""
    model: str
    prompt: str
    max_tokens: int = 3
    temperature: float = 0.1
    top_p: Optional[float] = 0.9
    stream: bool = False
    # Non-None only for requests participating in the D/P transfer flow.
    kv_transfer_params: Optional[Dict[str, Any]] = None

class ChatCompletionRequest(BaseModel):
    """Body of POST /v1/chat/completions (OpenAI-style chat completion)."""
    model: str
    # Each message is a dict like {"role": ..., "content": ...}.
    messages: List[Dict[str, str]]
    max_tokens: int = 3
    temperature: float = 0.1
    stream: bool = False
    # Non-None only for requests participating in the D/P transfer flow.
    kv_transfer_params: Optional[Dict[str, Any]] = None

class MetaserverRequest(BaseModel):
    """Payload the D instance sends to the metaserver to schedule prefill/decode."""
    request_id: str
    do_remote_decode: bool
    do_remote_prefill: bool
    remote_engine_id: Optional[str] = None
    remote_host: Optional[str] = None
    remote_port: Optional[int] = None

# Response models
class Delta(BaseModel):
    """Incremental content piece carried inside a streaming chunk."""
    content: str

class StreamChoice(BaseModel):
    """Single choice entry of a streaming chunk."""
    delta: Delta
    index: int = 0
    # "stop" on the final chunk, None otherwise.
    finish_reason: Optional[str] = None

class StreamChunkResponse(BaseModel):
    """One SSE chunk of a streaming chat completion."""
    choices: List[StreamChoice]
    id: str
    object: str = "chat.completion.chunk"
    created: Optional[int] = None
    model: Optional[str] = None

class Message(BaseModel):
    """Assistant message in a non-streaming chat completion."""
    role: str = "assistant"
    content: str

class ChatChoice(BaseModel):
    """Single choice entry of a non-streaming chat completion."""
    index: int = 0
    message: Message
    finish_reason: Optional[str] = "stop"

class ChatCompletionResponse(BaseModel):
    """Complete (non-streaming) chat completion response body."""
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[ChatChoice]
    # Token accounting; in this mock these are character/word counts.
    usage: Optional[Dict[str, int]] = None

# Server configuration
class ServerConfig:
    """Mutable runtime settings controlling the mock server's behaviour.

    ``mode`` selects the failure-injection handler; the remaining knobs tune
    error probability and the simulated delays.
    """

    def __init__(self):
        defaults = {
            "mode": "normal",
            "metaserver_url": "http://localhost:8081/v1/metaserver",
            "manage_ip": "localhost",
            "manage_port": 8081,
            "error_rate": 0.1,
            "timeout_duration": 15.0,
            "slow_duration": 3.0,
            "error_codes": [500, 502, 503],
        }
        for name, value in defaults.items():
            setattr(self, name, value)

# Global configuration
# Shared mutable settings; mutated by /control/mode and by main().
server_config = ServerConfig()

# FastAPI application
app = FastAPI(
    title="Engine Server Mock",
    description="Mock D/P instances and metaserver for inference flow",
    version="4.1.0"
)

class DInstanceHandler:
    """Mock D Instance (Decode Role) - Support both streaming and non-streaming.

    Orchestrates the full mock D/P flow for one request:
      1. Register the request and "forward" it to the (mocked) metaserver.
      2. A background asyncio task simulates the P-instance prefill and
         signals ``prefill_completed`` once the first token / KV cache exists.
      3. Decode: stream SSE chunks or return one complete JSON body.
    """

    def __init__(self, req_info: RequestInfo):
        self.req_info = req_info
        # Set by the prefill background task on success OR failure so the
        # waiter in handle_d_request() can never hang once the task ran.
        self.prefill_completed = asyncio.Event()
        self.prefill_result: Optional[Dict[str, Any]] = None
        self.is_streaming = req_info.req_data.get('stream', False)
        # Strong reference to the scheduled prefill task; without it the
        # task object could be garbage-collected before it finishes.
        self._prefill_task: Optional[asyncio.Task] = None

    async def handle_d_request(self):
        """Handle D instance request - full inference flow for both streaming and non-streaming."""
        logger.info(f"🎯 D_INSTANCE: Received request {self.req_info.req_id} (stream: {self.is_streaming})")
        # BUGFIX: req_data may lack a 'prompt' key entirely (chat requests
        # carry 'messages'), in which case .get('prompt') is None and the
        # original `...get('prompt')[:50]` raised TypeError.
        prompt_preview = (self.req_info.req_data.get('prompt') or "")[:50]
        logger.info(f"   Model: {self.req_info.req_data.get('model')}, Prompt: {prompt_preview}...")

        # Add to request manager
        RequestManager.get_instance().addReq(self.req_info)

        try:
            # Step 1: D instance receives request and forwards to metaserver
            logger.info(f"📤 D_INSTANCE: Forwarding to metaserver for P instance scheduling")
            await self.forward_to_metaserver()

            # Step 2: Wait for prefill completion from P instance
            logger.info(f"⏳ D_INSTANCE: Waiting for prefill completion...")
            await self.prefill_completed.wait()

            if not self.prefill_result:
                raise HTTPException(status_code=500, detail="Prefill failed - no result received")

            # Step 3: Start decoding with KV cache
            logger.info(f"🔍 D_INSTANCE: Starting decode phase with KV cache")

            if self.is_streaming:
                return await self.handle_streaming_decode()
            else:
                return await self.handle_non_streaming_decode()

        except Exception as e:
            logger.error(f"❌ D_INSTANCE: Error in request {self.req_info.req_id}: {e}")
            raise
        finally:
            RequestManager.get_instance().delReq(self.req_info.req_id)

    async def handle_streaming_decode(self) -> StreamingResponse:
        """Handle streaming decode - returns StreamingResponse of SSE chunks."""
        async def generate_stream():
            try:
                async for chunk in self.generate_decode_stream():
                    yield chunk

                self.req_info.update_state(ReqState.DECODE_END)
                logger.info(f"✅ D_INSTANCE: Streaming inference completed for {self.req_info.req_id}")
            except Exception as e:
                # Surface the error as a final data frame rather than
                # breaking the connection with no payload.
                logger.error(f"❌ D_INSTANCE: Error in streaming: {e}")
                error_chunk = {"error": str(e)}
                yield f"data: {json.dumps(error_chunk)}\n\n"

        return StreamingResponse(
            generate_stream(),
            media_type="application/x-ndjson"
        )

    async def handle_non_streaming_decode(self) -> JSONResponse:
        """Handle non-streaming decode - returns one complete JSON response.

        Raises:
            HTTPException(500): if collecting/serializing the response fails.
        """
        try:
            # Collect all tokens
            full_content = await self.collect_complete_response()

            # Create complete response.  NOTE: usage figures mix character
            # counts (prompt) with word counts (completion), as elsewhere
            # in this mock.
            response = ChatCompletionResponse(
                id=self._get_response_id(),
                created=int(time.time()),
                model=self.req_info.req_data.get("model", "test-model"),
                choices=[
                    ChatChoice(
                        message=Message(content=full_content),
                        finish_reason="stop"
                    )
                ],
                usage={
                    "prompt_tokens": len(self.req_info.req_data.get("prompt", "") or ""),
                    "completion_tokens": len(full_content.split()),
                    "total_tokens": len(self.req_info.req_data.get("prompt", "") or "") + len(full_content.split())
                }
            )

            self.req_info.update_state(ReqState.DECODE_END)
            logger.info(f"✅ D_INSTANCE: Non-streaming inference completed for {self.req_info.req_id}")

            return JSONResponse(content=response.dict())

        except Exception as e:
            logger.error(f"❌ D_INSTANCE: Error in non-streaming decode: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    def _get_response_id(self) -> str:
        """Get response ID from prefill result or generate a new one."""
        if self.prefill_result and "id" in self.prefill_result:
            return self.prefill_result["id"]
        return f"chatcmpl-{uuid.uuid4().hex[:8]}"

    def _get_first_token(self) -> str:
        """Get first token from the prefill result, validating its structure."""
        if (self.prefill_result and
            "choices" in self.prefill_result and
            len(self.prefill_result["choices"]) > 0 and
            "delta" in self.prefill_result["choices"][0] and
            "content" in self.prefill_result["choices"][0]["delta"]):
            return self.prefill_result["choices"][0]["delta"]["content"]

        # Fallback if prefill result doesn't have expected structure
        return generate_prefill_chunk()

    async def collect_complete_response(self) -> str:
        """Collect complete response text for non-streaming requests."""
        logger.info(f"   📋 D_INSTANCE: Collecting complete response for non-streaming")

        # The mock returns a whole canned poem as the final text; the
        # prefill token is not spliced in on the non-streaming path.
        full_response = generate_poem_response()

        # Simulate processing time for non-streaming (faster than streaming)
        await asyncio.sleep(0.5)

        logger.info(f"   ✅ D_INSTANCE: Complete response collected: {full_response[:50]}...")
        return full_response

    async def forward_to_metaserver(self):
        """Step 1: D -> MetaServer (request P instance)."""
        # Build metaserver request
        metaserver_data = {
            "request_id": self.req_info.req_id,
            "do_remote_decode": False,
            "do_remote_prefill": True,
            "remote_engine_id": f"engine_{uuid.uuid4().hex[:8]}",
            "remote_host": server_config.manage_ip,
            "remote_port": server_config.manage_port
        }

        logger.info(f"   📨 D->MetaServer: {json.dumps(metaserver_data, indent=2)}")

        # In real system, this would be HTTP call to metaserver
        # For mock, we directly process it
        await self.process_metaserver_request(metaserver_data)

    async def process_metaserver_request(self, metaserver_data: dict):
        """Kick off the simulated P-instance prefill in the background.

        BUGFIX: the original created a standalone ``BackgroundTasks()`` and
        added the prefill coroutine to it — but a ``BackgroundTasks``
        instance only runs when FastAPI returns it with a response, so the
        task never executed and handle_d_request() waited forever on
        ``prefill_completed``.  Schedule a real asyncio task instead.
        """
        self._prefill_task = asyncio.create_task(
            self.handle_p_instance_processing(metaserver_data)
        )

        # Return immediately - P instance will process in background
        logger.info(f"   🔄 D_INSTANCE: P instance processing started in background")

    async def handle_p_instance_processing(self, metaserver_data: dict):
        """Background task to handle P instance processing."""
        try:
            # Step 2: MetaServer -> P Instance
            p_response = await self.call_p_instance(metaserver_data)

            # Store prefill result and notify D instance
            self.prefill_result = p_response
            self.prefill_completed.set()

            logger.info(f"   📥 D_INSTANCE: Prefill completed, KV cache ready")

        except Exception as e:
            logger.error(f"   ❌ D_INSTANCE: P instance processing failed: {e}")
            # Still set event to avoid hanging; prefill_result stays None so
            # handle_d_request() raises a 500 for the caller.
            self.prefill_completed.set()

    async def call_p_instance(self, metaserver_data: dict) -> Dict[str, Any]:
        """Step 2: Mock calling the P instance; returns the prefill chunk."""
        logger.info(f"   🚀 MetaServer->P: Scheduling P instance")

        # Build P instance request from original D request and metaserver data
        p_request = {
            "model": self.req_info.req_data.get("model", "test-model"),
            "prompt": self.req_info.req_data.get("prompt", ""),
            "max_tokens": 1,  # P instance only does prefill
            "temperature": self.req_info.req_data.get("temperature", 0.7),
            "stream": False,
            "kv_transfer_params": {
                "model": self.req_info.req_data.get("model", "test-model"),
                "prompt": self.req_info.req_data.get("prompt", ""),
                "max_tokens": self.req_info.req_data.get("max_tokens", 120),
                "temperature": self.req_info.req_data.get("temperature", 0.7),
                "request_id": metaserver_data["request_id"],
                "do_remote_decode": metaserver_data["do_remote_decode"],
                "do_remote_prefill": metaserver_data["do_remote_prefill"],
                "remote_engine_id": metaserver_data.get("remote_engine_id"),
                "remote_host": metaserver_data.get("remote_host"),
                "remote_port": metaserver_data.get("remote_port")
            }
        }

        logger.info(f"   📋 P_INSTANCE: Received request {metaserver_data['request_id']}")
        logger.debug(f"   P Request: {json.dumps(p_request, indent=2, ensure_ascii=False)}")

        # Simulate P instance prefill computation
        await asyncio.sleep(0.8)  # Simulate computation time

        # Generate prefill result (first token)
        prefill_text = generate_prefill_chunk()

        # Create KV cache simulation
        self.req_info.kv_cache = f"kv_cache_{uuid.uuid4().hex[:8]}"

        response = {
            "choices": [
                {
                    "delta": {
                        "content": prefill_text
                    },
                    "index": 0,
                    "finish_reason": None
                }
            ],
            "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
            "kv_cache": self.req_info.kv_cache  # Simulate KV cache transfer
        }

        self.req_info.update_state(ReqState.PREFILL_END)
        logger.info(f"   ✅ P_INSTANCE: Prefill completed, KV cache: {self.req_info.kv_cache}")
        logger.info(f"   📤 P->D: Sending prefill result: {prefill_text}")

        return response

    async def generate_decode_stream(self) -> AsyncGenerator[str, None]:
        """Step 3: D instance decode stream generation for streaming requests.

        Yields SSE "data: ..." frames: the prefill token first, then the
        remaining tokens, then a final "[DONE]" frame.
        """
        logger.info(f"   🎬 D_INSTANCE: Starting decode with KV cache: {self.req_info.kv_cache}")

        # Get the first token from prefill
        first_token = self._get_first_token()

        # Generate remaining tokens.  NOTE: replace() drops every occurrence
        # of the prefill token from the canned poem; for the whitespace-free
        # Chinese poems split() then yields a single remaining "token".
        full_response = generate_poem_response()
        remaining_tokens = full_response.replace(first_token, "").strip().split()

        # Yield the first token (from prefill)
        chunk = StreamChunkResponse(
            choices=[
                StreamChoice(
                    delta=Delta(content=first_token + " "),
                    index=0,
                    finish_reason=None
                )
            ],
            id=self._get_response_id(),
            created=int(time.time()),
            model=self.req_info.req_data.get("model", "test-model")
        )
        yield f"data: {chunk.json()}\n\n"
        await asyncio.sleep(0.1)

        # Yield remaining tokens
        for i, token in enumerate(remaining_tokens):
            await asyncio.sleep(0.15)  # Simulate decode time

            is_final = (i == len(remaining_tokens) - 1)

            chunk = StreamChunkResponse(
                choices=[
                    StreamChoice(
                        delta=Delta(content=token + " "),
                        index=0,
                        finish_reason="stop" if is_final else None
                    )
                ],
                id=self._get_response_id(),
                created=int(time.time()),
                model=self.req_info.req_data.get("model", "test-model")
            )

            logger.debug(f"   📝 D_INSTANCE: Decode token {i+1}/{len(remaining_tokens)}: {token}")
            yield f"data: {chunk.json()}\n\n"

        # Final DONE message
        yield "data: [DONE]\n\n"
        logger.info(f"   ✅ D_INSTANCE: Decode completed, {len(remaining_tokens) + 1} tokens generated")

class PInstanceHandler:
    """Mock P Instance (Prefill Role) - Direct handler for testing"""

    @staticmethod
    async def handle_p_request(p_request: dict) -> Dict[str, Any]:
        """Run a simulated prefill and return the first-token chunk plus a fake KV-cache id."""
        logger.info(f"🎯 P_INSTANCE: Direct request received")

        # Simulate prefill computation
        await asyncio.sleep(0.5)

        first_chunk = generate_prefill_chunk()

        choice = {
            "delta": {
                "content": first_chunk
            },
            "index": 0,
            "finish_reason": None
        }
        response = {
            "choices": [choice],
            "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
            "kv_cache": f"kv_cache_{uuid.uuid4().hex[:8]}"
        }

        logger.info(f"✅ P_INSTANCE: Direct prefill completed: {first_chunk}")
        return response

def generate_poem_response():
    """Pick one of the canned four-line spring poems at random."""
    poems = (
        "春风拂面花开早，柳絮飞舞鸟儿叫。阳光明媚照大地，万物复苏春意闹。",
        "春雨润物细无声，百花齐放春意浓。蝴蝶翩翩花间舞，春日美景入画中。",
        "春日暖阳照大地，万物复苏显生机。花开鸟鸣春意闹，人间四月最美时。",
        "春水初生春林盛，春风十里不如你。春花烂漫春草绿，春日时光最珍贵。",
    )
    return random.choice(poems)

def generate_prefill_chunk():
    """Return a random two-character spring word simulating the prefill token."""
    candidates = (
        "春风", "春日", "春雨", "春水",
        "花开", "鸟鸣", "柳絮", "蝴蝶",
    )
    return random.choice(candidates)

def is_d_instance_request(request_data: dict) -> bool:
    """Return True when kv_transfer_params marks this as a D-instance request.

    A D-instance request asks for remote prefill with local decode:
    ``do_remote_decode`` is exactly False and ``do_remote_prefill`` exactly True.

    BUGFIX: ``request_data.get('kv_transfer_params', {})`` returned None (not
    the default) when the key was present with a None value — which is exactly
    what the pydantic request models produce by default — and the subsequent
    ``None.get(...)`` raised AttributeError.  ``or {}`` normalizes that case.
    """
    kv_params = request_data.get('kv_transfer_params') or {}
    return (kv_params.get('do_remote_decode') is False and
            kv_params.get('do_remote_prefill') is True)

@app.get("/status")
async def handle_status_events(request: Request):
    """Report server health, current mode and the number of in-flight requests."""
    payload = {
        "status": "normal",
        "mode": server_config.mode,
        "description": "Mock D/P instances and metaserver - Support streaming/non-streaming",
        "active_requests": len(RequestManager.get_instance().requests),
    }
    return JSONResponse(status_code=200, content=payload)

@app.post("/v1/chat/completions")
async def handle_chat_completions(
    request: ChatCompletionRequest,
    x_request_id: Optional[str] = Header(None),
):
    """Main endpoint for D instance requests - Support both streaming and non-streaming.

    Routes to the full D/P flow when kv_transfer_params marks the request as
    a D-instance request; otherwise serves a direct chat completion.

    BUGFIX: removed the unused parameter
    ``background_tasks: BackgroundTasks = BackgroundTasks()`` — a mutable
    default instance shared across every call, and never referenced in the
    body.  The HTTP interface is unchanged.
    """
    request_id = x_request_id or f"d_req_{uuid.uuid4().hex[:8]}"
    request_data = request.dict()
    request_data['request_id'] = request_id

    # Create request info
    req_info = RequestInfo(request_id, request_data)

    # Check if this is a D instance request
    if is_d_instance_request(request_data):
        logger.info("=" * 60)
        stream_type = "STREAMING" if request.stream else "NON-STREAMING"
        logger.info(f"🚀 STARTING D/P INFERENCE FLOW ({stream_type})")
        logger.info("=" * 60)

        handler = DInstanceHandler(req_info)
        return await handler.handle_d_request()
    else:
        # Direct chat completion (not part of D/P flow)
        return await handle_direct_chat_request(request)

@app.post("/v1/completions")
async def handle_completions(request: CompletionRequest):
    """Handle standard completion requests - original logic preserved"""
    logger.info(f"📝 COMPLETION: Direct request (stream: {request.stream})")

    # Dispatch on the configured failure-injection mode; unrecognized modes
    # fall back to the normal handler, matching the original if/elif chain.
    mode_handlers = {
        "normal": handle_normal_completion_request,
        "timeout": handle_timeout_request,
        "error": handle_error_request,
        "slow": handle_slow_request,
        "invalid_response": handle_invalid_response_request,
        "random": handle_random_request,
    }
    handler = mode_handlers.get(server_config.mode, handle_normal_completion_request)
    return await handler(request)

@app.post("/v1/p_instance")
async def handle_p_instance(request: Request):
    """Direct P instance endpoint for testing"""
    payload = await request.json()
    logger.info(f"🎯 P_INSTANCE: Direct API call")

    result = await PInstanceHandler.handle_p_request(payload)
    return JSONResponse(content=result)

async def handle_normal_completion_request(request: CompletionRequest):
    """Serve a successful completion, streamed or whole per request.stream.

    NOTE: usage figures mix character counts (prompt) with word counts
    (completion), as elsewhere in this mock.
    """
    response_text = generate_poem_response()

    if not request.stream:
        # Single JSON body for non-streaming requests.
        completion_tokens = len(response_text.split())
        return {
            "id": f"cmpl-{uuid.uuid4().hex[:8]}",
            "object": "text_completion",
            "created": int(time.time()),
            "model": request.model,
            "choices": [
                {
                    "text": response_text,
                    "index": 0,
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                "prompt_tokens": len(request.prompt),
                "completion_tokens": completion_tokens,
                "total_tokens": len(request.prompt) + completion_tokens
            }
        }

    async def stream_chunks():
        # One SSE frame per whitespace-separated token, then [DONE].
        tokens = response_text.split()
        last = len(tokens) - 1
        for idx, token in enumerate(tokens):
            await asyncio.sleep(0.1)
            payload = {
                "id": f"cmpl-{uuid.uuid4().hex[:8]}",
                "object": "text_completion",
                "created": int(time.time()),
                "model": request.model,
                "choices": [
                    {
                        "text": token + " ",
                        "index": 0,
                        "finish_reason": "stop" if idx == last else None
                    }
                ]
            }
            yield f"data: {json.dumps(payload)}\n\n"
        yield "data: [DONE]\n\n"

    return StreamingResponse(stream_chunks(), media_type="application/x-ndjson")

async def handle_direct_chat_request(request: ChatCompletionRequest):
    """Handle direct chat completion (not part of D/P flow).

    NOTE: prompt "tokens" are character counts over message contents while
    completion "tokens" are word counts, matching the rest of this mock.
    """
    request_id = f"chat_direct_{uuid.uuid4().hex[:8]}"
    logger.info(f"💬 CHAT_DIRECT: {request_id} (stream: {request.stream})")

    response_text = generate_poem_response()

    if not request.stream:
        # Single JSON body for non-streaming requests.
        prompt_chars = sum(len(msg.get('content', '')) for msg in request.messages)
        completion_tokens = len(response_text.split())
        return {
            "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": request.model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response_text
                    },
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                "prompt_tokens": prompt_chars,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_chars + completion_tokens
            }
        }

    async def stream_chunks():
        # One SSE frame per whitespace-separated token, then [DONE].
        tokens = response_text.split()
        last = len(tokens) - 1
        for idx, token in enumerate(tokens):
            await asyncio.sleep(0.1)
            payload = {
                "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                "object": "chat.completion.chunk",
                "created": int(time.time()),
                "model": request.model,
                "choices": [
                    {
                        "delta": {
                            "content": token + " "
                        },
                        "index": 0,
                        "finish_reason": "stop" if idx == last else None
                    }
                ]
            }
            yield f"data: {json.dumps(payload)}\n\n"
        yield "data: [DONE]\n\n"

    return StreamingResponse(stream_chunks(), media_type="application/x-ndjson")

# Original error simulation handlers
async def handle_timeout_request(request: CompletionRequest):
    """Sleep for the configured timeout duration, then answer normally."""
    logger.info(f"⏰ TIMEOUT: Simulating timeout for {server_config.timeout_duration}s")
    await asyncio.sleep(server_config.timeout_duration)
    return await handle_normal_completion_request(request)

async def handle_error_request(request: CompletionRequest):
    """Fail with one of the HTTP error codes configured in server_config."""
    detail_by_code = {
        500: "Internal Server Error",
        502: "Bad Gateway",
        503: "Service Unavailable",
    }
    code = random.choice(server_config.error_codes)
    logger.info(f"❌ ERROR: Returning HTTP {code}")
    raise HTTPException(
        status_code=code,
        detail=detail_by_code.get(code, "Unknown Error"),
    )

async def handle_slow_request(request: CompletionRequest):
    """Delay for the configured slow duration before answering normally."""
    logger.info(f"🐌 SLOW: Simulating slow response for {server_config.slow_duration}s")
    await asyncio.sleep(server_config.slow_duration)
    return await handle_normal_completion_request(request)

async def handle_invalid_response_request(request: CompletionRequest):
    """Return a structurally incomplete completion body (no choices/usage)."""
    logger.info("⚠️ INVALID: Returning invalid response")
    broken_payload = {
        "id": "cmpl-invalid",
        "object": "text_completion",
        "created": int(time.time()),
        "model": request.model,
    }
    return JSONResponse(content=broken_payload)

async def handle_random_request(request: CompletionRequest):
    """With probability error_rate inject a random failure; otherwise answer normally.

    The RNG call sequence (random.random, then random.choice on failure)
    matches the original exactly.
    """
    if random.random() >= server_config.error_rate:
        return await handle_normal_completion_request(request)

    failure_handlers = {
        "timeout": handle_timeout_request,
        "error": handle_error_request,
        "slow": handle_slow_request,
        "invalid_response": handle_invalid_response_request,
    }
    picked = random.choice(["timeout", "error", "slow", "invalid_response"])
    return await failure_handlers[picked](request)

@app.get("/control/requests")
async def get_active_requests():
    """Get active requests for monitoring"""
    active = RequestManager.get_instance().requests
    summaries = [
        {
            "request_id": rid,
            "state": info.state.value,
            "model": info.req_data.get("model", "unknown"),
            "stream": info.req_data.get("stream", False),
            "age_seconds": round(time.time() - info.created_time, 2),
            "has_kv_cache": info.kv_cache is not None,
        }
        for rid, info in active.items()
    ]

    return {
        "active_requests": len(active),
        "requests": summaries,
    }

@app.post("/control/mode")
async def change_mode(mode: str, error_rate: Optional[float] = None, timeout_duration: Optional[float] = None, slow_duration: Optional[float] = None):
    """Control server behavior mode"""
    valid_modes = ["normal", "timeout", "error", "slow", "invalid_response", "random"]
    if mode not in valid_modes:
        raise HTTPException(status_code=400, detail=f"Invalid mode. Must be one of: {valid_modes}")

    server_config.mode = mode
    # Only overwrite the tuning knobs that were explicitly supplied.
    for attr, value in (
        ("error_rate", error_rate),
        ("timeout_duration", timeout_duration),
        ("slow_duration", slow_duration),
    ):
        if value is not None:
            setattr(server_config, attr, value)

    logger.info(f"🛠️ CONTROL: Server mode changed to {mode}")
    return {
        "status": "success",
        "mode": server_config.mode,
        "error_rate": server_config.error_rate,
        "timeout_duration": server_config.timeout_duration,
        "slow_duration": server_config.slow_duration,
    }

def main(port, mode="normal"):
    """Configure the global settings and run the mock EngineServer under uvicorn."""
    import uvicorn

    # Apply CLI configuration to the shared settings object.
    server_config.mode = mode
    server_config.manage_port = port

    banner = [
        f"🚀 Starting EngineServer Mock on port {port}",
        f"📋 Mode: {mode}",
        "🎯 Available endpoints:",
        "   POST /v1/chat/completions - D instance requests (full D/P flow)",
        "   POST /v1/completions      - Standard completion (original logic)",
        "   POST /v1/p_instance       - Direct P instance testing",
        "   GET  /control/requests    - Monitor active requests",
        "   POST /control/mode        - Change server mode",
        "",
        "📖 Usage examples:",
        "   Streaming D request: Set 'stream: true' in request body",
        "   Non-streaming D request: Set 'stream: false' in request body",
        "   Include kv_transfer_params for D instance routing",
    ]
    for line in banner:
        logger.info(line)

    uvicorn.run(
        app,
        host="0.0.0.0",
        port=port,
        log_level="info",
        access_log=False,
    )

if __name__ == "__main__":
    # CLI: engine_server.py [port] [mode]
    cli_args = sys.argv[1:]
    port = int(cli_args[0]) if cli_args else 8080
    mode = cli_args[1] if len(cli_args) >= 2 else "normal"

    main(port, mode)
