import sys

from fastapi import status, HTTPException, FastAPI, Request
from fastapi.responses import StreamingResponse, JSONResponse
from motor.utils.logger import get_logger
import httpx
import asyncio
import json
from typing import AsyncGenerator

logger = get_logger(__name__)

async def generate_chat_stream(message: str, delay: float = 0.1) -> AsyncGenerator[str, None]:
    """Yield a mock OpenAI-style SSE chat-completion stream.

    Emits one ``chat.completion.chunk`` data frame per word of a canned
    response built around *message*, followed by the ``[DONE]`` sentinel.

    Args:
        message: User message echoed into the simulated reply.
        delay: Seconds slept before each chunk to simulate generation
            latency. Defaults to the original hard-coded 0.1; pass 0 to
            disable (useful in tests).

    Yields:
        ``data: <json>\\n\\n`` SSE frames, then ``data: [DONE]\\n\\n``.
    """
    # Canned reply content used for the simulated stream.
    mock_response = f"Based on your message '{message}', here's my response: This is a simulated AI response that demonstrates streaming capabilities. Each word appears progressively, creating a natural conversation flow."
    words = mock_response.split()
    last = len(words) - 1

    for i, word in enumerate(words):
        await asyncio.sleep(delay)  # simulate generation latency

        chunk_data = {
            "id": f"chatcmpl-{i}",
            "object": "chat.completion.chunk",
            "choices": [{
                "delta": {
                    # Every word but the last carries its separating space so
                    # the concatenated deltas reproduce the full sentence.
                    "content": word + " " if i < last else word
                },
                "index": 0,
                # Only the final content chunk signals completion.
                "finish_reason": None if i < last else "stop"
            }]
        }

        yield f"data: {json.dumps(chunk_data)}\n\n"
    # SSE termination sentinel expected by OpenAI-compatible clients.
    yield "data: [DONE]\n\n"

class MockAsyncClient:
    """Mock of an async HTTP client used to fake the D/P inference flow.

    ``post`` mimics a plain (non-streaming) completion call and ``stream``
    mimics the streaming D-instance path, including a prefill handshake
    against the metaserver named in the request payload. Both methods can be
    armed with an injected exception and a failure budget so callers can
    exercise error/retry handling.
    """

    def __init__(self, post_exc: Exception = None, stream_exc: Exception = None,
                 post_fail_times: int = 1, stream_fail_times: int = 1):
        # Exception injected into post(), and how many calls should fail.
        self.post_exc = post_exc
        self.post_fail_times = post_fail_times
        self.post_count = 0       # total post() invocations
        self.post_fail_count = 0  # post() invocations failed so far

        # Same failure-injection bookkeeping for stream().
        self.stream_exc = stream_exc
        self.stream_fail_times = stream_fail_times
        self.stream_count = 0
        self.stream_fail_count = 0

        # Captured request payload/headers for later inspection by callers.
        self.req_data_from_metaserver = {}
        self.req_headers_from_router = {}

    async def post(self, url, json=None, headers=None):
        """Handle a non-streaming completion request.

        Note: the ``json`` keyword intentionally mirrors the httpx API and
        therefore shadows the stdlib module (unused in this method).

        Raises:
            HTTPException: while the injected failure budget lasts.
        """
        logger.info(f"----------req_data_from_metaserver, url: {url}, body: {json}")
        self.post_count += 1
        if self.post_exc and self.post_fail_count < self.post_fail_times:
            self.post_fail_count += 1
            # BUG FIX: this exception was previously *returned*; FastAPI
            # cannot serialize an HTTPException object as a response body.
            # Raising matches stream() and produces a proper 500 response.
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Error occurred while forwarding request to metaserver: {self.post_exc}"
            )

        self.req_data_from_metaserver = json

        return JSONResponse(
            status_code=status.HTTP_200_OK,
            content={
                "choices": [{"delta": {"content": "finish prefill"}, "index": 0, "finish_reason": None}],
                "id": "chatcmpl-123"
            }
        )

    async def stream(self, method, url, json=None, headers=None):
        """Handle a streaming D-instance request.

        Sends a prefill notification to the metaserver URL found in the
        payload's ``kv_transfer_params`` and then returns a mock SSE stream.

        Raises:
            HTTPException: while the injected failure budget lasts.
        """
        self.stream_count += 1
        logger.info(f"----------req_data_from_coordinator:{json}")
        if self.stream_exc and self.stream_fail_count < self.stream_fail_times:
            self.stream_fail_count += 1
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Error occurred while streaming request to metaserver: {self.stream_exc}"
            )

        from urllib.parse import urlparse
        meta_url = json["kv_transfer_params"]["metaserver"]
        parsed_url = urlparse(meta_url)

        async with httpx.AsyncClient(timeout=None, base_url=f"http://{parsed_url.hostname}:{parsed_url.port}") as client:
            logger.info(f"req_data_from_router header: {headers}, body:{json}")
            self.req_headers_from_router = headers

            # Forward the prefill handshake to the metaserver.
            response = await client.post(parsed_url.path, json={
                "request_id": headers.get("x-request-id"),
                "do_remote_decode": False,
                "do_remote_prefill": True,
                "remote_engine_id": "test-engine",
                "remote_host": parsed_url.hostname,
                "remote_port": parsed_url.port
            })
            logger.info(f"metaserver response status: {response.status_code}, body: {response.text}")

        # BUG FIX: generate_chat_stream expects a str; previously the raw
        # httpx.Response object was passed, so its repr was embedded in the
        # streamed text. Use the response body text instead.
        return StreamingResponse(
            generate_chat_stream(response.text),
            media_type="text/event-stream"
        )
    
    
# FastAPI application exposing the mock D/P endpoints defined below.
app = FastAPI(
    title="Engine Server Mock",
    description="Mock D/P instances and metaserver for inference flow",
    version="4.1.0"
)

# Module-level client shared by the route handlers. Constructed with the
# defaults, so failure injection (post_exc / stream_exc) is disabled.
mock_client = MockAsyncClient()

@app.post("/v1/chat/completions")
async def handle_chat_completions(
    request: Request
):
    """Route chat-completion requests.

    Payloads carrying ``kv_transfer_params.metaserver`` are treated as
    D-instance requests and go through the streaming D/P flow; everything
    else is answered as a direct (non-D/P) completion via ``post``.
    """
    request_data = await request.json()
    headers = request.headers
    logger.info(f"Received /v1/chat/completions request: {request_data}")
    # Check if this is a D instance request
    if "kv_transfer_params" in request_data and "metaserver" in request_data["kv_transfer_params"]:
        logger.info("=" * 60)
        # BUG FIX: ``request.stream`` is a bound method on the Starlette
        # Request (always truthy), so the old check always logged STREAMING.
        # The client's streaming preference lives in the JSON payload.
        stream_type = "STREAMING" if request_data.get("stream") else "NON-STREAMING"
        logger.info(f"🚀 STARTING D/P INFERENCE FLOW ({stream_type})")
        logger.info("=" * 60)

        return await mock_client.stream(request.method, request.url, request_data, headers=headers)
    else:
        # Direct chat completion (not part of D/P flow)
        return await mock_client.post(request.url, json=request_data, headers=headers)
    
def main(port, mode="normal"):
    """Start the mock EngineServer with uvicorn on the given port.

    Args:
        port: TCP port to bind on all interfaces.
        mode: Informational run mode, only logged (default "normal").
    """
    import uvicorn

    logger.info(f"🚀 Starting EngineServer Mock on port {port}")
    logger.info(f"📋 Mode: {mode}")
    # Startup banner listing the routes this mock serves.
    for banner_line in (
        "🎯 Available endpoints:",
        "   POST /v1/chat/completions - D instance requests (full D/P flow)",
        "   POST /v1/completions      - Standard completion (original logic)",
    ):
        logger.info(banner_line)

    uvicorn.run(app, host="0.0.0.0", port=port, log_level="info", access_log=False)

if __name__ == "__main__":
    # CLI: python <script> [port] [mode] — defaults: 8080 / "normal".
    cli_args = sys.argv[1:]
    port = int(cli_args[0]) if cli_args else 8080
    mode = cli_args[1] if len(cli_args) > 1 else "normal"
    main(port, mode)