import asyncio
import json
import time
import uuid
from typing import Dict, Optional

from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

# In-memory context store keyed by ctx id (use Redis or similar in production).
context_store: Dict[str, dict] = {}


class ContextRequest(BaseModel):
    """Incoming chat request: one user message plus optional conversation context."""

    message: str  # the user's chat message
    ctx_id: Optional[str] = None  # existing context id; a new one is created when absent or unknown
    max_tokens: int = 4096  # generation budget (not consulted by the simulated model)


class ContextData(BaseModel):
    """Persisted conversation state for a single context id."""

    history: str = ""  # accumulated "User:" / "AI:" transcript
    token_count: int = 0  # approximate size: whitespace-token count of history


app = FastAPI()


def simulate_model_generation(prompt: str, ctx: ContextData) -> str:
    """Simulate an LLM completion (stand-in for a real model call).

    A production deployment would call a HuggingFace/OpenAI API here; this
    stub simply echoes the prompt reversed. ``ctx`` is accepted for interface
    parity but is not consulted by the simulation.
    """
    reversed_prompt = "".join(reversed(prompt))
    return f"模拟响应: {reversed_prompt}"


async def model_generator(request: ContextRequest):
    """Core SSE generator: stream model tokens and maintain conversation state.

    Yields dict events in order:
      1. one ``token`` event per generated character,
      2. a ``context_update`` event with the new token count,
      3. a terminal ``done`` event.

    Side effects: creates or updates the entry in ``context_store`` and, for a
    new conversation, overwrites ``request.ctx_id`` so the events carry the
    generated id back to the client.
    """
    # 1. Load an existing context or create a fresh one.
    if request.ctx_id and request.ctx_id in context_store:
        stored = context_store[request.ctx_id]
        ctx_data = ContextData(**stored)
        # Preserve the original creation timestamp across updates
        # (older entries may predate the created_at field).
        created_at = stored.get("created_at", time.time())
    else:
        new_ctx_id = f"ctx-{uuid.uuid4().hex[:8]}"
        ctx_data = ContextData()
        # Record creation time so GET /context/{ctx_id} can report it.
        created_at = time.time()
        context_store[new_ctx_id] = {**ctx_data.dict(), "created_at": created_at}
        request.ctx_id = new_ctx_id

    # 2. Build the full prompt: prior transcript plus the new user turn.
    full_prompt = f"{ctx_data.history}\nUser: {request.message}"

    # 3. Simulated generation (replace with a real model call).
    response_text = simulate_model_generation(full_prompt, ctx_data)

    # 4. Stream the response one character per SSE "token" event.
    for i, char in enumerate(response_text):
        yield {
            "event": "token",
            "data": {
                "token": char,
                "ctx_id": request.ctx_id,
                "index": i
            }
        }
        await asyncio.sleep(0.02)  # simulate per-token generation latency

    # 5. Persist the updated transcript, keeping created_at alongside the
    #    model fields so it survives round-trips through the store.
    ctx_data.history += f"\nUser: {request.message}\nAI: {response_text}"
    ctx_data.token_count = len(ctx_data.history.split())
    context_store[request.ctx_id] = {**ctx_data.dict(), "created_at": created_at}

    # 6. Notify the client of the new context size.
    yield {
        "event": "context_update",
        "data": {
            "ctx_id": request.ctx_id,
            "token_count": ctx_data.token_count
        }
    }

    # 7. Signal end of stream.
    yield {
        "event": "done",
        "data": {
            "ctx_id": request.ctx_id,
            "final": True
        }
    }


@app.post("/chat")
async def chat_stream(request: ContextRequest):
    """MCP SSE endpoint: stream chat events as ``text/event-stream``."""

    async def event_stream():
        # Encode each generator event into the SSE wire format:
        # an "event:" line followed by a "data:" line and a blank line.
        async for evt in model_generator(request):
            yield f"event: {evt['event']}\n"
            yield f"data: {json.dumps(evt['data'])}\n\n"

    sse_headers = {
        "Cache-Control": "no-cache",  # SSE responses must never be cached
        "X-Accel-Buffering": "no",    # disable proxy (nginx) response buffering
    }
    return StreamingResponse(
        event_stream(),
        media_type="text/event-stream",
        headers=sse_headers,
    )


@app.get("/context/{ctx_id}")
def get_context(ctx_id: str):
    """Return metadata for a stored conversation context.

    Raises HTTP 404 when the id is unknown.
    """
    try:
        entry = context_store[ctx_id]
    except KeyError:
        raise HTTPException(status_code=404, detail="Context not found")
    return {
        "ctx_id": ctx_id,
        "token_count": entry["token_count"],
        "created": entry.get("created_at", None),
    }


if __name__ == "__main__":
    # Development entry point: run the ASGI app directly with uvicorn.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)