from fastapi import FastAPI,Header,HTTPException
from pydantic import BaseModel
from typing import List, Literal, Optional
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver
from langchain_mcp_adapters.client import MultiServerMCPClient
from contextlib import asynccontextmanager
import asyncio
import time  # ✅ 用于时间戳
from fastapi.responses import StreamingResponse
import json
from typing import AsyncGenerator

# ------------------ LLM configuration ------------------

# SECURITY(review): the API key below is hard-coded and committed to source
# control — rotate this credential and load it from an environment variable
# (e.g. os.environ["SILICONFLOW_API_KEY"]) instead.
llm = ChatOpenAI(
    openai_api_base="https://api.siliconflow.cn/v1/",
    openai_api_key="sk-lyibxsovweyhvrqfdenylglchwzgwxvmwcenabgcngbvpmts",
    model_name="deepseek-ai/DeepSeek-V3",
    temperature=0.7,
)

# In-process checkpointer: conversation state lives in this process's memory
# and is lost on restart.
memory = MemorySaver()

# ------------------ OpenAI 兼容接口 ------------------

class Message(BaseModel):
    """One chat message in the OpenAI chat-completions wire format."""

    # Sender of the message, restricted to the three standard OpenAI roles.
    role: Literal["system", "user", "assistant"]
    # Plain-text message body.
    content: str

class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible subset)."""

    # Requested model name; echoed back in the response, not used for routing.
    model: str
    # Full conversation history; only the latest user message is consumed below.
    messages: List[Message]
    # Accepted for API compatibility; the agent's LLM temperature is fixed above.
    temperature: Optional[float] = 0.7
    # When true, the response is delivered as SSE chunks.
    stream: Optional[bool] = False

# ------------------ FastAPI 应用 ------------------

# Both globals are populated by lifespan() before the app serves requests.
client: Optional[MultiServerMCPClient] = None  # MCP multi-server tool client
agent = None  # compiled LangGraph ReAct agent (built by create_react_agent)

@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: build the MCP client and the ReAct agent once
    at startup and publish them through the module-level globals."""
    global client, agent
    # Three tool backends: a local stdio math server plus two remote SSE
    # servers (Amap maps, hot-news). SECURITY(review): the Amap key is
    # embedded in the URL — move it to configuration.
    client = MultiServerMCPClient({
        "math": {
            "command": "python",
            "args": ["./m_server.py"],
            "transport": "stdio",
        },
        "amap-amap-sse": {
            "transport": "sse",
            "url": "https://mcp.amap.com/sse?key=ea623731dd6a64c5f0fbfae71b71d0c1",
        },
        "mcp-hotnews-server": {
            "transport": "sse",
            "url": "https://mcp.api-inference.modelscope.cn/sse/f61dc5e51feb40",
        },
    })

    # NOTE(review): in newer langchain-mcp-adapters releases get_tools() is a
    # coroutine and must be awaited — confirm against the pinned version.
    agent = create_react_agent(
        model=llm,
        tools=client.get_tools(),
        checkpointer=memory,
        prompt="你是一个智能助手 CrisAI ，你能帮助用户解答问题。",
    )
    yield
    # TODO(review): no shutdown cleanup — close the MCP client here
    # (e.g. await client.aclose(); exact API depends on the adapter version).

# Wire the lifespan hook into the application instance.
app = FastAPI(lifespan=lifespan)

def _verify_api_key(authorization: Optional[str]) -> None:
    """Validate a ``Bearer`` token from the Authorization header.

    Raises:
        HTTPException: 401 when the header is missing, malformed, or wrong.
    """
    if authorization is None or not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Missing or invalid API key")
    api_key = authorization.removeprefix("Bearer ").strip()
    # SECURITY(review): hard-coded credential — load the expected key from
    # configuration/environment in production.
    if api_key != "sk-123":
        raise HTTPException(status_code=401, detail="Api key is invalid")


@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest,
                           authorization: Optional[str] = Header(None)):
    """OpenAI-compatible chat-completions endpoint backed by the LangGraph agent.

    Returns a plain JSON completion, or — when ``request.stream`` is true —
    an SSE stream of ``chat.completion.chunk`` events followed by ``[DONE]``.
    """
    _verify_api_key(authorization)

    # The agent turn is driven by the most recent user message only; prior
    # context comes from the checkpointer, not from request.messages.
    user_input = next(
        (msg.content for msg in reversed(request.messages) if msg.role == "user"),
        None,
    )
    if not user_input:
        # Proper 400 instead of the previous 200 + ad-hoc {"error": ...} body.
        raise HTTPException(status_code=400, detail="No user message found")

    # NOTE(review): a single shared thread_id means every caller shares one
    # conversation memory — derive it per client/session if isolation matters.
    config = {"configurable": {"thread_id": "api-thread-1"}}

    try:
        # NOTE(review): passing a bare string relies on langgraph's message
        # coercion; the explicit form is
        # {"messages": [{"role": "user", "content": user_input}]} —
        # confirm against the pinned langgraph version.
        result = await agent.ainvoke({"messages": user_input}, config=config)
    except Exception as e:
        # Surface agent failures as a 500 instead of a 200 JSON error blob.
        raise HTTPException(status_code=500, detail=str(e)) from e

    full_content = result["messages"][-1].content
    created = int(time.time())

    if not request.stream:
        return {
            "id": "chatcmpl-fake-id",
            "object": "chat.completion",
            "created": created,
            "model": request.model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": full_content,
                    },
                    "finish_reason": "stop",
                }
            ],
        }

    # Simulated streaming: the full answer was computed above and is re-cut
    # into 20-char chunks. True token streaming would use agent.astream(...).
    async def event_stream() -> AsyncGenerator[str, None]:
        for i in range(0, len(full_content), 20):
            data = {
                "id": "chatcmpl-stream-id",
                "object": "chat.completion.chunk",
                "created": created,
                "model": request.model,
                "choices": [
                    {
                        "delta": {"content": full_content[i:i + 20]},
                        "index": 0,
                        "finish_reason": None,
                    }
                ],
            }
            # ensure_ascii=False keeps CJK output readable instead of \uXXXX.
            yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
            await asyncio.sleep(0.05)  # optional pacing of chunk delivery

        # The OpenAI chunk protocol ends with a final chunk carrying
        # finish_reason="stop" before the [DONE] sentinel (previously missing).
        stop_chunk = {
            "id": "chatcmpl-stream-id",
            "object": "chat.completion.chunk",
            "created": created,
            "model": request.model,
            "choices": [
                {"delta": {}, "index": 0, "finish_reason": "stop"}
            ],
        }
        yield f"data: {json.dumps(stop_chunk, ensure_ascii=False)}\n\n"
        yield "data: [DONE]\n\n"

    return StreamingResponse(event_stream(), media_type="text/event-stream")


@app.get("/v1/models")
async def list_models(authorization: Optional[str] = Header(None)):
    """Return the static model catalogue in the OpenAI /v1/models shape."""
    # Bearer-token check, mirroring the chat endpoint's auth.
    if authorization is None or not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Missing or invalid API key")

    token = authorization.removeprefix("Bearer ").strip()
    if token != "sk-123":
        raise HTTPException(status_code=401, detail="Api key is invalid")

    # All entries share the same metadata, so only the ids vary.
    model_ids = ("deepseek-ai/DeepSeek-V3", "gpt-4o-mini", "gemini2.5Pro")
    return {
        "object": "list",
        "data": [
            {
                "id": model_id,
                "object": "model",
                "created": 1710000000,
                "owned_by": "crisai",
            }
            for model_id in model_ids
        ],
    }

# ------------------ Service entry point ------------------

if __name__ == '__main__':
    import uvicorn
    # reload=True is a development convenience; disable it in production.
    uvicorn.run("main:app", host="0.0.0.0", port=1414, reload=True)
