import os

from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from langchain_community.chat_models import AzureChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langserve import add_routes
from starlette.middleware.cors import CORSMiddleware

# Load environment variables (Azure OpenAI credentials) from a local .env file.
load_dotenv()

# Azure OpenAI chat model; all connection settings are read from the environment.
# Removed the commented-out legacy kwargs (openai_api_key / openai_api_base) —
# they were dead code superseded by api_key / azure_endpoint below.
# NOTE(review): langchain_community.chat_models.AzureChatOpenAI is deprecated
# upstream in favor of langchain_openai.AzureChatOpenAI — confirm the project's
# pinned langchain version before migrating the import.
chat_model = AzureChatOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
    api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    temperature=0.7,  # moderately creative responses
)

# FastAPI application that exposes LangChain runnables via LangServe.
app = FastAPI(
    title="我的第一个langChain服务器",
    version="1.0",
    description="使用langchain的Runnable接口的简单API服务器",
)
# Enable CORS so the API can be called cross-origin (e.g. from a browser UI).
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers under the CORS spec (credentialed requests require an
# explicit origin) — confirm whether credentialed cross-origin calls are
# actually needed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["*"],
)

# Build a simple chat pipeline: prompt -> Azure chat model -> plain-text output.
system_message = ("system", "你是一个有帮助的AI助手。请用中文回答用户的问题。")
human_message = ("human", "{topic}")
prompt = ChatPromptTemplate.from_messages([system_message, human_message])

to_text = StrOutputParser()
chain = prompt | chat_model | to_text


# Liveness probe: reports that the service is up and which model backend it uses.
@app.get("/health")
async def health_check():
    status_payload = {"status": "healthy", "model": "Azure OpenAI"}
    return status_payload


# Register LangServe routes. Some models do not support the `batch` endpoint,
# so the enabled endpoints are listed explicitly for each route.
# (The original file used bare triple-quoted string expressions as comments;
# those are runtime expression statements, converted to real comments here.)

# /chat — the raw LLM model only; suited to SDK-style clients.
add_routes(app, chat_model, path="/chat", enabled_endpoints=["invoke", "stream", "stream_events"])

# /chat_ext — the full prompt|model|parser chain; suited to plain HTTP clients
# (e.g. requests), since the prompt template declares the `topic` variable.
add_routes(app, chain, path="/chat_ext", enabled_endpoints=["invoke", "stream", "stream_events"])

# Root path redirects to the interactive OpenAPI docs page.
@app.get("/")
async def redirect_root_to_docs():
    docs_url = "/docs"
    return RedirectResponse(docs_url)

# Status endpoint for the /openai path.
# Renamed from `redirect_root_to_docs`, which silently redefined the handler
# of the same name registered for "/" (flake8 F811); the route path is the
# external interface, so the Python-level rename is caller-safe.
@app.get("/openai")
async def openai_status():
    return {"status": "healthy", "model": "Azure OpenAI"}
if __name__ == "__main__":
    # Local/dev entry point: serve the app with uvicorn on all interfaces.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
