import os

from dotenv import load_dotenv
from fastapi import FastAPI
from langchain.chains.llm import LLMChain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import AzureChatOpenAI
from langserve import add_routes


## Model setup

# Load .env and set the Azure API type BEFORE importing chat.azure_chat:
# that module presumably builds llm_model from these environment variables at
# import time, so the original order (import first, load_dotenv after) could
# leave them unset — TODO confirm against chat/azure_chat.py.
load_dotenv()
os.environ["AZURE_OPENAI_API_TYPE"] = "azure"

# Shared chat model used by the /openai_text route below.
from chat.azure_chat import llm_model

# Azure OpenAI chat model, configured entirely from environment variables
# (expected to be supplied via .env / load_dotenv). Exposed directly on the
# /openai route below. Removed the stale commented-out legacy kwargs
# (openai_api_key / openai_api_base) — dead code.
llm = AzureChatOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
    api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    temperature=0.7,
)

# FastAPI application that LangServe attaches its routes to.
_app_metadata = {
    "title": "Language Server 服务器",
    "version": "1.0",
    "description": "使用Langchain的Runnable接口的简单API服务器",
}
app = FastAPI(**_app_metadata)

# Register the raw model as a LangServe endpoint
# (roughly analogous to a Spring controller).
add_routes(app, llm, path="/openai")

# Prompt template (Chinese: "tell me a joke about {topic}").
prompt = ChatPromptTemplate.from_template("告诉我一个关于{topic}的笑话")

# Pipe the template into the shared model and expose the resulting
# chain as a second endpoint.
joke_chain = prompt | llm_model
add_routes(app, joke_chain, path="/openai_text")


# When run directly as a script, start the server.
if __name__ == "__main__":
    import uvicorn
    # Serve the app on all interfaces (0.0.0.0), port 8001.
    # (The original comment claimed local port 8000 — the code says otherwise.)
    uvicorn.run(app, host="0.0.0.0", port=8001)