#!/usr/bin/env python
from fastapi import FastAPI, HTTPException
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from pydantic import BaseModel
import os

# Configure the OpenAI-compatible endpoint for the Qwen (DashScope) model.
# SECURITY: never commit an API key to source control — read it from the
# environment instead. Export DASHSCOPE_API_KEY (or OPENAI_API_KEY directly)
# before starting the server. setdefault keeps any value already exported.
os.environ.setdefault("OPENAI_API_KEY", os.getenv("DASHSCOPE_API_KEY", ""))
os.environ.setdefault("OPENAI_API_BASE", "https://dashscope.aliyuncs.com/compatible-mode/v1")

# FastAPI application instance; title/description show up in the OpenAPI docs.
app = FastAPI(
    title="LangChain Server",
    description="A simple api server using Langchain",
    version="1.0",
)

# Qwen chat model served through DashScope's OpenAI-compatible API.
# Reuse the base URL already configured in the environment above, so the
# endpoint is defined in exactly one place instead of being duplicated here.
qwen_model = ChatOpenAI(
    model="qwen-max",
    openai_api_base=os.environ["OPENAI_API_BASE"],
)

# Prompt template backing the /joke/invoke endpoint; {topic} is filled per request.
prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")

# Request schemas
class QwenRequest(BaseModel):
    """Request body for POST /qwen/invoke."""
    # Raw user text forwarded verbatim to the model.
    input: str

class JokeRequest(BaseModel):
    """Request body for POST /joke/invoke."""
    # Topic substituted into the joke prompt template.
    topic: str

# Custom routes
@app.post("/qwen/invoke")
def invoke_qwen(request: QwenRequest):
    """Send the raw user input to the Qwen model and return its reply.

    Declared as a sync ``def`` (not ``async def``): ``qwen_model.invoke``
    is a blocking network call, and FastAPI executes sync endpoints in a
    thread pool, so the event loop is not stalled for other requests.
    """
    try:
        response = qwen_model.invoke(request.input)
        return {"output": response.content}
    except Exception as e:
        # Boundary handler: surface any model/client failure as an HTTP 500,
        # preserving the original exception as the cause.
        raise HTTPException(status_code=500, detail=str(e)) from e

@app.post("/joke/invoke")
def invoke_joke(request: JokeRequest):
    """Format the joke prompt with the requested topic and ask the model.

    Sync ``def`` on purpose: the blocking ``invoke`` call runs in FastAPI's
    thread pool instead of blocking the event loop (see /qwen/invoke).
    """
    try:
        # Build the full prompt from the template before calling the model.
        formatted_prompt = prompt.format(topic=request.topic)
        response = qwen_model.invoke(formatted_prompt)
        return {"output": response.content}
    except Exception as e:
        # Boundary handler: map any failure to an HTTP 500 with the cause chained.
        raise HTTPException(status_code=500, detail=str(e)) from e

@app.get("/")
async def root():
    """Landing route: confirms the server is up and lists the API endpoints."""
    endpoints = {
        "qwen": "POST /qwen/invoke",
        "joke": "POST /joke/invoke",
    }
    return {"message": "LangChain Server is running", "endpoints": endpoints}

if __name__ == "__main__":
    # Launch the development server. uvicorn is imported lazily so the module
    # can still be imported (e.g. by an ASGI host) without uvicorn installed.
    import uvicorn

    uvicorn.run(app, host="localhost", port=8000)