import asyncio
import os

from fastapi import APIRouter
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.schema import BaseMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel
from starlette.responses import StreamingResponse

router=APIRouter()

class InputStr(BaseModel):
    """Request body for the chat endpoint: a single user message."""

    # The user's chat message text (JSON key: "input").
    input: str


async def generate_stream_response(_callback, llm: ChatOpenAI, messages: list[BaseMessage]):
    """Stream LLM response tokens to the caller as they are generated.

    Starts ``llm.ainvoke`` as a concurrent task; because the model was
    configured with ``streaming=True`` and this callback handler, each
    generated token is pushed into ``_callback``'s async iterator, which
    we forward one token at a time.

    Args:
        _callback: AsyncIteratorCallbackHandler registered on ``llm``;
            its ``aiter()`` yields tokens as the model produces them.
        llm: ChatOpenAI client configured with ``streaming=True``.
        messages: conversation messages to send to the model.

    Yields:
        Individual response tokens, in generation order.
    """
    # create_task, not asyncio.shield: shield would hide cancellation
    # (e.g. when the HTTP client disconnects) and leave the LLM call
    # running in the background with no consumer.
    task = asyncio.create_task(llm.ainvoke(messages, stop=None))
    try:
        async for token in _callback.aiter():
            yield token
        # Re-raise any exception the model call itself produced.
        await task
    finally:
        # If the task failed (or the client went away) before completion
        # was signalled, unblock aiter() so the generator cannot hang,
        # and stop the in-flight model call.
        _callback.done.set()
        if not task.done():
            task.cancel()

@router.post("/chatapi/chat")
async def chat(input: InputStr):
    """Chat endpoint: forwards the user's message to OpenAI and streams
    the reply back token by token as a server-sent-event stream.

    Args:
        input: request body carrying the user's message in its ``input``
            field.

    Returns:
        StreamingResponse with media type ``text/event-stream`` whose
        body yields response tokens as the model generates them.
    """
    # Per-request callback: buffers the streamed tokens for this call only.
    callback = AsyncIteratorCallbackHandler()
    llm = ChatOpenAI(
        # Read the credential from the environment instead of hard-coding
        # it in source. Set OPENAI_API_KEY before starting the service;
        # the placeholder fallback preserves the previous behavior.
        api_key=os.environ.get("OPENAI_API_KEY", "sk-xxxx"),
        streaming=True,  # emit tokens incrementally through the callback
        callbacks=[callback],
    )
    messages = [
        {"role": "system", "content": "你是有用的人工智能"},
        {"role": "user", "content": input.input},
    ]

    # Hand the async generator to Starlette so tokens are flushed to the
    # client as soon as they arrive.
    return StreamingResponse(
        generate_stream_response(callback, llm, messages),
        media_type="text/event-stream",
    )



# Expose this service endpoint so front-end clients can call it.

# At this point, serving the AI model over HTTP is complete;

# an interface like this can be consumed by any kind of front end.