from threading import Event

from aimodel import tongyi_model
from websocket_schemas import ChatResponse
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

async def a_qa(websocket, question, history_list, stop_event: Event):
    """Stream an LLM answer for *question* to the client over *websocket*.

    Args:
        websocket: connection with an async ``send_json`` method; receives
            ``ChatResponse`` payloads of type ``"stream"`` per token chunk
            and a single ``"end"`` frame when streaming finishes.
        question: the user's current question text.
        history_list: prior turns as dicts with ``"role"`` (``"user"`` or
            ``"bot"``) and ``"content"`` keys; entries with any other role
            are silently skipped (matches the original behavior).
        stop_event: cooperative cancellation flag. When set mid-stream,
            it is cleared, one ``"end"`` frame is sent, and streaming stops.
    """
    prompt = ChatPromptTemplate.from_messages(
        [
            SystemMessage(
                content="你是“新都科伦聊天助手”，能够友好的回答问题。"
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )
    chain = prompt | tongyi_model

    # Rebuild the conversation in LangChain message form, then append the
    # new question as the latest human turn.
    messages = []
    for turn in history_list:
        if turn["role"] == "user":
            messages.append(HumanMessage(turn["content"]))
        elif turn["role"] == "bot":
            messages.append(AIMessage(turn["content"]))
    messages.append(HumanMessage(question))

    async for chunk in chain.astream({"messages": messages}):
        if stop_event.is_set():
            stop_event.clear()
            end_resp = ChatResponse(sender="bot", message="", type="end")
            await websocket.send_json(end_resp.dict())
            # Return (not break): the original fell through to the
            # post-loop send and emitted a duplicate "end" frame.
            return
        stream_resp = ChatResponse(sender="bot", message=chunk.content, type="stream")
        await websocket.send_json(stream_resp.dict())

    # Normal completion: signal end-of-stream exactly once.
    end_resp = ChatResponse(sender="bot", message="", type="end")
    await websocket.send_json(end_resp.dict())



