from langchain.chat_models import init_chat_model
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder


def sync_stream_conversation(chain, history):
    """Stream a chat reply synchronously, echoing each chunk as it arrives.

    Args:
        chain: A runnable exposing ``stream``; called with ``{"messages": history}``.
        history: The conversation messages to send to the chain.

    Returns:
        The complete response text assembled from every streamed chunk.
    """
    pieces = []
    # Option 1: synchronous streaming via chain.stream()
    for piece in chain.stream({"messages": history}):
        print(piece, end="", flush=True)
        pieces.append(piece)
    return "".join(pieces)


async def stream_conversation(chain, history):
    """Stream a chat reply asynchronously, echoing each chunk as it arrives.

    Args:
        chain: A runnable exposing ``astream``; called with ``{"messages": history}``.
        history: The conversation messages to send to the chain.

    Returns:
        The complete response text assembled from every streamed chunk.
    """
    pieces = []
    # Option 2: asynchronous streaming via chain.astream()
    async for piece in chain.astream({"messages": history}):
        print(piece, end="", flush=True)
        pieces.append(piece)
    return "".join(pieces)

if __name__ == "__main__":
    model = init_chat_model(model="deepseek-chat", model_provider="deepseek")

    prompt_template = ChatPromptTemplate(
        [
            SystemMessage("你叫小智，是一名乐于助人的助手。"),
            MessagesPlaceholder(variable_name="messages")
        ]
    )
    # Seed the conversation so the assistant already "knows" the user's name.
    history = [
        HumanMessage("你好，我叫赵刚"),
        AIMessage("你好，我是你的AI助理，我叫小智。有什么可以帮到您的？"),
    ]
    chain = prompt_template | model | StrOutputParser()

    # Any of these inputs (including an empty line) ends the chat loop.
    EXIT_WORDS = {"exit", "quit", "bye", "goodbye", ""}

    while True:
        input_text = input("请输入：")
        if input_text in EXIT_WORDS:
            break
        history.append(HumanMessage(input_text))
        #output = chain.invoke({"messages":history})
        # Option 1: synchronous streaming (chunks are echoed as they arrive).
        output = sync_stream_conversation(chain, history)
        # Option 2: async streaming (needs `import asyncio` at the top of the file):
        #output = asyncio.run(stream_conversation(chain, history))
        # The streamed chunks were already printed above; just terminate the
        # line rather than printing the whole answer a second time.
        print()
        history.append(AIMessage(output))

        # Keep only the most recent messages so the prompt doesn't grow forever.
        history = history[-50:]
