from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from create_llm import create_llm

# Project-local LLM factory; "huoshan-doubao" presumably selects the
# Volcengine (huoshan) Doubao model — confirm against create_llm.
_llm = create_llm("huoshan-doubao")

# Initialize the chat history (in-memory store of prior turns).
_chat_history = ChatMessageHistory()

# Prompt layout: system instructions, then the stored conversation
# history, then the current user turn (filled from the "_user_content"
# input key at invocation time).
_prompt = ChatPromptTemplate.from_messages([
  ("system", "你是一名智能助手，你需要帮助用户解答问题，回答尽可能简洁"),
  MessagesPlaceholder(variable_name="chat_history"),
  ("user", "{_user_content}")
])

# Collapse the model's message object down to a plain string.
_parser = StrOutputParser()


# Manually managed chat history: snapshot provider for the prompt's
# MessagesPlaceholder.
def build_messages(user_content):
  """Return a copy of the stored chat history for prompt injection.

  The current user turn is intentionally NOT appended here: the prompt
  template's ("user", "{_user_content}") slot already injects it, so
  appending it again would send the message to the model twice.  The
  original implementation also appended a raw role/content dict directly
  to ``_chat_history.messages``, mutating the live history list with a
  non-message object that then corrupted every later turn.

  Args:
    user_content: the current user input; kept in the signature for
      backward compatibility with existing callers, but unused.

  Returns:
    A new list containing the history messages accumulated so far.
  """
  # Copy so callers can never mutate the history's backing list.
  return list(_chat_history.messages)


def save_response(user_content, response):
  """Persist one completed exchange into the shared chat history.

  Records the user's message first, then the assistant's reply, so the
  stored history preserves conversational order.

  Args:
    user_content: the text the user entered for this turn.
    response: the assistant's full reply for this turn.

  Returns:
    The *response* argument, unchanged, so the call composes fluently.
  """
  _chat_history.add_user_message(user_content)
  _chat_history.add_ai_message(response)
  return response


# Build the chain: resolve the history, fill the prompt, call the LLM,
# and parse the model output down to a plain string.
_chain = (
  RunnablePassthrough.assign(
    # Supplies the "chat_history" key consumed by the prompt's
    # MessagesPlaceholder; the input dict otherwise passes through
    # unchanged, so "_user_content" is still available to the template.
    chat_history=RunnableLambda(lambda x: build_messages(x["_user_content"]))
  )
  | _prompt
  | _llm
  | _parser
)


def get_input(prompt="\n请输入对话内容："):
  """Display *prompt* on stdin and return the line the user typed."""
  text = input(prompt)
  return text


# Interactive conversation loop: keep chatting until the user types "exit".
while (_user_content := get_input()) != "exit":
  # Stream the model's reply chunk by chunk, echoing each piece as it
  # arrives, then persist the completed turn to the history.
  chunks = []
  for piece in _chain.stream({"_user_content": _user_content}):
    chunks.append(piece)
    print(piece, end="", flush=True)
  save_response(_user_content, "".join(chunks))
