import os
from operator import itemgetter

from langchain.memory import ConversationBufferMemory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI

# LLM client for the DashScope OpenAI-compatible endpoint.
# SECURITY: the API key must come from the environment, never from source.
# NOTE(review): a real key was previously committed on this line — rotate it.
llm = ChatOpenAI(
    api_key=os.environ["DASHSCOPE_API_KEY"],
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    # Swap the model name as needed; available models:
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    model="llama-4-scout-17b-16e-instruct",
)

# Prompt: system persona, then the prior conversation, then the new user turn.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "你是一个乐于助人的机器助手"),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)

# In-memory conversation buffer. return_messages=True yields message objects
# (required by MessagesPlaceholder) instead of one concatenated string.
memory = ConversationBufferMemory(return_messages=True)

# Chain: inject stored history into the input dict, render the prompt, call the LLM.
chain = (
    RunnablePassthrough.assign(
        history=RunnableLambda(memory.load_memory_variables) | itemgetter("history")
    )
    | prompt
    | llm
)

inputs = {"input": "你好，我是李华"}
res = chain.invoke(inputs)
print(res)
# Persist this turn so the follow-up question below can reference it.
memory.save_context(inputs, {"output": res.content})
print(chain.invoke({"input": "你好，我是谁？"}))
