from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.llms import Tongyi
from dotenv import load_dotenv
import os
from langchain_community.llms import Ollama

# Legacy setup (kept for reference): Tongyi (DashScope) cloud model via API key.
# # Load environment variables from a .env file
# load_dotenv()

# # Read the DashScope API key from the environment
# api_key = os.getenv("DASHSCOPE_API_KEY")

# # Initialize the model
# llm = Tongyi(
#     model="qwen-turbo",
#     api_key=api_key,
#     temperature=0.5,
#     max_tokens=1024,
#     max_retries=2
# )


from langchain_ollama import ChatOllama
# 1. Initialize the connection to a locally served Ollama model.
# NOTE(review): validate_model_on_init presumably checks the model exists at
# construction time — confirm against the langchain-ollama documentation.
llm = ChatOllama(model="qwen3:14b", validate_model_on_init=True)

# Prompt layout: fixed system persona, then the accumulated conversation
# history, then the incoming user turn.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "你是一个友好的AI助手。"),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)

# Base chain: render the prompt, then call the model.
chain = prompt | llm

# Module-level registry mapping session_id -> its chat history object.
store: dict = {}

def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    """Return the chat history for *session_id*, creating an empty one on first use."""
    try:
        return store[session_id]
    except KeyError:
        history = InMemoryChatMessageHistory()
        store[session_id] = history
        return history

# Wrap the chain so each invocation reads/writes per-session message history.
with_message_history = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="history",
)

session_id = "user_123"
# Both turns share the same session, so the second one sees the first.
session_config = {"configurable": {"session_id": session_id}}

# Turn 1: introduce ourselves; this gets stored in the session history.
res1 = with_message_history.invoke(
    {"input": "你好！,我是秀爷，我是一个抖音的视频创作者"}, config=session_config
)
print("第一次对话结果:")
print(res1)

# Turn 2: the model should recall the introduction from history.
res2 = with_message_history.invoke(
    {"input": "你知道我是谁吗？"}, config=session_config
)
print("\n第二次对话结果:")
print(res2)
