from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, BaseMessage, trim_messages, AIMessage, ToolMessage, SystemMessage
import os
from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
from langchain_core.runnables import RunnableWithMessageHistory

from typing import List
import tiktoken

# NOTE(security): a hard-coded API key must never be committed to source
# control — rotate this key and load it from the environment or a secrets
# manager instead. `setdefault` at least avoids clobbering a key the user
# has already exported in their shell.
os.environ.setdefault("OPENAI_API_KEY", "sk-f5324346ba744ef89eda093af8f307c7")
os.environ.setdefault("OPENAI_API_BASE", "https://dashscope.aliyuncs.com/compatible-mode/v1")

# DashScope's OpenAI-compatible endpoint serving the deepseek-r1 model.
chat_model = ChatOpenAI(model="deepseek-r1")

# Per-session chat histories, keyed by session_id (see get_session_history).
store = {}


# The `-> BaseChatMessageHistory` annotation declares get_session_history's return type.
def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the chat history for *session_id*, creating it on first use.

    Bug fix: the original `return` was indented inside the `if`, so an
    already-existing session returned ``None`` — crashing
    RunnableWithMessageHistory on every turn after the first. The history
    must be returned unconditionally.
    """
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]

def str_token_counter(text: str) -> int:
    """Return the number of o200k_base tokens in *text*.

    The encoder is fetched once and memoized on the function object, so the
    tiktoken registry lookup is not repeated for every message counted —
    this function runs several times per message on every trim pass.
    """
    enc = getattr(str_token_counter, "_enc", None)
    if enc is None:
        enc = tiktoken.get_encoding("o200k_base")
        str_token_counter._enc = enc
    return len(enc.encode(text))

def tiktoken_counter(messages: List[BaseMessage]) -> int:
    """Approximate the OpenAI chat token count for *messages*.

    Follows the usual chat-completions accounting (presumably per the
    OpenAI cookbook — confirm against the target model): 3 tokens of
    reply priming, 3 tokens of per-message framing, plus 1 extra token
    when a message carries a name.

    Raises ValueError for message types other than human/AI/tool/system.
    """
    role_for_type = (
        (HumanMessage, "user"),
        (AIMessage, "assistant"),
        (ToolMessage, "tool"),
        (SystemMessage, "system"),
    )
    total = 3  # reply priming
    for msg in messages:
        for msg_type, role in role_for_type:
            if isinstance(msg, msg_type):
                break
        else:
            raise ValueError(f"Unsupported messages type {msg.__class__}")
        total += 3 + str_token_counter(role) + str_token_counter(msg.content)
        if msg.name:
            total += 1 + str_token_counter(msg.name)
    return total


# Keep at most 4096 tokens of history (as counted by tiktoken_counter),
# dropping the oldest messages first while always retaining the system
# message so the persona instruction survives trimming.
trimmer = trim_messages(
    strategy="last",
    include_system=True,
    max_tokens=4096,
    token_counter=tiktoken_counter,
)



# Persona instruction followed by the (trimmed) running conversation,
# which is injected under the "messages" variable.
system_instruction = (
    "system",
    "你现在扮演孔子的角色，尽量按照孔子的风格回复，不要出现‘子曰’",
)
prompt = ChatPromptTemplate.from_messages([
    system_instruction,
    MessagesPlaceholder(variable_name="messages"),
])

# Chain: trim history to the token budget, render the prompt, call the model.
# RunnableWithMessageHistory injects the stored history before each call and
# records the new turns afterwards, keyed by the session_id in `config`.
with_message_history = RunnableWithMessageHistory(
    trimmer | prompt | chat_model,
    get_session_history,
)

config = {"configurable": {"session_id": "dreamhead"}}

# Simple REPL: stream the model's reply token by token.
# 'exit' (any case), Ctrl-D, or Ctrl-C ends the session cleanly instead of
# dumping a traceback (the original crashed on EOFError/KeyboardInterrupt).
while True:
    try:
        user_input = input("You:> ")
    except (EOFError, KeyboardInterrupt):
        print()
        break
    if user_input.lower() == 'exit':
        break

    stream = with_message_history.stream([HumanMessage(content=user_input)], config=config)
    for chunk in stream:
        print(chunk.content, end='', flush=True)
    print()
