from copy import deepcopy
from langchain_core.messages import ToolMessage
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
    trim_messages,
)
from langchain_core.messages.utils import count_tokens_approximately

def retain_only_last_messages(messages):
    """Return deep copies of *messages* with every ToolMessage's content
    collapsed to the placeholder string "success".

    This is a token-saving transform: historical tool outputs are usually
    large and no longer needed verbatim, so they are replaced with a short
    marker. Non-tool messages are copied unchanged.

    Args:
        messages: iterable of langchain_core message objects.

    Returns:
        A new list of deep-copied messages; the input objects are never
        mutated.
    """
    result = []
    for message in messages:
        # Deep-copy so the caller's (possibly persisted) history is
        # never mutated in place.
        copied = deepcopy(message)
        if isinstance(copied, ToolMessage):
            copied.content = "success"
        result.append(copied)
    # BUG FIX: the original built `result` but never returned it, so every
    # call produced None. (A leftover debug `print` of non-tool messages
    # was also removed.)
    return result

def trimmed_message(state):
    """Produce a token-budgeted view of the conversation for the LLM call.

    Two-stage reduction:
      1. Collapse the content of every ToolMessage *except the most recent
        one* to the placeholder "success" (old tool outputs are bulky and
        rarely needed verbatim).
      2. Run ``trim_messages`` to keep only the most recent messages that
        fit within the token budget.

    Args:
        state: graph state dict; must contain a "messages" list of
            langchain_core message objects.

    Returns:
        {} when there is at most one message (nothing to trim), otherwise
        {"llm_input_messages": [...]} — returning under ``llm_input_messages``
        (rather than ``messages``) means the trimmed view is used only for
        the model call and the stored history is left intact.
    """
    messages = state["messages"]
    if len(messages) <= 1:
        return {}

    # Walk newest-to-oldest so the first ToolMessage seen is the most
    # recent one, which keeps its real content.
    seen_latest_tool = False
    pruned_reversed = []
    for message in reversed(messages):
        if isinstance(message, ToolMessage):
            if seen_latest_tool:
                # BUG FIX: the original mutated message.content in place,
                # permanently clobbering the persisted state history even
                # though the function returns under `llm_input_messages`
                # specifically to avoid persisting changes. Copy first.
                message = deepcopy(message)
                message.content = "success"
            seen_latest_tool = True
        pruned_reversed.append(message)
    pruned = list(reversed(pruned_reversed))

    trimmed_messages = trim_messages(
        pruned,
        # Keep the last <= max_tokens tokens of the messages.
        strategy="last",
        # Approximate counter; adjust based on your model
        # or else pass a custom token_counter.
        token_counter=count_tokens_approximately,
        # Adjust based on the desired conversation length.
        max_tokens=16000,
        # Most chat models expect that chat history starts with either:
        # (1) a HumanMessage or
        # (2) a SystemMessage followed by a HumanMessage
        start_on="human",
        # Most chat models expect that chat history ends with either:
        # (1) a HumanMessage or
        # (2) a ToolMessage
        end_on=("human", "tool"),
        # Usually, we want to keep the SystemMessage
        # if it's present in the original history.
        # The SystemMessage has special instructions for the model.
        include_system=True,
        allow_partial=False,
    )

    # Returned under `llm_input_messages` so only the model input is
    # trimmed; the persisted `messages` history is untouched.
    return {"llm_input_messages": trimmed_messages}