from typing import Tuple
import logging

from langchain_core.messages import AIMessage, HumanMessage


def add_details(response: str, reasoning: str, svg_argmap: str) -> str:
    """Add reasoning details to the response message shown in chat."""
    response_with_details = (
        f"{response}\n"
        "\n"
        '<details id="reasoning">\n'
        "<summary>Internal reasoning trace</summary>\n"
        f"{reasoning}\n"
        "</details>\n"
        "\n"
        '<details id="svg_argmap">\n'
        "<summary>Argument map</summary>\n"
        f"\n\n{svg_argmap}\n\n"
        "</details>"
    )
    return response_with_details


def get_details(response_with_details: str) -> Tuple[str, dict[str, str]]:
    """Extract response and details from response_with_details shown in chat."""
    if '<details id="' not in response_with_details:
        return response_with_details, {}
    # Everything before the first <details> block is the visible response text.
    response, *details_blocks = response_with_details.split('<details id="')
    response = response.strip()
    details_dict: dict[str, str] = {}
    for block in details_blocks:
        details_id, _, details_content = block.partition('">')
        details_content = details_content.strip()
        if "</summary>" in details_content and details_content.endswith("</details>"):
            # Drop the <summary> header and the closing </details> tag.
            details_content = details_content.split("</summary>", 1)[1].strip()
            details_content = details_content[: -len("</details>")].strip()
        elif details_content.endswith("</details>"):
            # No <summary> header; just strip the closing tag.
            details_content = details_content[: -len("</details>")].strip()
        else:
            logging.warning(f"Unrecognized details content: {details_content}")
            details_content = "UNRECOGNIZED DETAILS CONTENT"
        details_dict[details_id] = details_content
    return response, details_dict
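

# Illustrative round-trip of the two helpers above (example values are hypothetical):
#
#     msg = add_details(
#         response="The argument is valid.",
#         reasoning="1. Identify the premises. 2. Check the inference.",
#         svg_argmap="<svg>...</svg>",
#     )
#     get_details(msg)
#     # -> ("The argument is valid.",
#     #     {"reasoning": "1. Identify the premises. 2. Check the inference.",
#     #      "svg_argmap": "<svg>...</svg>"})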


def history_to_langchain_format(history: list[tuple[str, str]]) -> list:
    history_langchain_format = []  # History in LangChain format, as shown to the LLM
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        if ai is None:
            # No assistant reply for this turn (e.g. the turn currently being generated).
            continue
        response, details = get_details(ai)
        logging.debug(f"Details: {details}")
        content = response
        if "reasoning" in details:
            # Re-attach the hidden reasoning trace so the LLM sees its earlier reasoning.
            content += (
                "\n\n"
                "#+BEGIN_INTERNAL_TRACE // Internal reasoning trace (hidden from user)\n"
                f"{details.get('reasoning', '')}\n"
                "#+END_INTERNAL_TRACE"
            )
        history_langchain_format.append(AIMessage(content=content))
    return history_langchain_format
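

# Minimal usage sketch (assumes a Gradio-style chat history of
# (user_message, ai_message) pairs; the example values are hypothetical):
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    ai_turn = add_details(
        response="The argument is valid.",
        reasoning="1. Identify the premises. 2. Check the inference.",
        svg_argmap="<svg xmlns='http://www.w3.org/2000/svg'></svg>",
    )
    chat_history = [("Is the argument valid?", ai_turn), ("Why?", None)]
    for message in history_to_langchain_format(chat_history):
        print(f"{type(message).__name__}: {message.content!r}")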