from typing import Any, List, Tuple, Union

from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate

from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.output_parsers.xml import XMLAgentOutputParser
from langchain.agents.xml.prompt import agent_instructions
from langchain.callbacks.base import Callbacks
from langchain.chains.llm import LLMChain
from langchain.tools.base import BaseTool


class XMLAgent(BaseSingleActionAgent):
    """Agent that uses XML tags.

    Args:
        tools: list of tools the agent can choose from
        llm_chain: The LLMChain to call to predict the next action

    Examples:

        .. code-block:: python

            from langchain.agents import XMLAgent
            from langchain.chains import LLMChain

            tools = ...
            model = ...

            chain = LLMChain(
                llm=model,
                prompt=XMLAgent.get_default_prompt(),
                output_parser=XMLAgent.get_default_output_parser(),
            )
            agent = XMLAgent(tools=tools, llm_chain=chain)
    """

    tools: List[BaseTool]
    """List of tools this agent has access to."""
    llm_chain: LLMChain
    """Chain to use to predict action."""

    @property
    def input_keys(self) -> List[str]:
        return ["input"]

    @staticmethod
    def get_default_prompt() -> ChatPromptTemplate:
        return ChatPromptTemplate.from_template(
            agent_instructions
        ) + AIMessagePromptTemplate.from_template("{intermediate_steps}")

    @staticmethod
    def get_default_output_parser() -> XMLAgentOutputParser:
        return XMLAgentOutputParser()

    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        # Serialize prior actions and observations as the XML the prompt expects.
        log = ""
        for action, observation in intermediate_steps:
            log += (
                f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
                f"</tool_input><observation>{observation}</observation>"
            )
        # Describe the available tools as "name: description" lines.
        tools = ""
        for tool in self.tools:
            tools += f"{tool.name}: {tool.description}\n"
        inputs = {
            "intermediate_steps": log,
            "tools": tools,
            "question": kwargs["input"],
            "stop": ["</tool_input>", "</final_answer>"],
        }
        response = self.llm_chain(inputs, callbacks=callbacks)
        return response[self.llm_chain.output_key]

    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        # Async variant of plan(): same prompt construction, async chain call.
        log = ""
        for action, observation in intermediate_steps:
            log += (
                f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
                f"</tool_input><observation>{observation}</observation>"
            )
        tools = ""
        for tool in self.tools:
            tools += f"{tool.name}: {tool.description}\n"
        inputs = {
            "intermediate_steps": log,
            "tools": tools,
            "question": kwargs["input"],
            "stop": ["</tool_input>", "</final_answer>"],
        }
        response = await self.llm_chain.acall(inputs, callbacks=callbacks)
        return response[self.llm_chain.output_key]
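

# A minimal usage sketch, assuming an `agent` built as in the docstring example
# above and a `tools` list of BaseTool instances; the question string is a
# placeholder. The agent's plan/aplan loop is typically driven by an
# AgentExecutor from langchain.agents rather than called directly.
#
#     from langchain.agents import AgentExecutor
#
#     agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
#     agent_executor.run("your question here")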