Source code for langchain.agents.agent_toolkits.python.base

"""Python agent."""

from typing import Any, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.python.prompt import PREFIX
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.tools.python.tool import PythonREPLTool


def create_python_agent(
    llm: BaseLLM,
    tool: PythonREPLTool,
    callback_manager: Optional[BaseCallbackManager] = None,
    verbose: bool = False,
    prefix: str = PREFIX,
    **kwargs: Any,
) -> AgentExecutor:
    """Construct a python agent from an LLM and tool."""
    tools = [tool]
    prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
    llm_chain = LLMChain(
        llm=llm,
        prompt=prompt,
        callback_manager=callback_manager,
    )
    tool_names = [tool.name for tool in tools]
    agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
    return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=verbose)
https://python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/python/base.html
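A minimal usage sketch (not from the page above): wiring create_python_agent to an OpenAI LLM. The OpenAI choice and the sample question are assumptions; any BaseLLM works.

from langchain.agents.agent_toolkits.python.base import create_python_agent
from langchain.llms import OpenAI  # assumes OPENAI_API_KEY is set in the environment
from langchain.tools.python.tool import PythonREPLTool

agent_executor = create_python_agent(
    llm=OpenAI(temperature=0),
    tool=PythonREPLTool(),
    verbose=True,
)
agent_executor.run("What is the 10th Fibonacci number?")  # illustrative question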
Source code for langchain.agents.agent_toolkits.jira.toolkit

"""Jira Toolkit."""

from typing import List

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.jira.tool import JiraAction
from langchain.utilities.jira import JiraAPIWrapper


class JiraToolkit(BaseToolkit):
    """Jira Toolkit."""

    tools: List[BaseTool] = []

    @classmethod
    def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
        actions = jira_api_wrapper.list()
        tools = [
            JiraAction(
                name=action["name"],
                description=action["description"],
                mode=action["mode"],
                api_wrapper=jira_api_wrapper,
            )
            for action in actions
        ]
        return cls(tools=tools)

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        return self.tools
https://python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/jira/toolkit.html
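A minimal sketch of building the toolkit from a wrapper. JiraAPIWrapper is typically configured through environment variables (the exact variable names are an assumption here); the loop just lists the generated tools.

from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit
from langchain.utilities.jira import JiraAPIWrapper

jira = JiraAPIWrapper()  # assumes Jira credentials are available in the environment
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
for tool in toolkit.get_tools():
    print(tool.name, "-", tool.description)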
Source code for langchain.agents.agent_toolkits.nla.toolkit

"""Toolkit for interacting with APIs using natural language."""
from __future__ import annotations

from typing import Any, List, Optional, Sequence

from pydantic import Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.agents.agent_toolkits.nla.tool import NLATool
from langchain.llms.base import BaseLLM
from langchain.requests import Requests
from langchain.tools.base import BaseTool
from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain.tools.plugin import AIPlugin


class NLAToolkit(BaseToolkit):
    """Natural Language API Toolkit Definition."""

    nla_tools: Sequence[NLATool] = Field(...)
    """List of API Endpoint Tools."""

    def get_tools(self) -> List[BaseTool]:
        """Get the tools for all the API operations."""
        return list(self.nla_tools)

    @staticmethod
    def _get_http_operation_tools(
        llm: BaseLLM,
        spec: OpenAPISpec,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> List[NLATool]:
        """Get the tools for all the API operations."""
        if not spec.paths:
            return []
        http_operation_tools = []
        for path in spec.paths:
            for method in spec.get_methods_for_path(path):
                endpoint_tool = NLATool.from_llm_and_method(
                    llm=llm,
                    path=path,
                    method=method,
                    spec=spec,
                    requests=requests,
                    verbose=verbose,
                    **kwargs,
                )
                http_operation_tools.append(endpoint_tool)
        return http_operation_tools

    @classmethod
    def from_llm_and_spec(
        cls,
        llm: BaseLLM,
        spec: OpenAPISpec,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit by creating tools for each operation."""
        http_operation_tools = cls._get_http_operation_tools(
            llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
        )
        return cls(nla_tools=http_operation_tools)

    @classmethod
    def from_llm_and_url(
        cls,
        llm: BaseLLM,
        open_api_url: str,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit from an OpenAPI spec URL."""
        spec = OpenAPISpec.from_url(open_api_url)
        return cls.from_llm_and_spec(
            llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
        )

    @classmethod
    def from_llm_and_ai_plugin(
        cls,
        llm: BaseLLM,
        ai_plugin: AIPlugin,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit from an AIPlugin."""
        spec = OpenAPISpec.from_url(ai_plugin.api.url)
        # TODO: Merge optional Auth information with the `requests` argument
        return cls.from_llm_and_spec(
            llm=llm,
            spec=spec,
            requests=requests,
            verbose=verbose,
            **kwargs,
        )

    @classmethod
    def from_llm_and_ai_plugin_url(
        cls,
        llm: BaseLLM,
        ai_plugin_url: str,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit from an AIPlugin URL."""
        plugin = AIPlugin.from_url(ai_plugin_url)
        return cls.from_llm_and_ai_plugin(
            llm=llm, ai_plugin=plugin, requests=requests, verbose=verbose, **kwargs
        )
https://python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/nla/toolkit.html
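A minimal sketch of from_llm_and_url. The spec URL is hypothetical; substitute any reachable OpenAPI document.

from langchain.agents.agent_toolkits.nla.toolkit import NLAToolkit
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
# Hypothetical spec URL; any OpenAPI document works here.
toolkit = NLAToolkit.from_llm_and_url(llm, "https://example.com/openapi.yaml")
tools = toolkit.get_tools()  # one NLATool per HTTP operation in the spec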
Source code for langchain.agents.self_ask_with_search.base

"""Chain that does self ask with search."""
from typing import Any, Sequence, Union

from pydantic import Field

from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputParser
from langchain.agents.self_ask_with_search.prompt import PROMPT
from langchain.agents.tools import Tool
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.tools.base import BaseTool
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.serpapi import SerpAPIWrapper


class SelfAskWithSearchAgent(Agent):
    """Agent for the self-ask-with-search paper."""

    output_parser: AgentOutputParser = Field(default_factory=SelfAskOutputParser)

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return SelfAskOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.SELF_ASK_WITH_SEARCH

    @classmethod
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Prompt does not depend on tools."""
        return PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        if len(tools) != 1:
            raise ValueError(f"Exactly one tool must be specified, but got {tools}")
        tool_names = {tool.name for tool in tools}
        if tool_names != {"Intermediate Answer"}:
            raise ValueError(
                f"Tool name should be Intermediate Answer, got {tool_names}"
            )

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Intermediate answer: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the LLM call with."""
        return ""


class SelfAskWithSearchChain(AgentExecutor):
    """Chain that does self ask with search.

    Example:
        .. code-block:: python

            from langchain import SelfAskWithSearchChain, OpenAI, GoogleSerperAPIWrapper
            search_chain = GoogleSerperAPIWrapper()
            self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain)
    """

    def __init__(
        self,
        llm: BaseLLM,
        search_chain: Union[GoogleSerperAPIWrapper, SerpAPIWrapper],
        **kwargs: Any,
    ):
        """Initialize with just an LLM and a search chain."""
        search_tool = Tool(
            name="Intermediate Answer", func=search_chain.run, description="Search"
        )
        agent = SelfAskWithSearchAgent.from_llm_and_tools(llm, [search_tool])
        super().__init__(agent=agent, tools=[search_tool], **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/agents/self_ask_with_search/base.html
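Extending the class docstring's example with an actual query; the question is illustrative, and GoogleSerperAPIWrapper assumes a Serper API key in the environment.

from langchain import OpenAI, SelfAskWithSearchChain
from langchain.utilities.google_serper import GoogleSerperAPIWrapper

search_chain = GoogleSerperAPIWrapper()  # assumes SERPER_API_KEY is set
self_ask = SelfAskWithSearchChain(llm=OpenAI(temperature=0), search_chain=search_chain)
self_ask.run("What is the hometown of the reigning men's U.S. Open champion?")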
Source code for langchain.agents.conversational_chat.base

"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations

from typing import Any, List, Optional, Sequence, Tuple

from pydantic import Field

from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.conversational_chat.output_parser import ConvoOutputParser
from langchain.agents.conversational_chat.prompt import (
    PREFIX,
    SUFFIX,
    TEMPLATE_TOOL_RESPONSE,
)
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
)
from langchain.schema import (
    AgentAction,
    AIMessage,
    BaseLanguageModel,
    BaseMessage,
    BaseOutputParser,
    HumanMessage,
)
from langchain.tools.base import BaseTool


class ConversationalChatAgent(Agent):
    """An agent designed to hold a conversation in addition to using tools."""

    output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return ConvoOutputParser()

    @property
    def _agent_type(self) -> str:
        raise NotImplementedError

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        system_message: str = PREFIX,
        human_message: str = SUFFIX,
        input_variables: Optional[List[str]] = None,
        output_parser: Optional[BaseOutputParser] = None,
    ) -> BasePromptTemplate:
        tool_strings = "\n".join(
            [f"> {tool.name}: {tool.description}" for tool in tools]
        )
        tool_names = ", ".join([tool.name for tool in tools])
        _output_parser = output_parser or cls._get_default_output_parser()
        format_instructions = human_message.format(
            format_instructions=_output_parser.get_format_instructions()
        )
        final_prompt = format_instructions.format(
            tool_names=tool_names, tools=tool_strings
        )
        if input_variables is None:
            input_variables = ["input", "chat_history", "agent_scratchpad"]
        messages = [
            SystemMessagePromptTemplate.from_template(system_message),
            MessagesPlaceholder(variable_name="chat_history"),
            HumanMessagePromptTemplate.from_template(final_prompt),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
        return ChatPromptTemplate(input_variables=input_variables, messages=messages)

    def _construct_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> List[BaseMessage]:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts: List[BaseMessage] = []
        for action, observation in intermediate_steps:
            thoughts.append(AIMessage(content=action.log))
            human_message = HumanMessage(
                content=TEMPLATE_TOOL_RESPONSE.format(observation=observation)
            )
            thoughts.append(human_message)
        return thoughts

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        system_message: str = PREFIX,
        human_message: str = SUFFIX,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        _output_parser = output_parser or cls._get_default_output_parser()
        prompt = cls.create_prompt(
            tools,
            system_message=system_message,
            human_message=human_message,
            input_variables=input_variables,
            output_parser=_output_parser,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )
https://python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html
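A minimal sketch of assembling this agent into an executor. The `tools` list and the memory setup are assumptions; since the prompt expects a `chat_history` placeholder of messages, a buffer memory that returns messages is the natural pairing.

from langchain.agents.agent import AgentExecutor
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

# `tools` is assumed to be a pre-built list of BaseTool instances.
agent = ConversationalChatAgent.from_llm_and_tools(
    llm=ChatOpenAI(temperature=0), tools=tools
)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, memory=memory)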
Source code for langchain.agents.mrkl.base

"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations

from typing import Any, Callable, List, NamedTuple, Optional, Sequence

from pydantic import Field

from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool


class ChainConfig(NamedTuple):
    """Configuration for chain to use in MRKL system.

    Args:
        action_name: Name of the action.
        action: Action function to call.
        action_description: Description of the action.
    """

    action_name: str
    action: Callable
    action_description: str


class ZeroShotAgent(Agent):
    """Agent for the MRKL chain."""

    output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return MRKLOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.ZERO_SHOT_REACT_DESCRIPTION

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
    ) -> PromptTemplate:
        """Create prompt in the style of the zero shot agent.

        Args:
            tools: List of tools the agent will have access to, used to format the
                prompt.
            prefix: String to put before the list of tools.
            suffix: String to put after the list of tools.
            input_variables: List of input variables the final prompt will expect.

        Returns:
            A PromptTemplate with the template assembled from the pieces here.
        """
        tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(tool_names=tool_names)
        template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
        if input_variables is None:
            input_variables = ["input", "agent_scratchpad"]
        return PromptTemplate(template=template, input_variables=input_variables)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser()
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        for tool in tools:
            if tool.description is None:
                raise ValueError(
                    f"Got a tool {tool.name} without a description. For this agent, "
                    f"a description must always be provided."
                )


class MRKLChain(AgentExecutor):
    """Chain that implements the MRKL system.

    Example:
        .. code-block:: python

            from langchain import OpenAI, MRKLChain
            from langchain.chains.mrkl.base import ChainConfig
            llm = OpenAI(temperature=0)
            chains = [...]
            mrkl = MRKLChain.from_chains(llm=llm, chains=chains)
    """

    @classmethod
    def from_chains(
        cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any
    ) -> AgentExecutor:
        """User friendly way to initialize the MRKL chain.

        This is intended to be an easy way to get up and running with the
        MRKL chain.

        Args:
            llm: The LLM to use as the agent LLM.
            chains: The chains the MRKL system has access to.
            **kwargs: parameters to be passed to initialization.

        Returns:
            An initialized MRKL chain.

        Example:
            .. code-block:: python

                from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, MRKLChain
                from langchain.chains.mrkl.base import ChainConfig
                llm = OpenAI(temperature=0)
                search = SerpAPIWrapper()
                llm_math_chain = LLMMathChain(llm=llm)
                chains = [
                    ChainConfig(
                        action_name="Search",
                        action=search.search,
                        action_description="useful for searching"
                    ),
                    ChainConfig(
                        action_name="Calculator",
                        action=llm_math_chain.run,
                        action_description="useful for doing math"
                    )
                ]
                mrkl = MRKLChain.from_chains(llm, chains)
        """
        tools = [
            Tool(
                name=c.action_name,
                func=c.action,
                description=c.action_description,
            )
            for c in chains
        ]
        agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
        return cls(agent=agent, tools=tools, **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
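Besides MRKLChain.from_chains (shown in the docstrings above), the agent can be built directly; a minimal sketch, with `tools` assumed to be a pre-existing list of BaseTool instances that all carry descriptions.

from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.llms import OpenAI

# `tools` is assumed to exist; every tool needs a description for this agent.
agent = ZeroShotAgent.from_llm_and_tools(llm=OpenAI(temperature=0), tools=tools)
executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)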
Source code for langchain.agents.conversational.base

"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations

from typing import Any, List, Optional, Sequence

from pydantic import Field

from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.conversational.output_parser import ConvoOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool


class ConversationalAgent(Agent):
    """An agent designed to hold a conversation in addition to using tools."""

    ai_prefix: str = "AI"
    output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)

    @classmethod
    def _get_default_output_parser(
        cls, ai_prefix: str = "AI", **kwargs: Any
    ) -> AgentOutputParser:
        return ConvoOutputParser(ai_prefix=ai_prefix)

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.CONVERSATIONAL_REACT_DESCRIPTION

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        ai_prefix: str = "AI",
        human_prefix: str = "Human",
        input_variables: Optional[List[str]] = None,
    ) -> PromptTemplate:
        """Create prompt in the style of the conversational agent.

        Args:
            tools: List of tools the agent will have access to, used to format the
                prompt.
            prefix: String to put before the list of tools.
            suffix: String to put after the list of tools.
            ai_prefix: String to use before AI output.
            human_prefix: String to use before human output.
            input_variables: List of input variables the final prompt will expect.

        Returns:
            A PromptTemplate with the template assembled from the pieces here.
        """
        tool_strings = "\n".join(
            [f"> {tool.name}: {tool.description}" for tool in tools]
        )
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(
            tool_names=tool_names, ai_prefix=ai_prefix, human_prefix=human_prefix
        )
        template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
        if input_variables is None:
            input_variables = ["input", "chat_history", "agent_scratchpad"]
        return PromptTemplate(template=template, input_variables=input_variables)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        ai_prefix: str = "AI",
        human_prefix: str = "Human",
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            ai_prefix=ai_prefix,
            human_prefix=human_prefix,
            prefix=prefix,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser(
            ai_prefix=ai_prefix
        )
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            ai_prefix=ai_prefix,
            output_parser=_output_parser,
            **kwargs,
        )
https://python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
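A minimal sketch pairing the agent with a buffer memory keyed to `chat_history`, matching the default input variables above; the `tools` list is an assumption.

from langchain.agents.agent import AgentExecutor
from langchain.agents.conversational.base import ConversationalAgent
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory

# `tools` is assumed to be a pre-built list of BaseTool instances.
agent = ConversationalAgent.from_llm_and_tools(llm=OpenAI(temperature=0), tools=tools)
memory = ConversationBufferMemory(memory_key="chat_history")
executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, memory=memory)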
Source code for langchain.agents.react.base

"""Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
from typing import Any, List, Optional, Sequence

from pydantic import Field

from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.react.output_parser import ReActOutputParser
from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT
from langchain.agents.react.wiki_prompt import WIKI_PROMPT
from langchain.agents.tools import Tool
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.tools.base import BaseTool


class ReActDocstoreAgent(Agent):
    """Agent for the ReAct chain."""

    output_parser: AgentOutputParser = Field(default_factory=ReActOutputParser)

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return ReActOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.REACT_DOCSTORE

    @classmethod
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Return default prompt."""
        return WIKI_PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        if len(tools) != 2:
            raise ValueError(f"Exactly two tools must be specified, but got {tools}")
        tool_names = {tool.name for tool in tools}
        if tool_names != {"Lookup", "Search"}:
            raise ValueError(
                f"Tool names should be Lookup and Search, got {tool_names}"
            )

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def _stop(self) -> List[str]:
        return ["\nObservation:"]

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the LLM call with."""
        return "Thought:"


class DocstoreExplorer:
    """Class to assist with exploration of a document store."""

    def __init__(self, docstore: Docstore):
        """Initialize with a docstore, and set initial document to None."""
        self.docstore = docstore
        self.document: Optional[Document] = None
        self.lookup_str = ""
        self.lookup_index = 0

    def search(self, term: str) -> str:
        """Search for a term in the docstore, and if found save."""
        result = self.docstore.search(term)
        if isinstance(result, Document):
            self.document = result
            return self._summary
        else:
            self.document = None
            return result

    def lookup(self, term: str) -> str:
        """Lookup a term in document (if saved)."""
        if self.document is None:
            raise ValueError("Cannot lookup without a successful search first")
        if term.lower() != self.lookup_str:
            self.lookup_str = term.lower()
            self.lookup_index = 0
        else:
            self.lookup_index += 1
        lookups = [p for p in self._paragraphs if self.lookup_str in p.lower()]
        if len(lookups) == 0:
            return "No Results"
        elif self.lookup_index >= len(lookups):
            return "No More Results"
        else:
            result_prefix = f"(Result {self.lookup_index + 1}/{len(lookups)})"
            return f"{result_prefix} {lookups[self.lookup_index]}"

    @property
    def _summary(self) -> str:
        return self._paragraphs[0]

    @property
    def _paragraphs(self) -> List[str]:
        if self.document is None:
            raise ValueError("Cannot get paragraphs without a document")
        return self.document.page_content.split("\n\n")


class ReActTextWorldAgent(ReActDocstoreAgent):
    """Agent for the ReAct TextWorld chain."""

    @classmethod
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Return default prompt."""
        return TEXTWORLD_PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        if len(tools) != 1:
            raise ValueError(f"Exactly one tool must be specified, but got {tools}")
        tool_names = {tool.name for tool in tools}
        if tool_names != {"Play"}:
            raise ValueError(f"Tool name should be Play, got {tool_names}")


class ReActChain(AgentExecutor):
    """Chain that implements the ReAct paper.

    Example:
        .. code-block:: python

            from langchain import ReActChain, OpenAI
            react = ReActChain(llm=OpenAI(), docstore=docstore)
    """

    def __init__(self, llm: BaseLLM, docstore: Docstore, **kwargs: Any):
        """Initialize with the LLM and a docstore."""
        docstore_explorer = DocstoreExplorer(docstore)
        tools = [
            Tool(
                name="Search",
                func=docstore_explorer.search,
                description="Search for a term in the docstore.",
            ),
            Tool(
                name="Lookup",
                func=docstore_explorer.lookup,
                description="Lookup a term in the docstore.",
            ),
        ]
        agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools)
        super().__init__(agent=agent, tools=tools, **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/agents/react/base.html
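A minimal sketch using the Wikipedia docstore, which satisfies the Docstore interface; the import and the sample question are assumptions (this docstore requires the `wikipedia` package).

from langchain import OpenAI, ReActChain, Wikipedia

react = ReActChain(llm=OpenAI(temperature=0), docstore=Wikipedia())
react.run("What profession do Nicholas Ray and Elia Kazan have in common?")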
Source code for langchain.experimental.autonomous_agents.baby_agi.baby_agi

from collections import deque
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field

from langchain.chains.base import Chain
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
    TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
    TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
    TaskPrioritizationChain,
)
from langchain.schema import BaseLanguageModel
from langchain.vectorstores.base import VectorStore


class BabyAGI(Chain, BaseModel):
    """Controller model for the BabyAGI agent."""

    task_list: deque = Field(default_factory=deque)
    task_creation_chain: Chain = Field(...)
    task_prioritization_chain: Chain = Field(...)
    execution_chain: Chain = Field(...)
    task_id_counter: int = Field(1)
    vectorstore: VectorStore = Field(init=False)
    max_iterations: Optional[int] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def add_task(self, task: Dict) -> None:
        self.task_list.append(task)

    def print_task_list(self) -> None:
        print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
        for t in self.task_list:
            print(str(t["task_id"]) + ": " + t["task_name"])

    def print_next_task(self, task: Dict) -> None:
        print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
        print(str(task["task_id"]) + ": " + task["task_name"])

    def print_task_result(self, result: str) -> None:
        print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
        print(result)

    @property
    def input_keys(self) -> List[str]:
        return ["objective"]

    @property
    def output_keys(self) -> List[str]:
        return []

    def get_next_task(
        self, result: str, task_description: str, objective: str
    ) -> List[Dict]:
        """Get the next task."""
        task_names = [t["task_name"] for t in self.task_list]
        incomplete_tasks = ", ".join(task_names)
        response = self.task_creation_chain.run(
            result=result,
            task_description=task_description,
            incomplete_tasks=incomplete_tasks,
            objective=objective,
        )
        new_tasks = response.split("\n")
        return [
            {"task_name": task_name} for task_name in new_tasks if task_name.strip()
        ]

    def prioritize_tasks(self, this_task_id: int, objective: str) -> List[Dict]:
        """Prioritize tasks."""
        task_names = [t["task_name"] for t in list(self.task_list)]
        next_task_id = int(this_task_id) + 1
        response = self.task_prioritization_chain.run(
            task_names=", ".join(task_names),
            next_task_id=str(next_task_id),
            objective=objective,
        )
        new_tasks = response.split("\n")
        prioritized_task_list = []
        for task_string in new_tasks:
            if not task_string.strip():
                continue
            task_parts = task_string.strip().split(".", 1)
            if len(task_parts) == 2:
                task_id = task_parts[0].strip()
                task_name = task_parts[1].strip()
                prioritized_task_list.append(
                    {"task_id": task_id, "task_name": task_name}
                )
        return prioritized_task_list

    def _get_top_tasks(self, query: str, k: int) -> List[str]:
        """Get the top k tasks based on the query."""
        results = self.vectorstore.similarity_search(query, k=k)
        if not results:
            return []
        return [str(item.metadata["task"]) for item in results]

    def execute_task(self, objective: str, task: str, k: int = 5) -> str:
        """Execute a task."""
        context = self._get_top_tasks(query=objective, k=k)
        return self.execution_chain.run(
            objective=objective, context="\n".join(context), task=task
        )

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Run the agent."""
        objective = inputs["objective"]
        first_task = inputs.get("first_task", "Make a todo list")
        self.add_task({"task_id": 1, "task_name": first_task})
        num_iters = 0
        while True:
            if self.task_list:
                self.print_task_list()

                # Step 1: Pull the first task
                task = self.task_list.popleft()
                self.print_next_task(task)

                # Step 2: Execute the task
                result = self.execute_task(objective, task["task_name"])
                this_task_id = int(task["task_id"])
                self.print_task_result(result)

                # Step 3: Store the result in the vectorstore
                result_id = f"result_{task['task_id']}"
                self.vectorstore.add_texts(
                    texts=[result],
                    metadatas=[{"task": task["task_name"]}],
                    ids=[result_id],
                )

                # Step 4: Create new tasks and reprioritize task list
                new_tasks = self.get_next_task(result, task["task_name"], objective)
                for new_task in new_tasks:
                    self.task_id_counter += 1
                    new_task.update({"task_id": self.task_id_counter})
                    self.add_task(new_task)
                self.task_list = deque(self.prioritize_tasks(this_task_id, objective))
            num_iters += 1
            if self.max_iterations is not None and num_iters == self.max_iterations:
                print(
                    "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
                )
                break
        return {}

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        verbose: bool = False,
        task_execution_chain: Optional[Chain] = None,
        **kwargs: Any,
    ) -> "BabyAGI":
        """Initialize the BabyAGI Controller."""
        task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
        task_prioritization_chain = TaskPrioritizationChain.from_llm(
            llm, verbose=verbose
        )
        if task_execution_chain is None:
            execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
        else:
            execution_chain = task_execution_chain
        return cls(
            task_creation_chain=task_creation_chain,
            task_prioritization_chain=task_prioritization_chain,
            execution_chain=execution_chain,
            vectorstore=vectorstore,
            **kwargs,
        )
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
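A minimal sketch of from_llm. Any VectorStore works; FAISS with OpenAI embeddings is one assumed choice, seeded here with a placeholder text because FAISS.from_texts needs at least one document.

from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS  # assumes the faiss package is installed

vectorstore = FAISS.from_texts(["placeholder"], OpenAIEmbeddings())
baby_agi = BabyAGI.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,
    max_iterations=3,  # stop the loop after three iterations
)
baby_agi({"objective": "Write a short weather report for San Francisco"})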
Source code for langchain.experimental.autonomous_agents.autogpt.agent

from __future__ import annotations

from typing import List, Optional

from pydantic import ValidationError

from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
    AutoGPTOutputParser,
    BaseAutoGPTOutputParser,
)
from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
    FINISH_NAME,
)
from langchain.schema import (
    AIMessage,
    BaseMessage,
    Document,
    HumanMessage,
    SystemMessage,
)
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever


class AutoGPT:
    """Agent class for interacting with Auto-GPT."""

    def __init__(
        self,
        ai_name: str,
        memory: VectorStoreRetriever,
        chain: LLMChain,
        output_parser: BaseAutoGPTOutputParser,
        tools: List[BaseTool],
        feedback_tool: Optional[HumanInputRun] = None,
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history: List[BaseMessage] = []
        self.next_action_count = 0
        self.chain = chain
        self.output_parser = output_parser
        self.tools = tools
        self.feedback_tool = feedback_tool

    @classmethod
    def from_llm_and_tools(
        cls,
        ai_name: str,
        ai_role: str,
        memory: VectorStoreRetriever,
        tools: List[BaseTool],
        llm: BaseChatModel,
        human_in_the_loop: bool = False,
        output_parser: Optional[BaseAutoGPTOutputParser] = None,
    ) -> AutoGPT:
        prompt = AutoGPTPrompt(
            ai_name=ai_name,
            ai_role=ai_role,
            tools=tools,
            input_variables=["memory", "messages", "goals", "user_input"],
            token_counter=llm.get_num_tokens,
        )
        human_feedback_tool = HumanInputRun() if human_in_the_loop else None
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(
            ai_name,
            memory,
            chain,
            output_parser or AutoGPTOutputParser(),
            tools,
            feedback_tool=human_feedback_tool,
        )

    def run(self, goals: List[str]) -> str:
        user_input = (
            "Determine which next command to use, "
            "and respond using the format specified above:"
        )
        # Interaction Loop
        loop_count = 0
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1

            # Send message to AI, get response
            assistant_reply = self.chain.run(
                goals=goals,
                messages=self.full_message_history,
                memory=self.memory,
                user_input=user_input,
            )

            # Print Assistant thoughts
            print(assistant_reply)
            self.full_message_history.append(HumanMessage(content=user_input))
            self.full_message_history.append(AIMessage(content=assistant_reply))

            # Get command name and arguments
            action = self.output_parser.parse(assistant_reply)
            tools = {t.name: t for t in self.tools}
            if action.name == FINISH_NAME:
                return action.args["response"]
            if action.name in tools:
                tool = tools[action.name]
                try:
                    observation = tool.run(action.args)
                except ValidationError as e:
                    observation = (
                        f"Validation Error in args: {str(e)}, args: {action.args}"
                    )
                except Exception as e:
                    observation = (
                        f"Error: {str(e)}, {type(e).__name__}, args: {action.args}"
                    )
                result = f"Command {tool.name} returned: {observation}"
            elif action.name == "ERROR":
                result = f"Error: {action.args}. "
            else:
                result = (
                    f"Unknown command '{action.name}'. "
                    f"Please refer to the 'COMMANDS' list for available "
                    f"commands and only respond in the specified JSON format."
                )

            memory_to_add = (
                f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
            )
            if self.feedback_tool is not None:
                feedback = f"\n{self.feedback_tool.run('Input: ')}"
                if feedback in {"q", "stop"}:
                    print("EXITING")
                    return "EXITING"
                memory_to_add += feedback

            self.memory.add_documents([Document(page_content=memory_to_add)])
            self.full_message_history.append(SystemMessage(content=result))
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html
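A minimal sketch of from_llm_and_tools; `tools` and `retriever` (a VectorStoreRetriever over any vector store) are assumed to exist already, and the name, role, and goal are illustrative.

from langchain.chat_models import ChatOpenAI
from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT

agent = AutoGPT.from_llm_and_tools(
    ai_name="Tom",  # illustrative name
    ai_role="Assistant",
    tools=tools,  # assumed pre-built list of BaseTool instances
    llm=ChatOpenAI(temperature=0),
    memory=retriever,  # assumed VectorStoreRetriever
)
agent.run(["write a weather report for SF today"])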
Source code for langchain.experimental.generative_agents.memory

import logging
import re
from typing import Any, Dict, List, Optional

from langchain import LLMChain
from langchain.prompts import PromptTemplate
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import BaseLanguageModel, BaseMemory, Document

logger = logging.getLogger(__name__)


class GenerativeAgentMemory(BaseMemory):
    llm: BaseLanguageModel
    """The core language model."""

    memory_retriever: TimeWeightedVectorStoreRetriever
    """The retriever to fetch related memories."""

    verbose: bool = False

    reflection_threshold: Optional[float] = None
    """When aggregate_importance exceeds reflection_threshold, stop to reflect."""

    current_plan: List[str] = []
    """The current plan of the agent."""

    # A weight of 0.15 makes this less important than it
    # would be otherwise, relative to salience and time
    importance_weight: float = 0.15
    """How much weight to assign the memory importance."""

    aggregate_importance: float = 0.0  # : :meta private:
    """Track the sum of the 'importance' of recent memories.

    Triggers reflection when it reaches reflection_threshold."""

    max_tokens_limit: int = 1200  # : :meta private:

    # input keys
    queries_key: str = "queries"
    most_recent_memories_token_key: str = "recent_memories_token"
    add_memory_key: str = "add_memory"

    # output keys
    relevant_memories_key: str = "relevant_memories"
    relevant_memories_simple_key: str = "relevant_memories_simple"
    most_recent_memories_key: str = "most_recent_memories"

    def chain(self, prompt: PromptTemplate) -> LLMChain:
        return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)

    @staticmethod
    def _parse_list(text: str) -> List[str]:
        """Parse a newline-separated string into a list of strings."""
        lines = re.split(r"\n", text.strip())
        return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]

    def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:
        """Return the 3 most salient high-level questions about recent observations."""
        prompt = PromptTemplate.from_template(
            "{observations}\n\n"
            + "Given only the information above, what are the 3 most salient"
            + " high-level questions we can answer about the subjects in"
            + " the statements? Provide each question on a new line.\n\n"
        )
        observations = self.memory_retriever.memory_stream[-last_k:]
        observation_str = "\n".join([o.page_content for o in observations])
        result = self.chain(prompt).run(observations=observation_str)
        return self._parse_list(result)

    def _get_insights_on_topic(self, topic: str) -> List[str]:
        """Generate 'insights' on a topic of reflection, based on pertinent memories."""
        prompt = PromptTemplate.from_template(
            "Statements about {topic}\n"
            + "{related_statements}\n\n"
            + "What 5 high-level insights can you infer from the above statements?"
            + " (example format: insight (because of 1, 5, 3))"
        )
        related_memories = self.fetch_memories(topic)
        related_statements = "\n".join(
            [
                f"{i+1}. {memory.page_content}"
                for i, memory in enumerate(related_memories)
            ]
        )
        result = self.chain(prompt).run(
            topic=topic, related_statements=related_statements
        )
        # TODO: Parse the connections between memories and insights
        return self._parse_list(result)

    def pause_to_reflect(self) -> List[str]:
        """Reflect on recent observations and generate 'insights'."""
        if self.verbose:
            logger.info("Character is reflecting")
        new_insights = []
        topics = self._get_topics_of_reflection()
        for topic in topics:
            insights = self._get_insights_on_topic(topic)
            for insight in insights:
                self.add_memory(insight)
            new_insights.extend(insights)
        return new_insights

    def _score_memory_importance(self, memory_content: str) -> float:
        """Score the absolute importance of the given memory."""
        prompt = PromptTemplate.from_template(
            "On the scale of 1 to 10, where 1 is purely mundane"
            + " (e.g., brushing teeth, making bed) and 10 is"
            + " extremely poignant (e.g., a break up, college"
            + " acceptance), rate the likely poignancy of the"
            + " following piece of memory. Respond with a single integer."
            + "\nMemory: {memory_content}"
            + "\nRating: "
        )
        score = self.chain(prompt).run(memory_content=memory_content).strip()
        if self.verbose:
            logger.info(f"Importance score: {score}")
        # Parse the first integer out of the LLM's response.
        match = re.search(r"^\D*(\d+)", score)
        if match:
            return (float(match.group(1)) / 10) * self.importance_weight
        else:
            return 0.0

    def add_memory(self, memory_content: str) -> List[str]:
        """Add an observation or memory to the agent's memory."""
        importance_score = self._score_memory_importance(memory_content)
        self.aggregate_importance += importance_score
        document = Document(
            page_content=memory_content, metadata={"importance": importance_score}
        )
        result = self.memory_retriever.add_documents([document])

        # After an agent has processed a certain amount of memories (as measured by
        # aggregate importance), it is time to reflect on recent events to add
        # more synthesized memories to the agent's memory stream.
        if (
            self.reflection_threshold is not None
            and self.aggregate_importance > self.reflection_threshold
        ):
            self.pause_to_reflect()
            # Hack to clear the importance from reflection
            self.aggregate_importance = 0.0
        return result

    def fetch_memories(self, observation: str) -> List[Document]:
        """Fetch related memories."""
        return self.memory_retriever.get_relevant_documents(observation)

    def format_memories_detail(self, relevant_memories: List[Document]) -> str:
        content_strs = set()
        content = []
        for mem in relevant_memories:
            if mem.page_content in content_strs:
                continue
            content_strs.add(mem.page_content)
            created_time = mem.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p")
            content.append(f"- {created_time}: {mem.page_content.strip()}")
        return "\n".join([f"{mem}" for mem in content])

    def format_memories_simple(self, relevant_memories: List[Document]) -> str:
        return "; ".join([f"{mem.page_content}" for mem in relevant_memories])

    def _get_memories_until_limit(self, consumed_tokens: int) -> str:
        """Reduce the number of tokens in the documents."""
        result = []
        for doc in self.memory_retriever.memory_stream[::-1]:
            if consumed_tokens >= self.max_tokens_limit:
                break
            consumed_tokens += self.llm.get_num_tokens(doc.page_content)
            if consumed_tokens < self.max_tokens_limit:
                result.append(doc)
        return self.format_memories_simple(result)

    @property
    def memory_variables(self) -> List[str]:
        """Input keys this memory class will load dynamically."""
        return []

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return key-value pairs given the text input to the chain."""
        queries = inputs.get(self.queries_key)
        if queries is not None:
            relevant_memories = [
                mem for query in queries for mem in self.fetch_memories(query)
            ]
            return {
                self.relevant_memories_key: self.format_memories_detail(
                    relevant_memories
                ),
                self.relevant_memories_simple_key: self.format_memories_simple(
                    relevant_memories
                ),
            }
        most_recent_memories_token = inputs.get(self.most_recent_memories_token_key)
        if most_recent_memories_token is not None:
            return {
                self.most_recent_memories_key: self._get_memories_until_limit(
                    most_recent_memories_token
                )
            }
        return {}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save the context of this model run to memory."""
        # TODO: fix the save memory key
        mem = outputs.get(self.add_memory_key)
        if mem:
            self.add_memory(mem)

    def clear(self) -> None:
        """Clear memory contents."""
        # TODO
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
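A minimal construction sketch; `llm` and `retriever` (a TimeWeightedVectorStoreRetriever over some vector store) are assumed to be set up already, and the sample memory text is illustrative.

from langchain.experimental.generative_agents.memory import GenerativeAgentMemory

memory = GenerativeAgentMemory(
    llm=llm,
    memory_retriever=retriever,
    reflection_threshold=8.0,  # reflect once accumulated importance crosses this
    verbose=False,
)
memory.add_memory("Tommie remembers his dog, Bruno, from when he was a kid")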
Source code for langchain.experimental.generative_agents.generative_agent

import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field

from langchain import LLMChain
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel


class GenerativeAgent(BaseModel):
    """A character with memory and innate characteristics."""

    name: str
    """The character's name."""

    age: Optional[int] = None
    """The optional age of the character."""

    traits: str = "N/A"
    """Permanent traits to ascribe to the character."""

    status: str
    """The character's current status."""

    memory: GenerativeAgentMemory
    """The memory object that combines relevance, recency, and 'importance'."""

    llm: BaseLanguageModel
    """The underlying language model."""

    verbose: bool = False

    summary: str = ""  #: :meta private:
    """Stateful self-summary generated via reflection on the character's memory."""

    summary_refresh_seconds: int = 3600  #: :meta private:
    """How frequently to re-generate the summary."""

    last_refreshed: datetime = Field(default_factory=datetime.now)  # : :meta private:
    """The last time the character's summary was regenerated."""

    daily_summaries: List[str] = Field(default_factory=list)  # : :meta private:
    """Summary of the events in the plan that the agent took."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    # LLM-related methods
    @staticmethod
    def _parse_list(text: str) -> List[str]:
        """Parse a newline-separated string into a list of strings."""
        lines = re.split(r"\n", text.strip())
        return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]

    def chain(self, prompt: PromptTemplate) -> LLMChain:
        return LLMChain(
            llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory
        )

    def _get_entity_from_observation(self, observation: str) -> str:
        prompt = PromptTemplate.from_template(
            "What is the observed entity in the following observation? {observation}"
            + "\nEntity="
        )
        return self.chain(prompt).run(observation=observation).strip()

    def _get_entity_action(self, observation: str, entity_name: str) -> str:
        prompt = PromptTemplate.from_template(
            "What is the {entity} doing in the following observation? {observation}"
            + "\nThe {entity} is"
        )
        return (
            self.chain(prompt).run(entity=entity_name, observation=observation).strip()
        )

    def summarize_related_memories(self, observation: str) -> str:
        """Summarize memories that are most relevant to an observation."""
        prompt = PromptTemplate.from_template(
            """
{q1}?
Context from memory:
{relevant_memories}
Relevant context:
"""
        )
        entity_name = self._get_entity_from_observation(observation)
        entity_action = self._get_entity_action(observation, entity_name)
        q1 = f"What is the relationship between {self.name} and {entity_name}"
        q2 = f"{entity_name} is {entity_action}"
        return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()

    def _generate_reaction(self, observation: str, suffix: str) -> str:
        """React to a given observation or dialogue act."""
        prompt = PromptTemplate.from_template(
            "{agent_summary_description}"
            + "\nIt is {current_time}."
            + "\n{agent_name}'s status: {agent_status}"
            + "\nSummary of relevant context from {agent_name}'s memory:"
            + "\n{relevant_memories}"
            + "\nMost recent observations: {most_recent_memories}"
            + "\nObservation: {observation}"
            + "\n\n"
            + suffix
        )
        agent_summary_description = self.get_summary()
        relevant_memories_str = self.summarize_related_memories(observation)
        current_time_str = datetime.now().strftime("%B %d, %Y, %I:%M %p")
        kwargs: Dict[str, Any] = dict(
            agent_summary_description=agent_summary_description,
            current_time=current_time_str,
            relevant_memories=relevant_memories_str,
            agent_name=self.name,
            observation=observation,
            agent_status=self.status,
        )
        consumed_tokens = self.llm.get_num_tokens(
            prompt.format(most_recent_memories="", **kwargs)
        )
        kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
        return self.chain(prompt=prompt).run(**kwargs).strip()

    def _clean_response(self, text: str) -> str:
        return re.sub(f"^{self.name} ", "", text.strip()).strip()

    def generate_reaction(self, observation: str) -> Tuple[bool, str]:
        """React to a given observation."""
        call_to_action_template = (
            "Should {agent_name} react to the observation, and if so,"
            + " what would be an appropriate reaction? Respond in one line."
            + ' If the action is to engage in dialogue, write:\nSAY: "what to say"'
            + "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."
            + "\nEither do nothing, react, or say something but not both.\n\n"
        )
        full_result = self._generate_reaction(observation, call_to_action_template)
        result = full_result.strip().split("\n")[0]
        self.memory.save_context(
            {},
            {
                self.memory.add_memory_key: f"{self.name} observed "
                f"{observation} and reacted by {result}"
            },
        )
        if "REACT:" in result:
            reaction = self._clean_response(result.split("REACT:")[-1])
            return False, f"{self.name} {reaction}"
        if "SAY:" in result:
            said_value = self._clean_response(result.split("SAY:")[-1])
            return True, f"{self.name} said {said_value}"
        else:
            return False, result

    def generate_dialogue_response(self, observation: str) -> Tuple[bool, str]:
        """Generate a dialogue response to a given observation."""
        call_to_action_template = (
            "What would {agent_name} say? To end the conversation, write:"
            ' GOODBYE: "what to say". Otherwise to continue the conversation,'
            ' write: SAY: "what to say next"\n\n'
        )
        full_result = self._generate_reaction(observation, call_to_action_template)
        result = full_result.strip().split("\n")[0]
        if "GOODBYE:" in result:
            farewell = self._clean_response(result.split("GOODBYE:")[-1])
            self.memory.save_context(
                {},
                {
                    self.memory.add_memory_key: f"{self.name} observed "
                    f"{observation} and said {farewell}"
                },
            )
            return False, f"{self.name} said {farewell}"
        if "SAY:" in result:
            response_text = self._clean_response(result.split("SAY:")[-1])
            self.memory.save_context(
                {},
                {
                    self.memory.add_memory_key: f"{self.name} observed "
                    f"{observation} and said {response_text}"
                },
            )
            return True, f"{self.name} said {response_text}"
        else:
            return False, result

    ######################################################
    # Agent stateful summary methods.                    #
    # Each dialog or response prompt includes a header   #
    # summarizing the agent's self-description. This is  #
    # updated periodically through probing its memories  #
    ######################################################

    def _compute_agent_summary(self) -> str:
        """Compute a summary of the agent's core characteristics."""
        prompt = PromptTemplate.from_template(
            "How would you summarize {name}'s core characteristics given the"
            + " following statements:\n"
            + "{relevant_memories}"
            + "Do not embellish."
            + "\n\nSummary: "
        )
        # The agent seeks to think about their core characteristics.
        return (
            self.chain(prompt)
            .run(name=self.name, queries=[f"{self.name}'s core characteristics"])
            .strip()
        )

    def get_summary(self, force_refresh: bool = False) -> str:
        """Return a descriptive summary of the agent."""
        current_time = datetime.now()
        since_refresh = (current_time - self.last_refreshed).seconds
        if (
            not self.summary
            or since_refresh >= self.summary_refresh_seconds
            or force_refresh
        ):
            self.summary = self._compute_agent_summary()
            self.last_refreshed = current_time
        age = self.age if self.age is not None else "N/A"
        return (
            f"Name: {self.name} (age: {age})"
            + f"\nInnate traits: {self.traits}"
            + f"\n{self.summary}"
        )

    def get_full_header(self, force_refresh: bool = False) -> str:
        """Return a full header of the agent's status, summary, and current time."""
        summary = self.get_summary(force_refresh=force_refresh)
        current_time_str = datetime.now().strftime("%B %d, %Y, %I:%M %p")
        return (
            f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}"
        )
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
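A minimal sketch creating a character; `llm` and `memory` (a GenerativeAgentMemory, as defined in the companion module) are assumed, and the character details are illustrative.

from langchain.experimental.generative_agents.generative_agent import GenerativeAgent

tommie = GenerativeAgent(
    name="Tommie",
    age=25,
    traits="anxious, likes design, talkative",
    status="looking for a job",
    memory=memory,  # assumed GenerativeAgentMemory instance
    llm=llm,
)
print(tommie.get_summary())
_, reaction = tommie.generate_reaction("Tommie sees a new coffee shop open nearby")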
5bf55e3ea844-0
Source code for langchain.prompts.loading """Load prompts from disk.""" import importlib import json import logging from pathlib import Path from typing import Union import yaml from langchain.output_parsers.regex import RegexParser from langchain.prompts.base import BasePromptTemplate from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/" logger = logging.getLogger(__name__) def load_prompt_from_config(config: dict) -> BasePromptTemplate: """Load prompt from Config Dict.""" if "_type" not in config: logger.warning("No `_type` key found, defaulting to `prompt`.") config_type = config.pop("_type", "prompt") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} prompt not supported") prompt_loader = type_to_loader_dict[config_type] return prompt_loader(config) def _load_template(var_name: str, config: dict) -> dict: """Load template from disk if applicable.""" # Check if template_path exists in config. if f"{var_name}_path" in config: # If it does, make sure template variable doesn't also exist. if var_name in config: raise ValueError( f"Both `{var_name}_path` and `{var_name}` cannot be provided." ) # Pop the template path from the config. template_path = Path(config.pop(f"{var_name}_path")) # Load the template. if template_path.suffix == ".txt": with open(template_path) as f:
if template_path.suffix == ".txt": with open(template_path) as f: template = f.read() else: raise ValueError # Set the template variable to the extracted variable. config[var_name] = template return config def _load_examples(config: dict) -> dict: """Load examples if necessary.""" if isinstance(config["examples"], list): pass elif isinstance(config["examples"], str): with open(config["examples"]) as f: if config["examples"].endswith(".json"): examples = json.load(f) elif config["examples"].endswith((".yaml", ".yml")): examples = yaml.safe_load(f) else: raise ValueError( "Invalid file format. Only json or yaml formats are supported." ) config["examples"] = examples else: raise ValueError("Invalid examples format. Only list or string are supported.") return config def _load_output_parser(config: dict) -> dict: """Load output parser.""" if "output_parsers" in config: if config["output_parsers"] is not None: _config = config["output_parsers"] output_parser_type = _config["_type"] if output_parser_type == "regex_parser": output_parser = RegexParser(**_config) else: raise ValueError(f"Unsupported output parser {output_parser_type}") config["output_parsers"] = output_parser return config def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate: """Load the few shot prompt from the config.""" # Load the suffix and prefix templates. config = _load_template("suffix", config)
config = _load_template("suffix", config) config = _load_template("prefix", config) # Load the example prompt. if "example_prompt_path" in config: if "example_prompt" in config: raise ValueError( "Only one of example_prompt and example_prompt_path should " "be specified." ) config["example_prompt"] = load_prompt(config.pop("example_prompt_path")) else: config["example_prompt"] = load_prompt_from_config(config["example_prompt"]) # Load the examples. config = _load_examples(config) config = _load_output_parser(config) return FewShotPromptTemplate(**config) def _load_prompt(config: dict) -> PromptTemplate: """Load the prompt template from config.""" # Load the template from disk if necessary. config = _load_template("template", config) config = _load_output_parser(config) return PromptTemplate(**config) [docs]def load_prompt(path: Union[str, Path]) -> BasePromptTemplate: """Unified method for loading a prompt from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"} ): return hub_result else: return _load_prompt_from_file(path) def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate: """Load prompt from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix == ".yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    elif file_path.suffix == ".py":
        spec = importlib.util.spec_from_loader(
            "prompt", loader=None, origin=str(file_path)
        )
        if spec is None:
            raise ValueError("could not load spec")
        helper = importlib.util.module_from_spec(spec)
        with open(file_path, "rb") as f:
            exec(f.read(), helper.__dict__)
        if not isinstance(helper.PROMPT, BasePromptTemplate):
            raise ValueError("Did not get object of type BasePromptTemplate.")
        return helper.PROMPT
    else:
        raise ValueError(f"Got unsupported file type {file_path.suffix}")
    # Load the prompt from the config now.
    return load_prompt_from_config(config)

type_to_loader_dict = {
    "prompt": _load_prompt,
    "few_shot": _load_few_shot_prompt,
    # "few_shot_with_templates": _load_few_shot_with_templates_prompt,
}
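A minimal round-trip sketch of the loading machinery above (illustrative; the file name is hypothetical). `save` writes a config with a `_type` key, and `load_prompt` dispatches on that key via `type_to_loader_dict`:

    from langchain.prompts import PromptTemplate
    from langchain.prompts.loading import load_prompt

    prompt = PromptTemplate(input_variables=["topic"], template="Tell me about {topic}.")
    prompt.save("my_prompt.json")             # serializes with "_type": "prompt"
    reloaded = load_prompt("my_prompt.json")  # dispatches via type_to_loader_dict
    assert reloaded.format(topic="searx") == "Tell me about searx."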
Source code for langchain.prompts.prompt """Prompt schema definition.""" from __future__ import annotations from pathlib import Path from string import Formatter from typing import Any, Dict, List, Union from pydantic import Extra, root_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, _get_jinja2_variables_from_template, check_valid_template, ) [docs]class PromptTemplate(StringPromptTemplate): """Schema to represent a prompt for an LLM. Example: .. code-block:: python from langchain import PromptTemplate prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}") """ input_variables: List[str] """A list of the names of the variables the prompt template expects.""" template: str """The prompt template.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "prompt" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs)
""" kwargs = self._merge_partial_and_user_variables(**kwargs) return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs) @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that template and input variables are consistent.""" if values["validate_template"]: all_inputs = values["input_variables"] + list(values["partial_variables"]) check_valid_template( values["template"], values["template_format"], all_inputs ) return values [docs] @classmethod def from_examples( cls, examples: List[str], suffix: str, input_variables: List[str], example_separator: str = "\n\n", prefix: str = "", **kwargs: Any, ) -> PromptTemplate: """Take examples in list format with prefix and suffix to create a prompt. Intended to be used as a way to dynamically create a prompt from examples. Args: examples: List of examples to use in the prompt. suffix: String to go after the list of examples. Should generally set up the user's input. input_variables: A list of variable names the final prompt template will expect. example_separator: The separator to use in between examples. Defaults to two new line characters. prefix: String that should go before any examples. Generally includes examples. Default to an empty string. Returns: The final prompt generated. """ template = example_separator.join([prefix, *examples, suffix]) return cls(input_variables=input_variables, template=template, **kwargs) [docs] @classmethod def from_file(
        cls, template_file: Union[str, Path], input_variables: List[str], **kwargs: Any
    ) -> PromptTemplate:
        """Load a prompt from a file.

        Args:
            template_file: The path to the file containing the prompt template.
            input_variables: A list of variable names the final prompt template
                will expect.

        Returns:
            The prompt loaded from the file.
        """
        with open(str(template_file), "r") as f:
            template = f.read()
        return cls(input_variables=input_variables, template=template, **kwargs)

[docs]    @classmethod
    def from_template(cls, template: str, **kwargs: Any) -> PromptTemplate:
        """Load a prompt template from a template."""
        if "template_format" in kwargs and kwargs["template_format"] == "jinja2":
            # Get the variables for the template
            input_variables = _get_jinja2_variables_from_template(template)
        else:
            input_variables = {
                v for _, v, _, _ in Formatter().parse(template) if v is not None
            }
        return cls(
            input_variables=list(sorted(input_variables)), template=template, **kwargs
        )

# For backwards compatibility.
Prompt = PromptTemplate
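A short usage sketch of `from_template` and `format` (illustrative): input variables are inferred from the f-string template and returned sorted.

    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate.from_template("Translate {text} into {language}.")
    prompt.input_variables                          # ['language', 'text']
    prompt.format(text="hello", language="French")  # 'Translate hello into French.'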
Source code for langchain.prompts.chat """Chat prompt template.""" from __future__ import annotations from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Callable, List, Sequence, Tuple, Type, Union from pydantic import BaseModel, Field from langchain.memory.buffer import get_buffer_string from langchain.prompts.base import BasePromptTemplate, StringPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( AIMessage, BaseMessage, ChatMessage, HumanMessage, PromptValue, SystemMessage, ) class BaseMessagePromptTemplate(BaseModel, ABC): @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """To messages.""" @property @abstractmethod def input_variables(self) -> List[str]: """Input variables for this prompt template.""" [docs]class MessagesPlaceholder(BaseMessagePromptTemplate): """Prompt template that assumes variable is already list of messages.""" variable_name: str [docs] def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """To a BaseMessage.""" value = kwargs[self.variable_name] if not isinstance(value, list): raise ValueError( f"variable {self.variable_name} should be a list of base messages, " f"got {value}" ) for v in value: if not isinstance(v, BaseMessage): raise ValueError( f"variable {self.variable_name} should be a list of base messages," f" got {value}" ) return value @property def input_variables(self) -> List[str]: """Input variables for this prompt template."""
        return [self.variable_name]

class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
    prompt: StringPromptTemplate
    additional_kwargs: dict = Field(default_factory=dict)

    @classmethod
    def from_template(cls, template: str, **kwargs: Any) -> BaseMessagePromptTemplate:
        prompt = PromptTemplate.from_template(template)
        return cls(prompt=prompt, **kwargs)

    @abstractmethod
    def format(self, **kwargs: Any) -> BaseMessage:
        """To a BaseMessage."""

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        return [self.format(**kwargs)]

    @property
    def input_variables(self) -> List[str]:
        return self.prompt.input_variables

class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate):
    role: str

    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return ChatMessage(
            content=text, role=self.role, additional_kwargs=self.additional_kwargs
        )

class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return HumanMessage(content=text, additional_kwargs=self.additional_kwargs)

class AIMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return AIMessage(content=text, additional_kwargs=self.additional_kwargs)

class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return SystemMessage(content=text, additional_kwargs=self.additional_kwargs)

class ChatPromptValue(PromptValue):
    messages: List[BaseMessage]

    def to_string(self) -> str:
        """Return prompt as string."""
        return get_buffer_string(self.messages)

    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return self.messages

[docs]class BaseChatPromptTemplate(BasePromptTemplate, ABC):
[docs]    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

[docs]    def format_prompt(self, **kwargs: Any) -> PromptValue:
        messages = self.format_messages(**kwargs)
        return ChatPromptValue(messages=messages)

[docs]    @abstractmethod
    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """Format kwargs into a list of messages."""

[docs]class ChatPromptTemplate(BaseChatPromptTemplate, ABC):
    input_variables: List[str]
    messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]

    @classmethod
    def from_role_strings(
        cls, string_messages: List[Tuple[str, str]]
    ) -> ChatPromptTemplate:
        messages = [
            ChatMessagePromptTemplate(
                prompt=PromptTemplate.from_template(template), role=role
            )
            for role, template in string_messages
        ]
        return cls.from_messages(messages)

    @classmethod
    def from_strings(
        cls, string_messages: List[Tuple[Type[BaseMessagePromptTemplate], str]]
    ) -> ChatPromptTemplate:
        messages = [
            role(prompt=PromptTemplate.from_template(template))
            for role, template in string_messages
        ]
        return cls.from_messages(messages)

    @classmethod
    def from_messages(
        cls, messages: Sequence[Union[BaseMessagePromptTemplate, BaseMessage]]
    ) -> ChatPromptTemplate:
        input_vars = set()
        for message in messages:
            if isinstance(message, BaseMessagePromptTemplate):
                input_vars.update(message.input_variables)
        return cls(input_variables=list(input_vars), messages=messages)

[docs]    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

[docs]    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        result = []
        for message_template in self.messages:
            if isinstance(message_template, BaseMessage):
                result.extend([message_template])
            elif isinstance(message_template, BaseMessagePromptTemplate):
                rel_params = {
                    k: v
                    for k, v in kwargs.items()
                    if k in message_template.input_variables
                }
                message = message_template.format_messages(**rel_params)
                result.extend(message)
            else:
                raise ValueError(f"Unexpected input: {message_template}")
        return result

[docs]    def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate:
        raise NotImplementedError

    @property
    def _prompt_type(self) -> str:
        raise NotImplementedError

[docs]    def save(self, file_path: Union[Path, str]) -> None:
        raise NotImplementedError
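A usage sketch of the chat template classes above (illustrative): build per-role message templates, combine them with `from_messages`, then render concrete messages.

    from langchain.prompts.chat import (
        ChatPromptTemplate,
        HumanMessagePromptTemplate,
        SystemMessagePromptTemplate,
    )

    system = SystemMessagePromptTemplate.from_template("You are a {role} assistant.")
    human = HumanMessagePromptTemplate.from_template("{question}")
    chat_prompt = ChatPromptTemplate.from_messages([system, human])

    chat_prompt.format_messages(role="helpful", question="What is LangChain?")
    # [SystemMessage(content='You are a helpful assistant.'),
    #  HumanMessage(content='What is LangChain?')]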
Source code for langchain.prompts.base """BasePrompt schema definition.""" from __future__ import annotations import json from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Union import yaml from pydantic import BaseModel, Extra, Field, root_validator from langchain.formatting import formatter from langchain.schema import BaseMessage, BaseOutputParser, HumanMessage, PromptValue def jinja2_formatter(template: str, **kwargs: Any) -> str: """Format a template using jinja2.""" try: from jinja2 import Template except ImportError: raise ImportError( "jinja2 not installed, which is needed to use the jinja2_formatter. " "Please install it with `pip install jinja2`." ) return Template(template).render(**kwargs) def validate_jinja2(template: str, input_variables: List[str]) -> None: input_variables_set = set(input_variables) valid_variables = _get_jinja2_variables_from_template(template) missing_variables = valid_variables - input_variables_set extra_variables = input_variables_set - valid_variables error_message = "" if missing_variables: error_message += f"Missing variables: {missing_variables} " if extra_variables: error_message += f"Extra variables: {extra_variables}" if error_message: raise KeyError(error_message.strip()) def _get_jinja2_variables_from_template(template: str) -> Set[str]: try: from jinja2 import Environment, meta except ImportError: raise ImportError( "jinja2 not installed, which is needed to use the jinja2_formatter. "
"jinja2 not installed, which is needed to use the jinja2_formatter. " "Please install it with `pip install jinja2`." ) env = Environment() ast = env.parse(template) variables = meta.find_undeclared_variables(ast) return variables DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = { "f-string": formatter.format, "jinja2": jinja2_formatter, } DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = { "f-string": formatter.validate_input_variables, "jinja2": validate_jinja2, } def check_valid_template( template: str, template_format: str, input_variables: List[str] ) -> None: """Check that template string is valid.""" if template_format not in DEFAULT_FORMATTER_MAPPING: valid_formats = list(DEFAULT_FORMATTER_MAPPING) raise ValueError( f"Invalid template format. Got `{template_format}`;" f" should be one of {valid_formats}" ) try: validator_func = DEFAULT_VALIDATOR_MAPPING[template_format] validator_func(template, input_variables) except KeyError as e: raise ValueError( "Invalid prompt schema; check for mismatched or missing input parameters. " + str(e) ) class StringPromptValue(PromptValue): text: str def to_string(self) -> str: """Return prompt as string.""" return self.text def to_messages(self) -> List[BaseMessage]: """Return prompt as messages.""" return [HumanMessage(content=self.text)] [docs]class BasePromptTemplate(BaseModel, ABC): """Base class for all prompt templates, returning a prompt."""
"""Base class for all prompt templates, returning a prompt.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" output_parser: Optional[BaseOutputParser] = None """How to parse the output of calling an LLM on this formatted prompt.""" partial_variables: Mapping[str, Union[str, Callable[[], str]]] = Field( default_factory=dict ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True [docs] @abstractmethod def format_prompt(self, **kwargs: Any) -> PromptValue: """Create Chat Messages.""" @root_validator() def validate_variable_names(cls, values: Dict) -> Dict: """Validate variable names do not include restricted names.""" if "stop" in values["input_variables"]: raise ValueError( "Cannot have an input variable named 'stop', as it is used internally," " please rename." ) if "stop" in values["partial_variables"]: raise ValueError( "Cannot have an partial variable named 'stop', as it is used " "internally, please rename." ) overall = set(values["input_variables"]).intersection( values["partial_variables"] ) if overall: raise ValueError( f"Found overlapping input and partial variables: {overall}" ) return values [docs] def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate: """Return a partial of the prompt template.""" prompt_dict = self.__dict__.copy() prompt_dict["input_variables"] = list( set(self.input_variables).difference(kwargs)
prompt_dict["input_variables"] = list( set(self.input_variables).difference(kwargs) ) prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs} return type(self)(**prompt_dict) def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]: # Get partial params: partial_kwargs = { k: v if isinstance(v, str) else v() for k, v in self.partial_variables.items() } return {**partial_kwargs, **kwargs} [docs] @abstractmethod def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ @property def _prompt_type(self) -> str: """Return the prompt type key.""" raise NotImplementedError [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of prompt.""" prompt_dict = super().dict(**kwargs) prompt_dict["_type"] = self._prompt_type return prompt_dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the prompt. Args: file_path: Path to directory to save prompt to. Example: .. code-block:: python prompt.save(file_path="path/prompt.yaml") """ if self.partial_variables: raise ValueError("Cannot save prompt with partial variables.") # Convert file to Path object. if isinstance(file_path, str):
            save_path = Path(file_path)
        else:
            save_path = file_path

        directory_path = save_path.parent
        directory_path.mkdir(parents=True, exist_ok=True)

        # Fetch dictionary to save
        prompt_dict = self.dict()

        if save_path.suffix == ".json":
            with open(file_path, "w") as f:
                json.dump(prompt_dict, f, indent=4)
        elif save_path.suffix == ".yaml":
            with open(file_path, "w") as f:
                yaml.dump(prompt_dict, f, default_flow_style=False)
        else:
            raise ValueError(f"{save_path} must be json or yaml")

[docs]class StringPromptTemplate(BasePromptTemplate, ABC):
    """String prompt should expose the format method, returning a prompt."""

[docs]    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""
        return StringPromptValue(text=self.format(**kwargs))
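A sketch of `partial` (illustrative): binding a variable up front removes it from `input_variables`, and `_merge_partial_and_user_variables` re-merges it at format time.

    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate(
        input_variables=["adjective", "noun"],
        template="Tell me an {adjective} story about a {noun}.",
    )
    partial_prompt = prompt.partial(adjective="exciting")
    partial_prompt.input_variables       # ['noun']
    partial_prompt.format(noun="robot")  # 'Tell me an exciting story about a robot.'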
Source code for langchain.prompts.few_shot_with_templates """Prompt template that contains few shot examples.""" from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate [docs]class FewShotPromptWithTemplates(StringPromptTemplate): """Prompt template that contains few shot examples.""" examples: Optional[List[dict]] = None """Examples to format into the prompt. Either this or example_selector should be provided.""" example_selector: Optional[BaseExampleSelector] = None """ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided.""" example_prompt: PromptTemplate """PromptTemplate used to format an individual example.""" suffix: StringPromptTemplate """A PromptTemplate to put after the examples.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" example_separator: str = "\n\n" """String separator used to join the prefix, the examples, and suffix.""" prefix: Optional[StringPromptTemplate] = None """A PromptTemplate to put before the examples.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @root_validator(pre=True) def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None)
examples = values.get("examples", None) example_selector = values.get("example_selector", None) if examples and example_selector: raise ValueError( "Only one of 'examples' and 'example_selector' should be provided" ) if examples is None and example_selector is None: raise ValueError( "One of 'examples' and 'example_selector' should be provided" ) return values @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix and input variables are consistent.""" if values["validate_template"]: input_variables = values["input_variables"] expected_input_variables = set(values["suffix"].input_variables) expected_input_variables |= set(values["partial_variables"]) if values["prefix"] is not None: expected_input_variables |= set(values["prefix"].input_variables) missing_vars = expected_input_variables.difference(input_variables) if missing_vars: raise ValueError( f"Got input_variables={input_variables}, but based on " f"prefix/suffix expected {expected_input_variables}" ) return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def _get_examples(self, **kwargs: Any) -> List[dict]: if self.examples is not None: return self.examples elif self.example_selector is not None: return self.example_selector.select_examples(kwargs) else: raise ValueError [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        # Get the examples to use.
        examples = self._get_examples(**kwargs)
        # Format the examples.
        example_strings = [self.example_prompt.format(**example) for example in examples]
        # Create the overall prefix.
        if self.prefix is None:
            prefix = ""
        else:
            prefix_kwargs = {
                k: v for k, v in kwargs.items() if k in self.prefix.input_variables
            }
            for k in prefix_kwargs.keys():
                kwargs.pop(k)
            prefix = self.prefix.format(**prefix_kwargs)

        # Create the overall suffix
        suffix_kwargs = {
            k: v for k, v in kwargs.items() if k in self.suffix.input_variables
        }
        for k in suffix_kwargs.keys():
            kwargs.pop(k)
        suffix = self.suffix.format(
            **suffix_kwargs,
        )

        pieces = [prefix, *example_strings, suffix]
        template = self.example_separator.join([piece for piece in pieces if piece])
        # Format the template with the input variables.
        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "few_shot_with_templates"

[docs]    def dict(self, **kwargs: Any) -> Dict:
        """Return a dictionary of the prompt."""
        if self.example_selector:
            raise ValueError("Saving an example selector is not currently supported")
        return super().dict(**kwargs)
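An illustrative instantiation (all values hypothetical): here the prefix and suffix are themselves PromptTemplates, which is what distinguishes this class from the plain FewShotPromptTemplate shown next.

    from langchain.prompts import PromptTemplate
    from langchain.prompts.few_shot_with_templates import FewShotPromptWithTemplates

    example_prompt = PromptTemplate.from_template("Q: {question}\nA: {answer}")
    prompt = FewShotPromptWithTemplates(
        examples=[{"question": "2+2?", "answer": "4"}],
        example_prompt=example_prompt,
        prefix=PromptTemplate.from_template("Answer tersely."),
        suffix=PromptTemplate.from_template("Q: {input}\nA:"),
        input_variables=["input"],
    )
    print(prompt.format(input="3+3?"))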
Source code for langchain.prompts.few_shot """Prompt template that contains few shot examples.""" from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, check_valid_template, ) from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate [docs]class FewShotPromptTemplate(StringPromptTemplate): """Prompt template that contains few shot examples.""" examples: Optional[List[dict]] = None """Examples to format into the prompt. Either this or example_selector should be provided.""" example_selector: Optional[BaseExampleSelector] = None """ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided.""" example_prompt: PromptTemplate """PromptTemplate used to format an individual example.""" suffix: str """A prompt template string to put after the examples.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" example_separator: str = "\n\n" """String separator used to join the prefix, the examples, and suffix.""" prefix: str = "" """A prompt template string to put before the examples.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @root_validator(pre=True) def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided."""
"""Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None) example_selector = values.get("example_selector", None) if examples and example_selector: raise ValueError( "Only one of 'examples' and 'example_selector' should be provided" ) if examples is None and example_selector is None: raise ValueError( "One of 'examples' and 'example_selector' should be provided" ) return values @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix and input variables are consistent.""" if values["validate_template"]: check_valid_template( values["prefix"] + values["suffix"], values["template_format"], values["input_variables"] + list(values["partial_variables"]), ) return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def _get_examples(self, **kwargs: Any) -> List[dict]: if self.examples is not None: return self.examples elif self.example_selector is not None: return self.example_selector.select_examples(kwargs) else: raise ValueError [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) # Get the examples to use.
        examples = self._get_examples(**kwargs)
        # Format the examples.
        example_strings = [self.example_prompt.format(**example) for example in examples]
        # Create the overall template.
        pieces = [self.prefix, *example_strings, self.suffix]
        template = self.example_separator.join([piece for piece in pieces if piece])
        # Format the template with the input variables.
        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "few_shot"

[docs]    def dict(self, **kwargs: Any) -> Dict:
        """Return a dictionary of the prompt."""
        if self.example_selector:
            raise ValueError("Saving an example selector is not currently supported")
        return super().dict(**kwargs)
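An illustrative instantiation (values hypothetical): examples are rendered with `example_prompt`, then joined with the prefix and suffix before final formatting.

    from langchain.prompts import FewShotPromptTemplate, PromptTemplate

    example_prompt = PromptTemplate.from_template("Input: {word}\nAntonym: {antonym}")
    prompt = FewShotPromptTemplate(
        examples=[
            {"word": "happy", "antonym": "sad"},
            {"word": "tall", "antonym": "short"},
        ],
        example_prompt=example_prompt,
        prefix="Give the antonym of every input.",
        suffix="Input: {input}\nAntonym:",
        input_variables=["input"],
    )
    print(prompt.format(input="fast"))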
Source code for langchain.prompts.example_selector.length_based """Select examples based on length.""" import re from typing import Callable, Dict, List from pydantic import BaseModel, validator from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate def _get_length_based(text: str) -> int: return len(re.split("\n| ", text)) [docs]class LengthBasedExampleSelector(BaseExampleSelector, BaseModel): """Select examples based on length.""" examples: List[dict] """A list of the examples that the prompt template expects.""" example_prompt: PromptTemplate """Prompt template used to format the examples.""" get_text_length: Callable[[str], int] = _get_length_based """Function to measure prompt length. Defaults to word count.""" max_length: int = 2048 """Max length for the prompt, beyond which examples are cut.""" example_text_lengths: List[int] = [] #: :meta private: [docs] def add_example(self, example: Dict[str, str]) -> None: """Add new example to list.""" self.examples.append(example) string_example = self.example_prompt.format(**example) self.example_text_lengths.append(self.get_text_length(string_example)) @validator("example_text_lengths", always=True) def calculate_example_text_lengths(cls, v: List[int], values: Dict) -> List[int]: """Calculate text lengths if they don't exist.""" # Check if text lengths were passed in if v: return v # If they were not, calculate them example_prompt = values["example_prompt"] get_text_length = values["get_text_length"]
        string_examples = [example_prompt.format(**eg) for eg in values["examples"]]
        return [get_text_length(eg) for eg in string_examples]

[docs]    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        """Select which examples to use based on the input lengths."""
        inputs = " ".join(input_variables.values())
        remaining_length = self.max_length - self.get_text_length(inputs)
        i = 0
        examples = []
        while remaining_length > 0 and i < len(self.examples):
            new_length = remaining_length - self.example_text_lengths[i]
            if new_length < 0:
                break
            else:
                examples.append(self.examples[i])
                remaining_length = new_length
            i += 1
        return examples
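An illustrative sketch (values hypothetical): with the default word-count measure, a longer input leaves a smaller budget, so fewer examples survive selection.

    from langchain.prompts.example_selector.length_based import (
        LengthBasedExampleSelector,
    )
    from langchain.prompts.prompt import PromptTemplate

    example_prompt = PromptTemplate.from_template("Input: {word}\nAntonym: {antonym}")
    selector = LengthBasedExampleSelector(
        examples=[
            {"word": "happy", "antonym": "sad"},
            {"word": "energetic", "antonym": "lethargic"},
        ],
        example_prompt=example_prompt,
        max_length=12,  # budget in words, shared by the input and the examples
    )
    # The 5-word input leaves room for only the first 4-word example.
    selector.select_examples({"input": "big and huge and massive"})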
Source code for langchain.prompts.example_selector.semantic_similarity """Example selector that selects examples based on SemanticSimilarity.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Type from pydantic import BaseModel, Extra from langchain.embeddings.base import Embeddings from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.vectorstores.base import VectorStore def sorted_values(values: Dict[str, str]) -> List[Any]: """Return a list of values in dict sorted by key.""" return [values[val] for val in sorted(values)] [docs]class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel): """Example selector that selects examples based on SemanticSimilarity.""" vectorstore: VectorStore """VectorStore than contains information about examples.""" k: int = 4 """Number of examples to select.""" example_keys: Optional[List[str]] = None """Optional keys to filter examples to.""" input_keys: Optional[List[str]] = None """Optional keys to filter input to. If provided, the search is based on the input variables instead of all variables.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True [docs] def add_example(self, example: Dict[str, str]) -> str: """Add new example to vectorstore.""" if self.input_keys: string_example = " ".join( sorted_values({key: example[key] for key in self.input_keys}) ) else: string_example = " ".join(sorted_values(example)) ids = self.vectorstore.add_texts([string_example], metadatas=[example]) return ids[0]
[docs]    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        """Select which examples to use based on semantic similarity."""
        # Get the docs with the highest similarity.
        if self.input_keys:
            input_variables = {key: input_variables[key] for key in self.input_keys}
        query = " ".join(sorted_values(input_variables))
        example_docs = self.vectorstore.similarity_search(query, k=self.k)
        # Get the examples from the metadata.
        # This assumes that examples are stored in metadata.
        examples = [dict(e.metadata) for e in example_docs]
        # If example keys are provided, filter examples to those keys.
        if self.example_keys:
            examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
        return examples

[docs]    @classmethod
    def from_examples(
        cls,
        examples: List[dict],
        embeddings: Embeddings,
        vectorstore_cls: Type[VectorStore],
        k: int = 4,
        input_keys: Optional[List[str]] = None,
        **vectorstore_cls_kwargs: Any,
    ) -> SemanticSimilarityExampleSelector:
        """Create k-shot example selector using example list and embeddings.

        Reshuffles examples dynamically based on query similarity.

        Args:
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
            k: Number of examples to select
            input_keys: If provided, the search is based on the input variables
                instead of all variables.
            vectorstore_cls_kwargs: optional kwargs containing url for vector store

        Returns:
            The ExampleSelector instantiated, backed by a vector store.
        """
        if input_keys:
            string_examples = [
                " ".join(sorted_values({k: eg[k] for k in input_keys}))
                for eg in examples
            ]
        else:
            string_examples = [" ".join(sorted_values(eg)) for eg in examples]
        vectorstore = vectorstore_cls.from_texts(
            string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
        )
        return cls(vectorstore=vectorstore, k=k, input_keys=input_keys)

[docs]class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector):
    """ExampleSelector that selects examples based on Max Marginal Relevance.

    This was shown to improve performance in this paper:
    https://arxiv.org/pdf/2211.13892.pdf
    """

    fetch_k: int = 20
    """Number of examples to fetch to rerank."""

[docs]    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        """Select which examples to use based on semantic similarity."""
        # Get the docs with the highest similarity.
        if self.input_keys:
            input_variables = {key: input_variables[key] for key in self.input_keys}
        query = " ".join(sorted_values(input_variables))
        example_docs = self.vectorstore.max_marginal_relevance_search(
            query, k=self.k, fetch_k=self.fetch_k
        )
        # Get the examples from the metadata.
        # This assumes that examples are stored in metadata.
        examples = [dict(e.metadata) for e in example_docs]
        # If example keys are provided, filter examples to those keys.
        if self.example_keys:
            examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
        return examples

[docs]    @classmethod
    def from_examples(
        cls,
        examples: List[dict],
        embeddings: Embeddings,
        vectorstore_cls: Type[VectorStore],
        k: int = 4,
        input_keys: Optional[List[str]] = None,
        fetch_k: int = 20,
        **vectorstore_cls_kwargs: Any,
    ) -> MaxMarginalRelevanceExampleSelector:
        """Create k-shot example selector using example list and embeddings.

        Reshuffles examples dynamically based on query similarity.

        Args:
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
            k: Number of examples to select
            input_keys: If provided, the search is based on the input variables
                instead of all variables.
            vectorstore_cls_kwargs: optional kwargs containing url for vector store

        Returns:
            The ExampleSelector instantiated, backed by a vector store.
        """
        if input_keys:
            string_examples = [
                " ".join(sorted_values({k: eg[k] for k in input_keys}))
                for eg in examples
            ]
        else:
            string_examples = [" ".join(sorted_values(eg)) for eg in examples]
        vectorstore = vectorstore_cls.from_texts(
            string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
        )
        return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys)
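A hedged sketch of `from_examples` (not from the original page): FAISS and OpenAIEmbeddings are external dependencies and are assumed to be installed and configured (e.g. OPENAI_API_KEY set).

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.prompts.example_selector.semantic_similarity import (
        SemanticSimilarityExampleSelector,
    )
    from langchain.vectorstores import FAISS

    selector = SemanticSimilarityExampleSelector.from_examples(
        examples=[
            {"word": "happy", "antonym": "sad"},
            {"word": "tall", "antonym": "short"},
        ],
        embeddings=OpenAIEmbeddings(),
        vectorstore_cls=FAISS,
        k=1,
    )
    # Returns the stored example whose embedded text is closest to the query.
    selector.select_examples({"word": "joyful"})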
Source code for langchain.docstore.in_memory

"""Simple in memory docstore in the form of a dict."""
from typing import Dict, Union

from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document

[docs]class InMemoryDocstore(Docstore, AddableMixin):
    """Simple in memory docstore in the form of a dict."""

    def __init__(self, _dict: Dict[str, Document]):
        """Initialize with dict."""
        self._dict = _dict

[docs]    def add(self, texts: Dict[str, Document]) -> None:
        """Add texts to in memory dictionary."""
        overlapping = set(texts).intersection(self._dict)
        if overlapping:
            raise ValueError(f"Tried to add ids that already exist: {overlapping}")
        self._dict = dict(self._dict, **texts)

[docs]    def search(self, search: str) -> Union[str, Document]:
        """Search via direct lookup."""
        if search not in self._dict:
            return f"ID {search} not found."
        else:
            return self._dict[search]
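A quick usage sketch (illustrative):

    from langchain.docstore.document import Document
    from langchain.docstore.in_memory import InMemoryDocstore

    docstore = InMemoryDocstore({"1": Document(page_content="hello world")})
    docstore.add({"2": Document(page_content="goodbye")})
    docstore.search("2").page_content   # 'goodbye'
    docstore.search("3")                # 'ID 3 not found.'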
Source code for langchain.docstore.wikipedia

"""Wrapper around wikipedia API."""
from typing import Union

from langchain.docstore.base import Docstore
from langchain.docstore.document import Document

[docs]class Wikipedia(Docstore):
    """Wrapper around wikipedia API."""

    def __init__(self) -> None:
        """Check that wikipedia package is installed."""
        try:
            import wikipedia  # noqa: F401
        except ImportError:
            raise ValueError(
                "Could not import wikipedia python package. "
                "Please install it with `pip install wikipedia`."
            )

[docs]    def search(self, search: str) -> Union[str, Document]:
        """Try to search for wiki page.

        If page exists, return the page summary, and a PageWithLookups object.
        If page does not exist, return similar entries.
        """
        import wikipedia

        try:
            # Fetch the page once and reuse it for both the content and the url.
            page = wikipedia.page(search)
            result: Union[str, Document] = Document(
                page_content=page.content, metadata={"page": page.url}
            )
        except wikipedia.PageError:
            result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
        except wikipedia.DisambiguationError:
            result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
        return result
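A quick usage sketch (illustrative; requires `pip install wikipedia` and network access):

    from langchain.docstore.document import Document
    from langchain.docstore.wikipedia import Wikipedia

    docstore = Wikipedia()
    result = docstore.search("Python (programming language)")
    # Either a Document with the page content, or a "Could not find ..." string.
    print(result.page_content[:100] if isinstance(result, Document) else result)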
Source code for langchain.utilities.searx_search

"""Utility for using SearxNG meta search API.

SearxNG is a privacy-friendly free metasearch engine that aggregates results from
`multiple search engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and databases and
supports the `OpenSearch
<https://github.com/dewitt/opensearch/blob/master/opensearch-1-1-draft-6.md>`_
specification.

More details on the installation instructions `here. <../../ecosystem/searx.html>`_

For the search API refer to https://docs.searxng.org/dev/search_api.html

Quick Start
-----------

In order to use this utility you need to provide the searx host. This can be done
by passing the named parameter :attr:`searx_host <SearxSearchWrapper.searx_host>`
or exporting the environment variable SEARX_HOST.
Note: this is the only required parameter.

Then create a searx search instance like this:

.. code-block:: python

    from langchain.utilities import SearxSearchWrapper

    # when the host starts with `http` SSL is disabled and the connection
    # is assumed to be on a private network
    searx_host='http://self.hosted'

    search = SearxSearchWrapper(searx_host=searx_host)

You can now use the ``search`` instance to query the searx API.

Searching
---------

Use the :meth:`run() <SearxSearchWrapper.run>` and
:meth:`results() <SearxSearchWrapper.results>` methods to query the searx API.
Other methods are available for convenience.
:class:`SearxResults` is a convenience wrapper around the raw json result.

Example usage of the ``run`` method to make a search:

.. code-block:: python

    s.run(query="what is the best search engine?")

Engine Parameters
-----------------

You can pass any `accepted searx search API
<https://docs.searxng.org/dev/search_api.html>`_ parameters to the
:py:class:`SearxSearchWrapper` instance.

In the following example we are using the
:attr:`engines <SearxSearchWrapper.engines>` and the ``language`` parameters:

.. code-block:: python

    # assuming the searx host is set as above or exported as an env variable
    s = SearxSearchWrapper(engines=['google', 'bing'],
                           language='es')

Search Tips
-----------

Searx offers a special
`search syntax <https://docs.searxng.org/user/index.html#search-syntax>`_
that can also be used instead of passing engine parameters.

For example the following query:

.. code-block:: python

    s = SearxSearchWrapper("langchain library", engines=['github'])

    # can also be written as:
    s = SearxSearchWrapper("langchain library !github")

    # or even:
    s = SearxSearchWrapper("langchain library !gh")

In some situations you might want to pass an extra string to the search query.
For example when the `run()` method is called by an agent. The search suffix can
also be used as a way to pass extra parameters to searx or the underlying search
engines.

.. code-block:: python

    # select the github engine and pass the search suffix
    s = SearxSearchWrapper("langchain library", query_suffix="!gh")

    s = SearxSearchWrapper("langchain library")
    # or select github via the conventional google search syntax
    s.run("large language models", query_suffix="site:github.com")

*NOTE*: A search suffix can be defined on both the instance and the method level.
The resulting query will be the concatenation of the two with the former taking
precedence.

See `SearxNG Configured Engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and
`SearxNG Search Syntax <https://docs.searxng.org/user/index.html#id1>`_
for more details.

Notes
-----

This wrapper is based on the SearxNG fork https://github.com/searxng/searxng
which is better maintained than the original Searx project and offers more
features.

Public searxNG instances often use a rate limiter for API usage, so you might
want to use a self hosted instance and disable the rate limiter.

If you are self-hosting an instance you can customize the rate limiter for your
own network as described
`here <https://github.com/searxng/searxng/pull/2129>`_.

For a list of public SearxNG instances see https://searx.space/
"""

import json
from typing import Any, Dict, List, Optional

import aiohttp
import requests
from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator

from langchain.utils import get_from_dict_or_env

def _get_default_params() -> dict:
    return {"language": "en", "format": "json"}
return {"language": "en", "format": "json"} [docs]class SearxResults(dict): """Dict like wrapper around search api results.""" _data = "" def __init__(self, data: str): """Take a raw result from Searx and make it into a dict like object.""" json_data = json.loads(data) super().__init__(json_data) self.__dict__ = self def __str__(self) -> str: """Text representation of searx result.""" return self._data @property def results(self) -> Any: """Silence mypy for accessing this field. :meta private: """ return self.get("results") @property def answers(self) -> Any: """Helper accessor on the json result.""" return self.get("answers") [docs]class SearxSearchWrapper(BaseModel): """Wrapper for Searx API. To use you need to provide the searx host by passing the named parameter ``searx_host`` or exporting the environment variable ``SEARX_HOST``. In some situations you might want to disable SSL verification, for example if you are running searx locally. You can do this by passing the named parameter ``unsecure``. You can also pass the host url scheme as ``http`` to disable SSL. Example: .. code-block:: python from langchain.utilities import SearxSearchWrapper searx = SearxSearchWrapper(searx_host="http://localhost:8888") Example with SSL disabled: .. code-block:: python from langchain.utilities import SearxSearchWrapper
            # note the unsecure parameter is not needed if you pass the url scheme as
            # http
            searx = SearxSearchWrapper(searx_host="http://localhost:8888",
                                       unsecure=True)
    """

    _result: SearxResults = PrivateAttr()
    searx_host: str = ""
    unsecure: bool = False
    params: dict = Field(default_factory=_get_default_params)
    headers: Optional[dict] = None
    engines: Optional[List[str]] = []
    categories: Optional[List[str]] = []
    query_suffix: Optional[str] = ""
    k: int = 10
    aiosession: Optional[Any] = None

    @validator("unsecure")
    def disable_ssl_warnings(cls, v: bool) -> bool:
        """Disable SSL warnings."""
        if v:
            # requests.urllib3.disable_warnings()
            try:
                import urllib3

                urllib3.disable_warnings()
            except ImportError as e:
                print(e)
        return v

    @root_validator()
    def validate_params(cls, values: Dict) -> Dict:
        """Validate that custom searx params are merged with default ones."""
        user_params = values["params"]
        default = _get_default_params()
        values["params"] = {**default, **user_params}

        engines = values.get("engines")
        if engines:
            values["params"]["engines"] = ",".join(engines)

        categories = values.get("categories")
        if categories:
            values["params"]["categories"] = ",".join(categories)
        searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST")
        if not searx_host.startswith("http"):
            print(
                f"Warning: missing the url scheme on host! "
                f"Assuming secure https://{searx_host}"
            )
            searx_host = "https://" + searx_host
        elif searx_host.startswith("http://"):
            values["unsecure"] = True
            cls.disable_ssl_warnings(True)
        values["searx_host"] = searx_host

        return values

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def _searx_api_query(self, params: dict) -> SearxResults:
        """Actual request to searx API."""
        raw_result = requests.get(
            self.searx_host,
            headers=self.headers,
            params=params,
            verify=not self.unsecure,
        )
        # test if http result is ok
        if not raw_result.ok:
            raise ValueError("Searx API returned an error: ", raw_result.text)
        res = SearxResults(raw_result.text)
        self._result = res
        return res

    async def _asearx_api_query(self, params: dict) -> SearxResults:
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    self.searx_host,
                    headers=self.headers,
                    params=params,
                    ssl=(lambda: False if self.unsecure else None)(),
                ) as response:
                    if not response.ok:
                        raise ValueError("Searx API returned an error: ", response.text)
                    result = SearxResults(await response.text())
                    self._result = result
        else:
            async with self.aiosession.get(
                self.searx_host,
                headers=self.headers,
                params=params,
                verify=not self.unsecure,
            ) as response:
                if not response.ok:
                    raise ValueError("Searx API returned an error: ", response.text)
                result = SearxResults(await response.text())
                self._result = result

        return result

[docs]    def run(
        self,
        query: str,
        engines: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        query_suffix: Optional[str] = "",
        **kwargs: Any,
    ) -> str:
        """Run query through Searx API and parse results.

        You can pass any other params to the searx query API.

        Args:
            query: The query to search for.
            query_suffix: Extra suffix appended to the query.
            engines: List of engines to use for the query.
            categories: List of categories to use for the query.
            **kwargs: extra parameters to pass to the searx API.

        Returns:
            str: The result of the query.

        Raises:
            ValueError: If an error occurred with the query.

        Example:
            This will make a query to the qwant engine:

            .. code-block:: python

                from langchain.utilities import SearxSearchWrapper
                searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant") # the same result can be achieved using the `!` syntax of searx # to select the engine using `query_suffix` searx.run("what is the weather in France ?", query_suffix="!qwant") """ _params = { "q": query, } params = {**self.params, **_params, **kwargs} if self.query_suffix and len(self.query_suffix) > 0: params["q"] += " " + self.query_suffix if isinstance(query_suffix, str) and len(query_suffix) > 0: params["q"] += " " + query_suffix if isinstance(engines, list) and len(engines) > 0: params["engines"] = ",".join(engines) if isinstance(categories, list) and len(categories) > 0: params["categories"] = ",".join(categories) res = self._searx_api_query(params) if len(res.answers) > 0: toret = res.answers[0] # only return the content of the results list elif len(res.results) > 0: toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]]) else: toret = "No good search result found" return toret [docs] async def arun( self, query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = "", **kwargs: Any, ) -> str: """Asynchronously version of `run`."""
        _params = {
            "q": query,
        }
        params = {**self.params, **_params, **kwargs}
        if self.query_suffix and len(self.query_suffix) > 0:
            params["q"] += " " + self.query_suffix
        if isinstance(query_suffix, str) and len(query_suffix) > 0:
            params["q"] += " " + query_suffix
        if isinstance(engines, list) and len(engines) > 0:
            params["engines"] = ",".join(engines)
        res = await self._asearx_api_query(params)

        if len(res.answers) > 0:
            toret = res.answers[0]
        # only return the content of the results list
        elif len(res.results) > 0:
            toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]])
        else:
            toret = "No good search result found"

        return toret

[docs]    def results(
        self,
        query: str,
        num_results: int,
        engines: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        query_suffix: Optional[str] = "",
        **kwargs: Any,
    ) -> List[Dict]:
        """Run query through Searx API and returns the results with metadata.

        Args:
            query: The query to search for.
            query_suffix: Extra suffix appended to the query.
            num_results: Limit the number of results to return.
            engines: List of engines to use for the query.
            categories: List of categories to use for the query.
            **kwargs: extra parameters to pass to the searx API.

        Returns:
            Dict with the following keys:
            {
                snippet: The description of the result.
                title: The title of the result.
                link: The link to the result.
                engines: The engines used for the result.
                category: Searx category of the result.
            }
        """
        _params = {
            "q": query,
        }
        params = {**self.params, **_params, **kwargs}
        if self.query_suffix and len(self.query_suffix) > 0:
            params["q"] += " " + self.query_suffix
        if isinstance(query_suffix, str) and len(query_suffix) > 0:
            params["q"] += " " + query_suffix
        if isinstance(engines, list) and len(engines) > 0:
            params["engines"] = ",".join(engines)
        if isinstance(categories, list) and len(categories) > 0:
            params["categories"] = ",".join(categories)

        results = self._searx_api_query(params).results[:num_results]
        if len(results) == 0:
            return [{"Result": "No good Search Result was found"}]

        return [
            {
                "snippet": result.get("content", ""),
                "title": result["title"],
                "link": result["url"],
                "engines": result["engines"],
                "category": result["category"],
            }
            for result in results
        ]

[docs]    async def aresults(
        self,
        query: str,
        num_results: int,
https://python.langchain.com/en/latest/_modules/langchain/utilities/searx_search.html
65ca738cc066-10
self, query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = "", **kwargs: Any, ) -> List[Dict]: """Asynchronously query with JSON results. Uses aiohttp. See `results` for more info. """ _params = { "q": query, } params = {**self.params, **_params, **kwargs} if self.query_suffix and len(self.query_suffix) > 0: params["q"] += " " + self.query_suffix if isinstance(query_suffix, str) and len(query_suffix) > 0: params["q"] += " " + query_suffix if isinstance(engines, list) and len(engines) > 0: params["engines"] = ",".join(engines) results = (await self._asearx_api_query(params)).results[:num_results] if len(results) == 0: return [{"Result": "No good Search Result was found"}] return [ { "snippet": result.get("content", ""), "title": result["title"], "link": result["url"], "engines": result["engines"], "category": result["category"], } for result in results ]
https://python.langchain.com/en/latest/_modules/langchain/utilities/searx_search.html
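A minimal usage sketch for the wrapper above. The host URL is a placeholder for a self-hosted searx/SearxNG instance with its JSON API enabled, and the "it" category is one of searx's built-in categories:

.. code-block:: python

    from langchain.utilities.searx_search import SearxSearchWrapper

    # Placeholder host: point this at your own searx instance.
    searx = SearxSearchWrapper(searx_host="http://127.0.0.1:8888", k=5)

    # `run` returns an instant answer when searx provides one, otherwise the
    # concatenated content of the top `k` results.
    print(searx.run("what is a large language model?"))

    # `results` returns metadata dictionaries (snippet, title, link, engines,
    # category), here restricted to searx's "it" category.
    for hit in searx.results("langchain python", num_results=3, categories=["it"]):
        print(hit["title"], "->", hit["link"])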
9f9a4dbc55dc-0
Source code for langchain.utilities.google_serper """Util that calls Google Search using the Serper.dev API.""" from typing import Dict, Optional import requests from pydantic.class_validators import root_validator from pydantic.main import BaseModel from langchain.utils import get_from_dict_or_env [docs]class GoogleSerperAPIWrapper(BaseModel): """Wrapper around the Serper.dev Google Search API. You can create a free API key at https://serper.dev. To use, you should have the environment variable ``SERPER_API_KEY`` set with your API key, or pass `serper_api_key` as a named parameter to the constructor. Example: .. code-block:: python from langchain import GoogleSerperAPIWrapper google_serper = GoogleSerperAPIWrapper() """ k: int = 10 gl: str = "us" hl: str = "en" serper_api_key: Optional[str] = None @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" serper_api_key = get_from_dict_or_env( values, "serper_api_key", "SERPER_API_KEY" ) values["serper_api_key"] = serper_api_key return values [docs] def run(self, query: str) -> str: """Run query through GoogleSearch and parse result.""" results = self._google_serper_search_results(query, gl=self.gl, hl=self.hl) return self._parse_results(results) def _parse_results(self, results: dict) -> str: snippets = [] if results.get("answerBox"):
https://python.langchain.com/en/latest/_modules/langchain/utilities/google_serper.html
9f9a4dbc55dc-1
snippets = [] if results.get("answerBox"): answer_box = results.get("answerBox", {}) if answer_box.get("answer"): return answer_box.get("answer") elif answer_box.get("snippet"): return answer_box.get("snippet").replace("\n", " ") elif answer_box.get("snippetHighlighted"): return ", ".join(answer_box.get("snippetHighlighted")) if results.get("knowledgeGraph"): kg = results.get("knowledgeGraph", {}) title = kg.get("title") entity_type = kg.get("type") if entity_type: snippets.append(f"{title}: {entity_type}.") description = kg.get("description") if description: snippets.append(description) for attribute, value in kg.get("attributes", {}).items(): snippets.append(f"{title} {attribute}: {value}.") for result in results["organic"][: self.k]: if "snippet" in result: snippets.append(result["snippet"]) for attribute, value in result.get("attributes", {}).items(): snippets.append(f"{attribute}: {value}.") if len(snippets) == 0: return "No good Google Search Result was found" return " ".join(snippets) def _google_serper_search_results(self, search_term: str, gl: str, hl: str) -> dict: headers = { "X-API-KEY": self.serper_api_key or "", "Content-Type": "application/json", } params = {"q": search_term, "gl": gl, "hl": hl} response = requests.post( "https://google.serper.dev/search", headers=headers, params=params ) response.raise_for_status()
https://python.langchain.com/en/latest/_modules/langchain/utilities/google_serper.html
9f9a4dbc55dc-2
) response.raise_for_status() search_results = response.json() return search_results
https://python.langchain.com/en/latest/_modules/langchain/utilities/google_serper.html
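A short usage sketch for the wrapper above, assuming ``SERPER_API_KEY`` is exported in the environment (a free key is available at https://serper.dev):

.. code-block:: python

    from langchain import GoogleSerperAPIWrapper

    # The root validator above reads SERPER_API_KEY from the environment.
    serper = GoogleSerperAPIWrapper(k=5, gl="us", hl="en")

    # Returns a single string: an answer-box answer when one exists, otherwise
    # knowledge-graph facts and organic-result snippets joined together.
    print(serper.run("Who won the 2018 FIFA World Cup?"))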
3f83404391f4-0
Source code for langchain.utilities.bing_search """Util that calls Bing Search. In order to set this up, follow instructions at: https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e """ from typing import Dict, List import requests from pydantic import BaseModel, Extra, root_validator from langchain.utils import get_from_dict_or_env [docs]class BingSearchAPIWrapper(BaseModel): """Wrapper for Bing Search API. In order to set this up, follow instructions at: https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e """ bing_subscription_key: str bing_search_url: str k: int = 10 class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def _bing_search_results(self, search_term: str, count: int) -> List[dict]: headers = {"Ocp-Apim-Subscription-Key": self.bing_subscription_key} params = { "q": search_term, "count": count, "textDecorations": True, "textFormat": "HTML", } response = requests.get( self.bing_search_url, headers=headers, params=params # type: ignore ) response.raise_for_status() search_results = response.json() return search_results["webPages"]["value"] @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" bing_subscription_key = get_from_dict_or_env(
https://python.langchain.com/en/latest/_modules/langchain/utilities/bing_search.html
3f83404391f4-1
bing_subscription_key = get_from_dict_or_env( values, "bing_subscription_key", "BING_SUBSCRIPTION_KEY" ) values["bing_subscription_key"] = bing_subscription_key bing_search_url = get_from_dict_or_env( values, "bing_search_url", "BING_SEARCH_URL", # default="https://api.bing.microsoft.com/v7.0/search", ) values["bing_search_url"] = bing_search_url return values [docs] def run(self, query: str) -> str: """Run query through BingSearch and parse result.""" snippets = [] results = self._bing_search_results(query, count=self.k) if len(results) == 0: return "No good Bing Search Result was found" for result in results: snippets.append(result["snippet"]) return " ".join(snippets) [docs] def results(self, query: str, num_results: int) -> List[Dict]: """Run query through BingSearch and return metadata. Args: query: The query to search for. num_results: The number of results to return. Returns: A list of dictionaries with the following keys: snippet - The description of the result. title - The title of the result. link - The link to the result. """ metadata_results = [] results = self._bing_search_results(query, count=num_results) if len(results) == 0: return [{"Result": "No good Bing Search Result was found"}] for result in results: metadata_result = { "snippet": result["snippet"], "title": result["name"],
https://python.langchain.com/en/latest/_modules/langchain/utilities/bing_search.html
3f83404391f4-2
"snippet": result["snippet"], "title": result["name"], "link": result["url"], } metadata_results.append(metadata_result) return metadata_results By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 25, 2023.
https://python.langchain.com/en/latest/_modules/langchain/utilities/bing_search.html
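A usage sketch for the wrapper above. The subscription key is a placeholder; both values can instead be supplied via the ``BING_SUBSCRIPTION_KEY`` and ``BING_SEARCH_URL`` environment variables handled by the validator:

.. code-block:: python

    from langchain.utilities.bing_search import BingSearchAPIWrapper

    bing = BingSearchAPIWrapper(
        bing_subscription_key="<your-subscription-key>",  # placeholder
        bing_search_url="https://api.bing.microsoft.com/v7.0/search",
        k=5,
    )

    # `run` joins the snippets of the top `k` hits into one string;
    # `results` returns a list of {"snippet", "title", "link"} dictionaries.
    print(bing.run("python asyncio tutorial"))
    print(bing.results("python asyncio tutorial", 3))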
d119aa22e617-0
Source code for langchain.utilities.apify from typing import Any, Callable, Dict, Optional from pydantic import BaseModel, root_validator from langchain.document_loaders import ApifyDatasetLoader from langchain.document_loaders.base import Document from langchain.utils import get_from_dict_or_env [docs]class ApifyWrapper(BaseModel): """Wrapper around Apify. To use, you should have the ``apify-client`` python package installed, and the environment variable ``APIFY_API_TOKEN`` set with your API key, or pass `apify_api_token` as a named parameter to the constructor. """ apify_client: Any apify_client_async: Any @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate environment. Validate that an Apify API token is set and the apify-client Python package exists in the current environment. """ apify_api_token = get_from_dict_or_env( values, "apify_api_token", "APIFY_API_TOKEN" ) try: from apify_client import ApifyClient, ApifyClientAsync values["apify_client"] = ApifyClient(apify_api_token) values["apify_client_async"] = ApifyClientAsync(apify_api_token) except ImportError: raise ValueError( "Could not import apify-client Python package. " "Please install it with `pip install apify-client`." ) return values [docs] def call_actor( self, actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], Document], *, build: Optional[str] = None,
https://python.langchain.com/en/latest/_modules/langchain/utilities/apify.html
d119aa22e617-1
*, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None, ) -> ApifyDatasetLoader: """Run an Actor on the Apify platform and wait for results to be ready. Args: actor_id (str): The ID or name of the Actor on the Apify platform. run_input (Dict): The input object of the Actor that you're trying to run. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. build (str, optional): Optionally specifies the actor build to run. It can be either a build tag or build number. memory_mbytes (int, optional): Optional memory limit for the run, in megabytes. timeout_secs (int, optional): Optional timeout for the run, in seconds. Returns: ApifyDatasetLoader: A loader that will fetch the records from the Actor run's default dataset. """ actor_call = self.apify_client.actor(actor_id).call( run_input=run_input, build=build, memory_mbytes=memory_mbytes, timeout_secs=timeout_secs, ) return ApifyDatasetLoader( dataset_id=actor_call["defaultDatasetId"], dataset_mapping_function=dataset_mapping_function, ) [docs] async def acall_actor( self, actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None,
https://python.langchain.com/en/latest/_modules/langchain/utilities/apify.html
d119aa22e617-2
memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None, ) -> ApifyDatasetLoader: """Run an Actor on the Apify platform and wait for results to be ready. Args: actor_id (str): The ID or name of the Actor on the Apify platform. run_input (Dict): The input object of the Actor that you're trying to run. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. build (str, optional): Optionally specifies the actor build to run. It can be either a build tag or build number. memory_mbytes (int, optional): Optional memory limit for the run, in megabytes. timeout_secs (int, optional): Optional timeout for the run, in seconds. Returns: ApifyDatasetLoader: A loader that will fetch the records from the Actor run's default dataset. """ actor_call = await self.apify_client_async.actor(actor_id).call( run_input=run_input, build=build, memory_mbytes=memory_mbytes, timeout_secs=timeout_secs, ) return ApifyDatasetLoader( dataset_id=actor_call["defaultDatasetId"], dataset_mapping_function=dataset_mapping_function, )
https://python.langchain.com/en/latest/_modules/langchain/utilities/apify.html
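A usage sketch for the wrapper above, assuming ``APIFY_API_TOKEN`` is set and ``apify-client`` is installed. The Actor ID and the ``text``/``url`` field names depend on the chosen Actor's output schema and are assumptions here:

.. code-block:: python

    from langchain.document_loaders.base import Document
    from langchain.utilities.apify import ApifyWrapper

    apify = ApifyWrapper()

    # `call_actor` waits for the Actor run to finish; `load()` then fetches
    # the records from the run's default dataset and maps each item (a plain
    # dict) to a Document via the mapping function.
    loader = apify.call_actor(
        actor_id="apify/website-content-crawler",
        run_input={"startUrls": [{"url": "https://python.langchain.com"}]},
        dataset_mapping_function=lambda item: Document(
            page_content=item.get("text") or "",
            metadata={"source": item.get("url")},
        ),
    )
    docs = loader.load()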
07e4be009910-0
Source code for langchain.utilities.google_places_api """Chain that calls Google Places API. """ import logging from typing import Any, Dict, Optional from pydantic import BaseModel, Extra, root_validator from langchain.utils import get_from_dict_or_env [docs]class GooglePlacesAPIWrapper(BaseModel): """Wrapper around Google Places API. To use, you should have the ``googlemaps`` python package installed, **an API key for the Google Maps platform**, and the environment variable ``GPLACES_API_KEY`` set with your API key, or pass `gplaces_api_key` as a named parameter to the constructor. By default, this will return all the results for the input query. You can use the top_k_results argument to limit the number of results. Example: .. code-block:: python from langchain import GooglePlacesAPIWrapper gplaceapi = GooglePlacesAPIWrapper() """ gplaces_api_key: Optional[str] = None google_map_client: Any #: :meta private: top_k_results: Optional[int] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the API key is in your environment variables.""" gplaces_api_key = get_from_dict_or_env( values, "gplaces_api_key", "GPLACES_API_KEY" ) values["gplaces_api_key"] = gplaces_api_key try: import googlemaps values["google_map_client"] = googlemaps.Client(gplaces_api_key) except ImportError: raise ValueError(
https://python.langchain.com/en/latest/_modules/langchain/utilities/google_places_api.html
07e4be009910-1
except ImportError: raise ValueError( "Could not import googlemaps python package. " "Please install it with `pip install googlemaps`." ) return values [docs] def run(self, query: str) -> str: """Run Places search and return details for the top k matching places.""" search_results = self.google_map_client.places(query)["results"] num_to_return = len(search_results) places = [] if num_to_return == 0: return "Google Places did not find any places that match the description" num_to_return = ( num_to_return if self.top_k_results is None else min(num_to_return, self.top_k_results) ) for i in range(num_to_return): result = search_results[i] details = self.fetch_place_details(result["place_id"]) if details is not None: places.append(details) return "\n".join([f"{i+1}. {item}" for i, item in enumerate(places)]) [docs] def fetch_place_details(self, place_id: str) -> Optional[str]: try: place_details = self.google_map_client.place(place_id) formatted_details = self.format_place_details(place_details) return formatted_details except Exception as e: logging.error(f"An error occurred while fetching place details: {e}") return None [docs] def format_place_details(self, place_details: Dict[str, Any]) -> Optional[str]: try: name = place_details.get("result", {}).get("name", "Unknown") address = place_details.get("result", {}).get( "formatted_address", "Unknown" )
https://python.langchain.com/en/latest/_modules/langchain/utilities/google_places_api.html
07e4be009910-2
"formatted_address", "Unknown" ) phone_number = place_details.get("result", {}).get( "formatted_phone_number", "Unknown" ) website = place_details.get("result", {}).get("website", "Unknown") formatted_details = ( f"{name}\nAddress: {address}\n" f"Phone: {phone_number}\nWebsite: {website}\n\n" ) return formatted_details except Exception as e: logging.error(f"An error occurred while formatting place details: {e}") return None By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 25, 2023.
https://python.langchain.com/en/latest/_modules/langchain/utilities/google_places_api.html
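A usage sketch for the wrapper above, assuming ``GPLACES_API_KEY`` is set and the ``googlemaps`` package is installed:

.. code-block:: python

    from langchain import GooglePlacesAPIWrapper

    places = GooglePlacesAPIWrapper(top_k_results=3)

    # Returns a numbered, newline-separated block of place details
    # (name, address, phone number, website) for the top matches.
    print(places.run("coffee near Union Square, San Francisco"))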
ab2546cfe276-0
Source code for langchain.utilities.python import sys from io import StringIO from typing import Dict, Optional from pydantic import BaseModel, Field [docs]class PythonREPL(BaseModel): """Simulates a standalone Python REPL.""" globals: Optional[Dict] = Field(default_factory=dict, alias="_globals") locals: Optional[Dict] = Field(default_factory=dict, alias="_locals") [docs] def run(self, command: str) -> str: """Run command with own globals/locals and return anything printed.""" old_stdout = sys.stdout sys.stdout = mystdout = StringIO() try: exec(command, self.globals, self.locals) sys.stdout = old_stdout output = mystdout.getvalue() except Exception as e: sys.stdout = old_stdout output = str(e) return output
https://python.langchain.com/en/latest/_modules/langchain/utilities/python.html
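Because ``run`` only captures what the command writes to stdout, expressions must be printed explicitly; exceptions are returned as strings rather than raised, and state persists across calls through the shared ``locals``:

.. code-block:: python

    from langchain.utilities.python import PythonREPL

    repl = PythonREPL()
    print(repl.run("print(2 ** 10)"))            # "1024\n"
    print(repl.run("x = 41; x += 1; print(x)"))  # "42\n"
    print(repl.run("print(x)"))                  # "42\n" (state persisted)
    print(repl.run("1 / 0"))                     # "division by zero"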
59c1f05558a3-0
Source code for langchain.utilities.serpapi """Chain that calls SerpAPI. Heavily borrowed from https://github.com/ofirpress/self-ask """ import os import sys from typing import Any, Dict, Optional, Tuple import aiohttp from pydantic import BaseModel, Extra, Field, root_validator from langchain.utils import get_from_dict_or_env class HiddenPrints: """Context manager to hide prints.""" def __enter__(self) -> None: """Open file to pipe stdout to.""" self._original_stdout = sys.stdout sys.stdout = open(os.devnull, "w") def __exit__(self, *_: Any) -> None: """Close file that stdout was piped to.""" sys.stdout.close() sys.stdout = self._original_stdout [docs]class SerpAPIWrapper(BaseModel): """Wrapper around SerpAPI. To use, you should have the ``google-search-results`` python package installed, and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass `serpapi_api_key` as a named parameter to the constructor. Example: .. code-block:: python from langchain import SerpAPIWrapper serpapi = SerpAPIWrapper() """ search_engine: Any #: :meta private: params: dict = Field( default={ "engine": "google", "google_domain": "google.com", "gl": "us", "hl": "en", } ) serpapi_api_key: Optional[str] = None aiosession: Optional[aiohttp.ClientSession] = None class Config:
https://python.langchain.com/en/latest/_modules/langchain/utilities/serpapi.html
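Basic usage mirrors the docstring example above, assuming ``SERPAPI_API_KEY`` is set and the ``google-search-results`` package is installed:

.. code-block:: python

    from langchain import SerpAPIWrapper

    serpapi = SerpAPIWrapper()
    print(serpapi.run("What is the capital of France?"))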