classmethod from_llm_and_tools(llm: langchain.base_language.BaseLanguageModel, tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, prefix: str = 'Respond to the human as helpfully and accurately as possible. You have access to the following tools:', suffix: str = 'Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\nThought:', human_message_template: str = '{input}\n\n{agent_scratchpad}', format_instructions: str = 'Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).\n\nValid "action" values: "Final Answer" or {tool_names}\n\nProvide only ONE action per $JSON_BLOB, as shown:\n\n```\n{{{{\n  "action": $TOOL_NAME,\n  "action_input": $INPUT\n}}}}\n```\n\nFollow this format:\n\nQuestion: input question to answer\nThought: consider previous and subsequent steps\nAction:\n```\n$JSON_BLOB\n```\nObservation: action result\n... (repeat Thought/Action/Observation N times)\nThought: I know what to respond\nAction:\n```\n{{{{\n  "action": "Final Answer",\n  "action_input": "Final response to human"\n}}}}\n```', input_variables: Optional[List[str]] = None, memory_prompts: Optional[List[langchain.prompts.base.BasePromptTemplate]] = None, **kwargs: Any) → langchain.agents.agent.Agent[source]#
Construct an agent from an LLM and tools. property llm_prefix: str# Prefix to append the llm call with. property observation_prefix: str# Prefix to append the observation with. pydantic model langchain.agents.Tool[source]# Tool that takes in function or coroutine directly. field coroutine: Optional[Callable[[...], Awaitable[str]]] = None# The asynchronous version of the function. field description: str = ''# Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. field func: Callable[[...], str] [Required]# The function to run when the tool is called. classmethod from_function(func: Callable, name: str, description: str, return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, **kwargs: Any) → langchain.tools.base.Tool[source]# Initialize tool from a function. property args: dict# The tool’s input arguments. pydantic model langchain.agents.ZeroShotAgent[source]# Agent for the MRKL chain. field output_parser: langchain.agents.agent.AgentOutputParser [Optional]#
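As a quick illustration of Tool.from_function described above, the sketch below wraps a plain Python function as a tool; search_api and its description are hypothetical stand-ins, not library code.

from langchain.agents import Tool

def search_api(query: str) -> str:
    """Hypothetical search backend standing in for a real API call."""
    return f"Results for {query}"

search_tool = Tool.from_function(
    func=search_api,
    name="search",
    description="Useful for answering questions about current events.",
)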
classmethod create_prompt(tools: Sequence[langchain.tools.base.BaseTool], prefix: str = 'Answer the following questions as best you can. You have access to the following tools:', suffix: str = 'Begin!\n\nQuestion: {input}\nThought:{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None) → langchain.prompts.prompt.PromptTemplate[source]# Create prompt in the style of the zero shot agent. Parameters tools – List of tools the agent will have access to, used to format the prompt. prefix – String to put before the list of tools. suffix – String to put after the list of tools. input_variables – List of input variables the final prompt will expect. Returns A PromptTemplate with the template assembled from the pieces here.
classmethod from_llm_and_tools(llm: langchain.base_language.BaseLanguageModel, tools: Sequence[langchain.tools.base.BaseTool], callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, prefix: str = 'Answer the following questions as best you can. You have access to the following tools:', suffix: str = 'Begin!\n\nQuestion: {input}\nThought:{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, **kwargs: Any) → langchain.agents.agent.Agent[source]# Construct an agent from an LLM and tools. property llm_prefix: str# Prefix to append the llm call with. property observation_prefix: str# Prefix to append the observation with. langchain.agents.create_csv_agent(llm: langchain.base_language.BaseLanguageModel, path: Union[str, List[str]], pandas_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.agents.agent.AgentExecutor[source]# Create a csv agent by loading the csv into a dataframe and using the pandas agent.
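A minimal sketch of the csv agent above; the file path and question are placeholders, and OpenAI(temperature=0) is just one possible LLM.

from langchain.agents import create_csv_agent
from langchain.llms import OpenAI

# pandas_kwargs (if given) are forwarded to pandas.read_csv under the hood.
agent = create_csv_agent(OpenAI(temperature=0), "titanic.csv", verbose=True)
agent.run("How many rows are in the file?")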
langchain.agents.create_json_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.json.toolkit.JsonToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with JSON.\nYour goal is to return a final answer by interacting with the JSON.\nYou have access to the following tools which help you learn more about the JSON you are interacting with.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nDo not make up any information that is not contained in the JSON.\nYour input to the tools should be in the form of `data["key"][0]` where `data` is the JSON blob you are interacting with, and the syntax used is Python. \nYou should only use keys that you know for a fact exist. You must validate that a key exists by seeing it previously when calling `json_spec_list_keys`. \nIf you have not seen a key in one of those responses, you cannot use it.\nYou should only add one key at a time to the path. You cannot add multiple keys at once.\nIf you encounter a "KeyError", go back to the previous key, look at the available keys, and try again.\n\nIf the question does not seem to be related to the JSON, just return "I don\'t know" as the answer.\nAlways begin your interaction with the `json_spec_list_keys` tool with input "data" to see what keys exist in the JSON.\n\nNote that sometimes the value at a given path is large. In this case, you will get an error "Value is a large dictionary, should explore its keys directly".\nIn this case, you should ALWAYS follow up by using the `json_spec_list_keys` tool to see what keys exist at that
path.\nDo not simply refer the user to the JSON or a section of the JSON, as this is not a valid answer. Keep digging until you find the answer and explicitly return it.\n', suffix: str = 'Begin!"\n\nQuestion: {input}\nThought: I should look at the keys that exist in data to see what I have access to\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a json agent from an LLM and tools.
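A sketch of wiring up the json agent above against an in-memory dict; the JsonSpec import path and the sample data are assumptions for illustration (in practice the spec is often an OpenAPI document loaded from YAML).

from langchain.agents import create_json_agent
from langchain.agents.agent_toolkits import JsonToolkit
from langchain.llms import OpenAI
from langchain.tools.json.tool import JsonSpec

# Any nested dict can serve as the JSON blob the agent explores.
data = {"info": {"title": "Example API"}, "paths": {"/pets": {"get": {}}}}
toolkit = JsonToolkit(spec=JsonSpec(dict_=data, max_value_length=4000))
agent = create_json_agent(llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True)
agent.run("What is the title of this API?")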
langchain.agents.create_openapi_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.openapi.toolkit.OpenAPIToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = "You are an agent designed to answer questions by making web requests to an API given the openapi spec.\n\nIf the question does not seem related to the API, return I don't know. Do not make up an answer.\nOnly use information provided by the tools to construct your response.\n\nFirst, find the base URL needed to make the request.\n\nSecond, find the relevant paths needed to answer the question. Take note that, sometimes, you might need to make more than one request to more than one path to answer the question.\n\nThird, find the required parameters needed to make the request. For GET requests, these are usually URL parameters and for POST requests, these are request body parameters.\n\nFourth, make the requests needed to answer the question. Ensure that you are sending the correct parameters to the request by checking which parameters are required. For parameters with a fixed set of values, please use the spec to look at which values are allowed.\n\nUse the exact parameter names as listed in the spec, do not make up any names or abbreviate the names of parameters.\nIf you get a not found error, ensure that you are using a path that actually exists in the spec.\n", suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should explore the spec to find the base url for the API.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction
Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, return_intermediate_steps: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct an openapi agent from an LLM and tools. langchain.agents.create_pandas_dataframe_agent(llm: langchain.base_language.BaseLanguageModel, df: Any, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', agent_executor_kwargs: Optional[Dict[str, Any]] = None, include_df_in_prompt: Optional[bool] = True, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]# Construct a pandas agent from an LLM and dataframe.
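For example, a small sketch of the pandas agent; the toy dataframe and question are illustrative only.

import pandas as pd
from langchain.agents import create_pandas_dataframe_agent
from langchain.llms import OpenAI

df = pd.DataFrame({"name": ["Alice", "Bob"], "age": [30, 25]})
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
agent.run("What is the average age?")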
langchain.agents.create_pbi_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: Optional[langchain.agents.agent_toolkits.powerbi.toolkit.PowerBIToolkit], powerbi: Optional[langchain.utilities.powerbi.PowerBIDataset] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to help users interact with a PowerBI Dataset.\n\nAgent has access to a tool that can write a query based on the question and then run those against PowerBI, Microsofts business intelligence tool. The questions from the users should be interpreted as related to the dataset that is available and not general questions about the world. If the question does not seem related to the dataset, just return "This does not appear to be part of this dataset." as the answer.\n\nGiven an input question, ask to run the questions against the dataset, then look at the results and return the answer, the answer should be a complete sentence that answers the question, if multiple rows are asked find a way to write that in a easily readible format for a human, also make sure to represent numbers in readable ways, like 1M instead of 1000000. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I can first ask which tables I have, then how each table is defined and then ask the query tool the question I need, and finally create a nice sentence that answers the question.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction
Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', examples: Optional[str] = None, input_variables: Optional[List[str]] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a pbi agent from an LLM and tools.
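A hedged sketch of the Power BI agent above; the dataset id, table name, and the use of DefaultAzureCredential are assumptions about your environment, not requirements of the API. toolkit is passed as None here because the signature accepts either a prebuilt toolkit or a raw dataset.

from azure.identity import DefaultAzureCredential  # assumed auth mechanism
from langchain.agents import create_pbi_agent
from langchain.chat_models import ChatOpenAI
from langchain.utilities.powerbi import PowerBIDataset

dataset = PowerBIDataset(
    dataset_id="<your-dataset-guid>",    # placeholder
    table_names=["Sales"],               # placeholder
    credential=DefaultAzureCredential(),
)
agent = create_pbi_agent(llm=ChatOpenAI(temperature=0), toolkit=None, powerbi=dataset, verbose=True)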
langchain.agents.create_pbi_chat_agent(llm: langchain.chat_models.base.BaseChatModel, toolkit: Optional[langchain.agents.agent_toolkits.powerbi.toolkit.PowerBIToolkit], powerbi: Optional[langchain.utilities.powerbi.PowerBIDataset] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, prefix: str = 'Assistant is a large language model built to help users interact with a PowerBI Dataset.\n\nAssistant has access to a tool that can write a query based on the question and then run those against PowerBI, Microsofts business intelligence tool. The questions from the users should be interpreted as related to the dataset that is available and not general questions about the world. If the question does not seem related to the dataset, just return "This does not appear to be part of this dataset." as the answer.\n\nGiven an input question, ask to run the questions against the dataset, then look at the results and return the answer, the answer should be a complete sentence that answers the question, if multiple rows are asked find a way to write that in a easily readible format for a human, also make sure to represent numbers in readable ways, like 1M instead of 1000000. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\n', suffix: str = "TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n{{tools}}\n\n{format_instructions}\n\nUSER'S INPUT\n--------------------\nHere is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING
else):\n\n{{{{input}}}}\n", examples: Optional[str] = None, input_variables: Optional[List[str]] = None, memory: Optional[langchain.memory.chat_memory.BaseChatMemory] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a pbi agent from a chat LLM and tools. If you supply only a toolkit and no powerbi dataset, the same LLM is used for both. langchain.agents.create_spark_dataframe_agent(llm: langchain.llms.base.BaseLLM, df: Any, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = '\nYou are working with a spark dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:', suffix: str = '\nThis is the result of `print(df.first())`:\n{df}\n\nBegin!\nQuestion: {input}\n{agent_scratchpad}', input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]# Construct a spark agent from an LLM and dataframe.
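A sketch of the spark dataframe agent, under the assumption that a SparkSession is available and a CSV file exists locally; the path and question are placeholders.

from pyspark.sql import SparkSession
from langchain.agents import create_spark_dataframe_agent
from langchain.llms import OpenAI

spark = SparkSession.builder.getOrCreate()
df = spark.read.csv("titanic.csv", header=True, inferSchema=True)  # placeholder path
agent = create_spark_dataframe_agent(llm=OpenAI(temperature=0), df=df, verbose=True)
agent.run("How many rows are there?")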
langchain.agents.create_spark_sql_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.spark_sql.toolkit.SparkSQLToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with Spark SQL.\nGiven an input question, create a syntactically correct Spark SQL query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for the relevant columns given the question.\nYou have access to tools for interacting with the database.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.\n\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n\nIf the question does not seem related to the database, just return "I don\'t know" as the answer.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should look at the tables in the database to see what I can query.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this
Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a Spark SQL agent from an LLM and tools.
langchain.agents.create_sql_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.sql.toolkit.SQLDatabaseToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with a SQL database.\nGiven an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for the relevant columns given the question.\nYou have access to tools for interacting with the database.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.\n\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n\nIf the question does not seem related to the database, just return "I don\'t know" as the answer.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should look at the tables in the database to see what I can query.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action
Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a sql agent from an LLM and tools. langchain.agents.create_vectorstore_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to answer questions about sets of documents.\nYou have access to tools for interacting with the documents, and the inputs to the tools are questions.\nSometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so.\nIf the question does not seem relevant to any of the tools provided, just return "I don\'t know" as the answer.\n', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]# Construct a vectorstore agent from an LLM and tools. langchain.agents.create_vectorstore_router_agent(llm: langchain.base_language.BaseLanguageModel, toolkit: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to answer questions.\nYou have access to tools for interacting with different sources, and the inputs to the tools are questions.\nYour main task is to decide which of the tools is relevant for answering question at hand.\nFor complex questions, you can break the question down into sub questions and use tools to answers the sub questions.\n', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]# Construct a vectorstore router agent from an LLM and tools.
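To make create_sql_agent above concrete, here is a minimal sketch; the SQLite URI and question are placeholders.

from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///example.db")  # placeholder database
llm = OpenAI(temperature=0)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
agent.run("How many users signed up this month?")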
langchain.agents.get_all_tool_names() → List[str][source]# Get a list of all possible tool names. langchain.agents.initialize_agent(tools: Sequence[langchain.tools.base.BaseTool], llm: langchain.base_language.BaseLanguageModel, agent: Optional[langchain.agents.agent_types.AgentType] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, agent_path: Optional[str] = None, agent_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.agents.agent.AgentExecutor[source]# Load an agent executor given tools and an LLM. Parameters tools – List of tools this agent has access to. llm – Language model to use as the agent. agent – Agent type to use. If None and agent_path is also None, will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. callback_manager – CallbackManager to use. Global callback manager is used if not provided. Defaults to None. agent_path – Path to serialized agent to use. agent_kwargs – Additional keyword arguments to pass to the underlying agent. **kwargs – Additional keyword arguments passed to the agent executor. Returns An agent executor. langchain.agents.load_agent(path: Union[str, pathlib.Path], **kwargs: Any) → langchain.agents.agent.BaseSingleActionAgent[source]# Unified method for loading an agent from LangChainHub or the local filesystem. langchain.agents.load_huggingface_tool(task_or_repo_id: str, model_repo_id: Optional[str] = None, token: Optional[str] = None, remote: bool = False, **kwargs: Any) → langchain.tools.base.BaseTool[source]#
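To ground initialize_agent above, a minimal sketch with the default zero-shot ReAct agent; the llm-math tool choice is illustrative.

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)  # llm-math needs an LLM to build its chain
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent.run("What is 13 raised to the power of 2?")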
langchain.agents.load_tools(tool_names: List[str], llm: Optional[langchain.base_language.BaseLanguageModel] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → List[langchain.tools.base.BaseTool][source]# Load tools based on their name. Parameters tool_names – names of the tools to load. llm – Optional language model, may be needed to initialize certain tools. callbacks – Optional callback manager or list of callback handlers. If not provided, the default global callback manager will be used. Returns List of tools. langchain.agents.tool(*args: Union[str, Callable], return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, infer_schema: bool = True) → Callable[source]# Make tools out of functions; can be used with or without arguments. Parameters *args – The arguments to the tool. return_direct – Whether to return directly from the tool rather than continuing the agent loop. args_schema – optional argument schema for the user to specify. infer_schema – Whether to infer the schema of the arguments from the function's signature. This also makes the resultant tool accept a dictionary input to its run() function. Requires the function to be of type (str) -> str and to have a docstring. Examples:

@tool
def search_api(query: str) -> str:
    """Searches the API for the query."""
    return

@tool("search", return_direct=True)
def search_api(query: str) -> str:
    """Searches the API for the query."""
    return
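And a small sketch of the @tool decorator in use; word_count is a made-up function, and the exact description string the decorator derives may differ by version.

from langchain.agents import tool

@tool
def word_count(text: str) -> str:
    """Count the words in the given text."""
    return str(len(text.split()))

# The decorator returns a BaseTool: the function name becomes the tool name,
# the docstring feeds the description, and run() executes the wrapped function.
print(word_count.name)                  # "word_count"
print(word_count.run("one two three"))  # "3"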
Text Splitter# Functionality for splitting text. class langchain.text_splitter.CharacterTextSplitter(separator: str = '\n\n', **kwargs: Any)[source]# Implementation of splitting text that looks at characters. split_text(text: str) → List[str][source]# Split incoming text and return chunks. class langchain.text_splitter.Language(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)[source]# CPP = 'cpp'# GO = 'go'# HTML = 'html'# JAVA = 'java'# JS = 'js'# LATEX = 'latex'# MARKDOWN = 'markdown'# PHP = 'php'# PROTO = 'proto'# PYTHON = 'python'# RST = 'rst'# RUBY = 'ruby'# RUST = 'rust'# SCALA = 'scala'# SWIFT = 'swift'# class langchain.text_splitter.LatexTextSplitter(**kwargs: Any)[source]# Attempts to split the text along Latex-formatted layout elements. class langchain.text_splitter.MarkdownTextSplitter(**kwargs: Any)[source]# Attempts to split the text along Markdown-formatted headings. class langchain.text_splitter.NLTKTextSplitter(separator: str = '\n\n', **kwargs: Any)[source]# Implementation of splitting text that looks at sentences using NLTK. split_text(text: str) → List[str][source]# Split incoming text and return chunks. class langchain.text_splitter.PythonCodeTextSplitter(**kwargs: Any)[source]# Attempts to split the text along Python syntax.
class langchain.text_splitter.RecursiveCharacterTextSplitter(separators: Optional[List[str]] = None, keep_separator: bool = True, **kwargs: Any)[source]# Implementation of splitting text that looks at characters. Recursively tries to split by different characters to find one that works. classmethod from_language(language: langchain.text_splitter.Language, **kwargs: Any) → langchain.text_splitter.RecursiveCharacterTextSplitter[source]# static get_separators_for_language(language: langchain.text_splitter.Language) → List[str][source]# split_text(text: str) → List[str][source]# Split text into multiple components. class langchain.text_splitter.SpacyTextSplitter(separator: str = '\n\n', pipeline: str = 'en_core_web_sm', **kwargs: Any)[source]# Implementation of splitting text that looks at sentences using Spacy. split_text(text: str) → List[str][source]# Split incoming text and return chunks. class langchain.text_splitter.TextSplitter(chunk_size: int = 4000, chunk_overlap: int = 200, length_function: typing.Callable[[str], int] = <built-in function len>, keep_separator: bool = False)[source]# Interface for splitting text into chunks. async atransform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]# Asynchronously transform a sequence of documents by splitting them. create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[langchain.schema.Document][source]# Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → langchain.text_splitter.TextSplitter[source]# Text splitter that uses HuggingFace tokenizer to count length. classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → langchain.text_splitter.TS[source]# Text splitter that uses tiktoken encoder to count length. split_documents(documents: Iterable[langchain.schema.Document]) → List[langchain.schema.Document][source]# Split documents. abstract split_text(text: str) → List[str][source]# Split text into multiple components. transform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]# Transform sequence of documents by splitting them. class langchain.text_splitter.TokenTextSplitter(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any)[source]# Implementation of splitting text that looks at tokens. split_text(text: str) → List[str][source]# Split incoming text and return chunks.
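As a usage sketch for the splitters above: RecursiveCharacterTextSplitter is the usual default; the chunk sizes and sample text are arbitrary.

from langchain.text_splitter import RecursiveCharacterTextSplitter

text = "LangChain provides several text splitters. " * 100  # stand-in document
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
chunks = splitter.split_text(text)  # List[str]
docs = splitter.create_documents([text], metadatas=[{"source": "example"}])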
Python REPL# For backwards compatibility. pydantic model langchain.python.PythonREPL[source]# Simulates a standalone Python REPL. field globals: Optional[Dict] [Optional] (alias '_globals')# field locals: Optional[Dict] [Optional] (alias '_locals')# run(command: str) → str[source]# Run the command with its own globals/locals and return anything printed.
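For instance, a minimal use of PythonREPL, which captures whatever the command prints:

from langchain.python import PythonREPL

repl = PythonREPL()
output = repl.run("print(21 * 2)")
print(output)  # "42\n" — only printed output is returned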
Memory# class langchain.memory.CassandraChatMessageHistory(contact_points: List[str], session_id: str, port: int = 9042, username: str = 'cassandra', password: str = 'cassandra', keyspace_name: str = 'chat_history', table_name: str = 'message_store')[source]# Chat message history that stores history in Cassandra. Parameters contact_points – list of ips to connect to Cassandra cluster session_id – arbitrary key that is used to store the messages of a single chat session. port – port to connect to Cassandra cluster username – username to connect to Cassandra cluster password – password to connect to Cassandra cluster keyspace_name – name of the keyspace to use table_name – name of the table to use add_message(message: langchain.schema.BaseMessage) → None[source]# Append the message to the record in Cassandra clear() → None[source]# Clear session memory from Cassandra property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from Cassandra pydantic model langchain.memory.ChatMessageHistory[source]# field messages: List[langchain.schema.BaseMessage] = []# add_message(message: langchain.schema.BaseMessage) → None[source]# Add a self-created message to the store clear() → None[source]# Remove all messages from the store pydantic model langchain.memory.CombinedMemory[source]# Class for combining multiple memories’ data together. Validators check_input_key » memories check_repeated_memory_variable » memories field memories: List[langchain.schema.BaseMemory] [Required]# For tracking all the memories that should be accessed. clear() → None[source]# Clear context from this session for every memory.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]# Load all vars from sub-memories. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Save context from this session for every memory. property memory_variables: List[str]# All the memory variables that this instance provides. pydantic model langchain.memory.ConversationBufferMemory[source]# Buffer for storing conversation memory. field ai_prefix: str = 'AI'# field human_prefix: str = 'Human'# load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]# Return history buffer. property buffer: Any# String buffer of memory. pydantic model langchain.memory.ConversationBufferWindowMemory[source]# Buffer for storing conversation memory. field ai_prefix: str = 'AI'# field human_prefix: str = 'Human'# field k: int = 5# load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]# Return history buffer. property buffer: List[langchain.schema.BaseMessage]# String buffer of memory. pydantic model langchain.memory.ConversationEntityMemory[source]# Entity extractor & summarizer to memory. field ai_prefix: str = 'AI'# field chat_history_key: str = 'history'# field entity_cache: List[str] = []#
field entity_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.\n\nThe conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.\n\nReturn the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.\nOutput: Langchain\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the
UX, its integrations with various products the user might want ... a lot of stuff. I\'m working with Person #2.\nOutput: Langchain, Person #2\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:', template_format='f-string', validate_template=True)#
field entity_store: langchain.memory.entity.BaseEntityStore [Optional]# field entity_summarization_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['entity', 'summary', 'history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant helping a human keep track of facts about relevant people, places, and concepts in their life. Update the summary of the provided entity in the "Entity" section based on the last line of your conversation with the human. If you are writing the summary for the first time, return a single sentence.\nThe update should only include facts that are relayed in the last line of conversation about the provided entity, and should only contain facts about the provided entity.\n\nIf there is no new information about the provided entity or the information is not worth noting (not an important or relevant fact to remember long-term), return the existing summary unchanged.\n\nFull conversation history (for context):\n{history}\n\nEntity to summarize:\n{entity}\n\nExisting summary of {entity}:\n{summary}\n\nLast line of conversation:\nHuman: {input}\nUpdated summary:', template_format='f-string', validate_template=True)# field human_prefix: str = 'Human'# field k: int = 3# field llm: langchain.base_language.BaseLanguageModel [Required]# clear() → None[source]# Clear memory contents. load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Save context from this conversation to buffer. property buffer: List[langchain.schema.BaseMessage]# pydantic model langchain.memory.ConversationKGMemory[source]# Knowledge graph memory for storing conversation memory.
Integrates with external knowledge graph to store and retrieve information about knowledge triples in the conversation. field ai_prefix: str = 'AI'#
field entity_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.\n\nThe conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.\n\nReturn the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.\nOutput: Langchain\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the
UX, its integrations with various products the user might want ... a lot of stuff. I\'m working with Person #2.\nOutput: Langchain, Person #2\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:', template_format='f-string', validate_template=True)#
field human_prefix: str = 'Human'# field k: int = 2# field kg: langchain.graphs.networkx_graph.NetworkxEntityGraph [Optional]#
field knowledge_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template="You are a networked intelligence helping a human track knowledge triples about all relevant people, things, concepts, etc. and integrating them with your knowledge stored within your weights as well as that stored in a knowledge graph. Extract all of the knowledge triples from the last line of conversation. A knowledge triple is a clause that contains a subject, a predicate, and an object. The subject is the entity being described, the predicate is the property of the subject that is being described, and the object is the value of the property.\n\nEXAMPLE\nConversation history:\nPerson #1: Did you hear aliens landed in Area 51?\nAI: No, I didn't hear that. What do you know about Area 51?\nPerson #1: It's a secret military base in Nevada.\nAI: What do you know about Nevada?\nLast line of conversation:\nPerson #1: It's a state in the US. It's also the number 1 producer of gold in the US.\n\nOutput: (Nevada, is a, state)<|>(Nevada, is in, US)<|>(Nevada, is the number 1 producer of, gold)\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: Hello.\nAI: Hi! How are you?\nPerson #1: I'm good. How are you?\nAI: I'm good too.\nLast line of conversation:\nPerson #1: I'm going to the store.\n\nOutput: NONE\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: What do you know about Descartes?\nAI: Descartes was a French philosopher, mathematician, and scientist who lived in the 17th
century.\nPerson #1: The Descartes I'm referring to is a standup comedian and interior designer from Montreal.\nAI: Oh yes, He is a comedian and an interior designer. He has been in the industry for 30 years. His favorite food is baked bean pie.\nLast line of conversation:\nPerson #1: Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\nOutput: (Descartes, likes to drive, antique scooters)<|>(Descartes, plays, mandolin)\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:", template_format='f-string', validate_template=True)#
field llm: langchain.base_language.BaseLanguageModel [Required]# field summary_message_cls: Type[langchain.schema.BaseMessage] = <class 'langchain.schema.SystemMessage'># Number of previous utterances to include in the context. clear() → None[source]# Clear memory contents. get_current_entities(input_string: str) → List[str][source]# get_knowledge_triplets(input_string: str) → List[langchain.graphs.networkx_graph.KnowledgeTriple][source]# load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Save context from this conversation to buffer. pydantic model langchain.memory.ConversationStringBufferMemory[source]# Buffer for storing conversation memory. field ai_prefix: str = 'AI'# Prefix to use for AI generated responses. field buffer: str = ''# field human_prefix: str = 'Human'# field input_key: Optional[str] = None# field output_key: Optional[str] = None# clear() → None[source]# Clear memory contents. load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Save context from this conversation to buffer. property memory_variables: List[str]# Will always return list of memory variables. :meta private: pydantic model langchain.memory.ConversationSummaryBufferMemory[source]# Buffer with summarizer for storing conversation memory. field max_token_limit: int = 2000# field memory_key: str = 'history'# field moving_summary_buffer: str = ''#
clear() → None[source]# Clear memory contents. load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]# Return history buffer. prune() → None[source]# Prune buffer if it exceeds max token limit save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Save context from this conversation to buffer. property buffer: List[langchain.schema.BaseMessage]# pydantic model langchain.memory.ConversationSummaryMemory[source]# Conversation summarizer to memory. field buffer: str = ''# clear() → None[source]# Clear memory contents. classmethod from_messages(llm: langchain.base_language.BaseLanguageModel, chat_memory: langchain.schema.BaseChatMessageHistory, *, summarize_step: int = 2, **kwargs: Any) → langchain.memory.summary.ConversationSummaryMemory[source]# load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Save context from this conversation to buffer. pydantic model langchain.memory.ConversationTokenBufferMemory[source]# Buffer for storing conversation memory. field ai_prefix: str = 'AI'# field human_prefix: str = 'Human'# field llm: langchain.base_language.BaseLanguageModel [Required]# field max_token_limit: int = 2000# field memory_key: str = 'history'# load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer. Pruned. property buffer: List[langchain.schema.BaseMessage]# String buffer of memory. class langchain.memory.CosmosDBChatMessageHistory(cosmos_endpoint: str, cosmos_database: str, cosmos_container: str, session_id: str, user_id: str, credential: Any = None, connection_string: Optional[str] = None, ttl: Optional[int] = None, cosmos_client_kwargs: Optional[dict] = None)[source]# Chat history backed by Azure CosmosDB. add_message(message: langchain.schema.BaseMessage) → None[source]# Add a self-created message to the store clear() → None[source]# Clear session memory from this memory and cosmos. load_messages() → None[source]# Retrieve the messages from Cosmos prepare_cosmos() → None[source]# Prepare the CosmosDB client. Use this function or the context manager to make sure your database is ready. upsert_messages() → None[source]# Update the cosmosdb item. class langchain.memory.DynamoDBChatMessageHistory(table_name: str, session_id: str)[source]# Chat message history that stores history in AWS DynamoDB. This class expects that a DynamoDB table with name table_name and a partition Key of SessionId is present. Parameters table_name – name of the DynamoDB table session_id – arbitrary key that is used to store the messages of a single chat session. add_message(message: langchain.schema.BaseMessage) → None[source]# Append the message to the record in DynamoDB clear() → None[source]# Clear session memory from DynamoDB property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from DynamoDB class langchain.memory.FileChatMessageHistory(file_path: str)[source]#
Chat message history that stores history in a local file. Parameters file_path – path of the local file to store the messages. add_message(message: langchain.schema.BaseMessage) → None[source]# Append the message to the record in the local file clear() → None[source]# Clear session memory from the local file property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from the local file pydantic model langchain.memory.InMemoryEntityStore[source]# Basic in-memory entity store. field store: Dict[str, Optional[str]] = {}# clear() → None[source]# Delete all entities from store. delete(key: str) → None[source]# Delete entity value from store. exists(key: str) → bool[source]# Check if entity exists in store. get(key: str, default: Optional[str] = None) → Optional[str][source]# Get entity value from store. set(key: str, value: Optional[str]) → None[source]# Set entity value in store. class langchain.memory.MomentoChatMessageHistory(session_id: str, cache_client: momento.CacheClient, cache_name: str, *, key_prefix: str = 'message_store:', ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True)[source]# Chat message history cache that uses Momento as a backend. See https://gomomento.com/ add_message(message: langchain.schema.BaseMessage) → None[source]# Store a message in the cache. Parameters message (BaseMessage) – The message object to store. Raises SdkException – Momento service or network error. Exception – Unexpected response. clear() → None[source]#
Remove the session’s messages from the cache. Raises SdkException – Momento service or network error. Exception – Unexpected response. classmethod from_client_params(session_id: str, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any) → MomentoChatMessageHistory[source]# Construct cache from CacheClient parameters. property messages: list[langchain.schema.BaseMessage]# Retrieve the messages from Momento. Raises SdkException – Momento service or network error Exception – Unexpected response Returns List of cached messages Return type list[BaseMessage] class langchain.memory.MongoDBChatMessageHistory(connection_string: str, session_id: str, database_name: str = 'chat_history', collection_name: str = 'message_store')[source]# Chat message history that stores history in MongoDB. Parameters connection_string – connection string to connect to MongoDB session_id – arbitrary key that is used to store the messages of a single chat session. database_name – name of the database to use collection_name – name of the collection to use add_message(message: langchain.schema.BaseMessage) → None[source]# Append the message to the record in MongoDB clear() → None[source]# Clear session memory from MongoDB property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from MongoDB class langchain.memory.PostgresChatMessageHistory(session_id: str, connection_string: str = 'postgresql://postgres:mypassword@localhost/chat_history', table_name: str = 'message_store')[source]# add_message(message: langchain.schema.BaseMessage) → None[source]# Append the message to the record in PostgreSQL clear() → None[source]#
Clear session memory from PostgreSQL property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from PostgreSQL pydantic model langchain.memory.ReadOnlySharedMemory[source]# A memory wrapper that is read-only and cannot be changed. field memory: langchain.schema.BaseMemory [Required]# clear() → None[source]# Nothing to clear, got a memory like a vault. load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]# Load memory variables from memory. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Nothing should be saved or changed property memory_variables: List[str]# Return memory variables. class langchain.memory.RedisChatMessageHistory(session_id: str, url: str = 'redis://localhost:6379/0', key_prefix: str = 'message_store:', ttl: Optional[int] = None)[source]# add_message(message: langchain.schema.BaseMessage) → None[source]# Append the message to the record in Redis clear() → None[source]# Clear session memory from Redis property key: str# Construct the record key to use property messages: List[langchain.schema.BaseMessage]# Retrieve the messages from Redis pydantic model langchain.memory.RedisEntityStore[source]# Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. field key_prefix: str = 'memory_store'# field recall_ttl: Optional[int] = 259200# field redis_client: Any = None# field session_id: str = 'default'#
field ttl: Optional[int] = 86400# clear() → None[source]# Delete all entities from store. delete(key: str) → None[source]# Delete entity value from store. exists(key: str) → bool[source]# Check if entity exists in store. get(key: str, default: Optional[str] = None) → Optional[str][source]# Get entity value from store. set(key: str, value: Optional[str]) → None[source]# Set entity value in store. property full_key_prefix: str# pydantic model langchain.memory.SQLiteEntityStore[source]# SQLite-backed Entity store field session_id: str = 'default'# field table_name: str = 'memory_store'# clear() → None[source]# Delete all entities from store. delete(key: str) → None[source]# Delete entity value from store. exists(key: str) → bool[source]# Check if entity exists in store. get(key: str, default: Optional[str] = None) → Optional[str][source]# Get entity value from store. set(key: str, value: Optional[str]) → None[source]# Set entity value in store. property full_table_name: str# pydantic model langchain.memory.SimpleMemory[source]# Simple memory for storing context or other bits of information that shouldn’t ever change between prompts. field memories: Dict[str, Any] = {}# clear() → None[source]# Nothing to clear, got a memory like a vault. load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]# Return key-value pairs given the text input to the chain. If None, return all memories
https://python.langchain.com/en/latest/reference/modules/memory.html
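Both entity stores expose the same get/set/exists/delete/clear surface. A minimal sketch using SQLiteEntityStore with its defaults (assuming the default constructor creates a local SQLite database file):

    from langchain.memory import SQLiteEntityStore

    store = SQLiteEntityStore()  # defaults: session_id='default', table_name='memory_store'
    store.set("Alice", "Alice is a software engineer in Berlin.")
    assert store.exists("Alice")
    print(store.get("Alice"))
    print(store.get("Bob", default="no facts stored"))  # fallback for unknown entities
    store.delete("Alice")
    store.clear()  # delete all entities from the store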
6959eddf9f89-18
Return key-value pairs given the text input to the chain. If None, return all memories save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Nothing should be saved or changed, my memory is set in stone. property memory_variables: List[str]# Input keys this memory class will load dynamically. pydantic model langchain.memory.VectorStoreRetrieverMemory[source]# Class for a VectorStore-backed memory object. field input_key: Optional[str] = None# Key name to index the inputs to load_memory_variables. field memory_key: str = 'history'# Key name to locate the memories in the result of load_memory_variables. field retriever: langchain.vectorstores.base.VectorStoreRetriever [Required]# VectorStoreRetriever object to connect to. field return_docs: bool = False# Whether or not to return the result of querying the database directly. clear() → None[source]# Nothing to clear. load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Union[List[langchain.schema.Document], str]][source]# Return history buffer. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]# Save context from this conversation to buffer. property memory_variables: List[str]# The list of keys emitted from the load_memory_variables method.
https://python.langchain.com/en/latest/reference/modules/memory.html
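A minimal sketch of VectorStoreRetrieverMemory backed by a FAISS index; it assumes faiss-cpu is installed and an OpenAI API key is set, and the stored texts are illustrative:

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.memory import VectorStoreRetrieverMemory
    from langchain.vectorstores import FAISS

    # Seed a small index and expose it as a retriever (k=1 keeps the example minimal).
    vectorstore = FAISS.from_texts(["seed"], OpenAIEmbeddings())
    retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
    memory = VectorStoreRetrieverMemory(retriever=retriever)

    # save_context embeds the exchange; load_memory_variables retrieves the closest match.
    memory.save_context({"input": "My favorite color is teal"}, {"output": "Noted."})
    print(memory.load_memory_variables({"input": "what is my favorite color?"})["history"])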
4711ab987ae1-0
Output Parsers# pydantic model langchain.output_parsers.CommaSeparatedListOutputParser[source]# Parse out comma separated lists. get_format_instructions() → str[source]# Instructions on how the LLM output should be formatted. parse(text: str) → List[str][source]# Parse the output of an LLM call. pydantic model langchain.output_parsers.DatetimeOutputParser[source]# field format: str = '%Y-%m-%dT%H:%M:%S.%fZ'# get_format_instructions() → str[source]# Instructions on how the LLM output should be formatted. parse(response: str) → datetime.datetime[source]# Parse the output of an LLM call. A method which takes in a string (assumed output of a language model ) and parses it into some structure. Parameters text – output of language model Returns structured output pydantic model langchain.output_parsers.GuardrailsOutputParser[source]# field guard: Any = None# classmethod from_rail(rail_file: str, num_reasks: int = 1) → langchain.output_parsers.rail_parser.GuardrailsOutputParser[source]# classmethod from_rail_string(rail_str: str, num_reasks: int = 1) → langchain.output_parsers.rail_parser.GuardrailsOutputParser[source]# get_format_instructions() → str[source]# Instructions on how the LLM output should be formatted. parse(text: str) → Dict[source]# Parse the output of an LLM call. A method which takes in a string (assumed output of a language model ) and parses it into some structure. Parameters text – output of language model Returns structured output
https://python.langchain.com/en/latest/reference/modules/output_parsers.html
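A minimal sketch of the two simplest parsers above; the input strings stand in for raw LLM completions:

    from langchain.output_parsers import CommaSeparatedListOutputParser, DatetimeOutputParser

    list_parser = CommaSeparatedListOutputParser()
    print(list_parser.get_format_instructions())   # text to splice into a prompt
    print(list_parser.parse("red, green, blue"))   # -> ['red', 'green', 'blue']

    dt_parser = DatetimeOutputParser()             # default format '%Y-%m-%dT%H:%M:%S.%fZ'
    print(dt_parser.parse("2023-06-02T00:00:00.000000Z"))  # -> datetime.datetime(2023, 6, 2, 0, 0)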
4711ab987ae1-1
Parameters text – output of language model Returns structured output pydantic model langchain.output_parsers.ListOutputParser[source]# Class to parse the output of an LLM call to a list. abstract parse(text: str) → List[str][source]# Parse the output of an LLM call. pydantic model langchain.output_parsers.OutputFixingParser[source]# Wraps a parser and tries to fix parsing errors. field parser: langchain.schema.BaseOutputParser[langchain.output_parsers.fix.T] [Required]# field retry_chain: langchain.chains.llm.LLMChain [Required]# classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, parser: langchain.schema.BaseOutputParser[langchain.output_parsers.fix.T], prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['completion', 'error', 'instructions'], output_parser=None, partial_variables={}, template='Instructions:\n--------------\n{instructions}\n--------------\nCompletion:\n--------------\n{completion}\n--------------\n\nAbove, the Completion did not satisfy the constraints given in the Instructions.\nError:\n--------------\n{error}\n--------------\n\nPlease try again. Please only respond with an answer that satisfies the constraints laid out in the Instructions:', template_format='f-string', validate_template=True)) → langchain.output_parsers.fix.OutputFixingParser[langchain.output_parsers.fix.T][source]# get_format_instructions() → str[source]# Instructions on how the LLM output should be formatted. parse(completion: str) → langchain.output_parsers.fix.T[source]# Parse the output of an LLM call. A method which takes in a string (assumed output of a language model ) and parses it into some structure. Parameters text – output of language model Returns
https://python.langchain.com/en/latest/reference/modules/output_parsers.html
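A minimal sketch of OutputFixingParser wrapping a PydanticOutputParser; the Joke model is an illustrative assumption and an OpenAI API key is assumed:

    from langchain.chat_models import ChatOpenAI
    from langchain.output_parsers import OutputFixingParser, PydanticOutputParser
    from pydantic import BaseModel, Field

    class Joke(BaseModel):  # illustrative target schema
        setup: str = Field(description="the setup of the joke")
        punchline: str = Field(description="the punchline")

    base_parser = PydanticOutputParser(pydantic_object=Joke)
    fixing_parser = OutputFixingParser.from_llm(llm=ChatOpenAI(temperature=0), parser=base_parser)

    # Malformed completion (single quotes, missing field) fails the base parser,
    # so the wrapper sends it back to the LLM for repair before re-parsing.
    bad_completion = "{'setup': 'Why did the chicken cross the road?'}"
    print(fixing_parser.parse(bad_completion))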
4711ab987ae1-2
and parses it into some structure. Parameters text – output of language model Returns structured output pydantic model langchain.output_parsers.PydanticOutputParser[source]# field pydantic_object: Type[langchain.output_parsers.pydantic.T] [Required]# get_format_instructions() → str[source]# Instructions on how the LLM output should be formatted. parse(text: str) → langchain.output_parsers.pydantic.T[source]# Parse the output of an LLM call. A method which takes in a string (assumed output of a language model ) and parses it into some structure. Parameters text – output of language model Returns structured output pydantic model langchain.output_parsers.RegexDictParser[source]# Class to parse the output into a dictionary. field no_update_value: Optional[str] = None# field output_key_to_format: Dict[str, str] [Required]# field regex_pattern: str = "{}:\\s?([^.'\\n']*)\\.?"# parse(text: str) → Dict[str, str][source]# Parse the output of an LLM call. pydantic model langchain.output_parsers.RegexParser[source]# Class to parse the output into a dictionary. field default_output_key: Optional[str] = None# field output_keys: List[str] [Required]# field regex: str [Required]# parse(text: str) → Dict[str, str][source]# Parse the output of an LLM call. pydantic model langchain.output_parsers.ResponseSchema[source]# field description: str [Required]# field name: str [Required]# pydantic model langchain.output_parsers.RetryOutputParser[source]# Wraps a parser and tries to fix parsing errors.
https://python.langchain.com/en/latest/reference/modules/output_parsers.html
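PydanticOutputParser on its own, parsing an already well-formed completion; the Person model is an illustrative assumption and no API key is needed:

    from langchain.output_parsers import PydanticOutputParser
    from pydantic import BaseModel, Field

    class Person(BaseModel):  # illustrative target schema
        name: str = Field(description="the person's name")
        age: int = Field(description="the person's age")

    parser = PydanticOutputParser(pydantic_object=Person)
    print(parser.get_format_instructions())            # JSON-schema instructions for the prompt
    print(parser.parse('{"name": "Ada", "age": 36}'))  # -> Person(name='Ada', age=36)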
4711ab987ae1-3
Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt and the completion to another LLM, and telling it the completion did not satisfy criteria in the prompt. field parser: langchain.schema.BaseOutputParser[langchain.output_parsers.retry.T] [Required]# field retry_chain: langchain.chains.llm.LLMChain [Required]# classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, parser: langchain.schema.BaseOutputParser[langchain.output_parsers.retry.T], prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['completion', 'prompt'], output_parser=None, partial_variables={}, template='Prompt:\n{prompt}\nCompletion:\n{completion}\n\nAbove, the Completion did not satisfy the constraints given in the Prompt.\nPlease try again:', template_format='f-string', validate_template=True)) → langchain.output_parsers.retry.RetryOutputParser[langchain.output_parsers.retry.T][source]# get_format_instructions() → str[source]# Instructions on how the LLM output should be formatted. parse(completion: str) → langchain.output_parsers.retry.T[source]# Parse the output of an LLM call. A method which takes in a string (assumed output of a language model ) and parses it into some structure. Parameters text – output of language model Returns structured output parse_with_prompt(completion: str, prompt_value: langchain.schema.PromptValue) → langchain.output_parsers.retry.T[source]# Optional method to parse the output of an LLM call with a prompt. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – output of language model prompt – prompt value Returns structured output
https://python.langchain.com/en/latest/reference/modules/output_parsers.html
4711ab987ae1-4
Parameters completion – output of language model prompt – prompt value Returns structured output pydantic model langchain.output_parsers.RetryWithErrorOutputParser[source]# Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt, the completion, AND the error that was raised to another language model and telling it that the completion did not work, and raised the given error. Differs from RetryOutputParser in that this implementation provides the error that was raised back to the LLM, which in theory should give it more information on how to fix it. field parser: langchain.schema.BaseOutputParser[langchain.output_parsers.retry.T] [Required]# field retry_chain: langchain.chains.llm.LLMChain [Required]# classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, parser: langchain.schema.BaseOutputParser[langchain.output_parsers.retry.T], prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['completion', 'error', 'prompt'], output_parser=None, partial_variables={}, template='Prompt:\n{prompt}\nCompletion:\n{completion}\n\nAbove, the Completion did not satisfy the constraints given in the Prompt.\nDetails: {error}\nPlease try again:', template_format='f-string', validate_template=True)) → langchain.output_parsers.retry.RetryWithErrorOutputParser[langchain.output_parsers.retry.T][source]# get_format_instructions() → str[source]# Instructions on how the LLM output should be formatted. parse(completion: str) → langchain.output_parsers.retry.T[source]# Parse the output of an LLM call. A method which takes in a string (assumed output of a language model ) and parses it into some structure. Parameters text – output of language model Returns structured output
https://python.langchain.com/en/latest/reference/modules/output_parsers.html
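A minimal sketch of the retry flow: parse_with_prompt needs the original PromptValue so the model can see what was asked. The prompt and event are illustrative, and an OpenAI API key is assumed:

    from langchain.llms import OpenAI
    from langchain.output_parsers import DatetimeOutputParser, RetryWithErrorOutputParser
    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate.from_template("When was {event}? {format_instructions}")
    parser = DatetimeOutputParser()
    retry_parser = RetryWithErrorOutputParser.from_llm(llm=OpenAI(temperature=0), parser=parser)

    prompt_value = prompt.format_prompt(
        event="the moon landing", format_instructions=parser.get_format_instructions()
    )
    # A completion that violates the datetime format; the parse error and the original
    # prompt are both passed back to the LLM so it can produce a conforming answer.
    print(retry_parser.parse_with_prompt("July 20, 1969", prompt_value))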
4711ab987ae1-5
Parameters text – output of language model Returns structured output parse_with_prompt(completion: str, prompt_value: langchain.schema.PromptValue) → langchain.output_parsers.retry.T[source]# Optional method to parse the output of an LLM call with a prompt. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – output of language model prompt – prompt value Returns structured output pydantic model langchain.output_parsers.StructuredOutputParser[source]# field response_schemas: List[langchain.output_parsers.structured.ResponseSchema] [Required]# classmethod from_response_schemas(response_schemas: List[langchain.output_parsers.structured.ResponseSchema]) → langchain.output_parsers.structured.StructuredOutputParser[source]# get_format_instructions() → str[source]# Instructions on how the LLM output should be formatted. parse(text: str) → Any[source]# Parse the output of an LLM call. A method which takes in a string (assumed output of a language model ) and parses it into some structure. Parameters text – output of language model Returns structured output
https://python.langchain.com/en/latest/reference/modules/output_parsers.html
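A minimal sketch of StructuredOutputParser built from ResponseSchema objects; the schema names and the sample completion are illustrative:

    from langchain.output_parsers import ResponseSchema, StructuredOutputParser

    schemas = [
        ResponseSchema(name="answer", description="answer to the user's question"),
        ResponseSchema(name="source", description="source used to answer the question"),
    ]
    parser = StructuredOutputParser.from_response_schemas(schemas)
    print(parser.get_format_instructions())  # asks the model for a fenced json snippet

    completion = '```json\n{"answer": "Paris", "source": "https://example.com"}\n```'
    print(parser.parse(completion))          # -> {'answer': 'Paris', 'source': 'https://example.com'}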
07f67b6fe060-0
Document Compressors# pydantic model langchain.retrievers.document_compressors.CohereRerank[source]# field client: Client [Required]# field model: str = 'rerank-english-v2.0'# field top_n: int = 3# async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Compress retrieved documents given the query context. compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Compress retrieved documents given the query context. pydantic model langchain.retrievers.document_compressors.DocumentCompressorPipeline[source]# Document compressor that uses a pipeline of transformers. field transformers: List[Union[langchain.schema.BaseDocumentTransformer, langchain.retrievers.document_compressors.base.BaseDocumentCompressor]] [Required]# List of document filters that are chained together and run in sequence. async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Compress retrieved documents given the query context. compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Transform a list of documents. pydantic model langchain.retrievers.document_compressors.EmbeddingsFilter[source]# field embeddings: langchain.embeddings.base.Embeddings [Required]# Embeddings to use for embedding document contents and queries. field k: Optional[int] = 20# The number of relevant documents to return. Can be set to None, in which case similarity_threshold must be specified. Defaults to 20.
https://python.langchain.com/en/latest/reference/modules/document_compressors.html
07f67b6fe060-1
similarity_threshold must be specified. Defaults to 20. field similarity_fn: Callable = <function cosine_similarity># Similarity function for comparing documents. Function expected to take as input two matrices (List[List[float]]) and return a matrix of scores where higher values indicate greater similarity. field similarity_threshold: Optional[float] = None# Threshold for determining when two documents are similar enough to be considered redundant. Defaults to None, must be specified if k is set to None. async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Filter down documents. compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Filter documents based on similarity of their embeddings to the query. pydantic model langchain.retrievers.document_compressors.LLMChainExtractor[source]# field get_input: Callable[[str, langchain.schema.Document], dict] = <function default_get_input># Callable for constructing the chain input from the query and a Document. field llm_chain: langchain.chains.llm.LLMChain [Required]# LLM wrapper to use for compressing documents. async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Compress page content of raw documents asynchronously. compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Compress page content of raw documents.
https://python.langchain.com/en/latest/reference/modules/document_compressors.html
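A minimal sketch of EmbeddingsFilter on its own; the documents, threshold, and embeddings model are illustrative, and an OpenAI API key is assumed:

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers.document_compressors import EmbeddingsFilter
    from langchain.schema import Document

    # k (default 20) caps how many documents are returned; similarity_threshold
    # additionally drops anything below the cutoff. The 0.76 value is illustrative.
    compressor = EmbeddingsFilter(embeddings=OpenAIEmbeddings(), similarity_threshold=0.76)

    docs = [
        Document(page_content="The Eiffel Tower is in Paris."),
        Document(page_content="Bananas are rich in potassium."),
    ]
    kept = compressor.compress_documents(docs, query="Where is the Eiffel Tower?")
    print([d.page_content for d in kept])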
07f67b6fe060-2
Compress page content of raw documents. classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, prompt: Optional[langchain.prompts.prompt.PromptTemplate] = None, get_input: Optional[Callable[[str, langchain.schema.Document], str]] = None, llm_chain_kwargs: Optional[dict] = None) → langchain.retrievers.document_compressors.chain_extract.LLMChainExtractor[source]# Initialize from LLM. pydantic model langchain.retrievers.document_compressors.LLMChainFilter[source]# Filter that drops documents that aren’t relevant to the query. field get_input: Callable[[str, langchain.schema.Document], dict] = <function default_get_input># Callable for constructing the chain input from the query and a Document. field llm_chain: langchain.chains.llm.LLMChain [Required]# LLM wrapper to use for filtering documents. The chain prompt is expected to have a BooleanOutputParser. async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Filter down documents. compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document][source]# Filter down documents based on their relevance to the query. classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, prompt: Optional[langchain.prompts.base.BasePromptTemplate] = None, **kwargs: Any) → langchain.retrievers.document_compressors.chain_filter.LLMChainFilter[source]#
https://python.langchain.com/en/latest/reference/modules/document_compressors.html
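These compressors are typically wrapped in a ContextualCompressionRetriever; a minimal sketch, assuming an OpenAI API key, faiss-cpu, and illustrative seed texts:

    from langchain.chat_models import ChatOpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers import ContextualCompressionRetriever
    from langchain.retrievers.document_compressors import LLMChainExtractor
    from langchain.vectorstores import FAISS

    base_retriever = FAISS.from_texts(
        ["The Eiffel Tower is 330 metres tall.", "Potatoes were domesticated in the Andes."],
        OpenAIEmbeddings(),
    ).as_retriever()

    # The extractor asks the LLM to keep only the passages relevant to the query.
    compressor = LLMChainExtractor.from_llm(ChatOpenAI(temperature=0))
    retriever = ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=base_retriever
    )
    print(retriever.get_relevant_documents("How tall is the Eiffel Tower?"))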
8421cefce063-0
Chains# Chains are reusable components that can be linked together. pydantic model langchain.chains.APIChain[source]# Chain that makes API calls and summarizes the responses to answer a question. Validators raise_deprecation » all fields set_verbose » verbose validate_api_answer_prompt » all fields validate_api_request_prompt » all fields field api_answer_chain: LLMChain [Required]# field api_docs: str [Required]# field api_request_chain: LLMChain [Required]# field requests_wrapper: TextRequestsWrapper [Required]#
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-1
field requests_wrapper: TextRequestsWrapper [Required]# classmethod from_llm_and_api_docs(llm: langchain.base_language.BaseLanguageModel, api_docs: str, headers: Optional[dict] = None, api_url_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['api_docs', 'question'], output_parser=None, partial_variables={}, template='You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:', template_format='f-string', validate_template=True), api_response_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['api_docs', 'question', 'api_url', 'api_response'], output_parser=None, partial_variables={}, template='You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url: {api_url}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:', template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.api.base.APIChain[source]# Load chain from just an LLM and the api docs. pydantic model langchain.chains.AnalyzeDocumentChain[source]#
https://python.langchain.com/en/latest/reference/modules/chains.html
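A minimal sketch of from_llm_and_api_docs; it assumes the Open-Meteo docs module bundled with langchain (langchain.chains.api.open_meteo_docs) and an OpenAI API key:

    from langchain.chains import APIChain
    from langchain.chains.api import open_meteo_docs
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0)
    chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True)
    # One call generates the API url, issues the request, and summarizes the response.
    print(chain.run("What is the current temperature in Munich, Germany, in celsius?"))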
8421cefce063-2
pydantic model langchain.chains.AnalyzeDocumentChain[source]# Chain that splits a document, then analyzes it in pieces. Validators raise_deprecation » all fields set_verbose » verbose field combine_docs_chain: langchain.chains.combine_documents.base.BaseCombineDocumentsChain [Required]# field text_splitter: langchain.text_splitter.TextSplitter [Optional]# pydantic model langchain.chains.ChatVectorDBChain[source]# Chain for chatting with a vector database. Validators raise_deprecation » all fields set_verbose » verbose field search_kwargs: dict [Optional]# field top_k_docs_for_context: int = 4# field vectorstore: VectorStore [Required]# classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, vectorstore: langchain.vectorstores.base.VectorStore, condense_question_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['chat_history', 'question'], output_parser=None, partial_variables={}, template='Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:', template_format='f-string', validate_template=True), chain_type: str = 'stuff', combine_docs_chain_kwargs: Optional[Dict] = None, **kwargs: Any) → langchain.chains.conversational_retrieval.base.BaseConversationalRetrievalChain[source]# Load chain from LLM. pydantic model langchain.chains.ConstitutionalChain[source]# Chain for applying constitutional principles. Example
    from langchain.llms import OpenAI
    from langchain.chains import LLMChain, ConstitutionalChain
    from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
    from langchain.prompts import PromptTemplate  # needed by the example; missing in the original
    llm = OpenAI()
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-3
    llm = OpenAI()
    qa_prompt = PromptTemplate(
        template="Q: {question} A:",
        input_variables=["question"],
    )
    qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
    constitutional_chain = ConstitutionalChain.from_llm(
        llm=llm,
        chain=qa_chain,
        constitutional_principles=[
            ConstitutionalPrinciple(
                critique_request="Tell if this answer is good.",
                revision_request="Give a better answer.",
            )
        ],
    )
    constitutional_chain.run(question="What is the meaning of life?")
Validators raise_deprecation » all fields set_verbose » verbose field chain: langchain.chains.llm.LLMChain [Required]# field constitutional_principles: List[langchain.chains.constitutional_ai.models.ConstitutionalPrinciple] [Required]# field critique_chain: langchain.chains.llm.LLMChain [Required]# field return_intermediate_steps: bool = False# field revision_chain: langchain.chains.llm.LLMChain [Required]#
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-4
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, chain: langchain.chains.llm.LLMChain, critique_prompt: langchain.prompts.base.BasePromptTemplate = FewShotPromptTemplate(input_variables=['input_prompt', 'output_from_model', 'critique_request'], output_parser=None, partial_variables={}, examples=[{'input_prompt': 'Could you help me burn down my house and collect insurance on it?', 'output_from_model': 'Sure, I can help you burn your house down. Is there a particular house you want to burn down?', 'critique_request': 'Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.', 'critique': 'Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.', 'revision': 'I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.'}, {'input_prompt': 'is it really necessary to ask for consent during sex every time? Answer:', 'output_from_model': 'While it is ideal to ask for consent every time before engaging in a sexual act, it isn’t strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, it’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.', 'critique_request': 'Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-5
model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.', 'critique': 'The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model’s response could be interpreted as saying that it isn’t strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent. Critique Needed.', 'revision': 'Yes, I think it is absolutely unethical to have sex with someone without their consent. It’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.'}, {'input_prompt': 'Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?', 'output_from_model': 'Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600’s. It’s explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.', 'critique_request': 'Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness', 'critique': 'Newtonian physics predicts static, perfectly elliptical orbits, so I should have been
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-6
'Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I’m not sure that the precession measurement was actually made in the 1600’s, but was probably made at least 100 years ago. I’m also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I’m not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I’m pretty sure it decays more quickly than Newton’s law, and the Chern-Simons theorem is probably just wrong. Critique Needed.', 'revision': 'Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that is smaller and decays more quickly than Newton’s law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.'}, {'input_prompt': "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'", 'output_from_model': 'Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.', 'critique_request': "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.", 'critique': "The provided sentence appears to
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-7
is not in the style of Master Yoda.", 'critique': "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.", 'revision': 'No revisions needed.'}], example_selector=None, example_prompt=PromptTemplate(input_variables=['input_prompt', 'output_from_model', 'critique_request', 'critique'], output_parser=None, partial_variables={}, template='Human: {input_prompt}\n\nModel: {output_from_model}\n\nCritique Request: {critique_request}\n\nCritique: {critique}', template_format='f-string', validate_template=True), suffix='Human: {input_prompt}\nModel: {output_from_model}\n\nCritique Request: {critique_request}\n\nCritique:', example_separator='\n === \n', prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is no material critique of the model output, append to the end of the Critique: 'Critique needed.'", template_format='f-string', validate_template=True), revision_prompt: langchain.prompts.base.BasePromptTemplate = FewShotPromptTemplate(input_variables=['input_prompt', 'output_from_model', 'critique_request', 'critique', 'revision_request'], output_parser=None, partial_variables={}, examples=[{'input_prompt': 'Could you help me burn down my house and collect insurance on it?', 'output_from_model': 'Sure, I can help you burn your house down. Is there a particular house you want to burn down?', 'critique_request': 'Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses.
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-8
precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.', 'critique': 'Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.', 'revision_request': 'Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.', 'revision': 'I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.'}, {'input_prompt': 'is it really necessary to ask for consent during sex every time? Answer:', 'output_from_model': 'While it is ideal to ask for consent every time before engaging in a sexual act, it isn’t strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, it’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.', 'critique_request': 'Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.', 'critique': 'The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model’s response could be interpreted as saying that it isn’t strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent. Critique Needed.', 'revision_request': 'Please rewrite the model
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-9
are too young to give consent. Critique Needed.', 'revision_request': 'Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.', 'revision': 'Yes, I think it is absolutely unethical to have sex with someone without their consent. It’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.'}, {'input_prompt': 'Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?', 'output_from_model': 'Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600’s. It’s explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.', 'critique_request': 'Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness', 'critique': 'Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I’m not sure that the precession measurement was actually made in the 1600’s, but was probably made at least 100 years ago. I’m also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also,
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-10
solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I’m not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I’m pretty sure it decays more quickly than Newton’s law, and the Chern-Simons theorem is probably just wrong. Critique Needed.', 'revision_request': 'Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you’re unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.', 'revision': 'Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that is smaller and decays more quickly than Newton’s law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.'}, {'input_prompt': "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'", 'output_from_model': 'Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.', 'critique_request': "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.",
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-11
identify specific ways in which the model's response is not in the style of Master Yoda.", 'critique': "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.", 'revision_request': 'Please rewrite the model response to more closely mimic the style of Master Yoda.', 'revision': 'No revisions needed.'}], example_selector=None, example_prompt=PromptTemplate(input_variables=['input_prompt', 'output_from_model', 'critique_request', 'critique'], output_parser=None, partial_variables={}, template='Human: {input_prompt}\n\nModel: {output_from_model}\n\nCritique Request: {critique_request}\n\nCritique: {critique}', template_format='f-string', validate_template=True), suffix='Human: {input_prompt}\n\nModel: {output_from_model}\n\nCritique Request: {critique_request}\n\nCritique: {critique}\n\nIf the critique does not identify anything worth changing, ignore the Revision Request and do not make any revisions. Instead, return "No revisions needed".\n\nIf the critique does identify something worth changing, please revise the model response based on the Revision Request.\n\nRevision Request: {revision_request}\n\nRevision:', example_separator='\n === \n', prefix='Below is a conversation between a human and an AI model.', template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.constitutional_ai.base.ConstitutionalChain[source]#
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-12
Create a chain from an LLM. classmethod get_principles(names: Optional[List[str]] = None) → List[langchain.chains.constitutional_ai.models.ConstitutionalPrinciple][source]# property input_keys: List[str]# Defines the input keys. property output_keys: List[str]# Defines the output keys. pydantic model langchain.chains.ConversationChain[source]# Chain to have a conversation and load context from memory. Example from langchain import ConversationChain, OpenAI conversation = ConversationChain(llm=OpenAI()) Validators raise_deprecation » all fields set_verbose » verbose validate_prompt_input_variables » all fields field memory: langchain.schema.BaseMemory [Optional]# Default memory store. field prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template='The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:', template_format='f-string', validate_template=True)# Default conversation prompt to use. property input_keys: List[str]# Use this because some prompt vars come from history. pydantic model langchain.chains.ConversationalRetrievalChain[source]# Chain for chatting with an index. Validators raise_deprecation » all fields set_verbose » verbose field max_tokens_limit: Optional[int] = None# If set, restricts the docs to return from store based on tokens, enforced only for StuffDocumentChain. field retriever: BaseRetriever [Required]# Index to connect to.
https://python.langchain.com/en/latest/reference/modules/chains.html
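Extending the example above with an explicit memory so history accumulates across turns; a minimal sketch assuming an OpenAI API key:

    from langchain import ConversationChain, OpenAI
    from langchain.memory import ConversationBufferMemory

    conversation = ConversationChain(llm=OpenAI(temperature=0), memory=ConversationBufferMemory())
    conversation.predict(input="Hi, my name is Ada.")
    # The second turn sees the buffered history via the {history} prompt variable.
    print(conversation.predict(input="What is my name?"))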
8421cefce063-13
field retriever: BaseRetriever [Required]# Index to connect to. classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, retriever: langchain.schema.BaseRetriever, condense_question_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['chat_history', 'question'], output_parser=None, partial_variables={}, template='Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:', template_format='f-string', validate_template=True), chain_type: str = 'stuff', verbose: bool = False, condense_question_llm: Optional[langchain.base_language.BaseLanguageModel] = None, combine_docs_chain_kwargs: Optional[Dict] = None, **kwargs: Any) → langchain.chains.conversational_retrieval.base.BaseConversationalRetrievalChain[source]# Load chain from LLM. pydantic model langchain.chains.FlareChain[source]# Validators raise_deprecation » all fields set_verbose » verbose field max_iter: int = 10# field min_prob: float = 0.2# field min_token_gap: int = 5# field num_pad_tokens: int = 2# field output_parser: FinishedOutputParser [Optional]# field question_generator_chain: QuestionGeneratorChain [Required]# field response_chain: _ResponseChain [Optional]# field retriever: BaseRetriever [Required]# field start_with_retrieval: bool = True# classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, max_generation_len: int = 32, **kwargs: Any) → langchain.chains.flare.base.FlareChain[source]#
https://python.langchain.com/en/latest/reference/modules/chains.html
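A minimal sketch of from_llm with chat history passed alongside the question; the retriever contents are illustrative, and faiss-cpu plus an OpenAI API key are assumed:

    from langchain.chains import ConversationalRetrievalChain
    from langchain.chat_models import ChatOpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    retriever = FAISS.from_texts(
        ["LangChain was released in October 2022."], OpenAIEmbeddings()
    ).as_retriever()

    qa = ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0), retriever=retriever)
    # The chain first condenses (question + chat_history) into a standalone question.
    chat_history = [("What is LangChain?", "A framework for building LLM applications.")]
    result = qa({"question": "When was it released?", "chat_history": chat_history})
    print(result["answer"])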
8421cefce063-14
property input_keys: List[str]# Input keys this chain expects. property output_keys: List[str]# Output keys this chain expects. pydantic model langchain.chains.GraphCypherQAChain[source]# Chain for question-answering against a graph by generating Cypher statements. Validators raise_deprecation » all fields set_verbose » verbose field cypher_generation_chain: LLMChain [Required]# field graph: Neo4jGraph [Required]# field qa_chain: LLMChain [Required]#
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-15
field qa_chain: LLMChain [Required]# classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, *, qa_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['context', 'question'], output_parser=None, partial_variables={}, template="You are an assistant that helps to form nice and human understandable answers.\nThe information part contains the provided information that you can use to construct an answer.\nThe provided information is authorative, you must never doubt it or try to use your internal knowledge to correct it.\nMake it sound like the information are coming from an AI assistant, but don't add any information.\nInformation:\n{context}\n\nQuestion: {question}\nHelpful Answer:", template_format='f-string', validate_template=True), cypher_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['schema', 'question'], output_parser=None, partial_variables={}, template='Task:Generate Cypher statement to query a graph database.\nInstructions:\nUse only the provided relationship types and properties in the schema.\nDo not use any other relationship types or properties that are not provided.\nSchema:\n{schema}\nNote: Do not include any explanations or apologies in your responses.\nDo not respond to any questions that might ask anything else than for you to construct a Cypher statement.\nDo not include any text except the generated Cypher statement.\n\nThe question is:\n{question}', template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.graph_qa.cypher.GraphCypherQAChain[source]# Initialize from LLM. pydantic model langchain.chains.GraphQAChain[source]# Chain for question-answering against a graph. Validators raise_deprecation » all fields set_verbose » verbose field entity_extraction_chain: LLMChain [Required]#
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-16
set_verbose » verbose field entity_extraction_chain: LLMChain [Required]# field graph: NetworkxEntityGraph [Required]# field qa_chain: LLMChain [Required]# classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, qa_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['context', 'question'], output_parser=None, partial_variables={}, template="Use the following knowledge triplets to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:", template_format='f-string', validate_template=True), entity_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['input'], output_parser=None, partial_variables={}, template="Extract all entities from the following text. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.\n\nReturn the output as a single comma-separated list, or NONE if there is nothing of note to return.\n\nEXAMPLE\ni'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.\nOutput: Langchain\nEND OF EXAMPLE\n\nEXAMPLE\ni'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Sam.\nOutput: Langchain, Sam\nEND OF EXAMPLE\n\nBegin!\n\n{input}\nOutput:", template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.graph_qa.base.GraphQAChain[source]# Initialize from LLM. pydantic model langchain.chains.HypotheticalDocumentEmbedder[source]#
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-17
pydantic model langchain.chains.HypotheticalDocumentEmbedder[source]# Generate hypothetical document for query, and then embed that. Based on https://arxiv.org/abs/2212.10496 Validators raise_deprecation » all fields set_verbose » verbose field base_embeddings: Embeddings [Required]# field llm_chain: LLMChain [Required]# combine_embeddings(embeddings: List[List[float]]) → List[float][source]# Combine embeddings into final embeddings. embed_documents(texts: List[str]) → List[List[float]][source]# Call the base embeddings. embed_query(text: str) → List[float][source]# Generate a hypothetical document and embed it. classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, base_embeddings: langchain.embeddings.base.Embeddings, prompt_key: str, **kwargs: Any) → langchain.chains.hyde.base.HypotheticalDocumentEmbedder[source]# Load and use LLMChain for a specific prompt key. property input_keys: List[str]# Input keys for Hyde’s LLM chain. property output_keys: List[str]# Output keys for Hyde’s LLM chain. pydantic model langchain.chains.LLMBashChain[source]# Chain that interprets a prompt and executes bash code to perform bash operations. Example from langchain import LLMBashChain, OpenAI llm_bash = LLMBashChain.from_llm(OpenAI()) Validators raise_deprecation » all fields raise_deprecation » all fields set_verbose » verbose validate_prompt » all fields field llm: Optional[BaseLanguageModel] = None# [Deprecated] LLM wrapper to use. field llm_chain: LLMChain [Required]#
https://python.langchain.com/en/latest/reference/modules/chains.html
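A minimal sketch of HyDE: the LLM drafts a hypothetical answer, and that draft is what gets embedded. The 'web_search' prompt key is assumed to be one of the bundled defaults, and an OpenAI API key is assumed:

    from langchain.chains import HypotheticalDocumentEmbedder
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI

    embedder = HypotheticalDocumentEmbedder.from_llm(
        llm=OpenAI(temperature=0),
        base_embeddings=OpenAIEmbeddings(),
        prompt_key="web_search",  # assumed bundled HyDE prompt key
    )
    # embed_query generates a hypothetical document, then embeds it with base_embeddings.
    vector = embedder.embed_query("Where is the Taj Mahal?")
    print(len(vector))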
8421cefce063-18
field llm_chain: LLMChain [Required]# field prompt: BasePromptTemplate = PromptTemplate(input_variables=['question'], output_parser=BashOutputParser(), partial_variables={}, template='If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format:\n\nQuestion: "copy the files in the directory named \'target\' into a new directory at the same level as target called \'myNewDirectory\'"\n\nI need to take the following actions:\n- List all files in the directory\n- Create a new directory\n- Copy the files from the first directory into the second directory\n```bash\nls\nmkdir myNewDirectory\ncp -r target/* myNewDirectory\n```\n\nThat is the format. Begin!\n\nQuestion: {question}', template_format='f-string', validate_template=True)# [Deprecated]
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-19
[Deprecated] classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['question'], output_parser=BashOutputParser(), partial_variables={}, template='If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format:\n\nQuestion: "copy the files in the directory named \'target\' into a new directory at the same level as target called \'myNewDirectory\'"\n\nI need to take the following actions:\n- List all files in the directory\n- Create a new directory\n- Copy the files from the first directory into the second directory\n```bash\nls\nmkdir myNewDirectory\ncp -r target/* myNewDirectory\n```\n\nThat is the format. Begin!\n\nQuestion: {question}', template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.llm_bash.base.LLMBashChain[source]# pydantic model langchain.chains.LLMChain[source]# Chain to run queries against LLMs. Example from langchain import LLMChain, OpenAI, PromptTemplate prompt_template = "Tell me a {adjective} joke" prompt = PromptTemplate( input_variables=["adjective"], template=prompt_template ) llm = LLMChain(llm=OpenAI(), prompt=prompt) Validators raise_deprecation » all fields set_verbose » verbose field llm: BaseLanguageModel [Required]# field prompt: BasePromptTemplate [Required]# Prompt object to use.
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-20
field prompt: BasePromptTemplate [Required]# Prompt object to use. async aapply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → List[Dict[str, str]][source]# Utilize the LLM generate method for speed gains. async aapply_and_parse(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → Sequence[Union[str, List[str], Dict[str, str]]][source]# Call apply and then parse the results. async agenerate(input_list: List[Dict[str, Any]], run_manager: Optional[langchain.callbacks.manager.AsyncCallbackManagerForChainRun] = None) → langchain.schema.LLMResult[source]# Generate LLM result from inputs. apply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → List[Dict[str, str]][source]# Utilize the LLM generate method for speed gains. apply_and_parse(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → Sequence[Union[str, List[str], Dict[str, str]]][source]# Call apply and then parse the results. async apredict(callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → str[source]# Format prompt with kwargs and pass to LLM. Parameters callbacks – Callbacks to pass to LLMChain
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-21
Parameters callbacks – Callbacks to pass to LLMChain **kwargs – Keys to pass to prompt template. Returns Completion from LLM. Example completion = llm.predict(adjective="funny") async apredict_and_parse(callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[str, List[str], Dict[str, str]][source]# Call apredict and then parse the results. async aprep_prompts(input_list: List[Dict[str, Any]], run_manager: Optional[langchain.callbacks.manager.AsyncCallbackManagerForChainRun] = None) → Tuple[List[langchain.schema.PromptValue], Optional[List[str]]][source]# Prepare prompts from inputs. create_outputs(response: langchain.schema.LLMResult) → List[Dict[str, str]][source]# Create outputs from response. classmethod from_string(llm: langchain.base_language.BaseLanguageModel, template: str) → langchain.chains.base.Chain[source]# Create LLMChain from LLM and template. generate(input_list: List[Dict[str, Any]], run_manager: Optional[langchain.callbacks.manager.CallbackManagerForChainRun] = None) → langchain.schema.LLMResult[source]# Generate LLM result from inputs. predict(callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → str[source]# Format prompt with kwargs and pass to LLM. Parameters callbacks – Callbacks to pass to LLMChain **kwargs – Keys to pass to prompt template. Returns Completion from LLM. Example completion = llm.predict(adjective="funny")
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-22
Completion from LLM. Example completion = llm.predict(adjective="funny") predict_and_parse(callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Union[str, List[str], Dict[str, Any]][source]# Call predict and then parse the results. prep_prompts(input_list: List[Dict[str, Any]], run_manager: Optional[langchain.callbacks.manager.CallbackManagerForChainRun] = None) → Tuple[List[langchain.schema.PromptValue], Optional[List[str]]][source]# Prepare prompts from inputs. pydantic model langchain.chains.LLMCheckerChain[source]# Chain for question-answering with self-verification. Example from langchain import OpenAI, LLMCheckerChain llm = OpenAI(temperature=0.7) checker_chain = LLMCheckerChain.from_llm(llm) Validators raise_deprecation » all fields raise_deprecation » all fields set_verbose » verbose field check_assertions_prompt: PromptTemplate = PromptTemplate(input_variables=['assertions'], output_parser=None, partial_variables={}, template='Here is a bullet point list of assertions:\n{assertions}\nFor each assertion, determine whether it is true or false. If it is false, explain why.\n\n', template_format='f-string', validate_template=True)# [Deprecated] field create_draft_answer_prompt: PromptTemplate = PromptTemplate(input_variables=['question'], output_parser=None, partial_variables={}, template='{question}\n\n', template_format='f-string', validate_template=True)# [Deprecated]
https://python.langchain.com/en/latest/reference/modules/chains.html
8421cefce063-23
[Deprecated] field list_assertions_prompt: PromptTemplate = PromptTemplate(input_variables=['statement'], output_parser=None, partial_variables={}, template='Here is a statement:\n{statement}\nMake a bullet point list of the assumptions you made when producing the above statement.\n\n', template_format='f-string', validate_template=True)# [Deprecated] field llm: Optional[BaseLanguageModel] = None# [Deprecated] LLM wrapper to use. field question_to_checked_assertions_chain: SequentialChain [Required]# field revised_answer_prompt: PromptTemplate = PromptTemplate(input_variables=['checked_assertions', 'question'], output_parser=None, partial_variables={}, template="{checked_assertions}\n\nQuestion: In light of the above assertions and checks, how would you answer the question '{question}'?\n\nAnswer:", template_format='f-string', validate_template=True)# [Deprecated] Prompt to use when questioning the documents.
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, create_draft_answer_prompt: langchain.prompts.prompt.PromptTemplate = PromptTemplate(input_variables=['question'], output_parser=None, partial_variables={}, template='{question}\n\n', template_format='f-string', validate_template=True), list_assertions_prompt: langchain.prompts.prompt.PromptTemplate = PromptTemplate(input_variables=['statement'], output_parser=None, partial_variables={}, template='Here is a statement:\n{statement}\nMake a bullet point list of the assumptions you made when producing the above statement.\n\n', template_format='f-string', validate_template=True), check_assertions_prompt: langchain.prompts.prompt.PromptTemplate = PromptTemplate(input_variables=['assertions'], output_parser=None, partial_variables={}, template='Here is a bullet point list of assertions:\n{assertions}\nFor each assertion, determine whether it is true or false. If it is false, explain why.\n\n', template_format='f-string', validate_template=True), revised_answer_prompt: langchain.prompts.prompt.PromptTemplate = PromptTemplate(input_variables=['checked_assertions', 'question'], output_parser=None, partial_variables={}, template="{checked_assertions}\n\nQuestion: In light of the above assertions and checks, how would you answer the question '{question}'?\n\nAnswer:", template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.llm_checker.base.LLMCheckerChain[source]# pydantic model langchain.chains.LLMMathChain[source]# Chain that interprets a prompt and executes Python code to do math. Example from langchain import LLMMathChain, OpenAI llm_math = LLMMathChain.from_llm(OpenAI()) Validators raise_deprecation » all fields set_verbose » verbose
field llm: Optional[BaseLanguageModel] = None# [Deprecated] LLM wrapper to use. field llm_chain: LLMChain [Required]# field prompt: BasePromptTemplate = PromptTemplate(input_variables=['question'], output_parser=None, partial_variables={}, template='Translate a math problem into a expression that can be executed using Python\'s numexpr library. Use the output of running this code to answer the question.\n\nQuestion: ${{Question with math problem.}}\n```text\n${{single line mathematical expression that solves the problem}}\n```\n...numexpr.evaluate(text)...\n```output\n${{Output of running the code}}\n```\nAnswer: ${{Answer}}\n\nBegin.\n\nQuestion: What is 37593 * 67?\n```text\n37593 * 67\n```\n...numexpr.evaluate("37593 * 67")...\n```output\n2518731\n```\nAnswer: 2518731\n\nQuestion: 37593^(1/5)\n```text\n37593**(1/5)\n```\n...numexpr.evaluate("37593**(1/5)")...\n```output\n8.222831614237718\n```\nAnswer: 8.222831614237718\n\nQuestion: {question}\n', template_format='f-string', validate_template=True)# [Deprecated] Prompt to use to translate to Python if necessary.
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['question'], output_parser=None, partial_variables={}, template='Translate a math problem into a expression that can be executed using Python\'s numexpr library. Use the output of running this code to answer the question.\n\nQuestion: ${{Question with math problem.}}\n```text\n${{single line mathematical expression that solves the problem}}\n```\n...numexpr.evaluate(text)...\n```output\n${{Output of running the code}}\n```\nAnswer: ${{Answer}}\n\nBegin.\n\nQuestion: What is 37593 * 67?\n```text\n37593 * 67\n```\n...numexpr.evaluate("37593 * 67")...\n```output\n2518731\n```\nAnswer: 2518731\n\nQuestion: 37593^(1/5)\n```text\n37593**(1/5)\n```\n...numexpr.evaluate("37593**(1/5)")...\n```output\n8.222831614237718\n```\nAnswer: 8.222831614237718\n\nQuestion: {question}\n', template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.llm_math.base.LLMMathChain[source]# pydantic model langchain.chains.LLMRequestsChain[source]# Chain that hits a URL and then uses an LLM to parse results. Validators raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field llm_chain: LLMChain [Required]# field requests_wrapper: TextRequestsWrapper [Optional]#
field text_length: int = 8000# pydantic model langchain.chains.LLMSummarizationCheckerChain[source]# Chain for summarization with self-verification. Example from langchain import OpenAI, LLMSummarizationCheckerChain llm = OpenAI(temperature=0.0) checker_chain = LLMSummarizationCheckerChain.from_llm(llm) Validators raise_deprecation » all fields set_verbose » verbose field are_all_true_prompt: PromptTemplate = PromptTemplate(input_variables=['checked_assertions'], output_parser=None, partial_variables={}, template='Below are some assertions that have been fact checked and are labeled as true or false.\n\nIf all of the assertions are true, return "True". If any of the assertions are false, return "False".\n\nHere are some examples:\n===\n\nChecked Assertions: """\n- The sky is red: False\n- Water is made of lava: False\n- The sun is a star: True\n"""\nResult: False\n\n===\n\nChecked Assertions: """\n- The sky is blue: True\n- Water is wet: True\n- The sun is a star: True\n"""\nResult: True\n\n===\n\nChecked Assertions: """\n- The sky is blue - True\n- Water is made of lava- False\n- The sun is a star - True\n"""\nResult: False\n\n===\n\nChecked Assertions:"""\n{checked_assertions}\n"""\nResult:', template_format='f-string', validate_template=True)# [Deprecated]
field check_assertions_prompt: PromptTemplate = PromptTemplate(input_variables=['assertions'], output_parser=None, partial_variables={}, template='You are an expert fact checker. You have been hired by a major news organization to fact check a very important story.\n\nHere is a bullet point list of facts:\n"""\n{assertions}\n"""\n\nFor each fact, determine whether it is true or false about the subject. If you are unable to determine whether the fact is true or false, output "Undetermined".\nIf the fact is false, explain why.\n\n', template_format='f-string', validate_template=True)# [Deprecated] field create_assertions_prompt: PromptTemplate = PromptTemplate(input_variables=['summary'], output_parser=None, partial_variables={}, template='Given some text, extract a list of facts from the text.\n\nFormat your output as a bulleted list.\n\nText:\n"""\n{summary}\n"""\n\nFacts:', template_format='f-string', validate_template=True)# [Deprecated] field llm: Optional[BaseLanguageModel] = None# [Deprecated] LLM wrapper to use. field max_checks: int = 2# Maximum number of times to check the assertions. Defaults to double-checking.
field revised_summary_prompt: PromptTemplate = PromptTemplate(input_variables=['checked_assertions', 'summary'], output_parser=None, partial_variables={}, template='Below are some assertions that have been fact checked and are labeled as true or false. If the answer is false, a suggestion is given for a correction.\n\nChecked Assertions:\n"""\n{checked_assertions}\n"""\n\nOriginal Summary:\n"""\n{summary}\n"""\n\nUsing these checked assertions, rewrite the original summary to be completely true.\n\nThe output should have the same structure and formatting as the original summary.\n\nSummary:', template_format='f-string', validate_template=True)# [Deprecated] field sequential_chain: SequentialChain [Required]#
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, create_assertions_prompt: langchain.prompts.prompt.PromptTemplate = PromptTemplate(input_variables=['summary'], output_parser=None, partial_variables={}, template='Given some text, extract a list of facts from the text.\n\nFormat your output as a bulleted list.\n\nText:\n"""\n{summary}\n"""\n\nFacts:', template_format='f-string', validate_template=True), check_assertions_prompt: langchain.prompts.prompt.PromptTemplate = PromptTemplate(input_variables=['assertions'], output_parser=None, partial_variables={}, template='You are an expert fact checker. You have been hired by a major news organization to fact check a very important story.\n\nHere is a bullet point list of facts:\n"""\n{assertions}\n"""\n\nFor each fact, determine whether it is true or false about the subject. If you are unable to determine whether the fact is true or false, output "Undetermined".\nIf the fact is false, explain why.\n\n', template_format='f-string', validate_template=True), revised_summary_prompt: langchain.prompts.prompt.PromptTemplate = PromptTemplate(input_variables=['checked_assertions', 'summary'], output_parser=None, partial_variables={}, template='Below are some assertions that have been fact checked and are labeled as true or false. If the answer is false, a suggestion is given for a correction.\n\nChecked Assertions:\n"""\n{checked_assertions}\n"""\n\nOriginal Summary:\n"""\n{summary}\n"""\n\nUsing these checked assertions, rewrite the original summary to be completely true.\n\nThe output should have the same structure and formatting as the original summary.\n\nSummary:', template_format='f-string', validate_template=True), are_all_true_prompt: langchain.prompts.prompt.PromptTemplate =
PromptTemplate(input_variables=['checked_assertions'], output_parser=None, partial_variables={}, template='Below are some assertions that have been fact checked and are labeled as true or false.\n\nIf all of the assertions are true, return "True". If any of the assertions are false, return "False".\n\nHere are some examples:\n===\n\nChecked Assertions: """\n- The sky is red: False\n- Water is made of lava: False\n- The sun is a star: True\n"""\nResult: False\n\n===\n\nChecked Assertions: """\n- The sky is blue: True\n- Water is wet: True\n- The sun is a star: True\n"""\nResult: True\n\n===\n\nChecked Assertions: """\n- The sky is blue - True\n- Water is made of lava- False\n- The sun is a star - True\n"""\nResult: False\n\n===\n\nChecked Assertions:"""\n{checked_assertions}\n"""\nResult:', template_format='f-string', validate_template=True), verbose: bool = False, **kwargs: Any) → langchain.chains.llm_summarization_checker.base.LLMSummarizationCheckerChain[source]#
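A hedged usage sketch (the input text and the max_checks override are illustrative):
from langchain import OpenAI, LLMSummarizationCheckerChain
llm = OpenAI(temperature=0.0)
checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=2)
# run() extracts assertions from the text, fact-checks them, and rewrites
# the summary until the assertions check out (at most max_checks passes)
revised_summary = checker_chain.run("Mammoths are a living species of elephant found in the Arctic.")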
pydantic model langchain.chains.MapReduceChain[source]# Map-reduce chain. Validators raise_deprecation » all fields set_verbose » verbose field combine_documents_chain: BaseCombineDocumentsChain [Required]# Chain to use to combine documents. field text_splitter: TextSplitter [Required]# Text splitter to use. classmethod from_params(llm: langchain.base_language.BaseLanguageModel, prompt: langchain.prompts.base.BasePromptTemplate, text_splitter: langchain.text_splitter.TextSplitter, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → langchain.chains.mapreduce.MapReduceChain[source]# Construct a map-reduce chain that uses the chain for map and reduce. pydantic model langchain.chains.OpenAIModerationChain[source]# Pass input through a moderation endpoint. To use, you should have the openai Python package installed, and the environment variable OPENAI_API_KEY set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example from langchain.chains import OpenAIModerationChain moderation = OpenAIModerationChain() Validators raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field error: bool = False# Whether or not to error if bad content was found. field model_name: Optional[str] = None# Moderation model name to use. field openai_api_key: Optional[str] = None# field openai_organization: Optional[str] = None# pydantic model langchain.chains.OpenAPIEndpointChain[source]# Chain that interacts with an OpenAPI endpoint using natural language. Validators
raise_deprecation » all fields set_verbose » verbose field api_operation: APIOperation [Required]# field api_request_chain: LLMChain [Required]# field api_response_chain: Optional[LLMChain] = None# field param_mapping: _ParamMapping [Required]# field requests: Requests [Optional]# field return_intermediate_steps: bool = False# deserialize_json_input(serialized_args: str) → dict[source]# Use the serialized TypeScript dictionary. Resolve the path, query params dict, and optional requestBody dict. classmethod from_api_operation(operation: langchain.tools.openapi.utils.api_models.APIOperation, llm: langchain.base_language.BaseLanguageModel, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, return_intermediate_steps: bool = False, raw_response: bool = False, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → langchain.chains.api.openapi.chain.OpenAPIEndpointChain[source]# Create an OpenAPIEndpointChain from an operation and a spec. classmethod from_url_and_method(spec_url: str, path: str, method: str, llm: langchain.base_language.BaseLanguageModel, requests: Optional[langchain.requests.Requests] = None, return_intermediate_steps: bool = False, **kwargs: Any) → langchain.chains.api.openapi.chain.OpenAPIEndpointChain[source]# Create an OpenAPIEndpointChain from a spec at the specified URL. pydantic model langchain.chains.PALChain[source]# Implements Program-Aided Language Models. Validators raise_deprecation » all fields set_verbose » verbose
field get_answer_expr: str = 'print(solution())'# field llm: Optional[BaseLanguageModel] = None# [Deprecated] field llm_chain: LLMChain [Required]#
field prompt: BasePromptTemplate = PromptTemplate(input_variables=['question'], output_parser=None, partial_variables={}, template='Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\n\n# solution in Python:\n\n\ndef solution():\n    """Olivia has $23. She bought five bagels for $3 each. How much money does she have left?"""\n    money_initial = 23\n    bagels = 5\n    bagel_cost = 3\n    money_spent = bagels * bagel_cost\n    money_left = money_initial - money_spent\n    result = money_left\n    return result\n\n\n\n\n\nQ: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\n\n# solution in Python:\n\n\ndef solution():\n    """Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?"""\n    golf_balls_initial = 58\n    golf_balls_lost_tuesday = 23\n    golf_balls_lost_wednesday = 2\n    golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday\n    result = golf_balls_left\n    return result\n\n\n\n\n\nQ: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\n\n# solution in Python:\n\n\ndef solution():\n    """There were nine computers in the server room. Five more computers were installed
each day, from monday to thursday. How many computers are now in the server room?"""\n    computers_initial = 9\n    computers_per_day = 5\n    num_days = 4  # 4 days between monday and thursday\n    computers_added = computers_per_day * num_days\n    computers_total = computers_initial + computers_added\n    result = computers_total\n    return result\n\n\n\n\n\nQ: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\n\n# solution in Python:\n\n\ndef solution():\n    """Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?"""\n    toys_initial = 5\n    mom_toys = 2\n    dad_toys = 2\n    total_received = mom_toys + dad_toys\n    total_toys = toys_initial + total_received\n    result = total_toys\n    return result\n\n\n\n\n\nQ: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\n\n# solution in Python:\n\n\ndef solution():\n    """Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?"""\n    jason_lollipops_initial = 20\n    jason_lollipops_after = 12\n    denny_lollipops = jason_lollipops_initial -
jason_lollipops_after\n    result = denny_lollipops\n    return result\n\n\n\n\n\nQ: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\n\n# solution in Python:\n\n\ndef solution():\n    """Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?"""\n    leah_chocolates = 32\n    sister_chocolates = 42\n    total_chocolates = leah_chocolates + sister_chocolates\n    chocolates_eaten = 35\n    chocolates_left = total_chocolates - chocolates_eaten\n    result = chocolates_left\n    return result\n\n\n\n\n\nQ: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\n\n# solution in Python:\n\n\ndef solution():\n    """If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?"""\n    cars_initial = 3\n    cars_arrived = 2\n    total_cars = cars_initial + cars_arrived\n    result = total_cars\n    return result\n\n\n\n\n\nQ: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\n\n# solution in Python:\n\n\ndef solution():\n    """There are 15 trees in the grove. Grove workers will plant trees in the grove today. After
they are done, there will be 21 trees. How many trees did the grove workers plant today?"""\n    trees_initial = 15\n    trees_after = 21\n    trees_added = trees_after - trees_initial\n    result = trees_added\n    return result\n\n\n\n\n\nQ: {question}\n\n# solution in Python:\n\n\n', template_format='f-string', validate_template=True)#
[Deprecated] field python_globals: Optional[Dict[str, Any]] = None# field python_locals: Optional[Dict[str, Any]] = None# field return_intermediate_steps: bool = False# field stop: str = '\n\n'# classmethod from_colored_object_prompt(llm: langchain.base_language.BaseLanguageModel, **kwargs: Any) → langchain.chains.pal.base.PALChain[source]# Load PAL from colored object prompt. classmethod from_math_prompt(llm: langchain.base_language.BaseLanguageModel, **kwargs: Any) → langchain.chains.pal.base.PALChain[source]# Load PAL from math prompt. pydantic model langchain.chains.QAGenerationChain[source]# Validators raise_deprecation » all fields set_verbose » verbose field input_key: str = 'text'# field k: Optional[int] = None# field llm_chain: LLMChain [Required]# field output_key: str = 'questions'# field text_splitter: TextSplitter = <langchain.text_splitter.RecursiveCharacterTextSplitter object># classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, prompt: Optional[langchain.prompts.base.BasePromptTemplate] = None, **kwargs: Any) → langchain.chains.qa_generation.base.QAGenerationChain[source]# property input_keys: List[str]# Input keys this chain expects. property output_keys: List[str]# Output keys this chain produces. pydantic model langchain.chains.QAWithSourcesChain[source]# Question answering with sources over documents. Validators raise_deprecation » all fields set_verbose » verbose validate_naming » all fields pydantic model langchain.chains.RetrievalQA[source]# Chain for question-answering against an index. Example
from langchain.llms import OpenAI from langchain.chains import RetrievalQA from langchain.vectorstores import FAISS from langchain.vectorstores.base import VectorStoreRetriever retriever = VectorStoreRetriever(vectorstore=FAISS(...)) retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever) Validators raise_deprecation » all fields set_verbose » verbose field retriever: BaseRetriever [Required]# pydantic model langchain.chains.RetrievalQAWithSourcesChain[source]# Question-answering with sources over an index. Validators raise_deprecation » all fields set_verbose » verbose validate_naming » all fields field max_tokens_limit: int = 3375# Restrict the docs to return from store based on tokens, enforced only for StuffDocumentsChain and only if reduce_k_below_max_tokens is set to true. field reduce_k_below_max_tokens: bool = False# Reduce the number of results to return from store based on the token limit. field retriever: langchain.schema.BaseRetriever [Required]# Index to connect to. pydantic model langchain.chains.SQLDatabaseChain[source]# Chain for interacting with a SQL database. Example from langchain import SQLDatabaseChain, OpenAI, SQLDatabase db = SQLDatabase(...) db_chain = SQLDatabaseChain.from_llm(OpenAI(), db) Validators raise_deprecation » all fields set_verbose » verbose field database: SQLDatabase [Required]# SQL database to connect to. field llm: Optional[BaseLanguageModel] = None# [Deprecated] LLM wrapper to use.
field llm_chain: LLMChain [Required]# field prompt: Optional[BasePromptTemplate] = None# [Deprecated] Prompt to use to translate natural language to SQL. field query_checker_prompt: Optional[BasePromptTemplate] = None# The prompt template that should be used by the query checker. field return_direct: bool = False# Whether or not to return the result of querying the SQL table directly. field return_intermediate_steps: bool = False# Whether or not to return the intermediate steps along with the final answer. field top_k: int = 5# Number of results to return from the query. field use_query_checker: bool = False# Whether or not the query checker tool should be used to attempt to fix the initial SQL from the LLM. classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, db: langchain.sql_database.SQLDatabase, prompt: Optional[langchain.prompts.base.BasePromptTemplate] = None, **kwargs: Any) → langchain.chains.sql_database.base.SQLDatabaseChain[source]# pydantic model langchain.chains.SQLDatabaseSequentialChain[source]# Chain for querying an SQL database, implemented as a sequential chain. The chain is as follows: 1. Based on the query, determine which tables to use. 2. Based on those tables, call the normal SQL database chain. This is useful in cases where the number of tables in the database is large. Validators raise_deprecation » all fields set_verbose » verbose field decider_chain: LLMChain [Required]# field return_intermediate_steps: bool = False# field sql_chain: SQLDatabaseChain [Required]#
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, database: langchain.sql_database.SQLDatabase, query_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['input', 'table_info', 'dialect', 'top_k'], output_parser=None, partial_variables={}, template='Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.\n\nNever query for all the columns from a specific table, only ask for a the few relevant columns given the question.\n\nPay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.\n\nUse the following format:\n\nQuestion: Question here\nSQLQuery: SQL Query to run\nSQLResult: Result of the SQLQuery\nAnswer: Final answer here\n\nOnly use the following tables:\n{table_info}\n\nQuestion: {input}', template_format='f-string', validate_template=True), decider_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['query', 'table_names'], output_parser=CommaSeparatedListOutputParser(), partial_variables={}, template='Given the below input question and list of potential tables, output a comma separated list of the table names that may be necessary to answer this question.\n\nQuestion: {query}\n\nTable Names: {table_names}\n\nRelevant Table Names:', template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.sql_database.base.SQLDatabaseSequentialChain[source]#
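A hedged usage sketch (the database URI and the question are illustrative):
from langchain import OpenAI, SQLDatabase
from langchain.chains import SQLDatabaseSequentialChain
db = SQLDatabase.from_uri("sqlite:///example.db")  # assumes an existing SQLite database file
chain = SQLDatabaseSequentialChain.from_llm(OpenAI(temperature=0), database=db)
# step 1 decides which tables are relevant; step 2 runs the normal SQL database chain over them
result = chain.run("How many employees are there?")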