Source code for langchain.agents.conversational.base
"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence
from pydantic import Field
from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.conversational.output_parser import ConvoOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.utils import validate_tools_single_input
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.tools.base import BaseTool
[docs]class ConversationalAgent(Agent):
"""An agent designed to hold a conversation in addition to using tools."""
ai_prefix: str = "AI"
output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
@classmethod
def _get_default_output_parser(
cls, ai_prefix: str = "AI", **kwargs: Any
) -> AgentOutputParser:
return ConvoOutputParser(ai_prefix=ai_prefix)
@property
def _agent_type(self) -> str:
"""Return Identifier of agent type."""
return AgentType.CONVERSATIONAL_REACT_DESCRIPTION
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
[docs] @classmethod
def create_prompt(
cls,
|
https://python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
|
2cbd4a2024d3-1
|
[docs] @classmethod
def create_prompt(
cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        ai_prefix: str = "AI",
        human_prefix: str = "Human",
        input_variables: Optional[List[str]] = None,
    ) -> PromptTemplate:
        """Create prompt in the style of the zero shot agent.

        Args:
            tools: List of tools the agent will have access to, used to format the
                prompt.
            prefix: String to put before the list of tools.
            suffix: String to put after the list of tools.
            ai_prefix: String to use before AI output.
            human_prefix: String to use before human output.
            input_variables: List of input variables the final prompt will expect.

        Returns:
            A PromptTemplate with the template assembled from the pieces here.
        """
        tool_strings = "\n".join(
            [f"> {tool.name}: {tool.description}" for tool in tools]
        )
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(
            tool_names=tool_names, ai_prefix=ai_prefix, human_prefix=human_prefix
        )
        template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
        if input_variables is None:
            input_variables = ["input", "chat_history", "agent_scratchpad"]
        return PromptTemplate(template=template, input_variables=input_variables)
    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        super()._validate_tools(tools)
        validate_tools_single_input(cls.__name__, tools)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        ai_prefix: str = "AI",
        human_prefix: str = "Human",
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            ai_prefix=ai_prefix,
            human_prefix=human_prefix,
            prefix=prefix,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser(
            ai_prefix=ai_prefix
        )
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            ai_prefix=ai_prefix,
            output_parser=_output_parser,
            **kwargs,
        )
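As a minimal usage sketch (the tool selection and memory wiring below are illustrative assumptions, not part of this module), an agent can be built via the from_llm_and_tools classmethod above and wrapped in an AgentExecutor:

from langchain.agents import load_tools
from langchain.agents.agent import AgentExecutor
from langchain.agents.conversational.base import ConversationalAgent
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)

# Build the agent from the classmethod defined above, then wrap it in an
# executor with chat memory so the "chat_history" prompt variable is populated.
agent = ConversationalAgent.from_llm_and_tools(llm=llm, tools=tools)
memory = ConversationBufferMemory(memory_key="chat_history")
executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, memory=memory)
executor.run("What is 2 raised to the 0.5 power?")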
Source: https://python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
Tutorials#
⛓ icon marks a new addition [last update 2023-05-15]
DeepLearning.AI course#
⛓ LangChain for LLM Application Development by Harrison Chase presented by Andrew Ng
Handbook#
LangChain AI Handbook By James Briggs and Francisco Ingham
Tutorials#
LangChain Tutorials by Edrick:
⛓ LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF
LangChain Crash Course: Build an AutoGPT app in 25 minutes by Nicholas Renotte
LangChain Crash Course - Build apps with language models by Patrick Loeber
LangChain Explained in 13 Minutes | QuickStart Tutorial for Beginners by Rabbitmetrics
LangChain for Gen AI and LLMs by James Briggs:
#1 Getting Started with GPT-3 vs. Open Source LLMs
#2 Prompt Templates for GPT 3.5 and other LLMs
#3 LLM Chains using GPT 3.5 and other LLMs
#4 Chatbot Memory for Chat-GPT, Davinci + other LLMs
#5 Chat with OpenAI in LangChain
⛓ #6 Fixing LLM Hallucinations with Retrieval Augmentation in LangChain
⛓ #7 LangChain Agents Deep Dive with GPT 3.5
⛓ #8 Create Custom Tools for Chatbots in LangChain
⛓ #9 Build Conversational Agents with Vector DBs
LangChain 101 by Data Independent:
What Is LangChain? - LangChain + ChatGPT Overview
Quickstart Guide
Beginner Guide To 7 Essential Concepts
OpenAI + Wolfram Alpha
Ask Questions On Your Custom (or Private) Files
Connect Google Drive Files To OpenAI
YouTube Transcripts + OpenAI
Question A 300 Page Book (w/ OpenAI + Pinecone)
Workaround OpenAI's Token Limit With Chain Types
Build Your Own OpenAI + LangChain Web App in 23 Minutes
Working With The New ChatGPT API
OpenAI + LangChain Wrote Me 100 Custom Sales Emails
Structured Output From OpenAI (Clean Dirty Data)
Connect OpenAI To +5,000 Tools (LangChain + Zapier)
Use LLMs To Extract Data From Text (Expert Mode)
⛓ Extract Insights From Interview Transcripts Using LLMs
⛓ 5 Levels Of LLM Summarizing: Novice to Expert
LangChain How to and guides by Sam Witteveen:
LangChain Basics - LLMs & PromptTemplates with Colab
LangChain Basics - Tools and Chains
ChatGPT API Announcement & Code Walkthrough with LangChain
Conversations with Memory (explanation & code walkthrough)
Chat with Flan20B
Using Hugging Face Models locally (code walkthrough)
PAL : Program-aided Language Models with LangChain code
Building a Summarization System with LangChain and GPT-3 - Part 1
Building a Summarization System with LangChain and GPT-3 - Part 2
Microsoft’s Visual ChatGPT using LangChain
LangChain Agents - Joining Tools and Chains with Decisions
Comparing LLMs with LangChain
Using Constitutional AI in LangChain
Talking to Alpaca with LangChain - Creating an Alpaca Chatbot
Talk to your CSV & Excel with LangChain
BabyAGI: Discover the Power of Task-Driven Autonomous Agents!
Improve your BabyAGI with LangChain
⛓ Master PDF Chat with LangChain - Your essential guide to queries on documents
⛓ Using LangChain with DuckDuckGO Wikipedia & PythonREPL Tools
⛓ Building Custom Tools and Agents with LangChain (gpt-3.5-turbo)
⛓ LangChain Retrieval QA Over Multiple Files with ChromaDB
⛓ LangChain Retrieval QA with Instructor Embeddings & ChromaDB for PDFs
⛓ LangChain + Retrieval Local LLMs for Retrieval QA - No OpenAI!!!
LangChain by Prompt Engineering:
LangChain Crash Course — All You Need to Know to Build Powerful Apps with LLMs
Working with MULTIPLE PDF Files in LangChain: ChatGPT for your Data
ChatGPT for YOUR OWN PDF files with LangChain
Talk to YOUR DATA without OpenAI APIs: LangChain
⛓️ CHATGPT For WEBSITES: Custom ChatBOT
LangChain by Chat with data:
LangChain Beginner’s Tutorial for Typescript/Javascript
GPT-4 Tutorial: How to Chat With Multiple PDF Files (~1000 pages of Tesla’s 10-K Annual Reports)
GPT-4 & LangChain Tutorial: How to Chat With A 56-Page PDF Document (w/Pinecone)
⛓ LangChain & Supabase Tutorial: How to Build a ChatGPT Chatbot For Your Website
Get SH*T Done with Prompt Engineering and LangChain by Venelin Valkov:
Getting Started with LangChain: Load Custom Data, Run OpenAI Models, Embeddings and ChatGPT
Loaders, Indexes & Vectorstores in LangChain: Question Answering on PDF files with ChatGPT
LangChain Models: ChatGPT, Flan Alpaca, OpenAI Embeddings, Prompt Templates & Streaming
LangChain Chains: Use ChatGPT to Build Conversational Agents, Summaries and Q&A on Text With LLMs
Analyze Custom CSV Data with GPT-4 using Langchain
⛓ Build ChatGPT Chatbots with LangChain Memory: Understanding and Implementing Memory in Conversations
Source: https://python.langchain.com/en/latest/getting_started/tutorials.html
Concepts#
These are concepts and terminology commonly used when developing LLM applications.
Each entry references the external papers or sources where the concept was first introduced,
as well as the places in LangChain where the concept is used.
Chain of Thought#
Chain of Thought (CoT) is a prompting technique used to encourage the model to generate a series of intermediate reasoning steps.
A less formal way to induce this behavior is to include “Let’s think step-by-step” in the prompt (a minimal sketch follows the references below).
Chain-of-Thought Paper
Step-by-Step Paper
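As a minimal sketch (the question and prompt wording are illustrative, not taken from the papers), the step-by-step cue can be appended to any question:

from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

# The trailing cue nudges the model to emit intermediate reasoning steps.
prompt = PromptTemplate(
    input_variables=["question"],
    template="{question}\n\nLet's think step-by-step.",
)
llm = OpenAI(temperature=0)
print(llm(prompt.format(question="I have 3 boxes with 4 apples each. How many apples in total?")))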
Action Plan Generation#
Action Plan Generation is a prompting technique that uses a language model to generate actions to take.
The results of these actions can then be fed back into the language model to generate a subsequent action.
WebGPT Paper
SayCan Paper
ReAct#
ReAct is a prompting technique that combines Chain-of-Thought prompting with action plan generation.
This induces the model to think about what action to take, then take it.
Paper
LangChain Example
Self-ask#
Self-ask is a prompting method that builds on top of chain-of-thought prompting.
In this method, the model explicitly asks itself follow-up questions, which are then answered by an external search engine.
Paper
LangChain Example
Prompt Chaining#
Prompt Chaining is combining multiple LLM calls, with the output of one step being the input to the next (a minimal sketch follows the references below).
PromptChainer Paper
Language Model Cascades
ICE Primer Book
Socratic Models
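A minimal sketch of the idea with LangChain’s SimpleSequentialChain (the prompts are illustrative assumptions): the first call names a company, and its output feeds the second call.

from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SimpleSequentialChain

llm = OpenAI(temperature=0.7)
# Step 1: generate a company name from a product description.
name_chain = LLMChain(llm=llm, prompt=PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
))
# Step 2: the generated name becomes the input of the slogan prompt.
slogan_chain = LLMChain(llm=llm, prompt=PromptTemplate(
    input_variables=["company"],
    template="Write a slogan for the company {company}.",
))
overall = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True)
print(overall.run("colorful socks"))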
Memetic Proxy#
Memetic Proxy is encouraging the LLM to respond in a certain way by framing the discussion in a context that the model knows of and that will result in that type of response.
For example, as a conversation between a student and a teacher.
Paper
Self Consistency#
Self Consistency is a decoding strategy that samples a diverse set of reasoning paths and then selects the most consistent answer.
It is most effective when combined with chain-of-thought prompting (a minimal sketch follows the reference below).
Paper
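A minimal sketch of the idea (the question, sampling count, and answer parsing are illustrative assumptions): sample several reasoning paths at a nonzero temperature and take a majority vote over the final answers.

from collections import Counter
from langchain.llms import OpenAI

llm = OpenAI(temperature=0.7)  # temperature > 0 so sampled reasoning paths differ
question = (
    "A bat and a ball cost $1.10 in total. The bat costs $1.00 more than the ball. "
    "How much does the ball cost?\nLet's think step-by-step. End with 'Answer: <value>'."
)

# Sample several reasoning paths and keep only the final answer from each.
answers = []
for _ in range(5):
    completion = llm(question)
    answers.append(completion.rsplit("Answer:", 1)[-1].strip())

# Self consistency: pick the most common final answer across the samples.
print(Counter(answers).most_common(1)[0][0])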
Inception#
Inception is also called First Person Instruction.
It encourages the model to think a certain way by including the start of the model’s response in the prompt (a minimal sketch follows the example link below).
Example
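As a minimal sketch (the prompt wording is an illustrative assumption), the prompt ends with the opening of the model’s own reply, so the completion continues in that frame:

from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
# The prompt already contains the start of the AI's answer, steering the completion.
prompt = "How do I make a cup of tea?\nAI: Sure, here are three steps:\n1."
print(llm(prompt))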
MemPrompt#
MemPrompt maintains a memory of errors and user feedback, and uses them to prevent repetition of mistakes.
Paper
Source: https://python.langchain.com/en/latest/getting_started/concepts.html
Quickstart Guide#
This tutorial gives you a quick walkthrough of building an end-to-end language model application with LangChain.
Installation#
To get started, install LangChain with the following command:
pip install langchain
# or
conda install langchain -c conda-forge
Environment Setup#
Using LangChain will usually require integrations with one or more model providers, data stores, APIs, etc.
For this example, we will be using OpenAI’s APIs, so we will first need to install their SDK:
pip install openai
We will then need to set the environment variable in the terminal.
export OPENAI_API_KEY="..."
Alternatively, you could do this from inside the Jupyter notebook (or Python script):
import os
os.environ["OPENAI_API_KEY"] = "..."
If you want to set the API key dynamically, you can use the openai_api_key parameter when initializing the OpenAI class, for instance with each user’s API key.
from langchain.llms import OpenAI
llm = OpenAI(openai_api_key="OPENAI_API_KEY")
Building a Language Model Application: LLMs#
Now that we have installed LangChain and set up our environment, we can start building our language model application.
LangChain provides many modules that can be used to build language model applications. Modules can be combined to create more complex applications, or be used individually for simple applications.
LLMs: Get predictions from a language model#
The most basic building block of LangChain is calling an LLM on some input.
Let’s walk through a simple example of how to do this.
For this purpose, let’s pretend we are building a service that generates a company name based on what the company makes.
In order to do this, we first need to import the LLM wrapper.
from langchain.llms import OpenAI
We can then initialize the wrapper with any arguments.
In this example, we probably want the outputs to be MORE random, so we’ll initialize it with a HIGH temperature.
llm = OpenAI(temperature=0.9)
We can now call it on some input!
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
Feetful of Fun
For more details on how to use LLMs within LangChain, see the LLM getting started guide.
Prompt Templates: Manage prompts for LLMs#
Calling an LLM is a great first step, but it’s just the beginning.
Normally when you use an LLM in an application, you are not sending user input directly to the LLM.
Instead, you are probably taking user input and constructing a prompt, and then sending that to the LLM.
For example, in the previous example, the text we passed in was hardcoded to ask for a name for a company that made colorful socks.
In this imaginary service, what we would want to do is take only the user input describing what the company does, and then format the prompt with that information.
This is easy to do with LangChain!
First, let’s define the prompt template:
from langchain.prompts import PromptTemplate
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
Let’s now see how this works! We can call the .format method to format it.
print(prompt.format(product="colorful socks"))
What is a good name for a company that makes colorful socks?
For more details, check out the getting started guide for prompts.
Chains: Combine LLMs and prompts in multi-step workflows#
Up until now, we’ve worked with the PromptTemplate and LLM primitives by themselves. But of course, a real application is not just one primitive, but rather a combination of them.
A chain in LangChain is made up of links, which can be either primitives like LLMs or other chains.
The most core type of chain is an LLMChain, which consists of a PromptTemplate and an LLM.
Extending the previous example, we can construct an LLMChain which takes user input, formats it with a PromptTemplate, and then passes the formatted prompt to an LLM.
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
We can now create a very simple chain that will take user input, format the prompt with it, and then send it to the LLM:
from langchain.chains import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
Now we can run that chain only specifying the product!
chain.run("colorful socks")
# -> '\n\nSocktastic!'
There we go! There’s the first chain - an LLM Chain.
This is one of the simpler types of chains, but understanding how it works will set you up well for working with more complex chains.
For more details, check out the getting started guide for chains.
Agents: Dynamically Call Chains Based on User Input#
So far the chains we’ve looked at run in a predetermined order.
Agents no longer do this: they use an LLM to determine which actions to take and in what order. An action can be either using a tool and observing its output, or returning to the user.
When used correctly, agents can be extremely powerful. In this tutorial, we show you how to easily use agents through the simplest, highest level API.
In order to load agents, you should understand the following concepts:
Tool: A function that performs a specific duty. This can be things like: Google Search, Database lookup, Python REPL, other chains. The interface for a tool is currently a function that is expected to have a string as an input, with a string as an output.
LLM: The language model powering the agent.
Agent: The agent to use. This should be a string that references a supported agent class. Because this notebook focuses on the simplest, highest level API, this only covers using the standard supported agents. If you want to implement a custom agent, see the documentation for custom agents (coming soon).
Agents: For a list of supported agents and their specifications, see here.
Tools: For a list of predefined tools and their specifications, see here.
For this example, you will also need to install the SerpAPI Python package.
pip install google-search-results
And set the appropriate environment variables.
import os
os.environ["SERPAPI_API_KEY"] = "..."
Now we can get started!
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
# First, let's load the language model we're going to use to control the agent.
llm = OpenAI(temperature=0)
# Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# Now let's test it out!
agent.run("What was the high temperature in SF yesterday in Fahrenheit? What is that number raised to the .023 power?")
> Entering new AgentExecutor chain...
I need to find the temperature first, then use the calculator to raise it to the .023 power.
Action: Search
Action Input: "High temperature in SF yesterday"
Observation: San Francisco Temperature Yesterday. Maximum temperature yesterday: 57 °F (at 1:56 pm) Minimum temperature yesterday: 49 °F (at 1:56 am) Average temperature ...
Thought: I now have the temperature, so I can use the calculator to raise it to the .023 power.
Action: Calculator
Action Input: 57^.023
Observation: Answer: 1.0974509573251117
Thought: I now know the final answer
Final Answer: The high temperature in SF yesterday in Fahrenheit raised to the .023 power is 1.0974509573251117.
> Finished chain.
Memory: Add State to Chains and Agents#
So far, all the chains and agents we’ve gone through have been stateless. But often, you may want a chain or agent to have some concept of “memory” so that it may remember information about its previous interactions. The clearest and simplest example of this is when designing a chatbot - you want it to remember previous messages so it can use context from that to have a better conversation. This would be a type of “short-term memory”. On the more complex side, you could imagine a chain/agent remembering key pieces of information over time - this would be a form of “long-term memory”. For more concrete ideas on the latter, see this awesome paper.
LangChain provides several specially created chains just for this purpose. This notebook walks through using one of those chains (the ConversationChain) with two different types of memory.
By default, the ConversationChain has a simple type of memory that remembers all previous inputs/outputs and adds them to the context that is passed. Let’s take a look at using this chain (setting verbose=True so we can see the prompt).
from langchain import OpenAI, ConversationChain
llm = OpenAI(temperature=0)
conversation = ConversationChain(llm=llm, verbose=True)
output = conversation.predict(input="Hi there!")
print(output)
> Entering new chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI:
> Finished chain.
' Hello! How are you today?'
output = conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
print(output)
> Entering new chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI: Hello! How are you today?
Human: I'm doing well! Just having a conversation with an AI.
AI:
> Finished chain.
" That's great! What would you like to talk about?"
Building a Language Model Application: Chat Models#
Similarly, you can use chat models instead of LLMs. Chat models are a variation on language models. While chat models use language models under the hood, the interface they expose is a bit different: rather than expose a “text in, text out” API, they expose an interface where “chat messages” are the inputs and outputs.
Chat model APIs are fairly new, so we are still figuring out the correct abstractions.
Get Message Completions from a Chat Model#
You can get chat completions by passing one or more messages to the chat model. The response will be a message. The types of messages currently supported in LangChain are AIMessage, HumanMessage, SystemMessage, and ChatMessage – ChatMessage takes in an arbitrary role parameter. Most of the time, you’ll just be dealing with HumanMessage, AIMessage, and SystemMessage.
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
chat = ChatOpenAI(temperature=0)
You can get completions by passing in a single message.
chat([HumanMessage(content="Translate this sentence from English to French. I love programming.")])
# -> AIMessage(content="J'aime programmer.", additional_kwargs={})
You can also pass in multiple messages for OpenAI’s gpt-3.5-turbo and gpt-4 models.
messages = [
SystemMessage(content="You are a helpful assistant that translates English to French."),
HumanMessage(content="I love programming.")
]
chat(messages)
# -> AIMessage(content="J'aime programmer.", additional_kwargs={})
You can go one step further and generate completions for multiple sets of messages using generate. This returns an LLMResult with an additional message parameter:
batch_messages = [
[
SystemMessage(content="You are a helpful assistant that translates English to French."),
HumanMessage(content="I love programming.")
],
[
SystemMessage(content="You are a helpful assistant that translates English to French."),
HumanMessage(content="I love artificial intelligence.")
],
]
result = chat.generate(batch_messages)
result
# -> LLMResult(generations=[[ChatGeneration(text="J'aime programmer.", generation_info=None, message=AIMessage(content="J'aime programmer.", additional_kwargs={}))], [ChatGeneration(text="J'aime l'intelligence artificielle.", generation_info=None, message=AIMessage(content="J'aime l'intelligence artificielle.", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}})
You can recover things like token usage from this LLMResult:
result.llm_output['token_usage']
# -> {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}
Chat Prompt Templates#
Similar to LLMs, you can make use of templating by using a MessagePromptTemplate. You can build a ChatPromptTemplate from one or more MessagePromptTemplates. You can use ChatPromptTemplate’s format_prompt – this returns a PromptValue, which you can convert to a string or Message object, depending on whether you want to use the formatted value as input to an llm or chat model.
For convenience, there is a from_template method exposed on the template. If you were to use this template, this is what it would look like:
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
chat = ChatOpenAI(temperature=0)
template = "You are a helpful assistant that translates {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
# get a chat completion from the formatted messages
chat(chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages())
# -> AIMessage(content="J'aime programmer.", additional_kwargs={})
Chains with Chat Models#
The LLMChain discussed in the above section can be used with chat models as well:
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
chat = ChatOpenAI(temperature=0)
template = "You are a helpful assistant that translates {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chain = LLMChain(llm=chat, prompt=chat_prompt)
chain.run(input_language="English", output_language="French", text="I love programming.")
# -> "J'aime programmer."
Agents with Chat Models#
Agents can also be used with chat models. You can initialize one using AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION as the agent type.
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
# First, let's load the language model we're going to use to control the agent.
chat = ChatOpenAI(temperature=0)
# Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.
llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
agent = initialize_agent(tools, chat, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# Now let's test it out!
agent.run("Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?")
> Entering new AgentExecutor chain...
Thought: I need to use a search engine to find Olivia Wilde's boyfriend and a calculator to raise his age to the 0.23 power.
Action:
{
"action": "Search",
"action_input": "Olivia Wilde boyfriend"
}
Observation: Sudeikis and Wilde's relationship ended in November 2020. Wilde was publicly served with court documents regarding child custody while she was presenting Don't Worry Darling at CinemaCon 2022. In January 2021, Wilde began dating singer Harry Styles after meeting during the filming of Don't Worry Darling.
Thought:I need to use a search engine to find Harry Styles' current age.
Action:
{
"action": "Search",
"action_input": "Harry Styles age"
}
Observation: 29 years
Thought:Now I need to calculate 29 raised to the 0.23 power.
Action:
{
"action": "Calculator",
"action_input": "29^0.23"
}
Observation: Answer: 2.169459462491557
Thought:I now know the final answer.
Final Answer: 2.169459462491557
> Finished chain.
'2.169459462491557'
Memory: Add State to Chains and Agents#
You can use Memory with chains and agents initialized with chat models. The main difference between this and Memory for LLMs is that rather than trying to condense all previous messages into a string, we can keep them as their own unique memory object.
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template("The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
llm = ChatOpenAI(temperature=0)
memory = ConversationBufferMemory(return_messages=True)
conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm)
conversation.predict(input="Hi there!")
# -> 'Hello! How can I assist you today?'
conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
# -> "That sounds like fun! I'm happy to chat with you. Is there anything specific you'd like to talk about?"
conversation.predict(input="Tell me about yourself.")
# -> "Sure! I am an AI language model created by OpenAI. I was trained on a large dataset of text from the internet, which allows me to understand and generate human-like language. I can answer questions, provide information, and even have conversations like this one. Is there anything else you'd like to know about me?"
Source: https://python.langchain.com/en/latest/getting_started/getting_started.html
Locally Hosted Setup#
This page contains instructions for installing and then setting up the environment to use the locally hosted version of tracing.
Installation#
Ensure you have Docker installed (see Get Docker) and that it’s running.
Install the latest version of langchain: pip install langchain or pip install langchain -U to upgrade your existing version.
Run langchain-server. This command was installed automatically when you ran the above command (pip install langchain).
This will spin up the server in the terminal, hosted on port 4173 by default.
Once you see the terminal output langchain-langchain-frontend-1 | ➜ Local: http://localhost:4173/, navigate to http://localhost:4173/
You should see a page with your tracing sessions. See the overview page for a walkthrough of the UI.
Currently, trace data is not guaranteed to be persisted between runs of langchain-server. If you want to persist your data, you can mount a volume to the Docker container. See the Docker docs for more info.
To stop the server, press Ctrl+C in the terminal where you ran langchain-server.
Environment Setup#
After installation, you must now set up your environment to use tracing.
This can be done by setting an environment variable in your terminal by running export LANGCHAIN_HANDLER=langchain.
You can also do this by adding the below snippet to the top of every script. IMPORTANT: this must go at the VERY TOP of your script, before you import anything from langchain.
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
Source: https://python.langchain.com/en/latest/tracing/local_installation.html
Tracing Walkthrough#
There are two recommended ways to trace your LangChains:
Setting the LANGCHAIN_TRACING environment variable to “true”.
Using a context manager with tracing_enabled() to trace a particular block of code.
Note that if the environment variable is set, all code will be traced, regardless of whether or not it’s within the context manager.
import os
os.environ["LANGCHAIN_TRACING"] = "true"
## Uncomment below if using hosted setup.
# os.environ["LANGCHAIN_ENDPOINT"] = "https://langchain-api-gateway-57eoxz8z.uc.gateway.dev"
## Uncomment below if you want traces to be recorded to "my_session" instead of "default".
# os.environ["LANGCHAIN_SESSION"] = "my_session"
## Better to set this environment variable in the terminal
## Uncomment below if using hosted version. Replace "my_api_key" with your actual API Key.
# os.environ["LANGCHAIN_API_KEY"] = "my_api_key"
import langchain
from langchain.agents import Tool, initialize_agent, load_tools
from langchain.agents import AgentType
from langchain.callbacks import tracing_enabled
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
# Agent run with tracing. Ensure that OPENAI_API_KEY is set appropriately to run this example.
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?")
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 2^.123243
Observation: Answer: 1.0891804557407723
Thought: I now know the final answer.
Final Answer: 1.0891804557407723
> Finished chain.
'1.0891804557407723'
# Agent run with tracing using a chat model
agent = initialize_agent(
tools, ChatOpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?")
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 2 ^ .123243
Observation: Answer: 1.0891804557407723
Thought:I now know the answer to the question.
Final Answer: 1.0891804557407723
> Finished chain.
'1.0891804557407723'
# Both of the agent runs will be traced because the environment variable is set
agent.run("What is 2 raised to .123243 power?")
with tracing_enabled() as session:
agent.run("What is 5 raised to .123243 power?")
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 2 ^ .123243
Observation: Answer: 1.0891804557407723
Thought:I now know the answer to the question.
Final Answer: 1.0891804557407723
> Finished chain.
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 5 ^ .123243
Observation: Answer: 1.2193914912400514
Thought:I now know the answer to the question.
Final Answer: 1.2193914912400514
> Finished chain.
# Now, we unset the environment variable and use a context manager.
if "LANGCHAIN_TRACING" in os.environ:
del os.environ["LANGCHAIN_TRACING"]
# here, we are writing traces to "my_session"
with tracing_enabled("my_session") as session:
assert session
agent.run("What is 5 raised to .123243 power?") # this should be traced
agent.run("What is 2 raised to .123243 power?") # this should not be traced
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 5 ^ .123243
Observation: Answer: 1.2193914912400514
Thought:I now know the answer to the question.
Final Answer: 1.2193914912400514
> Finished chain.
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 2 ^ .123243
Observation: Answer: 1.0891804557407723
Thought:I now know the answer to the question.
Final Answer: 1.0891804557407723
> Finished chain.
'1.0891804557407723'
# The context manager is concurrency safe:
import asyncio
if "LANGCHAIN_TRACING" in os.environ:
del os.environ["LANGCHAIN_TRACING"]
questions = [f"What is {i} raised to .123 power?" for i in range(1,4)]
# start a background task
task = asyncio.create_task(agent.arun(questions[0])) # this should not be traced
with tracing_enabled() as session:
assert session
tasks = [agent.arun(q) for q in questions[1:3]] # these should be traced
await asyncio.gather(*tasks)
await task
> Entering new AgentExecutor chain...
> Entering new AgentExecutor chain...
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 3^0.123I need to use a calculator to solve this.
Action: Calculator
Action Input: 2^0.123Any number raised to the power of 0 is 1, but I'm not sure about a decimal power.
Action: Calculator
Action Input: 1^.123
Observation: Answer: 1.1446847956963533
Thought:
Observation: Answer: 1.0889970153361064
Thought:
Observation: Answer: 1.0
Thought:
> Finished chain.
> Finished chain.
> Finished chain.
'1.0'
[Beta] Tracing V2#
We are rolling out a newer version of our tracing service with more features coming soon. Here are the instructions on how to use it to trace your runs.
To use it, you can either use the tracing_v2_enabled context manager or set LANGCHAIN_TRACING_V2 = 'true'
Option 1 (Local):
Run the local LangChainPlus Server
pip install --upgrade langchain
langchain plus start
Option 2 (Hosted):
After making an account and grabbing a LangChainPlus API Key, set the LANGCHAIN_ENDPOINT and LANGCHAIN_API_KEY environment variables.
import os
os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_ENDPOINT"] = "https://api.langchain.plus" # Uncomment this line if you want to use the hosted version
# os.environ["LANGCHAIN_API_KEY"] = "<YOUR-LANGCHAINPLUS-API-KEY>" # Uncomment this line if you want to use the hosted version.
import langchain
from langchain.agents import Tool, initialize_agent, load_tools
from langchain.agents import AgentType
from langchain.callbacks import tracing_enabled
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
# Agent run with tracing. Ensure that OPENAI_API_KEY is set appropriately to run this example.
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?")
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 2^.123243
Observation: Answer: 1.0891804557407723
Thought: I now know the final answer.
Final Answer: 1.0891804557407723
> Finished chain.
'1.0891804557407723'
Source: https://python.langchain.com/en/latest/tracing/agent_with_tracing.html
Cloud Hosted Setup#
We offer a hosted version of tracing at langchainplus.vercel.app. You can use this to view traces from your run without having to run the server locally.
Note: we are currently only offering this to a limited number of users. The hosted platform is VERY alpha, in active development, and data might be dropped at any time. Don’t depend on data being persisted in the system long term and don’t log traces that may contain sensitive information. If you’re interested in using the hosted platform, please fill out the form here.
Installation#
Log in to the system and click “API Key” in the top right corner. Generate a new key and keep it safe. You will need it to authenticate with the system.
Environment Setup#
After installation, you must now set up your environment to use tracing.
This can be done by setting an environment variable in your terminal by running export LANGCHAIN_HANDLER=langchain.
You can also do this by adding the below snippet to the top of every script. IMPORTANT: this must go at the VERY TOP of your script, before you import anything from langchain.
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
You will also need to specify the endpoint and your API key. This can be done with the following environment variables:
LANGCHAIN_ENDPOINT - set this to “https://langchain-api-gateway-57eoxz8z.uc.gateway.dev”.
LANGCHAIN_API_KEY - set this to the API key you generated during installation.
An example of adding all relevant environment variables is below:
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
os.environ["LANGCHAIN_ENDPOINT"] = "https://langchain-api-gateway-57eoxz8z.uc.gateway.dev"
os.environ["LANGCHAIN_API_KEY"] = "my_api_key" # Don't commit this to your repo! Better to set it in your terminal.
Source: https://python.langchain.com/en/latest/tracing/hosted_installation.html
Indexes#
Indexes refer to ways to structure documents so that LLMs can best interact with them.
LangChain has a number of modules that help you load, structure, store, and retrieve documents.
Docstore
Text Splitter
Document Loaders
Vector Stores
Retrievers
Document Compressors
Document Transformers
Source: https://python.langchain.com/en/latest/reference/indexes.html
Installation#
Official Releases#
LangChain is available on PyPI, so it is easily installable with:
pip install langchain
That will install the bare minimum requirements of LangChain.
A lot of the value of LangChain comes when integrating it with various model providers, datastores, etc.
By default, the dependencies needed to do that are NOT installed.
However, there are two other ways to install LangChain that do bring in those dependencies.
To install modules needed for the common LLM providers, run:
pip install langchain[llms]
To install all modules needed for all integrations, run:
pip install langchain[all]
Note that if you are using zsh, you’ll need to quote square brackets when passing them as an argument to a command, for example:
pip install 'langchain[all]'
Installing from source#
If you want to install from source, you can do so by cloning the repo and running:
pip install -e .
Source: https://python.langchain.com/en/latest/reference/installation.html
Models#
LangChain provides interfaces and integrations for a number of different types of models.
LLMs
Chat Models
Embeddings
Source: https://python.langchain.com/en/latest/reference/models.html
Agents#
Reference guide for Agents and associated abstractions.
Agents
Tools
Agent Toolkits
Source: https://python.langchain.com/en/latest/reference/agents.html
Prompts#
The reference guides here all relate to objects for working with Prompts.
PromptTemplates
Example Selector
Output Parsers
Source: https://python.langchain.com/en/latest/reference/prompts.html
Tools#
Core toolkit implementations.
pydantic model langchain.tools.AIPluginTool[source]#
field api_spec: str [Required]#
field args_schema: Type[AIPluginToolSchema] = <class 'langchain.tools.plugin.AIPluginToolSchema'>#
Pydantic model class to validate and parse the tool’s input arguments.
field plugin: AIPlugin [Required]#
classmethod from_plugin_url(url: str) → langchain.tools.plugin.AIPluginTool[source]#
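As a minimal sketch (the manifest URL below is a hypothetical placeholder), a plugin tool can be constructed from an ai-plugin.json manifest with the classmethod above:

from langchain.tools import AIPluginTool

# Hypothetical manifest URL; substitute a real plugin manifest.
plugin_tool = AIPluginTool.from_plugin_url(
    "https://example.com/.well-known/ai-plugin.json"
)
print(plugin_tool.name, "-", plugin_tool.description)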
pydantic model langchain.tools.APIOperation[source]#
A model for a single API operation.
field base_url: str [Required]#
The base URL of the operation.
field description: Optional[str] = None#
The description of the operation.
field method: langchain.tools.openapi.utils.openapi_utils.HTTPVerb [Required]#
The HTTP method of the operation.
field operation_id: str [Required]#
The unique identifier of the operation.
field path: str [Required]#
The path of the operation.
field properties: Sequence[langchain.tools.openapi.utils.api_models.APIProperty] [Required]#
field request_body: Optional[langchain.tools.openapi.utils.api_models.APIRequestBody] = None#
The request body of the operation.
classmethod from_openapi_spec(spec: langchain.tools.openapi.utils.openapi_utils.OpenAPISpec, path: str, method: str) → langchain.tools.openapi.utils.api_models.APIOperation[source]#
Create an APIOperation from an OpenAPI spec.
classmethod from_openapi_url(spec_url: str, path: str, method: str) → langchain.tools.openapi.utils.api_models.APIOperation[source]#
Create an APIOperation from an OpenAPI URL.
to_typescript() → str[source]#
Get typescript string representation of the operation.
static ts_type_from_python(type_: Union[str, Type, tuple, None, enum.Enum]) → str[source]#
property body_params: List[str]#
property path_params: List[str]#
property query_params: List[str]#
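As a minimal sketch (the spec URL, path, and method below are hypothetical placeholders), an operation can be built from a live OpenAPI document and rendered for a model:

from langchain.tools import APIOperation

# Hypothetical OpenAPI spec; substitute a real document, path, and method.
operation = APIOperation.from_openapi_url(
    "https://example.com/openapi.json", "/pets", "get"
)
print(operation.operation_id)
print(operation.to_typescript())  # TypeScript-style view of the operation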
pydantic model langchain.tools.AzureCogsFormRecognizerTool[source]#
Tool that queries the Azure Cognitive Services Form Recognizer API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/applied-ai-services/form-recognizer/quickstarts/get-started-sdks-rest-api?view=form-recog-3.0.0&pivots=programming-language-python
pydantic model langchain.tools.AzureCogsImageAnalysisTool[source]#
Tool that queries the Azure Cognitive Services Image Analysis API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40
pydantic model langchain.tools.AzureCogsSpeech2TextTool[source]#
Tool that queries the Azure Cognitive Services Speech2Text API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python
pydantic model langchain.tools.AzureCogsText2SpeechTool[source]#
Tool that queries the Azure Cognitive Services Text2Speech API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-text-to-speech?pivots=programming-language-python
pydantic model langchain.tools.BaseTool[source]#
Interface LangChain tools must implement.
field args_schema: Optional[Type[pydantic.main.BaseModel]] = None#
Pydantic model class to validate and parse the tool’s input arguments.
field callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None#
Deprecated. Please use callbacks instead.
field callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None#
Callbacks to be called during tool execution.
field description: str [Required]#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field handle_tool_error: Optional[Union[bool, str, Callable[[langchain.tools.base.ToolException], str]]] = False#
Handle the content of the ToolException thrown.
field name: str [Required]#
The unique name of the tool that clearly communicates its purpose.
field return_direct: bool = False#
Whether to return the tool’s output directly. Setting this to True means
that after the tool is called, the AgentExecutor will stop looping.
field verbose: bool = False#
Whether to log the tool’s progress.
async arun(tool_input: Union[str, Dict], verbose: Optional[bool] = None, start_color: Optional[str] = 'green', color: Optional[str] = 'green', callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Any[source]#
Run the tool asynchronously.
run(tool_input: Union[str, Dict], verbose: Optional[bool] = None, start_color: Optional[str] = 'green', color: Optional[str] = 'green', callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, **kwargs: Any) → Any[source]#
Run the tool.
property args: dict#
property is_single_input: bool#
Whether the tool only accepts a single input.
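As a minimal sketch of implementing this interface (the tool name, description, and behavior are illustrative assumptions), a custom tool subclasses BaseTool and supplies the synchronous and asynchronous run methods:

from langchain.tools.base import BaseTool

class WordCountTool(BaseTool):
    # Illustrative single-input tool: counts words in the input text.
    name = "word_count"
    description = "Counts the number of words in the input text."

    def _run(self, tool_input: str) -> str:
        return str(len(tool_input.split()))

    async def _arun(self, tool_input: str) -> str:
        return self._run(tool_input)

tool = WordCountTool()
print(tool.run("one two three"))  # -> '3'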
pydantic model langchain.tools.BingSearchResults[source]#
Tool that queries the Bing Search API and gets back JSON.
field api_wrapper: langchain.utilities.bing_search.BingSearchAPIWrapper [Required]#
field num_results: int = 4#
pydantic model langchain.tools.BingSearchRun[source]#
Tool that adds the capability to query the Bing search API.
field api_wrapper: langchain.utilities.bing_search.BingSearchAPIWrapper [Required]#
pydantic model langchain.tools.BraveSearch[source]#
field search_wrapper: BraveSearchWrapper [Required]#
classmethod from_api_key(api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.tools.brave_search.tool.BraveSearch[source]#
pydantic model langchain.tools.ClickTool[source]#
field args_schema: Type[BaseModel] = <class 'langchain.tools.playwright.click.ClickToolInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Click on an element with the given CSS selector'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'click_element'#
The unique name of the tool that clearly communicates its purpose.
field playwright_strict: bool = False#
Whether to employ Playwright’s strict mode when clicking on elements.
field playwright_timeout: float = 1000#
Timeout (in ms) for Playwright to wait for element to be ready.
field visible_only: bool = True#
Whether to consider only visible elements.
pydantic model langchain.tools.CopyFileTool[source]#
field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.copy.FileCopyInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Create a copy of a file in a specified location'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'copy_file'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.CurrentWebPageTool[source]#
field args_schema: Type[BaseModel] = <class 'pydantic.main.BaseModel'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Returns the URL of the current page'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'current_webpage'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.DeleteFileTool[source]#
field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.delete.FileDeleteInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Delete a file'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'file_delete'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.DuckDuckGoSearchResults[source]#
Tool that queries the DuckDuckGo Search API and returns the results as JSON.
field api_wrapper: langchain.utilities.duckduckgo_search.DuckDuckGoSearchAPIWrapper [Optional]#
field num_results: int = 4#
pydantic model langchain.tools.DuckDuckGoSearchRun[source]#
Tool that adds the capability to query the DuckDuckGo search API.
field api_wrapper: langchain.utilities.duckduckgo_search.DuckDuckGoSearchAPIWrapper [Optional]#
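Because the api_wrapper field is marked [Optional], the DuckDuckGo tools can be constructed with no credentials at all; a quick sketch:
from langchain.tools import DuckDuckGoSearchResults, DuckDuckGoSearchRun

search = DuckDuckGoSearchRun()  # the wrapper is created automatically
print(search.run("langchain"))

results = DuckDuckGoSearchResults(num_results=2)  # JSON-like list of results
print(results.run("langchain"))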
pydantic model langchain.tools.ExtractHyperlinksTool[source]#
Extract all hyperlinks on the page.
field args_schema: Type[BaseModel] = <class 'langchain.tools.playwright.extract_hyperlinks.ExtractHyperlinksToolInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Extract all hyperlinks on the current webpage'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'extract_hyperlinks'#
The unique name of the tool that clearly communicates its purpose.
static scrape_page(page: Any, html_content: str, absolute_urls: bool) → str[source]#
pydantic model langchain.tools.ExtractTextTool[source]#
field args_schema: Type[BaseModel] = <class 'pydantic.main.BaseModel'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Extract all the text on the current webpage'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'extract_text'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.FileSearchTool[source]#
field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.file_search.FileSearchInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Recursively search for files in a subdirectory that match the regex pattern'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'file_search'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.GetElementsTool[source]#
field args_schema: Type[BaseModel] = <class 'langchain.tools.playwright.get_elements.GetElementsToolInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Retrieve elements in the current web page matching the given CSS selector'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'get_elements'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.GmailCreateDraft[source]#
field args_schema: Type[langchain.tools.gmail.create_draft.CreateDraftSchema] = <class 'langchain.tools.gmail.create_draft.CreateDraftSchema'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Use this tool to create a draft email with the provided message fields.'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'create_gmail_draft'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.GmailGetMessage[source]#
field args_schema: Type[langchain.tools.gmail.get_message.SearchArgsSchema] = <class 'langchain.tools.gmail.get_message.SearchArgsSchema'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Use this tool to fetch an email by message ID. Returns the thread ID, snippet, body, subject, and sender.'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'get_gmail_message'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.GmailGetThread[source]#
field args_schema: Type[langchain.tools.gmail.get_thread.GetThreadSchema] = <class 'langchain.tools.gmail.get_thread.GetThreadSchema'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Use this tool to search for email messages. The input must be a valid Gmail query. The output is a JSON list of messages.'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'get_gmail_thread'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.GmailSearch[source]#
field args_schema: Type[langchain.tools.gmail.search.SearchArgsSchema] = <class 'langchain.tools.gmail.search.SearchArgsSchema'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Use this tool to search for email messages or threads. The input must be a valid Gmail query. The output is a JSON list of the requested resource.'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'search_gmail'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.GmailSendMessage[source]#
field description: str = 'Use this tool to send email messages. The input is the message, recipients'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'send_gmail_message'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.GooglePlacesTool[source]#
Tool that adds the capability to query the Google places API.
field api_wrapper: langchain.utilities.google_places_api.GooglePlacesAPIWrapper [Optional]#
pydantic model langchain.tools.GoogleSearchResults[source]#
Tool that queries the Google Search API and returns the results as JSON.
field api_wrapper: langchain.utilities.google_search.GoogleSearchAPIWrapper [Required]#
field num_results: int = 4#
pydantic model langchain.tools.GoogleSearchRun[source]#
Tool that adds the capability to query the Google search API.
field api_wrapper: langchain.utilities.google_search.GoogleSearchAPIWrapper [Required]#
pydantic model langchain.tools.GoogleSerperResults[source]#
Tool that queries the Serper.dev Google Search API and returns the results as JSON.
field api_wrapper: langchain.utilities.google_serper.GoogleSerperAPIWrapper [Optional]#
pydantic model langchain.tools.GoogleSerperRun[source]#
Tool that adds the capability to query the Serper.dev Google search API.
field api_wrapper: langchain.utilities.google_serper.GoogleSerperAPIWrapper [Required]#
pydantic model langchain.tools.HumanInputRun[source]#
Tool that adds the capability to ask user for input.
field input_func: Callable [Optional]#
field prompt_func: Callable[[str], None] [Optional]#
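A sketch of overriding both hooks; the prompt wording and the helper name are illustrative:
from langchain.tools import HumanInputRun

def ask(prompt: str) -> None:
    # prompt_func only displays the question; input_func collects the reply
    print("\n" + prompt + "\n")

tool = HumanInputRun(prompt_func=ask, input_func=input)
answer = tool.run("Which city should the agent search flights for?")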
pydantic model langchain.tools.IFTTTWebhook[source]#
IFTTT Webhook.
Parameters
name – name of the tool
description – description of the tool
url – URL to hit with the JSON event.
field url: str [Required]#
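A construction sketch, assuming an IFTTT webhooks applet; the event name and key in the URL are placeholders:
from langchain.tools import IFTTTWebhook

tool = IFTTTWebhook(
    name="Spotify",
    description="Add a song to a Spotify playlist",
    url="https://maker.ifttt.com/trigger/spotify/json/with/key/<your-key>",
)
output = tool.run("taylor swift")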
pydantic model langchain.tools.InfoPowerBITool[source]#
Tool for getting metadata about a PowerBI Dataset.
field powerbi: langchain.utilities.powerbi.PowerBIDataset [Required]#
pydantic model langchain.tools.ListDirectoryTool[source]#
field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.list_dir.DirectoryListingInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'List files and directories in a specified folder'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'list_directory'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.ListPowerBITool[source]#
Tool for getting tables names.
field powerbi: langchain.utilities.powerbi.PowerBIDataset [Required]#
pydantic model langchain.tools.MetaphorSearchResults[source]#
Tool that queries the Metaphor Search API and returns the results as JSON.
field api_wrapper: langchain.utilities.metaphor_search.MetaphorSearchAPIWrapper [Required]#
pydantic model langchain.tools.MoveFileTool[source]#
field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.move.FileMoveInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Move or rename a file from one location to another'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'move_file'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.NavigateBackTool[source]#
Navigate back to the previous page in the browser history.
field args_schema: Type[BaseModel] = <class 'pydantic.main.BaseModel'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Navigate back to the previous page in the browser history'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'previous_webpage'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.NavigateTool[source]#
field args_schema: Type[BaseModel] = <class 'langchain.tools.playwright.navigate.NavigateToolInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Navigate a browser to the specified URL'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'navigate_browser'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.OpenAPISpec[source]#
OpenAPI Model that removes misformatted parts of the spec.
classmethod from_file(path: Union[str, pathlib.Path]) → langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]#
Get an OpenAPI spec from a file path.
classmethod from_spec_dict(spec_dict: dict) → langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]#
Get an OpenAPI spec from a dict.
classmethod from_text(text: str) → langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]#
Get an OpenAPI spec from text.
classmethod from_url(url: str) → langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]#
Get an OpenAPI spec from a URL.
static get_cleaned_operation_id(operation: openapi_schema_pydantic.v3.v3_1_0.operation.Operation, path: str, method: str) → str[source]#
Get a cleaned operation id from an operation id.
get_methods_for_path(path: str) → List[str][source]#
Return a list of valid methods for the specified path.
get_operation(path: str, method: str) → openapi_schema_pydantic.v3.v3_1_0.operation.Operation[source]#
Get the operation object for a given path and HTTP method.
get_parameters_for_operation(operation: openapi_schema_pydantic.v3.v3_1_0.operation.Operation) → List[openapi_schema_pydantic.v3.v3_1_0.parameter.Parameter][source]#
Get the components for a given operation.
get_referenced_schema(ref: openapi_schema_pydantic.v3.v3_1_0.reference.Reference) → openapi_schema_pydantic.v3.v3_1_0.schema.Schema[source]#
Get a schema (or nested reference) or err.
get_request_body_for_operation(operation: openapi_schema_pydantic.v3.v3_1_0.operation.Operation) → Optional[openapi_schema_pydantic.v3.v3_1_0.request_body.RequestBody][source]#
Get the request body for a given operation.
classmethod parse_obj(obj: dict) → langchain.tools.openapi.utils.openapi_utils.OpenAPISpec[source]#
property base_url: str#
Get the base url.
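A sketch that loads a public spec and inspects one operation; the petstore URL is only an example:
from langchain.tools import OpenAPISpec

spec = OpenAPISpec.from_url("https://petstore3.swagger.io/api/v3/openapi.json")
methods = spec.get_methods_for_path("/pet")        # e.g. ['put', 'post']
operation = spec.get_operation("/pet", "post")
params = spec.get_parameters_for_operation(operation)
print(spec.base_url, methods)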
pydantic model langchain.tools.OpenWeatherMapQueryRun[source]#
Tool that adds the capability to query using the OpenWeatherMap API.
field api_wrapper: langchain.utilities.openweathermap.OpenWeatherMapAPIWrapper [Optional]#
pydantic model langchain.tools.QueryPowerBITool[source]#
Tool for querying a Power BI Dataset.
Validators
raise_deprecation » all fields
validate_llm_chain_input_variables » llm_chain
field examples: Optional[str] = '\nQuestion: How many rows are in the table <table>?\nDAX: EVALUATE ROW("Number of rows", COUNTROWS(<table>))\n----\nQuestion: How many rows are in the table <table> where <column> is not empty?\nDAX: EVALUATE ROW("Number of rows", COUNTROWS(FILTER(<table>, <table>[<column>] <> "")))\n----\nQuestion: What was the average of <column> in <table>?\nDAX: EVALUATE ROW("Average", AVERAGE(<table>[<column>]))\n----\n'#
field llm_chain: langchain.chains.llm.LLMChain [Required]#
field max_iterations: int = 5#
field powerbi: langchain.utilities.powerbi.PowerBIDataset [Required]#
field session_cache: Dict[str, Any] [Optional]#
field template: Optional[str] = '\nAnswer the question below with a DAX query that can be sent to Power BI. DAX queries have a simple syntax comprised of just one required keyword, EVALUATE, and several optional keywords: ORDER BY, START AT, DEFINE, MEASURE, VAR, TABLE, and COLUMN. Each keyword defines a statement used for the duration of the query. Any time < or > are used in the text below it means that those values need to be replaced by table, columns or other things. If the question is not something you can answer with a DAX query, reply with "I cannot answer this" and the question will be escalated to a human.\n\nSome DAX functions return a table instead of a scalar, and must be wrapped in a function that evaluates the table and returns a scalar; unless the table is a single column, single row table, then it is treated as a scalar value. Most DAX functions require one or more arguments, which can include tables, columns, expressions, and values. However, some functions, such as PI, do not require any arguments, but always require parentheses to indicate the null argument. For example, you must always type PI(), not PI. You can also nest functions within other functions. \n\nSome commonly used functions are:\nEVALUATE <table> - At the most basic level, a DAX query is an EVALUATE statement containing a table expression. At least one EVALUATE statement is required, however, a query can contain any number of EVALUATE statements.\nEVALUATE <table> ORDER BY <expression> ASC or DESC - The optional ORDER BY keyword defines one or more expressions used to sort query results. Any expression that can be evaluated for each row of the result is valid.\nEVALUATE <table> ORDER BY <expression> ASC or DESC START AT <value> or <parameter> - The optional START AT keyword is used inside an ORDER BY clause. It defines the value at which the query results begin.\nDEFINE MEASURE | VAR; EVALUATE <table> - The optional DEFINE keyword introduces one or more calculated entity definitions that exist only for the duration of the query. Definitions precede the EVALUATE statement and are valid for all EVALUATE statements in the query. Definitions can be variables, measures, tables1, and columns1. Definitions can reference other definitions that appear before or after the current definition. At least one definition is required if the DEFINE keyword is included in a query.\nMEASURE <table name>[<measure name>] = <scalar expression> - Introduces a measure definition in a DEFINE statement of a DAX query.\nVAR <name> = <expression> - Stores the result of an expression as a named variable, which can then be passed as an argument to other measure expressions. Once resultant values have been calculated for a variable expression, those values do not change, even if the variable is referenced in another expression.\n\nFILTER(<table>,<filter>) - Returns a table that represents a subset of another table or expression, where <filter> is a Boolean expression that is to be evaluated for each row of the table. For example, [Amount] > 0 or [Region] = "France"\nROW(<name>, <expression>) - Returns a table with a single row containing values that result from the expressions given to each column.\nDISTINCT(<column>) - Returns a one-column table that contains the distinct values from the specified column. In other words, duplicate values are removed and only unique values are returned. This function cannot be used to Return values into a cell or column on a worksheet; rather, you nest the DISTINCT function within a formula, to get a list of distinct values that can be passed to another function and then counted, summed, or used for other operations.\nDISTINCT(<table>) - Returns a table by removing duplicate rows from another table or expression.\n\nAggregation functions, names with a A in it, handle booleans and empty strings in appropriate ways, while the same function without A only uses the numeric values in a column. Functions names with an X in it can include a expression as an argument, this will be evaluated for each row in the table and the result will be used in the regular function calculation, these are the functions:\nCOUNT(<column>), COUNTA(<column>), COUNTX(<table>,<expression>), COUNTAX(<table>,<expression>), COUNTROWS([<table>]), COUNTBLANK(<column>), DISTINCTCOUNT(<column>), DISTINCTCOUNTNOBLANK (<column>) - these are all variations of count functions.\nAVERAGE(<column>), AVERAGEA(<column>), AVERAGEX(<table>,<expression>) - these are all variations of average functions.\nMAX(<column>), MAXA(<column>), MAXX(<table>,<expression>) - these are all variations of max functions.\nMIN(<column>), MINA(<column>), MINX(<table>,<expression>) - these are all variations of min functions.\nPRODUCT(<column>), PRODUCTX(<table>,<expression>) - these are all variations of product functions.\nSUM(<column>), SUMX(<table>,<expression>) - these are all variations of sum functions.\n\nDate and time functions:\nDATE(year, month, day) - Returns a date value that represents the specified year, month, and day.\nDATEDIFF(date1, date2, <interval>) - Returns the difference between two date values, in the specified interval, that can be SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, QUARTER, YEAR.\nDATEVALUE(<date_text>) - Returns a date value that represents the specified date.\nYEAR(<date>), QUARTER(<date>), MONTH(<date>), DAY(<date>), HOUR(<date>), MINUTE(<date>), SECOND(<date>) - Returns the part of the date for the specified date.\n\nFinally, make sure to escape double quotes with a single backslash, and make sure that only table names have single quotes around them, while names of measures or the values of columns that you want to compare against are in escaped double quotes. Newlines are not necessary and can be skipped. The queries are serialized as json and so will have to fit be compliant with json syntax. Sometimes you will get a question, a DAX query and a error, in that case you need to rewrite the DAX query to get the correct answer.\n\nThe following tables exist: {tables}\n\nand the schema\'s for some are given here:\n{schemas}\n\nExamples:\n{examples}\n\nQuestion: {tool_input}\nDAX: \n'#
pydantic model langchain.tools.ReadFileTool[source]#
field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.read.ReadFileInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Read file from disk'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'read_file'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.SceneXplainTool[source]#
Tool that adds the capability to explain images.
field api_wrapper: langchain.utilities.scenexplain.SceneXplainAPIWrapper [Optional]#
pydantic model langchain.tools.ShellTool[source]#
Tool to run shell commands.
field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.shell.tool.ShellInput'>#
Schema for input arguments.
field description: str = 'Run shell commands on this Linux machine.'#
Description of tool.
field name: str = 'terminal'#
Name of tool.
field process: langchain.utilities.bash.BashProcess [Optional]#
Bash process to run commands.
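Since the args_schema (ShellInput) takes a list of commands, one call can run several commands in sequence; a minimal sketch:
from langchain.tools import ShellTool

shell = ShellTool()
print(shell.run({"commands": ["echo 'Hello World!'", "date"]}))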
pydantic model langchain.tools.SteamshipImageGenerationTool[source]#
field model_name: ModelName [Required]#
field return_urls: Optional[bool] = False#
field size: Optional[str] = '512x512'#
field steamship: Steamship [Required]#
pydantic model langchain.tools.StructuredTool[source]#
Tool that can operate on any number of inputs.
field args_schema: Type[pydantic.main.BaseModel] [Required]#
The input arguments’ schema.
The tool schema.
field coroutine: Optional[Callable[[...], Awaitable[Any]]] = None#
The asynchronous version of the function.
field description: str = ''#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field func: Callable[[...], Any] [Required]#
The function to run when the tool is called.
classmethod from_function(func: Callable, name: Optional[str] = None, description: Optional[str] = None, return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, infer_schema: bool = True, **kwargs: Any) → langchain.tools.base.StructuredTool[source]#
property args: dict#
The tool’s input arguments.
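A sketch of from_function with the default infer_schema=True, which builds args_schema from the function signature so run() accepts a dictionary:
from langchain.tools import StructuredTool

def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

tool = StructuredTool.from_function(multiply)
print(tool.args)                   # schema inferred from the signature
print(tool.run({"a": 6, "b": 7}))  # 42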
pydantic model langchain.tools.Tool[source]#
Tool that takes in function or coroutine directly.
field args_schema: Optional[Type[pydantic.main.BaseModel]] = None#
Pydantic model class to validate and parse the tool’s input arguments.
field callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None#
Deprecated. Please use callbacks instead.
field callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None#
Callbacks to be called during tool execution.
field coroutine: Optional[Callable[[...], Awaitable[str]]] = None#
The asynchronous version of the function.
field description: str = ''#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field func: Callable[[...], str] [Required]#
The function to run when the tool is called.
field handle_tool_error: Optional[Union[bool, str, Callable[[langchain.tools.base.ToolException], str]]] = False#
Handle the content of the ToolException thrown.
field name: str [Required]#
The unique name of the tool that clearly communicates its purpose.
field return_direct: bool = False#
Whether to return the tool’s output directly. Setting this to True means
that after the tool is called, the AgentExecutor will stop looping.
field verbose: bool = False#
Whether to log the tool’s progress.
classmethod from_function(func: Callable, name: str, description: str, return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, **kwargs: Any) → langchain.tools.base.Tool[source]#
Initialize tool from a function.
property args: dict#
The tool’s input arguments.
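A sketch of from_function for a single-input function; the function body is a stand-in:
from langchain.tools import Tool

def search_api(query: str) -> str:
    """Pretend to search an API."""
    return f"results for {query}"

tool = Tool.from_function(
    func=search_api,
    name="search",
    description="useful to look things up",
)
print(tool.run("langchain"))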
pydantic model langchain.tools.VectorStoreQATool[source]#
Tool for the VectorDBQA chain. To be initialized with name and chain.
static get_description(name: str, description: str) → str[source]#
pydantic model langchain.tools.VectorStoreQAWithSourcesTool[source]#
Tool for the VectorDBQAWithSources chain.
static get_description(name: str, description: str) → str[source]#
pydantic model langchain.tools.WikipediaQueryRun[source]#
Tool that adds the capability to search using the Wikipedia API.
field api_wrapper: langchain.utilities.wikipedia.WikipediaAPIWrapper [Required]#
pydantic model langchain.tools.WolframAlphaQueryRun[source]#
Tool that adds the capability to query using the Wolfram Alpha SDK.
field api_wrapper: langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper [Required]#
pydantic model langchain.tools.WriteFileTool[source]#
field args_schema: Type[pydantic.main.BaseModel] = <class 'langchain.tools.file_management.write.WriteFileInput'>#
Pydantic model class to validate and parse the tool’s input arguments.
field description: str = 'Write file to disk'#
Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
field name: str = 'write_file'#
The unique name of the tool that clearly communicates its purpose.
pydantic model langchain.tools.YouTubeSearchTool[source]#
pydantic model langchain.tools.ZapierNLAListActions[source]#
Returns a list of all exposed (enabled) actions associated with the current
user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The returned list can be empty if no actions are exposed; otherwise it will
contain a list of action objects:
[{"id": str,
"description": str,
"params": Dict[str, str]
}]
params will always contain an instructions key, the only required
param. All others are optional and, if provided, will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/docs)
field api_wrapper: langchain.utilities.zapier.ZapierNLAWrapper [Optional]#
pydantic model langchain.tools.ZapierNLARunAction[source]#
Executes an action that is identified by action_id, which must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350 tokens),
making it safe to inject into the prompt of another LLM call.
Parameters
action_id – a specific action ID (from list actions) of the action to execute
(the set api_key must be associated with the action owner)
instructions – a natural language instruction string for using the action
(e.g. "get the latest email from Mike Knoop" for the "Gmail: find email" action)
params – a dict, optional. Any params provided will override AI guesses
from instructions (see “understanding the AI guessing flow” here:
https://nla.zapier.com/api/v1/docs)
field action_id: str [Required]#
field api_wrapper: langchain.utilities.zapier.ZapierNLAWrapper [Optional]#
field base_prompt: str = 'A wrapper around Zapier NLA actions. The input to this tool is a natural language instruction, for example "get the latest email from my bank" or "send a slack message to the #general channel". Each tool will have params associated with it that are specified as a list. You MUST take into account the params when creating the instruction. For example, if the params are [\'Message_Text\', \'Channel\'], your instruction should be something like \'send a slack message to the #general channel with the text hello world\'. Another example: if the params are [\'Calendar\', \'Search_Term\'], your instruction should be something like \'find the meeting in my personal calendar at 3pm\'. Do not make up params, they will be explicitly specified in the tool description. If you do not have enough information to fill in the params, just say \'not enough information provided in the instruction, missing <param>\'. If you get a none or null response, STOP EXECUTION, do not try to another tool!This tool specifically used for: {zapier_description}, and has params: {params}'#
field params: Optional[dict] = None#
field params_schema: Dict[str, str] [Optional]#
field zapier_description: str [Required]#
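A wiring sketch, assuming ZAPIER_NLA_API_KEY is set in the environment and at least one action is exposed; the field values are pulled from the list endpoint:
from langchain.tools import ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper

wrapper = ZapierNLAWrapper()
action = wrapper.list()[0]  # pick an exposed action
tool = ZapierNLARunAction(
    action_id=action["id"],
    zapier_description=action["description"],
    params_schema=action["params"],
    api_wrapper=wrapper,
)
print(tool.run("grab the latest email from the inbox"))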
langchain.tools.tool(*args: Union[str, Callable], return_direct: bool = False, args_schema: Optional[Type[pydantic.main.BaseModel]] = None, infer_schema: bool = True) → Callable[source]#
Make tools out of functions; can be used with or without arguments.
Parameters
*args – The arguments to the tool.
return_direct – Whether to return directly from the tool rather
than continuing the agent loop.
args_schema – optional argument schema for user to specify
infer_schema – Whether to infer the schema of the arguments from
the function’s signature. This also makes the resultant tool
accept a dictionary input to its run() function.
Requires:
Function must be of type (str) -> str
Function must have a docstring
Examples
@tool
def search_api(query: str) -> str:
# Searches the API for the query.
return
@tool("search", return_direct=True)
def search_api(query: str) -> str:
# Searches the API for the query.
return
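Once decorated, the function behaves like any other tool; a quick usage sketch (the body is a stand-in):
from langchain.tools import tool

@tool
def search_api(query: str) -> str:
    """Searches the API for the query."""
    return f"results for {query}"

print(search_api.name)         # 'search_api'
print(search_api.run("cats"))  # executes the wrapped function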
Embeddings#
Wrappers around embedding modules.
pydantic model langchain.embeddings.AlephAlphaAsymmetricSemanticEmbedding[source]#
Wrapper for Aleph Alpha’s Asymmetric Embeddings
AA provides you with an endpoint to embed a document and a query.
The models were optimized to make the embeddings of documents and
the query for a document as similar as possible.
To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/
Example
from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding
embeddings = AlephAlphaAsymmetricSemanticEmbedding()
document = "This is the content of the document"
query = "What is the content of the document?"
doc_result = embeddings.embed_documents([document])
query_result = embeddings.embed_query(query)
field aleph_alpha_api_key: Optional[str] = None#
API key for Aleph Alpha API.
field compress_to_size: Optional[int] = 128#
Should the returned embeddings come back as the original 5120-dimensional
vector, or be compressed to 128 dimensions.
field contextual_control_threshold: Optional[int] = None#
Attention control parameters only apply to those tokens that have
explicitly been set in the request.
field control_log_additive: Optional[bool] = True#
Apply controls on prompt items by adding the log(control_factor)
to attention scores.
field hosting: Optional[str] = 'https://api.aleph-alpha.com'#
Optional parameter that specifies which datacenters may process the request.
field model: Optional[str] = 'luminous-base'#
Model name to use.
field normalize: Optional[bool] = True#
Should the returned embeddings be normalized.
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to Aleph Alpha’s asymmetric Document endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to Aleph Alpha’s asymmetric query embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.AlephAlphaSymmetricSemanticEmbedding[source]#
The symmetric version of the Aleph Alpha’s semantic embeddings.
The main difference is that here, both the documents and
queries are embedded with a SemanticRepresentation.Symmetric
Example
from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding
embeddings = AlephAlphaSymmetricSemanticEmbedding()
text = "This is a test text"
doc_result = embeddings.embed_documents([text])
query_result = embeddings.embed_query(text)
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to Aleph Alpha’s Document endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to Aleph Alpha’s symmetric query embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.BedrockEmbeddings[source]#
Embeddings provider to invoke Bedrock embedding models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
field credentials_profile_name: Optional[str] = None#
The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
field model_id: str = 'amazon.titan-e1t-medium'#
Id of the model to call, e.g., amazon.titan-e1t-medium; this is
equivalent to the modelId property in the list-foundation-models api.
field model_kwargs: Optional[Dict] = None#
Keyword arguments to pass to the model.
field region_name: Optional[str] = None#
The aws region, e.g., us-west-2. Falls back to the AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
embed_documents(texts: List[str], chunk_size: int = 1) → List[List[float]][source]#
Compute doc embeddings using a Bedrock model.
Parameters
texts – The list of texts to embed.
chunk_size – Bedrock currently only allows single string
inputs, so chunk size is always 1. This input is here
only for compatibility with the embeddings interface.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a Bedrock model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
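A construction sketch; it assumes AWS credentials are configured and the account has Bedrock access in the chosen region (the profile name is a placeholder):
from langchain.embeddings import BedrockEmbeddings

embeddings = BedrockEmbeddings(
    credentials_profile_name="bedrock-admin",
    region_name="us-west-2",
)
doc_vectors = embeddings.embed_documents(["hello world"])
query_vector = embeddings.embed_query("hello")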
pydantic model langchain.embeddings.CohereEmbeddings[source]#
Wrapper around Cohere embedding models.
To use, you should have the cohere python package installed, and the
environment variable COHERE_API_KEY set with your API key or pass it
as a named parameter to the constructor.
Example
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v2.0", cohere_api_key="my-api-key"
)
field model: str = 'embed-english-v2.0'#
Model name to use.
field truncate: Optional[str] = None#
Truncate embeddings that are too long from start or end (“NONE”|”START”|”END”)
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to Cohere’s embedding endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to Cohere’s embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
class langchain.embeddings.ElasticsearchEmbeddings(client: MlClient, model_id: str, *, input_field: str = 'text_field')[source]#
Wrapper around Elasticsearch embedding models.
This class provides an interface to generate embeddings using a model deployed
in an Elasticsearch cluster. It requires an Elasticsearch connection object
and the model_id of the model deployed in the cluster.
In Elasticsearch you need to have an embedding model loaded and deployed.
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
- https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
embed_documents(texts: List[str]) → List[List[float]][source]#
Generate embeddings for a list of documents.
Parameters
texts (List[str]) – A list of document text strings to generate embeddings
for.
Returns
A list of embeddings, one for each document in the input list.
Return type
List[List[float]]
embed_query(text: str) → List[float][source]#
Generate an embedding for a single query text.
Parameters
text (str) – The query text to generate an embedding for.
Returns
The embedding for the input query text.
Return type
List[float]
classmethod from_credentials(model_id: str, *, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, input_field: str = 'text_field') → langchain.embeddings.elasticsearch.ElasticsearchEmbeddings[source]#
Instantiate embeddings from Elasticsearch credentials.
Parameters
model_id (str) – The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str) – The name of the key for the input text field in the
document. Defaults to ‘text_field’.
es_cloud_id – (str, optional): The Elasticsearch cloud ID to connect to.
es_user – (str, optional): Elasticsearch username.
es_password – (str, optional): Elasticsearch password.
Example
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
# pulled in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
classmethod from_es_connection(model_id: str, es_connection: Elasticsearch, input_field: str = 'text_field') → ElasticsearchEmbeddings[source]#
Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Parameters
model_id (str) – The model_id of the model deployed in the Elasticsearch
cluster.
es_connection (elasticsearch.Elasticsearch) – An existing Elasticsearch
connection object.
input_field (str, optional) – The name of the key for the input text field in
the document. Defaults to 'text_field'.
Returns
An instance of the ElasticsearchEmbeddings class.
Return type
ElasticsearchEmbeddings
Example
from elasticsearch import Elasticsearch
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
pydantic model langchain.embeddings.FakeEmbeddings[source]#
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed search docs.
embed_query(text: str) → List[float][source]#
Embed query text.
pydantic model langchain.embeddings.HuggingFaceEmbeddings[source]#
Wrapper around sentence_transformers embedding models.
To use, you should have the sentence_transformers python package installed.
Example
from langchain.embeddings import HuggingFaceEmbeddings
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': False}
hf = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
field cache_folder: Optional[str] = None#
Path to store models.
Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable.
field encode_kwargs: Dict[str, Any] [Optional]#
Keyword arguments to pass when calling the encode method of the model.
field model_kwargs: Dict[str, Any] [Optional]#
Keyword arguments to pass to the model.
field model_name: str = 'sentence-transformers/all-mpnet-base-v2'#
Model name to use.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace transformer model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace transformer model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.HuggingFaceHubEmbeddings[source]#
Wrapper around HuggingFaceHub embedding models.
To use, you should have the huggingface_hub python package installed, and the
environment variable HUGGINGFACEHUB_API_TOKEN set with your API token, or pass
it as a named parameter to the constructor.
Example
from langchain.embeddings import HuggingFaceHubEmbeddings
repo_id = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceHubEmbeddings(
repo_id=repo_id,
task="feature-extraction",
huggingfacehub_api_token="my-api-key",
)
field model_kwargs: Optional[dict] = None#
Keyword arguments to pass to the model.
field repo_id: str = 'sentence-transformers/all-mpnet-base-v2'#
Model name to use.
field task: Optional[str] = 'feature-extraction'#
Task to call the model with.
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to HuggingFaceHub’s embedding endpoint for embedding search docs.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to HuggingFaceHub’s embedding endpoint for embedding query text.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.HuggingFaceInstructEmbeddings[source]#
Wrapper around sentence_transformers embedding models.
To use, you should have the sentence_transformers
and InstructorEmbedding python packages installed.
Example
from langchain.embeddings import HuggingFaceInstructEmbeddings
model_name = "hkunlp/instructor-large"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
hf = HuggingFaceInstructEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
field cache_folder: Optional[str] = None#
Path to store models.
Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable.
field embed_instruction: str = 'Represent the document for retrieval: '#
Instruction to use for embedding documents.
field encode_kwargs: Dict[str, Any] [Optional]#
Keyword arguments to pass when calling the encode method of the model.
field model_kwargs: Dict[str, Any] [Optional]#
Keyword arguments to pass to the model.
field model_name: str = 'hkunlp/instructor-large'#
Model name to use.
field query_instruction: str = 'Represent the question for retrieving supporting documents: '#
Instruction to use for embedding query.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace instruct model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace instruct model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.LlamaCppEmbeddings[source]#
Wrapper around llama.cpp embedding models.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: abetlen/llama-cpp-python
Example
from langchain.embeddings import LlamaCppEmbeddings
llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
field f16_kv: bool = False#
Use half-precision for key/value cache.
field logits_all: bool = False#
Return logits for all tokens, not just the last token.
field n_batch: Optional[int] = 8#
Number of tokens to process in parallel.
Should be a number between 1 and n_ctx.
field n_ctx: int = 512#
Token context window.
field n_gpu_layers: Optional[int] = None#
Number of layers to be loaded into gpu memory. Default None.
field n_parts: int = -1#
Number of parts to split the model into.
If -1, the number of parts is automatically determined.
field n_threads: Optional[int] = None#
Number of threads to use. If None, the number
of threads is automatically determined.
field seed: int = -1#
Seed. If -1, a random seed is used.
field use_mlock: bool = False#
Force system to keep model in RAM.
field vocab_only: bool = False#
Only load the vocabulary, no weights.
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed a list of documents using the Llama model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Embed a query using the Llama model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.MiniMaxEmbeddings[source]#
Wrapper around MiniMax’s embedding inference service.
To use, you should have the environment variables MINIMAX_GROUP_ID and
MINIMAX_API_KEY set with your credentials, or pass them as named parameters
to the constructor.
Example
from langchain.embeddings import MiniMaxEmbeddings
embeddings = MiniMaxEmbeddings()
query_text = "This is a test query."
query_result = embeddings.embed_query(query_text)
document_text = "This is a test document."
document_result = embeddings.embed_documents([document_text])
field embed_type_db: str = 'db'#
For embed_documents
field embed_type_query: str = 'query'#
For embed_query
field endpoint_url: str = 'https://api.minimax.chat/v1/embeddings'#
Endpoint URL to use.
field minimax_api_key: Optional[str] = None#
API Key for MiniMax API.
field minimax_group_id: Optional[str] = None#
Group ID for MiniMax API.
field model: str = 'embo-01'#
Embeddings model name to use.
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed documents using a MiniMax embedding endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Embed a query using a MiniMax embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.ModelScopeEmbeddings[source]#
Wrapper around modelscope_hub embedding models.
To use, you should have the modelscope python package installed.
Example
from langchain.embeddings import ModelScopeEmbeddings
model_id = "damo/nlp_corom_sentence-embedding_english-base"
embed = ModelScopeEmbeddings(model_id=model_id)
field model_id: str = 'damo/nlp_corom_sentence-embedding_english-base'#
Model name to use.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a modelscope embedding model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a modelscope embedding model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.MosaicMLInstructorEmbeddings[source]#
Wrapper around MosaicML’s embedding inference service.
To use, you should have the
environment variable MOSAICML_API_TOKEN set with your API token, or pass
it as a named parameter to the constructor.
Example
from langchain.embeddings import MosaicMLInstructorEmbeddings
endpoint_url = (
"https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict"
)
mosaic_llm = MosaicMLInstructorEmbeddings(
endpoint_url=endpoint_url,
mosaicml_api_token="my-api-key"
)
field embed_instruction: str = 'Represent the document for retrieval: '#
Instruction used to embed documents.
field endpoint_url: str = 'https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict'#
Endpoint URL to use.
field query_instruction: str = 'Represent the question for retrieving supporting documents: '#
Instruction used to embed the query.
field retry_sleep: float = 1.0#
How long to sleep if a rate limit is encountered.
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed documents using a MosaicML deployed instructor embedding model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Embed a query using a MosaicML deployed instructor embedding model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.OpenAIEmbeddings[source]#
Wrapper around OpenAI embedding models.
To use, you should have the openai python package installed, and the
environment variable OPENAI_API_KEY set with your API key or pass it
as a named parameter to the constructor.
Example
from langchain.embeddings import OpenAIEmbeddings
openai = OpenAIEmbeddings(openai_api_key="my-api-key")
In order to use the library with Microsoft Azure endpoints, you need to set
the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION.
The OPENAI_API_TYPE must be set to ‘azure’ and the others correspond to
the properties of your endpoint.
In addition, the deployment name must be passed as the model parameter.
Example
import os
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080"
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings(
deployment="your-embeddings-deployment-name",
model="your-embeddings-model-name",
api_base="https://your-endpoint.openai.azure.com/",
api_type="azure",
)
text = "This is a test query."
query_result = embeddings.embed_query(text)
field chunk_size: int = 1000#
Maximum number of texts to embed in each batch
field max_retries: int = 6#
Maximum number of retries to make when generating.
field request_timeout: Optional[Union[float, Tuple[float, float]]] = None#
Timeout in seconds for the OpenAI request.
embed_documents(texts: List[str], chunk_size: Optional[int] = 0) → List[List[float]][source]#
Call out to OpenAI’s embedding endpoint for embedding search docs.
Parameters
texts – The list of texts to embed.
chunk_size – The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to OpenAI’s embedding endpoint for embedding query text.
Parameters
text – The text to embed.
Returns
Embedding for the text.
pydantic model langchain.embeddings.SagemakerEndpointEmbeddings[source]#
Wrapper around custom Sagemaker Inference Endpoints.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
field content_handler: langchain.embeddings.sagemaker_endpoint.EmbeddingsContentHandler [Required]#
The content handler class that provides input and output transform functions
to handle formats between the LLM and the endpoint.
field credentials_profile_name: Optional[str] = None#
The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
field endpoint_kwargs: Optional[Dict] = None#
Optional attributes passed to the invoke_endpoint function. See the boto3
docs for more info: https://boto3.amazonaws.com/v1/documentation/api/latest/index.html
field endpoint_name: str = ''#
The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region.
field model_kwargs: Optional[Dict] = None#
Key word arguments to pass to the model.
field region_name: str = ''#
The AWS region where the Sagemaker model is deployed, e.g. us-west-2.
embed_documents(texts: List[str], chunk_size: int = 64) → List[List[float]][source]#
Compute doc embeddings using a SageMaker Inference Endpoint.
Parameters
texts – The list of texts to embed.
chunk_size – The chunk size defines how many input texts will
be grouped together as one request. If None, will use the
chunk size specified by the class.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a SageMaker inference endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
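A sketch of wiring up a custom content handler, assuming a JSON-in/JSON-out endpoint; the endpoint name, region, and payload keys ("inputs", "vectors") are illustrative and depend on your deployed model:
import json
from typing import Dict, List
from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler

class ContentHandler(EmbeddingsContentHandler):
    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
        # Serialize the batch of texts into the request body the model expects.
        return json.dumps({"inputs": prompts, **model_kwargs}).encode("utf-8")

    def transform_output(self, output: bytes) -> List[List[float]]:
        # Parse the endpoint response back into a list of embedding vectors;
        # "vectors" is an assumed response key for this hypothetical model.
        return json.loads(output.read().decode("utf-8"))["vectors"]

embeddings = SagemakerEndpointEmbeddings(
    endpoint_name="my-embeddings-endpoint",  # hypothetical endpoint name
    region_name="us-west-2",
    credentials_profile_name="default",
    content_handler=ContentHandler(),
)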
pydantic model langchain.embeddings.SelfHostedEmbeddings[source]#
Runs custom embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Example using a model load function:
from langchain.embeddings import SelfHostedEmbeddings
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
def get_pipeline():
model_id = "facebook/bart-large"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
embeddings = SelfHostedEmbeddings(
model_load_fn=get_pipeline,
    hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing in a pipeline path:
from langchain.embeddings import SelfHostedHFEmbeddings
import pickle
import runhouse as rh
from transformers import pipeline
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
rh.blob(pickle.dumps(pipeline),
path="models/pipeline.pkl").save().to(gpu, path="models")
embeddings = SelfHostedHFEmbeddings.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Validators
raise_deprecation » all fields
set_verbose » verbose
field inference_fn: Callable = <function _embed_documents>#
Inference function to extract the embeddings on the remote hardware.
field inference_kwargs: Any = None#
Any kwargs to pass to the model’s inference function.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace transformer model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace transformer model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.SelfHostedHuggingFaceEmbeddings[source]#
Runs sentence_transformers embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Example
from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
import runhouse as rh
model_name = "sentence-transformers/all-mpnet-base-v2"
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
Validators
raise_deprecation » all fields
set_verbose » verbose
field hardware: Any = None#
Remote hardware to send the inference function to.
field inference_fn: Callable = <function _embed_documents>#
Inference function to extract the embeddings.
field load_fn_kwargs: Optional[dict] = None#
Key word arguments to pass to the model load function.
field model_id: str = 'sentence-transformers/all-mpnet-base-v2'#
Model name to use.
field model_load_fn: Callable = <function load_embedding_model>#
Function to load the model remotely on the server.
field model_reqs: List[str] = ['./', 'sentence_transformers', 'torch']#
Requirements to install on hardware to inference the model.
pydantic model langchain.embeddings.SelfHostedHuggingFaceInstructEmbeddings[source]#
Runs InstructorEmbedding embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Example
from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
import runhouse as rh
model_name = "hkunlp/instructor-large"
gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
hf = SelfHostedHuggingFaceInstructEmbeddings(
model_name=model_name, hardware=gpu)
Validators
raise_deprecation » all fields
set_verbose » verbose
field embed_instruction: str = 'Represent the document for retrieval: '#
Instruction to use for embedding documents.
field model_id: str = 'hkunlp/instructor-large'#
Model name to use.
field model_reqs: List[str] = ['./', 'InstructorEmbedding', 'torch']#
Requirements to install on hardware to inference the model.
field query_instruction: str = 'Represent the question for retrieving supporting documents: '#
Instruction to use for embedding query.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace instruct model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace instruct model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
langchain.embeddings.SentenceTransformerEmbeddings#
alias of langchain.embeddings.huggingface.HuggingFaceEmbeddings
pydantic model langchain.embeddings.TensorflowHubEmbeddings[source]#
Wrapper around tensorflow_hub embedding models.
To use, you should have the tensorflow_text python package installed.
Example
from langchain.embeddings import TensorflowHubEmbeddings
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
tf = TensorflowHubEmbeddings(model_url=url)
field model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3'#
Model name to use.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a TensorflowHub embedding model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a TensorflowHub embedding model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
Docstore#
Wrappers on top of docstores.
class langchain.docstore.InMemoryDocstore(_dict: Dict[str, langchain.schema.Document])[source]#
Simple in memory docstore in the form of a dict.
add(texts: Dict[str, langchain.schema.Document]) → None[source]#
Add texts to in memory dictionary.
search(search: str) → Union[str, langchain.schema.Document][source]#
Search via direct lookup.
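A minimal usage sketch (the IDs and contents are illustrative):
from langchain.docstore import InMemoryDocstore
from langchain.schema import Document

docstore = InMemoryDocstore({"1": Document(page_content="hello world")})
docstore.add({"2": Document(page_content="goodbye")})
doc = docstore.search("1")  # returns the stored Document
missing = docstore.search("3")  # returns an explanatory string for a missing ID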
class langchain.docstore.Wikipedia[source]#
Wrapper around wikipedia API.
search(search: str) → Union[str, langchain.schema.Document][source]#
Try to search for wiki page.
If page exists, return the page summary, and a PageWithLookups object.
If page does not exist, return similar entries.
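A minimal sketch, assuming the wikipedia python package is installed:
from langchain.docstore import Wikipedia

wiki = Wikipedia()
result = wiki.search("Python (programming language)")  # summary, or similar entries if missing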
Retrievers#
pydantic model langchain.retrievers.ArxivRetriever[source]#
It is effectively a wrapper for ArxivAPIWrapper.
It wraps load() to get_relevant_documents().
It uses all ArxivAPIWrapper arguments without any change.
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
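A minimal sketch; load_max_docs is an ArxivAPIWrapper argument passed through unchanged, and the query is illustrative:
from langchain.retrievers import ArxivRetriever

retriever = ArxivRetriever(load_max_docs=2)
docs = retriever.get_relevant_documents("quantum computing")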
pydantic model langchain.retrievers.AzureCognitiveSearchRetriever[source]#
Wrapper around Azure Cognitive Search.
field aiosession: Optional[aiohttp.client.ClientSession] = None#
ClientSession, in case we want to reuse connection for better performance.
field api_key: str = ''#
API Key. Both Admin and Query keys work, but for reading data it’s
recommended to use a Query key.
field api_version: str = '2020-06-30'#
API version
field content_key: str = 'content'#
Key in a retrieved result to set as the Document page_content.
field index_name: str = ''#
Name of Index inside Azure Cognitive Search service
field service_name: str = ''#
Name of Azure Cognitive Search service
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
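A minimal sketch; the service name, index name, and key are placeholders for your own Azure Cognitive Search resources:
from langchain.retrievers import AzureCognitiveSearchRetriever

retriever = AzureCognitiveSearchRetriever(
    service_name="my-search-service",  # hypothetical resource names
    index_name="my-index",
    api_key="<your-query-key>",
)
docs = retriever.get_relevant_documents("what is langchain?")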
pydantic model langchain.retrievers.ChatGPTPluginRetriever[source]#
field aiosession: Optional[aiohttp.client.ClientSession] = None#
field bearer_token: str [Required]#
field filter: Optional[dict] = None#
field top_k: int = 3#
field url: str [Required]#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
pydantic model langchain.retrievers.ContextualCompressionRetriever[source]#
Retriever that wraps a base retriever and compresses the results.
field base_compressor: langchain.retrievers.document_compressors.base.BaseDocumentCompressor [Required]#
Compressor for compressing retrieved documents.
field base_retriever: langchain.schema.BaseRetriever [Required]#
Base Retriever to use for getting relevant documents.
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
Sequence of relevant documents
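A sketch composing a compressor with a base retriever; LLMChainExtractor is one available compressor, and TFIDFRetriever stands in for any base retriever:
from langchain.llms import OpenAI
from langchain.retrievers import ContextualCompressionRetriever, TFIDFRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor

base_retriever = TFIDFRetriever.from_texts(["foo bar", "hello world"])
compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor,
    base_retriever=base_retriever,
)
docs = compression_retriever.get_relevant_documents("hello")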
class langchain.retrievers.DataberryRetriever(datastore_url: str, top_k: Optional[int] = None, api_key: Optional[str] = None)[source]#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
api_key: Optional[str]#
datastore_url: str#
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
top_k: Optional[int]#
class langchain.retrievers.ElasticSearchBM25Retriever(client: Any, index_name: str)[source]#
Wrapper around Elasticsearch using BM25 as a retrieval method.
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to ElasticSearchBM25Retriever.create as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the “Deployments” page.
To obtain your Elastic Cloud password for the default “elastic” user:
Log in to the Elastic Cloud console at https://cloud.elastic.co
Go to “Security” > “Users”
Locate the “elastic” user and click “Edit”
Click “Reset password”
Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
add_texts(texts: Iterable[str], refresh_indices: bool = True) → List[str][source]#
Run more texts through the embeddings and add to the retriever.
Parameters
texts – Iterable of strings to add to the retriever.
refresh_indices – bool to refresh ElasticSearch indices
Returns
List of ids from adding the texts into the retriever.
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
classmethod create(elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75) → langchain.retrievers.elastic_search_bm25.ElasticSearchBM25Retriever[source]#
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
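A minimal sketch using the create classmethod documented above; the URL and index name are illustrative:
from langchain.retrievers import ElasticSearchBM25Retriever

retriever = ElasticSearchBM25Retriever.create(
    elasticsearch_url="http://localhost:9200",  # illustrative local instance
    index_name="langchain-bm25-index",
)
retriever.add_texts(["foo", "foo bar", "hello world"])
docs = retriever.get_relevant_documents("foo")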
pydantic model langchain.retrievers.KNNRetriever[source]#
field embeddings: langchain.embeddings.base.Embeddings [Required]#
field index: Any = None#
field k: int = 4#
field relevancy_threshold: Optional[float] = None#
field texts: List[str] [Required]#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
classmethod from_texts(texts: List[str], embeddings: langchain.embeddings.base.Embeddings, **kwargs: Any) → langchain.retrievers.knn.KNNRetriever[source]#
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
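A minimal sketch using from_texts; the texts are illustrative:
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import KNNRetriever

retriever = KNNRetriever.from_texts(
    ["foo", "bar", "hello world"], OpenAIEmbeddings()
)
docs = retriever.get_relevant_documents("foo")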
class langchain.retrievers.MetalRetriever(client: Any, params: Optional[dict] = None)[source]#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
pydantic model langchain.retrievers.PineconeHybridSearchRetriever[source]#
field alpha: float = 0.5#
field embeddings: langchain.embeddings.base.Embeddings [Required]#
field index: Any = None#
field sparse_encoder: Any = None#
field top_k: int = 4#
add_texts(texts: List[str], ids: Optional[List[str]] = None, metadatas: Optional[List[dict]] = None) → None[source]#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
pydantic model langchain.retrievers.RemoteLangChainRetriever[source]#
field headers: Optional[dict] = None#
field input_key: str = 'message'#
field metadata_key: str = 'metadata'#
field page_content_key: str = 'page_content'#
field response_key: str = 'response'#
field url: str [Required]#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
pydantic model langchain.retrievers.SVMRetriever[source]#
field embeddings: langchain.embeddings.base.Embeddings [Required]#
field index: Any = None#
field k: int = 4#
field relevancy_threshold: Optional[float] = None#
field texts: List[str] [Required]#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
classmethod from_texts(texts: List[str], embeddings: langchain.embeddings.base.Embeddings, **kwargs: Any) → langchain.retrievers.svm.SVMRetriever[source]#
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
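A minimal sketch using from_texts, analogous to KNNRetriever; scikit-learn is required for the underlying SVM:
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import SVMRetriever

retriever = SVMRetriever.from_texts(
    ["foo", "bar", "hello world"], OpenAIEmbeddings()
)
docs = retriever.get_relevant_documents("foo")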
pydantic model langchain.retrievers.SelfQueryRetriever[source]#
Retriever that wraps around a vector store and uses an LLM to generate
the vector store queries.
field llm_chain: langchain.chains.llm.LLMChain [Required]#
The LLMChain for generating the vector store queries.
field search_kwargs: dict [Optional]#
Keyword arguments to pass in to the vector store search.
field search_type: str = 'similarity'#
The search type to perform on the vector store.
field structured_query_translator: langchain.chains.query_constructor.ir.Visitor [Required]#
Translator for turning internal query language into vectorstore search params.
field vectorstore: langchain.vectorstores.base.VectorStore [Required]#
The underlying vector store from which documents will be retrieved.
field verbose: bool = False#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, vectorstore: langchain.vectorstores.base.VectorStore, document_contents: str, metadata_field_info: List[langchain.chains.query_constructor.schema.AttributeInfo], structured_query_translator: Optional[langchain.chains.query_constructor.ir.Visitor] = None, chain_kwargs: Optional[Dict] = None, enable_limit: bool = False, **kwargs: Any) → langchain.retrievers.self_query.base.SelfQueryRetriever[source]#
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
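A sketch of from_llm; Chroma is one vector store with a built-in query translator (requires the chromadb package), and the documents, metadata schema, and query are illustrative. The lark package is typically required for query parsing.
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers import SelfQueryRetriever
from langchain.vectorstores import Chroma

vectorstore = Chroma.from_texts(
    ["A sci-fi adventure in space", "A romantic comedy in Paris"],
    OpenAIEmbeddings(),
    metadatas=[{"year": 1999, "genre": "science fiction"},
               {"year": 2005, "genre": "comedy"}],
)
metadata_field_info = [
    AttributeInfo(name="year", description="The year the movie was released",
                  type="integer"),
    AttributeInfo(name="genre", description="The genre of the movie",
                  type="string"),
]
retriever = SelfQueryRetriever.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,
    document_contents="Brief summary of a movie",
    metadata_field_info=metadata_field_info,
)
docs = retriever.get_relevant_documents("sci-fi movies released after 1990")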
pydantic model langchain.retrievers.TFIDFRetriever[source]#
field docs: List[langchain.schema.Document] [Required]#
field k: int = 4#
field tfidf_array: Any = None#
field vectorizer: Any = None#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
classmethod from_documents(documents: Iterable[langchain.schema.Document], *, tfidf_params: Optional[Dict[str, Any]] = None, **kwargs: Any) → langchain.retrievers.tfidf.TFIDFRetriever[source]#
classmethod from_texts(texts: Iterable[str], metadatas: Optional[Iterable[dict]] = None, tfidf_params: Optional[Dict[str, Any]] = None, **kwargs: Any) → langchain.retrievers.tfidf.TFIDFRetriever[source]#
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
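A minimal sketch using from_texts; requires scikit-learn, and the texts are illustrative:
from langchain.retrievers import TFIDFRetriever

retriever = TFIDFRetriever.from_texts(["foo", "foo bar", "hello world"])
docs = retriever.get_relevant_documents("foo")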
pydantic model langchain.retrievers.TimeWeightedVectorStoreRetriever[source]#
Retriever combining embedding similarity with recency.
field decay_rate: float = 0.01#
The exponential decay factor used as (1.0-decay_rate)**(hrs_passed).
field default_salience: Optional[float] = None#
The salience to assign memories not retrieved from the vector store.
None assigns no salience to documents not fetched from the vector store.
field k: int = 4#
The maximum number of documents to retrieve in a given call.
field memory_stream: List[langchain.schema.Document] [Optional]#
The memory_stream of documents to search through.
field other_score_keys: List[str] = []#
Other keys in the metadata to factor into the score, e.g. ‘importance’.
field search_kwargs: dict [Optional]#
Keyword arguments to pass to the vectorstore similarity search.
field vectorstore: langchain.vectorstores.base.VectorStore [Required]#
The vectorstore to store documents and determine salience.
async aadd_documents(documents: List[langchain.schema.Document], **kwargs: Any) → List[str][source]#
Add documents to vectorstore.
add_documents(documents: List[langchain.schema.Document], **kwargs: Any) → List[str][source]#
Add documents to vectorstore.
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Return documents that are relevant to the query.
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Return documents that are relevant to the query.
get_salient_docs(query: str) → Dict[int, Tuple[langchain.schema.Document, float]][source]#
Return documents that are salient to the query.
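A sketch pairing the retriever with a FAISS store; the embedding dimension of 1536 assumes OpenAI embeddings, and the document content is illustrative:
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import Document
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # dimension of the assumed embedding model
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, decay_rate=0.01, k=4
)
retriever.add_documents([Document(page_content="hello world")])
docs = retriever.get_relevant_documents("hello world")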
class langchain.retrievers.VespaRetriever(app: Vespa, body: Dict, content_field: str, metadata_fields: Optional[Sequence[str]] = None)[source]#
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
classmethod from_params(url: str, content_field: str, *, k: Optional[int] = None, metadata_fields: Union[Sequence[str], Literal['*']] = (), sources: Optional[Union[Sequence[str], Literal['*']]] = None, _filter: Optional[str] = None, yql: Optional[str] = None, **kwargs: Any) → langchain.retrievers.vespa_retriever.VespaRetriever[source]#
Instantiate retriever from params.
Parameters
url (str) – Vespa app URL.
content_field (str) – Field in results to return as Document page_content.
k (Optional[int]) – Number of Documents to return. Defaults to None.
metadata_fields (Sequence[str] or "*") – Fields in results to include in
document metadata. Defaults to empty tuple ().
sources (Sequence[str] or "*" or None) – Sources to retrieve
from. Defaults to None.
_filter (Optional[str]) – Document filter condition expressed in YQL.
Defaults to None.
yql (Optional[str]) – Full YQL query to be used. Should not be specified
if _filter or sources are specified. Defaults to None.
kwargs (Any) – Keyword arguments added to query body.
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents_with_filter(query: str, *, _filter: Optional[str] = None) → List[langchain.schema.Document][source]#
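A sketch of from_params; the URL and field name are illustrative and must match your Vespa application:
from langchain.retrievers import VespaRetriever

retriever = VespaRetriever.from_params(
    url="https://doc-search.vespa.oath.cloud",  # hypothetical Vespa app URL
    content_field="content",
    k=5,
)
docs = retriever.get_relevant_documents("what is vespa?")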
class langchain.retrievers.WeaviateHybridSearchRetriever(client: Any, index_name: str, text_key: str, alpha: float = 0.5, k: int = 4, attributes: Optional[List[str]] = None, create_schema_if_missing: bool = True)[source]#
class Config[source]#
Configuration for this pydantic object.
arbitrary_types_allowed = True#
extra = 'forbid'#
add_documents(docs: List[langchain.schema.Document], **kwargs: Any) → List[str][source]#
Upload documents to Weaviate.
async aget_relevant_documents(query: str, where_filter: Optional[Dict[str, object]] = None) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str, where_filter: Optional[Dict[str, object]] = None) → List[langchain.schema.Document][source]#
Look up similar documents in Weaviate.
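A minimal sketch, assuming a local Weaviate instance; alpha interpolates between keyword search (0.0) and vector search (1.0):
import weaviate
from langchain.retrievers import WeaviateHybridSearchRetriever

client = weaviate.Client(url="http://localhost:8080")  # illustrative local instance
retriever = WeaviateHybridSearchRetriever(
    client=client,
    index_name="LangChain",  # hypothetical index name and text key
    text_key="text",
    alpha=0.5,
)
docs = retriever.get_relevant_documents("foo")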
pydantic model langchain.retrievers.WikipediaRetriever[source]#
It is effectively a wrapper for WikipediaAPIWrapper.
It wraps load() to get_relevant_documents().
It uses all WikipediaAPIWrapper arguments without any change.
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
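A minimal sketch, assuming the wikipedia python package is installed; the query is illustrative:
from langchain.retrievers import WikipediaRetriever

retriever = WikipediaRetriever()
docs = retriever.get_relevant_documents("large language models")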
class langchain.retrievers.ZepRetriever(session_id: str, url: str, top_k: Optional[int] = None)[source]#
A Retriever implementation for the Zep long-term memory store. Search your
user’s long-term chat history with Zep.
Note: You will need to provide the user’s session_id to use this retriever.
More on Zep:
Zep provides long-term conversation storage for LLM apps. The server stores,
summarizes, embeds, indexes, and enriches conversational AI chat
histories, and exposes them via simple, low-latency APIs.
For server installation instructions, see:
https://getzep.github.io/deployment/quickstart/
async aget_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
get_relevant_documents(query: str) → List[langchain.schema.Document][source]#
Get documents relevant for a query.
Parameters
query – string to find relevant documents for
Returns
List of relevant documents
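A minimal sketch; the session id identifies your user's chat session, and the URL points at your Zep server:
from langchain.retrievers import ZepRetriever

retriever = ZepRetriever(
    session_id="user-123-session",  # hypothetical session id
    url="http://localhost:8000",  # illustrative local Zep server
    top_k=5,
)
docs = retriever.get_relevant_documents("What did we discuss about pricing?")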
Vector Stores#
Wrappers on top of vector stores.
class langchain.vectorstores.AnalyticDB(connection_string: str, embedding_function: langchain.embeddings.base.Embeddings, collection_name: str = 'langchain', collection_metadata: Optional[dict] = None, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None)[source]#
VectorStore implementation using AnalyticDB.
AnalyticDB is a distributed, cloud-native database with full PostgreSQL syntax support.
- connection_string is a postgres connection string.
- embedding_function is any embedding function implementing the
langchain.embeddings.base.Embeddings interface.
- collection_name is the name of the collection to use. (default: langchain)
NOTE: This is not the name of the table, but the name of the collection. The
tables will be created when initializing the store (if they do not exist), so
make sure the user has the right permissions to create tables.
- pre_delete_collection: if True, will delete the collection if it exists.
(default: False) Useful for testing.
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
kwargs – vectorstore specific parameters
Returns
List of ids from adding the texts into the vectorstore.
connect() → sqlalchemy.engine.base.Connection[source]#
classmethod connection_string_from_db_params(driver: str, host: str, port: int, database: str, user: str, password: str) → str[source]#
Return connection string from database parameters.
create_collection() → None[source]#
create_tables_if_not_exists() → None[source]#
delete_collection() → None[source]#
drop_tables() → None[source]#
classmethod from_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, collection_name: str = 'langchain', ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any) → langchain.vectorstores.analyticdb.AnalyticDB[source]#
Return VectorStore initialized from documents and embeddings.
Postgres connection string is required
Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'langchain', ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any) → langchain.vectorstores.analyticdb.AnalyticDB[source]#
Return VectorStore initialized from texts and embeddings.
Postgres connection string is required
Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
get_collection(session: sqlalchemy.orm.session.Session) → Optional[langchain.vectorstores.analyticdb.CollectionStore][source]#
classmethod get_connection_string(kwargs: Dict[str, Any]) → str[source]#
similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document][source]#
Run similarity search with AnalyticDB with distance.
Parameters
query (str) – Query text to search for.
k (int) – Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns
List of Documents most similar to the query.
similarity_search_by_vector(embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to embedding vector.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns
List of Documents most similar to the query vector.
similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None) → List[Tuple[langchain.schema.Document, float]][source]#
Return docs most similar to query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns
List of Documents most similar to the query and score for each
similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, filter: Optional[dict] = None) → List[Tuple[langchain.schema.Document, float]][source]#
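A sketch of building the connection string and initializing the store from texts; the database parameters are illustrative:
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import AnalyticDB

connection_string = AnalyticDB.connection_string_from_db_params(
    driver="psycopg2",  # hypothetical connection parameters
    host="localhost",
    port=5432,
    database="postgres",
    user="postgres",
    password="postgres",
)
db = AnalyticDB.from_texts(
    texts=["foo", "bar"],
    embedding=OpenAIEmbeddings(),
    connection_string=connection_string,
)
docs = db.similarity_search("foo", k=2)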
class langchain.vectorstores.Annoy(embedding_function: Callable, index: Any, metric: str, docstore: langchain.docstore.base.Docstore, index_to_docstore_id: Dict[int, str])[source]#
Wrapper around Annoy vector database.
To use, you should have the annoy python package installed.
Example
from langchain import Annoy
db = Annoy(embedding_function, index, docstore, index_to_docstore_id)
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
kwargs – vectorstore specific parameters
Returns
List of ids from adding the texts into the vectorstore.
classmethod from_embeddings(text_embeddings: List[Tuple[str, List[float]]], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, metric: str = 'angular', trees: int = 100, n_jobs: int = -1, **kwargs: Any) → langchain.vectorstores.annoy.Annoy[source]#
Construct Annoy wrapper from embeddings.
Parameters
text_embeddings – List of tuples of (text, embedding)
embedding – Embedding function to use.
metadatas – List of metadata dictionaries to associate with documents.
metric – Metric to use for indexing. Defaults to “angular”.
trees – Number of trees to use for indexing. Defaults to 100.
n_jobs – Number of jobs to use for indexing. Defaults to -1.
This is a user friendly interface that:
Creates an in memory docstore with provided embeddings
Initializes the Annoy database
This is intended to be a quick way to get started.
Example
from langchain import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
texts = ["foo", "bar", "baz"]  # your corpus of texts
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
db = Annoy.from_embeddings(text_embedding_pairs, embeddings)