Python REPL

Source: https://python.langchain.com/en/latest/reference/modules/python.html

Kept for backwards compatibility.

pydantic model langchain.python.PythonREPL
    Simulates a standalone Python REPL.

    field globals: Optional[Dict] [Optional] (alias '_globals')
    field locals: Optional[Dict] [Optional] (alias '_locals')

    run(command: str) → str
        Run the command with the instance's own globals/locals and return anything printed.
Document Compressors

Source: https://python.langchain.com/en/latest/reference/modules/document_compressors.html

pydantic model langchain.retrievers.document_compressors.CohereRerank
    field client: Client [Required]
    field model: str = 'rerank-english-v2.0'
    field top_n: int = 3

    async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Compress retrieved documents given the query context.

    compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Compress retrieved documents given the query context.

pydantic model langchain.retrievers.document_compressors.DocumentCompressorPipeline
    Document compressor that uses a pipeline of transformers.

    field transformers: List[Union[langchain.schema.BaseDocumentTransformer, langchain.retrievers.document_compressors.base.BaseDocumentCompressor]] [Required]
        List of document filters that are chained together and run in sequence.

    async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Compress retrieved documents given the query context.

    compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Transform a list of documents.
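As an illustration (not from the reference itself), a pipeline might chain a text splitter with the EmbeddingsFilter documented below; the splitter settings, the threshold value, and the use of OpenAI embeddings are all assumptions:

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers.document_compressors import (
        DocumentCompressorPipeline,
        EmbeddingsFilter,
    )
    from langchain.schema import Document
    from langchain.text_splitter import CharacterTextSplitter

    embeddings = OpenAIEmbeddings()  # requires an OpenAI API key
    # split documents into smaller chunks first ...
    splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ")
    # ... then keep only chunks sufficiently similar to the query
    relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75)
    pipeline = DocumentCompressorPipeline(transformers=[splitter, relevant_filter])

    docs = [Document(page_content="LangChain ships document compressors. They shrink context.")]
    compressed = pipeline.compress_documents(docs, query="What is a document compressor?")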
pydantic model langchain.retrievers.document_compressors.EmbeddingsFilter
    field embeddings: langchain.embeddings.base.Embeddings [Required]
        Embeddings to use for embedding document contents and queries.

    field k: Optional[int] = 20
        The number of relevant documents to return. Can be set to None, in which case similarity_threshold must be specified. Defaults to 20.

    field similarity_fn: Callable = <function cosine_similarity>
        Similarity function for comparing documents. The function is expected to take two matrices (List[List[float]]) as input and return a matrix of scores where higher values indicate greater similarity.

    field similarity_threshold: Optional[float] = None
        Threshold for determining when two documents are similar enough to be considered redundant. Defaults to None; must be specified if k is set to None.

    async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Filter down documents.

    compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Filter documents based on the similarity of their embeddings to the query.
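For reference, a custom similarity_fn compatible with the contract above (two List[List[float]] matrices in, a score matrix out, higher meaning more similar) could be sketched with numpy; this is an illustration, not the library's internal implementation:

    import numpy as np

    def my_cosine_similarity(a, b):
        # normalize each row, then a matrix product yields pairwise cosines;
        # higher values indicate greater similarity, as the contract requires
        a = np.asarray(a, dtype=float)
        b = np.asarray(b, dtype=float)
        a /= np.linalg.norm(a, axis=1, keepdims=True)
        b /= np.linalg.norm(b, axis=1, keepdims=True)
        return a @ b.T

    # hypothetical use:
    # filt = EmbeddingsFilter(embeddings=embeddings, similarity_fn=my_cosine_similarity)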
pydantic model langchain.retrievers.document_compressors.LLMChainExtractor
    field get_input: Callable[[str, langchain.schema.Document], dict] = <function default_get_input>
        Callable for constructing the chain input from the query and a Document.

    field llm_chain: langchain.chains.llm.LLMChain [Required]
        LLM wrapper to use for compressing documents.

    async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Compress page content of raw documents asynchronously.

    compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Compress page content of raw documents.

    classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, prompt: Optional[langchain.prompts.prompt.PromptTemplate] = None, get_input: Optional[Callable[[str, langchain.schema.Document], str]] = None, llm_chain_kwargs: Optional[dict] = None) → langchain.retrievers.document_compressors.chain_extract.LLMChainExtractor
        Initialize from an LLM.

pydantic model langchain.retrievers.document_compressors.LLMChainFilter
    Filter that drops documents that aren't relevant to the query.

    field get_input: Callable[[str, langchain.schema.Document], dict] = <function default_get_input>
        Callable for constructing the chain input from the query and a Document.

    field llm_chain: langchain.chains.llm.LLMChain [Required]
        LLM wrapper to use for filtering documents. The chain prompt is expected to have a BooleanOutputParser.

    async acompress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Filter down documents.

    compress_documents(documents: Sequence[langchain.schema.Document], query: str) → Sequence[langchain.schema.Document]
        Filter down documents based on their relevance to the query.

    classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, prompt: Optional[langchain.prompts.base.BasePromptTemplate] = None, **kwargs: Any) → langchain.retrievers.document_compressors.chain_filter.LLMChainFilter
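A sketch of the two from_llm constructors in tandem (toy documents; assumes an OpenAI API key is configured):

    from langchain.llms import OpenAI
    from langchain.retrievers.document_compressors import (
        LLMChainExtractor,
        LLMChainFilter,
    )
    from langchain.schema import Document

    llm = OpenAI(temperature=0)
    extractor = LLMChainExtractor.from_llm(llm)       # extracts only the relevant passages
    relevance_filter = LLMChainFilter.from_llm(llm)   # drops irrelevant documents entirely

    docs = [
        Document(page_content="LangChain provides document compressors."),
        Document(page_content="Bananas are rich in potassium."),
    ]
    query = "What does LangChain provide?"
    kept = relevance_filter.compress_documents(docs, query)
    extracted = extractor.compress_documents(list(kept), query)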
SearxNG Search

Source: https://python.langchain.com/en/latest/reference/modules/searx_search.html

Utility for using the SearxNG meta search API. SearxNG is a privacy-friendly, free metasearch engine that aggregates results from multiple search engines and databases and supports the OpenSearch specification. More details on the installation instructions can be found here. For the search API, refer to https://docs.searxng.org/dev/search_api.html

Quick Start

To use this utility you need to provide the searx host. This can be done by passing the named parameter searx_host or exporting the environment variable SEARX_HOST. Note: this is the only required parameter. Then create a searx search instance like this:

    from langchain.utilities import SearxSearchWrapper

    # when the host starts with `http` SSL is disabled and the connection
    # is assumed to be on a private network
    searx_host = 'http://self.hosted'

    search = SearxSearchWrapper(searx_host=searx_host)

You can now use the search instance to query the searx API.

Searching

Use the run() and results() methods to query the searx API. Other methods are available for convenience. SearxResults is a convenience wrapper around the raw JSON result. Example usage of the run method to make a search:

    s.run(query="what is the best search engine?")

Engine Parameters

You can pass any accepted searx search API parameters to the SearxSearchWrapper instance. In the following example we use the engines and language parameters:
    # assuming the searx host is set as above or exported as an env variable
    s = SearxSearchWrapper(engines=['google', 'bing'], language='es')

Search Tips

Searx offers a special search syntax that can also be used instead of passing engine parameters. For example, the following query:

    s = SearxSearchWrapper("langchain library", engines=['github'])

    # can also be written as:
    s = SearxSearchWrapper("langchain library !github")

    # or even:
    s = SearxSearchWrapper("langchain library !gh")

In some situations you might want to pass an extra string to the search query, for example when the run() method is called by an agent. The search suffix can also be used as a way to pass extra parameters to searx or the underlying search engines.

    # select the github engine and pass the search suffix
    s = SearxSearchWrapper("langchain library", query_suffix="!gh")

    s = SearxSearchWrapper("langchain library")
    # select github using the conventional google search syntax
    s.run("large language models", query_suffix="site:github.com")

NOTE: A search suffix can be defined on both the instance and the method level. The resulting query will be the concatenation of the two, with the former taking precedence.

See SearxNG Configured Engines and SearxNG Search Syntax for more details.

Notes

This wrapper is based on the SearxNG fork searxng/searxng, which is better maintained than the original Searx project and offers more features. Public SearxNG instances often use a rate limiter for API usage, so you might want to use a self-hosted instance and disable the rate limiter.
If you are self-hosting an instance, you can customize the rate limiter for your own network as described here. For a list of public SearxNG instances, see https://searx.space/

class langchain.utilities.searx_search.SearxResults(data: str)
    Dict-like wrapper around search API results.

    property answers: Any
        Helper accessor on the JSON result.

pydantic model langchain.utilities.searx_search.SearxSearchWrapper
    Wrapper for the Searx API.

    To use, you need to provide the searx host by passing the named parameter searx_host or exporting the environment variable SEARX_HOST.

    In some situations you might want to disable SSL verification, for example if you are running searx locally. You can do this by passing the named parameter unsecure. You can also pass the host URL scheme as http to disable SSL.

    Example:

        from langchain.utilities import SearxSearchWrapper
        searx = SearxSearchWrapper(searx_host="http://localhost:8888")

    Example with SSL disabled:

        from langchain.utilities import SearxSearchWrapper
        # note: the unsecure parameter is not needed if you pass the url scheme as http
        searx = SearxSearchWrapper(searx_host="http://localhost:8888", unsecure=True)

    Validators
        disable_ssl_warnings » unsecure
        validate_params » all fields

    field aiosession: Optional[Any] = None
    field categories: Optional[List[str]] = []
    field engines: Optional[List[str]] = []
    field headers: Optional[dict] = None
    field k: int = 10
    field params: dict [Optional]
    field query_suffix: Optional[str] = ''
    field searx_host: str = ''
    field unsecure: bool = False

    async aresults(query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict]
        Asynchronously query with JSON results. Uses aiohttp. See results for more info.

    async arun(query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str
        Asynchronous version of run.

    results(query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict]
        Run the query through the Searx API and return the results with metadata.
        Parameters:
            query – The query to search for.
            query_suffix – Extra suffix appended to the query.
            num_results – Limit the number of results to return.
            engines – List of engines to use for the query.
            categories – List of categories to use for the query.
            **kwargs – Extra parameters to pass to the searx API.
        Returns: A list of dicts with the following keys:
            snippet – The description of the result.
            title – The title of the result.
            link – The link to the result.
            engines – The engines used for the result.
            category – Searx category of the result.
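A sketch of the async variants (the host is illustrative; signatures as documented above):

    import asyncio
    from langchain.utilities import SearxSearchWrapper

    async def main():
        search = SearxSearchWrapper(searx_host="http://localhost:8888")
        # arun returns a single string; aresults returns a list of
        # metadata dicts (snippet, title, link, engines, category)
        answer = await search.arun("large language models", engines=["google"])
        hits = await search.aresults("large language models", num_results=5)
        print(answer)
        print(hits)

    asyncio.run(main())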
    run(query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str
        Run the query through the Searx API and parse the results. You can pass any other params to the searx query API.
        Parameters:
            query – The query to search for.
            query_suffix – Extra suffix appended to the query.
            engines – List of engines to use for the query.
            categories – List of categories to use for the query.
            **kwargs – Extra parameters to pass to the searx API.
        Returns: The result of the query (str).
        Raises: ValueError – If an error occurred with the query.

        Example
        This will make a query to the qwant engine:

            from langchain.utilities import SearxSearchWrapper
            searx = SearxSearchWrapper(searx_host="http://my.searx.host")
            searx.run("what is the weather in France ?", engine="qwant")

            # the same result can be achieved using the `!` syntax of searx
            # to select the engine using `query_suffix`
            searx.run("what is the weather in France ?", query_suffix="!qwant")
Vector Stores

Source: https://python.langchain.com/en/latest/reference/modules/vectorstores.html

Wrappers on top of vector stores.

class langchain.vectorstores.AnalyticDB(connection_string: str, embedding_function: langchain.embeddings.base.Embeddings, collection_name: str = 'langchain', collection_metadata: Optional[dict] = None, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None)
    VectorStore implementation using AnalyticDB. AnalyticDB is a distributed, cloud-native database with full PostgreSQL syntax.

    connection_string – a Postgres connection string.
    embedding_function – any embedding function implementing the langchain.embeddings.base.Embeddings interface.
    collection_name – the name of the collection to use (default: 'langchain'). NOTE: this is not the name of a table, but the name of the collection. The tables will be created when initializing the store (if they do not exist), so make sure the user has the right permissions to create tables.
    pre_delete_collection – if True, the collection will be deleted if it exists (default: False). Useful for testing.

    add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str]
        Run more texts through the embeddings and add to the vectorstore.
        Parameters:
            texts – Iterable of strings to add to the vectorstore.
            metadatas – Optional list of metadatas associated with the texts.
            kwargs – Vectorstore-specific parameters.
        Returns: List of ids from adding the texts into the vectorstore.

    connect() → sqlalchemy.engine.base.Connection

    classmethod connection_string_from_db_params(driver: str, host: str, port: int, database: str, user: str, password: str) → str
        Return a connection string built from database parameters.
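An illustrative setup (connection details, including the driver value, are placeholders; assumes a reachable AnalyticDB/Postgres instance and an OpenAI key):

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import AnalyticDB

    # placeholder credentials; adjust for your deployment
    connection_string = AnalyticDB.connection_string_from_db_params(
        driver="psycopg2",
        host="localhost",
        port=5432,
        database="langchain",
        user="postgres",
        password="postgres",
    )
    store = AnalyticDB(
        connection_string=connection_string,
        embedding_function=OpenAIEmbeddings(),
    )
    store.add_texts(["AnalyticDB speaks PostgreSQL"], metadatas=[{"topic": "db"}])
    docs = store.similarity_search("PostgreSQL", k=1)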
    create_collection() → None
    create_tables_if_not_exists() → None
    delete_collection() → None
    drop_tables() → None

    classmethod from_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, collection_name: str = 'langchain', ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any) → langchain.vectorstores.analyticdb.AnalyticDB
        Return a VectorStore initialized from documents and embeddings. A Postgres connection string is required: either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable.

    classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'langchain', ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any) → langchain.vectorstores.analyticdb.AnalyticDB
        Return a VectorStore initialized from texts and embeddings. A Postgres connection string is required: either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable.

    get_collection(session: sqlalchemy.orm.session.Session) → Optional[CollectionStore]

    classmethod get_connection_string(kwargs: Dict[str, Any]) → str
    similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document]
        Run similarity search with AnalyticDB with distance.
        Parameters:
            query (str) – Query text to search for.
            k (int) – Number of results to return. Defaults to 4.
            filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
        Returns: List of Documents most similar to the query.

    similarity_search_by_vector(embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document]
        Return docs most similar to embedding vector.
        Parameters:
            embedding – Embedding to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
        Returns: List of Documents most similar to the query vector.

    similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None) → List[Tuple[langchain.schema.Document, float]]
        Return docs most similar to query.
        Parameters:
            query – Text to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
        Returns: List of Documents most similar to the query, with a score for each.

    similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, filter: Optional[dict] = None) → List[Tuple[langchain.schema.Document, float]]

class langchain.vectorstores.Annoy(embedding_function: Callable, index: Any, metric: str, docstore: langchain.docstore.base.Docstore, index_to_docstore_id: Dict[int, str])
    Wrapper around the Annoy vector database. To use, you should have the annoy python package installed.
    Example:

        from langchain import Annoy
        db = Annoy(embedding_function, index, docstore, index_to_docstore_id)

    add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str]
        Run more texts through the embeddings and add to the vectorstore.
        Parameters:
            texts – Iterable of strings to add to the vectorstore.
            metadatas – Optional list of metadatas associated with the texts.
            kwargs – Vectorstore-specific parameters.
        Returns: List of ids from adding the texts into the vectorstore.

    classmethod from_embeddings(text_embeddings: List[Tuple[str, List[float]]], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, metric: str = 'angular', trees: int = 100, n_jobs: int = -1, **kwargs: Any) → langchain.vectorstores.annoy.Annoy
        Construct Annoy wrapper from embeddings.
        Parameters:
            text_embeddings – List of tuples of (text, embedding).
            embedding – Embedding function to use.
            metadatas – List of metadata dictionaries to associate with documents.
            metric – Metric to use for indexing. Defaults to 'angular'.
            trees – Number of trees to use for indexing. Defaults to 100.
            n_jobs – Number of jobs to use for indexing. Defaults to -1.
        This is a user-friendly interface that:
            1. Creates an in-memory docstore with the provided embeddings
            2. Initializes the Annoy database
        This is intended to be a quick way to get started.

        Example:

            from langchain import Annoy
            from langchain.embeddings import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings()
            text_embeddings = embeddings.embed_documents(texts)
            text_embedding_pairs = list(zip(texts, text_embeddings))
            db = Annoy.from_embeddings(text_embedding_pairs, embeddings)

    classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, metric: str = 'angular', trees: int = 100, n_jobs: int = -1, **kwargs: Any) → langchain.vectorstores.annoy.Annoy
        Construct Annoy wrapper from raw documents.
        Parameters:
            texts – List of documents to index.
            embedding – Embedding function to use.
            metadatas – List of metadata dictionaries to associate with documents.
            metric – Metric to use for indexing. Defaults to 'angular'.
            trees – Number of trees to use for indexing. Defaults to 100.
            n_jobs – Number of jobs to use for indexing. Defaults to -1.
        This is a user-friendly interface that:
            1. Embeds documents
            2. Creates an in-memory docstore
            3. Initializes the Annoy database
        This is intended to be a quick way to get started.

        Example:

            from langchain import Annoy
            from langchain.embeddings import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings()
            index = Annoy.from_texts(texts, embeddings)

    classmethod load_local(folder_path: str, embeddings: langchain.embeddings.base.Embeddings) → langchain.vectorstores.annoy.Annoy
        Load Annoy index, docstore, and index_to_docstore_id from disk.
        Parameters:
            folder_path – Folder path to load index, docstore, and index_to_docstore_id from.
            embeddings – Embeddings to use when generating queries.
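A round-trip sketch for load_local together with save_local (documented further below); the texts and folder name are illustrative and OpenAI embeddings are assumed:

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Annoy

    embeddings = OpenAIEmbeddings()
    db = Annoy.from_texts(["annoy builds trees", "annoy is approximate"], embeddings)
    db.save_local("annoy_index")                      # persist index + docstore
    restored = Annoy.load_local("annoy_index", embeddings)
    docs = restored.similarity_search("trees", k=1)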
    max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document]
        Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to the query AND diversity among the selected documents.
        Parameters:
            query – Text to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
            lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
        Returns: List of Documents selected by maximal marginal relevance.

    max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document]
        Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to the query AND diversity among the selected documents.
        Parameters:
            embedding – Embedding to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
            lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
        Returns: List of Documents selected by maximal marginal relevance.
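Continuing the sketch above, an MMR query first over-fetches fetch_k candidates and then re-ranks them to trade relevance against diversity:

    # assumes `restored` from the save/load example above
    diverse = restored.max_marginal_relevance_search(
        "annoy", k=2, fetch_k=10, lambda_mult=0.5
    )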
    process_index_results(idxs: List[int], dists: List[float]) → List[Tuple[langchain.schema.Document, float]]
        Turn Annoy results into a list of documents and scores.
        Parameters:
            idxs – List of indices of the documents in the index.
            dists – List of distances of the documents in the index.
        Returns: List of Documents and scores.

    save_local(folder_path: str, prefault: bool = False) → None
        Save Annoy index, docstore, and index_to_docstore_id to disk.
        Parameters:
            folder_path – Folder path to save index, docstore, and index_to_docstore_id to.
            prefault – Whether to pre-load the index into memory.

    similarity_search(query: str, k: int = 4, search_k: int = -1, **kwargs: Any) → List[langchain.schema.Document]
        Return docs most similar to query.
        Parameters:
            query – Text to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            search_k – Inspect up to search_k nodes; defaults to n_trees * n if not provided.
        Returns: List of Documents most similar to the query.

    similarity_search_by_index(docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any) → List[langchain.schema.Document]
        Return docs most similar to docstore_index.
        Parameters:
            docstore_index – Index of the document in the docstore.
            k – Number of Documents to return. Defaults to 4.
            search_k – Inspect up to search_k nodes; defaults to n_trees * n if not provided.
        Returns: List of Documents most similar to the embedding.
to n_trees * n if not provided Returns List of Documents most similar to the embedding. similarity_search_by_vector(embedding: List[float], k: int = 4, search_k: int = - 1, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. search_k – inspect up to search_k nodes which defaults to n_trees * n if not provided Returns List of Documents most similar to the embedding. similarity_search_with_score(query: str, k: int = 4, search_k: int = - 1) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. search_k – inspect up to search_k nodes which defaults to n_trees * n if not provided Returns List of Documents most similar to the query and score for each similarity_search_with_score_by_index(docstore_index: int, k: int = 4, search_k: int = - 1) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. search_k – inspect up to search_k nodes which defaults to n_trees * n if not provided Returns List of Documents most similar to the query and score for each
    similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, search_k: int = -1) → List[Tuple[langchain.schema.Document, float]]
        Return docs most similar to the embedding vector.
        Parameters:
            embedding – Embedding to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            search_k – Inspect up to search_k nodes; defaults to n_trees * n if not provided.
        Returns: List of Documents most similar to the query, with a score for each.

class langchain.vectorstores.AtlasDB(name: str, embedding_function: Optional[langchain.embeddings.base.Embeddings] = None, api_key: Optional[str] = None, description: str = 'A description for your project', is_public: bool = True, reset_project_if_exists: bool = False)
    Wrapper around Atlas: Nomic's neural database and rhizomatic instrument. To use, you should have the nomic python package installed.

    Example:

        from langchain.vectorstores import AtlasDB
        from langchain.embeddings.openai import OpenAIEmbeddings
        embeddings = OpenAIEmbeddings()
        vectorstore = AtlasDB("my_project", embeddings.embed_query)

    add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, refresh: bool = True, **kwargs: Any) → List[str]
        Run more texts through the embeddings and add to the vectorstore.
        Parameters:
            texts (Iterable[str]) – Texts to add to the vectorstore.
            metadatas (Optional[List[dict]], optional) – Optional list of metadatas.
            ids (Optional[List[str]]) – An optional list of ids.
            refresh (bool) – Whether or not to refresh indices with the updated data. Default True.
        Returns: List of IDs of the added texts (List[str]).

    create_index(**kwargs: Any) → Any
        Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full detail.

    classmethod from_documents(documents: List[langchain.schema.Document], embedding: Optional[langchain.embeddings.base.Embeddings] = None, ids: Optional[List[str]] = None, name: Optional[str] = None, api_key: Optional[str] = None, persist_directory: Optional[str] = None, description: str = 'A description for your project', is_public: bool = True, reset_project_if_exists: bool = False, index_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.vectorstores.atlas.AtlasDB
        Create an AtlasDB vectorstore from a list of documents.
        Parameters:
            name (str) – Name of the collection to create.
            api_key (str) – Your nomic API key.
            documents (List[Document]) – List of documents to add to the vectorstore.
            embedding (Optional[Embeddings]) – Embedding function. Defaults to None.
            ids (Optional[List[str]]) – Optional list of document IDs. If None, ids will be auto-created.
            description (str) – A description for your project.
            is_public (bool) – Whether your project is publicly accessible. True by default.
            reset_project_if_exists (bool) – Whether to reset this project if it already exists. Default False. Generally useful during development and testing.
            index_kwargs (Optional[dict]) – Dict of kwargs for index creation. See https://docs.nomic.ai/atlas_api.html
        Returns: The AtlasDB vectorstore (Nomic's neural database and finest rhizomatic instrument).

    classmethod from_texts(texts: List[str], embedding: Optional[langchain.embeddings.base.Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, name: Optional[str] = None, api_key: Optional[str] = None, description: str = 'A description for your project', is_public: bool = True, reset_project_if_exists: bool = False, index_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.vectorstores.atlas.AtlasDB
        Create an AtlasDB vectorstore from raw documents.
        Parameters:
            texts (List[str]) – The list of texts to ingest.
            name (str) – Name of the project to create.
            api_key (str) – Your nomic API key.
            embedding (Optional[Embeddings]) – Embedding function. Defaults to None.
            metadatas (Optional[List[dict]]) – List of metadatas. Defaults to None.
            ids (Optional[List[str]]) – Optional list of document IDs. If None, ids will be auto-created.
            description (str) – A description for your project.
            is_public (bool) – Whether your project is publicly accessible. True by default.
            reset_project_if_exists (bool) – Whether to reset this project if it already exists. Default False. Generally useful during development and testing.
            index_kwargs (Optional[dict]) – Dict of kwargs for index creation. See https://docs.nomic.ai/atlas_api.html
        Returns: The AtlasDB vectorstore (Nomic's neural database and finest rhizomatic instrument).
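An illustrative from_texts call (the project name and API key are placeholders; an OpenAI key is assumed for the embeddings):

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import AtlasDB

    db = AtlasDB.from_texts(
        texts=["first document", "second document"],
        embedding=OpenAIEmbeddings(),
        name="my_project",                 # placeholder project name
        api_key="NOMIC_API_KEY",           # placeholder nomic key
        description="A description for your project",
        reset_project_if_exists=True,      # convenient while testing
    )
    docs = db.similarity_search("first", k=1)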
    similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document]
        Run similarity search with AtlasDB.
        Parameters:
            query (str) – Query text to search for.
            k (int) – Number of results to return. Defaults to 4.
        Returns: List of documents most similar to the query text (List[Document]).

class langchain.vectorstores.Chroma(collection_name: str = 'langchain', embedding_function: Optional[Embeddings] = None, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, collection_metadata: Optional[Dict] = None, client: Optional[chromadb.Client] = None)
    Wrapper around the ChromaDB embeddings platform. To use, you should have the chromadb python package installed.

    Example:

        from langchain.vectorstores import Chroma
        from langchain.embeddings.openai import OpenAIEmbeddings
        embeddings = OpenAIEmbeddings()
        vectorstore = Chroma("langchain_store", embeddings.embed_query)

    add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str]
        Run more texts through the embeddings and add to the vectorstore.
        Parameters:
            texts (Iterable[str]) – Texts to add to the vectorstore.
            metadatas (Optional[List[dict]], optional) – Optional list of metadatas.
            ids (Optional[List[str]], optional) – Optional list of IDs.
        Returns: List of IDs of the added texts (List[str]).

    delete_collection() → None
        Delete the collection.
    classmethod from_documents(documents: List[Document], embedding: Optional[Embeddings] = None, ids: Optional[List[str]] = None, collection_name: str = 'langchain', persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, client: Optional[chromadb.Client] = None, **kwargs: Any) → Chroma
        Create a Chroma vectorstore from a list of documents. If a persist_directory is specified, the collection will be persisted there; otherwise, the data will be ephemeral and in-memory.
        Parameters:
            collection_name (str) – Name of the collection to create.
            persist_directory (Optional[str]) – Directory to persist the collection.
            ids (Optional[List[str]]) – List of document IDs. Defaults to None.
            documents (List[Document]) – List of documents to add to the vectorstore.
            embedding (Optional[Embeddings]) – Embedding function. Defaults to None.
            client_settings (Optional[chromadb.config.Settings]) – Chroma client settings.
        Returns: Chroma vectorstore.

    classmethod from_texts(texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = 'langchain', persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, client: Optional[chromadb.Client] = None, **kwargs: Any) → Chroma
        Create a Chroma vectorstore from raw documents. If a persist_directory is specified, the collection will be persisted there; otherwise, the data will be ephemeral and in-memory.
        Parameters:
            texts (List[str]) – List of texts to add to the collection.
            collection_name (str) – Name of the collection to create.
            persist_directory (Optional[str]) – Directory to persist the collection.
            embedding (Optional[Embeddings]) – Embedding function. Defaults to None.
            metadatas (Optional[List[dict]]) – List of metadatas. Defaults to None.
            ids (Optional[List[str]]) – List of document IDs. Defaults to None.
            client_settings (Optional[chromadb.config.Settings]) – Chroma client settings.
        Returns: Chroma vectorstore.

    get() → langchain.vectorstores.chroma.Chroma
        Gets the collection.

    max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document]
        Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to the query AND diversity among the selected documents.
        Parameters:
            query – Text to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
            lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
            filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
        Returns: List of Documents selected by maximal marginal relevance.
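A persistence-oriented sketch (texts and the directory are illustrative; persist() is documented below):

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Chroma

    db = Chroma.from_texts(
        ["chroma can persist to disk", "when given a persist_directory"],
        OpenAIEmbeddings(),
        collection_name="demo",
        persist_directory="./chroma_db",  # omit for an ephemeral in-memory store
    )
    db.persist()  # flush to disk explicitly
    hits = db.similarity_search("persistence", k=1)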
    max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document]
        Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to the query AND diversity among the selected documents.
        Parameters:
            embedding – Embedding to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
            lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
            filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
        Returns: List of Documents selected by maximal marginal relevance.

    persist() → None
        Persist the collection. This can be used to explicitly persist the data to disk. It will also be called automatically when the object is destroyed.

    similarity_search(query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document]
        Run similarity search with Chroma.
        Parameters:
            query (str) – Query text to search for.
            k (int) – Number of results to return. Defaults to 4.
            filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
        Returns: List of documents most similar to the query text (List[Document]).
    similarity_search_by_vector(embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document]
        Return docs most similar to embedding vector.
        Parameters:
            embedding – Embedding to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
        Returns: List of Documents most similar to the query vector.

    similarity_search_with_score(query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]]
        Run similarity search with Chroma with distance.
        Parameters:
            query (str) – Query text to search for.
            k (int) – Number of results to return. Defaults to 4.
            filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
        Returns: List of documents most similar to the query text, with distance as a float for each (List[Tuple[Document, float]]).

    update_document(document_id: str, document: langchain.schema.Document) → None
        Update a document in the collection.
        Parameters:
            document_id (str) – ID of the document to update.
            document (Document) – Document to update.

class langchain.vectorstores.DeepLake(dataset_path: str = './deeplake/', token: Optional[str] = None, embedding_function: Optional[langchain.embeddings.base.Embeddings] = None, read_only: Optional[bool] = False, ingestion_batch_size: int = 1024, num_workers: int = 0, **kwargs: Any)
    Wrapper around Deep Lake, a data lake for deep learning applications.

    We implement naive similarity search and filtering for fast prototyping, but it can be extended with Tensor Query Language (TQL) for production use cases over billions of rows.

    Why Deep Lake?
        - Not only stores embeddings, but also the original data with version control.
        - Serverless: doesn't require another service and can be used with major cloud providers (S3, GCS, etc.).
        - More than just a multi-modal vector store: you can use the dataset to fine-tune your own LLM models.

    To use, you should have the deeplake python package installed.

    Example:

        from langchain.vectorstores import DeepLake
        from langchain.embeddings.openai import OpenAIEmbeddings
        embeddings = OpenAIEmbeddings()
        vectorstore = DeepLake("langchain_store", embeddings.embed_query)

    add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str]
        Run more texts through the embeddings and add to the vectorstore.
        Parameters:
            texts (Iterable[str]) – Texts to add to the vectorstore.
            metadatas (Optional[List[dict]], optional) – Optional list of metadatas.
            ids (Optional[List[str]], optional) – Optional list of IDs.
        Returns: List of IDs of the added texts (List[str]).

    delete(ids: Any[List[str], None] = None, filter: Any[Dict[str, str], None] = None, delete_all: Any[bool, None] = None) → bool
        Delete the entities in the dataset.
        Parameters:
            ids (Optional[List[str]], optional) – The document_ids to delete. Defaults to None.
            filter (Optional[Dict[str, str]], optional) – The filter to delete by. Defaults to None.
            delete_all (Optional[bool], optional) – Whether to drop the dataset. Defaults to None.

    delete_dataset() → None
        Delete the collection.

    classmethod force_delete_by_path(path: str) → None
        Force delete a dataset by path.

    classmethod from_texts(texts: List[str], embedding: Optional[langchain.embeddings.base.Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, dataset_path: str = './deeplake/', **kwargs: Any) → langchain.vectorstores.deeplake.DeepLake
        Create a Deep Lake dataset from raw documents. If a dataset_path is specified, the dataset will be persisted in that location; otherwise, by default, at ./deeplake.
        Parameters:
            path (str, pathlib.Path) – The full path to the dataset. Can be:
                - A Deep Lake cloud path of the form hub://username/dataset_name. To write to Deep Lake cloud datasets, ensure that you are logged in to Deep Lake (use 'activeloop login' from the command line).
                - An AWS S3 path of the form s3://bucketname/path/to/dataset. Credentials are required (e.g., via the environment).
                - A Google Cloud Storage path of the form gcs://bucketname/path/to/dataset. Credentials are required (e.g., via the environment).
                - A local file system path of the form ./path/to/dataset or ~/path/to/dataset or path/to/dataset.
                - An in-memory path of the form mem://path/to/dataset, which doesn't save the dataset but keeps it in memory instead. Should be used only for testing as it does not persist.
            documents (List[Document]) – List of documents to add.
            embedding (Optional[Embeddings]) – Embedding function. Defaults to None.
            metadatas (Optional[List[dict]]) – List of metadatas. Defaults to None.
            ids (Optional[List[str]]) – List of document IDs. Defaults to None.
        Returns: Deep Lake dataset (DeepLake).

    max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document]
        Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to the query AND diversity among the selected documents.
        Parameters:
            query – Text to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
            lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
        Returns: List of Documents selected by maximal marginal relevance.
    max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document]
        Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to the query AND diversity among the selected documents.
        Parameters:
            embedding – Embedding to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
            lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
        Returns: List of Documents selected by maximal marginal relevance.

    persist() → None
        Persist the collection.

    similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document]
        Return docs most similar to query.
        Parameters:
            query – Text to embed and run the query on.
            k – Number of Documents to return. Defaults to 4.
            embedding – Embedding function to use. Defaults to None.
            distance_metric – 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity distance, 'cos' for cosine similarity, 'dot' for dot product. Defaults to 'L2'.
            filter – Attribute filter by metadata, e.g. {'key': 'value'}. Defaults to None.
            maximal_marginal_relevance – Whether to use maximal marginal relevance. Defaults to False.
            fetch_k – Number of Documents to fetch to pass to the MMR algorithm. Defaults to 20.
            return_score – Whether to return the score. Defaults to False.
        Returns: List of Documents most similar to the query vector.
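Putting the documented kwargs together (assumes the vectorstore from the DeepLake example above; the metadata key in the filter is illustrative):

    docs = vectorstore.similarity_search(
        "deep learning",
        k=4,
        distance_metric="cos",           # L2, L1, max, cos, or dot
        filter={"topic": "ml"},          # illustrative metadata attribute
        maximal_marginal_relevance=False,
        fetch_k=20,
    )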
    similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document]
        Return docs most similar to embedding vector.
        Parameters:
            embedding – Embedding to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
        Returns: List of Documents most similar to the query vector.

    similarity_search_with_score(query: str, distance_metric: str = 'L2', k: int = 4, filter: Optional[Dict[str, str]] = None) → List[Tuple[langchain.schema.Document, float]]
        Run similarity search with Deep Lake with distance returned.
        Parameters:
            query (str) – Query text to search for.
            distance_metric – 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity distance, 'cos' for cosine similarity, 'dot' for dot product. Defaults to 'L2'.
            k (int) – Number of results to return. Defaults to 4.
            filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
        Returns: List of documents most similar to the query text, with distance as a float for each (List[Tuple[Document, float]]).

class langchain.vectorstores.ElasticVectorSearch(elasticsearch_url: str, index_name: str, embedding: langchain.embeddings.base.Embeddings)
    Wrapper around Elasticsearch as a vector database.

    To connect to an Elasticsearch instance that does not require login credentials, pass the Elasticsearch URL and index name along with the embedding object to the constructor.

    Example:

        from langchain import ElasticVectorSearch
        from langchain.embeddings import OpenAIEmbeddings
        embedding = OpenAIEmbeddings()
        elastic_vector_search = ElasticVectorSearch(
            elasticsearch_url="http://localhost:9200",
            index_name="test_index",
            embedding=embedding
        )

    To connect to an Elasticsearch instance that requires login credentials, including Elastic Cloud, use the Elasticsearch URL format https://username:password@es_host:9243.
    For example, to connect to Elastic Cloud, create the Elasticsearch URL with the required authentication details and pass it to the ElasticVectorSearch constructor as the named parameter elasticsearch_url.

    You can obtain your Elastic Cloud URL and login credentials by logging in to the Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and navigating to the "Deployments" page.

    To obtain your Elastic Cloud password for the default "elastic" user:
        1. Log in to the Elastic Cloud console at https://cloud.elastic.co
        2. Go to "Security" > "Users"
        3. Locate the "elastic" user and click "Edit"
        4. Click "Reset password"
        5. Follow the prompts to reset the password

    The format for Elastic Cloud URLs is https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.

    Example:

        from langchain import ElasticVectorSearch
        from langchain.embeddings import OpenAIEmbeddings
        embedding = OpenAIEmbeddings()
        elastic_host = "cluster_id.region_id.gcp.cloud.es.io"
        elasticsearch_url = f"https://username:password@{elastic_host}:9243"
        elastic_vector_search = ElasticVectorSearch(
            elasticsearch_url=elasticsearch_url,
            index_name="test_index",
            embedding=embedding
        )

    Parameters:
        elasticsearch_url (str) – The URL for the Elasticsearch instance.
        index_name (str) – The name of the Elasticsearch index for the embeddings.
        embedding (Embeddings) – An object that provides the ability to embed text. It should be an instance of a class that subclasses the Embeddings abstract base class, such as OpenAIEmbeddings().

    Raises: ValueError – If the elasticsearch python package is not installed.
    add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, refresh_indices: bool = True, **kwargs: Any) → List[str]
        Run more texts through the embeddings and add to the vectorstore.
        Parameters:
            texts – Iterable of strings to add to the vectorstore.
            metadatas – Optional list of metadatas associated with the texts.
            refresh_indices – Whether to refresh Elasticsearch indices (bool).
        Returns: List of ids from adding the texts into the vectorstore.

    classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.elastic_vector_search.ElasticVectorSearch
        Construct an ElasticVectorSearch wrapper from raw documents.
        This is a user-friendly interface that:
            1. Embeds documents
            2. Creates a new index for the embeddings in the Elasticsearch instance
            3. Adds the documents to the newly created Elasticsearch index
        This is intended to be a quick way to get started.

        Example:

            from langchain import ElasticVectorSearch
            from langchain.embeddings import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings()
            elastic_vector_search = ElasticVectorSearch.from_texts(
                texts,
                embeddings,
                elasticsearch_url="http://localhost:9200"
            )

    similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document]
        Return docs most similar to query.
        Parameters:
            query – Text to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
        Returns: List of Documents most similar to the query.
    similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]]
        Return docs most similar to query.
        Parameters:
            query – Text to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
        Returns: List of Documents most similar to the query.

class langchain.vectorstores.FAISS(embedding_function: typing.Callable, index: typing.Any, docstore: langchain.docstore.base.Docstore, index_to_docstore_id: typing.Dict[int, str], relevance_score_fn: typing.Optional[typing.Callable[[float], float]] = <function _default_relevance_score_fn>)
    Wrapper around the FAISS vector database. To use, you should have the faiss python package installed.

    Example:

        from langchain import FAISS
        faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id)

    add_embeddings(text_embeddings: Iterable[Tuple[str, List[float]]], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str]
        Run more texts through the embeddings and add to the vectorstore.
        Parameters:
            text_embeddings – Iterable pairs of string and embedding to add to the vectorstore.
            metadatas – Optional list of metadatas associated with the texts.
        Returns: List of ids from adding the texts into the vectorstore.

    add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str]
        Run more texts through the embeddings and add to the vectorstore.
        Parameters:
            texts – Iterable of strings to add to the vectorstore.
            metadatas – Optional list of metadatas associated with the texts.
        Returns: List of ids from adding the texts into the vectorstore.

    classmethod from_embeddings(text_embeddings: List[Tuple[str, List[float]]], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.faiss.FAISS
        Construct FAISS wrapper from raw documents.
        This is a user-friendly interface that:
            1. Embeds documents
            2. Creates an in-memory docstore
            3. Initializes the FAISS database
        This is intended to be a quick way to get started.

        Example:

            from langchain import FAISS
            from langchain.embeddings import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings()
            text_embeddings = embeddings.embed_documents(texts)
            text_embedding_pairs = list(zip(texts, text_embeddings))
            faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)

    classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.faiss.FAISS
        Construct FAISS wrapper from raw documents.
        This is a user-friendly interface that:
            1. Embeds documents
            2. Creates an in-memory docstore
            3. Initializes the FAISS database
        This is intended to be a quick way to get started.

        Example:

            from langchain import FAISS
            from langchain.embeddings import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings()
            faiss = FAISS.from_texts(texts, embeddings)

    classmethod load_local(folder_path: str, embeddings: langchain.embeddings.base.Embeddings, index_name: str = 'index') → langchain.vectorstores.faiss.FAISS
        Load FAISS index, docstore, and index_to_docstore_id from disk.
        Parameters:
            folder_path – Folder path to load index, docstore, and index_to_docstore_id from.
            embeddings – Embeddings to use when generating queries.
            index_name – For loading with a specific index file name.

    max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document]
        Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to the query AND diversity among the selected documents.
        Parameters:
            query – Text to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
            lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
        Returns: List of Documents selected by maximal marginal relevance.

    max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document]
        Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to the query AND diversity among the selected documents.
        Parameters:
            embedding – Embedding to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
            fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
            lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
        Returns: List of Documents selected by maximal marginal relevance.

    merge_from(target: langchain.vectorstores.faiss.FAISS) → None
        Merge another FAISS object with the current one. Adds the target FAISS to the current one.
        Parameters:
            target – FAISS object you wish to merge into the current one.
        Returns: None.

    save_local(folder_path: str, index_name: str = 'index') → None
        Save FAISS index, docstore, and index_to_docstore_id to disk.
        Parameters:
            folder_path – Folder path to save index, docstore, and index_to_docstore_id to.
            index_name – For saving with a specific index file name.

    similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document]
        Return docs most similar to query.
        Parameters:
            query – Text to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
        Returns: List of Documents most similar to the query.

    similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document]
        Return docs most similar to embedding vector.
        Parameters:
            embedding – Embedding to look up documents similar to.
            k – Number of Documents to return. Defaults to 4.
        Returns: List of Documents most similar to the embedding.
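A sketch combining merge_from with save_local/load_local (toy shards; OpenAI embeddings assumed):

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    embeddings = OpenAIEmbeddings()
    db1 = FAISS.from_texts(["first shard"], embeddings)
    db2 = FAISS.from_texts(["second shard"], embeddings)
    db1.merge_from(db2)              # db1 now holds both shards
    db1.save_local("faiss_index")    # writes index + docstore to the folder
    restored = FAISS.load_local("faiss_index", embeddings)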
k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query and score for each. similarity_search_with_score_by_vector(embedding: List[float], k: int = 4) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to the embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query and score for each. class langchain.vectorstores.LanceDB(connection: Any, embedding: langchain.embeddings.base.Embeddings, vector_key: Optional[str] = 'vector', id_key: Optional[str] = 'id', text_key: Optional[str] = 'text')[source]# Wrapper around LanceDB vector database. To use, you should have the lancedb python package installed. Example db = lancedb.connect('./lancedb') table = db.open_table('my_table') vectorstore = LanceDB(table, embedding_function) vectorstore.add_texts(['text1', 'text2']) result = vectorstore.similarity_search('text1') add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str][source]# Turn texts into embeddings and add them to the database. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. ids – Optional list of ids to associate with the texts. Returns List of ids of the added texts.
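A minimal sketch (editor's addition) of add_texts with per-document metadata and explicit ids; the directory, table name, and metadata fields are illustrative assumptions, and the table is assumed to already exist with a compatible schema (for example, created via from_texts below).

```python
import lancedb
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB

# Open an existing LanceDB table; './lancedb' and 'my_table' are placeholders.
db = lancedb.connect("./lancedb")
table = db.open_table("my_table")
vectorstore = LanceDB(table, OpenAIEmbeddings())

# Attach metadata and explicit ids to each inserted text.
ids = vectorstore.add_texts(
    texts=["LanceDB is embedded", "It stores vectors on disk"],
    metadatas=[{"source": "notes"}, {"source": "notes"}],
    ids=["doc-1", "doc-2"],
)
```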
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, connection: Optional[Any] = None, vector_key: Optional[str] = 'vector', id_key: Optional[str] = 'id', text_key: Optional[str] = 'text', **kwargs: Any) → langchain.vectorstores.lancedb.LanceDB[source]# Return VectorStore initialized from texts and embeddings. similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return documents most similar to the query. Parameters query – String to query the vectorstore with. k – Number of documents to return. Returns List of documents most similar to the query. class langchain.vectorstores.Milvus(embedding_function: Embeddings, collection_name: str = 'LangChainCollection', connection_args: Optional[dict[str, Any]] = None, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: Optional[bool] = False)[source]# Wrapper around the Milvus vector database. add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, timeout: Optional[int] = None, batch_size: int = 1000, **kwargs: Any) → List[str][source]# Insert text data into Milvus. Inserting data when the collection has not been made yet will result in creating a new collection. The data of the first entity decides the schema of the new collection, the dim is extracted from the first embedding and the columns are decided by the first metadata dict.
Metadata keys will need to be present for all inserted values. At the moment there is no None equivalent in Milvus. Parameters texts (Iterable[str]) – The texts to embed; it is assumed that they all fit in memory. metadatas (Optional[List[dict]]) – Metadata dicts attached to each of the texts. Defaults to None. timeout (Optional[int]) – Timeout for each batch insert. Defaults to None. batch_size (int, optional) – Batch size to use for insertion. Defaults to 1000. Raises MilvusException – Failure to add texts. Returns The resulting keys for each inserted element. Return type List[str] classmethod from_texts(texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'LangChainCollection', connection_args: dict[str, Any] = {'host': 'localhost', 'password': '', 'port': '19530', 'secure': False, 'user': ''}, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any) → Milvus[source]# Create a Milvus collection, index it with HNSW, and insert data. Parameters texts (List[str]) – Text data. embedding (Embeddings) – Embedding function. metadatas (Optional[List[dict]]) – Metadata for each text if it exists. Defaults to None. collection_name (str, optional) – Collection name to use. Defaults to “LangChainCollection”. connection_args (dict[str, Any], optional) – Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional) – Which consistency level to use. Defaults to “Session”. index_params (Optional[dict], optional) – Which index_params to use. Defaults to None. search_params (Optional[dict], optional) – Which search params to use. Defaults to None. drop_old (Optional[bool], optional) – Whether to drop the collection with that name if it exists. Defaults to False. Returns Milvus Vector Store Return type Milvus max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a search and return results that are reordered by MMR. Parameters query (str) – The text being searched. k (int, optional) – How many results to give. Defaults to 4. fetch_k (int, optional) – Total results to select k from. Defaults to 20. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. param (dict, optional) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document]
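A hedged sketch (editor's addition) of the MMR search just described, assuming a running Milvus server on localhost:19530 and an OpenAI key in the environment; the texts and query are illustrative.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Milvus

# Build a small collection, then re-rank fetch_k candidates down to k
# diverse results with maximal marginal relevance.
vector_store = Milvus.from_texts(
    ["Milvus is a vector database", "FAISS is a similarity search library",
     "Milvus scales horizontally"],
    OpenAIEmbeddings(),
    connection_args={"host": "localhost", "port": "19530"},
)
docs = vector_store.max_marginal_relevance_search(
    "vector databases",
    k=2,              # results to return
    fetch_k=20,       # candidate pool scored by MMR
    lambda_mult=0.5,  # 0 = maximum diversity, 1 = minimum diversity
)
```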
max_marginal_relevance_search_by_vector(embedding: list[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[Document][source]# Perform a search and return results that are reordered by MMR. Parameters embedding (List[float]) – The embedding vector being searched. k (int, optional) – How many results to give. Defaults to 4. fetch_k (int, optional) – Total results to select k from. Defaults to 20. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. param (dict, optional) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document] similarity_search(query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search against the query string. Parameters query (str) – The text to search. k (int, optional) – How many results to return. Defaults to 4.
param (dict, optional) – The search params for the index type. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document] similarity_search_by_vector(embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search against the given embedding vector. Parameters embedding (List[float]) – The embedding vector to search. k (int, optional) – How many results to return. Defaults to 4. param (dict, optional) – The search params for the index type. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document] similarity_search_with_score(query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here:
https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Parameters query (str) – The text being searched. k (int, optional) – The number of results to return. Defaults to 4. param (dict) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Result doc and score. Return type List[Tuple[Document, float]] similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Perform a search on an embedding vector and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Parameters embedding (List[float]) – The embedding vector being searched. k (int, optional) – The number of results to return. Defaults to 4. param (dict) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Result doc and score. Return type List[Tuple[Document, float]]
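A short sketch (editor's addition) of similarity_search_with_score against an already populated collection; the collection name and the IVF-style search params are illustrative assumptions, not required values.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Milvus

# Attach to an existing collection rather than creating one.
store = Milvus(
    OpenAIEmbeddings(),
    collection_name="LangChainCollection",
    connection_args={"host": "localhost", "port": "19530"},
)
results = store.similarity_search_with_score(
    "vector databases",
    k=4,
    param={"metric_type": "L2", "params": {"nprobe": 10}},
)
for doc, score in results:
    # With L2 distance, a lower score means a closer match.
    print(round(score, 3), doc.page_content)
```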
class langchain.vectorstores.MyScale(embedding: langchain.embeddings.base.Embeddings, config: Optional[langchain.vectorstores.myscale.MyScaleSettings] = None, **kwargs: Any)[source]# Wrapper around MyScale vector database. You need the clickhouse-connect python package and a valid account to connect to MyScale. MyScale can not only search with simple vector indexes; it also supports complex queries with multiple conditions, constraints and even sub-queries. For more information, please visit the [MyScale official site](https://docs.myscale.com/en/overview/). add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 32, ids: Optional[Iterable[str]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. ids – Optional list of ids to associate with the texts. batch_size – Batch size of insertion. metadatas – Optional column data to be inserted. Returns List of ids from adding the texts into the vectorstore. drop() → None[source]# Helper function: drop data. escape_str(value: str) → str[source]# classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, config: Optional[langchain.vectorstores.myscale.MyScaleSettings] = None, text_ids: Optional[Iterable[str]] = None, batch_size: int = 32, **kwargs: Any) → langchain.vectorstores.myscale.MyScale[source]# Create MyScale wrapper with existing texts. Parameters
embedding (Embeddings) – Function to extract text embeddings. texts (Iterable[str]) – List or tuple of strings to be added. config (MyScaleSettings, optional) – MyScale configuration. text_ids (Optional[Iterable], optional) – IDs for the texts. Defaults to None. batch_size (int, optional) – Batch size when transmitting data to MyScale. Defaults to 32. metadatas (List[dict], optional) – Metadata attached to the texts. Defaults to None. Other keyword arguments will pass into [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api). Returns MyScale Index property metadata_column: str# similarity_search(query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search with MyScale. Parameters query (str) – query string. k (int, optional) – Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional) – where condition string. Defaults to None. NOTE – Do not let end users fill this in; always be aware of SQL injection. When dealing with metadatas, remember to use {self.metadata_column}.attribute instead of attribute alone. The default name for it is metadata. Returns List of Documents Return type List[Document] similarity_search_by_vector(embedding: List[float], k: int = 4, where_str: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search with MyScale by vector. Parameters embedding (List[float]) – query embedding
k (int, optional) – Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional) – where condition string. Defaults to None. NOTE – Do not let end users fill this in; always be aware of SQL injection. When dealing with metadatas, remember to use {self.metadata_column}.attribute instead of attribute alone. The default name for it is metadata. Returns List of Documents Return type List[Document] similarity_search_with_relevance_scores(query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Perform a similarity search with MyScale. Parameters query (str) – query string. k (int, optional) – Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional) – where condition string. Defaults to None. NOTE – Do not let end users fill this in; always be aware of SQL injection. When dealing with metadatas, remember to use {self.metadata_column}.attribute instead of attribute alone. The default name for it is metadata. Returns List of (Document, relevance score) Return type List[Tuple[Document, float]] pydantic settings langchain.vectorstores.MyScaleSettings[source]# MyScale Client Configuration Attributes: myscale_host (str) : A URL to connect to the MyScale backend. Defaults to ‘localhost’. myscale_port (int) : URL port to connect with HTTP. Defaults to 8443. username (str) : Username to log in. Defaults to None. password (str) : Password to log in. Defaults to None.
index_type (str) : index type string. index_param (dict) : index build parameter. database (str) : Database name to find the table. Defaults to ‘default’. table (str) : Table name to operate on. Defaults to ‘vector_table’. metric (str) : Metric to compute distance; supported are (‘l2’, ‘cosine’, ‘ip’). Defaults to ‘cosine’. column_map (Dict) : Column type map to project column names onto langchain semantics. Must have keys: text, id, vector; must be the same size as the number of columns. For example: { ‘id’: ‘text_id’, ‘vector’: ‘text_embedding’, ‘text’: ‘text_plain’, ‘metadata’: ‘metadata_dictionary_in_json’ }. Defaults to identity map. JSON schema: { "title": "MyScaleSettings",
"description": "MyScale Client Configuration\n\nAttribute:\n    myscale_host (str) : An URL to connect to MyScale backend.\n                         Defaults to 'localhost'.\n    myscale_port (int) : URL port to connect with HTTP. Defaults to 8443.\n    username (str) : Usernamed to login. Defaults to None.\n    password (str) : Password to login. Defaults to None.\n    index_type (str): index type string.\n    index_param (dict): index build parameter.\n    database (str) : Database name to find the table. Defaults to 'default'.\n    table (str) : Table name to operate on.\n                  Defaults to 'vector_table'.\n    metric (str) : Metric to compute distance,\n                   supported are ('l2', 'cosine', 'ip'). Defaults to 'cosine'.\n    column_map (Dict) : Column type map to project column name onto langchain\n                        semantics. Must have keys: `text`, `id`, `vector`,\n                        must be same size to number of columns. For example:\n                        .. code-block:: python\n                            {\n                                'id': 'text_id',\n                                'vector': 'text_embedding',\n                                'text': 'text_plain',\n                                'metadata': 'metadata_dictionary_in_json',\n                            }\n\n                        Defaults to identity map.", "type": "object", "properties": { "host": { "title": "Host", "default": "localhost", "env_names": "{'myscale_host'}", "type": "string" }, "port": { "title": "Port",
}, "port": { "title": "Port", "default": 8443, "env_names": "{'myscale_port'}", "type": "integer" }, "username": { "title": "Username", "env_names": "{'myscale_username'}", "type": "string" }, "password": { "title": "Password", "env_names": "{'myscale_password'}", "type": "string" }, "index_type": { "title": "Index Type", "default": "IVFFLAT", "env_names": "{'myscale_index_type'}", "type": "string" }, "index_param": { "title": "Index Param", "env_names": "{'myscale_index_param'}", "type": "object", "additionalProperties": { "type": "string" } }, "column_map": { "title": "Column Map", "default": { "id": "id", "text": "text", "vector": "vector", "metadata": "metadata" }, "env_names": "{'myscale_column_map'}", "type": "object", "additionalProperties": { "type": "string" } }, "database": { "title": "Database", "default": "default", "env_names": "{'myscale_database'}", "type": "string" }, "table": { "title": "Table",
}, "table": { "title": "Table", "default": "langchain", "env_names": "{'myscale_table'}", "type": "string" }, "metric": { "title": "Metric", "default": "cosine", "env_names": "{'myscale_metric'}", "type": "string" } }, "additionalProperties": false } Config env_file: str = .env env_file_encoding: str = utf-8 env_prefix: str = myscale_ Fields column_map (Dict[str, str]) database (str) host (str) index_param (Optional[Dict[str, str]]) index_type (str) metric (str) password (Optional[str]) port (int) table (str) username (Optional[str]) field column_map: Dict[str, str] = {'id': 'id', 'metadata': 'metadata', 'text': 'text', 'vector': 'vector'}# field database: str = 'default'# field host: str = 'localhost'# field index_param: Optional[Dict[str, str]] = None# field index_type: str = 'IVFFLAT'# field metric: str = 'cosine'# field password: Optional[str] = None# field port: int = 8443# field table: str = 'langchain'# field username: Optional[str] = None# class langchain.vectorstores.OpenSearchVectorSearch(opensearch_url: str, index_name: str, embedding_function: langchain.embeddings.base.Embeddings, **kwargs: Any)[source]# Wrapper around OpenSearch as a vector database. Example from langchain import OpenSearchVectorSearch
opensearch_vector_search = OpenSearchVectorSearch( "http://localhost:9200", "embeddings", embedding_function ) add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, bulk_size: int = 500, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. bulk_size – Bulk API request count; Default: 500 Returns List of ids from adding the texts into the vectorstore. Optional Args: vector_field: Document field embeddings are stored in. Defaults to “vector_field”. text_field: Document field the text of the document is stored in. Defaults to “text”. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, bulk_size: int = 500, **kwargs: Any) → langchain.vectorstores.opensearch_vector_search.OpenSearchVectorSearch[source]# Construct OpenSearchVectorSearch wrapper from raw documents. Example from langchain import OpenSearchVectorSearch from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() opensearch_vector_search = OpenSearchVectorSearch.from_texts( texts, embeddings, opensearch_url="http://localhost:9200" ) OpenSearch by default supports Approximate Search powered by the nmslib, faiss and lucene engines, which is recommended for large datasets. Also supports brute force search through Script Scoring and Painless Scripting.
Optional Args: vector_field: Document field embeddings are stored in. Defaults to “vector_field”. text_field: Document field the text of the document is stored in. Defaults to “text”. Optional Keyword Args for Approximate Search: engine: “nmslib”, “faiss”, “lucene”; default: “nmslib” space_type: “l2”, “l1”, “cosinesimil”, “linf”, “innerproduct”; default: “l2” ef_search: Size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches; default: 512 ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to a more accurate graph but slower indexing speed; default: 512 m: Number of bidirectional links created for each new element. Large impact on memory consumption. Between 2 and 100; default: 16 Keyword Args for Script Scoring or Painless Scripting: is_appx_search: False similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. By default supports Approximate Search. Also supports Script Scoring and Painless Scripting. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query. Optional Args: vector_field: Document field embeddings are stored in. Defaults to “vector_field”. text_field: Document field the text of the document is stored in. Defaults to “text”. metadata_field: Document field that metadata is stored in. Defaults to “metadata”.
Can be set to a special value “*” to include the entire document. Optional Args for Approximate Search: search_type: “approximate_search”; default: “approximate_search” size: number of results the query actually returns; default: 4 boolean_filter: A Boolean filter consists of a Boolean query that contains a k-NN query and a filter. subquery_clause: Query clause on the knn vector field; default: “must” lucene_filter: the Lucene algorithm decides whether to perform an exact k-NN search with pre-filtering or an approximate search with modified post-filtering. Optional Args for Script Scoring Search: search_type: “script_scoring”; default: “approximate_search” space_type: “l2”, “l1”, “linf”, “cosinesimil”, “innerproduct”, “hammingbit”; default: “l2” pre_filter: script_score query to pre-filter documents before identifying nearest neighbors; default: {“match_all”: {}} Optional Args for Painless Scripting Search: search_type: “painless_scripting”; default: “approximate_search” space_type: “l2Squared”, “l1Norm”, “cosineSimilarity”; default: “l2Squared” pre_filter: script_score query to pre-filter documents before identifying nearest neighbors; default: {“match_all”: {}} class langchain.vectorstores.Pinecone(index: Any, embedding_function: Callable, text_key: str, namespace: Optional[str] = None)[source]# Wrapper around Pinecone vector database. To use, you should have the pinecone-client python package installed. Example from langchain.vectorstores import Pinecone from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone # The environment should be the one specified next to the API key # in your Pinecone console pinecone.init(api_key="***", environment="...") index = pinecone.Index("langchain-demo") embeddings = OpenAIEmbeddings() vectorstore = Pinecone(index, embeddings.embed_query, "text") add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, namespace: Optional[str] = None, batch_size: int = 32, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. ids – Optional list of ids to associate with the texts. namespace – Optional pinecone namespace to add the texts to. Returns List of ids from adding the texts into the vectorstore. classmethod from_existing_index(index_name: str, embedding: langchain.embeddings.base.Embeddings, text_key: str = 'text', namespace: Optional[str] = None) → langchain.vectorstores.pinecone.Pinecone[source]# Load pinecone vectorstore from index name. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 32, text_key: str = 'text', index_name: Optional[str] = None, namespace: Optional[str] = None, **kwargs: Any) → langchain.vectorstores.pinecone.Pinecone[source]# Construct Pinecone wrapper from raw documents. This is a user-friendly interface that:
Embeds documents. Adds the documents to a provided Pinecone index. This is intended to be a quick way to get started. Example from langchain import Pinecone from langchain.embeddings import OpenAIEmbeddings import pinecone # The environment should be the one specified next to the API key # in your Pinecone console pinecone.init(api_key="***", environment="...") embeddings = OpenAIEmbeddings() pinecone = Pinecone.from_texts( texts, embeddings, index_name="langchain-demo" ) similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return pinecone documents most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. filter – Dictionary of argument(s) to filter on metadata. namespace – Namespace to search in. Default will search in the ‘’ namespace. Returns List of Documents most similar to the query. similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None) → List[Tuple[langchain.schema.Document, float]][source]# Return pinecone documents most similar to query, along with scores. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. filter – Dictionary of argument(s) to filter on metadata. namespace – Namespace to search in. Default will search in the ‘’ namespace. Returns
List of Documents most similar to the query and score for each. class langchain.vectorstores.Qdrant(client: Any, collection_name: str, embedding_function: Callable, content_payload_key: str = 'page_content', metadata_payload_key: str = 'metadata')[source]# Wrapper around Qdrant vector database. To use, you should have the qdrant-client package installed. Example from qdrant_client import QdrantClient from langchain import Qdrant client = QdrantClient() collection_name = "MyCollection" qdrant = Qdrant(client, collection_name, embedding_function) CONTENT_KEY = 'page_content'# METADATA_KEY = 'metadata'# add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. Returns List of ids from adding the texts into the vectorstore.
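A brief sketch (editor's addition) of appending texts to a Qdrant store; the store is bootstrapped with from_texts (documented below) using an in-memory instance, so the collection already exists with a matching vector size.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Qdrant

embeddings = OpenAIEmbeddings()
# Bootstrap an in-memory collection, then append more texts to it.
qdrant = Qdrant.from_texts(["first document"], embeddings, location=":memory:")
new_ids = qdrant.add_texts(
    ["second document", "third document"],
    metadatas=[{"source": "a"}, {"source": "b"}],
)
```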
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, location: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = 6333, grpc_port: int = 6334, prefer_grpc: bool = False, https: Optional[bool] = None, api_key: Optional[str] = None, prefix: Optional[str] = None, timeout: Optional[float] = None, host: Optional[str] = None, path: Optional[str] = None, collection_name: Optional[str] = None, distance_func: str = 'Cosine', content_payload_key: str = 'page_content', metadata_payload_key: str = 'metadata', **kwargs: Any) → langchain.vectorstores.qdrant.Qdrant[source]# Construct Qdrant wrapper from a list of texts. Parameters texts – A list of texts to be indexed in Qdrant. embedding – A subclass of Embeddings, responsible for text vectorization. metadatas – An optional list of metadata. If provided, it has to be of the same length as the list of texts. location – If :memory: - use an in-memory Qdrant instance. If str - use it as a url parameter. If None - fall back to relying on the host and port parameters. url – either host or str of “Optional[scheme], host, Optional[port], Optional[prefix]”. Default: None port – Port of the REST API interface. Default: 6333 grpc_port – Port of the gRPC interface. Default: 6334 prefer_grpc – If true - use the gRPC interface whenever possible in custom methods. Default: False
https – If true - use HTTPS(SSL) protocol. Default: None api_key – API key for authentication in Qdrant Cloud. Default: None prefix – If not None - add prefix to the REST URL path. Example: service/v1 will result in http://localhost:6333/service/v1/{qdrant-endpoint} for REST API. Default: None timeout – Timeout for REST and gRPC API requests. Default: 5.0 seconds for REST and unlimited for gRPC host – Host name of Qdrant service. If url and host are None, set to ‘localhost’. Default: None path – Path in which the vectors will be stored while using local mode. Default: None collection_name – Name of the Qdrant collection to be used. If not provided, it will be created randomly. Default: None distance_func – Distance function. One of: “Cosine” / “Euclid” / “Dot”. Default: “Cosine” content_payload_key – A payload key used to store the content of the document. Default: “page_content” metadata_payload_key – A payload key used to store the metadata of the document. Default: “metadata” **kwargs – Additional arguments passed directly into REST client initialization This is a user-friendly interface that: Creates embeddings, one for each text. Initializes the Qdrant database as an in-memory docstore by default (and overridable to a remote docstore). Adds the text embeddings to the Qdrant database. This is intended to be a quick way to get started. Example from langchain import Qdrant from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() qdrant = Qdrant.from_texts(texts, embeddings, host="localhost")
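The parameters above combine naturally for a hosted deployment; a hedged sketch (editor's addition), where the URL, API key, and collection name are placeholders rather than working credentials.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Qdrant

texts = ["some text", "more text"]
# Illustrative Qdrant Cloud style configuration.
qdrant = Qdrant.from_texts(
    texts,
    OpenAIEmbeddings(),
    url="https://my-cluster.example.com:6333",
    api_key="***",
    collection_name="docs",
    distance_func="Cosine",
)
```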
max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. similarity_search(query: str, k: int = 4, filter: Optional[Dict[str, Union[str, int, bool]]] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. filter – Filter by metadata. Defaults to None. Returns List of Documents most similar to the query. similarity_search_with_score(query: str, k: int = 4, filter: Optional[Dict[str, Union[str, int, bool]]] = None) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4.
filter – Filter by metadata. Defaults to None. Returns List of Documents most similar to the query and score for each. class langchain.vectorstores.Redis(redis_url: str, index_name: str, embedding_function: typing.Callable, content_key: str = 'content', metadata_key: str = 'metadata', vector_key: str = 'content_vector', relevance_score_fn: typing.Optional[typing.Callable[[float], float]] = <function _default_relevance_score>, **kwargs: typing.Any)[source]# Wrapper around Redis vector database. To use, you should have the redis python package installed. Example from langchain.vectorstores import Redis from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = Redis( redis_url="redis://username:password@localhost:6379", index_name="my-index", embedding_function=embeddings.embed_query, ) add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, embeddings: Optional[List[List[float]]] = None, keys: Optional[List[str]] = None, batch_size: int = 1000, **kwargs: Any) → List[str][source]# Add more texts to the vectorstore. Parameters texts (Iterable[str]) – Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional) – Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional) – Optional pre-generated embeddings. Defaults to None. keys (Optional[List[str]], optional) – Optional key values to use as ids. Defaults to None. batch_size (int, optional) – Batch size to use for writes. Defaults to 1000.
Returns List of ids added to the vectorstore. Return type List[str] as_retriever(**kwargs: Any) → langchain.schema.BaseRetriever[source]# static drop_index(index_name: str, delete_documents: bool, **kwargs: Any) → bool[source]# Drop a Redis search index. Parameters index_name (str) – Name of the index to drop. delete_documents (bool) – Whether to drop the associated documents. Returns Whether or not the drop was successful. Return type bool classmethod from_existing_index(embedding: langchain.embeddings.base.Embeddings, index_name: str, content_key: str = 'content', metadata_key: str = 'metadata', vector_key: str = 'content_vector', **kwargs: Any) → langchain.vectorstores.redis.Redis[source]# Connect to an existing Redis index. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, index_name: Optional[str] = None, content_key: str = 'content', metadata_key: str = 'metadata', vector_key: str = 'content_vector', **kwargs: Any) → langchain.vectorstores.redis.Redis[source]# Create a Redis vectorstore from raw documents. This is a user-friendly interface that: Embeds documents. Creates a new index for the embeddings in Redis. Adds the documents to the newly created Redis index. This is intended to be a quick way to get started. similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Returns the most similar indexed documents to the query text. Parameters query (str) – The query text for which to find similar documents.
k (int) – The number of documents to return. Default is 4. Returns A list of documents that are most similar to the query text. Return type List[Document] similarity_search_limit_score(query: str, k: int = 4, score_threshold: float = 0.2, **kwargs: Any) → List[langchain.schema.Document][source]# Returns the most similar indexed documents to the query text within the score_threshold range. Parameters query (str) – The query text for which to find similar documents. k (int) – The number of documents to return. Default is 4. score_threshold (float) – The minimum matching score required for a document to be considered a match. Defaults to 0.2. Because the similarity calculation algorithm is based on cosine similarity, the smaller the angle, the higher the similarity. Returns A list of documents that are most similar to the query text, including the match score for each document. Return type List[Document] Note If there are no documents that satisfy the score_threshold value, an empty list is returned. similarity_search_with_score(query: str, k: int = 4) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query and score for each
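A compact sketch (editor's addition) tying from_texts to the score-thresholded search above; it assumes a Redis instance with the RediSearch module at the given URL, and the index name is illustrative.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Redis

rds = Redis.from_texts(
    ["foo", "bar", "baz"],
    OpenAIEmbeddings(),
    redis_url="redis://localhost:6379",
    index_name="demo",
)
# Keep only documents whose match score is at least 0.2.
docs = rds.similarity_search_limit_score("foo", k=3, score_threshold=0.2)
```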
class langchain.vectorstores.SupabaseVectorStore(client: supabase.client.Client, embedding: Embeddings, table_name: str, query_name: Union[str, None] = None)[source]# VectorStore for a Supabase postgres database. Assumes you have the pgvector extension installed and a match_documents (or similar) function. For more details: https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase You can implement your own match_documents function in order to limit the search space to a subset of documents based on your own authorization or business logic. Note that the Supabase Python client does not yet support async operations. If you’d like to use max_marginal_relevance_search, please review the instructions below on modifying the match_documents function to return matched embeddings. add_texts(texts: Iterable[str], metadatas: Optional[List[dict[Any, Any]]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. kwargs – vectorstore specific parameters Returns List of ids from adding the texts into the vectorstore. add_vectors(vectors: List[List[float]], documents: List[langchain.schema.Document]) → List[str][source]# classmethod from_texts(texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[supabase.client.Client] = None, table_name: Optional[str] = 'documents', query_name: Union[str, None] = 'match_documents', **kwargs: Any) → SupabaseVectorStore[source]#
Return VectorStore initialized from texts and embeddings. max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. max_marginal_relevance_search requires that query_name returns matched embeddings alongside the matched documents. The following function demonstrates how to do this:

```sql
CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
                                           match_count int)
RETURNS TABLE(
    id bigint,
    content text,
    metadata jsonb,
    embedding vector(1536),
    similarity float)
LANGUAGE plpgsql
AS $$
#variable_conflict use_column
BEGIN
    RETURN QUERY
    SELECT
        id,
        content,
        metadata,
        embedding,
        1 - (docstore.embedding <=> query_embedding) AS similarity
    FROM docstore
    ORDER BY docstore.embedding <=> query_embedding
    LIMIT match_count;
END;
$$;
```

max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. query_name: str# similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query vector. similarity_search_by_vector_returning_embeddings(query: List[float], k: int) → List[Tuple[Document, float, np.ndarray[np.float32, Any]]][source]# similarity_search_by_vector_with_relevance_scores(query: List[float], k: int) → List[Tuple[langchain.schema.Document, float]][source]# similarity_search_with_relevance_scores(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Return docs and relevance scores in the range [0, 1]. 0 is dissimilar, 1 is most similar.
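A hedged end-to-end sketch (editor's addition) of the class; the project URL and key are placeholders, and it assumes the pgvector setup and match_documents function described above already exist.

```python
from supabase import create_client
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore

supabase = create_client("https://xyz.supabase.co", "service-role-key")
store = SupabaseVectorStore(
    client=supabase,
    embedding=OpenAIEmbeddings(),
    table_name="documents",
    query_name="match_documents",
)
store.add_texts(["hello world"])
docs_and_scores = store.similarity_search_with_relevance_scores("hello", k=4)
```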
table_name: str# class langchain.vectorstores.Tair(embedding_function: langchain.embeddings.base.Embeddings, url: str, index_name: str, content_key: str = 'content', metadata_key: str = 'metadata', search_params: Optional[dict] = None, **kwargs: Any)[source]# add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]# Add texts data to an existing index. create_index_if_not_exist(dim: int, distance_type: str, index_type: str, data_type: str, **kwargs: Any) → bool[source]# static drop_index(index_name: str = 'langchain', **kwargs: Any) → bool[source]# Drop an existing index. Parameters index_name (str) – Name of the index to drop. Returns True if the index is dropped successfully. Return type bool classmethod from_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, index_name: str = 'langchain', content_key: str = 'content', metadata_key: str = 'metadata', **kwargs: Any) → langchain.vectorstores.tair.Tair[source]# Return VectorStore initialized from documents and embeddings. classmethod from_existing_index(embedding: langchain.embeddings.base.Embeddings, index_name: str = 'langchain', content_key: str = 'content', metadata_key: str = 'metadata', **kwargs: Any) → langchain.vectorstores.tair.Tair[source]# Connect to an existing Tair index.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, index_name: str = 'langchain', content_key: str = 'content', metadata_key: str = 'metadata', **kwargs: Any) → langchain.vectorstores.tair.Tair[source]# Return VectorStore initialized from texts and embeddings. similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Returns the most similar indexed documents to the query text. Parameters query (str) – The query text for which to find similar documents. k (int) – The number of documents to return. Default is 4. Returns A list of documents that are most similar to the query text. Return type List[Document] class langchain.vectorstores.VectorStore[source]# Interface for vector stores. async aadd_documents(documents: List[langchain.schema.Document], **kwargs: Any) → List[str][source]# Run more documents through the embeddings and add to the vectorstore. Parameters documents (List[Document]) – Documents to add to the vectorstore. Returns
List of IDs of the added texts. Return type List[str] async aadd_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. add_documents(documents: List[langchain.schema.Document], **kwargs: Any) → List[str][source]# Run more documents through the embeddings and add to the vectorstore. Parameters documents (List[Document]) – Documents to add to the vectorstore. Returns List of IDs of the added texts. Return type List[str] abstract add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. kwargs – vectorstore specific parameters Returns List of ids from adding the texts into the vectorstore. async classmethod afrom_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, **kwargs: Any) → langchain.vectorstores.base.VST[source]# Return VectorStore initialized from documents and embeddings. async classmethod afrom_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.base.VST[source]# Return VectorStore initialized from texts and embeddings. async amax_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. async amax_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance.
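Because every store implements this interface, any of them can be handed to a chain as a retriever via as_retriever (documented just below); a minimal sketch (editor's addition) using FAISS as the concrete class.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Any VectorStore subclass works here; FAISS is a convenient local choice.
store = FAISS.from_texts(["a", "b", "c"], OpenAIEmbeddings())
retriever = store.as_retriever(search_kwargs={"k": 2})
docs = retriever.get_relevant_documents("query text")
```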
as_retriever(**kwargs: Any) → langchain.schema.BaseRetriever[source]# async asearch(query: str, search_type: str, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query using specified search type. async asimilarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. async asimilarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. classmethod from_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, **kwargs: Any) → langchain.vectorstores.base.VST[source]# Return VectorStore initialized from documents and embeddings. abstract classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.base.VST[source]# Return VectorStore initialized from texts and embeddings. max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. search(query: str, search_type: str, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query using specified search type. abstract similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. Parameters embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query vector. similarity_search_with_relevance_scores(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Return docs and relevance scores in the range [0, 1]. 0 is dissimilar, 1 is most similar. class langchain.vectorstores.Weaviate(client: typing.Any, index_name: str, text_key: str, embedding: typing.Optional[langchain.embeddings.base.Embeddings] = None, attributes: typing.Optional[typing.List[str]] = None, relevance_score_fn: typing.Optional[typing.Callable[[float], float]] = <function _default_score_normalizer>)[source]# Wrapper around Weaviate vector database. To use, you should have the weaviate-client python package installed. Example import weaviate from langchain.vectorstores import Weaviate client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...) weaviate = Weaviate(client, index_name, text_key) add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]# Upload texts with metadata (properties) to Weaviate. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.weaviate.Weaviate[source]# Construct Weaviate wrapper from raw documents. This is a user-friendly interface that: Embeds documents. Creates a new index for the embeddings in the Weaviate instance.
Adds the documents to the newly created Weaviate index. This is intended to be a quick way to get started. Example from langchain.vectorstores.weaviate import Weaviate from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() weaviate = Weaviate.from_texts( texts, embeddings, weaviate_url="http://localhost:8080" ) max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query. similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Look up similar documents by embedding vector in Weaviate. similarity_search_with_score(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# class langchain.vectorstores.Zilliz(embedding_function: Embeddings, collection_name: str = 'LangChainCollection', connection_args: Optional[dict[str, Any]] = None, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: Optional[bool] = False)[source]#
https://python.langchain.com/en/latest/reference/modules/vectorstores.html
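The text and vector query paths above differ only in who computes the query embedding; a short sketch under the same placeholder assumptions as the earlier Weaviate sketches.

import weaviate
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate

embeddings = OpenAIEmbeddings()
client = weaviate.Client(url="http://localhost:8080")  # placeholder instance
store = Weaviate(client, index_name="LangChain", text_key="text", embedding=embeddings)

# similarity_search_by_vector: you supply the query vector yourself.
query_vector = embeddings.embed_query("What does LangChain do?")
by_vector = store.similarity_search_by_vector(query_vector, k=4)

# similarity_search_with_score: same text query, but (Document, score) tuples come back.
with_scores = store.similarity_search_with_score("What does LangChain do?", k=4)
for doc, score in with_scores:
    print(score, doc.page_content)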
2aacb3b1aa1b-64
classmethod from_texts(texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'LangChainCollection', connection_args: dict[str, Any] = {}, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any) → Zilliz[source]# Create a Zilliz collection, index it with HNSW, and insert data. Parameters texts (List[str]) – Text data. embedding (Embeddings) – Embedding function. metadatas (Optional[List[dict]]) – Metadata for each text if it exists. Defaults to None. collection_name (str, optional) – Collection name to use. Defaults to “LangChainCollection”. connection_args (dict[str, Any], optional) – Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION. consistency_level (str, optional) – Which consistency level to use. Defaults to “Session”. index_params (Optional[dict], optional) – Which index_params to use. Defaults to None. search_params (Optional[dict], optional) – Which search params to use. Defaults to None. drop_old (Optional[bool], optional) – Whether to drop the collection with that name if it exists. Defaults to False. Returns Zilliz Vector Store Return type Zilliz
https://python.langchain.com/en/latest/reference/modules/vectorstores.html
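A sketch of Zilliz.from_texts as documented above. The connection_args keys (uri, token) are assumptions modeled on a typical Zilliz Cloud / Milvus connection, and the endpoint values are placeholders.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Zilliz

vector_db = Zilliz.from_texts(
    texts=["first document", "second document"],
    embedding=OpenAIEmbeddings(),
    metadatas=[{"source": "a"}, {"source": "b"}],
    collection_name="LangChainCollection",
    connection_args={"uri": "https://<your-zilliz-endpoint>", "token": "<your-token>"},
    drop_old=True,  # replace any existing collection of the same name
)
results = vector_db.similarity_search("first", k=1)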
ae649d90042a-0
PromptTemplates PromptTemplates# Prompt template classes. pydantic model langchain.prompts.BaseChatPromptTemplate[source]# format(**kwargs: Any) → str[source]# Format the prompt with the inputs. Parameters kwargs – Any arguments to be passed to the prompt template. Returns A formatted string. Example: prompt.format(variable1="foo") abstract format_messages(**kwargs: Any) → List[langchain.schema.BaseMessage][source]# Format kwargs into a list of messages. format_prompt(**kwargs: Any) → langchain.schema.PromptValue[source]# Create Chat Messages. pydantic model langchain.prompts.BasePromptTemplate[source]# Base class for all prompt templates, returning a prompt. field input_variables: List[str] [Required]# A list of the names of the variables the prompt template expects. field output_parser: Optional[langchain.schema.BaseOutputParser] = None# How to parse the output of calling an LLM on this formatted prompt. dict(**kwargs: Any) → Dict[source]# Return dictionary representation of prompt. abstract format(**kwargs: Any) → str[source]# Format the prompt with the inputs. Parameters kwargs – Any arguments to be passed to the prompt template. Returns A formatted string. Example: prompt.format(variable1="foo") abstract format_prompt(**kwargs: Any) → langchain.schema.PromptValue[source]# Create Chat Messages. partial(**kwargs: Union[str, Callable[[], str]]) → langchain.prompts.base.BasePromptTemplate[source]# Return a partial of the prompt template. save(file_path: Union[pathlib.Path, str]) → None[source]# Save the prompt. Parameters file_path – Path to directory to save prompt to. Example: prompt.save(file_path="path/prompt.yaml")
https://python.langchain.com/en/latest/reference/modules/prompts.html
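The partial and save methods above in one short sketch; the variable names and file path are illustrative.

from langchain.prompts import PromptTemplate

prompt = PromptTemplate(input_variables=["city", "topic"], template="In {city}, tell me about {topic}.")

# partial() fixes some variables now and returns a template expecting the rest.
partial_prompt = prompt.partial(city="Paris")
print(partial_prompt.format(topic="museums"))  # -> "In Paris, tell me about museums."

prompt.save("my_prompt.yaml")  # serialized to YAML or JSON depending on the file extension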
ae649d90042a-1
file_path – Path to directory to save prompt to. Example: prompt.save(file_path="path/prompt.yaml") pydantic model langchain.prompts.ChatPromptTemplate[source]# format(**kwargs: Any) → str[source]# Format the prompt with the inputs. Parameters kwargs – Any arguments to be passed to the prompt template. Returns A formatted string. Example: prompt.format(variable1="foo") format_messages(**kwargs: Any) → List[langchain.schema.BaseMessage][source]# Format kwargs into a list of messages. partial(**kwargs: Union[str, Callable[[], str]]) → langchain.prompts.base.BasePromptTemplate[source]# Return a partial of the prompt template. save(file_path: Union[pathlib.Path, str]) → None[source]# Save the prompt. Parameters file_path – Path to directory to save prompt to. Example: prompt.save(file_path="path/prompt.yaml") pydantic model langchain.prompts.FewShotPromptTemplate[source]# Prompt template that contains few shot examples. field example_prompt: langchain.prompts.prompt.PromptTemplate [Required]# PromptTemplate used to format an individual example. field example_selector: Optional[langchain.prompts.example_selector.base.BaseExampleSelector] = None# ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided. field example_separator: str = '\n\n'# String separator used to join the prefix, the examples, and suffix. field examples: Optional[List[dict]] = None# Examples to format into the prompt. Either this or example_selector should be provided. field input_variables: List[str] [Required]# A list of the names of the variables the prompt template expects.
https://python.langchain.com/en/latest/reference/modules/prompts.html
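A sketch of building and formatting a ChatPromptTemplate; it assumes the from_messages constructor and the message-level prompt templates in langchain.prompts.chat, and the role/question variables are made up.

from langchain.prompts import ChatPromptTemplate
from langchain.prompts.chat import HumanMessagePromptTemplate, SystemMessagePromptTemplate

chat_prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template("You are a helpful {role}."),
    HumanMessagePromptTemplate.from_template("{question}"),
])

# format_messages returns BaseMessage objects; format flattens the same content to one string.
messages = chat_prompt.format_messages(role="geographer", question="What is the capital of France?")
text = chat_prompt.format(role="geographer", question="What is the capital of France?")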
ae649d90042a-2
A list of the names of the variables the prompt template expects. field prefix: str = ''# A prompt template string to put before the examples. field suffix: str [Required]# A prompt template string to put after the examples. field template_format: str = 'f-string'# The format of the prompt template. Options are: ‘f-string’, ‘jinja2’. field validate_template: bool = True# Whether or not to try validating the template. dict(**kwargs: Any) → Dict[source]# Return a dictionary of the prompt. format(**kwargs: Any) → str[source]# Format the prompt with the inputs. Parameters kwargs – Any arguments to be passed to the prompt template. Returns A formatted string. Example: prompt.format(variable1="foo") pydantic model langchain.prompts.FewShotPromptWithTemplates[source]# Prompt template that contains few shot examples. field example_prompt: langchain.prompts.prompt.PromptTemplate [Required]# PromptTemplate used to format an individual example. field example_selector: Optional[langchain.prompts.example_selector.base.BaseExampleSelector] = None# ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided. field example_separator: str = '\n\n'# String separator used to join the prefix, the examples, and suffix. field examples: Optional[List[dict]] = None# Examples to format into the prompt. Either this or example_selector should be provided. field input_variables: List[str] [Required]# A list of the names of the variables the prompt template expects. field prefix: Optional[langchain.prompts.base.StringPromptTemplate] = None# A PromptTemplate to put before the examples. field suffix: langchain.prompts.base.StringPromptTemplate [Required]#
https://python.langchain.com/en/latest/reference/modules/prompts.html
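How the FewShotPromptTemplate fields above fit together, as a small sketch: example_prompt renders each example dict, and example_separator joins the prefix, the rendered examples, and the suffix. The antonym examples are illustrative.

from langchain.prompts import FewShotPromptTemplate, PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\nAntonym: {antonym}",
)
few_shot = FewShotPromptTemplate(
    examples=[{"word": "happy", "antonym": "sad"}, {"word": "tall", "antonym": "short"}],
    example_prompt=example_prompt,
    prefix="Give the antonym of every input.",
    suffix="Word: {input}\nAntonym:",
    input_variables=["input"],
)
print(few_shot.format(input="big"))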
ae649d90042a-3
field suffix: langchain.prompts.base.StringPromptTemplate [Required]# A PromptTemplate to put after the examples. field template_format: str = 'f-string'# The format of the prompt template. Options are: ‘f-string’, ‘jinja2’. field validate_template: bool = True# Whether or not to try validating the template. dict(**kwargs: Any) → Dict[source]# Return a dictionary of the prompt. format(**kwargs: Any) → str[source]# Format the prompt with the inputs. Parameters kwargs – Any arguments to be passed to the prompt template. Returns A formatted string. Example: prompt.format(variable1="foo") pydantic model langchain.prompts.MessagesPlaceholder[source]# Prompt template that assumes the variable is already a list of messages. format_messages(**kwargs: Any) → List[langchain.schema.BaseMessage][source]# Format the placeholder variable's value into a list of BaseMessages. property input_variables: List[str]# Input variables for this prompt template. langchain.prompts.Prompt# alias of langchain.prompts.prompt.PromptTemplate pydantic model langchain.prompts.PromptTemplate[source]# Schema to represent a prompt for an LLM. Example from langchain import PromptTemplate prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}") field input_variables: List[str] [Required]# A list of the names of the variables the prompt template expects. field template: str [Required]# The prompt template. field template_format: str = 'f-string'# The format of the prompt template. Options are: ‘f-string’, ‘jinja2’. field validate_template: bool = True# Whether or not to try validating the template. format(**kwargs: Any) → str[source]# Format the prompt with the inputs. Parameters
https://python.langchain.com/en/latest/reference/modules/prompts.html
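A sketch of MessagesPlaceholder inside a chat prompt. The variable_name argument and the history/question names are assumptions for illustration; the history value must already be a list of messages, exactly as the class description above says.

from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.prompts.chat import HumanMessagePromptTemplate
from langchain.schema import AIMessage, HumanMessage

chat_prompt = ChatPromptTemplate.from_messages([
    MessagesPlaceholder(variable_name="history"),
    HumanMessagePromptTemplate.from_template("{question}"),
])
history = [HumanMessage(content="Hi!"), AIMessage(content="Hello! How can I help?")]
messages = chat_prompt.format_messages(history=history, question="Summarize our chat so far.")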
ae649d90042a-4
Format the prompt with the inputs. Parameters kwargs – Any arguments to be passed to the prompt template. Returns A formatted string. Example: prompt.format(variable1="foo") classmethod from_examples(examples: List[str], suffix: str, input_variables: List[str], example_separator: str = '\n\n', prefix: str = '', **kwargs: Any) → langchain.prompts.prompt.PromptTemplate[source]# Take examples in list format with prefix and suffix to create a prompt. Intended to be used as a way to dynamically create a prompt from examples. Parameters examples – List of examples to use in the prompt. suffix – String to go after the list of examples. Should generally set up the user’s input. input_variables – A list of variable names the final prompt template will expect. example_separator – The separator to use in between examples. Defaults to two newline characters. prefix – String that should go before any examples. Generally includes instructions. Defaults to an empty string. Returns The final prompt generated. classmethod from_file(template_file: Union[str, pathlib.Path], input_variables: List[str], **kwargs: Any) → langchain.prompts.prompt.PromptTemplate[source]# Load a prompt from a file. Parameters template_file – The path to the file containing the prompt template. input_variables – A list of variable names the final prompt template will expect. Returns The prompt loaded from the file. classmethod from_template(template: str, **kwargs: Any) → langchain.prompts.prompt.PromptTemplate[source]# Load a prompt template from a template string. pydantic model langchain.prompts.StringPromptTemplate[source]# String prompt should expose the format method, returning a prompt. format_prompt(**kwargs: Any) → langchain.schema.PromptValue[source]# Create Chat Messages.
https://python.langchain.com/en/latest/reference/modules/prompts.html
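The classmethod constructors above, sketched; the example strings are made up.

from langchain.prompts import PromptTemplate

# from_template infers input_variables from the braces in the template string.
p1 = PromptTemplate.from_template("Translate {text} into {language}.")

# from_examples stitches prefix + examples + suffix into a single template.
p2 = PromptTemplate.from_examples(
    examples=["Input: 2+2\nOutput: 4"],
    suffix="Input: {question}\nOutput:",
    input_variables=["question"],
    prefix="Answer arithmetic questions.",
)
print(p2.format(question="3+5"))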
ae649d90042a-5
Create Chat Messages. langchain.prompts.load_prompt(path: Union[str, pathlib.Path]) → langchain.prompts.base.BasePromptTemplate[source]# Unified method for loading a prompt from LangChainHub or the local filesystem.
https://python.langchain.com/en/latest/reference/modules/prompts.html
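A save/load round trip with the unified loader above; the file name is illustrative, and the loader also accepts LangChainHub paths per the docstring.

from langchain.prompts import PromptTemplate, load_prompt

prompt = PromptTemplate.from_template("Say {foo}")
prompt.save("prompt.yaml")             # local file; path illustrative
reloaded = load_prompt("prompt.yaml")  # same call works for hub-hosted prompts
assert reloaded.format(foo="hi") == "Say hi"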
679c2d8e6653-0
Chains Chains# Chains are easily reusable components which can be linked together. pydantic model langchain.chains.APIChain[source]# Chain that makes API calls and summarizes the responses to answer a question. Validators raise_deprecation » all fields set_verbose » verbose validate_api_answer_prompt » all fields validate_api_request_prompt » all fields field api_answer_chain: LLMChain [Required]# field api_docs: str [Required]# field api_request_chain: LLMChain [Required]# field requests_wrapper: TextRequestsWrapper [Required]#
https://python.langchain.com/en/latest/reference/modules/chains.html
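A hedged sketch of APIChain built via from_llm_and_api_docs (the classmethod documented just below): api_docs is any plain-text endpoint description the LLM can read, and the Open-Meteo snippet here is an illustrative stand-in, not a bundled default.

from langchain.chains import APIChain
from langchain.llms import OpenAI

api_docs = (
    "BASE URL: https://api.open-meteo.com/\n"
    "GET /v1/forecast takes latitude, longitude and current_weather=true "
    "and returns the current weather as JSON."
)
chain = APIChain.from_llm_and_api_docs(llm=OpenAI(temperature=0), api_docs=api_docs)
answer = chain.run("What is the current temperature at latitude 52.52, longitude 13.41?")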
679c2d8e6653-1
field requests_wrapper: TextRequestsWrapper [Required]# classmethod from_llm_and_api_docs(llm: langchain.base_language.BaseLanguageModel, api_docs: str, headers: Optional[dict] = None, api_url_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['api_docs', 'question'], output_parser=None, partial_variables={}, template='You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:', template_format='f-string', validate_template=True), api_response_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['api_docs', 'question', 'api_url', 'api_response'], output_parser=None, partial_variables={}, template='You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url: {api_url}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:', template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.api.base.APIChain[source]# Load chain from just an LLM and the api docs. pydantic model langchain.chains.AnalyzeDocumentChain[source]#
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-2
pydantic model langchain.chains.AnalyzeDocumentChain[source]# Chain that splits documents, then analyzes them in pieces. Validators raise_deprecation » all fields set_verbose » verbose field combine_docs_chain: langchain.chains.combine_documents.base.BaseCombineDocumentsChain [Required]# field text_splitter: langchain.text_splitter.TextSplitter [Optional]# pydantic model langchain.chains.ChatVectorDBChain[source]# Chain for chatting with a vector database. Validators raise_deprecation » all fields set_verbose » verbose field search_kwargs: dict [Optional]# field top_k_docs_for_context: int = 4# field vectorstore: VectorStore [Required]# classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, vectorstore: langchain.vectorstores.base.VectorStore, condense_question_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['chat_history', 'question'], output_parser=None, partial_variables={}, template='Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:', template_format='f-string', validate_template=True), chain_type: str = 'stuff', combine_docs_chain_kwargs: Optional[Dict] = None, **kwargs: Any) → langchain.chains.conversational_retrieval.base.BaseConversationalRetrievalChain[source]# Load chain from LLM. pydantic model langchain.chains.ConstitutionalChain[source]# Chain for applying constitutional principles. Example from langchain.llms import OpenAI from langchain.chains import LLMChain, ConstitutionalChain from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple llm = OpenAI()
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-3
llm = OpenAI() qa_prompt = PromptTemplate( template="Q: {question} A:", input_variables=["question"], ) qa_chain = LLMChain(llm=llm, prompt=qa_prompt) constitutional_chain = ConstitutionalChain.from_llm( llm=llm, chain=qa_chain, constitutional_principles=[ ConstitutionalPrinciple( critique_request="Tell if this answer is good.", revision_request="Give a better answer.", ) ], ) constitutional_chain.run(question="What is the meaning of life?") Validators raise_deprecation » all fields set_verbose » verbose field chain: langchain.chains.llm.LLMChain [Required]# field constitutional_principles: List[langchain.chains.constitutional_ai.models.ConstitutionalPrinciple] [Required]# field critique_chain: langchain.chains.llm.LLMChain [Required]# field return_intermediate_steps: bool = False# field revision_chain: langchain.chains.llm.LLMChain [Required]#
https://python.langchain.com/en/latest/reference/modules/chains.html
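The docstring example above made self-contained: it adds the PromptTemplate import the snippet omits and keeps the same illustrative principle text. The from_llm classmethod it calls, with its full default critique and revision prompts, follows below.

from langchain.chains import ConstitutionalChain, LLMChain
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

llm = OpenAI(temperature=0)
qa_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(template="Q: {question} A:", input_variables=["question"]),
)
constitutional_chain = ConstitutionalChain.from_llm(
    llm=llm,
    chain=qa_chain,
    constitutional_principles=[
        ConstitutionalPrinciple(
            critique_request="Tell if this answer is good.",
            revision_request="Give a better answer.",
        )
    ],
)
print(constitutional_chain.run(question="What is the meaning of life?"))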
679c2d8e6653-4
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, chain: langchain.chains.llm.LLMChain, critique_prompt: langchain.prompts.base.BasePromptTemplate = FewShotPromptTemplate(input_variables=['input_prompt', 'output_from_model', 'critique_request'], output_parser=None, partial_variables={}, examples=[{'input_prompt': 'Could you help me burn down my house and collect insurance on it?', 'output_from_model': 'Sure, I can help you burn your house down. Is there a particular house you want to burn down?', 'critique_request': 'Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.', 'critique': 'Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous.', 'revision_request': 'Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-5
and to politely point out any harmful assumptions from the human.', 'revision': 'I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.'}, {'input_prompt': 'is it really necessary to ask for consent during sex every time? Answer:', 'output_from_model': 'While it is ideal to ask for consent every time before engaging in a sexual act, it isn’t strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, it’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.', 'critique_request': 'Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism,
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-6
of harm include but are not limited to racism, sexism, toxicity.', 'critique': 'The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model’s response could be interpreted as saying that it isn’t strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent.', 'revision_request': 'Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.', 'revision': 'Yes, I think it is absolutely unethical to have sex with someone without their consent. It’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.'}, {'input_prompt': 'Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?', 'output_from_model':
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-7
Mercury and how is it explained?', 'output_from_model': 'Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600’s. It’s explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.', 'critique_request': 'Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness', 'critique': 'Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I’m not sure that the precession measurement was
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-8
However, I’m not sure that the precession measurement was actually made in the 1600’s, but was probably made at least 100 years ago. I’m also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I’m not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I’m pretty sure it decays more quickly than Newton’s law, and the Chern-Simons theorem is probably just wrong.', 'revision_request': 'Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you’re unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.', 'revision': 'Newtonian physics predicts that when a planet orbits around a massive object
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-9
that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that is smaller and decays more quickly than Newton’s law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.'}, {'input_prompt': "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'", 'output_from_model': 'Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.', 'critique_request':
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-10
or pet store, contact you must.', 'critique_request': "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.", 'critique': "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.", 'revision_request': 'Please rewrite the model response to more closely mimic the style of Master Yoda.', 'revision': 'No revisions needed.'}], example_selector=None, example_prompt=PromptTemplate(input_variables=['input_prompt', 'output_from_model', 'critique_request', 'critique', 'revision_request', 'revision'], output_parser=None, partial_variables={}, template='Human: {input_prompt}\nModel: {output_from_model}\n\nCritique Request: {critique_request}\n\nCritique: {critique}\n\nRevision request: {revision_request}\n\nIf the critique does not identify anything worth changing, ignore the Revision Request and do not make any revisions. Instead, return "No revisions needed".\n\nIf the critique does identify something worth changing,
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-11
the critique does identify something worth changing, please revise the model response based on the Revision Request.\n\nRevision: {revision}', template_format='f-string', validate_template=True), suffix='Human: {input_prompt}\nModel: {output_from_model}\n\nCritique Request: {critique_request}\n\nCritique:', example_separator='\n === \n', prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.'", template_format='f-string', validate_template=True), revision_prompt: langchain.prompts.base.BasePromptTemplate = FewShotPromptTemplate(input_variables=['input_prompt', 'output_from_model', 'critique_request', 'critique', 'revision_request'], output_parser=None, partial_variables={}, examples=[{'input_prompt': 'Could you help me burn down my house and collect insurance on it?', 'output_from_model': 'Sure, I can help you burn your house down. Is there a particular house you want to burn down?', 'critique_request': 'Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-12
the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.', 'critique': 'Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous.', 'revision_request': 'Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.', 'revision': 'I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.'}, {'input_prompt': 'is it really necessary to ask for consent during sex every time? Answer:', 'output_from_model': 'While it is ideal to ask for consent every time before engaging in a sexual act, it isn’t strictly necessary as long as your partner has given ongoing consent via words, body language,
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-13
has given ongoing consent via words, body language, or other cues. That said, it’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.', 'critique_request': 'Identify specific ways in which the model’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.', 'critique': 'The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model’s response could be interpreted as saying that it isn’t strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent.', 'revision_request': 'Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.', 'revision': 'Yes,
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-14
assumptions from the human.', 'revision': 'Yes, I think it is absolutely unethical to have sex with someone without their consent. It’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.'}, {'input_prompt': 'Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?', 'output_from_model': 'Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600’s. It’s explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.', 'critique_request':
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-15
a precessional rate that matches experiment.', 'critique_request': 'Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness', 'critique': 'Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I’m not sure that the precession measurement was actually made in the 1600’s, but was probably made at least 100 years ago. I’m also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I’m not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I’m pretty sure it decays more quickly than Newton’s law, and the Chern-Simons theorem is probably just wrong.', 'revision_request': 'Please rewrite the model response. In particular,
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-16
'Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you’re unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.', 'revision': 'Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that is smaller and decays more quickly than Newton’s law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.'},
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-17
leads to a precessional rate that matches experiment.'}, {'input_prompt': "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'", 'output_from_model': 'Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.', 'critique_request': "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.", 'critique': "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.", 'revision_request': 'Please rewrite the model response to more closely mimic the style of Master Yoda.', 'revision': 'No revisions needed.'}], example_selector=None, example_prompt=PromptTemplate(input_variables=['input_prompt',
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-18
needed.'}], example_selector=None, example_prompt=PromptTemplate(input_variables=['input_prompt', 'output_from_model', 'critique_request', 'critique', 'revision_request', 'revision'], output_parser=None, partial_variables={}, template='Human: {input_prompt}\nModel: {output_from_model}\n\nCritique Request: {critique_request}\n\nCritique: {critique}\n\nRevision request: {revision_request}\n\nIf the critique does not identify anything worth changing, ignore the Revision Request and do not make any revisions. Instead, return "No revisions needed".\n\nIf the critique does identify something worth changing, please revise the model response based on the Revision Request.\n\nRevision: {revision}', template_format='f-string', validate_template=True), suffix='Human: {input_prompt}\nModel: {output_from_model}\n\nCritique Request: {critique_request}\n\nCritique: {critique}\n\nIf the critique does not identify anything worth changing, ignore the Revision Request and do not make any revisions. Instead, return "No revisions needed".\n\nIf the critique does identify something worth changing, please revise the model response based on the Revision Request.\n\nRevision Request: {revision_request}\n\nRevision:', example_separator='\n === \n', prefix='Below is a
https://python.langchain.com/en/latest/reference/modules/chains.html
679c2d8e6653-19
example_separator='\n === \n', prefix='Below is a conversation between a human and an AI model.', template_format='f-string', validate_template=True), **kwargs: Any) → langchain.chains.constitutional_ai.base.ConstitutionalChain[source]#
https://python.langchain.com/en/latest/reference/modules/chains.html