Source code for langchain.vectorstores.supabase
(rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/supabase.html)

class SupabaseVectorStore(VectorStore):
    def __init__(
        self,
        client: supabase.client.Client,
        embedding: Embeddings,
        table_name: str,
        query_name: Union[str, None] = None,
    ) -> None:
        """Initialize with supabase client."""
        try:
            import supabase  # noqa: F401
        except ImportError:
            raise ValueError(
                "Could not import supabase python package. "
                "Please install it with `pip install supabase`."
            )
        self._client = client
        self._embedding: Embeddings = embedding
        self.table_name = table_name or "documents"
        self.query_name = query_name or "match_documents"

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict[Any, Any]]] = None,
        **kwargs: Any,
    ) -> List[str]:
        docs = self._texts_to_documents(texts, metadatas)
        vectors = self._embedding.embed_documents(list(texts))
        return self.add_vectors(vectors, docs)

    @classmethod
    def from_texts(
        cls: Type["SupabaseVectorStore"],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        client: Optional[supabase.client.Client] = None,
        table_name: Optional[str] = "documents",
        query_name: Union[str, None] = "match_documents",
        **kwargs: Any,
    ) -> "SupabaseVectorStore":
        """Return VectorStore initialized from texts and embeddings."""
        if not client:
            raise ValueError("Supabase client is required.")
        if not table_name:
            raise ValueError("Supabase document table_name is required.")

        embeddings = embedding.embed_documents(texts)
        docs = cls._texts_to_documents(texts, metadatas)
        _ids = cls._add_vectors(client, table_name, embeddings, docs)

        return cls(
            client=client,
            embedding=embedding,
            table_name=table_name,
            query_name=query_name,
        )

    def add_vectors(
        self, vectors: List[List[float]], documents: List[Document]
    ) -> List[str]:
        return self._add_vectors(self._client, self.table_name, vectors, documents)

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        vectors = self._embedding.embed_documents([query])
        return self.similarity_search_by_vector(vectors[0], k)

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        result = self.similarity_search_by_vector_with_relevance_scores(embedding, k)
        documents = [doc for doc, _ in result]
        return documents

    def similarity_search_with_relevance_scores(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        vectors = self._embedding.embed_documents([query])
        return self.similarity_search_by_vector_with_relevance_scores(vectors[0], k)

    def similarity_search_by_vector_with_relevance_scores(
        self, query: List[float], k: int
    ) -> List[Tuple[Document, float]]:
        match_documents_params = dict(query_embedding=query, match_count=k)
        res = self._client.rpc(self.query_name, match_documents_params).execute()

        match_result = [
            (
                Document(
                    metadata=search.get("metadata", {}),  # type: ignore
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
            )
            for search in res.data
            if search.get("content")
        ]

        return match_result

    def similarity_search_by_vector_returning_embeddings(
        self, query: List[float], k: int
    ) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
        match_documents_params = dict(query_embedding=query, match_count=k)
        res = self._client.rpc(self.query_name, match_documents_params).execute()

        match_result = [
            (
                Document(
                    metadata=search.get("metadata", {}),  # type: ignore
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
                # Supabase returns a vector type as its string representation (!).
                # This is a hack to convert the string to a numpy array.
                np.fromstring(
                    search.get("embedding", "").strip("[]"), np.float32, sep=","
                ),
            )
            for search in res.data
            if search.get("content")
        ]

        return match_result

    @staticmethod
    def _texts_to_documents(
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict[Any, Any]]] = None,
    ) -> List[Document]:
        """Return list of Documents from list of texts and metadatas."""
        if metadatas is None:
            metadatas = repeat({})

        docs = [
            Document(page_content=text, metadata=metadata)
            for text, metadata in zip(texts, metadatas)
        ]

        return docs

    @staticmethod
    def _add_vectors(
        client: supabase.client.Client,
        table_name: str,
        vectors: List[List[float]],
        documents: List[Document],
    ) -> List[str]:
        """Add vectors to Supabase table."""

        rows: List[dict[str, Any]] = [
            {
                "content": documents[idx].page_content,
                "embedding": embedding,
                "metadata": documents[idx].metadata,  # type: ignore
            }
            for idx, embedding in enumerate(vectors)
        ]

        # According to the SupabaseVectorStore JS implementation, the best
        # chunk size is 500.
        chunk_size = 500
        id_list: List[str] = []
        for i in range(0, len(rows), chunk_size):
            chunk = rows[i : i + chunk_size]

            result = client.from_(table_name).insert(chunk).execute()  # type: ignore

            if len(result.data) == 0:
                raise Exception("Error inserting: No rows added")

            # VectorStore.add_vectors returns ids as strings
            ids = [str(i.get("id")) for i in result.data if i.get("id")]

            id_list.extend(ids)

        return id_list

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        result = self.similarity_search_by_vector_returning_embeddings(
            embedding, fetch_k
        )

        matched_documents = [doc_tuple[0] for doc_tuple in result]
        matched_embeddings = [doc_tuple[2] for doc_tuple in result]

        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            matched_embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )

        filtered_documents = [matched_documents[i] for i in mmr_selected]

        return filtered_documents

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.

        `max_marginal_relevance_search` requires that `query_name` returns
        matched embeddings alongside the match documents. The following
        function demonstrates how to do this:

        ```sql
        CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
                                                   match_count int)
            RETURNS TABLE(
                id bigint,
                content text,
                metadata jsonb,
                embedding vector(1536),
                similarity float)
            LANGUAGE plpgsql
            AS $$
            # variable_conflict use_column
        BEGIN
            RETURN query
            SELECT
                id,
                content,
                metadata,
                embedding,
                1 - (docstore.embedding <=> query_embedding) AS similarity
            FROM
                docstore
            ORDER BY
                docstore.embedding <=> query_embedding
            LIMIT match_count;
        END;
        $$;
        ```
        """
        embedding = self._embedding.embed_documents([query])
        docs = self.max_marginal_relevance_search_by_vector(
            embedding[0], k, fetch_k, lambda_mult=lambda_mult
        )
        return docs
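The docstring above ties MMR search to a SQL function that returns embeddings alongside matches. A minimal wiring sketch follows; the environment-variable names, the "documents" table, and the use of the match_documents_embeddings RPC are assumptions for illustration, not part of the module:

    import os

    import supabase
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import SupabaseVectorStore

    # Hypothetical project credentials; substitute your own.
    client = supabase.create_client(
        os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"]
    )

    store = SupabaseVectorStore(
        client=client,
        embedding=OpenAIEmbeddings(),
        table_name="documents",
        # Must be an RPC that also returns the embedding column (as in the
        # match_documents_embeddings example above) for MMR to work.
        query_name="match_documents_embeddings",
    )

    # Plain similarity search only needs the simpler match_documents function;
    # MMR re-ranks fetch_k candidates using their returned embeddings.
    docs = store.max_marginal_relevance_search("how do I reset my password?", k=4)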
Source code for langchain.vectorstores.pinecone
(rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/pinecone.html)

"""Wrapper around Pinecone vector database."""
from __future__ import annotations

import logging
import uuid
from typing import Any, Callable, Iterable, List, Optional, Tuple

import numpy as np

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance

logger = logging.getLogger(__name__)


class Pinecone(VectorStore):
    """Wrapper around Pinecone vector database.

    To use, you should have the ``pinecone-client`` python package installed.

    Example:
        .. code-block:: python

            from langchain.vectorstores import Pinecone
            from langchain.embeddings.openai import OpenAIEmbeddings
            import pinecone

            # The environment should be the one specified next to the API key
            # in your Pinecone console
            pinecone.init(api_key="***", environment="...")
            index = pinecone.Index("langchain-demo")
            embeddings = OpenAIEmbeddings()
            vectorstore = Pinecone(index, embeddings.embed_query, "text")
    """

    def __init__(
        self,
        index: Any,
        embedding_function: Callable,
        text_key: str,
        namespace: Optional[str] = None,
    ):
        """Initialize with Pinecone client."""
        try:
            import pinecone
        except ImportError:
            raise ValueError(
                "Could not import pinecone python package. "
                "Please install it with `pip install pinecone-client`."
            )
        if not isinstance(index, pinecone.index.Index):
            raise ValueError(
                f"client should be an instance of pinecone.index.Index, "
                f"got {type(index)}"
            )
        self._index = index
        self._embedding_function = embedding_function
        self._text_key = text_key
        self._namespace = namespace

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        namespace: Optional[str] = None,
        batch_size: int = 32,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids to associate with the texts.
            namespace: Optional pinecone namespace to add the texts to.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if namespace is None:
            namespace = self._namespace
        # Embed and create the documents
        docs = []
        ids = ids or [str(uuid.uuid4()) for _ in texts]
        for i, text in enumerate(texts):
            embedding = self._embedding_function(text)
            metadata = metadatas[i] if metadatas else {}
            metadata[self._text_key] = text
            docs.append((ids[i], embedding, metadata))
        # upsert to Pinecone
        self._index.upsert(vectors=docs, namespace=namespace, batch_size=batch_size)
        return ids

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
    ) -> List[Tuple[Document, float]]:
        """Return pinecone documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Dictionary of argument(s) to filter on metadata
            namespace: Namespace to search in. Default will search in '' namespace.

        Returns:
            List of Documents most similar to the query and score for each
        """
        if namespace is None:
            namespace = self._namespace
        query_obj = self._embedding_function(query)
        docs = []
        results = self._index.query(
            [query_obj],
            top_k=k,
            include_metadata=True,
            namespace=namespace,
            filter=filter,
        )
        for res in results["matches"]:
            metadata = res["metadata"]
            if self._text_key in metadata:
                text = metadata.pop(self._text_key)
                score = res["score"]
                docs.append((Document(page_content=text, metadata=metadata), score))
            else:
                logger.warning(
                    f"Found document with no `{self._text_key}` key. Skipping."
                )
        return docs

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return pinecone documents most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Dictionary of argument(s) to filter on metadata
            namespace: Namespace to search in. Default will search in '' namespace.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(
            query, k=k, filter=filter, namespace=namespace, **kwargs
        )
        return [doc for doc, _ in docs_and_scores]

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if namespace is None:
            namespace = self._namespace
        results = self._index.query(
            [embedding],
            top_k=fetch_k,
            include_values=True,
            include_metadata=True,
            namespace=namespace,
            filter=filter,
        )
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            [item["values"] for item in results["matches"]],
            k=k,
            lambda_mult=lambda_mult,
        )
        selected = [results["matches"][i]["metadata"] for i in mmr_selected]
        return [
            Document(page_content=metadata.pop(self._text_key), metadata=metadata)
            for metadata in selected
        ]

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self._embedding_function(query)
        return self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult, filter, namespace
        )

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        batch_size: int = 32,
        text_key: str = "text",
        index_name: Optional[str] = None,
        namespace: Optional[str] = None,
        **kwargs: Any,
    ) -> Pinecone:
        """Construct Pinecone wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Adds the documents to a provided Pinecone index

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import Pinecone
                from langchain.embeddings import OpenAIEmbeddings
                import pinecone

                # The environment should be the one specified next to the API key
                # in your Pinecone console
                pinecone.init(api_key="***", environment="...")
                embeddings = OpenAIEmbeddings()
                pinecone = Pinecone.from_texts(
                    texts,
                    embeddings,
                    index_name="langchain-demo"
                )
        """
        try:
            import pinecone
        except ImportError:
            raise ValueError(
                "Could not import pinecone python package. "
                "Please install it with `pip install pinecone-client`."
            )

        indexes = pinecone.list_indexes()  # checks if provided index exists

        if index_name in indexes:
            index = pinecone.Index(index_name)
        elif len(indexes) == 0:
            raise ValueError(
                "No active indexes found in your Pinecone project, "
                "are you sure you're using the right API key and environment?"
            )
        else:
            raise ValueError(
                f"Index '{index_name}' not found in your Pinecone project. "
                f"Did you mean one of the following indexes: {', '.join(indexes)}"
            )

        for i in range(0, len(texts), batch_size):
            # set end position of batch
            i_end = min(i + batch_size, len(texts))
            # get batch of texts and ids
            lines_batch = texts[i:i_end]
            # create ids if not provided
            if ids:
                ids_batch = ids[i:i_end]
            else:
                ids_batch = [str(uuid.uuid4()) for n in range(i, i_end)]
            # create embeddings
            embeds = embedding.embed_documents(lines_batch)
            # prep metadata and upsert batch
            if metadatas:
                metadata = metadatas[i:i_end]
            else:
                metadata = [{} for _ in range(i, i_end)]
            for j, line in enumerate(lines_batch):
                metadata[j][text_key] = line
            to_upsert = zip(ids_batch, embeds, metadata)
            # upsert to Pinecone
            index.upsert(vectors=list(to_upsert), namespace=namespace)
        return cls(index, embedding.embed_query, text_key, namespace)

    @classmethod
    def from_existing_index(
        cls,
        index_name: str,
        embedding: Embeddings,
        text_key: str = "text",
        namespace: Optional[str] = None,
    ) -> Pinecone:
        """Load pinecone vectorstore from index name."""
        try:
            import pinecone
        except ImportError:
            raise ValueError(
                "Could not import pinecone python package. "
                "Please install it with `pip install pinecone-client`."
            )

        return cls(
            pinecone.Index(index_name), embedding.embed_query, text_key, namespace
        )
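A short end-to-end sketch of the query paths defined above. The index name, environment, and the Mongo-style metadata filter are placeholder assumptions; only the method calls come from the module:

    import pinecone
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import Pinecone

    pinecone.init(api_key="***", environment="us-west1-gcp")  # placeholder values

    embeddings = OpenAIEmbeddings()
    vectorstore = Pinecone.from_existing_index("langchain-demo", embeddings)

    # Plain similarity search, optionally restricted by a metadata filter.
    docs = vectorstore.similarity_search(
        "pricing tiers", k=4, filter={"section": {"$eq": "billing"}}
    )

    # Diversified results: fetch_k candidates are retrieved with their vectors,
    # then k are chosen by maximal marginal relevance.
    diverse = vectorstore.max_marginal_relevance_search(
        "pricing tiers", k=4, fetch_k=20
    )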
Source code for langchain.vectorstores.weaviate
(rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/weaviate.html)

"""Wrapper around weaviate vector database."""
from __future__ import annotations

import datetime
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
from uuid import uuid4

import numpy as np

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance


def _default_schema(index_name: str) -> Dict:
    return {
        "class": index_name,
        "properties": [
            {
                "name": "text",
                "dataType": ["text"],
            }
        ],
    }


def _create_weaviate_client(**kwargs: Any) -> Any:
    client = kwargs.get("client")
    if client is not None:
        return client

    weaviate_url = get_from_dict_or_env(kwargs, "weaviate_url", "WEAVIATE_URL")

    try:
        # the weaviate api key param should not be mandatory
        weaviate_api_key = get_from_dict_or_env(
            kwargs, "weaviate_api_key", "WEAVIATE_API_KEY", None
        )
    except ValueError:
        weaviate_api_key = None

    try:
        import weaviate
    except ImportError:
        raise ValueError(
            "Could not import weaviate python package. "
            "Please install it with `pip install weaviate-client`."
        )

    auth = (
        weaviate.auth.AuthApiKey(api_key=weaviate_api_key)
        if weaviate_api_key is not None
        else None
    )
    client = weaviate.Client(weaviate_url, auth_client_secret=auth)

    return client


def _default_score_normalizer(val: float) -> float:
    return 1 - 1 / (1 + np.exp(val))


def _json_serializable(value: Any) -> Any:
    if isinstance(value, datetime.datetime):
        return value.isoformat()
    return value


class Weaviate(VectorStore):
    """Wrapper around Weaviate vector database.

    To use, you should have the ``weaviate-client`` python package installed.

    Example:
        .. code-block:: python

            import weaviate
            from langchain.vectorstores import Weaviate
            client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
            weaviate = Weaviate(client, index_name, text_key)
    """

    def __init__(
        self,
        client: Any,
        index_name: str,
        text_key: str,
        embedding: Optional[Embeddings] = None,
        attributes: Optional[List[str]] = None,
        relevance_score_fn: Optional[
            Callable[[float], float]
        ] = _default_score_normalizer,
        by_text: bool = True,
    ):
        """Initialize with Weaviate client."""
        try:
            import weaviate
        except ImportError:
            raise ValueError(
                "Could not import weaviate python package. "
                "Please install it with `pip install weaviate-client`."
            )
        if not isinstance(client, weaviate.Client):
            raise ValueError(
                f"client should be an instance of weaviate.Client, got {type(client)}"
            )
        self._client = client
        self._index_name = index_name
        self._embedding = embedding
        self._text_key = text_key
        self._query_attrs = [self._text_key]
        self._relevance_score_fn = relevance_score_fn
        self._by_text = by_text
        if attributes is not None:
            self._query_attrs.extend(attributes)

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Upload texts with metadata (properties) to Weaviate."""
        from weaviate.util import get_valid_uuid

        ids = []
        with self._client.batch as batch:
            for i, text in enumerate(texts):
                data_properties = {self._text_key: text}
                if metadatas is not None:
                    for key, val in metadatas[i].items():
                        data_properties[key] = _json_serializable(val)

                # If the UUID of one of the objects already exists
                # then the existing object will be replaced by the new object.
                _id = (
                    kwargs["uuids"][i] if "uuids" in kwargs else get_valid_uuid(uuid4())
                )

                if self._embedding is not None:
                    vector = self._embedding.embed_documents([text])[0]
                else:
                    vector = None
                batch.add_data_object(
                    data_object=data_properties,
                    class_name=self._index_name,
                    uuid=_id,
                    vector=vector,
                )
                ids.append(_id)
        return ids

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        if self._by_text:
            return self.similarity_search_by_text(query, k, **kwargs)
        else:
            if self._embedding is None:
                raise ValueError(
                    "_embedding cannot be None for similarity_search when "
                    "_by_text=False"
                )
            embedding = self._embedding.embed_query(query)
            return self.similarity_search_by_vector(embedding, k, **kwargs)

    def similarity_search_by_text(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        content: Dict[str, Any] = {"concepts": [query]}
        if kwargs.get("search_distance"):
            content["certainty"] = kwargs.get("search_distance")
        query_obj = self._client.query.get(self._index_name, self._query_attrs)
        if kwargs.get("where_filter"):
            query_obj = query_obj.with_where(kwargs.get("where_filter"))
        if kwargs.get("additional"):
            query_obj = query_obj.with_additional(kwargs.get("additional"))
        result = query_obj.with_near_text(content).with_limit(k).do()
        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")
        docs = []
        for res in result["data"]["Get"][self._index_name]:
            text = res.pop(self._text_key)
            docs.append(Document(page_content=text, metadata=res))
        return docs

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Look up similar documents by embedding vector in Weaviate."""
        vector = {"vector": embedding}
        query_obj = self._client.query.get(self._index_name, self._query_attrs)
        if kwargs.get("where_filter"):
            query_obj = query_obj.with_where(kwargs.get("where_filter"))
        if kwargs.get("additional"):
            query_obj = query_obj.with_additional(kwargs.get("additional"))
        result = query_obj.with_near_vector(vector).with_limit(k).do()
        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")
        docs = []
        for res in result["data"]["Get"][self._index_name]:
            text = res.pop(self._text_key)
            docs.append(Document(page_content=text, metadata=res))
        return docs

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if self._embedding is not None:
            embedding = self._embedding.embed_query(query)
        else:
            raise ValueError(
                "max_marginal_relevance_search requires a suitable Embeddings object"
            )
        return self.max_marginal_relevance_search_by_vector(
            embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
        )

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        vector = {"vector": embedding}
        query_obj = self._client.query.get(self._index_name, self._query_attrs)
        if kwargs.get("where_filter"):
            query_obj = query_obj.with_where(kwargs.get("where_filter"))
        results = (
            query_obj.with_additional("vector")
            .with_near_vector(vector)
            .with_limit(fetch_k)
            .do()
        )

        payload = results["data"]["Get"][self._index_name]
        embeddings = [result["_additional"]["vector"] for result in payload]
        mmr_selected = maximal_marginal_relevance(
            np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
        )

        docs = []
        for idx in mmr_selected:
            text = payload[idx].pop(self._text_key)
            payload[idx].pop("_additional")
            meta = payload[idx]
            docs.append(Document(page_content=text, metadata=meta))
        return docs

    def similarity_search_with_score(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """
        Return list of documents most similar to the query
        text and similarity score (dot product) in float for each.
        Higher score represents more similarity.
        """
        if self._embedding is None:
            raise ValueError(
                "_embedding cannot be None for similarity_search_with_score"
            )
        content: Dict[str, Any] = {"concepts": [query]}
        if kwargs.get("search_distance"):
            content["certainty"] = kwargs.get("search_distance")
        query_obj = self._client.query.get(self._index_name, self._query_attrs)

        if not self._by_text:
            embedding = self._embedding.embed_query(query)
            vector = {"vector": embedding}
            result = (
                query_obj.with_near_vector(vector)
                .with_limit(k)
                .with_additional("vector")
                .do()
            )
        else:
            result = (
                query_obj.with_near_text(content)
                .with_limit(k)
                .with_additional("vector")
                .do()
            )

        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")

        docs_and_scores = []
        for res in result["data"]["Get"][self._index_name]:
            text = res.pop(self._text_key)
            score = np.dot(
                res["_additional"]["vector"], self._embedding.embed_query(query)
            )
            docs_and_scores.append((Document(page_content=text, metadata=res), score))
        return docs_and_scores

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and relevance scores, normalized on a scale from 0 to 1.

        0 is dissimilar, 1 is most similar.
        """
        if self._relevance_score_fn is None:
            raise ValueError(
                "relevance_score_fn must be provided to"
                " Weaviate constructor to normalize scores"
            )
        docs_and_scores = self.similarity_search_with_score(query, k=k, **kwargs)
        return [
            (doc, self._relevance_score_fn(score)) for doc, score in docs_and_scores
        ]

    @classmethod
    def from_texts(
        cls: Type[Weaviate],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> Weaviate:
        """Construct Weaviate wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Creates a new index for the embeddings in the Weaviate instance.
            3. Adds the documents to the newly created Weaviate index.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain.vectorstores.weaviate import Weaviate
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                weaviate = Weaviate.from_texts(
                    texts,
                    embeddings,
                    weaviate_url="http://localhost:8080"
                )
        """
        client = _create_weaviate_client(**kwargs)

        from weaviate.util import get_valid_uuid

        index_name = kwargs.get("index_name", f"LangChain_{uuid4().hex}")
        embeddings = embedding.embed_documents(texts) if embedding else None
        text_key = "text"
        schema = _default_schema(index_name)
        attributes = list(metadatas[0].keys()) if metadatas else None

        # check whether the index already exists
        if not client.schema.contains(schema):
            client.schema.create_class(schema)

        with client.batch as batch:
            for i, text in enumerate(texts):
                data_properties = {
                    text_key: text,
                }
                if metadatas is not None:
                    for key in metadatas[i].keys():
                        data_properties[key] = metadatas[i][key]

                # If the UUID of one of the objects already exists
                # then the existing object will be replaced by the new object.
                if "uuids" in kwargs:
                    _id = kwargs["uuids"][i]
                else:
                    _id = get_valid_uuid(uuid4())

                # if an embedding strategy is not provided, we let
                # weaviate create the embedding. Note that this will only
                # work if weaviate has been installed with a vectorizer module
                # like text2vec-contextionary for example
                params = {
                    "uuid": _id,
                    "data_object": data_properties,
                    "class_name": index_name,
                }
                if embeddings is not None:
                    params["vector"] = embeddings[i]

                batch.add_data_object(**params)

            batch.flush()

        relevance_score_fn = kwargs.get("relevance_score_fn")
        by_text: bool = kwargs.get("by_text", False)

        return cls(
            client,
            index_name,
            text_key,
            embedding=embedding,
            attributes=attributes,
            relevance_score_fn=relevance_score_fn,
            by_text=by_text,
        )
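A minimal sketch of the two construction paths supported above, assuming a Weaviate instance is reachable at the given URL; the URL, the toy corpus, and the index name "LangChain_demo" are illustrative assumptions:

    import weaviate
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import Weaviate

    texts = ["apples are red", "the sky is blue"]  # toy corpus

    # Path 1: from_texts creates the client, schema, and index for you.
    # by_text=False makes queries run over stored embedding vectors
    # (nearVector) instead of Weaviate's nearText.
    store = Weaviate.from_texts(
        texts,
        OpenAIEmbeddings(),
        weaviate_url="http://localhost:8080",
        by_text=False,
    )

    # Path 2: wrap an existing client and index directly.
    client = weaviate.Client(url="http://localhost:8080")
    store2 = Weaviate(
        client, "LangChain_demo", "text", embedding=OpenAIEmbeddings()
    )

    docs = store.similarity_search("what color is the sky?", k=1)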
Source code for langchain.vectorstores.zilliz
(rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/zilliz.html)

from __future__ import annotations

import logging
from typing import Any, List, Optional

from langchain.embeddings.base import Embeddings
from langchain.vectorstores.milvus import Milvus

logger = logging.getLogger(__name__)


class Zilliz(Milvus):
    def _create_index(self) -> None:
        """Create an index on the collection."""
        from pymilvus import Collection, MilvusException

        if isinstance(self.col, Collection) and self._get_index() is None:
            try:
                # If no index params, use a default AutoIndex based one
                if self.index_params is None:
                    self.index_params = {
                        "metric_type": "L2",
                        "index_type": "AUTOINDEX",
                        "params": {},
                    }

                try:
                    self.col.create_index(
                        self._vector_field,
                        index_params=self.index_params,
                        using=self.alias,
                    )

                # If default did not work, most likely Milvus self-hosted
                except MilvusException:
                    # Use HNSW based index
                    self.index_params = {
                        "metric_type": "L2",
                        "index_type": "HNSW",
                        "params": {"M": 8, "efConstruction": 64},
                    }
                    self.col.create_index(
                        self._vector_field,
                        index_params=self.index_params,
                        using=self.alias,
                    )
                logger.debug(
                    "Successfully created an index on collection: %s",
                    self.collection_name,
                )

            except MilvusException as e:
                logger.error(
                    "Failed to create an index on collection: %s", self.collection_name
                )
                raise e

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        collection_name: str = "LangChainCollection",
        connection_args: dict[str, Any] = {},
        consistency_level: str = "Session",
        index_params: Optional[dict] = None,
        search_params: Optional[dict] = None,
        drop_old: bool = False,
        **kwargs: Any,
    ) -> Zilliz:
        """Create a Zilliz collection, index it, and insert data.

        Args:
            texts (List[str]): Text data.
            embedding (Embeddings): Embedding function.
            metadatas (Optional[List[dict]]): Metadata for each text if it exists.
                Defaults to None.
            collection_name (str, optional): Collection name to use. Defaults to
                "LangChainCollection".
            connection_args (dict[str, Any], optional): Connection args to use.
                Defaults to DEFAULT_MILVUS_CONNECTION.
            consistency_level (str, optional): Which consistency level to use.
                Defaults to "Session".
            index_params (Optional[dict], optional): Which index_params to use.
                Defaults to None.
            search_params (Optional[dict], optional): Which search params to use.
                Defaults to None.
            drop_old (Optional[bool], optional): Whether to drop the collection with
                that name if it exists. Defaults to False.

        Returns:
            Zilliz: Zilliz Vector Store
        """
        vector_db = cls(
            embedding_function=embedding,
            collection_name=collection_name,
            connection_args=connection_args,
            consistency_level=consistency_level,
            index_params=index_params,
            search_params=search_params,
            drop_old=drop_old,
            **kwargs,
        )
        vector_db.add_texts(texts=texts, metadatas=metadatas)
        return vector_db
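A usage sketch for the `from_texts` flow above. The `uri`/`token` keys shown for `connection_args` are an assumption about a typical Zilliz Cloud setup (the exact keys depend on your pymilvus deployment), and the endpoint and token values are placeholders:

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import Zilliz

    vector_db = Zilliz.from_texts(
        texts=["doc one", "doc two"],
        embedding=OpenAIEmbeddings(),
        connection_args={
            "uri": "https://<your-endpoint>.zillizcloud.com",  # placeholder
            "token": "***",  # placeholder
        },
        collection_name="LangChainCollection",
        drop_old=True,  # recreate the collection if it already exists
    )

    # Index creation happens inside the store: AUTOINDEX first, with an
    # HNSW fallback for self-hosted Milvus (see _create_index above).
    docs = vector_db.similarity_search("doc one", k=1)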
Source code for langchain.vectorstores.singlestoredb
(rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/singlestoredb.html)

"""Wrapper around SingleStore DB."""
from __future__ import annotations

import json
from typing import (
    Any,
    ClassVar,
    Collection,
    Iterable,
    List,
    Optional,
    Tuple,
    Type,
)

from sqlalchemy.pool import QueuePool

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore, VectorStoreRetriever


class SingleStoreDB(VectorStore):
    """
    This class serves as a Pythonic interface to the SingleStore DB database.

    The prerequisite for using this class is the installation of the
    ``singlestoredb`` Python package.

    The SingleStoreDB vectorstore can be created by providing an embedding
    function and the relevant parameters for the database connection,
    connection pool, and optionally, the names of the table and the fields
    to use.
    """

    def _get_connection(self: SingleStoreDB) -> Any:
        try:
            import singlestoredb as s2
        except ImportError:
            raise ImportError(
                "Could not import singlestoredb python package. "
                "Please install it with `pip install singlestoredb`."
            )
        return s2.connect(**self.connection_kwargs)

    def __init__(
        self,
        embedding: Embeddings,
        *,
        table_name: str = "embeddings",
        content_field: str = "content",
        metadata_field: str = "metadata",
        vector_field: str = "vector",
        pool_size: int = 5,
        max_overflow: int = 10,
        timeout: float = 30,
        **kwargs: Any,
    ):
        """Initialize with necessary components.

        Args:
            embedding (Embeddings): A text embedding model.

            table_name (str, optional): Specifies the name of the table in use.
                Defaults to "embeddings".
            content_field (str, optional): Specifies the field to store the content.
                Defaults to "content".
            metadata_field (str, optional): Specifies the field to store metadata.
                Defaults to "metadata".
            vector_field (str, optional): Specifies the field to store the vector.
                Defaults to "vector".

            Following arguments pertain to the connection pool:

            pool_size (int, optional): Determines the number of active connections
                in the pool. Defaults to 5.
            max_overflow (int, optional): Determines the maximum number of
                connections allowed beyond the pool_size. Defaults to 10.
            timeout (float, optional): Specifies the maximum wait time in seconds
                for establishing a connection. Defaults to 30.

            Following arguments pertain to the database connection:

            host (str, optional): Specifies the hostname, IP address, or URL for
                the database connection. The default scheme is "mysql".
            user (str, optional): Database username.
            password (str, optional): Database password.
            port (int, optional): Database port. Defaults to 3306 for non-HTTP
                connections, 80 for HTTP connections, and 443 for HTTPS connections.
            database (str, optional): Database name.

            Additional optional arguments provide further customization over the
            database connection:

            pure_python (bool, optional): Toggles the connector mode. If True,
                operates in pure Python mode.
            local_infile (bool, optional): Allows local file uploads.
            charset (str, optional): Specifies the character set for string values.
            ssl_key (str, optional): Specifies the path of the file containing the
                SSL key.
            ssl_cert (str, optional): Specifies the path of the file containing the
                SSL certificate.
            ssl_ca (str, optional): Specifies the path of the file containing the
                SSL certificate authority.
            ssl_cipher (str, optional): Sets the SSL cipher list.
            ssl_disabled (bool, optional): Disables SSL usage.
            ssl_verify_cert (bool, optional): Verifies the server's certificate.
                Automatically enabled if ``ssl_ca`` is specified.
            ssl_verify_identity (bool, optional): Verifies the server's identity.

            conv (dict[int, Callable], optional): A dictionary of data conversion
                functions.
            credential_type (str, optional): Specifies the type of authentication
                to use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO.
            autocommit (bool, optional): Enables autocommits.
            results_type (str, optional): Determines the structure of the query
                results: tuples, namedtuples, dicts.
            results_format (str, optional): Deprecated. This option has been
                renamed to results_type.

        Examples:
            Basic Usage:
            .. code-block:: python

                from langchain.embeddings import OpenAIEmbeddings
                from langchain.vectorstores import SingleStoreDB

                vectorstore = SingleStoreDB(
                    OpenAIEmbeddings(),
                    host="https://user:password@127.0.0.1:3306/database"
                )

            Advanced Usage:
            .. code-block:: python

                from langchain.embeddings import OpenAIEmbeddings
                from langchain.vectorstores import SingleStoreDB

                vectorstore = SingleStoreDB(
                    OpenAIEmbeddings(),
                    host="127.0.0.1",
                    port=3306,
                    user="user",
                    password="password",
                    database="db",
                    table_name="my_custom_table",
                    pool_size=10,
                    timeout=60,
                )

            Using environment variables:
            .. code-block:: python

                from langchain.embeddings import OpenAIEmbeddings
                from langchain.vectorstores import SingleStoreDB

                os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db'
                vectorstore = SingleStoreDB(OpenAIEmbeddings())
        """
        self.embedding = embedding
        self.table_name = table_name
        self.content_field = content_field
        self.metadata_field = metadata_field
        self.vector_field = vector_field

        """Pass the rest of the kwargs to the connection."""
        self.connection_kwargs = kwargs

        """Create connection pool."""
        self.connection_pool = QueuePool(
            self._get_connection,
            max_overflow=max_overflow,
            pool_size=pool_size,
            timeout=timeout,
        )
        self._create_table()

    def _create_table(self: SingleStoreDB) -> None:
        """Create table if it doesn't exist."""
        conn = self.connection_pool.connect()
        try:
            cur = conn.cursor()
            try:
                cur.execute(
                    """CREATE TABLE IF NOT EXISTS {}
                    ({} TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci,
                    {} BLOB, {} JSON);""".format(
                        self.table_name,
                        self.content_field,
                        self.vector_field,
                        self.metadata_field,
                    ),
                )
            finally:
                cur.close()
        finally:
            conn.close()

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        embeddings: Optional[List[List[float]]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Add more texts to the vectorstore.

        Args:
            texts (Iterable[str]): Iterable of strings/text to add to the
                vectorstore.
            metadatas (Optional[List[dict]], optional): Optional list of metadatas.
                Defaults to None.
            embeddings (Optional[List[List[float]]], optional): Optional
                pre-generated embeddings. Defaults to None.

        Returns:
            List[str]: empty list
        """
        conn = self.connection_pool.connect()
        try:
            cur = conn.cursor()
            try:
                # Write data to singlestore db
                for i, text in enumerate(texts):
                    # Use provided values by default or fallback
                    metadata = metadatas[i] if metadatas else {}
                    embedding = (
                        embeddings[i]
                        if embeddings
                        else self.embedding.embed_documents([text])[0]
                    )
                    cur.execute(
                        "INSERT INTO {} VALUES (%s, JSON_ARRAY_PACK(%s), %s)".format(
                            self.table_name
                        ),
                        (
                            text,
                            "[{}]".format(",".join(map(str, embedding))),
                            json.dumps(metadata),
                        ),
                    )
            finally:
                cur.close()
        finally:
            conn.close()
        return []

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Returns the most similar indexed documents to the query text.

        Uses cosine similarity.

        Args:
            query (str): The query text for which to find similar documents.
            k (int): The number of documents to return. Default is 4.

        Returns:
            List[Document]: A list of documents that are most similar to the
                query text.
        """
        docs_and_scores = self.similarity_search_with_score(query, k=k)
        return [doc for doc, _ in docs_and_scores]

    def similarity_search_with_score(
        self, query: str, k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query. Uses cosine similarity.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        # Creates embedding vector from user query
        embedding = self.embedding.embed_query(query)
        conn = self.connection_pool.connect()
        result = []
        try:
            cur = conn.cursor()
            try:
                cur.execute(
                    """SELECT {}, {}, DOT_PRODUCT({}, JSON_ARRAY_PACK(%s)) as __score
                    FROM {} ORDER BY __score DESC LIMIT %s""".format(
                        self.content_field,
                        self.metadata_field,
                        self.vector_field,
                        self.table_name,
                    ),
                    (
                        "[{}]".format(",".join(map(str, embedding))),
                        k,
                    ),
                )
                for row in cur.fetchall():
                    doc = Document(page_content=row[0], metadata=row[1])
                    result.append((doc, float(row[2])))
            finally:
                cur.close()
        finally:
            conn.close()
        return result

    @classmethod
    def from_texts(
        cls: Type[SingleStoreDB],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        table_name: str = "embeddings",
        content_field: str = "content",
        metadata_field: str = "metadata",
        vector_field: str = "vector",
        pool_size: int = 5,
        max_overflow: int = 10,
        timeout: float = 30,
        **kwargs: Any,
    ) -> SingleStoreDB:
        """Create a SingleStoreDB vectorstore from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Creates a new table for the embeddings in SingleStoreDB.
            3. Adds the documents to the newly created table.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain.vectorstores import SingleStoreDB
                from langchain.embeddings import OpenAIEmbeddings

                s2 = SingleStoreDB.from_texts(
                    texts,
                    OpenAIEmbeddings(),
                    host="username:password@localhost:3306/database"
                )
        """
        instance = cls(
            embedding,
            table_name=table_name,
            content_field=content_field,
            metadata_field=metadata_field,
            vector_field=vector_field,
            pool_size=pool_size,
            max_overflow=max_overflow,
            timeout=timeout,
            **kwargs,
        )
        instance.add_texts(texts, metadatas, embedding.embed_documents(texts), **kwargs)
        return instance

    def as_retriever(self, **kwargs: Any) -> SingleStoreDBRetriever:
        return SingleStoreDBRetriever(vectorstore=self, **kwargs)


class SingleStoreDBRetriever(VectorStoreRetriever):
    vectorstore: SingleStoreDB
    k: int = 4
    allowed_search_types: ClassVar[Collection[str]] = ("similarity",)

    def get_relevant_documents(self, query: str) -> List[Document]:
        if self.search_type == "similarity":
            docs = self.vectorstore.similarity_search(query, k=self.k)
        else:
            raise ValueError(f"search_type of {self.search_type} not allowed.")
        return docs

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError(
            "SingleStoreDBVectorStoreRetriever does not support async"
        )
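A short sketch of the retriever path defined above, assuming a reachable SingleStoreDB instance; the connection URL, table name, and toy texts are placeholders:

    import os

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import SingleStoreDB

    # Placeholder connection string, picked up by the singlestoredb connector.
    os.environ["SINGLESTOREDB_URL"] = "me:p455w0rd@s2-host.com/my_db"

    store = SingleStoreDB.from_texts(
        ["first note", "second note"],
        OpenAIEmbeddings(),
        table_name="notes_embeddings",  # hypothetical table name
    )

    # SingleStoreDBRetriever only supports "similarity" search; k is a field
    # on the retriever itself rather than a search_kwargs entry.
    retriever = store.as_retriever(k=2)
    docs = retriever.get_relevant_documents("first")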
dabc47655c46-0
Source code for langchain.vectorstores.annoy """Wrapper around Annoy vector database.""" from __future__ import annotations import os import pickle import uuid from configparser import ConfigParser from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import numpy as np from langchain.docstore.base import Docstore from langchain.docstore.document import Document from langchain.docstore.in_memory import InMemoryDocstore from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance INDEX_METRICS = frozenset(["angular", "euclidean", "manhattan", "hamming", "dot"]) DEFAULT_METRIC = "angular" def dependable_annoy_import() -> Any: """Import annoy if available, otherwise raise error.""" try: import annoy except ImportError: raise ValueError( "Could not import annoy python package. " "Please install it with `pip install --user annoy` " ) return annoy [docs]class Annoy(VectorStore): """Wrapper around Annoy vector database. To use, you should have the ``annoy`` python package installed. Example: .. code-block:: python from langchain import Annoy db = Annoy(embedding_function, index, docstore, index_to_docstore_id) """ def __init__( self, embedding_function: Callable, index: Any, metric: str, docstore: Docstore, index_to_docstore_id: Dict[int, str], ): """Initialize with necessary components.""" self.embedding_function = embedding_function
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/annoy.html
dabc47655c46-1
): """Initialize with necessary components.""" self.embedding_function = embedding_function self.index = index self.metric = metric self.docstore = docstore self.index_to_docstore_id = index_to_docstore_id [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: raise NotImplementedError( "Annoy does not allow to add new data once the index is build." ) [docs] def process_index_results( self, idxs: List[int], dists: List[float] ) -> List[Tuple[Document, float]]: """Turns annoy results into a list of documents and scores. Args: idxs: List of indices of the documents in the index. dists: List of distances of the documents in the index. Returns: List of Documents and scores. """ docs = [] for idx, dist in zip(idxs, dists): _id = self.index_to_docstore_id[idx] doc = self.docstore.search(_id) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {_id}, got {doc}") docs.append((doc, dist)) return docs [docs] def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, search_k: int = -1 ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to.
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/annoy.html
dabc47655c46-2
Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query and score for each """ idxs, dists = self.index.get_nns_by_vector( embedding, k, search_k=search_k, include_distances=True ) return self.process_index_results(idxs, dists) [docs] def similarity_search_with_score_by_index( self, docstore_index: int, k: int = 4, search_k: int = -1 ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query and score for each """ idxs, dists = self.index.get_nns_by_item( docstore_index, k, search_k=search_k, include_distances=True ) return self.process_index_results(idxs, dists) [docs] def similarity_search_with_score( self, query: str, k: int = 4, search_k: int = -1 ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4.
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/annoy.html
dabc47655c46-3
k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function(query) docs = self.similarity_search_with_score_by_vector(embedding, k, search_k) return docs [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding, k, search_k ) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search_by_index( self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any ) -> List[Document]: """Return docs most similar to docstore_index. Args: docstore_index: Index of document in docstore k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the embedding. """
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/annoy.html
dabc47655c46-4
Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_index( docstore_index, k, search_k ) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search( self, query: str, k: int = 4, search_k: int = -1, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k, search_k) return [doc for doc, _ in docs_and_scores] [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. fetch_k: Number of Documents to fetch to pass to MMR algorithm. k: Number of Documents to return. Defaults to 4. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/vectorstores/annoy.html
dabc47655c46-5
of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ idxs = self.index.get_nns_by_vector( embedding, fetch_k, search_k=-1, include_distances=False ) embeddings = [self.index.get_item_vector(i) for i in idxs] mmr_selected = maximal_marginal_relevance( np.array([embedding], dtype=np.float32), embeddings, k=k, lambda_mult=lambda_mult, ) # ignore the -1's if not enough docs are returned/indexed selected_indices = [idxs[i] for i in mmr_selected if i != -1] docs = [] for i in selected_indices: _id = self.index_to_docstore_id[i] doc = self.docstore.search(_id) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {_id}, got {doc}") docs.append(doc) return docs [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4.
k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self.embedding_function(query) docs = self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult=lambda_mult ) return docs @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: if metric not in INDEX_METRICS: raise ValueError( ( f"Unsupported distance metric: {metric}. " f"Expected one of {list(INDEX_METRICS)}" ) ) annoy = dependable_annoy_import() if not embeddings: raise ValueError("embeddings must be provided to build AnnoyIndex") f = len(embeddings[0]) index = annoy.AnnoyIndex(f, metric=metric) for i, emb in enumerate(embeddings): index.add_item(i, emb) index.build(trees, n_jobs=n_jobs) documents = [] for i, text in enumerate(texts):
documents = [] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} documents.append(Document(page_content=text, metadata=metadata)) index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))} docstore = InMemoryDocstore( {index_to_id[i]: doc for i, doc in enumerate(documents)} ) return cls(embedding.embed_query, index, metric, docstore, index_to_id) [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: """Construct Annoy wrapper from raw documents. Args: texts: List of documents to index. embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1. This is a user friendly interface that: 1. Embeds documents. 2. Creates an in memory docstore 3. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Annoy from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() index = Annoy.from_texts(texts, embeddings) """ embeddings = embedding.embed_documents(texts) return cls.__from( texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs ) [docs] @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: """Construct Annoy wrapper from embeddings. Args: text_embeddings: List of tuples of (text, embedding) embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1 This is a user friendly interface that: 1. Creates an in memory docstore with provided embeddings 2. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Annoy from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings))
text_embedding_pairs = list(zip(texts, text_embeddings)) db = Annoy.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs ) [docs] def save_local(self, folder_path: str, prefault: bool = False) -> None: """Save Annoy index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to save index, docstore, and index_to_docstore_id to. prefault: Whether to pre-load the index into memory. """ path = Path(folder_path) os.makedirs(path, exist_ok=True) # save index, index config, docstore and index_to_docstore_id config_object = ConfigParser() config_object["ANNOY"] = { "f": self.index.f, "metric": self.metric, } self.index.save(str(path / "index.annoy"), prefault=prefault) with open(path / "index.pkl", "wb") as file: pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file) [docs] @classmethod def load_local( cls, folder_path: str, embeddings: Embeddings, ) -> Annoy: """Load Annoy index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to load index, docstore,
and index_to_docstore_id from. embeddings: Embeddings to use when generating queries. """ path = Path(folder_path) # load index separately since it is not picklable annoy = dependable_annoy_import() # load docstore and index_to_docstore_id with open(path / "index.pkl", "rb") as file: docstore, index_to_docstore_id, config_object = pickle.load(file) f = int(config_object["ANNOY"]["f"]) metric = config_object["ANNOY"]["metric"] index = annoy.AnnoyIndex(f, metric=metric) index.load(str(path / "index.annoy")) return cls( embeddings.embed_query, index, metric, docstore, index_to_docstore_id )
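Taken together, a typical round trip with this class looks like the following minimal sketch. It only uses the constructors and the save_local / load_local methods shown above; the corpus and folder path are illustrative placeholders.

.. code-block:: python

    from langchain import Annoy
    from langchain.embeddings import OpenAIEmbeddings

    embeddings = OpenAIEmbeddings()
    texts = ["hello world", "goodbye world"]  # placeholder corpus

    # Build the index in memory, then persist index + docstore to disk.
    index = Annoy.from_texts(texts, embeddings)
    index.save_local("my_annoy_index")

    # Reload later; the embeddings object must be passed again because
    # only its embed_query function is re-attached on load.
    restored = Annoy.load_local("my_annoy_index", embeddings)
    docs = restored.similarity_search("hello", k=1)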
Source code for langchain.vectorstores.elastic_vector_search """Wrapper around Elasticsearch vector database.""" from __future__ import annotations import uuid from abc import ABC from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union, ) from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_env from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from elasticsearch import Elasticsearch def _default_text_mapping(dim: int) -> Dict: return { "properties": { "text": {"type": "text"}, "vector": {"type": "dense_vector", "dims": dim}, } } def _default_script_query(query_vector: List[float], filter: Optional[dict]) -> Dict: if filter: ((key, value),) = filter.items() filter = {"match": {f"metadata.{key}.keyword": f"{value}"}} else: filter = {"match_all": {}} return { "script_score": { "query": filter, "script": { "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0", "params": {"query_vector": query_vector}, }, } } # ElasticVectorSearch is a concrete implementation of the abstract base class # VectorStore, which defines a common interface for all vector database # implementations. By inheriting from the ABC class, ElasticVectorSearch can be # defined as an abstract base class itself, allowing the creation of subclasses with
# defined as an abstract base class itself, allowing the creation of subclasses with # their own specific implementations. If you plan to subclass ElasticVectorSearch, # you can inherit from it and define your own implementation of the necessary methods # and attributes. [docs]class ElasticVectorSearch(VectorStore, ABC): """Wrapper around Elasticsearch as a vector database. To connect to an Elasticsearch instance that does not require login credentials, pass the Elasticsearch URL and index name along with the embedding object to the constructor. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch( elasticsearch_url="http://localhost:9200", index_name="test_index", embedding=embedding ) To connect to an Elasticsearch instance that requires login credentials, including Elastic Cloud, use the Elasticsearch URL format https://username:password@es_host:9243. For example, to connect to Elastic Cloud, create the Elasticsearch URL with the required authentication details and pass it to the ElasticVectorSearch constructor as the named parameter elasticsearch_url. You can obtain your Elastic Cloud URL and login credentials by logging in to the Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and navigating to the "Deployments" page. To obtain your Elastic Cloud password for the default "elastic" user: 1. Log in to the Elastic Cloud console at https://cloud.elastic.co 2. Go to "Security" > "Users" 3. Locate the "elastic" user and click "Edit" 4. Click "Reset password"
4. Click "Reset password" 5. Follow the prompts to reset the password The format for Elastic Cloud URLs is https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_host = "cluster_id.region_id.gcp.cloud.es.io" elasticsearch_url = f"https://username:password@{elastic_host}:9243" elastic_vector_search = ElasticVectorSearch( elasticsearch_url=elasticsearch_url, index_name="test_index", embedding=embedding ) Args: elasticsearch_url (str): The URL for the Elasticsearch instance. index_name (str): The name of the Elasticsearch index for the embeddings. embedding (Embeddings): An object that provides the ability to embed text. It should be an instance of a class that subclasses the Embeddings abstract base class, such as OpenAIEmbeddings() Raises: ValueError: If the elasticsearch python package is not installed. """ def __init__( self, elasticsearch_url: str, index_name: str, embedding: Embeddings, *, ssl_verify: Optional[Dict[str, Any]] = None, ): """Initialize with necessary components.""" try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) self.embedding = embedding self.index_name = index_name _ssl_verify = ssl_verify or {}
self.index_name = index_name _ssl_verify = ssl_verify or {} try: self.client = elasticsearch.Elasticsearch(elasticsearch_url, **_ssl_verify) except ValueError as e: raise ValueError( f"Your elasticsearch client string is mis-formatted. Got error: {e} " ) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, refresh_indices: bool = True, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. refresh_indices: bool to refresh ElasticSearch indices Returns: List of ids from adding the texts into the vectorstore. """ try: from elasticsearch.exceptions import NotFoundError from elasticsearch.helpers import bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) requests = [] ids = [] embeddings = self.embedding.embed_documents(list(texts)) dim = len(embeddings[0]) mapping = _default_text_mapping(dim) # check to see if the index already exists try: self.client.indices.get(index=self.index_name) except NotFoundError: # TODO would be nice to create index before embedding, # just to save expensive steps for last self.create_index(self.client, self.index_name, mapping) for i, text in enumerate(texts):
for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} _id = str(uuid.uuid4()) request = { "_op_type": "index", "_index": self.index_name, "vector": embeddings[i], "text": text, "metadata": metadata, "_id": _id, } ids.append(_id) requests.append(request) bulk(self.client, requests) if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids [docs] def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k, filter=filter) documents = [d[0] for d in docs_and_scores] return documents [docs] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ embedding = self.embedding.embed_query(query)
""" embedding = self.embedding.embed_query(query) script_query = _default_script_query(embedding, filter) response = self.client_search( self.client, self.index_name, script_query, size=k ) hits = [hit for hit in response["hits"]["hits"]] docs_and_scores = [ ( Document( page_content=hit["_source"]["text"], metadata=hit["_source"]["metadata"], ), hit["_score"], ) for hit in hits ] return docs_and_scores [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, elasticsearch_url: Optional[str] = None, index_name: Optional[str] = None, refresh_indices: bool = True, **kwargs: Any, ) -> ElasticVectorSearch: """Construct ElasticVectorSearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Elasticsearch instance. 3. Adds the documents to the newly created Elasticsearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch.from_texts( texts, embeddings, elasticsearch_url="http://localhost:9200" ) """ elasticsearch_url = elasticsearch_url or get_from_env(
) """ elasticsearch_url = elasticsearch_url or get_from_env( "elasticsearch_url", "ELASTICSEARCH_URL" ) index_name = index_name or uuid.uuid4().hex vectorsearch = cls(elasticsearch_url, index_name, embedding, **kwargs) vectorsearch.add_texts( texts, metadatas=metadatas, refresh_indices=refresh_indices ) return vectorsearch [docs] def create_index(self, client: Any, index_name: str, mapping: Dict) -> None: version_num = client.info()["version"]["number"][0] version_num = int(version_num) if version_num >= 8: client.indices.create(index=index_name, mappings=mapping) else: client.indices.create(index=index_name, body={"mappings": mapping}) [docs] def client_search( self, client: Any, index_name: str, script_query: Dict, size: int ) -> Any: version_num = client.info()["version"]["number"][0] version_num = int(version_num) if version_num >= 8: response = client.search(index=index_name, query=script_query, size=size) else: response = client.search( index=index_name, body={"query": script_query, "size": size} ) return response class ElasticKnnSearch(ElasticVectorSearch): """ A class for performing k-Nearest Neighbors (k-NN) search on an Elasticsearch index. The class is designed for a text search scenario where documents are text strings and their embeddings are vector representations of those strings. """ def __init__( self, index_name: str,
""" def __init__( self, index_name: str, embedding: Embeddings, es_connection: Optional["Elasticsearch"] = None, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, vector_query_field: Optional[str] = "vector", query_field: Optional[str] = "text", ): """ Initializes an instance of the ElasticKnnSearch class and sets up the Elasticsearch client. Args: index_name: The name of the Elasticsearch index. embedding: An instance of the Embeddings class, used to generate vector representations of text strings. es_connection: An existing Elasticsearch connection. es_cloud_id: The Cloud ID of the Elasticsearch instance. Required if creating a new connection. es_user: The username for the Elasticsearch instance. Required if creating a new connection. es_password: The password for the Elasticsearch instance. Required if creating a new connection. """ try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) self.embedding = embedding self.index_name = index_name self.query_field = query_field self.vector_query_field = vector_query_field # If a pre-existing Elasticsearch connection is provided, use it. if es_connection is not None: self.client = es_connection else: # If credentials for a new Elasticsearch connection are provided, # create a new connection. if es_cloud_id and es_user and es_password:
if es_cloud_id and es_user and es_password: self.client = elasticsearch.Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) else: raise ValueError( """Either provide a pre-existing Elasticsearch connection, \ or valid credentials for creating a new connection.""" ) @staticmethod def _default_knn_mapping(dims: int) -> Dict: """Generates a default index mapping for kNN search.""" return { "properties": { "text": {"type": "text"}, "vector": { "type": "dense_vector", "dims": dims, "index": True, "similarity": "dot_product", }, } } def _default_knn_query( self, query_vector: Optional[List[float]] = None, query: Optional[str] = None, model_id: Optional[str] = None, k: Optional[int] = 10, num_candidates: Optional[int] = 10, ) -> Dict: knn: Dict = { "field": self.vector_query_field, "k": k, "num_candidates": num_candidates, } # Case 1: `query_vector` is provided, but not `model_id` -> use query_vector if query_vector and not model_id: knn["query_vector"] = query_vector # Case 2: `query` and `model_id` are provided, -> use query_vector_builder elif query and model_id: knn["query_vector_builder"] = { "text_embedding": {
knn["query_vector_builder"] = { "text_embedding": { "model_id": model_id, # use 'model_id' argument "model_text": query, # use 'query' argument } } else: raise ValueError( "Either `query_vector` or `model_id` must be provided, but not both." ) return knn def knn_search( self, query: Optional[str] = None, k: Optional[int] = 10, query_vector: Optional[List[float]] = None, model_id: Optional[str] = None, size: Optional[int] = 10, source: Optional[bool] = True, fields: Optional[ Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None] ] = None, ) -> Dict: """ Performs a k-nearest neighbor (k-NN) search on the Elasticsearch index. The search can be conducted using either a raw query vector or a model ID. The method first generates the body of the search query, which can be interpreted by Elasticsearch. It then performs the k-NN search on the Elasticsearch index and returns the results. Args: query: The query or queries to be used for the search. Required if `query_vector` is not provided. k: The number of nearest neighbors to return. Defaults to 10. query_vector: The query vector to be used for the search. Required if `query` is not provided. model_id: The ID of the model to use for generating the query vector, if `query` is provided.
`query` is provided. size: The number of search hits to return. Defaults to 10. source: Whether to include the source of each hit in the results. fields: The fields to include in the source of each hit. If None, all fields are included. vector_query_field: Field name to use in knn search if not default 'vector' Returns: The search results. Raises: ValueError: If neither `query_vector` nor `model_id` is provided, or if both are provided. """ knn_query_body = self._default_knn_query( query_vector=query_vector, query=query, model_id=model_id, k=k ) # Perform the kNN search on the Elasticsearch index and return the results. res = self.client.search( index=self.index_name, knn=knn_query_body, size=size, source=source, fields=fields, ) return dict(res) def knn_hybrid_search( self, query: Optional[str] = None, k: Optional[int] = 10, query_vector: Optional[List[float]] = None, model_id: Optional[str] = None, size: Optional[int] = 10, source: Optional[bool] = True, knn_boost: Optional[float] = 0.9, query_boost: Optional[float] = 0.1, fields: Optional[ Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None] ] = None, ) -> Dict[Any, Any]:
"""Performs a hybrid k-nearest neighbor (k-NN) and text-based search on the Elasticsearch index. The search can be conducted using either a raw query vector or a model ID. The method first generates the body of the k-NN search query and the text-based query, which can be interpreted by Elasticsearch. It then performs the hybrid search on the Elasticsearch index and returns the results. Args: query: The query or queries to be used for the search. Required if `query_vector` is not provided. k: The number of nearest neighbors to return. Defaults to 10. query_vector: The query vector to be used for the search. Required if `query` is not provided. model_id: The ID of the model to use for generating the query vector, if `query` is provided. size: The number of search hits to return. Defaults to 10. source: Whether to include the source of each hit in the results. knn_boost: The boost factor for the k-NN part of the search. query_boost: The boost factor for the text-based part of the search. fields: The fields to include in the source of each hit. If None, all fields are included. Defaults to None. vector_query_field: Field name to use in knn search if not default 'vector' query_field: Field name to use in search if not default 'text' Returns: The search results. Raises: ValueError: If neither `query_vector` nor `model_id` is provided, or if both are provided. """
knn_query_body = self._default_knn_query( query_vector=query_vector, query=query, model_id=model_id, k=k ) # Modify the knn_query_body to add a "boost" parameter knn_query_body["boost"] = knn_boost # Generate the body of the standard Elasticsearch query match_query_body = { "match": {self.query_field: {"query": query, "boost": query_boost}} } # Perform the hybrid search on the Elasticsearch index and return the results. res = self.client.search( index=self.index_name, query=match_query_body, knn=knn_query_body, fields=fields, size=size, source=source, ) return dict(res)
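As a quick orientation to the two search entry points above, here is a minimal usage sketch built only from the methods shown; the index name, cloud ID, and credentials are placeholders, and an existing connection could be passed via es_connection instead.

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores.elastic_vector_search import ElasticKnnSearch

    embeddings = OpenAIEmbeddings()
    knn_store = ElasticKnnSearch(
        index_name="test_index",       # placeholder
        embedding=embeddings,
        es_cloud_id="my_cloud_id",     # placeholder credentials
        es_user="elastic",
        es_password="password",
    )

    # Pure kNN search with a precomputed query vector.
    vector = embeddings.embed_query("hello world")
    hits = knn_store.knn_search(query_vector=vector, k=5)

    # Hybrid search: blends the kNN score with a BM25 text match,
    # weighted by knn_boost / query_boost.
    hybrid = knn_store.knn_hybrid_search(
        query="hello world",
        query_vector=vector,
        k=5,
        knn_boost=0.9,
        query_boost=0.1,
    )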
Source code for langchain.vectorstores.qdrant """Wrapper around Qdrant vector database.""" from __future__ import annotations import uuid import warnings from itertools import islice from operator import itemgetter from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Type, Union, ) import numpy as np from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: from qdrant_client.conversions import common_types from qdrant_client.http import models as rest DictFilter = Dict[str, Union[str, int, bool, dict, list]] MetadataFilter = Union[DictFilter, common_types.Filter] [docs]class Qdrant(VectorStore): """Wrapper around Qdrant vector database. To use you should have the ``qdrant-client`` package installed. Example: .. code-block:: python from qdrant_client import QdrantClient from langchain import Qdrant client = QdrantClient() collection_name = "MyCollection" qdrant = Qdrant(client, collection_name, embedding_function) """ CONTENT_KEY = "page_content" METADATA_KEY = "metadata" def __init__( self, client: Any, collection_name: str, embeddings: Optional[Embeddings] = None, content_payload_key: str = CONTENT_KEY, metadata_payload_key: str = METADATA_KEY,
metadata_payload_key: str = METADATA_KEY, embedding_function: Optional[Callable] = None, # deprecated ): """Initialize with necessary components.""" try: import qdrant_client except ImportError: raise ValueError( "Could not import qdrant-client python package. " "Please install it with `pip install qdrant-client`." ) if not isinstance(client, qdrant_client.QdrantClient): raise ValueError( f"client should be an instance of qdrant_client.QdrantClient, " f"got {type(client)}" ) if embeddings is None and embedding_function is None: raise ValueError( "`embeddings` value can't be None. Pass `Embeddings` instance." ) if embeddings is not None and embedding_function is not None: raise ValueError( "Both `embeddings` and `embedding_function` are passed. " "Use `embeddings` only." ) self.embeddings = embeddings self._embeddings_function = embedding_function self.client: qdrant_client.QdrantClient = client self.collection_name = collection_name self.content_payload_key = content_payload_key or self.CONTENT_KEY self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY if embedding_function is not None: warnings.warn( "Using `embedding_function` is deprecated. " "Pass `Embeddings` instance to `embeddings` instead." ) if not isinstance(embeddings, Embeddings): warnings.warn( "`embeddings` should be an instance of `Embeddings`." "Using `embeddings` as `embedding_function` which is deprecated"
"Using `embeddings` as `embedding_function` which is deprecated" ) self._embeddings_function = embeddings self.embeddings = None [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[Sequence[str]] = None, batch_size: int = 64, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Ids have to be uuid-like strings. batch_size: How many vectors upload per-request. Default: 64 Returns: List of ids from adding the texts into the vectorstore. """ from qdrant_client.http import models as rest added_ids = [] texts_iterator = iter(texts) metadatas_iterator = iter(metadatas or []) ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)]) while batch_texts := list(islice(texts_iterator, batch_size)): # Take the corresponding metadata and id for each text in a batch batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None batch_ids = list(islice(ids_iterator, batch_size)) self.client.upsert( collection_name=self.collection_name, points=rest.Batch.construct( ids=batch_ids, vectors=self._embed_texts(batch_texts),
ids=batch_ids, vectors=self._embed_texts(batch_texts), payloads=self._build_payloads( batch_texts, batch_metadatas, self.content_payload_key, self.metadata_payload_key, ), ), ) added_ids.extend(batch_ids) return added_ids [docs] def similarity_search( self, query: str, k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas
- int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas Returns: List of Documents most similar to the query. """ results = self.similarity_search_with_score( query, k, filter=filter, search_params=search_params, offset=offset, score_threshold=score_threshold, consistency=consistency, **kwargs, ) return list(map(itemgetter(0), results)) [docs] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result.
score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas Returns: List of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ if filter is not None and isinstance(filter, dict): warnings.warn( "Using dict as a `filter` is deprecated. Please use qdrant-client " "filters directly: " "https://qdrant.tech/documentation/concepts/filtering/", DeprecationWarning, ) qdrant_filter = self._qdrant_filter_from_dict(filter) else: qdrant_filter = filter results = self.client.search( collection_name=self.collection_name, query_vector=self._embed_query(query), query_filter=qdrant_filter, search_params=search_params, limit=k, offset=offset, with_payload=True, with_vectors=False, # Langchain does not expect vectors to be returned
with_vectors=False, # Langchain does not expect vectors to be returned score_threshold=score_threshold, consistency=consistency, **kwargs, ) return [ ( self._document_from_scored_point( result, self.content_payload_key, self.metadata_payload_key ), result.score, ) for result in results ] def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores in the range [0, 1]. 0 is dissimilar, 1 is most similar. Args: query: input text k: Number of Documents to return. Defaults to 4. **kwargs: kwargs to be passed to similarity search. Should include: score_threshold: Optional, a floating point value between 0 to 1 to filter the resulting set of retrieved docs Returns: List of Tuples of (doc, similarity_score) """ return self.similarity_search_with_score(query, k, **kwargs) [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to.
Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embed_query(query) results = self.client.search( collection_name=self.collection_name, query_vector=embedding, with_payload=True, with_vectors=True, limit=fetch_k, ) embeddings = [result.vector for result in results] mmr_selected = maximal_marginal_relevance( np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult ) return [ self._document_from_scored_point( results[i], self.content_payload_key, self.metadata_payload_key ) for i in mmr_selected ] [docs] @classmethod def from_texts( cls: Type[Qdrant], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[Sequence[str]] = None, location: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = 6333, grpc_port: int = 6334, prefer_grpc: bool = False, https: Optional[bool] = None, api_key: Optional[str] = None,
api_key: Optional[str] = None, prefix: Optional[str] = None, timeout: Optional[float] = None, host: Optional[str] = None, path: Optional[str] = None, collection_name: Optional[str] = None, distance_func: str = "Cosine", content_payload_key: str = CONTENT_KEY, metadata_payload_key: str = METADATA_KEY, batch_size: int = 64, shard_number: Optional[int] = None, replication_factor: Optional[int] = None, write_consistency_factor: Optional[int] = None, on_disk_payload: Optional[bool] = None, hnsw_config: Optional[common_types.HnswConfigDiff] = None, optimizers_config: Optional[common_types.OptimizersConfigDiff] = None, wal_config: Optional[common_types.WalConfigDiff] = None, quantization_config: Optional[common_types.QuantizationConfig] = None, init_from: Optional[common_types.InitFrom] = None, **kwargs: Any, ) -> Qdrant: """Construct Qdrant wrapper from a list of texts. Args: texts: A list of texts to be indexed in Qdrant. embedding: A subclass of `Embeddings`, responsible for text vectorization. metadatas: An optional list of metadata. If provided it has to be of the same length as a list of texts. ids: Optional list of ids to associate with the texts. Ids have to be uuid-like strings. location: If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter. If `None` - fallback to relying on `host` and `port` parameters. url: either host or str of "Optional[scheme], host, Optional[port], Optional[prefix]". Default: `None` port: Port of the REST API interface. Default: 6333 grpc_port: Port of the gRPC interface. Default: 6334 prefer_grpc: If true - use gRPC interface whenever possible in custom methods. Default: False https: If true - use HTTPS(SSL) protocol. Default: None api_key: API key for authentication in Qdrant Cloud. Default: None prefix: If not None - add prefix to the REST URL path. Example: service/v1 will result in http://localhost:6333/service/v1/{qdrant-endpoint} for REST API. Default: None timeout: Timeout for REST and gRPC API requests. Default: 5.0 seconds for REST and unlimited for gRPC host: Host name of Qdrant service. If url and host are None, set to 'localhost'. Default: None path: Path in which the vectors will be stored while using local mode. Default: None collection_name: Name of the Qdrant collection to be used. If not provided, it will be created randomly. Default: None distance_func: Distance function. One of: "Cosine" / "Euclid" / "Dot". Default: "Cosine" content_payload_key:
Default: "Cosine" content_payload_key: A payload key used to store the content of the document. Default: "page_content" metadata_payload_key: A payload key used to store the metadata of the document. Default: "metadata" batch_size: How many vectors upload per-request. Default: 64 shard_number: Number of shards in collection. Default is 1, minimum is 1. replication_factor: Replication factor for collection. Default is 1, minimum is 1. Defines how many copies of each shard will be created. Have effect only in distributed mode. write_consistency_factor: Write consistency factor for collection. Default is 1, minimum is 1. Defines how many replicas should apply the operation for us to consider it successful. Increasing this number will make the collection more resilient to inconsistencies, but will also make it fail if not enough replicas are available. Does not have any performance impact. Have effect only in distributed mode. on_disk_payload: If true - point`s payload will not be stored in memory. It will be read from the disk every time it is requested. This setting saves RAM by (slightly) increasing the response time. Note: those payload values that are involved in filtering and are indexed - remain in RAM. hnsw_config: Params for HNSW index optimizers_config: Params for optimizer wal_config: Params for Write-Ahead-Log quantization_config: Params for quantization, if None - quantization will be disabled init_from: Use data stored in another collection to initialize this collection **kwargs: Additional arguments passed directly into REST client initialization
**kwargs: Additional arguments passed directly into REST client initialization This is a user-friendly interface that: 1. Creates embeddings, one for each text 2. Initializes the Qdrant database as an in-memory docstore by default (and overridable to a remote docstore) 3. Adds the text embeddings to the Qdrant database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Qdrant from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() qdrant = Qdrant.from_texts(texts, embeddings, "localhost") """ try: import qdrant_client except ImportError: raise ValueError( "Could not import qdrant-client python package. " "Please install it with `pip install qdrant-client`." ) from qdrant_client.http import models as rest # Just do a single quick embedding to get vector size partial_embeddings = embedding.embed_documents(texts[:1]) vector_size = len(partial_embeddings[0]) collection_name = collection_name or uuid.uuid4().hex distance_func = distance_func.upper() client = qdrant_client.QdrantClient( location=location, url=url, port=port, grpc_port=grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=api_key, prefix=prefix, timeout=timeout, host=host, path=path, **kwargs, ) client.recreate_collection( collection_name=collection_name,
) client.recreate_collection( collection_name=collection_name, vectors_config=rest.VectorParams( size=vector_size, distance=rest.Distance[distance_func], ), shard_number=shard_number, replication_factor=replication_factor, write_consistency_factor=write_consistency_factor, on_disk_payload=on_disk_payload, hnsw_config=hnsw_config, optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, init_from=init_from, timeout=timeout, # type: ignore[arg-type] ) texts_iterator = iter(texts) metadatas_iterator = iter(metadatas or []) ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)]) while batch_texts := list(islice(texts_iterator, batch_size)): # Take the corresponding metadata and id for each text in a batch batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None batch_ids = list(islice(ids_iterator, batch_size)) # Generate the embeddings for all the texts in a batch batch_embeddings = embedding.embed_documents(batch_texts) client.upsert( collection_name=collection_name, points=rest.Batch.construct( ids=batch_ids, vectors=batch_embeddings, payloads=cls._build_payloads( batch_texts, batch_metadatas, content_payload_key, metadata_payload_key, ), ), ) return cls( client=client, collection_name=collection_name, embeddings=embedding, content_payload_key=content_payload_key,
embeddings=embedding, content_payload_key=content_payload_key, metadata_payload_key=metadata_payload_key, ) @classmethod def _build_payloads( cls, texts: Iterable[str], metadatas: Optional[List[dict]], content_payload_key: str, metadata_payload_key: str, ) -> List[dict]: payloads = [] for i, text in enumerate(texts): if text is None: raise ValueError( "At least one of the texts is None. Please remove it before " "calling .from_texts or .add_texts on Qdrant instance." ) metadata = metadatas[i] if metadatas is not None else None payloads.append( { content_payload_key: text, metadata_payload_key: metadata, } ) return payloads @classmethod def _document_from_scored_point( cls, scored_point: Any, content_payload_key: str, metadata_payload_key: str, ) -> Document: return Document( page_content=scored_point.payload.get(content_payload_key), metadata=scored_point.payload.get(metadata_payload_key) or {}, ) def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]: from qdrant_client.http import models as rest out = [] if isinstance(value, dict): for _key, value in value.items(): out.extend(self._build_condition(f"{key}.{_key}", value)) elif isinstance(value, list): for _value in value: if isinstance(_value, dict):
for _value in value: if isinstance(_value, dict): out.extend(self._build_condition(f"{key}[]", _value)) else: out.extend(self._build_condition(f"{key}", _value)) else: out.append( rest.FieldCondition( key=f"{self.metadata_payload_key}.{key}", match=rest.MatchValue(value=value), ) ) return out def _qdrant_filter_from_dict( self, filter: Optional[DictFilter] ) -> Optional[rest.Filter]: from qdrant_client.http import models as rest if not filter: return None return rest.Filter( must=[ condition for key, value in filter.items() for condition in self._build_condition(key, value) ] ) def _embed_query(self, query: str) -> List[float]: """Embed query text. Used to provide backward compatibility with `embedding_function` argument. Args: query: Query text. Returns: List of floats representing the query embedding. """ if self.embeddings is not None: embedding = self.embeddings.embed_query(query) else: if self._embeddings_function is not None: embedding = self._embeddings_function(query) else: raise ValueError("Neither of embeddings or embedding_function is set") return embedding.tolist() if hasattr(embedding, "tolist") else embedding def _embed_texts(self, texts: Iterable[str]) -> List[List[float]]: """Embed search texts. Used to provide backward compatibility with `embedding_function` argument. Args: texts: Iterable of texts to embed. Returns:
List of float lists, one embedding per input text. """ if self.embeddings is not None: embeddings = self.embeddings.embed_documents(list(texts)) if hasattr(embeddings, "tolist"): embeddings = embeddings.tolist() elif self._embeddings_function is not None: embeddings = [] for text in texts: embedding = self._embeddings_function(text) if hasattr(embedding, "tolist"): embedding = embedding.tolist() embeddings.append(embedding) else: raise ValueError("Neither of embeddings or embedding_function is set") return embeddings
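To make the pieces above concrete, the common path can be sketched as follows; the corpus and collection name are placeholders, and location=":memory:" uses the ephemeral in-process mode described in the from_texts docstring.

.. code-block:: python

    from langchain import Qdrant
    from langchain.embeddings import OpenAIEmbeddings

    embeddings = OpenAIEmbeddings()
    texts = ["foo", "bar", "baz"]  # placeholder corpus

    # In-memory Qdrant instance, handy for tests; pass url= or host=
    # instead to index into a running Qdrant server.
    qdrant = Qdrant.from_texts(
        texts,
        embeddings,
        location=":memory:",
        collection_name="demo",
    )

    docs_and_scores = qdrant.similarity_search_with_score("foo", k=2)
    diverse = qdrant.max_marginal_relevance_search("foo", k=2, fetch_k=3)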
Source code for langchain.vectorstores.hologres """VectorStore wrapper around a Hologres database.""" from __future__ import annotations import json import logging import uuid from typing import Any, Dict, Iterable, List, Optional, Tuple, Type from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env from langchain.vectorstores.base import VectorStore ADA_TOKEN_COUNT = 1536 _LANGCHAIN_DEFAULT_TABLE_NAME = "langchain_pg_embedding" class HologresWrapper: def __init__(self, connection_string: str, ndims: int, table_name: str) -> None: import psycopg2 self.table_name = table_name self.conn = psycopg2.connect(connection_string) self.cursor = self.conn.cursor() self.conn.autocommit = False self.ndims = ndims def create_vector_extension(self) -> None: self.cursor.execute("create extension if not exists proxima") self.conn.commit() def create_table(self, drop_if_exist: bool = True) -> None: if drop_if_exist: self.cursor.execute(f"drop table if exists {self.table_name}") self.conn.commit() self.cursor.execute( f"""create table if not exists {self.table_name} ( id text, embedding float4[] check(array_ndims(embedding) = 1 and \ array_length(embedding, 1) = {self.ndims}), metadata json, document text);""" ) self.cursor.execute( f"call set_table_property('{self.table_name}'" + """, 'proxima_vectors', '{"embedding":{"algorithm":"Graph", "distance_method":"SquaredEuclidean",
"build_params":{"min_flush_proxima_row_count" : 1, "min_compaction_proxima_row_count" : 1, "max_total_size_to_merge_mb" : 2000}}}');""" ) self.conn.commit() def get_by_id(self, id: str) -> List[Tuple]: statement = ( f"select id, embedding, metadata, " f"document from {self.table_name} where id = %s;" ) self.cursor.execute( statement, (id,), ) self.conn.commit() return self.cursor.fetchall() def insert( self, embedding: List[float], metadata: dict, document: str, id: Optional[str] = None, ) -> None: self.cursor.execute( f'insert into "{self.table_name}" ' f"values (%s, array{json.dumps(embedding)}::float4[], %s, %s)", (id if id is not None else "null", json.dumps(metadata), document), ) self.conn.commit() def query_nearest_neighbours( self, embedding: List[float], k: int, filter: Optional[Dict[str, str]] = None ) -> List[Tuple[str, str, float]]: params = [] filter_clause = "" if filter is not None: conjuncts = [] for key, val in filter.items(): conjuncts.append("metadata->>%s=%s") params.append(key) params.append(val)
params.append(key) params.append(val) filter_clause = "where " + " and ".join(conjuncts) sql = ( f"select document, metadata::text, " f"pm_approx_squared_euclidean_distance(array{json.dumps(embedding)}" f"::float4[], embedding) as distance from" f" {self.table_name} {filter_clause} order by distance asc limit {k};" ) self.cursor.execute(sql, tuple(params)) self.conn.commit() return self.cursor.fetchall() [docs]class Hologres(VectorStore): """ VectorStore implementation using Hologres. - `connection_string` is a hologres connection string. - `embedding_function` any embedding function implementing `langchain.embeddings.base.Embeddings` interface. - `ndims` is the number of dimensions of the embedding output. - `table_name` is the name of the table to store embeddings and data. (default: langchain_pg_embedding) - NOTE: The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `pre_delete_table` if True, will delete the table if it exists. (default: False) - Useful for testing. """ def __init__( self, connection_string: str, embedding_function: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, logger: Optional[logging.Logger] = None, ) -> None: self.connection_string = connection_string
) -> None: self.connection_string = connection_string self.ndims = ndims self.table_name = table_name self.embedding_function = embedding_function self.pre_delete_table = pre_delete_table self.logger = logger or logging.getLogger(__name__) self.__post_init__() def __post_init__( self, ) -> None: """ Initialize the store. """ self.storage = HologresWrapper( self.connection_string, self.ndims, self.table_name ) self.create_vector_extension() self.create_table() [docs] def create_vector_extension(self) -> None: try: self.storage.create_vector_extension() except Exception as e: self.logger.exception(e) raise e [docs] def create_table(self) -> None: self.storage.create_table(self.pre_delete_table) @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding_function: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, embedding_function=embedding_function, ndims=ndims,
embedding_function=embedding_function, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store [docs] def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: List[dict], ids: List[str], **kwargs: Any, ) -> None: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. kwargs: vectorstore specific parameters """ try: for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): self.storage.insert(embedding, metadata, text, id) except Exception as e: self.logger.exception(e) self.storage.conn.commit() [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """
List of ids from adding the texts into the vectorstore. """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] self.add_embeddings(texts, embeddings, metadatas, ids, **kwargs) return ids [docs] def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with Hologres with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns:
Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs [docs] def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: results: List[Tuple[str, str, float]] = self.storage.query_nearest_neighbours( embedding, k, filter ) docs = [ ( Document( page_content=result[0], metadata=json.loads(result[1]), ), result[2], ) for result in results ] return docs [docs] @classmethod def from_texts(
cls: Type[Hologres], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required. Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, **kwargs, ) [docs] @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """Construct Hologres wrapper from raw documents and pre-generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required.
Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. Example: .. code-block:: python from langchain import Hologres from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) vectorstore = Hologres.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, **kwargs, ) [docs] @classmethod def from_existing_index( cls: Type[Hologres], embedding: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """ Get an instance of an existing Hologres store. This method will return the instance of the store without inserting any new embeddings. """ connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, ndims=ndims, table_name=table_name, embedding_function=embedding, pre_delete_table=pre_delete_table,
) return store [docs] @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="HOLOGRES_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required. " "Either pass it as a parameter " "or set the HOLOGRES_CONNECTION_STRING environment variable." ) return connection_string [docs] @classmethod def from_documents( cls: Type[Hologres], documents: List[Document], embedding: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """ Return VectorStore initialized from documents and embeddings. Postgres connection string is required. Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_table=pre_delete_table, embedding=embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name,
**kwargs, ) [docs] @classmethod def connection_string_from_db_params( cls, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return ( f"dbname={database} user={user} password={password} host={host} port={port}" )
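As a quick orientation to the Hologres wrapper above, here is a minimal usage sketch. It assumes a running Hologres instance whose connection string is exported as HOLOGRES_CONNECTION_STRING; the embedding model, sample texts and metadata are illustrative placeholders, not part of the original source.

.. code-block:: python

    from langchain import Hologres
    from langchain.embeddings import OpenAIEmbeddings

    # from_texts embeds the texts, creates the table if needed and inserts rows.
    store = Hologres.from_texts(
        texts=["Hologres is a real-time data warehouse", "It speaks PostgreSQL"],
        embedding=OpenAIEmbeddings(),
        metadatas=[{"source": "intro"}, {"source": "intro"}],
    )
    # similarity_search embeds the query and delegates to
    # similarity_search_with_score_by_vector under the hood.
    docs = store.similarity_search("what is Hologres?", k=2)

Note that from_documents routes through from_texts, so the same connection handling (explicit parameter or environment variable) applies to both entry points.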
Source code for langchain.vectorstores.sklearn """ Wrapper around scikit-learn NearestNeighbors implementation. The vector store can be persisted in json, bson or parquet format. """ import json import math import os from abc import ABC, abstractmethod from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple, Type from uuid import uuid4 from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import guard_import from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance DEFAULT_K = 4 # Number of Documents to return. DEFAULT_FETCH_K = 20 # Number of Documents to initially fetch during MMR search. class BaseSerializer(ABC): """Abstract base class for saving and loading data.""" def __init__(self, persist_path: str) -> None: self.persist_path = persist_path @classmethod @abstractmethod def extension(cls) -> str: """The file extension suggested by this serializer (without dot).""" @abstractmethod def save(self, data: Any) -> None: """Saves the data to the persist_path""" @abstractmethod def load(self) -> Any: """Loads the data from the persist_path""" class JsonSerializer(BaseSerializer): """Serializes data in json using the json package from python standard library.""" @classmethod def extension(cls) -> str: return "json" def save(self, data: Any) -> None: with open(self.persist_path, "w") as fp: json.dump(data, fp) def load(self) -> Any: with open(self.persist_path, "r") as fp:
return json.load(fp) class BsonSerializer(BaseSerializer): """Serializes data in binary json using the bson python package.""" def __init__(self, persist_path: str) -> None: super().__init__(persist_path) self.bson = guard_import("bson") @classmethod def extension(cls) -> str: return "bson" def save(self, data: Any) -> None: with open(self.persist_path, "wb") as fp: fp.write(self.bson.dumps(data)) def load(self) -> Any: with open(self.persist_path, "rb") as fp: return self.bson.loads(fp.read()) class ParquetSerializer(BaseSerializer): """Serializes data in Apache Parquet format using the pyarrow package.""" def __init__(self, persist_path: str) -> None: super().__init__(persist_path) self.pd = guard_import("pandas") self.pa = guard_import("pyarrow") self.pq = guard_import("pyarrow.parquet") @classmethod def extension(cls) -> str: return "parquet" def save(self, data: Any) -> None: df = self.pd.DataFrame(data) table = self.pa.Table.from_pandas(df) if os.path.exists(self.persist_path): backup_path = str(self.persist_path) + "-backup" os.rename(self.persist_path, backup_path) try: self.pq.write_table(table, self.persist_path) except Exception as exc: os.rename(backup_path, self.persist_path) raise exc else: os.remove(backup_path)
else: self.pq.write_table(table, self.persist_path) def load(self) -> Any: table = self.pq.read_table(self.persist_path) df = table.to_pandas() return {col: series.tolist() for col, series in df.items()} SERIALIZER_MAP: Dict[str, Type[BaseSerializer]] = { "json": JsonSerializer, "bson": BsonSerializer, "parquet": ParquetSerializer, } class SKLearnVectorStoreException(RuntimeError): pass [docs]class SKLearnVectorStore(VectorStore): """A simple in-memory vector store based on the scikit-learn library NearestNeighbors implementation.""" def __init__( self, embedding: Embeddings, *, persist_path: Optional[str] = None, serializer: Literal["json", "bson", "parquet"] = "json", metric: str = "cosine", **kwargs: Any, ) -> None: np = guard_import("numpy") sklearn_neighbors = guard_import("sklearn.neighbors", pip_name="scikit-learn") # non-persistent properties self._np = np self._neighbors = sklearn_neighbors.NearestNeighbors(metric=metric, **kwargs) self._neighbors_fitted = False self._embedding_function = embedding self._persist_path = persist_path self._serializer: Optional[BaseSerializer] = None if self._persist_path is not None: serializer_cls = SERIALIZER_MAP[serializer] self._serializer = serializer_cls(persist_path=self._persist_path) # data properties
self._embeddings: List[List[float]] = [] self._texts: List[str] = [] self._metadatas: List[dict] = [] self._ids: List[str] = [] # cache properties self._embeddings_np: Any = np.asarray([]) if self._persist_path is not None and os.path.isfile(self._persist_path): self._load() [docs] def persist(self) -> None: if self._serializer is None: raise SKLearnVectorStoreException( "You must specify a persist_path on creation to persist the " "collection." ) data = { "ids": self._ids, "texts": self._texts, "metadatas": self._metadatas, "embeddings": self._embeddings, } self._serializer.save(data) def _load(self) -> None: if self._serializer is None: raise SKLearnVectorStoreException( "You must specify a persist_path on creation to load the " "collection." ) data = self._serializer.load() self._embeddings = data["embeddings"] self._texts = data["texts"] self._metadatas = data["metadatas"] self._ids = data["ids"] self._update_neighbors() [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: _texts = list(texts)
_ids = ids or [str(uuid4()) for _ in _texts] self._texts.extend(_texts) self._embeddings.extend(self._embedding_function.embed_documents(_texts)) self._metadatas.extend(metadatas or ([{}] * len(_texts))) self._ids.extend(_ids) self._update_neighbors() return _ids def _update_neighbors(self) -> None: if len(self._embeddings) == 0: raise SKLearnVectorStoreException( "No data was added to SKLearnVectorStore." ) self._embeddings_np = self._np.asarray(self._embeddings) self._neighbors.fit(self._embeddings_np) self._neighbors_fitted = True def _similarity_index_search_with_score( self, query_embedding: List[float], *, k: int = DEFAULT_K, **kwargs: Any ) -> List[Tuple[int, float]]: """Search k embeddings similar to the query embedding. Returns a list of (index, distance) tuples.""" if not self._neighbors_fitted: raise SKLearnVectorStoreException( "No data was added to SKLearnVectorStore." ) neigh_dists, neigh_idxs = self._neighbors.kneighbors( [query_embedding], n_neighbors=k ) return list(zip(neigh_idxs[0], neigh_dists[0])) [docs] def similarity_search_with_score( self, query: str, *, k: int = DEFAULT_K, **kwargs: Any ) -> List[Tuple[Document, float]]: query_embedding = self._embedding_function.embed_query(query)
indices_dists = self._similarity_index_search_with_score( query_embedding, k=k, **kwargs ) return [ ( Document( page_content=self._texts[idx], metadata={"id": self._ids[idx], **self._metadatas[idx]}, ), dist, ) for idx, dist in indices_dists ] [docs] def similarity_search( self, query: str, k: int = DEFAULT_K, **kwargs: Any ) -> List[Document]: docs_scores = self.similarity_search_with_score(query, k=k, **kwargs) return [doc for doc, _ in docs_scores] def _similarity_search_with_relevance_scores( self, query: str, k: int = DEFAULT_K, **kwargs: Any ) -> List[Tuple[Document, float]]: docs_dists = self.similarity_search_with_score(query, k=k, **kwargs) docs, dists = zip(*docs_dists) # Map each distance d to a relevance score in (0, 1] via exp(-d). scores = [1 / math.exp(dist) for dist in dists] return list(zip(list(docs), scores)) [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ indices_dists = self._similarity_index_search_with_score( embedding, k=fetch_k, **kwargs ) indices, _ = zip(*indices_dists) result_embeddings = self._embeddings_np[indices,] mmr_selected = maximal_marginal_relevance( self._np.array(embedding, dtype=self._np.float32), result_embeddings, k=k, lambda_mult=lambda_mult, ) mmr_indices = [indices[i] for i in mmr_selected] return [ Document( page_content=self._texts[idx], metadata={"id": self._ids[idx], **self._metadatas[idx]}, ) for idx in mmr_indices ] [docs] def max_marginal_relevance_search( self, query: str, k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args:
query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding_function is None: raise ValueError( "For MMR search, you must specify an embedding function on creation." ) embedding = self._embedding_function.embed_query(query) docs = self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult=lambda_mult ) return docs [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, persist_path: Optional[str] = None, **kwargs: Any, ) -> "SKLearnVectorStore": vs = SKLearnVectorStore(embedding, persist_path=persist_path, **kwargs) vs.add_texts(texts, metadatas=metadatas, ids=ids) return vs
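Pulling the pieces of SKLearnVectorStore together, the sketch below shows the persist/load round trip. The embedding model and file path are illustrative assumptions; any Embeddings implementation works, and the on-disk format is chosen by the serializer argument ("json" by default).

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores.sklearn import SKLearnVectorStore

    store = SKLearnVectorStore.from_texts(
        texts=["first document", "second document"],
        embedding=OpenAIEmbeddings(),
        persist_path="/tmp/sklearn_store.json",  # hypothetical path
    )
    docs = store.max_marginal_relevance_search("document", k=1, fetch_k=2)
    store.persist()  # writes ids, texts, metadatas and embeddings to persist_path

    # A fresh instance pointed at the same path reloads the data on construction.
    restored = SKLearnVectorStore(OpenAIEmbeddings(), persist_path="/tmp/sklearn_store.json")

Because _similarity_search_with_relevance_scores converts a distance d to exp(-d), relevance scores fall in (0, 1], with 1 meaning a zero-distance match.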
Source code for langchain.vectorstores.myscale """Wrapper around MyScale vector database.""" from __future__ import annotations import json import logging from hashlib import sha1 from threading import Thread from typing import Any, Dict, Iterable, List, Optional, Tuple from pydantic import BaseSettings from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore logger = logging.getLogger() def has_mul_sub_str(s: str, *args: Any) -> bool: """Return True if all given substrings occur in s.""" for a in args: if a not in s: return False return True [docs]class MyScaleSettings(BaseSettings): """MyScale Client Configuration. Attributes: myscale_host (str) : A URL to connect to the MyScale backend. Defaults to 'localhost'. myscale_port (int) : URL port to connect with HTTP. Defaults to 8443. username (str) : Username to log in. Defaults to None. password (str) : Password to log in. Defaults to None. index_type (str): index type string. index_param (dict): index build parameters. database (str) : Database name to find the table. Defaults to 'default'. table (str) : Table name to operate on. Defaults to 'langchain'. metric (str) : Metric to compute distance, supported are ('l2', 'cosine', 'ip'). Defaults to 'cosine'. column_map (Dict) : Column type map to project column names onto langchain semantics. Must have keys: `text`, `id`, `vector`, and must be the same size as the number of columns. For example: .. code-block:: python {
'id': 'text_id', 'vector': 'text_embedding', 'text': 'text_plain', 'metadata': 'metadata_dictionary_in_json', } Defaults to identity map. """ host: str = "localhost" port: int = 8443 username: Optional[str] = None password: Optional[str] = None index_type: str = "IVFFLAT" index_param: Optional[Dict[str, str]] = None column_map: Dict[str, str] = { "id": "id", "text": "text", "vector": "vector", "metadata": "metadata", } database: str = "default" table: str = "langchain" metric: str = "cosine" def __getitem__(self, item: str) -> Any: return getattr(self, item) class Config: env_file = ".env" env_prefix = "myscale_" env_file_encoding = "utf-8" [docs]class MyScale(VectorStore): """Wrapper around MyScale vector database. You need the `clickhouse-connect` python package and a valid account to connect to MyScale. MyScale can not only search with simple vector indexes; it also supports complex queries with multiple conditions, constraints and even sub-queries. For more information, please visit the [myscale official site](https://docs.myscale.com/en/overview/) """ def __init__( self, embedding: Embeddings, config: Optional[MyScaleSettings] = None, **kwargs: Any,
) -> None: """MyScale wrapper to LangChain. Args: embedding (Embeddings): Function used to embed queries and documents. config (MyScaleSettings): Configuration of the MyScale client. Other keyword arguments will be passed to the [clickhouse-connect](https://docs.myscale.com/) client. """ try: from clickhouse_connect import get_client except ImportError: raise ValueError( "Could not import clickhouse-connect python package. " "Please install it with `pip install clickhouse-connect`." ) try: from tqdm import tqdm self.pgbar = tqdm except ImportError: # Fall back to a no-op progress bar if tqdm is not installed self.pgbar = lambda x: x super().__init__() if config is not None: self.config = config else: self.config = MyScaleSettings() assert self.config assert self.config.host and self.config.port assert ( self.config.column_map and self.config.database and self.config.table and self.config.metric ) for k in ["id", "vector", "text", "metadata"]: assert k in self.config.column_map assert self.config.metric in ["ip", "cosine", "l2"] # initialize the schema; probe the embedding dimension with a dummy query dim = len(embedding.embed_query("try this out")) index_params = ( ", " + ",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()]) if self.config.index_param else "" ) schema_ = f""" CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} String, {self.config.column_map['text']} String, {self.config.column_map['vector']} Array(Float32), {self.config.column_map['metadata']} JSON, CONSTRAINT cons_vec_len CHECK length(\ {self.config.column_map['vector']}) = {dim}, VECTOR INDEX vidx {self.config.column_map['vector']} \ TYPE {self.config.index_type}(\ 'metric_type={self.config.metric}'{index_params}) ) ENGINE = MergeTree ORDER BY {self.config.column_map['id']} """ self.dim = dim self.BS = "\\" self.must_escape = ("\\", "'") self.embedding_function = embedding.embed_query self.dist_order = "ASC" if self.config.metric in ["cosine", "l2"] else "DESC" # Create a connection to myscale self.client = get_client( host=self.config.host, port=self.config.port, username=self.config.username, password=self.config.password, **kwargs, ) self.client.command("SET allow_experimental_object_type=1") self.client.command(schema_) [docs] def escape_str(self, value: str) -> str: return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value) def _build_istr(self, transac: Iterable, column_names: Iterable[str]) -> str: ks = ",".join(column_names) _data = [] for n in transac: n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n])
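The excerpt above ends inside _build_istr, but the constructor and settings are complete, so a minimal construction sketch is still possible. The host and credentials below are hypothetical placeholders; equivalently, each field can be supplied through myscale_*-prefixed environment variables or a .env file, per the Config class of MyScaleSettings.

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores.myscale import MyScale, MyScaleSettings

    # Explicit settings; omitting config falls back to MyScaleSettings(),
    # which reads host, port, etc. from the environment.
    config = MyScaleSettings(
        host="msc-example.aws.myscale.com",  # hypothetical endpoint
        port=8443,
        username="user",
        password="password",
    )
    store = MyScale(embedding=OpenAIEmbeddings(), config=config)

Constructing the wrapper probes the embedding dimension with a dummy query and issues the CREATE TABLE statement shown above, so a reachable MyScale instance is required at init time.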