Dataset columns: id (string, 14-16 characters), text (string, 31-2.41k characters), source (string, 53-121 characters)
725ad95173bc-4
"""Call out to Aleph Alpha's Document endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ document_embeddings = [] for text in texts: document_embeddings.append(self._embed(text)) return document_embeddings [docs] def embed_query(self, text: str) -> List[float]: """Call out to Aleph Alpha's asymmetric, query embedding endpoint Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embed(text)
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/aleph_alpha.html
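The fragment above shows the tail of the Aleph Alpha wrapper: embed_documents loops _embed over each text, while embed_query embeds a single string against the asymmetric query endpoint. A minimal, self-contained sketch of that per-text pattern, with a hypothetical _embed stub standing in for the real Aleph Alpha client call:

    from typing import List

    from langchain.embeddings.base import Embeddings


    class PerTextEmbeddings(Embeddings):
        """Illustrative only: embeds texts one call at a time."""

        def _embed(self, text: str) -> List[float]:
            # Placeholder for a real API call; returns a dummy vector.
            return [float(len(text)), 0.0]

        def embed_documents(self, texts: List[str]) -> List[List[float]]:
            return [self._embed(text) for text in texts]

        def embed_query(self, text: str) -> List[float]:
            return self._embed(text)


    emb = PerTextEmbeddings()
    doc_vectors = emb.embed_documents(["first text", "second text"])
    query_vector = emb.embed_query("a query")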
3dbce31cee9c-0
Source code for langchain.embeddings.elasticsearch from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from langchain.utils import get_from_env if TYPE_CHECKING: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient from langchain.embeddings.base import Embeddings [docs]class ElasticsearchEmbeddings(Embeddings): """ Wrapper around Elasticsearch embedding models. This class provides an interface to generate embeddings using a model deployed in an Elasticsearch cluster. It requires an Elasticsearch connection object and the model_id of the model deployed in the cluster. In Elasticsearch you need to have an embedding model loaded and deployed. - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html """ # noqa: E501 def __init__( self, client: MlClient, model_id: str, *, input_field: str = "text_field", ): """ Initialize the ElasticsearchEmbeddings instance. Args: client (MlClient): An Elasticsearch ML client object. model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. """ self.client = client self.model_id = model_id self.input_field = input_field [docs] @classmethod def from_credentials( cls, model_id: str, *, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None,
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/elasticsearch.html
3dbce31cee9c-1
es_user: Optional[str] = None, es_password: Optional[str] = None, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """Instantiate embeddings from Elasticsearch credentials. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. es_cloud_id (str, optional): The Elasticsearch cloud ID to connect to. es_user (str, optional): Elasticsearch username. es_password (str, optional): Elasticsearch password. Example: .. code-block:: python from langchain.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Credentials can be passed in two ways. Either set the env vars # ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically # pulled in, or pass them in directly as kwargs. embeddings = ElasticsearchEmbeddings.from_credentials( model_id, input_field=input_field, # es_cloud_id="foo", # es_user="bar", # es_password="baz", ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] document_embeddings = embeddings.embed_documents(documents) """ try: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient except ImportError: raise ImportError(
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/elasticsearch.html
3dbce31cee9c-2
from elasticsearch.client import MlClient except ImportError: raise ImportError( "elasticsearch package not found, please install with 'pip install " "elasticsearch'" ) es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID") es_user = es_user or get_from_env("es_user", "ES_USER") es_password = es_password or get_from_env("es_password", "ES_PASSWORD") # Connect to Elasticsearch es_connection = Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) client = MlClient(es_connection) return cls(client, model_id, input_field=input_field) [docs] @classmethod def from_es_connection( cls, model_id: str, es_connection: Elasticsearch, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """ Instantiate embeddings from an existing Elasticsearch connection. This method provides a way to create an instance of the ElasticsearchEmbeddings class using an existing Elasticsearch connection. The connection object is used to create an MlClient, which is then used to initialize the ElasticsearchEmbeddings instance. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch connection object. input_field (str, optional): The name of the key for the input text field in the document. Defaults to 'text_field'. Returns: ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class. Example: .. code-block:: python from elasticsearch import Elasticsearch
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/elasticsearch.html
3dbce31cee9c-3
Example: .. code-block:: python from elasticsearch import Elasticsearch from langchain.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Create Elasticsearch connection es_connection = Elasticsearch( hosts=["localhost:9200"], http_auth=("user", "password") ) # Instantiate ElasticsearchEmbeddings using the existing connection embeddings = ElasticsearchEmbeddings.from_es_connection( model_id, es_connection, input_field=input_field, ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] document_embeddings = embeddings.embed_documents(documents) """ # Importing MlClient from elasticsearch.client within the method to # avoid unnecessary import if the method is not used from elasticsearch.client import MlClient # Create an MlClient from the given Elasticsearch connection client = MlClient(es_connection) # Return a new instance of the ElasticsearchEmbeddings class with # the MlClient, model_id, and input_field return cls(client, model_id, input_field=input_field) def _embedding_func(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for the given texts using the Elasticsearch model. Args: texts (List[str]): A list of text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each text in the input list. """ response = self.client.infer_trained_model(
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/elasticsearch.html
3dbce31cee9c-4
list. """ response = self.client.infer_trained_model( model_id=self.model_id, docs=[{self.input_field: text} for text in texts] ) embeddings = [doc["predicted_value"] for doc in response["inference_results"]] return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for a list of documents. Args: texts (List[str]): A list of document text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each document in the input list. """ return self._embedding_func(texts) [docs] def embed_query(self, text: str) -> List[float]: """ Generate an embedding for a single query text. Args: text (str): The query text to generate an embedding for. Returns: List[float]: The embedding for the input query text. """ return self._embedding_func([text])[0]
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/elasticsearch.html
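A short usage sketch assembled from the class above; the model_id and credential values are placeholders, and it assumes a text-embedding model is already deployed in the cluster:

    import os

    from langchain.embeddings import ElasticsearchEmbeddings

    # Credentials can be provided via env vars or as keyword arguments.
    os.environ.setdefault("ES_CLOUD_ID", "your-cloud-id")
    os.environ.setdefault("ES_USER", "elastic")
    os.environ.setdefault("ES_PASSWORD", "your-password")

    embeddings = ElasticsearchEmbeddings.from_credentials("your_model_id")
    query_vector = embeddings.embed_query("What is an embedding?")
    doc_vectors = embeddings.embed_documents(["First document.", "Second document."])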
81b5636e1f74-0
Source code for langchain.embeddings.cohere """Wrapper around Cohere embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env [docs]class CohereEmbeddings(BaseModel, Embeddings): """Wrapper around Cohere embedding models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import CohereEmbeddings cohere = CohereEmbeddings( model="embed-english-light-v2.0", cohere_api_key="my-api-key" ) """ client: Any #: :meta private: model: str = "embed-english-v2.0" """Model name to use.""" truncate: Optional[str] = None """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")""" cohere_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ValueError(
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/cohere.html
81b5636e1f74-1
except ImportError: raise ValueError( "Could not import cohere python package. " "Please install it with `pip install cohere`." ) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to Cohere's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = self.client.embed( model=self.model, texts=texts, truncate=self.truncate ).embeddings return [list(map(float, e)) for e in embeddings] [docs] def embed_query(self, text: str) -> List[float]: """Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ embedding = self.client.embed( model=self.model, texts=[text], truncate=self.truncate ).embeddings[0] return list(map(float, embedding))
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/cohere.html
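A brief usage sketch based on the fields shown above; the API key is a placeholder and the model name is the class default:

    from langchain.embeddings import CohereEmbeddings

    cohere_embeddings = CohereEmbeddings(
        model="embed-english-v2.0",
        truncate="END",  # clip over-long inputs ("NONE" | "START" | "END")
        cohere_api_key="my-api-key",
    )
    doc_vectors = cohere_embeddings.embed_documents(["Hello world", "Goodbye world"])
    query_vector = cohere_embeddings.embed_query("greeting")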
4b92cc5c07ca-0
Source code for langchain.embeddings.dashscope """Wrapper around DashScope embedding models.""" from __future__ import annotations import logging from typing import ( Any, Callable, Dict, List, Optional, ) from pydantic import BaseModel, Extra, root_validator from requests.exceptions import HTTPError from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: DashScopeEmbeddings) -> Callable[[Any], Any]: multiplier = 1 min_seconds = 1 max_seconds = 4 # Wait 2^x * 1 second between each retry starting with # 1 seconds, then up to 4 seconds, then 4 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(HTTPError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: resp = embeddings.client.call(**kwargs) if resp.status_code == 200: return resp.output["embeddings"] elif resp.status_code in [400, 401]:
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/dashscope.html
4b92cc5c07ca-1
elif resp.status_code in [400, 401]: raise ValueError( f"status_code: {resp.status_code} \n " f"code: {resp.code} \n message: {resp.message}" ) else: raise HTTPError( f"HTTP error occurred: status_code: {resp.status_code} \n " f"code: {resp.code} \n message: {resp.message}" ) return _embed_with_retry(**kwargs) [docs]class DashScopeEmbeddings(BaseModel, Embeddings): """Wrapper around DashScope embedding models. To use, you should have the ``dashscope`` python package installed, and the environment variable ``DASHSCOPE_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import DashScopeEmbeddings embeddings = DashScopeEmbeddings(dashscope_api_key="my-api-key") Example: .. code-block:: python import os os.environ["DASHSCOPE_API_KEY"] = "your DashScope API KEY" from langchain.embeddings.dashscope import DashScopeEmbeddings embeddings = DashScopeEmbeddings( model="text-embedding-v1", ) text = "This is a test query." query_result = embeddings.embed_query(text) """ client: Any #: :meta private: model: str = "text-embedding-v1" dashscope_api_key: Optional[str] = None """Maximum number of retries to make when generating.""" max_retries: int = 5 class Config: """Configuration for this pydantic object."""
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/dashscope.html
4b92cc5c07ca-2
class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: import dashscope """Validate that api key and python package exists in environment.""" values["dashscope_api_key"] = get_from_dict_or_env( values, "dashscope_api_key", "DASHSCOPE_API_KEY" ) dashscope.api_key = values["dashscope_api_key"] try: import dashscope values["client"] = dashscope.TextEmbedding except ImportError: raise ImportError( "Could not import dashscope python package. " "Please install it with `pip install dashscope`." ) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to DashScope's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ embeddings = embed_with_retry( self, input=texts, text_type="document", model=self.model ) embedding_list = [item["embedding"] for item in embeddings] return embedding_list [docs] def embed_query(self, text: str) -> List[float]: """Call out to DashScope's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = embed_with_retry(
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/dashscope.html
4b92cc5c07ca-3
Embedding for the text. """ embedding = embed_with_retry( self, input=text, text_type="query", model=self.model )[0]["embedding"] return embedding
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/dashscope.html
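A brief usage sketch based on the class above; the API key is a placeholder, and max_retries controls the tenacity backoff applied in embed_with_retry:

    from langchain.embeddings import DashScopeEmbeddings

    embeddings = DashScopeEmbeddings(
        model="text-embedding-v1",
        dashscope_api_key="my-api-key",
        max_retries=3,  # transient HTTP errors are retried with exponential backoff
    )
    doc_vectors = embeddings.embed_documents(["doc one", "doc two"])  # text_type="document"
    query_vector = embeddings.embed_query("a test query")             # text_type="query"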
10646c643030-0
Source code for langchain.embeddings.modelscope_hub """Wrapper around ModelScopeHub embedding models.""" from typing import Any, List from pydantic import BaseModel, Extra from langchain.embeddings.base import Embeddings [docs]class ModelScopeEmbeddings(BaseModel, Embeddings): """Wrapper around modelscope_hub embedding models. To use, you should have the ``modelscope`` python package installed. Example: .. code-block:: python from langchain.embeddings import ModelScopeEmbeddings model_id = "damo/nlp_corom_sentence-embedding_english-base" embed = ModelScopeEmbeddings(model_id=model_id) """ embed: Any model_id: str = "damo/nlp_corom_sentence-embedding_english-base" """Model name to use.""" def __init__(self, **kwargs: Any): """Initialize the modelscope pipeline.""" super().__init__(**kwargs) try: from modelscope.pipelines import pipeline from modelscope.utils.constant import Tasks self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id) except ImportError as e: raise ImportError( "Could not import the modelscope python package. " "Please install it with `pip install modelscope`." ) from e class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a modelscope embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts))
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/modelscope_hub.html
10646c643030-1
texts = list(map(lambda x: x.replace("\n", " "), texts)) inputs = {"source_sentence": texts} embeddings = self.embed(input=inputs)["text_embedding"] return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a modelscope embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") inputs = {"source_sentence": [text]} embedding = self.embed(input=inputs)["text_embedding"][0] return embedding.tolist()
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/modelscope_hub.html
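A brief usage sketch; the model id is the class default and the modelscope package must be installed locally:

    from langchain.embeddings import ModelScopeEmbeddings

    embed = ModelScopeEmbeddings(model_id="damo/nlp_corom_sentence-embedding_english-base")
    doc_vectors = embed.embed_documents(["This is a test document."])
    query_vector = embed.embed_query("This is a test query.")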
72258ea6fc8a-0
Source code for langchain.embeddings.minimax """Wrapper around MiniMax APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional import requests from pydantic import BaseModel, Extra, root_validator from tenacity import ( before_sleep_log, retry, stop_after_attempt, wait_exponential, ) from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator() -> Callable[[Any], Any]: """Returns a tenacity retry decorator.""" multiplier = 1 min_seconds = 1 max_seconds = 4 max_retries = 6 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), before_sleep=before_sleep_log(logger, logging.WARNING), ) def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _embed_with_retry(*args: Any, **kwargs: Any) -> Any: return embeddings.embed(*args, **kwargs) return _embed_with_retry(*args, **kwargs) [docs]class MiniMaxEmbeddings(BaseModel, Embeddings): """Wrapper around MiniMax's embedding inference service. To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and ``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to the constructor.
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/minimax.html
72258ea6fc8a-1
the constructor. Example: .. code-block:: python from langchain.embeddings import MiniMaxEmbeddings embeddings = MiniMaxEmbeddings() query_text = "This is a test query." query_result = embeddings.embed_query(query_text) document_text = "This is a test document." document_result = embeddings.embed_documents([document_text]) """ endpoint_url: str = "https://api.minimax.chat/v1/embeddings" """Endpoint URL to use.""" model: str = "embo-01" """Embeddings model name to use.""" embed_type_db: str = "db" """For embed_documents""" embed_type_query: str = "query" """For embed_query""" minimax_group_id: Optional[str] = None """Group ID for MiniMax API.""" minimax_api_key: Optional[str] = None """API Key for MiniMax API.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that group id and api key exists in environment.""" minimax_group_id = get_from_dict_or_env( values, "minimax_group_id", "MINIMAX_GROUP_ID" ) minimax_api_key = get_from_dict_or_env( values, "minimax_api_key", "MINIMAX_API_KEY" ) values["minimax_group_id"] = minimax_group_id values["minimax_api_key"] = minimax_api_key return values def embed( self, texts: List[str], embed_type: str,
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/minimax.html
72258ea6fc8a-2
self, texts: List[str], embed_type: str, ) -> List[List[float]]: payload = { "model": self.model, "type": embed_type, "texts": texts, } # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.minimax_api_key}", "Content-Type": "application/json", } params = { "GroupId": self.minimax_group_id, } # send request response = requests.post( self.endpoint_url, params=params, headers=headers, json=payload ) parsed_response = response.json() # check for errors if parsed_response["base_resp"]["status_code"] != 0: raise ValueError( f"MiniMax API returned an error: {parsed_response['base_resp']}" ) embeddings = parsed_response["vectors"] return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a MiniMax embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a MiniMax embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ embeddings = embed_with_retry( self, texts=[text], embed_type=self.embed_type_query ) return embeddings[0]
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/minimax.html
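A brief usage sketch based on the class above; the group id and API key values are placeholders and can also be passed as constructor keyword arguments:

    import os

    from langchain.embeddings import MiniMaxEmbeddings

    os.environ["MINIMAX_GROUP_ID"] = "your-group-id"
    os.environ["MINIMAX_API_KEY"] = "your-api-key"

    embeddings = MiniMaxEmbeddings()
    doc_vectors = embeddings.embed_documents(["This is a test document."])  # embed type "db"
    query_vector = embeddings.embed_query("This is a test query.")          # embed type "query"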
281f36154d82-0
Source code for langchain.embeddings.sagemaker_endpoint """Wrapper around Sagemaker InvokeEndpoint API.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.llms.sagemaker_endpoint import ContentHandlerBase class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]): """Content handler for embedding models.""" [docs]class SagemakerEndpointEmbeddings(BaseModel, Embeddings): """Wrapper around custom Sagemaker Inference Endpoints. To use, you must supply the endpoint name from your deployed Sagemaker model & the region where it is deployed. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html """ """ Example: .. code-block:: python from langchain.embeddings import SagemakerEndpointEmbeddings endpoint_name = ( "my-endpoint-name" ) region_name = ( "us-west-2" ) credentials_profile_name = ( "default" ) se = SagemakerEndpointEmbeddings( endpoint_name=endpoint_name, region_name=region_name, credentials_profile_name=credentials_profile_name ) """
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/sagemaker_endpoint.html
281f36154d82-1
credentials_profile_name=credentials_profile_name ) """ client: Any #: :meta private: endpoint_name: str = "" """The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.""" region_name: str = "" """The aws region where the Sagemaker model is deployed, eg. `us-west-2`.""" credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ content_handler: EmbeddingsContentHandler """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ """ Example: .. code-block:: python from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler class ContentHandler(EmbeddingsContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes: input_str = json.dumps({prompts: prompts, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> List[List[float]]: response_json = json.loads(output.read().decode("utf-8")) return response_json["vectors"] """ # noqa: E501 model_kwargs: Optional[Dict] = None
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/sagemaker_endpoint.html
281f36154d82-2
""" # noqa: E501 model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ValueError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) return values def _embedding_func(self, texts: List[str]) -> List[List[float]]: """Call out to SageMaker Inference embedding endpoint.""" # replace newlines, which can negatively affect performance.
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/sagemaker_endpoint.html
281f36154d82-3
# replace newlines, which can negatively affect performance. texts = list(map(lambda x: x.replace("\n", " "), texts)) _model_kwargs = self.model_kwargs or {} _endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(texts, _model_kwargs) content_type = self.content_handler.content_type accepts = self.content_handler.accepts # send request try: response = self.client.invoke_endpoint( EndpointName=self.endpoint_name, Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs, ) except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") return self.content_handler.transform_output(response["Body"]) [docs] def embed_documents( self, texts: List[str], chunk_size: int = 64 ) -> List[List[float]]: """Compute doc embeddings using a SageMaker Inference Endpoint. Args: texts: The list of texts to embed. chunk_size: The chunk size defines how many input texts will be grouped together as request. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ results = [] _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size for i in range(0, len(texts), _chunk_size): response = self._embedding_func(texts[i : i + _chunk_size]) results.extend(response) return results [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a SageMaker inference endpoint.
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/sagemaker_endpoint.html
281f36154d82-4
"""Compute query embeddings using a SageMaker inference endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func([text])[0]
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/sagemaker_endpoint.html
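A usage sketch combining the pieces above. The JSON keys used by the handler ("inputs" and "vectors") are assumptions that depend entirely on how the model was deployed; the endpoint name, region, and profile are placeholders:

    import json
    from typing import Dict, List

    from langchain.embeddings import SagemakerEndpointEmbeddings
    from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler


    class JsonContentHandler(EmbeddingsContentHandler):
        """Assumes the endpoint accepts {"inputs": [...]} and returns {"vectors": [...]}."""

        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
            return json.dumps({"inputs": prompts, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> List[List[float]]:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json["vectors"]


    embeddings = SagemakerEndpointEmbeddings(
        endpoint_name="my-endpoint-name",      # placeholder
        region_name="us-west-2",               # placeholder
        credentials_profile_name="default",    # placeholder
        content_handler=JsonContentHandler(),
    )
    # chunk_size controls how many texts are sent per invoke_endpoint call.
    doc_vectors = embeddings.embed_documents(["doc one", "doc two"], chunk_size=2)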
0fdc280c40cc-0
Source code for langchain.embeddings.mosaicml """Wrapper around MosaicML APIs.""" from typing import Any, Dict, List, Mapping, Optional, Tuple import requests from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env [docs]class MosaicMLInstructorEmbeddings(BaseModel, Embeddings): """Wrapper around MosaicML's embedding inference service. To use, you should have the environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import MosaicMLInstructorEmbeddings endpoint_url = ( "https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict" ) mosaic_embeddings = MosaicMLInstructorEmbeddings( endpoint_url=endpoint_url, mosaicml_api_token="my-api-key" ) """ endpoint_url: str = ( "https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict" ) """Endpoint URL to use.""" embed_instruction: str = "Represent the document for retrieval: " """Instruction used to embed documents.""" query_instruction: str = ( "Represent the question for retrieving supporting documents: " ) """Instruction used to embed the query.""" retry_sleep: float = 1.0 """How long to sleep if a rate limit is encountered.""" mosaicml_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/mosaicml.html
0fdc280c40cc-1
"""Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env( values, "mosaicml_api_token", "MOSAICML_API_TOKEN" ) values["mosaicml_api_token"] = mosaicml_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"endpoint_url": self.endpoint_url} def _embed( self, input: List[Tuple[str, str]], is_retry: bool = False ) -> List[List[float]]: payload = {"input_strings": input} # HTTP headers for authorization headers = { "Authorization": f"{self.mosaicml_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post(self.endpoint_url, headers=headers, json=payload) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") try: parsed_response = response.json() if "error" in parsed_response: # if we get rate limited, try sleeping for 1 second if ( not is_retry and "rate limit exceeded" in parsed_response["error"].lower() ): import time time.sleep(self.retry_sleep) return self._embed(input, is_retry=True) raise ValueError( f"Error raised by inference API: {parsed_response['error']}" )
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/mosaicml.html
0fdc280c40cc-2
f"Error raised by inference API: {parsed_response['error']}" ) # The inference API has changed a couple of times, so we add some handling # to be robust to multiple response formats. if isinstance(parsed_response, dict): if "data" in parsed_response: output_item = parsed_response["data"] elif "output" in parsed_response: output_item = parsed_response["output"] else: raise ValueError( f"No key data or output in response: {parsed_response}" ) if isinstance(output_item, list) and isinstance(output_item[0], list): embeddings = output_item else: embeddings = [output_item] elif isinstance(parsed_response, list): first_item = parsed_response[0] if isinstance(first_item, list): embeddings = parsed_response elif isinstance(first_item, dict): if "output" in first_item: embeddings = [item["output"] for item in parsed_response] else: raise ValueError( f"No key data or output in response: {parsed_response}" ) else: raise ValueError(f"Unexpected response format: {parsed_response}") else: raise ValueError(f"Unexpected response type: {parsed_response}") except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {response.text}" ) return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a MosaicML deployed instructor embedding model. Args: texts: The list of texts to embed. Returns:
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/mosaicml.html
0fdc280c40cc-3
Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [(self.embed_instruction, text) for text in texts] embeddings = self._embed(instruction_pairs) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a MosaicML deployed instructor embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = (self.query_instruction, text) embedding = self._embed([instruction_pair])[0] return embedding
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/mosaicml.html
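A brief usage sketch; the API token is a placeholder and the endpoint URL defaults to the instructor-xl deployment shown above:

    from langchain.embeddings import MosaicMLInstructorEmbeddings

    embeddings = MosaicMLInstructorEmbeddings(mosaicml_api_token="my-api-token")
    doc_vectors = embeddings.embed_documents(["Some document text."])
    query_vector = embeddings.embed_query("What does the document say?")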
757909223b8a-0
Source code for langchain.embeddings.self_hosted_hugging_face """Wrapper around HuggingFace embedding models for self-hosted remote hardware.""" import importlib import logging from typing import Any, Callable, List, Optional from langchain.embeddings.self_hosted import SelfHostedEmbeddings DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2" DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large" DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: " DEFAULT_QUERY_INSTRUCTION = ( "Represent the question for retrieving supporting documents: " ) logger = logging.getLogger(__name__) def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]: """Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch. """ return client.encode(*args, **kwargs) def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any: """Load the embedding model.""" if not instruct: import sentence_transformers client = sentence_transformers.SentenceTransformer(model_id) else: from InstructorEmbedding import INSTRUCTOR client = INSTRUCTOR(model_id) if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning(
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/self_hosted_hugging_face.html
757909223b8a-1
if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available " "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) client = client.to(device) return client [docs]class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings): """Runs sentence_transformers embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example: .. code-block:: python from langchain.embeddings import SelfHostedHuggingFaceEmbeddings import runhouse as rh model_name = "sentence-transformers/all-mpnet-base-v2" gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu) """ client: Any #: :meta private: model_id: str = DEFAULT_MODEL_NAME """Model name to use.""" model_reqs: List[str] = ["./", "sentence_transformers", "torch"] """Requirements to install on hardware to inference the model.""" hardware: Any """Remote hardware to send the inference function to.""" model_load_fn: Callable = load_embedding_model
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/self_hosted_hugging_face.html
757909223b8a-2
model_load_fn: Callable = load_embedding_model """Function to load the model remotely on the server.""" load_fn_kwargs: Optional[dict] = None """Key word arguments to pass to the model load function.""" inference_fn: Callable = _embed_documents """Inference function to extract the embeddings.""" def __init__(self, **kwargs: Any): """Initialize the remote inference function.""" load_fn_kwargs = kwargs.pop("load_fn_kwargs", {}) load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME) load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False) load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0) super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) [docs]class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings): """Runs InstructorEmbedding embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example: .. code-block:: python from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings import runhouse as rh model_name = "hkunlp/instructor-large" gpu = rh.cluster(name='rh-a10x', instance_type='A100:1') hf = SelfHostedHuggingFaceInstructEmbeddings( model_name=model_name, hardware=gpu) """
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/self_hosted_hugging_face.html
757909223b8a-3
model_name=model_name, hardware=gpu) """ model_id: str = DEFAULT_INSTRUCT_MODEL """Model name to use.""" embed_instruction: str = DEFAULT_EMBED_INSTRUCTION """Instruction to use for embedding documents.""" query_instruction: str = DEFAULT_QUERY_INSTRUCTION """Instruction to use for embedding query.""" model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"] """Requirements to install on hardware to inference the model.""" def __init__(self, **kwargs: Any): """Initialize the remote inference function.""" load_fn_kwargs = kwargs.pop("load_fn_kwargs", {}) load_fn_kwargs["model_id"] = load_fn_kwargs.get( "model_id", DEFAULT_INSTRUCT_MODEL ) load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True) load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0) super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace instruct model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [] for text in texts: instruction_pairs.append([self.embed_instruction, text]) embeddings = self.client(self.pipeline_ref, instruction_pairs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace instruct model. Args: text: The text to embed. Returns: Embeddings for the text.
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/self_hosted_hugging_face.html
757909223b8a-4
Returns: Embeddings for the text. """ instruction_pair = [self.query_instruction, text] embedding = self.client(self.pipeline_ref, [instruction_pair])[0] return embedding.tolist()
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/self_hosted_hugging_face.html
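A usage sketch based on the docstrings above; the cluster name and instance type are placeholders, and the load_fn_kwargs keys mirror the ones set in __init__:

    import runhouse as rh

    from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings

    # Requires `pip install runhouse`; the cluster definition is a placeholder.
    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
    hf = SelfHostedHuggingFaceInstructEmbeddings(
        hardware=gpu,
        load_fn_kwargs={"model_id": "hkunlp/instructor-large", "device": 0},
    )
    query_vector = hf.embed_query("What is a self-hosted embedding model?")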
2b8dd8f46a90-0
Source code for langchain.embeddings.huggingface_hub """Wrapper around HuggingFace Hub embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2" VALID_TASKS = ("feature-extraction",) [docs]class HuggingFaceHubEmbeddings(BaseModel, Embeddings): """Wrapper around HuggingFaceHub embedding models. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import HuggingFaceHubEmbeddings repo_id = "sentence-transformers/all-mpnet-base-v2" hf = HuggingFaceHubEmbeddings( repo_id=repo_id, task="feature-extraction", huggingfacehub_api_token="my-api-key", ) """ client: Any #: :meta private: repo_id: str = DEFAULT_REPO_ID """Model name to use.""" task: Optional[str] = "feature-extraction" """Task to call the model with.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict:
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/huggingface_hub.html
2b8dd8f46a90-1
@root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.inference_api import InferenceApi repo_id = values["repo_id"] if not repo_id.startswith("sentence-transformers"): raise ValueError( "Currently only 'sentence-transformers' embedding models " f"are supported. Got invalid 'repo_id' {repo_id}." ) client = InferenceApi( repo_id=repo_id, token=huggingfacehub_api_token, task=values.get("task"), ) if client.task not in VALID_TASKS: raise ValueError( f"Got invalid task {client.task}, " f"currently only {VALID_TASKS} are supported" ) values["client"] = client except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to HuggingFaceHub's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ # replace newlines, which can negatively affect performance. texts = [text.replace("\n", " ") for text in texts]
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/huggingface_hub.html
2b8dd8f46a90-2
texts = [text.replace("\n", " ") for text in texts] _model_kwargs = self.model_kwargs or {} responses = self.client(inputs=texts, params=_model_kwargs) return responses [docs] def embed_query(self, text: str) -> List[float]: """Call out to HuggingFaceHub's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embeddings for the text. """ response = self.embed_documents([text])[0] return response
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/huggingface_hub.html
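A brief usage sketch; the API token is a placeholder and the repo must be a sentence-transformers model, as enforced in validate_environment:

    from langchain.embeddings import HuggingFaceHubEmbeddings

    hf_hub = HuggingFaceHubEmbeddings(
        repo_id="sentence-transformers/all-mpnet-base-v2",
        task="feature-extraction",
        huggingfacehub_api_token="my-api-token",
    )
    doc_vectors = hf_hub.embed_documents(["Document one.", "Document two."])
    query_vector = hf_hub.embed_query("What is in document one?")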
8fd960051249-0
Source code for langchain.embeddings.deepinfra from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env DEFAULT_MODEL_ID = "sentence-transformers/clip-ViT-B-32" [docs]class DeepInfraEmbeddings(BaseModel, Embeddings): """Wrapper around Deep Infra's embedding inference service. To use, you should have the environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. There are multiple embeddings models available, see https://deepinfra.com/models?type=embeddings. Example: .. code-block:: python from langchain.embeddings import DeepInfraEmbeddings deepinfra_emb = DeepInfraEmbeddings( model_id="sentence-transformers/clip-ViT-B-32", deepinfra_api_token="my-api-key" ) r1 = deepinfra_emb.embed_documents( [ "Alpha is the first letter of Greek alphabet", "Beta is the second letter of Greek alphabet", ] ) r2 = deepinfra_emb.embed_query( "What is the second letter of Greek alphabet" ) """ model_id: str = DEFAULT_MODEL_ID """Embeddings model to use.""" normalize: bool = False """whether to normalize the computed embeddings""" embed_instruction: str = "passage: " """Instruction used to embed documents.""" query_instruction: str = "query: " """Instruction used to embed the query.""" model_kwargs: Optional[dict] = None
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/deepinfra.html
8fd960051249-1
model_kwargs: Optional[dict] = None """Other model keyword args""" deepinfra_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env( values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN" ) values["deepinfra_api_token"] = deepinfra_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"model_id": self.model_id} def _embed(self, input: List[str]) -> List[List[float]]: _model_kwargs = self.model_kwargs or {} # HTTP headers for authorization headers = { "Authorization": f"bearer {self.deepinfra_api_token}", "Content-Type": "application/json", } # send request try: res = requests.post( f"https://api.deepinfra.com/v1/inference/{self.model_id}", headers=headers, json={"inputs": input, "normalize": self.normalize, **_model_kwargs}, ) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") if res.status_code != 200: raise ValueError( "Error raised by inference API HTTP code: %s, %s" % (res.status_code, res.text) ) try: t = res.json() embeddings = t["embeddings"]
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/deepinfra.html
8fd960051249-2
try: t = res.json() embeddings = t["embeddings"] except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {res.text}" ) return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a Deep Infra deployed embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [f"{self.embed_instruction}{text}" for text in texts] embeddings = self._embed(instruction_pairs) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a Deep Infra deployed embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = f"{self.query_instruction}{text}" embedding = self._embed([instruction_pair])[0] return embedding
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/deepinfra.html
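A brief usage sketch; the API token is a placeholder, and the instruction prefixes shown are the class defaults:

    from langchain.embeddings import DeepInfraEmbeddings

    deepinfra_emb = DeepInfraEmbeddings(
        model_id="sentence-transformers/clip-ViT-B-32",
        normalize=True,                 # ask the endpoint for normalized vectors
        embed_instruction="passage: ",  # prefixed to each document
        query_instruction="query: ",    # prefixed to each query
        deepinfra_api_token="my-api-token",
    )
    doc_vectors = deepinfra_emb.embed_documents(["Alpha is the first letter."])
    query_vector = deepinfra_emb.embed_query("Which letter comes first?")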
0c6f98659d2f-0
Source code for langchain.embeddings.embaas """Wrapper around embaas embeddings API.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from typing_extensions import NotRequired, TypedDict from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env # Currently supported maximum batch size for embedding requests MAX_BATCH_SIZE = 256 EMBAAS_API_URL = "https://api.embaas.io/v1/embeddings/" class EmbaasEmbeddingsPayload(TypedDict): """Payload for the embaas embeddings API.""" model: str texts: List[str] instruction: NotRequired[str] [docs]class EmbaasEmbeddings(BaseModel, Embeddings): """Wrapper around embaas's embedding service. To use, you should have the environment variable ``EMBAAS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python # Initialise with default model and instruction from langchain.embeddings import EmbaasEmbeddings emb = EmbaasEmbeddings() # Initialise with custom model and instruction from langchain.embeddings import EmbaasEmbeddings emb_model = "instructor-large" emb_inst = "Represent the Wikipedia document for retrieval" emb = EmbaasEmbeddings( model=emb_model, instruction=emb_inst ) """ model: str = "e5-large-v2" """The model used for embeddings.""" instruction: Optional[str] = None """Instruction used for domain-specific embeddings.""" api_url: str = EMBAAS_API_URL
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/embaas.html
0c6f98659d2f-1
api_url: str = EMBAAS_API_URL """The URL for the embaas embeddings API.""" embaas_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" embaas_api_key = get_from_dict_or_env( values, "embaas_api_key", "EMBAAS_API_KEY" ) values["embaas_api_key"] = embaas_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying params.""" return {"model": self.model, "instruction": self.instruction} def _generate_payload(self, texts: List[str]) -> EmbaasEmbeddingsPayload: """Generates payload for the API request.""" payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model) if self.instruction: payload["instruction"] = self.instruction return payload def _handle_request(self, payload: EmbaasEmbeddingsPayload) -> List[List[float]]: """Sends a request to the Embaas API and handles the response.""" headers = { "Authorization": f"Bearer {self.embaas_api_key}", "Content-Type": "application/json", } response = requests.post(self.api_url, headers=headers, json=payload) response.raise_for_status() parsed_response = response.json() embeddings = [item["embedding"] for item in parsed_response["data"]] return embeddings
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/embaas.html
0c6f98659d2f-2
return embeddings def _generate_embeddings(self, texts: List[str]) -> List[List[float]]: """Generate embeddings using the Embaas API.""" payload = self._generate_payload(texts) try: return self._handle_request(payload) except requests.exceptions.RequestException as e: if e.response is None or not e.response.text: raise ValueError(f"Error raised by embaas embeddings API: {e}") parsed_response = e.response.json() if "message" in parsed_response: raise ValueError( "Validation Error raised by embaas embeddings API:" f"{parsed_response['message']}" ) raise [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Get embeddings for a list of texts. Args: texts: The list of texts to get embeddings for. Returns: List of embeddings, one for each text. """ batches = [ texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE) ] embeddings = [self._generate_embeddings(batch) for batch in batches] # flatten the list of lists into a single list return [embedding for batch in embeddings for embedding in batch] [docs] def embed_query(self, text: str) -> List[float]: """Get embeddings for a single text. Args: text: The text to get embeddings for. Returns: List of embeddings. """ return self.embed_documents([text])[0]
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/embaas.html
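A short sketch showing the batching behaviour of embed_documents; the API key is a placeholder and the model name is the class default:

    from langchain.embeddings import EmbaasEmbeddings

    emb = EmbaasEmbeddings(model="e5-large-v2", embaas_api_key="my-api-key")

    # embed_documents transparently splits the input into batches of at most
    # MAX_BATCH_SIZE (256) texts, so a 600-item list becomes 3 API requests.
    texts = [f"Document number {i}" for i in range(600)]
    vectors = emb.embed_documents(texts)
    assert len(vectors) == len(texts)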
bf5dc5e21714-0
Source code for langchain.embeddings.fake from typing import List import numpy as np from pydantic import BaseModel from langchain.embeddings.base import Embeddings [docs]class FakeEmbeddings(Embeddings, BaseModel): """Fake embedding model that returns random vectors of the configured size; useful for tests.""" size: int def _get_embedding(self) -> List[float]: return list(np.random.normal(size=self.size)) [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: return [self._get_embedding() for _ in texts] [docs] def embed_query(self, text: str) -> List[float]: return self._get_embedding()
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/fake.html
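A short sketch of how FakeEmbeddings is typically used in unit tests:

    from langchain.embeddings.fake import FakeEmbeddings

    # Vectors are random, but have the requested dimensionality.
    fake = FakeEmbeddings(size=8)
    assert len(fake.embed_query("anything")) == 8
    assert len(fake.embed_documents(["a", "b", "c"])) == 3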
44eb55d4992d-0
Source code for langchain.embeddings.huggingface """Wrapper around HuggingFace embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, Field from langchain.embeddings.base import Embeddings DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2" DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large" DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: " DEFAULT_QUERY_INSTRUCTION = ( "Represent the question for retrieving supporting documents: " ) [docs]class HuggingFaceEmbeddings(BaseModel, Embeddings): """Wrapper around sentence_transformers embedding models. To use, you should have the ``sentence_transformers`` python package installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceEmbeddings model_name = "sentence-transformers/all-mpnet-base-v2" model_kwargs = {'device': 'cpu'} encode_kwargs = {'normalize_embeddings': False} hf = HuggingFaceEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) """ client: Any #: :meta private: model_name: str = DEFAULT_MODEL_NAME """Model name to use.""" cache_folder: Optional[str] = None """Path to store models. Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass to the model.""" encode_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass when calling the `encode` method of the model."""
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/huggingface.html
44eb55d4992d-1
"""Key word arguments to pass when calling the `encode` method of the model.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: import sentence_transformers except ImportError as exc: raise ImportError( "Could not import sentence_transformers python package. " "Please install it with `pip install sentence_transformers`." ) from exc self.client = sentence_transformers.SentenceTransformer( self.model_name, cache_folder=self.cache_folder, **self.model_kwargs ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.client.encode(texts, **self.encode_kwargs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.client.encode(text, **self.encode_kwargs) return embedding.tolist() [docs]class HuggingFaceInstructEmbeddings(BaseModel, Embeddings): """Wrapper around sentence_transformers embedding models. To use, you should have the ``sentence_transformers``
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/huggingface.html
44eb55d4992d-2
To use, you should have the ``sentence_transformers`` and ``InstructorEmbedding`` python packages installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceInstructEmbeddings model_name = "hkunlp/instructor-large" model_kwargs = {'device': 'cpu'} encode_kwargs = {'normalize_embeddings': True} hf = HuggingFaceInstructEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) """ client: Any #: :meta private: model_name: str = DEFAULT_INSTRUCT_MODEL """Model name to use.""" cache_folder: Optional[str] = None """Path to store models. Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass to the model.""" encode_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass when calling the `encode` method of the model.""" embed_instruction: str = DEFAULT_EMBED_INSTRUCTION """Instruction to use for embedding documents.""" query_instruction: str = DEFAULT_QUERY_INSTRUCTION """Instruction to use for embedding query.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: from InstructorEmbedding import INSTRUCTOR self.client = INSTRUCTOR( self.model_name, cache_folder=self.cache_folder, **self.model_kwargs ) except ImportError as e: raise ValueError("Dependencies for InstructorEmbedding not found.") from e class Config:
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/huggingface.html
44eb55d4992d-3
raise ValueError("Dependencies for InstructorEmbedding not found.") from e class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace instruct model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [[self.embed_instruction, text] for text in texts] embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace instruct model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = [self.query_instruction, text] embedding = self.client.encode([instruction_pair], **self.encode_kwargs)[0] return embedding.tolist()
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/huggingface.html
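Both wrappers above follow the same two-method Embeddings interface (embed_documents / embed_query). The following is a minimal usage sketch, assuming the sentence-transformers and InstructorEmbedding packages are installed and the default checkpoints can be downloaded; the sample texts are illustrative.

from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings

# Symmetric embeddings: documents and queries go through the same encode() call.
hf = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
doc_vectors = hf.embed_documents(["LangChain wraps many embedding backends."])
query_vector = hf.embed_query("What does LangChain wrap?")
print(len(doc_vectors[0]), len(query_vector))  # both vectors share the model's dimensionality

# Instruction-prefixed embeddings: every text is paired with an instruction string
# before encoding, so documents and queries are embedded asymmetrically.
instructor = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-large")
doc_vectors = instructor.embed_documents(["LangChain wraps many embedding backends."])
query_vector = instructor.embed_query("What does LangChain wrap?")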
c2a7c061b2ee-0
Source code for langchain.embeddings.tensorflow_hub """Wrapper around TensorflowHub embedding models.""" from typing import Any, List from pydantic import BaseModel, Extra from langchain.embeddings.base import Embeddings DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" [docs]class TensorflowHubEmbeddings(BaseModel, Embeddings): """Wrapper around tensorflow_hub embedding models. To use, you should have the ``tensorflow_hub`` and ``tensorflow_text`` python packages installed. Example: .. code-block:: python from langchain.embeddings import TensorflowHubEmbeddings url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" tf = TensorflowHubEmbeddings(model_url=url) """ embed: Any #: :meta private: model_url: str = DEFAULT_MODEL_URL """URL of the TensorFlow Hub model to use.""" def __init__(self, **kwargs: Any): """Initialize the tensorflow_hub and tensorflow_text.""" super().__init__(**kwargs) try: import tensorflow_hub except ImportError: raise ImportError( "Could not import tensorflow-hub python package. " "Please install it with `pip install tensorflow-hub`." ) try: import tensorflow_text # noqa except ImportError: raise ImportError( "Could not import tensorflow_text python package. " "Please install it with `pip install tensorflow_text`." ) self.embed = tensorflow_hub.load(self.model_url) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/tensorflow_hub.html
c2a7c061b2ee-1
"""Compute doc embeddings using a TensorflowHub embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.embed(texts).numpy() return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a TensorflowHub embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.embed([text]).numpy()[0] return embedding.tolist()
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/tensorflow_hub.html
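A minimal usage sketch for the class above, assuming tensorflow-hub and tensorflow_text are installed and the default multilingual encoder can be fetched from TF Hub:

from langchain.embeddings import TensorflowHubEmbeddings

# The TF Hub module is loaded once, in __init__, via tensorflow_hub.load().
embedder = TensorflowHubEmbeddings()  # defaults to DEFAULT_MODEL_URL

# Newlines are replaced with spaces before the texts reach the encoder.
doc_vectors = embedder.embed_documents(["first document", "second\ndocument"])
query_vector = embedder.embed_query("a short query")
print(len(doc_vectors), len(query_vector))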
fb20f7245f37-0
Source code for langchain.embeddings.bedrock import json import os from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings [docs]class BedrockEmbeddings(BaseModel, Embeddings): """Embeddings provider to invoke Bedrock embedding models. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Bedrock service. Example: .. code-block:: python from langchain.embeddings import BedrockEmbeddings region_name = "us-east-1" credentials_profile_name = "default" model_id = "amazon.titan-e1t-medium" be = BedrockEmbeddings( credentials_profile_name=credentials_profile_name, region_name=region_name, model_id=model_id ) """ client: Any #: :meta private: region_name: Optional[str] = None """The aws region e.g., `us-west-2`. Falls back to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here. """ credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance,
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/bedrock.html
fb20f7245f37-1
If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ model_id: str = "amazon.titan-e1t-medium" """Id of the model to call, e.g., amazon.titan-e1t-medium, this is equivalent to the modelId property in the list-foundation-models api""" model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials and the boto3 python package exist in the environment.""" if values["client"] is not None: return values try: import boto3 if values["credentials_profile_name"] is not None: session = boto3.Session(profile_name=values["credentials_profile_name"]) else: # use default credentials session = boto3.Session() client_params = {} if values["region_name"]: client_params["region_name"] = values["region_name"] values["client"] = session.client("bedrock", **client_params) except ImportError: raise ModuleNotFoundError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e return values
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/bedrock.html
fb20f7245f37-2
"profile name are valid." ) from e return values def _embedding_func(self, text: str) -> List[float]: """Call out to Bedrock embedding endpoint.""" # replace newlines, which can negatively affect performance. text = text.replace(os.linesep, " ") _model_kwargs = self.model_kwargs or {} input_body = {**_model_kwargs} input_body["inputText"] = text body = json.dumps(input_body) content_type = "application/json" accepts = "application/json" embeddings = [] try: response = self.client.invoke_model( body=body, modelId=self.model_id, accept=accepts, contentType=content_type, ) response_body = json.loads(response.get("body").read()) embeddings = response_body.get("embedding") except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") return embeddings [docs] def embed_documents( self, texts: List[str], chunk_size: int = 1 ) -> List[List[float]]: """Compute doc embeddings using a Bedrock model. Args: texts: The list of texts to embed. chunk_size: Bedrock currently only allows single string inputs, so chunk size is always 1. This input is here only for compatibility with the embeddings interface. Returns: List of embeddings, one for each text. """ results = [] for text in texts: response = self._embedding_func(text) results.append(response) return results [docs] def embed_query(self, text: str) -> List[float]:
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/bedrock.html
fb20f7245f37-3
[docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a Bedrock model. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func(text)
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/bedrock.html
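Because validate_environment returns early when a client is already supplied, the class above can be constructed either from a credential profile or from a pre-built boto3 client. A hedged sketch follows, assuming boto3 is installed, the account has Bedrock access, and amazon.titan-e1t-medium is available in the chosen region:

import boto3
from langchain.embeddings import BedrockEmbeddings

# Option 1: let the root validator build the client from a named profile.
embedder = BedrockEmbeddings(
    credentials_profile_name="default",
    region_name="us-east-1",
)

# Option 2: inject an existing client; validate_environment() then returns early.
client = boto3.Session(profile_name="default").client("bedrock", region_name="us-east-1")
embedder = BedrockEmbeddings(client=client)

# Each text becomes its own invoke_model() call, so batching is effectively size 1.
doc_vectors = embedder.embed_documents(["hello from Bedrock"])
query_vector = embedder.embed_query("hello?")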
b655b8d2e1c9-0
Source code for langchain.embeddings.openai """Wrapper around OpenAI embedding models.""" from __future__ import annotations import logging from typing import ( Any, Callable, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union, ) import numpy as np from pydantic import BaseModel, Extra, root_validator from tenacity import ( AsyncRetrying, before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any: import openai
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-1
import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards async_retrying = AsyncRetrying( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def wrap(func: Callable) -> Callable: async def wrapped_f(*args: Any, **kwargs: Any) -> Callable: async for _ in async_retrying: return await func(*args, **kwargs) raise AssertionError("this is unreachable") return wrapped_f return wrap def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: return embeddings.client.create(**kwargs) return _embed_with_retry(**kwargs) async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" @_async_retry_decorator(embeddings)
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-2
@_async_retry_decorator(embeddings) async def _async_embed_with_retry(**kwargs: Any) -> Any: return await embeddings.client.acreate(**kwargs) return await _async_embed_with_retry(**kwargs) [docs]class OpenAIEmbeddings(BaseModel, Embeddings): """Wrapper around OpenAI embedding models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings openai = OpenAIEmbeddings(openai_api_key="my-api-key") In order to use the library with Microsoft Azure endpoints, you need to set the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION. The OPENAI_API_TYPE must be set to 'azure' and the others correspond to the properties of your endpoint. In addition, the deployment name must be passed as the model parameter. Example: .. code-block:: python import os os.environ["OPENAI_API_TYPE"] = "azure" os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/" os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key" os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview" os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080" from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings( deployment="your-embeddings-deployment-name",
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-3
deployment="your-embeddings-deployment-name", model="your-embeddings-model-name", openai_api_base="https://your-endpoint.openai.azure.com/", openai_api_type="azure", ) text = "This is a test query." query_result = embeddings.embed_query(text) """ client: Any #: :meta private: model: str = "text-embedding-ada-002" deployment: str = model # to support Azure OpenAI Service custom deployment names openai_api_version: Optional[str] = None # to support Azure OpenAI Service custom endpoints openai_api_base: Optional[str] = None # to support Azure OpenAI Service custom endpoints openai_api_type: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None embedding_ctx_length: int = 8191 openai_api_key: Optional[str] = None openai_organization: Optional[str] = None allowed_special: Union[Literal["all"], Set[str]] = set() disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all" chunk_size: int = 1000 """Maximum number of texts to embed in each batch""" max_retries: int = 6 """Maximum number of retries to make when generating.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout in seconds for the OpenAI API request.""" headers: Any = None tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-4
Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) if values["openai_api_type"] in ("azure", "azure_ad", "azuread"): default_api_version = "2022-12-01"
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-5
default_api_version = "2022-12-01" else: default_api_version = "" values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", default=default_api_version, ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai values["client"] = openai.Embedding except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _invocation_params(self) -> Dict: openai_args = { "engine": self.deployment, "request_timeout": self.request_timeout, "headers": self.headers, "api_key": self.openai_api_key, "organization": self.openai_organization, "api_base": self.openai_api_base, "api_type": self.openai_api_type, "api_version": self.openai_api_version, } if self.openai_proxy: import openai openai.proxy = { "http": self.openai_proxy, "https": self.openai_proxy, } # type: ignore[assignment] # noqa: E501 return openai_args # please refer to # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb def _get_len_safe_embeddings(
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-6
def _get_len_safe_embeddings( self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None ) -> List[List[float]]: embeddings: List[List[float]] = [[] for _ in range(len(texts))] try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to use OpenAIEmbeddings. " "Please install it with `pip install tiktoken`." ) tokens = [] indices = [] model_name = self.tiktoken_model_name or self.model try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") token = encoding.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) for j in range(0, len(token), self.embedding_ctx_length): tokens += [token[j : j + self.embedding_ctx_length]] indices += [i] batched_embeddings = [] _chunk_size = chunk_size or self.chunk_size for i in range(0, len(tokens), _chunk_size): response = embed_with_retry( self,
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-7
response = embed_with_retry( self, input=tokens[i : i + _chunk_size], **self._invocation_params, ) batched_embeddings += [r["embedding"] for r in response["data"]] results: List[List[List[float]]] = [[] for _ in range(len(texts))] num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))] for i in range(len(indices)): results[indices[i]].append(batched_embeddings[i]) num_tokens_in_batch[indices[i]].append(len(tokens[i])) for i in range(len(texts)): _result = results[i] if len(_result) == 0: average = embed_with_retry( self, input="", **self._invocation_params, )[ "data" ][0]["embedding"] else: average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings # please refer to # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb async def _aget_len_safe_embeddings( self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None ) -> List[List[float]]: embeddings: List[List[float]] = [[] for _ in range(len(texts))] try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to use OpenAIEmbeddings. " "Please install it with `pip install tiktoken`."
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-8
"Please install it with `pip install tiktoken`." ) tokens = [] indices = [] model_name = self.tiktoken_model_name or self.model try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") token = encoding.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) for j in range(0, len(token), self.embedding_ctx_length): tokens += [token[j : j + self.embedding_ctx_length]] indices += [i] batched_embeddings = [] _chunk_size = chunk_size or self.chunk_size for i in range(0, len(tokens), _chunk_size): response = await async_embed_with_retry( self, input=tokens[i : i + _chunk_size], **self._invocation_params, ) batched_embeddings += [r["embedding"] for r in response["data"]] results: List[List[List[float]]] = [[] for _ in range(len(texts))] num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))] for i in range(len(indices)): results[indices[i]].append(batched_embeddings[i])
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-9
results[indices[i]].append(batched_embeddings[i]) num_tokens_in_batch[indices[i]].append(len(tokens[i])) for i in range(len(texts)): _result = results[i] if len(_result) == 0: average = ( await async_embed_with_retry( self, input="", **self._invocation_params, ) )["data"][0]["embedding"] else: average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings def _embedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to OpenAI's embedding endpoint.""" # handle large input text if len(text) > self.embedding_ctx_length: return self._get_len_safe_embeddings([text], engine=engine)[0] else: if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return embed_with_retry( self, input=[text], **self._invocation_params, )[ "data" ][0]["embedding"] async def _aembedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to OpenAI's embedding endpoint.""" # handle large input text if len(text) > self.embedding_ctx_length: return (await self._aget_len_safe_embeddings([text], engine=engine))[0] else:
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-10
else: if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return ( await async_embed_with_retry( self, input=[text], **self._invocation_params, ) )["data"][0]["embedding"] [docs] def embed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # NOTE: to keep things simple, we assume the list may contain texts longer # than the maximum context and use length-safe embedding function. return self._get_len_safe_embeddings(texts, engine=self.deployment) [docs] async def aembed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint async for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # NOTE: to keep things simple, we assume the list may contain texts longer
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
b655b8d2e1c9-11
# NOTE: to keep things simple, we assume the list may contain texts longer # than the maximum context and use length-safe embedding function. return await self._aget_len_safe_embeddings(texts, engine=self.deployment) [docs] def embed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = self._embedding_func(text, engine=self.deployment) return embedding [docs] async def aembed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint async for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = await self._aembedding_func(text, engine=self.deployment) return embedding
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/openai.html
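A brief end-to-end sketch of the sync and async entry points above, assuming openai and tiktoken are installed and OPENAI_API_KEY is set in the environment; the sample texts are illustrative.

import asyncio
from langchain.embeddings import OpenAIEmbeddings

embedder = OpenAIEmbeddings()  # the API key is resolved by the root validator

# embed_documents() always takes the length-safe path: texts are tokenized with
# tiktoken, split into embedding_ctx_length windows, embedded in batches of
# chunk_size, and re-combined with a token-weighted, normalized average.
doc_vectors = embedder.embed_documents(["a short text", "a very long text " * 2000])

# embed_query() only switches to the length-safe path for oversized inputs.
query_vector = embedder.embed_query("what is an embedding?")

# The async variants mirror the sync ones one-for-one.
async_vectors = asyncio.run(embedder.aembed_documents(["async text"]))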
d7cbd3a38ba1-0
Source code for langchain.embeddings.self_hosted """Running custom embedding models on self-hosted remote hardware.""" from typing import Any, Callable, List from pydantic import Extra from langchain.embeddings.base import Embeddings from langchain.llms import SelfHostedPipeline def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]: """Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch. """ return pipeline(*args, **kwargs) [docs]class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings): """Runs custom embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example using a model load function: .. code-block:: python from langchain.embeddings import SelfHostedEmbeddings from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") def get_pipeline(): model_id = "facebook/bart-large" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) return pipeline("feature-extraction", model=model, tokenizer=tokenizer) embeddings = SelfHostedEmbeddings( model_load_fn=get_pipeline, hardware=gpu
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/self_hosted.html
d7cbd3a38ba1-1
model_load_fn=get_pipeline, hardware=gpu, model_reqs=["./", "torch", "transformers"], ) Example passing in a pipeline path: .. code-block:: python from langchain.embeddings import SelfHostedHFEmbeddings import pickle import runhouse as rh from transformers import pipeline gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") pipeline = pipeline(model="bert-base-uncased", task="feature-extraction") rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(gpu, path="models") embeddings = SelfHostedHFEmbeddings.from_pipeline( pipeline="models/pipeline.pkl", hardware=gpu, model_reqs=["./", "torch", "transformers"], ) """ inference_fn: Callable = _embed_documents """Inference function to extract the embeddings on the remote hardware.""" inference_kwargs: Any = None """Any kwargs to pass to the model's inference function.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.client(self.pipeline_ref, texts) if not isinstance(embeddings, list): return embeddings.tolist() return embeddings [docs] def embed_query(self, text: str) -> List[float]:
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/self_hosted.html
d7cbd3a38ba1-2
[docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embeddings = self.client(self.pipeline_ref, text) if not isinstance(embeddings, list): return embeddings.tolist() return embeddings
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/self_hosted.html
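The class above only forwards texts to whatever pipeline model_load_fn returns on the remote box, so a usage sketch largely mirrors its docstring. This assumes runhouse is installed and a cluster can actually be provisioned; the model choice and feature-extraction pipeline are illustrative assumptions, not a prescribed setup.

import runhouse as rh
from langchain.embeddings import SelfHostedEmbeddings
from transformers import AutoModel, AutoTokenizer, pipeline

def load_embedding_pipeline():
    # Any callable returning a pipeline usable by _embed_documents works here.
    model_id = "sentence-transformers/all-mpnet-base-v2"  # illustrative choice
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModel.from_pretrained(model_id)
    return pipeline("feature-extraction", model=model, tokenizer=tokenizer)

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
embedder = SelfHostedEmbeddings(
    model_load_fn=load_embedding_pipeline,
    hardware=gpu,
    model_reqs=["./", "torch", "transformers"],
)
doc_vectors = embedder.embed_documents(["text embedded on remote hardware"])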
6a7d0da999b0-0
Source code for langchain.callbacks.file """Callback Handler that writes to a file.""" from typing import Any, Dict, Optional, TextIO, cast from langchain.callbacks.base import BaseCallbackHandler from langchain.input import print_text from langchain.schema import AgentAction, AgentFinish [docs]class FileCallbackHandler(BaseCallbackHandler): """Callback Handler that writes to a file.""" def __init__( self, filename: str, mode: str = "a", color: Optional[str] = None ) -> None: """Initialize callback handler.""" self.file = cast(TextIO, open(filename, mode)) self.color = color def __del__(self) -> None: """Destructor to cleanup when done.""" self.file.close() [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Print out that we are entering a chain.""" class_name = serialized["name"] print_text( f"\n\n\033[1m> Entering new {class_name} chain...\033[0m", end="\n", file=self.file, ) [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Print out that we finished a chain.""" print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file) [docs] def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: """Run on agent action."""
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/file.html
6a7d0da999b0-1
) -> Any: """Run on agent action.""" print_text(action.log, color=color if color else self.color, file=self.file) [docs] def on_tool_end( self, output: str, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """If not the final action, print out observation.""" if observation_prefix is not None: print_text(f"\n{observation_prefix}", file=self.file) print_text(output, color=color if color else self.color, file=self.file) if llm_prefix is not None: print_text(f"\n{llm_prefix}", file=self.file) [docs] def on_text( self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any, ) -> None: """Run when agent ends.""" print_text(text, color=color if color else self.color, end=end, file=self.file) [docs] def on_agent_finish( self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any ) -> None: """Run on agent end.""" print_text( finish.log, color=color if color else self.color, end="\n", file=self.file )
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/file.html
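A short usage sketch for the handler above; the chain and LLM are illustrative (any component accepting callbacks would do), and an OpenAI API key is assumed to be configured.

from langchain.callbacks import FileCallbackHandler
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

handler = FileCallbackHandler("chain_run.log")  # the file is opened in append mode

chain = LLMChain(
    llm=OpenAI(),
    prompt=PromptTemplate.from_template("Answer briefly: {question}"),
    callbacks=[handler],
)
chain.run(question="Where does this run get logged?")
# The "Entering new ... chain" / "Finished chain." banners and intermediate
# text end up in chain_run.log instead of stdout.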
419da811ddf5-0
Source code for langchain.callbacks.streamlit from __future__ import annotations from typing import TYPE_CHECKING, Optional from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.streamlit.streamlit_callback_handler import ( LLMThoughtLabeler as LLMThoughtLabeler, ) from langchain.callbacks.streamlit.streamlit_callback_handler import ( StreamlitCallbackHandler as _InternalStreamlitCallbackHandler, ) if TYPE_CHECKING: from streamlit.delta_generator import DeltaGenerator [docs]def StreamlitCallbackHandler( parent_container: DeltaGenerator, *, max_thought_containers: int = 4, expand_new_thoughts: bool = True, collapse_completed_thoughts: bool = True, thought_labeler: Optional[LLMThoughtLabeler] = None, ) -> BaseCallbackHandler: """Construct a new StreamlitCallbackHandler. This CallbackHandler is geared towards use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts" inside a series of Streamlit expanders. Parameters ---------- parent_container The `st.container` that will contain all the Streamlit elements that the Handler creates. max_thought_containers The max number of completed LLM thought containers to show at once. When this threshold is reached, a new thought will cause the oldest thoughts to be collapsed into a "History" expander. Defaults to 4. expand_new_thoughts Each LLM "thought" gets its own `st.expander`. This param controls whether that expander is expanded by default. Defaults to True. collapse_completed_thoughts If True, LLM thought expanders will be collapsed when completed.
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/streamlit.html
419da811ddf5-1
If True, LLM thought expanders will be collapsed when completed. Defaults to True. thought_labeler An optional custom LLMThoughtLabeler instance. If unspecified, the handler will use the default thought labeling logic. Defaults to None. Returns ------- A new StreamlitCallbackHandler instance. Note that this is an "auto-updating" API: if the installed version of Streamlit has a more recent StreamlitCallbackHandler implementation, an instance of that class will be used. """ # If we're using a version of Streamlit that implements StreamlitCallbackHandler, # delegate to it instead of using our built-in handler. The official handler is # guaranteed to support the same set of kwargs. try: from streamlit.external.langchain import ( StreamlitCallbackHandler as OfficialStreamlitCallbackHandler, # type: ignore # noqa: 501 ) return OfficialStreamlitCallbackHandler( parent_container, max_thought_containers=max_thought_containers, expand_new_thoughts=expand_new_thoughts, collapse_completed_thoughts=collapse_completed_thoughts, thought_labeler=thought_labeler, ) except ImportError: return _InternalStreamlitCallbackHandler( parent_container, max_thought_containers=max_thought_containers, expand_new_thoughts=expand_new_thoughts, collapse_completed_thoughts=collapse_completed_thoughts, thought_labeler=thought_labeler, )
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/streamlit.html
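A sketch of how the factory above is typically wired into a Streamlit app; the agent setup and input widget are illustrative assumptions and require streamlit plus a configured LLM provider to actually run.

import streamlit as st
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import StreamlitCallbackHandler
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
agent = initialize_agent(
    load_tools(["llm-math"], llm=llm),
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)

prompt = st.text_input("Ask something")
if prompt:
    # LLM thoughts and tool calls render as expanders inside this container.
    handler = StreamlitCallbackHandler(st.container())
    st.write(agent.run(prompt, callbacks=[handler]))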
aca133cb7b57-0
Source code for langchain.callbacks.stdout """Callback Handler that prints to std out.""" from typing import Any, Dict, List, Optional, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.input import print_text from langchain.schema import AgentAction, AgentFinish, LLMResult [docs]class StdOutCallbackHandler(BaseCallbackHandler): """Callback Handler that prints to std out.""" def __init__(self, color: Optional[str] = None) -> None: """Initialize callback handler.""" self.color = color [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Print out the prompts.""" pass [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Do nothing.""" pass [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Do nothing.""" pass [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Do nothing.""" pass [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Print out that we are entering a chain.""" class_name = serialized.get("name", "") print(f"\n\n\033[1m> Entering new {class_name} chain...\033[0m") [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/stdout.html
aca133cb7b57-1
"""Print out that we finished a chain.""" print("\n\033[1m> Finished chain.\033[0m") [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Do nothing.""" pass [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: """Do nothing.""" pass [docs] def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: """Run on agent action.""" print_text(action.log, color=color if color else self.color) [docs] def on_tool_end( self, output: str, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """If not the final action, print out observation.""" if observation_prefix is not None: print_text(f"\n{observation_prefix}") print_text(output, color=color if color else self.color) if llm_prefix is not None: print_text(f"\n{llm_prefix}") [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Do nothing.""" pass [docs] def on_text( self, text: str, color: Optional[str] = None, end: str = "",
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/stdout.html
aca133cb7b57-2
color: Optional[str] = None, end: str = "", **kwargs: Any, ) -> None: """Run when agent ends.""" print_text(text, color=color if color else self.color, end=end) [docs] def on_agent_finish( self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any ) -> None: """Run on agent end.""" print_text(finish.log, color=color if color else self.color, end="\n")
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/stdout.html
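The handler above is LangChain's default console logger; attaching it explicitly looks like the following sketch, assuming an OpenAI API key is configured.

from langchain.callbacks import StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

handler = StdOutCallbackHandler(color="green")
chain = LLMChain(
    llm=OpenAI(),
    prompt=PromptTemplate.from_template("Summarize: {text}"),
    callbacks=[handler],
)
# on_chain_start / on_chain_end print the "Entering new ..." and
# "Finished chain." banners; on_text reuses the handler's default color.
chain.run(text="Callbacks let you observe chain execution.")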
48ca6cfc3cba-0
Source code for langchain.callbacks.wandb_callback import json import tempfile from copy import deepcopy from pathlib import Path from typing import Any, Dict, List, Optional, Sequence, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.utils import ( BaseMetadataCallbackHandler, flatten_dict, hash_string, import_pandas, import_spacy, import_textstat, ) from langchain.schema import AgentAction, AgentFinish, LLMResult def import_wandb() -> Any: """Import the wandb python package and raise an error if it is not installed.""" try: import wandb # noqa: F401 except ImportError: raise ImportError( "To use the wandb callback manager you need to have the `wandb` python " "package installed. Please install it with `pip install wandb`" ) return wandb def load_json_to_dict(json_path: Union[str, Path]) -> dict: """Load json file to a dictionary. Parameters: json_path (str): The path to the json file. Returns: (dict): The dictionary representation of the json file. """ with open(json_path, "r") as f: data = json.load(f) return data def analyze_text( text: str, complexity_metrics: bool = True, visualize: bool = True, nlp: Any = None, output_dir: Optional[Union[str, Path]] = None, ) -> dict: """Analyze text using textstat and spacy. Parameters: text (str): The text to analyze. complexity_metrics (bool): Whether to compute complexity metrics.
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-1
complexity_metrics (bool): Whether to compute complexity metrics. visualize (bool): Whether to visualize the text. nlp (spacy.lang): The spacy language model to use for visualization. output_dir (str): The directory to save the visualization files to. Returns: (dict): A dictionary containing the complexity metrics and visualization files serialized in a wandb.Html element. """ resp = {} textstat = import_textstat() wandb = import_wandb() spacy = import_spacy() if complexity_metrics: text_complexity_metrics = { "flesch_reading_ease": textstat.flesch_reading_ease(text), "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text), "smog_index": textstat.smog_index(text), "coleman_liau_index": textstat.coleman_liau_index(text), "automated_readability_index": textstat.automated_readability_index(text), "dale_chall_readability_score": textstat.dale_chall_readability_score(text), "difficult_words": textstat.difficult_words(text), "linsear_write_formula": textstat.linsear_write_formula(text), "gunning_fog": textstat.gunning_fog(text), "text_standard": textstat.text_standard(text), "fernandez_huerta": textstat.fernandez_huerta(text), "szigriszt_pazos": textstat.szigriszt_pazos(text), "gutierrez_polini": textstat.gutierrez_polini(text), "crawford": textstat.crawford(text),
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-2
"crawford": textstat.crawford(text), "gulpease_index": textstat.gulpease_index(text), "osman": textstat.osman(text), } resp.update(text_complexity_metrics) if visualize and nlp and output_dir is not None: doc = nlp(text) dep_out = spacy.displacy.render( # type: ignore doc, style="dep", jupyter=False, page=True ) dep_output_path = Path(output_dir, hash_string(f"dep-{text}") + ".html") dep_output_path.open("w", encoding="utf-8").write(dep_out) ent_out = spacy.displacy.render( # type: ignore doc, style="ent", jupyter=False, page=True ) ent_output_path = Path(output_dir, hash_string(f"ent-{text}") + ".html") ent_output_path.open("w", encoding="utf-8").write(ent_out) text_visualizations = { "dependency_tree": wandb.Html(str(dep_output_path)), "entities": wandb.Html(str(ent_output_path)), } resp.update(text_visualizations) return resp def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any: """Construct an html element from a prompt and a generation. Parameters: prompt (str): The prompt. generation (str): The generation. Returns: (wandb.Html): The html element.""" wandb = import_wandb() formatted_prompt = prompt.replace("\n", "<br>") formatted_generation = generation.replace("\n", "<br>") return wandb.Html( f"""
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-3
return wandb.Html( f""" <p style="color:black;">{formatted_prompt}:</p> <blockquote> <p style="color:green;"> {formatted_generation} </p> </blockquote> """, inject=False, ) [docs]class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): """Callback Handler that logs to Weights and Biases. Parameters: job_type (str): The type of job. project (str): The project to log to. entity (str): The entity to log to. tags (list): The tags to log. group (str): The group to log to. name (str): The name of the run. notes (str): The notes to log. visualize (bool): Whether to visualize the run. complexity_metrics (bool): Whether to log complexity metrics. stream_logs (bool): Whether to stream callback actions to W&B This handler will utilize the associated callback method called and formats the input of each callback function with metadata regarding the state of LLM run, and adds the response to the list of records for both the {method}_records and action. It then logs the response using the run.log() method to Weights and Biases. """ def __init__( self, job_type: Optional[str] = None, project: Optional[str] = "langchain_callback_demo", entity: Optional[str] = None, tags: Optional[Sequence] = None, group: Optional[str] = None, name: Optional[str] = None, notes: Optional[str] = None, visualize: bool = False,
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-4
notes: Optional[str] = None, visualize: bool = False, complexity_metrics: bool = False, stream_logs: bool = False, ) -> None: """Initialize callback handler.""" wandb = import_wandb() import_pandas() import_textstat() spacy = import_spacy() super().__init__() self.job_type = job_type self.project = project self.entity = entity self.tags = tags self.group = group self.name = name self.notes = notes self.visualize = visualize self.complexity_metrics = complexity_metrics self.stream_logs = stream_logs self.temp_dir = tempfile.TemporaryDirectory() self.run: wandb.sdk.wandb_run.Run = wandb.init( # type: ignore job_type=self.job_type, project=self.project, entity=self.entity, tags=self.tags, group=self.group, name=self.name, notes=self.notes, ) warning = ( "DEPRECATION: The `WandbCallbackHandler` will soon be deprecated in favor " "of the `WandbTracer`. Please update your code to use the `WandbTracer` " "instead." ) wandb.termwarn( warning, repeat=False, ) self.callback_columns: list = [] self.action_records: list = [] self.complexity_metrics = complexity_metrics self.visualize = visualize self.nlp = spacy.load("en_core_web_sm") def _init_resp(self) -> Dict: return {k: None for k in self.callback_columns}
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-5
return {k: None for k in self.callback_columns} [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when LLM starts.""" self.step += 1 self.llm_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({"action": "on_llm_start"}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) for prompt in prompts: prompt_resp = deepcopy(resp) prompt_resp["prompts"] = prompt self.on_llm_start_records.append(prompt_resp) self.action_records.append(prompt_resp) if self.stream_logs: self.run.log(prompt_resp) [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Run when LLM generates a new token.""" self.step += 1 self.llm_streams += 1 resp = self._init_resp() resp.update({"action": "on_llm_new_token", "token": token}) resp.update(self.get_custom_callback_meta()) self.on_llm_token_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" self.step += 1 self.llm_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({"action": "on_llm_end"})
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-6
resp.update({"action": "on_llm_end"}) resp.update(flatten_dict(response.llm_output or {})) resp.update(self.get_custom_callback_meta()) for generations in response.generations: for generation in generations: generation_resp = deepcopy(resp) generation_resp.update(flatten_dict(generation.dict())) generation_resp.update( analyze_text( generation.text, complexity_metrics=self.complexity_metrics, visualize=self.visualize, nlp=self.nlp, output_dir=self.temp_dir.name, ) ) self.on_llm_end_records.append(generation_resp) self.action_records.append(generation_resp) if self.stream_logs: self.run.log(generation_resp) [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when LLM errors.""" self.step += 1 self.errors += 1 [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Run when chain starts running.""" self.step += 1 self.chain_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({"action": "on_chain_start"}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) chain_input = inputs["input"] if isinstance(chain_input, str): input_resp = deepcopy(resp) input_resp["input"] = chain_input self.on_chain_start_records.append(input_resp) self.action_records.append(input_resp) if self.stream_logs:
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-7
self.action_records.append(input_resp) if self.stream_logs: self.run.log(input_resp) elif isinstance(chain_input, list): for inp in chain_input: input_resp = deepcopy(resp) input_resp.update(inp) self.on_chain_start_records.append(input_resp) self.action_records.append(input_resp) if self.stream_logs: self.run.log(input_resp) else: raise ValueError("Unexpected data format provided!") [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" self.step += 1 self.chain_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({"action": "on_chain_end", "outputs": outputs["output"]}) resp.update(self.get_custom_callback_meta()) self.on_chain_end_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when chain errors.""" self.step += 1 self.errors += 1 [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({"action": "on_tool_start", "input_str": input_str}) resp.update(flatten_dict(serialized))
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-8
resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) self.on_tool_start_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" self.step += 1 self.tool_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({"action": "on_tool_end", "output": output}) resp.update(self.get_custom_callback_meta()) self.on_tool_end_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when tool errors.""" self.step += 1 self.errors += 1 [docs] def on_text(self, text: str, **kwargs: Any) -> None: """ Run when agent is ending. """ self.step += 1 self.text_ctr += 1 resp = self._init_resp() resp.update({"action": "on_text", "text": text}) resp.update(self.get_custom_callback_meta()) self.on_text_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run when agent ends running.""" self.step += 1 self.agent_ends += 1 self.ends += 1
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-9
self.agent_ends += 1 self.ends += 1 resp = self._init_resp() resp.update( { "action": "on_agent_finish", "output": finish.return_values["output"], "log": finish.log, } ) resp.update(self.get_custom_callback_meta()) self.on_agent_finish_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update( { "action": "on_agent_action", "tool": action.tool, "tool_input": action.tool_input, "log": action.log, } ) resp.update(self.get_custom_callback_meta()) self.on_agent_action_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp) def _create_session_analysis_df(self) -> Any: """Create a dataframe with all the information from the session.""" pd = import_pandas() on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records) on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records) llm_input_prompts_df = ( on_llm_start_records_df[["step", "prompts", "name"]] .dropna(axis=1) .rename({"step": "prompt_step"}, axis=1) ) complexity_metrics_columns = []
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-10
) complexity_metrics_columns = [] visualizations_columns = [] if self.complexity_metrics: complexity_metrics_columns = [ "flesch_reading_ease", "flesch_kincaid_grade", "smog_index", "coleman_liau_index", "automated_readability_index", "dale_chall_readability_score", "difficult_words", "linsear_write_formula", "gunning_fog", "text_standard", "fernandez_huerta", "szigriszt_pazos", "gutierrez_polini", "crawford", "gulpease_index", "osman", ] if self.visualize: visualizations_columns = ["dependency_tree", "entities"] llm_outputs_df = ( on_llm_end_records_df[ [ "step", "text", "token_usage_total_tokens", "token_usage_prompt_tokens", "token_usage_completion_tokens", ] + complexity_metrics_columns + visualizations_columns ] .dropna(axis=1) .rename({"step": "output_step", "text": "output"}, axis=1) ) session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1) session_analysis_df["chat_html"] = session_analysis_df[ ["prompts", "output"] ].apply( lambda row: construct_html_from_prompt_and_generation( row["prompts"], row["output"] ), axis=1, ) return session_analysis_df
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-11
), axis=1, ) return session_analysis_df [docs] def flush_tracker( self, langchain_asset: Any = None, reset: bool = True, finish: bool = False, job_type: Optional[str] = None, project: Optional[str] = None, entity: Optional[str] = None, tags: Optional[Sequence] = None, group: Optional[str] = None, name: Optional[str] = None, notes: Optional[str] = None, visualize: Optional[bool] = None, complexity_metrics: Optional[bool] = None, ) -> None: """Flush the tracker and reset the session. Args: langchain_asset: The langchain asset to save. reset: Whether to reset the session. finish: Whether to finish the run. job_type: The job type. project: The project. entity: The entity. tags: The tags. group: The group. name: The name. notes: The notes. visualize: Whether to visualize. complexity_metrics: Whether to compute complexity metrics. Returns: None """ pd = import_pandas() wandb = import_wandb() action_records_table = wandb.Table(dataframe=pd.DataFrame(self.action_records)) session_analysis_table = wandb.Table( dataframe=self._create_session_analysis_df() ) self.run.log( { "action_records": action_records_table, "session_analysis": session_analysis_table, } ) if langchain_asset:
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
48ca6cfc3cba-12
} ) if langchain_asset: langchain_asset_path = Path(self.temp_dir.name, "model.json") model_artifact = wandb.Artifact(name="model", type="model") model_artifact.add(action_records_table, name="action_records") model_artifact.add(session_analysis_table, name="session_analysis") try: langchain_asset.save(langchain_asset_path) model_artifact.add_file(str(langchain_asset_path)) model_artifact.metadata = load_json_to_dict(langchain_asset_path) except ValueError: langchain_asset.save_agent(langchain_asset_path) model_artifact.add_file(str(langchain_asset_path)) model_artifact.metadata = load_json_to_dict(langchain_asset_path) except NotImplementedError as e: print("Could not save model.") print(repr(e)) pass self.run.log_artifact(model_artifact) if finish or reset: self.run.finish() self.temp_dir.cleanup() self.reset_callback_meta() if reset: self.__init__( # type: ignore job_type=job_type if job_type else self.job_type, project=project if project else self.project, entity=entity if entity else self.entity, tags=tags if tags else self.tags, group=group if group else self.group, name=name if name else self.name, notes=notes if notes else self.notes, visualize=visualize if visualize else self.visualize, complexity_metrics=complexity_metrics if complexity_metrics else self.complexity_metrics, )
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/wandb_callback.html
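Putting the handler above to work usually means streaming events during a session and then calling flush_tracker to push the aggregated tables. A sketch, assuming wandb, pandas, textstat and spacy (with the en_core_web_sm model) are installed, a W&B account is logged in, and an OpenAI key is configured:

from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler
from langchain.llms import OpenAI

wandb_handler = WandbCallbackHandler(
    job_type="inference",
    project="langchain_callback_demo",
    name="docs_example_run",
    complexity_metrics=True,
    stream_logs=True,
)
llm = OpenAI(callbacks=[wandb_handler, StdOutCallbackHandler()])

llm("Explain what a callback handler does, in one sentence.")

# Logs the action_records / session_analysis tables, saves the LLM as a W&B
# artifact, and finishes the run (pass reset=True to start a fresh session).
wandb_handler.flush_tracker(langchain_asset=llm, reset=False, finish=True)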
d9175c39d030-0
Source code for langchain.callbacks.openai_info """Callback Handler that tracks OpenAI token usage and cost.""" from typing import Any, Dict, List from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import LLMResult MODEL_COST_PER_1K_TOKENS = { # GPT-4 input "gpt-4": 0.03, "gpt-4-0314": 0.03, "gpt-4-0613": 0.03, "gpt-4-32k": 0.06, "gpt-4-32k-0314": 0.06, "gpt-4-32k-0613": 0.06, # GPT-4 output "gpt-4-completion": 0.06, "gpt-4-0314-completion": 0.06, "gpt-4-0613-completion": 0.06, "gpt-4-32k-completion": 0.12, "gpt-4-32k-0314-completion": 0.12, "gpt-4-32k-0613-completion": 0.12, # GPT-3.5 input "gpt-3.5-turbo": 0.0015, "gpt-3.5-turbo-0301": 0.0015, "gpt-3.5-turbo-0613": 0.0015, "gpt-3.5-turbo-16k": 0.003,
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/openai_info.html
d9175c39d030-1
"gpt-3.5-turbo-16k-0613": 0.003, # GPT-3.5 output "gpt-3.5-turbo-completion": 0.002, "gpt-3.5-turbo-0301-completion": 0.002, "gpt-3.5-turbo-0613-completion": 0.002, "gpt-3.5-turbo-16k-completion": 0.004, "gpt-3.5-turbo-16k-0613-completion": 0.004, # Others "gpt-35-turbo": 0.002, # Azure OpenAI version of ChatGPT "text-ada-001": 0.0004, "ada": 0.0004, "text-babbage-001": 0.0005, "babbage": 0.0005, "text-curie-001": 0.002, "curie": 0.002, "text-davinci-003": 0.02, "text-davinci-002": 0.02, "code-davinci-002": 0.02, "ada-finetuned": 0.0016, "babbage-finetuned": 0.0024, "curie-finetuned": 0.012, "davinci-finetuned": 0.12, } def standardize_model_name( model_name: str, is_completion: bool = False, ) -> str: """
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/openai_info.html
d9175c39d030-2
is_completion: bool = False, ) -> str: """ Standardize the model name to a format that can be used in the OpenAI API. Args: model_name: Model name to standardize. is_completion: Whether the model is used for completion or not. Defaults to False. Returns: Standardized model name. """ model_name = model_name.lower() if "ft-" in model_name: return model_name.split(":")[0] + "-finetuned" elif is_completion and ( model_name.startswith("gpt-4") or model_name.startswith("gpt-3.5") ): return model_name + "-completion" else: return model_name def get_openai_token_cost_for_model( model_name: str, num_tokens: int, is_completion: bool = False ) -> float: """ Get the cost in USD for a given model and number of tokens. Args: model_name: Name of the model. num_tokens: Number of tokens. is_completion: Whether the model is used for completion or not. Defaults to False. Returns: Cost in USD. """ model_name = standardize_model_name(model_name, is_completion=is_completion) if model_name not in MODEL_COST_PER_1K_TOKENS: raise ValueError( f"Unknown model: {model_name}. Please provide a valid OpenAI model name. " "Known models are: " + ", ".join(MODEL_COST_PER_1K_TOKENS.keys()) ) return MODEL_COST_PER_1K_TOKENS[model_name] * (num_tokens / 1000)
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/openai_info.html
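A small worked example of how the two helpers above compose (hedged: the token counts are invented, and the prices come from the MODEL_COST_PER_1K_TOKENS table in this module, not necessarily from current OpenAI pricing):

from langchain.callbacks.openai_info import (
    get_openai_token_cost_for_model,
    standardize_model_name,
)

# Completion (output) tokens are priced under a separate "-completion" key.
assert standardize_model_name("gpt-4", is_completion=True) == "gpt-4-completion"

prompt_cost = get_openai_token_cost_for_model("gpt-3.5-turbo", 1000)
# 1000 / 1000 * 0.0015 = 0.0015 USD
completion_cost = get_openai_token_cost_for_model(
    "gpt-3.5-turbo", 500, is_completion=True
)
# 500 / 1000 * 0.002 = 0.001 USD
print(prompt_cost + completion_cost)  # 0.0025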
d9175c39d030-3
[docs]class OpenAICallbackHandler(BaseCallbackHandler):
    """Callback Handler that tracks OpenAI info."""

    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0
    total_cost: float = 0.0

    def __repr__(self) -> str:
        return (
            f"Tokens Used: {self.total_tokens}\n"
            f"\tPrompt Tokens: {self.prompt_tokens}\n"
            f"\tCompletion Tokens: {self.completion_tokens}\n"
            f"Successful Requests: {self.successful_requests}\n"
            f"Total Cost (USD): ${self.total_cost}"
        )

    @property
    def always_verbose(self) -> bool:
        """Whether to call verbose callbacks even if verbose is False."""
        return True

    [docs] def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Print out the prompts."""
        pass

    [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Print out the token."""
        pass

    [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Collect token usage."""
        if response.llm_output is None:
            return None
        self.successful_requests += 1
        if "token_usage" not in response.llm_output:
            return None
        token_usage = response.llm_output["token_usage"]
        completion_tokens = token_usage.get("completion_tokens", 0)
        prompt_tokens = token_usage.get("prompt_tokens", 0)
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/openai_info.html
d9175c39d030-4
        prompt_tokens = token_usage.get("prompt_tokens", 0)
        model_name = standardize_model_name(response.llm_output.get("model_name", ""))
        if model_name in MODEL_COST_PER_1K_TOKENS:
            completion_cost = get_openai_token_cost_for_model(
                model_name, completion_tokens, is_completion=True
            )
            prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens)
            self.total_cost += prompt_cost + completion_cost
        self.total_tokens += token_usage.get("total_tokens", 0)
        self.prompt_tokens += prompt_tokens
        self.completion_tokens += completion_tokens

    def __copy__(self) -> "OpenAICallbackHandler":
        """Return a copy of the callback handler."""
        return self

    def __deepcopy__(self, memo: Any) -> "OpenAICallbackHandler":
        """Return a deep copy of the callback handler."""
        return self
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/openai_info.html
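For orientation, a typical way this handler is used (a sketch under assumptions: an OpenAI API key is set, and the get_openai_callback helper, which lives elsewhere in langchain.callbacks and installs this handler for the duration of the with-block, is available):

from langchain.callbacks import get_openai_callback
from langchain.llms import OpenAI

llm = OpenAI(model_name="text-davinci-003")
with get_openai_callback() as cb:
    llm("What is the capital of France?")
    # on_llm_end() above has accumulated usage from the response's llm_output.
    print(cb.total_tokens, cb.prompt_tokens, cb.completion_tokens)
    print(f"Total cost (USD): ${cb.total_cost:.4f}")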
bb59a1b9c4f6-0
Source code for langchain.callbacks.whylabs_callback
from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, Generation, LLMResult
from langchain.utils import get_from_env

if TYPE_CHECKING:
    from whylogs.api.logger.logger import Logger

diagnostic_logger = logging.getLogger(__name__)


def import_langkit(
    sentiment: bool = False,
    toxicity: bool = False,
    themes: bool = False,
) -> Any:
    """Import the langkit python package and raise an error if it is not installed.

    Args:
        sentiment: Whether to import the langkit.sentiment module. Defaults to False.
        toxicity: Whether to import the langkit.toxicity module. Defaults to False.
        themes: Whether to import the langkit.themes module. Defaults to False.

    Returns:
        The imported langkit module.
    """
    try:
        import langkit  # noqa: F401
        import langkit.regexes  # noqa: F401
        import langkit.textstat  # noqa: F401

        if sentiment:
            import langkit.sentiment  # noqa: F401
        if toxicity:
            import langkit.toxicity  # noqa: F401
        if themes:
            import langkit.themes  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the whylabs callback manager you need to have the `langkit` python "
            "package installed. Please install it with `pip install langkit`."
        )
    return langkit


[docs]class WhyLabsCallbackHandler(BaseCallbackHandler):
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/whylabs_callback.html
bb59a1b9c4f6-1
    return langkit


[docs]class WhyLabsCallbackHandler(BaseCallbackHandler):
    """WhyLabs CallbackHandler."""

    def __init__(self, logger: Logger):
        """Initialize the rolling logger."""
        super().__init__()
        self.logger = logger
        diagnostic_logger.info(
            "Initialized WhyLabs callback handler with configured whylogs Logger."
        )

    def _profile_generations(self, generations: List[Generation]) -> None:
        for gen in generations:
            self.logger.log({"response": gen.text})

    [docs] def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Pass the input prompts to the logger."""
        for prompt in prompts:
            self.logger.log({"prompt": prompt})

    [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Pass the generated response to the logger."""
        for generations in response.generations:
            self._profile_generations(generations)

    [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    [docs] def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    [docs] def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Do nothing."""

    [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Do nothing."""
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/whylabs_callback.html
bb59a1b9c4f6-2
"""Do nothing.""" [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Do nothing.""" pass [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: """Do nothing.""" [docs] def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: """Do nothing.""" [docs] def on_tool_end( self, output: str, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """Do nothing.""" [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Do nothing.""" pass [docs] def on_text(self, text: str, **kwargs: Any) -> None: """Do nothing.""" [docs] def on_agent_finish( self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any ) -> None: """Run on agent end.""" pass [docs] def flush(self) -> None: self.logger._do_rollover() diagnostic_logger.info("Flushing WhyLabs logger, writing profile...") [docs] def close(self) -> None: self.logger.close()
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/whylabs_callback.html
bb59a1b9c4f6-3
    [docs] def close(self) -> None:
        self.logger.close()
        diagnostic_logger.info("Closing WhyLabs logger, see you next time!")

    def __enter__(self) -> WhyLabsCallbackHandler:
        return self

    def __exit__(
        self, exception_type: Any, exception_value: Any, traceback: Any
    ) -> None:
        self.close()

    [docs] @classmethod
    def from_params(
        cls,
        *,
        api_key: Optional[str] = None,
        org_id: Optional[str] = None,
        dataset_id: Optional[str] = None,
        sentiment: bool = False,
        toxicity: bool = False,
        themes: bool = False,
    ) -> Logger:
        """Instantiate whylogs Logger from params.

        Args:
            api_key (Optional[str]): WhyLabs API key. Optional because the preferred
                way to specify the API key is with the environment variable
                WHYLABS_API_KEY.
            org_id (Optional[str]): WhyLabs organization id to write profiles to.
                If not set, must be specified in the environment variable
                WHYLABS_DEFAULT_ORG_ID.
            dataset_id (Optional[str]): The model or dataset this callback is gathering
                telemetry for. If not set, must be specified in the environment variable
                WHYLABS_DEFAULT_DATASET_ID.
            sentiment (bool): If True, will initialize a model to perform the sentiment
                analysis compound score. Defaults to False and will not gather this
                metric.
            toxicity (bool): If True, will initialize a model to score toxicity.
                Defaults to False and will not gather this metric.
            themes (bool): If True, will initialize a model to calculate distance to
                configured themes. Defaults to False and will not gather this
                metric.
        """
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/whylabs_callback.html
bb59a1b9c4f6-4
metric. """ # langkit library will import necessary whylogs libraries import_langkit(sentiment=sentiment, toxicity=toxicity, themes=themes) import whylogs as why from whylogs.api.writer.whylabs import WhyLabsWriter from whylogs.core.schema import DeclarativeSchema from whylogs.experimental.core.metrics.udf_metric import generate_udf_schema api_key = api_key or get_from_env("api_key", "WHYLABS_API_KEY") org_id = org_id or get_from_env("org_id", "WHYLABS_DEFAULT_ORG_ID") dataset_id = dataset_id or get_from_env( "dataset_id", "WHYLABS_DEFAULT_DATASET_ID" ) whylabs_writer = WhyLabsWriter( api_key=api_key, org_id=org_id, dataset_id=dataset_id ) langkit_schema = DeclarativeSchema(generate_udf_schema()) whylabs_logger = why.logger( mode="rolling", interval=5, when="M", schema=langkit_schema ) whylabs_logger.append_writer(writer=whylabs_writer) diagnostic_logger.info( "Started whylogs Logger with WhyLabsWriter and initialized LangKit. 📝" ) return cls(whylabs_logger)
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/whylabs_callback.html
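A minimal usage sketch for the handler above (assumptions: langkit and whylogs are installed, the WHYLABS_API_KEY, WHYLABS_DEFAULT_ORG_ID and WHYLABS_DEFAULT_DATASET_ID environment variables are set as the docstring describes, and the prompt is illustrative only):

from langchain.callbacks import WhyLabsCallbackHandler
from langchain.llms import OpenAI

# from_params() builds the rolling whylogs logger shown above and wraps it in
# the handler; used as a context manager, close() is called automatically on exit.
with WhyLabsCallbackHandler.from_params() as whylabs:
    llm = OpenAI(temperature=0, callbacks=[whylabs])
    llm("Can you summarize why telemetry profiling is useful?")
    whylabs.flush()  # roll the current profile over and write it out immediately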
3e3567f9b8a3-0
Source code for langchain.callbacks.streaming_aiter
from __future__ import annotations

import asyncio
from typing import Any, AsyncIterator, Dict, List, Literal, Union, cast

from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import LLMResult

# TODO If used by two LLM runs in parallel this won't work as expected


[docs]class AsyncIteratorCallbackHandler(AsyncCallbackHandler):
    """Callback handler that returns an async iterator."""

    queue: asyncio.Queue[str]
    done: asyncio.Event

    @property
    def always_verbose(self) -> bool:
        return True

    def __init__(self) -> None:
        self.queue = asyncio.Queue()
        self.done = asyncio.Event()

    [docs] async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        # If two calls are made in a row, this resets the state
        self.done.clear()

    [docs] async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        self.queue.put_nowait(token)

    [docs] async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        self.done.set()

    [docs] async def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        self.done.set()

    # TODO implement the other methods

    [docs] async def aiter(self) -> AsyncIterator[str]:
        while not self.queue.empty() or not self.done.is_set():
            # Wait for the next token in the queue,
            # but stop waiting if the done event is set
            done, other = await asyncio.wait(
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/streaming_aiter.html
3e3567f9b8a3-1
            done, other = await asyncio.wait(
                [
                    # NOTE: If you add other tasks here, update the code below,
                    # which assumes each set has exactly one task each
                    asyncio.ensure_future(self.queue.get()),
                    asyncio.ensure_future(self.done.wait()),
                ],
                return_when=asyncio.FIRST_COMPLETED,
            )

            # Cancel the other task
            if other:
                other.pop().cancel()

            # Extract the value of the first completed task
            token_or_done = cast(Union[str, Literal[True]], done.pop().result())

            # If the extracted value is the boolean True, the done event was set
            if token_or_done is True:
                break

            # Otherwise, the extracted value is a token, which we yield
            yield token_or_done
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/streaming_aiter.html
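A sketch of how the iterator is usually consumed (hedged: it assumes an OpenAI key, a chat model that streams, and that the apredict convenience method is available on the model; the essential pattern is running the generation as a task while draining aiter()):

import asyncio

from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatOpenAI


async def main() -> None:
    handler = AsyncIteratorCallbackHandler()
    llm = ChatOpenAI(streaming=True, callbacks=[handler])

    # Start the generation concurrently so tokens can be consumed as they arrive.
    task = asyncio.create_task(llm.apredict("Write a haiku about autumn."))
    async for token in handler.aiter():
        print(token, end="", flush=True)
    await task


asyncio.run(main())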
9d2bc6c9fd07-0
Source code for langchain.callbacks.streaming_stdout_final_only
"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Dict, List, Optional

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]


[docs]class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
    """Callback handler for streaming in agents.
    Only works with agents using LLMs that support streaming.

    Only the final output of the agent will be streamed.
    """

    [docs] def append_to_last_tokens(self, token: str) -> None:
        self.last_tokens.append(token)
        self.last_tokens_stripped.append(token.strip())
        if len(self.last_tokens) > len(self.answer_prefix_tokens):
            self.last_tokens.pop(0)
            self.last_tokens_stripped.pop(0)

    [docs] def check_if_answer_reached(self) -> bool:
        if self.strip_tokens:
            return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
        else:
            return self.last_tokens == self.answer_prefix_tokens

    def __init__(
        self,
        *,
        answer_prefix_tokens: Optional[List[str]] = None,
        strip_tokens: bool = True,
        stream_prefix: bool = False
    ) -> None:
        """Instantiate FinalStreamingStdOutCallbackHandler.

        Args:
            answer_prefix_tokens: Token sequence that prefixes the answer.
                Default is ["Final", "Answer", ":"]
            strip_tokens: Ignore white spaces and new lines when comparing
                answer_prefix_tokens to last tokens? (to determine if the answer
                has been reached)
            stream_prefix: Should the answer prefix itself also be streamed?
        """
        super().__init__()
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/streaming_stdout_final_only.html
9d2bc6c9fd07-1
""" super().__init__() if answer_prefix_tokens is None: self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS else: self.answer_prefix_tokens = answer_prefix_tokens if strip_tokens: self.answer_prefix_tokens_stripped = [ token.strip() for token in self.answer_prefix_tokens ] else: self.answer_prefix_tokens_stripped = self.answer_prefix_tokens self.last_tokens = [""] * len(self.answer_prefix_tokens) self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens) self.strip_tokens = strip_tokens self.stream_prefix = stream_prefix self.answer_reached = False [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when LLM starts running.""" self.answer_reached = False [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Run on new LLM token. Only available when streaming is enabled.""" # Remember the last n tokens, where n = len(answer_prefix_tokens) self.append_to_last_tokens(token) # Check if the last n tokens match the answer_prefix_tokens list ... if self.check_if_answer_reached(): self.answer_reached = True if self.stream_prefix: for t in self.last_tokens: sys.stdout.write(t) sys.stdout.flush() return # ... if yes, then print tokens from now on if self.answer_reached: sys.stdout.write(token) sys.stdout.flush()
https://api.python.langchain.com/en/stable/_modules/langchain/callbacks/streaming_stdout_final_only.html
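A usage sketch for the handler above (hedged assumptions: an agent backed by an LLM created with streaming=True, the llm-math tool, and a throwaway question; only the text after the "Final Answer:" prefix is written to stdout):

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks.streaming_stdout_final_only import (
    FinalStreamingStdOutCallbackHandler,
)
from langchain.llms import OpenAI

# Intermediate reasoning steps stay silent; streaming to stdout starts once
# the answer prefix tokens have been seen.
llm = OpenAI(
    streaming=True,
    callbacks=[FinalStreamingStdOutCallbackHandler()],
    temperature=0,
)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
agent.run("What is 2 raised to the 0.235 power?")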