Source code for langchain.embeddings.huggingface
https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html

"""Wrapper around HuggingFace embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, Field

from langchain.embeddings.base import Embeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)


class HuggingFaceEmbeddings(BaseModel, Embeddings):
    """Wrapper around sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'cpu'}
            hf = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            import sentence_transformers
        except ImportError as exc:
            raise ValueError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence_transformers`."
            ) from exc

        self.client = sentence_transformers.SentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.client.encode(texts, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.client.encode(text, **self.encode_kwargs)
        return embedding.tolist()


class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
    """Wrapper around sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers``
    and ``InstructorEmbedding`` python packages installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceInstructEmbeddings

            model_name = "hkunlp/instructor-large"
            model_kwargs = {'device': 'cpu'}
            hf = HuggingFaceInstructEmbeddings(
                model_name=model_name, model_kwargs=model_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding query."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            from InstructorEmbedding import INSTRUCTOR

            self.client = INSTRUCTOR(
                self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
            )
        except ImportError as e:
            raise ValueError("Dependencies for InstructorEmbedding not found.") from e

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = [[self.embed_instruction, text] for text in texts]
        embeddings = self.client.encode(instruction_pairs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client.encode([instruction_pair])[0]
        return embedding.tolist()
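A minimal usage sketch for the two wrappers above (assuming ``sentence_transformers`` is installed; the model is downloaded on first use):

from langchain.embeddings import HuggingFaceEmbeddings

hf = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

# embed_documents returns one vector per text; embed_query returns a single vector.
doc_vectors = hf.embed_documents(["Paris is the capital of France.", "The sky is blue."])
query_vector = hf.embed_query("What is the capital of France?")
print(len(doc_vectors), len(query_vector))  # 2, 768 (all-mpnet-base-v2 is 768-dimensional)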
Source code for langchain.embeddings.llamacpp
https://python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html

"""Wrapper around llama.cpp embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.embeddings.base import Embeddings


class LlamaCppEmbeddings(BaseModel, Embeddings):
    """Wrapper around llama.cpp embedding models.

    To use, you should have the llama-cpp-python library installed, and provide the
    path to the Llama model as a named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain.embeddings import LlamaCppEmbeddings
            llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
    """

    client: Any  #: :meta private:
    model_path: str

    n_ctx: int = Field(512, alias="n_ctx")
    """Token context window."""

    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""

    seed: int = Field(-1, alias="seed")
    """Seed. If -1, a random seed is used."""

    f16_kv: bool = Field(False, alias="f16_kv")
    """Use half-precision for key/value cache."""

    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""

    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""

    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""

    n_threads: Optional[int] = Field(None, alias="n_threads")
    """Number of threads to use. If None, the number
    of threads is automatically determined."""

    n_batch: Optional[int] = Field(8, alias="n_batch")
    """Number of tokens to process in parallel.
    Should be a number between 1 and n_ctx."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that llama-cpp-python library is installed."""
        model_path = values["model_path"]
        n_ctx = values["n_ctx"]
        n_parts = values["n_parts"]
        seed = values["seed"]
        f16_kv = values["f16_kv"]
        logits_all = values["logits_all"]
        vocab_only = values["vocab_only"]
        use_mlock = values["use_mlock"]
        n_threads = values["n_threads"]
        n_batch = values["n_batch"]

        try:
            from llama_cpp import Llama

            values["client"] = Llama(
                model_path=model_path,
                n_ctx=n_ctx,
                n_parts=n_parts,
                seed=seed,
                f16_kv=f16_kv,
                logits_all=logits_all,
                vocab_only=vocab_only,
                use_mlock=use_mlock,
                n_threads=n_threads,
                n_batch=n_batch,
                embedding=True,
            )
        except ImportError:
            raise ModuleNotFoundError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )
        except Exception:
            raise NameError(f"Could not load Llama model from path: {model_path}")

        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using the Llama model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = [self.client.embed(text) for text in texts]
        return [list(map(float, e)) for e in embeddings]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using the Llama model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(text)
        return list(map(float, embedding))
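A usage sketch for LlamaCppEmbeddings; the model path below is a placeholder and must point at a local model file compatible with your installed llama-cpp-python version:

from langchain.embeddings import LlamaCppEmbeddings

# Placeholder path; supply a real model file.
llama = LlamaCppEmbeddings(model_path="/path/to/model.bin", n_ctx=512)

doc_vectors = llama.embed_documents(["first document", "second document"])
query_vector = llama.embed_query("a question about the documents")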
Source code for langchain.embeddings.sagemaker_endpoint
https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html

"""Wrapper around Sagemaker InvokeEndpoint API."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase


class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]):
    """Content handler for embedding model endpoints."""


class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    """Wrapper around custom Sagemaker Inference Endpoints.

    To use, you must supply the endpoint name from your deployed
    Sagemaker model & the region where it is deployed.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Sagemaker endpoint.
    See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
    """

    """
    Example:
        .. code-block:: python

            from langchain.embeddings import SagemakerEndpointEmbeddings
            endpoint_name = "my-endpoint-name"
            region_name = "us-west-2"
            credentials_profile_name = "default"
            se = SagemakerEndpointEmbeddings(
                endpoint_name=endpoint_name,
                region_name=region_name,
                credentials_profile_name=credentials_profile_name
            )
    """

    client: Any  #: :meta private:

    endpoint_name: str = ""
    """The name of the endpoint from the deployed Sagemaker model.
    Must be unique within an AWS Region."""

    region_name: str = ""
    """The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""

    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
    has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    content_handler: EmbeddingsContentHandler
    """The content handler class that provides the input and
    output transform functions to handle formats between the model
    and the endpoint.
    """

    """
    Example:
        .. code-block:: python

            from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler

            class ContentHandler(EmbeddingsContentHandler):
                content_type = "application/json"
                accepts = "application/json"

                def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"prompts": prompts, **model_kwargs})
                    return input_str.encode('utf-8')

                def transform_output(self, output: bytes) -> List[List[float]]:
                    response_json = json.loads(output.read().decode("utf-8"))
                    return response_json["vectors"]
    """  # noqa: E501

    model_kwargs: Optional[Dict] = None
    """Keyword arguments to pass to the model."""

    endpoint_kwargs: Optional[Dict] = None
    """Optional attributes passed to the invoke_endpoint
    function. See `boto3`_. docs for more info.
    .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
    """

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that AWS credentials and the boto3 python package exist in the environment."""
        try:
            import boto3

            try:
                if values["credentials_profile_name"] is not None:
                    session = boto3.Session(
                        profile_name=values["credentials_profile_name"]
                    )
                else:
                    # use default credentials
                    session = boto3.Session()

                values["client"] = session.client(
                    "sagemaker-runtime", region_name=values["region_name"]
                )
            except Exception as e:
                raise ValueError(
                    "Could not load credentials to authenticate with AWS client. "
                    "Please check that credentials in the specified "
                    "profile name are valid."
                ) from e
        except ImportError:
            raise ValueError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        return values

    def _embedding_func(self, texts: List[str]) -> List[List[float]]:
        """Call out to SageMaker Inference embedding endpoint."""
        # replace newlines, which can negatively affect performance.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        _model_kwargs = self.model_kwargs or {}
        _endpoint_kwargs = self.endpoint_kwargs or {}

        body = self.content_handler.transform_input(texts, _model_kwargs)
        content_type = self.content_handler.content_type
        accepts = self.content_handler.accepts

        # send request
        try:
            response = self.client.invoke_endpoint(
                EndpointName=self.endpoint_name,
                Body=body,
                ContentType=content_type,
                Accept=accepts,
                **_endpoint_kwargs,
            )
        except Exception as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        return self.content_handler.transform_output(response["Body"])

    def embed_documents(
        self, texts: List[str], chunk_size: int = 64
    ) -> List[List[float]]:
        """Compute doc embeddings using a SageMaker Inference Endpoint.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size defines how many input texts will
                be grouped together as one request. If None, will use the
                chunk size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        results = []
        _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
        for i in range(0, len(texts), _chunk_size):
            response = self._embedding_func(texts[i : i + _chunk_size])
            results.extend(response)
        return results

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a SageMaker inference endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embedding_func([text])[0]
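A sketch of wiring up a content handler end to end, assuming a hypothetical endpoint whose container accepts a JSON body with a ``text_inputs`` key and returns an ``embedding`` key — both names depend entirely on the deployed model and are assumptions here:

import json
from typing import Dict, List

from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler


class ContentHandler(EmbeddingsContentHandler):
    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
        # The JSON shape must match what the deployed container expects (assumed here).
        return json.dumps({"text_inputs": prompts, **model_kwargs}).encode("utf-8")

    def transform_output(self, output: bytes) -> List[List[float]]:
        response_json = json.loads(output.read().decode("utf-8"))
        return response_json["embedding"]  # assumed response key


se = SagemakerEndpointEmbeddings(
    endpoint_name="my-endpoint-name",  # placeholder
    region_name="us-west-2",
    content_handler=ContentHandler(),
)
vectors = se.embed_documents(["hello", "world"], chunk_size=2)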
Source code for langchain.embeddings.huggingface_hub
https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html

"""Wrapper around HuggingFace Hub embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2"
VALID_TASKS = ("feature-extraction",)


class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
    """Wrapper around HuggingFaceHub embedding models.

    To use, you should have the ``huggingface_hub`` python package installed, and the
    environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceHubEmbeddings
            repo_id = "sentence-transformers/all-mpnet-base-v2"
            hf = HuggingFaceHubEmbeddings(
                repo_id=repo_id,
                task="feature-extraction",
                huggingfacehub_api_token="my-api-key",
            )
    """

    client: Any  #: :meta private:
    repo_id: str = DEFAULT_REPO_ID
    """Model name to use."""
    task: Optional[str] = "feature-extraction"
    """Task to call the model with."""
    model_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model."""

    huggingfacehub_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        huggingfacehub_api_token = get_from_dict_or_env(
            values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
        )
        try:
            from huggingface_hub.inference_api import InferenceApi

            repo_id = values["repo_id"]
            if not repo_id.startswith("sentence-transformers"):
                raise ValueError(
                    "Currently only 'sentence-transformers' embedding models "
                    f"are supported. Got invalid 'repo_id' {repo_id}."
                )
            client = InferenceApi(
                repo_id=repo_id,
                token=huggingfacehub_api_token,
                task=values.get("task"),
            )
            if client.task not in VALID_TASKS:
                raise ValueError(
                    f"Got invalid task {client.task}, "
                    f"currently only {VALID_TASKS} are supported"
                )
            values["client"] = client
        except ImportError:
            raise ValueError(
                "Could not import huggingface_hub python package. "
                "Please install it with `pip install huggingface_hub`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # replace newlines, which can negatively affect performance.
        texts = [text.replace("\n", " ") for text in texts]
        _model_kwargs = self.model_kwargs or {}
        responses = self.client(inputs=texts, params=_model_kwargs)
        return responses

    def embed_query(self, text: str) -> List[float]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        response = self.embed_documents([text])[0]
        return response
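A usage sketch; the token below is a placeholder and can also be supplied directly via the ``huggingfacehub_api_token`` constructor parameter:

import os

from langchain.embeddings import HuggingFaceHubEmbeddings

os.environ["HUGGINGFACEHUB_API_TOKEN"] = "my-api-key"  # placeholder token

hf = HuggingFaceHubEmbeddings(repo_id="sentence-transformers/all-mpnet-base-v2")
doc_vectors = hf.embed_documents(["hello world"])
query_vector = hf.embed_query("hello")  # delegates to embed_documents internally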
Source code for langchain.embeddings.fake
https://python.langchain.com/en/latest/_modules/langchain/embeddings/fake.html

from typing import List

import numpy as np
from pydantic import BaseModel

from langchain.embeddings.base import Embeddings


class FakeEmbeddings(Embeddings, BaseModel):
    size: int

    def _get_embedding(self) -> List[float]:
        return list(np.random.normal(size=self.size))

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._get_embedding() for _ in texts]

    def embed_query(self, text: str) -> List[float]:
        return self._get_embedding()
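FakeEmbeddings is handy in tests, where a vector store needs embeddings of a fixed dimensionality without any model call:

from langchain.embeddings.fake import FakeEmbeddings

fake = FakeEmbeddings(size=1536)  # pick the dimensionality you want to simulate

vectors = fake.embed_documents(["any", "texts"])
assert len(vectors) == 2 and len(vectors[0]) == 1536
# Vectors are drawn at random, so repeated calls do not match.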
Source code for langchain.embeddings.openai
https://python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html

"""Wrapper around OpenAI embedding models."""
from __future__ import annotations

import logging
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Set,
    Tuple,
    Union,
)

import numpy as np
from pydantic import BaseModel, Extra, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        return embeddings.client.create(**kwargs)

    return _embed_with_retry(**kwargs)


class OpenAIEmbeddings(BaseModel, Embeddings):
    """Wrapper around OpenAI embedding models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import OpenAIEmbeddings
            openai = OpenAIEmbeddings(openai_api_key="my-api-key")

    In order to use the library with Microsoft Azure endpoints, you need to set
    the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and optionally
    OPENAI_API_VERSION. The OPENAI_API_TYPE must be set to 'azure' and the others
    correspond to the properties of your endpoint. In addition, the deployment
    name must be passed as the model parameter.

    Example:
        .. code-block:: python

            import os
            os.environ["OPENAI_API_TYPE"] = "azure"
            os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
            os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"

            from langchain.embeddings.openai import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings(
                deployment="your-embeddings-deployment-name",
                model="your-embeddings-model-name",
                api_base="https://your-endpoint.openai.azure.com/",
                api_type="azure",
            )
            text = "This is a test query."
            query_result = embeddings.embed_query(text)
    """

    client: Any  #: :meta private:
    model: str = "text-embedding-ada-002"
    deployment: str = model  # to support Azure OpenAI Service custom deployment names
    openai_api_version: str = "2022-12-01"  # to support Azure OpenAI Service custom endpoints
    openai_api_base: Optional[str] = None  # to support Azure OpenAI Service custom endpoints
    openai_api_type: Optional[str] = None
    embedding_ctx_length: int = 8191
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Tuple[()]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch"""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        openai_api_key = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        openai_api_base = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        openai_api_type = get_from_dict_or_env(
            values,
            "openai_api_type",
            "OPENAI_API_TYPE",
            default="",
        )
        openai_api_version = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
        )
        openai_organization = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            openai.api_key = openai_api_key
            if openai_organization:
                openai.organization = openai_organization
            if openai_api_base:
                openai.api_base = openai_api_base
                openai.api_version = openai_api_version
            if openai_api_type:
                openai.api_type = openai_api_type
            values["client"] = openai.Embedding
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    def _get_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken

            tokens = []
            indices = []
            encoding = tiktoken.model.encoding_for_model(self.model)
            for i, text in enumerate(texts):
                if self.model.endswith("001"):
                    # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                    # replace newlines, which can negatively affect performance.
                    text = text.replace("\n", " ")
                token = encoding.encode(
                    text,
                    allowed_special=self.allowed_special,
                    disallowed_special=self.disallowed_special,
                )
                for j in range(0, len(token), self.embedding_ctx_length):
                    tokens += [token[j : j + self.embedding_ctx_length]]
                    indices += [i]

            batched_embeddings = []
            _chunk_size = chunk_size or self.chunk_size
            for i in range(0, len(tokens), _chunk_size):
                response = embed_with_retry(
                    self,
                    input=tokens[i : i + _chunk_size],
                    engine=self.deployment,
                )
                batched_embeddings += [r["embedding"] for r in response["data"]]

            results: List[List[List[float]]] = [[] for _ in range(len(texts))]
            num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
            for i in range(len(indices)):
                results[indices[i]].append(batched_embeddings[i])
                num_tokens_in_batch[indices[i]].append(len(tokens[i]))

            for i in range(len(texts)):
                _result = results[i]
                if len(_result) == 0:
                    average = embed_with_retry(self, input="", engine=self.deployment)[
                        "data"
                    ][0]["embedding"]
                else:
                    average = np.average(
                        _result, axis=0, weights=num_tokens_in_batch[i]
                    )
                embeddings[i] = (average / np.linalg.norm(average)).tolist()

            return embeddings
        except ImportError:
            raise ValueError(
                "Could not import tiktoken python package. "
                "This is needed in order to use OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

    def _embedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint."""
        # handle large input text
        if len(text) > self.embedding_ctx_length:
            return self._get_len_safe_embeddings([text], engine=engine)[0]
        else:
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            return embed_with_retry(self, input=[text], engine=engine)["data"][0][
                "embedding"
            ]

    def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE: to keep things simple, we assume the list may contain texts longer
        #       than the maximum context and use length-safe embedding function.
        return self._get_len_safe_embeddings(texts, engine=self.deployment)

    def embed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = self._embedding_func(text, engine=self.deployment)
        return embedding
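For long inputs, ``_get_len_safe_embeddings`` splits the token sequence into windows of ``embedding_ctx_length``, embeds each window separately, and then recombines the per-window vectors. A small self-contained sketch of just that combination step (the function name here is illustrative, not part of the library):

import numpy as np


def combine_chunk_embeddings(chunk_vectors, chunk_token_counts):
    """Token-count-weighted average of per-chunk vectors, renormalized to
    unit length -- mirroring the combination step in the code above."""
    average = np.average(chunk_vectors, axis=0, weights=chunk_token_counts)
    return (average / np.linalg.norm(average)).tolist()


# A long document split into an 8191-token chunk and a 1000-token remainder:
combined = combine_chunk_embeddings([np.ones(4), np.zeros(4)], [8191, 1000])
print(combined)  # unit-length vector dominated by the larger chunk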
Source code for langchain.embeddings.self_hosted_hugging_face
https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html

"""Wrapper around HuggingFace embedding models for self-hosted remote hardware."""
import importlib
import logging
from typing import Any, Callable, List, Optional

from langchain.embeddings.self_hosted import SelfHostedEmbeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)

logger = logging.getLogger(__name__)


def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
    """Inference function to send to the remote hardware.

    Accepts a sentence_transformer model_id and
    returns a list of embeddings for each document in the batch.
    """
    return client.encode(*args, **kwargs)


def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any:
    """Load the embedding model."""
    if not instruct:
        import sentence_transformers

        client = sentence_transformers.SentenceTransformer(model_id)
    else:
        from InstructorEmbedding import INSTRUCTOR

        client = INSTRUCTOR(model_id)

    if importlib.util.find_spec("torch") is not None:
        import torch

        cuda_device_count = torch.cuda.device_count()
        if device < -1 or (device >= cuda_device_count):
            raise ValueError(
                f"Got device=={device}, "
                f"device is required to be within [-1, {cuda_device_count})"
            )
        if device < 0 and cuda_device_count > 0:
            logger.warning(
                "Device has %d GPUs available. "
                "Provide device={deviceId} to `from_model_id` to use available "
                "GPUs for execution. deviceId is -1 for CPU and "
                "can be a positive integer associated with CUDA device id.",
                cuda_device_count,
            )

        client = client.to(device)
    return client


class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
    """Runs sentence_transformers embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another cloud
    like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
            import runhouse as rh
            model_name = "sentence-transformers/all-mpnet-base-v2"
            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
    """

    client: Any  #: :meta private:
    model_id: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    model_reqs: List[str] = ["./", "sentence_transformers", "torch"]
    """Requirements to install on hardware to inference the model."""
    hardware: Any
    """Remote hardware to send the inference function to."""
    model_load_fn: Callable = load_embedding_model
    """Function to load the model remotely on the server."""
    load_fn_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model load function."""
    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings."""

    def __init__(self, **kwargs: Any):
        """Initialize the remote inference function."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME)
        load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False)
        load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)


class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
    """Runs InstructorEmbedding embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another cloud
    like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
            import runhouse as rh
            model_name = "hkunlp/instructor-large"
            gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
            hf = SelfHostedHuggingFaceInstructEmbeddings(
                model_name=model_name, hardware=gpu)
    """

    model_id: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding query."""
    model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"]
    """Requirements to install on hardware to inference the model."""

    def __init__(self, **kwargs: Any):
        """Initialize the remote inference function."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs["model_id"] = load_fn_kwargs.get(
            "model_id", DEFAULT_INSTRUCT_MODEL
        )
        load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True)
        load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = []
        for text in texts:
            instruction_pairs.append([self.embed_instruction, text])
        embeddings = self.client(self.pipeline_ref, instruction_pairs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
        return embedding.tolist()
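A usage sketch; running it requires cloud credentials for ``runhouse`` to launch the cluster, and the cluster name and instance type below are illustrative:

import runhouse as rh

from langchain.embeddings import SelfHostedHuggingFaceEmbeddings

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")  # illustrative cluster
hf = SelfHostedHuggingFaceEmbeddings(
    hardware=gpu,
    load_fn_kwargs={"device": 0},  # forwarded to load_embedding_model above
)
vectors = hf.embed_documents(["remote embedding test"])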
Source code for langchain.embeddings.tensorflow_hub
https://python.langchain.com/en/latest/_modules/langchain/embeddings/tensorflow_hub.html

"""Wrapper around TensorflowHub embedding models."""
from typing import Any, List

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings

DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"


class TensorflowHubEmbeddings(BaseModel, Embeddings):
    """Wrapper around tensorflow_hub embedding models.

    To use, you should have the ``tensorflow_text`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import TensorflowHubEmbeddings
            url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
            tf = TensorflowHubEmbeddings(model_url=url)
    """

    embed: Any  #: :meta private:
    model_url: str = DEFAULT_MODEL_URL
    """Model name to use."""

    def __init__(self, **kwargs: Any):
        """Initialize the tensorflow_hub and tensorflow_text."""
        super().__init__(**kwargs)
        try:
            import tensorflow_hub
            import tensorflow_text  # noqa

            self.embed = tensorflow_hub.load(self.model_url)
        except ImportError as e:
            raise ValueError(
                "Could not import tensorflow_hub or tensorflow_text python packages. "
                "Please install them with "
                "`pip install tensorflow_hub tensorflow_text`."
            ) from e

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a TensorflowHub embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.embed(texts).numpy()
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a TensorflowHub embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.embed([text]).numpy()[0]
        return embedding.tolist()
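A usage sketch with the default multilingual universal-sentence-encoder model (downloaded from TF Hub on first use):

from langchain.embeddings import TensorflowHubEmbeddings

tf_embed = TensorflowHubEmbeddings()  # uses DEFAULT_MODEL_URL
doc_vectors = tf_embed.embed_documents(["hola mundo", "hello world"])
query_vector = tf_embed.embed_query("greetings")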
Source code for langchain.embeddings.self_hosted
https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html

"""Running custom embedding models on self-hosted remote hardware."""
from typing import Any, Callable, List

from pydantic import Extra

from langchain.embeddings.base import Embeddings
from langchain.llms import SelfHostedPipeline


def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
    """Inference function to send to the remote hardware.

    Accepts a sentence_transformer model_id and
    returns a list of embeddings for each document in the batch.
    """
    return pipeline(*args, **kwargs)


class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
    """Runs custom embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another
    cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example using a model load function:
        .. code-block:: python

            from langchain.embeddings import SelfHostedEmbeddings
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
            import runhouse as rh

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            def get_pipeline():
                model_id = "facebook/bart-large"
                tokenizer = AutoTokenizer.from_pretrained(model_id)
                model = AutoModelForCausalLM.from_pretrained(model_id)
                return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
            embeddings = SelfHostedEmbeddings(
                model_load_fn=get_pipeline,
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )
    Example passing in a pipeline path:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHFEmbeddings
            import runhouse as rh
            from transformers import pipeline

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
            rh.blob(pickle.dumps(pipeline),
                path="models/pipeline.pkl").save().to(gpu, path="models")
            embeddings = SelfHostedHFEmbeddings.from_pipeline(
                pipeline="models/pipeline.pkl",
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )
    """

    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings on the remote hardware."""
    inference_kwargs: Any = None
    """Any kwargs to pass to the model's inference function."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.client(self.pipeline_ref, texts)
        if not isinstance(embeddings, list):
            return embeddings.tolist()
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embeddings = self.client(self.pipeline_ref, text)
        if not isinstance(embeddings, list):
            return embeddings.tolist()
        return embeddings
Source code for langchain.prompts.loading
https://python.langchain.com/en/latest/_modules/langchain/prompts/loading.html

"""Load prompts from disk."""
import importlib
import json
import logging
from pathlib import Path
from typing import Union

import yaml

from langchain.output_parsers.regex import RegexParser
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.utilities.loading import try_load_from_hub

URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__name__)


def load_prompt_from_config(config: dict) -> BasePromptTemplate:
    """Load prompt from Config Dict."""
    if "_type" not in config:
        logger.warning("No `_type` key found, defaulting to `prompt`.")
    config_type = config.pop("_type", "prompt")

    if config_type not in type_to_loader_dict:
        raise ValueError(f"Loading {config_type} prompt not supported")

    prompt_loader = type_to_loader_dict[config_type]
    return prompt_loader(config)


def _load_template(var_name: str, config: dict) -> dict:
    """Load template from disk if applicable."""
    # Check if template_path exists in config.
    if f"{var_name}_path" in config:
        # If it does, make sure template variable doesn't also exist.
        if var_name in config:
            raise ValueError(
                f"Both `{var_name}_path` and `{var_name}` cannot be provided."
            )
        # Pop the template path from the config.
        template_path = Path(config.pop(f"{var_name}_path"))
        # Load the template.
        if template_path.suffix == ".txt":
            with open(template_path) as f:
                template = f.read()
        else:
            raise ValueError(
                f"Unsupported template file format: {template_path.suffix}"
            )
        # Set the template variable to the extracted variable.
        config[var_name] = template
    return config


def _load_examples(config: dict) -> dict:
    """Load examples if necessary."""
    if isinstance(config["examples"], list):
        pass
    elif isinstance(config["examples"], str):
        with open(config["examples"]) as f:
            if config["examples"].endswith(".json"):
                examples = json.load(f)
            elif config["examples"].endswith((".yaml", ".yml")):
                examples = yaml.safe_load(f)
            else:
                raise ValueError(
                    "Invalid file format. Only json or yaml formats are supported."
                )
        config["examples"] = examples
    else:
        raise ValueError("Invalid examples format. Only list or string are supported.")
    return config


def _load_output_parser(config: dict) -> dict:
    """Load output parser."""
    if "output_parsers" in config:
        if config["output_parsers"] is not None:
            _config = config["output_parsers"]
            output_parser_type = _config["_type"]
            if output_parser_type == "regex_parser":
                output_parser = RegexParser(**_config)
            else:
                raise ValueError(f"Unsupported output parser {output_parser_type}")
            config["output_parsers"] = output_parser
    return config


def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
    """Load the few shot prompt from the config."""
    # Load the suffix and prefix templates.
    config = _load_template("suffix", config)
    config = _load_template("prefix", config)
    # Load the example prompt.
    if "example_prompt_path" in config:
        if "example_prompt" in config:
            raise ValueError(
                "Only one of example_prompt and example_prompt_path should "
                "be specified."
            )
        config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
    else:
        config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
    # Load the examples.
    config = _load_examples(config)
    config = _load_output_parser(config)
    return FewShotPromptTemplate(**config)


def _load_prompt(config: dict) -> PromptTemplate:
    """Load the prompt template from config."""
    # Load the template from disk if necessary.
    config = _load_template("template", config)
    config = _load_output_parser(config)
    return PromptTemplate(**config)


def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
    """Unified method for loading a prompt from LangChainHub or local fs."""
    if hub_result := try_load_from_hub(
        path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
    ):
        return hub_result
    else:
        return _load_prompt_from_file(path)


def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
    """Load prompt from file."""
    # Convert file to Path object.
    if isinstance(file, str):
        file_path = Path(file)
    else:
        file_path = file
    # Load from either json or yaml.
    if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix == ".yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    elif file_path.suffix == ".py":
        spec = importlib.util.spec_from_loader(
            "prompt", loader=None, origin=str(file_path)
        )
        if spec is None:
            raise ValueError("could not load spec")
        helper = importlib.util.module_from_spec(spec)
        with open(file_path, "rb") as f:
            exec(f.read(), helper.__dict__)
        if not isinstance(helper.PROMPT, BasePromptTemplate):
            raise ValueError("Did not get object of type BasePromptTemplate.")
        return helper.PROMPT
    else:
        raise ValueError(f"Got unsupported file type {file_path.suffix}")
    # Load the prompt from the config now.
    return load_prompt_from_config(config)


type_to_loader_dict = {
    "prompt": _load_prompt,
    "few_shot": _load_few_shot_prompt,
    # "few_shot_with_templates": _load_few_shot_with_templates_prompt,
}
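Tying the loaders together: a minimal on-disk config whose ``_type`` key selects the loader from ``type_to_loader_dict``, round-tripped through ``load_prompt``:

import json
from pathlib import Path

from langchain.prompts.loading import load_prompt

# Write a minimal prompt config; "_type": "prompt" routes to _load_prompt.
config = {
    "_type": "prompt",
    "input_variables": ["topic"],
    "template": "Tell me a joke about {topic}.",
}
Path("joke_prompt.json").write_text(json.dumps(config))

prompt = load_prompt("joke_prompt.json")
print(prompt.format(topic="cats"))  # -> "Tell me a joke about cats."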
Source code for langchain.prompts.prompt
https://python.langchain.com/en/latest/_modules/langchain/prompts/prompt.html

"""Prompt schema definition."""
from __future__ import annotations

from pathlib import Path
from string import Formatter
from typing import Any, Dict, List, Union

from pydantic import Extra, root_validator

from langchain.prompts.base import (
    DEFAULT_FORMATTER_MAPPING,
    StringPromptTemplate,
    _get_jinja2_variables_from_template,
    check_valid_template,
)


class PromptTemplate(StringPromptTemplate):
    """Schema to represent a prompt for an LLM.

    Example:
        .. code-block:: python

            from langchain import PromptTemplate
            prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}")
    """

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""

    template: str
    """The prompt template."""

    template_format: str = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""

    validate_template: bool = True
    """Whether or not to try validating the template."""

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "prompt"

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs)

    @root_validator()
    def template_is_valid(cls, values: Dict) -> Dict:
        """Check that template and input variables are consistent."""
        if values["validate_template"]:
            all_inputs = values["input_variables"] + list(values["partial_variables"])
            check_valid_template(
                values["template"], values["template_format"], all_inputs
            )
        return values

    @classmethod
    def from_examples(
        cls,
        examples: List[str],
        suffix: str,
        input_variables: List[str],
        example_separator: str = "\n\n",
        prefix: str = "",
        **kwargs: Any,
    ) -> PromptTemplate:
        """Take examples in list format with prefix and suffix to create a prompt.

        Intended to be used as a way to dynamically create a prompt from examples.

        Args:
            examples: List of examples to use in the prompt.
            suffix: String to go after the list of examples. Should generally
                set up the user's input.
            input_variables: A list of variable names the final prompt template
                will expect.
            example_separator: The separator to use in between examples. Defaults
                to two new line characters.
            prefix: String that should go before any examples. Generally includes
                instructions. Defaults to an empty string.

        Returns:
            The final prompt generated.
        """
        template = example_separator.join([prefix, *examples, suffix])
        return cls(input_variables=input_variables, template=template, **kwargs)

    @classmethod
    def from_file(
        cls, template_file: Union[str, Path], input_variables: List[str], **kwargs: Any
    ) -> PromptTemplate:
        """Load a prompt from a file.

        Args:
            template_file: The path to the file containing the prompt template.
            input_variables: A list of variable names the final prompt template
                will expect.

        Returns:
            The prompt loaded from the file.
        """
        with open(str(template_file), "r") as f:
            template = f.read()
        return cls(input_variables=input_variables, template=template, **kwargs)

    @classmethod
    def from_template(cls, template: str, **kwargs: Any) -> PromptTemplate:
        """Load a prompt template from a template."""
        if "template_format" in kwargs and kwargs["template_format"] == "jinja2":
            # Get the variables for the template
            input_variables = _get_jinja2_variables_from_template(template)
        else:
            input_variables = {
                v for _, v, _, _ in Formatter().parse(template) if v is not None
            }
        return cls(
            input_variables=list(sorted(input_variables)), template=template, **kwargs
        )


# For backwards compatibility.
Prompt = PromptTemplate
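A short sketch of ``from_template``, which infers the input variables with ``string.Formatter`` as implemented above:

from langchain.prompts.prompt import PromptTemplate

prompt = PromptTemplate.from_template("What is the capital of {country}?")
assert prompt.input_variables == ["country"]  # inferred from the template
print(prompt.format(country="France"))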
Source code for langchain.prompts.few_shot
https://python.langchain.com/en/latest/_modules/langchain/prompts/few_shot.html

"""Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.prompts.base import (
    DEFAULT_FORMATTER_MAPPING,
    StringPromptTemplate,
    check_valid_template,
)
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate


class FewShotPromptTemplate(StringPromptTemplate):
    """Prompt template that contains few shot examples."""

    examples: Optional[List[dict]] = None
    """Examples to format into the prompt.
    Either this or example_selector should be provided."""

    example_selector: Optional[BaseExampleSelector] = None
    """ExampleSelector to choose the examples to format into the prompt.
    Either this or examples should be provided."""

    example_prompt: PromptTemplate
    """PromptTemplate used to format an individual example."""

    suffix: str
    """A prompt template string to put after the examples."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""

    example_separator: str = "\n\n"
    """String separator used to join the prefix, the examples, and suffix."""

    prefix: str = ""
    """A prompt template string to put before the examples."""

    template_format: str = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""

    validate_template: bool = True
    """Whether or not to try validating the template."""

    @root_validator(pre=True)
    def check_examples_and_selector(cls, values: Dict) -> Dict:
        """Check that one and only one of examples/example_selector is provided."""
        examples = values.get("examples", None)
        example_selector = values.get("example_selector", None)
        if examples and example_selector:
            raise ValueError(
                "Only one of 'examples' and 'example_selector' should be provided"
            )
        if examples is None and example_selector is None:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )
        return values

    @root_validator()
    def template_is_valid(cls, values: Dict) -> Dict:
        """Check that prefix, suffix and input variables are consistent."""
        if values["validate_template"]:
            check_valid_template(
                values["prefix"] + values["suffix"],
                values["template_format"],
                values["input_variables"] + list(values["partial_variables"]),
            )
        return values

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def _get_examples(self, **kwargs: Any) -> List[dict]:
        if self.examples is not None:
            return self.examples
        elif self.example_selector is not None:
            return self.example_selector.select_examples(kwargs)
        else:
            raise ValueError

    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        # Get the examples to use.
        examples = self._get_examples(**kwargs)
        # Format the examples.
        example_strings = [
            self.example_prompt.format(**example) for example in examples
        ]
        # Create the overall template.
        pieces = [self.prefix, *example_strings, self.suffix]
        template = self.example_separator.join([piece for piece in pieces if piece])
        # Format the template with the input variables.
        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "few_shot"

    def dict(self, **kwargs: Any) -> Dict:
        """Return a dictionary of the prompt."""
        if self.example_selector:
            raise ValueError("Saving an example selector is not currently supported")
        return super().dict(**kwargs)
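A usage sketch showing how the prefix, the formatted examples, and the suffix are joined by ``example_separator``:

from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\nAntonym: {antonym}",
)
few_shot = FewShotPromptTemplate(
    examples=[
        {"word": "happy", "antonym": "sad"},
        {"word": "tall", "antonym": "short"},
    ],
    example_prompt=example_prompt,
    prefix="Give the antonym of every input.",
    suffix="Word: {input}\nAntonym:",
    input_variables=["input"],
)
print(few_shot.format(input="big"))  # prefix, two examples, then the suffix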
45011ad08163-0
Source code for langchain.prompts.chat """Chat prompt template.""" from __future__ import annotations from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Callable, List, Sequence, Tuple, Type, Union from pydantic import BaseModel, Field from langchain.memory.buffer import get_buffer_string from langchain.prompts.base import BasePromptTemplate, StringPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( AIMessage, BaseMessage, ChatMessage, HumanMessage, PromptValue, SystemMessage, ) class BaseMessagePromptTemplate(BaseModel, ABC): @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """To messages.""" @property @abstractmethod def input_variables(self) -> List[str]: """Input variables for this prompt template.""" [docs]class MessagesPlaceholder(BaseMessagePromptTemplate): """Prompt template that assumes variable is already list of messages.""" variable_name: str [docs] def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """To a BaseMessage.""" value = kwargs[self.variable_name] if not isinstance(value, list): raise ValueError( f"variable {self.variable_name} should be a list of base messages, " f"got {value}" ) for v in value: if not isinstance(v, BaseMessage): raise ValueError( f"variable {self.variable_name} should be a list of base messages," f" got {value}" ) return value @property def input_variables(self) -> List[str]: """Input variables for this prompt template."""
https://python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
45011ad08163-1
def input_variables(self) -> List[str]: """Input variables for this prompt template.""" return [self.variable_name] class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC): prompt: StringPromptTemplate additional_kwargs: dict = Field(default_factory=dict) @classmethod def from_template(cls, template: str, **kwargs: Any) -> BaseMessagePromptTemplate: prompt = PromptTemplate.from_template(template) return cls(prompt=prompt, **kwargs) @abstractmethod def format(self, **kwargs: Any) -> BaseMessage: """To a BaseMessage.""" def format_messages(self, **kwargs: Any) -> List[BaseMessage]: return [self.format(**kwargs)] @property def input_variables(self) -> List[str]: return self.prompt.input_variables class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate): role: str def format(self, **kwargs: Any) -> BaseMessage: text = self.prompt.format(**kwargs) return ChatMessage( content=text, role=self.role, additional_kwargs=self.additional_kwargs ) class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate): def format(self, **kwargs: Any) -> BaseMessage: text = self.prompt.format(**kwargs) return HumanMessage(content=text, additional_kwargs=self.additional_kwargs) class AIMessagePromptTemplate(BaseStringMessagePromptTemplate): def format(self, **kwargs: Any) -> BaseMessage: text = self.prompt.format(**kwargs) return AIMessage(content=text, additional_kwargs=self.additional_kwargs) class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate): def format(self, **kwargs: Any) -> BaseMessage: text = self.prompt.format(**kwargs)
https://python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
45011ad08163-2
text = self.prompt.format(**kwargs) return SystemMessage(content=text, additional_kwargs=self.additional_kwargs) class ChatPromptValue(PromptValue): messages: List[BaseMessage] def to_string(self) -> str: """Return prompt as string.""" return get_buffer_string(self.messages) def to_messages(self) -> List[BaseMessage]: """Return prompt as messages.""" return self.messages [docs]class BaseChatPromptTemplate(BasePromptTemplate, ABC): [docs] def format(self, **kwargs: Any) -> str: return self.format_prompt(**kwargs).to_string() [docs] def format_prompt(self, **kwargs: Any) -> PromptValue: messages = self.format_messages(**kwargs) return ChatPromptValue(messages=messages) [docs] @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format kwargs into a list of messages.""" [docs]class ChatPromptTemplate(BaseChatPromptTemplate, ABC): input_variables: List[str] messages: List[Union[BaseMessagePromptTemplate, BaseMessage]] @classmethod def from_role_strings( cls, string_messages: List[Tuple[str, str]] ) -> ChatPromptTemplate: messages = [ ChatMessagePromptTemplate( prompt=PromptTemplate.from_template(template), role=role ) for role, template in string_messages ] return cls.from_messages(messages) @classmethod def from_strings( cls, string_messages: List[Tuple[Type[BaseMessagePromptTemplate], str]] ) -> ChatPromptTemplate: messages = [ role(prompt=PromptTemplate.from_template(template)) for role, template in string_messages ]
https://python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
45011ad08163-3
for role, template in string_messages ] return cls.from_messages(messages) @classmethod def from_messages( cls, messages: Sequence[Union[BaseMessagePromptTemplate, BaseMessage]] ) -> ChatPromptTemplate: input_vars = set() for message in messages: if isinstance(message, BaseMessagePromptTemplate): input_vars.update(message.input_variables) return cls(input_variables=list(input_vars), messages=messages) [docs] def format(self, **kwargs: Any) -> str: return self.format_prompt(**kwargs).to_string() [docs] def format_messages(self, **kwargs: Any) -> List[BaseMessage]: kwargs = self._merge_partial_and_user_variables(**kwargs) result = [] for message_template in self.messages: if isinstance(message_template, BaseMessage): result.extend([message_template]) elif isinstance(message_template, BaseMessagePromptTemplate): rel_params = { k: v for k, v in kwargs.items() if k in message_template.input_variables } message = message_template.format_messages(**rel_params) result.extend(message) else: raise ValueError(f"Unexpected input: {message_template}") return result [docs] def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate: raise NotImplementedError @property def _prompt_type(self) -> str: raise NotImplementedError [docs] def save(self, file_path: Union[Path, str]) -> None: raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
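A short sketch of how the message templates above compose into a ``ChatPromptTemplate``; the message text and variable names are illustrative:

.. code-block:: python

    from langchain.prompts.chat import (
        ChatPromptTemplate,
        HumanMessagePromptTemplate,
        SystemMessagePromptTemplate,
    )

    system = SystemMessagePromptTemplate.from_template(
        "You are a helpful assistant that translates {input_language} to {output_language}."
    )
    human = HumanMessagePromptTemplate.from_template("{text}")
    # from_messages collects the input variables from each sub-template.
    chat_prompt = ChatPromptTemplate.from_messages([system, human])
    # Returns [SystemMessage(...), HumanMessage(...)], each formatted with the
    # kwargs relevant to its own template.
    messages = chat_prompt.format_messages(
        input_language="English", output_language="French", text="I love programming."
    )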
14bd143874f4-0
Source code for langchain.prompts.base """BasePrompt schema definition.""" from __future__ import annotations import json from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Union import yaml from pydantic import BaseModel, Extra, Field, root_validator from langchain.formatting import formatter from langchain.schema import BaseMessage, BaseOutputParser, HumanMessage, PromptValue def jinja2_formatter(template: str, **kwargs: Any) -> str: """Format a template using jinja2.""" try: from jinja2 import Template except ImportError: raise ImportError( "jinja2 not installed, which is needed to use the jinja2_formatter. " "Please install it with `pip install jinja2`." ) return Template(template).render(**kwargs) def validate_jinja2(template: str, input_variables: List[str]) -> None: input_variables_set = set(input_variables) valid_variables = _get_jinja2_variables_from_template(template) missing_variables = valid_variables - input_variables_set extra_variables = input_variables_set - valid_variables error_message = "" if missing_variables: error_message += f"Missing variables: {missing_variables} " if extra_variables: error_message += f"Extra variables: {extra_variables}" if error_message: raise KeyError(error_message.strip()) def _get_jinja2_variables_from_template(template: str) -> Set[str]: try: from jinja2 import Environment, meta except ImportError: raise ImportError( "jinja2 not installed, which is needed to use the jinja2_formatter. "
https://python.langchain.com/en/latest/_modules/langchain/prompts/base.html
14bd143874f4-1
"jinja2 not installed, which is needed to use the jinja2_formatter. " "Please install it with `pip install jinja2`." ) env = Environment() ast = env.parse(template) variables = meta.find_undeclared_variables(ast) return variables DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = { "f-string": formatter.format, "jinja2": jinja2_formatter, } DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = { "f-string": formatter.validate_input_variables, "jinja2": validate_jinja2, } def check_valid_template( template: str, template_format: str, input_variables: List[str] ) -> None: """Check that template string is valid.""" if template_format not in DEFAULT_FORMATTER_MAPPING: valid_formats = list(DEFAULT_FORMATTER_MAPPING) raise ValueError( f"Invalid template format. Got `{template_format}`;" f" should be one of {valid_formats}" ) try: validator_func = DEFAULT_VALIDATOR_MAPPING[template_format] validator_func(template, input_variables) except KeyError as e: raise ValueError( "Invalid prompt schema; check for mismatched or missing input parameters. " + str(e) ) class StringPromptValue(PromptValue): text: str def to_string(self) -> str: """Return prompt as string.""" return self.text def to_messages(self) -> List[BaseMessage]: """Return prompt as messages.""" return [HumanMessage(content=self.text)] [docs]class BasePromptTemplate(BaseModel, ABC): """Base class for all prompt templates, returning a prompt."""
https://python.langchain.com/en/latest/_modules/langchain/prompts/base.html
14bd143874f4-2
"""Base class for all prompt templates, returning a prompt.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" output_parser: Optional[BaseOutputParser] = None """How to parse the output of calling an LLM on this formatted prompt.""" partial_variables: Mapping[str, Union[str, Callable[[], str]]] = Field( default_factory=dict ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True [docs] @abstractmethod def format_prompt(self, **kwargs: Any) -> PromptValue: """Create Chat Messages.""" @root_validator() def validate_variable_names(cls, values: Dict) -> Dict: """Validate variable names do not include restricted names.""" if "stop" in values["input_variables"]: raise ValueError( "Cannot have an input variable named 'stop', as it is used internally," " please rename." ) if "stop" in values["partial_variables"]: raise ValueError( "Cannot have an partial variable named 'stop', as it is used " "internally, please rename." ) overall = set(values["input_variables"]).intersection( values["partial_variables"] ) if overall: raise ValueError( f"Found overlapping input and partial variables: {overall}" ) return values [docs] def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate: """Return a partial of the prompt template.""" prompt_dict = self.__dict__.copy() prompt_dict["input_variables"] = list( set(self.input_variables).difference(kwargs)
https://python.langchain.com/en/latest/_modules/langchain/prompts/base.html
14bd143874f4-3
prompt_dict["input_variables"] = list( set(self.input_variables).difference(kwargs) ) prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs} return type(self)(**prompt_dict) def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]: # Get partial params: partial_kwargs = { k: v if isinstance(v, str) else v() for k, v in self.partial_variables.items() } return {**partial_kwargs, **kwargs} [docs] @abstractmethod def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ @property def _prompt_type(self) -> str: """Return the prompt type key.""" raise NotImplementedError [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of prompt.""" prompt_dict = super().dict(**kwargs) prompt_dict["_type"] = self._prompt_type return prompt_dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the prompt. Args: file_path: Path to directory to save prompt to. Example: .. code-block:: python prompt.save(file_path="path/prompt.yaml") """ if self.partial_variables: raise ValueError("Cannot save prompt with partial variables.") # Convert file to Path object. if isinstance(file_path, str):
https://python.langchain.com/en/latest/_modules/langchain/prompts/base.html
14bd143874f4-4
# Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") [docs]class StringPromptTemplate(BasePromptTemplate, ABC): """String prompt should expose the format method, returning a prompt.""" [docs] def format_prompt(self, **kwargs: Any) -> PromptValue: """Create Chat Messages.""" return StringPromptValue(text=self.format(**kwargs))
https://python.langchain.com/en/latest/_modules/langchain/prompts/base.html
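A quick illustration of the ``partial`` method above on a concrete subclass (``PromptTemplate``); the template text is illustrative:

.. code-block:: python

    from langchain.prompts.prompt import PromptTemplate

    prompt = PromptTemplate(
        input_variables=["adjective", "noun"],
        template="Tell me a {adjective} {noun}.",
    )
    # `partial` moves `adjective` out of input_variables and into
    # partial_variables, returning a new template of the same type.
    partial_prompt = prompt.partial(adjective="funny")
    print(partial_prompt.format(noun="story"))  # -> "Tell me a funny story."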
0d41c32dc9ed-0
Source code for langchain.prompts.few_shot_with_templates """Prompt template that contains few shot examples.""" from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate [docs]class FewShotPromptWithTemplates(StringPromptTemplate): """Prompt template that contains few shot examples.""" examples: Optional[List[dict]] = None """Examples to format into the prompt. Either this or example_selector should be provided.""" example_selector: Optional[BaseExampleSelector] = None """ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided.""" example_prompt: PromptTemplate """PromptTemplate used to format an individual example.""" suffix: StringPromptTemplate """A PromptTemplate to put after the examples.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" example_separator: str = "\n\n" """String separator used to join the prefix, the examples, and suffix.""" prefix: Optional[StringPromptTemplate] = None """A PromptTemplate to put before the examples.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @root_validator(pre=True) def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None)
https://python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
0d41c32dc9ed-1
examples = values.get("examples", None) example_selector = values.get("example_selector", None) if examples and example_selector: raise ValueError( "Only one of 'examples' and 'example_selector' should be provided" ) if examples is None and example_selector is None: raise ValueError( "One of 'examples' and 'example_selector' should be provided" ) return values @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix and input variables are consistent.""" if values["validate_template"]: input_variables = values["input_variables"] expected_input_variables = set(values["suffix"].input_variables) expected_input_variables |= set(values["partial_variables"]) if values["prefix"] is not None: expected_input_variables |= set(values["prefix"].input_variables) missing_vars = expected_input_variables.difference(input_variables) if missing_vars: raise ValueError( f"Got input_variables={input_variables}, but based on " f"prefix/suffix expected {expected_input_variables}" ) return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def _get_examples(self, **kwargs: Any) -> List[dict]: if self.examples is not None: return self.examples elif self.example_selector is not None: return self.example_selector.select_examples(kwargs) else: raise ValueError [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns:
https://python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
0d41c32dc9ed-2
kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) # Get the examples to use. examples = self._get_examples(**kwargs) # Format the examples. example_strings = [ self.example_prompt.format(**example) for example in examples ] # Create the overall prefix. if self.prefix is None: prefix = "" else: prefix_kwargs = { k: v for k, v in kwargs.items() if k in self.prefix.input_variables } for k in prefix_kwargs.keys(): kwargs.pop(k) prefix = self.prefix.format(**prefix_kwargs) # Create the overall suffix suffix_kwargs = { k: v for k, v in kwargs.items() if k in self.suffix.input_variables } for k in suffix_kwargs.keys(): kwargs.pop(k) suffix = self.suffix.format( **suffix_kwargs, ) pieces = [prefix, *example_strings, suffix] template = self.example_separator.join([piece for piece in pieces if piece]) # Format the template with the input variables. return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs) @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "few_shot_with_templates" [docs] def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the prompt.""" if self.example_selector: raise ValueError("Saving an example selector is not currently supported")
https://python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
0d41c32dc9ed-3
if self.example_selector: raise ValueError("Saving an example selector is not currently supported") return super().dict(**kwargs)
https://python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
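A minimal sketch of ``FewShotPromptWithTemplates``; unlike ``FewShotPromptTemplate``, the prefix and suffix are themselves prompt templates. The example data and variable names are illustrative:

.. code-block:: python

    from langchain.prompts.few_shot_with_templates import FewShotPromptWithTemplates
    from langchain.prompts.prompt import PromptTemplate

    example_prompt = PromptTemplate(
        input_variables=["input", "output"],
        template="Input: {input}\nOutput: {output}",
    )
    prefix = PromptTemplate(input_variables=["task"], template="Task: {task}")
    suffix = PromptTemplate(input_variables=["query"], template="Input: {query}\nOutput:")
    prompt = FewShotPromptWithTemplates(
        examples=[{"input": "2+2", "output": "4"}],
        example_prompt=example_prompt,
        prefix=prefix,
        suffix=suffix,
        # Must cover the prefix and suffix variables, per template_is_valid above.
        input_variables=["task", "query"],
    )
    print(prompt.format(task="arithmetic", query="3+3"))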
698954b954d3-0
Source code for langchain.prompts.example_selector.semantic_similarity """Example selector that selects examples based on SemanticSimilarity.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Type from pydantic import BaseModel, Extra from langchain.embeddings.base import Embeddings from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.vectorstores.base import VectorStore def sorted_values(values: Dict[str, str]) -> List[Any]: """Return a list of values in dict sorted by key.""" return [values[val] for val in sorted(values)] [docs]class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel): """Example selector that selects examples based on SemanticSimilarity.""" vectorstore: VectorStore """VectorStore that contains information about examples.""" k: int = 4 """Number of examples to select.""" example_keys: Optional[List[str]] = None """Optional keys to filter examples to.""" input_keys: Optional[List[str]] = None """Optional keys to filter input to. If provided, the search is based on the input variables instead of all variables.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True [docs] def add_example(self, example: Dict[str, str]) -> str: """Add new example to vectorstore.""" if self.input_keys: string_example = " ".join( sorted_values({key: example[key] for key in self.input_keys}) ) else: string_example = " ".join(sorted_values(example)) ids = self.vectorstore.add_texts([string_example], metadatas=[example]) return ids[0]
https://python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
698954b954d3-1
return ids[0] [docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on semantic similarity.""" # Get the docs with the highest similarity. if self.input_keys: input_variables = {key: input_variables[key] for key in self.input_keys} query = " ".join(sorted_values(input_variables)) example_docs = self.vectorstore.similarity_search(query, k=self.k) # Get the examples from the metadata. # This assumes that examples are stored in metadata. examples = [dict(e.metadata) for e in example_docs] # If example keys are provided, filter examples to those keys. if self.example_keys: examples = [{k: eg[k] for k in self.example_keys} for eg in examples] return examples [docs] @classmethod def from_examples( cls, examples: List[dict], embeddings: Embeddings, vectorstore_cls: Type[VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, **vectorstore_cls_kwargs: Any, ) -> SemanticSimilarityExampleSelector: """Create k-shot example selector using example list and embeddings. Reshuffles examples dynamically based on query similarity. Args: examples: List of examples to use in the prompt. embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings(). vectorstore_cls: A vector store DB interface class, e.g. FAISS. k: Number of examples to select input_keys: If provided, the search is based on the input variables instead of all variables.
https://python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
698954b954d3-2
instead of all variables. vectorstore_cls_kwargs: optional kwargs containing url for vector store Returns: The ExampleSelector instantiated, backed by a vector store. """ if input_keys: string_examples = [ " ".join(sorted_values({k: eg[k] for k in input_keys})) for eg in examples ] else: string_examples = [" ".join(sorted_values(eg)) for eg in examples] vectorstore = vectorstore_cls.from_texts( string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs ) return cls(vectorstore=vectorstore, k=k, input_keys=input_keys) [docs]class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector): """ExampleSelector that selects examples based on Max Marginal Relevance. This was shown to improve performance in this paper: https://arxiv.org/pdf/2211.13892.pdf """ fetch_k: int = 20 """Number of examples to fetch to rerank.""" [docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on semantic similarity.""" # Get the docs with the highest similarity. if self.input_keys: input_variables = {key: input_variables[key] for key in self.input_keys} query = " ".join(sorted_values(input_variables)) example_docs = self.vectorstore.max_marginal_relevance_search( query, k=self.k, fetch_k=self.fetch_k ) # Get the examples from the metadata. # This assumes that examples are stored in metadata. examples = [dict(e.metadata) for e in example_docs]
https://python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
698954b954d3-3
examples = [dict(e.metadata) for e in example_docs] # If example keys are provided, filter examples to those keys. if self.example_keys: examples = [{k: eg[k] for k in self.example_keys} for eg in examples] return examples [docs] @classmethod def from_examples( cls, examples: List[dict], embeddings: Embeddings, vectorstore_cls: Type[VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, fetch_k: int = 20, **vectorstore_cls_kwargs: Any, ) -> MaxMarginalRelevanceExampleSelector: """Create k-shot example selector using example list and embeddings. Reshuffles examples dynamically based on query similarity. Args: examples: List of examples to use in the prompt. embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings(). vectorstore_cls: A vector store DB interface class, e.g. FAISS. k: Number of examples to select input_keys: If provided, the search is based on the input variables instead of all variables. vectorstore_cls_kwargs: optional kwargs containing url for vector store Returns: The ExampleSelector instantiated, backed by a vector store. """ if input_keys: string_examples = [ " ".join(sorted_values({k: eg[k] for k in input_keys})) for eg in examples ] else: string_examples = [" ".join(sorted_values(eg)) for eg in examples] vectorstore = vectorstore_cls.from_texts( string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs )
https://python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
698954b954d3-4
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs ) return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys)
https://python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
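A usage sketch of ``from_examples`` above; it assumes the ``faiss`` package and OpenAI credentials are available, and the example data is illustrative:

.. code-block:: python

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.prompts.example_selector.semantic_similarity import (
        SemanticSimilarityExampleSelector,
    )
    from langchain.vectorstores.faiss import FAISS

    examples = [
        {"word": "happy", "antonym": "sad"},
        {"word": "tall", "antonym": "short"},
        {"word": "sunny", "antonym": "gloomy"},
    ]
    # Each example is embedded as a space-joined string of its sorted values
    # and stored with the full example dict as metadata.
    selector = SemanticSimilarityExampleSelector.from_examples(
        examples, OpenAIEmbeddings(), FAISS, k=2
    )
    # Returns the two examples whose stored text is closest to the query.
    selector.select_examples({"word": "joyful"})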
ff51f473adf9-0
Source code for langchain.prompts.example_selector.length_based """Select examples based on length.""" import re from typing import Callable, Dict, List from pydantic import BaseModel, validator from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate def _get_length_based(text: str) -> int: return len(re.split("\n| ", text)) [docs]class LengthBasedExampleSelector(BaseExampleSelector, BaseModel): """Select examples based on length.""" examples: List[dict] """A list of the examples that the prompt template expects.""" example_prompt: PromptTemplate """Prompt template used to format the examples.""" get_text_length: Callable[[str], int] = _get_length_based """Function to measure prompt length. Defaults to word count.""" max_length: int = 2048 """Max length for the prompt, beyond which examples are cut.""" example_text_lengths: List[int] = [] #: :meta private: [docs] def add_example(self, example: Dict[str, str]) -> None: """Add new example to list.""" self.examples.append(example) string_example = self.example_prompt.format(**example) self.example_text_lengths.append(self.get_text_length(string_example)) @validator("example_text_lengths", always=True) def calculate_example_text_lengths(cls, v: List[int], values: Dict) -> List[int]: """Calculate text lengths if they don't exist.""" # Check if text lengths were passed in if v: return v # If they were not, calculate them example_prompt = values["example_prompt"] get_text_length = values["get_text_length"]
https://python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/length_based.html
ff51f473adf9-1
get_text_length = values["get_text_length"] string_examples = [example_prompt.format(**eg) for eg in values["examples"]] return [get_text_length(eg) for eg in string_examples] [docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on the input lengths.""" inputs = " ".join(input_variables.values()) remaining_length = self.max_length - self.get_text_length(inputs) i = 0 examples = [] while remaining_length > 0 and i < len(self.examples): new_length = remaining_length - self.example_text_lengths[i] if new_length < 0: break else: examples.append(self.examples[i]) remaining_length = new_length i += 1 return examples
https://python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/length_based.html
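A short sketch of the selector above; the word-count budget and example data are illustrative:

.. code-block:: python

    from langchain.prompts.example_selector.length_based import (
        LengthBasedExampleSelector,
    )
    from langchain.prompts.prompt import PromptTemplate

    example_prompt = PromptTemplate(
        input_variables=["word", "antonym"],
        template="Word: {word}\nAntonym: {antonym}",
    )
    selector = LengthBasedExampleSelector(
        examples=[
            {"word": "happy", "antonym": "sad"},
            {"word": "energetic", "antonym": "lethargic"},
        ],
        example_prompt=example_prompt,
        max_length=10,  # budget in words, shared with the formatted input
    )
    # Keeps as many leading examples as fit in the remaining budget.
    selector.select_examples({"input": "big"})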
df312bff5f77-0
Source code for langchain.agents.agent_types from enum import Enum [docs]class AgentType(str, Enum): ZERO_SHOT_REACT_DESCRIPTION = "zero-shot-react-description" REACT_DOCSTORE = "react-docstore" SELF_ASK_WITH_SEARCH = "self-ask-with-search" CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description" CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description" CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description" STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION = ( "structured-chat-zero-shot-react-description" )
https://python.langchain.com/en/latest/_modules/langchain/agents/agent_types.html
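Because ``AgentType`` subclasses ``str``, its members compare equal to their string values, so either form can be passed wherever an agent type is expected:

.. code-block:: python

    from langchain.agents.agent_types import AgentType

    agent_type = AgentType.ZERO_SHOT_REACT_DESCRIPTION
    # str-valued enum: the member and its string are interchangeable.
    assert agent_type == "zero-shot-react-description"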
2899bd35aa90-0
Source code for langchain.agents.loading """Functionality for loading agents.""" import json from pathlib import Path from typing import Any, List, Optional, Union import yaml from langchain.agents.agent import BaseSingleActionAgent from langchain.agents.tools import Tool from langchain.agents.types import AGENT_TO_CLASS from langchain.chains.loading import load_chain, load_chain_from_config from langchain.llms.base import BaseLLM from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/" def _load_agent_from_tools( config: dict, llm: BaseLLM, tools: List[Tool], **kwargs: Any ) -> BaseSingleActionAgent: config_type = config.pop("_type") if config_type not in AGENT_TO_CLASS: raise ValueError(f"Loading {config_type} agent not supported") agent_cls = AGENT_TO_CLASS[config_type] combined_config = {**config, **kwargs} return agent_cls.from_llm_and_tools(llm, tools, **combined_config) def load_agent_from_config( config: dict, llm: Optional[BaseLLM] = None, tools: Optional[List[Tool]] = None, **kwargs: Any, ) -> BaseSingleActionAgent: """Load agent from Config Dict.""" if "_type" not in config: raise ValueError("Must specify an agent Type in config") load_from_tools = config.pop("load_from_llm_and_tools", False) if load_from_tools: if llm is None: raise ValueError( "If `load_from_llm_and_tools` is set to True, "
https://python.langchain.com/en/latest/_modules/langchain/agents/loading.html
2899bd35aa90-1
"If `load_from_llm_and_tools` is set to True, " "then LLM must be provided" ) if tools is None: raise ValueError( "If `load_from_llm_and_tools` is set to True, " "then tools must be provided" ) return _load_agent_from_tools(config, llm, tools, **kwargs) config_type = config.pop("_type") if config_type not in AGENT_TO_CLASS: raise ValueError(f"Loading {config_type} agent not supported") agent_cls = AGENT_TO_CLASS[config_type] if "llm_chain" in config: config["llm_chain"] = load_chain_from_config(config.pop("llm_chain")) elif "llm_chain_path" in config: config["llm_chain"] = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.") combined_config = {**config, **kwargs} return agent_cls(**combined_config) # type: ignore [docs]def load_agent(path: Union[str, Path], **kwargs: Any) -> BaseSingleActionAgent: """Unified method for loading a agent from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_agent_from_file, "agents", {"json", "yaml"} ): return hub_result else: return _load_agent_from_file(path, **kwargs) def _load_agent_from_file( file: Union[str, Path], **kwargs: Any ) -> BaseSingleActionAgent: """Load agent from file.""" # Convert file to Path object.
https://python.langchain.com/en/latest/_modules/langchain/agents/loading.html
2899bd35aa90-2
"""Load agent from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Load the agent from the config now. return load_agent_from_config(config, **kwargs) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/agents/loading.html
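A usage sketch of ``load_agent``; both paths below are illustrative placeholders, not real artifacts:

.. code-block:: python

    from langchain.agents.loading import load_agent

    # Paths recognized by try_load_from_hub use the lc:// scheme.
    agent = load_agent("lc://agents/zero-shot-react-description/agent.json")
    # Anything else is treated as a local json or yaml file.
    agent = load_agent("path/to/agent.yaml")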
e4631a6ffabd-0
Source code for langchain.agents.load_tools # flake8: noqa """Load tools.""" import warnings from typing import Any, Dict, List, Optional, Callable, Tuple from mypy_extensions import Arg, KwArg from langchain.agents.tools import Tool from langchain.callbacks.base import BaseCallbackManager from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs from langchain.chains.api.base import APIChain from langchain.chains.llm_math.base import LLMMathChain from langchain.chains.pal.base import PALChain from langchain.llms.base import BaseLLM from langchain.requests import TextRequestsWrapper from langchain.tools.arxiv.tool import ArxivQueryRun from langchain.tools.base import BaseTool from langchain.tools.bing_search.tool import BingSearchRun from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun from langchain.tools.human.tool import HumanInputRun from langchain.tools.python.tool import PythonREPLTool from langchain.tools.requests.tool import ( RequestsDeleteTool, RequestsGetTool, RequestsPatchTool, RequestsPostTool, RequestsPutTool, ) from langchain.tools.scenexplain.tool import SceneXplainTool from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun from langchain.tools.shell.tool import ShellTool from langchain.tools.wikipedia.tool import WikipediaQueryRun from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun from langchain.utilities import ArxivAPIWrapper from langchain.utilities.apify import ApifyWrapper from langchain.utilities.bash import BashProcess from langchain.utilities.bing_search import BingSearchAPIWrapper
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
e4631a6ffabd-1
from langchain.utilities.bing_search import BingSearchAPIWrapper from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper from langchain.utilities.google_search import GoogleSearchAPIWrapper from langchain.utilities.google_serper import GoogleSerperAPIWrapper from langchain.utilities.awslambda import LambdaWrapper from langchain.utilities.searx_search import SearxSearchWrapper from langchain.utilities.serpapi import SerpAPIWrapper from langchain.utilities.wikipedia import WikipediaAPIWrapper from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper def _get_python_repl() -> BaseTool: return PythonREPLTool() def _get_tools_requests_get() -> BaseTool: return RequestsGetTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_post() -> BaseTool: return RequestsPostTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_patch() -> BaseTool: return RequestsPatchTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_put() -> BaseTool: return RequestsPutTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_delete() -> BaseTool: return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper()) def _get_terminal() -> BaseTool: return ShellTool() _BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = { "python_repl": _get_python_repl, "requests": _get_tools_requests_get, # preserved for backwards compatibility "requests_get": _get_tools_requests_get, "requests_post": _get_tools_requests_post, "requests_patch": _get_tools_requests_patch, "requests_put": _get_tools_requests_put, "requests_delete": _get_tools_requests_delete,
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
e4631a6ffabd-2
"requests_delete": _get_tools_requests_delete, "terminal": _get_terminal, } def _get_pal_math(llm: BaseLLM) -> BaseTool: return Tool( name="PAL-MATH", description="A language model that is really good at solving complex word math problems. Input should be a fully worded hard word math problem.", func=PALChain.from_math_prompt(llm).run, ) def _get_pal_colored_objects(llm: BaseLLM) -> BaseTool: return Tool( name="PAL-COLOR-OBJ", description="A language model that is really good at reasoning about position and the color attributes of objects. Input should be a fully worded hard reasoning problem. Make sure to include all information about the objects AND the final question you want to answer.", func=PALChain.from_colored_object_prompt(llm).run, ) def _get_llm_math(llm: BaseLLM) -> BaseTool: return Tool( name="Calculator", description="Useful for when you need to answer questions about math.", func=LLMMathChain.from_llm(llm=llm).run, coroutine=LLMMathChain.from_llm(llm=llm).arun, ) def _get_open_meteo_api(llm: BaseLLM) -> BaseTool: chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS) return Tool( name="Open Meteo API", description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.", func=chain.run, )
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
e4631a6ffabd-3
func=chain.run, ) _LLM_TOOLS: Dict[str, Callable[[BaseLLM], BaseTool]] = { "pal-math": _get_pal_math, "pal-colored-objects": _get_pal_colored_objects, "llm-math": _get_llm_math, "open-meteo-api": _get_open_meteo_api, } def _get_news_api(llm: BaseLLM, **kwargs: Any) -> BaseTool: news_api_key = kwargs["news_api_key"] chain = APIChain.from_llm_and_api_docs( llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key} ) return Tool( name="News API", description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.", func=chain.run, ) def _get_tmdb_api(llm: BaseLLM, **kwargs: Any) -> BaseTool: tmdb_bearer_token = kwargs["tmdb_bearer_token"] chain = APIChain.from_llm_and_api_docs( llm, tmdb_docs.TMDB_DOCS, headers={"Authorization": f"Bearer {tmdb_bearer_token}"}, ) return Tool( name="TMDB API", description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.", func=chain.run, ) def _get_podcast_api(llm: BaseLLM, **kwargs: Any) -> BaseTool: listen_api_key = kwargs["listen_api_key"]
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
e4631a6ffabd-4
listen_api_key = kwargs["listen_api_key"] chain = APIChain.from_llm_and_api_docs( llm, podcast_docs.PODCAST_DOCS, headers={"X-ListenAPI-Key": listen_api_key}, ) return Tool( name="Podcast API", description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.", func=chain.run, ) def _get_lambda_api(**kwargs: Any) -> BaseTool: return Tool( name=kwargs["awslambda_tool_name"], description=kwargs["awslambda_tool_description"], func=LambdaWrapper(**kwargs).run, ) def _get_wolfram_alpha(**kwargs: Any) -> BaseTool: return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs)) def _get_google_search(**kwargs: Any) -> BaseTool: return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs)) def _get_wikipedia(**kwargs: Any) -> BaseTool: return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs)) def _get_arxiv(**kwargs: Any) -> BaseTool: return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs)) def _get_google_serper(**kwargs: Any) -> BaseTool: return Tool( name="Serper Search", func=GoogleSerperAPIWrapper(**kwargs).run, description="A low-cost Google Search API. Useful for when you need to answer questions about current events. Input should be a search query.", ) def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
e4631a6ffabd-5
) def _get_google_search_results_json(**kwargs: Any) -> BaseTool: return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs)) def _get_serpapi(**kwargs: Any) -> BaseTool: return Tool( name="Search", description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.", func=SerpAPIWrapper(**kwargs).run, coroutine=SerpAPIWrapper(**kwargs).arun, ) def _get_searx_search(**kwargs: Any) -> BaseTool: return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs)) def _get_searx_search_results_json(**kwargs: Any) -> BaseTool: wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"} return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs) def _get_bing_search(**kwargs: Any) -> BaseTool: return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs)) def _get_ddg_search(**kwargs: Any) -> BaseTool: return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs)) def _get_human_tool(**kwargs: Any) -> BaseTool: return HumanInputRun(**kwargs) def _get_scenexplain(**kwargs: Any) -> BaseTool: return SceneXplainTool(**kwargs) _EXTRA_LLM_TOOLS: Dict[ str, Tuple[Callable[[Arg(BaseLLM, "llm"), KwArg(Any)], BaseTool], List[str]] ] = { "news-api": (_get_news_api, ["news_api_key"]),
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
e4631a6ffabd-6
] = { "news-api": (_get_news_api, ["news_api_key"]), "tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]), "podcast-api": (_get_podcast_api, ["listen_api_key"]), } _EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = { "wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]), "google-search": (_get_google_search, ["google_api_key", "google_cse_id"]), "google-search-results-json": ( _get_google_search_results_json, ["google_api_key", "google_cse_id", "num_results"], ), "searx-search-results-json": ( _get_searx_search_results_json, ["searx_host", "engines", "num_results", "aiosession"], ), "bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]), "ddg-search": (_get_ddg_search, []), "google-serper": (_get_google_serper, ["serper_api_key"]), "serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]), "searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]), "wikipedia": (_get_wikipedia, ["top_k_results", "lang"]), "arxiv": ( _get_arxiv, ["top_k_results", "load_max_docs", "load_all_available_meta"], ), "human": (_get_human_tool, ["prompt_func", "input_func"]),
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
e4631a6ffabd-7
), "human": (_get_human_tool, ["prompt_func", "input_func"]), "awslambda": ( _get_lambda_api, ["awslambda_tool_name", "awslambda_tool_description", "function_name"], ), "sceneXplain": (_get_scenexplain, []), } [docs]def load_tools( tool_names: List[str], llm: Optional[BaseLLM] = None, callback_manager: Optional[BaseCallbackManager] = None, **kwargs: Any, ) -> List[BaseTool]: """Load tools based on their name. Args: tool_names: name of tools to load. llm: Optional language model, may be needed to initialize certain tools. callback_manager: Optional callback manager. If not provided, default global callback manager will be used. Returns: List of tools. """ tools = [] for name in tool_names: if name == "requests": warnings.warn( "tool name `requests` is deprecated - " "please use `requests_all` or specify the requests method" ) if name == "requests_all": # expand requests into various methods requests_method_tools = [ _tool for _tool in _BASE_TOOLS if _tool.startswith("requests_") ] tool_names.extend(requests_method_tools) elif name in _BASE_TOOLS: tools.append(_BASE_TOOLS[name]()) elif name in _LLM_TOOLS: if llm is None: raise ValueError(f"Tool {name} requires an LLM to be provided") tool = _LLM_TOOLS[name](llm)
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
e4631a6ffabd-8
tool = _LLM_TOOLS[name](llm) if callback_manager is not None: tool.callback_manager = callback_manager tools.append(tool) elif name in _EXTRA_LLM_TOOLS: if llm is None: raise ValueError(f"Tool {name} requires an LLM to be provided") _get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name] missing_keys = set(extra_keys).difference(kwargs) if missing_keys: raise ValueError( f"Tool {name} requires some parameters that were not " f"provided: {missing_keys}" ) sub_kwargs = {k: kwargs[k] for k in extra_keys} tool = _get_llm_tool_func(llm=llm, **sub_kwargs) if callback_manager is not None: tool.callback_manager = callback_manager tools.append(tool) elif name in _EXTRA_OPTIONAL_TOOLS: _get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name] sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs} tool = _get_tool_func(**sub_kwargs) if callback_manager is not None: tool.callback_manager = callback_manager tools.append(tool) else: raise ValueError(f"Got unknown tool {name}") return tools [docs]def get_all_tool_names() -> List[str]: """Get a list of all possible tool names.""" return ( list(_BASE_TOOLS) + list(_EXTRA_OPTIONAL_TOOLS) + list(_EXTRA_LLM_TOOLS) + list(_LLM_TOOLS) ) By Harrison Chase
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
e4631a6ffabd-9
+ list(_LLM_TOOLS) )
https://python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
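A usage sketch of ``load_tools``; the SerpAPI key is a placeholder. ``llm-math`` needs an LLM, and ``serpapi`` accepts the extra ``serpapi_api_key`` kwarg listed in ``_EXTRA_OPTIONAL_TOOLS`` above:

.. code-block:: python

    from langchain.agents.load_tools import get_all_tool_names, load_tools
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0)
    tools = load_tools(
        ["llm-math", "serpapi"],
        llm=llm,
        serpapi_api_key="YOUR_SERPAPI_API_KEY",  # placeholder credential
    )
    # Lists every name accepted by load_tools.
    print(get_all_tool_names())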
430e17973d1e-0
Source code for langchain.agents.initialize """Load agent.""" from typing import Any, Optional, Sequence from langchain.agents.agent import AgentExecutor from langchain.agents.agent_types import AgentType from langchain.agents.loading import AGENT_TO_CLASS, load_agent from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.tools.base import BaseTool [docs]def initialize_agent( tools: Sequence[BaseTool], llm: BaseLanguageModel, agent: Optional[AgentType] = None, callback_manager: Optional[BaseCallbackManager] = None, agent_path: Optional[str] = None, agent_kwargs: Optional[dict] = None, **kwargs: Any, ) -> AgentExecutor: """Load an agent executor given tools and LLM. Args: tools: List of tools this agent has access to. llm: Language model to use as the agent. agent: Agent type to use. If None and agent_path is also None, will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. callback_manager: CallbackManager to use. Global callback manager is used if not provided. Defaults to None. agent_path: Path to serialized agent to use. agent_kwargs: Additional key word arguments to pass to the underlying agent **kwargs: Additional key word arguments passed to the agent executor Returns: An agent executor """ if agent is None and agent_path is None: agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION if agent is not None and agent_path is not None: raise ValueError( "Both `agent` and `agent_path` are specified, " "but at most only one should be."
https://python.langchain.com/en/latest/_modules/langchain/agents/initialize.html
430e17973d1e-1
"but at most only one should be." ) if agent is not None: if agent not in AGENT_TO_CLASS: raise ValueError( f"Got unknown agent type: {agent}. " f"Valid types are: {AGENT_TO_CLASS.keys()}." ) agent_cls = AGENT_TO_CLASS[agent] agent_kwargs = agent_kwargs or {} agent_obj = agent_cls.from_llm_and_tools( llm, tools, callback_manager=callback_manager, **agent_kwargs ) elif agent_path is not None: agent_obj = load_agent( agent_path, llm=llm, tools=tools, callback_manager=callback_manager ) else: raise ValueError( "Somehow both `agent` and `agent_path` are None, " "this should never happen." ) return AgentExecutor.from_agent_and_tools( agent=agent_obj, tools=tools, callback_manager=callback_manager, **kwargs, ) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/agents/initialize.html
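Putting the pieces together, a minimal sketch of ``initialize_agent``; the question is illustrative, and ``verbose`` is forwarded to the ``AgentExecutor`` via ``**kwargs``:

.. code-block:: python

    from langchain.agents.agent_types import AgentType
    from langchain.agents.initialize import initialize_agent
    from langchain.agents.load_tools import load_tools
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0)
    tools = load_tools(["llm-math"], llm=llm)
    executor = initialize_agent(
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
    executor.run("What is 7 raised to the 0.5 power?")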
0bbf4da26039-0
Source code for langchain.agents.agent """Chain that takes in an input and produces an action and action input.""" from __future__ import annotations import asyncio import json import logging import time from abc import abstractmethod from pathlib import Path from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union import yaml from pydantic import BaseModel, root_validator from langchain.agents.tools import InvalidTool from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, AsyncCallbackManagerForToolRun, CallbackManagerForChainRun, CallbackManagerForToolRun, Callbacks, ) from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.input import get_color_mapping from langchain.prompts.base import BasePromptTemplate from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( AgentAction, AgentFinish, BaseMessage, BaseOutputParser, ) from langchain.tools.base import BaseTool from langchain.utilities.asyncio import asyncio_timeout logger = logging.getLogger(__name__) [docs]class BaseSingleActionAgent(BaseModel): """Base Agent class.""" @property def return_values(self) -> List[str]: """Return values of the agent.""" return ["output"] [docs] def get_allowed_tools(self) -> Set[str]: return set() [docs] @abstractmethod def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any,
https://python.langchain.com/en/latest/_modules/langchain/agents/agent.html
0bbf4da26039-1
callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ [docs] @abstractmethod async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ @property @abstractmethod def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ [docs] def return_stopped_response( self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any, ) -> AgentFinish: """Return response when agent has been stopped due to max iterations.""" if early_stopping_method == "force": # `force` just returns a constant string return AgentFinish( {"output": "Agent stopped due to iteration limit or time limit."}, "" ) else: raise ValueError( f"Got unsupported early_stopping_method `{early_stopping_method}`"
https://python.langchain.com/en/latest/_modules/langchain/agents/agent.html
0bbf4da26039-2
f"Got unsupported early_stopping_method `{early_stopping_method}`" ) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, **kwargs: Any, ) -> BaseSingleActionAgent: raise NotImplementedError @property def _agent_type(self) -> str: """Return Identifier of agent type.""" raise NotImplementedError [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent.""" _dict = super().dict() _dict["_type"] = str(self._agent_type) return _dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the agent. Args: file_path: Path to file to save the agent to. Example: .. code-block:: python # If working with agent executor agent.agent.save(file_path="path/agent.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save agent_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(agent_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f:
https://python.langchain.com/en/latest/_modules/langchain/agents/agent.html
0bbf4da26039-3
with open(file_path, "w") as f: yaml.dump(agent_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") [docs] def tool_run_logging_kwargs(self) -> Dict: return {} [docs]class BaseMultiActionAgent(BaseModel): """Base Agent class.""" @property def return_values(self) -> List[str]: """Return values of the agent.""" return ["output"] [docs] def get_allowed_tools(self) -> Set[str]: return set() [docs] @abstractmethod def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[List[AgentAction], AgentFinish]: """Given input, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Actions specifying what tool to use. """ [docs] @abstractmethod async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[List[AgentAction], AgentFinish]: """Given input, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Actions specifying what tool to use. """ @property @abstractmethod
https://python.langchain.com/en/latest/_modules/langchain/agents/agent.html
0bbf4da26039-4
Actions specifying what tool to use. """ @property @abstractmethod def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ [docs] def return_stopped_response( self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any, ) -> AgentFinish: """Return response when agent has been stopped due to max iterations.""" if early_stopping_method == "force": # `force` just returns a constant string return AgentFinish({"output": "Agent stopped due to max iterations."}, "") else: raise ValueError( f"Got unsupported early_stopping_method `{early_stopping_method}`" ) @property def _agent_type(self) -> str: """Return Identifier of agent type.""" raise NotImplementedError [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent.""" _dict = super().dict() _dict["_type"] = str(self._agent_type) return _dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the agent. Args: file_path: Path to file to save the agent to. Example: .. code-block:: python # If working with agent executor agent.agent.save(file_path="path/agent.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent
https://python.langchain.com/en/latest/_modules/langchain/agents/agent.html
0bbf4da26039-5
else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save agent_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(agent_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(agent_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") [docs] def tool_run_logging_kwargs(self) -> Dict: return {} [docs]class AgentOutputParser(BaseOutputParser): [docs] @abstractmethod def parse(self, text: str) -> Union[AgentAction, AgentFinish]: """Parse text into agent action/finish.""" [docs]class LLMSingleActionAgent(BaseSingleActionAgent): llm_chain: LLMChain output_parser: AgentOutputParser stop: List[str] @property def input_keys(self) -> List[str]: return list(set(self.llm_chain.input_keys) - {"intermediate_steps"}) [docs] def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use.
https://python.langchain.com/en/latest/_modules/langchain/agents/agent.html
0bbf4da26039-6
**kwargs: User inputs. Returns: Action specifying what tool to use. """ output = self.llm_chain.run( intermediate_steps=intermediate_steps, stop=self.stop, callbacks=callbacks, **kwargs, ) return self.output_parser.parse(output) [docs] async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ output = await self.llm_chain.arun( intermediate_steps=intermediate_steps, stop=self.stop, callbacks=callbacks, **kwargs, ) return self.output_parser.parse(output) [docs] def tool_run_logging_kwargs(self) -> Dict: return { "llm_prefix": "", "observation_prefix": "" if len(self.stop) == 0 else self.stop[0], } [docs]class Agent(BaseSingleActionAgent): """Class responsible for calling the language model and deciding the action. This is driven by an LLMChain. The prompt in the LLMChain MUST include a variable called "agent_scratchpad" where the agent can put its intermediary work. """ llm_chain: LLMChain output_parser: AgentOutputParser allowed_tools: Set[str] = set()
https://python.langchain.com/en/latest/_modules/langchain/agents/agent.html
0bbf4da26039-7
output_parser: AgentOutputParser allowed_tools: Set[str] = set() [docs] def get_allowed_tools(self) -> Set[str]: return self.allowed_tools @property def return_values(self) -> List[str]: return ["output"] def _fix_text(self, text: str) -> str: """Fix the text.""" raise ValueError("fix_text not implemented for this agent.") @property def _stop(self) -> List[str]: return [ f"\n{self.observation_prefix.rstrip()}", f"\n\t{self.observation_prefix.rstrip()}", ] def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> Union[str, List[BaseMessage]]: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}" return thoughts [docs] def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
""" full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs) return self.output_parser.parse(full_output) [docs] async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) full_output = await self.llm_chain.apredict(callbacks=callbacks, **full_inputs) return self.output_parser.parse(full_output) [docs] def get_full_inputs( self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any ) -> Dict[str, Any]: """Create the full inputs for the LLMChain from intermediate steps.""" thoughts = self._construct_scratchpad(intermediate_steps) new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop} full_inputs = {**kwargs, **new_inputs} return full_inputs @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return list(set(self.llm_chain.input_keys) - {"agent_scratchpad"}) @root_validator() def validate_prompt(cls, values: Dict) -> Dict:
    @root_validator()
    def validate_prompt(cls, values: Dict) -> Dict:
        """Validate that prompt matches format."""
        prompt = values["llm_chain"].prompt
        if "agent_scratchpad" not in prompt.input_variables:
            logger.warning(
                "`agent_scratchpad` should be a variable in prompt.input_variables."
                " Did not find it, so adding it at the end."
            )
            prompt.input_variables.append("agent_scratchpad")
            if isinstance(prompt, PromptTemplate):
                prompt.template += "\n{agent_scratchpad}"
            elif isinstance(prompt, FewShotPromptTemplate):
                prompt.suffix += "\n{agent_scratchpad}"
            else:
                raise ValueError(f"Got unexpected prompt type {type(prompt)}")
        return values

    @property
    @abstractmethod
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""

    @property
    @abstractmethod
    def llm_prefix(self) -> str:
        """Prefix to append the LLM call with."""

[docs]    @classmethod
    @abstractmethod
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Create a prompt for this class."""

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        """Validate that appropriate tools are passed in."""
        pass

    @classmethod
    @abstractmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        """Get default output parser for this class."""
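The effect of ``validate_prompt`` on a prompt missing the scratchpad variable, sketched with a trivial template:

.. code-block:: python

    from langchain import PromptTemplate

    prompt = PromptTemplate(input_variables=["input"], template="Answer: {input}")
    # Building an Agent around this prompt logs a warning and patches it:
    # prompt.input_variables -> ["input", "agent_scratchpad"]
    # prompt.template        -> "Answer: {input}\n{agent_scratchpad}"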
[docs]    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        llm_chain = LLMChain(
            llm=llm,
            prompt=cls.create_prompt(tools),
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser()
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )
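A hedged construction sketch using a concrete subclass (ZeroShotAgent) and the standard tool loader; the tool choice is illustrative:

.. code-block:: python

    from langchain import OpenAI
    from langchain.agents import ZeroShotAgent, load_tools

    llm = OpenAI(temperature=0)
    tools = load_tools(["llm-math"], llm=llm)
    agent = ZeroShotAgent.from_llm_and_tools(llm=llm, tools=tools)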
[docs]    def return_stopped_response(
        self,
        early_stopping_method: str,
        intermediate_steps: List[Tuple[AgentAction, str]],
        **kwargs: Any,
    ) -> AgentFinish:
        """Return response when agent has been stopped due to max iterations."""
        if early_stopping_method == "force":
            # `force` just returns a constant string
            return AgentFinish(
                {"output": "Agent stopped due to iteration limit or time limit."}, ""
            )
        elif early_stopping_method == "generate":
            # `generate` does one final forward pass
            thoughts = ""
            for action, observation in intermediate_steps:
                thoughts += action.log
                thoughts += (
                    f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
                )
            # Adding to the previous steps, we now tell the LLM to make a final
            # prediction
            thoughts += (
                "\n\nI now need to return a final answer based on the previous steps:"
            )
            new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
            full_inputs = {**kwargs, **new_inputs}
            full_output = self.llm_chain.predict(**full_inputs)
            # We try to extract a final answer
            parsed_output = self.output_parser.parse(full_output)
            if isinstance(parsed_output, AgentFinish):
                # If we can extract a final answer, we return it directly
                return parsed_output
            else:
                # If we cannot extract a final answer, we just return the full
                # output wrapped as an AgentFinish
                return AgentFinish({"output": full_output}, full_output)
        else:
            raise ValueError(
                "early_stopping_method should be one of `force` or `generate`, "
                f"got {early_stopping_method}"
            )

[docs]    def tool_run_logging_kwargs(self) -> Dict:
        return {
            "llm_prefix": self.llm_prefix,
            "observation_prefix": self.observation_prefix,
        }


class ExceptionTool(BaseTool):
    name = "_Exception"
    description = "Exception tool"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        return query

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        return query
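The two stopping strategies trade cost for quality: ``force`` returns a canned message for free, while ``generate`` spends one extra LLM call to salvage a final answer from the scratchpad. A sketch of selecting each via the AgentExecutor defined below (parameter values are illustrative):

.. code-block:: python

    # Give up after 3 steps with a constant "Agent stopped..." message.
    executor = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, max_iterations=3, early_stopping_method="force"
    )

    # Make one final LLM call to produce a best-effort answer instead.
    executor = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, max_iterations=3, early_stopping_method="generate"
    )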
[docs]class AgentExecutor(Chain):
    """Consists of an agent using tools."""

    agent: Union[BaseSingleActionAgent, BaseMultiActionAgent]
    tools: Sequence[BaseTool]
    return_intermediate_steps: bool = False
    max_iterations: Optional[int] = 15
    max_execution_time: Optional[float] = None
    early_stopping_method: str = "force"
    handle_parsing_errors: bool = False

[docs]    @classmethod
    def from_agent_and_tools(
        cls,
        agent: Union[BaseSingleActionAgent, BaseMultiActionAgent],
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        **kwargs: Any,
    ) -> AgentExecutor:
        """Create from agent and tools."""
        return cls(
            agent=agent, tools=tools, callback_manager=callback_manager, **kwargs
        )

    @root_validator()
    def validate_tools(cls, values: Dict) -> Dict:
        """Validate that tools are compatible with agent."""
        agent = values["agent"]
        tools = values["tools"]
        allowed_tools = agent.get_allowed_tools()
        if allowed_tools != set([tool.name for tool in tools]):
            raise ValueError(
                f"Allowed tools ({allowed_tools}) different than "
                f"provided tools ({[tool.name for tool in tools]})"
            )
        return values
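Note that ``validate_tools`` requires the executor's tools to match the agent's ``allowed_tools`` exactly, so pass the same sequence the agent was built with. A minimal sketch:

.. code-block:: python

    from langchain.agents import AgentExecutor

    executor = AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,  # must match the tools the agent was built with
        max_iterations=10,
        verbose=True,
    )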
    @root_validator()
    def validate_return_direct_tool(cls, values: Dict) -> Dict:
        """Validate that return_direct tools are compatible with the agent type."""
        agent = values["agent"]
        tools = values["tools"]
        if isinstance(agent, BaseMultiActionAgent):
            for tool in tools:
                if tool.return_direct:
                    raise ValueError(
                        "Tools that have `return_direct=True` are not allowed "
                        "in multi-action agents"
                    )
        return values

[docs]    def save(self, file_path: Union[Path, str]) -> None:
        """Raise error - saving not supported for Agent Executors."""
        raise ValueError(
            "Saving not supported for agent executors. "
            "If you are trying to save the agent, please use the "
            "`.save_agent(...)`"
        )

[docs]    def save_agent(self, file_path: Union[Path, str]) -> None:
        """Save the underlying agent."""
        return self.agent.save(file_path)

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.

        :meta private:
        """
        return self.agent.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        if self.return_intermediate_steps:
            return self.agent.return_values + ["intermediate_steps"]
        else:
            return self.agent.return_values

[docs]    def lookup_tool(self, name: str) -> BaseTool:
        """Lookup tool by name."""
        return {tool.name: tool for tool in self.tools}[name]

    def _should_continue(self, iterations: int, time_elapsed: float) -> bool:
        if self.max_iterations is not None and iterations >= self.max_iterations:
            return False
        if (
            self.max_execution_time is not None
            and time_elapsed >= self.max_execution_time
        ):
            return False
        return True
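Since the executor itself cannot be saved, persist the underlying agent instead; the file name here is illustrative:

.. code-block:: python

    executor.save_agent("agent.yaml")  # yaml or json, chosen by the suffix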
    def _return(
        self,
        output: AgentFinish,
        intermediate_steps: list,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        if run_manager:
            run_manager.on_agent_finish(output, color="green", verbose=self.verbose)
        final_output = output.return_values
        if self.return_intermediate_steps:
            final_output["intermediate_steps"] = intermediate_steps
        return final_output

    async def _areturn(
        self,
        output: AgentFinish,
        intermediate_steps: list,
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        if run_manager:
            await run_manager.on_agent_finish(
                output, color="green", verbose=self.verbose
            )
        final_output = output.return_values
        if self.return_intermediate_steps:
            final_output["intermediate_steps"] = intermediate_steps
        return final_output
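When intermediate steps matter (for auditing or follow-up calls), enable ``return_intermediate_steps`` and call the executor as a chain; this sketch assumes the agent's prompt takes an ``input`` variable:

.. code-block:: python

    executor = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, return_intermediate_steps=True
    )
    result = executor({"input": "What is 2 ** 10?"})
    result["output"]              # the final answer
    result["intermediate_steps"]  # list of (AgentAction, observation) tuples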
    def _take_next_step(
        self,
        name_to_tool_map: Dict[str, BaseTool],
        color_mapping: Dict[str, str],
        inputs: Dict[str, str],
        intermediate_steps: List[Tuple[AgentAction, str]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
        """Take a single step in the thought-action-observation loop.

        Override this to take control of how the agent makes and acts on choices.
        """
        try:
            # Call the LLM to see what to do.
            output = self.agent.plan(
                intermediate_steps,
                callbacks=run_manager.get_child() if run_manager else None,
                **inputs,
            )
        except Exception as e:
            if not self.handle_parsing_errors:
                raise e
            # Output-parser errors typically read
            # "Could not parse LLM output: `...`"; the text between the
            # backticks is the raw LLM output.
            text = str(e).split("`")[1]
            observation = "Invalid or incomplete response"
            output = AgentAction("_Exception", observation, text)
            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            observation = ExceptionTool().run(
                output.tool,
                verbose=self.verbose,
                color=None,
                callbacks=run_manager.get_child() if run_manager else None,
                **tool_run_kwargs,
            )
            return [(output, observation)]
        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            return output
        actions: List[AgentAction]
        if isinstance(output, AgentAction):
            actions = [output]
        else:
            actions = output
        result = []
        for agent_action in actions:
            if run_manager:
                run_manager.on_agent_action(agent_action, color="green")
            # Otherwise we lookup the tool
            if agent_action.tool in name_to_tool_map:
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                if return_direct:
                    tool_run_kwargs["llm_prefix"] = ""
                # We then call the tool on the tool input to get an observation
                observation = tool.run(
                    agent_action.tool_input,
                    verbose=self.verbose,
                    color=color,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            else:
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation = InvalidTool().run(
                    agent_action.tool,
                    verbose=self.verbose,
                    color=None,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            result.append((agent_action, observation))
        return result
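By default a malformed LLM reply raises; with ``handle_parsing_errors=True`` the error text is routed through the internal ``_Exception`` tool and fed back to the agent as an "Invalid or incomplete response" observation so it can retry. A sketch:

.. code-block:: python

    executor = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, handle_parsing_errors=True
    )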
    async def _atake_next_step(
        self,
        name_to_tool_map: Dict[str, BaseTool],
        color_mapping: Dict[str, str],
        inputs: Dict[str, str],
        intermediate_steps: List[Tuple[AgentAction, str]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
        """Take a single step in the thought-action-observation loop.

        Override this to take control of how the agent makes and acts on choices.
        """
        try:
            # Call the LLM to see what to do.
            output = await self.agent.aplan(
                intermediate_steps,
                callbacks=run_manager.get_child() if run_manager else None,
                **inputs,
            )
        except Exception as e:
            if not self.handle_parsing_errors:
                raise e
            text = str(e).split("`")[1]
            observation = "Invalid or incomplete response"
            output = AgentAction("_Exception", observation, text)
            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            observation = await ExceptionTool().arun(
                output.tool,
                verbose=self.verbose,
                color=None,
                callbacks=run_manager.get_child() if run_manager else None,
                **tool_run_kwargs,
            )
            return [(output, observation)]
        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            return output
        actions: List[AgentAction]
        if isinstance(output, AgentAction):
            actions = [output]
        else:
            actions = output

        async def _aperform_agent_action(
            agent_action: AgentAction,
        ) -> Tuple[AgentAction, str]:
            if run_manager:
                await run_manager.on_agent_action(
                    agent_action, verbose=self.verbose, color="green"
                )
            # Otherwise we lookup the tool
            if agent_action.tool in name_to_tool_map:
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                if return_direct:
                    tool_run_kwargs["llm_prefix"] = ""
                # We then call the tool on the tool input to get an observation
                observation = await tool.arun(
                    agent_action.tool_input,
                    verbose=self.verbose,
                    color=color,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            else:
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation = await InvalidTool().arun(
                    agent_action.tool,
                    verbose=self.verbose,
                    color=None,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            return agent_action, observation

        # Use asyncio.gather to run multiple tool.arun() calls concurrently
        result = await asyncio.gather(
            *[_aperform_agent_action(agent_action) for agent_action in actions]
        )

        return list(result)
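Because multi-action agents may emit several actions per step, the async path fans the tool calls out concurrently with ``asyncio.gather``. A hedged sketch of the async entry point (``arun`` is inherited from ``Chain``):

.. code-block:: python

    import asyncio

    async def main() -> None:
        answer = await executor.arun(input="What is 2 ** 10?")
        print(answer)

    asyncio.run(main())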
    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Run text through and get agent response."""
        # Construct a mapping of tool name to tool for easy lookup
        name_to_tool_map = {tool.name: tool for tool in self.tools}
        # We construct a mapping from each tool to a color, used for logging.
        color_mapping = get_color_mapping(
            [tool.name for tool in self.tools], excluded_colors=["green"]
        )
        intermediate_steps: List[Tuple[AgentAction, str]] = []
        # Let's start tracking the number of iterations and time elapsed
        iterations = 0
        time_elapsed = 0.0
        start_time = time.time()
        # We now enter the agent loop (until it returns something).
        while self._should_continue(iterations, time_elapsed):
            next_step_output = self._take_next_step(
                name_to_tool_map,
                color_mapping,
                inputs,
                intermediate_steps,
                run_manager=run_manager,
            )
            if isinstance(next_step_output, AgentFinish):
                return self._return(
                    next_step_output, intermediate_steps, run_manager=run_manager
                )

            intermediate_steps.extend(next_step_output)
            if len(next_step_output) == 1:
                next_step_action = next_step_output[0]
                # See if tool should return directly
                tool_return = self._get_tool_return(next_step_action)
                if tool_return is not None:
                    return self._return(tool_return, intermediate_steps)
            iterations += 1
            time_elapsed = time.time() - start_time
        output = self.agent.return_stopped_response(
            self.early_stopping_method, intermediate_steps, **inputs
        )
        return self._return(output, intermediate_steps)

    async def _acall(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Run text through and get agent response."""
        # Construct a mapping of tool name to tool for easy lookup
        name_to_tool_map = {tool.name: tool for tool in self.tools}
        # We construct a mapping from each tool to a color, used for logging.
        color_mapping = get_color_mapping(
            [tool.name for tool in self.tools], excluded_colors=["green"]
        )
        intermediate_steps: List[Tuple[AgentAction, str]] = []
        # Let's start tracking the number of iterations and time elapsed
        iterations = 0
        time_elapsed = 0.0
        start_time = time.time()
        # We now enter the agent loop (until it returns something).
        async with asyncio_timeout(self.max_execution_time):
            try:
                while self._should_continue(iterations, time_elapsed):
                    next_step_output = await self._atake_next_step(
                        name_to_tool_map,
                        color_mapping,
                        inputs,
                        intermediate_steps,
                        run_manager=run_manager,
                    )
                    if isinstance(next_step_output, AgentFinish):
                        return await self._areturn(
                            next_step_output, intermediate_steps
                        )

                    intermediate_steps.extend(next_step_output)
                    if len(next_step_output) == 1:
                        next_step_action = next_step_output[0]
                        # See if tool should return directly
                        tool_return = self._get_tool_return(next_step_action)
                        if tool_return is not None:
                            return await self._areturn(
                                tool_return, intermediate_steps
                            )
                    iterations += 1
                    time_elapsed = time.time() - start_time
                output = self.agent.return_stopped_response(
                    self.early_stopping_method, intermediate_steps, **inputs
                )
                return await self._areturn(
                    output, intermediate_steps, run_manager=run_manager
                )
            except TimeoutError:
                # stop early when interrupted by the async timeout
                output = self.agent.return_stopped_response(
                    self.early_stopping_method, intermediate_steps, **inputs
                )
                return await self._areturn(output, intermediate_steps)

    def _get_tool_return(
        self, next_step_output: Tuple[AgentAction, str]
    ) -> Optional[AgentFinish]:
        """Check if the tool is a returning tool."""
        agent_action, observation = next_step_output
        name_to_tool_map = {tool.name: tool for tool in self.tools}
        # Invalid tools won't be in the map, so we return None.
        if agent_action.tool in name_to_tool_map:
            if name_to_tool_map[agent_action.tool].return_direct:
                return AgentFinish(
                    {self.agent.return_values[0]: observation},
                    "",
                )
        return None
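Putting the pieces together, a minimal end-to-end sketch; the model, tool set, and question are illustrative assumptions:

.. code-block:: python

    from langchain import OpenAI
    from langchain.agents import AgentExecutor, ZeroShotAgent, load_tools

    llm = OpenAI(temperature=0)
    tools = load_tools(["llm-math"], llm=llm)
    agent = ZeroShotAgent.from_llm_and_tools(llm=llm, tools=tools)
    executor = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, max_iterations=5, verbose=True
    )
    print(executor.run("What is 2 ** 10?"))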