Dataset columns: id (string, length 36), text (string, length 114 to 429k), url (string, length 54 to 121).
c34ddfeb-037f-497d-be30-e58c938750c0
Source code for langchain.embeddings.huggingface """Wrapper around HuggingFace embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, Field from langchain.embeddings.base import Embeddings DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2" DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large" DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: " DEFAULT_QUERY_INSTRUCTION = ( "Represent the question for retrieving supporting documents: " ) [docs]class HuggingFaceEmbeddings(BaseModel, Embeddings): """Wrapper around sentence_transformers embedding models. To use, you should have the ``sentence_transformers`` python package installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceEmbeddings model_name = "sentence-transformers/all-mpnet-base-v2" model_kwargs = {'device': 'cpu'} encode_kwargs = {'normalize_embeddings': False} hf = HuggingFaceEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) """ client: Any #: :meta private: model_name: str = DEFAULT_MODEL_NAME """Model name to use.""" cache_folder: Optional[str] = None """Path to store models. Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass to the model.""" encode_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass when calling the `encode` method of the model.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: import sentence_transformers except ImportError as exc: raise ImportError( "Could not import sentence_transformers python package. " "Please install it with `pip install sentence_transformers`." ) from exc self.client = sentence_transformers.SentenceTransformer( self.model_name, cache_folder=self.cache_folder, **self.model_kwargs ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.client.encode(texts, **self.encode_kwargs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.client.encode(text, **self.encode_kwargs) return embedding.tolist() [docs]class HuggingFaceInstructEmbeddings(BaseModel, Embeddings): """Wrapper around sentence_transformers embedding models. To use, you should have the ``sentence_transformers`` and ``InstructorEmbedding`` python packages installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceInstructEmbeddings model_name = "hkunlp/instructor-large" model_kwargs = {'device': 'cpu'} encode_kwargs = {'normalize_embeddings': True} hf = HuggingFaceInstructEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) """ client: Any #: :meta private: model_name: str = DEFAULT_INSTRUCT_MODEL """Model name to use.""" cache_folder: Optional[str] = None """Path to store models. 
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass to the model.""" encode_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass when calling the `encode` method of the model.""" embed_instruction: str = DEFAULT_EMBED_INSTRUCTION """Instruction to use for embedding documents.""" query_instruction: str = DEFAULT_QUERY_INSTRUCTION """Instruction to use for embedding query.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: from InstructorEmbedding import INSTRUCTOR self.client = INSTRUCTOR( self.model_name, cache_folder=self.cache_folder, **self.model_kwargs ) except ImportError as e: raise ValueError("Dependencies for InstructorEmbedding not found.") from e class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace instruct model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [[self.embed_instruction, text] for text in texts] embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace instruct model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = [self.query_instruction, text] embedding = self.client.encode([instruction_pair], **self.encode_kwargs)[0] return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
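A minimal end-to-end sketch of the two wrappers above (model names and defaults are the ones shown in the docstrings; downloading the models requires local installs of ``sentence_transformers`` / ``InstructorEmbedding`` and network access):

.. code-block:: python

    from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings

    # Plain sentence-transformers model: texts are encoded as-is
    # (newlines are replaced with spaces before encoding).
    hf = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    doc_vectors = hf.embed_documents(["First document.", "Second document."])
    query_vector = hf.embed_query("What is in the first document?")

    # Instructor model: each text is paired with an instruction string, so
    # documents and queries are embedded with different prefixes.
    instruct = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-large")
    doc_vectors = instruct.embed_documents(["LangChain wraps embedding models."])
    query_vector = instruct.embed_query("Which library wraps embedding models?")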
a50cc9d8-7a6c-47e4-9bd9-7aff79de458d
Source code for langchain.embeddings.tensorflow_hub """Wrapper around TensorflowHub embedding models.""" from typing import Any, List from pydantic import BaseModel, Extra from langchain.embeddings.base import Embeddings DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" [docs]class TensorflowHubEmbeddings(BaseModel, Embeddings): """Wrapper around tensorflow_hub embedding models. To use, you should have the ``tensorflow_text`` python package installed. Example: .. code-block:: python from langchain.embeddings import TensorflowHubEmbeddings url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" tf = TensorflowHubEmbeddings(model_url=url) """ embed: Any #: :meta private: model_url: str = DEFAULT_MODEL_URL """Model name to use.""" def __init__(self, **kwargs: Any): """Initialize the tensorflow_hub and tensorflow_text.""" super().__init__(**kwargs) try: import tensorflow_hub except ImportError: raise ImportError( "Could not import tensorflow-hub python package. " "Please install it with `pip install tensorflow-hub``." ) try: import tensorflow_text # noqa except ImportError: raise ImportError( "Could not import tensorflow_text python package. " "Please install it with `pip install tensorflow_text``." ) self.embed = tensorflow_hub.load(self.model_url) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a TensorflowHub embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.embed(texts).numpy() return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a TensorflowHub embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.embed([text]).numpy()[0] return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/tensorflow_hub.html
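A short usage sketch, assuming both ``tensorflow-hub`` and ``tensorflow_text`` are installed (the default model is the multilingual Universal Sentence Encoder shown above):

.. code-block:: python

    from langchain.embeddings import TensorflowHubEmbeddings

    # Loads the TF Hub module once at construction time.
    tf_embed = TensorflowHubEmbeddings()
    doc_vectors = tf_embed.embed_documents(["Bonjour le monde", "Hello world"])
    query_vector = tf_embed.embed_query("greeting")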
a677b4c7-a40b-49c6-9bee-ee6d4aa2e15f
Source code for langchain.embeddings.fake

from typing import List

import numpy as np
from pydantic import BaseModel

from langchain.embeddings.base import Embeddings


[docs]class FakeEmbeddings(Embeddings, BaseModel):
    size: int

    def _get_embedding(self) -> List[float]:
        return list(np.random.normal(size=self.size))

[docs]    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._get_embedding() for _ in texts]

[docs]    def embed_query(self, text: str) -> List[float]:
        return self._get_embedding()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/fake.html
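FakeEmbeddings returns random vectors of a fixed size, which makes it handy for exercising vector-store and retriever code paths in tests without loading a real model. A small sketch:

.. code-block:: python

    from langchain.embeddings.fake import FakeEmbeddings

    # 1536 is an arbitrary dimensionality chosen for the example.
    fake = FakeEmbeddings(size=1536)
    vectors = fake.embed_documents(["a", "b", "c"])
    assert len(vectors) == 3 and len(vectors[0]) == 1536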
d8d1d067-0a66-4dcb-b7d7-80a2fe901bcb
Source code for langchain.embeddings.openai """Wrapper around OpenAI embedding models.""" from __future__ import annotations import logging from typing import ( Any, Callable, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union, ) import numpy as np from pydantic import BaseModel, Extra, root_validator from tenacity import ( AsyncRetrying, before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards async_retrying = AsyncRetrying( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def wrap(func: Callable) -> Callable: async def wrapped_f(*args: Any, **kwargs: Any) -> Callable: async for _ in async_retrying: return await func(*args, **kwargs) raise AssertionError("this is unreachable") return wrapped_f return wrap def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: return embeddings.client.create(**kwargs) return _embed_with_retry(**kwargs) async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" @_async_retry_decorator(embeddings) async def _async_embed_with_retry(**kwargs: Any) -> Any: return await embeddings.client.acreate(**kwargs) return await _async_embed_with_retry(**kwargs) [docs]class OpenAIEmbeddings(BaseModel, Embeddings): """Wrapper around OpenAI embedding models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings openai = OpenAIEmbeddings(openai_api_key="my-api-key") In order to use the library with Microsoft Azure endpoints, you need to set the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION. 
The OPENAI_API_TYPE must be set to 'azure' and the others correspond to the properties of your endpoint. In addition, the deployment name must be passed as the model parameter. Example: .. code-block:: python import os os.environ["OPENAI_API_TYPE"] = "azure" os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/" os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key" os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview" os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080" from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings( deployment="your-embeddings-deployment-name", model="your-embeddings-model-name", openai_api_base="https://your-endpoint.openai.azure.com/", openai_api_type="azure", ) text = "This is a test query." query_result = embeddings.embed_query(text) """ client: Any #: :meta private: model: str = "text-embedding-ada-002" deployment: str = model # to support Azure OpenAI Service custom deployment names openai_api_version: Optional[str] = None # to support Azure OpenAI Service custom endpoints openai_api_base: Optional[str] = None # to support Azure OpenAI Service custom endpoints openai_api_type: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None embedding_ctx_length: int = 8191 openai_api_key: Optional[str] = None openai_organization: Optional[str] = None allowed_special: Union[Literal["all"], Set[str]] = set() disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all" chunk_size: int = 1000 """Maximum number of texts to embed in each batch""" max_retries: int = 6 """Maximum number of retries to make when generating.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout in seconds for the OpenAPI request.""" headers: Any = None tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. 
In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) if values["openai_api_type"] in ("azure", "azure_ad", "azuread"): default_api_version = "2022-12-01" else: default_api_version = "" values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", default=default_api_version, ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai values["client"] = openai.Embedding except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _invocation_params(self) -> Dict: openai_args = { "engine": self.deployment, "request_timeout": self.request_timeout, "headers": self.headers, "api_key": self.openai_api_key, "organization": self.openai_organization, "api_base": self.openai_api_base, "api_type": self.openai_api_type, "api_version": self.openai_api_version, } if self.openai_proxy: import openai openai.proxy = { "http": self.openai_proxy, "https": self.openai_proxy, } # type: ignore[assignment] # noqa: E501 return openai_args # please refer to # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb def _get_len_safe_embeddings( self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None ) -> List[List[float]]: embeddings: List[List[float]] = [[] for _ in range(len(texts))] try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to for OpenAIEmbeddings. " "Please install it with `pip install tiktoken`." ) tokens = [] indices = [] model_name = self.tiktoken_model_name or self.model try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. 
text = text.replace("\n", " ") token = encoding.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) for j in range(0, len(token), self.embedding_ctx_length): tokens += [token[j : j + self.embedding_ctx_length]] indices += [i] batched_embeddings = [] _chunk_size = chunk_size or self.chunk_size for i in range(0, len(tokens), _chunk_size): response = embed_with_retry( self, input=tokens[i : i + _chunk_size], **self._invocation_params, ) batched_embeddings += [r["embedding"] for r in response["data"]] results: List[List[List[float]]] = [[] for _ in range(len(texts))] num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))] for i in range(len(indices)): results[indices[i]].append(batched_embeddings[i]) num_tokens_in_batch[indices[i]].append(len(tokens[i])) for i in range(len(texts)): _result = results[i] if len(_result) == 0: average = embed_with_retry( self, input="", **self._invocation_params, )[ "data" ][0]["embedding"] else: average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings # please refer to # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb async def _aget_len_safe_embeddings( self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None ) -> List[List[float]]: embeddings: List[List[float]] = [[] for _ in range(len(texts))] try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to for OpenAIEmbeddings. " "Please install it with `pip install tiktoken`." ) tokens = [] indices = [] model_name = self.tiktoken_model_name or self.model try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. 
text = text.replace("\n", " ") token = encoding.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) for j in range(0, len(token), self.embedding_ctx_length): tokens += [token[j : j + self.embedding_ctx_length]] indices += [i] batched_embeddings = [] _chunk_size = chunk_size or self.chunk_size for i in range(0, len(tokens), _chunk_size): response = await async_embed_with_retry( self, input=tokens[i : i + _chunk_size], **self._invocation_params, ) batched_embeddings += [r["embedding"] for r in response["data"]] results: List[List[List[float]]] = [[] for _ in range(len(texts))] num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))] for i in range(len(indices)): results[indices[i]].append(batched_embeddings[i]) num_tokens_in_batch[indices[i]].append(len(tokens[i])) for i in range(len(texts)): _result = results[i] if len(_result) == 0: average = ( await async_embed_with_retry( self, input="", **self._invocation_params, ) )["data"][0]["embedding"] else: average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings def _embedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to OpenAI's embedding endpoint.""" # handle large input text if len(text) > self.embedding_ctx_length: return self._get_len_safe_embeddings([text], engine=engine)[0] else: if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return embed_with_retry( self, input=[text], **self._invocation_params, )[ "data" ][0]["embedding"] async def _aembedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to OpenAI's embedding endpoint.""" # handle large input text if len(text) > self.embedding_ctx_length: return (await self._aget_len_safe_embeddings([text], engine=engine))[0] else: if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return ( await async_embed_with_retry( self, input=[text], **self._invocation_params, ) )["data"][0]["embedding"] [docs] def embed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # NOTE: to keep things simple, we assume the list may contain texts longer # than the maximum context and use length-safe embedding function. return self._get_len_safe_embeddings(texts, engine=self.deployment) [docs] async def aembed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint async for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # NOTE: to keep things simple, we assume the list may contain texts longer # than the maximum context and use length-safe embedding function. 
return await self._aget_len_safe_embeddings(texts, engine=self.deployment) [docs] def embed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = self._embedding_func(text, engine=self.deployment) return embedding [docs] async def aembed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint async for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = await self._aembedding_func(text, engine=self.deployment) return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
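The length-safe helpers above split each input into token chunks of at most ``embedding_ctx_length``, embed every chunk separately, then merge the chunk vectors back into one embedding per text by token-count-weighted averaging followed by L2 normalization. A standalone sketch of that merge step (``chunk_vectors`` and ``chunk_token_counts`` stand in for the per-chunk API results):

.. code-block:: python

    import numpy as np

    def combine_chunk_embeddings(chunk_vectors, chunk_token_counts):
        """Weighted-average chunk embeddings by token count, then L2-normalize."""
        average = np.average(chunk_vectors, axis=0, weights=chunk_token_counts)
        return (average / np.linalg.norm(average)).tolist()

    # e.g. a long document split into chunks of 8191 and 1200 tokens:
    # combined = combine_chunk_embeddings([vec_a, vec_b], [8191, 1200])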
37f5b390-a5ca-4939-8641-0dc4991a67ec
Source code for langchain.embeddings.huggingface_hub """Wrapper around HuggingFace Hub embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2" VALID_TASKS = ("feature-extraction",) [docs]class HuggingFaceHubEmbeddings(BaseModel, Embeddings): """Wrapper around HuggingFaceHub embedding models. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import HuggingFaceHubEmbeddings repo_id = "sentence-transformers/all-mpnet-base-v2" hf = HuggingFaceHubEmbeddings( repo_id=repo_id, task="feature-extraction", huggingfacehub_api_token="my-api-key", ) """ client: Any #: :meta private: repo_id: str = DEFAULT_REPO_ID """Model name to use.""" task: Optional[str] = "feature-extraction" """Task to call the model with.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.inference_api import InferenceApi repo_id = values["repo_id"] if not repo_id.startswith("sentence-transformers"): raise ValueError( "Currently only 'sentence-transformers' embedding models " f"are supported. Got invalid 'repo_id' {repo_id}." ) client = InferenceApi( repo_id=repo_id, token=huggingfacehub_api_token, task=values.get("task"), ) if client.task not in VALID_TASKS: raise ValueError( f"Got invalid task {client.task}, " f"currently only {VALID_TASKS} are supported" ) values["client"] = client except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to HuggingFaceHub's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ # replace newlines, which can negatively affect performance. texts = [text.replace("\n", " ") for text in texts] _model_kwargs = self.model_kwargs or {} responses = self.client(inputs=texts, params=_model_kwargs) return responses [docs] def embed_query(self, text: str) -> List[float]: """Call out to HuggingFaceHub's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embeddings for the text. """ response = self.embed_documents([text])[0] return response
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html
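A usage sketch for the hosted Inference API wrapper; note that the validator above only accepts ``sentence-transformers`` repos with the ``feature-extraction`` task (the API token here is a placeholder):

.. code-block:: python

    from langchain.embeddings import HuggingFaceHubEmbeddings

    hub = HuggingFaceHubEmbeddings(
        repo_id="sentence-transformers/all-mpnet-base-v2",
        task="feature-extraction",
        huggingfacehub_api_token="my-api-token",  # or set HUGGINGFACEHUB_API_TOKEN
    )
    vectors = hub.embed_documents(["Hosted inference keeps the model off your machine."])
    query_vector = hub.embed_query("Where does the model run?")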
4665ba45-e917-4766-a2b1-b0ee5a40df7d
Source code for langchain.embeddings.deepinfra from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env DEFAULT_MODEL_ID = "sentence-transformers/clip-ViT-B-32" [docs]class DeepInfraEmbeddings(BaseModel, Embeddings): """Wrapper around Deep Infra's embedding inference service. To use, you should have the environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. There are multiple embeddings models available, see https://deepinfra.com/models?type=embeddings. Example: .. code-block:: python from langchain.embeddings import DeepInfraEmbeddings deepinfra_emb = DeepInfraEmbeddings( model_id="sentence-transformers/clip-ViT-B-32", deepinfra_api_token="my-api-key" ) r1 = deepinfra_emb.embed_documents( [ "Alpha is the first letter of Greek alphabet", "Beta is the second letter of Greek alphabet", ] ) r2 = deepinfra_emb.embed_query( "What is the second letter of Greek alphabet" ) """ model_id: str = DEFAULT_MODEL_ID """Embeddings model to use.""" normalize: bool = False """whether to normalize the computed embeddings""" embed_instruction: str = "passage: " """Instruction used to embed documents.""" query_instruction: str = "query: " """Instruction used to embed the query.""" model_kwargs: Optional[dict] = None """Other model keyword args""" deepinfra_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env( values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN" ) values["deepinfra_api_token"] = deepinfra_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"model_id": self.model_id} def _embed(self, input: List[str]) -> List[List[float]]: _model_kwargs = self.model_kwargs or {} # HTTP headers for authorization headers = { "Authorization": f"bearer {self.deepinfra_api_token}", "Content-Type": "application/json", } # send request try: res = requests.post( f"https://api.deepinfra.com/v1/inference/{self.model_id}", headers=headers, json={"inputs": input, "normalize": self.normalize, **_model_kwargs}, ) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") if res.status_code != 200: raise ValueError( "Error raised by inference API HTTP code: %s, %s" % (res.status_code, res.text) ) try: t = res.json() embeddings = t["embeddings"] except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {res.text}" ) return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a Deep Infra deployed embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [f"{self.query_instruction}{text}" for text in texts] embeddings = self._embed(instruction_pairs) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a Deep Infra deployed embedding model. Args: text: The text to embed. Returns: Embeddings for the text. 
""" instruction_pair = f"{self.query_instruction}{text}" embedding = self._embed([instruction_pair])[0] return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html
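For reference, the HTTP request that ``_embed()`` above issues looks roughly like this sketch (endpoint path, payload keys, and response shape mirror the wrapper; the token and model id are placeholders):

.. code-block:: python

    import requests

    resp = requests.post(
        "https://api.deepinfra.com/v1/inference/sentence-transformers/clip-ViT-B-32",
        headers={
            "Authorization": "bearer my-api-token",
            "Content-Type": "application/json",
        },
        # embed_query() prefixes the text with query_instruction ("query: ").
        json={"inputs": ["query: What is the second letter of Greek alphabet"],
              "normalize": False},
    )
    resp.raise_for_status()
    vectors = resp.json()["embeddings"]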
e9d60ae6-6e15-43f0-b599-33c2340c27c8
Source code for langchain.embeddings.elasticsearch from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from langchain.utils import get_from_env if TYPE_CHECKING: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient from langchain.embeddings.base import Embeddings [docs]class ElasticsearchEmbeddings(Embeddings): """ Wrapper around Elasticsearch embedding models. This class provides an interface to generate embeddings using a model deployed in an Elasticsearch cluster. It requires an Elasticsearch connection object and the model_id of the model deployed in the cluster. In Elasticsearch you need to have an embedding model loaded and deployed. - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html """ # noqa: E501 def __init__( self, client: MlClient, model_id: str, *, input_field: str = "text_field", ): """ Initialize the ElasticsearchEmbeddings instance. Args: client (MlClient): An Elasticsearch ML client object. model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. """ self.client = client self.model_id = model_id self.input_field = input_field [docs] @classmethod def from_credentials( cls, model_id: str, *, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """Instantiate embeddings from Elasticsearch credentials. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to. es_user: (str, optional): Elasticsearch username. es_password: (str, optional): Elasticsearch password. Example: .. code-block:: python from langchain.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Credentials can be passed in two ways. Either set the env vars # ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically # pulled in, or pass them in directly as kwargs. 
embeddings = ElasticsearchEmbeddings.from_credentials( model_id, input_field=input_field, # es_cloud_id="foo", # es_user="bar", # es_password="baz", ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings_generator.embed_documents(documents) """ try: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient except ImportError: raise ImportError( "elasticsearch package not found, please install with 'pip install " "elasticsearch'" ) es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID") es_user = es_user or get_from_env("es_user", "ES_USER") es_password = es_password or get_from_env("es_password", "ES_PASSWORD") # Connect to Elasticsearch es_connection = Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) client = MlClient(es_connection) return cls(client, model_id, input_field=input_field) [docs] @classmethod def from_es_connection( cls, model_id: str, es_connection: Elasticsearch, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """ Instantiate embeddings from an existing Elasticsearch connection. This method provides a way to create an instance of the ElasticsearchEmbeddings class using an existing Elasticsearch connection. The connection object is used to create an MlClient, which is then used to initialize the ElasticsearchEmbeddings instance. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch connection object. input_field (str, optional): The name of the key for the input text field in the document. Defaults to 'text_field'. Returns: ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class. Example: .. code-block:: python from elasticsearch import Elasticsearch from langchain.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Create Elasticsearch connection es_connection = Elasticsearch( hosts=["localhost:9200"], http_auth=("user", "password") ) # Instantiate ElasticsearchEmbeddings using the existing connection embeddings = ElasticsearchEmbeddings.from_es_connection( model_id, es_connection, input_field=input_field, ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings_generator.embed_documents(documents) """ # Importing MlClient from elasticsearch.client within the method to # avoid unnecessary import if the method is not used from elasticsearch.client import MlClient # Create an MlClient from the given Elasticsearch connection client = MlClient(es_connection) # Return a new instance of the ElasticsearchEmbeddings class with # the MlClient, model_id, and input_field return cls(client, model_id, input_field=input_field) def _embedding_func(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for the given texts using the Elasticsearch model. Args: texts (List[str]): A list of text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each text in the input list. 
""" response = self.client.infer_trained_model( model_id=self.model_id, docs=[{self.input_field: text} for text in texts] ) embeddings = [doc["predicted_value"] for doc in response["inference_results"]] return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for a list of documents. Args: texts (List[str]): A list of document text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each document in the input list. """ return self._embedding_func(texts) [docs] def embed_query(self, text: str) -> List[float]: """ Generate an embedding for a single query text. Args: text (str): The query text to generate an embedding for. Returns: List[float]: The embedding for the input query text. """ return self._embedding_func([text])[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
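For clarity, this is roughly the raw ML-client call that ``_embedding_func()`` above issues; the cloud id, credentials, model id, and input field name are placeholders:

.. code-block:: python

    from elasticsearch import Elasticsearch
    from elasticsearch.client import MlClient

    es = Elasticsearch(cloud_id="my-cloud-id", basic_auth=("user", "password"))
    ml = MlClient(es)

    # One document per input text, keyed by the model's input field.
    response = ml.infer_trained_model(
        model_id="my_text_embedding_model",
        docs=[{"text_field": "This is an example document."}],
    )
    vectors = [doc["predicted_value"] for doc in response["inference_results"]]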
f9ac4828-265c-46f6-b559-82b97f7a1812
Source code for langchain.embeddings.minimax """Wrapper around MiniMax APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional import requests from pydantic import BaseModel, Extra, root_validator from tenacity import ( before_sleep_log, retry, stop_after_attempt, wait_exponential, ) from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator() -> Callable[[Any], Any]: """Returns a tenacity retry decorator.""" multiplier = 1 min_seconds = 1 max_seconds = 4 max_retries = 6 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), before_sleep=before_sleep_log(logger, logging.WARNING), ) def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _embed_with_retry(*args: Any, **kwargs: Any) -> Any: return embeddings.embed(*args, **kwargs) return _embed_with_retry(*args, **kwargs) [docs]class MiniMaxEmbeddings(BaseModel, Embeddings): """Wrapper around MiniMax's embedding inference service. To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and ``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import MiniMaxEmbeddings embeddings = MiniMaxEmbeddings() query_text = "This is a test query." query_result = embeddings.embed_query(query_text) document_text = "This is a test document." document_result = embeddings.embed_documents([document_text]) """ endpoint_url: str = "https://api.minimax.chat/v1/embeddings" """Endpoint URL to use.""" model: str = "embo-01" """Embeddings model name to use.""" embed_type_db: str = "db" """For embed_documents""" embed_type_query: str = "query" """For embed_query""" minimax_group_id: Optional[str] = None """Group ID for MiniMax API.""" minimax_api_key: Optional[str] = None """API Key for MiniMax API.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that group id and api key exists in environment.""" minimax_group_id = get_from_dict_or_env( values, "minimax_group_id", "MINIMAX_GROUP_ID" ) minimax_api_key = get_from_dict_or_env( values, "minimax_api_key", "MINIMAX_API_KEY" ) values["minimax_group_id"] = minimax_group_id values["minimax_api_key"] = minimax_api_key return values def embed( self, texts: List[str], embed_type: str, ) -> List[List[float]]: payload = { "model": self.model, "type": embed_type, "texts": texts, } # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.minimax_api_key}", "Content-Type": "application/json", } params = { "GroupId": self.minimax_group_id, } # send request response = requests.post( self.endpoint_url, params=params, headers=headers, json=payload ) parsed_response = response.json() # check for errors if parsed_response["base_resp"]["status_code"] != 0: raise ValueError( f"MiniMax API returned an error: {parsed_response['base_resp']}" ) embeddings = parsed_response["vectors"] return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a MiniMax embedding endpoint. Args: texts: The list of texts to embed. 
Returns: List of embeddings, one for each text. """ embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a MiniMax embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ embeddings = embed_with_retry( self, texts=[text], embed_type=self.embed_type_query ) return embeddings[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/minimax.html
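A usage sketch showing the two embedding types the wrapper distinguishes (credentials are placeholders and can also be passed as constructor arguments):

.. code-block:: python

    import os
    from langchain.embeddings import MiniMaxEmbeddings

    os.environ["MINIMAX_GROUP_ID"] = "my-group-id"
    os.environ["MINIMAX_API_KEY"] = "my-api-key"

    emb = MiniMaxEmbeddings()
    # Documents are sent with type "db", queries with type "query", so the two
    # sides of a retrieval setup use matching but distinct embeddings.
    doc_vectors = emb.embed_documents(["This is a test document."])
    query_vector = emb.embed_query("This is a test query.")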
495b9b34-9c46-4f1e-be66-b4cc653f7d9c
Source code for langchain.embeddings.cohere """Wrapper around Cohere embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env [docs]class CohereEmbeddings(BaseModel, Embeddings): """Wrapper around Cohere embedding models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import CohereEmbeddings cohere = CohereEmbeddings( model="embed-english-light-v2.0", cohere_api_key="my-api-key" ) """ client: Any #: :meta private: model: str = "embed-english-v2.0" """Model name to use.""" truncate: Optional[str] = None """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")""" cohere_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ValueError( "Could not import cohere python package. " "Please install it with `pip install cohere`." ) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to Cohere's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = self.client.embed( model=self.model, texts=texts, truncate=self.truncate ).embeddings return [list(map(float, e)) for e in embeddings] [docs] def embed_query(self, text: str) -> List[float]: """Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ embedding = self.client.embed( model=self.model, texts=[text], truncate=self.truncate ).embeddings[0] return list(map(float, embedding))
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html
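A short usage sketch; ``truncate`` controls how over-length inputs are cut ("NONE", "START", or "END"), and the API key is a placeholder:

.. code-block:: python

    from langchain.embeddings import CohereEmbeddings

    cohere_emb = CohereEmbeddings(
        model="embed-english-light-v2.0",
        truncate="END",
        cohere_api_key="my-api-key",  # or set COHERE_API_KEY
    )
    doc_vectors = cohere_emb.embed_documents(["Cohere hosts the embedding model."])
    query_vector = cohere_emb.embed_query("Who hosts the embedding model?")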
c813c6eb-1b2b-4ec6-8c13-8939921e8d6d
Source code for langchain.embeddings.mosaicml """Wrapper around MosaicML APIs.""" from typing import Any, Dict, List, Mapping, Optional, Tuple import requests from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env [docs]class MosaicMLInstructorEmbeddings(BaseModel, Embeddings): """Wrapper around MosaicML's embedding inference service. To use, you should have the environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import MosaicMLInstructorEmbeddings endpoint_url = ( "https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict" ) mosaic_llm = MosaicMLInstructorEmbeddings( endpoint_url=endpoint_url, mosaicml_api_token="my-api-key" ) """ endpoint_url: str = ( "https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict" ) """Endpoint URL to use.""" embed_instruction: str = "Represent the document for retrieval: " """Instruction used to embed documents.""" query_instruction: str = ( "Represent the question for retrieving supporting documents: " ) """Instruction used to embed the query.""" retry_sleep: float = 1.0 """How long to try sleeping for if a rate limit is encountered""" mosaicml_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env( values, "mosaicml_api_token", "MOSAICML_API_TOKEN" ) values["mosaicml_api_token"] = mosaicml_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"endpoint_url": self.endpoint_url} def _embed( self, input: List[Tuple[str, str]], is_retry: bool = False ) -> List[List[float]]: payload = {"input_strings": input} # HTTP headers for authorization headers = { "Authorization": f"{self.mosaicml_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post(self.endpoint_url, headers=headers, json=payload) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") try: parsed_response = response.json() if "error" in parsed_response: # if we get rate limited, try sleeping for 1 second if ( not is_retry and "rate limit exceeded" in parsed_response["error"].lower() ): import time time.sleep(self.retry_sleep) return self._embed(input, is_retry=True) raise ValueError( f"Error raised by inference API: {parsed_response['error']}" ) # The inference API has changed a couple of times, so we add some handling # to be robust to multiple response formats. 
if isinstance(parsed_response, dict): if "data" in parsed_response: output_item = parsed_response["data"] elif "output" in parsed_response: output_item = parsed_response["output"] else: raise ValueError( f"No key data or output in response: {parsed_response}" ) if isinstance(output_item, list) and isinstance(output_item[0], list): embeddings = output_item else: embeddings = [output_item] elif isinstance(parsed_response, list): first_item = parsed_response[0] if isinstance(first_item, list): embeddings = parsed_response elif isinstance(first_item, dict): if "output" in first_item: embeddings = [item["output"] for item in parsed_response] else: raise ValueError( f"No key data or output in response: {parsed_response}" ) else: raise ValueError(f"Unexpected response format: {parsed_response}") else: raise ValueError(f"Unexpected response type: {parsed_response}") except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {response.text}" ) return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a MosaicML deployed instructor embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [(self.embed_instruction, text) for text in texts] embeddings = self._embed(instruction_pairs) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a MosaicML deployed instructor embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = (self.query_instruction, text) embedding = self._embed([instruction_pair])[0] return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
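A usage sketch for the MosaicML wrapper, imported here via its full module path; the endpoint URL is the instructor-xl default from the class and the API token is a placeholder. Like the other instructor wrappers, it sends (instruction, text) pairs, with different instructions for documents and queries:

.. code-block:: python

    from langchain.embeddings.mosaicml import MosaicMLInstructorEmbeddings

    mosaic = MosaicMLInstructorEmbeddings(
        endpoint_url="https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict",
        mosaicml_api_token="my-api-token",  # or set MOSAICML_API_TOKEN
    )
    doc_vectors = mosaic.embed_documents(["Instructor embeddings take an instruction prefix."])
    query_vector = mosaic.embed_query("What do instructor embeddings take?")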
4159a0c4-ae99-42e4-aab0-e49bb651e854
Source code for langchain.embeddings.self_hosted_hugging_face """Wrapper around HuggingFace embedding models for self-hosted remote hardware.""" import importlib import logging from typing import Any, Callable, List, Optional from langchain.embeddings.self_hosted import SelfHostedEmbeddings DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2" DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large" DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: " DEFAULT_QUERY_INSTRUCTION = ( "Represent the question for retrieving supporting documents: " ) logger = logging.getLogger(__name__) def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]: """Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch. """ return client.encode(*args, **kwargs) def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any: """Load the embedding model.""" if not instruct: import sentence_transformers client = sentence_transformers.SentenceTransformer(model_id) else: from InstructorEmbedding import INSTRUCTOR client = INSTRUCTOR(model_id) if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) client = client.to(device) return client [docs]class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings): """Runs sentence_transformers embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example: .. 
code-block:: python from langchain.embeddings import SelfHostedHuggingFaceEmbeddings import runhouse as rh model_name = "sentence-transformers/all-mpnet-base-v2" gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu) """ client: Any #: :meta private: model_id: str = DEFAULT_MODEL_NAME """Model name to use.""" model_reqs: List[str] = ["./", "sentence_transformers", "torch"] """Requirements to install on hardware to inference the model.""" hardware: Any """Remote hardware to send the inference function to.""" model_load_fn: Callable = load_embedding_model """Function to load the model remotely on the server.""" load_fn_kwargs: Optional[dict] = None """Key word arguments to pass to the model load function.""" inference_fn: Callable = _embed_documents """Inference function to extract the embeddings.""" def __init__(self, **kwargs: Any): """Initialize the remote inference function.""" load_fn_kwargs = kwargs.pop("load_fn_kwargs", {}) load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME) load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False) load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0) super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) [docs]class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings): """Runs InstructorEmbedding embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example: .. code-block:: python from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings import runhouse as rh model_name = "hkunlp/instructor-large" gpu = rh.cluster(name='rh-a10x', instance_type='A100:1') hf = SelfHostedHuggingFaceInstructEmbeddings( model_name=model_name, hardware=gpu) """ model_id: str = DEFAULT_INSTRUCT_MODEL """Model name to use.""" embed_instruction: str = DEFAULT_EMBED_INSTRUCTION """Instruction to use for embedding documents.""" query_instruction: str = DEFAULT_QUERY_INSTRUCTION """Instruction to use for embedding query.""" model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"] """Requirements to install on hardware to inference the model.""" def __init__(self, **kwargs: Any): """Initialize the remote inference function.""" load_fn_kwargs = kwargs.pop("load_fn_kwargs", {}) load_fn_kwargs["model_id"] = load_fn_kwargs.get( "model_id", DEFAULT_INSTRUCT_MODEL ) load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True) load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0) super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace instruct model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [] for text in texts: instruction_pairs.append([self.embed_instruction, text]) embeddings = self.client(self.pipeline_ref, instruction_pairs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace instruct model. Args: text: The text to embed. Returns: Embeddings for the text. 
""" instruction_pair = [self.query_instruction, text] embedding = self.client(self.pipeline_ref, [instruction_pair])[0] return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
472c5600-2e0a-4545-afb4-966ae287e371
Source code for langchain.embeddings.embaas """Wrapper around embaas embeddings API.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from typing_extensions import NotRequired, TypedDict from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env # Currently supported maximum batch size for embedding requests MAX_BATCH_SIZE = 256 EMBAAS_API_URL = "https://api.embaas.io/v1/embeddings/" class EmbaasEmbeddingsPayload(TypedDict): """Payload for the embaas embeddings API.""" model: str texts: List[str] instruction: NotRequired[str] [docs]class EmbaasEmbeddings(BaseModel, Embeddings): """Wrapper around embaas's embedding service. To use, you should have the environment variable ``EMBAAS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python # Initialise with default model and instruction from langchain.embeddings import EmbaasEmbeddings emb = EmbaasEmbeddings() # Initialise with custom model and instruction from langchain.embeddings import EmbaasEmbeddings emb_model = "instructor-large" emb_inst = "Represent the Wikipedia document for retrieval" emb = EmbaasEmbeddings( model=emb_model, instruction=emb_inst ) """ model: str = "e5-large-v2" """The model used for embeddings.""" instruction: Optional[str] = None """Instruction used for domain-specific embeddings.""" api_url: str = EMBAAS_API_URL """The URL for the embaas embeddings API.""" embaas_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" embaas_api_key = get_from_dict_or_env( values, "embaas_api_key", "EMBAAS_API_KEY" ) values["embaas_api_key"] = embaas_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying params.""" return {"model": self.model, "instruction": self.instruction} def _generate_payload(self, texts: List[str]) -> EmbaasEmbeddingsPayload: """Generates payload for the API request.""" payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model) if self.instruction: payload["instruction"] = self.instruction return payload def _handle_request(self, payload: EmbaasEmbeddingsPayload) -> List[List[float]]: """Sends a request to the Embaas API and handles the response.""" headers = { "Authorization": f"Bearer {self.embaas_api_key}", "Content-Type": "application/json", } response = requests.post(self.api_url, headers=headers, json=payload) response.raise_for_status() parsed_response = response.json() embeddings = [item["embedding"] for item in parsed_response["data"]] return embeddings def _generate_embeddings(self, texts: List[str]) -> List[List[float]]: """Generate embeddings using the Embaas API.""" payload = self._generate_payload(texts) try: return self._handle_request(payload) except requests.exceptions.RequestException as e: if e.response is None or not e.response.text: raise ValueError(f"Error raised by embaas embeddings API: {e}") parsed_response = e.response.json() if "message" in parsed_response: raise ValueError( "Validation Error raised by embaas embeddings API:" f"{parsed_response['message']}" ) raise [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Get embeddings for a list of texts. Args: texts: The list of texts to get embeddings for. 
Returns: List of embeddings, one for each text. """ batches = [ texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE) ] embeddings = [self._generate_embeddings(batch) for batch in batches] # flatten the list of lists into a single list return [embedding for batch in embeddings for embedding in batch] [docs] def embed_query(self, text: str) -> List[float]: """Get embeddings for a single text. Args: text: The text to get embeddings for. Returns: List of embeddings. """ return self.embed_documents([text])[0]
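A minimal usage sketch for ``EmbaasEmbeddings``. The API key below is a placeholder and assumes a valid embaas account; ``embed_documents`` transparently splits its input into batches of ``MAX_BATCH_SIZE`` (256) texts per request.

.. code-block:: python

    import os

    from langchain.embeddings import EmbaasEmbeddings

    # Placeholder key; replace it or export EMBAAS_API_KEY beforehand.
    os.environ["EMBAAS_API_KEY"] = "your-embaas-api-key"

    embeddings = EmbaasEmbeddings(model="e5-large-v2")

    # Documents are batched in chunks of MAX_BATCH_SIZE (256) per API request.
    doc_vectors = embeddings.embed_documents(["First document.", "Second document."])
    query_vector = embeddings.embed_query("What is the first document about?")

    print(len(doc_vectors), len(query_vector))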
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
Source code for langchain.embeddings.sagemaker_endpoint """Wrapper around Sagemaker InvokeEndpoint API.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.llms.sagemaker_endpoint import ContentHandlerBase class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]): """Content handler for LLM class.""" [docs]class SagemakerEndpointEmbeddings(BaseModel, Embeddings): """Wrapper around custom Sagemaker Inference Endpoints. To use, you must supply the endpoint name from your deployed Sagemaker model & the region where it is deployed. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html """ """ Example: .. code-block:: python from langchain.embeddings import SagemakerEndpointEmbeddings endpoint_name = ( "my-endpoint-name" ) region_name = ( "us-west-2" ) credentials_profile_name = ( "default" ) se = SagemakerEndpointEmbeddings( endpoint_name=endpoint_name, region_name=region_name, credentials_profile_name=credentials_profile_name ) """ client: Any #: :meta private: endpoint_name: str = "" """The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.""" region_name: str = "" """The aws region where the Sagemaker model is deployed, eg. `us-west-2`.""" credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ content_handler: EmbeddingsContentHandler """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ """ Example: .. code-block:: python from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler class ContentHandler(EmbeddingsContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes: input_str = json.dumps({prompts: prompts, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> List[List[float]]: response_json = json.loads(output.read().decode("utf-8")) return response_json["vectors"] """ # noqa: E501 model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. 
_boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ValueError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) return values def _embedding_func(self, texts: List[str]) -> List[List[float]]: """Call out to SageMaker Inference embedding endpoint.""" # replace newlines, which can negatively affect performance. texts = list(map(lambda x: x.replace("\n", " "), texts)) _model_kwargs = self.model_kwargs or {} _endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(texts, _model_kwargs) content_type = self.content_handler.content_type accepts = self.content_handler.accepts # send request try: response = self.client.invoke_endpoint( EndpointName=self.endpoint_name, Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs, ) except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") return self.content_handler.transform_output(response["Body"]) [docs] def embed_documents( self, texts: List[str], chunk_size: int = 64 ) -> List[List[float]]: """Compute doc embeddings using a SageMaker Inference Endpoint. Args: texts: The list of texts to embed. chunk_size: The chunk size defines how many input texts will be grouped together as request. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ results = [] _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size for i in range(0, len(texts), _chunk_size): response = self._embedding_func(texts[i : i + _chunk_size]) results.extend(response) return results [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a SageMaker inference endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func([text])[0]
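A minimal sketch of wiring up a content handler, assuming ``boto3`` is installed, AWS credentials are configured, and a hypothetical endpoint that accepts ``{"inputs": [...]}`` and returns ``{"vectors": [...]}``; the endpoint name, region, and JSON schema are placeholders that must match your own deployment.

.. code-block:: python

    import json
    from typing import Dict, List

    from langchain.embeddings import SagemakerEndpointEmbeddings
    from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler


    class VectorContentHandler(EmbeddingsContentHandler):
        """Hypothetical handler for an endpoint that accepts {"inputs": [...]}
        and returns {"vectors": [[...], ...]}; adjust to your model's schema."""

        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
            return json.dumps({"inputs": prompts, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> List[List[float]]:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json["vectors"]


    embeddings = SagemakerEndpointEmbeddings(
        endpoint_name="my-embedding-endpoint",  # placeholder endpoint name
        region_name="us-west-2",                # placeholder region
        content_handler=VectorContentHandler(),
    )
    vectors = embeddings.embed_documents(["hello", "world"], chunk_size=32)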
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
Source code for langchain.embeddings.llamacpp """Wrapper around llama.cpp embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.embeddings.base import Embeddings [docs]class LlamaCppEmbeddings(BaseModel, Embeddings): """Wrapper around llama.cpp embedding models. To use, you should have the llama-cpp-python library installed, and provide the path to the Llama model as a named parameter to the constructor. Check out: https://github.com/abetlen/llama-cpp-python Example: .. code-block:: python from langchain.embeddings import LlamaCppEmbeddings llama = LlamaCppEmbeddings(model_path="/path/to/model.bin") """ client: Any #: :meta private: model_path: str n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(-1, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(False, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" model_path = values["model_path"] model_param_names = [ "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] try: from llama_cpp import Llama values["client"] = Llama(model_path, embedding=True, **model_params) except ImportError: raise ModuleNotFoundError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this embedding model: pip install llama-cpp-python" ) except Exception as e: raise ValueError( f"Could not load Llama model from path: {model_path}. " f"Received error {e}" ) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed a list of documents using the Llama model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = [self.client.embed(text) for text in texts] return [list(map(float, e)) for e in embeddings] [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using the Llama model. Args: text: The text to embed. Returns: Embeddings for the text. """ embedding = self.client.embed(text) return list(map(float, embedding))
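A minimal local-inference sketch, assuming ``llama-cpp-python`` is installed; the model path below is a placeholder and must point at a locally downloaded GGML model file.

.. code-block:: python

    from langchain.embeddings import LlamaCppEmbeddings

    # Placeholder path; point this at a real local GGML model file.
    llama = LlamaCppEmbeddings(
        model_path="/path/to/model.bin",
        n_ctx=2048,    # widen the 512-token default context window
        n_threads=8,   # pin the number of CPU threads explicitly
    )

    doc_vectors = llama.embed_documents(["llama.cpp runs locally.", "No API key is needed."])
    query_vector = llama.embed_query("Where does llama.cpp run?")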
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html
Source code for langchain.embeddings.dashscope """Wrapper around DashScope embedding models.""" from __future__ import annotations import logging from typing import ( Any, Callable, Dict, List, Optional, ) from pydantic import BaseModel, Extra, root_validator from requests.exceptions import HTTPError from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: DashScopeEmbeddings) -> Callable[[Any], Any]: multiplier = 1 min_seconds = 1 max_seconds = 4 # Wait 2^x * 1 second between each retry starting with # 1 seconds, then up to 4 seconds, then 4 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(HTTPError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: resp = embeddings.client.call(**kwargs) if resp.status_code == 200: return resp.output["embeddings"] elif resp.status_code in [400, 401]: raise ValueError( f"status_code: {resp.status_code} \n " f"code: {resp.code} \n message: {resp.message}" ) else: raise HTTPError( f"HTTP error occurred: status_code: {resp.status_code} \n " f"code: {resp.code} \n message: {resp.message}" ) return _embed_with_retry(**kwargs) [docs]class DashScopeEmbeddings(BaseModel, Embeddings): """Wrapper around DashScope embedding models. To use, you should have the ``dashscope`` python package installed, and the environment variable ``DASHSCOPE_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import DashScopeEmbeddings embeddings = DashScopeEmbeddings(dashscope_api_key="my-api-key") Example: .. code-block:: python import os os.environ["DASHSCOPE_API_KEY"] = "your DashScope API KEY" from langchain.embeddings.dashscope import DashScopeEmbeddings embeddings = DashScopeEmbeddings( model="text-embedding-v1", ) text = "This is a test query." query_result = embeddings.embed_query(text) """ client: Any #: :meta private: model: str = "text-embedding-v1" dashscope_api_key: Optional[str] = None """Maximum number of retries to make when generating.""" max_retries: int = 5 class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: import dashscope """Validate that api key and python package exists in environment.""" values["dashscope_api_key"] = get_from_dict_or_env( values, "dashscope_api_key", "DASHSCOPE_API_KEY" ) dashscope.api_key = values["dashscope_api_key"] try: import dashscope values["client"] = dashscope.TextEmbedding except ImportError: raise ImportError( "Could not import dashscope python package. " "Please install it with `pip install dashscope`." ) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to DashScope's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. 
Returns: List of embeddings, one for each text. """ embeddings = embed_with_retry( self, input=texts, text_type="document", model=self.model ) embedding_list = [item["embedding"] for item in embeddings] return embedding_list [docs] def embed_query(self, text: str) -> List[float]: """Call out to DashScope's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = embed_with_retry( self, input=text, text_type="query", model=self.model )[0]["embedding"] return embedding
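A minimal usage sketch, assuming the ``dashscope`` package is installed and the placeholder key below is replaced with a real one. Documents and queries are sent with different ``text_type`` values ("document" vs. "query"), so use the matching method for each.

.. code-block:: python

    import os

    from langchain.embeddings import DashScopeEmbeddings

    # Placeholder key; replace it or export DASHSCOPE_API_KEY beforehand.
    os.environ["DASHSCOPE_API_KEY"] = "your-dashscope-api-key"

    embeddings = DashScopeEmbeddings(model="text-embedding-v1", max_retries=3)

    doc_vectors = embeddings.embed_documents(["DashScope serves embedding models."])
    query_vector = embeddings.embed_query("Which service provides these embeddings?")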
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
Source code for langchain.memory.motorhead_memory from typing import Any, Dict, List, Optional import requests from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import get_buffer_string MANAGED_URL = "https://api.getmetal.io/v1/motorhead" # LOCAL_URL = "http://localhost:8080" [docs]class MotorheadMemory(BaseChatMemory): url: str = MANAGED_URL timeout = 3000 memory_key = "history" session_id: str context: Optional[str] = None # Managed Params api_key: Optional[str] = None client_id: Optional[str] = None def __get_headers(self) -> Dict[str, str]: is_managed = self.url == MANAGED_URL headers = { "Content-Type": "application/json", } if is_managed and not (self.api_key and self.client_id): raise ValueError( """ You must provide an API key or a client ID to use the managed version of Motorhead. Visit https://getmetal.io for more information. """ ) if is_managed and self.api_key and self.client_id: headers["x-metal-api-key"] = self.api_key headers["x-metal-client-id"] = self.client_id return headers [docs] async def init(self) -> None: res = requests.get( f"{self.url}/sessions/{self.session_id}/memory", timeout=self.timeout, headers=self.__get_headers(), ) res_data = res.json() res_data = res_data.get("data", res_data) # Handle Managed Version messages = res_data.get("messages", []) context = res_data.get("context", "NONE") for message in reversed(messages): if message["role"] == "AI": self.chat_memory.add_ai_message(message["content"]) else: self.chat_memory.add_user_message(message["content"]) if context and context != "NONE": self.context = context [docs] def load_memory_variables(self, values: Dict[str, Any]) -> Dict[str, Any]: if self.return_messages: return {self.memory_key: self.chat_memory.messages} else: return {self.memory_key: get_buffer_string(self.chat_memory.messages)} @property def memory_variables(self) -> List[str]: return [self.memory_key] [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: input_str, output_str = self._get_input_output(inputs, outputs) requests.post( f"{self.url}/sessions/{self.session_id}/memory", timeout=self.timeout, json={ "messages": [ {"role": "Human", "content": f"{input_str}"}, {"role": "AI", "content": f"{output_str}"}, ] }, headers=self.__get_headers(), ) super().save_context(inputs, outputs) [docs] def delete_session(self) -> None: """Delete a session""" requests.delete(f"{self.url}/sessions/{self.session_id}/memory")
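A minimal sketch against a self-hosted Motorhead instance (the managed service additionally needs ``api_key`` and ``client_id``). The URL and session id below are placeholders, and ``init()`` must be awaited once to pull any existing messages and context for the session.

.. code-block:: python

    import asyncio

    from langchain.memory import MotorheadMemory

    memory = MotorheadMemory(
        url="http://localhost:8080",  # placeholder: a locally running Motorhead server
        session_id="user-42",
    )

    async def main() -> None:
        await memory.init()  # loads prior messages and context for the session
        memory.save_context({"input": "Hi, I'm Bob."}, {"output": "Hello Bob!"})
        print(memory.load_memory_variables({}))

    asyncio.run(main())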
https://api.python.langchain.com/en/latest/_modules/langchain/memory/motorhead_memory.html
Source code for langchain.memory.entity import logging from abc import ABC, abstractmethod from itertools import islice from typing import Any, Dict, Iterable, List, Optional from pydantic import BaseModel, Field from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, ENTITY_SUMMARIZATION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key from langchain.prompts.base import BasePromptTemplate from langchain.schema import BaseMessage, get_buffer_string logger = logging.getLogger(__name__) class BaseEntityStore(BaseModel, ABC): @abstractmethod def get(self, key: str, default: Optional[str] = None) -> Optional[str]: """Get entity value from store.""" pass @abstractmethod def set(self, key: str, value: Optional[str]) -> None: """Set entity value in store.""" pass @abstractmethod def delete(self, key: str) -> None: """Delete entity value from store.""" pass @abstractmethod def exists(self, key: str) -> bool: """Check if entity exists in store.""" pass @abstractmethod def clear(self) -> None: """Delete all entities from store.""" pass [docs]class InMemoryEntityStore(BaseEntityStore): """Basic in-memory entity store.""" store: Dict[str, Optional[str]] = {} [docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]: return self.store.get(key, default) [docs] def set(self, key: str, value: Optional[str]) -> None: self.store[key] = value [docs] def delete(self, key: str) -> None: del self.store[key] [docs] def exists(self, key: str) -> bool: return key in self.store [docs] def clear(self) -> None: return self.store.clear() [docs]class RedisEntityStore(BaseEntityStore): """Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ redis_client: Any session_id: str = "default" key_prefix: str = "memory_store" ttl: Optional[int] = 60 * 60 * 24 recall_ttl: Optional[int] = 60 * 60 * 24 * 3 def __init__( self, session_id: str = "default", url: str = "redis://localhost:6379/0", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: import redis except ImportError: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." 
) super().__init__(*args, **kwargs) try: self.redis_client = redis.Redis.from_url(url=url, decode_responses=True) except redis.exceptions.ConnectionError as error: logger.error(error) self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" [docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'") return res [docs] def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) [docs] def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") [docs] def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 [docs] def clear(self) -> None: # iterate a list in batches of size batch_size def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]: iterator = iter(iterable) while batch := list(islice(iterator, batch_size)): yield batch for keybatch in batched( self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500 ): self.redis_client.delete(*keybatch) [docs]class SQLiteEntityStore(BaseEntityStore): """SQLite-backed Entity store""" session_id: str = "default" table_name: str = "memory_store" def __init__( self, session_id: str = "default", db_file: str = "entities.db", table_name: str = "memory_store", *args: Any, **kwargs: Any, ): try: import sqlite3 except ImportError: raise ImportError( "Could not import sqlite3 python package. " "Please install it with `pip install sqlite3`." ) super().__init__(*args, **kwargs) self.conn = sqlite3.connect(db_file) self.session_id = session_id self.table_name = table_name self._create_table_if_not_exists() @property def full_table_name(self) -> str: return f"{self.table_name}_{self.session_id}" def _create_table_if_not_exists(self) -> None: create_table_query = f""" CREATE TABLE IF NOT EXISTS {self.full_table_name} ( key TEXT PRIMARY KEY, value TEXT ) """ with self.conn: self.conn.execute(create_table_query) [docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]: query = f""" SELECT value FROM {self.full_table_name} WHERE key = ? """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() if result is not None: value = result[0] return value return default [docs] def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) query = f""" INSERT OR REPLACE INTO {self.full_table_name} (key, value) VALUES (?, ?) """ with self.conn: self.conn.execute(query, (key, value)) [docs] def delete(self, key: str) -> None: query = f""" DELETE FROM {self.full_table_name} WHERE key = ? """ with self.conn: self.conn.execute(query, (key,)) [docs] def exists(self, key: str) -> bool: query = f""" SELECT 1 FROM {self.full_table_name} WHERE key = ? LIMIT 1 """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() return result is not None [docs] def clear(self) -> None: query = f""" DELETE FROM {self.full_table_name} """ with self.conn: self.conn.execute(query) [docs]class ConversationEntityMemory(BaseChatMemory): """Entity extractor & summarizer memory. 
Extracts named entities from the recent chat history and generates summaries. With a swapable entity store, persisting entities across conversations. Defaults to an in-memory entity store, and can be swapped out for a Redis, SQLite, or other entity store. """ human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT # Cache of recently detected entity names, if any # It is updated when load_memory_variables is called: entity_cache: List[str] = [] # Number of recent message pairs to consider when updating entities: k: int = 3 chat_history_key: str = "history" # Store to manage entity-related data: entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) @property def buffer(self) -> List[BaseMessage]: """Access chat memory messages.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return ["entities", self.chat_history_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """ Returns chat history and all generated entities with summaries if available, and updates or clears the recent entity cache. New entity name can be found when calling this method, before the entity summaries are generated, so the entity cache values may be empty if no entity descriptions are generated yet. """ # Create an LLMChain for predicting entity names from the recent chat history: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) # Generates a comma-separated list of named entities, # e.g. "Jane, White House, UFO" # or "NONE" if no named entities are extracted: output = chain.predict( history=buffer_string, input=inputs[prompt_input_key], ) # If no named entities are extracted, assigns an empty list. if output.strip() == "NONE": entities = [] else: # Make a list of the extracted entities: entities = [w.strip() for w in output.split(",")] # Make a dictionary of entities with summary if exists: entity_summaries = {} for entity in entities: entity_summaries[entity] = self.entity_store.get(entity, "") # Replaces the entity name cache with the most recently discussed entities, # or if no entities were extracted, clears the cache: self.entity_cache = entities # Should we return as message objects or as a string? if self.return_messages: # Get last `k` pair of chat messages: buffer: Any = self.buffer[-self.k * 2 :] else: # Reuse the string we made earlier: buffer = buffer_string return { self.chat_history_key: buffer, "entities": entity_summaries, } [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """ Save context from this conversation history to the entity store. Generates a summary for each entity in the entity cache by prompting the model, and saves these summaries to the entity store. 
""" super().save_context(inputs, outputs) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) input_data = inputs[prompt_input_key] # Create an LLMChain for predicting entity summarization from the context chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt) # Generate new summaries for entities and save them in the entity store for entity in self.entity_cache: # Get existing summary if it exists existing_summary = self.entity_store.get(entity, "") output = chain.predict( summary=existing_summary, entity=entity, history=buffer_string, input=input_data, ) # Save the updated summary to the entity store self.entity_store.set(entity, output.strip()) [docs] def clear(self) -> None: """Clear memory contents.""" self.chat_memory.clear() self.entity_cache.clear() self.entity_store.clear()
https://api.python.langchain.com/en/latest/_modules/langchain/memory/entity.html
Source code for langchain.memory.buffer from typing import Any, Dict, List, Optional from pydantic import root_validator from langchain.memory.chat_memory import BaseChatMemory, BaseMemory from langchain.memory.utils import get_prompt_input_key from langchain.schema import get_buffer_string [docs]class ConversationBufferMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" #: :meta private: @property def buffer(self) -> Any: """String buffer of memory.""" if self.return_messages: return self.chat_memory.messages else: return get_buffer_string( self.chat_memory.messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" return {self.memory_key: self.buffer} [docs]class ConversationStringBufferMemory(BaseMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" """Prefix to use for AI generated responses.""" buffer: str = "" output_key: Optional[str] = None input_key: Optional[str] = None memory_key: str = "history" #: :meta private: @root_validator() def validate_chains(cls, values: Dict) -> Dict: """Validate that return messages is not True.""" if values.get("return_messages", False): raise ValueError( "return_messages must be False for ConversationStringBufferMemory" ) return values @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return history buffer.""" return {self.memory_key: self.buffer} [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") output_key = list(outputs.keys())[0] else: output_key = self.output_key human = f"{self.human_prefix}: " + inputs[prompt_input_key] ai = f"{self.ai_prefix}: " + outputs[output_key] self.buffer += "\n" + "\n".join([human, ai]) [docs] def clear(self) -> None: """Clear memory contents.""" self.buffer = ""
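A minimal sketch of the two output modes: a single formatted string by default, or message objects when ``return_messages=True`` (useful for chat models).

.. code-block:: python

    from langchain.memory import ConversationBufferMemory

    memory = ConversationBufferMemory()
    memory.save_context({"input": "Hi there"}, {"output": "Hello! How can I help?"})
    memory.save_context({"input": "Tell me a joke"}, {"output": "Why did the chicken cross the road?"})

    # Default: the whole conversation as one "Human:/AI:" string.
    print(memory.load_memory_variables({})["history"])

    # return_messages=True yields BaseMessage objects instead.
    chat_style = ConversationBufferMemory(return_messages=True)
    chat_style.save_context({"input": "Hi there"}, {"output": "Hello!"})
    print(chat_style.load_memory_variables({})["history"])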
https://api.python.langchain.com/en/latest/_modules/langchain/memory/buffer.html
Source code for langchain.memory.vectorstore """Class for a VectorStore-backed memory object.""" from typing import Any, Dict, List, Optional, Union from pydantic import Field from langchain.memory.chat_memory import BaseMemory from langchain.memory.utils import get_prompt_input_key from langchain.schema import Document from langchain.vectorstores.base import VectorStoreRetriever [docs]class VectorStoreRetrieverMemory(BaseMemory): """Class for a VectorStore-backed memory object.""" retriever: VectorStoreRetriever = Field(exclude=True) """VectorStoreRetriever object to connect to.""" memory_key: str = "history" #: :meta private: """Key name to locate the memories in the result of load_memory_variables.""" input_key: Optional[str] = None """Key name to index the inputs to load_memory_variables.""" return_docs: bool = False """Whether or not to return the result of querying the database directly.""" @property def memory_variables(self) -> List[str]: """The list of keys emitted from the load_memory_variables method.""" return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: """Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key [docs] def load_memory_variables( self, inputs: Dict[str, Any] ) -> Dict[str, Union[List[Document], str]]: """Return history buffer.""" input_key = self._get_prompt_input_key(inputs) query = inputs[input_key] docs = self.retriever.get_relevant_documents(query) result: Union[List[Document], str] if not self.return_docs: result = "\n".join([doc.page_content for doc in docs]) else: result = docs return {self.memory_key: result} def _form_documents( self, inputs: Dict[str, Any], outputs: Dict[str, str] ) -> List[Document]: """Format context from this conversation to buffer.""" # Each document should only include the current turn, not the chat history filtered_inputs = {k: v for k, v in inputs.items() if k != self.memory_key} texts = [ f"{k}: {v}" for k, v in list(filtered_inputs.items()) + list(outputs.items()) ] page_content = "\n".join(texts) return [Document(page_content=page_content)] [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" documents = self._form_documents(inputs, outputs) self.retriever.add_documents(documents) [docs] def clear(self) -> None: """Nothing to clear."""
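A minimal sketch backed by a FAISS index, assuming the ``faiss-cpu`` package is installed. ``FakeEmbeddings`` keeps the example self-contained but returns random vectors, so retrieval order is arbitrary; swap in a real embedding model for meaningful similarity search.

.. code-block:: python

    import faiss

    from langchain.docstore import InMemoryDocstore
    from langchain.embeddings.fake import FakeEmbeddings
    from langchain.memory import VectorStoreRetrieverMemory
    from langchain.vectorstores import FAISS

    embedding_size = 128
    embedding_fn = FakeEmbeddings(size=embedding_size)  # random vectors, for illustration only
    index = faiss.IndexFlatL2(embedding_size)
    vectorstore = FAISS(embedding_fn.embed_query, index, InMemoryDocstore({}), {})

    memory = VectorStoreRetrieverMemory(
        retriever=vectorstore.as_retriever(search_kwargs={"k": 1})
    )
    memory.save_context({"input": "My favorite color is teal"}, {"output": "Noted!"})
    memory.save_context({"input": "I work as a geologist"}, {"output": "Interesting!"})

    # The single most relevant past turn is returned under the "history" key.
    print(memory.load_memory_variables({"input": "What do I do for a living?"})["history"])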
https://api.python.langchain.com/en/latest/_modules/langchain/memory/vectorstore.html
Source code for langchain.memory.buffer_window from typing import Any, Dict, List from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import BaseMessage, get_buffer_string [docs]class ConversationBufferWindowMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" #: :meta private: k: int = 5 @property def buffer(self) -> List[BaseMessage]: """String buffer of memory.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return history buffer.""" buffer: Any = self.buffer[-self.k * 2 :] if self.k > 0 else [] if not self.return_messages: buffer = get_buffer_string( buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) return {self.memory_key: buffer}
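A minimal sketch: with ``k=1`` only the most recent human/AI pair is returned, regardless of how much has been stored.

.. code-block:: python

    from langchain.memory import ConversationBufferWindowMemory

    memory = ConversationBufferWindowMemory(k=1)
    memory.save_context({"input": "First question"}, {"output": "First answer"})
    memory.save_context({"input": "Second question"}, {"output": "Second answer"})

    # Only "Second question" / "Second answer" survive the window.
    print(memory.load_memory_variables({})["history"])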
https://api.python.langchain.com/en/latest/_modules/langchain/memory/buffer_window.html
Source code for langchain.memory.summary from __future__ import annotations from typing import Any, Dict, List, Type from pydantic import BaseModel, root_validator from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import SUMMARY_PROMPT from langchain.prompts.base import BasePromptTemplate from langchain.schema import ( BaseChatMessageHistory, BaseMessage, SystemMessage, get_buffer_string, ) class SummarizerMixin(BaseModel): human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel prompt: BasePromptTemplate = SUMMARY_PROMPT summary_message_cls: Type[BaseMessage] = SystemMessage def predict_new_summary( self, messages: List[BaseMessage], existing_summary: str ) -> str: new_lines = get_buffer_string( messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) chain = LLMChain(llm=self.llm, prompt=self.prompt) return chain.predict(summary=existing_summary, new_lines=new_lines) [docs]class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin): """Conversation summarizer to memory.""" buffer: str = "" memory_key: str = "history" #: :meta private: [docs] @classmethod def from_messages( cls, llm: BaseLanguageModel, chat_memory: BaseChatMessageHistory, *, summarize_step: int = 2, **kwargs: Any, ) -> ConversationSummaryMemory: obj = cls(llm=llm, chat_memory=chat_memory, **kwargs) for i in range(0, len(obj.chat_memory.messages), summarize_step): obj.buffer = obj.predict_new_summary( obj.chat_memory.messages[i : i + summarize_step], obj.buffer ) return obj @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" if self.return_messages: buffer: Any = [self.summary_message_cls(content=self.buffer)] else: buffer = self.buffer return {self.memory_key: buffer} @root_validator() def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" prompt_variables = values["prompt"].input_variables expected_keys = {"summary", "new_lines"} if expected_keys != set(prompt_variables): raise ValueError( "Got unexpected prompt input variables. The prompt expects " f"{prompt_variables}, but it should have {expected_keys}." ) return values [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self.buffer = self.predict_new_summary( self.chat_memory.messages[-2:], self.buffer ) [docs] def clear(self) -> None: """Clear memory contents.""" super().clear() self.buffer = ""
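A minimal, offline sketch using ``FakeListLLM`` so the "summary" is a canned string; with a real model, ``save_context`` folds each new exchange into the rolling summary via ``SUMMARY_PROMPT``.

.. code-block:: python

    from langchain.llms.fake import FakeListLLM
    from langchain.memory import ConversationSummaryMemory

    # The fake model returns this canned summary; use a real LLM in practice.
    llm = FakeListLLM(responses=["The human greeted the AI and asked about the weather."])

    memory = ConversationSummaryMemory(llm=llm)
    memory.save_context(
        {"input": "Hello! What's the weather like today?"},
        {"output": "Hi! It looks sunny with a light breeze."},
    )

    # The rolling summary is returned instead of the raw transcript.
    print(memory.load_memory_variables({})["history"])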
https://api.python.langchain.com/en/latest/_modules/langchain/memory/summary.html
Source code for langchain.memory.combined import warnings from typing import Any, Dict, List, Set from pydantic import validator from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import BaseMemory [docs]class CombinedMemory(BaseMemory): """Class for combining multiple memories' data together.""" memories: List[BaseMemory] """For tracking all the memories that should be accessed.""" @validator("memories") def check_repeated_memory_variable( cls, value: List[BaseMemory] ) -> List[BaseMemory]: all_variables: Set[str] = set() for val in value: overlap = all_variables.intersection(val.memory_variables) if overlap: raise ValueError( f"The same variables {overlap} are found in multiple" "memory object, which is not allowed by CombinedMemory." ) all_variables |= set(val.memory_variables) return value @validator("memories") def check_input_key(cls, value: List[BaseMemory]) -> List[BaseMemory]: """Check that if memories are of type BaseChatMemory that input keys exist.""" for val in value: if isinstance(val, BaseChatMemory): if val.input_key is None: warnings.warn( "When using CombinedMemory, " "input keys should be so the input is known. " f" Was not set on {val}" ) return value @property def memory_variables(self) -> List[str]: """All the memory variables that this instance provides.""" """Collected from the all the linked memories.""" memory_variables = [] for memory in self.memories: memory_variables.extend(memory.memory_variables) return memory_variables [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Load all vars from sub-memories.""" memory_data: Dict[str, Any] = {} # Collect vars from all sub-memories for memory in self.memories: data = memory.load_memory_variables(inputs) memory_data = { **memory_data, **data, } return memory_data [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this session for every memory.""" # Save context for all sub-memories for memory in self.memories: memory.save_context(inputs, outputs) [docs] def clear(self) -> None: """Clear context from this session for every memory.""" for memory in self.memories: memory.clear()
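A minimal sketch combining a raw buffer with a summary memory. Each sub-memory must expose a distinct ``memory_key``, and chat-style memories should set ``input_key`` explicitly; ``FakeListLLM`` keeps the example offline, so substitute a real model for real summaries.

.. code-block:: python

    from langchain.llms.fake import FakeListLLM
    from langchain.memory import (
        CombinedMemory,
        ConversationBufferMemory,
        ConversationSummaryMemory,
    )

    buffer_memory = ConversationBufferMemory(memory_key="chat_history", input_key="input")
    summary_memory = ConversationSummaryMemory(
        llm=FakeListLLM(responses=["The human said hello."]),  # stand-in model
        memory_key="summary",
        input_key="input",
    )

    memory = CombinedMemory(memories=[buffer_memory, summary_memory])
    memory.save_context({"input": "Hello!"}, {"output": "Hi, how can I help?"})

    # Both keys come back together: {"chat_history": ..., "summary": ...}
    print(memory.load_memory_variables({"input": "Hello!"}))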
https://api.python.langchain.com/en/latest/_modules/langchain/memory/combined.html
Source code for langchain.memory.readonly from typing import Any, Dict, List from langchain.schema import BaseMemory [docs]class ReadOnlySharedMemory(BaseMemory): """A memory wrapper that is read-only and cannot be changed.""" memory: BaseMemory @property def memory_variables(self) -> List[str]: """Return memory variables.""" return self.memory.memory_variables [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Load memory variables from memory.""" return self.memory.load_memory_variables(inputs) [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Nothing should be saved or changed""" pass [docs] def clear(self) -> None: """Nothing to clear, got a memory like a vault.""" pass
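A minimal sketch: the wrapper exposes the underlying memory for reading while turning writes and clears into no-ops, which is handy when a tool or sub-chain should see the conversation but never alter it.

.. code-block:: python

    from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory

    shared = ConversationBufferMemory(memory_key="chat_history")
    shared.save_context({"input": "Remember: the launch is Friday."}, {"output": "Got it."})

    readonly = ReadOnlySharedMemory(memory=shared)
    print(readonly.load_memory_variables({}))

    readonly.save_context({"input": "ignored"}, {"output": "ignored"})  # no-op
    readonly.clear()                                                    # no-op
    print(readonly.load_memory_variables({}))  # unchanged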
https://api.python.langchain.com/en/latest/_modules/langchain/memory/readonly.html
Source code for langchain.memory.simple from typing import Any, Dict, List from langchain.schema import BaseMemory [docs]class SimpleMemory(BaseMemory): """Simple memory for storing context or other bits of information that shouldn't ever change between prompts. """ memories: Dict[str, Any] = dict() @property def memory_variables(self) -> List[str]: return list(self.memories.keys()) [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: return self.memories [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Nothing should be saved or changed, my memory is set in stone.""" pass [docs] def clear(self) -> None: """Nothing to clear, got a memory like a vault.""" pass
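A minimal sketch for injecting fixed facts into every prompt.

.. code-block:: python

    from langchain.memory import SimpleMemory

    memory = SimpleMemory(memories={"company": "Acme Corp", "tone": "formal"})

    print(memory.memory_variables)           # ['company', 'tone']
    print(memory.load_memory_variables({}))  # the same dict on every call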
https://api.python.langchain.com/en/latest/_modules/langchain/memory/simple.html
Source code for langchain.memory.token_buffer from typing import Any, Dict, List from langchain.base_language import BaseLanguageModel from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import BaseMessage, get_buffer_string [docs]class ConversationTokenBufferMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel memory_key: str = "history" max_token_limit: int = 2000 @property def buffer(self) -> List[BaseMessage]: """String buffer of memory.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" buffer: Any = self.buffer if self.return_messages: final_buffer: Any = buffer else: final_buffer = get_buffer_string( buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) return {self.memory_key: final_buffer} [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer. Pruned.""" super().save_context(inputs, outputs) # Prune buffer if it exceeds max token limit buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_length > self.max_token_limit: pruned_memory = [] while curr_buffer_length > self.max_token_limit: pruned_memory.append(buffer.pop(0)) curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
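A minimal sketch, assuming the ``openai`` and ``tiktoken`` packages are installed and ``OPENAI_API_KEY`` is set; the model is consulted here only for token counting, and older messages are silently dropped once the budget is exceeded.

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain.memory import ConversationTokenBufferMemory

    llm = ChatOpenAI(temperature=0)  # assumes OPENAI_API_KEY is set

    memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=40)
    memory.save_context({"input": "Tell me about the project."}, {"output": "It ships next week."})
    memory.save_context({"input": "Who is leading it?"}, {"output": "Dana leads the effort."})

    # Only the most recent messages that fit within the 40-token budget remain.
    print(memory.load_memory_variables({})["history"])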
https://api.python.langchain.com/en/latest/_modules/langchain/memory/token_buffer.html
Source code for langchain.memory.summary_buffer from typing import Any, Dict, List from pydantic import root_validator from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.summary import SummarizerMixin from langchain.schema import BaseMessage, get_buffer_string [docs]class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin): """Buffer with summarizer for storing conversation memory.""" max_token_limit: int = 2000 moving_summary_buffer: str = "" memory_key: str = "history" @property def buffer(self) -> List[BaseMessage]: return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" buffer = self.buffer if self.moving_summary_buffer != "": first_messages: List[BaseMessage] = [ self.summary_message_cls(content=self.moving_summary_buffer) ] buffer = first_messages + buffer if self.return_messages: final_buffer: Any = buffer else: final_buffer = get_buffer_string( buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix ) return {self.memory_key: final_buffer} @root_validator() def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" prompt_variables = values["prompt"].input_variables expected_keys = {"summary", "new_lines"} if expected_keys != set(prompt_variables): raise ValueError( "Got unexpected prompt input variables. The prompt expects " f"{prompt_variables}, but it should have {expected_keys}." ) return values [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self.prune() [docs] def prune(self) -> None: """Prune buffer if it exceeds max token limit""" buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_length > self.max_token_limit: pruned_memory = [] while curr_buffer_length > self.max_token_limit: pruned_memory.append(buffer.pop(0)) curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) self.moving_summary_buffer = self.predict_new_summary( pruned_memory, self.moving_summary_buffer ) [docs] def clear(self) -> None: """Clear memory contents.""" super().clear() self.moving_summary_buffer = ""
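A minimal sketch, assuming the ``openai`` package is installed and ``OPENAI_API_KEY`` is set; once the raw buffer exceeds ``max_token_limit``, the oldest messages are summarized into ``moving_summary_buffer`` and prepended as a system message.

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain.memory import ConversationSummaryBufferMemory

    llm = ChatOpenAI(temperature=0)  # assumes OPENAI_API_KEY is set

    memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=60)
    memory.save_context({"input": "Hi, I'm planning a trip to Kyoto."}, {"output": "Nice! When?"})
    memory.save_context({"input": "In November, for the autumn leaves."}, {"output": "Great season."})

    # Summarized overflow (if any) plus the remaining recent messages.
    print(memory.load_memory_variables({})["history"])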
https://api.python.langchain.com/en/latest/_modules/langchain/memory/summary_buffer.html
Source code for langchain.memory.kg from typing import Any, Dict, List, Type, Union from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.graphs import NetworkxEntityGraph from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key from langchain.prompts.base import BasePromptTemplate from langchain.schema import ( BaseMessage, SystemMessage, get_buffer_string, ) [docs]class ConversationKGMemory(BaseChatMemory): """Knowledge graph memory for storing conversation memory. Integrates with external knowledge graph to store and retrieve information about knowledge triples in the conversation. """ k: int = 2 human_prefix: str = "Human" ai_prefix: str = "AI" kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph) knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT llm: BaseLanguageModel summary_message_cls: Type[BaseMessage] = SystemMessage """Number of previous utterances to include in the context.""" memory_key: str = "history" #: :meta private: [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" entities = self._get_current_entities(inputs) summary_strings = [] for entity in entities: knowledge = self.kg.get_entity_knowledge(entity) if knowledge: summary = f"On {entity}: {'. '.join(knowledge)}." summary_strings.append(summary) context: Union[str, List] if not summary_strings: context = [] if self.return_messages else "" elif self.return_messages: context = [ self.summary_message_cls(content=text) for text in summary_strings ] else: context = "\n".join(summary_strings) return {self.memory_key: context} @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. 
:meta private: """ return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: """Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str: """Get the output key for the prompt.""" if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") return list(outputs.keys())[0] return self.output_key [docs] def get_current_entities(self, input_string: str) -> List[str]: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) buffer_string = get_buffer_string( self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) output = chain.predict( history=buffer_string, input=input_string, ) return get_entities(output) def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]: """Get the current entities in the conversation.""" prompt_input_key = self._get_prompt_input_key(inputs) return self.get_current_entities(inputs[prompt_input_key]) [docs] def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]: chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt) buffer_string = get_buffer_string( self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) output = chain.predict( history=buffer_string, input=input_string, verbose=True, ) knowledge = parse_triples(output) return knowledge def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None: """Get and update knowledge graph from the conversation history.""" prompt_input_key = self._get_prompt_input_key(inputs) knowledge = self.get_knowledge_triplets(inputs[prompt_input_key]) for triple in knowledge: self.kg.add_triple(triple) [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self._get_and_update_kg(inputs) [docs] def clear(self) -> None: """Clear memory contents.""" super().clear() self.kg.clear()
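A minimal sketch, assuming ``networkx`` is installed and ``OPENAI_API_KEY`` is set; the model extracts entities and knowledge triples, which are stored in the in-memory graph and recalled whenever those entities come up again.

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain.memory import ConversationKGMemory

    llm = ChatOpenAI(temperature=0)  # assumes OPENAI_API_KEY is set

    memory = ConversationKGMemory(llm=llm, k=2)
    memory.save_context(
        {"input": "Sam is my coworker. He runs the data platform team."},
        {"output": "Good to know about Sam."},
    )

    # Entities in the new input are looked up against the knowledge graph.
    print(memory.load_memory_variables({"input": "What does Sam do?"}))
    print(memory.kg.get_triples())  # stored (subject, predicate, object) triples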
https://api.python.langchain.com/en/latest/_modules/langchain/memory/kg.html
Source code for langchain.memory.chat_message_histories.zep from __future__ import annotations import logging from typing import TYPE_CHECKING, Dict, List, Optional from langchain.schema import ( AIMessage, BaseChatMessageHistory, BaseMessage, HumanMessage, ) if TYPE_CHECKING: from zep_python import Memory, MemorySearchResult, Message, NotFoundError logger = logging.getLogger(__name__) [docs]class ZepChatMessageHistory(BaseChatMessageHistory): """A ChatMessageHistory implementation that uses Zep as a backend. Recommended usage:: # Set up Zep Chat History zep_chat_history = ZepChatMessageHistory( session_id=session_id, url=ZEP_API_URL, ) # Use a standard ConversationBufferMemory to encapsulate the Zep chat history memory = ConversationBufferMemory( memory_key="chat_history", chat_memory=zep_chat_history ) Zep provides long-term conversation storage for LLM apps. The server stores, summarizes, embeds, indexes, and enriches conversational AI chat histories, and exposes them via simple, low-latency APIs. For server installation instructions and more, see: https://getzep.github.io/ This class is a thin wrapper around the zep-python package. Additional Zep functionality is exposed via the `zep_summary` and `zep_messages` properties. For more information on the zep-python package, see: https://github.com/getzep/zep-python """ def __init__( self, session_id: str, url: str = "http://localhost:8000", ) -> None: try: from zep_python import ZepClient except ImportError: raise ValueError( "Could not import zep-python package. " "Please install it with `pip install zep-python`." ) self.zep_client = ZepClient(base_url=url) self.session_id = session_id @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve messages from Zep memory""" zep_memory: Optional[Memory] = self._get_memory() if not zep_memory: return [] messages: List[BaseMessage] = [] # Extract summary, if present, and messages if zep_memory.summary: if len(zep_memory.summary.content) > 0: messages.append(HumanMessage(content=zep_memory.summary.content)) if zep_memory.messages: msg: Message for msg in zep_memory.messages: if msg.role == "ai": messages.append(AIMessage(content=msg.content)) else: messages.append(HumanMessage(content=msg.content)) return messages @property def zep_messages(self) -> List[Message]: """Retrieve summary from Zep memory""" zep_memory: Optional[Memory] = self._get_memory() if not zep_memory: return [] return zep_memory.messages @property def zep_summary(self) -> Optional[str]: """Retrieve summary from Zep memory""" zep_memory: Optional[Memory] = self._get_memory() if not zep_memory or not zep_memory.summary: return None return zep_memory.summary.content def _get_memory(self) -> Optional[Memory]: """Retrieve memory from Zep""" from zep_python import NotFoundError try: zep_memory: Memory = self.zep_client.get_memory(self.session_id) except NotFoundError: logger.warning( f"Session {self.session_id} not found in Zep. 
Returning None" ) return None return zep_memory [docs] def add_message(self, message: BaseMessage) -> None: """Append the message to the Zep memory history""" from zep_python import Memory, Message zep_message: Message if isinstance(message, HumanMessage): zep_message = Message(content=message.content, role="human") else: zep_message = Message(content=message.content, role="ai") zep_memory = Memory(messages=[zep_message]) self.zep_client.add_memory(self.session_id, zep_memory) [docs] def search( self, query: str, metadata: Optional[Dict] = None, limit: Optional[int] = None ) -> List[MemorySearchResult]: """Search Zep memory for messages matching the query""" from zep_python import MemorySearchPayload payload: MemorySearchPayload = MemorySearchPayload( text=query, metadata=metadata ) return self.zep_client.search_memory(self.session_id, payload, limit=limit) [docs] def clear(self) -> None: """Clear session memory from Zep. Note that Zep is long-term storage for memory and this is not advised unless you have specific data retention requirements. """ try: self.zep_client.delete_memory(self.session_id) except NotFoundError: logger.warning( f"Session {self.session_id} not found in Zep. Skipping delete." )
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/zep.html
Source code for langchain.memory.chat_message_histories.cosmos_db """Azure CosmosDB Memory History.""" from __future__ import annotations import logging from types import TracebackType from typing import TYPE_CHECKING, Any, List, Optional, Type from langchain.schema import ( BaseChatMessageHistory, BaseMessage, messages_from_dict, messages_to_dict, ) logger = logging.getLogger(__name__) if TYPE_CHECKING: from azure.cosmos import ContainerProxy [docs]class CosmosDBChatMessageHistory(BaseChatMessageHistory): """Chat history backed by Azure CosmosDB.""" def __init__( self, cosmos_endpoint: str, cosmos_database: str, cosmos_container: str, session_id: str, user_id: str, credential: Any = None, connection_string: Optional[str] = None, ttl: Optional[int] = None, cosmos_client_kwargs: Optional[dict] = None, ): """ Initializes a new instance of the CosmosDBChatMessageHistory class. Make sure to call prepare_cosmos or use the context manager to make sure your database is ready. Either a credential or a connection string must be provided. :param cosmos_endpoint: The connection endpoint for the Azure Cosmos DB account. :param cosmos_database: The name of the database to use. :param cosmos_container: The name of the container to use. :param session_id: The session ID to use, can be overwritten while loading. :param user_id: The user ID to use, can be overwritten while loading. :param credential: The credential to use to authenticate to Azure Cosmos DB. :param connection_string: The connection string to use to authenticate. :param ttl: The time to live (in seconds) to use for documents in the container. :param cosmos_client_kwargs: Additional kwargs to pass to the CosmosClient. """ self.cosmos_endpoint = cosmos_endpoint self.cosmos_database = cosmos_database self.cosmos_container = cosmos_container self.credential = credential self.conn_string = connection_string self.session_id = session_id self.user_id = user_id self.ttl = ttl self.messages: List[BaseMessage] = [] try: from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501 CosmosClient, ) except ImportError as exc: raise ImportError( "You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501 ) from exc if self.credential: self._client = CosmosClient( url=self.cosmos_endpoint, credential=self.credential, **cosmos_client_kwargs or {}, ) elif self.conn_string: self._client = CosmosClient.from_connection_string( conn_str=self.conn_string, **cosmos_client_kwargs or {}, ) else: raise ValueError("Either a connection string or a credential must be set.") self._container: Optional[ContainerProxy] = None [docs] def prepare_cosmos(self) -> None: """Prepare the CosmosDB client. Use this function or the context manager to make sure your database is ready. """ try: from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501 PartitionKey, ) except ImportError as exc: raise ImportError( "You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." 
# noqa: E501 ) from exc database = self._client.create_database_if_not_exists(self.cosmos_database) self._container = database.create_container_if_not_exists( self.cosmos_container, partition_key=PartitionKey("/user_id"), default_ttl=self.ttl, ) self.load_messages() def __enter__(self) -> "CosmosDBChatMessageHistory": """Context manager entry point.""" self._client.__enter__() self.prepare_cosmos() return self def __exit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: """Context manager exit""" self.upsert_messages() self._client.__exit__(exc_type, exc_val, traceback) [docs] def load_messages(self) -> None: """Retrieve the messages from Cosmos""" if not self._container: raise ValueError("Container not initialized") try: from azure.cosmos.exceptions import ( # pylint: disable=import-outside-toplevel # noqa: E501 CosmosHttpResponseError, ) except ImportError as exc: raise ImportError( "You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501 ) from exc try: item = self._container.read_item( item=self.session_id, partition_key=self.user_id ) except CosmosHttpResponseError: logger.info("no session found") return if "messages" in item and len(item["messages"]) > 0: self.messages = messages_from_dict(item["messages"]) [docs] def add_message(self, message: BaseMessage) -> None: """Add a self-created message to the store""" self.messages.append(message) self.upsert_messages() [docs] def upsert_messages(self) -> None: """Update the cosmosdb item.""" if not self._container: raise ValueError("Container not initialized") self._container.upsert_item( body={ "id": self.session_id, "user_id": self.user_id, "messages": messages_to_dict(self.messages), } ) [docs] def clear(self) -> None: """Clear session memory from this memory and cosmos.""" self.messages = [] if self._container: self._container.delete_item( item=self.session_id, partition_key=self.user_id )
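A minimal usage sketch for CosmosDBChatMessageHistory as defined above. The endpoint, connection string, database, container, session id, and user id are placeholders; the context manager calls prepare_cosmos() on entry and upsert_messages() on exit, as shown in the source.

from langchain.memory.chat_message_histories.cosmos_db import CosmosDBChatMessageHistory
from langchain.schema import AIMessage, HumanMessage

# All identifiers below are placeholders for a real Azure Cosmos DB account.
history = CosmosDBChatMessageHistory(
    cosmos_endpoint="https://<account>.documents.azure.com:443/",
    cosmos_database="chat_db",
    cosmos_container="chat_container",
    session_id="session-1",
    user_id="user-1",
    connection_string="<cosmos-connection-string>",
)

# Entering the context manager prepares the database/container and loads any
# existing messages; exiting upserts the accumulated messages.
with history:
    history.add_message(HumanMessage(content="Hello"))
    history.add_message(AIMessage(content="Hi! How can I help?"))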
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cosmos_db.html
Source code for langchain.memory.chat_message_histories.in_memory

from typing import List

from pydantic import BaseModel

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
)


[docs]class ChatMessageHistory(BaseChatMessageHistory, BaseModel):
    messages: List[BaseMessage] = []

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Add a self-created message to the store"""
        self.messages.append(message)

[docs]    def clear(self) -> None:
        self.messages = []
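A short usage sketch for the in-memory ChatMessageHistory above; the messages are illustrative and nothing is persisted.

from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
from langchain.schema import AIMessage, HumanMessage

history = ChatMessageHistory()
history.add_message(HumanMessage(content="hi"))
history.add_message(AIMessage(content="hello, how can I help?"))

print(history.messages)  # the two messages, in insertion order
history.clear()          # drops everything; this store lives only in memory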
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/in_memory.html
Source code for langchain.memory.chat_message_histories.cassandra import json import logging from typing import List from langchain.schema import ( BaseChatMessageHistory, BaseMessage, _message_to_dict, messages_from_dict, ) logger = logging.getLogger(__name__) DEFAULT_KEYSPACE_NAME = "chat_history" DEFAULT_TABLE_NAME = "message_store" DEFAULT_USERNAME = "cassandra" DEFAULT_PASSWORD = "cassandra" DEFAULT_PORT = 9042 [docs]class CassandraChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in Cassandra. Args: contact_points: list of ips to connect to Cassandra cluster session_id: arbitrary key that is used to store the messages of a single chat session. port: port to connect to Cassandra cluster username: username to connect to Cassandra cluster password: password to connect to Cassandra cluster keyspace_name: name of the keyspace to use table_name: name of the table to use """ def __init__( self, contact_points: List[str], session_id: str, port: int = DEFAULT_PORT, username: str = DEFAULT_USERNAME, password: str = DEFAULT_PASSWORD, keyspace_name: str = DEFAULT_KEYSPACE_NAME, table_name: str = DEFAULT_TABLE_NAME, ): self.contact_points = contact_points self.session_id = session_id self.port = port self.username = username self.password = password self.keyspace_name = keyspace_name self.table_name = table_name try: from cassandra import ( AuthenticationFailed, OperationTimedOut, UnresolvableContactPoints, ) from cassandra.cluster import Cluster, PlainTextAuthProvider except ImportError: raise ValueError( "Could not import cassandra-driver python package. " "Please install it with `pip install cassandra-driver`." ) self.cluster: Cluster = Cluster( contact_points, port=port, auth_provider=PlainTextAuthProvider( username=self.username, password=self.password ), ) try: self.session = self.cluster.connect() except ( AuthenticationFailed, UnresolvableContactPoints, OperationTimedOut, ) as error: logger.error( "Unable to establish connection with \ cassandra chat message history database" ) raise error self._prepare_cassandra() def _prepare_cassandra(self) -> None: """Create the keyspace and table if they don't exist yet""" from cassandra import OperationTimedOut, Unavailable try: self.session.execute( f"""CREATE KEYSPACE IF NOT EXISTS {self.keyspace_name} WITH REPLICATION = {{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }};""" ) except (OperationTimedOut, Unavailable) as error: logger.error( f"Unable to create cassandra \ chat message history keyspace: {self.keyspace_name}." 
) raise error self.session.set_keyspace(self.keyspace_name) try: self.session.execute( f"""CREATE TABLE IF NOT EXISTS {self.table_name} (id UUID, session_id varchar, history text, PRIMARY KEY ((session_id), id) );""" ) except (OperationTimedOut, Unavailable) as error: logger.error( f"Unable to create cassandra \ chat message history table: {self.table_name}" ) raise error @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from Cassandra""" from cassandra import ReadFailure, ReadTimeout, Unavailable try: rows = self.session.execute( f"""SELECT * FROM {self.table_name} WHERE session_id = '{self.session_id}' ;""" ) except (Unavailable, ReadTimeout, ReadFailure) as error: logger.error("Unable to Retreive chat history messages from cassadra") raise error if rows: items = [json.loads(row.history) for row in rows] else: items = [] messages = messages_from_dict(items) return messages [docs] def add_message(self, message: BaseMessage) -> None: """Append the message to the record in Cassandra""" import uuid from cassandra import Unavailable, WriteFailure, WriteTimeout try: self.session.execute( """INSERT INTO message_store (id, session_id, history) VALUES (%s, %s, %s);""", (uuid.uuid4(), self.session_id, json.dumps(_message_to_dict(message))), ) except (Unavailable, WriteTimeout, WriteFailure) as error: logger.error("Unable to write chat history messages to cassandra") raise error [docs] def clear(self) -> None: """Clear session memory from Cassandra""" from cassandra import OperationTimedOut, Unavailable try: self.session.execute( f"DELETE FROM {self.table_name} WHERE session_id = '{self.session_id}';" ) except (Unavailable, OperationTimedOut) as error: logger.error("Unable to clear chat history messages from cassandra") raise error def __del__(self) -> None: if self.session: self.session.shutdown() if self.cluster: self.cluster.shutdown()
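A minimal usage sketch for CassandraChatMessageHistory; the contact point, credentials, and session id are placeholders for a reachable Cassandra cluster. As shown above, the constructor connects immediately and creates the keyspace and table if they do not exist.

from langchain.memory.chat_message_histories.cassandra import CassandraChatMessageHistory
from langchain.schema import HumanMessage

# Placeholder contact point and the driver's default credentials.
history = CassandraChatMessageHistory(
    contact_points=["127.0.0.1"],
    session_id="session-1",
    username="cassandra",
    password="cassandra",
)
history.add_message(HumanMessage(content="Remember that my favourite colour is blue."))
print(history.messages)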
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cassandra.html
Source code for langchain.memory.chat_message_histories.momento from __future__ import annotations import json from datetime import timedelta from typing import TYPE_CHECKING, Any, Optional from langchain.schema import ( BaseChatMessageHistory, BaseMessage, _message_to_dict, messages_from_dict, ) from langchain.utils import get_from_env if TYPE_CHECKING: import momento def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None: """Create cache if it doesn't exist. Raises: SdkException: Momento service or network error Exception: Unexpected response """ from momento.responses import CreateCache create_cache_response = cache_client.create_cache(cache_name) if isinstance(create_cache_response, CreateCache.Success) or isinstance( create_cache_response, CreateCache.CacheAlreadyExists ): return None elif isinstance(create_cache_response, CreateCache.Error): raise create_cache_response.inner_exception else: raise Exception(f"Unexpected response cache creation: {create_cache_response}") [docs]class MomentoChatMessageHistory(BaseChatMessageHistory): """Chat message history cache that uses Momento as a backend. See https://gomomento.com/""" def __init__( self, session_id: str, cache_client: momento.CacheClient, cache_name: str, *, key_prefix: str = "message_store:", ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True, ): """Instantiate a chat message history cache that uses Momento as a backend. Note: to instantiate the cache client passed to MomentoChatMessageHistory, you must have a Momento account at https://gomomento.com/. Args: session_id (str): The session ID to use for this chat session. cache_client (CacheClient): The Momento cache client. cache_name (str): The name of the cache to use to store the messages. key_prefix (str, optional): The prefix to apply to the cache key. Defaults to "message_store:". ttl (Optional[timedelta], optional): The TTL to use for the messages. Defaults to None, ie the default TTL of the cache will be used. ensure_cache_exists (bool, optional): Create the cache if it doesn't exist. Defaults to True. Raises: ImportError: Momento python package is not installed. TypeError: cache_client is not of type momento.CacheClientObject """ try: from momento import CacheClient from momento.requests import CollectionTtl except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." ) if not isinstance(cache_client, CacheClient): raise TypeError("cache_client must be a momento.CacheClient object.") if ensure_cache_exists: _ensure_cache_exists(cache_client, cache_name) self.key = key_prefix + session_id self.cache_client = cache_client self.cache_name = cache_name if ttl is not None: self.ttl = CollectionTtl.of(ttl) else: self.ttl = CollectionTtl.from_cache_ttl() [docs] @classmethod def from_client_params( cls, session_id: str, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any, ) -> MomentoChatMessageHistory: """Construct cache from CacheClient parameters.""" try: from momento import CacheClient, Configurations, CredentialProvider except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." 
) if configuration is None: configuration = Configurations.Laptop.v1() auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN") credentials = CredentialProvider.from_string(auth_token) cache_client = CacheClient(configuration, credentials, default_ttl=ttl) return cls(session_id, cache_client, cache_name, ttl=ttl, **kwargs) @property def messages(self) -> list[BaseMessage]: # type: ignore[override] """Retrieve the messages from Momento. Raises: SdkException: Momento service or network error Exception: Unexpected response Returns: list[BaseMessage]: List of cached messages """ from momento.responses import CacheListFetch fetch_response = self.cache_client.list_fetch(self.cache_name, self.key) if isinstance(fetch_response, CacheListFetch.Hit): items = [json.loads(m) for m in fetch_response.value_list_string] return messages_from_dict(items) elif isinstance(fetch_response, CacheListFetch.Miss): return [] elif isinstance(fetch_response, CacheListFetch.Error): raise fetch_response.inner_exception else: raise Exception(f"Unexpected response: {fetch_response}") [docs] def add_message(self, message: BaseMessage) -> None: """Store a message in the cache. Args: message (BaseMessage): The message object to store. Raises: SdkException: Momento service or network error. Exception: Unexpected response. """ from momento.responses import CacheListPushBack item = json.dumps(_message_to_dict(message)) push_response = self.cache_client.list_push_back( self.cache_name, self.key, item, ttl=self.ttl ) if isinstance(push_response, CacheListPushBack.Success): return None elif isinstance(push_response, CacheListPushBack.Error): raise push_response.inner_exception else: raise Exception(f"Unexpected response: {push_response}") [docs] def clear(self) -> None: """Remove the session's messages from the cache. Raises: SdkException: Momento service or network error. Exception: Unexpected response. """ from momento.responses import CacheDelete delete_response = self.cache_client.delete(self.cache_name, self.key) if isinstance(delete_response, CacheDelete.Success): return None elif isinstance(delete_response, CacheDelete.Error): raise delete_response.inner_exception else: raise Exception(f"Unexpected response: {delete_response}")
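A minimal usage sketch for MomentoChatMessageHistory using the from_client_params constructor above. It assumes a Momento account with the auth token available in the MOMENTO_AUTH_TOKEN environment variable; the cache name and session id are placeholders.

from datetime import timedelta

from langchain.memory.chat_message_histories.momento import MomentoChatMessageHistory
from langchain.schema import HumanMessage

# Requires MOMENTO_AUTH_TOKEN in the environment (or pass auth_token=...).
history = MomentoChatMessageHistory.from_client_params(
    session_id="session-1",
    cache_name="langchain",
    ttl=timedelta(days=1),
)
history.add_message(HumanMessage(content="hi"))
print(history.messages)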
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/momento.html
Source code for langchain.memory.chat_message_histories.sql

import json
import logging
from typing import List

from sqlalchemy import Column, Integer, Text, create_engine

try:
    from sqlalchemy.orm import declarative_base
except ImportError:
    from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
)

logger = logging.getLogger(__name__)


def create_message_model(table_name, DynamicBase):  # type: ignore
    """
    Create a message model for a given table name.

    Args:
        table_name: The name of the table to use.
        DynamicBase: The base class to use for the model.

    Returns:
        The model class.
    """

    # Model declared inside a function to have a dynamic table name
    class Message(DynamicBase):
        __tablename__ = table_name
        id = Column(Integer, primary_key=True)
        session_id = Column(Text)
        message = Column(Text)

    return Message


[docs]class SQLChatMessageHistory(BaseChatMessageHistory):
    """Chat message history stored in an SQL database."""

    def __init__(
        self,
        session_id: str,
        connection_string: str,
        table_name: str = "message_store",
    ):
        self.table_name = table_name
        self.connection_string = connection_string
        self.engine = create_engine(connection_string, echo=False)
        self._create_table_if_not_exists()

        self.session_id = session_id
        self.Session = sessionmaker(self.engine)

    def _create_table_if_not_exists(self) -> None:
        DynamicBase = declarative_base()
        self.Message = create_message_model(self.table_name, DynamicBase)
        # Create all does the check for us in case the table exists.
        DynamicBase.metadata.create_all(self.engine)

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve all messages from db"""
        with self.Session() as session:
            result = session.query(self.Message).where(
                self.Message.session_id == self.session_id
            )
            items = [json.loads(record.message) for record in result]
            messages = messages_from_dict(items)
            return messages

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in db"""
        with self.Session() as session:
            jsonstr = json.dumps(_message_to_dict(message))
            session.add(self.Message(session_id=self.session_id, message=jsonstr))
            session.commit()

[docs]    def clear(self) -> None:
        """Clear session memory from db"""
        with self.Session() as session:
            session.query(self.Message).filter(
                self.Message.session_id == self.session_id
            ).delete()
            session.commit()
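A minimal usage sketch for SQLChatMessageHistory; SQLite is used purely for illustration, and any SQLAlchemy-compatible connection string works the same way.

from langchain.memory.chat_message_histories.sql import SQLChatMessageHistory
from langchain.schema import HumanMessage

# A local SQLite file keeps the example self-contained; the table is created
# automatically if it does not exist.
history = SQLChatMessageHistory(
    session_id="session-1",
    connection_string="sqlite:///chat_history.db",
)
history.add_message(HumanMessage(content="hi"))
print(history.messages)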
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/sql.html
Source code for langchain.memory.chat_message_histories.file import json import logging from pathlib import Path from typing import List from langchain.schema import ( BaseChatMessageHistory, BaseMessage, messages_from_dict, messages_to_dict, ) logger = logging.getLogger(__name__) [docs]class FileChatMessageHistory(BaseChatMessageHistory): """ Chat message history that stores history in a local file. Args: file_path: path of the local file to store the messages. """ def __init__(self, file_path: str): self.file_path = Path(file_path) if not self.file_path.exists(): self.file_path.touch() self.file_path.write_text(json.dumps([])) @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from the local file""" items = json.loads(self.file_path.read_text()) messages = messages_from_dict(items) return messages [docs] def add_message(self, message: BaseMessage) -> None: """Append the message to the record in the local file""" messages = messages_to_dict(self.messages) messages.append(messages_to_dict([message])[0]) self.file_path.write_text(json.dumps(messages)) [docs] def clear(self) -> None: """Clear session memory from the local file""" self.file_path.write_text(json.dumps([]))
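A short usage sketch for FileChatMessageHistory; the file path is a placeholder and is created on first use.

from langchain.memory.chat_message_histories.file import FileChatMessageHistory
from langchain.schema import AIMessage, HumanMessage

# The JSON file is created (and initialized to an empty list) if it is missing.
history = FileChatMessageHistory("chat_history.json")
history.add_message(HumanMessage(content="hi"))
history.add_message(AIMessage(content="hello"))
print(history.messages)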
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/file.html
Source code for langchain.memory.chat_message_histories.dynamodb import logging from typing import List, Optional from langchain.schema import ( BaseChatMessageHistory, BaseMessage, _message_to_dict, messages_from_dict, messages_to_dict, ) logger = logging.getLogger(__name__) [docs]class DynamoDBChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in AWS DynamoDB. This class expects that a DynamoDB table with name `table_name` and a partition Key of `SessionId` is present. Args: table_name: name of the DynamoDB table session_id: arbitrary key that is used to store the messages of a single chat session. endpoint_url: URL of the AWS endpoint to connect to. This argument is optional and useful for test purposes, like using Localstack. If you plan to use AWS cloud service, you normally don't have to worry about setting the endpoint_url. """ def __init__( self, table_name: str, session_id: str, endpoint_url: Optional[str] = None ): import boto3 if endpoint_url: client = boto3.resource("dynamodb", endpoint_url=endpoint_url) else: client = boto3.resource("dynamodb") self.table = client.Table(table_name) self.session_id = session_id @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from DynamoDB""" from botocore.exceptions import ClientError response = None try: response = self.table.get_item(Key={"SessionId": self.session_id}) except ClientError as error: if error.response["Error"]["Code"] == "ResourceNotFoundException": logger.warning("No record found with session id: %s", self.session_id) else: logger.error(error) if response and "Item" in response: items = response["Item"]["History"] else: items = [] messages = messages_from_dict(items) return messages [docs] def add_message(self, message: BaseMessage) -> None: """Append the message to the record in DynamoDB""" from botocore.exceptions import ClientError messages = messages_to_dict(self.messages) _message = _message_to_dict(message) messages.append(_message) try: self.table.put_item( Item={"SessionId": self.session_id, "History": messages} ) except ClientError as err: logger.error(err) [docs] def clear(self) -> None: """Clear session memory from DynamoDB""" from botocore.exceptions import ClientError try: self.table.delete_item(Key={"SessionId": self.session_id}) except ClientError as err: logger.error(err)
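A minimal usage sketch for DynamoDBChatMessageHistory. Per the docstring above, it assumes a DynamoDB table (here called "SessionTable", a placeholder) with partition key `SessionId` already exists and that boto3 can find AWS credentials.

from langchain.memory.chat_message_histories.dynamodb import DynamoDBChatMessageHistory
from langchain.schema import HumanMessage

# "SessionTable" must already exist with a partition key named "SessionId".
history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="session-1")
history.add_message(HumanMessage(content="hi"))
print(history.messages)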
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/dynamodb.html
Source code for langchain.memory.chat_message_histories.mongodb import json import logging from typing import List from langchain.schema import ( BaseChatMessageHistory, BaseMessage, _message_to_dict, messages_from_dict, ) logger = logging.getLogger(__name__) DEFAULT_DBNAME = "chat_history" DEFAULT_COLLECTION_NAME = "message_store" [docs]class MongoDBChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in MongoDB. Args: connection_string: connection string to connect to MongoDB session_id: arbitrary key that is used to store the messages of a single chat session. database_name: name of the database to use collection_name: name of the collection to use """ def __init__( self, connection_string: str, session_id: str, database_name: str = DEFAULT_DBNAME, collection_name: str = DEFAULT_COLLECTION_NAME, ): from pymongo import MongoClient, errors self.connection_string = connection_string self.session_id = session_id self.database_name = database_name self.collection_name = collection_name try: self.client: MongoClient = MongoClient(connection_string) except errors.ConnectionFailure as error: logger.error(error) self.db = self.client[database_name] self.collection = self.db[collection_name] self.collection.create_index("SessionId") @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from MongoDB""" from pymongo import errors try: cursor = self.collection.find({"SessionId": self.session_id}) except errors.OperationFailure as error: logger.error(error) if cursor: items = [json.loads(document["History"]) for document in cursor] else: items = [] messages = messages_from_dict(items) return messages [docs] def add_message(self, message: BaseMessage) -> None: """Append the message to the record in MongoDB""" from pymongo import errors try: self.collection.insert_one( { "SessionId": self.session_id, "History": json.dumps(_message_to_dict(message)), } ) except errors.WriteError as err: logger.error(err) [docs] def clear(self) -> None: """Clear session memory from MongoDB""" from pymongo import errors try: self.collection.delete_many({"SessionId": self.session_id}) except errors.WriteError as err: logger.error(err)
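A minimal usage sketch for MongoDBChatMessageHistory; the connection string and session id are placeholders for a reachable MongoDB server.

from langchain.memory.chat_message_histories.mongodb import MongoDBChatMessageHistory
from langchain.schema import HumanMessage

# Placeholder local MongoDB instance; database and collection names fall back
# to the defaults defined above ("chat_history" / "message_store").
history = MongoDBChatMessageHistory(
    connection_string="mongodb://localhost:27017/",
    session_id="session-1",
)
history.add_message(HumanMessage(content="hi"))
print(history.messages)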
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/mongodb.html
Source code for langchain.memory.chat_message_histories.redis import json import logging from typing import List, Optional from langchain.schema import ( BaseChatMessageHistory, BaseMessage, _message_to_dict, messages_from_dict, ) logger = logging.getLogger(__name__) [docs]class RedisChatMessageHistory(BaseChatMessageHistory): """Chat message history stored in a Redis database.""" def __init__( self, session_id: str, url: str = "redis://localhost:6379/0", key_prefix: str = "message_store:", ttl: Optional[int] = None, ): try: import redis except ImportError: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." ) try: self.redis_client = redis.Redis.from_url(url=url) except redis.exceptions.ConnectionError as error: logger.error(error) self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl @property def key(self) -> str: """Construct the record key to use""" return self.key_prefix + self.session_id @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from Redis""" _items = self.redis_client.lrange(self.key, 0, -1) items = [json.loads(m.decode("utf-8")) for m in _items[::-1]] messages = messages_from_dict(items) return messages [docs] def add_message(self, message: BaseMessage) -> None: """Append the message to the record in Redis""" self.redis_client.lpush(self.key, json.dumps(_message_to_dict(message))) if self.ttl: self.redis_client.expire(self.key, self.ttl) [docs] def clear(self) -> None: """Clear session memory from Redis""" self.redis_client.delete(self.key)
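A minimal usage sketch for RedisChatMessageHistory; it assumes a Redis server reachable at the default local URL, and the session id is a placeholder.

from langchain.memory.chat_message_histories.redis import RedisChatMessageHistory
from langchain.schema import HumanMessage

# Messages are pushed to a Redis list under "message_store:<session_id>".
history = RedisChatMessageHistory(
    session_id="session-1",
    url="redis://localhost:6379/0",
)
history.add_message(HumanMessage(content="hi"))
print(history.messages)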
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/redis.html
Source code for langchain.memory.chat_message_histories.postgres import json import logging from typing import List from langchain.schema import ( BaseChatMessageHistory, BaseMessage, _message_to_dict, messages_from_dict, ) logger = logging.getLogger(__name__) DEFAULT_CONNECTION_STRING = "postgresql://postgres:mypassword@localhost/chat_history" [docs]class PostgresChatMessageHistory(BaseChatMessageHistory): """Chat message history stored in a Postgres database.""" def __init__( self, session_id: str, connection_string: str = DEFAULT_CONNECTION_STRING, table_name: str = "message_store", ): import psycopg from psycopg.rows import dict_row try: self.connection = psycopg.connect(connection_string) self.cursor = self.connection.cursor(row_factory=dict_row) except psycopg.OperationalError as error: logger.error(error) self.session_id = session_id self.table_name = table_name self._create_table_if_not_exists() def _create_table_if_not_exists(self) -> None: create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} ( id SERIAL PRIMARY KEY, session_id TEXT NOT NULL, message JSONB NOT NULL );""" self.cursor.execute(create_table_query) self.connection.commit() @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from PostgreSQL""" query = f"SELECT message FROM {self.table_name} WHERE session_id = %s;" self.cursor.execute(query, (self.session_id,)) items = [record["message"] for record in self.cursor.fetchall()] messages = messages_from_dict(items) return messages [docs] def add_message(self, message: BaseMessage) -> None: """Append the message to the record in PostgreSQL""" from psycopg import sql query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format( sql.Identifier(self.table_name) ) self.cursor.execute( query, (self.session_id, json.dumps(_message_to_dict(message))) ) self.connection.commit() [docs] def clear(self) -> None: """Clear session memory from PostgreSQL""" query = f"DELETE FROM {self.table_name} WHERE session_id = %s;" self.cursor.execute(query, (self.session_id,)) self.connection.commit() def __del__(self) -> None: if self.cursor: self.cursor.close() if self.connection: self.connection.close()
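A minimal usage sketch for PostgresChatMessageHistory; the connection string is a placeholder for a reachable PostgreSQL server, and the psycopg 3 driver is required.

from langchain.memory.chat_message_histories.postgres import PostgresChatMessageHistory
from langchain.schema import HumanMessage

# Placeholder credentials; the message_store table is created if missing.
history = PostgresChatMessageHistory(
    session_id="session-1",
    connection_string="postgresql://postgres:<password>@localhost/chat_history",
)
history.add_message(HumanMessage(content="hi"))
print(history.messages)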
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/postgres.html
Source code for langchain.agents.loading """Functionality for loading agents.""" import json import logging from pathlib import Path from typing import Any, List, Optional, Union import yaml from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent from langchain.agents.tools import Tool from langchain.agents.types import AGENT_TO_CLASS from langchain.base_language import BaseLanguageModel from langchain.chains.loading import load_chain, load_chain_from_config from langchain.utilities.loading import try_load_from_hub logger = logging.getLogger(__file__) URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/" def _load_agent_from_tools( config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any ) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: config_type = config.pop("_type") if config_type not in AGENT_TO_CLASS: raise ValueError(f"Loading {config_type} agent not supported") agent_cls = AGENT_TO_CLASS[config_type] combined_config = {**config, **kwargs} return agent_cls.from_llm_and_tools(llm, tools, **combined_config) def load_agent_from_config( config: dict, llm: Optional[BaseLanguageModel] = None, tools: Optional[List[Tool]] = None, **kwargs: Any, ) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: """Load agent from Config Dict.""" if "_type" not in config: raise ValueError("Must specify an agent Type in config") load_from_tools = config.pop("load_from_llm_and_tools", False) if load_from_tools: if llm is None: raise ValueError( "If `load_from_llm_and_tools` is set to True, " "then LLM must be provided" ) if tools is None: raise ValueError( "If `load_from_llm_and_tools` is set to True, " "then tools must be provided" ) return _load_agent_from_tools(config, llm, tools, **kwargs) config_type = config.pop("_type") if config_type not in AGENT_TO_CLASS: raise ValueError(f"Loading {config_type} agent not supported") agent_cls = AGENT_TO_CLASS[config_type] if "llm_chain" in config: config["llm_chain"] = load_chain_from_config(config.pop("llm_chain")) elif "llm_chain_path" in config: config["llm_chain"] = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.") if "output_parser" in config: logger.warning( "Currently loading output parsers on agent is not supported, " "will just use the default one." ) del config["output_parser"] combined_config = {**config, **kwargs} return agent_cls(**combined_config) # type: ignore [docs]def load_agent( path: Union[str, Path], **kwargs: Any ) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: """Unified method for loading a agent from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_agent_from_file, "agents", {"json", "yaml"} ): return hub_result else: return _load_agent_from_file(path, **kwargs) def _load_agent_from_file( file: Union[str, Path], **kwargs: Any ) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: """Load agent from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Load the agent from the config now. return load_agent_from_config(config, **kwargs)
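A brief usage sketch for load_agent; "my_agent.yaml" is a placeholder for an agent previously serialized with agent.save(). A fully serialized config (one containing llm_chain) needs no extra arguments, while configs saved with load_from_llm_and_tools=True must be given the llm and tools again.

from langchain.agents.loading import load_agent

# Placeholder path to a previously saved agent (JSON or YAML both work).
agent = load_agent("my_agent.yaml")

# If the config was saved with load_from_llm_and_tools=True, pass them back in:
# agent = load_agent("my_agent.yaml", llm=llm, tools=tools)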
https://api.python.langchain.com/en/latest/_modules/langchain/agents/loading.html
Source code for langchain.agents.initialize """Load agent.""" from typing import Any, Optional, Sequence from langchain.agents.agent import AgentExecutor from langchain.agents.agent_types import AgentType from langchain.agents.loading import AGENT_TO_CLASS, load_agent from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.tools.base import BaseTool [docs]def initialize_agent( tools: Sequence[BaseTool], llm: BaseLanguageModel, agent: Optional[AgentType] = None, callback_manager: Optional[BaseCallbackManager] = None, agent_path: Optional[str] = None, agent_kwargs: Optional[dict] = None, *, tags: Optional[Sequence[str]] = None, **kwargs: Any, ) -> AgentExecutor: """Load an agent executor given tools and LLM. Args: tools: List of tools this agent has access to. llm: Language model to use as the agent. agent: Agent type to use. If None and agent_path is also None, will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. callback_manager: CallbackManager to use. Global callback manager is used if not provided. Defaults to None. agent_path: Path to serialized agent to use. agent_kwargs: Additional key word arguments to pass to the underlying agent tags: Tags to apply to the traced runs. **kwargs: Additional key word arguments passed to the agent executor Returns: An agent executor """ tags_ = list(tags) if tags else [] if agent is None and agent_path is None: agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION if agent is not None and agent_path is not None: raise ValueError( "Both `agent` and `agent_path` are specified, " "but at most only one should be." ) if agent is not None: if agent not in AGENT_TO_CLASS: raise ValueError( f"Got unknown agent type: {agent}. " f"Valid types are: {AGENT_TO_CLASS.keys()}." ) tags_.append(agent.value if isinstance(agent, AgentType) else agent) agent_cls = AGENT_TO_CLASS[agent] agent_kwargs = agent_kwargs or {} agent_obj = agent_cls.from_llm_and_tools( llm, tools, callback_manager=callback_manager, **agent_kwargs ) elif agent_path is not None: agent_obj = load_agent( agent_path, llm=llm, tools=tools, callback_manager=callback_manager ) try: # TODO: Add tags from the serialized object directly. tags_.append(agent_obj._agent_type) except NotImplementedError: pass else: raise ValueError( "Somehow both `agent` and `agent_path` are None, " "this should never happen." ) return AgentExecutor.from_agent_and_tools( agent=agent_obj, tools=tools, callback_manager=callback_manager, tags=tags_, **kwargs, )
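A short usage sketch for initialize_agent; it assumes the openai package is installed with an API key in the environment, and uses the llm-math tool since that tool only needs the LLM.

from langchain.agents.agent_types import AgentType
from langchain.agents.initialize import initialize_agent
from langchain.agents.load_tools import load_tools
from langchain.llms import OpenAI

# Requires an OpenAI API key (OPENAI_API_KEY) in the environment.
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)

agent_executor = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent_executor.run("What is 7 raised to the 0.23 power?")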
https://api.python.langchain.com/en/latest/_modules/langchain/agents/initialize.html
Source code for langchain.agents.load_tools # flake8: noqa """Load tools.""" import warnings from typing import Any, Dict, List, Optional, Callable, Tuple from mypy_extensions import Arg, KwArg from langchain.agents.tools import Tool from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs from langchain.chains.api.base import APIChain from langchain.chains.llm_math.base import LLMMathChain from langchain.chains.pal.base import PALChain from langchain.requests import TextRequestsWrapper from langchain.tools.arxiv.tool import ArxivQueryRun from langchain.tools.pubmed.tool import PubmedQueryRun from langchain.tools.base import BaseTool from langchain.tools.bing_search.tool import BingSearchRun from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun from langchain.tools.metaphor_search.tool import MetaphorSearchResults from langchain.tools.google_serper.tool import GoogleSerperResults, GoogleSerperRun from langchain.tools.graphql.tool import BaseGraphQLTool from langchain.tools.human.tool import HumanInputRun from langchain.tools.python.tool import PythonREPLTool from langchain.tools.requests.tool import ( RequestsDeleteTool, RequestsGetTool, RequestsPatchTool, RequestsPostTool, RequestsPutTool, ) from langchain.tools.scenexplain.tool import SceneXplainTool from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun from langchain.tools.shell.tool import ShellTool from langchain.tools.sleep.tool import SleepTool from langchain.tools.wikipedia.tool import WikipediaQueryRun from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun from langchain.utilities import ArxivAPIWrapper from langchain.utilities import PubMedAPIWrapper from langchain.utilities.bing_search import BingSearchAPIWrapper from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper from langchain.utilities.google_search import GoogleSearchAPIWrapper from langchain.utilities.google_serper import GoogleSerperAPIWrapper from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper from langchain.utilities.awslambda import LambdaWrapper from langchain.utilities.graphql import GraphQLAPIWrapper from langchain.utilities.searx_search import SearxSearchWrapper from langchain.utilities.serpapi import SerpAPIWrapper from langchain.utilities.twilio import TwilioAPIWrapper from langchain.utilities.wikipedia import WikipediaAPIWrapper from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper def _get_python_repl() -> BaseTool: return PythonREPLTool() def _get_tools_requests_get() -> BaseTool: return RequestsGetTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_post() -> BaseTool: return RequestsPostTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_patch() -> BaseTool: return RequestsPatchTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_put() -> BaseTool: return RequestsPutTool(requests_wrapper=TextRequestsWrapper()) def _get_tools_requests_delete() -> BaseTool: return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper()) def _get_terminal() -> BaseTool: return ShellTool() def _get_sleep() -> 
BaseTool: return SleepTool() _BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = { "python_repl": _get_python_repl, "requests": _get_tools_requests_get, # preserved for backwards compatability "requests_get": _get_tools_requests_get, "requests_post": _get_tools_requests_post, "requests_patch": _get_tools_requests_patch, "requests_put": _get_tools_requests_put, "requests_delete": _get_tools_requests_delete, "terminal": _get_terminal, "sleep": _get_sleep, } def _get_pal_math(llm: BaseLanguageModel) -> BaseTool: return Tool( name="PAL-MATH", description="A language model that is really good at solving complex word math problems. Input should be a fully worded hard word math problem.", func=PALChain.from_math_prompt(llm).run, ) def _get_pal_colored_objects(llm: BaseLanguageModel) -> BaseTool: return Tool( name="PAL-COLOR-OBJ", description="A language model that is really good at reasoning about position and the color attributes of objects. Input should be a fully worded hard reasoning problem. Make sure to include all information about the objects AND the final question you want to answer.", func=PALChain.from_colored_object_prompt(llm).run, ) def _get_llm_math(llm: BaseLanguageModel) -> BaseTool: return Tool( name="Calculator", description="Useful for when you need to answer questions about math.", func=LLMMathChain.from_llm(llm=llm).run, coroutine=LLMMathChain.from_llm(llm=llm).arun, ) def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool: chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS) return Tool( name="Open Meteo API", description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.", func=chain.run, ) _LLM_TOOLS: Dict[str, Callable[[BaseLanguageModel], BaseTool]] = { "pal-math": _get_pal_math, "pal-colored-objects": _get_pal_colored_objects, "llm-math": _get_llm_math, "open-meteo-api": _get_open_meteo_api, } def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool: news_api_key = kwargs["news_api_key"] chain = APIChain.from_llm_and_api_docs( llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key} ) return Tool( name="News API", description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.", func=chain.run, ) def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool: tmdb_bearer_token = kwargs["tmdb_bearer_token"] chain = APIChain.from_llm_and_api_docs( llm, tmdb_docs.TMDB_DOCS, headers={"Authorization": f"Bearer {tmdb_bearer_token}"}, ) return Tool( name="TMDB API", description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.", func=chain.run, ) def _get_podcast_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool: listen_api_key = kwargs["listen_api_key"] chain = APIChain.from_llm_and_api_docs( llm, podcast_docs.PODCAST_DOCS, headers={"X-ListenAPI-Key": listen_api_key}, ) return Tool( name="Podcast API", description="Use the Listen Notes Podcast API to search all podcasts or episodes. 
The input should be a question in natural language that this API can answer.", func=chain.run, ) def _get_lambda_api(**kwargs: Any) -> BaseTool: return Tool( name=kwargs["awslambda_tool_name"], description=kwargs["awslambda_tool_description"], func=LambdaWrapper(**kwargs).run, ) def _get_wolfram_alpha(**kwargs: Any) -> BaseTool: return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs)) def _get_google_search(**kwargs: Any) -> BaseTool: return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs)) def _get_wikipedia(**kwargs: Any) -> BaseTool: return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs)) def _get_arxiv(**kwargs: Any) -> BaseTool: return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs)) def _get_pupmed(**kwargs: Any) -> BaseTool: return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs)) def _get_google_serper(**kwargs: Any) -> BaseTool: return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs)) def _get_google_serper_results_json(**kwargs: Any) -> BaseTool: return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs)) def _get_google_search_results_json(**kwargs: Any) -> BaseTool: return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs)) def _get_serpapi(**kwargs: Any) -> BaseTool: return Tool( name="Search", description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.", func=SerpAPIWrapper(**kwargs).run, coroutine=SerpAPIWrapper(**kwargs).arun, ) def _get_twilio(**kwargs: Any) -> BaseTool: return Tool( name="Text Message", description="Useful for when you need to send a text message to a provided phone number.", func=TwilioAPIWrapper(**kwargs).run, ) def _get_searx_search(**kwargs: Any) -> BaseTool: return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs)) def _get_searx_search_results_json(**kwargs: Any) -> BaseTool: wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"} return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs) def _get_bing_search(**kwargs: Any) -> BaseTool: return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs)) def _get_metaphor_search(**kwargs: Any) -> BaseTool: return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs)) def _get_ddg_search(**kwargs: Any) -> BaseTool: return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs)) def _get_human_tool(**kwargs: Any) -> BaseTool: return HumanInputRun(**kwargs) def _get_scenexplain(**kwargs: Any) -> BaseTool: return SceneXplainTool(**kwargs) def _get_graphql_tool(**kwargs: Any) -> BaseTool: graphql_endpoint = kwargs["graphql_endpoint"] wrapper = GraphQLAPIWrapper(graphql_endpoint=graphql_endpoint) return BaseGraphQLTool(graphql_wrapper=wrapper) def _get_openweathermap(**kwargs: Any) -> BaseTool: return OpenWeatherMapQueryRun(api_wrapper=OpenWeatherMapAPIWrapper(**kwargs)) _EXTRA_LLM_TOOLS: Dict[ str, Tuple[Callable[[Arg(BaseLanguageModel, "llm"), KwArg(Any)], BaseTool], List[str]], ] = { "news-api": (_get_news_api, ["news_api_key"]), "tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]), "podcast-api": (_get_podcast_api, ["listen_api_key"]), } _EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = { "wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]), "google-search": (_get_google_search, ["google_api_key", "google_cse_id"]), "google-search-results-json": ( _get_google_search_results_json, ["google_api_key", "google_cse_id", "num_results"], ), 
"searx-search-results-json": ( _get_searx_search_results_json, ["searx_host", "engines", "num_results", "aiosession"], ), "bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]), "metaphor-search": (_get_metaphor_search, ["metaphor_api_key"]), "ddg-search": (_get_ddg_search, []), "google-serper": (_get_google_serper, ["serper_api_key", "aiosession"]), "google-serper-results-json": ( _get_google_serper_results_json, ["serper_api_key", "aiosession"], ), "serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]), "twilio": (_get_twilio, ["account_sid", "auth_token", "from_number"]), "searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]), "wikipedia": (_get_wikipedia, ["top_k_results", "lang"]), "arxiv": ( _get_arxiv, ["top_k_results", "load_max_docs", "load_all_available_meta"], ), "pupmed": ( _get_pupmed, ["top_k_results", "load_max_docs", "load_all_available_meta"], ), "human": (_get_human_tool, ["prompt_func", "input_func"]), "awslambda": ( _get_lambda_api, ["awslambda_tool_name", "awslambda_tool_description", "function_name"], ), "sceneXplain": (_get_scenexplain, []), "graphql": (_get_graphql_tool, ["graphql_endpoint"]), "openweathermap-api": (_get_openweathermap, ["openweathermap_api_key"]), } def _handle_callbacks( callback_manager: Optional[BaseCallbackManager], callbacks: Callbacks ) -> Callbacks: if callback_manager is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) if callbacks is not None: raise ValueError( "Cannot specify both callback_manager and callbacks arguments." ) return callback_manager return callbacks [docs]def load_huggingface_tool( task_or_repo_id: str, model_repo_id: Optional[str] = None, token: Optional[str] = None, remote: bool = False, **kwargs: Any, ) -> BaseTool: """Loads a tool from the HuggingFace Hub. Args: task_or_repo_id: Task or model repo id. model_repo_id: Optional model repo id. token: Optional token. remote: Optional remote. Defaults to False. **kwargs: Returns: A tool. """ try: from transformers import load_tool except ImportError: raise ImportError( "HuggingFace tools require the libraries `transformers>=4.29.0`" " and `huggingface_hub>=0.14.1` to be installed." " Please install it with" " `pip install --upgrade transformers huggingface_hub`." ) hf_tool = load_tool( task_or_repo_id, model_repo_id=model_repo_id, token=token, remote=remote, **kwargs, ) outputs = hf_tool.outputs if set(outputs) != {"text"}: raise NotImplementedError("Multimodal outputs not supported yet.") inputs = hf_tool.inputs if set(inputs) != {"text"}: raise NotImplementedError("Multimodal inputs not supported yet.") return Tool.from_function( hf_tool.__call__, name=hf_tool.name, description=hf_tool.description ) [docs]def load_tools( tool_names: List[str], llm: Optional[BaseLanguageModel] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> List[BaseTool]: """Load tools based on their name. Args: tool_names: name of tools to load. llm: Optional language model, may be needed to initialize certain tools. callbacks: Optional callback manager or list of callback handlers. If not provided, default global callback manager will be used. Returns: List of tools. 
""" tools = [] callbacks = _handle_callbacks( callback_manager=kwargs.get("callback_manager"), callbacks=callbacks ) for name in tool_names: if name == "requests": warnings.warn( "tool name `requests` is deprecated - " "please use `requests_all` or specify the requests method" ) if name == "requests_all": # expand requests into various methods requests_method_tools = [ _tool for _tool in _BASE_TOOLS if _tool.startswith("requests_") ] tool_names.extend(requests_method_tools) elif name in _BASE_TOOLS: tools.append(_BASE_TOOLS[name]()) elif name in _LLM_TOOLS: if llm is None: raise ValueError(f"Tool {name} requires an LLM to be provided") tool = _LLM_TOOLS[name](llm) tools.append(tool) elif name in _EXTRA_LLM_TOOLS: if llm is None: raise ValueError(f"Tool {name} requires an LLM to be provided") _get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name] missing_keys = set(extra_keys).difference(kwargs) if missing_keys: raise ValueError( f"Tool {name} requires some parameters that were not " f"provided: {missing_keys}" ) sub_kwargs = {k: kwargs[k] for k in extra_keys} tool = _get_llm_tool_func(llm=llm, **sub_kwargs) tools.append(tool) elif name in _EXTRA_OPTIONAL_TOOLS: _get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name] sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs} tool = _get_tool_func(**sub_kwargs) tools.append(tool) else: raise ValueError(f"Got unknown tool {name}") if callbacks is not None: for tool in tools: tool.callbacks = callbacks return tools [docs]def get_all_tool_names() -> List[str]: """Get a list of all possible tool names.""" return ( list(_BASE_TOOLS) + list(_EXTRA_OPTIONAL_TOOLS) + list(_EXTRA_LLM_TOOLS) + list(_LLM_TOOLS) )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/load_tools.html
Source code for langchain.agents.agent_types

from enum import Enum


[docs]class AgentType(str, Enum):
    """Enumerator with the Agent types."""

    ZERO_SHOT_REACT_DESCRIPTION = "zero-shot-react-description"
    REACT_DOCSTORE = "react-docstore"
    SELF_ASK_WITH_SEARCH = "self-ask-with-search"
    CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description"
    CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description"
    CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description"
    STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION = (
        "structured-chat-zero-shot-react-description"
    )
    OPENAI_FUNCTIONS = "openai-functions"
    OPENAI_MULTI_FUNCTIONS = "openai-multi-functions"
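Because AgentType mixes in str, its members compare equal to their raw string values, so either form can be passed to initialize_agent; a tiny illustrative check:

from langchain.agents.agent_types import AgentType

# str-valued enum members are interchangeable with their raw strings.
assert AgentType.ZERO_SHOT_REACT_DESCRIPTION == "zero-shot-react-description"
assert AgentType("openai-functions") is AgentType.OPENAI_FUNCTIONS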
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_types.html
Source code for langchain.agents.agent """Chain that takes in an input and produces an action and action input.""" from __future__ import annotations import asyncio import json import logging import time from abc import abstractmethod from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import yaml from pydantic import BaseModel, root_validator from langchain.agents.agent_types import AgentType from langchain.agents.tools import InvalidTool from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, AsyncCallbackManagerForToolRun, CallbackManagerForChainRun, CallbackManagerForToolRun, Callbacks, ) from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.input import get_color_mapping from langchain.prompts.base import BasePromptTemplate from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( AgentAction, AgentFinish, BaseMessage, BaseOutputParser, OutputParserException, ) from langchain.tools.base import BaseTool from langchain.utilities.asyncio import asyncio_timeout logger = logging.getLogger(__name__) [docs]class BaseSingleActionAgent(BaseModel): """Base Agent class.""" @property def return_values(self) -> List[str]: """Return values of the agent.""" return ["output"] [docs] def get_allowed_tools(self) -> Optional[List[str]]: return None [docs] @abstractmethod def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ [docs] @abstractmethod async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ @property @abstractmethod def input_keys(self) -> List[str]: """Return the input keys. 
:meta private: """ [docs] def return_stopped_response( self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any, ) -> AgentFinish: """Return response when agent has been stopped due to max iterations.""" if early_stopping_method == "force": # `force` just returns a constant string return AgentFinish( {"output": "Agent stopped due to iteration limit or time limit."}, "" ) else: raise ValueError( f"Got unsupported early_stopping_method `{early_stopping_method}`" ) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, **kwargs: Any, ) -> BaseSingleActionAgent: raise NotImplementedError @property def _agent_type(self) -> str: """Return Identifier of agent type.""" raise NotImplementedError [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent.""" _dict = super().dict() _type = self._agent_type if isinstance(_type, AgentType): _dict["_type"] = str(_type.value) else: _dict["_type"] = _type return _dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the agent. Args: file_path: Path to file to save the agent to. Example: .. code-block:: python # If working with agent executor agent.agent.save(file_path="path/agent.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save agent_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(agent_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(agent_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") [docs] def tool_run_logging_kwargs(self) -> Dict: return {} [docs]class BaseMultiActionAgent(BaseModel): """Base Agent class.""" @property def return_values(self) -> List[str]: """Return values of the agent.""" return ["output"] [docs] def get_allowed_tools(self) -> Optional[List[str]]: return None [docs] @abstractmethod def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[List[AgentAction], AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Actions specifying what tool to use. """ [docs] @abstractmethod async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[List[AgentAction], AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Actions specifying what tool to use. """ @property @abstractmethod def input_keys(self) -> List[str]: """Return the input keys. 
:meta private: """ [docs] def return_stopped_response( self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any, ) -> AgentFinish: """Return response when agent has been stopped due to max iterations.""" if early_stopping_method == "force": # `force` just returns a constant string return AgentFinish({"output": "Agent stopped due to max iterations."}, "") else: raise ValueError( f"Got unsupported early_stopping_method `{early_stopping_method}`" ) @property def _agent_type(self) -> str: """Return Identifier of agent type.""" raise NotImplementedError [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent.""" _dict = super().dict() _dict["_type"] = str(self._agent_type) return _dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the agent. Args: file_path: Path to file to save the agent to. Example: .. code-block:: python # If working with agent executor agent.agent.save(file_path="path/agent.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save agent_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(agent_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(agent_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") [docs] def tool_run_logging_kwargs(self) -> Dict: return {} [docs]class AgentOutputParser(BaseOutputParser): [docs] @abstractmethod def parse(self, text: str) -> Union[AgentAction, AgentFinish]: """Parse text into agent action/finish.""" [docs]class LLMSingleActionAgent(BaseSingleActionAgent): llm_chain: LLMChain output_parser: AgentOutputParser stop: List[str] @property def input_keys(self) -> List[str]: return list(set(self.llm_chain.input_keys) - {"intermediate_steps"}) [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent.""" _dict = super().dict() del _dict["output_parser"] return _dict [docs] def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ output = self.llm_chain.run( intermediate_steps=intermediate_steps, stop=self.stop, callbacks=callbacks, **kwargs, ) return self.output_parser.parse(output) [docs] async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ output = await self.llm_chain.arun( intermediate_steps=intermediate_steps, stop=self.stop, callbacks=callbacks, **kwargs, ) return self.output_parser.parse(output) [docs] def tool_run_logging_kwargs(self) -> Dict: return { "llm_prefix": "", "observation_prefix": "" if len(self.stop) == 0 else self.stop[0], } [docs]class Agent(BaseSingleActionAgent): """Class responsible for calling the language model and deciding the action. This is driven by an LLMChain. 
The prompt in the LLMChain MUST include a variable called "agent_scratchpad" where the agent can put its intermediary work. """ llm_chain: LLMChain output_parser: AgentOutputParser allowed_tools: Optional[List[str]] = None [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of agent.""" _dict = super().dict() del _dict["output_parser"] return _dict [docs] def get_allowed_tools(self) -> Optional[List[str]]: return self.allowed_tools @property def return_values(self) -> List[str]: return ["output"] def _fix_text(self, text: str) -> str: """Fix the text.""" raise ValueError("fix_text not implemented for this agent.") @property def _stop(self) -> List[str]: return [ f"\n{self.observation_prefix.rstrip()}", f"\n\t{self.observation_prefix.rstrip()}", ] def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> Union[str, List[BaseMessage]]: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}" return thoughts [docs] def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs) return self.output_parser.parse(full_output) [docs] async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) full_output = await self.llm_chain.apredict(callbacks=callbacks, **full_inputs) return self.output_parser.parse(full_output) [docs] def get_full_inputs( self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any ) -> Dict[str, Any]: """Create the full inputs for the LLMChain from intermediate steps.""" thoughts = self._construct_scratchpad(intermediate_steps) new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop} full_inputs = {**kwargs, **new_inputs} return full_inputs @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return list(set(self.llm_chain.input_keys) - {"agent_scratchpad"}) @root_validator() def validate_prompt(cls, values: Dict) -> Dict: """Validate that prompt matches format.""" prompt = values["llm_chain"].prompt if "agent_scratchpad" not in prompt.input_variables: logger.warning( "`agent_scratchpad` should be a variable in prompt.input_variables." " Did not find it, so adding it at the end." 
) prompt.input_variables.append("agent_scratchpad") if isinstance(prompt, PromptTemplate): prompt.template += "\n{agent_scratchpad}" elif isinstance(prompt, FewShotPromptTemplate): prompt.suffix += "\n{agent_scratchpad}" else: raise ValueError(f"Got unexpected prompt type {type(prompt)}") return values @property @abstractmethod def observation_prefix(self) -> str: """Prefix to append the observation with.""" @property @abstractmethod def llm_prefix(self) -> str: """Prefix to append the LLM call with.""" [docs] @classmethod @abstractmethod def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: """Create a prompt for this class.""" @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: """Validate that appropriate tools are passed in.""" pass @classmethod @abstractmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: """Get default output parser for this class.""" [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) llm_chain = LLMChain( llm=llm, prompt=cls.create_prompt(tools), callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser() return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, ) [docs] def return_stopped_response( self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any, ) -> AgentFinish: """Return response when agent has been stopped due to max iterations.""" if early_stopping_method == "force": # `force` just returns a constant string return AgentFinish( {"output": "Agent stopped due to iteration limit or time limit."}, "" ) elif early_stopping_method == "generate": # Generate does one final forward pass thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += ( f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}" ) # Adding to the previous steps, we now tell the LLM to make a final pred thoughts += ( "\n\nI now need to return a final answer based on the previous steps:" ) new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop} full_inputs = {**kwargs, **new_inputs} full_output = self.llm_chain.predict(**full_inputs) # We try to extract a final answer parsed_output = self.output_parser.parse(full_output) if isinstance(parsed_output, AgentFinish): # If we can extract, we send the correct stuff return parsed_output else: # If we can extract, but the tool is not the final tool, # we just return the full output return AgentFinish({"output": full_output}, full_output) else: raise ValueError( "early_stopping_method should be one of `force` or `generate`, " f"got {early_stopping_method}" ) [docs] def tool_run_logging_kwargs(self) -> Dict: return { "llm_prefix": self.llm_prefix, "observation_prefix": self.observation_prefix, } class ExceptionTool(BaseTool): name = "_Exception" description = "Exception tool" def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: return query async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: return query [docs]class AgentExecutor(Chain): """Consists of an agent using tools.""" agent: Union[BaseSingleActionAgent, 
BaseMultiActionAgent] """The agent to run for creating a plan and determining actions to take at each step of the execution loop.""" tools: Sequence[BaseTool] """The valid tools the agent can call.""" return_intermediate_steps: bool = False """Whether to return the agent's trajectory of intermediate steps at the end in addition to the final output.""" max_iterations: Optional[int] = 15 """The maximum number of steps to take before ending the execution loop. Setting to 'None' could lead to an infinite loop.""" max_execution_time: Optional[float] = None """The maximum amount of wall clock time to spend in the execution loop. """ early_stopping_method: str = "force" """The method to use for early stopping if the agent never returns `AgentFinish`. Either 'force' or 'generate'. `"force"` returns a string saying that it stopped because it met a time or iteration limit. `"generate"` calls the agent's LLM Chain one final time to generate a final answer based on the previous steps. """ handle_parsing_errors: Union[ bool, str, Callable[[OutputParserException], str] ] = False """How to handle errors raised by the agent's output parser. Defaults to `False`, which raises the error. s If `true`, the error will be sent back to the LLM as an observation. If a string, the string itself will be sent to the LLM as an observation. If a callable function, the function will be called with the exception as an argument, and the result of that function will be passed to the agent as an observation. """ [docs] @classmethod def from_agent_and_tools( cls, agent: Union[BaseSingleActionAgent, BaseMultiActionAgent], tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, **kwargs: Any, ) -> AgentExecutor: """Create from agent and tools.""" return cls( agent=agent, tools=tools, callback_manager=callback_manager, **kwargs ) @root_validator() def validate_tools(cls, values: Dict) -> Dict: """Validate that tools are compatible with agent.""" agent = values["agent"] tools = values["tools"] allowed_tools = agent.get_allowed_tools() if allowed_tools is not None: if set(allowed_tools) != set([tool.name for tool in tools]): raise ValueError( f"Allowed tools ({allowed_tools}) different than " f"provided tools ({[tool.name for tool in tools]})" ) return values @root_validator() def validate_return_direct_tool(cls, values: Dict) -> Dict: """Validate that tools are compatible with agent.""" agent = values["agent"] tools = values["tools"] if isinstance(agent, BaseMultiActionAgent): for tool in tools: if tool.return_direct: raise ValueError( "Tools that have `return_direct=True` are not allowed " "in multi-action agents" ) return values [docs] def save(self, file_path: Union[Path, str]) -> None: """Raise error - saving not supported for Agent Executors.""" raise ValueError( "Saving not supported for agent executors. " "If you are trying to save the agent, please use the " "`.save_agent(...)`" ) [docs] def save_agent(self, file_path: Union[Path, str]) -> None: """Save the underlying agent.""" return self.agent.save(file_path) @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return self.agent.input_keys @property def output_keys(self) -> List[str]: """Return the singular output key. 
:meta private: """ if self.return_intermediate_steps: return self.agent.return_values + ["intermediate_steps"] else: return self.agent.return_values [docs] def lookup_tool(self, name: str) -> BaseTool: """Lookup tool by name.""" return {tool.name: tool for tool in self.tools}[name] def _should_continue(self, iterations: int, time_elapsed: float) -> bool: if self.max_iterations is not None and iterations >= self.max_iterations: return False if ( self.max_execution_time is not None and time_elapsed >= self.max_execution_time ): return False return True def _return( self, output: AgentFinish, intermediate_steps: list, run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: if run_manager: run_manager.on_agent_finish(output, color="green", verbose=self.verbose) final_output = output.return_values if self.return_intermediate_steps: final_output["intermediate_steps"] = intermediate_steps return final_output async def _areturn( self, output: AgentFinish, intermediate_steps: list, run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: if run_manager: await run_manager.on_agent_finish( output, color="green", verbose=self.verbose ) final_output = output.return_values if self.return_intermediate_steps: final_output["intermediate_steps"] = intermediate_steps return final_output def _take_next_step( self, name_to_tool_map: Dict[str, BaseTool], color_mapping: Dict[str, str], inputs: Dict[str, str], intermediate_steps: List[Tuple[AgentAction, str]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]: """Take a single step in the thought-action-observation loop. Override this to take control of how the agent makes and acts on choices. """ try: # Call the LLM to see what to do. output = self.agent.plan( intermediate_steps, callbacks=run_manager.get_child() if run_manager else None, **inputs, ) except OutputParserException as e: if isinstance(self.handle_parsing_errors, bool): raise_error = not self.handle_parsing_errors else: raise_error = False if raise_error: raise e text = str(e) if isinstance(self.handle_parsing_errors, bool): if e.send_to_llm: observation = str(e.observation) text = str(e.llm_output) else: observation = "Invalid or incomplete response" elif isinstance(self.handle_parsing_errors, str): observation = self.handle_parsing_errors elif callable(self.handle_parsing_errors): observation = self.handle_parsing_errors(e) else: raise ValueError("Got unexpected type of `handle_parsing_errors`") output = AgentAction("_Exception", observation, text) if run_manager: run_manager.on_agent_action(output, color="green") tool_run_kwargs = self.agent.tool_run_logging_kwargs() observation = ExceptionTool().run( output.tool_input, verbose=self.verbose, color=None, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) return [(output, observation)] # If the tool chosen is the finishing tool, then we end and return. 
if isinstance(output, AgentFinish): return output actions: List[AgentAction] if isinstance(output, AgentAction): actions = [output] else: actions = output result = [] for agent_action in actions: if run_manager: run_manager.on_agent_action(agent_action, color="green") # Otherwise we lookup the tool if agent_action.tool in name_to_tool_map: tool = name_to_tool_map[agent_action.tool] return_direct = tool.return_direct color = color_mapping[agent_action.tool] tool_run_kwargs = self.agent.tool_run_logging_kwargs() if return_direct: tool_run_kwargs["llm_prefix"] = "" # We then call the tool on the tool input to get an observation observation = tool.run( agent_action.tool_input, verbose=self.verbose, color=color, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) else: tool_run_kwargs = self.agent.tool_run_logging_kwargs() observation = InvalidTool().run( agent_action.tool, verbose=self.verbose, color=None, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) result.append((agent_action, observation)) return result async def _atake_next_step( self, name_to_tool_map: Dict[str, BaseTool], color_mapping: Dict[str, str], inputs: Dict[str, str], intermediate_steps: List[Tuple[AgentAction, str]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]: """Take a single step in the thought-action-observation loop. Override this to take control of how the agent makes and acts on choices. """ try: # Call the LLM to see what to do. output = await self.agent.aplan( intermediate_steps, callbacks=run_manager.get_child() if run_manager else None, **inputs, ) except OutputParserException as e: if isinstance(self.handle_parsing_errors, bool): raise_error = not self.handle_parsing_errors else: raise_error = False if raise_error: raise e text = str(e) if isinstance(self.handle_parsing_errors, bool): if e.send_to_llm: observation = str(e.observation) text = str(e.llm_output) else: observation = "Invalid or incomplete response" elif isinstance(self.handle_parsing_errors, str): observation = self.handle_parsing_errors elif callable(self.handle_parsing_errors): observation = self.handle_parsing_errors(e) else: raise ValueError("Got unexpected type of `handle_parsing_errors`") output = AgentAction("_Exception", observation, text) tool_run_kwargs = self.agent.tool_run_logging_kwargs() observation = await ExceptionTool().arun( output.tool_input, verbose=self.verbose, color=None, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) return [(output, observation)] # If the tool chosen is the finishing tool, then we end and return. 
if isinstance(output, AgentFinish): return output actions: List[AgentAction] if isinstance(output, AgentAction): actions = [output] else: actions = output async def _aperform_agent_action( agent_action: AgentAction, ) -> Tuple[AgentAction, str]: if run_manager: await run_manager.on_agent_action( agent_action, verbose=self.verbose, color="green" ) # Otherwise we lookup the tool if agent_action.tool in name_to_tool_map: tool = name_to_tool_map[agent_action.tool] return_direct = tool.return_direct color = color_mapping[agent_action.tool] tool_run_kwargs = self.agent.tool_run_logging_kwargs() if return_direct: tool_run_kwargs["llm_prefix"] = "" # We then call the tool on the tool input to get an observation observation = await tool.arun( agent_action.tool_input, verbose=self.verbose, color=color, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) else: tool_run_kwargs = self.agent.tool_run_logging_kwargs() observation = await InvalidTool().arun( agent_action.tool, verbose=self.verbose, color=None, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) return agent_action, observation # Use asyncio.gather to run multiple tool.arun() calls concurrently result = await asyncio.gather( *[_aperform_agent_action(agent_action) for agent_action in actions] ) return list(result) def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run text through and get agent response.""" # Construct a mapping of tool name to tool for easy lookup name_to_tool_map = {tool.name: tool for tool in self.tools} # We construct a mapping from each tool to a color, used for logging. color_mapping = get_color_mapping( [tool.name for tool in self.tools], excluded_colors=["green", "red"] ) intermediate_steps: List[Tuple[AgentAction, str]] = [] # Let's start tracking the number of iterations and time elapsed iterations = 0 time_elapsed = 0.0 start_time = time.time() # We now enter the agent loop (until it returns something). while self._should_continue(iterations, time_elapsed): next_step_output = self._take_next_step( name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager=run_manager, ) if isinstance(next_step_output, AgentFinish): return self._return( next_step_output, intermediate_steps, run_manager=run_manager ) intermediate_steps.extend(next_step_output) if len(next_step_output) == 1: next_step_action = next_step_output[0] # See if tool should return directly tool_return = self._get_tool_return(next_step_action) if tool_return is not None: return self._return( tool_return, intermediate_steps, run_manager=run_manager ) iterations += 1 time_elapsed = time.time() - start_time output = self.agent.return_stopped_response( self.early_stopping_method, intermediate_steps, **inputs ) return self._return(output, intermediate_steps, run_manager=run_manager) async def _acall( self, inputs: Dict[str, str], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Run text through and get agent response.""" # Construct a mapping of tool name to tool for easy lookup name_to_tool_map = {tool.name: tool for tool in self.tools} # We construct a mapping from each tool to a color, used for logging. 
color_mapping = get_color_mapping( [tool.name for tool in self.tools], excluded_colors=["green"] ) intermediate_steps: List[Tuple[AgentAction, str]] = [] # Let's start tracking the number of iterations and time elapsed iterations = 0 time_elapsed = 0.0 start_time = time.time() # We now enter the agent loop (until it returns something). async with asyncio_timeout(self.max_execution_time): try: while self._should_continue(iterations, time_elapsed): next_step_output = await self._atake_next_step( name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager=run_manager, ) if isinstance(next_step_output, AgentFinish): return await self._areturn( next_step_output, intermediate_steps, run_manager=run_manager, ) intermediate_steps.extend(next_step_output) if len(next_step_output) == 1: next_step_action = next_step_output[0] # See if tool should return directly tool_return = self._get_tool_return(next_step_action) if tool_return is not None: return await self._areturn( tool_return, intermediate_steps, run_manager=run_manager ) iterations += 1 time_elapsed = time.time() - start_time output = self.agent.return_stopped_response( self.early_stopping_method, intermediate_steps, **inputs ) return await self._areturn( output, intermediate_steps, run_manager=run_manager ) except TimeoutError: # stop early when interrupted by the async timeout output = self.agent.return_stopped_response( self.early_stopping_method, intermediate_steps, **inputs ) return await self._areturn( output, intermediate_steps, run_manager=run_manager ) def _get_tool_return( self, next_step_output: Tuple[AgentAction, str] ) -> Optional[AgentFinish]: """Check if the tool is a returning tool.""" agent_action, observation = next_step_output name_to_tool_map = {tool.name: tool for tool in self.tools} # Invalid tools won't be in the map, so we return False. if agent_action.tool in name_to_tool_map: if name_to_tool_map[agent_action.tool].return_direct: return AgentFinish( {self.agent.return_values[0]: observation}, "", ) return None
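The AgentExecutor defined above is the runtime loop that repeatedly calls the agent's plan method and runs the chosen tools. A minimal usage sketch of wiring in the iteration, timeout, and parsing-error knobs shown above (the tool list and an OpenAI API key are assumed to exist; the question string is a placeholder):

.. code-block:: python

    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.llms.openai import OpenAI

    llm = OpenAI(temperature=0)
    # `tools` is assumed to be a list of BaseTool instances defined elsewhere.
    agent = ZeroShotAgent.from_llm_and_tools(llm=llm, tools=tools)
    executor = AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        max_iterations=5,                  # stop the loop after 5 steps
        max_execution_time=60.0,           # or after 60 seconds of wall clock time
        early_stopping_method="generate",  # ask the LLM for a final answer when stopped
        handle_parsing_errors=True,        # send parser errors back to the LLM as observations
        verbose=True,
    )
    result = executor.run("your question here")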
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent.html
Source code for langchain.agents.structured_chat.base import re from typing import Any, List, Optional, Sequence, Tuple from pydantic import Field from langchain.agents.agent import Agent, AgentOutputParser from langchain.agents.structured_chat.output_parser import ( StructuredChatOutputParserWithRetries, ) from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.prompts.base import BasePromptTemplate from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain.schema import AgentAction from langchain.tools import BaseTool HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}" [docs]class StructuredChatAgent(Agent): output_parser: AgentOutputParser = Field( default_factory=StructuredChatOutputParserWithRetries ) @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with.""" return "Thought:" def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> str: agent_scratchpad = super()._construct_scratchpad(intermediate_steps) if not isinstance(agent_scratchpad, str): raise ValueError("agent_scratchpad should be of type string.") if agent_scratchpad: return ( f"This was your previous work " f"(but I haven't seen any of it! I only see what " f"you return as final answer):\n{agent_scratchpad}" ) else: return agent_scratchpad @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: pass @classmethod def _get_default_output_parser( cls, llm: Optional[BaseLanguageModel] = None, **kwargs: Any ) -> AgentOutputParser: return StructuredChatOutputParserWithRetries.from_llm(llm=llm) @property def _stop(self) -> List[str]: return ["Observation:"] [docs] @classmethod def create_prompt( cls, tools: Sequence[BaseTool], prefix: str = PREFIX, suffix: str = SUFFIX, human_message_template: str = HUMAN_MESSAGE_TEMPLATE, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, memory_prompts: Optional[List[BasePromptTemplate]] = None, ) -> BasePromptTemplate: tool_strings = [] for tool in tools: args_schema = re.sub("}", "}}}}", re.sub("{", "{{{{", str(tool.args))) tool_strings.append(f"{tool.name}: {tool.description}, args: {args_schema}") formatted_tools = "\n".join(tool_strings) tool_names = ", ".join([tool.name for tool in tools]) format_instructions = format_instructions.format(tool_names=tool_names) template = "\n\n".join([prefix, formatted_tools, format_instructions, suffix]) if input_variables is None: input_variables = ["input", "agent_scratchpad"] _memory_prompts = memory_prompts or [] messages = [ SystemMessagePromptTemplate.from_template(template), *_memory_prompts, HumanMessagePromptTemplate.from_template(human_message_template), ] return ChatPromptTemplate(input_variables=input_variables, messages=messages) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, prefix: str = PREFIX, suffix: str = SUFFIX, human_message_template: str = HUMAN_MESSAGE_TEMPLATE, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, memory_prompts: 
Optional[List[BasePromptTemplate]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt( tools, prefix=prefix, suffix=suffix, human_message_template=human_message_template, format_instructions=format_instructions, input_variables=input_variables, memory_prompts=memory_prompts, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser(llm=llm) return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, ) @property def _agent_type(self) -> str: raise ValueError
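A short sketch of how StructuredChatAgent is typically paired with a chat model and wrapped in an AgentExecutor (illustrative only; `tools` is assumed to be a list of structured, multi-input BaseTool instances and an OpenAI API key is assumed to be configured):

.. code-block:: python

    from langchain.agents.agent import AgentExecutor
    from langchain.agents.structured_chat.base import StructuredChatAgent
    from langchain.chat_models import ChatOpenAI

    llm = ChatOpenAI(temperature=0)
    agent = StructuredChatAgent.from_llm_and_tools(llm=llm, tools=tools)
    executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
    executor.run("your multi-step request here")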
https://api.python.langchain.com/en/latest/_modules/langchain/agents/structured_chat/base.html
Source code for langchain.agents.agent_toolkits.sql.base """SQL agent.""" from typing import Any, Dict, List, Optional from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent from langchain.agents.agent_toolkits.sql.prompt import ( SQL_FUNCTIONS_SUFFIX, SQL_PREFIX, SQL_SUFFIX, ) from langchain.agents.agent_toolkits.sql.toolkit import SQLDatabaseToolkit from langchain.agents.agent_types import AgentType from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, ) from langchain.schema import AIMessage, SystemMessage [docs]def create_sql_agent( llm: BaseLanguageModel, toolkit: SQLDatabaseToolkit, agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = SQL_PREFIX, suffix: Optional[str] = None, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = "force", verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a sql agent from an LLM and tools.""" tools = toolkit.get_tools() prefix = prefix.format(dialect=toolkit.dialect, top_k=top_k) agent: BaseSingleActionAgent if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION: prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix or SQL_SUFFIX, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) elif agent_type == AgentType.OPENAI_FUNCTIONS: messages = [ SystemMessage(content=prefix), HumanMessagePromptTemplate.from_template("{input}"), AIMessage(content=suffix or SQL_FUNCTIONS_SUFFIX), MessagesPlaceholder(variable_name="agent_scratchpad"), ] input_variables = ["input", "agent_scratchpad"] _prompt = ChatPromptTemplate(input_variables=input_variables, messages=messages) agent = OpenAIFunctionsAgent( llm=llm, prompt=_prompt, tools=tools, callback_manager=callback_manager, **kwargs, ) else: raise ValueError(f"Agent type {agent_type} not supported at the moment.") return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, max_iterations=max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **(agent_executor_kwargs or {}), )
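A minimal sketch of calling create_sql_agent (the SQLite URI and the question are placeholders; an OpenAI API key is assumed to be configured):

.. code-block:: python

    from langchain.agents.agent_toolkits.sql.base import create_sql_agent
    from langchain.agents.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
    from langchain.llms.openai import OpenAI
    from langchain.sql_database import SQLDatabase

    db = SQLDatabase.from_uri("sqlite:///example.db")  # placeholder database
    llm = OpenAI(temperature=0)
    toolkit = SQLDatabaseToolkit(db=db, llm=llm)
    agent_executor = create_sql_agent(llm=llm, toolkit=toolkit, top_k=5, verbose=True)
    agent_executor.run("How many tables are there and what are they called?")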
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/sql/base.html
Source code for langchain.agents.agent_toolkits.sql.toolkit
"""Toolkit for interacting with a SQL database."""
from typing import List

from pydantic import Field

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.base_language import BaseLanguageModel
from langchain.sql_database import SQLDatabase
from langchain.tools import BaseTool
from langchain.tools.sql_database.tool import (
    InfoSQLDatabaseTool,
    ListSQLDatabaseTool,
    QuerySQLCheckerTool,
    QuerySQLDataBaseTool,
)


[docs]class SQLDatabaseToolkit(BaseToolkit):
    """Toolkit for interacting with SQL databases."""

    db: SQLDatabase = Field(exclude=True)
    llm: BaseLanguageModel = Field(exclude=True)

    @property
    def dialect(self) -> str:
        """Return string representation of dialect to use."""
        return self.db.dialect

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

[docs]    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        query_sql_database_tool_description = (
            "Input to this tool is a detailed and correct SQL query, output is a "
            "result from the database. If the query is not correct, an error message "
            "will be returned. If an error is returned, rewrite the query, check the "
            "query, and try again. If you encounter an issue with Unknown column "
            "'xxxx' in 'field list', use schema_sql_db to query the correct table "
            "fields."
        )
        info_sql_database_tool_description = (
            "Input to this tool is a comma-separated list of tables, output is the "
            "schema and sample rows for those tables. "
            "Be sure that the tables actually exist by calling list_tables_sql_db "
            "first! Example Input: 'table1, table2, table3'"
        )
        return [
            QuerySQLDataBaseTool(
                db=self.db, description=query_sql_database_tool_description
            ),
            InfoSQLDatabaseTool(
                db=self.db, description=info_sql_database_tool_description
            ),
            ListSQLDatabaseTool(db=self.db),
            QuerySQLCheckerTool(db=self.db, llm=self.llm),
        ]
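For reference, the toolkit can also be used on its own to inspect the tools it exposes (a sketch reusing the `db` and `llm` objects from the SQL agent example above):

.. code-block:: python

    toolkit = SQLDatabaseToolkit(db=db, llm=llm)
    for tool in toolkit.get_tools():
        print(tool.name)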
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/sql/toolkit.html
Source code for langchain.agents.agent_toolkits.python.base """Python agent.""" from typing import Any, Dict, Optional from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent from langchain.agents.agent_toolkits.python.prompt import PREFIX from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent from langchain.agents.types import AgentType from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.schema import SystemMessage from langchain.tools.python.tool import PythonREPLTool [docs]def create_python_agent( llm: BaseLanguageModel, tool: PythonREPLTool, agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION, callback_manager: Optional[BaseCallbackManager] = None, verbose: bool = False, prefix: str = PREFIX, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a python agent from an LLM and tool.""" tools = [tool] agent: BaseSingleActionAgent if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION: prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) elif agent_type == AgentType.OPENAI_FUNCTIONS: system_message = SystemMessage(content=prefix) _prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) agent = OpenAIFunctionsAgent( llm=llm, prompt=_prompt, tools=tools, callback_manager=callback_manager, **kwargs, ) else: raise ValueError(f"Agent type {agent_type} not supported at the moment.") return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), )
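A minimal sketch of create_python_agent (assumes an OpenAI API key is available in the environment; the question is a placeholder):

.. code-block:: python

    from langchain.agents.agent_toolkits.python.base import create_python_agent
    from langchain.agents.types import AgentType
    from langchain.llms.openai import OpenAI
    from langchain.tools.python.tool import PythonREPLTool

    agent_executor = create_python_agent(
        llm=OpenAI(temperature=0),
        tool=PythonREPLTool(),
        agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    agent_executor.run("What is the 10th Fibonacci number?")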
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/python/base.html
Source code for langchain.agents.agent_toolkits.nla.toolkit """Toolkit for interacting with API's using natural language.""" from __future__ import annotations from typing import Any, List, Optional, Sequence from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.agents.agent_toolkits.nla.tool import NLATool from langchain.base_language import BaseLanguageModel from langchain.requests import Requests from langchain.tools.base import BaseTool from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec from langchain.tools.plugin import AIPlugin [docs]class NLAToolkit(BaseToolkit): """Natural Language API Toolkit Definition.""" nla_tools: Sequence[NLATool] = Field(...) """List of API Endpoint Tools.""" [docs] def get_tools(self) -> List[BaseTool]: """Get the tools for all the API operations.""" return list(self.nla_tools) @staticmethod def _get_http_operation_tools( llm: BaseLanguageModel, spec: OpenAPISpec, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> List[NLATool]: """Get the tools for all the API operations.""" if not spec.paths: return [] http_operation_tools = [] for path in spec.paths: for method in spec.get_methods_for_path(path): endpoint_tool = NLATool.from_llm_and_method( llm=llm, path=path, method=method, spec=spec, requests=requests, verbose=verbose, **kwargs, ) http_operation_tools.append(endpoint_tool) return http_operation_tools [docs] @classmethod def from_llm_and_spec( cls, llm: BaseLanguageModel, spec: OpenAPISpec, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit by creating tools for each operation.""" http_operation_tools = cls._get_http_operation_tools( llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs ) return cls(nla_tools=http_operation_tools) [docs] @classmethod def from_llm_and_url( cls, llm: BaseLanguageModel, open_api_url: str, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit from an OpenAPI Spec URL""" spec = OpenAPISpec.from_url(open_api_url) return cls.from_llm_and_spec( llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs ) [docs] @classmethod def from_llm_and_ai_plugin( cls, llm: BaseLanguageModel, ai_plugin: AIPlugin, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit from an OpenAPI Spec URL""" spec = OpenAPISpec.from_url(ai_plugin.api.url) # TODO: Merge optional Auth information with the `requests` argument return cls.from_llm_and_spec( llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs, ) [docs] @classmethod def from_llm_and_ai_plugin_url( cls, llm: BaseLanguageModel, ai_plugin_url: str, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit from an OpenAPI Spec URL""" plugin = AIPlugin.from_url(ai_plugin_url) return cls.from_llm_and_ai_plugin( llm=llm, ai_plugin=plugin, requests=requests, verbose=verbose, **kwargs )
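A sketch of building an NLAToolkit from a hosted OpenAPI spec (the URL is a placeholder; any reachable OpenAPI document would work, and an OpenAI API key is assumed):

.. code-block:: python

    from langchain.agents.agent_toolkits.nla.toolkit import NLAToolkit
    from langchain.llms.openai import OpenAI

    llm = OpenAI(temperature=0)
    toolkit = NLAToolkit.from_llm_and_url(
        llm, "https://example.com/openapi.json"  # placeholder spec URL
    )
    tools = toolkit.get_tools()  # one NLATool per documented API operation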
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/nla/toolkit.html
Source code for langchain.agents.agent_toolkits.powerbi.base """Power BI agent.""" from typing import Any, Dict, List, Optional from langchain.agents import AgentExecutor from langchain.agents.agent_toolkits.powerbi.prompt import ( POWERBI_PREFIX, POWERBI_SUFFIX, ) from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.utilities.powerbi import PowerBIDataset [docs]def create_pbi_agent( llm: BaseLanguageModel, toolkit: Optional[PowerBIToolkit], powerbi: Optional[PowerBIDataset] = None, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = POWERBI_PREFIX, suffix: str = POWERBI_SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, examples: Optional[str] = None, input_variables: Optional[List[str]] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a pbi agent from an LLM and tools.""" if toolkit is None: if powerbi is None: raise ValueError("Must provide either a toolkit or powerbi dataset") toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples) tools = toolkit.get_tools() agent = ZeroShotAgent( llm_chain=LLMChain( llm=llm, prompt=ZeroShotAgent.create_prompt( tools, prefix=prefix.format(top_k=top_k), suffix=suffix, format_instructions=format_instructions, input_variables=input_variables, ), callback_manager=callback_manager, # type: ignore verbose=verbose, ), allowed_tools=[tool.name for tool in tools], **kwargs, ) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), )
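A sketch of create_pbi_agent; constructing the PowerBIDataset itself requires Azure credentials and a dataset id, so it is assumed here to already exist:

.. code-block:: python

    from langchain.agents.agent_toolkits.powerbi.base import create_pbi_agent
    from langchain.llms.openai import OpenAI

    # `powerbi` is assumed to be an already configured PowerBIDataset instance.
    agent_executor = create_pbi_agent(
        llm=OpenAI(temperature=0),
        toolkit=None,
        powerbi=powerbi,
        verbose=True,
    )
    agent_executor.run("How many rows are in the Sales table?")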
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/powerbi/base.html
Source code for langchain.agents.agent_toolkits.powerbi.toolkit """Toolkit for interacting with a Power BI dataset.""" from typing import List, Optional from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.prompts import PromptTemplate from langchain.tools import BaseTool from langchain.tools.powerbi.prompt import QUESTION_TO_QUERY from langchain.tools.powerbi.tool import ( InfoPowerBITool, ListPowerBITool, QueryPowerBITool, ) from langchain.utilities.powerbi import PowerBIDataset [docs]class PowerBIToolkit(BaseToolkit): """Toolkit for interacting with PowerBI dataset.""" powerbi: PowerBIDataset = Field(exclude=True) llm: BaseLanguageModel = Field(exclude=True) examples: Optional[str] = None max_iterations: int = 5 callback_manager: Optional[BaseCallbackManager] = None class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" if self.callback_manager: chain = LLMChain( llm=self.llm, callback_manager=self.callback_manager, prompt=PromptTemplate( template=QUESTION_TO_QUERY, input_variables=["tool_input", "tables", "schemas", "examples"], ), ) else: chain = LLMChain( llm=self.llm, prompt=PromptTemplate( template=QUESTION_TO_QUERY, input_variables=["tool_input", "tables", "schemas", "examples"], ), ) return [ QueryPowerBITool( llm_chain=chain, powerbi=self.powerbi, examples=self.examples, max_iterations=self.max_iterations, ), InfoPowerBITool(powerbi=self.powerbi), ListPowerBITool(powerbi=self.powerbi), ]
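The toolkit can also be built directly when more control is needed, for example to cap query retries (a sketch; `powerbi` and `llm` are assumed to exist as in the previous example):

.. code-block:: python

    from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit

    toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, max_iterations=3)
    tools = toolkit.get_tools()  # query, info, and list tools for the dataset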
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/powerbi/toolkit.html
Source code for langchain.agents.agent_toolkits.powerbi.chat_base
"""Power BI agent."""
from typing import Any, Dict, List, Optional

from langchain.agents import AgentExecutor
from langchain.agents.agent import AgentOutputParser
from langchain.agents.agent_toolkits.powerbi.prompt import (
    POWERBI_CHAT_PREFIX,
    POWERBI_CHAT_SUFFIX,
)
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_memory import BaseChatMemory
from langchain.utilities.powerbi import PowerBIDataset


[docs]def create_pbi_chat_agent(
    llm: BaseChatModel,
    toolkit: Optional[PowerBIToolkit],
    powerbi: Optional[PowerBIDataset] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    output_parser: Optional[AgentOutputParser] = None,
    prefix: str = POWERBI_CHAT_PREFIX,
    suffix: str = POWERBI_CHAT_SUFFIX,
    examples: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    memory: Optional[BaseChatMemory] = None,
    top_k: int = 10,
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a Power BI agent from a chat LLM and tools.

    If you supply only a Power BI dataset and no toolkit, the same LLM is used
    both for the toolkit's query chain and for the agent itself.
    """
    if toolkit is None:
        if powerbi is None:
            raise ValueError("Must provide either a toolkit or powerbi dataset")
        toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
    tools = toolkit.get_tools()
    agent = ConversationalChatAgent.from_llm_and_tools(
        llm=llm,
        tools=tools,
        system_message=prefix.format(top_k=top_k),
        human_message=suffix,
        input_variables=input_variables,
        callback_manager=callback_manager,
        output_parser=output_parser,
        verbose=verbose,
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        memory=memory
        or ConversationBufferMemory(memory_key="chat_history", return_messages=True),
        verbose=verbose,
        **(agent_executor_kwargs or {}),
    )
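A sketch of the chat variant, which keeps conversation history in a buffer memory by default (again assuming `powerbi` is an already configured PowerBIDataset instance):

.. code-block:: python

    from langchain.agents.agent_toolkits.powerbi.chat_base import create_pbi_chat_agent
    from langchain.chat_models import ChatOpenAI

    agent_executor = create_pbi_chat_agent(
        llm=ChatOpenAI(temperature=0),
        toolkit=None,
        powerbi=powerbi,  # assumed PowerBIDataset instance
        verbose=True,
    )
    agent_executor.run("Which table has the most rows?")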
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/powerbi/chat_base.html
Source code for langchain.agents.agent_toolkits.json.base """Json agent.""" from typing import Any, Dict, List, Optional from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain [docs]def create_json_agent( llm: BaseLanguageModel, toolkit: JsonToolkit, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = JSON_PREFIX, suffix: str = JSON_SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a json agent from an LLM and tools.""" tools = toolkit.get_tools() prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), )
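A sketch of create_json_agent over a local OpenAPI/JSON document (the file name, size limit, and question are placeholders; an OpenAI API key is assumed):

.. code-block:: python

    import yaml

    from langchain.agents.agent_toolkits.json.base import create_json_agent
    from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
    from langchain.llms.openai import OpenAI
    from langchain.tools.json.tool import JsonSpec

    with open("openapi.yml") as f:  # placeholder spec file
        data = yaml.safe_load(f)
    toolkit = JsonToolkit(spec=JsonSpec(dict_=data, max_value_length=4000))
    agent_executor = create_json_agent(llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True)
    agent_executor.run("What keys exist at the top level of this document?")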
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/json/base.html
Source code for langchain.agents.agent_toolkits.json.toolkit """Toolkit for interacting with a JSON spec.""" from __future__ import annotations from typing import List from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool from langchain.tools.json.tool import JsonGetValueTool, JsonListKeysTool, JsonSpec [docs]class JsonToolkit(BaseToolkit): """Toolkit for interacting with a JSON spec.""" spec: JsonSpec [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ JsonListKeysTool(spec=self.spec), JsonGetValueTool(spec=self.spec), ]
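The toolkit itself only wraps two tools, which can be listed directly (a sketch reusing the `data` dictionary from the previous example):

.. code-block:: python

    from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
    from langchain.tools.json.tool import JsonSpec

    toolkit = JsonToolkit(spec=JsonSpec(dict_=data))
    print([tool.name for tool in toolkit.get_tools()])  # one list-keys tool, one get-value tool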
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/json/toolkit.html
Source code for langchain.agents.agent_toolkits.pandas.base """Agent for working with pandas objects.""" from typing import Any, Dict, List, Optional, Tuple from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent from langchain.agents.agent_toolkits.pandas.prompt import ( FUNCTIONS_WITH_DF, FUNCTIONS_WITH_MULTI_DF, MULTI_DF_PREFIX, MULTI_DF_PREFIX_FUNCTIONS, PREFIX, PREFIX_FUNCTIONS, SUFFIX_NO_DF, SUFFIX_WITH_DF, SUFFIX_WITH_MULTI_DF, ) from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent from langchain.agents.types import AgentType from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.prompts.base import BasePromptTemplate from langchain.schema import SystemMessage from langchain.tools.python.tool import PythonAstREPLTool def _get_multi_prompt( dfs: List[Any], prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: num_dfs = len(dfs) if suffix is not None: suffix_to_use = suffix include_dfs_head = True elif include_df_in_prompt: suffix_to_use = SUFFIX_WITH_MULTI_DF include_dfs_head = True else: suffix_to_use = SUFFIX_NO_DF include_dfs_head = False if input_variables is None: input_variables = ["input", "agent_scratchpad", "num_dfs"] if include_dfs_head: input_variables += ["dfs_head"] if prefix is None: prefix = MULTI_DF_PREFIX df_locals = {} for i, dataframe in enumerate(dfs): df_locals[f"df{i + 1}"] = dataframe tools = [PythonAstREPLTool(locals=df_locals)] prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables ) partial_prompt = prompt.partial() if "dfs_head" in input_variables: dfs_head = "\n\n".join([d.head().to_markdown() for d in dfs]) partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs), dfs_head=dfs_head) if "num_dfs" in input_variables: partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs)) return partial_prompt, tools def _get_single_prompt( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: if suffix is not None: suffix_to_use = suffix include_df_head = True elif include_df_in_prompt: suffix_to_use = SUFFIX_WITH_DF include_df_head = True else: suffix_to_use = SUFFIX_NO_DF include_df_head = False if input_variables is None: input_variables = ["input", "agent_scratchpad"] if include_df_head: input_variables += ["df_head"] if prefix is None: prefix = PREFIX tools = [PythonAstREPLTool(locals={"df": df})] prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables ) partial_prompt = prompt.partial() if "df_head" in input_variables: partial_prompt = partial_prompt.partial(df_head=str(df.head().to_markdown())) return partial_prompt, tools def _get_prompt_and_tools( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: try: import pandas as pd except ImportError: raise ValueError( "pandas package not found, please install with `pip install pandas`" ) if include_df_in_prompt is not None 
and suffix is not None: raise ValueError("If suffix is specified, include_df_in_prompt should not be.") if isinstance(df, list): for item in df: if not isinstance(item, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_multi_prompt( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, ) else: if not isinstance(df, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_single_prompt( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, ) def _get_functions_single_prompt( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, include_df_in_prompt: Optional[bool] = True, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: if suffix is not None: suffix_to_use = suffix if include_df_in_prompt: suffix_to_use = suffix_to_use.format(df_head=str(df.head().to_markdown())) elif include_df_in_prompt: suffix_to_use = FUNCTIONS_WITH_DF.format(df_head=str(df.head().to_markdown())) else: suffix_to_use = "" if prefix is None: prefix = PREFIX_FUNCTIONS tools = [PythonAstREPLTool(locals={"df": df})] system_message = SystemMessage(content=prefix + suffix_to_use) prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) return prompt, tools def _get_functions_multi_prompt( dfs: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, include_df_in_prompt: Optional[bool] = True, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: if suffix is not None: suffix_to_use = suffix if include_df_in_prompt: dfs_head = "\n\n".join([d.head().to_markdown() for d in dfs]) suffix_to_use = suffix_to_use.format( dfs_head=dfs_head, ) elif include_df_in_prompt: dfs_head = "\n\n".join([d.head().to_markdown() for d in dfs]) suffix_to_use = FUNCTIONS_WITH_MULTI_DF.format( dfs_head=dfs_head, ) else: suffix_to_use = "" if prefix is None: prefix = MULTI_DF_PREFIX_FUNCTIONS prefix = prefix.format(num_dfs=str(len(dfs))) df_locals = {} for i, dataframe in enumerate(dfs): df_locals[f"df{i + 1}"] = dataframe tools = [PythonAstREPLTool(locals=df_locals)] system_message = SystemMessage(content=prefix + suffix_to_use) prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) return prompt, tools def _get_functions_prompt_and_tools( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: try: import pandas as pd except ImportError: raise ValueError( "pandas package not found, please install with `pip install pandas`" ) if input_variables is not None: raise ValueError("`input_variables` is not supported at the moment.") if include_df_in_prompt is not None and suffix is not None: raise ValueError("If suffix is specified, include_df_in_prompt should not be.") if isinstance(df, list): for item in df: if not isinstance(item, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_functions_multi_prompt( df, prefix=prefix, suffix=suffix, include_df_in_prompt=include_df_in_prompt, ) else: if not isinstance(df, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_functions_single_prompt( df, prefix=prefix, suffix=suffix, include_df_in_prompt=include_df_in_prompt, ) [docs]def create_pandas_dataframe_agent( llm: BaseLanguageModel, df: Any, agent_type: AgentType = 
AgentType.ZERO_SHOT_REACT_DESCRIPTION, callback_manager: Optional[BaseCallbackManager] = None, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = "force", agent_executor_kwargs: Optional[Dict[str, Any]] = None, include_df_in_prompt: Optional[bool] = True, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a pandas agent from an LLM and dataframe.""" agent: BaseSingleActionAgent if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION: prompt, tools = _get_prompt_and_tools( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent( llm_chain=llm_chain, allowed_tools=tool_names, callback_manager=callback_manager, **kwargs, ) elif agent_type == AgentType.OPENAI_FUNCTIONS: _prompt, tools = _get_functions_prompt_and_tools( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, ) agent = OpenAIFunctionsAgent( llm=llm, prompt=_prompt, tools=tools, callback_manager=callback_manager, **kwargs, ) else: raise ValueError(f"Agent type {agent_type} not supported at the moment.") return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, return_intermediate_steps=return_intermediate_steps, max_iterations=max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **(agent_executor_kwargs or {}), )
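A minimal sketch of create_pandas_dataframe_agent (the CSV path and question are placeholders; a list of DataFrames may be passed instead of a single one, and an OpenAI API key is assumed):

.. code-block:: python

    import pandas as pd

    from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
    from langchain.llms.openai import OpenAI

    df = pd.read_csv("titanic.csv")  # placeholder file
    agent_executor = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
    agent_executor.run("How many rows and columns does the dataframe have?")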
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/pandas/base.html
Source code for langchain.agents.agent_toolkits.gmail.toolkit from __future__ import annotations from typing import TYPE_CHECKING, List from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool from langchain.tools.gmail.create_draft import GmailCreateDraft from langchain.tools.gmail.get_message import GmailGetMessage from langchain.tools.gmail.get_thread import GmailGetThread from langchain.tools.gmail.search import GmailSearch from langchain.tools.gmail.send_message import GmailSendMessage from langchain.tools.gmail.utils import build_resource_service if TYPE_CHECKING: # This is for linting and IDE typehints from googleapiclient.discovery import Resource else: try: # We do this so pydantic can resolve the types when instantiating from googleapiclient.discovery import Resource except ImportError: pass SCOPES = ["https://mail.google.com/"] [docs]class GmailToolkit(BaseToolkit): """Toolkit for interacting with Gmail.""" api_resource: Resource = Field(default_factory=build_resource_service) class Config: """Pydantic config.""" arbitrary_types_allowed = True [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ GmailCreateDraft(api_resource=self.api_resource), GmailSendMessage(api_resource=self.api_resource), GmailSearch(api_resource=self.api_resource), GmailGetMessage(api_resource=self.api_resource), GmailGetThread(api_resource=self.api_resource), ]
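A sketch of using the Gmail toolkit; GmailToolkit() relies on build_resource_service, which expects Google API credentials (for example a local credentials file and token) to be available:

.. code-block:: python

    from langchain.agents.agent_toolkits.gmail.toolkit import GmailToolkit

    toolkit = GmailToolkit()  # builds the API resource via build_resource_service()
    tools = toolkit.get_tools()
    print([tool.name for tool in tools])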
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/gmail/toolkit.html
Source code for langchain.agents.agent_toolkits.vectorstore.base """VectorStore agent.""" from typing import Any, Dict, Optional from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.vectorstore.prompt import PREFIX, ROUTER_PREFIX from langchain.agents.agent_toolkits.vectorstore.toolkit import ( VectorStoreRouterToolkit, VectorStoreToolkit, ) from langchain.agents.mrkl.base import ZeroShotAgent from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain [docs]def create_vectorstore_agent( llm: BaseLanguageModel, toolkit: VectorStoreToolkit, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = PREFIX, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a vectorstore agent from an LLM and tools.""" tools = toolkit.get_tools() prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), ) [docs]def create_vectorstore_router_agent( llm: BaseLanguageModel, toolkit: VectorStoreRouterToolkit, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = ROUTER_PREFIX, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a vectorstore router agent from an LLM and tools.""" tools = toolkit.get_tools() prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), )
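A sketch of create_vectorstore_agent; `vectorstore` is assumed to be an existing VectorStore instance (for example a Chroma or FAISS index built elsewhere), and the name, description, and question are placeholders:

.. code-block:: python

    from langchain.agents.agent_toolkits.vectorstore.base import create_vectorstore_agent
    from langchain.agents.agent_toolkits.vectorstore.toolkit import (
        VectorStoreInfo,
        VectorStoreToolkit,
    )
    from langchain.llms.openai import OpenAI

    vectorstore_info = VectorStoreInfo(
        name="company_docs",
        description="internal company documentation",  # placeholder name and description
        vectorstore=vectorstore,
    )
    toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)
    agent_executor = create_vectorstore_agent(
        llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True
    )
    agent_executor.run("What does the onboarding guide say about laptops?")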
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/vectorstore/base.html
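A minimal usage sketch for create_vectorstore_agent, not part of the source above: the tiny FAISS index, the tool name and description, and the question are illustrative placeholders, and an OpenAI API key is assumed to be configured.

from langchain.agents.agent_toolkits.vectorstore.base import create_vectorstore_agent
from langchain.agents.agent_toolkits.vectorstore.toolkit import (
    VectorStoreInfo,
    VectorStoreToolkit,
)
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

# Tiny illustrative index; in practice this would be built from real documents.
vectorstore = FAISS.from_texts(
    ["The handbook grants 25 vacation days per year."], OpenAIEmbeddings()
)
vectorstore_info = VectorStoreInfo(
    name="company_handbook",
    description="Answers questions about the company handbook",
    vectorstore=vectorstore,
)
toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)

agent_executor = create_vectorstore_agent(
    llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True
)
agent_executor.run("How many vacation days does the handbook grant?")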
Source code for langchain.agents.agent_toolkits.vectorstore.toolkit """Toolkit for interacting with a vector store.""" from typing import List from pydantic import BaseModel, Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.base_language import BaseLanguageModel from langchain.llms.openai import OpenAI from langchain.tools import BaseTool from langchain.tools.vectorstore.tool import ( VectorStoreQATool, VectorStoreQAWithSourcesTool, ) from langchain.vectorstores.base import VectorStore [docs]class VectorStoreInfo(BaseModel): """Information about a vectorstore.""" vectorstore: VectorStore = Field(exclude=True) name: str description: str class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs]class VectorStoreToolkit(BaseToolkit): """Toolkit for interacting with a vector store.""" vectorstore_info: VectorStoreInfo = Field(exclude=True) llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0)) class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" description = VectorStoreQATool.get_description( self.vectorstore_info.name, self.vectorstore_info.description ) qa_tool = VectorStoreQATool( name=self.vectorstore_info.name, description=description, vectorstore=self.vectorstore_info.vectorstore, llm=self.llm, ) description = VectorStoreQAWithSourcesTool.get_description( self.vectorstore_info.name, self.vectorstore_info.description ) qa_with_sources_tool = VectorStoreQAWithSourcesTool( name=f"{self.vectorstore_info.name}_with_sources", description=description, vectorstore=self.vectorstore_info.vectorstore, llm=self.llm, ) return [qa_tool, qa_with_sources_tool] [docs]class VectorStoreRouterToolkit(BaseToolkit): """Toolkit for routing between vector stores.""" vectorstores: List[VectorStoreInfo] = Field(exclude=True) llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0)) class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" tools: List[BaseTool] = [] for vectorstore_info in self.vectorstores: description = VectorStoreQATool.get_description( vectorstore_info.name, vectorstore_info.description ) qa_tool = VectorStoreQATool( name=vectorstore_info.name, description=description, vectorstore=vectorstore_info.vectorstore, llm=self.llm, ) tools.append(qa_tool) return tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/vectorstore/toolkit.html
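A companion sketch for VectorStoreRouterToolkit, again illustrative rather than from the source: two tiny FAISS stores stand in for real indexes, and create_vectorstore_router_agent from the base module above routes between them.

from langchain.agents.agent_toolkits.vectorstore.base import (
    create_vectorstore_router_agent,
)
from langchain.agents.agent_toolkits.vectorstore.toolkit import (
    VectorStoreInfo,
    VectorStoreRouterToolkit,
)
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
faq_store = FAISS.from_texts(["Returns are accepted within 30 days."], embeddings)
policy_store = FAISS.from_texts(["Remote work requires manager approval."], embeddings)

router_toolkit = VectorStoreRouterToolkit(
    vectorstores=[
        VectorStoreInfo(name="faq", description="Product FAQ answers", vectorstore=faq_store),
        VectorStoreInfo(name="policies", description="Internal policies", vectorstore=policy_store),
    ],
    llm=OpenAI(temperature=0),
)
agent_executor = create_vectorstore_router_agent(
    llm=OpenAI(temperature=0), toolkit=router_toolkit, verbose=True
)
agent_executor.run("Which source covers remote work, and what does it say?")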
Source code for langchain.agents.agent_toolkits.spark.base

"""Agent for working with Spark DataFrames."""
from typing import Any, Dict, List, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.spark.prompt import PREFIX, SUFFIX
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.tools.python.tool import PythonAstREPLTool


def _validate_spark_df(df: Any) -> bool:
    try:
        from pyspark.sql import DataFrame as SparkLocalDataFrame

        return isinstance(df, SparkLocalDataFrame)
    except ImportError:
        return False


def _validate_spark_connect_df(df: Any) -> bool:
    try:
        from pyspark.sql.connect.dataframe import DataFrame as SparkConnectDataFrame

        return isinstance(df, SparkConnectDataFrame)
    except ImportError:
        return False


[docs]def create_spark_dataframe_agent(
    llm: BaseLLM,
    df: Any,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = PREFIX,
    suffix: str = SUFFIX,
    input_variables: Optional[List[str]] = None,
    verbose: bool = False,
    return_intermediate_steps: bool = False,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
    early_stopping_method: str = "force",
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a Spark agent from an LLM and a Spark DataFrame."""
    if not _validate_spark_df(df) and not _validate_spark_connect_df(df):
        raise ValueError(
            "`df` is not a Spark DataFrame, or Spark is not installed. "
            "Run `pip install pyspark`."
        )
    if input_variables is None:
        input_variables = ["df", "input", "agent_scratchpad"]
    tools = [PythonAstREPLTool(locals={"df": df})]
    prompt = ZeroShotAgent.create_prompt(
        tools, prefix=prefix, suffix=suffix, input_variables=input_variables
    )
    partial_prompt = prompt.partial(df=str(df.first()))
    llm_chain = LLMChain(
        llm=llm,
        prompt=partial_prompt,
        callback_manager=callback_manager,
    )
    tool_names = [tool.name for tool in tools]
    agent = ZeroShotAgent(
        llm_chain=llm_chain,
        allowed_tools=tool_names,
        callback_manager=callback_manager,
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        return_intermediate_steps=return_intermediate_steps,
        max_iterations=max_iterations,
        max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/spark/base.html
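A minimal usage sketch for create_spark_dataframe_agent, not from the source: the CSV path is a placeholder, and a local Spark session plus an OpenAI API key are assumed.

from pyspark.sql import SparkSession

from langchain.agents.agent_toolkits.spark.base import create_spark_dataframe_agent
from langchain.llms import OpenAI

spark = SparkSession.builder.getOrCreate()
# "titanic.csv" is an illustrative path; any classic or Spark Connect DataFrame works.
df = spark.read.csv("titanic.csv", header=True, inferSchema=True)

agent = create_spark_dataframe_agent(llm=OpenAI(temperature=0), df=df, verbose=True)
agent.run("How many rows are in the dataframe?")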
Source code for langchain.agents.agent_toolkits.playwright.toolkit """Playwright web browser toolkit.""" from __future__ import annotations from typing import TYPE_CHECKING, List, Optional, Type, cast from pydantic import Extra, root_validator from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools.base import BaseTool from langchain.tools.playwright.base import ( BaseBrowserTool, lazy_import_playwright_browsers, ) from langchain.tools.playwright.click import ClickTool from langchain.tools.playwright.current_page import CurrentWebPageTool from langchain.tools.playwright.extract_hyperlinks import ExtractHyperlinksTool from langchain.tools.playwright.extract_text import ExtractTextTool from langchain.tools.playwright.get_elements import GetElementsTool from langchain.tools.playwright.navigate import NavigateTool from langchain.tools.playwright.navigate_back import NavigateBackTool if TYPE_CHECKING: from playwright.async_api import Browser as AsyncBrowser from playwright.sync_api import Browser as SyncBrowser else: try: # We do this so pydantic can resolve the types when instantiating from playwright.async_api import Browser as AsyncBrowser from playwright.sync_api import Browser as SyncBrowser except ImportError: pass [docs]class PlayWrightBrowserToolkit(BaseToolkit): """Toolkit for web browser tools.""" sync_browser: Optional["SyncBrowser"] = None async_browser: Optional["AsyncBrowser"] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator def validate_imports_and_browser_provided(cls, values: dict) -> dict: """Check that the arguments are valid.""" lazy_import_playwright_browsers() if values.get("async_browser") is None and values.get("sync_browser") is None: raise ValueError("Either async_browser or sync_browser must be specified.") return values [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" tool_classes: List[Type[BaseBrowserTool]] = [ ClickTool, NavigateTool, NavigateBackTool, ExtractTextTool, ExtractHyperlinksTool, GetElementsTool, CurrentWebPageTool, ] tools = [ tool_cls.from_browser( sync_browser=self.sync_browser, async_browser=self.async_browser ) for tool_cls in tool_classes ] return cast(List[BaseTool], tools) [docs] @classmethod def from_browser( cls, sync_browser: Optional[SyncBrowser] = None, async_browser: Optional[AsyncBrowser] = None, ) -> PlayWrightBrowserToolkit: """Instantiate the toolkit.""" # This is to raise a better error than the forward ref ones Pydantic would have lazy_import_playwright_browsers() return cls(sync_browser=sync_browser, async_browser=async_browser)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/playwright/toolkit.html
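A minimal sketch for PlayWrightBrowserToolkit that supplies the browser through Playwright's own sync API rather than any LangChain helper; it assumes the playwright package and its browser binaries (installed via `playwright install`) are present. The resulting tools can then be handed to a structured-chat agent.

from playwright.sync_api import sync_playwright

from langchain.agents.agent_toolkits.playwright.toolkit import PlayWrightBrowserToolkit

# Launch a headless Chromium instance to back the browser tools.
pw = sync_playwright().start()
browser = pw.chromium.launch(headless=True)

toolkit = PlayWrightBrowserToolkit.from_browser(sync_browser=browser)
tools = toolkit.get_tools()  # navigate, click, extract_text, etc.

# ... use the tools or pass them to an agent, then clean up:
browser.close()
pw.stop()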
Source code for langchain.agents.agent_toolkits.csv.base

"""Agent for working with CSV files."""
from typing import Any, List, Optional, Union

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain.base_language import BaseLanguageModel


[docs]def create_csv_agent(
    llm: BaseLanguageModel,
    path: Union[str, List[str]],
    pandas_kwargs: Optional[dict] = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Create a CSV agent by loading the file(s) into a dataframe and using the pandas agent."""
    try:
        import pandas as pd
    except ImportError:
        raise ValueError(
            "pandas package not found, please install with `pip install pandas`"
        )
    _kwargs = pandas_kwargs or {}
    if isinstance(path, str):
        df = pd.read_csv(path, **_kwargs)
    elif isinstance(path, list):
        df = []
        for item in path:
            if not isinstance(item, str):
                # Report the offending list element, not the whole list.
                raise ValueError(f"Expected str, got {type(item)}")
            df.append(pd.read_csv(item, **_kwargs))
    else:
        raise ValueError(f"Expected str or list, got {type(path)}")
    return create_pandas_dataframe_agent(llm, df, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/csv/base.html
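A minimal usage sketch for create_csv_agent, not from the source; the file name is a placeholder, and passing a list of paths loads one dataframe per file.

from langchain.agents.agent_toolkits.csv.base import create_csv_agent
from langchain.llms import OpenAI

# "titanic.csv" is an illustrative path.
agent = create_csv_agent(OpenAI(temperature=0), "titanic.csv", verbose=True)
agent.run("How many rows are there?")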
Source code for langchain.agents.agent_toolkits.openapi.base

"""OpenAPI spec agent."""
from typing import Any, Dict, List, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.openapi.prompt import (
    OPENAPI_PREFIX,
    OPENAPI_SUFFIX,
)
from langchain.agents.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain


[docs]def create_openapi_agent(
    llm: BaseLanguageModel,
    toolkit: OpenAPIToolkit,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = OPENAPI_PREFIX,
    suffix: str = OPENAPI_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    input_variables: Optional[List[str]] = None,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
    early_stopping_method: str = "force",
    verbose: bool = False,
    return_intermediate_steps: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct an OpenAPI agent from an LLM and tools."""
    tools = toolkit.get_tools()
    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        format_instructions=format_instructions,
        input_variables=input_variables,
    )
    llm_chain = LLMChain(
        llm=llm,
        prompt=prompt,
        callback_manager=callback_manager,
    )
    tool_names = [tool.name for tool in tools]
    agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        return_intermediate_steps=return_intermediate_steps,
        max_iterations=max_iterations,
        max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method,
        **(agent_executor_kwargs or {}),
    )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/openapi/base.html
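A hedged end-to-end sketch for create_openapi_agent: the spec file name is a placeholder, and the JsonSpec / TextRequestsWrapper construction follows the pattern used in the LangChain examples of this period rather than anything shown in the module above.

import yaml

from langchain.agents.agent_toolkits.openapi.base import create_openapi_agent
from langchain.agents.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain.llms import OpenAI
from langchain.requests import TextRequestsWrapper
from langchain.tools.json.tool import JsonSpec

# "openapi.yaml" is an illustrative local copy of an OpenAPI spec.
with open("openapi.yaml") as f:
    raw_spec = yaml.safe_load(f)
json_spec = JsonSpec(dict_=raw_spec, max_value_length=4000)

llm = OpenAI(temperature=0)
toolkit = OpenAPIToolkit.from_llm(llm, json_spec, TextRequestsWrapper(), verbose=True)
agent = create_openapi_agent(llm, toolkit, verbose=True)
agent.run("Which endpoints are available, and what does the POST endpoint expect?")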
Source code for langchain.agents.agent_toolkits.openapi.toolkit """Requests toolkit.""" from __future__ import annotations from typing import Any, List from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.agents.agent_toolkits.json.base import create_json_agent from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit from langchain.agents.agent_toolkits.openapi.prompt import DESCRIPTION from langchain.agents.tools import Tool from langchain.base_language import BaseLanguageModel from langchain.requests import TextRequestsWrapper from langchain.tools import BaseTool from langchain.tools.json.tool import JsonSpec from langchain.tools.requests.tool import ( RequestsDeleteTool, RequestsGetTool, RequestsPatchTool, RequestsPostTool, RequestsPutTool, ) class RequestsToolkit(BaseToolkit): """Toolkit for making requests.""" requests_wrapper: TextRequestsWrapper def get_tools(self) -> List[BaseTool]: """Return a list of tools.""" return [ RequestsGetTool(requests_wrapper=self.requests_wrapper), RequestsPostTool(requests_wrapper=self.requests_wrapper), RequestsPatchTool(requests_wrapper=self.requests_wrapper), RequestsPutTool(requests_wrapper=self.requests_wrapper), RequestsDeleteTool(requests_wrapper=self.requests_wrapper), ] [docs]class OpenAPIToolkit(BaseToolkit): """Toolkit for interacting with a OpenAPI api.""" json_agent: AgentExecutor requests_wrapper: TextRequestsWrapper [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" json_agent_tool = Tool( name="json_explorer", func=self.json_agent.run, description=DESCRIPTION, ) request_toolkit = RequestsToolkit(requests_wrapper=self.requests_wrapper) return [*request_toolkit.get_tools(), json_agent_tool] [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, json_spec: JsonSpec, requests_wrapper: TextRequestsWrapper, **kwargs: Any, ) -> OpenAPIToolkit: """Create json agent from llm, then initialize.""" json_agent = create_json_agent(llm, JsonToolkit(spec=json_spec), **kwargs) return cls(json_agent=json_agent, requests_wrapper=requests_wrapper)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/openapi/toolkit.html
Source code for langchain.agents.agent_toolkits.jira.toolkit """Jira Toolkit.""" from typing import List from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool from langchain.tools.jira.tool import JiraAction from langchain.utilities.jira import JiraAPIWrapper [docs]class JiraToolkit(BaseToolkit): """Jira Toolkit.""" tools: List[BaseTool] = [] [docs] @classmethod def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit": actions = jira_api_wrapper.list() tools = [ JiraAction( name=action["name"], description=action["description"], mode=action["mode"], api_wrapper=jira_api_wrapper, ) for action in actions ] return cls(tools=tools) [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return self.tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/jira/toolkit.html
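A minimal sketch for JiraToolkit, not from the source: JiraAPIWrapper reads its credentials from environment variables (commonly JIRA_API_TOKEN, JIRA_USERNAME and JIRA_INSTANCE_URL; check the wrapper for the exact names), and the project key in the prompt is illustrative.

from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit
from langchain.llms import OpenAI
from langchain.utilities.jira import JiraAPIWrapper

jira = JiraAPIWrapper()  # credentials are taken from the environment
toolkit = JiraToolkit.from_jira_api_wrapper(jira)

agent = initialize_agent(
    toolkit.get_tools(),
    OpenAI(temperature=0),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent.run("Create a new issue in project DEMO to track the quarterly report.")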
Source code for langchain.agents.agent_toolkits.azure_cognitive_services.toolkit from __future__ import annotations import sys from typing import List from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools.azure_cognitive_services import ( AzureCogsFormRecognizerTool, AzureCogsImageAnalysisTool, AzureCogsSpeech2TextTool, AzureCogsText2SpeechTool, ) from langchain.tools.base import BaseTool [docs]class AzureCognitiveServicesToolkit(BaseToolkit): """Toolkit for Azure Cognitive Services.""" [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" tools = [ AzureCogsFormRecognizerTool(), AzureCogsSpeech2TextTool(), AzureCogsText2SpeechTool(), ] # TODO: Remove check once azure-ai-vision supports MacOS. if sys.platform.startswith("linux") or sys.platform.startswith("win"): tools.append(AzureCogsImageAnalysisTool()) return tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/azure_cognitive_services/toolkit.html
Source code for langchain.agents.agent_toolkits.spark_sql.base """Spark SQL agent.""" from typing import Any, Dict, List, Optional from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.spark_sql.prompt import SQL_PREFIX, SQL_SUFFIX from langchain.agents.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain [docs]def create_spark_sql_agent( llm: BaseLanguageModel, toolkit: SparkSQLToolkit, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = SQL_PREFIX, suffix: str = SQL_SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = "force", verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a sql agent from an LLM and tools.""" tools = toolkit.get_tools() prefix = prefix.format(top_k=top_k) prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, max_iterations=max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **(agent_executor_kwargs or {}), )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/spark_sql/base.html
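A hedged sketch for create_spark_sql_agent: it assumes a running Spark session and that the SparkSQL wrapper accepts a schema name, as in the LangChain examples of this period; "langchain_example" is an illustrative, pre-existing database.

from pyspark.sql import SparkSession

from langchain.agents.agent_toolkits.spark_sql.base import create_spark_sql_agent
from langchain.agents.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain.chat_models import ChatOpenAI
from langchain.utilities.spark_sql import SparkSQL

spark = SparkSession.builder.getOrCreate()
spark_sql = SparkSQL(schema="langchain_example")  # illustrative, assumed to exist

llm = ChatOpenAI(temperature=0)
toolkit = SparkSQLToolkit(db=spark_sql, llm=llm)
agent_executor = create_spark_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
agent_executor.run("Which tables are available, and what columns does the largest one have?")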
Source code for langchain.agents.agent_toolkits.spark_sql.toolkit """Toolkit for interacting with Spark SQL.""" from typing import List from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.base_language import BaseLanguageModel from langchain.tools import BaseTool from langchain.tools.spark_sql.tool import ( InfoSparkSQLTool, ListSparkSQLTool, QueryCheckerTool, QuerySparkSQLTool, ) from langchain.utilities.spark_sql import SparkSQL [docs]class SparkSQLToolkit(BaseToolkit): """Toolkit for interacting with Spark SQL.""" db: SparkSQL = Field(exclude=True) llm: BaseLanguageModel = Field(exclude=True) class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ QuerySparkSQLTool(db=self.db), InfoSparkSQLTool(db=self.db), ListSparkSQLTool(db=self.db), QueryCheckerTool(db=self.db, llm=self.llm), ]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/spark_sql/toolkit.html
Source code for langchain.agents.agent_toolkits.zapier.toolkit """Zapier Toolkit.""" from typing import List from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool from langchain.tools.zapier.tool import ZapierNLARunAction from langchain.utilities.zapier import ZapierNLAWrapper [docs]class ZapierToolkit(BaseToolkit): """Zapier Toolkit.""" tools: List[BaseTool] = [] [docs] @classmethod def from_zapier_nla_wrapper( cls, zapier_nla_wrapper: ZapierNLAWrapper ) -> "ZapierToolkit": """Create a toolkit from a ZapierNLAWrapper.""" actions = zapier_nla_wrapper.list() tools = [ ZapierNLARunAction( action_id=action["id"], zapier_description=action["description"], params_schema=action["params"], api_wrapper=zapier_nla_wrapper, ) for action in actions ] return cls(tools=tools) [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return self.tools
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/zapier/toolkit.html
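A minimal sketch for ZapierToolkit, not from the source: ZapierNLAWrapper expects a Zapier NLA API key in the environment (ZAPIER_NLA_API_KEY), and the actions exposed depend on what has been enabled in the Zapier account.

from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits.zapier.toolkit import ZapierToolkit
from langchain.llms import OpenAI
from langchain.utilities.zapier import ZapierNLAWrapper

zapier = ZapierNLAWrapper()  # reads ZAPIER_NLA_API_KEY from the environment
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)

agent = initialize_agent(
    toolkit.get_tools(),
    OpenAI(temperature=0),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent.run("Summarize the last email I received and post the summary to Slack.")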
Source code for langchain.agents.agent_toolkits.file_management.toolkit

"""Toolkit for interacting with the local filesystem."""
from __future__ import annotations

from typing import List, Optional

from pydantic import root_validator

from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.file_management.copy import CopyFileTool
from langchain.tools.file_management.delete import DeleteFileTool
from langchain.tools.file_management.file_search import FileSearchTool
from langchain.tools.file_management.list_dir import ListDirectoryTool
from langchain.tools.file_management.move import MoveFileTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool

_FILE_TOOLS = {
    tool_cls.__fields__["name"].default: tool_cls
    for tool_cls in [
        CopyFileTool,
        DeleteFileTool,
        FileSearchTool,
        MoveFileTool,
        ReadFileTool,
        WriteFileTool,
        ListDirectoryTool,
    ]
}


[docs]class FileManagementToolkit(BaseToolkit):
    """Toolkit for interacting with local files."""

    root_dir: Optional[str] = None
    """If specified, all file operations are made relative to root_dir."""
    selected_tools: Optional[List[str]] = None
    """If provided, only provide the selected tools. Defaults to all."""

    @root_validator
    def validate_tools(cls, values: dict) -> dict:
        selected_tools = values.get("selected_tools") or []
        for tool_name in selected_tools:
            if tool_name not in _FILE_TOOLS:
                raise ValueError(
                    f"File Tool of name {tool_name} not supported."
                    f" Permitted tools: {list(_FILE_TOOLS)}"
                )
        return values

[docs]    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        allowed_tools = self.selected_tools or _FILE_TOOLS.keys()
        tools: List[BaseTool] = []
        for tool in allowed_tools:
            tool_cls = _FILE_TOOLS[tool]
            tools.append(tool_cls(root_dir=self.root_dir))  # type: ignore
        return tools


__all__ = ["FileManagementToolkit"]
https://api.python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/file_management/toolkit.html
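A minimal sketch for FileManagementToolkit using a temporary directory as the sandbox; the tool names and argument keys ("read_file", "write_file", "list_directory", file_path, text) are taken from the individual tool classes' defaults and should be checked against _FILE_TOOLS.

from tempfile import TemporaryDirectory

from langchain.agents.agent_toolkits.file_management.toolkit import (
    FileManagementToolkit,
)

working_directory = TemporaryDirectory()

toolkit = FileManagementToolkit(
    root_dir=working_directory.name,  # sandbox all file operations
    selected_tools=["read_file", "write_file", "list_directory"],
)
read_tool, write_tool, list_tool = toolkit.get_tools()

write_tool.run({"file_path": "example.txt", "text": "Hello World!"})
print(list_tool.run({}))                           # -> example.txt
print(read_tool.run({"file_path": "example.txt"}))  # -> Hello World!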
Source code for langchain.agents.conversational_chat.base """An agent designed to hold a conversation in addition to using tools.""" from __future__ import annotations from typing import Any, List, Optional, Sequence, Tuple from pydantic import Field from langchain.agents.agent import Agent, AgentOutputParser from langchain.agents.conversational_chat.output_parser import ConvoOutputParser from langchain.agents.conversational_chat.prompt import ( PREFIX, SUFFIX, TEMPLATE_TOOL_RESPONSE, ) from langchain.agents.utils import validate_tools_single_input from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains import LLMChain from langchain.prompts.base import BasePromptTemplate from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, ) from langchain.schema import ( AgentAction, AIMessage, BaseMessage, BaseOutputParser, HumanMessage, ) from langchain.tools.base import BaseTool [docs]class ConversationalChatAgent(Agent): """An agent designed to hold a conversation in addition to using tools.""" output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser) template_tool_response: str = TEMPLATE_TOOL_RESPONSE @classmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return ConvoOutputParser() @property def _agent_type(self) -> str: raise NotImplementedError @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with.""" return "Thought:" @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: super()._validate_tools(tools) validate_tools_single_input(cls.__name__, tools) [docs] @classmethod def create_prompt( cls, tools: Sequence[BaseTool], system_message: str = PREFIX, human_message: str = SUFFIX, input_variables: Optional[List[str]] = None, output_parser: Optional[BaseOutputParser] = None, ) -> BasePromptTemplate: tool_strings = "\n".join( [f"> {tool.name}: {tool.description}" for tool in tools] ) tool_names = ", ".join([tool.name for tool in tools]) _output_parser = output_parser or cls._get_default_output_parser() format_instructions = human_message.format( format_instructions=_output_parser.get_format_instructions() ) final_prompt = format_instructions.format( tool_names=tool_names, tools=tool_strings ) if input_variables is None: input_variables = ["input", "chat_history", "agent_scratchpad"] messages = [ SystemMessagePromptTemplate.from_template(system_message), MessagesPlaceholder(variable_name="chat_history"), HumanMessagePromptTemplate.from_template(final_prompt), MessagesPlaceholder(variable_name="agent_scratchpad"), ] return ChatPromptTemplate(input_variables=input_variables, messages=messages) def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> List[BaseMessage]: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts: List[BaseMessage] = [] for action, observation in intermediate_steps: thoughts.append(AIMessage(content=action.log)) human_message = HumanMessage( content=self.template_tool_response.format(observation=observation) ) thoughts.append(human_message) return thoughts [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = 
None, system_message: str = PREFIX, human_message: str = SUFFIX, input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) _output_parser = output_parser or cls._get_default_output_parser() prompt = cls.create_prompt( tools, system_message=system_message, human_message=human_message, input_variables=input_variables, output_parser=_output_parser, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html
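A minimal sketch of the usual way this agent is constructed, via initialize_agent with AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION; the SerpAPI tool is illustrative and needs its own API key, and the memory must expose history under the chat_history key expected by the prompt.

from langchain.agents import AgentType, Tool, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.utilities import SerpAPIWrapper

search = SerpAPIWrapper()
tools = [
    Tool(
        name="current_search",
        func=search.run,
        description="Useful for answering questions about current events.",
    )
]

# MessagesPlaceholder(variable_name="chat_history") expects message objects,
# hence memory_key="chat_history" and return_messages=True.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

agent_chain = initialize_agent(
    tools,
    ChatOpenAI(temperature=0),
    agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
)
agent_chain.run(input="Hi, I'm Bob. What's the top news story today?")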
Source code for langchain.agents.openai_functions_agent.base """Module implements an agent that uses OpenAI's APIs function enabled API.""" import json from dataclasses import dataclass from json import JSONDecodeError from typing import Any, List, Optional, Sequence, Tuple, Union from pydantic import root_validator from langchain.agents import BaseSingleActionAgent from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chat_models.openai import ChatOpenAI from langchain.prompts.base import BasePromptTemplate from langchain.prompts.chat import ( BaseMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, ) from langchain.schema import ( AgentAction, AgentFinish, AIMessage, BaseMessage, FunctionMessage, OutputParserException, SystemMessage, ) from langchain.tools import BaseTool from langchain.tools.convert_to_openai import format_tool_to_openai_function @dataclass class _FunctionsAgentAction(AgentAction): message_log: List[BaseMessage] def _convert_agent_action_to_messages( agent_action: AgentAction, observation: str ) -> List[BaseMessage]: """Convert an agent action to a message. This code is used to reconstruct the original AI message from the agent action. Args: agent_action: Agent action to convert. Returns: AIMessage that corresponds to the original tool invocation. """ if isinstance(agent_action, _FunctionsAgentAction): return agent_action.message_log + [ _create_function_message(agent_action, observation) ] else: return [AIMessage(content=agent_action.log)] def _create_function_message( agent_action: AgentAction, observation: str ) -> FunctionMessage: """Convert agent action and observation into a function message. Args: agent_action: the tool invocation request from the agent observation: the result of the tool invocation Returns: FunctionMessage that corresponds to the original tool invocation """ if not isinstance(observation, str): try: content = json.dumps(observation, ensure_ascii=False) except Exception: content = str(observation) else: content = observation return FunctionMessage( name=agent_action.tool, content=content, ) def _format_intermediate_steps( intermediate_steps: List[Tuple[AgentAction, str]], ) -> List[BaseMessage]: """Format intermediate steps. Args: intermediate_steps: Steps the LLM has taken to date, along with observations Returns: list of messages to send to the LLM for the next prediction """ messages = [] for intermediate_step in intermediate_steps: agent_action, observation = intermediate_step messages.extend(_convert_agent_action_to_messages(agent_action, observation)) return messages def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]: """Parse an AI message.""" if not isinstance(message, AIMessage): raise TypeError(f"Expected an AI message got {type(message)}") function_call = message.additional_kwargs.get("function_call", {}) if function_call: function_call = message.additional_kwargs["function_call"] function_name = function_call["name"] try: _tool_input = json.loads(function_call["arguments"]) except JSONDecodeError: raise OutputParserException( f"Could not parse tool input: {function_call} because " f"the `arguments` is not valid JSON." ) # HACK HACK HACK: # The code that encodes tool input into Open AI uses a special variable # name called `__arg1` to handle old style tools that do not expose a # schema and expect a single string argument as an input. 
# We unpack the argument here if it exists. # Open AI does not support passing in a JSON array as an argument. if "__arg1" in _tool_input: tool_input = _tool_input["__arg1"] else: tool_input = _tool_input content_msg = "responded: {content}\n" if message.content else "\n" return _FunctionsAgentAction( tool=function_name, tool_input=tool_input, log=f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n", message_log=[message], ) return AgentFinish(return_values={"output": message.content}, log=message.content) [docs]class OpenAIFunctionsAgent(BaseSingleActionAgent): """An Agent driven by OpenAIs function powered API. Args: llm: This should be an instance of ChatOpenAI, specifically a model that supports using `functions`. tools: The tools this agent has access to. prompt: The prompt for this agent, should support agent_scratchpad as one of the variables. For an easy way to construct this prompt, use `OpenAIFunctionsAgent.create_prompt(...)` """ llm: BaseLanguageModel tools: Sequence[BaseTool] prompt: BasePromptTemplate [docs] def get_allowed_tools(self) -> List[str]: """Get allowed tools.""" return list([t.name for t in self.tools]) @root_validator def validate_llm(cls, values: dict) -> dict: if not isinstance(values["llm"], ChatOpenAI): raise ValueError("Only supported with ChatOpenAI models.") return values @root_validator def validate_prompt(cls, values: dict) -> dict: prompt: BasePromptTemplate = values["prompt"] if "agent_scratchpad" not in prompt.input_variables: raise ValueError( "`agent_scratchpad` should be one of the variables in the prompt, " f"got {prompt.input_variables}" ) return values @property def input_keys(self) -> List[str]: """Get input keys. Input refers to user input here.""" return ["input"] @property def functions(self) -> List[dict]: return [dict(format_tool_to_openai_function(t)) for t in self.tools] [docs] def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations **kwargs: User inputs. Returns: Action specifying what tool to use. """ agent_scratchpad = _format_intermediate_steps(intermediate_steps) selected_inputs = { k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad" } full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad) prompt = self.prompt.format_prompt(**full_inputs) messages = prompt.to_messages() predicted_message = self.llm.predict_messages( messages, functions=self.functions, callbacks=callbacks ) agent_decision = _parse_ai_message(predicted_message) return agent_decision [docs] async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations **kwargs: User inputs. Returns: Action specifying what tool to use. 
""" agent_scratchpad = _format_intermediate_steps(intermediate_steps) selected_inputs = { k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad" } full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad) prompt = self.prompt.format_prompt(**full_inputs) messages = prompt.to_messages() predicted_message = await self.llm.apredict_messages( messages, functions=self.functions, callbacks=callbacks ) agent_decision = _parse_ai_message(predicted_message) return agent_decision [docs] @classmethod def create_prompt( cls, system_message: Optional[SystemMessage] = SystemMessage( content="You are a helpful AI assistant." ), extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None, ) -> BasePromptTemplate: """Create prompt for this agent. Args: system_message: Message to use as the system message that will be the first in the prompt. extra_prompt_messages: Prompt messages that will be placed between the system message and the new human input. Returns: A prompt template to pass into this agent. """ _prompts = extra_prompt_messages or [] messages: List[Union[BaseMessagePromptTemplate, BaseMessage]] if system_message: messages = [system_message] else: messages = [] messages.extend( [ *_prompts, HumanMessagePromptTemplate.from_template("{input}"), MessagesPlaceholder(variable_name="agent_scratchpad"), ] ) return ChatPromptTemplate(messages=messages) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None, system_message: Optional[SystemMessage] = SystemMessage( content="You are a helpful AI assistant." ), **kwargs: Any, ) -> BaseSingleActionAgent: """Construct an agent from an LLM and tools.""" if not isinstance(llm, ChatOpenAI): raise ValueError("Only supported with ChatOpenAI models.") prompt = cls.create_prompt( extra_prompt_messages=extra_prompt_messages, system_message=system_message, ) return cls( llm=llm, prompt=prompt, tools=tools, callback_manager=callback_manager, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html
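A minimal sketch wiring the agent up directly with from_llm_and_tools as defined above; the toy word_length tool is invented for illustration, and the model name assumes a functions-capable ChatOpenAI model.

from langchain.agents import AgentExecutor, Tool
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.chat_models import ChatOpenAI


def word_length(word: str) -> str:
    """Toy tool for illustration: return the length of a word."""
    return str(len(word))


tools = [
    Tool(name="word_length", func=word_length, description="Returns the length of a word.")
]

# Must be a ChatOpenAI model that supports the functions API (e.g. the *-0613 models).
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
agent = OpenAIFunctionsAgent.from_llm_and_tools(llm=llm, tools=tools)
executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
executor.run("How many letters are in the word 'educa'?")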
Source code for langchain.agents.mrkl.base """Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf.""" from __future__ import annotations from typing import Any, Callable, List, NamedTuple, Optional, Sequence from pydantic import Field from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser from langchain.agents.agent_types import AgentType from langchain.agents.mrkl.output_parser import MRKLOutputParser from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.agents.tools import Tool from langchain.agents.utils import validate_tools_single_input from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain.tools.base import BaseTool class ChainConfig(NamedTuple): """Configuration for chain to use in MRKL system. Args: action_name: Name of the action. action: Action function to call. action_description: Description of the action. """ action_name: str action: Callable action_description: str [docs]class ZeroShotAgent(Agent): """Agent for the MRKL chain.""" output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser) @classmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return MRKLOutputParser() @property def _agent_type(self) -> str: """Return Identifier of agent type.""" return AgentType.ZERO_SHOT_REACT_DESCRIPTION @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with.""" return "Thought:" [docs] @classmethod def create_prompt( cls, tools: Sequence[BaseTool], prefix: str = PREFIX, suffix: str = SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, ) -> PromptTemplate: """Create prompt in the style of the zero shot agent. Args: tools: List of tools the agent will have access to, used to format the prompt. prefix: String to put before the list of tools. suffix: String to put after the list of tools. input_variables: List of input variables the final prompt will expect. Returns: A PromptTemplate with the template assembled from the pieces here. 
""" tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) tool_names = ", ".join([tool.name for tool in tools]) format_instructions = format_instructions.format(tool_names=tool_names) template = "\n\n".join([prefix, tool_strings, format_instructions, suffix]) if input_variables is None: input_variables = ["input", "agent_scratchpad"] return PromptTemplate(template=template, input_variables=input_variables) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, prefix: str = PREFIX, suffix: str = SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt( tools, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser() return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, ) @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: validate_tools_single_input(cls.__name__, tools) if len(tools) == 0: raise ValueError( f"Got no tools for {cls.__name__}. At least one tool must be provided." ) for tool in tools: if tool.description is None: raise ValueError( f"Got a tool {tool.name} without a description. For this agent, " f"a description must always be provided." ) super()._validate_tools(tools) [docs]class MRKLChain(AgentExecutor): """Chain that implements the MRKL system. Example: .. code-block:: python from langchain import OpenAI, MRKLChain from langchain.chains.mrkl.base import ChainConfig llm = OpenAI(temperature=0) prompt = PromptTemplate(...) chains = [...] mrkl = MRKLChain.from_chains(llm=llm, prompt=prompt) """ [docs] @classmethod def from_chains( cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any ) -> AgentExecutor: """User friendly way to initialize the MRKL chain. This is intended to be an easy way to get up and running with the MRKL chain. Args: llm: The LLM to use as the agent LLM. chains: The chains the MRKL system has access to. **kwargs: parameters to be passed to initialization. Returns: An initialized MRKL chain. Example: .. code-block:: python from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, MRKLChain from langchain.chains.mrkl.base import ChainConfig llm = OpenAI(temperature=0) search = SerpAPIWrapper() llm_math_chain = LLMMathChain(llm=llm) chains = [ ChainConfig( action_name = "Search", action=search.search, action_description="useful for searching" ), ChainConfig( action_name="Calculator", action=llm_math_chain.run, action_description="useful for doing math" ) ] mrkl = MRKLChain.from_chains(llm, chains) """ tools = [ Tool( name=c.action_name, func=c.action, description=c.action_description, ) for c in chains ] agent = ZeroShotAgent.from_llm_and_tools(llm, tools) return cls(agent=agent, tools=tools, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
Source code for langchain.agents.react.base """Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf.""" from typing import Any, List, Optional, Sequence from pydantic import Field from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser from langchain.agents.agent_types import AgentType from langchain.agents.react.output_parser import ReActOutputParser from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT from langchain.agents.react.wiki_prompt import WIKI_PROMPT from langchain.agents.tools import Tool from langchain.agents.utils import validate_tools_single_input from langchain.base_language import BaseLanguageModel from langchain.docstore.base import Docstore from langchain.docstore.document import Document from langchain.prompts.base import BasePromptTemplate from langchain.tools.base import BaseTool class ReActDocstoreAgent(Agent): """Agent for the ReAct chain.""" output_parser: AgentOutputParser = Field(default_factory=ReActOutputParser) @classmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return ReActOutputParser() @property def _agent_type(self) -> str: """Return Identifier of agent type.""" return AgentType.REACT_DOCSTORE @classmethod def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: """Return default prompt.""" return WIKI_PROMPT @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: validate_tools_single_input(cls.__name__, tools) super()._validate_tools(tools) if len(tools) != 2: raise ValueError(f"Exactly two tools must be specified, but got {tools}") tool_names = {tool.name for tool in tools} if tool_names != {"Lookup", "Search"}: raise ValueError( f"Tool names should be Lookup and Search, got {tool_names}" ) @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Observation: " @property def _stop(self) -> List[str]: return ["\nObservation:"] @property def llm_prefix(self) -> str: """Prefix to append the LLM call with.""" return "Thought:" class DocstoreExplorer: """Class to assist with exploration of a document store.""" def __init__(self, docstore: Docstore): """Initialize with a docstore, and set initial document to None.""" self.docstore = docstore self.document: Optional[Document] = None self.lookup_str = "" self.lookup_index = 0 def search(self, term: str) -> str: """Search for a term in the docstore, and if found save.""" result = self.docstore.search(term) if isinstance(result, Document): self.document = result return self._summary else: self.document = None return result def lookup(self, term: str) -> str: """Lookup a term in document (if saved).""" if self.document is None: raise ValueError("Cannot lookup without a successful search first") if term.lower() != self.lookup_str: self.lookup_str = term.lower() self.lookup_index = 0 else: self.lookup_index += 1 lookups = [p for p in self._paragraphs if self.lookup_str in p.lower()] if len(lookups) == 0: return "No Results" elif self.lookup_index >= len(lookups): return "No More Results" else: result_prefix = f"(Result {self.lookup_index + 1}/{len(lookups)})" return f"{result_prefix} {lookups[self.lookup_index]}" @property def _summary(self) -> str: return self._paragraphs[0] @property def _paragraphs(self) -> List[str]: if self.document is None: raise ValueError("Cannot get paragraphs without a document") return self.document.page_content.split("\n\n") [docs]class ReActTextWorldAgent(ReActDocstoreAgent): """Agent for the ReAct TextWorld 
chain.""" [docs] @classmethod def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: """Return default prompt.""" return TEXTWORLD_PROMPT @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: validate_tools_single_input(cls.__name__, tools) super()._validate_tools(tools) if len(tools) != 1: raise ValueError(f"Exactly one tool must be specified, but got {tools}") tool_names = {tool.name for tool in tools} if tool_names != {"Play"}: raise ValueError(f"Tool name should be Play, got {tool_names}") [docs]class ReActChain(AgentExecutor): """Chain that implements the ReAct paper. Example: .. code-block:: python from langchain import ReActChain, OpenAI react = ReAct(llm=OpenAI()) """ def __init__(self, llm: BaseLanguageModel, docstore: Docstore, **kwargs: Any): """Initialize with the LLM and a docstore.""" docstore_explorer = DocstoreExplorer(docstore) tools = [ Tool( name="Search", func=docstore_explorer.search, description="Search for a term in the docstore.", ), Tool( name="Lookup", func=docstore_explorer.lookup, description="Lookup a term in the docstore.", ), ] agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools) super().__init__(agent=agent, tools=tools, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/base.html
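A minimal sketch for ReActChain using the Wikipedia docstore; it assumes the wikipedia package is installed, and the question is just an example of the multi-hop lookups this chain is designed for.

from langchain.agents.react.base import ReActChain
from langchain.docstore.wikipedia import Wikipedia
from langchain.llms import OpenAI

react = ReActChain(llm=OpenAI(temperature=0), docstore=Wikipedia(), verbose=True)
react.run(
    "Author David Chanoff has collaborated with a U.S. Navy admiral who served "
    "as the ambassador to the United Kingdom. Who is the admiral?"
)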
Source code for langchain.agents.self_ask_with_search.base """Chain that does self ask with search.""" from typing import Any, Sequence, Union from pydantic import Field from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser from langchain.agents.agent_types import AgentType from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputParser from langchain.agents.self_ask_with_search.prompt import PROMPT from langchain.agents.tools import Tool from langchain.agents.utils import validate_tools_single_input from langchain.base_language import BaseLanguageModel from langchain.prompts.base import BasePromptTemplate from langchain.tools.base import BaseTool from langchain.utilities.google_serper import GoogleSerperAPIWrapper from langchain.utilities.serpapi import SerpAPIWrapper class SelfAskWithSearchAgent(Agent): """Agent for the self-ask-with-search paper.""" output_parser: AgentOutputParser = Field(default_factory=SelfAskOutputParser) @classmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return SelfAskOutputParser() @property def _agent_type(self) -> str: """Return Identifier of agent type.""" return AgentType.SELF_ASK_WITH_SEARCH @classmethod def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: """Prompt does not depend on tools.""" return PROMPT @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: validate_tools_single_input(cls.__name__, tools) super()._validate_tools(tools) if len(tools) != 1: raise ValueError(f"Exactly one tool must be specified, but got {tools}") tool_names = {tool.name for tool in tools} if tool_names != {"Intermediate Answer"}: raise ValueError( f"Tool name should be Intermediate Answer, got {tool_names}" ) @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Intermediate answer: " @property def llm_prefix(self) -> str: """Prefix to append the LLM call with.""" return "" [docs]class SelfAskWithSearchChain(AgentExecutor): """Chain that does self ask with search. Example: .. code-block:: python from langchain import SelfAskWithSearchChain, OpenAI, GoogleSerperAPIWrapper search_chain = GoogleSerperAPIWrapper() self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain) """ def __init__( self, llm: BaseLanguageModel, search_chain: Union[GoogleSerperAPIWrapper, SerpAPIWrapper], **kwargs: Any, ): """Initialize with just an LLM and a search chain.""" search_tool = Tool( name="Intermediate Answer", func=search_chain.run, coroutine=search_chain.arun, description="Search", ) agent = SelfAskWithSearchAgent.from_llm_and_tools(llm, [search_tool]) super().__init__(agent=agent, tools=[search_tool], **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/self_ask_with_search/base.html
Source code for langchain.agents.conversational.base """An agent designed to hold a conversation in addition to using tools.""" from __future__ import annotations from typing import Any, List, Optional, Sequence from pydantic import Field from langchain.agents.agent import Agent, AgentOutputParser from langchain.agents.agent_types import AgentType from langchain.agents.conversational.output_parser import ConvoOutputParser from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.agents.utils import validate_tools_single_input from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain.tools.base import BaseTool [docs]class ConversationalAgent(Agent): """An agent designed to hold a conversation in addition to using tools.""" ai_prefix: str = "AI" output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser) @classmethod def _get_default_output_parser( cls, ai_prefix: str = "AI", **kwargs: Any ) -> AgentOutputParser: return ConvoOutputParser(ai_prefix=ai_prefix) @property def _agent_type(self) -> str: """Return Identifier of agent type.""" return AgentType.CONVERSATIONAL_REACT_DESCRIPTION @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with.""" return "Thought:" [docs] @classmethod def create_prompt( cls, tools: Sequence[BaseTool], prefix: str = PREFIX, suffix: str = SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, ai_prefix: str = "AI", human_prefix: str = "Human", input_variables: Optional[List[str]] = None, ) -> PromptTemplate: """Create prompt in the style of the zero shot agent. Args: tools: List of tools the agent will have access to, used to format the prompt. prefix: String to put before the list of tools. suffix: String to put after the list of tools. ai_prefix: String to use before AI output. human_prefix: String to use before human output. input_variables: List of input variables the final prompt will expect. Returns: A PromptTemplate with the template assembled from the pieces here. 
""" tool_strings = "\n".join( [f"> {tool.name}: {tool.description}" for tool in tools] ) tool_names = ", ".join([tool.name for tool in tools]) format_instructions = format_instructions.format( tool_names=tool_names, ai_prefix=ai_prefix, human_prefix=human_prefix ) template = "\n\n".join([prefix, tool_strings, format_instructions, suffix]) if input_variables is None: input_variables = ["input", "chat_history", "agent_scratchpad"] return PromptTemplate(template=template, input_variables=input_variables) @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: super()._validate_tools(tools) validate_tools_single_input(cls.__name__, tools) [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, prefix: str = PREFIX, suffix: str = SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, ai_prefix: str = "AI", human_prefix: str = "Human", input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt( tools, ai_prefix=ai_prefix, human_prefix=human_prefix, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser( ai_prefix=ai_prefix ) return cls( llm_chain=llm_chain, allowed_tools=tool_names, ai_prefix=ai_prefix, output_parser=_output_parser, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
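A minimal sketch of the common construction path for this agent, initialize_agent with AgentType.CONVERSATIONAL_REACT_DESCRIPTION and a buffer memory keyed to chat_history; the SerpAPI search tool is illustrative.

from langchain.agents import AgentType, Tool, initialize_agent
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.utilities import SerpAPIWrapper

search = SerpAPIWrapper()
tools = [
    Tool(
        name="Current Search",
        func=search.run,
        description="useful for answering questions about current events",
    )
]

# The prompt contains a {chat_history} variable, so the memory key must match.
memory = ConversationBufferMemory(memory_key="chat_history")

agent_chain = initialize_agent(
    tools,
    OpenAI(temperature=0),
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
)
agent_chain.run(input="Hi, I'm Bob. Can you look up today's weather in Boston?")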
Source code for langchain.chains.loading """Functionality for loading chains.""" import json from pathlib import Path from typing import Any, Union import yaml from langchain.chains.api.base import APIChain from langchain.chains.base import Chain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.graph_qa.cypher import GraphCypherQAChain from langchain.chains.hyde.base import HypotheticalDocumentEmbedder from langchain.chains.llm import LLMChain from langchain.chains.llm_bash.base import LLMBashChain from langchain.chains.llm_checker.base import LLMCheckerChain from langchain.chains.llm_math.base import LLMMathChain from langchain.chains.llm_requests import LLMRequestsChain from langchain.chains.pal.base import PALChain from langchain.chains.qa_with_sources.base import QAWithSourcesChain from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA from langchain.chains.sql_database.base import SQLDatabaseChain from langchain.llms.loading import load_llm, load_llm_from_config from langchain.prompts.loading import ( _load_output_parser, load_prompt, load_prompt_from_config, ) from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/" def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain: """Load LLM chain from config dict.""" if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") _load_output_parser(config) return LLMChain(llm=llm, prompt=prompt, **config) def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder: """Load hypothetical document embedder chain from config dict.""" if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "embeddings" in kwargs: embeddings = kwargs.pop("embeddings") else: raise ValueError("`embeddings` must be present.") return HypotheticalDocumentEmbedder( llm_chain=llm_chain, base_embeddings=embeddings, **config ) def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif 
"document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) else: raise ValueError( "One of `document_prompt` or `document_prompt_path` must be present." ) return StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, **config ) def _load_map_reduce_documents_chain( config: dict, **kwargs: Any ) -> MapReduceDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "combine_document_chain" in config: combine_document_chain_config = config.pop("combine_document_chain") combine_document_chain = load_chain_from_config(combine_document_chain_config) elif "combine_document_chain_path" in config: combine_document_chain = load_chain(config.pop("combine_document_chain_path")) else: raise ValueError( "One of `combine_document_chain` or " "`combine_document_chain_path` must be present." ) if "collapse_document_chain" in config: collapse_document_chain_config = config.pop("collapse_document_chain") if collapse_document_chain_config is None: collapse_document_chain = None else: collapse_document_chain = load_chain_from_config( collapse_document_chain_config ) elif "collapse_document_chain_path" in config: collapse_document_chain = load_chain(config.pop("collapse_document_chain_path")) return MapReduceDocumentsChain( llm_chain=llm_chain, combine_document_chain=combine_document_chain, collapse_document_chain=collapse_document_chain, **config, ) def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain: llm_chain = None if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) # llm attribute is deprecated in favor of llm_chain, here to support old configs elif "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) # llm_path attribute is deprecated in favor of llm_chain_path, # its to support old configs elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) if llm_chain: return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config) else: return LLMBashChain(llm=llm, prompt=prompt, **config) def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "create_draft_answer_prompt" in config: create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt") create_draft_answer_prompt = load_prompt_from_config( create_draft_answer_prompt_config ) elif "create_draft_answer_prompt_path" in config: create_draft_answer_prompt = load_prompt( config.pop("create_draft_answer_prompt_path") ) if "list_assertions_prompt" in config: list_assertions_prompt_config = 
config.pop("list_assertions_prompt") list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config) elif "list_assertions_prompt_path" in config: list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path")) if "check_assertions_prompt" in config: check_assertions_prompt_config = config.pop("check_assertions_prompt") check_assertions_prompt = load_prompt_from_config( check_assertions_prompt_config ) elif "check_assertions_prompt_path" in config: check_assertions_prompt = load_prompt( config.pop("check_assertions_prompt_path") ) if "revised_answer_prompt" in config: revised_answer_prompt_config = config.pop("revised_answer_prompt") revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config) elif "revised_answer_prompt_path" in config: revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path")) return LLMCheckerChain( llm=llm, create_draft_answer_prompt=create_draft_answer_prompt, list_assertions_prompt=list_assertions_prompt, check_assertions_prompt=check_assertions_prompt, revised_answer_prompt=revised_answer_prompt, **config, ) def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain: llm_chain = None if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) # llm attribute is deprecated in favor of llm_chain, here to support old configs elif "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) # llm_path attribute is deprecated in favor of llm_chain_path, # its to support old configs elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) if llm_chain: return LLMMathChain(llm_chain=llm_chain, prompt=prompt, **config) else: return LLMMathChain(llm=llm, prompt=prompt, **config) def _load_map_rerank_documents_chain( config: dict, **kwargs: Any ) -> MapRerankDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") return MapRerankDocumentsChain(llm_chain=llm_chain, **config) def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain: llm_chain = None if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) # llm attribute is deprecated in favor of llm_chain, here to support old configs elif "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) # llm_path attribute is deprecated in favor of llm_chain_path, # its to support old configs elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be 
present.") if llm_chain: return PALChain(llm_chain=llm_chain, prompt=prompt, **config) else: return PALChain(llm=llm, prompt=prompt, **config) def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain: if "initial_llm_chain" in config: initial_llm_chain_config = config.pop("initial_llm_chain") initial_llm_chain = load_chain_from_config(initial_llm_chain_config) elif "initial_llm_chain_path" in config: initial_llm_chain = load_chain(config.pop("initial_llm_chain_path")) else: raise ValueError( "One of `initial_llm_chain` or `initial_llm_chain_config` must be present." ) if "refine_llm_chain" in config: refine_llm_chain_config = config.pop("refine_llm_chain") refine_llm_chain = load_chain_from_config(refine_llm_chain_config) elif "refine_llm_chain_path" in config: refine_llm_chain = load_chain(config.pop("refine_llm_chain_path")) else: raise ValueError( "One of `refine_llm_chain` or `refine_llm_chain_config` must be present." ) if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) return RefineDocumentsChain( initial_llm_chain=initial_llm_chain, refine_llm_chain=refine_llm_chain, document_prompt=document_prompt, **config, ) def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain: if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config) def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain: if "database" in kwargs: database = kwargs.pop("database") else: raise ValueError("`database` must be present.") if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) else: prompt = None return SQLDatabaseChain.from_llm(llm, database, prompt=prompt, **config) def _load_vector_db_qa_with_sources_chain( config: dict, **kwargs: Any ) -> VectorDBQAWithSourcesChain: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." 
) return VectorDBQAWithSourcesChain( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_retrieval_qa(config: dict, **kwargs: Any) -> RetrievalQA: if "retriever" in kwargs: retriever = kwargs.pop("retriever") else: raise ValueError("`retriever` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return RetrievalQA( combine_documents_chain=combine_documents_chain, retriever=retriever, **config, ) def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQA( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain: if "graph" in kwargs: graph = kwargs.pop("graph") else: raise ValueError("`graph` must be present.") if "cypher_generation_chain" in config: cypher_generation_chain_config = config.pop("cypher_generation_chain") cypher_generation_chain = load_chain_from_config(cypher_generation_chain_config) else: raise ValueError("`cypher_generation_chain` must be present.") if "qa_chain" in config: qa_chain_config = config.pop("qa_chain") qa_chain = load_chain_from_config(qa_chain_config) else: raise ValueError("`qa_chain` must be present.") return GraphCypherQAChain( graph=graph, cypher_generation_chain=cypher_generation_chain, qa_chain=qa_chain, **config, ) def _load_api_chain(config: dict, **kwargs: Any) -> APIChain: if "api_request_chain" in config: api_request_chain_config = config.pop("api_request_chain") api_request_chain = load_chain_from_config(api_request_chain_config) elif "api_request_chain_path" in config: api_request_chain = load_chain(config.pop("api_request_chain_path")) else: raise ValueError( "One of `api_request_chain` or `api_request_chain_path` must be present." ) if "api_answer_chain" in config: api_answer_chain_config = config.pop("api_answer_chain") api_answer_chain = load_chain_from_config(api_answer_chain_config) elif "api_answer_chain_path" in config: api_answer_chain = load_chain(config.pop("api_answer_chain_path")) else: raise ValueError( "One of `api_answer_chain` or `api_answer_chain_path` must be present." 
) if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") else: raise ValueError("`requests_wrapper` must be present.") return APIChain( api_request_chain=api_request_chain, api_answer_chain=api_answer_chain, requests_wrapper=requests_wrapper, **config, ) def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") return LLMRequestsChain( llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config ) else: return LLMRequestsChain(llm_chain=llm_chain, **config) type_to_loader_dict = { "api_chain": _load_api_chain, "hyde_chain": _load_hyde_chain, "llm_chain": _load_llm_chain, "llm_bash_chain": _load_llm_bash_chain, "llm_checker_chain": _load_llm_checker_chain, "llm_math_chain": _load_llm_math_chain, "llm_requests_chain": _load_llm_requests_chain, "pal_chain": _load_pal_chain, "qa_with_sources_chain": _load_qa_with_sources_chain, "stuff_documents_chain": _load_stuff_documents_chain, "map_reduce_documents_chain": _load_map_reduce_documents_chain, "map_rerank_documents_chain": _load_map_rerank_documents_chain, "refine_documents_chain": _load_refine_documents_chain, "sql_database_chain": _load_sql_database_chain, "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain, "vector_db_qa": _load_vector_db_qa, "retrieval_qa": _load_retrieval_qa, "graph_cypher_chain": _load_graph_cypher_chain, } def load_chain_from_config(config: dict, **kwargs: Any) -> Chain: """Load chain from Config Dict.""" if "_type" not in config: raise ValueError("Must specify a chain Type in config") config_type = config.pop("_type") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} chain not supported") chain_loader = type_to_loader_dict[config_type] return chain_loader(config, **kwargs) [docs]def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain: """Unified method for loading a chain from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs ): return hub_result else: return _load_chain_from_file(path, **kwargs) def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain: """Load chain from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Override default 'verbose' and 'memory' for the chain if "verbose" in kwargs: config["verbose"] = kwargs.pop("verbose") if "memory" in kwargs: config["memory"] = kwargs.pop("memory") # Load the chain from the config now. return load_chain_from_config(config, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
35afffd0-a42a-42ee-ac6f-92b5491183fb
Source code for langchain.chains.llm """Chain that just formats a prompt and calls an LLM.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional, Sequence, Tuple, Union from pydantic import Extra, Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks, ) from langchain.chains.base import Chain from langchain.input import get_colored_text from langchain.load.dump import dumpd from langchain.prompts.base import BasePromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( BaseLLMOutputParser, LLMResult, NoOpOutputParser, PromptValue, ) [docs]class LLMChain(Chain): """Chain to run queries against LLMs. Example: .. code-block:: python from langchain import LLMChain, OpenAI, PromptTemplate prompt_template = "Tell me a {adjective} joke" prompt = PromptTemplate( input_variables=["adjective"], template=prompt_template ) llm = LLMChain(llm=OpenAI(), prompt=prompt) """ @property def lc_serializable(self) -> bool: return True prompt: BasePromptTemplate """Prompt object to use.""" llm: BaseLanguageModel """Language model to call.""" output_key: str = "text" #: :meta private: output_parser: BaseLLMOutputParser = Field(default_factory=NoOpOutputParser) """Output parser to use. Defaults to one that takes the most likely string but does not change it otherwise.""" return_final_only: bool = True """Whether to return only the final parsed result. Defaults to True. If false, will return a bunch of extra information about the generation.""" llm_kwargs: dict = Field(default_factory=dict) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Will be whatever keys the prompt expects. :meta private: """ return self.prompt.input_variables @property def output_keys(self) -> List[str]: """Will always return text key. 
:meta private: """ if self.return_final_only: return [self.output_key] else: return [self.output_key, "full_generation"] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: response = self.generate([inputs], run_manager=run_manager) return self.create_outputs(response)[0] [docs] def generate( self, input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> LLMResult: """Generate LLM result from inputs.""" prompts, stop = self.prep_prompts(input_list, run_manager=run_manager) return self.llm.generate_prompt( prompts, stop, callbacks=run_manager.get_child() if run_manager else None, **self.llm_kwargs, ) [docs] async def agenerate( self, input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> LLMResult: """Generate LLM result from inputs.""" prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager) return await self.llm.agenerate_prompt( prompts, stop, callbacks=run_manager.get_child() if run_manager else None, **self.llm_kwargs, ) [docs] def prep_prompts( self, input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Tuple[List[PromptValue], Optional[List[str]]]: """Prepare prompts from inputs.""" stop = None if "stop" in input_list[0]: stop = input_list[0]["stop"] prompts = [] for inputs in input_list: selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} prompt = self.prompt.format_prompt(**selected_inputs) _colored_text = get_colored_text(prompt.to_string(), "green") _text = "Prompt after formatting:\n" + _colored_text if run_manager: run_manager.on_text(_text, end="\n", verbose=self.verbose) if "stop" in inputs and inputs["stop"] != stop: raise ValueError( "If `stop` is present in any inputs, should be present in all." ) prompts.append(prompt) return prompts, stop [docs] async def aprep_prompts( self, input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Tuple[List[PromptValue], Optional[List[str]]]: """Prepare prompts from inputs.""" stop = None if "stop" in input_list[0]: stop = input_list[0]["stop"] prompts = [] for inputs in input_list: selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} prompt = self.prompt.format_prompt(**selected_inputs) _colored_text = get_colored_text(prompt.to_string(), "green") _text = "Prompt after formatting:\n" + _colored_text if run_manager: await run_manager.on_text(_text, end="\n", verbose=self.verbose) if "stop" in inputs and inputs["stop"] != stop: raise ValueError( "If `stop` is present in any inputs, should be present in all." 
) prompts.append(prompt) return prompts, stop [docs] def apply( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> List[Dict[str, str]]: """Utilize the LLM generate method for speed gains.""" callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) run_manager = callback_manager.on_chain_start( dumpd(self), {"input_list": input_list}, ) try: response = self.generate(input_list, run_manager=run_manager) except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise e outputs = self.create_outputs(response) run_manager.on_chain_end({"outputs": outputs}) return outputs [docs] async def aapply( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> List[Dict[str, str]]: """Utilize the LLM generate method for speed gains.""" callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) run_manager = await callback_manager.on_chain_start( dumpd(self), {"input_list": input_list}, ) try: response = await self.agenerate(input_list, run_manager=run_manager) except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise e outputs = self.create_outputs(response) await run_manager.on_chain_end({"outputs": outputs}) return outputs @property def _run_output_key(self) -> str: return self.output_key [docs] def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]: """Create outputs from response.""" result = [ # Get the text of the top generated string. { self.output_key: self.output_parser.parse_result(generation), "full_generation": generation, } for generation in llm_result.generations ] if self.return_final_only: result = [{self.output_key: r[self.output_key]} for r in result] return result async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: response = await self.agenerate([inputs], run_manager=run_manager) return self.create_outputs(response)[0] [docs] def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str: """Format prompt with kwargs and pass to LLM. Args: callbacks: Callbacks to pass to LLMChain **kwargs: Keys to pass to prompt template. Returns: Completion from LLM. Example: .. code-block:: python completion = llm.predict(adjective="funny") """ return self(kwargs, callbacks=callbacks)[self.output_key] [docs] async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str: """Format prompt with kwargs and pass to LLM. Args: callbacks: Callbacks to pass to LLMChain **kwargs: Keys to pass to prompt template. Returns: Completion from LLM. Example: .. code-block:: python completion = llm.predict(adjective="funny") """ return (await self.acall(kwargs, callbacks=callbacks))[self.output_key] [docs] def predict_and_parse( self, callbacks: Callbacks = None, **kwargs: Any ) -> Union[str, List[str], Dict[str, Any]]: """Call predict and then parse the results.""" warnings.warn( "The predict_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = self.predict(callbacks=callbacks, **kwargs) if self.prompt.output_parser is not None: return self.prompt.output_parser.parse(result) else: return result [docs] async def apredict_and_parse( self, callbacks: Callbacks = None, **kwargs: Any ) -> Union[str, List[str], Dict[str, str]]: """Call apredict and then parse the results.""" warnings.warn( "The apredict_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." 
) result = await self.apredict(callbacks=callbacks, **kwargs) if self.prompt.output_parser is not None: return self.prompt.output_parser.parse(result) else: return result [docs] def apply_and_parse( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> Sequence[Union[str, List[str], Dict[str, str]]]: """Call apply and then parse the results.""" warnings.warn( "The apply_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = self.apply(input_list, callbacks=callbacks) return self._parse_generation(result) def _parse_generation( self, generation: List[Dict[str, str]] ) -> Sequence[Union[str, List[str], Dict[str, str]]]: if self.prompt.output_parser is not None: return [ self.prompt.output_parser.parse(res[self.output_key]) for res in generation ] else: return generation [docs] async def aapply_and_parse( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> Sequence[Union[str, List[str], Dict[str, str]]]: """Call apply and then parse the results.""" warnings.warn( "The aapply_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = await self.aapply(input_list, callbacks=callbacks) return self._parse_generation(result) @property def _chain_type(self) -> str: return "llm_chain" [docs] @classmethod def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain: """Create LLMChain from LLM and template.""" prompt_template = PromptTemplate.from_template(template) return cls(llm=llm, prompt=prompt_template)
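A short usage sketch for `LLMChain` (illustrative; assumes an `OPENAI_API_KEY` is set):

.. code-block:: python

    from langchain.chains import LLMChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate(
        input_variables=["adjective"], template="Tell me a {adjective} joke."
    )
    chain = LLMChain(llm=OpenAI(temperature=0.9), prompt=prompt)

    # predict() formats the prompt from keyword arguments and returns the
    # parsed text output (the "text" output key).
    print(chain.predict(adjective="funny"))

    # apply() batches several inputs through a single generate() call.
    results = chain.apply([{"adjective": "dry"}, {"adjective": "silly"}])
    print([r["text"] for r in results])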
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm.html
98123f88-6ba0-4e8b-8788-535a5f24213d
Source code for langchain.chains.moderation """Pass input through a moderation endpoint.""" from typing import Any, Dict, List, Optional from pydantic import root_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.utils import get_from_dict_or_env [docs]class OpenAIModerationChain(Chain): """Pass input through a moderation endpoint. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.chains import OpenAIModerationChain moderation = OpenAIModerationChain() """ client: Any #: :meta private: model_name: Optional[str] = None """Moderation model name to use.""" error: bool = False """Whether or not to error if bad content was found.""" input_key: str = "input" #: :meta private: output_key: str = "output" #: :meta private: openai_api_key: Optional[str] = None openai_organization: Optional[str] = None @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization values["client"] = openai.Moderation except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _moderate(self, text: str, results: dict) -> str: if results["flagged"]: error_str = "Text was found that violates OpenAI's content policy." if self.error: raise ValueError(error_str) else: return error_str return text def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: text = inputs[self.input_key] results = self.client.create(text) output = self._moderate(text, results["results"][0]) return {self.output_key: output}
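A minimal sketch of using the moderation chain (illustrative; requires the `openai` package and an `OPENAI_API_KEY`):

.. code-block:: python

    from langchain.chains import OpenAIModerationChain

    # By default the chain returns a policy-violation message for flagged text.
    moderation = OpenAIModerationChain()
    print(moderation.run("This is a perfectly harmless sentence."))

    # With error=True the chain raises a ValueError instead of returning text.
    strict_moderation = OpenAIModerationChain(error=True)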
https://api.python.langchain.com/en/latest/_modules/langchain/chains/moderation.html
8ac39ea1-1c45-4fb3-a96f-93470fcd4087
Source code for langchain.chains.transform
"""Chain that runs an arbitrary python function."""
from typing import Callable, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain


[docs]class TransformChain(Chain):
    """Chain that transforms chain output by running an arbitrary function.

    Example:
        .. code-block:: python

            from langchain import TransformChain

            transform_chain = TransformChain(
                input_variables=["text"],
                output_variables=["entities"],
                transform=func,
            )
    """

    input_variables: List[str]
    output_variables: List[str]
    transform: Callable[[Dict[str, str]], Dict[str, str]]

    @property
    def input_keys(self) -> List[str]:
        """Expect input keys.

        :meta private:
        """
        return self.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Return output keys.

        :meta private:
        """
        return self.output_variables

    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        return self.transform(inputs)
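A runnable sketch of `TransformChain` with a concrete transform function (illustrative; no API key needed, since the chain only applies a Python callable):

.. code-block:: python

    from langchain.chains import TransformChain

    def clean_text(inputs: dict) -> dict:
        # Collapse whitespace in the incoming text and report its length.
        text = " ".join(inputs["text"].split())
        return {"clean_text": text, "num_chars": str(len(text))}

    transform_chain = TransformChain(
        input_variables=["text"],
        output_variables=["clean_text", "num_chars"],
        transform=clean_text,  # pass the callable itself, not its result
    )
    print(transform_chain({"text": "  some   unevenly    spaced  text "}))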
https://api.python.langchain.com/en/latest/_modules/langchain/chains/transform.html
802fc638-0fb2-410b-b35e-b83a6e540a8d
Source code for langchain.chains.sequential """Chain pipeline where the outputs of one step feed directly into next.""" from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.input import get_color_mapping [docs]class SequentialChain(Chain): """Chain where the outputs of one chain feed directly into next.""" chains: List[Chain] input_variables: List[str] output_variables: List[str] #: :meta private: return_all: bool = False class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Return expected input keys to the chain. :meta private: """ return self.input_variables @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return self.output_variables @root_validator(pre=True) def validate_chains(cls, values: Dict) -> Dict: """Validate that the correct inputs exist for all chains.""" chains = values["chains"] input_variables = values["input_variables"] memory_keys = list() if "memory" in values and values["memory"] is not None: """Validate that prompt input variables are consistent.""" memory_keys = values["memory"].memory_variables if set(input_variables).intersection(set(memory_keys)): overlapping_keys = set(input_variables) & set(memory_keys) raise ValueError( f"The the input key(s) {''.join(overlapping_keys)} are found " f"in the Memory keys ({memory_keys}) - please use input and " f"memory keys that don't overlap." ) known_variables = set(input_variables + memory_keys) for chain in chains: missing_vars = set(chain.input_keys).difference(known_variables) if missing_vars: raise ValueError( f"Missing required input keys: {missing_vars}, " f"only had {known_variables}" ) overlapping_keys = known_variables.intersection(chain.output_keys) if overlapping_keys: raise ValueError( f"Chain returned keys that already exist: {overlapping_keys}" ) known_variables |= set(chain.output_keys) if "output_variables" not in values: if values.get("return_all", False): output_keys = known_variables.difference(input_variables) else: output_keys = chains[-1].output_keys values["output_variables"] = output_keys else: missing_vars = set(values["output_variables"]).difference(known_variables) if missing_vars: raise ValueError( f"Expected output variables that were not found: {missing_vars}." 
) return values def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: known_values = inputs.copy() _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() for i, chain in enumerate(self.chains): callbacks = _run_manager.get_child() outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks) known_values.update(outputs) return {k: known_values[k] for k in self.output_variables} async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: known_values = inputs.copy() _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() for i, chain in enumerate(self.chains): outputs = await chain.acall( known_values, return_only_outputs=True, callbacks=callbacks ) known_values.update(outputs) return {k: known_values[k] for k in self.output_variables} [docs]class SimpleSequentialChain(Chain): """Simple chain where the outputs of one step feed directly into next.""" chains: List[Chain] strip_outputs: bool = False input_key: str = "input" #: :meta private: output_key: str = "output" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] @root_validator() def validate_chains(cls, values: Dict) -> Dict: """Validate that chains are all single input/output.""" for chain in values["chains"]: if len(chain.input_keys) != 1: raise ValueError( "Chains used in SimplePipeline should all have one input, got " f"{chain} with {len(chain.input_keys)} inputs." ) if len(chain.output_keys) != 1: raise ValueError( "Chains used in SimplePipeline should all have one output, got " f"{chain} with {len(chain.output_keys)} outputs." ) return values def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _input = inputs[self.input_key] color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))]) for i, chain in enumerate(self.chains): _input = chain.run(_input, callbacks=_run_manager.get_child(f"step_{i+1}")) if self.strip_outputs: _input = _input.strip() _run_manager.on_text( _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose ) return {self.output_key: _input} async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() _input = inputs[self.input_key] color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))]) for i, chain in enumerate(self.chains): _input = await chain.arun(_input, callbacks=callbacks) if self.strip_outputs: _input = _input.strip() await _run_manager.on_text( _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose ) return {self.output_key: _input}
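A sketch of composing two single-input, single-output chains with `SimpleSequentialChain` (illustrative; assumes an `OPENAI_API_KEY`; prompts are placeholders):

.. code-block:: python

    from langchain.chains import LLMChain, SimpleSequentialChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    llm = OpenAI(temperature=0.7)
    synopsis_chain = LLMChain(
        llm=llm,
        prompt=PromptTemplate(
            input_variables=["title"],
            template="Write a one-paragraph synopsis for a play titled {title}.",
        ),
    )
    review_chain = LLMChain(
        llm=llm,
        prompt=PromptTemplate(
            input_variables=["synopsis"],
            template="Write a short review of this play:\n{synopsis}",
        ),
    )

    # Each chain has exactly one input and one output, so the single string
    # output of step i is threaded directly into step i + 1.
    overall = SimpleSequentialChain(chains=[synopsis_chain, review_chain], verbose=True)
    print(overall.run("Tragedy at Sunset on the Beach"))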
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
5b5f9db4-1384-40f4-83b9-d1d0bd40b96c
Source code for langchain.chains.llm_requests """Chain that hits a URL and then uses an LLM to parse results.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains import LLMChain from langchain.chains.base import Chain from langchain.requests import TextRequestsWrapper DEFAULT_HEADERS = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501 } [docs]class LLMRequestsChain(Chain): """Chain that hits a URL and then uses an LLM to parse results.""" llm_chain: LLMChain requests_wrapper: TextRequestsWrapper = Field( default_factory=lambda: TextRequestsWrapper(headers=DEFAULT_HEADERS), exclude=True, ) text_length: int = 8000 requests_key: str = "requests_result" #: :meta private: input_key: str = "url" #: :meta private: output_key: str = "output" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Will be whatever keys the prompt expects. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Will always return text key. :meta private: """ return [self.output_key] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ValueError( "Could not import bs4 python package. " "Please install it with `pip install bs4`." ) return values def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: from bs4 import BeautifulSoup _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Other keys are assumed to be needed for LLM prediction other_keys = {k: v for k, v in inputs.items() if k != self.input_key} url = inputs[self.input_key] res = self.requests_wrapper.get(url) # extract the text from the html soup = BeautifulSoup(res, "html.parser") other_keys[self.requests_key] = soup.get_text()[: self.text_length] result = self.llm_chain.predict( callbacks=_run_manager.get_child(), **other_keys ) return {self.output_key: result} @property def _chain_type(self) -> str: return "llm_requests_chain"
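A sketch of `LLMRequestsChain` (illustrative; requires `bs4` and an `OPENAI_API_KEY`; the URL and question are placeholders). The prompt must include the `requests_result` variable, which the chain fills with the page text extracted by BeautifulSoup:

.. code-block:: python

    from langchain.chains import LLMChain, LLMRequestsChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate(
        input_variables=["query", "requests_result"],
        template="Using this page content:\n{requests_result}\n\nAnswer: {query}",
    )
    chain = LLMRequestsChain(
        llm_chain=LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
    )

    # "url" is the chain's input key; other keys are forwarded to the LLM chain.
    print(chain({"query": "What is this page about?", "url": "https://example.com"}))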
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_requests.html
a97ada2d-e6d4-4be2-b1d2-c6b6b7a51a94
Source code for langchain.chains.mapreduce """Map-reduce chain. Splits up a document, sends the smaller parts to the LLM with one prompt, then combines the results with another one. """ from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.docstore.document import Document from langchain.prompts.base import BasePromptTemplate from langchain.text_splitter import TextSplitter [docs]class MapReduceChain(Chain): """Map-reduce chain.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" text_splitter: TextSplitter """Text splitter to use.""" input_key: str = "input_text" #: :meta private: output_key: str = "output_text" #: :meta private: [docs] @classmethod def from_params( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, text_splitter: TextSplitter, callbacks: Callbacks = None, combine_chain_kwargs: Optional[Mapping[str, Any]] = None, reduce_chain_kwargs: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> MapReduceChain: """Construct a map-reduce chain that uses the chain for map and reduce.""" llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks) reduce_chain = StuffDocumentsChain( llm_chain=llm_chain, callbacks=callbacks, **(reduce_chain_kwargs if reduce_chain_kwargs else {}), ) combine_documents_chain = MapReduceDocumentsChain( llm_chain=llm_chain, combine_document_chain=reduce_chain, callbacks=callbacks, **(combine_chain_kwargs if combine_chain_kwargs else {}), ) return cls( combine_documents_chain=combine_documents_chain, text_splitter=text_splitter, callbacks=callbacks, **kwargs, ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Split the larger text into smaller chunks. doc_text = inputs.pop(self.input_key) texts = self.text_splitter.split_text(doc_text) docs = [Document(page_content=text) for text in texts] _inputs: Dict[str, Any] = { **inputs, self.combine_documents_chain.input_key: docs, } outputs = self.combine_documents_chain.run( _inputs, callbacks=_run_manager.get_child() ) return {self.output_key: outputs}
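A sketch of a summarization pipeline built with `MapReduceChain.from_params` (illustrative; assumes an `OPENAI_API_KEY`; the repeated sentence stands in for a real long document):

.. code-block:: python

    from langchain.chains.mapreduce import MapReduceChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate
    from langchain.text_splitter import CharacterTextSplitter

    prompt = PromptTemplate(
        input_variables=["text"],
        template="Write a concise summary of the following:\n{text}",
    )
    chain = MapReduceChain.from_params(
        llm=OpenAI(temperature=0),
        prompt=prompt,
        text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0),
    )

    # The chain splits the text, summarizes each chunk (map), then combines
    # the chunk summaries with the same prompt (reduce).
    long_text = "LangChain provides building blocks for LLM applications.\n\n" * 200
    print(chain.run(long_text))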
https://api.python.langchain.com/en/latest/_modules/langchain/chains/mapreduce.html
efa491c5-1dc2-45ff-9713-cbc0d548e516
Source code for langchain.chains.router.multi_retrieval_qa """Use a single chain to route an input to one of multiple retrieval qa chains.""" from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.base_language import BaseLanguageModel from langchain.chains import ConversationChain from langchain.chains.base import Chain from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA from langchain.chains.router.base import MultiRouteChain from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser from langchain.chains.router.multi_retrieval_prompt import ( MULTI_RETRIEVAL_ROUTER_TEMPLATE, ) from langchain.chat_models import ChatOpenAI from langchain.prompts import PromptTemplate from langchain.schema import BaseRetriever [docs]class MultiRetrievalQAChain(MultiRouteChain): """A multi-route chain that uses an LLM router chain to choose amongst retrieval qa chains.""" router_chain: LLMRouterChain """Chain for deciding a destination chain and the input to it.""" destination_chains: Mapping[str, BaseRetrievalQA] """Map of name to candidate chains that inputs can be routed to.""" default_chain: Chain """Default chain to use when router doesn't map input to one of the destinations.""" @property def output_keys(self) -> List[str]: return ["result"] [docs] @classmethod def from_retrievers( cls, llm: BaseLanguageModel, retriever_infos: List[Dict[str, Any]], default_retriever: Optional[BaseRetriever] = None, default_prompt: Optional[PromptTemplate] = None, default_chain: Optional[Chain] = None, **kwargs: Any, ) -> MultiRetrievalQAChain: if default_prompt and not default_retriever: raise ValueError( "`default_retriever` must be specified if `default_prompt` is " "provided. Received only `default_prompt`." ) destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos] destinations_str = "\n".join(destinations) router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format( destinations=destinations_str ) router_prompt = PromptTemplate( template=router_template, input_variables=["input"], output_parser=RouterOutputParser(next_inputs_inner_key="query"), ) router_chain = LLMRouterChain.from_llm(llm, router_prompt) destination_chains = {} for r_info in retriever_infos: prompt = r_info.get("prompt") retriever = r_info["retriever"] chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever) name = r_info["name"] destination_chains[name] = chain if default_chain: _default_chain = default_chain elif default_retriever: _default_chain = RetrievalQA.from_llm( llm, prompt=default_prompt, retriever=default_retriever ) else: prompt_template = DEFAULT_TEMPLATE.replace("input", "query") prompt = PromptTemplate( template=prompt_template, input_variables=["history", "query"] ) _default_chain = ConversationChain( llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result" ) return cls( router_chain=router_chain, destination_chains=destination_chains, default_chain=_default_chain, **kwargs, )
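A sketch of `MultiRetrievalQAChain.from_retrievers` with two toy FAISS retrievers (illustrative; requires `faiss-cpu`, `openai`, and an `OPENAI_API_KEY`; the documents and descriptions are placeholders):

.. code-block:: python

    from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain
    from langchain.chat_models import ChatOpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    physics_retriever = FAISS.from_texts(
        ["Light behaves as both a wave and a particle."], OpenAIEmbeddings()
    ).as_retriever()
    history_retriever = FAISS.from_texts(
        ["The Treaty of Westphalia was signed in 1648."], OpenAIEmbeddings()
    ).as_retriever()

    retriever_infos = [
        {
            "name": "physics",
            "description": "Good for physics questions",
            "retriever": physics_retriever,
        },
        {
            "name": "history",
            "description": "Good for history questions",
            "retriever": history_retriever,
        },
    ]

    # The router LLM picks a destination by name; anything it cannot place
    # falls through to the default ConversationChain built by from_retrievers.
    chain = MultiRetrievalQAChain.from_retrievers(
        ChatOpenAI(temperature=0), retriever_infos, verbose=True
    )
    print(chain.run("When was the Treaty of Westphalia signed?"))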
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_retrieval_qa.html
c13cde83-0c27-455f-b8b0-6c65d2febef6
Source code for langchain.chains.router.base """Base classes for chain routing.""" from __future__ import annotations from abc import ABC from typing import Any, Dict, List, Mapping, NamedTuple, Optional from pydantic import Extra from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, Callbacks, ) from langchain.chains.base import Chain class Route(NamedTuple): destination: Optional[str] next_inputs: Dict[str, Any] [docs]class RouterChain(Chain, ABC): """Chain that outputs the name of a destination chain and the inputs to it.""" @property def output_keys(self) -> List[str]: return ["destination", "next_inputs"] [docs] def route(self, inputs: Dict[str, Any], callbacks: Callbacks = None) -> Route: result = self(inputs, callbacks=callbacks) return Route(result["destination"], result["next_inputs"]) [docs] async def aroute( self, inputs: Dict[str, Any], callbacks: Callbacks = None ) -> Route: result = await self.acall(inputs, callbacks=callbacks) return Route(result["destination"], result["next_inputs"]) [docs]class MultiRouteChain(Chain): """Use a single chain to route an input to one of multiple candidate chains.""" router_chain: RouterChain """Chain that routes inputs to destination chains.""" destination_chains: Mapping[str, Chain] """Chains that return final answer to inputs.""" default_chain: Chain """Default chain to use when none of the destination chains are suitable.""" silent_errors: bool = False """If True, use default_chain when an invalid destination name is provided. Defaults to False.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Will be whatever keys the router chain prompt expects. :meta private: """ return self.router_chain.input_keys @property def output_keys(self) -> List[str]: """Will always return text key. 
:meta private: """ return [] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() route = self.router_chain.route(inputs, callbacks=callbacks) _run_manager.on_text( str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose ) if not route.destination: return self.default_chain(route.next_inputs, callbacks=callbacks) elif route.destination in self.destination_chains: return self.destination_chains[route.destination]( route.next_inputs, callbacks=callbacks ) elif self.silent_errors: return self.default_chain(route.next_inputs, callbacks=callbacks) else: raise ValueError( f"Received invalid destination chain name '{route.destination}'" ) async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() route = await self.router_chain.aroute(inputs, callbacks=callbacks) _run_manager.on_text( str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose ) if not route.destination: return await self.default_chain.acall( route.next_inputs, callbacks=callbacks ) elif route.destination in self.destination_chains: return await self.destination_chains[route.destination].acall( route.next_inputs, callbacks=callbacks ) elif self.silent_errors: return await self.default_chain.acall( route.next_inputs, callbacks=callbacks ) else: raise ValueError( f"Received invalid destination chain name '{route.destination}'" )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/base.html
4c05c00c-7c71-4cb8-8297-d9b361d12c2c
Source code for langchain.chains.router.multi_prompt """Use a single chain to route an input to one of multiple llm chains.""" from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.base_language import BaseLanguageModel from langchain.chains import ConversationChain from langchain.chains.llm import LLMChain from langchain.chains.router.base import MultiRouteChain, RouterChain from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE from langchain.prompts import PromptTemplate [docs]class MultiPromptChain(MultiRouteChain): """A multi-route chain that uses an LLM router chain to choose amongst prompts.""" router_chain: RouterChain """Chain for deciding a destination chain and the input to it.""" destination_chains: Mapping[str, LLMChain] """Map of name to candidate chains that inputs can be routed to.""" default_chain: LLMChain """Default chain to use when router doesn't map input to one of the destinations.""" @property def output_keys(self) -> List[str]: return ["text"] [docs] @classmethod def from_prompts( cls, llm: BaseLanguageModel, prompt_infos: List[Dict[str, str]], default_chain: Optional[LLMChain] = None, **kwargs: Any, ) -> MultiPromptChain: """Convenience constructor for instantiating from destination prompts.""" destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos] destinations_str = "\n".join(destinations) router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format( destinations=destinations_str ) router_prompt = PromptTemplate( template=router_template, input_variables=["input"], output_parser=RouterOutputParser(), ) router_chain = LLMRouterChain.from_llm(llm, router_prompt) destination_chains = {} for p_info in prompt_infos: name = p_info["name"] prompt_template = p_info["prompt_template"] prompt = PromptTemplate(template=prompt_template, input_variables=["input"]) chain = LLMChain(llm=llm, prompt=prompt) destination_chains[name] = chain _default_chain = default_chain or ConversationChain(llm=llm, output_key="text") return cls( router_chain=router_chain, destination_chains=destination_chains, default_chain=_default_chain, **kwargs, )
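A sketch of `MultiPromptChain.from_prompts` (illustrative; assumes an `OPENAI_API_KEY`; each prompt template must contain the `{input}` variable expected by the router):

.. code-block:: python

    from langchain.chains.router import MultiPromptChain
    from langchain.llms import OpenAI

    prompt_infos = [
        {
            "name": "physics",
            "description": "Good for answering physics questions",
            "prompt_template": "You are a physics professor. Answer concisely:\n{input}",
        },
        {
            "name": "math",
            "description": "Good for answering math questions",
            "prompt_template": "You are a mathematician. Answer step by step:\n{input}",
        },
    ]

    chain = MultiPromptChain.from_prompts(
        OpenAI(temperature=0), prompt_infos, verbose=True
    )
    print(chain.run("What is the speed of light in a vacuum?"))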
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_prompt.html
0ad2eaeb-1073-4ed2-bde3-0cba089418d1
Source code for langchain.chains.router.llm_router """Base classes for LLM-powered router chains.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Type, cast from pydantic import root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains import LLMChain from langchain.chains.router.base import RouterChain from langchain.output_parsers.json import parse_and_check_json_markdown from langchain.prompts import BasePromptTemplate from langchain.schema import BaseOutputParser, OutputParserException [docs]class LLMRouterChain(RouterChain): """A router chain that uses an LLM chain to perform routing.""" llm_chain: LLMChain """LLM chain used to perform routing""" @root_validator() def validate_prompt(cls, values: dict) -> dict: prompt = values["llm_chain"].prompt if prompt.output_parser is None: raise ValueError( "LLMRouterChain requires base llm_chain prompt to have an output" " parser that converts LLM text output to a dictionary with keys" " 'destination' and 'next_inputs'. Received a prompt with no output" " parser." ) return values @property def input_keys(self) -> List[str]: """Will be whatever keys the LLM chain prompt expects. :meta private: """ return self.llm_chain.input_keys def _validate_outputs(self, outputs: Dict[str, Any]) -> None: super()._validate_outputs(outputs) if not isinstance(outputs["next_inputs"], dict): raise ValueError def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() output = cast( Dict[str, Any], self.llm_chain.predict_and_parse(callbacks=callbacks, **inputs), ) return output async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() output = cast( Dict[str, Any], await self.llm_chain.apredict_and_parse(callbacks=callbacks, **inputs), ) return output [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, **kwargs: Any ) -> LLMRouterChain: """Convenience constructor.""" llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, **kwargs) class RouterOutputParser(BaseOutputParser[Dict[str, str]]): """Parser for output of router chain int he multi-prompt chain.""" default_destination: str = "DEFAULT" next_inputs_type: Type = str next_inputs_inner_key: str = "input" def parse(self, text: str) -> Dict[str, Any]: try: expected_keys = ["destination", "next_inputs"] parsed = parse_and_check_json_markdown(text, expected_keys) if not isinstance(parsed["destination"], str): raise ValueError("Expected 'destination' to be a string.") if not isinstance(parsed["next_inputs"], self.next_inputs_type): raise ValueError( f"Expected 'next_inputs' to be {self.next_inputs_type}." ) parsed["next_inputs"] = {self.next_inputs_inner_key: parsed["next_inputs"]} if ( parsed["destination"].strip().lower() == self.default_destination.lower() ): parsed["destination"] = None else: parsed["destination"] = parsed["destination"].strip() return parsed except Exception as e: raise OutputParserException( f"Parsing text\n{text}\n raised following error:\n{e}" )
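`RouterOutputParser` turns the router LLM's markdown-JSON reply into the `destination`/`next_inputs` dict that `RouterChain` expects. A self-contained sketch of the parsing step (illustrative; runs without any LLM):

.. code-block:: python

    from langchain.chains.router.llm_router import RouterOutputParser

    parser = RouterOutputParser()
    llm_output = """```json
    {
        "destination": "physics",
        "next_inputs": "Why is the sky blue?"
    }
    ```"""

    # parse() validates the JSON, wraps next_inputs under the "input" key,
    # and maps the literal "DEFAULT" destination to None.
    print(parser.parse(llm_output))
    # {'destination': 'physics', 'next_inputs': {'input': 'Why is the sky blue?'}}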
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/llm_router.html
2c6e1723-0320-45b5-8e15-df84faefdbb7
Source code for langchain.chains.natbot.base """Implement an LLM driven browser.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.natbot.prompt import PROMPT from langchain.llms.openai import OpenAI [docs]class NatBotChain(Chain): """Implement an LLM driven browser. Example: .. code-block:: python from langchain import NatBotChain natbot = NatBotChain.from_default("Buy me a new hat.") """ llm_chain: LLMChain objective: str """Objective that NatBot is tasked with completing.""" llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" input_url_key: str = "url" #: :meta private: input_browser_content_key: str = "browser_content" #: :meta private: previous_command: str = "" #: :meta private: output_key: str = "command" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating an NatBotChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the from_llm " "class method." ) if "llm_chain" not in values and values["llm"] is not None: values["llm_chain"] = LLMChain(llm=values["llm"], prompt=PROMPT) return values [docs] @classmethod def from_default(cls, objective: str, **kwargs: Any) -> NatBotChain: """Load with default LLMChain.""" llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50) return cls.from_llm(llm, objective, **kwargs) [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, objective: str, **kwargs: Any ) -> NatBotChain: """Load from LLM.""" llm_chain = LLMChain(llm=llm, prompt=PROMPT) return cls(llm_chain=llm_chain, objective=objective, **kwargs) @property def input_keys(self) -> List[str]: """Expect url and browser content. :meta private: """ return [self.input_url_key, self.input_browser_content_key] @property def output_keys(self) -> List[str]: """Return command. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() url = inputs[self.input_url_key] browser_content = inputs[self.input_browser_content_key] llm_cmd = self.llm_chain.predict( objective=self.objective, url=url[:100], previous_command=self.previous_command, browser_content=browser_content[:4500], callbacks=_run_manager.get_child(), ) llm_cmd = llm_cmd.strip() self.previous_command = llm_cmd return {self.output_key: llm_cmd} [docs] def execute(self, url: str, browser_content: str) -> str: """Figure out next browser command to run. Args: url: URL of the site currently on. browser_content: Content of the page as currently displayed by the browser. Returns: Next browser command to run. Example: .. code-block:: python browser_content = "...." llm_command = natbot.run("www.google.com", browser_content) """ _inputs = { self.input_url_key: url, self.input_browser_content_key: browser_content, } return self(_inputs)[self.output_key] @property def _chain_type(self) -> str: return "nat_bot_chain"
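A sketch of driving `NatBotChain` by hand (illustrative; assumes an `OPENAI_API_KEY`; the URL and simplified browser content are placeholders, and the printed command depends on the model):

.. code-block:: python

    from langchain.chains import NatBotChain
    from langchain.llms import OpenAI

    # from_llm avoids the deprecated `llm=` constructor argument.
    natbot = NatBotChain.from_llm(
        OpenAI(temperature=0.5), objective="Find the search box and search for hats."
    )

    browser_content = '<input id=1 alt="Search"/> <button id=2>Go</button>'
    command = natbot.execute("https://www.example.com", browser_content)
    print(command)  # e.g. 'TYPESUBMIT 1 "hats"'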
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
b02cb80c-ffb1-40b8-a109-3a4ac54b643a
Source code for langchain.chains.graph_qa.base """Question answering over a graph.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ENTITY_EXTRACTION_PROMPT, PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.networkx_graph import NetworkxEntityGraph, get_entities from langchain.prompts.base import BasePromptTemplate [docs]class GraphQAChain(Chain): """Chain for question-answering against a graph.""" graph: NetworkxEntityGraph = Field(exclude=True) entity_extraction_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, qa_prompt: BasePromptTemplate = PROMPT, entity_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT, **kwargs: Any, ) -> GraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) entity_chain = LLMChain(llm=llm, prompt=entity_prompt) return cls( qa_chain=qa_chain, entity_extraction_chain=entity_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Extract entities, look up info and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] entity_string = self.entity_extraction_chain.run(question) _run_manager.on_text("Entities Extracted:", end="\n", verbose=self.verbose) _run_manager.on_text( entity_string, color="green", end="\n", verbose=self.verbose ) entities = get_entities(entity_string) context = "" for entity in entities: triplets = self.graph.get_entity_knowledge(entity) context += "\n".join(triplets) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text(context, color="green", end="\n", verbose=self.verbose) result = self.qa_chain( {"question": question, "context": context}, callbacks=_run_manager.get_child(), ) return {self.output_key: result[self.qa_chain.output_key]}
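A sketch of `GraphQAChain` over a tiny in-memory graph (illustrative; requires `networkx` and an `OPENAI_API_KEY`; the triples are made up for the example):

.. code-block:: python

    from langchain.chains import GraphQAChain
    from langchain.graphs.networkx_graph import KnowledgeTriple, NetworkxEntityGraph
    from langchain.llms import OpenAI

    # Build a small knowledge graph of (subject, predicate, object) triples.
    graph = NetworkxEntityGraph()
    graph.add_triple(
        KnowledgeTriple("Ada Lovelace", "collaborated with", "Charles Babbage")
    )
    graph.add_triple(
        KnowledgeTriple("Charles Babbage", "designed", "the Analytical Engine")
    )

    # The chain first extracts entities from the question, then answers from
    # the triples attached to those entities.
    chain = GraphQAChain.from_llm(OpenAI(temperature=0), graph=graph, verbose=True)
    print(chain.run("Who did Ada Lovelace collaborate with?"))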
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/base.html
Source code for langchain.chains.graph_qa.cypher """Question answering over a graph.""" from __future__ import annotations import re from typing import Any, Dict, List, Optional from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.neo4j_graph import Neo4jGraph from langchain.prompts.base import BasePromptTemplate INTERMEDIATE_STEPS_KEY = "intermediate_steps" def extract_cypher(text: str) -> str: """ Extract Cypher code from a text. Args: text: Text to extract Cypher code from. Returns: Cypher code extracted from the text. """ # The pattern to find Cypher code enclosed in triple backticks pattern = r"```(.*?)```" # Find all matches in the input text matches = re.findall(pattern, text, re.DOTALL) return matches[0] if matches else text [docs]class GraphCypherQAChain(Chain): """Chain for question-answering against a graph by generating Cypher statements.""" graph: Neo4jGraph = Field(exclude=True) cypher_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: top_k: int = 10 """Number of results to return from the query""" return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" return_direct: bool = False """Whether or not to return the result of querying the graph directly.""" @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. 
:meta private: """ _output_keys = [self.output_key] return _output_keys @property def _chain_type(self) -> str: return "graph_cypher_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, cypher_prompt: BasePromptTemplate = CYPHER_GENERATION_PROMPT, **kwargs: Any, ) -> GraphCypherQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt) return cls( qa_chain=qa_chain, cypher_generation_chain=cypher_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] intermediate_steps: List = [] generated_cypher = self.cypher_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) # Extract Cypher code if it is wrapped in backticks generated_cypher = extract_cypher(generated_cypher) _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_cypher, color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"query": generated_cypher}) # Retrieve and limit the number of results context = self.graph.query(generated_cypher)[: self.top_k] if self.return_direct: final_result = context else: _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"context": context}) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) final_result = result[self.qa_chain.output_key] chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result
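A hedged usage sketch: the Neo4j connection settings below are placeholders for a local instance, and the chain assumes the graph already holds data matching the schema it reads via ``get_schema``.

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain.chains import GraphCypherQAChain
    from langchain.graphs import Neo4jGraph

    # Placeholder credentials - point these at your own Neo4j deployment.
    graph = Neo4jGraph(
        url="bolt://localhost:7687", username="neo4j", password="password"
    )

    chain = GraphCypherQAChain.from_llm(
        ChatOpenAI(temperature=0),
        graph=graph,
        verbose=True,
        return_intermediate_steps=True,
    )
    result = chain("Who played in Top Gun?")
    result["result"], result["intermediate_steps"]

Setting ``return_direct=True`` instead skips the QA step and returns the raw rows from the generated Cypher query.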
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/cypher.html
Source code for langchain.chains.graph_qa.nebulagraph """Question answering over a graph.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, NGQL_GENERATION_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.nebula_graph import NebulaGraph from langchain.prompts.base import BasePromptTemplate [docs]class NebulaGraphQAChain(Chain): """Chain for question-answering against a graph by generating nGQL statements.""" graph: NebulaGraph = Field(exclude=True) ngql_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, ngql_prompt: BasePromptTemplate = NGQL_GENERATION_PROMPT, **kwargs: Any, ) -> NebulaGraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) ngql_generation_chain = LLMChain(llm=llm, prompt=ngql_prompt) return cls( qa_chain=qa_chain, ngql_generation_chain=ngql_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Generate nGQL statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] generated_ngql = self.ngql_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) _run_manager.on_text("Generated nGQL:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_ngql, color="green", end="\n", verbose=self.verbose ) context = self.graph.query(generated_ngql) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) return {self.output_key: result[self.qa_chain.output_key]}
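A sketch along the same lines, assuming a locally running NebulaGraph service; the connection parameters below follow the wrapper's typical defaults and should be treated as placeholders for your own deployment.

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain.chains import NebulaGraphQAChain
    from langchain.graphs import NebulaGraph

    # Placeholder connection settings for a local NebulaGraph instance.
    graph = NebulaGraph(
        space="langchain",
        username="root",
        password="nebula",
        address="127.0.0.1",
        port=9669,
    )

    chain = NebulaGraphQAChain.from_llm(
        ChatOpenAI(temperature=0), graph=graph, verbose=True
    )
    chain.run("Who played in The Godfather?")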
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/nebulagraph.html
Source code for langchain.chains.graph_qa.kuzu """Question answering over a graph.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, KUZU_GENERATION_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.kuzu_graph import KuzuGraph from langchain.prompts.base import BasePromptTemplate [docs]class KuzuQAChain(Chain): """Chain for question-answering against a graph by generating Cypher statements for Kùzu. """ graph: KuzuGraph = Field(exclude=True) cypher_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, cypher_prompt: BasePromptTemplate = KUZU_GENERATION_PROMPT, **kwargs: Any, ) -> KuzuQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt) return cls( qa_chain=qa_chain, cypher_generation_chain=cypher_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] generated_cypher = self.cypher_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_cypher, color="green", end="\n", verbose=self.verbose ) context = self.graph.query(generated_cypher) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) return {self.output_key: result[self.qa_chain.output_key]}
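A usage sketch assuming the ``kuzu`` Python package and an on-disk database that already contains the data to query; the database path is a placeholder.

.. code-block:: python

    import kuzu

    from langchain.chat_models import ChatOpenAI
    from langchain.chains import KuzuQAChain
    from langchain.graphs import KuzuGraph

    # Placeholder path to an existing Kuzu database populated with your schema.
    db = kuzu.Database("test_db")
    graph = KuzuGraph(db)

    chain = KuzuQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
    chain.run("Who played in The Godfather: Part II?")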
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/kuzu.html
Source code for langchain.chains.llm_bash.base """Chain that interprets a prompt and executes bash code to perform bash operations.""" from __future__ import annotations import logging import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.llm_bash.prompt import PROMPT from langchain.prompts.base import BasePromptTemplate from langchain.schema import OutputParserException from langchain.utilities.bash import BashProcess logger = logging.getLogger(__name__) [docs]class LLMBashChain(Chain): """Chain that interprets a prompt and executes bash code to perform bash operations. Example: .. code-block:: python from langchain import LLMBashChain, OpenAI llm_bash = LLMBashChain.from_llm(OpenAI()) """ llm_chain: LLMChain llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" input_key: str = "question" #: :meta private: output_key: str = "answer" #: :meta private: prompt: BasePromptTemplate = PROMPT """[Deprecated]""" bash_process: BashProcess = Field(default_factory=BashProcess) #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating an LLMBashChain with an llm is deprecated. " "Please instantiate with llm_chain or using the from_llm class method." ) if "llm_chain" not in values and values["llm"] is not None: prompt = values.get("prompt", PROMPT) values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt) return values @root_validator def validate_prompt(cls, values: Dict) -> Dict: if values["llm_chain"].prompt.output_parser is None: raise ValueError( "The prompt used by llm_chain is expected to have an output_parser." ) return values @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Expect output key. 
:meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(inputs[self.input_key], verbose=self.verbose) t = self.llm_chain.predict( question=inputs[self.input_key], callbacks=_run_manager.get_child() ) _run_manager.on_text(t, color="green", verbose=self.verbose) t = t.strip() try: parser = self.llm_chain.prompt.output_parser command_list = parser.parse(t) # type: ignore[union-attr] except OutputParserException as e: _run_manager.on_chain_error(e, verbose=self.verbose) raise e if self.verbose: _run_manager.on_text("\nCode: ", verbose=self.verbose) _run_manager.on_text( str(command_list), color="yellow", verbose=self.verbose ) output = self.bash_process.run(command_list) _run_manager.on_text("\nAnswer: ", verbose=self.verbose) _run_manager.on_text(output, color="yellow", verbose=self.verbose) return {self.output_key: output} @property def _chain_type(self) -> str: return "llm_bash_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate = PROMPT, **kwargs: Any, ) -> LLMBashChain: llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, **kwargs)
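A minimal sketch of the bash chain in use; note that it executes whatever command list the LLM produces through ``BashProcess``, so it should only be run in an environment where that is acceptable.

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.chains import LLMBashChain

    llm = OpenAI(temperature=0)
    bash_chain = LLMBashChain.from_llm(llm, verbose=True)

    bash_chain.run("Please write a bash script that prints 'Hello World' to the console.")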
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/base.html
Source code for langchain.chains.retrieval_qa.base """Chain for question-answering against a vector database.""" from __future__ import annotations import warnings from abc import abstractmethod from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR from langchain.prompts import PromptTemplate from langchain.schema import BaseRetriever, Document from langchain.vectorstores.base import VectorStore class BaseRetrievalQA(Chain): combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine the documents.""" input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: return_source_documents: bool = False """Return the source documents.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True allow_population_by_field_name = True @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] if self.return_source_documents: _output_keys = _output_keys + ["source_documents"] return _output_keys @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, **kwargs: Any, ) -> BaseRetrievalQA: """Initialize from LLM.""" _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) llm_chain = LLMChain(llm=llm, prompt=_prompt) document_prompt = PromptTemplate( input_variables=["page_content"], template="Context:\n{page_content}" ) combine_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_variable_name="context", document_prompt=document_prompt, ) return cls(combine_documents_chain=combine_documents_chain, **kwargs) @classmethod def from_chain_type( cls, llm: BaseLanguageModel, chain_type: str = "stuff", chain_type_kwargs: Optional[dict] = None, **kwargs: Any, ) -> BaseRetrievalQA: """Load chain from chain type.""" _chain_type_kwargs = chain_type_kwargs or {} combine_documents_chain = load_qa_chain( llm, chain_type=chain_type, **_chain_type_kwargs ) return cls(combine_documents_chain=combine_documents_chain, **kwargs) @abstractmethod def _get_docs(self, question: str) -> List[Document]: """Get documents to do question answering over.""" def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run get_relevant_text and llm on input query. If chain has 'return_source_documents' as 'True', returns the retrieved documents as well under the key 'source_documents'. Example: .. 
code-block:: python res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] """ _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] docs = self._get_docs(question) answer = self.combine_documents_chain.run( input_documents=docs, question=question, callbacks=_run_manager.get_child() ) if self.return_source_documents: return {self.output_key: answer, "source_documents": docs} else: return {self.output_key: answer} @abstractmethod async def _aget_docs(self, question: str) -> List[Document]: """Get documents to do question answering over.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run get_relevant_text and llm on input query. If chain has 'return_source_documents' as 'True', returns the retrieved documents as well under the key 'source_documents'. Example: .. code-block:: python res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] """ _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] docs = await self._aget_docs(question) answer = await self.combine_documents_chain.arun( input_documents=docs, question=question, callbacks=_run_manager.get_child() ) if self.return_source_documents: return {self.output_key: answer, "source_documents": docs} else: return {self.output_key: answer} [docs]class RetrievalQA(BaseRetrievalQA): """Chain for question-answering against an index. Example: .. code-block:: python from langchain.llms import OpenAI from langchain.chains import RetrievalQA from langchain.faiss import FAISS from langchain.vectorstores.base import VectorStoreRetriever retriever = VectorStoreRetriever(vectorstore=FAISS(...)) retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever) """ retriever: BaseRetriever = Field(exclude=True) def _get_docs(self, question: str) -> List[Document]: return self.retriever.get_relevant_documents(question) async def _aget_docs(self, question: str) -> List[Document]: return await self.retriever.aget_relevant_documents(question) @property def _chain_type(self) -> str: """Return the chain type.""" return "retrieval_qa" [docs]class VectorDBQA(BaseRetrievalQA): """Chain for question-answering against a vector database.""" vectorstore: VectorStore = Field(exclude=True, alias="vectorstore") """Vector Database to connect to.""" k: int = 4 """Number of documents to query for.""" search_type: str = "similarity" """Search type to use over vectorstore. 
`similarity` or `mmr`.""" search_kwargs: Dict[str, Any] = Field(default_factory=dict) """Extra search args.""" @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: warnings.warn( "`VectorDBQA` is deprecated - " "please use `from langchain.chains import RetrievalQA`" ) return values @root_validator() def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" if "search_type" in values: search_type = values["search_type"] if search_type not in ("similarity", "mmr"): raise ValueError(f"search_type of {search_type} not allowed.") return values def _get_docs(self, question: str) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.similarity_search( question, k=self.k, **self.search_kwargs ) elif self.search_type == "mmr": docs = self.vectorstore.max_marginal_relevance_search( question, k=self.k, **self.search_kwargs ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs async def _aget_docs(self, question: str) -> List[Document]: raise NotImplementedError("VectorDBQA does not support async") @property def _chain_type(self) -> str: """Return the chain type.""" return "vector_db_qa"
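A short end-to-end sketch for ``RetrievalQA`` using an in-memory FAISS index; the sample texts are placeholders, and ``return_source_documents=True`` adds the retrieved documents to the output as described above.

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.chains import RetrievalQA
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    texts = [
        "LangChain provides chains for question answering over documents.",
        "FAISS is a library for efficient similarity search.",
    ]
    docsearch = FAISS.from_texts(texts, OpenAIEmbeddings())

    qa = RetrievalQA.from_chain_type(
        llm=OpenAI(temperature=0),
        chain_type="stuff",
        retriever=docsearch.as_retriever(),
        return_source_documents=True,
    )
    result = qa({"query": "What does LangChain provide?"})
    answer, docs = result["result"], result["source_documents"]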
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
Source code for langchain.chains.llm_math.base """Chain that interprets a prompt and executes python code to do math.""" from __future__ import annotations import math import re import warnings from typing import Any, Dict, List, Optional import numexpr from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.llm_math.prompt import PROMPT from langchain.prompts.base import BasePromptTemplate [docs]class LLMMathChain(Chain): """Chain that interprets a prompt and executes python code to do math. Example: .. code-block:: python from langchain import LLMMathChain, OpenAI llm_math = LLMMathChain.from_llm(OpenAI()) """ llm_chain: LLMChain llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" prompt: BasePromptTemplate = PROMPT """[Deprecated] Prompt to use to translate to python if necessary.""" input_key: str = "question" #: :meta private: output_key: str = "answer" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating an LLMMathChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the from_llm " "class method." ) if "llm_chain" not in values and values["llm"] is not None: prompt = values.get("prompt", PROMPT) values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt) return values @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ return [self.output_key] def _evaluate_expression(self, expression: str) -> str: try: local_dict = {"pi": math.pi, "e": math.e} output = str( numexpr.evaluate( expression.strip(), global_dict={}, # restrict access to globals local_dict=local_dict, # add common mathematical functions ) ) except Exception as e: raise ValueError( f'LLMMathChain._evaluate("{expression}") raised error: {e}.' 
" Please try again with a valid numerical expression" ) # Remove any leading and trailing brackets from the output return re.sub(r"^\[|\]$", "", output) def _process_llm_result( self, llm_output: str, run_manager: CallbackManagerForChainRun ) -> Dict[str, str]: run_manager.on_text(llm_output, color="green", verbose=self.verbose) llm_output = llm_output.strip() text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) if text_match: expression = text_match.group(1) output = self._evaluate_expression(expression) run_manager.on_text("\nAnswer: ", verbose=self.verbose) run_manager.on_text(output, color="yellow", verbose=self.verbose) answer = "Answer: " + output elif llm_output.startswith("Answer:"): answer = llm_output elif "Answer:" in llm_output: answer = "Answer: " + llm_output.split("Answer:")[-1] else: raise ValueError(f"unknown format from LLM: {llm_output}") return {self.output_key: answer} async def _aprocess_llm_result( self, llm_output: str, run_manager: AsyncCallbackManagerForChainRun, ) -> Dict[str, str]: await run_manager.on_text(llm_output, color="green", verbose=self.verbose) llm_output = llm_output.strip() text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) if text_match: expression = text_match.group(1) output = self._evaluate_expression(expression) await run_manager.on_text("\nAnswer: ", verbose=self.verbose) await run_manager.on_text(output, color="yellow", verbose=self.verbose) answer = "Answer: " + output elif llm_output.startswith("Answer:"): answer = llm_output elif "Answer:" in llm_output: answer = "Answer: " + llm_output.split("Answer:")[-1] else: raise ValueError(f"unknown format from LLM: {llm_output}") return {self.output_key: answer} def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(inputs[self.input_key]) llm_output = self.llm_chain.predict( question=inputs[self.input_key], stop=["```output"], callbacks=_run_manager.get_child(), ) return self._process_llm_result(llm_output, _run_manager) async def _acall( self, inputs: Dict[str, str], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() await _run_manager.on_text(inputs[self.input_key]) llm_output = await self.llm_chain.apredict( question=inputs[self.input_key], stop=["```output"], callbacks=_run_manager.get_child(), ) return await self._aprocess_llm_result(llm_output, _run_manager) @property def _chain_type(self) -> str: return "llm_math_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate = PROMPT, **kwargs: Any, ) -> LLMMathChain: llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
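A minimal sketch of the math chain defined above: the LLM translates the question into a single numerical expression inside a fenced ``text`` block, and the chain evaluates it locally with ``numexpr`` rather than trusting the model's arithmetic.

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.chains import LLMMathChain

    llm_math = LLMMathChain.from_llm(OpenAI(temperature=0), verbose=True)
    llm_math.run("What is 13 raised to the 0.3432 power?")
    # expected to return something like "Answer: 2.41..."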
Source code for langchain.chains.hyde.base """Hypothetical Document Embeddings. https://arxiv.org/abs/2212.10496 """ from __future__ import annotations from typing import Any, Dict, List, Optional import numpy as np from pydantic import Extra from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.hyde.prompts import PROMPT_MAP from langchain.chains.llm import LLMChain from langchain.embeddings.base import Embeddings [docs]class HypotheticalDocumentEmbedder(Chain, Embeddings): """Generate hypothetical document for query, and then embed that. Based on https://arxiv.org/abs/2212.10496 """ base_embeddings: Embeddings llm_chain: LLMChain class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Input keys for Hyde's LLM chain.""" return self.llm_chain.input_keys @property def output_keys(self) -> List[str]: """Output keys for Hyde's LLM chain.""" return self.llm_chain.output_keys [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call the base embeddings.""" return self.base_embeddings.embed_documents(texts) [docs] def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]: """Combine embeddings into final embeddings.""" return list(np.array(embeddings).mean(axis=0)) [docs] def embed_query(self, text: str) -> List[float]: """Generate a hypothetical document and embedded it.""" var_name = self.llm_chain.input_keys[0] result = self.llm_chain.generate([{var_name: text}]) documents = [generation.text for generation in result.generations[0]] embeddings = self.embed_documents(documents) return self.combine_embeddings(embeddings) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Call the internal llm chain.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() return self.llm_chain(inputs, callbacks=_run_manager.get_child()) [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, base_embeddings: Embeddings, prompt_key: str, **kwargs: Any, ) -> HypotheticalDocumentEmbedder: """Load and use LLMChain for a specific prompt key.""" prompt = PROMPT_MAP[prompt_key] llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs) @property def _chain_type(self) -> str: return "hyde_chain"
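A minimal sketch, assuming ``"web_search"`` is among the prompt keys registered in ``PROMPT_MAP``; the resulting object can be passed anywhere an ``Embeddings`` implementation is expected, for example when building a vector store.

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.chains import HypotheticalDocumentEmbedder

    base_embeddings = OpenAIEmbeddings()
    llm = OpenAI()

    # "web_search" is assumed to be one of the available prompt keys.
    hyde = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")

    # Queries are embedded via a generated hypothetical answer document;
    # plain documents go straight through to the base embeddings.
    query_vector = hyde.embed_query("Where is the Taj Mahal?")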
https://api.python.langchain.com/en/latest/_modules/langchain/chains/hyde/base.html
Source code for langchain.chains.llm_checker.base """Chain for question-answering with self-verification.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.llm_checker.prompt import ( CHECK_ASSERTIONS_PROMPT, CREATE_DRAFT_ANSWER_PROMPT, LIST_ASSERTIONS_PROMPT, REVISED_ANSWER_PROMPT, ) from langchain.chains.sequential import SequentialChain from langchain.prompts import PromptTemplate def _load_question_to_checked_assertions_chain( llm: BaseLanguageModel, create_draft_answer_prompt: PromptTemplate, list_assertions_prompt: PromptTemplate, check_assertions_prompt: PromptTemplate, revised_answer_prompt: PromptTemplate, ) -> SequentialChain: create_draft_answer_chain = LLMChain( llm=llm, prompt=create_draft_answer_prompt, output_key="statement", ) list_assertions_chain = LLMChain( llm=llm, prompt=list_assertions_prompt, output_key="assertions", ) check_assertions_chain = LLMChain( llm=llm, prompt=check_assertions_prompt, output_key="checked_assertions", ) revised_answer_chain = LLMChain( llm=llm, prompt=revised_answer_prompt, output_key="revised_statement", ) chains = [ create_draft_answer_chain, list_assertions_chain, check_assertions_chain, revised_answer_chain, ] question_to_checked_assertions_chain = SequentialChain( chains=chains, input_variables=["question"], output_variables=["revised_statement"], verbose=True, ) return question_to_checked_assertions_chain [docs]class LLMCheckerChain(Chain): """Chain for question-answering with self-verification. Example: .. code-block:: python from langchain import OpenAI, LLMCheckerChain llm = OpenAI(temperature=0.7) checker_chain = LLMCheckerChain.from_llm(llm) """ question_to_checked_assertions_chain: SequentialChain llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT """[Deprecated]""" list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT """[Deprecated]""" check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT """[Deprecated]""" revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT """[Deprecated] Prompt to use when questioning the documents.""" input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating an LLMCheckerChain with an llm is deprecated. " "Please instantiate with question_to_checked_assertions_chain " "or using the from_llm class method." 
) if ( "question_to_checked_assertions_chain" not in values and values["llm"] is not None ): question_to_checked_assertions_chain = ( _load_question_to_checked_assertions_chain( values["llm"], values.get( "create_draft_answer_prompt", CREATE_DRAFT_ANSWER_PROMPT ), values.get("list_assertions_prompt", LIST_ASSERTIONS_PROMPT), values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT), values.get("revised_answer_prompt", REVISED_ANSWER_PROMPT), ) ) values[ "question_to_checked_assertions_chain" ] = question_to_checked_assertions_chain return values @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] output = self.question_to_checked_assertions_chain( {"question": question}, callbacks=_run_manager.get_child() ) return {self.output_key: output["revised_statement"]} @property def _chain_type(self) -> str: return "llm_checker_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT, list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT, check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT, revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT, **kwargs: Any, ) -> LLMCheckerChain: question_to_checked_assertions_chain = ( _load_question_to_checked_assertions_chain( llm, create_draft_answer_prompt, list_assertions_prompt, check_assertions_prompt, revised_answer_prompt, ) ) return cls( question_to_checked_assertions_chain=question_to_checked_assertions_chain, **kwargs, )
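A short sketch of the checker chain in use; it runs the draft-answer, list-assertions, check-assertions, and revise-answer steps defined above as a single ``SequentialChain``.

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.chains import LLMCheckerChain

    llm = OpenAI(temperature=0.7)
    checker_chain = LLMCheckerChain.from_llm(llm, verbose=True)

    checker_chain.run("What type of mammal lays the biggest eggs?")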
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
Source code for langchain.chains.constitutional_ai.base """Chain for applying constitutional principles to the outputs of another chain.""" from typing import Any, Dict, List, Optional from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple from langchain.chains.constitutional_ai.principles import PRINCIPLES from langchain.chains.constitutional_ai.prompts import CRITIQUE_PROMPT, REVISION_PROMPT from langchain.chains.llm import LLMChain from langchain.prompts.base import BasePromptTemplate [docs]class ConstitutionalChain(Chain): """Chain for applying constitutional principles. Example: .. code-block:: python from langchain.llms import OpenAI from langchain.chains import LLMChain, ConstitutionalChain from langchain.chains.constitutional_ai.models \ import ConstitutionalPrinciple llm = OpenAI() qa_prompt = PromptTemplate( template="Q: {question} A:", input_variables=["question"], ) qa_chain = LLMChain(llm=llm, prompt=qa_prompt) constitutional_chain = ConstitutionalChain.from_llm( llm=llm, chain=qa_chain, constitutional_principles=[ ConstitutionalPrinciple( critique_request="Tell if this answer is good.", revision_request="Give a better answer.", ) ], ) constitutional_chain.run(question="What is the meaning of life?") """ chain: LLMChain constitutional_principles: List[ConstitutionalPrinciple] critique_chain: LLMChain revision_chain: LLMChain return_intermediate_steps: bool = False [docs] @classmethod def get_principles( cls, names: Optional[List[str]] = None ) -> List[ConstitutionalPrinciple]: if names is None: return list(PRINCIPLES.values()) else: return [PRINCIPLES[name] for name in names] [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, chain: LLMChain, critique_prompt: BasePromptTemplate = CRITIQUE_PROMPT, revision_prompt: BasePromptTemplate = REVISION_PROMPT, **kwargs: Any, ) -> "ConstitutionalChain": """Create a chain from an LLM.""" critique_chain = LLMChain(llm=llm, prompt=critique_prompt) revision_chain = LLMChain(llm=llm, prompt=revision_prompt) return cls( chain=chain, critique_chain=critique_chain, revision_chain=revision_chain, **kwargs, ) @property def input_keys(self) -> List[str]: """Defines the input keys.""" return self.chain.input_keys @property def output_keys(self) -> List[str]: """Defines the output keys.""" if self.return_intermediate_steps: return ["output", "critiques_and_revisions", "initial_output"] return ["output"] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() response = self.chain.run( **inputs, callbacks=_run_manager.get_child("original"), ) initial_response = response input_prompt = self.chain.prompt.format(**inputs) _run_manager.on_text( text="Initial response: " + response + "\n\n", verbose=self.verbose, color="yellow", ) critiques_and_revisions = [] for constitutional_principle in self.constitutional_principles: # Do critique raw_critique = self.critique_chain.run( input_prompt=input_prompt, output_from_model=response, critique_request=constitutional_principle.critique_request, callbacks=_run_manager.get_child("critique"), ) critique = self._parse_critique( output_string=raw_critique, ).strip() # if the critique contains "No critique needed", then we're done # in this case, initial_output is the same as output, # but 
we'll keep it for consistency if "no critique needed" in critique.lower(): critiques_and_revisions.append((critique, "")) continue # Do revision revision = self.revision_chain.run( input_prompt=input_prompt, output_from_model=response, critique_request=constitutional_principle.critique_request, critique=critique, revision_request=constitutional_principle.revision_request, callbacks=_run_manager.get_child("revision"), ).strip() response = revision critiques_and_revisions.append((critique, revision)) _run_manager.on_text( text=f"Applying {constitutional_principle.name}..." + "\n\n", verbose=self.verbose, color="green", ) _run_manager.on_text( text="Critique: " + critique + "\n\n", verbose=self.verbose, color="blue", ) _run_manager.on_text( text="Updated response: " + revision + "\n\n", verbose=self.verbose, color="yellow", ) final_output: Dict[str, Any] = {"output": response} if self.return_intermediate_steps: final_output["initial_output"] = initial_response final_output["critiques_and_revisions"] = critiques_and_revisions return final_output @staticmethod def _parse_critique(output_string: str) -> str: if "Revision request:" not in output_string: return output_string output_string = output_string.split("Revision request:")[0] if "\n\n" in output_string: output_string = output_string.split("\n\n")[0] return output_string
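To complement the docstring example above, this sketch pulls a built-in principle by name instead of writing one by hand. The ``"illegal"`` key is assumed to be registered in ``PRINCIPLES``; calling ``ConstitutionalChain.get_principles()`` with no arguments returns every registered principle, which is a safe way to inspect the available names.

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate
    from langchain.chains import LLMChain, ConstitutionalChain

    llm = OpenAI(temperature=0)
    qa_chain = LLMChain(
        llm=llm,
        prompt=PromptTemplate(template="Q: {question} A:", input_variables=["question"]),
    )

    # "illegal" is assumed to be one of the registered principle names.
    principles = ConstitutionalChain.get_principles(["illegal"])

    chain = ConstitutionalChain.from_llm(
        llm=llm,
        chain=qa_chain,
        constitutional_principles=principles,
        return_intermediate_steps=True,
    )
    result = chain({"question": "How can I get away with speeding?"})
    result["output"], result["critiques_and_revisions"]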
https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html
Source code for langchain.chains.conversation.base """Chain that carries on a conversation and calls an LLM.""" from typing import Dict, List from pydantic import Extra, Field, root_validator from langchain.chains.conversation.prompt import PROMPT from langchain.chains.llm import LLMChain from langchain.memory.buffer import ConversationBufferMemory from langchain.prompts.base import BasePromptTemplate from langchain.schema import BaseMemory [docs]class ConversationChain(LLMChain): """Chain to have a conversation and load context from memory. Example: .. code-block:: python from langchain import ConversationChain, OpenAI conversation = ConversationChain(llm=OpenAI()) """ memory: BaseMemory = Field(default_factory=ConversationBufferMemory) """Default memory store.""" prompt: BasePromptTemplate = PROMPT """Default conversation prompt to use.""" input_key: str = "input" #: :meta private: output_key: str = "response" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Use this since so some prompt vars come from history.""" return [self.input_key] @root_validator() def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" memory_keys = values["memory"].memory_variables input_key = values["input_key"] if input_key in memory_keys: raise ValueError( f"The input key {input_key} was also found in the memory keys " f"({memory_keys}) - please provide keys that don't overlap." ) prompt_variables = values["prompt"].input_variables expected_keys = memory_keys + [input_key] if set(expected_keys) != set(prompt_variables): raise ValueError( "Got unexpected prompt input variables. The prompt expects " f"{prompt_variables}, but got {memory_keys} as inputs from " f"memory, and {input_key} as the normal input key." ) return values
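A minimal sketch showing the default buffer memory carrying context between turns.

.. code-block:: python

    from langchain import ConversationChain
    from langchain.llms import OpenAI
    from langchain.memory import ConversationBufferMemory

    conversation = ConversationChain(
        llm=OpenAI(temperature=0),
        memory=ConversationBufferMemory(),
        verbose=True,
    )

    conversation.predict(input="Hi, my name is Andrew.")
    conversation.predict(input="What is my name?")  # answered from memory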
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html
Source code for langchain.chains.qa_with_sources.retrieval """Question-answering with sources over an index.""" from typing import Any, Dict, List from pydantic import Field from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain from langchain.docstore.document import Document from langchain.schema import BaseRetriever [docs]class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain): """Question-answering with sources over an index.""" retriever: BaseRetriever = Field(exclude=True) """Index to connect to.""" reduce_k_below_max_tokens: bool = False """Reduce the number of results to return from store based on tokens limit""" max_tokens_limit: int = 3375 """Restrict the docs to return from store based on tokens, enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true""" def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]: num_docs = len(docs) if self.reduce_k_below_max_tokens and isinstance( self.combine_documents_chain, StuffDocumentsChain ): tokens = [ self.combine_documents_chain.llm_chain.llm.get_num_tokens( doc.page_content ) for doc in docs ] token_count = sum(tokens[:num_docs]) while token_count > self.max_tokens_limit: num_docs -= 1 token_count -= tokens[num_docs] return docs[:num_docs] def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]: question = inputs[self.question_key] docs = self.retriever.get_relevant_documents(question) return self._reduce_tokens_below_limit(docs) async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]: question = inputs[self.question_key] docs = await self.retriever.aget_relevant_documents(question) return self._reduce_tokens_below_limit(docs)
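A usage sketch with an in-memory FAISS retriever; the sample texts are placeholders, and the ``source`` metadata on each text is what the chain surfaces under the ``sources`` output key.

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.chains import RetrievalQAWithSourcesChain
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    texts = ["Harrison worked at Kensho.", "Ankush worked at Facebook."]
    docsearch = FAISS.from_texts(
        texts,
        OpenAIEmbeddings(),
        metadatas=[{"source": f"doc-{i}"} for i in range(len(texts))],
    )

    chain = RetrievalQAWithSourcesChain.from_chain_type(
        OpenAI(temperature=0),
        chain_type="stuff",
        retriever=docsearch.as_retriever(),
    )
    chain({"question": "Where did Harrison work?"}, return_only_outputs=True)
    # -> {"answer": "...", "sources": "..."}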
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/retrieval.html
Source code for langchain.chains.qa_with_sources.base """Question answering with sources over documents.""" from __future__ import annotations import re from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain from langchain.chains.qa_with_sources.map_reduce_prompt import ( COMBINE_PROMPT, EXAMPLE_PROMPT, QUESTION_PROMPT, ) from langchain.docstore.document import Document from langchain.prompts.base import BasePromptTemplate class BaseQAWithSourcesChain(Chain, ABC): """Question answering with sources over documents.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" question_key: str = "question" #: :meta private: input_docs_key: str = "docs" #: :meta private: answer_key: str = "answer" #: :meta private: sources_answer_key: str = "sources" #: :meta private: return_source_documents: bool = False """Return the source documents.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, document_prompt: BasePromptTemplate = EXAMPLE_PROMPT, question_prompt: BasePromptTemplate = QUESTION_PROMPT, combine_prompt: BasePromptTemplate = COMBINE_PROMPT, **kwargs: Any, ) -> BaseQAWithSourcesChain: """Construct the chain from an LLM.""" llm_question_chain = LLMChain(llm=llm, prompt=question_prompt) llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt) combine_results_chain = StuffDocumentsChain( llm_chain=llm_combine_chain, document_prompt=document_prompt, document_variable_name="summaries", ) combine_document_chain = MapReduceDocumentsChain( llm_chain=llm_question_chain, combine_document_chain=combine_results_chain, document_variable_name="context", ) return cls( combine_documents_chain=combine_document_chain, **kwargs, ) @classmethod def from_chain_type( cls, llm: BaseLanguageModel, chain_type: str = "stuff", chain_type_kwargs: Optional[dict] = None, **kwargs: Any, ) -> BaseQAWithSourcesChain: """Load chain from chain type.""" _chain_kwargs = chain_type_kwargs or {} combine_document_chain = load_qa_with_sources_chain( llm, chain_type=chain_type, **_chain_kwargs ) return cls(combine_documents_chain=combine_document_chain, **kwargs) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.question_key] @property def output_keys(self) -> List[str]: """Return output key. 
:meta private: """ _output_keys = [self.answer_key, self.sources_answer_key] if self.return_source_documents: _output_keys = _output_keys + ["source_documents"] return _output_keys @root_validator(pre=True) def validate_naming(cls, values: Dict) -> Dict: """Fix backwards compatability in naming.""" if "combine_document_chain" in values: values["combine_documents_chain"] = values.pop("combine_document_chain") return values @abstractmethod def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]: """Get docs to run questioning over.""" def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() docs = self._get_docs(inputs) answer = self.combine_documents_chain.run( input_documents=docs, callbacks=_run_manager.get_child(), **inputs ) if re.search(r"SOURCES:\s", answer): answer, sources = re.split(r"SOURCES:\s", answer) else: sources = "" result: Dict[str, Any] = { self.answer_key: answer, self.sources_answer_key: sources, } if self.return_source_documents: result["source_documents"] = docs return result @abstractmethod async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]: """Get docs to run questioning over.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() docs = await self._aget_docs(inputs) answer = await self.combine_documents_chain.arun( input_documents=docs, callbacks=_run_manager.get_child(), **inputs ) if re.search(r"SOURCES:\s", answer): answer, sources = re.split(r"SOURCES:\s", answer) else: sources = "" result: Dict[str, Any] = { self.answer_key: answer, self.sources_answer_key: sources, } if self.return_source_documents: result["source_documents"] = docs return result [docs]class QAWithSourcesChain(BaseQAWithSourcesChain): """Question answering with sources over documents.""" input_docs_key: str = "docs" #: :meta private: @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_docs_key, self.question_key] def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]: return inputs.pop(self.input_docs_key) async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]: return inputs.pop(self.input_docs_key) @property def _chain_type(self) -> str: return "qa_with_sources_chain"
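Where the documents are already in hand, ``QAWithSourcesChain`` takes them directly under the ``docs`` key instead of retrieving them; a minimal sketch with placeholder documents follows.

.. code-block:: python

    from langchain.llms import OpenAI
    from langchain.chains import QAWithSourcesChain
    from langchain.docstore.document import Document

    docs = [
        Document(page_content="Harrison worked at Kensho.", metadata={"source": "hr-1"}),
        Document(page_content="Ankush worked at Facebook.", metadata={"source": "hr-2"}),
    ]

    chain = QAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type="stuff")
    chain(
        {"docs": docs, "question": "Where did Harrison work?"},
        return_only_outputs=True,
    )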
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html
Source code for langchain.chains.qa_with_sources.vector_db """Question-answering with sources over a vector database.""" import warnings from typing import Any, Dict, List from pydantic import Field, root_validator from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain from langchain.docstore.document import Document from langchain.vectorstores.base import VectorStore [docs]class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain): """Question-answering with sources over a vector database.""" vectorstore: VectorStore = Field(exclude=True) """Vector Database to connect to.""" k: int = 4 """Number of results to return from store""" reduce_k_below_max_tokens: bool = False """Reduce the number of results to return from store based on tokens limit""" max_tokens_limit: int = 3375 """Restrict the docs to return from store based on tokens, enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true""" search_kwargs: Dict[str, Any] = Field(default_factory=dict) """Extra search args.""" def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]: num_docs = len(docs) if self.reduce_k_below_max_tokens and isinstance( self.combine_documents_chain, StuffDocumentsChain ): tokens = [ self.combine_documents_chain.llm_chain.llm.get_num_tokens( doc.page_content ) for doc in docs ] token_count = sum(tokens[:num_docs]) while token_count > self.max_tokens_limit: num_docs -= 1 token_count -= tokens[num_docs] return docs[:num_docs] def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]: question = inputs[self.question_key] docs = self.vectorstore.similarity_search( question, k=self.k, **self.search_kwargs ) return self._reduce_tokens_below_limit(docs) async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]: raise NotImplementedError("VectorDBQAWithSourcesChain does not support async") @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: warnings.warn( "`VectorDBQAWithSourcesChain` is deprecated - " "please use `from langchain.chains import RetrievalQAWithSourcesChain`" ) return values @property def _chain_type(self) -> str: return "vector_db_qa_with_sources_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/vector_db.html